diff --git a/CHANGELOG.md b/CHANGELOG.md index 7b159a4a19..ffae167eec 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,12 @@ +Release v1.44.16 (2022-05-17) +=== + +### Service Client Updates +* `service/glue`: Updates service API and documentation + * This release adds a new optional parameter called codeGenNodeConfiguration to CRUD job APIs that allows users to manage visual jobs via APIs. The updated CreateJob and UpdateJob will create jobs that can be viewed in Glue Studio as a visual graph. GetJob can be used to get codeGenNodeConfiguration. +* `service/kms`: Updates service documentation + * Add HMAC best practice tip, annual rotation of AWS managed keys. + Release v1.44.15 (2022-05-16) === diff --git a/aws/version.go b/aws/version.go index 8abaa84d8a..3da0a0194f 100644 --- a/aws/version.go +++ b/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.44.15" +const SDKVersion = "1.44.16" diff --git a/models/apis/glue/2017-03-31/api-2.json b/models/apis/glue/2017-03-31/api-2.json index 29127f8a90..e2fbc0cbaa 100644 --- a/models/apis/glue/2017-03-31/api-2.json +++ b/models/apis/glue/2017-03-31/api-2.json @@ -2883,11 +2883,68 @@ "type":"list", "member":{"shape":"Action"} }, + "AdditionalOptions":{ + "type":"map", + "key":{"shape":"EnclosedInStringProperty"}, + "value":{"shape":"EnclosedInStringProperty"} + }, "AdditionalPlanOptionsMap":{ "type":"map", "key":{"shape":"GenericString"}, "value":{"shape":"GenericString"} }, + "AggFunction":{ + "type":"string", + "enum":[ + "avg", + "countDistinct", + "count", + "first", + "last", + "kurtosis", + "max", + "min", + "skewness", + "stddev_samp", + "stddev_pop", + "sum", + "sumDistinct", + "var_samp", + "var_pop" + ] + }, + "Aggregate":{ + "type":"structure", + "required":[ + "Name", + "Inputs", + "Groups", + "Aggs" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Inputs":{"shape":"OneInput"}, + "Groups":{"shape":"GlueStudioPathList"}, + "Aggs":{"shape":"AggregateOperations"} + } + }, + "AggregateOperation":{ + "type":"structure", + "required":[ + "Column", + "AggFunc" + ], + "members":{ + "Column":{"shape":"EnclosedInStringProperties"}, + "AggFunc":{"shape":"AggFunction"} + } + }, + "AggregateOperations":{ + "type":"list", + "member":{"shape":"AggregateOperation"}, + "max":30, + "min":1 + }, "AlreadyExistsException":{ "type":"structure", "members":{ @@ -2895,6 +2952,38 @@ }, "exception":true }, + "ApplyMapping":{ + "type":"structure", + "required":[ + "Name", + "Inputs", + "Mapping" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Inputs":{"shape":"OneInput"}, + "Mapping":{"shape":"Mappings"} + } + }, + "AthenaConnectorSource":{ + "type":"structure", + "required":[ + "Name", + "ConnectionName", + "ConnectorName", + "ConnectionType", + "SchemaName" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "ConnectionName":{"shape":"EnclosedInStringProperty"}, + "ConnectorName":{"shape":"EnclosedInStringProperty"}, + "ConnectionType":{"shape":"EnclosedInStringProperty"}, + "ConnectionTable":{"shape":"EnclosedInStringPropertyWithQuote"}, + "SchemaName":{"shape":"EnclosedInStringProperty"}, + "OutputSchemas":{"shape":"GlueSchemas"} + } + }, "AttemptCount":{"type":"integer"}, "AuditColumnNamesList":{ "type":"list", @@ -2938,6 +3027,21 @@ "type":"list", "member":{"shape":"BackfillError"} }, + "BasicCatalogTarget":{ + "type":"structure", + "required":[ + "Name", + "Inputs", + "Database", + "Table" + ], + "members":{ + "Name":{"shape":"NodeName"}, + 
"Inputs":{"shape":"OneInput"}, + "Database":{"shape":"EnclosedInStringProperty"}, + "Table":{"shape":"EnclosedInStringProperty"} + } + }, "BatchCreatePartitionRequest":{ "type":"structure", "required":[ @@ -3403,6 +3507,35 @@ "max":100, "min":0 }, + "BoxedBoolean":{ + "type":"boolean", + "box":true + }, + "BoxedDoubleFraction":{ + "type":"double", + "box":true, + "max":1, + "min":0 + }, + "BoxedLong":{ + "type":"long", + "box":true + }, + "BoxedNonNegativeInt":{ + "type":"integer", + "box":true, + "min":0 + }, + "BoxedNonNegativeLong":{ + "type":"long", + "box":true, + "min":0 + }, + "BoxedPositiveInt":{ + "type":"integer", + "box":true, + "min":0 + }, "CancelMLTaskRunRequest":{ "type":"structure", "required":[ @@ -3481,6 +3614,69 @@ "ImportedBy":{"shape":"NameString"} } }, + "CatalogKafkaSource":{ + "type":"structure", + "required":[ + "Name", + "Table", + "Database" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "WindowSize":{ + "shape":"BoxedPositiveInt", + "box":true + }, + "DetectSchema":{ + "shape":"BoxedBoolean", + "box":true + }, + "Table":{"shape":"EnclosedInStringProperty"}, + "Database":{"shape":"EnclosedInStringProperty"}, + "StreamingOptions":{"shape":"KafkaStreamingSourceOptions"}, + "DataPreviewOptions":{"shape":"StreamingDataPreviewOptions"} + } + }, + "CatalogKinesisSource":{ + "type":"structure", + "required":[ + "Name", + "Table", + "Database" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "WindowSize":{"shape":"BoxedPositiveInt"}, + "DetectSchema":{ + "shape":"BoxedBoolean", + "box":true + }, + "Table":{"shape":"EnclosedInStringProperty"}, + "Database":{"shape":"EnclosedInStringProperty"}, + "StreamingOptions":{"shape":"KinesisStreamingSourceOptions"}, + "DataPreviewOptions":{"shape":"StreamingDataPreviewOptions"} + } + }, + "CatalogSchemaChangePolicy":{ + "type":"structure", + "members":{ + "EnableUpdateCatalog":{"shape":"BoxedBoolean"}, + "UpdateBehavior":{"shape":"UpdateCatalogBehavior"} + } + }, + "CatalogSource":{ + "type":"structure", + "required":[ + "Name", + "Database", + "Table" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Database":{"shape":"EnclosedInStringProperty"}, + "Table":{"shape":"EnclosedInStringProperty"} + } + }, "CatalogTablesList":{ "type":"list", "member":{"shape":"NameString"}, @@ -3554,6 +3750,67 @@ }, "CodeGenArgName":{"type":"string"}, "CodeGenArgValue":{"type":"string"}, + "CodeGenConfigurationNode":{ + "type":"structure", + "members":{ + "AthenaConnectorSource":{"shape":"AthenaConnectorSource"}, + "JDBCConnectorSource":{"shape":"JDBCConnectorSource"}, + "SparkConnectorSource":{"shape":"SparkConnectorSource"}, + "CatalogSource":{"shape":"CatalogSource"}, + "RedshiftSource":{"shape":"RedshiftSource"}, + "S3CatalogSource":{"shape":"S3CatalogSource"}, + "S3CsvSource":{"shape":"S3CsvSource"}, + "S3JsonSource":{"shape":"S3JsonSource"}, + "S3ParquetSource":{"shape":"S3ParquetSource"}, + "RelationalCatalogSource":{"shape":"RelationalCatalogSource"}, + "DynamoDBCatalogSource":{"shape":"DynamoDBCatalogSource"}, + "JDBCConnectorTarget":{"shape":"JDBCConnectorTarget"}, + "SparkConnectorTarget":{"shape":"SparkConnectorTarget"}, + "CatalogTarget":{"shape":"BasicCatalogTarget"}, + "RedshiftTarget":{"shape":"RedshiftTarget"}, + "S3CatalogTarget":{"shape":"S3CatalogTarget"}, + "S3GlueParquetTarget":{"shape":"S3GlueParquetTarget"}, + "S3DirectTarget":{"shape":"S3DirectTarget"}, + "ApplyMapping":{"shape":"ApplyMapping"}, + "SelectFields":{"shape":"SelectFields"}, + "DropFields":{"shape":"DropFields"}, + 
"RenameField":{"shape":"RenameField"}, + "Spigot":{"shape":"Spigot"}, + "Join":{"shape":"Join"}, + "SplitFields":{"shape":"SplitFields"}, + "SelectFromCollection":{"shape":"SelectFromCollection"}, + "FillMissingValues":{"shape":"FillMissingValues"}, + "Filter":{"shape":"Filter"}, + "CustomCode":{"shape":"CustomCode"}, + "SparkSQL":{"shape":"SparkSQL"}, + "DirectKinesisSource":{"shape":"DirectKinesisSource"}, + "DirectKafkaSource":{"shape":"DirectKafkaSource"}, + "CatalogKinesisSource":{"shape":"CatalogKinesisSource"}, + "CatalogKafkaSource":{"shape":"CatalogKafkaSource"}, + "DropNullFields":{"shape":"DropNullFields"}, + "Merge":{"shape":"Merge"}, + "Union":{"shape":"Union"}, + "PIIDetection":{"shape":"PIIDetection"}, + "Aggregate":{"shape":"Aggregate"}, + "DropDuplicates":{"shape":"DropDuplicates"}, + "GovernedCatalogTarget":{"shape":"GovernedCatalogTarget"}, + "GovernedCatalogSource":{"shape":"GovernedCatalogSource"}, + "MicrosoftSQLServerCatalogSource":{"shape":"MicrosoftSQLServerCatalogSource"}, + "MySQLCatalogSource":{"shape":"MySQLCatalogSource"}, + "OracleSQLCatalogSource":{"shape":"OracleSQLCatalogSource"}, + "PostgreSQLCatalogSource":{"shape":"PostgreSQLCatalogSource"}, + "MicrosoftSQLServerCatalogTarget":{"shape":"MicrosoftSQLServerCatalogTarget"}, + "MySQLCatalogTarget":{"shape":"MySQLCatalogTarget"}, + "OracleSQLCatalogTarget":{"shape":"OracleSQLCatalogTarget"}, + "PostgreSQLCatalogTarget":{"shape":"PostgreSQLCatalogTarget"} + } + }, + "CodeGenConfigurationNodes":{ + "type":"map", + "key":{"shape":"NodeId"}, + "value":{"shape":"CodeGenConfigurationNode"}, + "sensitive":true + }, "CodeGenEdge":{ "type":"structure", "required":[ @@ -3756,6 +4013,13 @@ "FULL_ALL" ] }, + "CompressionType":{ + "type":"string", + "enum":[ + "gzip", + "bzip2" + ] + }, "ConcurrentModificationException":{ "type":"structure", "members":{ @@ -4265,7 +4529,8 @@ "NotificationProperty":{"shape":"NotificationProperty"}, "GlueVersion":{"shape":"GlueVersionString"}, "NumberOfWorkers":{"shape":"NullableInteger"}, - "WorkerType":{"shape":"WorkerType"} + "WorkerType":{"shape":"WorkerType"}, + "CodeGenConfigurationNodes":{"shape":"CodeGenConfigurationNodes"} } }, "CreateJobResponse":{ @@ -4603,6 +4868,22 @@ "min":1, "pattern":"[^\\r\\n]" }, + "CustomCode":{ + "type":"structure", + "required":[ + "Name", + "Inputs", + "Code", + "ClassName" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Inputs":{"shape":"ManyInputs"}, + "Code":{"shape":"ExtendedString"}, + "ClassName":{"shape":"EnclosedInStringProperty"}, + "OutputSchemas":{"shape":"GlueSchemas"} + } + }, "CustomEntityType":{ "type":"structure", "required":[ @@ -4703,6 +4984,17 @@ "member":{"shape":"Database"} }, "DatabaseName":{"type":"string"}, + "Datatype":{ + "type":"structure", + "required":[ + "Id", + "Label" + ], + "members":{ + "Id":{"shape":"GenericLimitedString"}, + "Label":{"shape":"GenericLimitedString"} + } + }, "DateColumnStatisticsData":{ "type":"structure", "required":[ @@ -5186,6 +5478,49 @@ "max":25, "min":1 }, + "DirectKafkaSource":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{"shape":"NodeName"}, + "StreamingOptions":{"shape":"KafkaStreamingSourceOptions"}, + "WindowSize":{ + "shape":"BoxedPositiveInt", + "box":true + }, + "DetectSchema":{ + "shape":"BoxedBoolean", + "box":true + }, + "DataPreviewOptions":{"shape":"StreamingDataPreviewOptions"} + } + }, + "DirectKinesisSource":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{"shape":"NodeName"}, + "WindowSize":{ + "shape":"BoxedPositiveInt", + 
"box":true + }, + "DetectSchema":{ + "shape":"BoxedBoolean", + "box":true + }, + "StreamingOptions":{"shape":"KinesisStreamingSourceOptions"}, + "DataPreviewOptions":{"shape":"StreamingDataPreviewOptions"} + } + }, + "DirectSchemaChangePolicy":{ + "type":"structure", + "members":{ + "EnableUpdateCatalog":{"shape":"BoxedBoolean"}, + "UpdateBehavior":{"shape":"UpdateCatalogBehavior"}, + "Table":{"shape":"EnclosedInStringProperty"}, + "Database":{"shape":"EnclosedInStringProperty"} + } + }, "Double":{"type":"double"}, "DoubleColumnStatisticsData":{ "type":"structure", @@ -5201,6 +5536,57 @@ } }, "DoubleValue":{"type":"double"}, + "DropDuplicates":{ + "type":"structure", + "required":[ + "Name", + "Inputs" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Inputs":{"shape":"OneInput"}, + "Columns":{"shape":"LimitedPathList"} + } + }, + "DropFields":{ + "type":"structure", + "required":[ + "Name", + "Inputs", + "Paths" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Inputs":{"shape":"OneInput"}, + "Paths":{"shape":"GlueStudioPathList"} + } + }, + "DropNullFields":{ + "type":"structure", + "required":[ + "Name", + "Inputs" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Inputs":{"shape":"OneInput"}, + "NullCheckBoxList":{"shape":"NullCheckBoxList"}, + "NullTextList":{"shape":"NullValueFields"} + } + }, + "DynamoDBCatalogSource":{ + "type":"structure", + "required":[ + "Name", + "Database", + "Table" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Database":{"shape":"EnclosedInStringProperty"}, + "Table":{"shape":"EnclosedInStringProperty"} + } + }, "DynamoDBTarget":{ "type":"structure", "members":{ @@ -5231,6 +5617,22 @@ "FALSE" ] }, + "EnclosedInStringProperties":{ + "type":"list", + "member":{"shape":"EnclosedInStringProperty"} + }, + "EnclosedInStringPropertiesMinOne":{ + "type":"list", + "member":{"shape":"EnclosedInStringProperty"} + }, + "EnclosedInStringProperty":{ + "type":"string", + "pattern":"([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*" + }, + "EnclosedInStringPropertyWithQuote":{ + "type":"string", + "pattern":"([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n])*" + }, "EncryptionAtRest":{ "type":"structure", "required":["CatalogEncryptionMode"], @@ -5314,13 +5716,103 @@ "OutputS3Path":{"shape":"UriString"} } }, + "ExtendedString":{ + "type":"string", + "pattern":"[\\s\\S]*" + }, "FieldType":{"type":"string"}, + "FillMissingValues":{ + "type":"structure", + "required":[ + "Name", + "Inputs", + "ImputedPath" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Inputs":{"shape":"OneInput"}, + "ImputedPath":{"shape":"EnclosedInStringProperty"}, + "FilledPath":{"shape":"EnclosedInStringProperty"} + } + }, + "Filter":{ + "type":"structure", + "required":[ + "Name", + "Inputs", + "LogicalOperator", + "Filters" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Inputs":{"shape":"OneInput"}, + "LogicalOperator":{"shape":"FilterLogicalOperator"}, + "Filters":{"shape":"FilterExpressions"} + } + }, + "FilterExpression":{ + "type":"structure", + "required":[ + "Operation", + "Values" + ], + "members":{ + "Operation":{"shape":"FilterOperation"}, + "Negated":{"shape":"BoxedBoolean"}, + "Values":{"shape":"FilterValues"} + } + }, + "FilterExpressions":{ + "type":"list", + "member":{"shape":"FilterExpression"} + }, + "FilterLogicalOperator":{ + "type":"string", + "enum":[ + "AND", + "OR" + ] + }, + "FilterOperation":{ + "type":"string", + "enum":[ + "EQ", + "LT", + "GT", + "LTE", + "GTE", + "REGEX", + "ISNULL" + ] + }, 
"FilterString":{ "type":"string", "max":2048, "min":0, "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*" }, + "FilterValue":{ + "type":"structure", + "required":[ + "Type", + "Value" + ], + "members":{ + "Type":{"shape":"FilterValueType"}, + "Value":{"shape":"EnclosedInStringProperties"} + } + }, + "FilterValueType":{ + "type":"string", + "enum":[ + "COLUMNEXTRACTED", + "CONSTANT" + ] + }, + "FilterValues":{ + "type":"list", + "member":{"shape":"FilterValue"} + }, "FindMatchesMetrics":{ "type":"structure", "members":{ @@ -5365,6 +5857,10 @@ "max":1.0, "min":0.0 }, + "GenericLimitedString":{ + "type":"string", + "pattern":"[A-Za-z0-9_-]*" + }, "GenericMap":{ "type":"map", "key":{"shape":"GenericString"}, @@ -6476,12 +6972,59 @@ "UpdateTime":{"shape":"Timestamp"} } }, + "GlueRecordType":{ + "type":"string", + "enum":[ + "DATE", + "STRING", + "TIMESTAMP", + "INT", + "FLOAT", + "LONG", + "BIGDECIMAL", + "BYTE", + "SHORT", + "DOUBLE" + ] + }, "GlueResourceArn":{ "type":"string", "max":10240, "min":1, "pattern":"arn:(aws|aws-us-gov|aws-cn):glue:.*" }, + "GlueSchema":{ + "type":"structure", + "members":{ + "Columns":{"shape":"GlueStudioSchemaColumnList"} + } + }, + "GlueSchemas":{ + "type":"list", + "member":{"shape":"GlueSchema"} + }, + "GlueStudioColumnNameString":{ + "type":"string", + "max":1024, + "min":0, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*" + }, + "GlueStudioPathList":{ + "type":"list", + "member":{"shape":"EnclosedInStringProperties"} + }, + "GlueStudioSchemaColumn":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{"shape":"GlueStudioColumnNameString"}, + "Type":{"shape":"ColumnTypeString"} + } + }, + "GlueStudioSchemaColumnList":{ + "type":"list", + "member":{"shape":"GlueStudioSchemaColumn"} + }, "GlueTable":{ "type":"structure", "required":[ @@ -6507,12 +7050,44 @@ "min":1, "pattern":"^\\w+\\.\\w+$" }, - "GrokClassifier":{ + "GovernedCatalogSource":{ "type":"structure", "required":[ "Name", - "Classification", - "GrokPattern" + "Database", + "Table" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Database":{"shape":"EnclosedInStringProperty"}, + "Table":{"shape":"EnclosedInStringProperty"}, + "PartitionPredicate":{"shape":"EnclosedInStringProperty"}, + "AdditionalOptions":{"shape":"S3SourceAdditionalOptions"} + } + }, + "GovernedCatalogTarget":{ + "type":"structure", + "required":[ + "Name", + "Inputs", + "Table", + "Database" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Inputs":{"shape":"OneInput"}, + "PartitionKeys":{"shape":"GlueStudioPathList"}, + "Table":{"shape":"EnclosedInStringProperty"}, + "Database":{"shape":"EnclosedInStringProperty"}, + "SchemaChangePolicy":{"shape":"CatalogSchemaChangePolicy"} + } + }, + "GrokClassifier":{ + "type":"structure", + "required":[ + "Name", + "Classification", + "GrokPattern" ], "members":{ "Name":{"shape":"NameString"}, @@ -6618,6 +7193,108 @@ "exception":true }, "IsVersionValid":{"type":"boolean"}, + "JDBCConnectorOptions":{ + "type":"structure", + "members":{ + "FilterPredicate":{"shape":"EnclosedInStringProperty"}, + "PartitionColumn":{"shape":"EnclosedInStringProperty"}, + "LowerBound":{"shape":"BoxedNonNegativeLong"}, + "UpperBound":{"shape":"BoxedNonNegativeLong"}, + "NumPartitions":{"shape":"BoxedNonNegativeLong"}, + "JobBookmarkKeys":{"shape":"EnclosedInStringProperties"}, + "JobBookmarkKeysSortOrder":{"shape":"EnclosedInStringProperty"}, + "DataTypeMapping":{"shape":"JDBCDataTypeMapping"} + } + }, + "JDBCConnectorSource":{ + 
"type":"structure", + "required":[ + "Name", + "ConnectionName", + "ConnectorName", + "ConnectionType" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "ConnectionName":{"shape":"EnclosedInStringProperty"}, + "ConnectorName":{"shape":"EnclosedInStringProperty"}, + "ConnectionType":{"shape":"EnclosedInStringProperty"}, + "AdditionalOptions":{"shape":"JDBCConnectorOptions"}, + "ConnectionTable":{"shape":"EnclosedInStringPropertyWithQuote"}, + "Query":{"shape":"SqlQuery"}, + "OutputSchemas":{"shape":"GlueSchemas"} + } + }, + "JDBCConnectorTarget":{ + "type":"structure", + "required":[ + "Name", + "Inputs", + "ConnectionName", + "ConnectionTable", + "ConnectorName", + "ConnectionType" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Inputs":{"shape":"OneInput"}, + "ConnectionName":{"shape":"EnclosedInStringProperty"}, + "ConnectionTable":{"shape":"EnclosedInStringPropertyWithQuote"}, + "ConnectorName":{"shape":"EnclosedInStringProperty"}, + "ConnectionType":{"shape":"EnclosedInStringProperty"}, + "AdditionalOptions":{"shape":"AdditionalOptions"}, + "OutputSchemas":{"shape":"GlueSchemas"} + } + }, + "JDBCDataType":{ + "type":"string", + "enum":[ + "ARRAY", + "BIGINT", + "BINARY", + "BIT", + "BLOB", + "BOOLEAN", + "CHAR", + "CLOB", + "DATALINK", + "DATE", + "DECIMAL", + "DISTINCT", + "DOUBLE", + "FLOAT", + "INTEGER", + "JAVA_OBJECT", + "LONGNVARCHAR", + "LONGVARBINARY", + "LONGVARCHAR", + "NCHAR", + "NCLOB", + "NULL", + "NUMERIC", + "NVARCHAR", + "OTHER", + "REAL", + "REF", + "REF_CURSOR", + "ROWID", + "SMALLINT", + "SQLXML", + "STRUCT", + "TIME", + "TIME_WITH_TIMEZONE", + "TIMESTAMP", + "TIMESTAMP_WITH_TIMEZONE", + "TINYINT", + "VARBINARY", + "VARCHAR" + ] + }, + "JDBCDataTypeMapping":{ + "type":"map", + "key":{"shape":"JDBCDataType"}, + "value":{"shape":"GlueRecordType"} + }, "JdbcTarget":{ "type":"structure", "members":{ @@ -6656,7 +7333,8 @@ "NumberOfWorkers":{"shape":"NullableInteger"}, "SecurityConfiguration":{"shape":"NameString"}, "NotificationProperty":{"shape":"NotificationProperty"}, - "GlueVersion":{"shape":"GlueVersionString"} + "GlueVersion":{"shape":"GlueVersionString"}, + "CodeGenConfigurationNodes":{"shape":"CodeGenConfigurationNodes"} } }, "JobBookmarkEntry":{ @@ -6779,9 +7457,53 @@ "NumberOfWorkers":{"shape":"NullableInteger"}, "SecurityConfiguration":{"shape":"NameString"}, "NotificationProperty":{"shape":"NotificationProperty"}, - "GlueVersion":{"shape":"GlueVersionString"} + "GlueVersion":{"shape":"GlueVersionString"}, + "CodeGenConfigurationNodes":{"shape":"CodeGenConfigurationNodes"} } }, + "Join":{ + "type":"structure", + "required":[ + "Name", + "Inputs", + "JoinType", + "Columns" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Inputs":{"shape":"TwoInputs"}, + "JoinType":{"shape":"JoinType"}, + "Columns":{"shape":"JoinColumns"} + } + }, + "JoinColumn":{ + "type":"structure", + "required":[ + "From", + "Keys" + ], + "members":{ + "From":{"shape":"EnclosedInStringProperty"}, + "Keys":{"shape":"GlueStudioPathList"} + } + }, + "JoinColumns":{ + "type":"list", + "member":{"shape":"JoinColumn"}, + "max":2, + "min":2 + }, + "JoinType":{ + "type":"string", + "enum":[ + "equijoin", + "left", + "right", + "outer", + "leftsemi", + "leftanti" + ] + }, "JsonClassifier":{ "type":"structure", "required":[ @@ -6798,6 +7520,26 @@ }, "JsonPath":{"type":"string"}, "JsonValue":{"type":"string"}, + "KafkaStreamingSourceOptions":{ + "type":"structure", + "members":{ + "BootstrapServers":{"shape":"EnclosedInStringProperty"}, + 
"SecurityProtocol":{"shape":"EnclosedInStringProperty"}, + "ConnectionName":{"shape":"EnclosedInStringProperty"}, + "TopicName":{"shape":"EnclosedInStringProperty"}, + "Assign":{"shape":"EnclosedInStringProperty"}, + "SubscribePattern":{"shape":"EnclosedInStringProperty"}, + "Classification":{"shape":"EnclosedInStringProperty"}, + "Delimiter":{"shape":"EnclosedInStringProperty"}, + "StartingOffsets":{"shape":"EnclosedInStringProperty"}, + "EndingOffsets":{"shape":"EnclosedInStringProperty"}, + "PollTimeoutMs":{"shape":"BoxedNonNegativeLong"}, + "NumRetries":{"shape":"BoxedNonNegativeInt"}, + "RetryIntervalMs":{"shape":"BoxedNonNegativeLong"}, + "MaxOffsetsPerTrigger":{"shape":"BoxedNonNegativeLong"}, + "MinPartitions":{"shape":"BoxedNonNegativeInt"} + } + }, "KeyList":{ "type":"list", "member":{"shape":"NameString"}, @@ -6825,6 +7567,29 @@ "min":1, "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*" }, + "KinesisStreamingSourceOptions":{ + "type":"structure", + "members":{ + "EndpointUrl":{"shape":"EnclosedInStringProperty"}, + "StreamName":{"shape":"EnclosedInStringProperty"}, + "Classification":{"shape":"EnclosedInStringProperty"}, + "Delimiter":{"shape":"EnclosedInStringProperty"}, + "StartingPosition":{"shape":"StartingPosition"}, + "MaxFetchTimeInMs":{"shape":"BoxedNonNegativeLong"}, + "MaxFetchRecordsPerShard":{"shape":"BoxedNonNegativeLong"}, + "MaxRecordPerRead":{"shape":"BoxedNonNegativeLong"}, + "AddIdleTimeBetweenReads":{"shape":"BoxedBoolean"}, + "IdleTimeBetweenReadsInMs":{"shape":"BoxedNonNegativeLong"}, + "DescribeShardInterval":{"shape":"BoxedNonNegativeLong"}, + "NumRetries":{"shape":"BoxedNonNegativeInt"}, + "RetryIntervalMs":{"shape":"BoxedNonNegativeLong"}, + "MaxRetryIntervalMs":{"shape":"BoxedNonNegativeLong"}, + "AvoidEmptyBatches":{"shape":"BoxedBoolean"}, + "StreamArn":{"shape":"EnclosedInStringProperty"}, + "RoleArn":{"shape":"EnclosedInStringProperty"}, + "RoleSessionName":{"shape":"EnclosedInStringProperty"} + } + }, "KmsKeyArn":{ "type":"string", "pattern":"arn:aws:kms:.*" @@ -6880,6 +7645,14 @@ ] }, "LatestSchemaVersionBoolean":{"type":"boolean"}, + "LimitedPathList":{ + "type":"list", + "member":{"shape":"LimitedStringList"} + }, + "LimitedStringList":{ + "type":"list", + "member":{"shape":"GenericLimitedString"} + }, "LineageConfiguration":{ "type":"structure", "members":{ @@ -7192,6 +7965,11 @@ "SSE-KMS" ] }, + "ManyInputs":{ + "type":"list", + "member":{"shape":"NodeId"}, + "min":1 + }, "MapValue":{ "type":"map", "key":{"shape":"GenericString"}, @@ -7199,6 +7977,17 @@ "max":100, "min":0 }, + "Mapping":{ + "type":"structure", + "members":{ + "ToKey":{"shape":"EnclosedInStringProperty"}, + "FromPath":{"shape":"EnclosedInStringProperties"}, + "FromType":{"shape":"EnclosedInStringProperty"}, + "ToType":{"shape":"EnclosedInStringProperty"}, + "Dropped":{"shape":"BoxedBoolean"}, + "Children":{"shape":"Mappings"} + } + }, "MappingEntry":{ "type":"structure", "members":{ @@ -7214,6 +8003,16 @@ "type":"list", "member":{"shape":"MappingEntry"} }, + "Mappings":{ + "type":"list", + "member":{"shape":"Mapping"} + }, + "MaskValue":{ + "type":"string", + "max":256, + "min":0, + "pattern":"[*A-Za-z0-9_-]*" + }, "MatchCriteria":{ "type":"list", "member":{"shape":"NameString"}, @@ -7228,6 +8027,21 @@ "min":1 }, "MaxRetries":{"type":"integer"}, + "Merge":{ + "type":"structure", + "required":[ + "Name", + "Inputs", + "Source", + "PrimaryKeys" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Inputs":{"shape":"TwoInputs"}, + 
"Source":{"shape":"NodeId"}, + "PrimaryKeys":{"shape":"GlueStudioPathList"} + } + }, "MessagePrefix":{ "type":"string", "max":255, @@ -7271,6 +8085,34 @@ "min":1, "pattern":"[a-zA-Z0-9+-=._./@]+" }, + "MicrosoftSQLServerCatalogSource":{ + "type":"structure", + "required":[ + "Name", + "Database", + "Table" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Database":{"shape":"EnclosedInStringProperty"}, + "Table":{"shape":"EnclosedInStringProperty"} + } + }, + "MicrosoftSQLServerCatalogTarget":{ + "type":"structure", + "required":[ + "Name", + "Inputs", + "Database", + "Table" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Inputs":{"shape":"OneInput"}, + "Database":{"shape":"EnclosedInStringProperty"}, + "Table":{"shape":"EnclosedInStringProperty"} + } + }, "MillisecondsCount":{"type":"long"}, "MongoDBTarget":{ "type":"structure", @@ -7284,6 +8126,34 @@ "type":"list", "member":{"shape":"MongoDBTarget"} }, + "MySQLCatalogSource":{ + "type":"structure", + "required":[ + "Name", + "Database", + "Table" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Database":{"shape":"EnclosedInStringProperty"}, + "Table":{"shape":"EnclosedInStringProperty"} + } + }, + "MySQLCatalogTarget":{ + "type":"structure", + "required":[ + "Name", + "Inputs", + "Database", + "Table" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Inputs":{"shape":"OneInput"}, + "Database":{"shape":"EnclosedInStringProperty"}, + "Table":{"shape":"EnclosedInStringProperty"} + } + }, "NameString":{ "type":"string", "max":255, @@ -7312,6 +8182,10 @@ "CrawlerDetails":{"shape":"CrawlerNodeDetails"} } }, + "NodeId":{ + "type":"string", + "pattern":"[A-Za-z0-9_-]*" + }, "NodeIdList":{ "type":"list", "member":{"shape":"NameString"} @@ -7320,6 +8194,10 @@ "type":"list", "member":{"shape":"Node"} }, + "NodeName":{ + "type":"string", + "pattern":"([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*" + }, "NodeType":{ "type":"string", "enum":[ @@ -7332,6 +8210,10 @@ "type":"double", "min":0.0 }, + "NonNegativeInt":{ + "type":"integer", + "min":0 + }, "NonNegativeInteger":{ "type":"integer", "min":0 @@ -7351,6 +8233,31 @@ "box":true, "min":1 }, + "NullCheckBoxList":{ + "type":"structure", + "members":{ + "IsEmpty":{"shape":"BoxedBoolean"}, + "IsNullString":{"shape":"BoxedBoolean"}, + "IsNegOne":{"shape":"BoxedBoolean"} + } + }, + "NullValueField":{ + "type":"structure", + "required":[ + "Value", + "Datatype" + ], + "members":{ + "Value":{"shape":"EnclosedInStringProperty"}, + "Datatype":{"shape":"Datatype"} + } + }, + "NullValueFields":{ + "type":"list", + "member":{"shape":"NullValueField"}, + "max":50, + "min":0 + }, "NullableBoolean":{ "type":"boolean", "box":true @@ -7363,6 +8270,12 @@ "type":"integer", "box":true }, + "OneInput":{ + "type":"list", + "member":{"shape":"NodeId"}, + "max":1, + "min":1 + }, "OperationTimeoutException":{ "type":"structure", "members":{ @@ -7370,6 +8283,34 @@ }, "exception":true }, + "OracleSQLCatalogSource":{ + "type":"structure", + "required":[ + "Name", + "Database", + "Table" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Database":{"shape":"EnclosedInStringProperty"}, + "Table":{"shape":"EnclosedInStringProperty"} + } + }, + "OracleSQLCatalogTarget":{ + "type":"structure", + "required":[ + "Name", + "Inputs", + "Database", + "Table" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Inputs":{"shape":"OneInput"}, + "Database":{"shape":"EnclosedInStringProperty"}, + "Table":{"shape":"EnclosedInStringProperty"} + } + }, "OrchestrationArgumentsMap":{ "type":"map", 
"key":{"shape":"OrchestrationNameString"}, @@ -7445,6 +8386,25 @@ "CreatedTime":{"shape":"CreatedTimestamp"} } }, + "PIIDetection":{ + "type":"structure", + "required":[ + "Name", + "Inputs", + "PiiType", + "EntityTypesToDetect" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Inputs":{"shape":"OneInput"}, + "PiiType":{"shape":"PiiType"}, + "EntityTypesToDetect":{"shape":"EnclosedInStringProperties"}, + "OutputColumnName":{"shape":"EnclosedInStringProperty"}, + "SampleFraction":{"shape":"BoxedDoubleFraction"}, + "ThresholdFraction":{"shape":"BoxedDoubleFraction"}, + "MaskValue":{"shape":"MaskValue"} + } + }, "PageSize":{ "type":"integer", "box":true, @@ -7461,6 +8421,16 @@ "type":"string", "max":512000 }, + "ParquetCompressionType":{ + "type":"string", + "enum":[ + "snappy", + "lzo", + "gzip", + "uncompressed", + "none" + ] + }, "Partition":{ "type":"structure", "members":{ @@ -7607,10 +8577,57 @@ "AvailabilityZone":{"shape":"NameString"} } }, + "PiiType":{ + "type":"string", + "enum":[ + "RowAudit", + "RowMasking", + "ColumnAudit", + "ColumnMasking" + ] + }, "PolicyJsonString":{ "type":"string", "min":2 }, + "PollingTime":{ + "type":"long", + "box":true, + "min":10 + }, + "PositiveLong":{ + "type":"long", + "box":true, + "min":1 + }, + "PostgreSQLCatalogSource":{ + "type":"structure", + "required":[ + "Name", + "Database", + "Table" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Database":{"shape":"EnclosedInStringProperty"}, + "Table":{"shape":"EnclosedInStringProperty"} + } + }, + "PostgreSQLCatalogTarget":{ + "type":"structure", + "required":[ + "Name", + "Inputs", + "Database", + "Table" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Inputs":{"shape":"OneInput"}, + "Database":{"shape":"EnclosedInStringProperty"}, + "Table":{"shape":"EnclosedInStringProperty"} + } + }, "Predecessor":{ "type":"structure", "members":{ @@ -7654,6 +8671,12 @@ "GROUP" ] }, + "Prob":{ + "type":"double", + "box":true, + "max":1, + "min":0 + }, "PropertyPredicate":{ "type":"structure", "members":{ @@ -7767,6 +8790,15 @@ "NextToken":{"shape":"SchemaRegistryTokenString"} } }, + "QuoteChar":{ + "type":"string", + "enum":[ + "quote", + "quillemet", + "single_quote", + "disabled" + ] + }, "RecordsCount":{ "type":"long", "box":true @@ -7785,6 +8817,39 @@ "RecrawlBehavior":{"shape":"RecrawlBehavior"} } }, + "RedshiftSource":{ + "type":"structure", + "required":[ + "Name", + "Database", + "Table" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Database":{"shape":"EnclosedInStringProperty"}, + "Table":{"shape":"EnclosedInStringProperty"}, + "RedshiftTmpDir":{"shape":"EnclosedInStringProperty"}, + "TmpDirIAMRole":{"shape":"EnclosedInStringProperty"} + } + }, + "RedshiftTarget":{ + "type":"structure", + "required":[ + "Name", + "Inputs", + "Database", + "Table" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Inputs":{"shape":"OneInput"}, + "Database":{"shape":"EnclosedInStringProperty"}, + "Table":{"shape":"EnclosedInStringProperty"}, + "RedshiftTmpDir":{"shape":"EnclosedInStringProperty"}, + "TmpDirIAMRole":{"shape":"EnclosedInStringProperty"}, + "UpsertRedshiftOptions":{"shape":"UpsertRedshiftTargetOptions"} + } + }, "RegisterSchemaVersionInput":{ "type":"structure", "required":[ @@ -7833,6 +8898,19 @@ "DELETING" ] }, + "RelationalCatalogSource":{ + "type":"structure", + "required":[ + "Name", + "Database", + "Table" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Database":{"shape":"EnclosedInStringProperty"}, + "Table":{"shape":"EnclosedInStringProperty"} + } + }, 
"RemoveSchemaVersionMetadataInput":{ "type":"structure", "required":["MetadataKeyValue"], @@ -7856,6 +8934,21 @@ "MetadataValue":{"shape":"MetadataValueString"} } }, + "RenameField":{ + "type":"structure", + "required":[ + "Name", + "Inputs", + "SourcePath", + "TargetPath" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Inputs":{"shape":"OneInput"}, + "SourcePath":{"shape":"EnclosedInStringProperties"}, + "TargetPath":{"shape":"EnclosedInStringProperties"} + } + }, "ReplaceBoolean":{"type":"boolean"}, "ResetJobBookmarkRequest":{ "type":"structure", @@ -7959,6 +9052,95 @@ "Id":{"shape":"IntegerValue"} } }, + "S3CatalogSource":{ + "type":"structure", + "required":[ + "Name", + "Database", + "Table" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Database":{"shape":"EnclosedInStringProperty"}, + "Table":{"shape":"EnclosedInStringProperty"}, + "PartitionPredicate":{"shape":"EnclosedInStringProperty"}, + "AdditionalOptions":{"shape":"S3SourceAdditionalOptions"} + } + }, + "S3CatalogTarget":{ + "type":"structure", + "required":[ + "Name", + "Inputs", + "Table", + "Database" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Inputs":{"shape":"OneInput"}, + "PartitionKeys":{"shape":"GlueStudioPathList"}, + "Table":{"shape":"EnclosedInStringProperty"}, + "Database":{"shape":"EnclosedInStringProperty"}, + "SchemaChangePolicy":{"shape":"CatalogSchemaChangePolicy"} + } + }, + "S3CsvSource":{ + "type":"structure", + "required":[ + "Name", + "Paths", + "Separator", + "QuoteChar" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Paths":{"shape":"EnclosedInStringProperties"}, + "CompressionType":{"shape":"CompressionType"}, + "Exclusions":{"shape":"EnclosedInStringProperties"}, + "GroupSize":{"shape":"EnclosedInStringProperty"}, + "GroupFiles":{"shape":"EnclosedInStringProperty"}, + "Recurse":{"shape":"BoxedBoolean"}, + "MaxBand":{"shape":"BoxedNonNegativeInt"}, + "MaxFilesInBand":{"shape":"BoxedNonNegativeInt"}, + "AdditionalOptions":{"shape":"S3DirectSourceAdditionalOptions"}, + "Separator":{"shape":"Separator"}, + "Escaper":{"shape":"EnclosedInStringPropertyWithQuote"}, + "QuoteChar":{"shape":"QuoteChar"}, + "Multiline":{"shape":"BoxedBoolean"}, + "WithHeader":{"shape":"BoxedBoolean"}, + "WriteHeader":{"shape":"BoxedBoolean"}, + "SkipFirst":{"shape":"BoxedBoolean"}, + "OptimizePerformance":{"shape":"BooleanValue"}, + "OutputSchemas":{"shape":"GlueSchemas"} + } + }, + "S3DirectSourceAdditionalOptions":{ + "type":"structure", + "members":{ + "BoundedSize":{"shape":"BoxedLong"}, + "BoundedFiles":{"shape":"BoxedLong"}, + "EnableSamplePath":{"shape":"BoxedBoolean"}, + "SamplePath":{"shape":"EnclosedInStringProperty"} + } + }, + "S3DirectTarget":{ + "type":"structure", + "required":[ + "Name", + "Inputs", + "Path", + "Format" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Inputs":{"shape":"OneInput"}, + "PartitionKeys":{"shape":"GlueStudioPathList"}, + "Path":{"shape":"EnclosedInStringProperty"}, + "Compression":{"shape":"EnclosedInStringProperty"}, + "Format":{"shape":"TargetFormat"}, + "SchemaChangePolicy":{"shape":"DirectSchemaChangePolicy"} + } + }, "S3Encryption":{ "type":"structure", "members":{ @@ -7978,6 +9160,71 @@ "SSE-S3" ] }, + "S3GlueParquetTarget":{ + "type":"structure", + "required":[ + "Name", + "Inputs", + "Path" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Inputs":{"shape":"OneInput"}, + "PartitionKeys":{"shape":"GlueStudioPathList"}, + "Path":{"shape":"EnclosedInStringProperty"}, + "Compression":{"shape":"ParquetCompressionType"}, + 
"SchemaChangePolicy":{"shape":"DirectSchemaChangePolicy"} + } + }, + "S3JsonSource":{ + "type":"structure", + "required":[ + "Name", + "Paths" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Paths":{"shape":"EnclosedInStringProperties"}, + "CompressionType":{"shape":"CompressionType"}, + "Exclusions":{"shape":"EnclosedInStringProperties"}, + "GroupSize":{"shape":"EnclosedInStringProperty"}, + "GroupFiles":{"shape":"EnclosedInStringProperty"}, + "Recurse":{"shape":"BoxedBoolean"}, + "MaxBand":{"shape":"BoxedNonNegativeInt"}, + "MaxFilesInBand":{"shape":"BoxedNonNegativeInt"}, + "AdditionalOptions":{"shape":"S3DirectSourceAdditionalOptions"}, + "JsonPath":{"shape":"EnclosedInStringProperty"}, + "Multiline":{"shape":"BoxedBoolean"}, + "OutputSchemas":{"shape":"GlueSchemas"} + } + }, + "S3ParquetSource":{ + "type":"structure", + "required":[ + "Name", + "Paths" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Paths":{"shape":"EnclosedInStringProperties"}, + "CompressionType":{"shape":"ParquetCompressionType"}, + "Exclusions":{"shape":"EnclosedInStringProperties"}, + "GroupSize":{"shape":"EnclosedInStringProperty"}, + "GroupFiles":{"shape":"EnclosedInStringProperty"}, + "Recurse":{"shape":"BoxedBoolean"}, + "MaxBand":{"shape":"BoxedNonNegativeInt"}, + "MaxFilesInBand":{"shape":"BoxedNonNegativeInt"}, + "AdditionalOptions":{"shape":"S3DirectSourceAdditionalOptions"}, + "OutputSchemas":{"shape":"GlueSchemas"} + } + }, + "S3SourceAdditionalOptions":{ + "type":"structure", + "members":{ + "BoundedSize":{"shape":"BoxedLong"}, + "BoundedFiles":{"shape":"BoxedLong"} + } + }, "S3Target":{ "type":"structure", "members":{ @@ -8224,6 +9471,42 @@ "TotalSegments":{"shape":"TotalSegmentsInteger"} } }, + "SelectFields":{ + "type":"structure", + "required":[ + "Name", + "Inputs", + "Paths" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Inputs":{"shape":"OneInput"}, + "Paths":{"shape":"GlueStudioPathList"} + } + }, + "SelectFromCollection":{ + "type":"structure", + "required":[ + "Name", + "Inputs", + "Index" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Inputs":{"shape":"OneInput"}, + "Index":{"shape":"NonNegativeInt"} + } + }, + "Separator":{ + "type":"string", + "enum":[ + "comma", + "ctrla", + "pipe", + "semicolon", + "tab" + ] + }, "SerDeInfo":{ "type":"structure", "members":{ @@ -8311,6 +9594,105 @@ "ASCENDING" ] }, + "SparkConnectorSource":{ + "type":"structure", + "required":[ + "Name", + "ConnectionName", + "ConnectorName", + "ConnectionType" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "ConnectionName":{"shape":"EnclosedInStringProperty"}, + "ConnectorName":{"shape":"EnclosedInStringProperty"}, + "ConnectionType":{"shape":"EnclosedInStringProperty"}, + "AdditionalOptions":{"shape":"AdditionalOptions"}, + "OutputSchemas":{"shape":"GlueSchemas"} + } + }, + "SparkConnectorTarget":{ + "type":"structure", + "required":[ + "Name", + "Inputs", + "ConnectionName", + "ConnectorName", + "ConnectionType" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Inputs":{"shape":"OneInput"}, + "ConnectionName":{"shape":"EnclosedInStringProperty"}, + "ConnectorName":{"shape":"EnclosedInStringProperty"}, + "ConnectionType":{"shape":"EnclosedInStringProperty"}, + "AdditionalOptions":{"shape":"AdditionalOptions"}, + "OutputSchemas":{"shape":"GlueSchemas"} + } + }, + "SparkSQL":{ + "type":"structure", + "required":[ + "Name", + "Inputs", + "SqlQuery", + "SqlAliases" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Inputs":{"shape":"ManyInputs"}, + "SqlQuery":{"shape":"SqlQuery"}, + 
"SqlAliases":{"shape":"SqlAliases"}, + "OutputSchemas":{"shape":"GlueSchemas"} + } + }, + "Spigot":{ + "type":"structure", + "required":[ + "Name", + "Inputs", + "Path" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Inputs":{"shape":"OneInput"}, + "Path":{"shape":"EnclosedInStringProperty"}, + "Topk":{"shape":"Topk"}, + "Prob":{"shape":"Prob"} + } + }, + "SplitFields":{ + "type":"structure", + "required":[ + "Name", + "Inputs", + "Paths" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Inputs":{"shape":"OneInput"}, + "Paths":{"shape":"GlueStudioPathList"} + } + }, + "SqlAlias":{ + "type":"structure", + "required":[ + "From", + "Alias" + ], + "members":{ + "From":{"shape":"NodeId"}, + "Alias":{"shape":"EnclosedInStringPropertyWithQuote"} + } + }, + "SqlAliases":{ + "type":"list", + "member":{"shape":"SqlAlias"} + }, + "SqlQuery":{ + "type":"string", + "pattern":"([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\s])*" + }, "StartBlueprintRunRequest":{ "type":"structure", "required":[ @@ -8478,6 +9860,14 @@ "BatchWindow":{"shape":"NullableInteger"} } }, + "StartingPosition":{ + "type":"string", + "enum":[ + "latest", + "trim_horizon", + "earliest" + ] + }, "Statement":{ "type":"structure", "members":{ @@ -8608,6 +9998,13 @@ "SchemaReference":{"shape":"SchemaReference"} } }, + "StreamingDataPreviewOptions":{ + "type":"structure", + "members":{ + "PollingTime":{"shape":"PollingTime"}, + "RecordPollingLimit":{"shape":"PositiveLong"} + } + }, "StringColumnStatisticsData":{ "type":"structure", "required":[ @@ -8763,6 +10160,16 @@ "max":50, "min":0 }, + "TargetFormat":{ + "type":"string", + "enum":[ + "json", + "csv", + "avro", + "orc", + "parquet" + ] + }, "TaskRun":{ "type":"structure", "members":{ @@ -8850,6 +10257,12 @@ "Timestamp":{"type":"timestamp"}, "TimestampValue":{"type":"timestamp"}, "Token":{"type":"string"}, + "Topk":{ + "type":"integer", + "box":true, + "max":100, + "min":0 + }, "TotalSegmentsInteger":{ "type":"integer", "max":10, @@ -8998,6 +10411,12 @@ "EventBatchingCondition":{"shape":"EventBatchingCondition"} } }, + "TwoInputs":{ + "type":"list", + "member":{"shape":"NodeId"}, + "max":2, + "min":2 + }, "TypeString":{ "type":"string", "max":20000, @@ -9022,6 +10441,26 @@ "type":"list", "member":{"shape":"UnfilteredPartition"} }, + "Union":{ + "type":"structure", + "required":[ + "Name", + "Inputs", + "UnionType" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Inputs":{"shape":"TwoInputs"}, + "UnionType":{"shape":"UnionType"} + } + }, + "UnionType":{ + "type":"string", + "enum":[ + "ALL", + "DISTINCT" + ] + }, "UntagResourceRequest":{ "type":"structure", "required":[ @@ -9063,6 +10502,13 @@ "Name":{"shape":"NameString"} } }, + "UpdateCatalogBehavior":{ + "type":"string", + "enum":[ + "UPDATE_IN_DATABASE", + "LOG" + ] + }, "UpdateClassifierRequest":{ "type":"structure", "members":{ @@ -9426,6 +10872,14 @@ } }, "UpdatedTimestamp":{"type":"string"}, + "UpsertRedshiftTargetOptions":{ + "type":"structure", + "members":{ + "TableLocation":{"shape":"EnclosedInStringProperty"}, + "ConnectionName":{"shape":"EnclosedInStringProperty"}, + "UpsertKeys":{"shape":"EnclosedInStringPropertiesMinOne"} + } + }, "UriString":{"type":"string"}, "UserDefinedFunction":{ "type":"structure", diff --git a/models/apis/glue/2017-03-31/docs-2.json b/models/apis/glue/2017-03-31/docs-2.json index 1bcf7e933b..5926a2bd07 100644 --- a/models/apis/glue/2017-03-31/docs-2.json +++ b/models/apis/glue/2017-03-31/docs-2.json @@ -212,17 +212,61 @@ "TriggerUpdate$Actions": "

The actions initiated by this trigger.

" } }, + "AdditionalOptions": { + "base": null, + "refs": { + "JDBCConnectorTarget$AdditionalOptions": "

Additional connection options for the connector.

", + "SparkConnectorSource$AdditionalOptions": "

Additional connection options for the connector.

", + "SparkConnectorTarget$AdditionalOptions": "

Additional connection options for the connector.

" + } + }, "AdditionalPlanOptionsMap": { "base": null, "refs": { "GetPlanRequest$AdditionalPlanOptionsMap": "

A map to hold additional optional key-value parameters.

Currently, these key-value pairs are supported:

" } }, + "AggFunction": { + "base": null, + "refs": { + "AggregateOperation$AggFunc": "

Specifies the aggregation function to apply.

Possible aggregation functions include: avg, countDistinct, count, first, last, kurtosis, max, min, skewness, stddev_samp, stddev_pop, sum, sumDistinct, var_samp, var_pop

" + } + }, + "Aggregate": { + "base": "

Specifies a transform that groups rows by chosen fields and computes the aggregated value by a specified function.

", + "refs": { + "CodeGenConfigurationNode$Aggregate": "

Specifies a transform that groups rows by chosen fields and computes the aggregated value by a specified function.

" + } + }, + "AggregateOperation": { + "base": "

Specifies the set of parameters needed to perform aggregation in the aggregate transform.

", + "refs": { + "AggregateOperations$member": null + } + }, + "AggregateOperations": { + "base": null, + "refs": { + "Aggregate$Aggs": "

Specifies the aggregate functions to be performed on specified fields.
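A hedged aws-sdk-go sketch of such a node; the node ID, group column, and aggregation column are placeholders (the shorter fragments further below reuse these imports):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	// Group rows by "department" and compute avg("salary");
	// "source-node" stands in for the NodeId of the upstream node.
	node := &glue.CodeGenConfigurationNode{
		Aggregate: &glue.Aggregate{
			Name:   aws.String("Aggregate"),
			Inputs: []*string{aws.String("source-node")},
			Groups: [][]*string{{aws.String("department")}},
			Aggs: []*glue.AggregateOperation{{
				Column:  []*string{aws.String("salary")},
				AggFunc: aws.String("avg"), // any value from the AggFunction enum
			}},
		},
	}
	fmt.Println(node.String())
}
```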

" + } + }, "AlreadyExistsException": { "base": "

A resource to be created or added already exists.

", "refs": { } }, + "ApplyMapping": { + "base": "

Specifies a transform that maps data property keys in the data source to data property keys in the data target. You can rename keys, modify the data types for keys, and choose which keys to drop from the dataset.

", + "refs": { + "CodeGenConfigurationNode$ApplyMapping": "

Specifies a transform that maps data property keys in the data source to data property keys in the data target. You can rename keys, modify the data types for keys, and choose which keys to drop from the dataset.
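A hedged ApplyMapping fragment, assuming the imports from the Aggregate sketch above; the key names, types, and upstream node ID are placeholders:

```go
// Rename the source key "id" (string) to "user_id" and cast it to long.
node := &glue.CodeGenConfigurationNode{
	ApplyMapping: &glue.ApplyMapping{
		Name:   aws.String("Rename and retype"),
		Inputs: []*string{aws.String("source-node")},
		Mapping: []*glue.Mapping{{
			ToKey:    aws.String("user_id"),
			FromPath: []*string{aws.String("id")},
			FromType: aws.String("string"),
			ToType:   aws.String("long"),
			Dropped:  aws.Bool(false), // keep the column in the output
		}},
	},
}
```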

" + } + }, + "AthenaConnectorSource": { + "base": "

Specifies a connector to an Amazon Athena data source.

", + "refs": { + "CodeGenConfigurationNode$AthenaConnectorSource": "

Specifies a connector to an Amazon Athena data source.

" + } + }, "AttemptCount": { "base": null, "refs": { @@ -273,6 +317,12 @@ "PartitionIndexDescriptor$BackfillErrors": "

A list of errors that can occur when registering partition indexes for an existing table.

" } }, + "BasicCatalogTarget": { + "base": "

Specifies a target that uses a Glue Data Catalog table.

", + "refs": { + "CodeGenConfigurationNode$CatalogTarget": "

Specifies a target that uses a Glue Data Catalog table.

" + } + }, "BatchCreatePartitionRequest": { "base": null, "refs": { @@ -632,6 +682,7 @@ "refs": { "CreateTriggerRequest$StartOnCreation": "

Set to true to start SCHEDULED and CONDITIONAL triggers when created. True is not supported for ON_DEMAND triggers.

", "GetJobRunRequest$PredecessorsIncluded": "

True if a list of predecessor runs should be returned.

", + "S3CsvSource$OptimizePerformance": "

A Boolean value that specifies whether to use the advanced SIMD CSV reader along with Apache Arrow-based columnar memory formats. Only available in Glue version 3.0.

", "UpdateDevEndpointRequest$UpdateEtlLibraries": "

True if the list of custom libraries to be loaded in the development endpoint needs to be updated, or False otherwise.

" } }, @@ -643,6 +694,90 @@ "UpdatePartitionRequest$PartitionValueList": "

List of partition key values that define the partition to update.

" } }, + "BoxedBoolean": { + "base": null, + "refs": { + "CatalogKafkaSource$DetectSchema": "

Whether to automatically determine the schema from the incoming data.

", + "CatalogKinesisSource$DetectSchema": "

Whether to automatically determine the schema from the incoming data.

", + "CatalogSchemaChangePolicy$EnableUpdateCatalog": "

Whether to use the specified update behavior when the crawler finds a changed schema.

", + "DirectKafkaSource$DetectSchema": "

Whether to automatically determine the schema from the incoming data.

", + "DirectKinesisSource$DetectSchema": "

Whether to automatically determine the schema from the incoming data.

", + "DirectSchemaChangePolicy$EnableUpdateCatalog": "

Whether to use the specified update behavior when the crawler finds a changed schema.

", + "FilterExpression$Negated": "

Whether the expression is to be negated.

", + "KinesisStreamingSourceOptions$AddIdleTimeBetweenReads": "

Adds a time delay between two consecutive getRecords operations. The default value is \"False\". This option is only configurable for Glue version 2.0 and above.

", + "KinesisStreamingSourceOptions$AvoidEmptyBatches": "

Avoids creating an empty microbatch job by checking for unread data in the Kinesis data stream before the batch is started. The default value is \"False\".

", + "Mapping$Dropped": "

If true, then the column is removed.

", + "NullCheckBoxList$IsEmpty": "

Specifies that an empty string is considered a null value.

", + "NullCheckBoxList$IsNullString": "

Specifies that a value spelling out the word 'null' is considered a null value.

", + "NullCheckBoxList$IsNegOne": "

Specifies that an integer value of -1 is considered a null value.
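A hedged fragment showing these three flags on a DropNullFields node (imports as above; node IDs are placeholders):

```go
// Treat "" and the literal string "null" as nulls, but leave -1 alone.
node := &glue.CodeGenConfigurationNode{
	DropNullFields: &glue.DropNullFields{
		Name:   aws.String("Drop null fields"),
		Inputs: []*string{aws.String("source-node")},
		NullCheckBoxList: &glue.NullCheckBoxList{
			IsEmpty:      aws.Bool(true),
			IsNullString: aws.Bool(true),
			IsNegOne:     aws.Bool(false),
		},
	},
}
```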

", + "S3CsvSource$Recurse": "

If set to true, recursively reads files in all subdirectories under the specified paths.

", + "S3CsvSource$Multiline": "

A Boolean value that specifies whether a single record can span multiple lines. This can occur when a field contains a quoted new-line character. You must set this option to True if any record spans multiple lines. The default value is False, which allows for more aggressive file-splitting during parsing.

", + "S3CsvSource$WithHeader": "

A Boolean value that specifies whether to treat the first line as a header. The default value is False.

", + "S3CsvSource$WriteHeader": "

A Boolean value that specifies whether to write the header to output. The default value is True.

", + "S3CsvSource$SkipFirst": "

A Boolean value that specifies whether to skip the first data line. The default value is False.

", + "S3DirectSourceAdditionalOptions$EnableSamplePath": "

Sets the option to enable a sample path.

", + "S3JsonSource$Recurse": "

If set to true, recursively reads files in all subdirectories under the specified paths.

", + "S3JsonSource$Multiline": "

A Boolean value that specifies whether a single record can span multiple lines. This can occur when a field contains a quoted new-line character. You must set this option to True if any record spans multiple lines. The default value is False, which allows for more aggressive file-splitting during parsing.

", + "S3ParquetSource$Recurse": "

If set to true, recursively reads files in all subdirectories under the specified paths.

" + } + }, + "BoxedDoubleFraction": { + "base": null, + "refs": { + "PIIDetection$SampleFraction": "

Indicates the fraction of the data to sample when scanning for PII entities.

", + "PIIDetection$ThresholdFraction": "

Indicates the fraction of the data that must be detected as PII in order for a column to be identified as PII data.
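A hedged PIIDetection fragment tying the two fractions together (imports as above; the entity type string and node IDs are placeholders, not values confirmed by this diff):

```go
// Audit columns: sample 20% of rows and flag a column as PII when at
// least 10% of the sampled values are detected as the entity type.
node := &glue.CodeGenConfigurationNode{
	PIIDetection: &glue.PIIDetection{
		Name:                aws.String("Detect PII"),
		Inputs:              []*string{aws.String("source-node")},
		PiiType:             aws.String("ColumnAudit"),
		EntityTypesToDetect: []*string{aws.String("EMAIL")}, // placeholder entity type
		SampleFraction:      aws.Float64(0.2),
		ThresholdFraction:   aws.Float64(0.1),
	},
}
```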

" + } + }, + "BoxedLong": { + "base": null, + "refs": { + "S3DirectSourceAdditionalOptions$BoundedSize": "

Sets the upper limit, in bytes, for the target size of the dataset that will be processed.

", + "S3DirectSourceAdditionalOptions$BoundedFiles": "

Sets the upper limit for the target number of files that will be processed.

", + "S3SourceAdditionalOptions$BoundedSize": "

Sets the upper limit, in bytes, for the target size of the dataset that will be processed.

", + "S3SourceAdditionalOptions$BoundedFiles": "

Sets the upper limit for the target number of files that will be processed.

" + } + }, + "BoxedNonNegativeInt": { + "base": null, + "refs": { + "KafkaStreamingSourceOptions$NumRetries": "

The number of times to retry before failing to fetch Kafka offsets. The default value is 3.

", + "KafkaStreamingSourceOptions$MinPartitions": "

The desired minimum number of partitions to read from Kafka. The default value is null, which means that the number of Spark partitions is equal to the number of Kafka partitions.

", + "KinesisStreamingSourceOptions$NumRetries": "

The maximum number of retries for Kinesis Data Streams API requests. The default value is 3.

", + "S3CsvSource$MaxBand": "

This option controls the duration in milliseconds after which the Amazon S3 listing is likely to be consistent. Files with modification timestamps falling within the last maxBand milliseconds are tracked specially when using JobBookmarks to account for Amazon S3 eventual consistency. Most users don't need to set this option. The default is 900000 milliseconds, or 15 minutes.

", + "S3CsvSource$MaxFilesInBand": "

This option specifies the maximum number of files to save from the last maxBand milliseconds. If this number is exceeded, extra files are skipped and only processed in the next job run.

", + "S3JsonSource$MaxBand": "

This option controls the duration in milliseconds after which the Amazon S3 listing is likely to be consistent. Files with modification timestamps falling within the last maxBand milliseconds are tracked specially when using JobBookmarks to account for Amazon S3 eventual consistency. Most users don't need to set this option. The default is 900000 milliseconds, or 15 minutes.

", + "S3JsonSource$MaxFilesInBand": "

This option specifies the maximum number of files to save from the last maxBand milliseconds. If this number is exceeded, extra files are skipped and only processed in the next job run.

", + "S3ParquetSource$MaxBand": "

This option controls the duration in milliseconds after which the Amazon S3 listing is likely to be consistent. Files with modification timestamps falling within the last maxBand milliseconds are tracked specially when using JobBookmarks to account for Amazon S3 eventual consistency. Most users don't need to set this option. The default is 900000 milliseconds, or 15 minutes.

", + "S3ParquetSource$MaxFilesInBand": "

This option specifies the maximum number of files to save from the last maxBand milliseconds. If this number is exceeded, extra files are skipped and only processed in the next job run.

" + } + }, + "BoxedNonNegativeLong": { + "base": null, + "refs": { + "JDBCConnectorOptions$LowerBound": "

The minimum value of partitionColumn that is used to decide partition stride.

", + "JDBCConnectorOptions$UpperBound": "

The maximum value of partitionColumn that is used to decide partition stride.

", + "JDBCConnectorOptions$NumPartitions": "

The number of partitions. This value, along with lowerBound (inclusive) and upperBound (exclusive), forms partition strides for generated WHERE clause expressions that are used to split the partitionColumn.
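A hedged fragment of these partitioning options (imports as above; the column name and bounds are placeholders):

```go
// Split JDBC reads on integer column "id" into 10 partition strides
// covering [0, 1000000).
opts := &glue.JDBCConnectorOptions{
	PartitionColumn: aws.String("id"),
	LowerBound:      aws.Int64(0),
	UpperBound:      aws.Int64(1000000),
	NumPartitions:   aws.Int64(10),
}
```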

", + "KafkaStreamingSourceOptions$PollTimeoutMs": "

The timeout in milliseconds to poll data from Kafka in Spark job executors. The default value is 512.

", + "KafkaStreamingSourceOptions$RetryIntervalMs": "

The time in milliseconds to wait before retrying to fetch Kafka offsets. The default value is 10.

", + "KafkaStreamingSourceOptions$MaxOffsetsPerTrigger": "

The rate limit on the maximum number of offsets that are processed per trigger interval. The specified total number of offsets is proportionally split across topicPartitions of different volumes. The default value is null, which means that the consumer reads all offsets until the known latest offset.

", + "KinesisStreamingSourceOptions$MaxFetchTimeInMs": "

The maximum time spent in the job executor to fetch a record from the Kinesis data stream per shard, specified in milliseconds (ms). The default value is 1000.

", + "KinesisStreamingSourceOptions$MaxFetchRecordsPerShard": "

The maximum number of records to fetch per shard in the Kinesis data stream. The default value is 100000.

", + "KinesisStreamingSourceOptions$MaxRecordPerRead": "

The maximum number of records to fetch from the Kinesis data stream in each getRecords operation. The default value is 10000.

", + "KinesisStreamingSourceOptions$IdleTimeBetweenReadsInMs": "

The minimum time delay between two consecutive getRecords operations, specified in ms. The default value is 1000. This option is only configurable for Glue version 2.0 and above.

", + "KinesisStreamingSourceOptions$DescribeShardInterval": "

The minimum time interval between two ListShards API calls for your script to consider resharding. The default value is 1s.

", + "KinesisStreamingSourceOptions$RetryIntervalMs": "

The cool-off time period (specified in ms) before retrying the Kinesis Data Streams API call. The default value is 1000.

", + "KinesisStreamingSourceOptions$MaxRetryIntervalMs": "

The maximum cool-off time period (specified in ms) between two retries of a Kinesis Data Streams API call. The default value is 10000.
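A hedged DirectKinesisSource fragment using a few of these options (imports as above; the stream ARN and classification are placeholders):

```go
// Read from a Kinesis stream, inferring the schema from incoming data.
node := &glue.CodeGenConfigurationNode{
	DirectKinesisSource: &glue.DirectKinesisSource{
		Name:         aws.String("Kinesis source"),
		WindowSize:   aws.Int64(100), // processing window per micro batch
		DetectSchema: aws.Bool(true),
		StreamingOptions: &glue.KinesisStreamingSourceOptions{
			StreamArn:        aws.String("arn:aws:kinesis:us-east-1:123456789012:stream/my-stream"),
			StartingPosition: aws.String("latest"), // or "trim_horizon" / "earliest"
			Classification:   aws.String("json"),
		},
	},
}
```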

" + } + }, + "BoxedPositiveInt": { + "base": null, + "refs": { + "CatalogKafkaSource$WindowSize": "

The amount of time to spend processing each micro batch.

", + "CatalogKinesisSource$WindowSize": "

The amount of time to spend processing each micro batch.

", + "DirectKafkaSource$WindowSize": "

The amount of time to spend processing each micro batch.

", + "DirectKinesisSource$WindowSize": "

The amount of time to spend processing each micro batch.

" + } + }, "CancelMLTaskRunRequest": { "base": null, "refs": { @@ -762,6 +897,31 @@ "GetCatalogImportStatusResponse$ImportStatus": "

The status of the specified catalog migration.

" } }, + "CatalogKafkaSource": { + "base": "

Specifies an Apache Kafka data store in the Data Catalog.

", + "refs": { + "CodeGenConfigurationNode$CatalogKafkaSource": "

Specifies an Apache Kafka data store in the Data Catalog.

" + } + }, + "CatalogKinesisSource": { + "base": "

Specifies a Kinesis data source in the Glue Data Catalog.

", + "refs": { + "CodeGenConfigurationNode$CatalogKinesisSource": "

Specifies a Kinesis data source in the Glue Data Catalog.

" + } + }, + "CatalogSchemaChangePolicy": { + "base": "

A policy that specifies update behavior for the crawler.

", + "refs": { + "GovernedCatalogTarget$SchemaChangePolicy": "

A policy that specifies update behavior for the governed catalog.

", + "S3CatalogTarget$SchemaChangePolicy": "

A policy that specifies update behavior for the crawler.

" + } + }, + "CatalogSource": { + "base": "

Specifies a data store in the Glue Data Catalog.

", + "refs": { + "CodeGenConfigurationNode$CatalogSource": "

Specifies a data store in the Glue Data Catalog.

" + } + }, "CatalogTablesList": { "base": null, "refs": { @@ -847,6 +1007,20 @@ "CodeGenNodeArg$Value": "

The value of the argument or property.

" } }, + "CodeGenConfigurationNode": { + "base": "

CodeGenConfigurationNode enumerates all valid Node types. One and only one of its member variables can be populated.

", + "refs": { + "CodeGenConfigurationNodes$value": null + } + }, + "CodeGenConfigurationNodes": { + "base": null, + "refs": { + "CreateJobRequest$CodeGenConfigurationNodes": "

The representation of a directed acyclic graph on which both the Glue Studio visual component and Glue Studio code generation is based.

", + "Job$CodeGenConfigurationNodes": "

The representation of a directed acyclic graph on which both the Glue Studio visual component and Glue Studio code generation is based.

", + "JobUpdate$CodeGenConfigurationNodes": "

The representation of a directed acyclic graph on which both the Glue Studio visual component and Glue Studio code generation is based.
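As a sketch of how the new parameter is used from aws-sdk-go (untested; node IDs, the job name, and the role ARN are hypothetical, and a real job usually needs more CreateJobInput fields), a two-node DAG can be supplied like this:

```go
// A minimal, hedged sketch of creating a visual job via the new
// CodeGenConfigurationNodes parameter of CreateJob.
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	svc := glue.New(session.Must(session.NewSession()))
	_, err := svc.CreateJob(&glue.CreateJobInput{
		Name:    aws.String("visual-job-example"),                         // hypothetical job name
		Role:    aws.String("arn:aws:iam::123456789012:role/GlueJobRole"), // hypothetical role
		Command: &glue.JobCommand{Name: aws.String("glueetl")},
		// The DAG: a Data Catalog source node feeding an ApplyMapping node.
		CodeGenConfigurationNodes: map[string]*glue.CodeGenConfigurationNode{
			"node-1": {CatalogSource: &glue.CatalogSource{
				Name:     aws.String("source"),
				Database: aws.String("mydb"),
				Table:    aws.String("mytable"),
			}},
			"node-2": {ApplyMapping: &glue.ApplyMapping{
				Name:   aws.String("map"),
				Inputs: []*string{aws.String("node-1")}, // references the node ID above
				Mapping: []*glue.Mapping{{
					FromPath: []*string{aws.String("col_a")},
					ToKey:    aws.String("col_a"),
					FromType: aws.String("string"),
					ToType:   aws.String("string"),
				}},
			}},
		},
	})
	if err != nil {
		panic(err)
	}
}
```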

" + } + }, "CodeGenEdge": { "base": "

Represents a directional edge in a directed acyclic graph (DAG).

", "refs": { @@ -991,6 +1165,7 @@ "base": null, "refs": { "Column$Type": "

The data type of the Column.

", + "GlueStudioSchemaColumn$Type": "

The Hive type for this column in the Glue Studio schema.

", "KeySchemaElement$Type": "

The type of a partition key.

", "SchemaColumn$DataType": "

The type of data in the column.

" } @@ -1030,6 +1205,13 @@ "UpdateSchemaInput$Compatibility": "

The new compatibility setting for the schema.

" } }, + "CompressionType": { + "base": null, + "refs": { + "S3CsvSource$CompressionType": "

Specifies how the data is compressed. This is generally not necessary if the data has a standard file extension. Possible values are \"gzip\" and \"bzip\".

", + "S3JsonSource$CompressionType": "

Specifies how the data is compressed. This is generally not necessary if the data has a standard file extension. Possible values are \"gzip\" and \"bzip\".

" + } + }, "ConcurrentModificationException": { "base": "

Two processes are trying to modify a resource simultaneously.

", "refs": { @@ -1539,6 +1721,12 @@ "UpdateCsvClassifierRequest$QuoteSymbol": "

A custom symbol to denote what combines content into a single column value. It must be different from the column delimiter.

" } }, + "CustomCode": { + "base": "

Specifies a transform that uses custom code you provide to perform the data transformation. The output is a collection of DynamicFrames.

", + "refs": { + "CodeGenConfigurationNode$CustomCode": "

Specifies a transform that uses custom code you provide to perform the data transformation. The output is a collection of DynamicFrames.

" + } + }, "CustomEntityType": { "base": "

An object representing a custom pattern for detecting sensitive data across the columns and rows of your structured data.

", "refs": { @@ -1646,6 +1834,12 @@ "UpdateCrawlerRequest$DatabaseName": "

The Glue database where results are stored, such as: arn:aws:daylight:us-east-1::database/sometable/*.

" } }, + "Datatype": { + "base": "

A structure representing the datatype of the value.

", + "refs": { + "NullValueField$Datatype": "

The datatype of the value.

" + } + }, "DateColumnStatisticsData": { "base": "

Defines column statistics supported for timestamp data columns.

", "refs": { @@ -2007,6 +2201,25 @@ "BatchGetDevEndpointsResponse$DevEndpointsNotFound": "

A list of DevEndpoints not found.

" } }, + "DirectKafkaSource": { + "base": "

Specifies an Apache Kafka data store.

", + "refs": { + "CodeGenConfigurationNode$DirectKafkaSource": "

Specifies an Apache Kafka data store.

" + } + }, + "DirectKinesisSource": { + "base": "

Specifies a direct Amazon Kinesis data source.

", + "refs": { + "CodeGenConfigurationNode$DirectKinesisSource": "

Specifies a direct Amazon Kinesis data source.

" + } + }, + "DirectSchemaChangePolicy": { + "base": "

A policy that specifies update behavior for the crawler.

", + "refs": { + "S3DirectTarget$SchemaChangePolicy": "

A policy that specifies update behavior for the crawler.

", + "S3GlueParquetTarget$SchemaChangePolicy": "

A policy that specifies update behavior for the crawler.

" + } + }, "Double": { "base": null, "refs": { @@ -2027,6 +2240,30 @@ "Statement$Progress": "

The code execution progress.

" } }, + "DropDuplicates": { + "base": "

Specifies a transform that removes rows of repeating data from a data set.

", + "refs": { + "CodeGenConfigurationNode$DropDuplicates": "

Specifies a transform that removes rows of repeating data from a data set.

" + } + }, + "DropFields": { + "base": "

Specifies a transform that chooses the data property keys that you want to drop.

", + "refs": { + "CodeGenConfigurationNode$DropFields": "

Specifies a transform that chooses the data property keys that you want to drop.

" + } + }, + "DropNullFields": { + "base": "

Specifies a transform that removes columns from the dataset if all values in the column are 'null'. By default, Glue Studio will recognize null objects, but some values such as empty strings, strings that are \"null\", -1 integers, or other placeholders such as zeros are not automatically recognized as nulls.

", + "refs": { + "CodeGenConfigurationNode$DropNullFields": "

Specifies a transform that removes columns from the dataset if all values in the column are 'null'. By default, Glue Studio will recognize null objects, but some values such as empty strings, strings that are \"null\", -1 integers, or other placeholders such as zeros are not automatically recognized as nulls.

" + } + }, + "DynamoDBCatalogSource": { + "base": "

Specifies a DynamoDB data source in the Glue Data Catalog.

", + "refs": { + "CodeGenConfigurationNode$DynamoDBCatalogSource": null + } + }, "DynamoDBTarget": { "base": "

Specifies an Amazon DynamoDB table to crawl.

", "refs": { @@ -2057,6 +2294,157 @@ "PutResourcePolicyRequest$EnableHybrid": "

If 'TRUE', indicates that you are using both methods to grant cross-account access to Data Catalog resources:

Must be set to 'TRUE' if you have already used the Management Console to grant cross-account access, otherwise the call fails. Default is 'FALSE'.

" } }, + "EnclosedInStringProperties": { + "base": null, + "refs": { + "AggregateOperation$Column": "

Specifies the column on the data set on which the aggregation function will be applied.
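For illustration, a hedged aws-sdk-go fragment (using the same aws and glue imports as the CreateJob sketch above; column names are hypothetical) that groups by one column and sums another:

```go
// Group by "category" and compute sum("sales"); "sum" is one of the
// AggFunction values defined by this release.
agg := &glue.Aggregate{
	Name:   aws.String("totals"),
	Inputs: []*string{aws.String("node-1")},
	Groups: [][]*string{{aws.String("category")}},
	Aggs: []*glue.AggregateOperation{{
		Column:  []*string{aws.String("sales")},
		AggFunc: aws.String("sum"),
	}},
}
```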

", + "FilterValue$Value": "

The value to be associated.

", + "GlueStudioPathList$member": null, + "JDBCConnectorOptions$JobBookmarkKeys": "

The names of the job bookmark keys on which to sort.

", + "Mapping$FromPath": "

The table or column to be modified.

", + "PIIDetection$EntityTypesToDetect": "

Indicates the types of entities the PIIDetection transform will identify as PII data.

PII type entities include: PERSON_NAME, DATE, USA_SSN, EMAIL, USA_ITIN, USA_PASSPORT_NUMBER, PHONE_NUMBER, BANK_ACCOUNT, IP_ADDRESS, MAC_ADDRESS, USA_CPT_CODE, USA_HCPCS_CODE, USA_NATIONAL_DRUG_CODE, USA_MEDICARE_BENEFICIARY_IDENTIFIER, USA_HEALTH_INSURANCE_CLAIM_NUMBER, CREDIT_CARD, USA_NATIONAL_PROVIDER_IDENTIFIER, USA_DEA_NUMBER, USA_DRIVING_LICENSE

", + "RenameField$SourcePath": "

A JSON path to a variable in the data structure for the source data.

", + "RenameField$TargetPath": "

A JSON path to a variable in the data structure for the target data.

", + "S3CsvSource$Paths": "

A list of the Amazon S3 paths to read from.

", + "S3CsvSource$Exclusions": "

A string containing a JSON list of Unix-style glob patterns to exclude. For example, \"[\\\"**.pdf\\\"]\" excludes all PDF files.

", + "S3JsonSource$Paths": "

A list of the Amazon S3 paths to read from.

", + "S3JsonSource$Exclusions": "

A string containing a JSON list of Unix-style glob patterns to exclude. For example, \"[\\\"**.pdf\\\"]\" excludes all PDF files.

", + "S3ParquetSource$Paths": "

A list of the Amazon S3 paths to read from.

", + "S3ParquetSource$Exclusions": "

A string containing a JSON list of Unix-style glob patterns to exclude. For example, \"[\\\"**.pdf\\\"]\" excludes all PDF files.

" + } + }, + "EnclosedInStringPropertiesMinOne": { + "base": null, + "refs": { + "UpsertRedshiftTargetOptions$UpsertKeys": "

The keys used to determine whether to perform an update or insert.

" + } + }, + "EnclosedInStringProperty": { + "base": null, + "refs": { + "AdditionalOptions$key": null, + "AdditionalOptions$value": null, + "AthenaConnectorSource$ConnectionName": "

The name of the connection that is associated with the connector.

", + "AthenaConnectorSource$ConnectorName": "

The name of a connector that assists with accessing the data store in Glue Studio.

", + "AthenaConnectorSource$ConnectionType": "

The type of connection, such as marketplace.athena or custom.athena, designating a connection to an Amazon Athena data store.

", + "AthenaConnectorSource$SchemaName": "

The name of the CloudWatch log group to read from. For example, /aws-glue/jobs/output.

", + "BasicCatalogTarget$Database": "

The database that contains the table you want to use as the target. This database must already exist in the Data Catalog.

", + "BasicCatalogTarget$Table": "

The table that defines the schema of your output data. This table must already exist in the Data Catalog.

", + "CatalogKafkaSource$Table": "

The name of the table in the database to read from.

", + "CatalogKafkaSource$Database": "

The name of the database to read from.

", + "CatalogKinesisSource$Table": "

The name of the table in the database to read from.

", + "CatalogKinesisSource$Database": "

The name of the database to read from.

", + "CatalogSource$Database": "

The name of the database to read from.

", + "CatalogSource$Table": "

The name of the table in the database to read from.

", + "CustomCode$ClassName": "

The name defined for the custom code node class.

", + "DirectSchemaChangePolicy$Table": "

Specifies the table in the database that the schema change policy applies to.

", + "DirectSchemaChangePolicy$Database": "

Specifies the database that the schema change policy applies to.

", + "DynamoDBCatalogSource$Database": "

The name of the database to read from.

", + "DynamoDBCatalogSource$Table": "

The name of the table in the database to read from.

", + "EnclosedInStringProperties$member": null, + "EnclosedInStringPropertiesMinOne$member": null, + "FillMissingValues$ImputedPath": "

A JSON path to a variable in the data structure for the dataset that is imputed.

", + "FillMissingValues$FilledPath": "

A JSON path to a variable in the data structure for the dataset that is filled.

", + "GovernedCatalogSource$Database": "

The database to read from.

", + "GovernedCatalogSource$Table": "

The database table to read from.

", + "GovernedCatalogSource$PartitionPredicate": "

Partitions satisfying this predicate are deleted. Files within the retention period in these partitions are not deleted. Set to \"\" (empty) by default.

", + "GovernedCatalogTarget$Table": "

The name of the table in the database to write to.

", + "GovernedCatalogTarget$Database": "

The name of the database to write to.

", + "JDBCConnectorOptions$FilterPredicate": "

Extra condition clause to filter data from the source. For example:

BillingCity='Mountain View'

When using a query instead of a table name, you should validate that the query works with the specified filterPredicate.

", + "JDBCConnectorOptions$PartitionColumn": "

The name of an integer column that is used for partitioning. This option works only when it's included with lowerBound, upperBound, and numPartitions. This option works the same way as in the Spark SQL JDBC reader.
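A hedged fragment combining a filter predicate with a partitioned read (same imports as the CreateJob sketch above; the column and bounds are hypothetical, and the bounds only size the strides rather than filter rows):

```go
// Partitioned read of a JDBC source, filtered to one billing city.
opts := &glue.JDBCConnectorOptions{
	FilterPredicate: aws.String("BillingCity='Mountain View'"),
	PartitionColumn: aws.String("account_id"), // hypothetical integer column
	LowerBound:      aws.Int64(0),
	UpperBound:      aws.Int64(1000000),
	NumPartitions:   aws.Int64(8),
}
```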

", + "JDBCConnectorOptions$JobBookmarkKeysSortOrder": "

Specifies an ascending or descending sort order.

", + "JDBCConnectorSource$ConnectionName": "

The name of the connection that is associated with the connector.

", + "JDBCConnectorSource$ConnectorName": "

The name of a connector that assists with accessing the data store in Glue Studio.

", + "JDBCConnectorSource$ConnectionType": "

The type of connection, such as marketplace.jdbc or custom.jdbc, designating a connection to a JDBC data store.

", + "JDBCConnectorTarget$ConnectionName": "

The name of the connection that is associated with the connector.

", + "JDBCConnectorTarget$ConnectorName": "

The name of a connector that will be used.

", + "JDBCConnectorTarget$ConnectionType": "

The type of connection, such as marketplace.jdbc or custom.jdbc, designating a connection to a JDBC data target.

", + "JoinColumn$From": "

The column to be joined.

", + "KafkaStreamingSourceOptions$BootstrapServers": "

A list of bootstrap server URLs, for example: b-1.vpc-test-2.o4q88o.c6.kafka.us-east-1.amazonaws.com:9094. This option must be specified in the API call or defined in the table metadata in the Data Catalog.

", + "KafkaStreamingSourceOptions$SecurityProtocol": "

The protocol used to communicate with brokers. The possible values are \"SSL\" or \"PLAINTEXT\".

", + "KafkaStreamingSourceOptions$ConnectionName": "

The name of the connection.

", + "KafkaStreamingSourceOptions$TopicName": "

The topic name as specified in Apache Kafka. You must specify at least one of \"topicName\", \"assign\" or \"subscribePattern\".

", + "KafkaStreamingSourceOptions$Assign": "

The specific TopicPartitions to consume. You must specify at least one of \"topicName\", \"assign\" or \"subscribePattern\".

", + "KafkaStreamingSourceOptions$SubscribePattern": "

A Java regex string that identifies the topic list to subscribe to. You must specify at least one of \"topicName\", \"assign\" or \"subscribePattern\".

", + "KafkaStreamingSourceOptions$Classification": "

An optional classification.

", + "KafkaStreamingSourceOptions$Delimiter": "

Specifies the delimiter character.

", + "KafkaStreamingSourceOptions$StartingOffsets": "

The starting position in the Kafka topic to read data from. The possible values are \"earliest\" or \"latest\". The default value is \"latest\".

", + "KafkaStreamingSourceOptions$EndingOffsets": "

The end point when a batch query is ended. Possible values are either \"latest\" or a JSON string that specifies an ending offset for each TopicPartition.
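A hedged fragment showing both offset options (same imports as the CreateJob sketch above). The per-partition offsets follow Spark's Kafka JSON format, where -2 is assumed to mean earliest and -1 latest:

```go
// Read the hypothetical "orders" topic from the earliest offsets and stop at
// offset 500 on partition 0 and at the latest offset on partition 1.
streaming := &glue.KafkaStreamingSourceOptions{
	TopicName:       aws.String("orders"),
	StartingOffsets: aws.String(`{"orders":{"0":-2,"1":-2}}`),
	EndingOffsets:   aws.String(`{"orders":{"0":500,"1":-1}}`),
}
```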

", + "KinesisStreamingSourceOptions$EndpointUrl": "

The URL of the Kinesis endpoint.

", + "KinesisStreamingSourceOptions$StreamName": "

The name of the Kinesis data stream.

", + "KinesisStreamingSourceOptions$Classification": "

An optional classification.

", + "KinesisStreamingSourceOptions$Delimiter": "

Specifies the delimiter character.

", + "KinesisStreamingSourceOptions$StreamArn": "

The Amazon Resource Name (ARN) of the Kinesis data stream.

", + "KinesisStreamingSourceOptions$RoleArn": "

The Amazon Resource Name (ARN) of the role to assume using AWS Security Token Service (AWS STS). This role must have permissions for describe or read record operations for the Kinesis data stream. You must use this parameter when accessing a data stream in a different account. Used in conjunction with \"awsSTSSessionName\".

", + "KinesisStreamingSourceOptions$RoleSessionName": "

An identifier for the session assuming the role using AWS STS. You must use this parameter when accessing a data stream in a different account. Used in conjunction with \"awsSTSRoleARN\".

", + "Mapping$ToKey": "

After the apply mapping, the name that the column should have. It can be the same as FromPath.

", + "Mapping$FromType": "

The type of the data to be modified.

", + "Mapping$ToType": "

The data type that the data is to be modified to.

", + "MicrosoftSQLServerCatalogSource$Database": "

The name of the database to read from.

", + "MicrosoftSQLServerCatalogSource$Table": "

The name of the table in the database to read from.

", + "MicrosoftSQLServerCatalogTarget$Database": "

The name of the database to write to.

", + "MicrosoftSQLServerCatalogTarget$Table": "

The name of the table in the database to write to.

", + "MySQLCatalogSource$Database": "

The name of the database to read from.

", + "MySQLCatalogSource$Table": "

The name of the table in the database to read from.

", + "MySQLCatalogTarget$Database": "

The name of the database to write to.

", + "MySQLCatalogTarget$Table": "

The name of the table in the database to write to.

", + "NullValueField$Value": "

The value of the null placeholder.

", + "OracleSQLCatalogSource$Database": "

The name of the database to read from.

", + "OracleSQLCatalogSource$Table": "

The name of the table in the database to read from.

", + "OracleSQLCatalogTarget$Database": "

The name of the database to write to.

", + "OracleSQLCatalogTarget$Table": "

The name of the table in the database to write to.

", + "PIIDetection$OutputColumnName": "

Indicates the output column name that will contain any entity type detected in that row.

", + "PostgreSQLCatalogSource$Database": "

The name of the database to read from.

", + "PostgreSQLCatalogSource$Table": "

The name of the table in the database to read from.

", + "PostgreSQLCatalogTarget$Database": "

The name of the database to write to.

", + "PostgreSQLCatalogTarget$Table": "

The name of the table in the database to write to.

", + "RedshiftSource$Database": "

The database to read from.

", + "RedshiftSource$Table": "

The database table to read from.

", + "RedshiftSource$RedshiftTmpDir": "

The Amazon S3 path where temporary data can be staged when copying out of the database.

", + "RedshiftSource$TmpDirIAMRole": "

The IAM role with permissions.

", + "RedshiftTarget$Database": "

The name of the database to write to.

", + "RedshiftTarget$Table": "

The name of the table in the database to write to.

", + "RedshiftTarget$RedshiftTmpDir": "

The Amazon S3 path where temporary data can be staged when copying out of the database.

", + "RedshiftTarget$TmpDirIAMRole": "

The IAM role with permissions.

", + "RelationalCatalogSource$Database": "

The name of the database to read from.

", + "RelationalCatalogSource$Table": "

The name of the table in the database to read from.

", + "S3CatalogSource$Database": "

The database to read from.

", + "S3CatalogSource$Table": "

The database table to read from.

", + "S3CatalogSource$PartitionPredicate": "

Partitions satisfying this predicate are deleted. Files within the retention period in these partitions are not deleted. Set to \"\" (empty) by default.

", + "S3CatalogTarget$Table": "

The name of the table in the database to write to.

", + "S3CatalogTarget$Database": "

The name of the database to write to.

", + "S3CsvSource$GroupSize": "

The target group size in bytes. The default is computed based on the input data size and the size of your cluster. When there are fewer than 50,000 input files, \"groupFiles\" must be set to \"inPartition\" for this to take effect.

", + "S3CsvSource$GroupFiles": "

Grouping files is turned on by default when the input contains more than 50,000 files. To turn on grouping with fewer than 50,000 files, set this parameter to \"inPartition\". To disable grouping when there are more than 50,000 files, set this parameter to \"none\".

", + "S3DirectSourceAdditionalOptions$SamplePath": "

If enabled, specifies the sample path.

", + "S3DirectTarget$Path": "

A single Amazon S3 path to write to.

", + "S3DirectTarget$Compression": "

Specifies how the data is compressed. This is generally not necessary if the data has a standard file extension. Possible values are \"gzip\" and \"bzip\".

", + "S3GlueParquetTarget$Path": "

A single Amazon S3 path to write to.

", + "S3JsonSource$GroupSize": "

The target group size in bytes. The default is computed based on the input data size and the size of your cluster. When there are fewer than 50,000 input files, \"groupFiles\" must be set to \"inPartition\" for this to take effect.

", + "S3JsonSource$GroupFiles": "

Grouping files is turned on by default when the input contains more than 50,000 files. To turn on grouping with fewer than 50,000 files, set this parameter to \"inPartition\". To disable grouping when there are more than 50,000 files, set this parameter to \"none\".

", + "S3JsonSource$JsonPath": "

A JsonPath string defining the JSON data.

", + "S3ParquetSource$GroupSize": "

The target group size in bytes. The default is computed based on the input data size and the size of your cluster. When there are fewer than 50,000 input files, \"groupFiles\" must be set to \"inPartition\" for this to take effect.

", + "S3ParquetSource$GroupFiles": "

Grouping files is turned on by default when the input contains more than 50,000 files. To turn on grouping with fewer than 50,000 files, set this parameter to \"inPartition\". To disable grouping when there are more than 50,000 files, set this parameter to \"none\".

", + "SparkConnectorSource$ConnectionName": "

The name of the connection that is associated with the connector.

", + "SparkConnectorSource$ConnectorName": "

The name of a connector that assists with accessing the data store in Glue Studio.

", + "SparkConnectorSource$ConnectionType": "

The type of connection, such as marketplace.spark or custom.spark, designating a connection to an Apache Spark data store.

", + "SparkConnectorTarget$ConnectionName": "

The name of a connection for an Apache Spark connector.

", + "SparkConnectorTarget$ConnectorName": "

The name of an Apache Spark connector.

", + "SparkConnectorTarget$ConnectionType": "

The type of connection, such as marketplace.spark or custom.spark, designating a connection to an Apache Spark data store.

", + "Spigot$Path": "

A path in Amazon S3 where the transform will write a subset of records from the dataset to a JSON file.

", + "UpsertRedshiftTargetOptions$TableLocation": "

The physical location of the Redshift table.

", + "UpsertRedshiftTargetOptions$ConnectionName": "

The name of the connection to use to write to Redshift.

" + } + }, + "EnclosedInStringPropertyWithQuote": { + "base": null, + "refs": { + "AthenaConnectorSource$ConnectionTable": "

The name of the table in the data source.

", + "JDBCConnectorSource$ConnectionTable": "

The name of the table in the data source.

", + "JDBCConnectorTarget$ConnectionTable": "

The name of the table in the data target.

", + "S3CsvSource$Escaper": "

Specifies a character to use for escaping. This option is used only when reading CSV files. The default value is none. If enabled, the character which immediately follows is used as-is, except for a small set of well-known escapes (\\n, \\r, \\t, and \\0).

", + "SqlAlias$Alias": "

A temporary name given to a table, or a column in a table.

" + } + }, "EncryptionAtRest": { "base": "

Specifies the encryption-at-rest configuration for the Data Catalog.

", "refs": { @@ -2170,6 +2558,12 @@ "TaskRunProperties$ExportLabelsTaskRunProperties": "

The configuration properties for an exporting labels task run.

" } }, + "ExtendedString": { + "base": null, + "refs": { + "CustomCode$Code": "

The custom code that is used to perform the data transformation.

" + } + }, "FieldType": { "base": null, "refs": { @@ -2177,12 +2571,66 @@ "MappingEntry$TargetType": "

The target type.

" } }, + "FillMissingValues": { + "base": "

Specifies a transform that locates records in the dataset that have missing values and adds a new field with a value determined by imputation. The input data set is used to train the machine learning model that determines what the missing value should be.

", + "refs": { + "CodeGenConfigurationNode$FillMissingValues": "

Specifies a transform that locates records in the dataset that have missing values and adds a new field with a value determined by imputation. The input data set is used to train the machine learning model that determines what the missing value should be.

" + } + }, + "Filter": { + "base": "

Specifies a transform that splits a dataset into two, based on a filter condition.

", + "refs": { + "CodeGenConfigurationNode$Filter": "

Specifies a transform that splits a dataset into two, based on a filter condition.

" + } + }, + "FilterExpression": { + "base": "

Specifies a filter expression.

", + "refs": { + "FilterExpressions$member": null + } + }, + "FilterExpressions": { + "base": null, + "refs": { + "Filter$Filters": "

Specifies a filter expression.

" + } + }, + "FilterLogicalOperator": { + "base": null, + "refs": { + "Filter$LogicalOperator": "

The operator used to filter rows by comparing the key value to a specified value.

" + } + }, + "FilterOperation": { + "base": null, + "refs": { + "FilterExpression$Operation": "

The type of operation to perform in the expression.

" + } + }, "FilterString": { "base": null, "refs": { "GetTablesRequest$Expression": "

A regular expression pattern. If present, only those tables whose names match the pattern are returned.

" } }, + "FilterValue": { + "base": "

Represents a single entry in the list of values for a FilterExpression.

", + "refs": { + "FilterValues$member": null + } + }, + "FilterValueType": { + "base": null, + "refs": { + "FilterValue$Type": "

The type of filter value.

" + } + }, + "FilterValues": { + "base": null, + "refs": { + "FilterExpression$Values": "

A list of filter values.
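A hedged fragment of a complete filter (same imports as the CreateJob sketch above) that keeps rows whose status column equals the constant \"active\"; the EQ, COLUMNEXTRACTED, CONSTANT, and AND spellings are assumptions about the enums backing these string fields:

```go
// Filter node: keep rows where column "status" == "active".
f := &glue.Filter{
	Name:            aws.String("keep-active"),
	Inputs:          []*string{aws.String("node-1")},
	LogicalOperator: aws.String("AND"),
	Filters: []*glue.FilterExpression{{
		Operation: aws.String("EQ"),
		Values: []*glue.FilterValue{
			{Type: aws.String("COLUMNEXTRACTED"), Value: []*string{aws.String("status")}},
			{Type: aws.String("CONSTANT"), Value: []*string{aws.String("active")}},
		},
	}},
}
```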

" + } + }, "FindMatchesMetrics": { "base": "

The evaluation metrics for the find matches algorithm. The quality of your machine learning transform is measured by getting your transform to predict some matches and comparing the results to known matches from the same dataset. The quality metrics are based on a subset of your data, so they are not precise.

", "refs": { @@ -2229,6 +2677,14 @@ "FindMatchesParameters$AccuracyCostTradeoff": "

The value that is selected when tuning your transform for a balance between accuracy and cost. A value of 0.5 means that the system balances accuracy and cost concerns. A value of 1.0 means a bias purely for accuracy, which typically results in a higher cost, sometimes substantially higher. A value of 0.0 means a bias purely for cost, which results in a less accurate FindMatches transform, sometimes with unacceptable accuracy.

Accuracy measures how well the transform finds true positives and true negatives. Increasing accuracy requires more machine resources and cost. But it also results in increased recall.

Cost measures how many compute resources, and thus money, are consumed to run the transform.

" } }, + "GenericLimitedString": { + "base": null, + "refs": { + "Datatype$Id": "

The datatype of the value.

", + "Datatype$Label": "

A label assigned to the datatype.

", + "LimitedStringList$member": null + } + }, "GenericMap": { "base": null, "refs": { @@ -2982,6 +3438,12 @@ "GetResourcePoliciesResponseList$member": null } }, + "GlueRecordType": { + "base": null, + "refs": { + "JDBCDataTypeMapping$value": null + } + }, "GlueResourceArn": { "base": null, "refs": { @@ -3012,6 +3474,60 @@ "UpdateSchemaResponse$SchemaArn": "

The Amazon Resource Name (ARN) of the schema.

" } }, + "GlueSchema": { + "base": "

Specifies a user-defined schema when a schema cannot be determined by AWS Glue.

", + "refs": { + "GlueSchemas$member": null + } + }, + "GlueSchemas": { + "base": null, + "refs": { + "AthenaConnectorSource$OutputSchemas": "

Specifies the data schema for the custom Athena source.

", + "CustomCode$OutputSchemas": "

Specifies the data schema for the custom code transform.

", + "JDBCConnectorSource$OutputSchemas": "

Specifies the data schema for the custom JDBC source.

", + "JDBCConnectorTarget$OutputSchemas": "

Specifies the data schema for the JDBC target.

", + "S3CsvSource$OutputSchemas": "

Specifies the data schema for the S3 CSV source.

", + "S3JsonSource$OutputSchemas": "

Specifies the data schema for the S3 JSON source.

", + "S3ParquetSource$OutputSchemas": "

Specifies the data schema for the S3 Parquet source.

", + "SparkConnectorSource$OutputSchemas": "

Specifies the data schema for the custom Spark source.

", + "SparkConnectorTarget$OutputSchemas": "

Specifies the data schema for the custom Spark target.

", + "SparkSQL$OutputSchemas": "

Specifies the data schema for the SparkSQL transform.

" + } + }, + "GlueStudioColumnNameString": { + "base": null, + "refs": { + "GlueStudioSchemaColumn$Name": "

The name of the column in the Glue Studio schema.

" + } + }, + "GlueStudioPathList": { + "base": null, + "refs": { + "Aggregate$Groups": "

Specifies the fields to group by.

", + "DropFields$Paths": "

A JSON path to a variable in the data structure.

", + "GovernedCatalogTarget$PartitionKeys": "

Specifies native partitioning using a sequence of keys.

", + "JoinColumn$Keys": "

The key of the column to be joined.

", + "Merge$PrimaryKeys": "

The list of primary key fields to match records from the source and staging dynamic frames.

", + "S3CatalogTarget$PartitionKeys": "

Specifies native partitioning using a sequence of keys.

", + "S3DirectTarget$PartitionKeys": "

Specifies native partitioning using a sequence of keys.

", + "S3GlueParquetTarget$PartitionKeys": "

Specifies native partitioning using a sequence of keys.

", + "SelectFields$Paths": "

A JSON path to a variable in the data structure.

", + "SplitFields$Paths": "

A JSON path to a variable in the data structure.

" + } + }, + "GlueStudioSchemaColumn": { + "base": "

Specifies a single column in a Glue schema definition.

", + "refs": { + "GlueStudioSchemaColumnList$member": null + } + }, + "GlueStudioSchemaColumnList": { + "base": null, + "refs": { + "GlueSchema$Columns": "

Specifies the column definitions that make up a Glue schema.

" + } + }, "GlueTable": { "base": "

The database and table in the Glue Data Catalog that is used for input or output data.

", "refs": { @@ -3045,6 +3561,18 @@ "UpdateMLTransformRequest$GlueVersion": "

This value determines which version of Glue this machine learning transform is compatible with. Glue 1.0 is recommended for most customers. If the value is not set, the Glue compatibility defaults to Glue 0.9. For more information, see Glue Versions in the developer guide.

" } }, + "GovernedCatalogSource": { + "base": "

Specifies the data store in the governed Glue Data Catalog.

", + "refs": { + "CodeGenConfigurationNode$GovernedCatalogSource": "

Specifies a data source in a governed Data Catalog.

" + } + }, + "GovernedCatalogTarget": { + "base": "

Specifies a data target that writes to Amazon S3 using the Glue Data Catalog.

", + "refs": { + "CodeGenConfigurationNode$GovernedCatalogTarget": "

Specifies a data target that writes to a governed catalog.

" + } + }, "GrokClassifier": { "base": "

A classifier that uses grok patterns.

", "refs": { @@ -3228,6 +3756,36 @@ "CheckSchemaVersionValidityResponse$Valid": "

Returns true if the schema is valid, and false otherwise.

" } }, + "JDBCConnectorOptions": { + "base": "

Additional connection options for the connector.

", + "refs": { + "JDBCConnectorSource$AdditionalOptions": "

Additional connection options for the connector.

" + } + }, + "JDBCConnectorSource": { + "base": "

Specifies a connector to a JDBC data source.

", + "refs": { + "CodeGenConfigurationNode$JDBCConnectorSource": "

Specifies a connector to a JDBC data source.

" + } + }, + "JDBCConnectorTarget": { + "base": "

Specifies a data target that writes to a JDBC data store using a connector.

", + "refs": { + "CodeGenConfigurationNode$JDBCConnectorTarget": "

Specifies a data target that writes to a JDBC data store using a connector.

" + } + }, + "JDBCDataType": { + "base": null, + "refs": { + "JDBCDataTypeMapping$key": null + } + }, + "JDBCDataTypeMapping": { + "base": null, + "refs": { + "JDBCConnectorOptions$DataTypeMapping": "

Custom data type mapping that builds a mapping from a JDBC data type to a Glue data type. For example, the option \"dataTypeMapping\":{\"FLOAT\":\"STRING\"} maps data fields of JDBC type FLOAT into the Java String type by calling the ResultSet.getString() method of the driver, and uses it to build the Glue record. The ResultSet object is implemented by each driver, so the behavior is specific to the driver you use. Refer to the documentation for your JDBC driver to understand how the driver performs the conversions.
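The documented option in SDK terms, as a hedged fragment (same imports as the CreateJob sketch above):

```go
// JDBC FLOAT values are read with ResultSet.getString() and carried into
// Glue records as STRING.
opts := &glue.JDBCConnectorOptions{
	DataTypeMapping: map[string]*string{"FLOAT": aws.String("STRING")},
}
```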

" + } + }, "JdbcTarget": { "base": "

Specifies a JDBC data store to crawl.

", "refs": { @@ -3330,6 +3888,30 @@ "UpdateJobRequest$JobUpdate": "

Specifies the values with which to update the job definition.

" } }, + "Join": { + "base": "

Specifies a transform that joins two datasets into one dataset using a comparison phrase on the specified data property keys. You can use inner, outer, left, right, left semi, and left anti joins.

", + "refs": { + "CodeGenConfigurationNode$Join": "

Specifies a transform that joins two datasets into one dataset using a comparison phrase on the specified data property keys. You can use inner, outer, left, right, left semi, and left anti joins.

" + } + }, + "JoinColumn": { + "base": "

Specifies a column to be joined.

", + "refs": { + "JoinColumns$member": null + } + }, + "JoinColumns": { + "base": null, + "refs": { + "Join$Columns": "

A list of the two columns to be joined.

" + } + }, + "JoinType": { + "base": null, + "refs": { + "Join$JoinType": "

Specifies the type of join to be performed on the datasets.
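A hedged fragment of an equality join of two input nodes on their id columns (same imports as the CreateJob sketch above; the equijoin spelling is an assumption about the JoinType values):

```go
// Join two upstream nodes on matching "id" keys.
j := &glue.Join{
	Name:     aws.String("join"),
	Inputs:   []*string{aws.String("left-node"), aws.String("right-node")},
	JoinType: aws.String("equijoin"),
	Columns: []*glue.JoinColumn{
		{From: aws.String("left-node"), Keys: [][]*string{{aws.String("id")}}},
		{From: aws.String("right-node"), Keys: [][]*string{{aws.String("id")}}},
	},
}
```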

" + } + }, "JsonClassifier": { "base": "

A classifier for JSON content.

", "refs": { @@ -3350,6 +3932,13 @@ "JobBookmarkEntry$JobBookmark": "

The bookmark itself.

" } }, + "KafkaStreamingSourceOptions": { + "base": "

Additional options for streaming.

", + "refs": { + "CatalogKafkaSource$StreamingOptions": "

Specifies the streaming options.

", + "DirectKafkaSource$StreamingOptions": "

Specifies the streaming options.

" + } + }, "KeyList": { "base": null, "refs": { @@ -3374,6 +3963,13 @@ "ParametersMap$key": null } }, + "KinesisStreamingSourceOptions": { + "base": "

Additional options for the Amazon Kinesis streaming data source.

", + "refs": { + "CatalogKinesisSource$StreamingOptions": "

Additional options for the Kinesis streaming data source.

", + "DirectKinesisSource$StreamingOptions": "

Additional options for the Kinesis streaming data source.

" + } + }, "KmsKeyArn": { "base": null, "refs": { @@ -3436,6 +4032,18 @@ "SchemaVersionNumber$LatestVersion": "

The latest version available for the schema.

" } }, + "LimitedPathList": { + "base": null, + "refs": { + "DropDuplicates$Columns": "

The names of the columns to be merged or removed if repeating.

" + } + }, + "LimitedStringList": { + "base": null, + "refs": { + "LimitedPathList$member": null + } + }, "LineageConfiguration": { "base": "

Specifies data lineage configuration settings for the crawler.

", "refs": { @@ -3669,6 +4277,13 @@ "MLUserDataEncryption$MlUserDataEncryptionMode": "

The encryption mode applied to user data. Valid values are:

" } }, + "ManyInputs": { + "base": null, + "refs": { + "CustomCode$Inputs": "

The data inputs identified by their node names.

", + "SparkSQL$Inputs": "

The data inputs identified by their node names. You can associate a table name with each input node to use in the SQL query. The name you choose must meet the Spark SQL naming restrictions.

" + } + }, "MapValue": { "base": null, "refs": { @@ -3678,6 +4293,12 @@ "UpdateDevEndpointRequest$AddArguments": "

The map of arguments to add to the map of arguments used to configure the DevEndpoint.

Valid arguments are:

You can specify a version of Python support for development endpoints by using the Arguments parameter in the CreateDevEndpoint or UpdateDevEndpoint APIs. If no arguments are provided, the version defaults to Python 2.

" } }, + "Mapping": { + "base": "

Specifies the mapping of data property keys.

", + "refs": { + "Mappings$member": null + } + }, "MappingEntry": { "base": "

Defines a mapping.

", "refs": { @@ -3691,6 +4312,19 @@ "GetPlanRequest$Mapping": "

The list of mappings from a source table to target tables.

" } }, + "Mappings": { + "base": null, + "refs": { + "ApplyMapping$Mapping": "

Specifies the mapping of data property keys in the data source to data property keys in the data target.

", + "Mapping$Children": "

Only applicable to nested data structures. If you want to change the parent structure, but also one of its children, you can fill out this data structure. It is also a Mapping, but its FromPath will be the parent's FromPath plus the FromPath from this structure.

For the children part, suppose you have the structure:

{ \"FromPath\": \"OuterStructure\", \"ToKey\": \"OuterStructure\", \"ToType\": \"Struct\", \"Dropped\": false, \"Children\": [{ \"FromPath\": \"inner\", \"ToKey\": \"inner\", \"ToType\": \"Double\", \"Dropped\": false }] }

You can specify a Mapping that looks like:

{ \"FromPath\": \"OuterStructure\", \"ToKey\": \"OuterStructure\", \"ToType\": \"Struct\", \"Dropped\": false, \"Children\": [{ \"FromPath\": \"inner\", \"ToKey\": \"inner\", \"ToType\": \"Double\", \"Dropped\": false }] }
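The same nested mapping as a hedged aws-sdk-go fragment (same imports as the CreateJob sketch above); note that the child's FromPath (inner) is relative to the parent's FromPath (OuterStructure):

```go
// Nested mapping: remap the struct "OuterStructure" and its child "inner".
m := &glue.Mapping{
	FromPath: []*string{aws.String("OuterStructure")},
	ToKey:    aws.String("OuterStructure"),
	ToType:   aws.String("Struct"),
	Dropped:  aws.Bool(false),
	Children: []*glue.Mapping{{
		FromPath: []*string{aws.String("inner")},
		ToKey:    aws.String("inner"),
		ToType:   aws.String("Double"),
		Dropped:  aws.Bool(false),
	}},
}
```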

" + } + }, + "MaskValue": { + "base": null, + "refs": { + "PIIDetection$MaskValue": "

Indicates the value that will replace the detected entity.

" + } + }, "MatchCriteria": { "base": null, "refs": { @@ -3721,6 +4355,12 @@ "JobUpdate$MaxRetries": "

The maximum number of times to retry this job if it fails.

" } }, + "Merge": { + "base": "

Specifies a transform that merges a DynamicFrame with a staging DynamicFrame based on the specified primary keys to identify records. Duplicate records (records with the same primary keys) are not de-duplicated.

", + "refs": { + "CodeGenConfigurationNode$Merge": "

Specifies a transform that merges a DynamicFrame with a staging DynamicFrame based on the specified primary keys to identify records. Duplicate records (records with the same primary keys) are not de-duplicated.

" + } + }, "MessagePrefix": { "base": null, "refs": { @@ -3808,6 +4448,18 @@ "RemoveSchemaVersionMetadataResponse$MetadataValue": "

The value of the metadata key.

" } }, + "MicrosoftSQLServerCatalogSource": { + "base": "

Specifies a Microsoft SQL Server data source in the Glue Data Catalog.

", + "refs": { + "CodeGenConfigurationNode$MicrosoftSQLServerCatalogSource": "

Specifies a Microsoft SQL Server data source in the Glue Data Catalog.

" + } + }, + "MicrosoftSQLServerCatalogTarget": { + "base": "

Specifies a target that uses Microsoft SQL.

", + "refs": { + "CodeGenConfigurationNode$MicrosoftSQLServerCatalogTarget": "

Specifies a target that uses Microsoft SQL.

" + } + }, "MillisecondsCount": { "base": null, "refs": { @@ -3826,6 +4478,18 @@ "CrawlerTargets$MongoDBTargets": "

Specifies Amazon DocumentDB or MongoDB targets.

" } }, + "MySQLCatalogSource": { + "base": "

Specifies a MySQL data source in the Glue Data Catalog.

", + "refs": { + "CodeGenConfigurationNode$MySQLCatalogSource": "

Specifies a MySQL data source in the Glue Data Catalog.

" + } + }, + "MySQLCatalogTarget": { + "base": "

Specifies a target that uses MySQL.

", + "refs": { + "CodeGenConfigurationNode$MySQLCatalogTarget": "

Specifies a target that uses MySQL.

" + } + }, "NameString": { "base": null, "refs": { @@ -4133,6 +4797,17 @@ "NodeList$member": null } }, + "NodeId": { + "base": null, + "refs": { + "CodeGenConfigurationNodes$key": null, + "ManyInputs$member": null, + "Merge$Source": "

The source DynamicFrame that will be merged with a staging DynamicFrame.

", + "OneInput$member": null, + "SqlAlias$From": "

A table, or a column in a table.

", + "TwoInputs$member": null + } + }, "NodeIdList": { "base": null, "refs": { @@ -4146,6 +4821,61 @@ "WorkflowGraph$Nodes": "

A list of the Glue components that belong to the workflow, represented as nodes.

" } }, + "NodeName": { + "base": null, + "refs": { + "Aggregate$Name": "

The name of the transform node.

", + "ApplyMapping$Name": "

The name of the transform node.

", + "AthenaConnectorSource$Name": "

The name of the data source.

", + "BasicCatalogTarget$Name": "

The name of your data target.

", + "CatalogKafkaSource$Name": "

The name of the data store.

", + "CatalogKinesisSource$Name": "

The name of the data source.

", + "CatalogSource$Name": "

The name of the data store.

", + "CustomCode$Name": "

The name of the transform node.

", + "DirectKafkaSource$Name": "

The name of the data store.

", + "DirectKinesisSource$Name": "

The name of the data source.

", + "DropDuplicates$Name": "

The name of the transform node.

", + "DropFields$Name": "

The name of the transform node.

", + "DropNullFields$Name": "

The name of the transform node.

", + "DynamoDBCatalogSource$Name": "

The name of the data source.

", + "FillMissingValues$Name": "

The name of the transform node.

", + "Filter$Name": "

The name of the transform node.

", + "GovernedCatalogSource$Name": "

The name of the data store.

", + "GovernedCatalogTarget$Name": "

The name of the data target.

", + "JDBCConnectorSource$Name": "

The name of the data source.

", + "JDBCConnectorTarget$Name": "

The name of the data target.

", + "Join$Name": "

The name of the transform node.

", + "Merge$Name": "

The name of the transform node.

", + "MicrosoftSQLServerCatalogSource$Name": "

The name of the data source.

", + "MicrosoftSQLServerCatalogTarget$Name": "

The name of the data target.

", + "MySQLCatalogSource$Name": "

The name of the data source.

", + "MySQLCatalogTarget$Name": "

The name of the data target.

", + "OracleSQLCatalogSource$Name": "

The name of the data source.

", + "OracleSQLCatalogTarget$Name": "

The name of the data target.

", + "PIIDetection$Name": "

The name of the transform node.

", + "PostgreSQLCatalogSource$Name": "

The name of the data source.

", + "PostgreSQLCatalogTarget$Name": "

The name of the data target.

", + "RedshiftSource$Name": "

The name of the Amazon Redshift data store.

", + "RedshiftTarget$Name": "

The name of the data target.

", + "RelationalCatalogSource$Name": "

The name of the data source.

", + "RenameField$Name": "

The name of the transform node.

", + "S3CatalogSource$Name": "

The name of the data store.

", + "S3CatalogTarget$Name": "

The name of the data target.

", + "S3CsvSource$Name": "

The name of the data store.

", + "S3DirectTarget$Name": "

The name of the data target.

", + "S3GlueParquetTarget$Name": "

The name of the data target.

", + "S3JsonSource$Name": "

The name of the data store.

", + "S3ParquetSource$Name": "

The name of the data store.

", + "SelectFields$Name": "

The name of the transform node.

", + "SelectFromCollection$Name": "

The name of the transform node.

", + "SparkConnectorSource$Name": "

The name of the data source.

", + "SparkConnectorTarget$Name": "

The name of the data target.

", + "SparkSQL$Name": "

The name of the transform node.

", + "Spigot$Name": "

The name of the transform node.

", + "SplitFields$Name": "

The name of the transform node.

", + "Union$Name": "

The name of the transform node.

" + } + }, "NodeType": { "base": null, "refs": { @@ -4162,6 +4892,12 @@ "StringColumnStatisticsData$AverageLength": "

The average string length in the column.

" } }, + "NonNegativeInt": { + "base": null, + "refs": { + "SelectFromCollection$Index": "

The index for the DynamicFrame to be selected.

" + } + }, "NonNegativeInteger": { "base": null, "refs": { @@ -4211,6 +4947,24 @@ "NotificationProperty$NotifyDelayAfter": "

After a job run starts, the number of minutes to wait before sending a job run delay notification.

" } }, + "NullCheckBoxList": { + "base": "

Represents whether certain values are recognized as null values for removal.

", + "refs": { + "DropNullFields$NullCheckBoxList": "

A structure that represents whether certain values are recognized as null values for removal.

" + } + }, + "NullValueField": { + "base": "

Represents a custom null value, such as a zero or other value, that is used as a null placeholder unique to the dataset.

", + "refs": { + "NullValueFields$member": null + } + }, + "NullValueFields": { + "base": null, + "refs": { + "DropNullFields$NullTextList": "

A structure that specifies a list of NullValueField structures that represent a custom null value, such as a zero or other value, that is used as a null placeholder unique to the dataset.

The DropNullFields transform removes custom null values only if both the value of the null placeholder and the datatype match the data.
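A hedged fragment (same imports as the CreateJob sketch above) that treats the string \"N/A\" and the integer -1 as custom nulls; the Datatype Id and Label values are assumptions about the accepted type names:

```go
// A placeholder is removed only when both its value and datatype match.
d := &glue.DropNullFields{
	Name:   aws.String("drop-nulls"),
	Inputs: []*string{aws.String("node-1")},
	NullTextList: []*glue.NullValueField{
		{Value: aws.String("N/A"), Datatype: &glue.Datatype{Id: aws.String("string"), Label: aws.String("string")}},
		{Value: aws.String("-1"), Datatype: &glue.Datatype{Id: aws.String("int"), Label: aws.String("int")}},
	},
}
```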

" + } + }, "NullableBoolean": { "base": null, "refs": { @@ -4282,11 +5036,53 @@ "Workflow$MaxConcurrentRuns": "

You can use this parameter to prevent unwanted multiple updates to data, to control costs, or in some cases, to prevent exceeding the maximum number of concurrent runs of any of the component jobs. If you leave this parameter blank, there is no limit to the number of concurrent workflow runs.

" } }, + "OneInput": { + "base": null, + "refs": { + "Aggregate$Inputs": "

Specifies the fields and rows to use as inputs for the aggregate transform.

", + "ApplyMapping$Inputs": "

The data inputs identified by their node names.

", + "BasicCatalogTarget$Inputs": "

The nodes that are inputs to the data target.

", + "DropDuplicates$Inputs": "

The data inputs identified by their node names.

", + "DropFields$Inputs": "

The data inputs identified by their node names.

", + "DropNullFields$Inputs": "

The data inputs identified by their node names.

", + "FillMissingValues$Inputs": "

The data inputs identified by their node names.

", + "Filter$Inputs": "

The data inputs identified by their node names.

", + "GovernedCatalogTarget$Inputs": "

The nodes that are inputs to the data target.

", + "JDBCConnectorTarget$Inputs": "

The nodes that are inputs to the data target.

", + "MicrosoftSQLServerCatalogTarget$Inputs": "

The nodes that are inputs to the data target.

", + "MySQLCatalogTarget$Inputs": "

The nodes that are inputs to the data target.

", + "OracleSQLCatalogTarget$Inputs": "

The nodes that are inputs to the data target.

", + "PIIDetection$Inputs": "

The node ID inputs to the transform.

", + "PostgreSQLCatalogTarget$Inputs": "

The nodes that are inputs to the data target.

", + "RedshiftTarget$Inputs": "

The nodes that are inputs to the data target.

", + "RenameField$Inputs": "

The data inputs identified by their node names.

", + "S3CatalogTarget$Inputs": "

The nodes that are inputs to the data target.

", + "S3DirectTarget$Inputs": "

The nodes that are inputs to the data target.

", + "S3GlueParquetTarget$Inputs": "

The nodes that are inputs to the data target.

", + "SelectFields$Inputs": "

The data inputs identified by their node names.

", + "SelectFromCollection$Inputs": "

The data inputs identified by their node names.

", + "SparkConnectorTarget$Inputs": "

The nodes that are inputs to the data target.

", + "Spigot$Inputs": "

The data inputs identified by their node names.

", + "SplitFields$Inputs": "

The data inputs identified by their node names.

" + } + }, "OperationTimeoutException": { "base": "

The operation timed out.

", "refs": { } }, + "OracleSQLCatalogSource": { + "base": "

Specifies an Oracle data source in the Glue Data Catalog.

", + "refs": { + "CodeGenConfigurationNode$OracleSQLCatalogSource": "

Specifies an Oracle data source in the Glue Data Catalog.

" + } + }, + "OracleSQLCatalogTarget": { + "base": "

Specifies a target that uses Oracle SQL.

", + "refs": { + "CodeGenConfigurationNode$OracleSQLCatalogTarget": "

Specifies a target that uses Oracle SQL.

" + } + }, "OrchestrationArgumentsMap": { "base": null, "refs": { @@ -4391,6 +5187,12 @@ "OtherMetadataValueList$member": null } }, + "PIIDetection": { + "base": "

Specifies a transform that identifies, removes, or masks PII data.

", + "refs": { + "CodeGenConfigurationNode$PIIDetection": "

Specifies a transform that identifies, removes, or masks PII data.

" + } + }, "PageSize": { "base": null, "refs": { @@ -4455,6 +5257,13 @@ "ParametersMap$value": null } }, + "ParquetCompressionType": { + "base": null, + "refs": { + "S3GlueParquetTarget$Compression": "

Specifies how the data is compressed. This is generally not necessary if the data has a standard file extension. Possible values are \"gzip\" and \"bzip\".

", + "S3ParquetSource$CompressionType": "

Specifies how the data is compressed. This is generally not necessary if the data has a standard file extension. Possible values are \"gzip\" and \"bzip\".

" + } + }, "Partition": { "base": "

Represents a slice of table data.

", "refs": { @@ -4594,6 +5403,12 @@ "ConnectionInput$PhysicalConnectionRequirements": "

A map of physical connection requirements, such as virtual private cloud (VPC) and SecurityGroup, that are needed to successfully make this connection.

" } }, + "PiiType": { + "base": null, + "refs": { + "PIIDetection$PiiType": "

Indicates the type of PIIDetection transform.

" + } + }, "PolicyJsonString": { "base": null, "refs": { @@ -4602,6 +5417,30 @@ "PutResourcePolicyRequest$PolicyInJson": "

Contains the policy document to set, in JSON format.

" } }, + "PollingTime": { + "base": null, + "refs": { + "StreamingDataPreviewOptions$PollingTime": "

The polling time in milliseconds.

" + } + }, + "PositiveLong": { + "base": null, + "refs": { + "StreamingDataPreviewOptions$RecordPollingLimit": "

The limit to the number of records polled.

" + } + }, + "PostgreSQLCatalogSource": { + "base": "

Specifies a PostgreSQL data source in the Glue Data Catalog.

", + "refs": { + "CodeGenConfigurationNode$PostgreSQLCatalogSource": "

Specifies a PostgreSQL data source in the Glue Data Catalog.

" + } + }, + "PostgreSQLCatalogTarget": { + "base": "

Specifies a target that uses PostgreSQL.

", + "refs": { + "CodeGenConfigurationNode$PostgreSQLCatalogTarget": "

Specifies a target that uses PostgreSQL.

" + } + }, "Predecessor": { "base": "

A job run that was used in the predicate of a conditional trigger that triggered this job run.

", "refs": { @@ -4650,6 +5489,12 @@ "UserDefinedFunctionInput$OwnerType": "

The owner type.

" } }, + "Prob": { + "base": null, + "refs": { + "Spigot$Prob": "

The probability (a decimal value with a maximum value of 1) of picking any given record. A value of 1 indicates that each row read from the dataset should be included in the sample output.
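A hedged fragment (same imports as the CreateJob sketch above) that samples roughly 1% of the records flowing through a node into a hypothetical S3 path:

```go
// Spigot node: write about 1 in 100 records to S3 for inspection.
spigot := &glue.Spigot{
	Name:   aws.String("sample"),
	Inputs: []*string{aws.String("node-1")},
	Path:   aws.String("s3://my-bucket/samples/"),
	Prob:   aws.Float64(0.01),
}
```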

" + } + }, "PropertyPredicate": { "base": "

Defines a property predicate.

", "refs": { @@ -4736,6 +5581,12 @@ "refs": { } }, + "QuoteChar": { + "base": null, + "refs": { + "S3CsvSource$QuoteChar": "

Specifies the character to use for quoting. The default is a double quote: '\"'. Set this to -1 to turn off quoting entirely.

" + } + }, "RecordsCount": { "base": null, "refs": { @@ -4759,6 +5610,18 @@ "UpdateCrawlerRequest$RecrawlPolicy": "

A policy that specifies whether to crawl the entire dataset again, or to crawl only folders that were added since the last crawler run.

" } }, + "RedshiftSource": { + "base": "

Specifies an Amazon Redshift data store.

", + "refs": { + "CodeGenConfigurationNode$RedshiftSource": "

Specifies an Amazon Redshift data store.

" + } + }, + "RedshiftTarget": { + "base": "

Specifies a target that uses Amazon Redshift.

", + "refs": { + "CodeGenConfigurationNode$RedshiftTarget": "

Specifies a target that uses Amazon Redshift.

" + } + }, "RegisterSchemaVersionInput": { "base": null, "refs": { @@ -4799,6 +5662,12 @@ "RegistryListItem$Status": "

The status of the registry.

" } }, + "RelationalCatalogSource": { + "base": "

Specifies a relational database data source in the Glue Data Catalog.

", + "refs": { + "CodeGenConfigurationNode$RelationalCatalogSource": null + } + }, "RemoveSchemaVersionMetadataInput": { "base": null, "refs": { @@ -4809,6 +5678,12 @@ "refs": { } }, + "RenameField": { + "base": "

Specifies a transform that renames a single data property key.

", + "refs": { + "CodeGenConfigurationNode$RenameField": "

Specifies a transform that renames a single data property key.

" + } + }, "ReplaceBoolean": { "base": null, "refs": { @@ -4927,6 +5802,38 @@ "refs": { } }, + "S3CatalogSource": { + "base": "

Specifies an Amazon S3 data store in the Glue Data Catalog.

", + "refs": { + "CodeGenConfigurationNode$S3CatalogSource": "

Specifies an Amazon S3 data store in the Glue Data Catalog.

" + } + }, + "S3CatalogTarget": { + "base": "

Specifies a data target that writes to Amazon S3 using the Glue Data Catalog.

", + "refs": { + "CodeGenConfigurationNode$S3CatalogTarget": "

Specifies a data target that writes to Amazon S3 using the Glue Data Catalog.

" + } + }, + "S3CsvSource": { + "base": "

Specifies a comma-separated value (CSV) data store stored in Amazon S3.

", + "refs": { + "CodeGenConfigurationNode$S3CsvSource": "

Specifies a comma-separated value (CSV) data store stored in Amazon S3.

" + } + }, + "S3DirectSourceAdditionalOptions": { + "base": "

Specifies additional connection options for the Amazon S3 data store.

", + "refs": { + "S3CsvSource$AdditionalOptions": "

Specifies additional connection options.

", + "S3JsonSource$AdditionalOptions": "

Specifies additional connection options.

", + "S3ParquetSource$AdditionalOptions": "

Specifies additional connection options.

" + } + }, + "S3DirectTarget": { + "base": "

Specifies a data target that writes to Amazon S3.

", + "refs": { + "CodeGenConfigurationNode$S3DirectTarget": "

Specifies a data target that writes to Amazon S3.

" + } + }, "S3Encryption": { "base": "

Specifies how Amazon Simple Storage Service (Amazon S3) data should be encrypted.

", "refs": { @@ -4945,6 +5852,31 @@ "S3Encryption$S3EncryptionMode": "

The encryption mode to use for Amazon S3 data.

" } }, + "S3GlueParquetTarget": { + "base": "

Specifies a data target that writes to Amazon S3 in Apache Parquet columnar storage.

", + "refs": { + "CodeGenConfigurationNode$S3GlueParquetTarget": "

Specifies a data target that writes to Amazon S3 in Apache Parquet columnar storage.

" + } + }, + "S3JsonSource": { + "base": "

Specifies a JSON data store stored in Amazon S3.

", + "refs": { + "CodeGenConfigurationNode$S3JsonSource": "

Specifies a JSON data store stored in Amazon S3.

" + } + }, + "S3ParquetSource": { + "base": "

Specifies an Apache Parquet data store stored in Amazon S3.

", + "refs": { + "CodeGenConfigurationNode$S3ParquetSource": "

Specifies an Apache Parquet data store stored in Amazon S3.

" + } + }, + "S3SourceAdditionalOptions": { + "base": "

Specifies additional connection options for the Amazon S3 data store.

", + "refs": { + "GovernedCatalogSource$AdditionalOptions": "

Specifies additional connection options.

", + "S3CatalogSource$AdditionalOptions": "

Specifies additional connection options.

" + } + }, "S3Target": { "base": "

Specifies a data store in Amazon Simple Storage Service (Amazon S3).

", "refs": { @@ -5245,6 +6177,24 @@ "GetUnfilteredPartitionsMetadataRequest$Segment": null } }, + "SelectFields": { + "base": "

Specifies a transform that chooses the data property keys that you want to keep.

", + "refs": { + "CodeGenConfigurationNode$SelectFields": "

Specifies a transform that chooses the data property keys that you want to keep.

" + } + }, + "SelectFromCollection": { + "base": "

Specifies a transform that chooses one DynamicFrame from a collection of DynamicFrames. The output is the selected DynamicFrame.

", + "refs": { + "CodeGenConfigurationNode$SelectFromCollection": "

Specifies a transform that chooses one DynamicFrame from a collection of DynamicFrames. The output is the selected DynamicFrame.

" + } + }, + "Separator": { + "base": null, + "refs": { + "S3CsvSource$Separator": "

Specifies the delimiter character. The default is a comma: \",\", but any other character can be specified.

" + } + }, "SerDeInfo": { "base": "

Information about a serialization/deserialization program (SerDe) that serves as an extractor and loader.

", "refs": { @@ -5315,6 +6265,55 @@ "TransformSortCriteria$SortDirection": "

The sort direction to be used in the sorting criteria that are associated with the machine learning transform.

" } }, + "SparkConnectorSource": { + "base": "

Specifies a connector to an Apache Spark data source.

", + "refs": { + "CodeGenConfigurationNode$SparkConnectorSource": "

Specifies a connector to an Apache Spark data source.

" + } + }, + "SparkConnectorTarget": { + "base": "

Specifies a target that uses an Apache Spark connector.

", + "refs": { + "CodeGenConfigurationNode$SparkConnectorTarget": "

Specifies a target that uses an Apache Spark connector.

" + } + }, + "SparkSQL": { + "base": "

Specifies a transform where you enter a SQL query using Spark SQL syntax to transform the data. The output is a single DynamicFrame.

", + "refs": { + "CodeGenConfigurationNode$SparkSQL": "

Specifies a transform where you enter a SQL query using Spark SQL syntax to transform the data. The output is a single DynamicFrame.

" + } + }, + "Spigot": { + "base": "

Specifies a transform that writes samples of the data to an Amazon S3 bucket.

", + "refs": { + "CodeGenConfigurationNode$Spigot": "

Specifies a transform that writes samples of the data to an Amazon S3 bucket.

" + } + }, + "SplitFields": { + "base": "

Specifies a transform that splits data property keys into two DynamicFrames. The output is a collection of DynamicFrames: one with selected data property keys, and one with the remaining data property keys.

", + "refs": { + "CodeGenConfigurationNode$SplitFields": "

Specifies a transform that splits data property keys into two DynamicFrames. The output is a collection of DynamicFrames: one with selected data property keys, and one with the remaining data property keys.

" + } + }, + "SqlAlias": { + "base": "

Represents a single entry in the list of values for SqlAliases.

", + "refs": { + "SqlAliases$member": null + } + }, + "SqlAliases": { + "base": null, + "refs": { + "SparkSQL$SqlAliases": "

A list of aliases. An alias allows you to specify what name to use in the SQL for a given input. For example, suppose you have a data source named \"MyDataSource\". If you specify From as MyDataSource and Alias as SqlName, then in your SQL you can write:

select * from SqlName

and that gets data from MyDataSource.
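
A minimal sketch of that example as a Go node configuration; the node name and input wiring are assumptions, while the field names (SqlQuery, SqlAliases, From, Alias) follow this model update.

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/glue"
)

// sparkSQLNode aliases the upstream node "MyDataSource" as "SqlName" so the
// query text can refer to it, mirroring the example above.
func sparkSQLNode() *glue.CodeGenConfigurationNode {
	return &glue.CodeGenConfigurationNode{
		SparkSQL: &glue.SparkSQL{
			Name:     aws.String("sql transform"), // placeholder node name
			Inputs:   []*string{aws.String("MyDataSource")},
			SqlQuery: aws.String("select * from SqlName"),
			SqlAliases: []*glue.SqlAlias{{
				From:  aws.String("MyDataSource"), // input node to alias
				Alias: aws.String("SqlName"),      // name used in the SQL text
			}},
		},
	}
}
```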

" + } + }, + "SqlQuery": { + "base": null, + "refs": { + "JDBCConnectorSource$Query": "

The table or SQL query to get the data from. You can specify either ConnectionTable or Query, but not both.

", + "SparkSQL$SqlQuery": "

A SQL query that must use Spark SQL syntax and return a single data set.

" + } + }, "StartBlueprintRunRequest": { "base": null, "refs": { @@ -5421,6 +6420,12 @@ "WorkflowRun$StartingEventBatchCondition": "

The batch condition that started the workflow run.

" } }, + "StartingPosition": { + "base": null, + "refs": { + "KinesisStreamingSourceOptions$StartingPosition": "

The starting position in the Kinesis data stream to read data from. The possible values are \"latest\", \"trim_horizon\", or \"earliest\". The default value is \"latest\".
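
A hedged sketch of these streaming options in Go; the StreamArn field and its value are assumptions for this example.

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/glue"
)

// kinesisOptions starts reading from the oldest available record instead of
// the default "latest".
func kinesisOptions() *glue.KinesisStreamingSourceOptions {
	return &glue.KinesisStreamingSourceOptions{
		StartingPosition: aws.String("trim_horizon"), // or "latest", "earliest"
		StreamArn:        aws.String("arn:aws:kinesis:us-east-1:123456789012:stream/example"), // assumed
	}
}
```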

" + } + }, "Statement": { "base": "

The statement or request for a particular action to occur in a session.

", "refs": { @@ -5512,6 +6517,15 @@ "TableInput$StorageDescriptor": "

A storage descriptor containing information about the physical storage of this table.

" } }, + "StreamingDataPreviewOptions": { + "base": "

Specifies options related to data preview for viewing a sample of your data.

", + "refs": { + "CatalogKafkaSource$DataPreviewOptions": "

Specifies options related to data preview for viewing a sample of your data.

", + "CatalogKinesisSource$DataPreviewOptions": "

Additional options for data preview.

", + "DirectKafkaSource$DataPreviewOptions": "

Specifies options related to data preview for viewing a sample of your data.

", + "DirectKinesisSource$DataPreviewOptions": "

Additional options for data preview.

" + } + }, "StringColumnStatisticsData": { "base": "

Defines column statistics supported for character sequence data values.

", "refs": { @@ -5666,6 +6680,12 @@ "TagResourceRequest$TagsToAdd": "

Tags to add to this resource.

" } }, + "TargetFormat": { + "base": null, + "refs": { + "S3DirectTarget$Format": "

Specifies the data output format for the target.

" + } + }, "TaskRun": { "base": "

The sampling parameters that are associated with the machine learning transform.

", "refs": { @@ -5854,6 +6874,12 @@ "SearchTablesResponse$NextToken": "

A continuation token, present if the current list segment is not the last.

" } }, + "Topk": { + "base": null, + "refs": { + "Spigot$Topk": "

Specifies a number of records to write starting from the beginning of the dataset.

" + } + }, "TotalSegmentsInteger": { "base": null, "refs": { @@ -5994,6 +7020,14 @@ "UpdateTriggerRequest$TriggerUpdate": "

The new values with which to update the trigger.

" } }, + "TwoInputs": { + "base": null, + "refs": { + "Join$Inputs": "

The data inputs identified by their node names.

", + "Merge$Inputs": "

The data inputs identified by their node names.

", + "Union$Inputs": "

The node ID inputs to the transform.

" + } + }, "TypeString": { "base": null, "refs": { @@ -6020,6 +7054,18 @@ "GetUnfilteredPartitionsMetadataResponse$UnfilteredPartitions": null } }, + "Union": { + "base": "

Specifies a transform that combines the rows from two or more datasets into a single result.

", + "refs": { + "CodeGenConfigurationNode$Union": "

Specifies a transform that combines the rows from two or more datasets into a single result.

" + } + }, + "UnionType": { + "base": null, + "refs": { + "Union$UnionType": "

Indicates the type of Union transform.

Specify ALL to join all rows from data sources to the resulting DynamicFrame. The resulting union does not remove duplicate rows.

Specify DISTINCT to remove duplicate rows in the resulting DynamicFrame.
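
A short sketch of a Union node under these semantics; the node names are placeholders, and Inputs carries exactly two upstream node names (the TwoInputs shape).

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/glue"
)

// unionNode combines two upstream nodes and drops duplicate rows.
func unionNode() *glue.CodeGenConfigurationNode {
	return &glue.CodeGenConfigurationNode{
		Union: &glue.Union{
			Name:      aws.String("union rows"), // placeholder
			Inputs:    []*string{aws.String("node-a"), aws.String("node-b")},
			UnionType: aws.String("DISTINCT"), // use "ALL" to keep duplicate rows
		},
	}
}
```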

" + } + }, "UntagResourceRequest": { "base": null, "refs": { @@ -6046,6 +7092,13 @@ "refs": { } }, + "UpdateCatalogBehavior": { + "base": null, + "refs": { + "CatalogSchemaChangePolicy$UpdateBehavior": "

The update behavior when the crawler finds a changed schema.

", + "DirectSchemaChangePolicy$UpdateBehavior": "

The update behavior when the crawler finds a changed schema.

" + } + }, "UpdateClassifierRequest": { "base": null, "refs": { @@ -6256,6 +7309,12 @@ "SchemaListItem$UpdatedTime": "

The date and time that a schema was updated.

" } }, + "UpsertRedshiftTargetOptions": { + "base": "

The options to configure an upsert operation when writing to a Redshift target.

", + "refs": { + "RedshiftTarget$UpsertRedshiftOptions": "

The set of options to configure an upsert operation when writing to a Redshift target.

" + } + }, "UriString": { "base": null, "refs": { diff --git a/models/apis/kms/2014-11-01/docs-2.json b/models/apis/kms/2014-11-01/docs-2.json index 9f734cbeec..2634654b0e 100644 --- a/models/apis/kms/2014-11-01/docs-2.json +++ b/models/apis/kms/2014-11-01/docs-2.json @@ -1,33 +1,33 @@ { "version": "2.0", - "service": "Key Management Service

Key Management Service (KMS) is an encryption and key management web service. This guide describes the KMS operations that you can call programmatically. For general information about KMS, see the Key Management Service Developer Guide .

KMS is replacing the term customer master key (CMK) with KMS key. The concept has not changed. To prevent breaking changes, KMS is keeping some variations of this term.

Amazon Web Services provides SDKs that consist of libraries and sample code for various programming languages and platforms (Java, Ruby, .Net, macOS, Android, etc.). The SDKs provide a convenient way to create programmatic access to KMS and other Amazon Web Services services. For example, the SDKs take care of tasks such as signing requests (see below), managing errors, and retrying requests automatically. For more information about the Amazon Web Services SDKs, including how to download and install them, see Tools for Amazon Web Services.

We recommend that you use the Amazon Web Services SDKs to make programmatic API calls to KMS.

If you need to use FIPS 140-2 validated cryptographic modules when communicating with Amazon Web Services, use the FIPS endpoint in your preferred Amazon Web Services Region. For more information about the available FIPS endpoints, see Service endpoints in the Key Management Service topic of the Amazon Web Services General Reference.

Clients must support TLS (Transport Layer Security) 1.0. We recommend TLS 1.2. Clients must also support cipher suites with Perfect Forward Secrecy (PFS) such as Ephemeral Diffie-Hellman (DHE) or Elliptic Curve Ephemeral Diffie-Hellman (ECDHE). Most modern systems such as Java 7 and later support these modes.

Signing Requests

Requests must be signed by using an access key ID and a secret access key. We strongly recommend that you do not use your Amazon Web Services account (root) access key ID and secret key for everyday work with KMS. Instead, use the access key ID and secret access key for an IAM user. You can also use the Amazon Web Services Security Token Service to generate temporary security credentials that you can use to sign requests.

All KMS operations require Signature Version 4.

Logging API Requests

KMS supports CloudTrail, a service that logs Amazon Web Services API calls and related events for your Amazon Web Services account and delivers them to an Amazon S3 bucket that you specify. By using the information collected by CloudTrail, you can determine what requests were made to KMS, who made the request, when it was made, and so on. To learn more about CloudTrail, including how to turn it on and find your log files, see the CloudTrail User Guide.

Additional Resources

For more information about credentials and request signing, see the following:

Commonly Used API Operations

Of the API operations discussed in this guide, the following will prove the most useful for most applications. You will likely perform operations other than these, such as creating keys and assigning policies, by using the console.

", + "service": "Key Management Service

Key Management Service (KMS) is an encryption and key management web service. This guide describes the KMS operations that you can call programmatically. For general information about KMS, see the Key Management Service Developer Guide .

KMS is replacing the term customer master key (CMK) with KMS key. The concept has not changed. To prevent breaking changes, KMS is keeping some variations of this term.

Amazon Web Services provides SDKs that consist of libraries and sample code for various programming languages and platforms (Java, Ruby, .Net, macOS, Android, etc.). The SDKs provide a convenient way to create programmatic access to KMS and other Amazon Web Services services. For example, the SDKs take care of tasks such as signing requests (see below), managing errors, and retrying requests automatically. For more information about the Amazon Web Services SDKs, including how to download and install them, see Tools for Amazon Web Services.

We recommend that you use the Amazon Web Services SDKs to make programmatic API calls to KMS.

If you need to use FIPS 140-2 validated cryptographic modules when communicating with Amazon Web Services, use the FIPS endpoint in your preferred Amazon Web Services Region. For more information about the available FIPS endpoints, see Service endpoints in the Key Management Service topic of the Amazon Web Services General Reference.

All KMS API calls must be signed and be transmitted using Transport Layer Security (TLS). KMS recommends you always use the latest supported TLS version. Clients must also support cipher suites with Perfect Forward Secrecy (PFS) such as Ephemeral Diffie-Hellman (DHE) or Elliptic Curve Ephemeral Diffie-Hellman (ECDHE). Most modern systems such as Java 7 and later support these modes.

Signing Requests

Requests must be signed by using an access key ID and a secret access key. We strongly recommend that you do not use your Amazon Web Services account (root) access key ID and secret key for everyday work with KMS. Instead, use the access key ID and secret access key for an IAM user. You can also use the Amazon Web Services Security Token Service to generate temporary security credentials that you can use to sign requests.

All KMS operations require Signature Version 4.

Logging API Requests

KMS supports CloudTrail, a service that logs Amazon Web Services API calls and related events for your Amazon Web Services account and delivers them to an Amazon S3 bucket that you specify. By using the information collected by CloudTrail, you can determine what requests were made to KMS, who made the request, when it was made, and so on. To learn more about CloudTrail, including how to turn it on and find your log files, see the CloudTrail User Guide.

Additional Resources

For more information about credentials and request signing, see the following:

Commonly Used API Operations

Of the API operations discussed in this guide, the following will prove the most useful for most applications. You will likely perform operations other than these, such as creating keys and assigning policies, by using the console.

", "operations": { "CancelKeyDeletion": "

Cancels the deletion of a KMS key. When this operation succeeds, the key state of the KMS key is Disabled. To enable the KMS key, use EnableKey.

For more information about scheduling and canceling deletion of a KMS key, see Deleting KMS keys in the Key Management Service Developer Guide.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

Required permissions: kms:CancelKeyDeletion (key policy)

Related operations: ScheduleKeyDeletion

", "ConnectCustomKeyStore": "

Connects or reconnects a custom key store to its associated CloudHSM cluster.

The custom key store must be connected before you can create KMS keys in the key store or use the KMS keys it contains. You can disconnect and reconnect a custom key store at any time.

To connect a custom key store, its associated CloudHSM cluster must have at least one active HSM. To get the number of active HSMs in a cluster, use the DescribeClusters operation. To add HSMs to the cluster, use the CreateHsm operation. Also, the kmsuser crypto user (CU) must not be logged into the cluster. This prevents KMS from using this account to log in.

The connection process can take an extended amount of time to complete; up to 20 minutes. This operation starts the connection process, but it does not wait for it to complete. When it succeeds, this operation quickly returns an HTTP 200 response and a JSON object with no properties. However, this response does not indicate that the custom key store is connected. To get the connection state of the custom key store, use the DescribeCustomKeyStores operation.

During the connection process, KMS finds the CloudHSM cluster that is associated with the custom key store, creates the connection infrastructure, connects to the cluster, logs into the CloudHSM client as the kmsuser CU, and rotates its password.

The ConnectCustomKeyStore operation might fail for various reasons. To find the reason, use the DescribeCustomKeyStores operation and see the ConnectionErrorCode in the response. For help interpreting the ConnectionErrorCode, see CustomKeyStoresListEntry.

To fix the failure, use the DisconnectCustomKeyStore operation to disconnect the custom key store, correct the error, use the UpdateCustomKeyStore operation if necessary, and then use ConnectCustomKeyStore again.

If you are having trouble connecting or disconnecting a custom key store, see Troubleshooting a Custom Key Store in the Key Management Service Developer Guide.

Cross-account use: No. You cannot perform this operation on a custom key store in a different Amazon Web Services account.

Required permissions: kms:ConnectCustomKeyStore (IAM policy)

Related operations

", "CreateAlias": "

Creates a friendly name for a KMS key.

Adding, deleting, or updating an alias can allow or deny permission to the KMS key. For details, see ABAC in KMS in the Key Management Service Developer Guide.

You can use an alias to identify a KMS key in the KMS console, in the DescribeKey operation and in cryptographic operations, such as Encrypt and GenerateDataKey. You can also change the KMS key that's associated with the alias (UpdateAlias) or delete the alias (DeleteAlias) at any time. These operations don't affect the underlying KMS key.

You can associate the alias with any customer managed key in the same Amazon Web Services Region. Each alias is associated with only one KMS key at a time, but a KMS key can have multiple aliases. A valid KMS key is required. You can't create an alias without a KMS key.

The alias must be unique in the account and Region, but you can have aliases with the same name in different Regions. For detailed information about aliases, see Using aliases in the Key Management Service Developer Guide.

This operation does not return a response. To get the alias that you created, use the ListAliases operation.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: No. You cannot perform this operation on an alias in a different Amazon Web Services account.

Required permissions

For details, see Controlling access to aliases in the Key Management Service Developer Guide.

Related operations:

", "CreateCustomKeyStore": "

Creates a custom key store that is associated with a CloudHSM cluster that you own and manage.

This operation is part of the Custom Key Store feature in KMS, which combines the convenience and extensive integration of KMS with the isolation and control of a single-tenant key store.

Before you create the custom key store, you must assemble the required elements, including a CloudHSM cluster that fulfills the requirements for a custom key store. For details about the required elements, see Assemble the Prerequisites in the Key Management Service Developer Guide.

When the operation completes successfully, it returns the ID of the new custom key store. Before you can use your new custom key store, you need to use the ConnectCustomKeyStore operation to connect the new key store to its CloudHSM cluster. Even if you are not going to use your custom key store immediately, you might want to connect it to verify that all settings are correct and then disconnect it until you are ready to use it.

For help with failures, see Troubleshooting a Custom Key Store in the Key Management Service Developer Guide.

Cross-account use: No. You cannot perform this operation on a custom key store in a different Amazon Web Services account.

Required permissions: kms:CreateCustomKeyStore (IAM policy).

Related operations:

", "CreateGrant": "

Adds a grant to a KMS key.

A grant is a policy instrument that allows Amazon Web Services principals to use KMS keys in cryptographic operations. It also can allow them to view a KMS key (DescribeKey) and create and manage grants. When authorizing access to a KMS key, grants are considered along with key policies and IAM policies. Grants are often used for temporary permissions because you can create one, use its permissions, and delete it without changing your key policies or IAM policies.

For detailed information about grants, including grant terminology, see Grants in KMS in the Key Management Service Developer Guide . For examples of working with grants in several programming languages, see Programming grants.

The CreateGrant operation returns a GrantToken and a GrantId.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: Yes. To perform this operation on a KMS key in a different Amazon Web Services account, specify the key ARN in the value of the KeyId parameter.

Required permissions: kms:CreateGrant (key policy)

Related operations:

", - "CreateKey": "

Creates a unique customer managed KMS key in your Amazon Web Services account and Region.

In addition to the required parameters, you can use the optional parameters to specify a key policy, description, tags, and other useful elements for any key type.

KMS is replacing the term customer master key (CMK) with KMS key. The concept has not changed. To prevent breaking changes, KMS is keeping some variations of this term.

To create different types of KMS keys, use the following guidance:

Symmetric encryption KMS key

To create a symmetric encryption KMS key, you aren't required to specify any parameters. The default value for KeySpec, SYMMETRIC_DEFAULT, and the default value for KeyUsage, ENCRYPT_DECRYPT, create a symmetric encryption KMS key.

If you need a key for basic encryption and decryption or you are creating a KMS key to protect your resources in an Amazon Web Services service, create a symmetric encryption KMS key. The key material in a symmetric encryption key never leaves KMS unencrypted. You can use a symmetric encryption KMS key to encrypt and decrypt data up to 4,096 bytes, but these keys are typically used to generate data keys and data key pairs. For details, see GenerateDataKey and GenerateDataKeyPair.

Asymmetric KMS keys

To create an asymmetric KMS key, use the KeySpec parameter to specify the type of key material in the KMS key. Then, use the KeyUsage parameter to determine whether the KMS key will be used to encrypt and decrypt or sign and verify. You can't change these properties after the KMS key is created.

Asymmetric KMS keys contain an RSA key pair or an Elliptic Curve (ECC) key pair. The private key in an asymmetric KMS key never leaves AWS KMS unencrypted. However, you can use the GetPublicKey operation to download the public key so it can be used outside of AWS KMS. KMS keys with RSA key pairs can be used to encrypt or decrypt data or sign and verify messages (but not both). KMS keys with ECC key pairs can be used only to sign and verify messages. For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.

HMAC KMS key

To create an HMAC KMS key, set the KeySpec parameter to a key spec value for HMAC KMS keys. Then set the KeyUsage parameter to GENERATE_VERIFY_MAC. You must set the key usage even though GENERATE_VERIFY_MAC is the only valid key usage value for HMAC KMS keys. You can't change these properties after the KMS key is created.

HMAC KMS keys are symmetric keys that never leave KMS unencrypted. You can use HMAC keys to generate (GenerateMac) and verify (VerifyMac) HMAC codes for messages up to 4096 bytes.

HMAC KMS keys are not supported in all Amazon Web Services Regions. If you try to create an HMAC KMS key in an Amazon Web Services Region in which HMAC keys are not supported, the CreateKey operation returns an UnsupportedOperationException. For a list of Regions in which HMAC KMS keys are supported, see HMAC keys in KMS in the Key Management Service Developer Guide.

Multi-Region primary keys
Imported key material

To create a multi-Region primary key in the local Amazon Web Services Region, use the MultiRegion parameter with a value of True. To create a multi-Region replica key, that is, a KMS key with the same key ID and key material as a primary key, but in a different Amazon Web Services Region, use the ReplicateKey operation. To change a replica key to a primary key, and its primary key to a replica key, use the UpdatePrimaryRegion operation.

You can create multi-Region KMS keys for all supported KMS key types: symmetric encryption KMS keys, HMAC KMS keys, asymmetric encryption KMS keys, and asymmetric signing KMS keys. You can also create multi-Region keys with imported key material. However, you can't create multi-Region keys in a custom key store.

This operation supports multi-Region keys, a KMS feature that lets you create multiple interoperable KMS keys in different Amazon Web Services Regions. Because these KMS keys have the same key ID, key material, and other metadata, you can use them interchangeably to encrypt data in one Amazon Web Services Region and decrypt it in a different Amazon Web Services Region without re-encrypting the data or making a cross-Region call. For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide.

To import your own key material, begin by creating a symmetric encryption KMS key with no key material. To do this, use the Origin parameter of CreateKey with a value of EXTERNAL. Next, use the GetParametersForImport operation to get a public key and import token, and use the public key to encrypt your key material. Then, use ImportKeyMaterial with your import token to import the key material. For step-by-step instructions, see Importing Key Material in the Key Management Service Developer Guide.

This feature supports only symmetric encryption KMS keys, including multi-Region symmetric encryption KMS keys. You cannot import key material into any other type of KMS key.

To create a multi-Region primary key with imported key material, use the Origin parameter of CreateKey with a value of EXTERNAL and the MultiRegion parameter with a value of True. To create replicas of the multi-Region primary key, use the ReplicateKey operation. For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide.

Custom key store

To create a symmetric encryption KMS key in a custom key store, use the CustomKeyStoreId parameter to specify the custom key store. You must also use the Origin parameter with a value of AWS_CLOUDHSM. The CloudHSM cluster that is associated with the custom key store must have at least two active HSMs in different Availability Zones in the Amazon Web Services Region.

Custom key stores support only symmetric encryption KMS keys. You cannot create an HMAC KMS key or an asymmetric KMS key in a custom key store. For information about custom key stores in KMS, see Custom key stores in KMS in the Key Management Service Developer Guide.

Cross-account use: No. You cannot use this operation to create a KMS key in a different Amazon Web Services account.

Required permissions: kms:CreateKey (IAM policy). To use the Tags parameter, kms:TagResource (IAM policy). For examples and information about related permissions, see Allow a user to create KMS keys in the Key Management Service Developer Guide.

Related operations:

", - "Decrypt": "

Decrypts ciphertext that was encrypted by a KMS key using any of the following operations:

You can use this operation to decrypt ciphertext that was encrypted under a symmetric encryption KMS key or an asymmetric encryption KMS key. When the KMS key is asymmetric, you must specify the KMS key and the encryption algorithm that was used to encrypt the ciphertext. For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.

The Decrypt operation also decrypts ciphertext that was encrypted outside of KMS by the public key in an asymmetric KMS key. However, it cannot decrypt symmetric ciphertext produced by other libraries, such as the Amazon Web Services Encryption SDK or Amazon S3 client-side encryption. These libraries return a ciphertext format that is incompatible with KMS.

If the ciphertext was encrypted under a symmetric encryption KMS key, the KeyId parameter is optional. KMS can get this information from metadata that it adds to the symmetric ciphertext blob. This feature adds durability to your implementation by ensuring that authorized users can decrypt ciphertext decades after it was encrypted, even if they've lost track of the key ID. However, specifying the KMS key is always recommended as a best practice. When you use the KeyId parameter to specify a KMS key, KMS only uses the KMS key you specify. If the ciphertext was encrypted under a different KMS key, the Decrypt operation fails. This practice ensures that you use the KMS key that you intend.

Whenever possible, use key policies to give users permission to call the Decrypt operation on a particular KMS key, instead of using IAM policies. Otherwise, you might create an IAM user policy that gives the user Decrypt permission on all KMS keys. This user could decrypt ciphertext that was encrypted by KMS keys in other accounts if the key policy for the cross-account KMS key permits it. If you must use an IAM policy for Decrypt permissions, limit the user to particular KMS keys or particular trusted accounts. For details, see Best practices for IAM policies in the Key Management Service Developer Guide.

Applications in Amazon Web Services Nitro Enclaves can call this operation by using the Amazon Web Services Nitro Enclaves Development Kit. For information about the supporting parameters, see How Amazon Web Services Nitro Enclaves use KMS in the Key Management Service Developer Guide.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

Required permissions: kms:Decrypt (key policy)

Related operations:

", + "CreateKey": "

Creates a unique customer managed KMS key in your Amazon Web Services account and Region.

In addition to the required parameters, you can use the optional parameters to specify a key policy, description, tags, and other useful elements for any key type.

KMS is replacing the term customer master key (CMK) with KMS key. The concept has not changed. To prevent breaking changes, KMS is keeping some variations of this term.

To create different types of KMS keys, use the following guidance:

Symmetric encryption KMS key

To create a symmetric encryption KMS key, you aren't required to specify any parameters. The default value for KeySpec, SYMMETRIC_DEFAULT, and the default value for KeyUsage, ENCRYPT_DECRYPT, create a symmetric encryption KMS key.
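
As a concrete illustration, a minimal aws-sdk-go call; the description is a placeholder and the defaults do the rest.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))
	// No KeySpec or KeyUsage: the defaults (SYMMETRIC_DEFAULT, ENCRYPT_DECRYPT)
	// produce a symmetric encryption KMS key.
	out, err := svc.CreateKey(&kms.CreateKeyInput{
		Description: aws.String("example symmetric encryption key"), // placeholder
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(aws.StringValue(out.KeyMetadata.KeyId))
}
```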

If you need a key for basic encryption and decryption or you are creating a KMS key to protect your resources in an Amazon Web Services service, create a symmetric encryption KMS key. The key material in a symmetric encryption key never leaves KMS unencrypted. You can use a symmetric encryption KMS key to encrypt and decrypt data up to 4,096 bytes, but these keys are typically used to generate data keys and data key pairs. For details, see GenerateDataKey and GenerateDataKeyPair.

Asymmetric KMS keys

To create an asymmetric KMS key, use the KeySpec parameter to specify the type of key material in the KMS key. Then, use the KeyUsage parameter to determine whether the KMS key will be used to encrypt and decrypt or sign and verify. You can't change these properties after the KMS key is created.

Asymmetric KMS keys contain an RSA key pair or an Elliptic Curve (ECC) key pair. The private key in an asymmetric KMS key never leaves KMS unencrypted. However, you can use the GetPublicKey operation to download the public key so it can be used outside of KMS. KMS keys with RSA key pairs can be used to encrypt or decrypt data or sign and verify messages (but not both). KMS keys with ECC key pairs can be used only to sign and verify messages. For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.

HMAC KMS key

To create an HMAC KMS key, set the KeySpec parameter to a key spec value for HMAC KMS keys. Then set the KeyUsage parameter to GENERATE_VERIFY_MAC. You must set the key usage even though GENERATE_VERIFY_MAC is the only valid key usage value for HMAC KMS keys. You can't change these properties after the KMS key is created.
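
A hedged sketch of that call in aws-sdk-go; HMAC_256 is one of the HMAC key specs.

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))
	_, err := svc.CreateKey(&kms.CreateKeyInput{
		KeySpec:  aws.String("HMAC_256"),            // an HMAC key spec
		KeyUsage: aws.String("GENERATE_VERIFY_MAC"), // the only valid usage for HMAC keys
	})
	if err != nil {
		panic(err)
	}
}
```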

HMAC KMS keys are symmetric keys that never leave KMS unencrypted. You can use HMAC keys to generate (GenerateMac) and verify (VerifyMac) HMAC codes for messages up to 4096 bytes.

HMAC KMS keys are not supported in all Amazon Web Services Regions. If you try to create an HMAC KMS key in an Amazon Web Services Region in which HMAC keys are not supported, the CreateKey operation returns an UnsupportedOperationException. For a list of Regions in which HMAC KMS keys are supported, see HMAC keys in KMS in the Key Management Service Developer Guide.

Multi-Region primary keys
Imported key material

To create a multi-Region primary key in the local Amazon Web Services Region, use the MultiRegion parameter with a value of True. To create a multi-Region replica key, that is, a KMS key with the same key ID and key material as a primary key, but in a different Amazon Web Services Region, use the ReplicateKey operation. To change a replica key to a primary key, and its primary key to a replica key, use the UpdatePrimaryRegion operation.

You can create multi-Region KMS keys for all supported KMS key types: symmetric encryption KMS keys, HMAC KMS keys, asymmetric encryption KMS keys, and asymmetric signing KMS keys. You can also create multi-Region keys with imported key material. However, you can't create multi-Region keys in a custom key store.

This operation supports multi-Region keys, a KMS feature that lets you create multiple interoperable KMS keys in different Amazon Web Services Regions. Because these KMS keys have the same key ID, key material, and other metadata, you can use them interchangeably to encrypt data in one Amazon Web Services Region and decrypt it in a different Amazon Web Services Region without re-encrypting the data or making a cross-Region call. For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide.

To import your own key material, begin by creating a symmetric encryption KMS key with no key material. To do this, use the Origin parameter of CreateKey with a value of EXTERNAL. Next, use the GetParametersForImport operation to get a public key and import token, and use the public key to encrypt your key material. Then, use ImportKeyMaterial with your import token to import the key material. For step-by-step instructions, see Importing Key Material in the Key Management Service Developer Guide.

This feature supports only symmetric encryption KMS keys, including multi-Region symmetric encryption KMS keys. You cannot import key material into any other type of KMS key.

To create a multi-Region primary key with imported key material, use the Origin parameter of CreateKey with a value of EXTERNAL and the MultiRegion parameter with a value of True. To create replicas of the multi-Region primary key, use the ReplicateKey operation. For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide.

Custom key store

To create a symmetric encryption KMS key in a custom key store, use the CustomKeyStoreId parameter to specify the custom key store. You must also use the Origin parameter with a value of AWS_CLOUDHSM. The CloudHSM cluster that is associated with the custom key store must have at least two active HSMs in different Availability Zones in the Amazon Web Services Region.

Custom key stores support only symmetric encryption KMS keys. You cannot create an HMAC KMS key or an asymmetric KMS key in a custom key store. For information about custom key stores in KMS, see Custom key stores in KMS in the Key Management Service Developer Guide.

Cross-account use: No. You cannot use this operation to create a KMS key in a different Amazon Web Services account.

Required permissions: kms:CreateKey (IAM policy). To use the Tags parameter, kms:TagResource (IAM policy). For examples and information about related permissions, see Allow a user to create KMS keys in the Key Management Service Developer Guide.

Related operations:

", + "Decrypt": "

Decrypts ciphertext that was encrypted by a KMS key using any of the following operations:

You can use this operation to decrypt ciphertext that was encrypted under a symmetric encryption KMS key or an asymmetric encryption KMS key. When the KMS key is asymmetric, you must specify the KMS key and the encryption algorithm that was used to encrypt the ciphertext. For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.

The Decrypt operation also decrypts ciphertext that was encrypted outside of KMS by the public key in an asymmetric KMS key. However, it cannot decrypt ciphertext produced by other libraries, such as the Amazon Web Services Encryption SDK or Amazon S3 client-side encryption. These libraries return a ciphertext format that is incompatible with KMS.

If the ciphertext was encrypted under a symmetric encryption KMS key, the KeyId parameter is optional. KMS can get this information from metadata that it adds to the symmetric ciphertext blob. This feature adds durability to your implementation by ensuring that authorized users can decrypt ciphertext decades after it was encrypted, even if they've lost track of the key ID. However, specifying the KMS key is always recommended as a best practice. When you use the KeyId parameter to specify a KMS key, KMS only uses the KMS key you specify. If the ciphertext was encrypted under a different KMS key, the Decrypt operation fails. This practice ensures that you use the KMS key that you intend.
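
A minimal sketch of that practice in aws-sdk-go; the ciphertext and key ARN are assumed to come from an earlier Encrypt call.

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/kms"
)

// decryptWithKey pins the decryption to one KMS key even though KeyId is
// optional for symmetric ciphertext.
func decryptWithKey(svc *kms.KMS, keyARN string, ciphertext []byte) ([]byte, error) {
	out, err := svc.Decrypt(&kms.DecryptInput{
		CiphertextBlob: ciphertext,
		KeyId:          aws.String(keyARN), // fails if a different key produced the blob
	})
	if err != nil {
		return nil, err
	}
	return out.Plaintext, nil
}
```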

Whenever possible, use key policies to give users permission to call the Decrypt operation on a particular KMS key, instead of using IAM policies. Otherwise, you might create an IAM user policy that gives the user Decrypt permission on all KMS keys. This user could decrypt ciphertext that was encrypted by KMS keys in other accounts if the key policy for the cross-account KMS key permits it. If you must use an IAM policy for Decrypt permissions, limit the user to particular KMS keys or particular trusted accounts. For details, see Best practices for IAM policies in the Key Management Service Developer Guide.

Applications in Amazon Web Services Nitro Enclaves can call this operation by using the Amazon Web Services Nitro Enclaves Development Kit. For information about the supporting parameters, see How Amazon Web Services Nitro Enclaves use KMS in the Key Management Service Developer Guide.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

Required permissions: kms:Decrypt (key policy)

Related operations:

", "DeleteAlias": "

Deletes the specified alias.

Adding, deleting, or updating an alias can allow or deny permission to the KMS key. For details, see ABAC in KMS in the Key Management Service Developer Guide.

Because an alias is not a property of a KMS key, you can delete and change the aliases of a KMS key without affecting the KMS key. Also, aliases do not appear in the response from the DescribeKey operation. To get the aliases of all KMS keys, use the ListAliases operation.

Each KMS key can have multiple aliases. To change the alias of a KMS key, use DeleteAlias to delete the current alias and CreateAlias to create a new alias. To associate an existing alias with a different KMS key, call UpdateAlias.

Cross-account use: No. You cannot perform this operation on an alias in a different Amazon Web Services account.

Required permissions

For details, see Controlling access to aliases in the Key Management Service Developer Guide.

Related operations:

", "DeleteCustomKeyStore": "

Deletes a custom key store. This operation does not delete the CloudHSM cluster that is associated with the custom key store, or affect any users or keys in the cluster.

The custom key store that you delete cannot contain any KMS keys. Before deleting the key store, verify that you will never need to use any of the KMS keys in the key store for any cryptographic operations. Then, use ScheduleKeyDeletion to delete the KMS keys from the key store. When the scheduled waiting period expires, the ScheduleKeyDeletion operation deletes the KMS keys. Then it makes a best effort to delete the key material from the associated cluster. However, you might need to manually delete the orphaned key material from the cluster and its backups.

After all KMS keys are deleted from KMS, use DisconnectCustomKeyStore to disconnect the key store from KMS. Then, you can delete the custom key store.

Instead of deleting the custom key store, consider using DisconnectCustomKeyStore to disconnect it from KMS. While the key store is disconnected, you cannot create or use the KMS keys in the key store. But, you do not need to delete KMS keys and you can reconnect a disconnected custom key store at any time.

If the operation succeeds, it returns a JSON object with no properties.

This operation is part of the Custom Key Store feature in KMS, which combines the convenience and extensive integration of KMS with the isolation and control of a single-tenant key store.

Cross-account use: No. You cannot perform this operation on a custom key store in a different Amazon Web Services account.

Required permissions: kms:DeleteCustomKeyStore (IAM policy)

Related operations:

", "DeleteImportedKeyMaterial": "

Deletes key material that you previously imported. This operation makes the specified KMS key unusable. For more information about importing key material into KMS, see Importing Key Material in the Key Management Service Developer Guide.

When the specified KMS key is in the PendingDeletion state, this operation does not change the KMS key's state. Otherwise, it changes the KMS key's state to PendingImport.

After you delete key material, you can use ImportKeyMaterial to reimport the same key material into the KMS key.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

Required permissions: kms:DeleteImportedKeyMaterial (key policy)

Related operations:

", "DescribeCustomKeyStores": "

Gets information about custom key stores in the account and Region.

This operation is part of the Custom Key Store feature in KMS, which combines the convenience and extensive integration of KMS with the isolation and control of a single-tenant key store.

By default, this operation returns information about all custom key stores in the account and Region. To get only information about a particular custom key store, use either the CustomKeyStoreName or CustomKeyStoreId parameter (but not both).

To determine whether the custom key store is connected to its CloudHSM cluster, use the ConnectionState element in the response. If an attempt to connect the custom key store failed, the ConnectionState value is FAILED and the ConnectionErrorCode element in the response indicates the cause of the failure. For help interpreting the ConnectionErrorCode, see CustomKeyStoresListEntry.

Custom key stores have a DISCONNECTED connection state if the key store has never been connected or you use the DisconnectCustomKeyStore operation to disconnect it. If your custom key store state is CONNECTED but you are having trouble using it, make sure that its associated CloudHSM cluster is active and contains the minimum number of HSMs required for the operation, if any.

For help repairing your custom key store, see the Troubleshooting Custom Key Stores topic in the Key Management Service Developer Guide.

Cross-account use: No. You cannot perform this operation on a custom key store in a different Amazon Web Services account.

Required permissions: kms:DescribeCustomKeyStores (IAM policy)

Related operations:

", "DescribeKey": "

Provides detailed information about a KMS key. You can run DescribeKey on a customer managed key or an Amazon Web Services managed key.

This detailed information includes the key ARN, creation date (and deletion date, if applicable), the key state, and the origin and expiration date (if any) of the key material. It includes fields, like KeySpec, that help you distinguish different types of KMS keys. It also displays the key usage (encryption, signing, or generating and verifying MACs) and the algorithms that the KMS key supports. For KMS keys in custom key stores, it includes information about the custom key store, such as the key store ID and the CloudHSM cluster ID. For multi-Region keys, it displays the primary key and all related replica keys.

DescribeKey does not return the following information:

In general, DescribeKey is a non-mutating operation. It returns data about KMS keys, but doesn't change them. However, Amazon Web Services services use DescribeKey to create Amazon Web Services managed keys from a predefined Amazon Web Services alias with no key ID.

Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

Required permissions: kms:DescribeKey (key policy)

Related operations:

", "DisableKey": "

Sets the state of a KMS key to disabled. This change temporarily prevents use of the KMS key for cryptographic operations.

For more information about how key state affects the use of a KMS key, see Key states of KMS keys in the Key Management Service Developer Guide .

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

Required permissions: kms:DisableKey (key policy)

Related operations: EnableKey

", - "DisableKeyRotation": "

Disables automatic rotation of the key material for the specified symmetric encryption KMS key.

You cannot enable automatic rotation of asymmetric KMS keys, HMAC KMS keys, KMS keys with imported key material, or KMS keys in a custom key store. To enable or disable automatic rotation of a set of related multi-Region keys, set the property on the primary key.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

Required permissions: kms:DisableKeyRotation (key policy)

Related operations:

", + "DisableKeyRotation": "

Disables automatic rotation of the key material of the specified symmetric encryption KMS key.

Automatic key rotation is supported only on symmetric encryption KMS keys. You cannot enable or disable automatic rotation of asymmetric KMS keys, HMAC KMS keys, KMS keys with imported key material, or KMS keys in a custom key store. The key rotation status of these KMS keys is always false. To enable or disable automatic rotation of a set of related multi-Region keys, set the property on the primary key.

You can enable (EnableKeyRotation) and disable automatic rotation of the key material in customer managed KMS keys. Key material rotation of Amazon Web Services managed KMS keys is not configurable. KMS always rotates the key material for Amazon Web Services managed keys every year. Rotation of Amazon Web Services owned KMS keys varies.

In May 2022, KMS changed the rotation schedule for Amazon Web Services managed keys from every three years to every year. For details, see EnableKeyRotation.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

Required permissions: kms:DisableKeyRotation (key policy)

Related operations:

", "DisconnectCustomKeyStore": "

Disconnects the custom key store from its associated CloudHSM cluster. While a custom key store is disconnected, you can manage the custom key store and its KMS keys, but you cannot create or use KMS keys in the custom key store. You can reconnect the custom key store at any time.

While a custom key store is disconnected, all attempts to create KMS keys in the custom key store or to use existing KMS keys in cryptographic operations will fail. This action can prevent users from storing and accessing sensitive data.

To find the connection state of a custom key store, use the DescribeCustomKeyStores operation. To reconnect a custom key store, use the ConnectCustomKeyStore operation.

If the operation succeeds, it returns a JSON object with no properties.

This operation is part of the Custom Key Store feature in KMS, which combines the convenience and extensive integration of KMS with the isolation and control of a single-tenant key store.

Cross-account use: No. You cannot perform this operation on a custom key store in a different Amazon Web Services account.

Required permissions: kms:DisconnectCustomKeyStore (IAM policy)

Related operations:

", "EnableKey": "

Sets the key state of a KMS key to enabled. This allows you to use the KMS key for cryptographic operations.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

Required permissions: kms:EnableKey (key policy)

Related operations: DisableKey

", - "EnableKeyRotation": "

Enables automatic rotation of the key material for the specified symmetric encryption KMS key.

You cannot enable automatic rotation of asymmetric KMS keys, HMAC KMS keys, KMS keys with imported key material, or KMS keys in a custom key store. To enable or disable automatic rotation of a set of related multi-Region keys, set the property on the primary key.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

Required permissions: kms:EnableKeyRotation (key policy)

Related operations:

", + "EnableKeyRotation": "

Enables automatic rotation of the key material of the specified symmetric encryption KMS key.

When you enable automatic rotation of a customer managed KMS key, KMS rotates the key material of the KMS key one year (approximately 365 days) from the enable date and every year thereafter. You can monitor rotation of the key material for your KMS keys in CloudTrail and Amazon CloudWatch. To disable rotation of the key material in a customer managed KMS key, use the DisableKeyRotation operation.
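
For example, a minimal aws-sdk-go sketch; the key ID is a placeholder for a customer managed key.

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))
	keyID := aws.String("1234abcd-12ab-34cd-56ef-1234567890ab") // placeholder
	if _, err := svc.EnableKeyRotation(&kms.EnableKeyRotationInput{KeyId: keyID}); err != nil {
		panic(err)
	}
	// GetKeyRotationStatus reports whether rotation is now enabled.
	out, err := svc.GetKeyRotationStatus(&kms.GetKeyRotationStatusInput{KeyId: keyID})
	if err != nil {
		panic(err)
	}
	_ = out.KeyRotationEnabled // *bool; true once rotation is on
}
```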

Automatic key rotation is supported only on symmetric encryption KMS keys. You cannot enable or disable automatic rotation of asymmetric KMS keys, HMAC KMS keys, KMS keys with imported key material, or KMS keys in a custom key store. The key rotation status of these KMS keys is always false. To enable or disable automatic rotation of a set of related multi-Region keys, set the property on the primary key.

You cannot enable or disable automatic rotation of Amazon Web Services managed KMS keys. KMS always rotates the key material of Amazon Web Services managed keys every year. Rotation of Amazon Web Services owned KMS keys varies.

In May 2022, KMS changed the rotation schedule for Amazon Web Services managed keys from every three years (approximately 1,095 days) to every year (approximately 365 days).

New Amazon Web Services managed keys are automatically rotated one year after they are created, and approximately every year thereafter.

Existing Amazon Web Services managed keys are automatically rotated one year after their most recent rotation, and every year thereafter.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

Required permissions: kms:EnableKeyRotation (key policy)

Related operations:

", "Encrypt": "

Encrypts plaintext of up to 4,096 bytes using a KMS key. You can use a symmetric or asymmetric KMS key with a KeyUsage of ENCRYPT_DECRYPT.

You can use this operation to encrypt small amounts of arbitrary data, such as a personal identifier or database password, or other sensitive information. You don't need to use the Encrypt operation to encrypt a data key. The GenerateDataKey and GenerateDataKeyPair operations return a plaintext data key and an encrypted copy of that data key.

If you use a symmetric encryption KMS key, you can use an encryption context to add additional security to your encryption operation. If you specify an EncryptionContext when encrypting data, you must specify the same encryption context (a case-sensitive exact match) when decrypting the data. Otherwise, the request to decrypt fails with an InvalidCiphertextException. For more information, see Encryption Context in the Key Management Service Developer Guide.
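
As a sketch in aws-sdk-go, with a placeholder alias and an example context pair that must be repeated exactly on Decrypt.

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))
	_, err := svc.Encrypt(&kms.EncryptInput{
		KeyId:     aws.String("alias/example"), // placeholder alias
		Plaintext: []byte("sensitive value"),
		EncryptionContext: map[string]*string{
			"purpose": aws.String("example"), // must match exactly on Decrypt
		},
	})
	if err != nil {
		panic(err)
	}
}
```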

If you specify an asymmetric KMS key, you must also specify the encryption algorithm. The algorithm must be compatible with the KMS key type.

When you use an asymmetric KMS key to encrypt or reencrypt data, be sure to record the KMS key and encryption algorithm that you choose. You will be required to provide the same KMS key and encryption algorithm when you decrypt the data. If the KMS key and algorithm do not match the values used to encrypt the data, the decrypt operation fails.

You are not required to supply the key ID and encryption algorithm when you decrypt with symmetric encryption KMS keys because KMS stores this information in the ciphertext blob. KMS cannot store metadata in ciphertext generated with asymmetric keys. The standard format for asymmetric key ciphertext does not include configurable fields.

The maximum size of the data that you can encrypt varies with the type of KMS key and the encryption algorithm that you choose.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

Required permissions: kms:Encrypt (key policy)

Related operations:

", "GenerateDataKey": "

Returns a unique symmetric data key for use outside of KMS. This operation returns a plaintext copy of the data key and a copy that is encrypted under a symmetric encryption KMS key that you specify. The bytes in the plaintext key are random; they are not related to the caller or the KMS key. You can use the plaintext key to encrypt your data outside of KMS and store the encrypted data key with the encrypted data.

To generate a data key, specify the symmetric encryption KMS key that will be used to encrypt the data key. You cannot use an asymmetric KMS key to encrypt data keys. To get the type of your KMS key, use the DescribeKey operation. You must also specify the length of the data key. Use either the KeySpec or NumberOfBytes parameters (but not both). For 128-bit and 256-bit data keys, use the KeySpec parameter.

To get only an encrypted copy of the data key, use GenerateDataKeyWithoutPlaintext. To generate an asymmetric data key pair, use the GenerateDataKeyPair or GenerateDataKeyPairWithoutPlaintext operation. To get a cryptographically secure random byte string, use GenerateRandom.

You can use an optional encryption context to add additional security to the encryption operation. If you specify an EncryptionContext, you must specify the same encryption context (a case-sensitive exact match) when decrypting the encrypted data key. Otherwise, the request to decrypt fails with an InvalidCiphertextException. For more information, see Encryption Context in the Key Management Service Developer Guide.

Applications in Amazon Web Services Nitro Enclaves can call this operation by using the Amazon Web Services Nitro Enclaves Development Kit. For information about the supporting parameters, see How Amazon Web Services Nitro Enclaves use KMS in the Key Management Service Developer Guide.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

How to use your data key

We recommend that you use the following pattern to encrypt data locally in your application. You can write your own code or use a client-side encryption library, such as the Amazon Web Services Encryption SDK, the Amazon DynamoDB Encryption Client, or Amazon S3 client-side encryption to do these tasks for you. A minimal Go sketch of this pattern appears after the steps below.

To encrypt data outside of KMS:

  1. Use the GenerateDataKey operation to get a data key.

  2. Use the plaintext data key (in the Plaintext field of the response) to encrypt your data outside of KMS. Then erase the plaintext data key from memory.

  3. Store the encrypted data key (in the CiphertextBlob field of the response) with the encrypted data.

To decrypt data outside of KMS:

  1. Use the Decrypt operation to decrypt the encrypted data key. The operation returns a plaintext copy of the data key.

  2. Use the plaintext data key to decrypt data outside of KMS, then erase the plaintext data key from memory.
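
A minimal Go sketch of the pattern above, assuming a placeholder alias alias/my-app-key and using AES-GCM from the standard library for the local encryption step:

```go
package main

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"fmt"
	"io"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	// Step 1: get a 256-bit data key under a symmetric encryption KMS key.
	dk, err := svc.GenerateDataKey(&kms.GenerateDataKeyInput{
		KeyId:   aws.String("alias/my-app-key"), // placeholder alias
		KeySpec: aws.String(kms.DataKeySpecAes256),
	})
	if err != nil {
		log.Fatal(err)
	}

	// Step 2: encrypt locally with the plaintext key, then erase it.
	block, err := aes.NewCipher(dk.Plaintext)
	if err != nil {
		log.Fatal(err)
	}
	gcm, err := cipher.NewGCM(block)
	if err != nil {
		log.Fatal(err)
	}
	nonce := make([]byte, gcm.NonceSize())
	if _, err := io.ReadFull(rand.Reader, nonce); err != nil {
		log.Fatal(err)
	}
	sealed := gcm.Seal(nonce, nonce, []byte("secret payload"), nil)
	for i := range dk.Plaintext {
		dk.Plaintext[i] = 0 // erase the plaintext data key from memory
	}

	// Step 3: persist the encrypted data key next to the ciphertext.
	fmt.Printf("store %d ciphertext bytes + %d-byte encrypted data key\n",
		len(sealed), len(dk.CiphertextBlob))

	// To decrypt later: call Decrypt on dk.CiphertextBlob, open the
	// AES-GCM ciphertext with the returned plaintext key, then erase it.
}
```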

Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

Required permissions: kms:GenerateDataKey (key policy)

Related operations:

", "GenerateDataKeyPair": "

Returns a unique asymmetric data key pair for use outside of KMS. This operation returns a plaintext public key, a plaintext private key, and a copy of the private key that is encrypted under the symmetric encryption KMS key you specify. You can use the data key pair to perform asymmetric cryptography and implement digital signatures outside of KMS. The bytes in the keys are random; they are not related to the caller or to the KMS key that is used to encrypt the private key.

You can use the public key that GenerateDataKeyPair returns to encrypt data or verify a signature outside of KMS. Then, store the encrypted private key with the data. When you are ready to decrypt data or sign a message, you can use the Decrypt operation to decrypt the encrypted private key.

To generate a data key pair, you must specify a symmetric encryption KMS key to encrypt the private key in a data key pair. You cannot use an asymmetric KMS key or a KMS key in a custom key store. To get the type and origin of your KMS key, use the DescribeKey operation.

Use the KeyPairSpec parameter to choose an RSA or Elliptic Curve (ECC) data key pair. KMS recommends that you use ECC key pairs for signing, and use RSA key pairs for either encryption or signing, but not both. However, KMS cannot enforce any restrictions on the use of data key pairs outside of KMS.

If you are using the data key pair to encrypt data, or for any operation where you don't immediately need a private key, consider using the GenerateDataKeyPairWithoutPlaintext operation. GenerateDataKeyPairWithoutPlaintext returns a plaintext public key and an encrypted private key, but omits the plaintext private key that you need only to decrypt ciphertext or sign a message. Later, when you need to decrypt the data or sign a message, use the Decrypt operation to decrypt the encrypted private key in the data key pair.

GenerateDataKeyPair returns a unique data key pair for each request. The bytes in the keys are random; they are not related to the caller or the KMS key that is used to encrypt the private key. The public key is a DER-encoded X.509 SubjectPublicKeyInfo, as specified in RFC 5280. The private key is a DER-encoded PKCS8 PrivateKeyInfo, as specified in RFC 5958.

You can use an optional encryption context to add additional security to the encryption operation. If you specify an EncryptionContext, you must specify the same encryption context (a case-sensitive exact match) when decrypting the encrypted data key. Otherwise, the request to decrypt fails with an InvalidCiphertextException. For more information, see Encryption Context in the Key Management Service Developer Guide.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

Required permissions: kms:GenerateDataKeyPair (key policy)

Related operations:
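
A short aws-sdk-go sketch of requesting an ECC P-256 data key pair; the wrapping-key alias is a placeholder:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	// The private key is returned both in plaintext and encrypted under
	// the symmetric wrapping key.
	out, err := svc.GenerateDataKeyPair(&kms.GenerateDataKeyPairInput{
		KeyId:       aws.String("alias/my-wrapping-key"), // placeholder alias
		KeyPairSpec: aws.String(kms.DataKeyPairSpecEccNistP256),
	})
	if err != nil {
		log.Fatal(err)
	}

	// out.PublicKey is DER-encoded SubjectPublicKeyInfo (RFC 5280);
	// out.PrivateKeyPlaintext is DER-encoded PKCS8 (RFC 5958);
	// out.PrivateKeyCiphertextBlob is what you store with your data.
	fmt.Printf("public key: %d bytes, encrypted private key: %d bytes\n",
		len(out.PublicKey), len(out.PrivateKeyCiphertextBlob))
}
```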

", "GenerateDataKeyPairWithoutPlaintext": "

Returns a unique asymmetric data key pair for use outside of KMS. This operation returns a plaintext public key and a copy of the private key that is encrypted under the symmetric encryption KMS key you specify. Unlike GenerateDataKeyPair, this operation does not return a plaintext private key. The bytes in the keys are random; they are not related to the caller or to the KMS key that is used to encrypt the private key.

You can use the public key that GenerateDataKeyPairWithoutPlaintext returns to encrypt data or verify a signature outside of KMS. Then, store the encrypted private key with the data. When you are ready to decrypt data or sign a message, you can use the Decrypt operation to decrypt the encrypted private key.

To generate a data key pair, you must specify a symmetric encryption KMS key to encrypt the private key in a data key pair. You cannot use an asymmetric KMS key or a KMS key in a custom key store. To get the type and origin of your KMS key, use the DescribeKey operation.

Use the KeyPairSpec parameter to choose an RSA or Elliptic Curve (ECC) data key pair. KMS recommends that you use ECC key pairs for signing, and use RSA key pairs for either encryption or signing, but not both. However, KMS cannot enforce any restrictions on the use of data key pairs outside of KMS.

GenerateDataKeyPairWithoutPlaintext returns a unique data key pair for each request. The bytes in the key are not related to the caller or KMS key that is used to encrypt the private key. The public key is a DER-encoded X.509 SubjectPublicKeyInfo, as specified in RFC 5280.

You can use an optional encryption context to add additional security to the encryption operation. If you specify an EncryptionContext, you must specify the same encryption context (a case-sensitive exact match) when decrypting the encrypted data key. Otherwise, the request to decrypt fails with an InvalidCiphertextException. For more information, see Encryption Context in the Key Management Service Developer Guide.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

Required permissions: kms:GenerateDataKeyPairWithoutPlaintext (key policy)

Related operations:
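
The typical follow-up, decrypting the stored private key only when it is finally needed, might look like this sketch; the alias and key pair spec are placeholders:

```go
package main

import (
	"crypto/x509"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	out, err := svc.GenerateDataKeyPairWithoutPlaintext(&kms.GenerateDataKeyPairWithoutPlaintextInput{
		KeyId:       aws.String("alias/my-wrapping-key"), // placeholder alias
		KeyPairSpec: aws.String(kms.DataKeyPairSpecRsa2048),
	})
	if err != nil {
		log.Fatal(err)
	}
	// Store out.PrivateKeyCiphertextBlob; no plaintext private key is returned.

	// Later, when the private key is actually needed:
	dec, err := svc.Decrypt(&kms.DecryptInput{CiphertextBlob: out.PrivateKeyCiphertextBlob})
	if err != nil {
		log.Fatal(err)
	}
	priv, err := x509.ParsePKCS8PrivateKey(dec.Plaintext) // DER-encoded PKCS8
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("recovered private key of type %T\n", priv)
}
```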

", - "GenerateDataKeyWithoutPlaintext": "

Returns a unique symmetric data key for use outside of KMS. This operation returns a data key that is encrypted under a symmetric encryption KMS key that you specify. The bytes in the key are random; they are not related to the caller or to the KMS key.

GenerateDataKeyWithoutPlaintext is identical to the GenerateDataKey operation except that it does not return a plaintext copy of the data key.

This operation is useful for systems that need to encrypt data at some point, but not immediately. When you need to encrypt the data, you call the Decrypt operation on the encrypted copy of the key. It's also useful in distributed systems with different levels of trust. For example, you might store encrypted data in containers. One component of your system creates new containers and stores an encrypted data key with each container. Then, a different component puts the data into the containers. That component first decrypts the data key, uses the plaintext data key to encrypt data, puts the encrypted data into the container, and then destroys the plaintext data key. In this system, the component that creates the containers never sees the plaintext data key.

To request an asymmetric data key pair, use the GenerateDataKeyPair or GenerateDataKeyPairWithoutPlaintext operations.

To generate a data key, you must specify the symmetric encryption KMS key that is used to encrypt the data key. You cannot use an asymmetric KMS key or a key in a custom key store to generate a data key. To get the type of your KMS key, use the DescribeKey operation.

If the operation succeeds, you will find the encrypted copy of the data key in the CiphertextBlob field.

You can use an optional encryption context to add additional security to the encryption operation. If you specify an EncryptionContext, you must specify the same encryption context (a case-sensitive exact match) when decrypting the encrypted data key. Otherwise, the request to decrypt fails with an InvalidCiphertextException. For more information, see Encryption Context in the Key Management Service Developer Guide.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

Required permissions: kms:GenerateDataKeyWithoutPlaintext (key policy)

Related operations:

", - "GenerateMac": "

Generates a hash-based message authentication code (HMAC) for a message using an HMAC KMS key and a MAC algorithm that the key supports. The MAC algorithm computes the HMAC for the message and the key as described in RFC 2104.

You can use the HMAC that this operation generates with the VerifyMac operation to demonstrate that the original message has not changed. Also, because a secret key is used to create the hash, you can verify that the party that generated the hash has the required secret key. This operation is part of KMS support for HMAC KMS keys. For details, see HMAC keys in KMS in the Key Management Service Developer Guide .

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

Required permissions: kms:GenerateMac (key policy)

Related operations: VerifyMac

", + "GenerateDataKeyWithoutPlaintext": "

Returns a unique symmetric data key for use outside of KMS. This operation returns a data key that is encrypted under a symmetric encryption KMS key that you specify. The bytes in the key are random; they are not related to the caller or to the KMS key.

GenerateDataKeyWithoutPlaintext is identical to the GenerateDataKey operation except that it does not return a plaintext copy of the data key.

This operation is useful for systems that need to encrypt data at some point, but not immediately. When you need to encrypt the data, you call the Decrypt operation on the encrypted copy of the key.

It's also useful in distributed systems with different levels of trust. For example, you might store encrypted data in containers. One component of your system creates new containers and stores an encrypted data key with each container. Then, a different component puts the data into the containers. That component first decrypts the data key, uses the plaintext data key to encrypt data, puts the encrypted data into the container, and then destroys the plaintext data key. In this system, the component that creates the containers never sees the plaintext data key.

To request an asymmetric data key pair, use the GenerateDataKeyPair or GenerateDataKeyPairWithoutPlaintext operations.

To generate a data key, you must specify the symmetric encryption KMS key that is used to encrypt the data key. You cannot use an asymmetric KMS key or a key in a custom key store to generate a data key. To get the type of your KMS key, use the DescribeKey operation.

If the operation succeeds, you will find the encrypted copy of the data key in the CiphertextBlob field.

You can use an optional encryption context to add additional security to the encryption operation. If you specify an EncryptionContext, you must specify the same encryption context (a case-sensitive exact match) when decrypting the encrypted data key. Otherwise, the request to decrypt fails with an InvalidCiphertextException. For more information, see Encryption Context in the Key Management Service Developer Guide.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

Required permissions: kms:GenerateDataKeyWithoutPlaintext (key policy)

Related operations:
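
A sketch of the container pattern described above, with a placeholder alias; the creating component stores only the encrypted key, and a separate component decrypts it on demand:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	// The container-creating component never sees the plaintext key.
	out, err := svc.GenerateDataKeyWithoutPlaintext(&kms.GenerateDataKeyWithoutPlaintextInput{
		KeyId:   aws.String("alias/my-app-key"), // placeholder alias
		KeySpec: aws.String(kms.DataKeySpecAes256),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("store %d-byte encrypted data key with the container\n", len(out.CiphertextBlob))

	// The component that fills the container decrypts the key when needed:
	dec, err := svc.Decrypt(&kms.DecryptInput{CiphertextBlob: out.CiphertextBlob})
	if err != nil {
		log.Fatal(err)
	}
	_ = dec.Plaintext // use to encrypt the container contents, then erase
}
```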

", + "GenerateMac": "

Generates a hash-based message authentication code (HMAC) for a message using an HMAC KMS key and a MAC algorithm that the key supports. The MAC algorithm computes the HMAC for the message and the key as described in RFC 2104.

You can use the HMAC that this operation generates with the VerifyMac operation to demonstrate that the original message has not changed. Also, because a secret key is used to create the hash, you can verify that the party that generated the hash has the required secret key. This operation is part of KMS support for HMAC KMS keys. For details, see HMAC keys in KMS in the Key Management Service Developer Guide.

Best practices recommend that you limit the time during which any signing mechanism, including an HMAC, is effective. This deters an attack where the actor uses a signed message to establish validity repeatedly or long after the message is superseded. HMAC tags do not include a timestamp, but you can include a timestamp in the token or message to help you detect when it's time to refresh the HMAC.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

Required permissions: kms:GenerateMac (key policy)

Related operations: VerifyMac
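
A minimal sketch that follows the timestamp recommendation above, assuming a placeholder HMAC key alias:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	// Include a timestamp in the message itself so stale HMACs can be detected.
	msg := []byte(fmt.Sprintf("order=42;issued=%d", time.Now().Unix()))

	mac, err := svc.GenerateMac(&kms.GenerateMacInput{
		KeyId:        aws.String("alias/my-hmac-key"), // placeholder alias
		MacAlgorithm: aws.String(kms.MacAlgorithmSpecHmacSha256),
		Message:      msg,
	})
	if err != nil {
		log.Fatal(err)
	}

	// VerifyMac recomputes the HMAC with the same key; a mismatch surfaces
	// as an error rather than MacValid=false.
	ok, err := svc.VerifyMac(&kms.VerifyMacInput{
		KeyId:        aws.String("alias/my-hmac-key"),
		MacAlgorithm: aws.String(kms.MacAlgorithmSpecHmacSha256),
		Message:      msg,
		Mac:          mac.Mac,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("MAC valid:", aws.BoolValue(ok.MacValid))
}
```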

", "GenerateRandom": "

Returns a random byte string that is cryptographically secure.

By default, the random byte string is generated in KMS. To generate the byte string in the CloudHSM cluster that is associated with a custom key store, specify the custom key store ID.

Applications in Amazon Web Services Nitro Enclaves can call this operation by using the Amazon Web Services Nitro Enclaves Development Kit. For information about the supporting parameters, see How Amazon Web Services Nitro Enclaves use KMS in the Key Management Service Developer Guide.

For more information about entropy and random number generation, see Key Management Service Cryptographic Details.

Required permissions: kms:GenerateRandom (IAM policy)
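
For example, requesting 32 cryptographically secure random bytes; the optional custom key store ID is left as a commented placeholder:

```go
package main

import (
	"encoding/hex"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	out, err := svc.GenerateRandom(&kms.GenerateRandomInput{
		NumberOfBytes: aws.Int64(32),
		// CustomKeyStoreId: aws.String("..."), // optional: generate in CloudHSM
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(hex.EncodeToString(out.Plaintext)) // 32 random bytes
}
```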

", "GetKeyPolicy": "

Gets a key policy attached to the specified KMS key.

Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

Required permissions: kms:GetKeyPolicy (key policy)

Related operations: PutKeyPolicy

", - "GetKeyRotationStatus": "

Gets a Boolean value that indicates whether automatic rotation of the key material is enabled for the specified KMS key.

You cannot enable automatic rotation of asymmetric KMS keys, HMAC KMS keys, KMS keys with imported key material, or KMS keys in a custom key store. To enable or disable automatic rotation of a set of related multi-Region keys, set the property on the primary key. The key rotation status for these KMS keys is always false.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: Yes. To perform this operation on a KMS key in a different Amazon Web Services account, specify the key ARN in the value of the KeyId parameter.

Required permissions: kms:GetKeyRotationStatus (key policy)

Related operations:

", + "GetKeyRotationStatus": "

Gets a Boolean value that indicates whether automatic rotation of the key material is enabled for the specified KMS key.

When you enable automatic rotation for customer managed KMS keys, KMS rotates the key material of the KMS key one year (approximately 365 days) from the enable date and every year thereafter. You can monitor rotation of the key material for your KMS keys in CloudTrail and Amazon CloudWatch.

Automatic key rotation is supported only on symmetric encryption KMS keys. You cannot enable or disable automatic rotation of asymmetric KMS keys, HMAC KMS keys, KMS keys with imported key material, or KMS keys in a custom key store. The key rotation status of these KMS keys is always false. To enable or disable automatic rotation of a set of related multi-Region keys, set the property on the primary key.

You can enable (EnableKeyRotation) and disable (DisableKeyRotation) automatic rotation of the key material in customer managed KMS keys. Key material rotation of Amazon Web Services managed KMS keys is not configurable. KMS always rotates the key material in Amazon Web Services managed KMS keys every year. The key rotation status for Amazon Web Services managed KMS keys is always true.

In May 2022, KMS changed the rotation schedule for Amazon Web Services managed keys from every three years to every year. For details, see EnableKeyRotation.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: Yes. To perform this operation on a KMS key in a different Amazon Web Services account, specify the key ARN in the value of the KeyId parameter.

Required permissions: kms:GetKeyRotationStatus (key policy)

Related operations:
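
A minimal sketch that reads the rotation status; the key ID is a placeholder:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	// Per the notes above: always true for Amazon Web Services managed keys,
	// always false for asymmetric, HMAC, imported-material, and custom key
	// store keys.
	out, err := svc.GetKeyRotationStatus(&kms.GetKeyRotationStatusInput{
		KeyId: aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("rotation enabled:", aws.BoolValue(out.KeyRotationEnabled))
}
```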

", "GetParametersForImport": "

Returns the items you need to import key material into a symmetric encryption KMS key. For more information about importing key material into KMS, see Importing key material in the Key Management Service Developer Guide.

This operation returns a public key and an import token. Use the public key to encrypt the symmetric key material. Store the import token to send with a subsequent ImportKeyMaterial request.

You must specify the key ID of the symmetric encryption KMS key into which you will import key material. This KMS key's Origin must be EXTERNAL. You must also specify the wrapping algorithm and type of wrapping key (public key) that you will use to encrypt the key material. You cannot perform this operation on an asymmetric KMS key, an HMAC KMS key, or on any KMS key in a different Amazon Web Services account.

To import key material, you must use the public key and import token from the same response. These items are valid for 24 hours. The expiration date and time appear in the GetParametersForImport response. You cannot use an expired token in an ImportKeyMaterial request. If your key and token expire, send another GetParametersForImport request.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

Required permissions: kms:GetParametersForImport (key policy)

Related operations:
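
A sketch of the first half of the import flow, assuming an EXTERNAL-origin key with a placeholder ID:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	// The KMS key must have Origin EXTERNAL; the key ID is a placeholder.
	out, err := svc.GetParametersForImport(&kms.GetParametersForImportInput{
		KeyId:             aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
		WrappingAlgorithm: aws.String(kms.AlgorithmSpecRsaesOaepSha256),
		WrappingKeySpec:   aws.String(kms.WrappingKeySpecRsa2048),
	})
	if err != nil {
		log.Fatal(err)
	}

	// Wrap your key material with out.PublicKey and send out.ImportToken in
	// the ImportKeyMaterial call before the parameters expire (24 hours).
	fmt.Println("parameters valid until:", aws.TimeValue(out.ParametersValidTo))
}
```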

", "GetPublicKey": "

Returns the public key of an asymmetric KMS key. Unlike the private key of an asymmetric KMS key, which never leaves KMS unencrypted, callers with kms:GetPublicKey permission can download the public key of an asymmetric KMS key. You can share the public key to allow others to encrypt messages and verify signatures outside of KMS. For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.

You do not need to download the public key. Instead, you can use the public key within KMS by calling the Encrypt, ReEncrypt, or Verify operations with the identifier of an asymmetric KMS key. When you use the public key within KMS, you benefit from the authentication, authorization, and logging that are part of every KMS operation. You also reduce the risk of encrypting data that cannot be decrypted. These features are not effective outside of KMS. For details, see Special Considerations for Downloading Public Keys.

To help you use the public key safely outside of KMS, GetPublicKey returns important information about the public key in the response, including:

Although KMS cannot enforce these restrictions on external operations, it is crucial that you use this information to prevent the public key from being used improperly. For example, you can prevent a public signing key from being used to encrypt data, or prevent a public key from being used with an encryption algorithm that is not supported by KMS. You can also avoid errors, such as using the wrong signing algorithm in a verification operation.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

Required permissions: kms:GetPublicKey (key policy)

Related operations: CreateKey
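
A sketch that downloads the public key and parses the DER-encoded SubjectPublicKeyInfo with the standard library; the alias is a placeholder:

```go
package main

import (
	"crypto/x509"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	out, err := svc.GetPublicKey(&kms.GetPublicKeyInput{
		KeyId: aws.String("alias/my-asymmetric-key"), // placeholder alias
	})
	if err != nil {
		log.Fatal(err)
	}

	// The response also carries the metadata you need to use the key safely
	// outside of KMS: KeySpec, KeyUsage, and the supported algorithms.
	pub, err := x509.ParsePKIXPublicKey(out.PublicKey) // DER SubjectPublicKeyInfo
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s key (%T), usage %s\n",
		aws.StringValue(out.KeySpec), pub, aws.StringValue(out.KeyUsage))
}
```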

", "ImportKeyMaterial": "

Imports key material into an existing symmetric encryption KMS key that was created without key material. After you successfully import key material into a KMS key, you can reimport the same key material into that KMS key, but you cannot import different key material.

You cannot perform this operation on an asymmetric KMS key, an HMAC KMS key, or on any KMS key in a different Amazon Web Services account. For more information about creating KMS keys with no key material and then importing key material, see Importing Key Material in the Key Management Service Developer Guide.

Before using this operation, call GetParametersForImport. Its response includes a public key and an import token. Use the public key to encrypt the key material. Then, submit the import token from the same GetParametersForImport response.

When calling this operation, you must specify the following values:

When this operation is successful, the key state of the KMS key changes from PendingImport to Enabled, and you can use the KMS key.

If this operation fails, use the exception to help determine the problem. If the error is related to the key material, the import token, or wrapping key, use GetParametersForImport to get a new public key and import token for the KMS key and repeat the import procedure. For help, see How To Import Key Material in the Key Management Service Developer Guide.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

Required permissions: kms:ImportKeyMaterial (key policy)

Related operations:
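
A sketch of the second half of the import flow; importToken and wrapped stand in for values produced by GetParametersForImport and your own wrapping step:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	// Both values must come from the same GetParametersForImport response;
	// they are placeholders here.
	var importToken, wrapped []byte

	_, err := svc.ImportKeyMaterial(&kms.ImportKeyMaterialInput{
		KeyId:                aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
		ImportToken:          importToken,
		EncryptedKeyMaterial: wrapped,
		// Alternatively, set KEY_MATERIAL_EXPIRES and a ValidTo timestamp.
		ExpirationModel: aws.String(kms.ExpirationModelTypeKeyMaterialDoesNotExpire),
	})
	if err != nil {
		log.Fatal(err)
	}
	// On success, the key state changes from PendingImport to Enabled.
}
```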

", @@ -43,7 +43,7 @@ "RetireGrant": "

Deletes a grant. Typically, you retire a grant when you no longer need its permissions. To identify the grant to retire, use a grant token, or both the grant ID and a key identifier (key ID or key ARN) of the KMS key. The CreateGrant operation returns both values.

This operation can be called by the retiring principal for a grant, by the grantee principal if the grant allows the RetireGrant operation, and by the Amazon Web Services account in which the grant is created. It can also be called by principals to whom permission for retiring a grant is delegated. For details, see Retiring and revoking grants in the Key Management Service Developer Guide.

For detailed information about grants, including grant terminology, see Grants in KMS in the Key Management Service Developer Guide. For examples of working with grants in several programming languages, see Programming grants.

Cross-account use: Yes. You can retire a grant on a KMS key in a different Amazon Web Services account.

Required permissions: Permission to retire a grant is determined primarily by the grant. For details, see Retiring and revoking grants in the Key Management Service Developer Guide.

Related operations:

", "RevokeGrant": "

Deletes the specified grant. You revoke a grant to terminate the permissions that the grant allows. For more information, see Retiring and revoking grants in the Key Management Service Developer Guide.

When you create, retire, or revoke a grant, there might be a brief delay, usually less than five minutes, until the grant is available throughout KMS. This state is known as eventual consistency. For details, see Eventual consistency in the Key Management Service Developer Guide.

For detailed information about grants, including grant terminology, see Grants in KMS in the Key Management Service Developer Guide. For examples of working with grants in several programming languages, see Programming grants.

Cross-account use: Yes. To perform this operation on a KMS key in a different Amazon Web Services account, specify the key ARN in the value of the KeyId parameter.

Required permissions: kms:RevokeGrant (key policy).

Related operations:
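
A minimal RevokeGrant sketch; the grant ID comes from a prior CreateGrant call, and both identifiers are placeholders:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	_, err := svc.RevokeGrant(&kms.RevokeGrantInput{
		KeyId:   aws.String("arn:aws:kms:us-east-1:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"),
		GrantId: aws.String("0123456789012345678901234567890123456789012345678901234567890123"),
	})
	if err != nil {
		log.Fatal(err)
	}
	// Remember eventual consistency: revocation can take a few minutes to
	// propagate throughout KMS.
}
```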

", "ScheduleKeyDeletion": "

Schedules the deletion of a KMS key. By default, KMS applies a waiting period of 30 days, but you can specify a waiting period of 7-30 days. When this operation is successful, the key state of the KMS key changes to PendingDeletion and the key can't be used in any cryptographic operations. It remains in this state for the duration of the waiting period. Before the waiting period ends, you can use CancelKeyDeletion to cancel the deletion of the KMS key. After the waiting period ends, KMS deletes the KMS key, its key material, and all KMS data associated with it, including all aliases that refer to it.

Deleting a KMS key is a destructive and potentially dangerous operation. When a KMS key is deleted, all data that was encrypted under the KMS key is unrecoverable. (The only exception is a multi-Region replica key.) To prevent the use of a KMS key without deleting it, use DisableKey.

If you schedule deletion of a KMS key from a custom key store, when the waiting period expires, ScheduleKeyDeletion deletes the KMS key from KMS. Then KMS makes a best effort to delete the key material from the associated CloudHSM cluster. However, you might need to manually delete the orphaned key material from the cluster and its backups.

You can schedule the deletion of a multi-Region primary key and its replica keys at any time. However, KMS will not delete a multi-Region primary key with existing replica keys. If you schedule the deletion of a primary key with replicas, its key state changes to PendingReplicaDeletion and it cannot be replicated or used in cryptographic operations. This status can continue indefinitely. When the last of its replica keys is deleted (not just scheduled), the key state of the primary key changes to PendingDeletion and its waiting period (PendingWindowInDays) begins. For details, see Deleting multi-Region keys in the Key Management Service Developer Guide.

For more information about scheduling a KMS key for deletion, see Deleting KMS keys in the Key Management Service Developer Guide.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

Required permissions: kms:ScheduleKeyDeletion (key policy)

Related operations:
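
A sketch that schedules deletion with the shortest allowed waiting period; the key ID is a placeholder:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	// Use CancelKeyDeletion before DeletionDate to back out.
	out, err := svc.ScheduleKeyDeletion(&kms.ScheduleKeyDeletionInput{
		KeyId:               aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
		PendingWindowInDays: aws.Int64(7), // 7-30 days; defaults to 30
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("key state:", aws.StringValue(out.KeyState),
		"deletion date:", aws.TimeValue(out.DeletionDate))
}
```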

", - "Sign": "

Creates a digital signature for a message or message digest by using the private key in an asymmetric signing KMS key. To verify the signature, use the Verify operation, or use the public key in the same asymmetric KMS key outside of KMS. For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.

Digital signatures are generated and verified by using asymmetric key pair, such as an RSA or ECC pair that is represented by an asymmetric KMS key. The key owner (or an authorized user) uses their private key to sign a message. Anyone with the public key can verify that the message was signed with that particular private key and that the message hasn't changed since it was signed.

To use the Sign operation, provide the following information:

When signing a message, be sure to record the KMS key and the signing algorithm. This information is required to verify the signature.

To verify the signature that this operation generates, use the Verify operation. Or use the GetPublicKey operation to download the public key and then use the public key to verify the signature outside of KMS.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

Required permissions: kms:Sign (key policy)

Related operations: Verify

", + "Sign": "

Creates a digital signature for a message or message digest by using the private key in an asymmetric signing KMS key. To verify the signature, use the Verify operation, or use the public key in the same asymmetric KMS key outside of KMS. For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.

Digital signatures are generated and verified by using an asymmetric key pair, such as an RSA or ECC pair that is represented by an asymmetric KMS key. The key owner (or an authorized user) uses their private key to sign a message. Anyone with the public key can verify that the message was signed with that particular private key and that the message hasn't changed since it was signed.

To use the Sign operation, provide the following information:

When signing a message, be sure to record the KMS key and the signing algorithm. This information is required to verify the signature.

Best practices recommend that you limit the time during which any signature is effective. This deters an attack where the actor uses a signed message to establish validity repeatedly or long after the message is superseded. Signatures do not include a timestamp, but you can include a timestamp in the signed message to help you detect when it's time to refresh the signature.

To verify the signature that this operation generates, use the Verify operation. Or use the GetPublicKey operation to download the public key and then use the public key to verify the signature outside of KMS.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

Required permissions: kms:Sign (key policy)

Related operations: Verify
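
A sketch that follows the timestamp recommendation above and then verifies with the same key and algorithm; the signing-key alias is a placeholder:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	// Embed a timestamp in the signed message so consumers can detect
	// stale signatures.
	msg := []byte(fmt.Sprintf("deploy=prod;issued=%d", time.Now().Unix()))

	sig, err := svc.Sign(&kms.SignInput{
		KeyId:            aws.String("alias/my-signing-key"), // placeholder alias
		Message:          msg,
		MessageType:      aws.String(kms.MessageTypeRaw),
		SigningAlgorithm: aws.String(kms.SigningAlgorithmSpecEcdsaSha256),
	})
	if err != nil {
		log.Fatal(err)
	}

	// Record the key and algorithm: both are required to verify.
	ok, err := svc.Verify(&kms.VerifyInput{
		KeyId:            aws.String("alias/my-signing-key"),
		Message:          msg,
		MessageType:      aws.String(kms.MessageTypeRaw),
		Signature:        sig.Signature,
		SigningAlgorithm: aws.String(kms.SigningAlgorithmSpecEcdsaSha256),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("signature valid:", aws.BoolValue(ok.SignatureValid))
}
```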

", "TagResource": "

Adds or edits tags on a customer managed key.

Tagging or untagging a KMS key can allow or deny permission to the KMS key. For details, see ABAC in KMS in the Key Management Service Developer Guide.

Each tag consists of a tag key and a tag value, both of which are case-sensitive strings. The tag value can be an empty (null) string. To add a tag, specify a new tag key and a tag value. To edit a tag, specify an existing tag key and a new tag value.

You can use this operation to tag a customer managed key, but you cannot tag an Amazon Web Services managed key, an Amazon Web Services owned key, a custom key store, or an alias.

You can also add tags to a KMS key while creating it (CreateKey) or replicating it (ReplicateKey).

For information about using tags in KMS, see Tagging keys. For general information about tags, including the format and syntax, see Tagging Amazon Web Services resources in the Amazon Web Services General Reference.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

Required permissions: kms:TagResource (key policy)

Related operations:
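
A sketch that adds a tag and later removes it; the key ID and tag are placeholders:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))
	keyID := aws.String("1234abcd-12ab-34cd-56ef-1234567890ab") // placeholder

	// Add or overwrite a tag (tag keys and values are case-sensitive).
	_, err := svc.TagResource(&kms.TagResourceInput{
		KeyId: keyID,
		Tags: []*kms.Tag{
			{TagKey: aws.String("Project"), TagValue: aws.String("alpha")},
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	// Removing the tag later takes only the tag key.
	if _, err := svc.UntagResource(&kms.UntagResourceInput{
		KeyId:   keyID,
		TagKeys: []*string{aws.String("Project")},
	}); err != nil {
		log.Fatal(err)
	}
}
```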

", "UntagResource": "

Deletes tags from a customer managed key. To delete a tag, specify the tag key and the KMS key.

Tagging or untagging a KMS key can allow or deny permission to the KMS key. For details, see ABAC in KMS in the Key Management Service Developer Guide.

When it succeeds, the UntagResource operation doesn't return any output. Also, if the specified tag key isn't found on the KMS key, it doesn't throw an exception or return a response. To confirm that the operation worked, use the ListResourceTags operation.

For information about using tags in KMS, see Tagging keys. For general information about tags, including the format and syntax, see Tagging Amazon Web Services resources in the Amazon Web Services General Reference.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

Required permissions: kms:UntagResource (key policy)

Related operations:

", "UpdateAlias": "

Associates an existing KMS alias with a different KMS key. Each alias is associated with only one KMS key at a time, although a KMS key can have multiple aliases. The alias and the KMS key must be in the same Amazon Web Services account and Region.

Adding, deleting, or updating an alias can allow or deny permission to the KMS key. For details, see ABAC in KMS in the Key Management Service Developer Guide.

The current and new KMS key must be the same type (both symmetric or both asymmetric), and they must have the same key usage (ENCRYPT_DECRYPT or SIGN_VERIFY). This restriction prevents errors in code that uses aliases. If you must assign an alias to a different type of KMS key, use DeleteAlias to delete the old alias and CreateAlias to create a new alias.

You cannot use UpdateAlias to change an alias name. To change an alias name, use DeleteAlias to delete the old alias and CreateAlias to create a new alias.

Because an alias is not a property of a KMS key, you can create, update, and delete the aliases of a KMS key without affecting the KMS key. Also, aliases do not appear in the response from the DescribeKey operation. To get the aliases of all KMS keys in the account, use the ListAliases operation.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

Required permissions:

For details, see Controlling access to aliases in the Key Management Service Developer Guide.

Related operations:
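
A sketch that repoints an alias; both identifiers are placeholders, and the target key must have the same type and key usage as the current one:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	_, err := svc.UpdateAlias(&kms.UpdateAliasInput{
		AliasName:   aws.String("alias/my-app-key"),                       // placeholder
		TargetKeyId: aws.String("0987dcba-09fe-87dc-65ba-ab0987654321"),   // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
}
```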

", @@ -730,7 +730,7 @@ } }, "IncorrectKeyException": { - "base": "

The request was rejected because the specified KMS key cannot decrypt the data. The KeyId in a Decrypt request and the SourceKeyId in a ReEncrypt request must identify the same KMS key that was used to encrypt the ciphertext.

", + "base": "

The request was rejected because the specified KMS key cannot decrypt the data. The KeyId in a Decrypt request and the SourceKeyId in a ReEncrypt request must identify the same KMS key that was used to encrypt the ciphertext.

", "refs": { } }, @@ -819,12 +819,12 @@ "DisableKeyRequest$KeyId": "

Identifies the KMS key to disable.

Specify the key ID or key ARN of the KMS key.

For example:

To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey.

", "DisableKeyRotationRequest$KeyId": "

Identifies a symmetric encryption KMS key. You cannot enable or disable automatic rotation of asymmetric KMS keys, HMAC KMS keys, KMS keys with imported key material, or KMS keys in a custom key store.

Specify the key ID or key ARN of the KMS key.

For example:

To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey.

", "EnableKeyRequest$KeyId": "

Identifies the KMS key to enable.

Specify the key ID or key ARN of the KMS key.

For example:

To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey.

", - "EnableKeyRotationRequest$KeyId": "

Identifies a symmetric encryption KMS key. You cannot enable automatic rotation of asymmetric KMS keys, HMAC KMS keys, KMS keys with imported key material, or KMS keys in a custom key store. To enable or disable automatic rotation of a set of related multi-Region keys, set the property on the primary key.

Specify the key ID or key ARN of the KMS key.

For example:

To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey.

", + "EnableKeyRotationRequest$KeyId": "

Identifies a symmetric encryption KMS key. You cannot enable or disable automatic rotation of asymmetric KMS keys, HMAC KMS keys, KMS keys with imported key material, or KMS keys in a custom key store. The key rotation status of these KMS keys is always false. To enable or disable automatic rotation of a set of related multi-Region keys, set the property on the primary key.

Specify the key ID or key ARN of the KMS key.

For example:

To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey.

", "EncryptRequest$KeyId": "

Identifies the KMS key to use in the encryption operation. The KMS key must have a KeyUsage of ENCRYPT_DECRYPT. To find the KeyUsage of a KMS key, use the DescribeKey operation.

To specify a KMS key, use its key ID, key ARN, alias name, or alias ARN. When using an alias name, prefix it with \"alias/\". To specify a KMS key in a different Amazon Web Services account, you must use the key ARN or alias ARN.

For example:

To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. To get the alias name and alias ARN, use ListAliases.

", "EncryptResponse$KeyId": "

The Amazon Resource Name (key ARN) of the KMS key that was used to encrypt the plaintext.

", "GenerateDataKeyPairRequest$KeyId": "

Specifies the symmetric encryption KMS key that encrypts the private key in the data key pair. You cannot specify an asymmetric KMS key or a KMS key in a custom key store. To get the type and origin of your KMS key, use the DescribeKey operation.

To specify a KMS key, use its key ID, key ARN, alias name, or alias ARN. When using an alias name, prefix it with \"alias/\". To specify a KMS key in a different Amazon Web Services account, you must use the key ARN or alias ARN.

For example:

To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. To get the alias name and alias ARN, use ListAliases.

", "GenerateDataKeyPairResponse$KeyId": "

The Amazon Resource Name (key ARN) of the KMS key that encrypted the private key.

", - "GenerateDataKeyPairWithoutPlaintextRequest$KeyId": "

Specifies the symmetric encryption KMS key that encrypts the private key in the data key pair. You cannot specify an asymmetric KMS key or a KMS key in a custom key store. To get the type and origin of your KMS key, use the DescribeKey operation.

To specify a KMS key, use its key ID, key ARN, alias name, or alias ARN. When using an alias name, prefix it with \"alias/\". To specify a KMS key in a different Amazon Web Services account, you must use the key ARN or alias ARN.

For example:

To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. To get the alias name and alias ARN, use ListAliases.

", + "GenerateDataKeyPairWithoutPlaintextRequest$KeyId": "

Specifies the symmetric encryption KMS key that encrypts the private key in the data key pair. You cannot specify an asymmetric KMS key or a KMS key in a custom key store. To get the type and origin of your KMS key, use the DescribeKey operation.

To specify a KMS key, use its key ID, key ARN, alias name, or alias ARN. When using an alias name, prefix it with \"alias/\". To specify a KMS key in a different Amazon Web Services account, you must use the key ARN or alias ARN.

For example:

To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. To get the alias name and alias ARN, use ListAliases.

", "GenerateDataKeyPairWithoutPlaintextResponse$KeyId": "

The Amazon Resource Name (key ARN) of the KMS key that encrypted the private key.

", "GenerateDataKeyRequest$KeyId": "

Specifies the symmetric encryption KMS key that encrypts the data key. You cannot specify an asymmetric KMS key or a KMS key in a custom key store. To get the type and origin of your KMS key, use the DescribeKey operation.

To specify a KMS key, use its key ID, key ARN, alias name, or alias ARN. When using an alias name, prefix it with \"alias/\". To specify a KMS key in a different Amazon Web Services account, you must use the key ARN or alias ARN.

For example:

To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. To get the alias name and alias ARN, use ListAliases.

", "GenerateDataKeyResponse$KeyId": "

The Amazon Resource Name (key ARN) of the KMS key that encrypted the data key.

", @@ -898,7 +898,7 @@ "KeySpec": { "base": null, "refs": { - "CreateKeyRequest$KeySpec": "

Specifies the type of KMS key to create. The default value, SYMMETRIC_DEFAULT, creates a KMS key with a 256-bit symmetric key for encryption and decryption. For help choosing a key spec for your KMS key, see Choosing a KMS key type in the Key Management Service Developer Guide .

The KeySpec determines whether the KMS key contains a symmetric key or an asymmetric key pair. It also determines the algorithms that the KMS key supports. You can't change the KeySpec after the KMS key is created. To further restrict the algorithms that can be used with the KMS key, use a condition key in its key policy or IAM policy. For more information, see kms:EncryptionAlgorithm, kms:MacAlgorithm or kms:Signing Algorithm in the Key Management Service Developer Guide .

Amazon Web Services services that are integrated with KMS use symmetric encryption KMS keys to protect your data. These services do not support asymmetric KMS keys or HMAC KMS keys.

KMS supports the following key specs for KMS keys:

", + "CreateKeyRequest$KeySpec": "

Specifies the type of KMS key to create. The default value, SYMMETRIC_DEFAULT, creates a KMS key with a 256-bit symmetric key for encryption and decryption. For help choosing a key spec for your KMS key, see Choosing a KMS key type in the Key Management Service Developer Guide.

The KeySpec determines whether the KMS key contains a symmetric key or an asymmetric key pair. It also determines the cryptographic algorithms that the KMS key supports. You can't change the KeySpec after the KMS key is created. To further restrict the algorithms that can be used with the KMS key, use a condition key in its key policy or IAM policy. For more information, see kms:EncryptionAlgorithm, kms:MacAlgorithm, or kms:SigningAlgorithm in the Key Management Service Developer Guide.

Amazon Web Services services that are integrated with KMS use symmetric encryption KMS keys to protect your data. These services do not support asymmetric KMS keys or HMAC KMS keys.

KMS supports the following key specs for KMS keys:

", "GetPublicKeyResponse$KeySpec": "

The type of the public key that was downloaded.

", "KeyMetadata$KeySpec": "

Describes the type of key material in the KMS key.

" } @@ -1081,7 +1081,7 @@ "NullableBooleanType": { "base": null, "refs": { - "CreateKeyRequest$MultiRegion": "

Creates a multi-Region primary key that you can replicate into other Amazon Web Services Regions. You cannot change this value after you create the KMS key.

For a multi-Region key, set this parameter to True. For a single-Region KMS key, omit this parameter or set it to False. The default value is False.

This operation supports multi-Region keys, an KMS feature that lets you create multiple interoperable KMS keys in different Amazon Web Services Regions. Because these KMS keys have the same key ID, key material, and other metadata, you can use them interchangeably to encrypt data in one Amazon Web Services Region and decrypt it in a different Amazon Web Services Region without re-encrypting the data or making a cross-Region call. For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide.

This value creates a primary key, not a replica. To create a replica key, use the ReplicateKey operation.

You can create a symmetric or asymmetric multi-Region key, and you can create a multi-Region key with imported key material. However, you cannot create a multi-Region key in a custom key store.

", + "CreateKeyRequest$MultiRegion": "

Creates a multi-Region primary key that you can replicate into other Amazon Web Services Regions. You cannot change this value after you create the KMS key.

For a multi-Region key, set this parameter to True. For a single-Region KMS key, omit this parameter or set it to False. The default value is False.

This operation supports multi-Region keys, a KMS feature that lets you create multiple interoperable KMS keys in different Amazon Web Services Regions. Because these KMS keys have the same key ID, key material, and other metadata, you can use them interchangeably to encrypt data in one Amazon Web Services Region and decrypt it in a different Amazon Web Services Region without re-encrypting the data or making a cross-Region call. For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide.

This value creates a primary key, not a replica. To create a replica key, use the ReplicateKey operation.

You can create a multi-Region version of a symmetric encryption KMS key, an HMAC KMS key, an asymmetric KMS key, or a KMS key with imported key material. However, you cannot create a multi-Region key in a custom key store.
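
A sketch that sets the parameter when creating a primary key; the description is illustrative, and replication would follow via ReplicateKey:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	// Create a multi-Region primary key; this value cannot be changed later.
	out, err := svc.CreateKey(&kms.CreateKeyInput{
		MultiRegion: aws.Bool(true),
		Description: aws.String("multi-Region primary (example)"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created:", aws.StringValue(out.KeyMetadata.Arn))
}
```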

", "KeyMetadata$MultiRegion": "

Indicates whether the KMS key is a multi-Region (True) or regional (False) key. This value is True for multi-Region primary and replica keys and False for regional KMS keys.

For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide.

" } }, @@ -1104,7 +1104,7 @@ "base": null, "refs": { "KeyMetadata$PendingDeletionWindowInDays": "

The waiting period before the primary key in a multi-Region key is deleted. This waiting period begins when the last of its replica keys is deleted. This value is present only when the KeyState of the KMS key is PendingReplicaDeletion. That indicates that the KMS key is the primary key in a multi-Region key, it is scheduled for deletion, and it still has existing replica keys.

When a single-Region KMS key or a multi-Region replica key is scheduled for deletion, its deletion date is displayed in the DeletionDate field. However, when the primary key in a multi-Region key is scheduled for deletion, its waiting period doesn't begin until all of its replica keys are deleted. This value displays that waiting period. When the last replica key in the multi-Region key is deleted, the KeyState of the scheduled primary key changes from PendingReplicaDeletion to PendingDeletion and the deletion date appears in the DeletionDate field.

", - "ScheduleKeyDeletionRequest$PendingWindowInDays": "

The waiting period, specified in number of days. After the waiting period ends, KMS deletes the KMS key.

If the KMS key is a multi-Region primary key with replicas, the waiting period begins when the last of its replica keys is deleted. Otherwise, the waiting period begins immediately.

This value is optional. If you include a value, it must be between 7 and 30, inclusive. If you do not include a value, it defaults to 30.

", + "ScheduleKeyDeletionRequest$PendingWindowInDays": "

The waiting period, specified in number of days. After the waiting period ends, KMS deletes the KMS key.

If the KMS key is a multi-Region primary key with replica keys, the waiting period begins when the last of its replica keys is deleted. Otherwise, the waiting period begins immediately.

This value is optional. If you include a value, it must be between 7 and 30, inclusive. If you do not include a value, it defaults to 30.

", "ScheduleKeyDeletionResponse$PendingWindowInDays": "

The waiting period before the KMS key is deleted.

If the KMS key is a multi-Region primary key with replicas, the waiting period begins when the last of its replica keys is deleted. Otherwise, the waiting period begins immediately.

" } }, @@ -1140,10 +1140,10 @@ "PolicyType": { "base": null, "refs": { - "CreateKeyRequest$Policy": "

The key policy to attach to the KMS key.

If you provide a key policy, it must meet the following criteria:

If you do not provide a key policy, KMS attaches a default key policy to the KMS key. For more information, see Default Key Policy in the Key Management Service Developer Guide.

The key policy size quota is 32 kilobytes (32768 bytes).

For help writing and formatting a JSON policy document, see the IAM JSON Policy Reference in the Identity and Access Management User Guide .

", + "CreateKeyRequest$Policy": "

The key policy to attach to the KMS key. If you do not specify a key policy, KMS attaches a default key policy to the KMS key. For more information, see Default key policy in the Key Management Service Developer Guide.

If you provide a key policy, it must meet the following criteria:

A key policy document must conform to the following rules.

For help writing and formatting a JSON policy document, see the IAM JSON Policy Reference in the Identity and Access Management User Guide.

", "GetKeyPolicyResponse$Policy": "

A key policy document in JSON format.

", - "PutKeyPolicyRequest$Policy": "

The key policy to attach to the KMS key.

The key policy must meet the following criteria:

The key policy cannot exceed 32 kilobytes (32768 bytes). For more information, see Resource Quotas in the Key Management Service Developer Guide.

", - "ReplicateKeyRequest$Policy": "

The key policy to attach to the KMS key. This parameter is optional. If you do not provide a key policy, KMS attaches the default key policy to the KMS key.

The key policy is not a shared property of multi-Region keys. You can specify the same key policy or a different key policy for each key in a set of related multi-Region keys. KMS does not synchronize this property.

If you provide a key policy, it must meet the following criteria:

", + "PutKeyPolicyRequest$Policy": "

The key policy to attach to the KMS key.

The key policy must meet the following criteria:

A key policy document must conform to the following rules.

", + "ReplicateKeyRequest$Policy": "

The key policy to attach to the KMS key. This parameter is optional. If you do not provide a key policy, KMS attaches the default key policy to the KMS key.

The key policy is not a shared property of multi-Region keys. You can specify the same key policy or a different key policy for each key in a set of related multi-Region keys. KMS does not synchronize this property.

If you provide a key policy, it must meet the following criteria:

A key policy document must conform to the following rules.

", "ReplicateKeyResponse$ReplicaPolicy": "

The key policy of the new replica key. The value is a key policy document in JSON format.

" } }, diff --git a/service/glue/api.go b/service/glue/api.go index e91b45789a..188a25e6b3 100644 --- a/service/glue/api.go +++ b/service/glue/api.go @@ -18921,6 +18921,179 @@ func (s *Action) SetTimeout(v int64) *Action { return s } +// Specifies a transform that groups rows by chosen fields and computes the +// aggregated value by specified function. +type Aggregate struct { + _ struct{} `type:"structure"` + + // Specifies the aggregate functions to be performed on specified fields. + // + // Aggs is a required field + Aggs []*AggregateOperation `min:"1" type:"list" required:"true"` + + // Specifies the fields to group by. + // + // Groups is a required field + Groups [][]*string `type:"list" required:"true"` + + // Specifies the fields and rows to use as inputs for the aggregate transform. + // + // Inputs is a required field + Inputs []*string `min:"1" type:"list" required:"true"` + + // The name of the transform node. + // + // Name is a required field + Name *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Aggregate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Aggregate) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Aggregate) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Aggregate"} + if s.Aggs == nil { + invalidParams.Add(request.NewErrParamRequired("Aggs")) + } + if s.Aggs != nil && len(s.Aggs) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Aggs", 1)) + } + if s.Groups == nil { + invalidParams.Add(request.NewErrParamRequired("Groups")) + } + if s.Inputs == nil { + invalidParams.Add(request.NewErrParamRequired("Inputs")) + } + if s.Inputs != nil && len(s.Inputs) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Inputs", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Aggs != nil { + for i, v := range s.Aggs { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Aggs", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAggs sets the Aggs field's value. +func (s *Aggregate) SetAggs(v []*AggregateOperation) *Aggregate { + s.Aggs = v + return s +} + +// SetGroups sets the Groups field's value. +func (s *Aggregate) SetGroups(v [][]*string) *Aggregate { + s.Groups = v + return s +} + +// SetInputs sets the Inputs field's value. +func (s *Aggregate) SetInputs(v []*string) *Aggregate { + s.Inputs = v + return s +} + +// SetName sets the Name field's value. +func (s *Aggregate) SetName(v string) *Aggregate { + s.Name = &v + return s +} + +// Specifies the set of parameters needed to perform aggregation in the aggregate +// transform. +type AggregateOperation struct { + _ struct{} `type:"structure"` + + // Specifies the aggregation function to apply. 
+ // + // Possible aggregation functions include: avg countDistinct, count, first, + // last, kurtosis, max, min, skewness, stddev_samp, stddev_pop, sum, sumDistinct, + // var_samp, var_pop + // + // AggFunc is a required field + AggFunc *string `type:"string" required:"true" enum:"AggFunction"` + + // Specifies the column on the data set on which the aggregation function will + // be applied. + // + // Column is a required field + Column []*string `type:"list" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AggregateOperation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AggregateOperation) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AggregateOperation) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AggregateOperation"} + if s.AggFunc == nil { + invalidParams.Add(request.NewErrParamRequired("AggFunc")) + } + if s.Column == nil { + invalidParams.Add(request.NewErrParamRequired("Column")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAggFunc sets the AggFunc field's value. +func (s *AggregateOperation) SetAggFunc(v string) *AggregateOperation { + s.AggFunc = &v + return s +} + +// SetColumn sets the Column field's value. +func (s *AggregateOperation) SetColumn(v []*string) *AggregateOperation { + s.Column = v + return s +} + // A resource to be created or added already exists. type AlreadyExistsException struct { _ struct{} `type:"structure"` @@ -18986,6 +19159,220 @@ func (s *AlreadyExistsException) RequestID() string { return s.RespMetadata.RequestID } +// Specifies a transform that maps data property keys in the data source to +// data property keys in the data target. You can rename keys, modify the data +// types for keys, and choose which keys to drop from the dataset. +type ApplyMapping struct { + _ struct{} `type:"structure"` + + // The data inputs identified by their node names. + // + // Inputs is a required field + Inputs []*string `min:"1" type:"list" required:"true"` + + // Specifies the mapping of data property keys in the data source to data property + // keys in the data target. + // + // Mapping is a required field + Mapping []*Mapping `type:"list" required:"true"` + + // The name of the transform node. + // + // Name is a required field + Name *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ApplyMapping) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s ApplyMapping) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ApplyMapping) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ApplyMapping"} + if s.Inputs == nil { + invalidParams.Add(request.NewErrParamRequired("Inputs")) + } + if s.Inputs != nil && len(s.Inputs) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Inputs", 1)) + } + if s.Mapping == nil { + invalidParams.Add(request.NewErrParamRequired("Mapping")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetInputs sets the Inputs field's value. +func (s *ApplyMapping) SetInputs(v []*string) *ApplyMapping { + s.Inputs = v + return s +} + +// SetMapping sets the Mapping field's value. +func (s *ApplyMapping) SetMapping(v []*Mapping) *ApplyMapping { + s.Mapping = v + return s +} + +// SetName sets the Name field's value. +func (s *ApplyMapping) SetName(v string) *ApplyMapping { + s.Name = &v + return s +} + +// Specifies a connector to an Amazon Athena data source. +type AthenaConnectorSource struct { + _ struct{} `type:"structure"` + + // The name of the connection that is associated with the connector. + // + // ConnectionName is a required field + ConnectionName *string `type:"string" required:"true"` + + // The name of the table in the data source. + ConnectionTable *string `type:"string"` + + // The type of connection, such as marketplace.athena or custom.athena, designating + // a connection to an Amazon Athena data store. + // + // ConnectionType is a required field + ConnectionType *string `type:"string" required:"true"` + + // The name of a connector that assists with accessing the data store in Glue + // Studio. + // + // ConnectorName is a required field + ConnectorName *string `type:"string" required:"true"` + + // The name of the data source. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // Specifies the data schema for the custom Athena source. + OutputSchemas []*GlueSchema `type:"list"` + + // The name of the CloudWatch log group to read from. For example, /aws-glue/jobs/output. + // + // SchemaName is a required field + SchemaName *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AthenaConnectorSource) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AthenaConnectorSource) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid.
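// Illustrative sketch: an ApplyMapping node that renames a key and casts its
// type. It assumes the Mapping structure (defined elsewhere in this file)
// exposes FromPath, FromType, ToKey, and ToType; all names here are
// hypothetical.
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	m := &glue.Mapping{
		FromPath: []*string{aws.String("user_id")}, // key in the source
		FromType: aws.String("string"),
		ToKey:    aws.String("userId"), // renamed in the target
		ToType:   aws.String("long"),   // cast on the way through
	}

	node := (&glue.ApplyMapping{}).
		SetName("map1").
		SetInputs([]*string{aws.String("source1")}).
		SetMapping([]*glue.Mapping{m})

	_ = node.Validate() // Name, Inputs (>= 1), and Mapping are required
}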
+func (s *AthenaConnectorSource) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AthenaConnectorSource"} + if s.ConnectionName == nil { + invalidParams.Add(request.NewErrParamRequired("ConnectionName")) + } + if s.ConnectionType == nil { + invalidParams.Add(request.NewErrParamRequired("ConnectionType")) + } + if s.ConnectorName == nil { + invalidParams.Add(request.NewErrParamRequired("ConnectorName")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.SchemaName == nil { + invalidParams.Add(request.NewErrParamRequired("SchemaName")) + } + if s.OutputSchemas != nil { + for i, v := range s.OutputSchemas { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "OutputSchemas", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConnectionName sets the ConnectionName field's value. +func (s *AthenaConnectorSource) SetConnectionName(v string) *AthenaConnectorSource { + s.ConnectionName = &v + return s +} + +// SetConnectionTable sets the ConnectionTable field's value. +func (s *AthenaConnectorSource) SetConnectionTable(v string) *AthenaConnectorSource { + s.ConnectionTable = &v + return s +} + +// SetConnectionType sets the ConnectionType field's value. +func (s *AthenaConnectorSource) SetConnectionType(v string) *AthenaConnectorSource { + s.ConnectionType = &v + return s +} + +// SetConnectorName sets the ConnectorName field's value. +func (s *AthenaConnectorSource) SetConnectorName(v string) *AthenaConnectorSource { + s.ConnectorName = &v + return s +} + +// SetName sets the Name field's value. +func (s *AthenaConnectorSource) SetName(v string) *AthenaConnectorSource { + s.Name = &v + return s +} + +// SetOutputSchemas sets the OutputSchemas field's value. +func (s *AthenaConnectorSource) SetOutputSchemas(v []*GlueSchema) *AthenaConnectorSource { + s.OutputSchemas = v + return s +} + +// SetSchemaName sets the SchemaName field's value. +func (s *AthenaConnectorSource) SetSchemaName(v string) *AthenaConnectorSource { + s.SchemaName = &v + return s +} + // A structure containing information for audit. type AuditContext struct { _ struct{} `type:"structure"` @@ -19096,6 +19483,100 @@ func (s *BackfillError) SetPartitions(v []*PartitionValueList) *BackfillError { return s } +// Specifies a target that uses a Glue Data Catalog table. +type BasicCatalogTarget struct { + _ struct{} `type:"structure"` + + // The database that contains the table you want to use as the target. This + // database must already exist in the Data Catalog. + // + // Database is a required field + Database *string `type:"string" required:"true"` + + // The nodes that are inputs to the data target. + // + // Inputs is a required field + Inputs []*string `min:"1" type:"list" required:"true"` + + // The name of your data target. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // The table that defines the schema of your output data. This table must already + // exist in the Data Catalog. + // + // Table is a required field + Table *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
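// Illustrative sketch: an AthenaConnectorSource. The connection, connector,
// and table names are hypothetical placeholders; "marketplace.athena" is one
// of the connection types named in the field docs above.
package main

import "github.com/aws/aws-sdk-go/service/glue"

func main() {
	src := (&glue.AthenaConnectorSource{}).
		SetName("athena_src").
		SetConnectionName("my-athena-connection").
		SetConnectorName("my-athena-connector").
		SetConnectionType("marketplace.athena"). // or "custom.athena"
		SetConnectionTable("events").
		SetSchemaName("/aws-glue/jobs/output") // a CloudWatch log group, per the field doc

	_ = src.Validate() // everything except ConnectionTable and OutputSchemas is required
}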
+func (s BasicCatalogTarget) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BasicCatalogTarget) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BasicCatalogTarget) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BasicCatalogTarget"} + if s.Database == nil { + invalidParams.Add(request.NewErrParamRequired("Database")) + } + if s.Inputs == nil { + invalidParams.Add(request.NewErrParamRequired("Inputs")) + } + if s.Inputs != nil && len(s.Inputs) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Inputs", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Table == nil { + invalidParams.Add(request.NewErrParamRequired("Table")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDatabase sets the Database field's value. +func (s *BasicCatalogTarget) SetDatabase(v string) *BasicCatalogTarget { + s.Database = &v + return s +} + +// SetInputs sets the Inputs field's value. +func (s *BasicCatalogTarget) SetInputs(v []*string) *BasicCatalogTarget { + s.Inputs = v + return s +} + +// SetName sets the Name field's value. +func (s *BasicCatalogTarget) SetName(v string) *BasicCatalogTarget { + s.Name = &v + return s +} + +// SetTable sets the Table field's value. +func (s *BasicCatalogTarget) SetTable(v string) *BasicCatalogTarget { + s.Table = &v + return s +} + type BatchCreatePartitionInput struct { _ struct{} `type:"structure"` @@ -21719,6 +22200,355 @@ func (s *CatalogImportStatus) SetImportedBy(v string) *CatalogImportStatus { return s } +// Specifies an Apache Kafka data store in the Data Catalog. +type CatalogKafkaSource struct { + _ struct{} `type:"structure"` + + // Specifies options related to data preview for viewing a sample of your data. + DataPreviewOptions *StreamingDataPreviewOptions `type:"structure"` + + // The name of the database to read from. + // + // Database is a required field + Database *string `type:"string" required:"true"` + + // Whether to automatically determine the schema from the incoming data. + DetectSchema *bool `type:"boolean"` + + // The name of the data store. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // Specifies the streaming options. + StreamingOptions *KafkaStreamingSourceOptions `type:"structure"` + + // The name of the table in the database to read from. + // + // Table is a required field + Table *string `type:"string" required:"true"` + + // The amount of time to spend processing each micro batch. + WindowSize *int64 `type:"integer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CatalogKafkaSource) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
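// Illustrative sketch: a BasicCatalogTarget writing to an existing Data
// Catalog table. The database and table names are hypothetical; per the field
// docs above, both must already exist in the Data Catalog.
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	tgt := (&glue.BasicCatalogTarget{}).
		SetName("catalog_tgt").
		SetInputs([]*string{aws.String("map1")}). // name of a hypothetical upstream node
		SetDatabase("sales_db").
		SetTable("orders")

	_ = tgt.Validate() // all four fields are required
}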
+func (s CatalogKafkaSource) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CatalogKafkaSource) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CatalogKafkaSource"} + if s.Database == nil { + invalidParams.Add(request.NewErrParamRequired("Database")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Table == nil { + invalidParams.Add(request.NewErrParamRequired("Table")) + } + if s.DataPreviewOptions != nil { + if err := s.DataPreviewOptions.Validate(); err != nil { + invalidParams.AddNested("DataPreviewOptions", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDataPreviewOptions sets the DataPreviewOptions field's value. +func (s *CatalogKafkaSource) SetDataPreviewOptions(v *StreamingDataPreviewOptions) *CatalogKafkaSource { + s.DataPreviewOptions = v + return s +} + +// SetDatabase sets the Database field's value. +func (s *CatalogKafkaSource) SetDatabase(v string) *CatalogKafkaSource { + s.Database = &v + return s +} + +// SetDetectSchema sets the DetectSchema field's value. +func (s *CatalogKafkaSource) SetDetectSchema(v bool) *CatalogKafkaSource { + s.DetectSchema = &v + return s +} + +// SetName sets the Name field's value. +func (s *CatalogKafkaSource) SetName(v string) *CatalogKafkaSource { + s.Name = &v + return s +} + +// SetStreamingOptions sets the StreamingOptions field's value. +func (s *CatalogKafkaSource) SetStreamingOptions(v *KafkaStreamingSourceOptions) *CatalogKafkaSource { + s.StreamingOptions = v + return s +} + +// SetTable sets the Table field's value. +func (s *CatalogKafkaSource) SetTable(v string) *CatalogKafkaSource { + s.Table = &v + return s +} + +// SetWindowSize sets the WindowSize field's value. +func (s *CatalogKafkaSource) SetWindowSize(v int64) *CatalogKafkaSource { + s.WindowSize = &v + return s +} + +// Specifies a Kinesis data source in the Glue Data Catalog. +type CatalogKinesisSource struct { + _ struct{} `type:"structure"` + + // Additional options for data preview. + DataPreviewOptions *StreamingDataPreviewOptions `type:"structure"` + + // The name of the database to read from. + // + // Database is a required field + Database *string `type:"string" required:"true"` + + // Whether to automatically determine the schema from the incoming data. + DetectSchema *bool `type:"boolean"` + + // The name of the data source. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // Additional options for the Kinesis streaming data source. + StreamingOptions *KinesisStreamingSourceOptions `type:"structure"` + + // The name of the table in the database to read from. + // + // Table is a required field + Table *string `type:"string" required:"true"` + + // The amount of time to spend processing each micro batch. + WindowSize *int64 `type:"integer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CatalogKinesisSource) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s CatalogKinesisSource) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CatalogKinesisSource) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CatalogKinesisSource"} + if s.Database == nil { + invalidParams.Add(request.NewErrParamRequired("Database")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Table == nil { + invalidParams.Add(request.NewErrParamRequired("Table")) + } + if s.DataPreviewOptions != nil { + if err := s.DataPreviewOptions.Validate(); err != nil { + invalidParams.AddNested("DataPreviewOptions", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDataPreviewOptions sets the DataPreviewOptions field's value. +func (s *CatalogKinesisSource) SetDataPreviewOptions(v *StreamingDataPreviewOptions) *CatalogKinesisSource { + s.DataPreviewOptions = v + return s +} + +// SetDatabase sets the Database field's value. +func (s *CatalogKinesisSource) SetDatabase(v string) *CatalogKinesisSource { + s.Database = &v + return s +} + +// SetDetectSchema sets the DetectSchema field's value. +func (s *CatalogKinesisSource) SetDetectSchema(v bool) *CatalogKinesisSource { + s.DetectSchema = &v + return s +} + +// SetName sets the Name field's value. +func (s *CatalogKinesisSource) SetName(v string) *CatalogKinesisSource { + s.Name = &v + return s +} + +// SetStreamingOptions sets the StreamingOptions field's value. +func (s *CatalogKinesisSource) SetStreamingOptions(v *KinesisStreamingSourceOptions) *CatalogKinesisSource { + s.StreamingOptions = v + return s +} + +// SetTable sets the Table field's value. +func (s *CatalogKinesisSource) SetTable(v string) *CatalogKinesisSource { + s.Table = &v + return s +} + +// SetWindowSize sets the WindowSize field's value. +func (s *CatalogKinesisSource) SetWindowSize(v int64) *CatalogKinesisSource { + s.WindowSize = &v + return s +} + +// A policy that specifies update behavior for the crawler. +type CatalogSchemaChangePolicy struct { + _ struct{} `type:"structure"` + + // Whether to use the specified update behavior when the crawler finds a changed + // schema. + EnableUpdateCatalog *bool `type:"boolean"` + + // The update behavior when the crawler finds a changed schema. + UpdateBehavior *string `type:"string" enum:"UpdateCatalogBehavior"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CatalogSchemaChangePolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CatalogSchemaChangePolicy) GoString() string { + return s.String() +} + +// SetEnableUpdateCatalog sets the EnableUpdateCatalog field's value. +func (s *CatalogSchemaChangePolicy) SetEnableUpdateCatalog(v bool) *CatalogSchemaChangePolicy { + s.EnableUpdateCatalog = &v + return s +} + +// SetUpdateBehavior sets the UpdateBehavior field's value. 
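// Illustrative sketch: a streaming CatalogKafkaSource plus a
// CatalogSchemaChangePolicy. Database and table names are hypothetical, and
// "UPDATE_IN_DATABASE" is assumed to be a valid UpdateCatalogBehavior enum
// value. The same shape works for CatalogKinesisSource.
package main

import "github.com/aws/aws-sdk-go/service/glue"

func main() {
	src := (&glue.CatalogKafkaSource{}).
		SetName("kafka_src").
		SetDatabase("streaming_db").
		SetTable("clickstream").
		SetDetectSchema(true). // infer the schema from incoming records
		SetWindowSize(100)     // time to spend processing each micro batch
	_ = src.Validate()         // Name, Database, and Table are required

	policy := (&glue.CatalogSchemaChangePolicy{}).
		SetEnableUpdateCatalog(true).
		SetUpdateBehavior("UPDATE_IN_DATABASE")
	_ = policy
}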
+func (s *CatalogSchemaChangePolicy) SetUpdateBehavior(v string) *CatalogSchemaChangePolicy { + s.UpdateBehavior = &v + return s +} + +// Specifies a data store in the Glue Data Catalog. +type CatalogSource struct { + _ struct{} `type:"structure"` + + // The name of the database to read from. + // + // Database is a required field + Database *string `type:"string" required:"true"` + + // The name of the data store. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // The name of the table in the database to read from. + // + // Table is a required field + Table *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CatalogSource) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CatalogSource) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CatalogSource) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CatalogSource"} + if s.Database == nil { + invalidParams.Add(request.NewErrParamRequired("Database")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Table == nil { + invalidParams.Add(request.NewErrParamRequired("Table")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDatabase sets the Database field's value. +func (s *CatalogSource) SetDatabase(v string) *CatalogSource { + s.Database = &v + return s +} + +// SetName sets the Name field's value. +func (s *CatalogSource) SetName(v string) *CatalogSource { + s.Name = &v + return s +} + +// SetTable sets the Table field's value. +func (s *CatalogSource) SetTable(v string) *CatalogSource { + s.Table = &v + return s +} + // Specifies a Glue Data Catalog target. type CatalogTarget struct { _ struct{} `type:"structure"` @@ -22009,6 +22839,764 @@ func (s *CloudWatchEncryption) SetKmsKeyArn(v string) *CloudWatchEncryption { return s } +// CodeGenConfigurationNode enumerates all valid Node types. One and only one +// of its member variables can be populated. +type CodeGenConfigurationNode struct { + _ struct{} `type:"structure"` + + // Specifies a transform that groups rows by chosen fields and computes the + // aggregated value by a specified function. + Aggregate *Aggregate `type:"structure"` + + // Specifies a transform that maps data property keys in the data source to + // data property keys in the data target. You can rename keys, modify the data + // types for keys, and choose which keys to drop from the dataset. + ApplyMapping *ApplyMapping `type:"structure"` + + // Specifies a connector to an Amazon Athena data source. + AthenaConnectorSource *AthenaConnectorSource `type:"structure"` + + // Specifies an Apache Kafka data store in the Data Catalog. + CatalogKafkaSource *CatalogKafkaSource `type:"structure"` + + // Specifies a Kinesis data source in the Glue Data Catalog. + CatalogKinesisSource *CatalogKinesisSource `type:"structure"` + + // Specifies a data store in the Glue Data Catalog.
+ CatalogSource *CatalogSource `type:"structure"` + + // Specifies a target that uses a Glue Data Catalog table. + CatalogTarget *BasicCatalogTarget `type:"structure"` + + // Specifies a transform that uses custom code you provide to perform the data + // transformation. The output is a collection of DynamicFrames. + CustomCode *CustomCode `type:"structure"` + + // Specifies an Apache Kafka data store. + DirectKafkaSource *DirectKafkaSource `type:"structure"` + + // Specifies a direct Amazon Kinesis data source. + DirectKinesisSource *DirectKinesisSource `type:"structure"` + + // Specifies a transform that removes rows of repeating data from a data set. + DropDuplicates *DropDuplicates `type:"structure"` + + // Specifies a transform that chooses the data property keys that you want to + // drop. + DropFields *DropFields `type:"structure"` + + // Specifies a transform that removes columns from the dataset if all values + // in the column are 'null'. By default, Glue Studio will recognize null objects, + // but some values such as empty strings, strings that are "null", -1 integers, + // or other placeholders such as zeros, are not automatically recognized as + // nulls. + DropNullFields *DropNullFields `type:"structure"` + + // Specifies a DynamoDB data source in the Glue Data Catalog. + DynamoDBCatalogSource *DynamoDBCatalogSource `type:"structure"` + + // Specifies a transform that locates records in the dataset that have missing + // values and adds a new field with a value determined by imputation. The input + // data set is used to train the machine learning model that determines what + // the missing value should be. + FillMissingValues *FillMissingValues `type:"structure"` + + // Specifies a transform that splits a dataset into two, based on a filter condition. + Filter *Filter `type:"structure"` + + // Specifies a data source in a governed Data Catalog. + GovernedCatalogSource *GovernedCatalogSource `type:"structure"` + + // Specifies a data target that writes to a governed catalog. + GovernedCatalogTarget *GovernedCatalogTarget `type:"structure"` + + // Specifies a connector to a JDBC data source. + JDBCConnectorSource *JDBCConnectorSource `type:"structure"` + + // Specifies a data target that writes to Amazon S3 in Apache Parquet columnar + // storage. + JDBCConnectorTarget *JDBCConnectorTarget `type:"structure"` + + // Specifies a transform that joins two datasets into one dataset using a comparison + // phrase on the specified data property keys. You can use inner, outer, left, + // right, left semi, and left anti joins. + Join *Join `type:"structure"` + + // Specifies a transform that merges a DynamicFrame with a staging DynamicFrame + // based on the specified primary keys to identify records. Duplicate records + // (records with the same primary keys) are not de-duplicated. + Merge *Merge `type:"structure"` + + // Specifies a Microsoft SQL Server data source in the Glue Data Catalog. + MicrosoftSQLServerCatalogSource *MicrosoftSQLServerCatalogSource `type:"structure"` + + // Specifies a target that uses Microsoft SQL. + MicrosoftSQLServerCatalogTarget *MicrosoftSQLServerCatalogTarget `type:"structure"` + + // Specifies a MySQL data source in the Glue Data Catalog. + MySQLCatalogSource *MySQLCatalogSource `type:"structure"` + + // Specifies a target that uses MySQL. + MySQLCatalogTarget *MySQLCatalogTarget `type:"structure"` + + // Specifies an Oracle data source in the Glue Data Catalog.
+ OracleSQLCatalogSource *OracleSQLCatalogSource `type:"structure"` + + // Specifies a target that uses Oracle SQL. + OracleSQLCatalogTarget *OracleSQLCatalogTarget `type:"structure"` + + // Specifies a transform that identifies, removes, or masks PII data. + PIIDetection *PIIDetection `type:"structure"` + + // Specifies a PostgreSQL data source in the Glue Data Catalog. + PostgreSQLCatalogSource *PostgreSQLCatalogSource `type:"structure"` + + // Specifies a target that uses PostgreSQL. + PostgreSQLCatalogTarget *PostgreSQLCatalogTarget `type:"structure"` + + // Specifies an Amazon Redshift data store. + RedshiftSource *RedshiftSource `type:"structure"` + + // Specifies a target that uses Amazon Redshift. + RedshiftTarget *RedshiftTarget `type:"structure"` + + // Specifies a relational database data source in the Glue Data Catalog. + RelationalCatalogSource *RelationalCatalogSource `type:"structure"` + + // Specifies a transform that renames a single data property key. + RenameField *RenameField `type:"structure"` + + // Specifies an Amazon S3 data store in the Glue Data Catalog. + S3CatalogSource *S3CatalogSource `type:"structure"` + + // Specifies a data target that writes to Amazon S3 using the Glue Data Catalog. + S3CatalogTarget *S3CatalogTarget `type:"structure"` + + // Specifies a comma-separated value (CSV) data store stored in Amazon S3. + S3CsvSource *S3CsvSource `type:"structure"` + + // Specifies a data target that writes to Amazon S3. + S3DirectTarget *S3DirectTarget `type:"structure"` + + // Specifies a data target that writes to Amazon S3 in Apache Parquet columnar + // storage. + S3GlueParquetTarget *S3GlueParquetTarget `type:"structure"` + + // Specifies a JSON data store stored in Amazon S3. + S3JsonSource *S3JsonSource `type:"structure"` + + // Specifies an Apache Parquet data store stored in Amazon S3. + S3ParquetSource *S3ParquetSource `type:"structure"` + + // Specifies a transform that chooses the data property keys that you want to + // keep. + SelectFields *SelectFields `type:"structure"` + + // Specifies a transform that chooses one DynamicFrame from a collection of + // DynamicFrames. The output is the selected DynamicFrame. + SelectFromCollection *SelectFromCollection `type:"structure"` + + // Specifies a connector to an Apache Spark data source. + SparkConnectorSource *SparkConnectorSource `type:"structure"` + + // Specifies a target that uses an Apache Spark connector. + SparkConnectorTarget *SparkConnectorTarget `type:"structure"` + + // Specifies a transform where you enter a SQL query using Spark SQL syntax + // to transform the data. The output is a single DynamicFrame. + SparkSQL *SparkSQL `type:"structure"` + + // Specifies a transform that writes samples of the data to an Amazon S3 bucket. + Spigot *Spigot `type:"structure"` + + // Specifies a transform that splits data property keys into two DynamicFrames. + // The output is a collection of DynamicFrames: one with selected data property + // keys, and one with the remaining data property keys. + SplitFields *SplitFields `type:"structure"` + + // Specifies a transform that combines the rows from two or more datasets into + // a single result. + Union *Union `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive".
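// Illustrative sketch: per the doc comment above, each CodeGenConfigurationNode
// wraps exactly one source, transform, or target; populating more than one
// member of a single node is invalid. All names are hypothetical.
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	// One node, one member: a Data Catalog source.
	srcNode := (&glue.CodeGenConfigurationNode{}).
		SetCatalogSource((&glue.CatalogSource{}).
			SetName("src").SetDatabase("sales_db").SetTable("orders"))

	// A second node holds the target and points back at the source.
	tgtNode := (&glue.CodeGenConfigurationNode{}).
		SetCatalogTarget((&glue.BasicCatalogTarget{}).
			SetName("tgt").
			SetInputs([]*string{aws.String("src")}).
			SetDatabase("sales_db").SetTable("orders_copy"))

	// Validate fans out to whichever member is populated.
	_ = srcNode.Validate()
	_ = tgtNode.Validate()
}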
+func (s CodeGenConfigurationNode) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CodeGenConfigurationNode) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CodeGenConfigurationNode) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CodeGenConfigurationNode"} + if s.Aggregate != nil { + if err := s.Aggregate.Validate(); err != nil { + invalidParams.AddNested("Aggregate", err.(request.ErrInvalidParams)) + } + } + if s.ApplyMapping != nil { + if err := s.ApplyMapping.Validate(); err != nil { + invalidParams.AddNested("ApplyMapping", err.(request.ErrInvalidParams)) + } + } + if s.AthenaConnectorSource != nil { + if err := s.AthenaConnectorSource.Validate(); err != nil { + invalidParams.AddNested("AthenaConnectorSource", err.(request.ErrInvalidParams)) + } + } + if s.CatalogKafkaSource != nil { + if err := s.CatalogKafkaSource.Validate(); err != nil { + invalidParams.AddNested("CatalogKafkaSource", err.(request.ErrInvalidParams)) + } + } + if s.CatalogKinesisSource != nil { + if err := s.CatalogKinesisSource.Validate(); err != nil { + invalidParams.AddNested("CatalogKinesisSource", err.(request.ErrInvalidParams)) + } + } + if s.CatalogSource != nil { + if err := s.CatalogSource.Validate(); err != nil { + invalidParams.AddNested("CatalogSource", err.(request.ErrInvalidParams)) + } + } + if s.CatalogTarget != nil { + if err := s.CatalogTarget.Validate(); err != nil { + invalidParams.AddNested("CatalogTarget", err.(request.ErrInvalidParams)) + } + } + if s.CustomCode != nil { + if err := s.CustomCode.Validate(); err != nil { + invalidParams.AddNested("CustomCode", err.(request.ErrInvalidParams)) + } + } + if s.DirectKafkaSource != nil { + if err := s.DirectKafkaSource.Validate(); err != nil { + invalidParams.AddNested("DirectKafkaSource", err.(request.ErrInvalidParams)) + } + } + if s.DirectKinesisSource != nil { + if err := s.DirectKinesisSource.Validate(); err != nil { + invalidParams.AddNested("DirectKinesisSource", err.(request.ErrInvalidParams)) + } + } + if s.DropDuplicates != nil { + if err := s.DropDuplicates.Validate(); err != nil { + invalidParams.AddNested("DropDuplicates", err.(request.ErrInvalidParams)) + } + } + if s.DropFields != nil { + if err := s.DropFields.Validate(); err != nil { + invalidParams.AddNested("DropFields", err.(request.ErrInvalidParams)) + } + } + if s.DropNullFields != nil { + if err := s.DropNullFields.Validate(); err != nil { + invalidParams.AddNested("DropNullFields", err.(request.ErrInvalidParams)) + } + } + if s.DynamoDBCatalogSource != nil { + if err := s.DynamoDBCatalogSource.Validate(); err != nil { + invalidParams.AddNested("DynamoDBCatalogSource", err.(request.ErrInvalidParams)) + } + } + if s.FillMissingValues != nil { + if err := s.FillMissingValues.Validate(); err != nil { + invalidParams.AddNested("FillMissingValues", err.(request.ErrInvalidParams)) + } + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } + } + if s.GovernedCatalogSource != nil { + if err := s.GovernedCatalogSource.Validate(); err != nil { + invalidParams.AddNested("GovernedCatalogSource", err.(request.ErrInvalidParams)) + } 
+ } + if s.GovernedCatalogTarget != nil { + if err := s.GovernedCatalogTarget.Validate(); err != nil { + invalidParams.AddNested("GovernedCatalogTarget", err.(request.ErrInvalidParams)) + } + } + if s.JDBCConnectorSource != nil { + if err := s.JDBCConnectorSource.Validate(); err != nil { + invalidParams.AddNested("JDBCConnectorSource", err.(request.ErrInvalidParams)) + } + } + if s.JDBCConnectorTarget != nil { + if err := s.JDBCConnectorTarget.Validate(); err != nil { + invalidParams.AddNested("JDBCConnectorTarget", err.(request.ErrInvalidParams)) + } + } + if s.Join != nil { + if err := s.Join.Validate(); err != nil { + invalidParams.AddNested("Join", err.(request.ErrInvalidParams)) + } + } + if s.Merge != nil { + if err := s.Merge.Validate(); err != nil { + invalidParams.AddNested("Merge", err.(request.ErrInvalidParams)) + } + } + if s.MicrosoftSQLServerCatalogSource != nil { + if err := s.MicrosoftSQLServerCatalogSource.Validate(); err != nil { + invalidParams.AddNested("MicrosoftSQLServerCatalogSource", err.(request.ErrInvalidParams)) + } + } + if s.MicrosoftSQLServerCatalogTarget != nil { + if err := s.MicrosoftSQLServerCatalogTarget.Validate(); err != nil { + invalidParams.AddNested("MicrosoftSQLServerCatalogTarget", err.(request.ErrInvalidParams)) + } + } + if s.MySQLCatalogSource != nil { + if err := s.MySQLCatalogSource.Validate(); err != nil { + invalidParams.AddNested("MySQLCatalogSource", err.(request.ErrInvalidParams)) + } + } + if s.MySQLCatalogTarget != nil { + if err := s.MySQLCatalogTarget.Validate(); err != nil { + invalidParams.AddNested("MySQLCatalogTarget", err.(request.ErrInvalidParams)) + } + } + if s.OracleSQLCatalogSource != nil { + if err := s.OracleSQLCatalogSource.Validate(); err != nil { + invalidParams.AddNested("OracleSQLCatalogSource", err.(request.ErrInvalidParams)) + } + } + if s.OracleSQLCatalogTarget != nil { + if err := s.OracleSQLCatalogTarget.Validate(); err != nil { + invalidParams.AddNested("OracleSQLCatalogTarget", err.(request.ErrInvalidParams)) + } + } + if s.PIIDetection != nil { + if err := s.PIIDetection.Validate(); err != nil { + invalidParams.AddNested("PIIDetection", err.(request.ErrInvalidParams)) + } + } + if s.PostgreSQLCatalogSource != nil { + if err := s.PostgreSQLCatalogSource.Validate(); err != nil { + invalidParams.AddNested("PostgreSQLCatalogSource", err.(request.ErrInvalidParams)) + } + } + if s.PostgreSQLCatalogTarget != nil { + if err := s.PostgreSQLCatalogTarget.Validate(); err != nil { + invalidParams.AddNested("PostgreSQLCatalogTarget", err.(request.ErrInvalidParams)) + } + } + if s.RedshiftSource != nil { + if err := s.RedshiftSource.Validate(); err != nil { + invalidParams.AddNested("RedshiftSource", err.(request.ErrInvalidParams)) + } + } + if s.RedshiftTarget != nil { + if err := s.RedshiftTarget.Validate(); err != nil { + invalidParams.AddNested("RedshiftTarget", err.(request.ErrInvalidParams)) + } + } + if s.RelationalCatalogSource != nil { + if err := s.RelationalCatalogSource.Validate(); err != nil { + invalidParams.AddNested("RelationalCatalogSource", err.(request.ErrInvalidParams)) + } + } + if s.RenameField != nil { + if err := s.RenameField.Validate(); err != nil { + invalidParams.AddNested("RenameField", err.(request.ErrInvalidParams)) + } + } + if s.S3CatalogSource != nil { + if err := s.S3CatalogSource.Validate(); err != nil { + invalidParams.AddNested("S3CatalogSource", err.(request.ErrInvalidParams)) + } + } + if s.S3CatalogTarget != nil { + if err := s.S3CatalogTarget.Validate(); err != nil { + 
invalidParams.AddNested("S3CatalogTarget", err.(request.ErrInvalidParams)) + } + } + if s.S3CsvSource != nil { + if err := s.S3CsvSource.Validate(); err != nil { + invalidParams.AddNested("S3CsvSource", err.(request.ErrInvalidParams)) + } + } + if s.S3DirectTarget != nil { + if err := s.S3DirectTarget.Validate(); err != nil { + invalidParams.AddNested("S3DirectTarget", err.(request.ErrInvalidParams)) + } + } + if s.S3GlueParquetTarget != nil { + if err := s.S3GlueParquetTarget.Validate(); err != nil { + invalidParams.AddNested("S3GlueParquetTarget", err.(request.ErrInvalidParams)) + } + } + if s.S3JsonSource != nil { + if err := s.S3JsonSource.Validate(); err != nil { + invalidParams.AddNested("S3JsonSource", err.(request.ErrInvalidParams)) + } + } + if s.S3ParquetSource != nil { + if err := s.S3ParquetSource.Validate(); err != nil { + invalidParams.AddNested("S3ParquetSource", err.(request.ErrInvalidParams)) + } + } + if s.SelectFields != nil { + if err := s.SelectFields.Validate(); err != nil { + invalidParams.AddNested("SelectFields", err.(request.ErrInvalidParams)) + } + } + if s.SelectFromCollection != nil { + if err := s.SelectFromCollection.Validate(); err != nil { + invalidParams.AddNested("SelectFromCollection", err.(request.ErrInvalidParams)) + } + } + if s.SparkConnectorSource != nil { + if err := s.SparkConnectorSource.Validate(); err != nil { + invalidParams.AddNested("SparkConnectorSource", err.(request.ErrInvalidParams)) + } + } + if s.SparkConnectorTarget != nil { + if err := s.SparkConnectorTarget.Validate(); err != nil { + invalidParams.AddNested("SparkConnectorTarget", err.(request.ErrInvalidParams)) + } + } + if s.SparkSQL != nil { + if err := s.SparkSQL.Validate(); err != nil { + invalidParams.AddNested("SparkSQL", err.(request.ErrInvalidParams)) + } + } + if s.Spigot != nil { + if err := s.Spigot.Validate(); err != nil { + invalidParams.AddNested("Spigot", err.(request.ErrInvalidParams)) + } + } + if s.SplitFields != nil { + if err := s.SplitFields.Validate(); err != nil { + invalidParams.AddNested("SplitFields", err.(request.ErrInvalidParams)) + } + } + if s.Union != nil { + if err := s.Union.Validate(); err != nil { + invalidParams.AddNested("Union", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAggregate sets the Aggregate field's value. +func (s *CodeGenConfigurationNode) SetAggregate(v *Aggregate) *CodeGenConfigurationNode { + s.Aggregate = v + return s +} + +// SetApplyMapping sets the ApplyMapping field's value. +func (s *CodeGenConfigurationNode) SetApplyMapping(v *ApplyMapping) *CodeGenConfigurationNode { + s.ApplyMapping = v + return s +} + +// SetAthenaConnectorSource sets the AthenaConnectorSource field's value. +func (s *CodeGenConfigurationNode) SetAthenaConnectorSource(v *AthenaConnectorSource) *CodeGenConfigurationNode { + s.AthenaConnectorSource = v + return s +} + +// SetCatalogKafkaSource sets the CatalogKafkaSource field's value. +func (s *CodeGenConfigurationNode) SetCatalogKafkaSource(v *CatalogKafkaSource) *CodeGenConfigurationNode { + s.CatalogKafkaSource = v + return s +} + +// SetCatalogKinesisSource sets the CatalogKinesisSource field's value. +func (s *CodeGenConfigurationNode) SetCatalogKinesisSource(v *CatalogKinesisSource) *CodeGenConfigurationNode { + s.CatalogKinesisSource = v + return s +} + +// SetCatalogSource sets the CatalogSource field's value. 
+func (s *CodeGenConfigurationNode) SetCatalogSource(v *CatalogSource) *CodeGenConfigurationNode { + s.CatalogSource = v + return s +} + +// SetCatalogTarget sets the CatalogTarget field's value. +func (s *CodeGenConfigurationNode) SetCatalogTarget(v *BasicCatalogTarget) *CodeGenConfigurationNode { + s.CatalogTarget = v + return s +} + +// SetCustomCode sets the CustomCode field's value. +func (s *CodeGenConfigurationNode) SetCustomCode(v *CustomCode) *CodeGenConfigurationNode { + s.CustomCode = v + return s +} + +// SetDirectKafkaSource sets the DirectKafkaSource field's value. +func (s *CodeGenConfigurationNode) SetDirectKafkaSource(v *DirectKafkaSource) *CodeGenConfigurationNode { + s.DirectKafkaSource = v + return s +} + +// SetDirectKinesisSource sets the DirectKinesisSource field's value. +func (s *CodeGenConfigurationNode) SetDirectKinesisSource(v *DirectKinesisSource) *CodeGenConfigurationNode { + s.DirectKinesisSource = v + return s +} + +// SetDropDuplicates sets the DropDuplicates field's value. +func (s *CodeGenConfigurationNode) SetDropDuplicates(v *DropDuplicates) *CodeGenConfigurationNode { + s.DropDuplicates = v + return s +} + +// SetDropFields sets the DropFields field's value. +func (s *CodeGenConfigurationNode) SetDropFields(v *DropFields) *CodeGenConfigurationNode { + s.DropFields = v + return s +} + +// SetDropNullFields sets the DropNullFields field's value. +func (s *CodeGenConfigurationNode) SetDropNullFields(v *DropNullFields) *CodeGenConfigurationNode { + s.DropNullFields = v + return s +} + +// SetDynamoDBCatalogSource sets the DynamoDBCatalogSource field's value. +func (s *CodeGenConfigurationNode) SetDynamoDBCatalogSource(v *DynamoDBCatalogSource) *CodeGenConfigurationNode { + s.DynamoDBCatalogSource = v + return s +} + +// SetFillMissingValues sets the FillMissingValues field's value. +func (s *CodeGenConfigurationNode) SetFillMissingValues(v *FillMissingValues) *CodeGenConfigurationNode { + s.FillMissingValues = v + return s +} + +// SetFilter sets the Filter field's value. +func (s *CodeGenConfigurationNode) SetFilter(v *Filter) *CodeGenConfigurationNode { + s.Filter = v + return s +} + +// SetGovernedCatalogSource sets the GovernedCatalogSource field's value. +func (s *CodeGenConfigurationNode) SetGovernedCatalogSource(v *GovernedCatalogSource) *CodeGenConfigurationNode { + s.GovernedCatalogSource = v + return s +} + +// SetGovernedCatalogTarget sets the GovernedCatalogTarget field's value. +func (s *CodeGenConfigurationNode) SetGovernedCatalogTarget(v *GovernedCatalogTarget) *CodeGenConfigurationNode { + s.GovernedCatalogTarget = v + return s +} + +// SetJDBCConnectorSource sets the JDBCConnectorSource field's value. +func (s *CodeGenConfigurationNode) SetJDBCConnectorSource(v *JDBCConnectorSource) *CodeGenConfigurationNode { + s.JDBCConnectorSource = v + return s +} + +// SetJDBCConnectorTarget sets the JDBCConnectorTarget field's value. +func (s *CodeGenConfigurationNode) SetJDBCConnectorTarget(v *JDBCConnectorTarget) *CodeGenConfigurationNode { + s.JDBCConnectorTarget = v + return s +} + +// SetJoin sets the Join field's value. +func (s *CodeGenConfigurationNode) SetJoin(v *Join) *CodeGenConfigurationNode { + s.Join = v + return s +} + +// SetMerge sets the Merge field's value. +func (s *CodeGenConfigurationNode) SetMerge(v *Merge) *CodeGenConfigurationNode { + s.Merge = v + return s +} + +// SetMicrosoftSQLServerCatalogSource sets the MicrosoftSQLServerCatalogSource field's value. 
+func (s *CodeGenConfigurationNode) SetMicrosoftSQLServerCatalogSource(v *MicrosoftSQLServerCatalogSource) *CodeGenConfigurationNode { + s.MicrosoftSQLServerCatalogSource = v + return s +} + +// SetMicrosoftSQLServerCatalogTarget sets the MicrosoftSQLServerCatalogTarget field's value. +func (s *CodeGenConfigurationNode) SetMicrosoftSQLServerCatalogTarget(v *MicrosoftSQLServerCatalogTarget) *CodeGenConfigurationNode { + s.MicrosoftSQLServerCatalogTarget = v + return s +} + +// SetMySQLCatalogSource sets the MySQLCatalogSource field's value. +func (s *CodeGenConfigurationNode) SetMySQLCatalogSource(v *MySQLCatalogSource) *CodeGenConfigurationNode { + s.MySQLCatalogSource = v + return s +} + +// SetMySQLCatalogTarget sets the MySQLCatalogTarget field's value. +func (s *CodeGenConfigurationNode) SetMySQLCatalogTarget(v *MySQLCatalogTarget) *CodeGenConfigurationNode { + s.MySQLCatalogTarget = v + return s +} + +// SetOracleSQLCatalogSource sets the OracleSQLCatalogSource field's value. +func (s *CodeGenConfigurationNode) SetOracleSQLCatalogSource(v *OracleSQLCatalogSource) *CodeGenConfigurationNode { + s.OracleSQLCatalogSource = v + return s +} + +// SetOracleSQLCatalogTarget sets the OracleSQLCatalogTarget field's value. +func (s *CodeGenConfigurationNode) SetOracleSQLCatalogTarget(v *OracleSQLCatalogTarget) *CodeGenConfigurationNode { + s.OracleSQLCatalogTarget = v + return s +} + +// SetPIIDetection sets the PIIDetection field's value. +func (s *CodeGenConfigurationNode) SetPIIDetection(v *PIIDetection) *CodeGenConfigurationNode { + s.PIIDetection = v + return s +} + +// SetPostgreSQLCatalogSource sets the PostgreSQLCatalogSource field's value. +func (s *CodeGenConfigurationNode) SetPostgreSQLCatalogSource(v *PostgreSQLCatalogSource) *CodeGenConfigurationNode { + s.PostgreSQLCatalogSource = v + return s +} + +// SetPostgreSQLCatalogTarget sets the PostgreSQLCatalogTarget field's value. +func (s *CodeGenConfigurationNode) SetPostgreSQLCatalogTarget(v *PostgreSQLCatalogTarget) *CodeGenConfigurationNode { + s.PostgreSQLCatalogTarget = v + return s +} + +// SetRedshiftSource sets the RedshiftSource field's value. +func (s *CodeGenConfigurationNode) SetRedshiftSource(v *RedshiftSource) *CodeGenConfigurationNode { + s.RedshiftSource = v + return s +} + +// SetRedshiftTarget sets the RedshiftTarget field's value. +func (s *CodeGenConfigurationNode) SetRedshiftTarget(v *RedshiftTarget) *CodeGenConfigurationNode { + s.RedshiftTarget = v + return s +} + +// SetRelationalCatalogSource sets the RelationalCatalogSource field's value. +func (s *CodeGenConfigurationNode) SetRelationalCatalogSource(v *RelationalCatalogSource) *CodeGenConfigurationNode { + s.RelationalCatalogSource = v + return s +} + +// SetRenameField sets the RenameField field's value. +func (s *CodeGenConfigurationNode) SetRenameField(v *RenameField) *CodeGenConfigurationNode { + s.RenameField = v + return s +} + +// SetS3CatalogSource sets the S3CatalogSource field's value. +func (s *CodeGenConfigurationNode) SetS3CatalogSource(v *S3CatalogSource) *CodeGenConfigurationNode { + s.S3CatalogSource = v + return s +} + +// SetS3CatalogTarget sets the S3CatalogTarget field's value. +func (s *CodeGenConfigurationNode) SetS3CatalogTarget(v *S3CatalogTarget) *CodeGenConfigurationNode { + s.S3CatalogTarget = v + return s +} + +// SetS3CsvSource sets the S3CsvSource field's value. 
+func (s *CodeGenConfigurationNode) SetS3CsvSource(v *S3CsvSource) *CodeGenConfigurationNode { + s.S3CsvSource = v + return s +} + +// SetS3DirectTarget sets the S3DirectTarget field's value. +func (s *CodeGenConfigurationNode) SetS3DirectTarget(v *S3DirectTarget) *CodeGenConfigurationNode { + s.S3DirectTarget = v + return s +} + +// SetS3GlueParquetTarget sets the S3GlueParquetTarget field's value. +func (s *CodeGenConfigurationNode) SetS3GlueParquetTarget(v *S3GlueParquetTarget) *CodeGenConfigurationNode { + s.S3GlueParquetTarget = v + return s +} + +// SetS3JsonSource sets the S3JsonSource field's value. +func (s *CodeGenConfigurationNode) SetS3JsonSource(v *S3JsonSource) *CodeGenConfigurationNode { + s.S3JsonSource = v + return s +} + +// SetS3ParquetSource sets the S3ParquetSource field's value. +func (s *CodeGenConfigurationNode) SetS3ParquetSource(v *S3ParquetSource) *CodeGenConfigurationNode { + s.S3ParquetSource = v + return s +} + +// SetSelectFields sets the SelectFields field's value. +func (s *CodeGenConfigurationNode) SetSelectFields(v *SelectFields) *CodeGenConfigurationNode { + s.SelectFields = v + return s +} + +// SetSelectFromCollection sets the SelectFromCollection field's value. +func (s *CodeGenConfigurationNode) SetSelectFromCollection(v *SelectFromCollection) *CodeGenConfigurationNode { + s.SelectFromCollection = v + return s +} + +// SetSparkConnectorSource sets the SparkConnectorSource field's value. +func (s *CodeGenConfigurationNode) SetSparkConnectorSource(v *SparkConnectorSource) *CodeGenConfigurationNode { + s.SparkConnectorSource = v + return s +} + +// SetSparkConnectorTarget sets the SparkConnectorTarget field's value. +func (s *CodeGenConfigurationNode) SetSparkConnectorTarget(v *SparkConnectorTarget) *CodeGenConfigurationNode { + s.SparkConnectorTarget = v + return s +} + +// SetSparkSQL sets the SparkSQL field's value. +func (s *CodeGenConfigurationNode) SetSparkSQL(v *SparkSQL) *CodeGenConfigurationNode { + s.SparkSQL = v + return s +} + +// SetSpigot sets the Spigot field's value. +func (s *CodeGenConfigurationNode) SetSpigot(v *Spigot) *CodeGenConfigurationNode { + s.Spigot = v + return s +} + +// SetSplitFields sets the SplitFields field's value. +func (s *CodeGenConfigurationNode) SetSplitFields(v *SplitFields) *CodeGenConfigurationNode { + s.SplitFields = v + return s +} + +// SetUnion sets the Union field's value. +func (s *CodeGenConfigurationNode) SetUnion(v *Union) *CodeGenConfigurationNode { + s.Union = v + return s +} + // Represents a directional edge in a directed acyclic graph (DAG). type CodeGenEdge struct { _ struct{} `type:"structure"` @@ -25715,6 +27303,14 @@ type CreateJobInput struct { // Deprecated: This property is deprecated, use MaxCapacity instead. AllocatedCapacity *int64 `deprecated:"true" type:"integer"` + // The representation of a directed acyclic graph on which both the Glue Studio + // visual component and Glue Studio code generation are based. + // + // CodeGenConfigurationNodes is a sensitive parameter and its value will be + // replaced with "sensitive" in the string returned by CreateJobInput's + // String and GoString methods. + CodeGenConfigurationNodes map[string]*CodeGenConfigurationNode `type:"map" sensitive:"true"` + + // The JobCommand that runs this job.
// // Command is a required field @@ -25880,6 +27476,16 @@ func (s *CreateJobInput) Validate() error { if s.Timeout != nil && *s.Timeout < 1 { invalidParams.Add(request.NewErrParamMinValue("Timeout", 1)) } + if s.CodeGenConfigurationNodes != nil { + for i, v := range s.CodeGenConfigurationNodes { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "CodeGenConfigurationNodes", i), err.(request.ErrInvalidParams)) + } + } + } if s.NotificationProperty != nil { if err := s.NotificationProperty.Validate(); err != nil { invalidParams.AddNested("NotificationProperty", err.(request.ErrInvalidParams)) @@ -25898,6 +27504,12 @@ func (s *CreateJobInput) SetAllocatedCapacity(v int64) *CreateJobInput { return s } +// SetCodeGenConfigurationNodes sets the CodeGenConfigurationNodes field's value. +func (s *CreateJobInput) SetCodeGenConfigurationNodes(v map[string]*CodeGenConfigurationNode) *CreateJobInput { + s.CodeGenConfigurationNodes = v + return s +} + // SetCommand sets the Command field's value. func (s *CreateJobInput) SetCommand(v *JobCommand) *CreateJobInput { s.Command = v @@ -28308,6 +29920,118 @@ func (s *CsvClassifier) SetVersion(v int64) *CsvClassifier { return s } +// Specifies a transform that uses custom code you provide to perform the data +// transformation. The output is a collection of DynamicFrames. +type CustomCode struct { + _ struct{} `type:"structure"` + + // The name defined for the custom code node class. + // + // ClassName is a required field + ClassName *string `type:"string" required:"true"` + + // The custom code that is used to perform the data transformation. + // + // Code is a required field + Code *string `type:"string" required:"true"` + + // The data inputs identified by their node names. + // + // Inputs is a required field + Inputs []*string `min:"1" type:"list" required:"true"` + + // The name of the transform node. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // Specifies the data schema for the custom code transform. + OutputSchemas []*GlueSchema `type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CustomCode) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CustomCode) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
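// Illustrative sketch: wiring CodeGenConfigurationNodes into CreateJob via the
// new field and setter above. The map keys are node identifiers; here each key
// matches its node's Name so the Inputs reference resolves (an assumption for
// the example). The role ARN, script location, and all names are hypothetical.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	svc := glue.New(session.Must(session.NewSession()))

	nodes := map[string]*glue.CodeGenConfigurationNode{
		"src": (&glue.CodeGenConfigurationNode{}).SetCatalogSource(
			(&glue.CatalogSource{}).SetName("src").SetDatabase("sales_db").SetTable("orders")),
		"tgt": (&glue.CodeGenConfigurationNode{}).SetCatalogTarget(
			(&glue.BasicCatalogTarget{}).SetName("tgt").
				SetInputs([]*string{aws.String("src")}).
				SetDatabase("sales_db").SetTable("orders_copy")),
	}

	out, err := svc.CreateJob((&glue.CreateJobInput{}).
		SetName("visual-job").
		SetRole("arn:aws:iam::123456789012:role/MyGlueJobRole").
		SetCommand((&glue.JobCommand{}).
			SetName("glueetl").
			SetScriptLocation("s3://my-bucket/scripts/visual-job.py")).
		SetCodeGenConfigurationNodes(nodes))
	if err != nil {
		fmt.Println("CreateJob failed:", err)
		return
	}
	// The job should now be viewable in Glue Studio as a visual graph.
	fmt.Println("created job:", aws.StringValue(out.Name))
}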
+func (s *CustomCode) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CustomCode"} + if s.ClassName == nil { + invalidParams.Add(request.NewErrParamRequired("ClassName")) + } + if s.Code == nil { + invalidParams.Add(request.NewErrParamRequired("Code")) + } + if s.Inputs == nil { + invalidParams.Add(request.NewErrParamRequired("Inputs")) + } + if s.Inputs != nil && len(s.Inputs) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Inputs", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.OutputSchemas != nil { + for i, v := range s.OutputSchemas { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "OutputSchemas", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClassName sets the ClassName field's value. +func (s *CustomCode) SetClassName(v string) *CustomCode { + s.ClassName = &v + return s +} + +// SetCode sets the Code field's value. +func (s *CustomCode) SetCode(v string) *CustomCode { + s.Code = &v + return s +} + +// SetInputs sets the Inputs field's value. +func (s *CustomCode) SetInputs(v []*string) *CustomCode { + s.Inputs = v + return s +} + +// SetName sets the Name field's value. +func (s *CustomCode) SetName(v string) *CustomCode { + s.Name = &v + return s +} + +// SetOutputSchemas sets the OutputSchemas field's value. +func (s *CustomCode) SetOutputSchemas(v []*GlueSchema) *CustomCode { + s.OutputSchemas = v + return s +} + // An object representing a custom pattern for detecting sensitive data across // the columns and rows of your structured data. type CustomEntityType struct { @@ -28752,6 +30476,67 @@ func (s *DatabaseInput) SetTargetDatabase(v *DatabaseIdentifier) *DatabaseInput return s } +// A structure representing the datatype of the value. +type Datatype struct { + _ struct{} `type:"structure"` + + // The datatype of the value. + // + // Id is a required field + Id *string `type:"string" required:"true"` + + // A label assigned to the datatype. + // + // Label is a required field + Label *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Datatype) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Datatype) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Datatype) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Datatype"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.Label == nil { + invalidParams.Add(request.NewErrParamRequired("Label")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetId sets the Id field's value. +func (s *Datatype) SetId(v string) *Datatype { + s.Id = &v + return s +} + +// SetLabel sets the Label field's value. 
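// Illustrative sketch: a CustomCode transform node. The class name, input node
// name, and code string are hypothetical; Code is passed through to the job as
// an opaque string.
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	node := (&glue.CustomCode{}).
		SetName("custom1").
		SetClassName("MyTransform").
		SetInputs([]*string{aws.String("src")}).
		SetCode("def MyTransform(glueContext, dfc):\n    return dfc")

	_ = node.Validate() // ClassName, Code, Inputs (>= 1), and Name are required
}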
+func (s *Datatype) SetLabel(v string) *Datatype { + s.Label = &v + return s +} + // Defines column statistics supported for timestamp data columns. type DateColumnStatisticsData struct { _ struct{} `type:"structure"` @@ -31411,183 +33196,712 @@ func (s DevEndpoint) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DevEndpoint) GoString() string { - return s.String() +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DevEndpoint) GoString() string { + return s.String() +} + +// SetArguments sets the Arguments field's value. +func (s *DevEndpoint) SetArguments(v map[string]*string) *DevEndpoint { + s.Arguments = v + return s +} + +// SetAvailabilityZone sets the AvailabilityZone field's value. +func (s *DevEndpoint) SetAvailabilityZone(v string) *DevEndpoint { + s.AvailabilityZone = &v + return s +} + +// SetCreatedTimestamp sets the CreatedTimestamp field's value. +func (s *DevEndpoint) SetCreatedTimestamp(v time.Time) *DevEndpoint { + s.CreatedTimestamp = &v + return s +} + +// SetEndpointName sets the EndpointName field's value. +func (s *DevEndpoint) SetEndpointName(v string) *DevEndpoint { + s.EndpointName = &v + return s +} + +// SetExtraJarsS3Path sets the ExtraJarsS3Path field's value. +func (s *DevEndpoint) SetExtraJarsS3Path(v string) *DevEndpoint { + s.ExtraJarsS3Path = &v + return s +} + +// SetExtraPythonLibsS3Path sets the ExtraPythonLibsS3Path field's value. +func (s *DevEndpoint) SetExtraPythonLibsS3Path(v string) *DevEndpoint { + s.ExtraPythonLibsS3Path = &v + return s +} + +// SetFailureReason sets the FailureReason field's value. +func (s *DevEndpoint) SetFailureReason(v string) *DevEndpoint { + s.FailureReason = &v + return s +} + +// SetGlueVersion sets the GlueVersion field's value. +func (s *DevEndpoint) SetGlueVersion(v string) *DevEndpoint { + s.GlueVersion = &v + return s +} + +// SetLastModifiedTimestamp sets the LastModifiedTimestamp field's value. +func (s *DevEndpoint) SetLastModifiedTimestamp(v time.Time) *DevEndpoint { + s.LastModifiedTimestamp = &v + return s +} + +// SetLastUpdateStatus sets the LastUpdateStatus field's value. +func (s *DevEndpoint) SetLastUpdateStatus(v string) *DevEndpoint { + s.LastUpdateStatus = &v + return s +} + +// SetNumberOfNodes sets the NumberOfNodes field's value. +func (s *DevEndpoint) SetNumberOfNodes(v int64) *DevEndpoint { + s.NumberOfNodes = &v + return s +} + +// SetNumberOfWorkers sets the NumberOfWorkers field's value. +func (s *DevEndpoint) SetNumberOfWorkers(v int64) *DevEndpoint { + s.NumberOfWorkers = &v + return s +} + +// SetPrivateAddress sets the PrivateAddress field's value. +func (s *DevEndpoint) SetPrivateAddress(v string) *DevEndpoint { + s.PrivateAddress = &v + return s +} + +// SetPublicAddress sets the PublicAddress field's value. +func (s *DevEndpoint) SetPublicAddress(v string) *DevEndpoint { + s.PublicAddress = &v + return s +} + +// SetPublicKey sets the PublicKey field's value. +func (s *DevEndpoint) SetPublicKey(v string) *DevEndpoint { + s.PublicKey = &v + return s +} + +// SetPublicKeys sets the PublicKeys field's value. 
+func (s *DevEndpoint) SetPublicKeys(v []*string) *DevEndpoint { + s.PublicKeys = v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *DevEndpoint) SetRoleArn(v string) *DevEndpoint { + s.RoleArn = &v + return s +} + +// SetSecurityConfiguration sets the SecurityConfiguration field's value. +func (s *DevEndpoint) SetSecurityConfiguration(v string) *DevEndpoint { + s.SecurityConfiguration = &v + return s +} + +// SetSecurityGroupIds sets the SecurityGroupIds field's value. +func (s *DevEndpoint) SetSecurityGroupIds(v []*string) *DevEndpoint { + s.SecurityGroupIds = v + return s +} + +// SetStatus sets the Status field's value. +func (s *DevEndpoint) SetStatus(v string) *DevEndpoint { + s.Status = &v + return s +} + +// SetSubnetId sets the SubnetId field's value. +func (s *DevEndpoint) SetSubnetId(v string) *DevEndpoint { + s.SubnetId = &v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *DevEndpoint) SetVpcId(v string) *DevEndpoint { + s.VpcId = &v + return s +} + +// SetWorkerType sets the WorkerType field's value. +func (s *DevEndpoint) SetWorkerType(v string) *DevEndpoint { + s.WorkerType = &v + return s +} + +// SetYarnEndpointAddress sets the YarnEndpointAddress field's value. +func (s *DevEndpoint) SetYarnEndpointAddress(v string) *DevEndpoint { + s.YarnEndpointAddress = &v + return s +} + +// SetZeppelinRemoteSparkInterpreterPort sets the ZeppelinRemoteSparkInterpreterPort field's value. +func (s *DevEndpoint) SetZeppelinRemoteSparkInterpreterPort(v int64) *DevEndpoint { + s.ZeppelinRemoteSparkInterpreterPort = &v + return s +} + +// Custom libraries to be loaded into a development endpoint. +type DevEndpointCustomLibraries struct { + _ struct{} `type:"structure"` + + // The path to one or more Java .jar files in an S3 bucket that should be loaded + // in your DevEndpoint. + // + // You can only use pure Java/Scala libraries with a DevEndpoint. + ExtraJarsS3Path *string `type:"string"` + + // The paths to one or more Python libraries in an Amazon Simple Storage Service + // (Amazon S3) bucket that should be loaded in your DevEndpoint. Multiple values + // must be complete paths separated by a comma. + // + // You can only use pure Python libraries with a DevEndpoint. Libraries that + // rely on C extensions, such as the pandas (http://pandas.pydata.org/) Python + // data analysis library, are not currently supported. + ExtraPythonLibsS3Path *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DevEndpointCustomLibraries) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DevEndpointCustomLibraries) GoString() string { + return s.String() +} + +// SetExtraJarsS3Path sets the ExtraJarsS3Path field's value. +func (s *DevEndpointCustomLibraries) SetExtraJarsS3Path(v string) *DevEndpointCustomLibraries { + s.ExtraJarsS3Path = &v + return s +} + +// SetExtraPythonLibsS3Path sets the ExtraPythonLibsS3Path field's value. 
+func (s *DevEndpointCustomLibraries) SetExtraPythonLibsS3Path(v string) *DevEndpointCustomLibraries { + s.ExtraPythonLibsS3Path = &v + return s +} + +// Specifies an Apache Kafka data store. +type DirectKafkaSource struct { + _ struct{} `type:"structure"` + + // Specifies options related to data preview for viewing a sample of your data. + DataPreviewOptions *StreamingDataPreviewOptions `type:"structure"` + + // Whether to automatically determine the schema from the incoming data. + DetectSchema *bool `type:"boolean"` + + // The name of the data store. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // Specifies the streaming options. + StreamingOptions *KafkaStreamingSourceOptions `type:"structure"` + + // The amount of time to spend processing each micro batch. + WindowSize *int64 `type:"integer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DirectKafkaSource) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DirectKafkaSource) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DirectKafkaSource) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DirectKafkaSource"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.DataPreviewOptions != nil { + if err := s.DataPreviewOptions.Validate(); err != nil { + invalidParams.AddNested("DataPreviewOptions", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDataPreviewOptions sets the DataPreviewOptions field's value. +func (s *DirectKafkaSource) SetDataPreviewOptions(v *StreamingDataPreviewOptions) *DirectKafkaSource { + s.DataPreviewOptions = v + return s +} + +// SetDetectSchema sets the DetectSchema field's value. +func (s *DirectKafkaSource) SetDetectSchema(v bool) *DirectKafkaSource { + s.DetectSchema = &v + return s +} + +// SetName sets the Name field's value. +func (s *DirectKafkaSource) SetName(v string) *DirectKafkaSource { + s.Name = &v + return s +} + +// SetStreamingOptions sets the StreamingOptions field's value. +func (s *DirectKafkaSource) SetStreamingOptions(v *KafkaStreamingSourceOptions) *DirectKafkaSource { + s.StreamingOptions = v + return s +} + +// SetWindowSize sets the WindowSize field's value. +func (s *DirectKafkaSource) SetWindowSize(v int64) *DirectKafkaSource { + s.WindowSize = &v + return s +} + +// Specifies a direct Amazon Kinesis data source. +type DirectKinesisSource struct { + _ struct{} `type:"structure"` + + // Additional options for data preview. + DataPreviewOptions *StreamingDataPreviewOptions `type:"structure"` + + // Whether to automatically determine the schema from the incoming data. + DetectSchema *bool `type:"boolean"` + + // The name of the data source. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // Additional options for the Kinesis streaming data source. 
+ StreamingOptions *KinesisStreamingSourceOptions `type:"structure"` + + // The amount of time to spend processing each micro batch. + WindowSize *int64 `type:"integer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DirectKinesisSource) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DirectKinesisSource) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DirectKinesisSource) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DirectKinesisSource"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.DataPreviewOptions != nil { + if err := s.DataPreviewOptions.Validate(); err != nil { + invalidParams.AddNested("DataPreviewOptions", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDataPreviewOptions sets the DataPreviewOptions field's value. +func (s *DirectKinesisSource) SetDataPreviewOptions(v *StreamingDataPreviewOptions) *DirectKinesisSource { + s.DataPreviewOptions = v + return s +} + +// SetDetectSchema sets the DetectSchema field's value. +func (s *DirectKinesisSource) SetDetectSchema(v bool) *DirectKinesisSource { + s.DetectSchema = &v + return s +} + +// SetName sets the Name field's value. +func (s *DirectKinesisSource) SetName(v string) *DirectKinesisSource { + s.Name = &v + return s +} + +// SetStreamingOptions sets the StreamingOptions field's value. +func (s *DirectKinesisSource) SetStreamingOptions(v *KinesisStreamingSourceOptions) *DirectKinesisSource { + s.StreamingOptions = v + return s +} + +// SetWindowSize sets the WindowSize field's value. +func (s *DirectKinesisSource) SetWindowSize(v int64) *DirectKinesisSource { + s.WindowSize = &v + return s +} + +// A policy that specifies update behavior for the crawler. +type DirectSchemaChangePolicy struct { + _ struct{} `type:"structure"` + + // Specifies the database that the schema change policy applies to. + Database *string `type:"string"` + + // Whether to use the specified update behavior when the crawler finds a changed + // schema. + EnableUpdateCatalog *bool `type:"boolean"` + + // Specifies the table in the database that the schema change policy applies + // to. + Table *string `type:"string"` + + // The update behavior when the crawler finds a changed schema. + UpdateBehavior *string `type:"string" enum:"UpdateCatalogBehavior"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DirectSchemaChangePolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s DirectSchemaChangePolicy) GoString() string { + return s.String() +} + +// SetDatabase sets the Database field's value. +func (s *DirectSchemaChangePolicy) SetDatabase(v string) *DirectSchemaChangePolicy { + s.Database = &v + return s +} + +// SetEnableUpdateCatalog sets the EnableUpdateCatalog field's value. +func (s *DirectSchemaChangePolicy) SetEnableUpdateCatalog(v bool) *DirectSchemaChangePolicy { + s.EnableUpdateCatalog = &v + return s +} + +// SetTable sets the Table field's value. +func (s *DirectSchemaChangePolicy) SetTable(v string) *DirectSchemaChangePolicy { + s.Table = &v + return s +} + +// SetUpdateBehavior sets the UpdateBehavior field's value. +func (s *DirectSchemaChangePolicy) SetUpdateBehavior(v string) *DirectSchemaChangePolicy { + s.UpdateBehavior = &v + return s +} + +// Defines column statistics supported for floating-point number data columns. +type DoubleColumnStatisticsData struct { + _ struct{} `type:"structure"` + + // The highest value in the column. + MaximumValue *float64 `type:"double"` + + // The lowest value in the column. + MinimumValue *float64 `type:"double"` + + // The number of distinct values in a column. + // + // NumberOfDistinctValues is a required field + NumberOfDistinctValues *int64 `type:"long" required:"true"` + + // The number of null values in the column. + // + // NumberOfNulls is a required field + NumberOfNulls *int64 `type:"long" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DoubleColumnStatisticsData) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DoubleColumnStatisticsData) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DoubleColumnStatisticsData) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DoubleColumnStatisticsData"} + if s.NumberOfDistinctValues == nil { + invalidParams.Add(request.NewErrParamRequired("NumberOfDistinctValues")) + } + if s.NumberOfNulls == nil { + invalidParams.Add(request.NewErrParamRequired("NumberOfNulls")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetArguments sets the Arguments field's value. -func (s *DevEndpoint) SetArguments(v map[string]*string) *DevEndpoint { - s.Arguments = v +// SetMaximumValue sets the MaximumValue field's value. +func (s *DoubleColumnStatisticsData) SetMaximumValue(v float64) *DoubleColumnStatisticsData { + s.MaximumValue = &v return s } -// SetAvailabilityZone sets the AvailabilityZone field's value. -func (s *DevEndpoint) SetAvailabilityZone(v string) *DevEndpoint { - s.AvailabilityZone = &v +// SetMinimumValue sets the MinimumValue field's value. +func (s *DoubleColumnStatisticsData) SetMinimumValue(v float64) *DoubleColumnStatisticsData { + s.MinimumValue = &v return s } -// SetCreatedTimestamp sets the CreatedTimestamp field's value. -func (s *DevEndpoint) SetCreatedTimestamp(v time.Time) *DevEndpoint { - s.CreatedTimestamp = &v +// SetNumberOfDistinctValues sets the NumberOfDistinctValues field's value. 
+func (s *DoubleColumnStatisticsData) SetNumberOfDistinctValues(v int64) *DoubleColumnStatisticsData { + s.NumberOfDistinctValues = &v return s } -// SetEndpointName sets the EndpointName field's value. -func (s *DevEndpoint) SetEndpointName(v string) *DevEndpoint { - s.EndpointName = &v +// SetNumberOfNulls sets the NumberOfNulls field's value. +func (s *DoubleColumnStatisticsData) SetNumberOfNulls(v int64) *DoubleColumnStatisticsData { + s.NumberOfNulls = &v return s } -// SetExtraJarsS3Path sets the ExtraJarsS3Path field's value. -func (s *DevEndpoint) SetExtraJarsS3Path(v string) *DevEndpoint { - s.ExtraJarsS3Path = &v - return s -} +// Specifies a transform that removes rows of repeating data from a data set. +type DropDuplicates struct { + _ struct{} `type:"structure"` -// SetExtraPythonLibsS3Path sets the ExtraPythonLibsS3Path field's value. -func (s *DevEndpoint) SetExtraPythonLibsS3Path(v string) *DevEndpoint { - s.ExtraPythonLibsS3Path = &v - return s -} + // The name of the columns to be merged or removed if repeating. + Columns [][]*string `type:"list"` -// SetFailureReason sets the FailureReason field's value. -func (s *DevEndpoint) SetFailureReason(v string) *DevEndpoint { - s.FailureReason = &v - return s -} + // The data inputs identified by their node names. + // + // Inputs is a required field + Inputs []*string `min:"1" type:"list" required:"true"` -// SetGlueVersion sets the GlueVersion field's value. -func (s *DevEndpoint) SetGlueVersion(v string) *DevEndpoint { - s.GlueVersion = &v - return s + // The name of the transform node. + // + // Name is a required field + Name *string `type:"string" required:"true"` } -// SetLastModifiedTimestamp sets the LastModifiedTimestamp field's value. -func (s *DevEndpoint) SetLastModifiedTimestamp(v time.Time) *DevEndpoint { - s.LastModifiedTimestamp = &v - return s +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DropDuplicates) String() string { + return awsutil.Prettify(s) } -// SetLastUpdateStatus sets the LastUpdateStatus field's value. -func (s *DevEndpoint) SetLastUpdateStatus(v string) *DevEndpoint { - s.LastUpdateStatus = &v - return s +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DropDuplicates) GoString() string { + return s.String() } -// SetNumberOfNodes sets the NumberOfNodes field's value. -func (s *DevEndpoint) SetNumberOfNodes(v int64) *DevEndpoint { - s.NumberOfNodes = &v - return s -} +// Validate inspects the fields of the type to determine if they are valid. +func (s *DropDuplicates) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DropDuplicates"} + if s.Inputs == nil { + invalidParams.Add(request.NewErrParamRequired("Inputs")) + } + if s.Inputs != nil && len(s.Inputs) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Inputs", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } -// SetNumberOfWorkers sets the NumberOfWorkers field's value. 
-func (s *DevEndpoint) SetNumberOfWorkers(v int64) *DevEndpoint { - s.NumberOfWorkers = &v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetPrivateAddress sets the PrivateAddress field's value. -func (s *DevEndpoint) SetPrivateAddress(v string) *DevEndpoint { - s.PrivateAddress = &v +// SetColumns sets the Columns field's value. +func (s *DropDuplicates) SetColumns(v [][]*string) *DropDuplicates { + s.Columns = v return s } -// SetPublicAddress sets the PublicAddress field's value. -func (s *DevEndpoint) SetPublicAddress(v string) *DevEndpoint { - s.PublicAddress = &v +// SetInputs sets the Inputs field's value. +func (s *DropDuplicates) SetInputs(v []*string) *DropDuplicates { + s.Inputs = v return s } -// SetPublicKey sets the PublicKey field's value. -func (s *DevEndpoint) SetPublicKey(v string) *DevEndpoint { - s.PublicKey = &v +// SetName sets the Name field's value. +func (s *DropDuplicates) SetName(v string) *DropDuplicates { + s.Name = &v return s } -// SetPublicKeys sets the PublicKeys field's value. -func (s *DevEndpoint) SetPublicKeys(v []*string) *DevEndpoint { - s.PublicKeys = v - return s -} +// Specifies a transform that chooses the data property keys that you want to +// drop. +type DropFields struct { + _ struct{} `type:"structure"` -// SetRoleArn sets the RoleArn field's value. -func (s *DevEndpoint) SetRoleArn(v string) *DevEndpoint { - s.RoleArn = &v - return s -} + // The data inputs identified by their node names. + // + // Inputs is a required field + Inputs []*string `min:"1" type:"list" required:"true"` -// SetSecurityConfiguration sets the SecurityConfiguration field's value. -func (s *DevEndpoint) SetSecurityConfiguration(v string) *DevEndpoint { - s.SecurityConfiguration = &v - return s -} + // The name of the transform node. + // + // Name is a required field + Name *string `type:"string" required:"true"` -// SetSecurityGroupIds sets the SecurityGroupIds field's value. -func (s *DevEndpoint) SetSecurityGroupIds(v []*string) *DevEndpoint { - s.SecurityGroupIds = v - return s + // A JSON path to a variable in the data structure. + // + // Paths is a required field + Paths [][]*string `type:"list" required:"true"` } -// SetStatus sets the Status field's value. -func (s *DevEndpoint) SetStatus(v string) *DevEndpoint { - s.Status = &v - return s +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DropFields) String() string { + return awsutil.Prettify(s) } -// SetSubnetId sets the SubnetId field's value. -func (s *DevEndpoint) SetSubnetId(v string) *DevEndpoint { - s.SubnetId = &v - return s +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DropFields) GoString() string { + return s.String() } -// SetVpcId sets the VpcId field's value. -func (s *DevEndpoint) SetVpcId(v string) *DevEndpoint { - s.VpcId = &v - return s +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DropFields) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DropFields"} + if s.Inputs == nil { + invalidParams.Add(request.NewErrParamRequired("Inputs")) + } + if s.Inputs != nil && len(s.Inputs) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Inputs", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Paths == nil { + invalidParams.Add(request.NewErrParamRequired("Paths")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetWorkerType sets the WorkerType field's value. -func (s *DevEndpoint) SetWorkerType(v string) *DevEndpoint { - s.WorkerType = &v +// SetInputs sets the Inputs field's value. +func (s *DropFields) SetInputs(v []*string) *DropFields { + s.Inputs = v return s } -// SetYarnEndpointAddress sets the YarnEndpointAddress field's value. -func (s *DevEndpoint) SetYarnEndpointAddress(v string) *DevEndpoint { - s.YarnEndpointAddress = &v +// SetName sets the Name field's value. +func (s *DropFields) SetName(v string) *DropFields { + s.Name = &v return s } -// SetZeppelinRemoteSparkInterpreterPort sets the ZeppelinRemoteSparkInterpreterPort field's value. -func (s *DevEndpoint) SetZeppelinRemoteSparkInterpreterPort(v int64) *DevEndpoint { - s.ZeppelinRemoteSparkInterpreterPort = &v +// SetPaths sets the Paths field's value. +func (s *DropFields) SetPaths(v [][]*string) *DropFields { + s.Paths = v return s } -// Custom libraries to be loaded into a development endpoint. -type DevEndpointCustomLibraries struct { +// Specifies a transform that removes columns from the dataset if all values +// in the column are 'null'. By default, Glue Studio will recognize null objects, +// but some values such as empty strings, strings that are "null", -1 integers +// or other placeholders such as zeros, are not automatically recognized as +// nulls. +type DropNullFields struct { _ struct{} `type:"structure"` - // The path to one or more Java .jar files in an S3 bucket that should be loaded - // in your DevEndpoint. + // The data inputs identified by their node names. // - // You can only use pure Java/Scala libraries with a DevEndpoint. - ExtraJarsS3Path *string `type:"string"` + // Inputs is a required field + Inputs []*string `min:"1" type:"list" required:"true"` - // The paths to one or more Python libraries in an Amazon Simple Storage Service - // (Amazon S3) bucket that should be loaded in your DevEndpoint. Multiple values - // must be complete paths separated by a comma. + // The name of the transform node. // - // You can only use pure Python libraries with a DevEndpoint. Libraries that - // rely on C extensions, such as the pandas (http://pandas.pydata.org/) Python - // data analysis library, are not currently supported. - ExtraPythonLibsS3Path *string `type:"string"` + // Name is a required field + Name *string `type:"string" required:"true"` + + // A structure that represents whether certain values are recognized as null + // values for removal. + NullCheckBoxList *NullCheckBoxList `type:"structure"` + + // A structure that specifies a list of NullValueField structures that represent + // a custom null value such as zero or other value being used as a null placeholder + // unique to the dataset. + // + // The DropNullFields transform removes custom null values only if both the + // value of the null placeholder and the datatype match the data. + NullTextList []*NullValueField `type:"list"` } // String returns the string representation. 
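For a sense of how these generated builders compose in practice, here is a minimal, hypothetical sketch (the node name "drop_pii", upstream node name "source_node", and field path "ssn" are invented for illustration) that assembles a DropFields transform node with the fluent setters defined above and runs its client-side Validate checks:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	// Assemble a DropFields node: Name, Inputs, and Paths are required,
	// and Inputs must contain at least one upstream node name.
	drop := (&glue.DropFields{}).
		SetName("drop_pii").
		SetInputs([]*string{aws.String("source_node")}).
		SetPaths([][]*string{{aws.String("ssn")}})

	// Validate applies exactly the checks generated in this hunk.
	if err := drop.Validate(); err != nil {
		fmt.Println("invalid node:", err)
		return
	}
	fmt.Println("node:", drop.GoString())
}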
@@ -31595,7 +33909,7 @@ type DevEndpointCustomLibraries struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s DevEndpointCustomLibraries) String() string { +func (s DropNullFields) String() string { return awsutil.Prettify(s) } @@ -31604,41 +33918,81 @@ func (s DevEndpointCustomLibraries) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s DevEndpointCustomLibraries) GoString() string { +func (s DropNullFields) GoString() string { return s.String() } -// SetExtraJarsS3Path sets the ExtraJarsS3Path field's value. -func (s *DevEndpointCustomLibraries) SetExtraJarsS3Path(v string) *DevEndpointCustomLibraries { - s.ExtraJarsS3Path = &v +// Validate inspects the fields of the type to determine if they are valid. +func (s *DropNullFields) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DropNullFields"} + if s.Inputs == nil { + invalidParams.Add(request.NewErrParamRequired("Inputs")) + } + if s.Inputs != nil && len(s.Inputs) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Inputs", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.NullTextList != nil { + for i, v := range s.NullTextList { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "NullTextList", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetInputs sets the Inputs field's value. +func (s *DropNullFields) SetInputs(v []*string) *DropNullFields { + s.Inputs = v return s } -// SetExtraPythonLibsS3Path sets the ExtraPythonLibsS3Path field's value. -func (s *DevEndpointCustomLibraries) SetExtraPythonLibsS3Path(v string) *DevEndpointCustomLibraries { - s.ExtraPythonLibsS3Path = &v +// SetName sets the Name field's value. +func (s *DropNullFields) SetName(v string) *DropNullFields { + s.Name = &v return s } -// Defines column statistics supported for floating-point number data columns. -type DoubleColumnStatisticsData struct { - _ struct{} `type:"structure"` +// SetNullCheckBoxList sets the NullCheckBoxList field's value. +func (s *DropNullFields) SetNullCheckBoxList(v *NullCheckBoxList) *DropNullFields { + s.NullCheckBoxList = v + return s +} - // The highest value in the column. - MaximumValue *float64 `type:"double"` +// SetNullTextList sets the NullTextList field's value. +func (s *DropNullFields) SetNullTextList(v []*NullValueField) *DropNullFields { + s.NullTextList = v + return s +} - // The lowest value in the column. - MinimumValue *float64 `type:"double"` +// Specifies a DynamoDB data source in the Glue Data Catalog. +type DynamoDBCatalogSource struct { + _ struct{} `type:"structure"` - // The number of distinct values in a column. + // The name of the database to read from. // - // NumberOfDistinctValues is a required field - NumberOfDistinctValues *int64 `type:"long" required:"true"` + // Database is a required field + Database *string `type:"string" required:"true"` - // The number of null values in the column. + // The name of the data source. 
// - // NumberOfNulls is a required field - NumberOfNulls *int64 `type:"long" required:"true"` + // Name is a required field + Name *string `type:"string" required:"true"` + + // The name of the table in the database to read from. + // + // Table is a required field + Table *string `type:"string" required:"true"` } // String returns the string representation. @@ -31646,7 +34000,7 @@ type DoubleColumnStatisticsData struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s DoubleColumnStatisticsData) String() string { +func (s DynamoDBCatalogSource) String() string { return awsutil.Prettify(s) } @@ -31655,18 +34009,21 @@ func (s DoubleColumnStatisticsData) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s DoubleColumnStatisticsData) GoString() string { +func (s DynamoDBCatalogSource) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DoubleColumnStatisticsData) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DoubleColumnStatisticsData"} - if s.NumberOfDistinctValues == nil { - invalidParams.Add(request.NewErrParamRequired("NumberOfDistinctValues")) +func (s *DynamoDBCatalogSource) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DynamoDBCatalogSource"} + if s.Database == nil { + invalidParams.Add(request.NewErrParamRequired("Database")) } - if s.NumberOfNulls == nil { - invalidParams.Add(request.NewErrParamRequired("NumberOfNulls")) + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Table == nil { + invalidParams.Add(request.NewErrParamRequired("Table")) } if invalidParams.Len() > 0 { @@ -31675,27 +34032,21 @@ func (s *DoubleColumnStatisticsData) Validate() error { return nil } -// SetMaximumValue sets the MaximumValue field's value. -func (s *DoubleColumnStatisticsData) SetMaximumValue(v float64) *DoubleColumnStatisticsData { - s.MaximumValue = &v - return s -} - -// SetMinimumValue sets the MinimumValue field's value. -func (s *DoubleColumnStatisticsData) SetMinimumValue(v float64) *DoubleColumnStatisticsData { - s.MinimumValue = &v +// SetDatabase sets the Database field's value. +func (s *DynamoDBCatalogSource) SetDatabase(v string) *DynamoDBCatalogSource { + s.Database = &v return s } -// SetNumberOfDistinctValues sets the NumberOfDistinctValues field's value. -func (s *DoubleColumnStatisticsData) SetNumberOfDistinctValues(v int64) *DoubleColumnStatisticsData { - s.NumberOfDistinctValues = &v +// SetName sets the Name field's value. +func (s *DynamoDBCatalogSource) SetName(v string) *DynamoDBCatalogSource { + s.Name = &v return s } -// SetNumberOfNulls sets the NumberOfNulls field's value. -func (s *DoubleColumnStatisticsData) SetNumberOfNulls(v int64) *DoubleColumnStatisticsData { - s.NumberOfNulls = &v +// SetTable sets the Table field's value. 
+func (s *DynamoDBCatalogSource) SetTable(v string) *DynamoDBCatalogSource { + s.Table = &v return s } @@ -32302,6 +34653,340 @@ func (s *ExportLabelsTaskRunProperties) SetOutputS3Path(v string) *ExportLabelsT return s } +// Specifies a transform that locates records in the dataset that have missing +// values and adds a new field with a value determined by imputation. The input +// data set is used to train the machine learning model that determines what +// the missing value should be. +type FillMissingValues struct { + _ struct{} `type:"structure"` + + // A JSON path to a variable in the data structure for the dataset that is filled. + FilledPath *string `type:"string"` + + // A JSON path to a variable in the data structure for the dataset that is imputed. + // + // ImputedPath is a required field + ImputedPath *string `type:"string" required:"true"` + + // The data inputs identified by their node names. + // + // Inputs is a required field + Inputs []*string `min:"1" type:"list" required:"true"` + + // The name of the transform node. + // + // Name is a required field + Name *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s FillMissingValues) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s FillMissingValues) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *FillMissingValues) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "FillMissingValues"} + if s.ImputedPath == nil { + invalidParams.Add(request.NewErrParamRequired("ImputedPath")) + } + if s.Inputs == nil { + invalidParams.Add(request.NewErrParamRequired("Inputs")) + } + if s.Inputs != nil && len(s.Inputs) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Inputs", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilledPath sets the FilledPath field's value. +func (s *FillMissingValues) SetFilledPath(v string) *FillMissingValues { + s.FilledPath = &v + return s +} + +// SetImputedPath sets the ImputedPath field's value. +func (s *FillMissingValues) SetImputedPath(v string) *FillMissingValues { + s.ImputedPath = &v + return s +} + +// SetInputs sets the Inputs field's value. +func (s *FillMissingValues) SetInputs(v []*string) *FillMissingValues { + s.Inputs = v + return s +} + +// SetName sets the Name field's value. +func (s *FillMissingValues) SetName(v string) *FillMissingValues { + s.Name = &v + return s +} + +// Specifies a transform that splits a dataset into two, based on a filter condition. +type Filter struct { + _ struct{} `type:"structure"` + + // Specifies a filter expression. + // + // Filters is a required field + Filters []*FilterExpression `type:"list" required:"true"` + + // The data inputs identified by their node names. 
+ // + // Inputs is a required field + Inputs []*string `min:"1" type:"list" required:"true"` + + // The operator used to filter rows by comparing the key value to a specified + // value. + // + // LogicalOperator is a required field + LogicalOperator *string `type:"string" required:"true" enum:"FilterLogicalOperator"` + + // The name of the transform node. + // + // Name is a required field + Name *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Filter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Filter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Filter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Filter"} + if s.Filters == nil { + invalidParams.Add(request.NewErrParamRequired("Filters")) + } + if s.Inputs == nil { + invalidParams.Add(request.NewErrParamRequired("Inputs")) + } + if s.Inputs != nil && len(s.Inputs) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Inputs", 1)) + } + if s.LogicalOperator == nil { + invalidParams.Add(request.NewErrParamRequired("LogicalOperator")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilters sets the Filters field's value. +func (s *Filter) SetFilters(v []*FilterExpression) *Filter { + s.Filters = v + return s +} + +// SetInputs sets the Inputs field's value. +func (s *Filter) SetInputs(v []*string) *Filter { + s.Inputs = v + return s +} + +// SetLogicalOperator sets the LogicalOperator field's value. +func (s *Filter) SetLogicalOperator(v string) *Filter { + s.LogicalOperator = &v + return s +} + +// SetName sets the Name field's value. +func (s *Filter) SetName(v string) *Filter { + s.Name = &v + return s +} + +// Specifies a filter expression. +type FilterExpression struct { + _ struct{} `type:"structure"` + + // Whether the expression is to be negated. + Negated *bool `type:"boolean"` + + // The type of operation to perform in the expression. + // + // Operation is a required field + Operation *string `type:"string" required:"true" enum:"FilterOperation"` + + // A list of filter values. + // + // Values is a required field + Values []*FilterValue `type:"list" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s FilterExpression) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s FilterExpression) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *FilterExpression) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "FilterExpression"} + if s.Operation == nil { + invalidParams.Add(request.NewErrParamRequired("Operation")) + } + if s.Values == nil { + invalidParams.Add(request.NewErrParamRequired("Values")) + } + if s.Values != nil { + for i, v := range s.Values { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Values", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetNegated sets the Negated field's value. +func (s *FilterExpression) SetNegated(v bool) *FilterExpression { + s.Negated = &v + return s +} + +// SetOperation sets the Operation field's value. +func (s *FilterExpression) SetOperation(v string) *FilterExpression { + s.Operation = &v + return s +} + +// SetValues sets the Values field's value. +func (s *FilterExpression) SetValues(v []*FilterValue) *FilterExpression { + s.Values = v + return s +} + +// Represents a single entry in the list of values for a FilterExpression. +type FilterValue struct { + _ struct{} `type:"structure"` + + // The type of filter value. + // + // Type is a required field + Type *string `type:"string" required:"true" enum:"FilterValueType"` + + // The value to be associated. + // + // Value is a required field + Value []*string `type:"list" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s FilterValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s FilterValue) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *FilterValue) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "FilterValue"} + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetType sets the Type field's value. +func (s *FilterValue) SetType(v string) *FilterValue { + s.Type = &v + return s +} + +// SetValue sets the Value field's value. +func (s *FilterValue) SetValue(v []*string) *FilterValue { + s.Value = v + return s +} + // The evaluation metrics for the find matches algorithm. The quality of your // machine learning transform is measured by getting your transform to predict // some matches and comparing the results to known matches from the same dataset. 
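Similarly, a hedged sketch of how Filter, FilterExpression, and FilterValue nest. The column "status", the constant "active", and the enum strings "AND", "EQ", "COLUMNREF", and "CONSTANT" are assumptions about the FilterLogicalOperator, FilterOperation, and FilterValueType enum values referenced above, which are defined elsewhere in this release:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	// Keep rows where the "status" column equals the constant "active".
	keepActive := (&glue.Filter{}).
		SetName("keep_active").
		SetInputs([]*string{aws.String("source_node")}).
		SetLogicalOperator("AND").
		SetFilters([]*glue.FilterExpression{
			(&glue.FilterExpression{}).
				SetOperation("EQ").
				SetValues([]*glue.FilterValue{
					(&glue.FilterValue{}).SetType("COLUMNREF").SetValue([]*string{aws.String("status")}),
					(&glue.FilterValue{}).SetType("CONSTANT").SetValue([]*string{aws.String("active")}),
				}),
		})

	// Validate recurses into Filters and Values via the methods above,
	// surfacing any missing required fields as an ErrInvalidParams error.
	if err := keepActive.Validate(); err != nil {
		fmt.Println("invalid filter:", err)
		return
	}
	fmt.Println("filter:", keepActive.GoString())
}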
@@ -39973,6 +42658,319 @@ func (s *GluePolicy) SetUpdateTime(v time.Time) *GluePolicy { return s } +// Specifies a user-defined schema when a schema cannot be determined by AWS +// Glue. +type GlueSchema struct { + _ struct{} `type:"structure"` + + // Specifies the column definitions that make up a Glue schema. + Columns []*GlueStudioSchemaColumn `type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GlueSchema) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GlueSchema) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GlueSchema) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GlueSchema"} + if s.Columns != nil { + for i, v := range s.Columns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Columns", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetColumns sets the Columns field's value. +func (s *GlueSchema) SetColumns(v []*GlueStudioSchemaColumn) *GlueSchema { + s.Columns = v + return s +} + +// Specifies a single column in a Glue schema definition. +type GlueStudioSchemaColumn struct { + _ struct{} `type:"structure"` + + // The name of the column in the Glue Studio schema. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // The hive type for this column in the Glue Studio schema. + Type *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GlueStudioSchemaColumn) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GlueStudioSchemaColumn) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GlueStudioSchemaColumn) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GlueStudioSchemaColumn"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *GlueStudioSchemaColumn) SetName(v string) *GlueStudioSchemaColumn { + s.Name = &v + return s +} + +// SetType sets the Type field's value. +func (s *GlueStudioSchemaColumn) SetType(v string) *GlueStudioSchemaColumn { + s.Type = &v + return s +} + +// Specifies the data store in the governed Glue Data Catalog. +type GovernedCatalogSource struct { + _ struct{} `type:"structure"` + + // Specifies additional connection options. 
+	AdditionalOptions *S3SourceAdditionalOptions `type:"structure"`
+
+	// The database to read from.
+	//
+	// Database is a required field
+	Database *string `type:"string" required:"true"`
+
+	// The name of the data store.
+	//
+	// Name is a required field
+	Name *string `type:"string" required:"true"`
+
+	// Partitions satisfying this predicate are read from the data source. Set
+	// to "" (empty) by default.
+	PartitionPredicate *string `type:"string"`
+
+	// The database table to read from.
+	//
+	// Table is a required field
+	Table *string `type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GovernedCatalogSource) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GovernedCatalogSource) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GovernedCatalogSource) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GovernedCatalogSource"}
+	if s.Database == nil {
+		invalidParams.Add(request.NewErrParamRequired("Database"))
+	}
+	if s.Name == nil {
+		invalidParams.Add(request.NewErrParamRequired("Name"))
+	}
+	if s.Table == nil {
+		invalidParams.Add(request.NewErrParamRequired("Table"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetAdditionalOptions sets the AdditionalOptions field's value.
+func (s *GovernedCatalogSource) SetAdditionalOptions(v *S3SourceAdditionalOptions) *GovernedCatalogSource {
+	s.AdditionalOptions = v
+	return s
+}
+
+// SetDatabase sets the Database field's value.
+func (s *GovernedCatalogSource) SetDatabase(v string) *GovernedCatalogSource {
+	s.Database = &v
+	return s
+}
+
+// SetName sets the Name field's value.
+func (s *GovernedCatalogSource) SetName(v string) *GovernedCatalogSource {
+	s.Name = &v
+	return s
+}
+
+// SetPartitionPredicate sets the PartitionPredicate field's value.
+func (s *GovernedCatalogSource) SetPartitionPredicate(v string) *GovernedCatalogSource {
+	s.PartitionPredicate = &v
+	return s
+}
+
+// SetTable sets the Table field's value.
+func (s *GovernedCatalogSource) SetTable(v string) *GovernedCatalogSource {
+	s.Table = &v
+	return s
+}
+
+// Specifies a data target that writes to Amazon S3 using the Glue Data Catalog.
+type GovernedCatalogTarget struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the database to write to.
+	//
+	// Database is a required field
+	Database *string `type:"string" required:"true"`
+
+	// The nodes that are inputs to the data target.
+	//
+	// Inputs is a required field
+	Inputs []*string `min:"1" type:"list" required:"true"`
+
+	// The name of the data target.
+	//
+	// Name is a required field
+	Name *string `type:"string" required:"true"`
+
+	// Specifies native partitioning using a sequence of keys.
+	PartitionKeys [][]*string `type:"list"`
+
+	// A policy that specifies update behavior for the governed catalog.
+	SchemaChangePolicy *CatalogSchemaChangePolicy `type:"structure"`
+
+	// The name of the table in the database to write to.
+	//
+	// Table is a required field
+	Table *string `type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GovernedCatalogTarget) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GovernedCatalogTarget) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GovernedCatalogTarget) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GovernedCatalogTarget"}
+	if s.Database == nil {
+		invalidParams.Add(request.NewErrParamRequired("Database"))
+	}
+	if s.Inputs == nil {
+		invalidParams.Add(request.NewErrParamRequired("Inputs"))
+	}
+	if s.Inputs != nil && len(s.Inputs) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Inputs", 1))
+	}
+	if s.Name == nil {
+		invalidParams.Add(request.NewErrParamRequired("Name"))
+	}
+	if s.Table == nil {
+		invalidParams.Add(request.NewErrParamRequired("Table"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetDatabase sets the Database field's value.
+func (s *GovernedCatalogTarget) SetDatabase(v string) *GovernedCatalogTarget {
+	s.Database = &v
+	return s
+}
+
+// SetInputs sets the Inputs field's value.
+func (s *GovernedCatalogTarget) SetInputs(v []*string) *GovernedCatalogTarget {
+	s.Inputs = v
+	return s
+}
+
+// SetName sets the Name field's value.
+func (s *GovernedCatalogTarget) SetName(v string) *GovernedCatalogTarget {
+	s.Name = &v
+	return s
+}
+
+// SetPartitionKeys sets the PartitionKeys field's value.
+func (s *GovernedCatalogTarget) SetPartitionKeys(v [][]*string) *GovernedCatalogTarget {
+	s.PartitionKeys = v
+	return s
+}
+
+// SetSchemaChangePolicy sets the SchemaChangePolicy field's value.
+func (s *GovernedCatalogTarget) SetSchemaChangePolicy(v *CatalogSchemaChangePolicy) *GovernedCatalogTarget {
+	s.SchemaChangePolicy = v
+	return s
+}
+
+// SetTable sets the Table field's value.
+func (s *GovernedCatalogTarget) SetTable(v string) *GovernedCatalogTarget {
+	s.Table = &v
+	return s
+}
+
 // A classifier that uses grok patterns.
 type GrokClassifier struct {
 	_ struct{} `type:"structure"`
@@ -40630,6 +43628,404 @@ func (s *InvalidStateException) RequestID() string {
 	return s.RespMetadata.RequestID
 }
 
+// Additional connection options for the connector.
+type JDBCConnectorOptions struct {
+	_ struct{} `type:"structure"`
+
+	// Custom data type mapping that builds a mapping from a JDBC data type to
+	// a Glue data type. For example, the option "dataTypeMapping":{"FLOAT":"STRING"}
+	// maps data fields of JDBC type FLOAT into the Java String type by calling
+	// the ResultSet.getString() method of the driver, and uses it to build the
+	// Glue record. The ResultSet object is implemented by each driver, so the behavior
+	// is specific to the driver you use. Refer to the documentation for your JDBC
+	// driver to understand how the driver performs the conversions.
+ DataTypeMapping map[string]*string `type:"map"` + + // Extra condition clause to filter data from source. For example: + // + // BillingCity='Mountain View' + // + // When using a query instead of a table name, you should validate that the + // query works with the specified filterPredicate. + FilterPredicate *string `type:"string"` + + // The name of the job bookmark keys on which to sort. + JobBookmarkKeys []*string `type:"list"` + + // Specifies an ascending or descending sort order. + JobBookmarkKeysSortOrder *string `type:"string"` + + // The minimum value of partitionColumn that is used to decide partition stride. + LowerBound *int64 `type:"long"` + + // The number of partitions. This value, along with lowerBound (inclusive) and + // upperBound (exclusive), form partition strides for generated WHERE clause + // expressions that are used to split the partitionColumn. + NumPartitions *int64 `type:"long"` + + // The name of an integer column that is used for partitioning. This option + // works only when it's included with lowerBound, upperBound, and numPartitions. + // This option works the same way as in the Spark SQL JDBC reader. + PartitionColumn *string `type:"string"` + + // The maximum value of partitionColumn that is used to decide partition stride. + UpperBound *int64 `type:"long"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s JDBCConnectorOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s JDBCConnectorOptions) GoString() string { + return s.String() +} + +// SetDataTypeMapping sets the DataTypeMapping field's value. +func (s *JDBCConnectorOptions) SetDataTypeMapping(v map[string]*string) *JDBCConnectorOptions { + s.DataTypeMapping = v + return s +} + +// SetFilterPredicate sets the FilterPredicate field's value. +func (s *JDBCConnectorOptions) SetFilterPredicate(v string) *JDBCConnectorOptions { + s.FilterPredicate = &v + return s +} + +// SetJobBookmarkKeys sets the JobBookmarkKeys field's value. +func (s *JDBCConnectorOptions) SetJobBookmarkKeys(v []*string) *JDBCConnectorOptions { + s.JobBookmarkKeys = v + return s +} + +// SetJobBookmarkKeysSortOrder sets the JobBookmarkKeysSortOrder field's value. +func (s *JDBCConnectorOptions) SetJobBookmarkKeysSortOrder(v string) *JDBCConnectorOptions { + s.JobBookmarkKeysSortOrder = &v + return s +} + +// SetLowerBound sets the LowerBound field's value. +func (s *JDBCConnectorOptions) SetLowerBound(v int64) *JDBCConnectorOptions { + s.LowerBound = &v + return s +} + +// SetNumPartitions sets the NumPartitions field's value. +func (s *JDBCConnectorOptions) SetNumPartitions(v int64) *JDBCConnectorOptions { + s.NumPartitions = &v + return s +} + +// SetPartitionColumn sets the PartitionColumn field's value. +func (s *JDBCConnectorOptions) SetPartitionColumn(v string) *JDBCConnectorOptions { + s.PartitionColumn = &v + return s +} + +// SetUpperBound sets the UpperBound field's value. +func (s *JDBCConnectorOptions) SetUpperBound(v int64) *JDBCConnectorOptions { + s.UpperBound = &v + return s +} + +// Specifies a connector to a JDBC data source. 
+type JDBCConnectorSource struct { + _ struct{} `type:"structure"` + + // Additional connection options for the connector. + AdditionalOptions *JDBCConnectorOptions `type:"structure"` + + // The name of the connection that is associated with the connector. + // + // ConnectionName is a required field + ConnectionName *string `type:"string" required:"true"` + + // The name of the table in the data source. + ConnectionTable *string `type:"string"` + + // The type of connection, such as marketplace.jdbc or custom.jdbc, designating + // a connection to a JDBC data store. + // + // ConnectionType is a required field + ConnectionType *string `type:"string" required:"true"` + + // The name of a connector that assists with accessing the data store in Glue + // Studio. + // + // ConnectorName is a required field + ConnectorName *string `type:"string" required:"true"` + + // The name of the data source. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // Specifies the data schema for the custom JDBC source. + OutputSchemas []*GlueSchema `type:"list"` + + // The table or SQL query to get the data from. You can specify either ConnectionTable + // or query, but not both. + Query *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s JDBCConnectorSource) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s JDBCConnectorSource) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *JDBCConnectorSource) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "JDBCConnectorSource"} + if s.ConnectionName == nil { + invalidParams.Add(request.NewErrParamRequired("ConnectionName")) + } + if s.ConnectionType == nil { + invalidParams.Add(request.NewErrParamRequired("ConnectionType")) + } + if s.ConnectorName == nil { + invalidParams.Add(request.NewErrParamRequired("ConnectorName")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.OutputSchemas != nil { + for i, v := range s.OutputSchemas { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "OutputSchemas", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAdditionalOptions sets the AdditionalOptions field's value. +func (s *JDBCConnectorSource) SetAdditionalOptions(v *JDBCConnectorOptions) *JDBCConnectorSource { + s.AdditionalOptions = v + return s +} + +// SetConnectionName sets the ConnectionName field's value. +func (s *JDBCConnectorSource) SetConnectionName(v string) *JDBCConnectorSource { + s.ConnectionName = &v + return s +} + +// SetConnectionTable sets the ConnectionTable field's value. +func (s *JDBCConnectorSource) SetConnectionTable(v string) *JDBCConnectorSource { + s.ConnectionTable = &v + return s +} + +// SetConnectionType sets the ConnectionType field's value. 
+func (s *JDBCConnectorSource) SetConnectionType(v string) *JDBCConnectorSource {
+	s.ConnectionType = &v
+	return s
+}
+
+// SetConnectorName sets the ConnectorName field's value.
+func (s *JDBCConnectorSource) SetConnectorName(v string) *JDBCConnectorSource {
+	s.ConnectorName = &v
+	return s
+}
+
+// SetName sets the Name field's value.
+func (s *JDBCConnectorSource) SetName(v string) *JDBCConnectorSource {
+	s.Name = &v
+	return s
+}
+
+// SetOutputSchemas sets the OutputSchemas field's value.
+func (s *JDBCConnectorSource) SetOutputSchemas(v []*GlueSchema) *JDBCConnectorSource {
+	s.OutputSchemas = v
+	return s
+}
+
+// SetQuery sets the Query field's value.
+func (s *JDBCConnectorSource) SetQuery(v string) *JDBCConnectorSource {
+	s.Query = &v
+	return s
+}
+
+// Specifies a data target that writes to a JDBC data store using a connector.
+type JDBCConnectorTarget struct {
+	_ struct{} `type:"structure"`
+
+	// Additional connection options for the connector.
+	AdditionalOptions map[string]*string `type:"map"`
+
+	// The name of the connection that is associated with the connector.
+	//
+	// ConnectionName is a required field
+	ConnectionName *string `type:"string" required:"true"`
+
+	// The name of the table in the data target.
+	//
+	// ConnectionTable is a required field
+	ConnectionTable *string `type:"string" required:"true"`
+
+	// The type of connection, such as marketplace.jdbc or custom.jdbc, designating
+	// a connection to a JDBC data target.
+	//
+	// ConnectionType is a required field
+	ConnectionType *string `type:"string" required:"true"`
+
+	// The name of a connector that will be used.
+	//
+	// ConnectorName is a required field
+	ConnectorName *string `type:"string" required:"true"`
+
+	// The nodes that are inputs to the data target.
+	//
+	// Inputs is a required field
+	Inputs []*string `min:"1" type:"list" required:"true"`
+
+	// The name of the data target.
+	//
+	// Name is a required field
+	Name *string `type:"string" required:"true"`
+
+	// Specifies the data schema for the JDBC target.
+	OutputSchemas []*GlueSchema `type:"list"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s JDBCConnectorTarget) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s JDBCConnectorTarget) GoString() string {
+	return s.String()
+}
+
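+// Illustrative usage (an editor's sketch, not generated code): populating a
+// JDBCConnectorTarget and checking it client-side. The node, connection, and
+// connector names are hypothetical placeholders.
+//
+//	target := &glue.JDBCConnectorTarget{}
+//	target.SetName("WriteOrders").
+//		SetInputs([]*string{aws.String("node-1")}).
+//		SetConnectionName("my-jdbc-connection").
+//		SetConnectionTable("orders").
+//		SetConnectionType("marketplace.jdbc").
+//		SetConnectorName("my-connector")
+//
+//	if err := target.Validate(); err != nil {
+//		// err is a request.ErrInvalidParams listing any missing required fields
+//	}
+
+// Validate inspects the fields of the type to determine if they are valid.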
+func (s *JDBCConnectorTarget) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "JDBCConnectorTarget"}
+	if s.ConnectionName == nil {
+		invalidParams.Add(request.NewErrParamRequired("ConnectionName"))
+	}
+	if s.ConnectionTable == nil {
+		invalidParams.Add(request.NewErrParamRequired("ConnectionTable"))
+	}
+	if s.ConnectionType == nil {
+		invalidParams.Add(request.NewErrParamRequired("ConnectionType"))
+	}
+	if s.ConnectorName == nil {
+		invalidParams.Add(request.NewErrParamRequired("ConnectorName"))
+	}
+	if s.Inputs == nil {
+		invalidParams.Add(request.NewErrParamRequired("Inputs"))
+	}
+	if s.Inputs != nil && len(s.Inputs) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Inputs", 1))
+	}
+	if s.Name == nil {
+		invalidParams.Add(request.NewErrParamRequired("Name"))
+	}
+	if s.OutputSchemas != nil {
+		for i, v := range s.OutputSchemas {
+			if v == nil {
+				continue
+			}
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "OutputSchemas", i), err.(request.ErrInvalidParams))
+			}
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetAdditionalOptions sets the AdditionalOptions field's value.
+func (s *JDBCConnectorTarget) SetAdditionalOptions(v map[string]*string) *JDBCConnectorTarget {
+	s.AdditionalOptions = v
+	return s
+}
+
+// SetConnectionName sets the ConnectionName field's value.
+func (s *JDBCConnectorTarget) SetConnectionName(v string) *JDBCConnectorTarget {
+	s.ConnectionName = &v
+	return s
+}
+
+// SetConnectionTable sets the ConnectionTable field's value.
+func (s *JDBCConnectorTarget) SetConnectionTable(v string) *JDBCConnectorTarget {
+	s.ConnectionTable = &v
+	return s
+}
+
+// SetConnectionType sets the ConnectionType field's value.
+func (s *JDBCConnectorTarget) SetConnectionType(v string) *JDBCConnectorTarget {
+	s.ConnectionType = &v
+	return s
+}
+
+// SetConnectorName sets the ConnectorName field's value.
+func (s *JDBCConnectorTarget) SetConnectorName(v string) *JDBCConnectorTarget {
+	s.ConnectorName = &v
+	return s
+}
+
+// SetInputs sets the Inputs field's value.
+func (s *JDBCConnectorTarget) SetInputs(v []*string) *JDBCConnectorTarget {
+	s.Inputs = v
+	return s
+}
+
+// SetName sets the Name field's value.
+func (s *JDBCConnectorTarget) SetName(v string) *JDBCConnectorTarget {
+	s.Name = &v
+	return s
+}
+
+// SetOutputSchemas sets the OutputSchemas field's value.
+func (s *JDBCConnectorTarget) SetOutputSchemas(v []*GlueSchema) *JDBCConnectorTarget {
+	s.OutputSchemas = v
+	return s
+}
+
 // Specifies a JDBC data store to crawl.
 type JdbcTarget struct {
 	_ struct{} `type:"structure"`
@@ -40695,6 +44091,14 @@ type Job struct {
 	// Deprecated: This property is deprecated, use MaxCapacity instead.
 	AllocatedCapacity *int64 `deprecated:"true" type:"integer"`
 
+	// The representation of a directed acyclic graph on which both the Glue Studio
+	// visual component and Glue Studio code generation are based.
+	//
+	// CodeGenConfigurationNodes is a sensitive parameter and its value will be
+	// replaced with "sensitive" in the string returned by Job's
+	// String and GoString methods.
+	CodeGenConfigurationNodes map[string]*CodeGenConfigurationNode `type:"map" sensitive:"true"`
+
 	// The JobCommand that runs this job.
 	Command *JobCommand `type:"structure"`
 
@@ -40837,6 +44241,12 @@ func (s *Job) SetAllocatedCapacity(v int64) *Job {
 	return s
 }
 
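+// Illustrative usage (an editor's sketch, not generated code): attaching a
+// minimal visual graph to a job definition. The node IDs and names are
+// hypothetical; each map key identifies one node in the DAG, and downstream
+// nodes reference upstream keys through their Inputs.
+//
+//	nodes := map[string]*glue.CodeGenConfigurationNode{
+//		"node-1": {CatalogSource: &glue.CatalogSource{
+//			Name:     aws.String("ReadSales"),
+//			Database: aws.String("sales_db"),
+//			Table:    aws.String("orders"),
+//		}},
+//		"node-2": {CatalogTarget: &glue.BasicCatalogTarget{
+//			Name:     aws.String("WriteSales"),
+//			Inputs:   []*string{aws.String("node-1")},
+//			Database: aws.String("sales_db"),
+//			Table:    aws.String("orders_copy"),
+//		}},
+//	}
+//	job := (&glue.Job{}).SetCodeGenConfigurationNodes(nodes)
+
+// SetCodeGenConfigurationNodes sets the CodeGenConfigurationNodes field's value.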
+func (s *Job) SetCodeGenConfigurationNodes(v map[string]*CodeGenConfigurationNode) *Job {
+	s.CodeGenConfigurationNodes = v
+	return s
+}
+
 // SetCommand sets the Command field's value.
 func (s *Job) SetCommand(v *JobCommand) *Job {
 	s.Command = v
@@ -41481,6 +44891,14 @@ type JobUpdate struct {
 	// Deprecated: This property is deprecated, use MaxCapacity instead.
 	AllocatedCapacity *int64 `deprecated:"true" type:"integer"`
 
+	// The representation of a directed acyclic graph on which both the Glue Studio
+	// visual component and Glue Studio code generation are based.
+	//
+	// CodeGenConfigurationNodes is a sensitive parameter and its value will be
+	// replaced with "sensitive" in the string returned by JobUpdate's
+	// String and GoString methods.
+	CodeGenConfigurationNodes map[string]*CodeGenConfigurationNode `type:"map" sensitive:"true"`
+
 	// The JobCommand that runs this job (required).
 	Command *JobCommand `type:"structure"`
 
@@ -41617,6 +45035,16 @@ func (s *JobUpdate) Validate() error {
 	if s.Timeout != nil && *s.Timeout < 1 {
 		invalidParams.Add(request.NewErrParamMinValue("Timeout", 1))
 	}
+	if s.CodeGenConfigurationNodes != nil {
+		for i, v := range s.CodeGenConfigurationNodes {
+			if v == nil {
+				continue
+			}
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "CodeGenConfigurationNodes", i), err.(request.ErrInvalidParams))
+			}
+		}
+	}
 	if s.NotificationProperty != nil {
 		if err := s.NotificationProperty.Validate(); err != nil {
 			invalidParams.AddNested("NotificationProperty", err.(request.ErrInvalidParams))
@@ -41635,6 +45063,12 @@ func (s *JobUpdate) SetAllocatedCapacity(v int64) *JobUpdate {
 	return s
 }
 
+// SetCodeGenConfigurationNodes sets the CodeGenConfigurationNodes field's value.
+func (s *JobUpdate) SetCodeGenConfigurationNodes(v map[string]*CodeGenConfigurationNode) *JobUpdate {
+	s.CodeGenConfigurationNodes = v
+	return s
+}
+
 // SetCommand sets the Command field's value.
 func (s *JobUpdate) SetCommand(v *JobCommand) *JobUpdate {
 	s.Command = v
@@ -41731,6 +45165,174 @@ func (s *JobUpdate) SetWorkerType(v string) *JobUpdate {
 	return s
 }
 
+// Specifies a transform that joins two datasets into one dataset using a comparison
+// phrase on the specified data property keys. You can use inner, outer, left,
+// right, left semi, and left anti joins.
+type Join struct {
+	_ struct{} `type:"structure"`
+
+	// A list of the two columns to be joined.
+	//
+	// Columns is a required field
+	Columns []*JoinColumn `min:"2" type:"list" required:"true"`
+
+	// The data inputs identified by their node names.
+	//
+	// Inputs is a required field
+	Inputs []*string `min:"2" type:"list" required:"true"`
+
+	// Specifies the type of join to be performed on the datasets.
+	//
+	// JoinType is a required field
+	JoinType *string `type:"string" required:"true" enum:"JoinType"`
+
+	// The name of the transform node.
+	//
+	// Name is a required field
+	Name *string `type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Join) String() string {
+	return awsutil.Prettify(s)
+}
+
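+// Illustrative usage (an editor's sketch, not generated code): joining two
+// upstream nodes on a shared key. The node names and key paths are
+// hypothetical, and the join type string is assumed here to be one of the
+// JoinType enum values.
+//
+//	join := &glue.Join{}
+//	join.SetName("JoinOrders").
+//		SetJoinType("equijoin"). // assumed enum value for an inner join
+//		SetInputs([]*string{aws.String("node-1"), aws.String("node-2")}).
+//		SetColumns([]*glue.JoinColumn{
+//			{From: aws.String("node-1"), Keys: [][]*string{{aws.String("order_id")}}},
+//			{From: aws.String("node-2"), Keys: [][]*string{{aws.String("order_id")}}},
+//		})
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".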
+func (s Join) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Join) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Join"} + if s.Columns == nil { + invalidParams.Add(request.NewErrParamRequired("Columns")) + } + if s.Columns != nil && len(s.Columns) < 2 { + invalidParams.Add(request.NewErrParamMinLen("Columns", 2)) + } + if s.Inputs == nil { + invalidParams.Add(request.NewErrParamRequired("Inputs")) + } + if s.Inputs != nil && len(s.Inputs) < 2 { + invalidParams.Add(request.NewErrParamMinLen("Inputs", 2)) + } + if s.JoinType == nil { + invalidParams.Add(request.NewErrParamRequired("JoinType")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Columns != nil { + for i, v := range s.Columns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Columns", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetColumns sets the Columns field's value. +func (s *Join) SetColumns(v []*JoinColumn) *Join { + s.Columns = v + return s +} + +// SetInputs sets the Inputs field's value. +func (s *Join) SetInputs(v []*string) *Join { + s.Inputs = v + return s +} + +// SetJoinType sets the JoinType field's value. +func (s *Join) SetJoinType(v string) *Join { + s.JoinType = &v + return s +} + +// SetName sets the Name field's value. +func (s *Join) SetName(v string) *Join { + s.Name = &v + return s +} + +// Specifies a column to be joined. +type JoinColumn struct { + _ struct{} `type:"structure"` + + // The column to be joined. + // + // From is a required field + From *string `type:"string" required:"true"` + + // The key of the column to be joined. + // + // Keys is a required field + Keys [][]*string `type:"list" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s JoinColumn) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s JoinColumn) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *JoinColumn) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "JoinColumn"} + if s.From == nil { + invalidParams.Add(request.NewErrParamRequired("From")) + } + if s.Keys == nil { + invalidParams.Add(request.NewErrParamRequired("Keys")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFrom sets the From field's value. +func (s *JoinColumn) SetFrom(v string) *JoinColumn { + s.From = &v + return s +} + +// SetKeys sets the Keys field's value. +func (s *JoinColumn) SetKeys(v [][]*string) *JoinColumn { + s.Keys = v + return s +} + // A classifier for JSON content. type JsonClassifier struct { _ struct{} `type:"structure"` @@ -41805,6 +45407,180 @@ func (s *JsonClassifier) SetVersion(v int64) *JsonClassifier { return s } +// Additional options for streaming. 
+type KafkaStreamingSourceOptions struct {
+	_ struct{} `type:"structure"`
+
+	// The specific TopicPartitions to consume. You must specify at least one of
+	// "topicName", "assign" or "subscribePattern".
+	Assign *string `type:"string"`
+
+	// A list of bootstrap server URLs, for example: b-1.vpc-test-2.o4q88o.c6.kafka.us-east-1.amazonaws.com:9094.
+	// This option must be specified in the API call or defined in the table metadata
+	// in the Data Catalog.
+	BootstrapServers *string `type:"string"`
+
+	// An optional classification.
+	Classification *string `type:"string"`
+
+	// The name of the connection.
+	ConnectionName *string `type:"string"`
+
+	// Specifies the delimiter character.
+	Delimiter *string `type:"string"`
+
+	// The end point at which a batch query ends. Possible values are either "latest"
+	// or a JSON string that specifies an ending offset for each TopicPartition.
+	EndingOffsets *string `type:"string"`
+
+	// The rate limit on the maximum number of offsets that are processed per trigger
+	// interval. The specified total number of offsets is proportionally split across
+	// topicPartitions of different volumes. The default value is null, which means
+	// that the consumer reads all offsets until the known latest offset.
+	MaxOffsetsPerTrigger *int64 `type:"long"`
+
+	// The desired minimum number of partitions to read from Kafka. The default
+	// value is null, which means that the number of Spark partitions is equal to
+	// the number of Kafka partitions.
+	MinPartitions *int64 `type:"integer"`
+
+	// The number of times to retry before failing to fetch Kafka offsets. The default
+	// value is 3.
+	NumRetries *int64 `type:"integer"`
+
+	// The timeout in milliseconds to poll data from Kafka in Spark job executors.
+	// The default value is 512.
+	PollTimeoutMs *int64 `type:"long"`
+
+	// The time in milliseconds to wait before retrying to fetch Kafka offsets.
+	// The default value is 10.
+	RetryIntervalMs *int64 `type:"long"`
+
+	// The protocol used to communicate with brokers. The possible values are "SSL"
+	// or "PLAINTEXT".
+	SecurityProtocol *string `type:"string"`
+
+	// The starting position in the Kafka topic to read data from. The possible
+	// values are "earliest" or "latest". The default value is "latest".
+	StartingOffsets *string `type:"string"`
+
+	// A Java regex string that identifies the topic list to subscribe to. You must
+	// specify at least one of "topicName", "assign" or "subscribePattern".
+	SubscribePattern *string `type:"string"`
+
+	// The topic name as specified in Apache Kafka. You must specify at least one
+	// of "topicName", "assign" or "subscribePattern".
+	TopicName *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s KafkaStreamingSourceOptions) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s KafkaStreamingSourceOptions) GoString() string {
+	return s.String()
+}
+
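+// Illustrative usage (an editor's sketch, not generated code): options for a
+// streaming job that reads one topic from the earliest offset. The connection
+// and topic names are hypothetical; at least one of TopicName, Assign, or
+// SubscribePattern must be set, as noted above.
+//
+//	opts := &glue.KafkaStreamingSourceOptions{}
+//	opts.SetConnectionName("my-kafka-connection").
+//		SetTopicName("clickstream").
+//		SetStartingOffsets("earliest").
+//		SetNumRetries(3).
+//		SetPollTimeoutMs(512)
+
+// SetAssign sets the Assign field's value.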
+func (s *KafkaStreamingSourceOptions) SetAssign(v string) *KafkaStreamingSourceOptions { + s.Assign = &v + return s +} + +// SetBootstrapServers sets the BootstrapServers field's value. +func (s *KafkaStreamingSourceOptions) SetBootstrapServers(v string) *KafkaStreamingSourceOptions { + s.BootstrapServers = &v + return s +} + +// SetClassification sets the Classification field's value. +func (s *KafkaStreamingSourceOptions) SetClassification(v string) *KafkaStreamingSourceOptions { + s.Classification = &v + return s +} + +// SetConnectionName sets the ConnectionName field's value. +func (s *KafkaStreamingSourceOptions) SetConnectionName(v string) *KafkaStreamingSourceOptions { + s.ConnectionName = &v + return s +} + +// SetDelimiter sets the Delimiter field's value. +func (s *KafkaStreamingSourceOptions) SetDelimiter(v string) *KafkaStreamingSourceOptions { + s.Delimiter = &v + return s +} + +// SetEndingOffsets sets the EndingOffsets field's value. +func (s *KafkaStreamingSourceOptions) SetEndingOffsets(v string) *KafkaStreamingSourceOptions { + s.EndingOffsets = &v + return s +} + +// SetMaxOffsetsPerTrigger sets the MaxOffsetsPerTrigger field's value. +func (s *KafkaStreamingSourceOptions) SetMaxOffsetsPerTrigger(v int64) *KafkaStreamingSourceOptions { + s.MaxOffsetsPerTrigger = &v + return s +} + +// SetMinPartitions sets the MinPartitions field's value. +func (s *KafkaStreamingSourceOptions) SetMinPartitions(v int64) *KafkaStreamingSourceOptions { + s.MinPartitions = &v + return s +} + +// SetNumRetries sets the NumRetries field's value. +func (s *KafkaStreamingSourceOptions) SetNumRetries(v int64) *KafkaStreamingSourceOptions { + s.NumRetries = &v + return s +} + +// SetPollTimeoutMs sets the PollTimeoutMs field's value. +func (s *KafkaStreamingSourceOptions) SetPollTimeoutMs(v int64) *KafkaStreamingSourceOptions { + s.PollTimeoutMs = &v + return s +} + +// SetRetryIntervalMs sets the RetryIntervalMs field's value. +func (s *KafkaStreamingSourceOptions) SetRetryIntervalMs(v int64) *KafkaStreamingSourceOptions { + s.RetryIntervalMs = &v + return s +} + +// SetSecurityProtocol sets the SecurityProtocol field's value. +func (s *KafkaStreamingSourceOptions) SetSecurityProtocol(v string) *KafkaStreamingSourceOptions { + s.SecurityProtocol = &v + return s +} + +// SetStartingOffsets sets the StartingOffsets field's value. +func (s *KafkaStreamingSourceOptions) SetStartingOffsets(v string) *KafkaStreamingSourceOptions { + s.StartingOffsets = &v + return s +} + +// SetSubscribePattern sets the SubscribePattern field's value. +func (s *KafkaStreamingSourceOptions) SetSubscribePattern(v string) *KafkaStreamingSourceOptions { + s.SubscribePattern = &v + return s +} + +// SetTopicName sets the TopicName field's value. +func (s *KafkaStreamingSourceOptions) SetTopicName(v string) *KafkaStreamingSourceOptions { + s.TopicName = &v + return s +} + // A partition key pair consisting of a name and a type. type KeySchemaElement struct { _ struct{} `type:"structure"` @@ -41850,6 +45626,212 @@ func (s *KeySchemaElement) SetType(v string) *KeySchemaElement { return s } +// Additional options for the Amazon Kinesis streaming data source. +type KinesisStreamingSourceOptions struct { + _ struct{} `type:"structure"` + + // Adds a time delay between two consecutive getRecords operations. The default + // value is "False". This option is only configurable for Glue version 2.0 and + // above. 
+ AddIdleTimeBetweenReads *bool `type:"boolean"` + + // Avoids creating an empty microbatch job by checking for unread data in the + // Kinesis data stream before the batch is started. The default value is "False". + AvoidEmptyBatches *bool `type:"boolean"` + + // An optional classification. + Classification *string `type:"string"` + + // Specifies the delimiter character. + Delimiter *string `type:"string"` + + // The minimum time interval between two ListShards API calls for your script + // to consider resharding. The default value is 1s. + DescribeShardInterval *int64 `type:"long"` + + // The URL of the Kinesis endpoint. + EndpointUrl *string `type:"string"` + + // The minimum time delay between two consecutive getRecords operations, specified + // in ms. The default value is 1000. This option is only configurable for Glue + // version 2.0 and above. + IdleTimeBetweenReadsInMs *int64 `type:"long"` + + // The maximum number of records to fetch per shard in the Kinesis data stream. + // The default value is 100000. + MaxFetchRecordsPerShard *int64 `type:"long"` + + // The maximum time spent in the job executor to fetch a record from the Kinesis + // data stream per shard, specified in milliseconds (ms). The default value + // is 1000. + MaxFetchTimeInMs *int64 `type:"long"` + + // The maximum number of records to fetch from the Kinesis data stream in each + // getRecords operation. The default value is 10000. + MaxRecordPerRead *int64 `type:"long"` + + // The maximum cool-off time period (specified in ms) between two retries of + // a Kinesis Data Streams API call. The default value is 10000. + MaxRetryIntervalMs *int64 `type:"long"` + + // The maximum number of retries for Kinesis Data Streams API requests. The + // default value is 3. + NumRetries *int64 `type:"integer"` + + // The cool-off time period (specified in ms) before retrying the Kinesis Data + // Streams API call. The default value is 1000. + RetryIntervalMs *int64 `type:"long"` + + // The Amazon Resource Name (ARN) of the role to assume using AWS Security Token + // Service (AWS STS). This role must have permissions for describe or read record + // operations for the Kinesis data stream. You must use this parameter when + // accessing a data stream in a different account. Used in conjunction with + // "awsSTSSessionName". + RoleArn *string `type:"string"` + + // An identifier for the session assuming the role using AWS STS. You must use + // this parameter when accessing a data stream in a different account. Used + // in conjunction with "awsSTSRoleARN". + RoleSessionName *string `type:"string"` + + // The starting position in the Kinesis data stream to read data from. The possible + // values are "latest", "trim_horizon", or "earliest". The default value is + // "latest". + StartingPosition *string `type:"string" enum:"StartingPosition"` + + // The Amazon Resource Name (ARN) of the Kinesis data stream. + StreamArn *string `type:"string"` + + // The name of the Kinesis data stream. + StreamName *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s KinesisStreamingSourceOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s KinesisStreamingSourceOptions) GoString() string { + return s.String() +} + +// SetAddIdleTimeBetweenReads sets the AddIdleTimeBetweenReads field's value. +func (s *KinesisStreamingSourceOptions) SetAddIdleTimeBetweenReads(v bool) *KinesisStreamingSourceOptions { + s.AddIdleTimeBetweenReads = &v + return s +} + +// SetAvoidEmptyBatches sets the AvoidEmptyBatches field's value. +func (s *KinesisStreamingSourceOptions) SetAvoidEmptyBatches(v bool) *KinesisStreamingSourceOptions { + s.AvoidEmptyBatches = &v + return s +} + +// SetClassification sets the Classification field's value. +func (s *KinesisStreamingSourceOptions) SetClassification(v string) *KinesisStreamingSourceOptions { + s.Classification = &v + return s +} + +// SetDelimiter sets the Delimiter field's value. +func (s *KinesisStreamingSourceOptions) SetDelimiter(v string) *KinesisStreamingSourceOptions { + s.Delimiter = &v + return s +} + +// SetDescribeShardInterval sets the DescribeShardInterval field's value. +func (s *KinesisStreamingSourceOptions) SetDescribeShardInterval(v int64) *KinesisStreamingSourceOptions { + s.DescribeShardInterval = &v + return s +} + +// SetEndpointUrl sets the EndpointUrl field's value. +func (s *KinesisStreamingSourceOptions) SetEndpointUrl(v string) *KinesisStreamingSourceOptions { + s.EndpointUrl = &v + return s +} + +// SetIdleTimeBetweenReadsInMs sets the IdleTimeBetweenReadsInMs field's value. +func (s *KinesisStreamingSourceOptions) SetIdleTimeBetweenReadsInMs(v int64) *KinesisStreamingSourceOptions { + s.IdleTimeBetweenReadsInMs = &v + return s +} + +// SetMaxFetchRecordsPerShard sets the MaxFetchRecordsPerShard field's value. +func (s *KinesisStreamingSourceOptions) SetMaxFetchRecordsPerShard(v int64) *KinesisStreamingSourceOptions { + s.MaxFetchRecordsPerShard = &v + return s +} + +// SetMaxFetchTimeInMs sets the MaxFetchTimeInMs field's value. +func (s *KinesisStreamingSourceOptions) SetMaxFetchTimeInMs(v int64) *KinesisStreamingSourceOptions { + s.MaxFetchTimeInMs = &v + return s +} + +// SetMaxRecordPerRead sets the MaxRecordPerRead field's value. +func (s *KinesisStreamingSourceOptions) SetMaxRecordPerRead(v int64) *KinesisStreamingSourceOptions { + s.MaxRecordPerRead = &v + return s +} + +// SetMaxRetryIntervalMs sets the MaxRetryIntervalMs field's value. +func (s *KinesisStreamingSourceOptions) SetMaxRetryIntervalMs(v int64) *KinesisStreamingSourceOptions { + s.MaxRetryIntervalMs = &v + return s +} + +// SetNumRetries sets the NumRetries field's value. +func (s *KinesisStreamingSourceOptions) SetNumRetries(v int64) *KinesisStreamingSourceOptions { + s.NumRetries = &v + return s +} + +// SetRetryIntervalMs sets the RetryIntervalMs field's value. +func (s *KinesisStreamingSourceOptions) SetRetryIntervalMs(v int64) *KinesisStreamingSourceOptions { + s.RetryIntervalMs = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *KinesisStreamingSourceOptions) SetRoleArn(v string) *KinesisStreamingSourceOptions { + s.RoleArn = &v + return s +} + +// SetRoleSessionName sets the RoleSessionName field's value. +func (s *KinesisStreamingSourceOptions) SetRoleSessionName(v string) *KinesisStreamingSourceOptions { + s.RoleSessionName = &v + return s +} + +// SetStartingPosition sets the StartingPosition field's value. 
+func (s *KinesisStreamingSourceOptions) SetStartingPosition(v string) *KinesisStreamingSourceOptions { + s.StartingPosition = &v + return s +} + +// SetStreamArn sets the StreamArn field's value. +func (s *KinesisStreamingSourceOptions) SetStreamArn(v string) *KinesisStreamingSourceOptions { + s.StreamArn = &v + return s +} + +// SetStreamName sets the StreamName field's value. +func (s *KinesisStreamingSourceOptions) SetStreamName(v string) *KinesisStreamingSourceOptions { + s.StreamName = &v + return s +} + // Specifies configuration properties for a labeling set generation task run. type LabelingSetGenerationTaskRunProperties struct { _ struct{} `type:"structure"` @@ -44014,28 +47996,466 @@ func (s *MLTransformNotReadyException) StatusCode() int { return s.RespMetadata.StatusCode } -// RequestID returns the service's response RequestID for request. -func (s *MLTransformNotReadyException) RequestID() string { - return s.RespMetadata.RequestID +// RequestID returns the service's response RequestID for request. +func (s *MLTransformNotReadyException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The encryption-at-rest settings of the transform that apply to accessing +// user data. +type MLUserDataEncryption struct { + _ struct{} `type:"structure"` + + // The ID for the customer-provided KMS key. + KmsKeyId *string `min:"1" type:"string"` + + // The encryption mode applied to user data. Valid values are: + // + // * DISABLED: encryption is disabled + // + // * SSEKMS: use of server-side encryption with Key Management Service (SSE-KMS) + // for user data stored in Amazon S3. + // + // MlUserDataEncryptionMode is a required field + MlUserDataEncryptionMode *string `type:"string" required:"true" enum:"MLUserDataEncryptionModeString"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MLUserDataEncryption) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MLUserDataEncryption) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MLUserDataEncryption) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MLUserDataEncryption"} + if s.KmsKeyId != nil && len(*s.KmsKeyId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("KmsKeyId", 1)) + } + if s.MlUserDataEncryptionMode == nil { + invalidParams.Add(request.NewErrParamRequired("MlUserDataEncryptionMode")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKmsKeyId sets the KmsKeyId field's value. +func (s *MLUserDataEncryption) SetKmsKeyId(v string) *MLUserDataEncryption { + s.KmsKeyId = &v + return s +} + +// SetMlUserDataEncryptionMode sets the MlUserDataEncryptionMode field's value. +func (s *MLUserDataEncryption) SetMlUserDataEncryptionMode(v string) *MLUserDataEncryption { + s.MlUserDataEncryptionMode = &v + return s +} + +// Specifies the mapping of data property keys. +type Mapping struct { + _ struct{} `type:"structure"` + + // Only applicable to nested data structures. 
If you want to change the parent
+	// structure, but also one of its children, you can fill out this data structure.
+	// It is also a Mapping, but its FromPath will be the parent's FromPath plus the
+	// FromPath from this structure.
+	//
+	// For the children part, suppose you have the structure:
+	//
+	// { "FromPath": "OuterStructure", "ToKey": "OuterStructure", "ToType": "Struct",
+	// "Dropped": false, "Children": [{ "FromPath": "inner", "ToKey": "inner", "ToType":
+	// "Double", "Dropped": false, }] }
+	//
+	// You can specify a Mapping that looks like:
+	//
+	// { "FromPath": "OuterStructure", "ToKey": "OuterStructure", "ToType": "Struct",
+	// "Dropped": false, "Children": [{ "FromPath": "inner", "ToKey": "inner", "ToType":
+	// "Double", "Dropped": false, }] }
+	Children []*Mapping `type:"list"`
+
+	// If true, then the column is removed.
+	Dropped *bool `type:"boolean"`
+
+	// The table or column to be modified.
+	FromPath []*string `type:"list"`
+
+	// The type of the data to be modified.
+	FromType *string `type:"string"`
+
+	// The name of the column after the apply mapping. It can be the same as FromPath.
+	ToKey *string `type:"string"`
+
+	// The data type that the data is to be modified to.
+	ToType *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Mapping) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Mapping) GoString() string {
+	return s.String()
+}
+
+// SetChildren sets the Children field's value.
+func (s *Mapping) SetChildren(v []*Mapping) *Mapping {
+	s.Children = v
+	return s
+}
+
+// SetDropped sets the Dropped field's value.
+func (s *Mapping) SetDropped(v bool) *Mapping {
+	s.Dropped = &v
+	return s
+}
+
+// SetFromPath sets the FromPath field's value.
+func (s *Mapping) SetFromPath(v []*string) *Mapping {
+	s.FromPath = v
+	return s
+}
+
+// SetFromType sets the FromType field's value.
+func (s *Mapping) SetFromType(v string) *Mapping {
+	s.FromType = &v
+	return s
+}
+
+// SetToKey sets the ToKey field's value.
+func (s *Mapping) SetToKey(v string) *Mapping {
+	s.ToKey = &v
+	return s
+}
+
+// SetToType sets the ToType field's value.
+func (s *Mapping) SetToType(v string) *Mapping {
+	s.ToType = &v
+	return s
+}
+
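+// Illustrative usage (an editor's sketch, not generated code): the nested
+// mapping described above, built with the setters. The field names are
+// hypothetical; the child's FromPath is relative to the parent's.
+//
+//	child := &glue.Mapping{}
+//	child.SetFromPath([]*string{aws.String("inner")}).
+//		SetToKey("inner").
+//		SetToType("Double")
+//
+//	parent := &glue.Mapping{}
+//	parent.SetFromPath([]*string{aws.String("OuterStructure")}).
+//		SetToKey("OuterStructure").
+//		SetToType("Struct").
+//		SetChildren([]*glue.Mapping{child})
+
+// Defines a mapping.
+type MappingEntry struct {
+	_ struct{} `type:"structure"`
+
+	// The source path.
+	SourcePath *string `type:"string"`
+
+	// The name of the source table.
+	SourceTable *string `type:"string"`
+
+	// The source type.
+	SourceType *string `type:"string"`
+
+	// The target path.
+	TargetPath *string `type:"string"`
+
+	// The target table.
+	TargetTable *string `type:"string"`
+
+	// The target type.
+	TargetType *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".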
+func (s MappingEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MappingEntry) GoString() string { + return s.String() +} + +// SetSourcePath sets the SourcePath field's value. +func (s *MappingEntry) SetSourcePath(v string) *MappingEntry { + s.SourcePath = &v + return s +} + +// SetSourceTable sets the SourceTable field's value. +func (s *MappingEntry) SetSourceTable(v string) *MappingEntry { + s.SourceTable = &v + return s +} + +// SetSourceType sets the SourceType field's value. +func (s *MappingEntry) SetSourceType(v string) *MappingEntry { + s.SourceType = &v + return s +} + +// SetTargetPath sets the TargetPath field's value. +func (s *MappingEntry) SetTargetPath(v string) *MappingEntry { + s.TargetPath = &v + return s +} + +// SetTargetTable sets the TargetTable field's value. +func (s *MappingEntry) SetTargetTable(v string) *MappingEntry { + s.TargetTable = &v + return s +} + +// SetTargetType sets the TargetType field's value. +func (s *MappingEntry) SetTargetType(v string) *MappingEntry { + s.TargetType = &v + return s +} + +// Specifies a transform that merges a DynamicFrame with a staging DynamicFrame +// based on the specified primary keys to identify records. Duplicate records +// (records with the same primary keys) are not de-duplicated. +type Merge struct { + _ struct{} `type:"structure"` + + // The data inputs identified by their node names. + // + // Inputs is a required field + Inputs []*string `min:"2" type:"list" required:"true"` + + // The name of the transform node. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // The list of primary key fields to match records from the source and staging + // dynamic frames. + // + // PrimaryKeys is a required field + PrimaryKeys [][]*string `type:"list" required:"true"` + + // The source DynamicFrame that will be merged with a staging DynamicFrame. + // + // Source is a required field + Source *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Merge) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Merge) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *Merge) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Merge"} + if s.Inputs == nil { + invalidParams.Add(request.NewErrParamRequired("Inputs")) + } + if s.Inputs != nil && len(s.Inputs) < 2 { + invalidParams.Add(request.NewErrParamMinLen("Inputs", 2)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.PrimaryKeys == nil { + invalidParams.Add(request.NewErrParamRequired("PrimaryKeys")) + } + if s.Source == nil { + invalidParams.Add(request.NewErrParamRequired("Source")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetInputs sets the Inputs field's value. +func (s *Merge) SetInputs(v []*string) *Merge { + s.Inputs = v + return s +} + +// SetName sets the Name field's value. +func (s *Merge) SetName(v string) *Merge { + s.Name = &v + return s +} + +// SetPrimaryKeys sets the PrimaryKeys field's value. +func (s *Merge) SetPrimaryKeys(v [][]*string) *Merge { + s.PrimaryKeys = v + return s +} + +// SetSource sets the Source field's value. +func (s *Merge) SetSource(v string) *Merge { + s.Source = &v + return s +} + +// A structure containing metadata information for a schema version. +type MetadataInfo struct { + _ struct{} `type:"structure"` + + // The time at which the entry was created. + CreatedTime *string `type:"string"` + + // The metadata key’s corresponding value. + MetadataValue *string `min:"1" type:"string"` + + // Other metadata belonging to the same metadata key. + OtherMetadataValueList []*OtherMetadataValueListItem `type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MetadataInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MetadataInfo) GoString() string { + return s.String() +} + +// SetCreatedTime sets the CreatedTime field's value. +func (s *MetadataInfo) SetCreatedTime(v string) *MetadataInfo { + s.CreatedTime = &v + return s +} + +// SetMetadataValue sets the MetadataValue field's value. +func (s *MetadataInfo) SetMetadataValue(v string) *MetadataInfo { + s.MetadataValue = &v + return s +} + +// SetOtherMetadataValueList sets the OtherMetadataValueList field's value. +func (s *MetadataInfo) SetOtherMetadataValueList(v []*OtherMetadataValueListItem) *MetadataInfo { + s.OtherMetadataValueList = v + return s +} + +// A structure containing a key value pair for metadata. +type MetadataKeyValuePair struct { + _ struct{} `type:"structure"` + + // A metadata key. + MetadataKey *string `min:"1" type:"string"` + + // A metadata key’s corresponding value. + MetadataValue *string `min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MetadataKeyValuePair) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MetadataKeyValuePair) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MetadataKeyValuePair) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MetadataKeyValuePair"} + if s.MetadataKey != nil && len(*s.MetadataKey) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MetadataKey", 1)) + } + if s.MetadataValue != nil && len(*s.MetadataValue) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MetadataValue", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMetadataKey sets the MetadataKey field's value. +func (s *MetadataKeyValuePair) SetMetadataKey(v string) *MetadataKeyValuePair { + s.MetadataKey = &v + return s } -// The encryption-at-rest settings of the transform that apply to accessing -// user data. -type MLUserDataEncryption struct { - _ struct{} `type:"structure"` +// SetMetadataValue sets the MetadataValue field's value. +func (s *MetadataKeyValuePair) SetMetadataValue(v string) *MetadataKeyValuePair { + s.MetadataValue = &v + return s +} - // The ID for the customer-provided KMS key. - KmsKeyId *string `min:"1" type:"string"` +// Specifies a Microsoft SQL server data source in the Glue Data Catalog. +type MicrosoftSQLServerCatalogSource struct { + _ struct{} `type:"structure"` - // The encryption mode applied to user data. Valid values are: + // The name of the database to read from. // - // * DISABLED: encryption is disabled + // Database is a required field + Database *string `type:"string" required:"true"` + + // The name of the data source. // - // * SSEKMS: use of server-side encryption with Key Management Service (SSE-KMS) - // for user data stored in Amazon S3. + // Name is a required field + Name *string `type:"string" required:"true"` + + // The name of the table in the database to read from. // - // MlUserDataEncryptionMode is a required field - MlUserDataEncryptionMode *string `type:"string" required:"true" enum:"MLUserDataEncryptionModeString"` + // Table is a required field + Table *string `type:"string" required:"true"` } // String returns the string representation. @@ -44043,7 +48463,7 @@ type MLUserDataEncryption struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s MLUserDataEncryption) String() string { +func (s MicrosoftSQLServerCatalogSource) String() string { return awsutil.Prettify(s) } @@ -44052,18 +48472,21 @@ func (s MLUserDataEncryption) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s MLUserDataEncryption) GoString() string { +func (s MicrosoftSQLServerCatalogSource) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *MLUserDataEncryption) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "MLUserDataEncryption"} - if s.KmsKeyId != nil && len(*s.KmsKeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KmsKeyId", 1)) +func (s *MicrosoftSQLServerCatalogSource) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MicrosoftSQLServerCatalogSource"} + if s.Database == nil { + invalidParams.Add(request.NewErrParamRequired("Database")) } - if s.MlUserDataEncryptionMode == nil { - invalidParams.Add(request.NewErrParamRequired("MlUserDataEncryptionMode")) + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Table == nil { + invalidParams.Add(request.NewErrParamRequired("Table")) } if invalidParams.Len() > 0 { @@ -44072,39 +48495,47 @@ func (s *MLUserDataEncryption) Validate() error { return nil } -// SetKmsKeyId sets the KmsKeyId field's value. -func (s *MLUserDataEncryption) SetKmsKeyId(v string) *MLUserDataEncryption { - s.KmsKeyId = &v +// SetDatabase sets the Database field's value. +func (s *MicrosoftSQLServerCatalogSource) SetDatabase(v string) *MicrosoftSQLServerCatalogSource { + s.Database = &v return s } -// SetMlUserDataEncryptionMode sets the MlUserDataEncryptionMode field's value. -func (s *MLUserDataEncryption) SetMlUserDataEncryptionMode(v string) *MLUserDataEncryption { - s.MlUserDataEncryptionMode = &v +// SetName sets the Name field's value. +func (s *MicrosoftSQLServerCatalogSource) SetName(v string) *MicrosoftSQLServerCatalogSource { + s.Name = &v return s } -// Defines a mapping. -type MappingEntry struct { - _ struct{} `type:"structure"` - - // The source path. - SourcePath *string `type:"string"` +// SetTable sets the Table field's value. +func (s *MicrosoftSQLServerCatalogSource) SetTable(v string) *MicrosoftSQLServerCatalogSource { + s.Table = &v + return s +} - // The name of the source table. - SourceTable *string `type:"string"` +// Specifies a target that uses Microsoft SQL. +type MicrosoftSQLServerCatalogTarget struct { + _ struct{} `type:"structure"` - // The source type. - SourceType *string `type:"string"` + // The name of the database to write to. + // + // Database is a required field + Database *string `type:"string" required:"true"` - // The target path. - TargetPath *string `type:"string"` + // The nodes that are inputs to the data target. + // + // Inputs is a required field + Inputs []*string `min:"1" type:"list" required:"true"` - // The target table. - TargetTable *string `type:"string"` + // The name of the data target. + // + // Name is a required field + Name *string `type:"string" required:"true"` - // The target type. - TargetType *string `type:"string"` + // The name of the table in the database to write to. + // + // Table is a required field + Table *string `type:"string" required:"true"` } // String returns the string representation. @@ -44112,7 +48543,7 @@ type MappingEntry struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s MappingEntry) String() string { +func (s MicrosoftSQLServerCatalogTarget) String() string { return awsutil.Prettify(s) } @@ -44121,58 +48552,77 @@ func (s MappingEntry) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". 
-func (s MappingEntry) GoString() string { +func (s MicrosoftSQLServerCatalogTarget) GoString() string { return s.String() } -// SetSourcePath sets the SourcePath field's value. -func (s *MappingEntry) SetSourcePath(v string) *MappingEntry { - s.SourcePath = &v - return s -} +// Validate inspects the fields of the type to determine if they are valid. +func (s *MicrosoftSQLServerCatalogTarget) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MicrosoftSQLServerCatalogTarget"} + if s.Database == nil { + invalidParams.Add(request.NewErrParamRequired("Database")) + } + if s.Inputs == nil { + invalidParams.Add(request.NewErrParamRequired("Inputs")) + } + if s.Inputs != nil && len(s.Inputs) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Inputs", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Table == nil { + invalidParams.Add(request.NewErrParamRequired("Table")) + } -// SetSourceTable sets the SourceTable field's value. -func (s *MappingEntry) SetSourceTable(v string) *MappingEntry { - s.SourceTable = &v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetSourceType sets the SourceType field's value. -func (s *MappingEntry) SetSourceType(v string) *MappingEntry { - s.SourceType = &v +// SetDatabase sets the Database field's value. +func (s *MicrosoftSQLServerCatalogTarget) SetDatabase(v string) *MicrosoftSQLServerCatalogTarget { + s.Database = &v return s } -// SetTargetPath sets the TargetPath field's value. -func (s *MappingEntry) SetTargetPath(v string) *MappingEntry { - s.TargetPath = &v +// SetInputs sets the Inputs field's value. +func (s *MicrosoftSQLServerCatalogTarget) SetInputs(v []*string) *MicrosoftSQLServerCatalogTarget { + s.Inputs = v return s } -// SetTargetTable sets the TargetTable field's value. -func (s *MappingEntry) SetTargetTable(v string) *MappingEntry { - s.TargetTable = &v +// SetName sets the Name field's value. +func (s *MicrosoftSQLServerCatalogTarget) SetName(v string) *MicrosoftSQLServerCatalogTarget { + s.Name = &v return s } -// SetTargetType sets the TargetType field's value. -func (s *MappingEntry) SetTargetType(v string) *MappingEntry { - s.TargetType = &v +// SetTable sets the Table field's value. +func (s *MicrosoftSQLServerCatalogTarget) SetTable(v string) *MicrosoftSQLServerCatalogTarget { + s.Table = &v return s } -// A structure containing metadata information for a schema version. -type MetadataInfo struct { +// Specifies an Amazon DocumentDB or MongoDB data store to crawl. +type MongoDBTarget struct { _ struct{} `type:"structure"` - // The time at which the entry was created. - CreatedTime *string `type:"string"` + // The name of the connection to use to connect to the Amazon DocumentDB or + // MongoDB target. + ConnectionName *string `type:"string"` - // The metadata key’s corresponding value. - MetadataValue *string `min:"1" type:"string"` + // The path of the Amazon DocumentDB or MongoDB target (database/collection). + Path *string `type:"string"` - // Other metadata belonging to the same metadata key. - OtherMetadataValueList []*OtherMetadataValueListItem `type:"list"` + // Indicates whether to scan all the records, or to sample rows from the table. + // Scanning all the records can take a long time when the table is not a high + // throughput table. + // + // A value of true means to scan all records, while a value of false means to + // sample the records. If no value is specified, the value defaults to true. 
+ ScanAll *bool `type:"boolean"` } // String returns the string representation. @@ -44180,7 +48630,7 @@ type MetadataInfo struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s MetadataInfo) String() string { +func (s MongoDBTarget) String() string { return awsutil.Prettify(s) } @@ -44189,37 +48639,46 @@ func (s MetadataInfo) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s MetadataInfo) GoString() string { +func (s MongoDBTarget) GoString() string { return s.String() } -// SetCreatedTime sets the CreatedTime field's value. -func (s *MetadataInfo) SetCreatedTime(v string) *MetadataInfo { - s.CreatedTime = &v +// SetConnectionName sets the ConnectionName field's value. +func (s *MongoDBTarget) SetConnectionName(v string) *MongoDBTarget { + s.ConnectionName = &v return s } -// SetMetadataValue sets the MetadataValue field's value. -func (s *MetadataInfo) SetMetadataValue(v string) *MetadataInfo { - s.MetadataValue = &v +// SetPath sets the Path field's value. +func (s *MongoDBTarget) SetPath(v string) *MongoDBTarget { + s.Path = &v return s } -// SetOtherMetadataValueList sets the OtherMetadataValueList field's value. -func (s *MetadataInfo) SetOtherMetadataValueList(v []*OtherMetadataValueListItem) *MetadataInfo { - s.OtherMetadataValueList = v +// SetScanAll sets the ScanAll field's value. +func (s *MongoDBTarget) SetScanAll(v bool) *MongoDBTarget { + s.ScanAll = &v return s } -// A structure containing a key value pair for metadata. -type MetadataKeyValuePair struct { +// Specifies a MySQL data source in the Glue Data Catalog. +type MySQLCatalogSource struct { _ struct{} `type:"structure"` - // A metadata key. - MetadataKey *string `min:"1" type:"string"` + // The name of the database to read from. + // + // Database is a required field + Database *string `type:"string" required:"true"` - // A metadata key’s corresponding value. - MetadataValue *string `min:"1" type:"string"` + // The name of the data source. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // The name of the table in the database to read from. + // + // Table is a required field + Table *string `type:"string" required:"true"` } // String returns the string representation. @@ -44227,7 +48686,7 @@ type MetadataKeyValuePair struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s MetadataKeyValuePair) String() string { +func (s MySQLCatalogSource) String() string { return awsutil.Prettify(s) } @@ -44236,18 +48695,21 @@ func (s MetadataKeyValuePair) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s MetadataKeyValuePair) GoString() string { +func (s MySQLCatalogSource) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *MetadataKeyValuePair) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "MetadataKeyValuePair"} - if s.MetadataKey != nil && len(*s.MetadataKey) < 1 { - invalidParams.Add(request.NewErrParamMinLen("MetadataKey", 1)) +func (s *MySQLCatalogSource) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MySQLCatalogSource"} + if s.Database == nil { + invalidParams.Add(request.NewErrParamRequired("Database")) } - if s.MetadataValue != nil && len(*s.MetadataValue) < 1 { - invalidParams.Add(request.NewErrParamMinLen("MetadataValue", 1)) + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Table == nil { + invalidParams.Add(request.NewErrParamRequired("Table")) } if invalidParams.Len() > 0 { @@ -44256,36 +48718,47 @@ func (s *MetadataKeyValuePair) Validate() error { return nil } -// SetMetadataKey sets the MetadataKey field's value. -func (s *MetadataKeyValuePair) SetMetadataKey(v string) *MetadataKeyValuePair { - s.MetadataKey = &v +// SetDatabase sets the Database field's value. +func (s *MySQLCatalogSource) SetDatabase(v string) *MySQLCatalogSource { + s.Database = &v return s } -// SetMetadataValue sets the MetadataValue field's value. -func (s *MetadataKeyValuePair) SetMetadataValue(v string) *MetadataKeyValuePair { - s.MetadataValue = &v +// SetName sets the Name field's value. +func (s *MySQLCatalogSource) SetName(v string) *MySQLCatalogSource { + s.Name = &v return s } -// Specifies an Amazon DocumentDB or MongoDB data store to crawl. -type MongoDBTarget struct { +// SetTable sets the Table field's value. +func (s *MySQLCatalogSource) SetTable(v string) *MySQLCatalogSource { + s.Table = &v + return s +} + +// Specifies a target that uses MySQL. +type MySQLCatalogTarget struct { _ struct{} `type:"structure"` - // The name of the connection to use to connect to the Amazon DocumentDB or - // MongoDB target. - ConnectionName *string `type:"string"` + // The name of the database to write to. + // + // Database is a required field + Database *string `type:"string" required:"true"` - // The path of the Amazon DocumentDB or MongoDB target (database/collection). - Path *string `type:"string"` + // The nodes that are inputs to the data target. + // + // Inputs is a required field + Inputs []*string `min:"1" type:"list" required:"true"` - // Indicates whether to scan all the records, or to sample rows from the table. - // Scanning all the records can take a long time when the table is not a high - // throughput table. + // The name of the data target. // - // A value of true means to scan all records, while a value of false means to - // sample the records. If no value is specified, the value defaults to true. - ScanAll *bool `type:"boolean"` + // Name is a required field + Name *string `type:"string" required:"true"` + + // The name of the table in the database to write to. + // + // Table is a required field + Table *string `type:"string" required:"true"` } // String returns the string representation. @@ -44293,7 +48766,7 @@ type MongoDBTarget struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". 
-func (s MongoDBTarget) String() string {
+func (s MySQLCatalogTarget) String() string {
return awsutil.Prettify(s)
}

@@ -44302,25 +48775,56 @@ func (s MongoDBTarget) String() string {
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
-func (s MongoDBTarget) GoString() string {
+func (s MySQLCatalogTarget) GoString() string {
return s.String()
}

-// SetConnectionName sets the ConnectionName field's value.
-func (s *MongoDBTarget) SetConnectionName(v string) *MongoDBTarget {
- s.ConnectionName = &v
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *MySQLCatalogTarget) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "MySQLCatalogTarget"}
+ if s.Database == nil {
+ invalidParams.Add(request.NewErrParamRequired("Database"))
+ }
+ if s.Inputs == nil {
+ invalidParams.Add(request.NewErrParamRequired("Inputs"))
+ }
+ if s.Inputs != nil && len(s.Inputs) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Inputs", 1))
+ }
+ if s.Name == nil {
+ invalidParams.Add(request.NewErrParamRequired("Name"))
+ }
+ if s.Table == nil {
+ invalidParams.Add(request.NewErrParamRequired("Table"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDatabase sets the Database field's value.
+func (s *MySQLCatalogTarget) SetDatabase(v string) *MySQLCatalogTarget {
+ s.Database = &v
return s
}

-// SetPath sets the Path field's value.
-func (s *MongoDBTarget) SetPath(v string) *MongoDBTarget {
- s.Path = &v
+// SetInputs sets the Inputs field's value.
+func (s *MySQLCatalogTarget) SetInputs(v []*string) *MySQLCatalogTarget {
+ s.Inputs = v
return s
}

-// SetScanAll sets the ScanAll field's value.
-func (s *MongoDBTarget) SetScanAll(v bool) *MongoDBTarget {
- s.ScanAll = &v
+// SetName sets the Name field's value.
+func (s *MySQLCatalogTarget) SetName(v string) *MySQLCatalogTarget {
+ s.Name = &v
+ return s
+}
+
+// SetTable sets the Table field's value.
+func (s *MySQLCatalogTarget) SetTable(v string) *MySQLCatalogTarget {
+ s.Table = &v
return s
}

@@ -44513,6 +49017,124 @@ func (s *NotificationProperty) SetNotifyDelayAfter(v int64) *NotificationPropert
return s
}

+// Represents whether certain values are recognized as null values for removal.
+type NullCheckBoxList struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies that an empty string is considered a null value.
+ IsEmpty *bool `type:"boolean"`
+
+ // Specifies that an integer value of -1 is considered a null value.
+ IsNegOne *bool `type:"boolean"`
+
+ // Specifies that a value spelling out the word 'null' is considered a null
+ // value.
+ IsNullString *bool `type:"boolean"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s NullCheckBoxList) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s NullCheckBoxList) GoString() string {
+ return s.String()
+}
+
+// SetIsEmpty sets the IsEmpty field's value.
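// A minimal usage sketch for the node types above, assuming the package is
// imported as "github.com/aws/aws-sdk-go/service/glue" and that all node,
// database, and table names are placeholders. The generated fluent setters
// return the receiver, so a source/target pair can be composed and checked
// client-side with Validate before it is sent to the service:
//
//	src := (&glue.MySQLCatalogSource{}).
//		SetName("read-orders").
//		SetDatabase("sales").
//		SetTable("orders")
//	dst := (&glue.MySQLCatalogTarget{}).
//		SetName("write-orders").
//		SetDatabase("warehouse").
//		SetTable("orders_copy").
//		SetInputs([]*string{src.Name}) // Inputs reference upstream nodes by name.
//	if err := dst.Validate(); err != nil {
//		// Name, Database, Table, and Inputs are required; Inputs needs >= 1 entry.
//	}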
+func (s *NullCheckBoxList) SetIsEmpty(v bool) *NullCheckBoxList {
+ s.IsEmpty = &v
+ return s
+}
+
+// SetIsNegOne sets the IsNegOne field's value.
+func (s *NullCheckBoxList) SetIsNegOne(v bool) *NullCheckBoxList {
+ s.IsNegOne = &v
+ return s
+}
+
+// SetIsNullString sets the IsNullString field's value.
+func (s *NullCheckBoxList) SetIsNullString(v bool) *NullCheckBoxList {
+ s.IsNullString = &v
+ return s
+}
+
+// Represents a custom null value, such as zeros or another value, that is used
+// as a null placeholder unique to the dataset.
+type NullValueField struct {
+ _ struct{} `type:"structure"`
+
+ // The datatype of the value.
+ //
+ // Datatype is a required field
+ Datatype *Datatype `type:"structure" required:"true"`
+
+ // The value of the null placeholder.
+ //
+ // Value is a required field
+ Value *string `type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s NullValueField) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s NullValueField) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *NullValueField) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "NullValueField"}
+ if s.Datatype == nil {
+ invalidParams.Add(request.NewErrParamRequired("Datatype"))
+ }
+ if s.Value == nil {
+ invalidParams.Add(request.NewErrParamRequired("Value"))
+ }
+ if s.Datatype != nil {
+ if err := s.Datatype.Validate(); err != nil {
+ invalidParams.AddNested("Datatype", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDatatype sets the Datatype field's value.
+func (s *NullValueField) SetDatatype(v *Datatype) *NullValueField {
+ s.Datatype = v
+ return s
+}
+
+// SetValue sets the Value field's value.
+func (s *NullValueField) SetValue(v string) *NullValueField {
+ s.Value = &v
+ return s
+}
+
// The operation timed out.
type OperationTimeoutException struct {
_ struct{} `type:"structure"`

@@ -44578,6 +49200,173 @@ func (s *OperationTimeoutException) RequestID() string {
return s.RespMetadata.RequestID
}

+// Specifies an Oracle data source in the Glue Data Catalog.
+type OracleSQLCatalogSource struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the database to read from.
+ //
+ // Database is a required field
+ Database *string `type:"string" required:"true"`
+
+ // The name of the data source.
+ //
+ // Name is a required field
+ Name *string `type:"string" required:"true"`
+
+ // The name of the table in the database to read from.
+ //
+ // Table is a required field
+ Table *string `type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s OracleSQLCatalogSource) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s OracleSQLCatalogSource) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *OracleSQLCatalogSource) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OracleSQLCatalogSource"} + if s.Database == nil { + invalidParams.Add(request.NewErrParamRequired("Database")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Table == nil { + invalidParams.Add(request.NewErrParamRequired("Table")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDatabase sets the Database field's value. +func (s *OracleSQLCatalogSource) SetDatabase(v string) *OracleSQLCatalogSource { + s.Database = &v + return s +} + +// SetName sets the Name field's value. +func (s *OracleSQLCatalogSource) SetName(v string) *OracleSQLCatalogSource { + s.Name = &v + return s +} + +// SetTable sets the Table field's value. +func (s *OracleSQLCatalogSource) SetTable(v string) *OracleSQLCatalogSource { + s.Table = &v + return s +} + +// Specifies a target that uses Oracle SQL. +type OracleSQLCatalogTarget struct { + _ struct{} `type:"structure"` + + // The name of the database to write to. + // + // Database is a required field + Database *string `type:"string" required:"true"` + + // The nodes that are inputs to the data target. + // + // Inputs is a required field + Inputs []*string `min:"1" type:"list" required:"true"` + + // The name of the data target. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // The name of the table in the database to write to. + // + // Table is a required field + Table *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s OracleSQLCatalogTarget) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s OracleSQLCatalogTarget) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *OracleSQLCatalogTarget) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OracleSQLCatalogTarget"} + if s.Database == nil { + invalidParams.Add(request.NewErrParamRequired("Database")) + } + if s.Inputs == nil { + invalidParams.Add(request.NewErrParamRequired("Inputs")) + } + if s.Inputs != nil && len(s.Inputs) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Inputs", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Table == nil { + invalidParams.Add(request.NewErrParamRequired("Table")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDatabase sets the Database field's value. 
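// A short sketch of the null-handling types defined earlier in this hunk
// (NullCheckBoxList and NullValueField). The assumption that they feed the
// DropNullFields transform is based on the field names and is not shown in
// this hunk; the values below are placeholders:
//
//	checks := &glue.NullCheckBoxList{}
//	checks.SetIsEmpty(true)      // treat "" as a null value
//	checks.SetIsNullString(true) // treat the literal word "null" as a null value
//	placeholder := (&glue.NullValueField{}).
//		SetValue("N/A").
//		SetDatatype(&glue.Datatype{}) // Datatype is required; it describes the value's type
//	if err := placeholder.Validate(); err != nil {
//		// Datatype and Value are required fields.
//	}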
+func (s *OracleSQLCatalogTarget) SetDatabase(v string) *OracleSQLCatalogTarget {
+ s.Database = &v
+ return s
+}
+
+// SetInputs sets the Inputs field's value.
+func (s *OracleSQLCatalogTarget) SetInputs(v []*string) *OracleSQLCatalogTarget {
+ s.Inputs = v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *OracleSQLCatalogTarget) SetName(v string) *OracleSQLCatalogTarget {
+ s.Name = &v
+ return s
+}
+
+// SetTable sets the Table field's value.
+func (s *OracleSQLCatalogTarget) SetTable(v string) *OracleSQLCatalogTarget {
+ s.Table = &v
+ return s
+}
+
// Specifies the sort order of a sorted column.
type Order struct {
_ struct{} `type:"structure"`

@@ -44686,6 +49475,141 @@ func (s *OtherMetadataValueListItem) SetMetadataValue(v string) *OtherMetadataVa
return s
}

+// Specifies a transform that identifies, removes, or masks PII data.
+type PIIDetection struct {
+ _ struct{} `type:"structure"`
+
+ // Indicates the types of entities the PIIDetection transform will identify
+ // as PII data.
+ //
+ // PII type entities include: PERSON_NAME, DATE, USA_SNN, EMAIL, USA_ITIN, USA_PASSPORT_NUMBER,
+ // PHONE_NUMBER, BANK_ACCOUNT, IP_ADDRESS, MAC_ADDRESS, USA_CPT_CODE, USA_HCPCS_CODE,
+ // USA_NATIONAL_DRUG_CODE, USA_MEDICARE_BENEFICIARY_IDENTIFIER, USA_HEALTH_INSURANCE_CLAIM_NUMBER,
+ // CREDIT_CARD, USA_NATIONAL_PROVIDER_IDENTIFIER, USA_DEA_NUMBER, USA_DRIVING_LICENSE
+ //
+ // EntityTypesToDetect is a required field
+ EntityTypesToDetect []*string `type:"list" required:"true"`
+
+ // The node ID inputs to the transform.
+ //
+ // Inputs is a required field
+ Inputs []*string `min:"1" type:"list" required:"true"`
+
+ // Indicates the value that will replace the detected entity.
+ MaskValue *string `type:"string"`
+
+ // The name of the transform node.
+ //
+ // Name is a required field
+ Name *string `type:"string" required:"true"`
+
+ // Indicates the output column name that will contain any entity type detected
+ // in that row.
+ OutputColumnName *string `type:"string"`
+
+ // Indicates the type of PIIDetection transform.
+ //
+ // PiiType is a required field
+ PiiType *string `type:"string" required:"true" enum:"PiiType"`
+
+ // Indicates the fraction of the data to sample when scanning for PII entities.
+ SampleFraction *float64 `type:"double"`
+
+ // Indicates the fraction of the data that must match an entity type in order
+ // for a column to be identified as PII data.
+ ThresholdFraction *float64 `type:"double"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PIIDetection) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PIIDetection) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PIIDetection) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PIIDetection"}
+ if s.EntityTypesToDetect == nil {
+ invalidParams.Add(request.NewErrParamRequired("EntityTypesToDetect"))
+ }
+ if s.Inputs == nil {
+ invalidParams.Add(request.NewErrParamRequired("Inputs"))
+ }
+ if s.Inputs != nil && len(s.Inputs) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Inputs", 1))
+ }
+ if s.Name == nil {
+ invalidParams.Add(request.NewErrParamRequired("Name"))
+ }
+ if s.PiiType == nil {
+ invalidParams.Add(request.NewErrParamRequired("PiiType"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetEntityTypesToDetect sets the EntityTypesToDetect field's value.
+func (s *PIIDetection) SetEntityTypesToDetect(v []*string) *PIIDetection {
+ s.EntityTypesToDetect = v
+ return s
+}
+
+// SetInputs sets the Inputs field's value.
+func (s *PIIDetection) SetInputs(v []*string) *PIIDetection {
+ s.Inputs = v
+ return s
+}
+
+// SetMaskValue sets the MaskValue field's value.
+func (s *PIIDetection) SetMaskValue(v string) *PIIDetection {
+ s.MaskValue = &v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *PIIDetection) SetName(v string) *PIIDetection {
+ s.Name = &v
+ return s
+}
+
+// SetOutputColumnName sets the OutputColumnName field's value.
+func (s *PIIDetection) SetOutputColumnName(v string) *PIIDetection {
+ s.OutputColumnName = &v
+ return s
+}
+
+// SetPiiType sets the PiiType field's value.
+func (s *PIIDetection) SetPiiType(v string) *PIIDetection {
+ s.PiiType = &v
+ return s
+}
+
+// SetSampleFraction sets the SampleFraction field's value.
+func (s *PIIDetection) SetSampleFraction(v float64) *PIIDetection {
+ s.SampleFraction = &v
+ return s
+}
+
+// SetThresholdFraction sets the ThresholdFraction field's value.
+func (s *PIIDetection) SetThresholdFraction(v float64) *PIIDetection {
+ s.ThresholdFraction = &v
+ return s
+}
+
// Represents a slice of table data.
type Partition struct {
_ struct{} `type:"structure"`

@@ -45243,6 +50167,173 @@ func (s *PhysicalConnectionRequirements) SetSubnetId(v string) *PhysicalConnecti
return s
}

+// Specifies a PostgreSQL data source in the Glue Data Catalog.
+type PostgreSQLCatalogSource struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the database to read from.
+ //
+ // Database is a required field
+ Database *string `type:"string" required:"true"`
+
+ // The name of the data source.
+ //
+ // Name is a required field
+ Name *string `type:"string" required:"true"`
+
+ // The name of the table in the database to read from.
+ //
+ // Table is a required field
+ Table *string `type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PostgreSQLCatalogSource) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PostgreSQLCatalogSource) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
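// A sketch of configuring the PIIDetection transform defined above. The
// "RowMasking" PiiType string and the entity-type values are assumptions
// drawn from the enum names referenced in the struct tags; aws.StringSlice
// is the helper from github.com/aws/aws-sdk-go/aws:
//
//	pii := (&glue.PIIDetection{}).
//		SetName("mask-pii").
//		SetInputs(aws.StringSlice([]string{"read-orders"})).
//		SetPiiType("RowMasking").
//		SetEntityTypesToDetect(aws.StringSlice([]string{"EMAIL", "PHONE_NUMBER"})).
//		SetMaskValue("####").
//		SetSampleFraction(0.1).   // scan 10% of the rows for entities
//		SetThresholdFraction(0.5) // flag a column once half of the sample matches
//	if err := pii.Validate(); err != nil {
//		// EntityTypesToDetect, Inputs, Name, and PiiType are required.
//	}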
+func (s *PostgreSQLCatalogSource) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PostgreSQLCatalogSource"}
+ if s.Database == nil {
+ invalidParams.Add(request.NewErrParamRequired("Database"))
+ }
+ if s.Name == nil {
+ invalidParams.Add(request.NewErrParamRequired("Name"))
+ }
+ if s.Table == nil {
+ invalidParams.Add(request.NewErrParamRequired("Table"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDatabase sets the Database field's value.
+func (s *PostgreSQLCatalogSource) SetDatabase(v string) *PostgreSQLCatalogSource {
+ s.Database = &v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *PostgreSQLCatalogSource) SetName(v string) *PostgreSQLCatalogSource {
+ s.Name = &v
+ return s
+}
+
+// SetTable sets the Table field's value.
+func (s *PostgreSQLCatalogSource) SetTable(v string) *PostgreSQLCatalogSource {
+ s.Table = &v
+ return s
+}
+
+// Specifies a target that uses PostgreSQL.
+type PostgreSQLCatalogTarget struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the database to write to.
+ //
+ // Database is a required field
+ Database *string `type:"string" required:"true"`
+
+ // The nodes that are inputs to the data target.
+ //
+ // Inputs is a required field
+ Inputs []*string `min:"1" type:"list" required:"true"`
+
+ // The name of the data target.
+ //
+ // Name is a required field
+ Name *string `type:"string" required:"true"`
+
+ // The name of the table in the database to write to.
+ //
+ // Table is a required field
+ Table *string `type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PostgreSQLCatalogTarget) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PostgreSQLCatalogTarget) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PostgreSQLCatalogTarget) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PostgreSQLCatalogTarget"}
+ if s.Database == nil {
+ invalidParams.Add(request.NewErrParamRequired("Database"))
+ }
+ if s.Inputs == nil {
+ invalidParams.Add(request.NewErrParamRequired("Inputs"))
+ }
+ if s.Inputs != nil && len(s.Inputs) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Inputs", 1))
+ }
+ if s.Name == nil {
+ invalidParams.Add(request.NewErrParamRequired("Name"))
+ }
+ if s.Table == nil {
+ invalidParams.Add(request.NewErrParamRequired("Table"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDatabase sets the Database field's value.
+func (s *PostgreSQLCatalogTarget) SetDatabase(v string) *PostgreSQLCatalogTarget {
+ s.Database = &v
+ return s
+}
+
+// SetInputs sets the Inputs field's value.
+func (s *PostgreSQLCatalogTarget) SetInputs(v []*string) *PostgreSQLCatalogTarget {
+ s.Inputs = v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *PostgreSQLCatalogTarget) SetName(v string) *PostgreSQLCatalogTarget {
+ s.Name = &v
+ return s
+}
+
+// SetTable sets the Table field's value.
+func (s *PostgreSQLCatalogTarget) SetTable(v string) *PostgreSQLCatalogTarget { + s.Table = &v + return s +} + // A job run that was used in the predicate of a conditional trigger that triggered // this job run. type Predecessor struct { @@ -46135,40 +51226,255 @@ func (s *QuerySchemaVersionMetadataOutput) SetSchemaVersionId(v string) *QuerySc type RecrawlPolicy struct { _ struct{} `type:"structure"` - // Specifies whether to crawl the entire dataset again or to crawl only folders - // that were added since the last crawler run. - // - // A value of CRAWL_EVERYTHING specifies crawling the entire dataset again. - // - // A value of CRAWL_NEW_FOLDERS_ONLY specifies crawling only folders that were - // added since the last crawler run. - // - // A value of CRAWL_EVENT_MODE specifies crawling only the changes identified - // by Amazon S3 events. - RecrawlBehavior *string `type:"string" enum:"RecrawlBehavior"` + // Specifies whether to crawl the entire dataset again or to crawl only folders + // that were added since the last crawler run. + // + // A value of CRAWL_EVERYTHING specifies crawling the entire dataset again. + // + // A value of CRAWL_NEW_FOLDERS_ONLY specifies crawling only folders that were + // added since the last crawler run. + // + // A value of CRAWL_EVENT_MODE specifies crawling only the changes identified + // by Amazon S3 events. + RecrawlBehavior *string `type:"string" enum:"RecrawlBehavior"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RecrawlPolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RecrawlPolicy) GoString() string { + return s.String() +} + +// SetRecrawlBehavior sets the RecrawlBehavior field's value. +func (s *RecrawlPolicy) SetRecrawlBehavior(v string) *RecrawlPolicy { + s.RecrawlBehavior = &v + return s +} + +// Specifies an Amazon Redshift data store. +type RedshiftSource struct { + _ struct{} `type:"structure"` + + // The database to read from. + // + // Database is a required field + Database *string `type:"string" required:"true"` + + // The name of the Amazon Redshift data store. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // The Amazon S3 path where temporary data can be staged when copying out of + // the database. + RedshiftTmpDir *string `type:"string"` + + // The database table to read from. + // + // Table is a required field + Table *string `type:"string" required:"true"` + + // The IAM role with permissions. + TmpDirIAMRole *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RedshiftSource) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s RedshiftSource) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RedshiftSource) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RedshiftSource"} + if s.Database == nil { + invalidParams.Add(request.NewErrParamRequired("Database")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Table == nil { + invalidParams.Add(request.NewErrParamRequired("Table")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDatabase sets the Database field's value. +func (s *RedshiftSource) SetDatabase(v string) *RedshiftSource { + s.Database = &v + return s +} + +// SetName sets the Name field's value. +func (s *RedshiftSource) SetName(v string) *RedshiftSource { + s.Name = &v + return s +} + +// SetRedshiftTmpDir sets the RedshiftTmpDir field's value. +func (s *RedshiftSource) SetRedshiftTmpDir(v string) *RedshiftSource { + s.RedshiftTmpDir = &v + return s +} + +// SetTable sets the Table field's value. +func (s *RedshiftSource) SetTable(v string) *RedshiftSource { + s.Table = &v + return s +} + +// SetTmpDirIAMRole sets the TmpDirIAMRole field's value. +func (s *RedshiftSource) SetTmpDirIAMRole(v string) *RedshiftSource { + s.TmpDirIAMRole = &v + return s +} + +// Specifies a target that uses Amazon Redshift. +type RedshiftTarget struct { + _ struct{} `type:"structure"` + + // The name of the database to write to. + // + // Database is a required field + Database *string `type:"string" required:"true"` + + // The nodes that are inputs to the data target. + // + // Inputs is a required field + Inputs []*string `min:"1" type:"list" required:"true"` + + // The name of the data target. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // The Amazon S3 path where temporary data can be staged when copying out of + // the database. + RedshiftTmpDir *string `type:"string"` + + // The name of the table in the database to write to. + // + // Table is a required field + Table *string `type:"string" required:"true"` + + // The IAM role with permissions. + TmpDirIAMRole *string `type:"string"` + + // The set of options to configure an upsert operation when writing to a Redshift + // target. + UpsertRedshiftOptions *UpsertRedshiftTargetOptions `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RedshiftTarget) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RedshiftTarget) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
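// A sketch of a Redshift source/target pair using the types above; the
// staging path and role ARN are placeholders. RedshiftTmpDir and
// TmpDirIAMRole configure the Amazon S3 staging area that the doc comments
// above describe for copying data out of (or into) the database:
//
//	src := (&glue.RedshiftSource{}).
//		SetName("read-facts").
//		SetDatabase("analytics").
//		SetTable("facts").
//		SetRedshiftTmpDir("s3://my-bucket/glue-tmp/").
//		SetTmpDirIAMRole("arn:aws:iam::123456789012:role/GlueRedshiftRole")
//	dst := (&glue.RedshiftTarget{}).
//		SetName("write-facts").
//		SetDatabase("analytics").
//		SetTable("facts_copy").
//		SetInputs([]*string{src.Name})
//	if err := dst.Validate(); err != nil {
//		// Database, Inputs, Name, and Table are required.
//	}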
+func (s *RedshiftTarget) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RedshiftTarget"} + if s.Database == nil { + invalidParams.Add(request.NewErrParamRequired("Database")) + } + if s.Inputs == nil { + invalidParams.Add(request.NewErrParamRequired("Inputs")) + } + if s.Inputs != nil && len(s.Inputs) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Inputs", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Table == nil { + invalidParams.Add(request.NewErrParamRequired("Table")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDatabase sets the Database field's value. +func (s *RedshiftTarget) SetDatabase(v string) *RedshiftTarget { + s.Database = &v + return s +} + +// SetInputs sets the Inputs field's value. +func (s *RedshiftTarget) SetInputs(v []*string) *RedshiftTarget { + s.Inputs = v + return s +} + +// SetName sets the Name field's value. +func (s *RedshiftTarget) SetName(v string) *RedshiftTarget { + s.Name = &v + return s +} + +// SetRedshiftTmpDir sets the RedshiftTmpDir field's value. +func (s *RedshiftTarget) SetRedshiftTmpDir(v string) *RedshiftTarget { + s.RedshiftTmpDir = &v + return s } -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s RecrawlPolicy) String() string { - return awsutil.Prettify(s) +// SetTable sets the Table field's value. +func (s *RedshiftTarget) SetTable(v string) *RedshiftTarget { + s.Table = &v + return s } -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s RecrawlPolicy) GoString() string { - return s.String() +// SetTmpDirIAMRole sets the TmpDirIAMRole field's value. +func (s *RedshiftTarget) SetTmpDirIAMRole(v string) *RedshiftTarget { + s.TmpDirIAMRole = &v + return s } -// SetRecrawlBehavior sets the RecrawlBehavior field's value. -func (s *RecrawlPolicy) SetRecrawlBehavior(v string) *RecrawlPolicy { - s.RecrawlBehavior = &v +// SetUpsertRedshiftOptions sets the UpsertRedshiftOptions field's value. +func (s *RedshiftTarget) SetUpsertRedshiftOptions(v *UpsertRedshiftTargetOptions) *RedshiftTarget { + s.UpsertRedshiftOptions = v return s } @@ -46434,6 +51740,81 @@ func (s *RegistryListItem) SetUpdatedTime(v string) *RegistryListItem { return s } +// Specifies a Relational database data source in the Glue Data Catalog. +type RelationalCatalogSource struct { + _ struct{} `type:"structure"` + + // The name of the database to read from. + // + // Database is a required field + Database *string `type:"string" required:"true"` + + // The name of the data source. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // The name of the table in the database to read from. + // + // Table is a required field + Table *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s RelationalCatalogSource) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RelationalCatalogSource) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RelationalCatalogSource) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RelationalCatalogSource"} + if s.Database == nil { + invalidParams.Add(request.NewErrParamRequired("Database")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Table == nil { + invalidParams.Add(request.NewErrParamRequired("Table")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDatabase sets the Database field's value. +func (s *RelationalCatalogSource) SetDatabase(v string) *RelationalCatalogSource { + s.Database = &v + return s +} + +// SetName sets the Name field's value. +func (s *RelationalCatalogSource) SetName(v string) *RelationalCatalogSource { + s.Name = &v + return s +} + +// SetTable sets the Table field's value. +func (s *RelationalCatalogSource) SetTable(v string) *RelationalCatalogSource { + s.Table = &v + return s +} + type RemoveSchemaVersionMetadataInput struct { _ struct{} `type:"structure"` @@ -46620,6 +52001,98 @@ func (s *RemoveSchemaVersionMetadataOutput) SetVersionNumber(v int64) *RemoveSch return s } +// Specifies a transform that renames a single data property key. +type RenameField struct { + _ struct{} `type:"structure"` + + // The data inputs identified by their node names. + // + // Inputs is a required field + Inputs []*string `min:"1" type:"list" required:"true"` + + // The name of the transform node. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // A JSON path to a variable in the data structure for the source data. + // + // SourcePath is a required field + SourcePath []*string `type:"list" required:"true"` + + // A JSON path to a variable in the data structure for the target data. + // + // TargetPath is a required field + TargetPath []*string `type:"list" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RenameField) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RenameField) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
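// A sketch of the RenameField transform defined above, assuming each JSON
// path is given as one path component per list element (node and field names
// are placeholders):
//
//	rename := (&glue.RenameField{}).
//		SetName("rename-order-id").
//		SetInputs(aws.StringSlice([]string{"read-orders"})).
//		SetSourcePath(aws.StringSlice([]string{"order", "id"})).      // $.order.id
//		SetTargetPath(aws.StringSlice([]string{"order", "orderId"})) // $.order.orderId
//	if err := rename.Validate(); err != nil {
//		// Inputs, Name, SourcePath, and TargetPath are required.
//	}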
+func (s *RenameField) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RenameField"}
+ if s.Inputs == nil {
+ invalidParams.Add(request.NewErrParamRequired("Inputs"))
+ }
+ if s.Inputs != nil && len(s.Inputs) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Inputs", 1))
+ }
+ if s.Name == nil {
+ invalidParams.Add(request.NewErrParamRequired("Name"))
+ }
+ if s.SourcePath == nil {
+ invalidParams.Add(request.NewErrParamRequired("SourcePath"))
+ }
+ if s.TargetPath == nil {
+ invalidParams.Add(request.NewErrParamRequired("TargetPath"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetInputs sets the Inputs field's value.
+func (s *RenameField) SetInputs(v []*string) *RenameField {
+ s.Inputs = v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *RenameField) SetName(v string) *RenameField {
+ s.Name = &v
+ return s
+}
+
+// SetSourcePath sets the SourcePath field's value.
+func (s *RenameField) SetSourcePath(v []*string) *RenameField {
+ s.SourcePath = v
+ return s
+}
+
+// SetTargetPath sets the TargetPath field's value.
+func (s *RenameField) SetTargetPath(v []*string) *RenameField {
+ s.TargetPath = v
+ return s
+}
+
type ResetJobBookmarkInput struct {
_ struct{} `type:"structure"`

@@ -47118,6 +52591,652 @@ func (s *RunStatementOutput) SetId(v int64) *RunStatementOutput {
return s
}

+// Specifies an Amazon S3 data store in the Glue Data Catalog.
+type S3CatalogSource struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies additional connection options.
+ AdditionalOptions *S3SourceAdditionalOptions `type:"structure"`
+
+ // The database to read from.
+ //
+ // Database is a required field
+ Database *string `type:"string" required:"true"`
+
+ // The name of the data store.
+ //
+ // Name is a required field
+ Name *string `type:"string" required:"true"`
+
+ // Partitions satisfying this predicate are deleted. Files within the retention
+ // period in these partitions are not deleted. Set to "" (empty) by default.
+ PartitionPredicate *string `type:"string"`
+
+ // The database table to read from.
+ //
+ // Table is a required field
+ Table *string `type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s S3CatalogSource) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s S3CatalogSource) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *S3CatalogSource) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "S3CatalogSource"}
+ if s.Database == nil {
+ invalidParams.Add(request.NewErrParamRequired("Database"))
+ }
+ if s.Name == nil {
+ invalidParams.Add(request.NewErrParamRequired("Name"))
+ }
+ if s.Table == nil {
+ invalidParams.Add(request.NewErrParamRequired("Table"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAdditionalOptions sets the AdditionalOptions field's value.
+func (s *S3CatalogSource) SetAdditionalOptions(v *S3SourceAdditionalOptions) *S3CatalogSource { + s.AdditionalOptions = v + return s +} + +// SetDatabase sets the Database field's value. +func (s *S3CatalogSource) SetDatabase(v string) *S3CatalogSource { + s.Database = &v + return s +} + +// SetName sets the Name field's value. +func (s *S3CatalogSource) SetName(v string) *S3CatalogSource { + s.Name = &v + return s +} + +// SetPartitionPredicate sets the PartitionPredicate field's value. +func (s *S3CatalogSource) SetPartitionPredicate(v string) *S3CatalogSource { + s.PartitionPredicate = &v + return s +} + +// SetTable sets the Table field's value. +func (s *S3CatalogSource) SetTable(v string) *S3CatalogSource { + s.Table = &v + return s +} + +// Specifies a data target that writes to Amazon S3 using the Glue Data Catalog. +type S3CatalogTarget struct { + _ struct{} `type:"structure"` + + // The name of the database to write to. + // + // Database is a required field + Database *string `type:"string" required:"true"` + + // The nodes that are inputs to the data target. + // + // Inputs is a required field + Inputs []*string `min:"1" type:"list" required:"true"` + + // The name of the data target. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // Specifies native partitioning using a sequence of keys. + PartitionKeys [][]*string `type:"list"` + + // A policy that specifies update behavior for the crawler. + SchemaChangePolicy *CatalogSchemaChangePolicy `type:"structure"` + + // The name of the table in the database to write to. + // + // Table is a required field + Table *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s S3CatalogTarget) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s S3CatalogTarget) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *S3CatalogTarget) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "S3CatalogTarget"} + if s.Database == nil { + invalidParams.Add(request.NewErrParamRequired("Database")) + } + if s.Inputs == nil { + invalidParams.Add(request.NewErrParamRequired("Inputs")) + } + if s.Inputs != nil && len(s.Inputs) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Inputs", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Table == nil { + invalidParams.Add(request.NewErrParamRequired("Table")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDatabase sets the Database field's value. +func (s *S3CatalogTarget) SetDatabase(v string) *S3CatalogTarget { + s.Database = &v + return s +} + +// SetInputs sets the Inputs field's value. +func (s *S3CatalogTarget) SetInputs(v []*string) *S3CatalogTarget { + s.Inputs = v + return s +} + +// SetName sets the Name field's value. +func (s *S3CatalogTarget) SetName(v string) *S3CatalogTarget { + s.Name = &v + return s +} + +// SetPartitionKeys sets the PartitionKeys field's value. 
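// A sketch of an S3 Data Catalog target using the types above, with native
// partitioning and a schema-change policy. The "UPDATE_IN_DATABASE" behavior
// string is an assumption about the UpdateCatalogBehavior enum; all names are
// placeholders:
//
//	policy := (&glue.CatalogSchemaChangePolicy{}).
//		SetEnableUpdateCatalog(true).
//		SetUpdateBehavior("UPDATE_IN_DATABASE")
//	tgt := (&glue.S3CatalogTarget{}).
//		SetName("write-events").
//		SetDatabase("lake").
//		SetTable("events").
//		SetInputs(aws.StringSlice([]string{"read-events"})).
//		SetPartitionKeys([][]*string{ // one inner list per partition key
//			aws.StringSlice([]string{"region"}),
//			aws.StringSlice([]string{"day"}),
//		}).
//		SetSchemaChangePolicy(policy)
//	if err := tgt.Validate(); err != nil {
//		// Database, Inputs, Name, and Table are required.
//	}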
+func (s *S3CatalogTarget) SetPartitionKeys(v [][]*string) *S3CatalogTarget {
+ s.PartitionKeys = v
+ return s
+}
+
+// SetSchemaChangePolicy sets the SchemaChangePolicy field's value.
+func (s *S3CatalogTarget) SetSchemaChangePolicy(v *CatalogSchemaChangePolicy) *S3CatalogTarget {
+ s.SchemaChangePolicy = v
+ return s
+}
+
+// SetTable sets the Table field's value.
+func (s *S3CatalogTarget) SetTable(v string) *S3CatalogTarget {
+ s.Table = &v
+ return s
+}
+
+// Specifies a comma-separated values (CSV) data store stored in Amazon S3.
+type S3CsvSource struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies additional connection options.
+ AdditionalOptions *S3DirectSourceAdditionalOptions `type:"structure"`
+
+ // Specifies how the data is compressed. This is generally not necessary if
+ // the data has a standard file extension. Possible values are "gzip" and "bzip".
+ CompressionType *string `type:"string" enum:"CompressionType"`
+
+ // Specifies a character to use for escaping. This option is used only when
+ // reading CSV files. The default value is none. If enabled, the character which
+ // immediately follows is used as-is, except for a small set of well-known escapes
+ // (\n, \r, \t, and \0).
+ Escaper *string `type:"string"`
+
+ // A string containing a JSON list of Unix-style glob patterns to exclude. For
+ // example, "[\"**.pdf\"]" excludes all PDF files.
+ Exclusions []*string `type:"list"`
+
+ // Grouping files is turned on by default when the input contains more than
+ // 50,000 files. To turn on grouping with fewer than 50,000 files, set this
+ // parameter to "inPartition". To disable grouping when there are more than
+ // 50,000 files, set this parameter to "none".
+ GroupFiles *string `type:"string"`
+
+ // The target group size in bytes. The default is computed based on the input
+ // data size and the size of your cluster. When there are fewer than 50,000
+ // input files, "groupFiles" must be set to "inPartition" for this to take effect.
+ GroupSize *string `type:"string"`
+
+ // This option controls the duration in milliseconds after which the S3 listing
+ // is likely to be consistent. Files with modification timestamps falling within
+ // the last maxBand milliseconds are tracked specially when using JobBookmarks
+ // to account for Amazon S3 eventual consistency. Most users don't need to set
+ // this option. The default is 900000 milliseconds, or 15 minutes.
+ MaxBand *int64 `type:"integer"`
+
+ // This option specifies the maximum number of files to save from the last maxBand
+ // seconds. If this number is exceeded, extra files are skipped and only processed
+ // in the next job run.
+ MaxFilesInBand *int64 `type:"integer"`
+
+ // A Boolean value that specifies whether a single record can span multiple
+ // lines. This can occur when a field contains a quoted new-line character.
+ // You must set this option to True if any record spans multiple lines. The
+ // default value is False, which allows for more aggressive file-splitting during
+ // parsing.
+ Multiline *bool `type:"boolean"`
+
+ // The name of the data store.
+ //
+ // Name is a required field
+ Name *string `type:"string" required:"true"`
+
+ // A Boolean value that specifies whether to use the advanced SIMD CSV reader
+ // along with Apache Arrow based columnar memory formats. Only available in
+ // Glue version 3.0.
+ OptimizePerformance *bool `type:"boolean"`
+
+ // Specifies the data schema for the S3 CSV source.
+ OutputSchemas []*GlueSchema `type:"list"` + + // A list of the Amazon S3 paths to read from. + // + // Paths is a required field + Paths []*string `type:"list" required:"true"` + + // Specifies the character to use for quoting. The default is a double quote: + // '"'. Set this to -1 to turn off quoting entirely. + // + // QuoteChar is a required field + QuoteChar *string `type:"string" required:"true" enum:"QuoteChar"` + + // If set to true, recursively reads files in all subdirectories under the specified + // paths. + Recurse *bool `type:"boolean"` + + // Specifies the delimiter character. The default is a comma: ",", but any other + // character can be specified. + // + // Separator is a required field + Separator *string `type:"string" required:"true" enum:"Separator"` + + // A Boolean value that specifies whether to skip the first data line. The default + // value is False. + SkipFirst *bool `type:"boolean"` + + // A Boolean value that specifies whether to treat the first line as a header. + // The default value is False. + WithHeader *bool `type:"boolean"` + + // A Boolean value that specifies whether to write the header to output. The + // default value is True. + WriteHeader *bool `type:"boolean"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s S3CsvSource) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s S3CsvSource) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *S3CsvSource) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "S3CsvSource"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Paths == nil { + invalidParams.Add(request.NewErrParamRequired("Paths")) + } + if s.QuoteChar == nil { + invalidParams.Add(request.NewErrParamRequired("QuoteChar")) + } + if s.Separator == nil { + invalidParams.Add(request.NewErrParamRequired("Separator")) + } + if s.OutputSchemas != nil { + for i, v := range s.OutputSchemas { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "OutputSchemas", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAdditionalOptions sets the AdditionalOptions field's value. +func (s *S3CsvSource) SetAdditionalOptions(v *S3DirectSourceAdditionalOptions) *S3CsvSource { + s.AdditionalOptions = v + return s +} + +// SetCompressionType sets the CompressionType field's value. +func (s *S3CsvSource) SetCompressionType(v string) *S3CsvSource { + s.CompressionType = &v + return s +} + +// SetEscaper sets the Escaper field's value. +func (s *S3CsvSource) SetEscaper(v string) *S3CsvSource { + s.Escaper = &v + return s +} + +// SetExclusions sets the Exclusions field's value. +func (s *S3CsvSource) SetExclusions(v []*string) *S3CsvSource { + s.Exclusions = v + return s +} + +// SetGroupFiles sets the GroupFiles field's value. 
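// A sketch of an S3 CSV source with the fields above; "quote" and "comma"
// are assumptions about the QuoteChar and Separator enum strings, and the
// bucket path is a placeholder:
//
//	csv := (&glue.S3CsvSource{}).
//		SetName("read-csv").
//		SetPaths(aws.StringSlice([]string{"s3://my-bucket/input/"})).
//		SetQuoteChar("quote").
//		SetSeparator("comma").
//		SetWithHeader(true). // first line is a header, not data
//		SetRecurse(true)     // also read files in subdirectories
//	if err := csv.Validate(); err != nil {
//		// Name, Paths, QuoteChar, and Separator are required.
//	}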
+func (s *S3CsvSource) SetGroupFiles(v string) *S3CsvSource {
+ s.GroupFiles = &v
+ return s
+}
+
+// SetGroupSize sets the GroupSize field's value.
+func (s *S3CsvSource) SetGroupSize(v string) *S3CsvSource {
+ s.GroupSize = &v
+ return s
+}
+
+// SetMaxBand sets the MaxBand field's value.
+func (s *S3CsvSource) SetMaxBand(v int64) *S3CsvSource {
+ s.MaxBand = &v
+ return s
+}
+
+// SetMaxFilesInBand sets the MaxFilesInBand field's value.
+func (s *S3CsvSource) SetMaxFilesInBand(v int64) *S3CsvSource {
+ s.MaxFilesInBand = &v
+ return s
+}
+
+// SetMultiline sets the Multiline field's value.
+func (s *S3CsvSource) SetMultiline(v bool) *S3CsvSource {
+ s.Multiline = &v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *S3CsvSource) SetName(v string) *S3CsvSource {
+ s.Name = &v
+ return s
+}
+
+// SetOptimizePerformance sets the OptimizePerformance field's value.
+func (s *S3CsvSource) SetOptimizePerformance(v bool) *S3CsvSource {
+ s.OptimizePerformance = &v
+ return s
+}
+
+// SetOutputSchemas sets the OutputSchemas field's value.
+func (s *S3CsvSource) SetOutputSchemas(v []*GlueSchema) *S3CsvSource {
+ s.OutputSchemas = v
+ return s
+}
+
+// SetPaths sets the Paths field's value.
+func (s *S3CsvSource) SetPaths(v []*string) *S3CsvSource {
+ s.Paths = v
+ return s
+}
+
+// SetQuoteChar sets the QuoteChar field's value.
+func (s *S3CsvSource) SetQuoteChar(v string) *S3CsvSource {
+ s.QuoteChar = &v
+ return s
+}
+
+// SetRecurse sets the Recurse field's value.
+func (s *S3CsvSource) SetRecurse(v bool) *S3CsvSource {
+ s.Recurse = &v
+ return s
+}
+
+// SetSeparator sets the Separator field's value.
+func (s *S3CsvSource) SetSeparator(v string) *S3CsvSource {
+ s.Separator = &v
+ return s
+}
+
+// SetSkipFirst sets the SkipFirst field's value.
+func (s *S3CsvSource) SetSkipFirst(v bool) *S3CsvSource {
+ s.SkipFirst = &v
+ return s
+}
+
+// SetWithHeader sets the WithHeader field's value.
+func (s *S3CsvSource) SetWithHeader(v bool) *S3CsvSource {
+ s.WithHeader = &v
+ return s
+}
+
+// SetWriteHeader sets the WriteHeader field's value.
+func (s *S3CsvSource) SetWriteHeader(v bool) *S3CsvSource {
+ s.WriteHeader = &v
+ return s
+}
+
+// Specifies additional connection options for the Amazon S3 data store.
+type S3DirectSourceAdditionalOptions struct {
+ _ struct{} `type:"structure"`
+
+ // Sets the upper limit for the target number of files that will be processed.
+ BoundedFiles *int64 `type:"long"`
+
+ // Sets the upper limit for the target size of the dataset in bytes that will
+ // be processed.
+ BoundedSize *int64 `type:"long"`
+
+ // Sets the option to enable a sample path.
+ EnableSamplePath *bool `type:"boolean"`
+
+ // If enabled, specifies the sample path.
+ SamplePath *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s S3DirectSourceAdditionalOptions) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s S3DirectSourceAdditionalOptions) GoString() string {
+ return s.String()
+}
+
+// SetBoundedFiles sets the BoundedFiles field's value.
+func (s *S3DirectSourceAdditionalOptions) SetBoundedFiles(v int64) *S3DirectSourceAdditionalOptions {
+ s.BoundedFiles = &v
+ return s
+}
+
+// SetBoundedSize sets the BoundedSize field's value.
+func (s *S3DirectSourceAdditionalOptions) SetBoundedSize(v int64) *S3DirectSourceAdditionalOptions {
+ s.BoundedSize = &v
+ return s
+}
+
+// SetEnableSamplePath sets the EnableSamplePath field's value.
+func (s *S3DirectSourceAdditionalOptions) SetEnableSamplePath(v bool) *S3DirectSourceAdditionalOptions {
+ s.EnableSamplePath = &v
+ return s
+}
+
+// SetSamplePath sets the SamplePath field's value.
+func (s *S3DirectSourceAdditionalOptions) SetSamplePath(v string) *S3DirectSourceAdditionalOptions {
+ s.SamplePath = &v
+ return s
+}
+
+// Specifies a data target that writes to Amazon S3.
+type S3DirectTarget struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies how the data is compressed. This is generally not necessary if
+ // the data has a standard file extension. Possible values are "gzip" and "bzip".
+ Compression *string `type:"string"`
+
+ // Specifies the data output format for the target.
+ //
+ // Format is a required field
+ Format *string `type:"string" required:"true" enum:"TargetFormat"`
+
+ // The nodes that are inputs to the data target.
+ //
+ // Inputs is a required field
+ Inputs []*string `min:"1" type:"list" required:"true"`
+
+ // The name of the data target.
+ //
+ // Name is a required field
+ Name *string `type:"string" required:"true"`
+
+ // Specifies native partitioning using a sequence of keys.
+ PartitionKeys [][]*string `type:"list"`
+
+ // A single Amazon S3 path to write to.
+ //
+ // Path is a required field
+ Path *string `type:"string" required:"true"`
+
+ // A policy that specifies update behavior for the crawler.
+ SchemaChangePolicy *DirectSchemaChangePolicy `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s S3DirectTarget) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s S3DirectTarget) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *S3DirectTarget) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "S3DirectTarget"}
+ if s.Format == nil {
+ invalidParams.Add(request.NewErrParamRequired("Format"))
+ }
+ if s.Inputs == nil {
+ invalidParams.Add(request.NewErrParamRequired("Inputs"))
+ }
+ if s.Inputs != nil && len(s.Inputs) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Inputs", 1))
+ }
+ if s.Name == nil {
+ invalidParams.Add(request.NewErrParamRequired("Name"))
+ }
+ if s.Path == nil {
+ invalidParams.Add(request.NewErrParamRequired("Path"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetCompression sets the Compression field's value.
+func (s *S3DirectTarget) SetCompression(v string) *S3DirectTarget {
+ s.Compression = &v
+ return s
+}
+
+// SetFormat sets the Format field's value.
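// A sketch of a direct S3 target using the type above; "json" is an
// assumption about the TargetFormat enum strings, and the output path is a
// placeholder:
//
//	out := (&glue.S3DirectTarget{}).
//		SetName("write-json").
//		SetInputs(aws.StringSlice([]string{"mask-pii"})).
//		SetFormat("json").
//		SetCompression("gzip").
//		SetPath("s3://my-bucket/output/")
//	if err := out.Validate(); err != nil {
//		// Format, Inputs, Name, and Path are required.
//	}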
+func (s *S3DirectTarget) SetFormat(v string) *S3DirectTarget {
+ s.Format = &v
+ return s
+}
+
+// SetInputs sets the Inputs field's value.
+func (s *S3DirectTarget) SetInputs(v []*string) *S3DirectTarget {
+ s.Inputs = v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *S3DirectTarget) SetName(v string) *S3DirectTarget {
+ s.Name = &v
+ return s
+}
+
+// SetPartitionKeys sets the PartitionKeys field's value.
+func (s *S3DirectTarget) SetPartitionKeys(v [][]*string) *S3DirectTarget {
+ s.PartitionKeys = v
+ return s
+}
+
+// SetPath sets the Path field's value.
+func (s *S3DirectTarget) SetPath(v string) *S3DirectTarget {
+ s.Path = &v
+ return s
+}
+
+// SetSchemaChangePolicy sets the SchemaChangePolicy field's value.
+func (s *S3DirectTarget) SetSchemaChangePolicy(v *DirectSchemaChangePolicy) *S3DirectTarget {
+ s.SchemaChangePolicy = v
+ return s
+}
+
// Specifies how Amazon Simple Storage Service (Amazon S3) data should be encrypted.
type S3Encryption struct {
_ struct{} `type:"structure"`

@@ -47159,6 +53278,509 @@ func (s *S3Encryption) SetS3EncryptionMode(v string) *S3Encryption {
return s
}

+// Specifies a data target that writes to Amazon S3 in Apache Parquet columnar
+// storage.
+type S3GlueParquetTarget struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies how the data is compressed. This is generally not necessary if
+ // the data has a standard file extension. Possible values are "gzip" and "bzip".
+ Compression *string `type:"string" enum:"ParquetCompressionType"`
+
+ // The nodes that are inputs to the data target.
+ //
+ // Inputs is a required field
+ Inputs []*string `min:"1" type:"list" required:"true"`
+
+ // The name of the data target.
+ //
+ // Name is a required field
+ Name *string `type:"string" required:"true"`
+
+ // Specifies native partitioning using a sequence of keys.
+ PartitionKeys [][]*string `type:"list"`
+
+ // A single Amazon S3 path to write to.
+ //
+ // Path is a required field
+ Path *string `type:"string" required:"true"`
+
+ // A policy that specifies update behavior for the crawler.
+ SchemaChangePolicy *DirectSchemaChangePolicy `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s S3GlueParquetTarget) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s S3GlueParquetTarget) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *S3GlueParquetTarget) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "S3GlueParquetTarget"}
+ if s.Inputs == nil {
+ invalidParams.Add(request.NewErrParamRequired("Inputs"))
+ }
+ if s.Inputs != nil && len(s.Inputs) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Inputs", 1))
+ }
+ if s.Name == nil {
+ invalidParams.Add(request.NewErrParamRequired("Name"))
+ }
+ if s.Path == nil {
+ invalidParams.Add(request.NewErrParamRequired("Path"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetCompression sets the Compression field's value.
+func (s *S3GlueParquetTarget) SetCompression(v string) *S3GlueParquetTarget { + s.Compression = &v + return s +} + +// SetInputs sets the Inputs field's value. +func (s *S3GlueParquetTarget) SetInputs(v []*string) *S3GlueParquetTarget { + s.Inputs = v + return s +} + +// SetName sets the Name field's value. +func (s *S3GlueParquetTarget) SetName(v string) *S3GlueParquetTarget { + s.Name = &v + return s +} + +// SetPartitionKeys sets the PartitionKeys field's value. +func (s *S3GlueParquetTarget) SetPartitionKeys(v [][]*string) *S3GlueParquetTarget { + s.PartitionKeys = v + return s +} + +// SetPath sets the Path field's value. +func (s *S3GlueParquetTarget) SetPath(v string) *S3GlueParquetTarget { + s.Path = &v + return s +} + +// SetSchemaChangePolicy sets the SchemaChangePolicy field's value. +func (s *S3GlueParquetTarget) SetSchemaChangePolicy(v *DirectSchemaChangePolicy) *S3GlueParquetTarget { + s.SchemaChangePolicy = v + return s +} + +// Specifies a JSON data store stored in Amazon S3. +type S3JsonSource struct { + _ struct{} `type:"structure"` + + // Specifies additional connection options. + AdditionalOptions *S3DirectSourceAdditionalOptions `type:"structure"` + + // Specifies how the data is compressed. This is generally not necessary if + // the data has a standard file extension. Possible values are "gzip" and "bzip"). + CompressionType *string `type:"string" enum:"CompressionType"` + + // A string containing a JSON list of Unix-style glob patterns to exclude. For + // example, "[\"**.pdf\"]" excludes all PDF files. + Exclusions []*string `type:"list"` + + // Grouping files is turned on by default when the input contains more than + // 50,000 files. To turn on grouping with fewer than 50,000 files, set this + // parameter to "inPartition". To disable grouping when there are more than + // 50,000 files, set this parameter to "none". + GroupFiles *string `type:"string"` + + // The target group size in bytes. The default is computed based on the input + // data size and the size of your cluster. When there are fewer than 50,000 + // input files, "groupFiles" must be set to "inPartition" for this to take effect. + GroupSize *string `type:"string"` + + // A JsonPath string defining the JSON data. + JsonPath *string `type:"string"` + + // This option controls the duration in milliseconds after which the s3 listing + // is likely to be consistent. Files with modification timestamps falling within + // the last maxBand milliseconds are tracked specially when using JobBookmarks + // to account for Amazon S3 eventual consistency. Most users don't need to set + // this option. The default is 900000 milliseconds, or 15 minutes. + MaxBand *int64 `type:"integer"` + + // This option specifies the maximum number of files to save from the last maxBand + // seconds. If this number is exceeded, extra files are skipped and only processed + // in the next job run. + MaxFilesInBand *int64 `type:"integer"` + + // A Boolean value that specifies whether a single record can span multiple + // lines. This can occur when a field contains a quoted new-line character. + // You must set this option to True if any record spans multiple lines. The + // default value is False, which allows for more aggressive file-splitting during + // parsing. + Multiline *bool `type:"boolean"` + + // The name of the data store. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // Specifies the data schema for the S3 JSON source. 
+ OutputSchemas []*GlueSchema `type:"list"` + + // A list of the Amazon S3 paths to read from. + // + // Paths is a required field + Paths []*string `type:"list" required:"true"` + + // If set to true, recursively reads files in all subdirectories under the specified + // paths. + Recurse *bool `type:"boolean"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s S3JsonSource) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s S3JsonSource) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *S3JsonSource) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "S3JsonSource"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Paths == nil { + invalidParams.Add(request.NewErrParamRequired("Paths")) + } + if s.OutputSchemas != nil { + for i, v := range s.OutputSchemas { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "OutputSchemas", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAdditionalOptions sets the AdditionalOptions field's value. +func (s *S3JsonSource) SetAdditionalOptions(v *S3DirectSourceAdditionalOptions) *S3JsonSource { + s.AdditionalOptions = v + return s +} + +// SetCompressionType sets the CompressionType field's value. +func (s *S3JsonSource) SetCompressionType(v string) *S3JsonSource { + s.CompressionType = &v + return s +} + +// SetExclusions sets the Exclusions field's value. +func (s *S3JsonSource) SetExclusions(v []*string) *S3JsonSource { + s.Exclusions = v + return s +} + +// SetGroupFiles sets the GroupFiles field's value. +func (s *S3JsonSource) SetGroupFiles(v string) *S3JsonSource { + s.GroupFiles = &v + return s +} + +// SetGroupSize sets the GroupSize field's value. +func (s *S3JsonSource) SetGroupSize(v string) *S3JsonSource { + s.GroupSize = &v + return s +} + +// SetJsonPath sets the JsonPath field's value. +func (s *S3JsonSource) SetJsonPath(v string) *S3JsonSource { + s.JsonPath = &v + return s +} + +// SetMaxBand sets the MaxBand field's value. +func (s *S3JsonSource) SetMaxBand(v int64) *S3JsonSource { + s.MaxBand = &v + return s +} + +// SetMaxFilesInBand sets the MaxFilesInBand field's value. +func (s *S3JsonSource) SetMaxFilesInBand(v int64) *S3JsonSource { + s.MaxFilesInBand = &v + return s +} + +// SetMultiline sets the Multiline field's value. +func (s *S3JsonSource) SetMultiline(v bool) *S3JsonSource { + s.Multiline = &v + return s +} + +// SetName sets the Name field's value. +func (s *S3JsonSource) SetName(v string) *S3JsonSource { + s.Name = &v + return s +} + +// SetOutputSchemas sets the OutputSchemas field's value. +func (s *S3JsonSource) SetOutputSchemas(v []*GlueSchema) *S3JsonSource { + s.OutputSchemas = v + return s +} + +// SetPaths sets the Paths field's value. 
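For the JSON source, JsonPath and Multiline control how records are extracted, as the field documentation above describes. A minimal illustrative sketch (hypothetical bucket, pattern, and JsonPath expression; same imports as the first example):

// JSON source whose records may span multiple lines; one exclusion
// pattern, written as a JSON list inside a string, mirroring the
// Exclusions documentation above.
src := &glue.S3JsonSource{}
src.SetName("json-source").
	SetPaths([]*string{aws.String("s3://my-bucket/input/")}).
	SetJsonPath("$.records[*]").
	SetMultiline(true).
	SetExclusions([]*string{aws.String(`["**.tmp"]`)})

if err := src.Validate(); err != nil {
	log.Fatal(err)
}
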
+func (s *S3JsonSource) SetPaths(v []*string) *S3JsonSource { + s.Paths = v + return s +} + +// SetRecurse sets the Recurse field's value. +func (s *S3JsonSource) SetRecurse(v bool) *S3JsonSource { + s.Recurse = &v + return s +} + +// Specifies an Apache Parquet data store stored in Amazon S3. +type S3ParquetSource struct { + _ struct{} `type:"structure"` + + // Specifies additional connection options. + AdditionalOptions *S3DirectSourceAdditionalOptions `type:"structure"` + + // Specifies how the data is compressed. This is generally not necessary if + // the data has a standard file extension. Possible values are "gzip" and "bzip"). + CompressionType *string `type:"string" enum:"ParquetCompressionType"` + + // A string containing a JSON list of Unix-style glob patterns to exclude. For + // example, "[\"**.pdf\"]" excludes all PDF files. + Exclusions []*string `type:"list"` + + // Grouping files is turned on by default when the input contains more than + // 50,000 files. To turn on grouping with fewer than 50,000 files, set this + // parameter to "inPartition". To disable grouping when there are more than + // 50,000 files, set this parameter to "none". + GroupFiles *string `type:"string"` + + // The target group size in bytes. The default is computed based on the input + // data size and the size of your cluster. When there are fewer than 50,000 + // input files, "groupFiles" must be set to "inPartition" for this to take effect. + GroupSize *string `type:"string"` + + // This option controls the duration in milliseconds after which the s3 listing + // is likely to be consistent. Files with modification timestamps falling within + // the last maxBand milliseconds are tracked specially when using JobBookmarks + // to account for Amazon S3 eventual consistency. Most users don't need to set + // this option. The default is 900000 milliseconds, or 15 minutes. + MaxBand *int64 `type:"integer"` + + // This option specifies the maximum number of files to save from the last maxBand + // seconds. If this number is exceeded, extra files are skipped and only processed + // in the next job run. + MaxFilesInBand *int64 `type:"integer"` + + // The name of the data store. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // Specifies the data schema for the S3 Parquet source. + OutputSchemas []*GlueSchema `type:"list"` + + // A list of the Amazon S3 paths to read from. + // + // Paths is a required field + Paths []*string `type:"list" required:"true"` + + // If set to true, recursively reads files in all subdirectories under the specified + // paths. + Recurse *bool `type:"boolean"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s S3ParquetSource) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s S3ParquetSource) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
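As the GroupFiles and GroupSize documentation above notes, an explicit group size only takes effect below 50,000 input files when grouping is forced on with "inPartition". A sketch with hypothetical values (same imports as the first example):

// Force grouping for a small file set and target roughly 128 MB of
// input per group. GroupSize is passed as a string number of bytes.
parquetSrc := &glue.S3ParquetSource{
	Name:       aws.String("parquet-source"),
	Paths:      []*string{aws.String("s3://my-bucket/small-files/")},
	GroupFiles: aws.String("inPartition"),
	GroupSize:  aws.String("134217728"),
}
if err := parquetSrc.Validate(); err != nil {
	log.Fatal(err)
}
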
+func (s *S3ParquetSource) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "S3ParquetSource"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Paths == nil { + invalidParams.Add(request.NewErrParamRequired("Paths")) + } + if s.OutputSchemas != nil { + for i, v := range s.OutputSchemas { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "OutputSchemas", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAdditionalOptions sets the AdditionalOptions field's value. +func (s *S3ParquetSource) SetAdditionalOptions(v *S3DirectSourceAdditionalOptions) *S3ParquetSource { + s.AdditionalOptions = v + return s +} + +// SetCompressionType sets the CompressionType field's value. +func (s *S3ParquetSource) SetCompressionType(v string) *S3ParquetSource { + s.CompressionType = &v + return s +} + +// SetExclusions sets the Exclusions field's value. +func (s *S3ParquetSource) SetExclusions(v []*string) *S3ParquetSource { + s.Exclusions = v + return s +} + +// SetGroupFiles sets the GroupFiles field's value. +func (s *S3ParquetSource) SetGroupFiles(v string) *S3ParquetSource { + s.GroupFiles = &v + return s +} + +// SetGroupSize sets the GroupSize field's value. +func (s *S3ParquetSource) SetGroupSize(v string) *S3ParquetSource { + s.GroupSize = &v + return s +} + +// SetMaxBand sets the MaxBand field's value. +func (s *S3ParquetSource) SetMaxBand(v int64) *S3ParquetSource { + s.MaxBand = &v + return s +} + +// SetMaxFilesInBand sets the MaxFilesInBand field's value. +func (s *S3ParquetSource) SetMaxFilesInBand(v int64) *S3ParquetSource { + s.MaxFilesInBand = &v + return s +} + +// SetName sets the Name field's value. +func (s *S3ParquetSource) SetName(v string) *S3ParquetSource { + s.Name = &v + return s +} + +// SetOutputSchemas sets the OutputSchemas field's value. +func (s *S3ParquetSource) SetOutputSchemas(v []*GlueSchema) *S3ParquetSource { + s.OutputSchemas = v + return s +} + +// SetPaths sets the Paths field's value. +func (s *S3ParquetSource) SetPaths(v []*string) *S3ParquetSource { + s.Paths = v + return s +} + +// SetRecurse sets the Recurse field's value. +func (s *S3ParquetSource) SetRecurse(v bool) *S3ParquetSource { + s.Recurse = &v + return s +} + +// Specifies additional connection options for the Amazon S3 data store. +type S3SourceAdditionalOptions struct { + _ struct{} `type:"structure"` + + // Sets the upper limit for the target number of files that will be processed. + BoundedFiles *int64 `type:"long"` + + // Sets the upper limit for the target size of the dataset in bytes that will + // be processed. + BoundedSize *int64 `type:"long"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s S3SourceAdditionalOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s S3SourceAdditionalOptions) GoString() string { + return s.String() +} + +// SetBoundedFiles sets the BoundedFiles field's value. 
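Each of these source and target structures ultimately hangs off a CodeGenConfigurationNode in a job definition. The sketch below assumes CreateJobInput exposes the nodes as a CodeGenConfigurationNodes map keyed by node ID (an assumption here; consult the CreateJob API); names, paths, and the role ARN are hypothetical:

// Wire one source node and one target node into a visual job definition.
nodes := map[string]*glue.CodeGenConfigurationNode{
	"node-1": {
		S3JsonSource: &glue.S3JsonSource{
			Name:  aws.String("json-source"),
			Paths: []*string{aws.String("s3://my-bucket/input/")},
		},
	},
	"node-2": {
		S3DirectTarget: &glue.S3DirectTarget{
			Name:   aws.String("s3-target"),
			Inputs: []*string{aws.String("node-1")},
			Path:   aws.String("s3://my-bucket/output/"),
			Format: aws.String(glue.TargetFormatJson),
		},
	},
}
input := &glue.CreateJobInput{
	Name:                      aws.String("visual-job"),
	Role:                      aws.String("arn:aws:iam::123456789012:role/GlueJobRole"),
	Command:                   &glue.JobCommand{Name: aws.String("glueetl")},
	CodeGenConfigurationNodes: nodes, // assumed field name, see lead-in
}
if err := input.Validate(); err != nil {
	log.Fatal(err)
}
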
+func (s *S3SourceAdditionalOptions) SetBoundedFiles(v int64) *S3SourceAdditionalOptions { + s.BoundedFiles = &v + return s +} + +// SetBoundedSize sets the BoundedSize field's value. +func (s *S3SourceAdditionalOptions) SetBoundedSize(v int64) *S3SourceAdditionalOptions { + s.BoundedSize = &v + return s +} + // Specifies a data store in Amazon Simple Storage Service (Amazon S3). type S3Target struct { _ struct{} `type:"structure"` @@ -48245,6 +54867,164 @@ func (s *Segment) SetTotalSegments(v int64) *Segment { return s } +// Specifies a transform that chooses the data property keys that you want to +// keep. +type SelectFields struct { + _ struct{} `type:"structure"` + + // The data inputs identified by their node names. + // + // Inputs is a required field + Inputs []*string `min:"1" type:"list" required:"true"` + + // The name of the transform node. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // A JSON path to a variable in the data structure. + // + // Paths is a required field + Paths [][]*string `type:"list" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SelectFields) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SelectFields) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SelectFields) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SelectFields"} + if s.Inputs == nil { + invalidParams.Add(request.NewErrParamRequired("Inputs")) + } + if s.Inputs != nil && len(s.Inputs) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Inputs", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Paths == nil { + invalidParams.Add(request.NewErrParamRequired("Paths")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetInputs sets the Inputs field's value. +func (s *SelectFields) SetInputs(v []*string) *SelectFields { + s.Inputs = v + return s +} + +// SetName sets the Name field's value. +func (s *SelectFields) SetName(v string) *SelectFields { + s.Name = &v + return s +} + +// SetPaths sets the Paths field's value. +func (s *SelectFields) SetPaths(v [][]*string) *SelectFields { + s.Paths = v + return s +} + +// Specifies a transform that chooses one DynamicFrame from a collection of +// DynamicFrames. The output is the selected DynamicFrame +type SelectFromCollection struct { + _ struct{} `type:"structure"` + + // The index for the DynamicFrame to be selected. + // + // Index is a required field + Index *int64 `type:"integer" required:"true"` + + // The data inputs identified by their node names. + // + // Inputs is a required field + Inputs []*string `min:"1" type:"list" required:"true"` + + // The name of the transform node. + // + // Name is a required field + Name *string `type:"string" required:"true"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SelectFromCollection) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SelectFromCollection) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SelectFromCollection) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SelectFromCollection"} + if s.Index == nil { + invalidParams.Add(request.NewErrParamRequired("Index")) + } + if s.Inputs == nil { + invalidParams.Add(request.NewErrParamRequired("Inputs")) + } + if s.Inputs != nil && len(s.Inputs) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Inputs", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIndex sets the Index field's value. +func (s *SelectFromCollection) SetIndex(v int64) *SelectFromCollection { + s.Index = &v + return s +} + +// SetInputs sets the Inputs field's value. +func (s *SelectFromCollection) SetInputs(v []*string) *SelectFromCollection { + s.Inputs = v + return s +} + +// SetName sets the Name field's value. +func (s *SelectFromCollection) SetName(v string) *SelectFromCollection { + s.Name = &v + return s +} + // Information about a serialization/deserialization program (SerDe) that serves // as an extractor and loader. type SerDeInfo struct { @@ -48603,6 +55383,632 @@ func (s *SortCriterion) SetSort(v string) *SortCriterion { return s } +// Specifies a connector to an Apache Spark data source. +type SparkConnectorSource struct { + _ struct{} `type:"structure"` + + // Additional connection options for the connector. + AdditionalOptions map[string]*string `type:"map"` + + // The name of the connection that is associated with the connector. + // + // ConnectionName is a required field + ConnectionName *string `type:"string" required:"true"` + + // The type of connection, such as marketplace.spark or custom.spark, designating + // a connection to an Apache Spark data store. + // + // ConnectionType is a required field + ConnectionType *string `type:"string" required:"true"` + + // The name of a connector that assists with accessing the data store in Glue + // Studio. + // + // ConnectorName is a required field + ConnectorName *string `type:"string" required:"true"` + + // The name of the data source. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // Specifies data schema for the custom spark source. + OutputSchemas []*GlueSchema `type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SparkConnectorSource) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s SparkConnectorSource) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SparkConnectorSource) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SparkConnectorSource"} + if s.ConnectionName == nil { + invalidParams.Add(request.NewErrParamRequired("ConnectionName")) + } + if s.ConnectionType == nil { + invalidParams.Add(request.NewErrParamRequired("ConnectionType")) + } + if s.ConnectorName == nil { + invalidParams.Add(request.NewErrParamRequired("ConnectorName")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.OutputSchemas != nil { + for i, v := range s.OutputSchemas { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "OutputSchemas", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAdditionalOptions sets the AdditionalOptions field's value. +func (s *SparkConnectorSource) SetAdditionalOptions(v map[string]*string) *SparkConnectorSource { + s.AdditionalOptions = v + return s +} + +// SetConnectionName sets the ConnectionName field's value. +func (s *SparkConnectorSource) SetConnectionName(v string) *SparkConnectorSource { + s.ConnectionName = &v + return s +} + +// SetConnectionType sets the ConnectionType field's value. +func (s *SparkConnectorSource) SetConnectionType(v string) *SparkConnectorSource { + s.ConnectionType = &v + return s +} + +// SetConnectorName sets the ConnectorName field's value. +func (s *SparkConnectorSource) SetConnectorName(v string) *SparkConnectorSource { + s.ConnectorName = &v + return s +} + +// SetName sets the Name field's value. +func (s *SparkConnectorSource) SetName(v string) *SparkConnectorSource { + s.Name = &v + return s +} + +// SetOutputSchemas sets the OutputSchemas field's value. +func (s *SparkConnectorSource) SetOutputSchemas(v []*GlueSchema) *SparkConnectorSource { + s.OutputSchemas = v + return s +} + +// Specifies a target that uses an Apache Spark connector. +type SparkConnectorTarget struct { + _ struct{} `type:"structure"` + + // Additional connection options for the connector. + AdditionalOptions map[string]*string `type:"map"` + + // The name of a connection for an Apache Spark connector. + // + // ConnectionName is a required field + ConnectionName *string `type:"string" required:"true"` + + // The type of connection, such as marketplace.spark or custom.spark, designating + // a connection to an Apache Spark data store. + // + // ConnectionType is a required field + ConnectionType *string `type:"string" required:"true"` + + // The name of an Apache Spark connector. + // + // ConnectorName is a required field + ConnectorName *string `type:"string" required:"true"` + + // The nodes that are inputs to the data target. + // + // Inputs is a required field + Inputs []*string `min:"1" type:"list" required:"true"` + + // The name of the data target. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // Specifies the data schema for the custom spark target. + OutputSchemas []*GlueSchema `type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s SparkConnectorTarget) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SparkConnectorTarget) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SparkConnectorTarget) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SparkConnectorTarget"} + if s.ConnectionName == nil { + invalidParams.Add(request.NewErrParamRequired("ConnectionName")) + } + if s.ConnectionType == nil { + invalidParams.Add(request.NewErrParamRequired("ConnectionType")) + } + if s.ConnectorName == nil { + invalidParams.Add(request.NewErrParamRequired("ConnectorName")) + } + if s.Inputs == nil { + invalidParams.Add(request.NewErrParamRequired("Inputs")) + } + if s.Inputs != nil && len(s.Inputs) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Inputs", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.OutputSchemas != nil { + for i, v := range s.OutputSchemas { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "OutputSchemas", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAdditionalOptions sets the AdditionalOptions field's value. +func (s *SparkConnectorTarget) SetAdditionalOptions(v map[string]*string) *SparkConnectorTarget { + s.AdditionalOptions = v + return s +} + +// SetConnectionName sets the ConnectionName field's value. +func (s *SparkConnectorTarget) SetConnectionName(v string) *SparkConnectorTarget { + s.ConnectionName = &v + return s +} + +// SetConnectionType sets the ConnectionType field's value. +func (s *SparkConnectorTarget) SetConnectionType(v string) *SparkConnectorTarget { + s.ConnectionType = &v + return s +} + +// SetConnectorName sets the ConnectorName field's value. +func (s *SparkConnectorTarget) SetConnectorName(v string) *SparkConnectorTarget { + s.ConnectorName = &v + return s +} + +// SetInputs sets the Inputs field's value. +func (s *SparkConnectorTarget) SetInputs(v []*string) *SparkConnectorTarget { + s.Inputs = v + return s +} + +// SetName sets the Name field's value. +func (s *SparkConnectorTarget) SetName(v string) *SparkConnectorTarget { + s.Name = &v + return s +} + +// SetOutputSchemas sets the OutputSchemas field's value. +func (s *SparkConnectorTarget) SetOutputSchemas(v []*GlueSchema) *SparkConnectorTarget { + s.OutputSchemas = v + return s +} + +// Specifies a transform where you enter a SQL query using Spark SQL syntax +// to transform the data. The output is a single DynamicFrame. +type SparkSQL struct { + _ struct{} `type:"structure"` + + // The data inputs identified by their node names. You can associate a table + // name with each input node to use in the SQL query. The name you choose must + // meet the Spark SQL naming restrictions. + // + // Inputs is a required field + Inputs []*string `min:"1" type:"list" required:"true"` + + // The name of the transform node. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // Specifies the data schema for the SparkSQL transform. 
+ OutputSchemas []*GlueSchema `type:"list"` + + // A list of aliases. An alias allows you to specify what name to use in the + // SQL for a given input. For example, you have a datasource named "MyDataSource". + // If you specify From as MyDataSource, and Alias as SqlName, then in your SQL + // you can do: + // + // select * from SqlName + // + // and that gets data from MyDataSource. + // + // SqlAliases is a required field + SqlAliases []*SqlAlias `type:"list" required:"true"` + + // A SQL query that must use Spark SQL syntax and return a single data set. + // + // SqlQuery is a required field + SqlQuery *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SparkSQL) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SparkSQL) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SparkSQL) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SparkSQL"} + if s.Inputs == nil { + invalidParams.Add(request.NewErrParamRequired("Inputs")) + } + if s.Inputs != nil && len(s.Inputs) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Inputs", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.SqlAliases == nil { + invalidParams.Add(request.NewErrParamRequired("SqlAliases")) + } + if s.SqlQuery == nil { + invalidParams.Add(request.NewErrParamRequired("SqlQuery")) + } + if s.OutputSchemas != nil { + for i, v := range s.OutputSchemas { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "OutputSchemas", i), err.(request.ErrInvalidParams)) + } + } + } + if s.SqlAliases != nil { + for i, v := range s.SqlAliases { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "SqlAliases", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetInputs sets the Inputs field's value. +func (s *SparkSQL) SetInputs(v []*string) *SparkSQL { + s.Inputs = v + return s +} + +// SetName sets the Name field's value. +func (s *SparkSQL) SetName(v string) *SparkSQL { + s.Name = &v + return s +} + +// SetOutputSchemas sets the OutputSchemas field's value. +func (s *SparkSQL) SetOutputSchemas(v []*GlueSchema) *SparkSQL { + s.OutputSchemas = v + return s +} + +// SetSqlAliases sets the SqlAliases field's value. +func (s *SparkSQL) SetSqlAliases(v []*SqlAlias) *SparkSQL { + s.SqlAliases = v + return s +} + +// SetSqlQuery sets the SqlQuery field's value. +func (s *SparkSQL) SetSqlQuery(v string) *SparkSQL { + s.SqlQuery = &v + return s +} + +// Specifies a transform that writes samples of the data to an Amazon S3 bucket. +type Spigot struct { + _ struct{} `type:"structure"` + + // The data inputs identified by their node names. + // + // Inputs is a required field + Inputs []*string `min:"1" type:"list" required:"true"` + + // The name of the transform node. 
+ // + // Name is a required field + Name *string `type:"string" required:"true"` + + // A path in Amazon S3 where the transform will write a subset of records from + // the dataset to a JSON file in an Amazon S3 bucket. + // + // Path is a required field + Path *string `type:"string" required:"true"` + + // The probability (a decimal value with a maximum value of 1) of picking any + // given record. A value of 1 indicates that each row read from the dataset + // should be included in the sample output. + Prob *float64 `type:"double"` + + // Specifies a number of records to write starting from the beginning of the + // dataset. + Topk *int64 `type:"integer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Spigot) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Spigot) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Spigot) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Spigot"} + if s.Inputs == nil { + invalidParams.Add(request.NewErrParamRequired("Inputs")) + } + if s.Inputs != nil && len(s.Inputs) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Inputs", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Path == nil { + invalidParams.Add(request.NewErrParamRequired("Path")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetInputs sets the Inputs field's value. +func (s *Spigot) SetInputs(v []*string) *Spigot { + s.Inputs = v + return s +} + +// SetName sets the Name field's value. +func (s *Spigot) SetName(v string) *Spigot { + s.Name = &v + return s +} + +// SetPath sets the Path field's value. +func (s *Spigot) SetPath(v string) *Spigot { + s.Path = &v + return s +} + +// SetProb sets the Prob field's value. +func (s *Spigot) SetProb(v float64) *Spigot { + s.Prob = &v + return s +} + +// SetTopk sets the Topk field's value. +func (s *Spigot) SetTopk(v int64) *Spigot { + s.Topk = &v + return s +} + +// Specifies a transform that splits data property keys into two DynamicFrames. +// The output is a collection of DynamicFrames: one with selected data property +// keys, and one with the remaining data property keys. +type SplitFields struct { + _ struct{} `type:"structure"` + + // The data inputs identified by their node names. + // + // Inputs is a required field + Inputs []*string `min:"1" type:"list" required:"true"` + + // The name of the transform node. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // A JSON path to a variable in the data structure. + // + // Paths is a required field + Paths [][]*string `type:"list" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
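The alias mechanism described in the SparkSQL documentation above maps an input to the table name used inside the query. A sketch mirroring that description (same imports as the first example; the node ID is hypothetical):

// Expose input "node-1" to the query under the alias SqlName, then
// select from it, exactly as the SqlAliases documentation illustrates.
sql := &glue.SparkSQL{
	Name:     aws.String("sql-transform"),
	Inputs:   []*string{aws.String("node-1")},
	SqlQuery: aws.String("select * from SqlName"),
	SqlAliases: []*glue.SqlAlias{
		{From: aws.String("node-1"), Alias: aws.String("SqlName")},
	},
}
if err := sql.Validate(); err != nil {
	log.Fatal(err)
}
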
+func (s SplitFields) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SplitFields) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SplitFields) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SplitFields"} + if s.Inputs == nil { + invalidParams.Add(request.NewErrParamRequired("Inputs")) + } + if s.Inputs != nil && len(s.Inputs) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Inputs", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Paths == nil { + invalidParams.Add(request.NewErrParamRequired("Paths")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetInputs sets the Inputs field's value. +func (s *SplitFields) SetInputs(v []*string) *SplitFields { + s.Inputs = v + return s +} + +// SetName sets the Name field's value. +func (s *SplitFields) SetName(v string) *SplitFields { + s.Name = &v + return s +} + +// SetPaths sets the Paths field's value. +func (s *SplitFields) SetPaths(v [][]*string) *SplitFields { + s.Paths = v + return s +} + +// Represents a single entry in the list of values for SqlAliases. +type SqlAlias struct { + _ struct{} `type:"structure"` + + // A temporary name given to a table, or a column in a table. + // + // Alias is a required field + Alias *string `type:"string" required:"true"` + + // A table, or a column in a table. + // + // From is a required field + From *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SqlAlias) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SqlAlias) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SqlAlias) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SqlAlias"} + if s.Alias == nil { + invalidParams.Add(request.NewErrParamRequired("Alias")) + } + if s.From == nil { + invalidParams.Add(request.NewErrParamRequired("From")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAlias sets the Alias field's value. +func (s *SqlAlias) SetAlias(v string) *SqlAlias { + s.Alias = &v + return s +} + +// SetFrom sets the From field's value. +func (s *SqlAlias) SetFrom(v string) *SqlAlias { + s.From = &v + return s +} + type StartBlueprintRunInput struct { _ struct{} `type:"structure"` @@ -50459,6 +57865,63 @@ func (s *StorageDescriptor) SetStoredAsSubDirectories(v bool) *StorageDescriptor return s } +// Specifies options related to data preview for viewing a sample of your data. +type StreamingDataPreviewOptions struct { + _ struct{} `type:"structure"` + + // The polling time in milliseconds. 
+ PollingTime *int64 `min:"10" type:"long"` + + // The limit to the number of records polled. + RecordPollingLimit *int64 `min:"1" type:"long"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StreamingDataPreviewOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StreamingDataPreviewOptions) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StreamingDataPreviewOptions) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StreamingDataPreviewOptions"} + if s.PollingTime != nil && *s.PollingTime < 10 { + invalidParams.Add(request.NewErrParamMinValue("PollingTime", 10)) + } + if s.RecordPollingLimit != nil && *s.RecordPollingLimit < 1 { + invalidParams.Add(request.NewErrParamMinValue("RecordPollingLimit", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPollingTime sets the PollingTime field's value. +func (s *StreamingDataPreviewOptions) SetPollingTime(v int64) *StreamingDataPreviewOptions { + s.PollingTime = &v + return s +} + +// SetRecordPollingLimit sets the RecordPollingLimit field's value. +func (s *StreamingDataPreviewOptions) SetRecordPollingLimit(v int64) *StreamingDataPreviewOptions { + s.RecordPollingLimit = &v + return s +} + // Defines column statistics supported for character sequence data values. type StringColumnStatisticsData struct { _ struct{} `type:"structure"` @@ -52283,6 +59746,90 @@ func (s *UnfilteredPartition) SetPartition(v *Partition) *UnfilteredPartition { return s } +// Specifies a transform that combines the rows from two or more datasets into +// a single result. +type Union struct { + _ struct{} `type:"structure"` + + // The node ID inputs to the transform. + // + // Inputs is a required field + Inputs []*string `min:"2" type:"list" required:"true"` + + // The name of the transform node. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // Indicates the type of Union transform. + // + // Specify ALL to join all rows from data sources to the resulting DynamicFrame. + // The resulting union does not remove duplicate rows. + // + // Specify DISTINCT to remove duplicate rows in the resulting DynamicFrame. + // + // UnionType is a required field + UnionType *string `type:"string" required:"true" enum:"UnionType"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Union) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s Union) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Union) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Union"} + if s.Inputs == nil { + invalidParams.Add(request.NewErrParamRequired("Inputs")) + } + if s.Inputs != nil && len(s.Inputs) < 2 { + invalidParams.Add(request.NewErrParamMinLen("Inputs", 2)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.UnionType == nil { + invalidParams.Add(request.NewErrParamRequired("UnionType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetInputs sets the Inputs field's value. +func (s *Union) SetInputs(v []*string) *Union { + s.Inputs = v + return s +} + +// SetName sets the Name field's value. +func (s *Union) SetName(v string) *Union { + s.Name = &v + return s +} + +// SetUnionType sets the UnionType field's value. +func (s *Union) SetUnionType(v string) *Union { + s.UnionType = &v + return s +} + type UntagResourceInput struct { _ struct{} `type:"structure"` @@ -54990,6 +62537,56 @@ func (s *UpdateXMLClassifierRequest) SetRowTag(v string) *UpdateXMLClassifierReq return s } +// The options to configure an upsert operation when writing to a Redshift target . +type UpsertRedshiftTargetOptions struct { + _ struct{} `type:"structure"` + + // The name of the connection to use to write to Redshift. + ConnectionName *string `type:"string"` + + // The physical location of the Redshift table. + TableLocation *string `type:"string"` + + // The keys used to determine whether to perform an update or insert. + UpsertKeys []*string `type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpsertRedshiftTargetOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpsertRedshiftTargetOptions) GoString() string { + return s.String() +} + +// SetConnectionName sets the ConnectionName field's value. +func (s *UpsertRedshiftTargetOptions) SetConnectionName(v string) *UpsertRedshiftTargetOptions { + s.ConnectionName = &v + return s +} + +// SetTableLocation sets the TableLocation field's value. +func (s *UpsertRedshiftTargetOptions) SetTableLocation(v string) *UpsertRedshiftTargetOptions { + s.TableLocation = &v + return s +} + +// SetUpsertKeys sets the UpsertKeys field's value. +func (s *UpsertRedshiftTargetOptions) SetUpsertKeys(v []*string) *UpsertRedshiftTargetOptions { + s.UpsertKeys = v + return s +} + // Represents the equivalent of a Hive user-defined function (UDF) definition. 
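Union is the one transform above whose Inputs list carries a minimum length of two, and its Validate method enforces that along with the required UnionType. A short illustrative sketch (same imports as the first example):

// Combine two upstream nodes and drop duplicate rows.
u := &glue.Union{
	Name:      aws.String("union-distinct"),
	Inputs:    []*string{aws.String("node-1"), aws.String("node-2")},
	UnionType: aws.String(glue.UnionTypeDistinct),
}
// Validate rejects the node if fewer than two inputs are supplied.
if err := u.Validate(); err != nil {
	log.Fatal(err)
}
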
type UserDefinedFunction struct { _ struct{} `type:"structure"` @@ -55756,6 +63353,74 @@ func (s *XMLClassifier) SetVersion(v int64) *XMLClassifier { return s } +const ( + // AggFunctionAvg is a AggFunction enum value + AggFunctionAvg = "avg" + + // AggFunctionCountDistinct is a AggFunction enum value + AggFunctionCountDistinct = "countDistinct" + + // AggFunctionCount is a AggFunction enum value + AggFunctionCount = "count" + + // AggFunctionFirst is a AggFunction enum value + AggFunctionFirst = "first" + + // AggFunctionLast is a AggFunction enum value + AggFunctionLast = "last" + + // AggFunctionKurtosis is a AggFunction enum value + AggFunctionKurtosis = "kurtosis" + + // AggFunctionMax is a AggFunction enum value + AggFunctionMax = "max" + + // AggFunctionMin is a AggFunction enum value + AggFunctionMin = "min" + + // AggFunctionSkewness is a AggFunction enum value + AggFunctionSkewness = "skewness" + + // AggFunctionStddevSamp is a AggFunction enum value + AggFunctionStddevSamp = "stddev_samp" + + // AggFunctionStddevPop is a AggFunction enum value + AggFunctionStddevPop = "stddev_pop" + + // AggFunctionSum is a AggFunction enum value + AggFunctionSum = "sum" + + // AggFunctionSumDistinct is a AggFunction enum value + AggFunctionSumDistinct = "sumDistinct" + + // AggFunctionVarSamp is a AggFunction enum value + AggFunctionVarSamp = "var_samp" + + // AggFunctionVarPop is a AggFunction enum value + AggFunctionVarPop = "var_pop" +) + +// AggFunction_Values returns all elements of the AggFunction enum +func AggFunction_Values() []string { + return []string{ + AggFunctionAvg, + AggFunctionCountDistinct, + AggFunctionCount, + AggFunctionFirst, + AggFunctionLast, + AggFunctionKurtosis, + AggFunctionMax, + AggFunctionMin, + AggFunctionSkewness, + AggFunctionStddevSamp, + AggFunctionStddevPop, + AggFunctionSum, + AggFunctionSumDistinct, + AggFunctionVarSamp, + AggFunctionVarPop, + } +} + const ( // BackfillErrorCodeEncryptedPartitionError is a BackfillErrorCode enum value BackfillErrorCodeEncryptedPartitionError = "ENCRYPTED_PARTITION_ERROR" @@ -55968,6 +63633,22 @@ func Compatibility_Values() []string { } } +const ( + // CompressionTypeGzip is a CompressionType enum value + CompressionTypeGzip = "gzip" + + // CompressionTypeBzip2 is a CompressionType enum value + CompressionTypeBzip2 = "bzip2" +) + +// CompressionType_Values returns all elements of the CompressionType enum +func CompressionType_Values() []string { + return []string{ + CompressionTypeGzip, + CompressionTypeBzip2, + } +} + const ( // ConnectionPropertyKeyHost is a ConnectionPropertyKey enum value ConnectionPropertyKeyHost = "HOST" @@ -56292,6 +63973,286 @@ func ExistCondition_Values() []string { } } +const ( + // FilterLogicalOperatorAnd is a FilterLogicalOperator enum value + FilterLogicalOperatorAnd = "AND" + + // FilterLogicalOperatorOr is a FilterLogicalOperator enum value + FilterLogicalOperatorOr = "OR" +) + +// FilterLogicalOperator_Values returns all elements of the FilterLogicalOperator enum +func FilterLogicalOperator_Values() []string { + return []string{ + FilterLogicalOperatorAnd, + FilterLogicalOperatorOr, + } +} + +const ( + // FilterOperationEq is a FilterOperation enum value + FilterOperationEq = "EQ" + + // FilterOperationLt is a FilterOperation enum value + FilterOperationLt = "LT" + + // FilterOperationGt is a FilterOperation enum value + FilterOperationGt = "GT" + + // FilterOperationLte is a FilterOperation enum value + FilterOperationLte = "LTE" + + // FilterOperationGte is a FilterOperation enum value + 
FilterOperationGte = "GTE" + + // FilterOperationRegex is a FilterOperation enum value + FilterOperationRegex = "REGEX" + + // FilterOperationIsnull is a FilterOperation enum value + FilterOperationIsnull = "ISNULL" +) + +// FilterOperation_Values returns all elements of the FilterOperation enum +func FilterOperation_Values() []string { + return []string{ + FilterOperationEq, + FilterOperationLt, + FilterOperationGt, + FilterOperationLte, + FilterOperationGte, + FilterOperationRegex, + FilterOperationIsnull, + } +} + +const ( + // FilterValueTypeColumnextracted is a FilterValueType enum value + FilterValueTypeColumnextracted = "COLUMNEXTRACTED" + + // FilterValueTypeConstant is a FilterValueType enum value + FilterValueTypeConstant = "CONSTANT" +) + +// FilterValueType_Values returns all elements of the FilterValueType enum +func FilterValueType_Values() []string { + return []string{ + FilterValueTypeColumnextracted, + FilterValueTypeConstant, + } +} + +const ( + // GlueRecordTypeDate is a GlueRecordType enum value + GlueRecordTypeDate = "DATE" + + // GlueRecordTypeString is a GlueRecordType enum value + GlueRecordTypeString = "STRING" + + // GlueRecordTypeTimestamp is a GlueRecordType enum value + GlueRecordTypeTimestamp = "TIMESTAMP" + + // GlueRecordTypeInt is a GlueRecordType enum value + GlueRecordTypeInt = "INT" + + // GlueRecordTypeFloat is a GlueRecordType enum value + GlueRecordTypeFloat = "FLOAT" + + // GlueRecordTypeLong is a GlueRecordType enum value + GlueRecordTypeLong = "LONG" + + // GlueRecordTypeBigdecimal is a GlueRecordType enum value + GlueRecordTypeBigdecimal = "BIGDECIMAL" + + // GlueRecordTypeByte is a GlueRecordType enum value + GlueRecordTypeByte = "BYTE" + + // GlueRecordTypeShort is a GlueRecordType enum value + GlueRecordTypeShort = "SHORT" + + // GlueRecordTypeDouble is a GlueRecordType enum value + GlueRecordTypeDouble = "DOUBLE" +) + +// GlueRecordType_Values returns all elements of the GlueRecordType enum +func GlueRecordType_Values() []string { + return []string{ + GlueRecordTypeDate, + GlueRecordTypeString, + GlueRecordTypeTimestamp, + GlueRecordTypeInt, + GlueRecordTypeFloat, + GlueRecordTypeLong, + GlueRecordTypeBigdecimal, + GlueRecordTypeByte, + GlueRecordTypeShort, + GlueRecordTypeDouble, + } +} + +const ( + // JDBCDataTypeArray is a JDBCDataType enum value + JDBCDataTypeArray = "ARRAY" + + // JDBCDataTypeBigint is a JDBCDataType enum value + JDBCDataTypeBigint = "BIGINT" + + // JDBCDataTypeBinary is a JDBCDataType enum value + JDBCDataTypeBinary = "BINARY" + + // JDBCDataTypeBit is a JDBCDataType enum value + JDBCDataTypeBit = "BIT" + + // JDBCDataTypeBlob is a JDBCDataType enum value + JDBCDataTypeBlob = "BLOB" + + // JDBCDataTypeBoolean is a JDBCDataType enum value + JDBCDataTypeBoolean = "BOOLEAN" + + // JDBCDataTypeChar is a JDBCDataType enum value + JDBCDataTypeChar = "CHAR" + + // JDBCDataTypeClob is a JDBCDataType enum value + JDBCDataTypeClob = "CLOB" + + // JDBCDataTypeDatalink is a JDBCDataType enum value + JDBCDataTypeDatalink = "DATALINK" + + // JDBCDataTypeDate is a JDBCDataType enum value + JDBCDataTypeDate = "DATE" + + // JDBCDataTypeDecimal is a JDBCDataType enum value + JDBCDataTypeDecimal = "DECIMAL" + + // JDBCDataTypeDistinct is a JDBCDataType enum value + JDBCDataTypeDistinct = "DISTINCT" + + // JDBCDataTypeDouble is a JDBCDataType enum value + JDBCDataTypeDouble = "DOUBLE" + + // JDBCDataTypeFloat is a JDBCDataType enum value + JDBCDataTypeFloat = "FLOAT" + + // JDBCDataTypeInteger is a JDBCDataType enum value + 
JDBCDataTypeInteger = "INTEGER" + + // JDBCDataTypeJavaObject is a JDBCDataType enum value + JDBCDataTypeJavaObject = "JAVA_OBJECT" + + // JDBCDataTypeLongnvarchar is a JDBCDataType enum value + JDBCDataTypeLongnvarchar = "LONGNVARCHAR" + + // JDBCDataTypeLongvarbinary is a JDBCDataType enum value + JDBCDataTypeLongvarbinary = "LONGVARBINARY" + + // JDBCDataTypeLongvarchar is a JDBCDataType enum value + JDBCDataTypeLongvarchar = "LONGVARCHAR" + + // JDBCDataTypeNchar is a JDBCDataType enum value + JDBCDataTypeNchar = "NCHAR" + + // JDBCDataTypeNclob is a JDBCDataType enum value + JDBCDataTypeNclob = "NCLOB" + + // JDBCDataTypeNull is a JDBCDataType enum value + JDBCDataTypeNull = "NULL" + + // JDBCDataTypeNumeric is a JDBCDataType enum value + JDBCDataTypeNumeric = "NUMERIC" + + // JDBCDataTypeNvarchar is a JDBCDataType enum value + JDBCDataTypeNvarchar = "NVARCHAR" + + // JDBCDataTypeOther is a JDBCDataType enum value + JDBCDataTypeOther = "OTHER" + + // JDBCDataTypeReal is a JDBCDataType enum value + JDBCDataTypeReal = "REAL" + + // JDBCDataTypeRef is a JDBCDataType enum value + JDBCDataTypeRef = "REF" + + // JDBCDataTypeRefCursor is a JDBCDataType enum value + JDBCDataTypeRefCursor = "REF_CURSOR" + + // JDBCDataTypeRowid is a JDBCDataType enum value + JDBCDataTypeRowid = "ROWID" + + // JDBCDataTypeSmallint is a JDBCDataType enum value + JDBCDataTypeSmallint = "SMALLINT" + + // JDBCDataTypeSqlxml is a JDBCDataType enum value + JDBCDataTypeSqlxml = "SQLXML" + + // JDBCDataTypeStruct is a JDBCDataType enum value + JDBCDataTypeStruct = "STRUCT" + + // JDBCDataTypeTime is a JDBCDataType enum value + JDBCDataTypeTime = "TIME" + + // JDBCDataTypeTimeWithTimezone is a JDBCDataType enum value + JDBCDataTypeTimeWithTimezone = "TIME_WITH_TIMEZONE" + + // JDBCDataTypeTimestamp is a JDBCDataType enum value + JDBCDataTypeTimestamp = "TIMESTAMP" + + // JDBCDataTypeTimestampWithTimezone is a JDBCDataType enum value + JDBCDataTypeTimestampWithTimezone = "TIMESTAMP_WITH_TIMEZONE" + + // JDBCDataTypeTinyint is a JDBCDataType enum value + JDBCDataTypeTinyint = "TINYINT" + + // JDBCDataTypeVarbinary is a JDBCDataType enum value + JDBCDataTypeVarbinary = "VARBINARY" + + // JDBCDataTypeVarchar is a JDBCDataType enum value + JDBCDataTypeVarchar = "VARCHAR" +) + +// JDBCDataType_Values returns all elements of the JDBCDataType enum +func JDBCDataType_Values() []string { + return []string{ + JDBCDataTypeArray, + JDBCDataTypeBigint, + JDBCDataTypeBinary, + JDBCDataTypeBit, + JDBCDataTypeBlob, + JDBCDataTypeBoolean, + JDBCDataTypeChar, + JDBCDataTypeClob, + JDBCDataTypeDatalink, + JDBCDataTypeDate, + JDBCDataTypeDecimal, + JDBCDataTypeDistinct, + JDBCDataTypeDouble, + JDBCDataTypeFloat, + JDBCDataTypeInteger, + JDBCDataTypeJavaObject, + JDBCDataTypeLongnvarchar, + JDBCDataTypeLongvarbinary, + JDBCDataTypeLongvarchar, + JDBCDataTypeNchar, + JDBCDataTypeNclob, + JDBCDataTypeNull, + JDBCDataTypeNumeric, + JDBCDataTypeNvarchar, + JDBCDataTypeOther, + JDBCDataTypeReal, + JDBCDataTypeRef, + JDBCDataTypeRefCursor, + JDBCDataTypeRowid, + JDBCDataTypeSmallint, + JDBCDataTypeSqlxml, + JDBCDataTypeStruct, + JDBCDataTypeTime, + JDBCDataTypeTimeWithTimezone, + JDBCDataTypeTimestamp, + JDBCDataTypeTimestampWithTimezone, + JDBCDataTypeTinyint, + JDBCDataTypeVarbinary, + JDBCDataTypeVarchar, + } +} + const ( // JobBookmarksEncryptionModeDisabled is a JobBookmarksEncryptionMode enum value JobBookmarksEncryptionModeDisabled = "DISABLED" @@ -56344,6 +64305,38 @@ func JobRunState_Values() []string { } } +const ( + // JoinTypeEquijoin 
is a JoinType enum value + JoinTypeEquijoin = "equijoin" + + // JoinTypeLeft is a JoinType enum value + JoinTypeLeft = "left" + + // JoinTypeRight is a JoinType enum value + JoinTypeRight = "right" + + // JoinTypeOuter is a JoinType enum value + JoinTypeOuter = "outer" + + // JoinTypeLeftsemi is a JoinType enum value + JoinTypeLeftsemi = "leftsemi" + + // JoinTypeLeftanti is a JoinType enum value + JoinTypeLeftanti = "leftanti" +) + +// JoinType_Values returns all elements of the JoinType enum +func JoinType_Values() []string { + return []string{ + JoinTypeEquijoin, + JoinTypeLeft, + JoinTypeRight, + JoinTypeOuter, + JoinTypeLeftsemi, + JoinTypeLeftanti, + } +} + const ( // LanguagePython is a Language enum value LanguagePython = "PYTHON" @@ -56444,6 +64437,34 @@ func NodeType_Values() []string { } } +const ( + // ParquetCompressionTypeSnappy is a ParquetCompressionType enum value + ParquetCompressionTypeSnappy = "snappy" + + // ParquetCompressionTypeLzo is a ParquetCompressionType enum value + ParquetCompressionTypeLzo = "lzo" + + // ParquetCompressionTypeGzip is a ParquetCompressionType enum value + ParquetCompressionTypeGzip = "gzip" + + // ParquetCompressionTypeUncompressed is a ParquetCompressionType enum value + ParquetCompressionTypeUncompressed = "uncompressed" + + // ParquetCompressionTypeNone is a ParquetCompressionType enum value + ParquetCompressionTypeNone = "none" +) + +// ParquetCompressionType_Values returns all elements of the ParquetCompressionType enum +func ParquetCompressionType_Values() []string { + return []string{ + ParquetCompressionTypeSnappy, + ParquetCompressionTypeLzo, + ParquetCompressionTypeGzip, + ParquetCompressionTypeUncompressed, + ParquetCompressionTypeNone, + } +} + const ( // PartitionIndexStatusCreating is a PartitionIndexStatus enum value PartitionIndexStatusCreating = "CREATING" @@ -56528,6 +64549,30 @@ func PermissionType_Values() []string { } } +const ( + // PiiTypeRowAudit is a PiiType enum value + PiiTypeRowAudit = "RowAudit" + + // PiiTypeRowMasking is a PiiType enum value + PiiTypeRowMasking = "RowMasking" + + // PiiTypeColumnAudit is a PiiType enum value + PiiTypeColumnAudit = "ColumnAudit" + + // PiiTypeColumnMasking is a PiiType enum value + PiiTypeColumnMasking = "ColumnMasking" +) + +// PiiType_Values returns all elements of the PiiType enum +func PiiType_Values() []string { + return []string{ + PiiTypeRowAudit, + PiiTypeRowMasking, + PiiTypeColumnAudit, + PiiTypeColumnMasking, + } +} + const ( // PrincipalTypeUser is a PrincipalType enum value PrincipalTypeUser = "USER" @@ -56548,6 +64593,30 @@ func PrincipalType_Values() []string { } } +const ( + // QuoteCharQuote is a QuoteChar enum value + QuoteCharQuote = "quote" + + // QuoteCharQuillemet is a QuoteChar enum value + QuoteCharQuillemet = "quillemet" + + // QuoteCharSingleQuote is a QuoteChar enum value + QuoteCharSingleQuote = "single_quote" + + // QuoteCharDisabled is a QuoteChar enum value + QuoteCharDisabled = "disabled" +) + +// QuoteChar_Values returns all elements of the QuoteChar enum +func QuoteChar_Values() []string { + return []string{ + QuoteCharQuote, + QuoteCharQuillemet, + QuoteCharSingleQuote, + QuoteCharDisabled, + } +} + const ( // RecrawlBehaviorCrawlEverything is a RecrawlBehavior enum value RecrawlBehaviorCrawlEverything = "CRAWL_EVERYTHING" @@ -56716,6 +64785,34 @@ func SchemaVersionStatus_Values() []string { } } +const ( + // SeparatorComma is a Separator enum value + SeparatorComma = "comma" + + // SeparatorCtrla is a Separator enum value + SeparatorCtrla = 
"ctrla" + + // SeparatorPipe is a Separator enum value + SeparatorPipe = "pipe" + + // SeparatorSemicolon is a Separator enum value + SeparatorSemicolon = "semicolon" + + // SeparatorTab is a Separator enum value + SeparatorTab = "tab" +) + +// Separator_Values returns all elements of the Separator enum +func Separator_Values() []string { + return []string{ + SeparatorComma, + SeparatorCtrla, + SeparatorPipe, + SeparatorSemicolon, + SeparatorTab, + } +} + const ( // SessionStatusProvisioning is a SessionStatus enum value SessionStatusProvisioning = "PROVISIONING" @@ -56780,6 +64877,26 @@ func SortDirectionType_Values() []string { } } +const ( + // StartingPositionLatest is a StartingPosition enum value + StartingPositionLatest = "latest" + + // StartingPositionTrimHorizon is a StartingPosition enum value + StartingPositionTrimHorizon = "trim_horizon" + + // StartingPositionEarliest is a StartingPosition enum value + StartingPositionEarliest = "earliest" +) + +// StartingPosition_Values returns all elements of the StartingPosition enum +func StartingPosition_Values() []string { + return []string{ + StartingPositionLatest, + StartingPositionTrimHorizon, + StartingPositionEarliest, + } +} + const ( // StatementStateWaiting is a StatementState enum value StatementStateWaiting = "WAITING" @@ -56812,6 +64929,34 @@ func StatementState_Values() []string { } } +const ( + // TargetFormatJson is a TargetFormat enum value + TargetFormatJson = "json" + + // TargetFormatCsv is a TargetFormat enum value + TargetFormatCsv = "csv" + + // TargetFormatAvro is a TargetFormat enum value + TargetFormatAvro = "avro" + + // TargetFormatOrc is a TargetFormat enum value + TargetFormatOrc = "orc" + + // TargetFormatParquet is a TargetFormat enum value + TargetFormatParquet = "parquet" +) + +// TargetFormat_Values returns all elements of the TargetFormat enum +func TargetFormat_Values() []string { + return []string{ + TargetFormatJson, + TargetFormatCsv, + TargetFormatAvro, + TargetFormatOrc, + TargetFormatParquet, + } +} + const ( // TaskRunSortColumnTypeTaskRunType is a TaskRunSortColumnType enum value TaskRunSortColumnTypeTaskRunType = "TASK_RUN_TYPE" @@ -57020,6 +65165,22 @@ func TriggerType_Values() []string { } } +const ( + // UnionTypeAll is a UnionType enum value + UnionTypeAll = "ALL" + + // UnionTypeDistinct is a UnionType enum value + UnionTypeDistinct = "DISTINCT" +) + +// UnionType_Values returns all elements of the UnionType enum +func UnionType_Values() []string { + return []string{ + UnionTypeAll, + UnionTypeDistinct, + } +} + const ( // UpdateBehaviorLog is a UpdateBehavior enum value UpdateBehaviorLog = "LOG" @@ -57036,6 +65197,22 @@ func UpdateBehavior_Values() []string { } } +const ( + // UpdateCatalogBehaviorUpdateInDatabase is a UpdateCatalogBehavior enum value + UpdateCatalogBehaviorUpdateInDatabase = "UPDATE_IN_DATABASE" + + // UpdateCatalogBehaviorLog is a UpdateCatalogBehavior enum value + UpdateCatalogBehaviorLog = "LOG" +) + +// UpdateCatalogBehavior_Values returns all elements of the UpdateCatalogBehavior enum +func UpdateCatalogBehavior_Values() []string { + return []string{ + UpdateCatalogBehaviorUpdateInDatabase, + UpdateCatalogBehaviorLog, + } +} + const ( // WorkerTypeStandard is a WorkerType enum value WorkerTypeStandard = "Standard" diff --git a/service/kms/api.go b/service/kms/api.go index 62ca2705c6..079f147142 100644 --- a/service/kms/api.go +++ b/service/kms/api.go @@ -912,12 +912,12 @@ func (c *KMS) CreateKeyRequest(input *CreateKeyInput) (req *request.Request, out // and 
verify. You can't change these properties after the KMS key is created.
//
// Asymmetric KMS keys contain an RSA key pair or an Elliptic Curve (ECC) key
-// pair. The private key in an asymmetric KMS key never leaves AWS KMS unencrypted.
+// pair. The private key in an asymmetric KMS key never leaves KMS unencrypted.
// However, you can use the GetPublicKey operation to download the public key
-// so it can be used outside of AWS KMS. KMS keys with RSA key pairs can be
-// used to encrypt or decrypt data or sign and verify messages (but not both).
-// KMS keys with ECC key pairs can be used only to sign and verify messages.
-// For information about asymmetric KMS keys, see Asymmetric KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html)
+// so it can be used outside of KMS. KMS keys with RSA key pairs can be used
+// to encrypt or decrypt data or sign and verify messages (but not both). KMS
+// keys with ECC key pairs can be used only to sign and verify messages. For
+// information about asymmetric KMS keys, see Asymmetric KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html)
// in the Key Management Service Developer Guide.
//
// HMAC KMS key
@@ -1191,8 +1191,8 @@ func (c *KMS) DecryptRequest(input *DecryptInput) (req *request.Request, output
//
// The Decrypt operation also decrypts ciphertext that was encrypted outside
// of KMS by the public key in an asymmetric KMS key. However, it cannot
-// decrypt symmetric ciphertext produced by other libraries, such as the Amazon
-// Web Services Encryption SDK (https://docs.aws.amazon.com/encryption-sdk/latest/developer-guide/)
+// decrypt ciphertext produced by other libraries, such as the Amazon Web Services
+// Encryption SDK (https://docs.aws.amazon.com/encryption-sdk/latest/developer-guide/)
// or Amazon S3 client-side encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingClientSideEncryption.html).
// These libraries return a ciphertext format that is incompatible with KMS.
//
@@ -2195,16 +2195,27 @@ func (c *KMS) DisableKeyRotationRequest(input *DisableKeyRotationInput) (req *re
// DisableKeyRotation API operation for AWS Key Management Service.
//
// Disables automatic rotation of the key material (https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html)
-// for the specified symmetric encryption KMS key.
+// of the specified symmetric encryption KMS key.
//
-// You cannot enable automatic rotation of asymmetric KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html),
+// Automatic key rotation is supported only on symmetric encryption KMS keys.
+// You cannot enable or disable automatic rotation of asymmetric KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html),
// HMAC KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html),
// KMS keys with imported key material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html),
// or KMS keys in a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html).
-// To enable or disable automatic rotation of a set of related multi-Region
-// keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-rotate),
+// The key rotation status of these KMS keys is always false. To enable or disable
+// automatic rotation of a set of related multi-Region keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-rotate),
// set the property on the primary key.
//
+// You can enable (EnableKeyRotation) and disable automatic rotation of the
+// key material in customer managed KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk).
+// Key material rotation of Amazon Web Services managed KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk)
+// is not configurable. KMS always rotates the key material every year.
+// Rotation of Amazon Web Services owned KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-owned-cmk)
+// varies.
+//
+// In May 2022, KMS changed the rotation schedule for Amazon Web Services managed
+// keys from every three years to every year. For details, see EnableKeyRotation.
+//
// The KMS key that you use for this operation must be in a compatible key state.
// For details, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide.
@@ -2589,16 +2600,41 @@ func (c *KMS) EnableKeyRotationRequest(input *EnableKeyRotationInput) (req *requ
// EnableKeyRotation API operation for AWS Key Management Service.
//
// Enables automatic rotation of the key material (https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html)
-// for the specified symmetric encryption KMS key.
-//
-// You cannot enable automatic rotation of asymmetric KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html),
+// of the specified symmetric encryption KMS key.
+//
+// When you enable automatic rotation of a customer managed KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk),
+// KMS rotates the key material of the KMS key one year (approximately 365 days)
+// from the enable date and every year thereafter. You can monitor rotation
+// of the key material for your KMS keys in CloudTrail and Amazon CloudWatch.
+// To disable rotation of the key material in a customer managed KMS key, use
+// the DisableKeyRotation operation.
+//
+// Automatic key rotation is supported only on symmetric encryption KMS keys
+// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#symmetric-cmks).
+// You cannot enable or disable automatic rotation of asymmetric KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html),
// HMAC KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html),
// KMS keys with imported key material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html),
// or KMS keys in a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html).
-// To enable or disable automatic rotation of a set of related multi-Region
-// keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-rotate),
+// The key rotation status of these KMS keys is always false. To enable or disable
+// automatic rotation of a set of related multi-Region keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-rotate),
// set the property on the primary key.
//
+// You cannot enable or disable automatic rotation of Amazon Web Services managed
+// KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk).
+// KMS always rotates the key material of Amazon Web Services managed keys every
+// year. Rotation of Amazon Web Services owned KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-owned-cmk)
+// varies.
+//
+// In May 2022, KMS changed the rotation schedule for Amazon Web Services managed
+// keys from every three years (approximately 1,095 days) to every year (approximately
+// 365 days).
+//
+// New Amazon Web Services managed keys are automatically rotated one year after
+// they are created, and approximately every year thereafter.
+//
+// Existing Amazon Web Services managed keys are automatically rotated one year
+// after their most recent rotation, and every year thereafter.
+//
// The KMS key that you use for this operation must be in a compatible key state.
// For details, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide.
@@ -3490,14 +3526,16 @@ func (c *KMS) GenerateDataKeyWithoutPlaintextRequest(input *GenerateDataKeyWitho
//
// This operation is useful for systems that need to encrypt data at some point,
// but not immediately. When you need to encrypt the data, you call the Decrypt
-// operation on the encrypted copy of the key. It's also useful in distributed
-// systems with different levels of trust. For example, you might store encrypted
-// data in containers. One component of your system creates new containers and
-// stores an encrypted data key with each container. Then, a different component
-// puts the data into the containers. That component first decrypts the data
-// key, uses the plaintext data key to encrypt data, puts the encrypted data
-// into the container, and then destroys the plaintext data key. In this system,
-// the component that creates the containers never sees the plaintext data key.
+// operation on the encrypted copy of the key.
+//
+// It's also useful in distributed systems with different levels of trust. For
+// example, you might store encrypted data in containers. One component of your
+// system creates new containers and stores an encrypted data key with each
+// container. Then, a different component puts the data into the containers.
+// That component first decrypts the data key, uses the plaintext data key to
+// encrypt data, puts the encrypted data into the container, and then destroys
+// the plaintext data key. In this system, the component that creates the containers
+// never sees the plaintext data key.
//
// To request an asymmetric data key pair, use the GenerateDataKeyPair or GenerateDataKeyPairWithoutPlaintext
// operations.
@@ -3672,6 +3710,13 @@ func (c *KMS) GenerateMacRequest(input *GenerateMacInput) (req *request.Request,
// KMS support for HMAC KMS keys. For details, see HMAC keys in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html)
// in the Key Management Service Developer Guide .
//
+// Best practices recommend that you limit the time during which any signing
+// mechanism, including an HMAC, is effective. This deters an attack where the
+// actor uses a signed message to establish validity repeatedly or long after
+// the message is superseded. HMAC tags do not include a timestamp, but you
+// can include a timestamp in the token or message to help you detect when it's
+// time to refresh the HMAC.
+//
// The KMS key that you use for this operation must be in a compatible key state.
// For details, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide.
@@ -4038,14 +4083,30 @@ func (c *KMS) GetKeyRotationStatusRequest(input *GetKeyRotationStatusInput) (req
// material (https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html)
// is enabled for the specified KMS key.
//
-// You cannot enable automatic rotation of asymmetric KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html),
+// When you enable automatic rotation for customer managed KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk),
+// KMS rotates the key material of the KMS key one year (approximately 365 days)
+// from the enable date and every year thereafter. You can monitor rotation
+// of the key material for your KMS keys in CloudTrail and Amazon CloudWatch.
+//
+// Automatic key rotation is supported only on symmetric encryption KMS keys
+// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#symmetric-cmks).
+// You cannot enable or disable automatic rotation of asymmetric KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html),
// HMAC KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html),
// KMS keys with imported key material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html),
// or KMS keys in a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html).
-// To enable or disable automatic rotation of a set of related multi-Region
-// keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-rotate),
-// set the property on the primary key. The key rotation status for these KMS
-// keys is always false.
+// The key rotation status of these KMS keys is always false. To enable or disable
+// automatic rotation of a set of related multi-Region keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-rotate),
+// set the property on the primary key.
+//
+// You can enable (EnableKeyRotation) and disable automatic rotation (DisableKeyRotation)
+// of the key material in customer managed KMS keys. Key material rotation of
+// Amazon Web Services managed KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk)
+// is not configurable. KMS always rotates the key material in Amazon Web Services
+// managed KMS keys every year. The key rotation status for Amazon Web Services
+// managed KMS keys is always true.
+//
+// In May 2022, KMS changed the rotation schedule for Amazon Web Services managed
+// keys from every three years to every year. For details, see EnableKeyRotation.
//
// The KMS key that you use for this operation must be in a compatible key state.
// For details, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
@@ -4053,11 +4114,15 @@ func (c *KMS) GetKeyRotationStatusRequest(input *GetKeyRotationStatusInput) (req
//
// * Disabled: The key rotation status does not change when you disable a
// KMS key. However, while the KMS key is disabled, KMS does not rotate the
-// key material.
+// key material. When you re-enable the KMS key, rotation resumes. If the
+// key material in the re-enabled KMS key hasn't been rotated in one year,
+// KMS rotates it immediately, and every year thereafter. If it's been less
+// than a year since the key material in the re-enabled KMS key was rotated,
+// the KMS key resumes its prior rotation schedule.
//
// * Pending deletion: While a KMS key is pending deletion, its key rotation
// status is false and KMS does not rotate the key material. If you cancel
-// the deletion, the original key rotation status is restored.
+// the deletion, the original key rotation status returns to true.
//
// Cross-account use: Yes. To perform this operation on a KMS key in a different
// Amazon Web Services account, specify the key ARN in the value of the KeyId
@@ -6644,6 +6709,12 @@ func (c *KMS) SignRequest(input *SignInput) (req *request.Request, output *SignO
// When signing a message, be sure to record the KMS key and the signing algorithm.
// This information is required to verify the signature.
//
+// Best practices recommend that you limit the time during which any signature
+// is effective. This deters an attack where the actor uses a signed message
+// to establish validity repeatedly or long after the message is superseded.
+// Signatures do not include a timestamp, but you can include a timestamp in
+// the signed message to help you detect when it's time to refresh the signature.
+//
// To verify the signature that this operation generates, use the Verify operation.
// Or use the GetPublicKey operation to download the public key and then use
// the public key to verify the signature outside of KMS.
@@ -9242,11 +9313,11 @@ type CreateKeyInput struct {
// in the Key Management Service Developer Guide .
//
// The KeySpec determines whether the KMS key contains a symmetric key or an
- // asymmetric key pair. It also determines the algorithms that the KMS key supports.
- // You can't change the KeySpec after the KMS key is created. To further restrict
- // the algorithms that can be used with the KMS key, use a condition key in
- // its key policy or IAM policy. For more information, see kms:EncryptionAlgorithm
- // (https://docs.aws.amazon.com/kms/latest/developerguide/policy-conditions.html#conditions-kms-encryption-algorithm),
+ // asymmetric key pair. It also determines the cryptographic algorithms that
+ // the KMS key supports. You can't change the KeySpec after the KMS key is created.
+ // To further restrict the algorithms that can be used with the KMS key, use
+ // a condition key in its key policy or IAM policy. For more information, see
+ // kms:EncryptionAlgorithm (https://docs.aws.amazon.com/kms/latest/developerguide/policy-conditions.html#conditions-kms-encryption-algorithm),
// kms:MacAlgorithm (https://docs.aws.amazon.com/kms/latest/developerguide/policy-conditions.html#conditions-kms-mac-algorithm)
// or kms:SigningAlgorithm (https://docs.aws.amazon.com/kms/latest/developerguide/policy-conditions.html#conditions-kms-signing-algorithm)
// in the Key Management Service Developer Guide .
@@ -9307,9 +9378,9 @@ type CreateKeyInput struct {
// This value creates a primary key, not a replica. To create a replica key,
// use the ReplicateKey operation.
//
- // You can create a symmetric or asymmetric multi-Region key, and you can create
- // a multi-Region key with imported key material.
However, you cannot create - // a multi-Region key in a custom key store. + // You can create a multi-Region version of a symmetric encryption KMS key, + // an HMAC KMS key, an asymmetric KMS key, or a KMS key with imported key material. + // However, you cannot create a multi-Region key in a custom key store. MultiRegion *bool `type:"boolean"` // The source of the key material for the KMS key. You cannot change the origin @@ -9329,11 +9400,14 @@ type CreateKeyInput struct { // KMS keys. Origin *string `type:"string" enum:"OriginType"` - // The key policy to attach to the KMS key. + // The key policy to attach to the KMS key. If you do not specify a key policy, + // KMS attaches a default key policy to the KMS key. For more information, see + // Default key policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default) + // in the Key Management Service Developer Guide. // // If you provide a key policy, it must meet the following criteria: // - // * If you don't set BypassPolicyLockoutSafetyCheck to true, the key policy + // * If you don't set BypassPolicyLockoutSafetyCheck to True, the key policy // must allow the principal that is making the CreateKey request to make // a subsequent PutKeyPolicy request on the KMS key. This reduces the risk // that the KMS key becomes unmanageable. For more information, refer to @@ -9349,11 +9423,18 @@ type CreateKeyInput struct { // visible (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency) // in the Amazon Web Services Identity and Access Management User Guide. // - // If you do not provide a key policy, KMS attaches a default key policy to - // the KMS key. For more information, see Default Key Policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default) - // in the Key Management Service Developer Guide. + // A key policy document must conform to the following rules. // - // The key policy size quota is 32 kilobytes (32768 bytes). + // * Up to 32 kilobytes (32768 bytes) + // + // * Must be UTF-8 encoded + // + // * The only Unicode characters that are permitted in a key policy document + // are the horizontal tab (U+0009), linefeed (U+000A), carriage return (U+000D), + // and characters in the range U+0020 to U+00FF. + // + // * The Sid element in a key policy statement can include spaces. (Spaces + // are prohibited in the Sid element of an IAM policy document.) // // For help writing and formatting a JSON policy document, see the IAM JSON // Policy Reference (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies.html) @@ -11098,13 +11179,13 @@ func (s EnableKeyOutput) GoString() string { type EnableKeyRotationInput struct { _ struct{} `type:"structure"` - // Identifies a symmetric encryption KMS key. You cannot enable automatic rotation - // of asymmetric KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html), + // Identifies a symmetric encryption KMS key. You cannot enable or disable automatic + // rotation of asymmetric KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html), // HMAC KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html), // KMS keys with imported key material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html), // or KMS keys in a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html). 
- // To enable or disable automatic rotation of a set of related multi-Region - // keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-rotate), + // The key rotation status of these KMS keys is always false. To enable or disable + // automatic rotation of a set of related multi-Region keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-rotate), // set the property on the primary key. // // Specify the key ID or key ARN of the KMS key. @@ -15929,9 +16010,18 @@ type PutKeyPolicyInput struct { // visible (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency) // in the Amazon Web Services Identity and Access Management User Guide. // - // The key policy cannot exceed 32 kilobytes (32768 bytes). For more information, - // see Resource Quotas (https://docs.aws.amazon.com/kms/latest/developerguide/resource-limits.html) - // in the Key Management Service Developer Guide. + // A key policy document must conform to the following rules. + // + // * Up to 32 kilobytes (32768 bytes) + // + // * Must be UTF-8 encoded + // + // * The only Unicode characters that are permitted in a key policy document + // are the horizontal tab (U+0009), linefeed (U+000A), carriage return (U+000D), + // and characters in the range U+0020 to U+00FF. + // + // * The Sid element in a key policy statement can include spaces. (Spaces + // are prohibited in the Sid element of an IAM policy document.) // // Policy is a required field Policy *string `min:"1" type:"string" required:"true"` @@ -16391,7 +16481,18 @@ type ReplicateKeyInput struct { // visible (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency) // in the Identity and Access Management User Guide . // - // * The key policy size quota is 32 kilobytes (32768 bytes). + // A key policy document must conform to the following rules. + // + // * Up to 32 kilobytes (32768 bytes) + // + // * Must be UTF-8 encoded + // + // * The only Unicode characters that are permitted in a key policy document + // are the horizontal tab (U+0009), linefeed (U+000A), carriage return (U+000D), + // and characters in the range U+0020 to U+00FF. + // + // * The Sid element in a key policy statement can include spaces. (Spaces + // are prohibited in the Sid element of an IAM policy document.) Policy *string `min:"1" type:"string"` // The Region ID of the Amazon Web Services Region for this replica key. @@ -16818,9 +16919,9 @@ type ScheduleKeyDeletionInput struct { // The waiting period, specified in number of days. After the waiting period // ends, KMS deletes the KMS key. // - // If the KMS key is a multi-Region primary key with replicas, the waiting period - // begins when the last of its replica keys is deleted. Otherwise, the waiting - // period begins immediately. + // If the KMS key is a multi-Region primary key with replica keys, the waiting + // period begins when the last of its replica keys is deleted. Otherwise, the + // waiting period begins immediately. // // This value is optional. If you include a value, it must be between 7 and // 30, inclusive. If you do not include a value, it defaults to 30. 
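As an illustrative aside (not part of the vendored changes above), the HMAC best-practice tip that this release adds to GenerateMac can be sketched in Go. The region and key alias below are placeholders, and the sketch assumes an existing HMAC KMS key (for example, key spec HMAC_256) in a usable key state.

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := kms.New(sess)

	// HMAC tags carry no timestamp of their own, so embed one in the message
	// itself; a verifier can then reject tags older than its freshness window.
	msg := fmt.Sprintf("order=12345;issued=%s", time.Now().UTC().Format(time.RFC3339))

	out, err := svc.GenerateMac(&kms.GenerateMacInput{
		KeyId:        aws.String("alias/example-hmac-key"), // placeholder HMAC KMS key
		MacAlgorithm: aws.String(kms.MacAlgorithmSpecHmacSha256),
		Message:      []byte(msg),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s tag is %d bytes\n", aws.StringValue(out.MacAlgorithm), len(out.Mac))
}

A receiver would pass the same message and the returned tag to VerifyMac, then compare the embedded timestamp against its own freshness policy before trusting the message.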
diff --git a/service/kms/doc.go b/service/kms/doc.go index 45cecea7f8..17008d7526 100644 --- a/service/kms/doc.go +++ b/service/kms/doc.go @@ -30,11 +30,11 @@
// see Service endpoints (https://docs.aws.amazon.com/general/latest/gr/kms.html#kms_region)
// in the Key Management Service topic of the Amazon Web Services General Reference.
//
-// Clients must support TLS (Transport Layer Security) 1.0. We recommend TLS
-// 1.2. Clients must also support cipher suites with Perfect Forward Secrecy
-// (PFS) such as Ephemeral Diffie-Hellman (DHE) or Elliptic Curve Ephemeral
-// Diffie-Hellman (ECDHE). Most modern systems such as Java 7 and later support
-// these modes.
+// All KMS API calls must be signed and transmitted using Transport Layer
+// Security (TLS). KMS recommends you always use the latest supported TLS version.
+// Clients must also support cipher suites with Perfect Forward Secrecy (PFS)
+// such as Ephemeral Diffie-Hellman (DHE) or Elliptic Curve Ephemeral Diffie-Hellman
+// (ECDHE). Most modern systems such as Java 7 and later support these modes.
//
// Signing Requests
//
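The doc.go change above replaces the TLS 1.0 floor with guidance to use the latest supported TLS version. As a hedged sketch (the region is a placeholder, and this is one caller-side option rather than anything the SDK requires), a minimum TLS version can be pinned on the HTTP client that the session uses; Go's default cipher suites already include the ECDHE (PFS) suites the guidance mentions.

package main

import (
	"crypto/tls"
	"net/http"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	// Refuse to negotiate below TLS 1.2; Go still prefers TLS 1.3 when the
	// endpoint supports it.
	transport := &http.Transport{
		TLSClientConfig: &tls.Config{MinVersion: tls.VersionTLS12},
	}
	sess := session.Must(session.NewSession(&aws.Config{
		Region:     aws.String("us-east-1"), // placeholder
		HTTPClient: &http.Client{Transport: transport},
	}))

	svc := kms.New(sess)
	_ = svc // make KMS calls as usual; every request now uses the pinned client
}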