From 8fa517a2c56d2f98a2e4a9c4ea6fd99b6ce61a71 Mon Sep 17 00:00:00 2001 From: AllanZhengYP Date: Thu, 10 Mar 2022 14:13:06 -0800 Subject: [PATCH] feat(clients): update clients as of 2022/03/10 (#3411) * chore(models): update models as of 2022/03/10 * chore(endpoints): update endpoints model as of 03/10/2022 * feat(clients): update clients as of 03/10/2022 * test(functional): update autoscaling endpoint test case --- clients/client-amplify/src/models/models_0.ts | 73 +- .../src/protocols/Aws_restJson1.ts | 1 + clients/client-amplifyuibuilder/README.md | 17 +- .../src/AmplifyUIBuilder.ts | 29 +- .../src/AmplifyUIBuilderClient.ts | 17 +- .../src/commands/ExportComponentsCommand.ts | 3 +- .../src/commands/ExportThemesCommand.ts | 3 +- .../src/commands/ListComponentsCommand.ts | 3 +- .../src/commands/ListThemesCommand.ts | 3 +- .../src/models/models_0.ts | 302 ++- .../pagination/ExportComponentsPaginator.ts | 58 + .../src/pagination/ExportThemesPaginator.ts | 58 + .../src/pagination/index.ts | 2 + .../src/protocols/Aws_restJson1.ts | 176 ++ clients/client-appflow/src/models/models_0.ts | 33 + .../src/protocols/Aws_restJson1.ts | 35 + clients/client-apprunner/src/endpoints.ts | 30 +- clients/client-athena/src/Athena.ts | 37 + clients/client-athena/src/AthenaClient.ts | 3 + .../src/commands/UpdateNamedQueryCommand.ts | 95 + clients/client-athena/src/commands/index.ts | 1 + clients/client-athena/src/models/models_0.ts | 115 +- .../src/protocols/Aws_json1_1.ts | 102 + clients/client-auto-scaling/src/endpoints.ts | 2 +- clients/client-chime-sdk-meetings/README.md | 3 +- .../src/ChimeSDKMeetings.ts | 3 +- .../src/ChimeSDKMeetingsClient.ts | 3 +- .../src/models/models_0.ts | 109 +- .../src/protocols/Aws_restJson1.ts | 6 + clients/client-cloudtrail/src/CloudTrail.ts | 4 +- .../src/commands/CancelQueryCommand.ts | 2 +- .../src/commands/ListQueriesCommand.ts | 2 +- clients/client-cloudtrail/src/endpoints.ts | 8 +- .../client-cloudtrail/src/models/models_0.ts | 23 +- 
.../src/protocols/Aws_json1_1.ts | 2 + .../client-cloudwatch-events/src/endpoints.ts | 8 +- .../client-cloudwatch-logs/src/endpoints.ts | 8 +- clients/client-comprehend/src/Comprehend.ts | 164 +- .../client-comprehend/src/ComprehendClient.ts | 24 + ...ibeTargetedSentimentDetectionJobCommand.ts | 108 + ...stTargetedSentimentDetectionJobsCommand.ts | 106 + .../StartSentimentDetectionJobCommand.ts | 2 +- ...artTargetedSentimentDetectionJobCommand.ts | 108 + ...topTargetedSentimentDetectionJobCommand.ts | 115 ++ .../client-comprehend/src/commands/index.ts | 4 + .../client-comprehend/src/models/models_0.ts | 407 +++- ...TargetedSentimentDetectionJobsPaginator.ts | 59 + .../client-comprehend/src/pagination/index.ts | 1 + .../src/protocols/Aws_json1_1.ts | 463 +++++ clients/client-connect/src/models/models_0.ts | 1 + clients/client-devops-guru/src/DevOpsGuru.ts | 82 +- .../src/DevOpsGuruClient.ts | 12 + .../DescribeEventSourcesConfigCommand.ts | 98 + .../SearchOrganizationInsightsCommand.ts | 8 +- .../UpdateEventSourcesConfigCommand.ts | 95 + .../client-devops-guru/src/commands/index.ts | 2 + .../client-devops-guru/src/models/models_0.ts | 576 ++++-- .../src/protocols/Aws_restJson1.ts | 228 +++ clients/client-ec2/src/models/models_0.ts | 1 + clients/client-ec2/src/models/models_1.ts | 24 +- clients/client-ec2/src/models/models_3.ts | 14 +- clients/client-ec2/src/protocols/Aws_ec2.ts | 4 + clients/client-ecr/src/ECR.ts | 17 +- .../BatchCheckLayerAvailabilityCommand.ts | 2 +- .../commands/CompleteLayerUploadCommand.ts | 2 +- .../commands/GetDownloadUrlForLayerCommand.ts | 2 +- .../commands/InitiateLayerUploadCommand.ts | 2 +- .../PutImageScanningConfigurationCommand.ts | 7 +- .../src/commands/UploadLayerPartCommand.ts | 2 +- clients/client-ecr/src/models/models_0.ts | 43 +- .../client-ecr/src/protocols/Aws_json1_1.ts | 4 + clients/client-ecs/src/ECS.ts | 61 +- .../src/commands/CreateServiceCommand.ts | 3 +- .../src/commands/PutAccountSettingCommand.ts | 2 +- 
.../UpdateContainerInstancesStateCommand.ts | 3 +- .../src/commands/UpdateServiceCommand.ts | 53 +- clients/client-ecs/src/models/models_0.ts | 173 +- .../client-ecs/src/protocols/Aws_json1_1.ts | 11 + clients/client-eks/src/models/models_0.ts | 1 + .../client-elasticache/src/models/models_0.ts | 31 - clients/client-eventbridge/src/endpoints.ts | 8 +- .../client-finspace-data/src/FinspaceData.ts | 347 ++++ .../src/FinspaceDataClient.ts | 49 +- .../commands/CreatePermissionGroupCommand.ts | 95 + .../src/commands/CreateUserCommand.ts | 95 + .../commands/DeletePermissionGroupCommand.ts | 95 + .../src/commands/DisableUserCommand.ts | 95 + .../src/commands/EnableUserCommand.ts | 95 + .../src/commands/GetUserCommand.ts | 95 + .../commands/ListPermissionGroupsCommand.ts | 95 + .../src/commands/ListUsersCommand.ts | 95 + .../src/commands/ResetUserPasswordCommand.ts | 95 + .../commands/UpdatePermissionGroupCommand.ts | 95 + .../src/commands/UpdateUserCommand.ts | 95 + .../src/commands/index.ts | 11 + .../src/models/models_0.ts | 1216 +++++++++-- .../ListPermissionGroupsPaginator.ts | 59 + .../src/pagination/ListUsersPaginator.ts | 55 + .../src/pagination/index.ts | 2 + .../src/protocols/Aws_restJson1.ts | 1706 ++++++++++++--- clients/client-fis/src/models/models_0.ts | 265 +++ .../client-fis/src/protocols/Aws_restJson1.ts | 175 ++ clients/client-fsx/src/FSx.ts | 19 +- .../CreateFileSystemFromBackupCommand.ts | 4 +- .../src/commands/CreateSnapshotCommand.ts | 6 +- .../src/commands/CreateVolumeCommand.ts | 3 +- .../src/commands/DeleteSnapshotCommand.ts | 2 +- .../src/commands/DescribeSnapshotsCommand.ts | 2 +- .../src/commands/UpdateSnapshotCommand.ts | 2 +- clients/client-fsx/src/models/models_0.ts | 283 ++- .../client-fsx/src/protocols/Aws_json1_1.ts | 27 + clients/client-gamelift/src/GameLift.ts | 4 +- .../DescribeEC2InstanceLimitsCommand.ts | 2 +- .../commands/DescribeMatchmakingCommand.ts | 2 +- .../client-greengrassv2/src/GreengrassV2.ts | 8 +- 
.../src/commands/CreateDeploymentCommand.ts | 4 +- .../commands/GetConnectivityInfoCommand.ts | 2 +- .../commands/UpdateConnectivityInfoCommand.ts | 2 +- .../client-kafkaconnect/src/KafkaConnect.ts | 41 +- .../src/KafkaConnectClient.ts | 3 + .../src/commands/DeleteCustomPluginCommand.ts | 95 + .../src/commands/ListConnectorsCommand.ts | 4 +- .../client-kafkaconnect/src/commands/index.ts | 1 + .../src/models/models_0.ts | 262 ++- .../src/protocols/Aws_restJson1.ts | 112 + clients/client-kendra/src/models/models_0.ts | 248 ++- .../src/protocols/Aws_json1_1.ts | 69 + clients/client-keyspaces/.gitignore | 9 + clients/client-keyspaces/LICENSE | 201 ++ clients/client-keyspaces/README.md | 219 ++ clients/client-keyspaces/package.json | 93 + clients/client-keyspaces/src/Keyspaces.ts | 505 +++++ .../client-keyspaces/src/KeyspacesClient.ts | 311 +++ .../src/commands/CreateKeyspaceCommand.ts | 101 + .../src/commands/CreateTableCommand.ts | 102 + .../src/commands/DeleteKeyspaceCommand.ts | 95 + .../src/commands/DeleteTableCommand.ts | 100 + .../src/commands/GetKeyspaceCommand.ts | 95 + .../src/commands/GetTableCommand.ts | 95 + .../src/commands/ListKeyspacesCommand.ts | 95 + .../src/commands/ListTablesCommand.ts | 95 + .../commands/ListTagsForResourceCommand.ts | 95 + .../src/commands/RestoreTableCommand.ts | 139 ++ .../src/commands/TagResourceCommand.ts | 101 + .../src/commands/UntagResourceCommand.ts | 95 + .../src/commands/UpdateTableCommand.ts | 97 + .../client-keyspaces/src/commands/index.ts | 13 + clients/client-keyspaces/src/endpoints.ts | 135 ++ clients/client-keyspaces/src/index.ts | 6 + .../src/models/KeyspacesServiceException.ts | 17 + clients/client-keyspaces/src/models/index.ts | 1 + .../client-keyspaces/src/models/models_0.ts | 1590 ++++++++++++++ .../src/pagination/Interfaces.ts | 8 + .../src/pagination/ListKeyspacesPaginator.ts | 59 + .../src/pagination/ListTablesPaginator.ts | 55 + .../ListTagsForResourcePaginator.ts | 59 + 
.../client-keyspaces/src/pagination/index.ts | 4 + .../src/protocols/Aws_json1_0.ts | 1823 +++++++++++++++++ .../src/runtimeConfig.browser.ts | 49 + .../src/runtimeConfig.native.ts | 17 + .../src/runtimeConfig.shared.ts | 17 + clients/client-keyspaces/src/runtimeConfig.ts | 65 + clients/client-keyspaces/tsconfig.cjs.json | 8 + clients/client-keyspaces/tsconfig.es.json | 10 + clients/client-keyspaces/tsconfig.json | 12 + clients/client-keyspaces/tsconfig.types.json | 10 + clients/client-keyspaces/typedoc.json | 3 + .../src/models/models_0.ts | 4 +- clients/client-macie/README.md | 12 +- clients/client-macie/src/Macie.ts | 58 +- clients/client-macie/src/MacieClient.ts | 12 +- .../commands/AssociateMemberAccountCommand.ts | 2 +- .../commands/AssociateS3ResourcesCommand.ts | 10 +- .../DisassociateMemberAccountCommand.ts | 3 +- .../DisassociateS3ResourcesCommand.ts | 9 +- .../src/commands/ListMemberAccountsCommand.ts | 3 +- .../src/commands/ListS3ResourcesCommand.ts | 9 +- .../src/commands/UpdateS3ResourcesCommand.ts | 10 +- clients/client-macie/src/models/models_0.ts | 184 +- .../src/commands/TagResourceCommand.ts | 2 +- .../src/models/models_0.ts | 42 +- .../src/models/models_1.ts | 60 +- .../src/models/models_2.ts | 32 + .../src/protocols/Aws_restJson1.ts | 6 + .../src/models/models_0.ts | 1 + clients/client-mgn/src/Mgn.ts | 20 +- .../commands/DeleteVcenterClientCommand.ts | 2 +- .../commands/DescribeJobLogItemsCommand.ts | 2 +- .../src/commands/DescribeJobsCommand.ts | 2 +- .../commands/DescribeVcenterClientsCommand.ts | 2 +- .../commands/DisconnectFromServiceCommand.ts | 2 +- .../src/commands/FinalizeCutoverCommand.ts | 2 +- .../src/commands/MarkAsArchivedCommand.ts | 2 +- .../src/commands/StartReplicationCommand.ts | 2 +- .../src/commands/StartTestCommand.ts | 2 +- ...pdateSourceServerReplicationTypeCommand.ts | 2 +- clients/client-mgn/src/models/models_0.ts | 87 +- .../client-mgn/src/protocols/Aws_restJson1.ts | 11 + .../README.md | 3 + 
.../src/MigrationHubRefactorSpaces.ts | 48 +- .../src/MigrationHubRefactorSpacesClient.ts | 3 + .../src/commands/CreateApplicationCommand.ts | 4 +- .../src/commands/CreateEnvironmentCommand.ts | 9 +- .../src/commands/CreateRouteCommand.ts | 24 +- .../src/commands/CreateServiceCommand.ts | 2 +- .../commands/ListEnvironmentVpcsCommand.ts | 3 +- .../src/commands/PutResourcePolicyCommand.ts | 3 +- .../src/models/models_0.ts | 32 +- clients/client-panorama/src/Panorama.ts | 4 + .../src/commands/DeletePackageCommand.ts | 4 + .../client-panorama/src/models/models_0.ts | 95 + .../src/protocols/Aws_restJson1.ts | 95 + clients/client-rds/src/RDS.ts | 57 +- .../commands/CopyDBClusterSnapshotCommand.ts | 3 - .../src/commands/CreateDBClusterCommand.ts | 3 - .../CreateDBClusterParameterGroupCommand.ts | 3 - .../CreateDBClusterSnapshotCommand.ts | 3 - .../src/commands/DeleteDBClusterCommand.ts | 3 - .../DeleteDBClusterParameterGroupCommand.ts | 3 - .../DeleteDBClusterSnapshotCommand.ts | 3 - ...DescribeDBClusterParameterGroupsCommand.ts | 3 - .../DescribeDBClusterParametersCommand.ts | 3 - .../DescribeDBClusterSnapshotsCommand.ts | 3 - .../src/commands/DescribeDBClustersCommand.ts | 3 - .../src/commands/FailoverDBClusterCommand.ts | 3 - .../src/commands/ModifyDBClusterCommand.ts | 3 - .../ModifyDBClusterParameterGroupCommand.ts | 3 - .../src/commands/RebootDBClusterCommand.ts | 3 +- .../RemoveRoleFromDBClusterCommand.ts | 3 - .../ResetDBClusterParameterGroupCommand.ts | 3 - .../RestoreDBClusterFromSnapshotCommand.ts | 3 - .../RestoreDBClusterToPointInTimeCommand.ts | 3 - clients/client-rds/src/models/models_0.ts | 27 +- clients/client-rds/src/models/models_1.ts | 115 +- .../client-route53-recovery-cluster/README.md | 45 +- .../src/Route53RecoveryCluster.ts | 167 +- .../src/Route53RecoveryClusterClient.ts | 51 +- .../commands/GetRoutingControlStateCommand.ts | 40 +- .../UpdateRoutingControlStateCommand.ts | 39 +- .../UpdateRoutingControlStatesCommand.ts | 37 +- 
.../src/models/models_0.ts | 22 +- .../src/protocols/Aws_json1_0.ts | 19 + .../src/models/models_0.ts | 4 + clients/client-sns/src/endpoints.ts | 8 +- clients/client-sqs/src/endpoints.ts | 2 +- clients/client-sts/src/STS.ts | 14 +- .../src/commands/AssumeRoleCommand.ts | 8 +- .../AssumeRoleWithWebIdentityCommand.ts | 6 +- clients/client-sts/src/models/models_0.ts | 12 +- .../client-synthetics/src/models/models_0.ts | 15 +- .../src/TimestreamQuery.ts | 8 +- .../src/commands/CancelQueryCommand.ts | 2 +- .../src/commands/DescribeEndpointsCommand.ts | 4 +- .../src/commands/QueryCommand.ts | 2 +- clients/client-transcribe/src/Transcribe.ts | 110 +- .../CreateCallAnalyticsCategoryCommand.ts | 12 +- .../commands/CreateLanguageModelCommand.ts | 8 +- .../CreateMedicalVocabularyCommand.ts | 12 +- .../src/commands/CreateVocabularyCommand.ts | 9 +- .../commands/CreateVocabularyFilterCommand.ts | 5 +- .../DeleteCallAnalyticsCategoryCommand.ts | 3 +- .../commands/DeleteCallAnalyticsJobCommand.ts | 3 +- .../commands/DeleteLanguageModelCommand.ts | 3 +- .../DeleteMedicalTranscriptionJobCommand.ts | 4 +- .../DeleteMedicalVocabularyCommand.ts | 3 +- .../commands/DeleteTranscriptionJobCommand.ts | 5 +- .../src/commands/DeleteVocabularyCommand.ts | 3 +- .../commands/DeleteVocabularyFilterCommand.ts | 3 +- .../commands/DescribeLanguageModelCommand.ts | 12 +- .../commands/GetCallAnalyticsJobCommand.ts | 11 +- .../GetMedicalTranscriptionJobCommand.ts | 7 +- .../commands/GetTranscriptionJobCommand.ts | 7 +- .../client-transcribe/src/models/models_0.ts | 315 +-- clients/client-transfer/package.json | 1 + clients/client-transfer/src/index.ts | 1 + .../client-transfer/src/models/models_0.ts | 103 +- clients/client-transfer/src/waiters/index.ts | 2 + .../src/waiters/waitForServerOffline.ts | 55 + .../src/waiters/waitForServerOnline.ts | 55 + codegen/sdk-codegen/aws-models/amplify.json | 197 +- .../aws-models/amplifyuibuilder.json | 387 +++- codegen/sdk-codegen/aws-models/appflow.json | 24 + 
codegen/sdk-codegen/aws-models/athena.json | 125 +- .../aws-models/chime-sdk-meetings.json | 45 +- .../sdk-codegen/aws-models/cloudtrail.json | 30 +- .../sdk-codegen/aws-models/comprehend.json | 423 +++- codegen/sdk-codegen/aws-models/connect.json | 4 + .../sdk-codegen/aws-models/devops-guru.json | 304 ++- codegen/sdk-codegen/aws-models/ec2.json | 30 +- codegen/sdk-codegen/aws-models/ecr.json | 29 +- codegen/sdk-codegen/aws-models/ecs.json | 100 +- codegen/sdk-codegen/aws-models/eks.json | 4 + .../sdk-codegen/aws-models/elasticache.json | 2 +- .../sdk-codegen/aws-models/finspace-data.json | 1426 ++++++++++++- codegen/sdk-codegen/aws-models/fis.json | 269 +++ codegen/sdk-codegen/aws-models/fsx.json | 183 +- codegen/sdk-codegen/aws-models/gamelift.json | 4 +- .../sdk-codegen/aws-models/greengrassv2.json | 6 +- .../sdk-codegen/aws-models/kafkaconnect.json | 287 ++- codegen/sdk-codegen/aws-models/kendra.json | 216 +- codegen/sdk-codegen/aws-models/keyspaces.json | 1762 ++++++++++++++++ .../sdk-codegen/aws-models/lex-models-v2.json | 6 +- codegen/sdk-codegen/aws-models/macie.json | 142 +- .../sdk-codegen/aws-models/mediaconvert.json | 90 +- .../sdk-codegen/aws-models/mediapackage.json | 36 +- codegen/sdk-codegen/aws-models/mgn.json | 138 +- .../migration-hub-refactor-spaces.json | 85 +- codegen/sdk-codegen/aws-models/panorama.json | 220 +- codegen/sdk-codegen/aws-models/rds.json | 60 +- .../aws-models/route53-recovery-cluster.json | 58 +- .../service-catalog-appregistry.json | 38 +- codegen/sdk-codegen/aws-models/sts.json | 8 +- .../sdk-codegen/aws-models/synthetics.json | 14 +- .../aws-models/timestream-query.json | 6 +- .../sdk-codegen/aws-models/transcribe.json | 180 +- codegen/sdk-codegen/aws-models/transfer.json | 114 +- .../aws/typescript/codegen/endpoints.json | 184 +- .../endpoints/test_cases_supported.json | 2 +- 318 files changed, 23925 insertions(+), 3005 deletions(-) create mode 100644 
clients/client-amplifyuibuilder/src/pagination/ExportComponentsPaginator.ts create mode 100644 clients/client-amplifyuibuilder/src/pagination/ExportThemesPaginator.ts create mode 100644 clients/client-athena/src/commands/UpdateNamedQueryCommand.ts create mode 100644 clients/client-comprehend/src/commands/DescribeTargetedSentimentDetectionJobCommand.ts create mode 100644 clients/client-comprehend/src/commands/ListTargetedSentimentDetectionJobsCommand.ts create mode 100644 clients/client-comprehend/src/commands/StartTargetedSentimentDetectionJobCommand.ts create mode 100644 clients/client-comprehend/src/commands/StopTargetedSentimentDetectionJobCommand.ts create mode 100644 clients/client-comprehend/src/pagination/ListTargetedSentimentDetectionJobsPaginator.ts create mode 100644 clients/client-devops-guru/src/commands/DescribeEventSourcesConfigCommand.ts create mode 100644 clients/client-devops-guru/src/commands/UpdateEventSourcesConfigCommand.ts create mode 100644 clients/client-finspace-data/src/commands/CreatePermissionGroupCommand.ts create mode 100644 clients/client-finspace-data/src/commands/CreateUserCommand.ts create mode 100644 clients/client-finspace-data/src/commands/DeletePermissionGroupCommand.ts create mode 100644 clients/client-finspace-data/src/commands/DisableUserCommand.ts create mode 100644 clients/client-finspace-data/src/commands/EnableUserCommand.ts create mode 100644 clients/client-finspace-data/src/commands/GetUserCommand.ts create mode 100644 clients/client-finspace-data/src/commands/ListPermissionGroupsCommand.ts create mode 100644 clients/client-finspace-data/src/commands/ListUsersCommand.ts create mode 100644 clients/client-finspace-data/src/commands/ResetUserPasswordCommand.ts create mode 100644 clients/client-finspace-data/src/commands/UpdatePermissionGroupCommand.ts create mode 100644 clients/client-finspace-data/src/commands/UpdateUserCommand.ts create mode 100644 
clients/client-finspace-data/src/pagination/ListPermissionGroupsPaginator.ts create mode 100644 clients/client-finspace-data/src/pagination/ListUsersPaginator.ts create mode 100644 clients/client-kafkaconnect/src/commands/DeleteCustomPluginCommand.ts create mode 100644 clients/client-keyspaces/.gitignore create mode 100644 clients/client-keyspaces/LICENSE create mode 100644 clients/client-keyspaces/README.md create mode 100644 clients/client-keyspaces/package.json create mode 100644 clients/client-keyspaces/src/Keyspaces.ts create mode 100644 clients/client-keyspaces/src/KeyspacesClient.ts create mode 100644 clients/client-keyspaces/src/commands/CreateKeyspaceCommand.ts create mode 100644 clients/client-keyspaces/src/commands/CreateTableCommand.ts create mode 100644 clients/client-keyspaces/src/commands/DeleteKeyspaceCommand.ts create mode 100644 clients/client-keyspaces/src/commands/DeleteTableCommand.ts create mode 100644 clients/client-keyspaces/src/commands/GetKeyspaceCommand.ts create mode 100644 clients/client-keyspaces/src/commands/GetTableCommand.ts create mode 100644 clients/client-keyspaces/src/commands/ListKeyspacesCommand.ts create mode 100644 clients/client-keyspaces/src/commands/ListTablesCommand.ts create mode 100644 clients/client-keyspaces/src/commands/ListTagsForResourceCommand.ts create mode 100644 clients/client-keyspaces/src/commands/RestoreTableCommand.ts create mode 100644 clients/client-keyspaces/src/commands/TagResourceCommand.ts create mode 100644 clients/client-keyspaces/src/commands/UntagResourceCommand.ts create mode 100644 clients/client-keyspaces/src/commands/UpdateTableCommand.ts create mode 100644 clients/client-keyspaces/src/commands/index.ts create mode 100644 clients/client-keyspaces/src/endpoints.ts create mode 100644 clients/client-keyspaces/src/index.ts create mode 100644 clients/client-keyspaces/src/models/KeyspacesServiceException.ts create mode 100644 clients/client-keyspaces/src/models/index.ts create mode 100644 
clients/client-keyspaces/src/models/models_0.ts create mode 100644 clients/client-keyspaces/src/pagination/Interfaces.ts create mode 100644 clients/client-keyspaces/src/pagination/ListKeyspacesPaginator.ts create mode 100644 clients/client-keyspaces/src/pagination/ListTablesPaginator.ts create mode 100644 clients/client-keyspaces/src/pagination/ListTagsForResourcePaginator.ts create mode 100644 clients/client-keyspaces/src/pagination/index.ts create mode 100644 clients/client-keyspaces/src/protocols/Aws_json1_0.ts create mode 100644 clients/client-keyspaces/src/runtimeConfig.browser.ts create mode 100644 clients/client-keyspaces/src/runtimeConfig.native.ts create mode 100644 clients/client-keyspaces/src/runtimeConfig.shared.ts create mode 100644 clients/client-keyspaces/src/runtimeConfig.ts create mode 100644 clients/client-keyspaces/tsconfig.cjs.json create mode 100644 clients/client-keyspaces/tsconfig.es.json create mode 100644 clients/client-keyspaces/tsconfig.json create mode 100644 clients/client-keyspaces/tsconfig.types.json create mode 100644 clients/client-keyspaces/typedoc.json create mode 100644 clients/client-transfer/src/waiters/index.ts create mode 100644 clients/client-transfer/src/waiters/waitForServerOffline.ts create mode 100644 clients/client-transfer/src/waiters/waitForServerOnline.ts create mode 100644 codegen/sdk-codegen/aws-models/keyspaces.json diff --git a/clients/client-amplify/src/models/models_0.ts b/clients/client-amplify/src/models/models_0.ts index ae325e8ca1e3..24c821b1e53e 100644 --- a/clients/client-amplify/src/models/models_0.ts +++ b/clients/client-amplify/src/models/models_0.ts @@ -55,7 +55,9 @@ export interface AutoBranchCreationConfig { environmentVariables?: { [key: string]: string }; /** - *

The basic authorization credentials for the autocreated branch. You must base64-encode the authorization credentials and provide them in the format user:password.

+ *

The basic authorization credentials for the autocreated branch. You must + * base64-encode the authorization credentials and provide them in the format + * user:password.

*/ basicAuthCredentials?: string; @@ -66,7 +68,9 @@ export interface AutoBranchCreationConfig { /** *

Enables performance mode for the branch.

- *

Performance mode optimizes for faster hosting performance by keeping content cached at the edge for a longer interval. When performance mode is enabled, hosting configuration or code changes can take up to 10 minutes to roll out.

+ *

Performance mode optimizes for faster hosting performance by keeping content cached at + * the edge for a longer interval. When performance mode is enabled, hosting configuration + * or code changes can take up to 10 minutes to roll out.

*/ enablePerformanceMode?: boolean; @@ -93,6 +97,7 @@ export namespace AutoBranchCreationConfig { export const filterSensitiveLog = (obj: AutoBranchCreationConfig): any => ({ ...obj, ...(obj.basicAuthCredentials && { basicAuthCredentials: SENSITIVE_STRING }), + ...(obj.buildSpec && { buildSpec: SENSITIVE_STRING }), }); } @@ -120,7 +125,8 @@ export interface CustomRule { * *
301
*
- *

Represents a 301 (moved pemanently) redirect rule. This and all future requests should be directed to the target URL.

+ *

Represents a 301 (moved pemanently) redirect rule. This and all future + * requests should be directed to the target URL.

*
*
302
*
@@ -155,6 +161,7 @@ export namespace CustomRule { export enum Platform { WEB = "WEB", + WEB_DYNAMIC = "WEB_DYNAMIC", } /** @@ -223,7 +230,9 @@ export interface CreateAppRequest { enableBasicAuth?: boolean; /** - *

The credentials for basic authorization for an Amplify app. You must base64-encode the authorization credentials and provide them in the format user:password.

+ *

The credentials for basic authorization for an Amplify app. You must base64-encode + * the authorization credentials and provide them in the format + * user:password.

*/ basicAuthCredentials?: string; @@ -272,6 +281,7 @@ export namespace CreateAppRequest { ...(obj.oauthToken && { oauthToken: SENSITIVE_STRING }), ...(obj.accessToken && { accessToken: SENSITIVE_STRING }), ...(obj.basicAuthCredentials && { basicAuthCredentials: SENSITIVE_STRING }), + ...(obj.buildSpec && { buildSpec: SENSITIVE_STRING }), ...(obj.autoBranchCreationConfig && { autoBranchCreationConfig: AutoBranchCreationConfig.filterSensitiveLog(obj.autoBranchCreationConfig), }), @@ -312,6 +322,12 @@ export namespace ProductionBranch { }); } +export enum RepositoryCloneMethod { + SIGV4 = "SIGV4", + SSH = "SSH", + TOKEN = "TOKEN", +} + /** *

Represents the different branches of a repository for building, deploying, and * hosting an Amplify app.

@@ -343,7 +359,7 @@ export interface App { description: string | undefined; /** - *

The repository for the Amplify app.

+ *

The Git repository for the Amplify app.

*/ repository: string | undefined; @@ -395,7 +411,9 @@ export interface App { enableBasicAuth: boolean | undefined; /** - *

The basic authorization credentials for branches for the Amplify app. You must base64-encode the authorization credentials and provide them in the format user:password.

+ *

The basic authorization credentials for branches for the Amplify app. You must + * base64-encode the authorization credentials and provide them in the format + * user:password.

*/ basicAuthCredentials?: string; @@ -434,6 +452,14 @@ export interface App { *

Describes the automated branch creation configuration for the Amplify app.

*/ autoBranchCreationConfig?: AutoBranchCreationConfig; + + /** + *

The authentication protocol to use to access the Git repository for an Amplify app. + * For a GitHub repository, specify TOKEN. For an Amazon Web Services CodeCommit repository, + * specify SIGV4. For GitLab and Bitbucket repositories, specify + * SSH.

+ */ + repositoryCloneMethod?: RepositoryCloneMethod | string; } export namespace App { @@ -443,6 +469,7 @@ export namespace App { export const filterSensitiveLog = (obj: App): any => ({ ...obj, ...(obj.basicAuthCredentials && { basicAuthCredentials: SENSITIVE_STRING }), + ...(obj.buildSpec && { buildSpec: SENSITIVE_STRING }), ...(obj.autoBranchCreationConfig && { autoBranchCreationConfig: AutoBranchCreationConfig.filterSensitiveLog(obj.autoBranchCreationConfig), }), @@ -707,7 +734,9 @@ export interface CreateBranchRequest { environmentVariables?: { [key: string]: string }; /** - *

The basic authorization credentials for the branch. You must base64-encode the authorization credentials and provide them in the format user:password.

+ *

The basic authorization credentials for the branch. You must base64-encode the + * authorization credentials and provide them in the format + * user:password.

*/ basicAuthCredentials?: string; @@ -718,7 +747,9 @@ export interface CreateBranchRequest { /** *

Enables performance mode for the branch.

- *

Performance mode optimizes for faster hosting performance by keeping content cached at the edge for a longer interval. When performance mode is enabled, hosting configuration or code changes can take up to 10 minutes to roll out.

+ *

Performance mode optimizes for faster hosting performance by keeping content cached at + * the edge for a longer interval. When performance mode is enabled, hosting configuration + * or code changes can take up to 10 minutes to roll out.

*/ enablePerformanceMode?: boolean; @@ -766,6 +797,7 @@ export namespace CreateBranchRequest { export const filterSensitiveLog = (obj: CreateBranchRequest): any => ({ ...obj, ...(obj.basicAuthCredentials && { basicAuthCredentials: SENSITIVE_STRING }), + ...(obj.buildSpec && { buildSpec: SENSITIVE_STRING }), }); } @@ -855,7 +887,9 @@ export interface Branch { /** *

Enables performance mode for the branch.

- *

Performance mode optimizes for faster hosting performance by keeping content cached at the edge for a longer interval. When performance mode is enabled, hosting configuration or code changes can take up to 10 minutes to roll out.

+ *

Performance mode optimizes for faster hosting performance by keeping content cached at + * the edge for a longer interval. When performance mode is enabled, hosting configuration + * or code changes can take up to 10 minutes to roll out.

*/ enablePerformanceMode?: boolean; @@ -865,7 +899,9 @@ export interface Branch { thumbnailUrl?: string; /** - *

The basic authorization credentials for a branch of an Amplify app. You must base64-encode the authorization credentials and provide them in the format user:password.

+ *

The basic authorization credentials for a branch of an Amplify app. You must + * base64-encode the authorization credentials and provide them in the format + * user:password.

*/ basicAuthCredentials?: string; @@ -919,6 +955,7 @@ export namespace Branch { export const filterSensitiveLog = (obj: Branch): any => ({ ...obj, ...(obj.basicAuthCredentials && { basicAuthCredentials: SENSITIVE_STRING }), + ...(obj.buildSpec && { buildSpec: SENSITIVE_STRING }), }); } @@ -2867,7 +2904,9 @@ export interface UpdateAppRequest { enableBasicAuth?: boolean; /** - *

The basic authorization credentials for an Amplify app. You must base64-encode the authorization credentials and provide them in the format user:password.

+ *

The basic authorization credentials for an Amplify app. You must base64-encode the + * authorization credentials and provide them in the format + * user:password.

*/ basicAuthCredentials?: string; @@ -2928,6 +2967,7 @@ export namespace UpdateAppRequest { export const filterSensitiveLog = (obj: UpdateAppRequest): any => ({ ...obj, ...(obj.basicAuthCredentials && { basicAuthCredentials: SENSITIVE_STRING }), + ...(obj.buildSpec && { buildSpec: SENSITIVE_STRING }), ...(obj.autoBranchCreationConfig && { autoBranchCreationConfig: AutoBranchCreationConfig.filterSensitiveLog(obj.autoBranchCreationConfig), }), @@ -3001,7 +3041,9 @@ export interface UpdateBranchRequest { environmentVariables?: { [key: string]: string }; /** - *

The basic authorization credentials for the branch. You must base64-encode the authorization credentials and provide them in the format user:password.

+ *

The basic authorization credentials for the branch. You must base64-encode the + * authorization credentials and provide them in the format + * user:password.

*/ basicAuthCredentials?: string; @@ -3012,7 +3054,9 @@ export interface UpdateBranchRequest { /** *

Enables performance mode for the branch.

- *

Performance mode optimizes for faster hosting performance by keeping content cached at the edge for a longer interval. When performance mode is enabled, hosting configuration or code changes can take up to 10 minutes to roll out.

+ *

Performance mode optimizes for faster hosting performance by keeping content cached at + * the edge for a longer interval. When performance mode is enabled, hosting configuration + * or code changes can take up to 10 minutes to roll out.

*/ enablePerformanceMode?: boolean; @@ -3055,6 +3099,7 @@ export namespace UpdateBranchRequest { export const filterSensitiveLog = (obj: UpdateBranchRequest): any => ({ ...obj, ...(obj.basicAuthCredentials && { basicAuthCredentials: SENSITIVE_STRING }), + ...(obj.buildSpec && { buildSpec: SENSITIVE_STRING }), }); } @@ -3100,7 +3145,7 @@ export interface UpdateDomainAssociationRequest { /** *

Describes the settings for the subdomain.

*/ - subDomainSettings: SubDomainSetting[] | undefined; + subDomainSettings?: SubDomainSetting[]; /** *

Sets the branch patterns for automatic subdomain creation.

diff --git a/clients/client-amplify/src/protocols/Aws_restJson1.ts b/clients/client-amplify/src/protocols/Aws_restJson1.ts index 976cdc6cac6d..da419be081ae 100644 --- a/clients/client-amplify/src/protocols/Aws_restJson1.ts +++ b/clients/client-amplify/src/protocols/Aws_restJson1.ts @@ -3939,6 +3939,7 @@ const deserializeAws_restJson1App = (output: any, context: __SerdeContext): App ? deserializeAws_restJson1ProductionBranch(output.productionBranch, context) : undefined, repository: __expectString(output.repository), + repositoryCloneMethod: __expectString(output.repositoryCloneMethod), tags: output.tags !== undefined && output.tags !== null ? deserializeAws_restJson1TagMap(output.tags, context) diff --git a/clients/client-amplifyuibuilder/README.md b/clients/client-amplifyuibuilder/README.md index 3520ac7bb99d..c8d3e4d79b32 100644 --- a/clients/client-amplifyuibuilder/README.md +++ b/clients/client-amplifyuibuilder/README.md @@ -7,16 +7,15 @@ AWS SDK for JavaScript AmplifyUIBuilder Client for Node.js, Browser and React Native. -

The Amplify UI Builder API provides a programmatic interface for creating and configuring -user interface (UI) component libraries and themes for use in your Amplify applications. You -can then connect these UI components to an application's backend Amazon Web Services -resources.

-

You can also use the Amplify Studio visual designer to create UI components and model data -for an app. For more information, see Introduction in the +

The Amplify UI Builder API provides a programmatic interface for creating +and configuring user interface (UI) component libraries and themes for use in your Amplify applications. You can then connect these UI components to an application's +backend Amazon Web Services resources.

+

You can also use the Amplify Studio visual designer to create UI components +and model data for an app. For more information, see Introduction in the Amplify Docs.

-

The Amplify Framework is a comprehensive set of SDKs, libraries, tools, and documentation -for client app development. For more information, see the Amplify Framework. For more information about -deploying an Amplify application to Amazon Web Services, see the Amplify Console User Guide.

+

The Amplify Framework is a comprehensive set of SDKs, libraries, tools, and +documentation for client app development. For more information, see the Amplify Framework. For more information about +deploying an Amplify application to Amazon Web Services, see the Amplify User Guide.

## Installing diff --git a/clients/client-amplifyuibuilder/src/AmplifyUIBuilder.ts b/clients/client-amplifyuibuilder/src/AmplifyUIBuilder.ts index 01fe2da51d56..9e155d4d8cab 100644 --- a/clients/client-amplifyuibuilder/src/AmplifyUIBuilder.ts +++ b/clients/client-amplifyuibuilder/src/AmplifyUIBuilder.ts @@ -53,16 +53,15 @@ import { import { UpdateThemeCommand, UpdateThemeCommandInput, UpdateThemeCommandOutput } from "./commands/UpdateThemeCommand"; /** - *

The Amplify UI Builder API provides a programmatic interface for creating and configuring - * user interface (UI) component libraries and themes for use in your Amplify applications. You - * can then connect these UI components to an application's backend Amazon Web Services - * resources.

- *

You can also use the Amplify Studio visual designer to create UI components and model data - * for an app. For more information, see Introduction in the + *

The Amplify UI Builder API provides a programmatic interface for creating + * and configuring user interface (UI) component libraries and themes for use in your Amplify applications. You can then connect these UI components to an application's + * backend Amazon Web Services resources.

+ *

You can also use the Amplify Studio visual designer to create UI components + * and model data for an app. For more information, see Introduction in the * Amplify Docs.

- *

The Amplify Framework is a comprehensive set of SDKs, libraries, tools, and documentation - * for client app development. For more information, see the Amplify Framework. For more information about - * deploying an Amplify application to Amazon Web Services, see the Amplify Console User Guide.

+ *

The Amplify Framework is a comprehensive set of SDKs, libraries, tools, and + * documentation for client app development. For more information, see the Amplify Framework. For more information about + * deploying an Amplify application to Amazon Web Services, see the Amplify User Guide.

*/ export class AmplifyUIBuilder extends AmplifyUIBuilderClient { /** @@ -214,8 +213,7 @@ export class AmplifyUIBuilder extends AmplifyUIBuilderClient { } /** - *

Exports component configurations to code that is ready to integrate into an Amplify - * app.

+ *

Exports component configurations to code that is ready to integrate into an Amplify app.

*/ public exportComponents( args: ExportComponentsCommandInput, @@ -247,8 +245,7 @@ export class AmplifyUIBuilder extends AmplifyUIBuilderClient { } /** - *

Exports theme configurations to code that is ready to integrate into an Amplify - * app.

+ *

Exports theme configurations to code that is ready to integrate into an Amplify app.

*/ public exportThemes( args: ExportThemesCommandInput, @@ -332,7 +329,8 @@ export class AmplifyUIBuilder extends AmplifyUIBuilderClient { } /** - *

Retrieves a list of components for a specified Amplify app and backend environment.

+ *

Retrieves a list of components for a specified Amplify app and backend + * environment.

*/ public listComponents( args: ListComponentsCommandInput, @@ -364,7 +362,8 @@ export class AmplifyUIBuilder extends AmplifyUIBuilderClient { } /** - *

Retrieves a list of themes for a specified Amplify app and backend environment.

+ *

Retrieves a list of themes for a specified Amplify app and backend + * environment.

*/ public listThemes(args: ListThemesCommandInput, options?: __HttpHandlerOptions): Promise; public listThemes(args: ListThemesCommandInput, cb: (err: any, data?: ListThemesCommandOutput) => void): void; diff --git a/clients/client-amplifyuibuilder/src/AmplifyUIBuilderClient.ts b/clients/client-amplifyuibuilder/src/AmplifyUIBuilderClient.ts index 4530a40e142c..3466340cff31 100644 --- a/clients/client-amplifyuibuilder/src/AmplifyUIBuilderClient.ts +++ b/clients/client-amplifyuibuilder/src/AmplifyUIBuilderClient.ts @@ -256,16 +256,15 @@ type AmplifyUIBuilderClientResolvedConfigType = __SmithyResolvedConfiguration<__ export interface AmplifyUIBuilderClientResolvedConfig extends AmplifyUIBuilderClientResolvedConfigType {} /** - *

The Amplify UI Builder API provides a programmatic interface for creating and configuring - * user interface (UI) component libraries and themes for use in your Amplify applications. You - * can then connect these UI components to an application's backend Amazon Web Services - * resources.

- *

You can also use the Amplify Studio visual designer to create UI components and model data - * for an app. For more information, see Introduction in the + *

The Amplify UI Builder API provides a programmatic interface for creating + * and configuring user interface (UI) component libraries and themes for use in your Amplify applications. You can then connect these UI components to an application's + * backend Amazon Web Services resources.

+ *

You can also use the Amplify Studio visual designer to create UI components + * and model data for an app. For more information, see Introduction in the * Amplify Docs.

- *

The Amplify Framework is a comprehensive set of SDKs, libraries, tools, and documentation - * for client app development. For more information, see the Amplify Framework. For more information about - * deploying an Amplify application to Amazon Web Services, see the Amplify Console User Guide.

+ *

The Amplify Framework is a comprehensive set of SDKs, libraries, tools, and + * documentation for client app development. For more information, see the Amplify Framework. For more information about + * deploying an Amplify application to Amazon Web Services, see the Amplify User Guide.

*/ export class AmplifyUIBuilderClient extends __Client< __HttpHandlerOptions, diff --git a/clients/client-amplifyuibuilder/src/commands/ExportComponentsCommand.ts b/clients/client-amplifyuibuilder/src/commands/ExportComponentsCommand.ts index b0c8527dc8c4..c88b030c0c65 100644 --- a/clients/client-amplifyuibuilder/src/commands/ExportComponentsCommand.ts +++ b/clients/client-amplifyuibuilder/src/commands/ExportComponentsCommand.ts @@ -22,8 +22,7 @@ export interface ExportComponentsCommandInput extends ExportComponentsRequest {} export interface ExportComponentsCommandOutput extends ExportComponentsResponse, __MetadataBearer {} /** - *

Exports component configurations to code that is ready to integrate into an Amplify - * app.

+ *

Exports component configurations to code that is ready to integrate into an Amplify app.

* @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-amplifyuibuilder/src/commands/ExportThemesCommand.ts b/clients/client-amplifyuibuilder/src/commands/ExportThemesCommand.ts index c07d94e5abc7..012b9e62c685 100644 --- a/clients/client-amplifyuibuilder/src/commands/ExportThemesCommand.ts +++ b/clients/client-amplifyuibuilder/src/commands/ExportThemesCommand.ts @@ -22,8 +22,7 @@ export interface ExportThemesCommandInput extends ExportThemesRequest {} export interface ExportThemesCommandOutput extends ExportThemesResponse, __MetadataBearer {} /** - *

Exports theme configurations to code that is ready to integrate into an Amplify - * app.

+ *

Exports theme configurations to code that is ready to integrate into an Amplify app.

* @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-amplifyuibuilder/src/commands/ListComponentsCommand.ts b/clients/client-amplifyuibuilder/src/commands/ListComponentsCommand.ts index 3a4d29122964..abba68e8557a 100644 --- a/clients/client-amplifyuibuilder/src/commands/ListComponentsCommand.ts +++ b/clients/client-amplifyuibuilder/src/commands/ListComponentsCommand.ts @@ -22,7 +22,8 @@ export interface ListComponentsCommandInput extends ListComponentsRequest {} export interface ListComponentsCommandOutput extends ListComponentsResponse, __MetadataBearer {} /** - *

Retrieves a list of components for a specified Amplify app and backend environment.

+ *

Retrieves a list of components for a specified Amplify app and backend + * environment.

* @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-amplifyuibuilder/src/commands/ListThemesCommand.ts b/clients/client-amplifyuibuilder/src/commands/ListThemesCommand.ts index a9a605925a41..6eda552fbaba 100644 --- a/clients/client-amplifyuibuilder/src/commands/ListThemesCommand.ts +++ b/clients/client-amplifyuibuilder/src/commands/ListThemesCommand.ts @@ -22,7 +22,8 @@ export interface ListThemesCommandInput extends ListThemesRequest {} export interface ListThemesCommandOutput extends ListThemesResponse, __MetadataBearer {} /** - *

Retrieves a list of themes for a specified Amplify app and backend environment.

+ *

Retrieves a list of themes for a specified Amplify app and backend + * environment.

* @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-amplifyuibuilder/src/models/models_0.ts b/clients/client-amplifyuibuilder/src/models/models_0.ts index 696118f9f3f1..d472b680a7ef 100644 --- a/clients/client-amplifyuibuilder/src/models/models_0.ts +++ b/clients/client-amplifyuibuilder/src/models/models_0.ts @@ -83,13 +83,15 @@ export namespace SortProperty { */ export interface ComponentVariant { /** - *

The combination of variants that comprise this variant.

+ *

The combination of variants that comprise this variant. You can't specify + * tags as a valid property for variantValues.

*/ variantValues?: { [key: string]: string }; /** *

The properties of the component variant that can be overriden when customizing an instance - * of the component.

+ * of the component. You can't specify tags as a valid property for + * overrides.

*/ overrides?: { [key: string]: { [key: string]: string } }; } @@ -182,12 +184,14 @@ export class ServiceQuotaExceededException extends __BaseException { export interface DeleteComponentRequest { /** - *

The unique ID of the Amplify app associated with the component to delete.

+ *

The unique ID of the Amplify app associated with the component to + * delete.

*/ appId: string | undefined; /** - *

The name of the backend environment that is a part of the Amplify app.

+ *

The name of the backend environment that is a part of the Amplify + * app.

*/ environmentName: string | undefined; @@ -232,9 +236,15 @@ export interface ExportComponentsRequest { appId: string | undefined; /** - *

The name of the backend environment that is a part of the Amplify app.

+ *

The name of the backend environment that is a part of the Amplify + * app.

*/ environmentName: string | undefined; + + /** + *

The token to request the next page of results.

+ */ + nextToken?: string; } export namespace ExportComponentsRequest { @@ -279,7 +289,8 @@ export interface ListComponentsRequest { appId: string | undefined; /** - *

The name of the backend environment that is a part of the Amplify app.

+ *

The name of the backend environment that is a part of the Amplify + * app.

*/ environmentName: string | undefined; @@ -314,7 +325,8 @@ export interface ComponentSummary { appId: string | undefined; /** - *

The name of the backend environment that is a part of the Amplify app.

+ *

The name of the backend environment that is a part of the Amplify + * app.

*/ environmentName: string | undefined; @@ -514,12 +526,14 @@ export namespace RefreshTokenResponse { export interface DeleteThemeRequest { /** - *

The unique ID of the Amplify app associated with the theme to delete.

+ *

The unique ID of the Amplify app associated with the theme to + * delete.

*/ appId: string | undefined; /** - *

The name of the backend environment that is a part of the Amplify app.

+ *

The name of the backend environment that is a part of the Amplify + * app.

*/ environmentName: string | undefined; @@ -548,6 +562,11 @@ export interface ExportThemesRequest { *

The name of the backend environment that is part of the Amplify app.

*/ environmentName: string | undefined; + + /** + *

The token to request the next page of results.

+ */ + nextToken?: string; } export namespace ExportThemesRequest { @@ -592,7 +611,8 @@ export interface ListThemesRequest { appId: string | undefined; /** - *

The name of the backend environment that is a part of the Amplify app.

+ *

The name of the backend environment that is a part of the Amplify + * app.

*/ environmentName: string | undefined; @@ -720,8 +740,8 @@ export namespace ThemeValues { } /** - *

Stores information for generating Amplify DataStore queries. Use a Predicate - * to retrieve a subset of the data in a collection.

+ *

Stores information for generating Amplify DataStore queries. Use a + * Predicate to retrieve a subset of the data in a collection.

*/ export interface Predicate { /** @@ -849,8 +869,7 @@ export namespace ComponentDataConfiguration { /** *

Describes the configuration for all of a component's properties. Use - * ComponentProperty to specify the values to render or bind by - * default.

+ * ComponentProperty to specify the values to render or bind by default.

*/ export interface ComponentProperty { /** @@ -864,7 +883,8 @@ export interface ComponentProperty { bindingProperties?: ComponentPropertyBindingProperties; /** - *

The information to bind the component property to data at runtime. Use this for collection components.

+ *

The information to bind the component property to data at runtime. Use this for collection + * components.

*/ collectionBindingProperties?: ComponentPropertyBindingProperties; @@ -894,17 +914,19 @@ export interface ComponentProperty { userAttribute?: string; /** - *

A list of component properties to concatenate to create the value to assign to this component property.

+ *

A list of component properties to concatenate to create the value to assign to this + * component property.

*/ concat?: ComponentProperty[]; /** - *

The conditional expression to use to assign a value to the component property..

+ *

The conditional expression to use to assign a value to the component property.

*/ condition?: ComponentConditionProperty; /** - *

Specifies whether the user configured the property in Amplify Studio after importing it.

+ *

Specifies whether the user configured the property in Amplify Studio after + * importing it.

*/ configured?: boolean; @@ -914,9 +936,20 @@ export interface ComponentProperty { type?: string; /** - *

The default value assigned to property when the component is imported into an app.

+ *

The default value assigned to the property when the component is imported into an + * app.

*/ importedValue?: string; + + /** + *

The name of the component that is affected by an event.

+ */ + componentName?: string; + + /** + *

The name of the component's property that is affected by an event.

+ */ + property?: string; } export namespace ComponentProperty { @@ -1006,7 +1039,8 @@ export interface Theme { appId: string | undefined; /** - *

The name of the backend environment that is a part of the Amplify app.

+ *

The name of the backend environment that is a part of the Amplify + * app.

*/ environmentName: string | undefined; @@ -1089,6 +1123,36 @@ export namespace UpdateThemeData { }); } +/** + *

Represents the state configuration when an action modifies a property of another + * element within the same component.

+ */ +export interface MutationActionSetStateParameter { + /** + *

The name of the component that is being modified.

+ */ + componentName: string | undefined; + + /** + *

The name of the component property to apply the state configuration to.

+ */ + property: string | undefined; + + /** + *

The state configuration to assign to the property.

+ */ + set: ComponentProperty | undefined; +} + +export namespace MutationActionSetStateParameter { + /** + * @internal + */ + export const filterSensitiveLog = (obj: MutationActionSetStateParameter): any => ({ + ...obj, + }); +} + /** *

Represents a conditional expression to set a component property. Use * ComponentConditionProperty to set a property to different values conditionally, @@ -1125,6 +1189,11 @@ export interface ComponentConditionProperty { *

The value to assign to the property if the condition is not met.

*/ else?: ComponentProperty; + + /** + *

The type of the property to evaluate.

+ */ + operandType?: string; } export namespace ComponentConditionProperty { @@ -1143,7 +1212,8 @@ export interface CreateThemeRequest { appId: string | undefined; /** - *

The name of the backend environment that is a part of the Amplify app.

+ *

The name of the backend environment that is a part of the Amplify + * app.

*/ environmentName: string | undefined; @@ -1256,6 +1326,11 @@ export interface ExportThemesResponse { *

Represents the configuration of the exported themes.

*/ entities: Theme[] | undefined; + + /** + *

The pagination token that's included if more results are available.

+ */ + nextToken?: string; } export namespace ExportThemesResponse { @@ -1267,6 +1342,96 @@ export namespace ExportThemesResponse { }); } +/** + *

Represents the event action configuration for an element of a Component + * or ComponentChild. Use for the workflow feature in Amplify Studio + * that allows you to bind events and actions to components. ActionParameters + * defines the action that is performed when an event occurs on the component.

+ */ +export interface ActionParameters { + /** + *

The type of navigation action. Valid values are url and anchor. This value is required for a navigation action.

+ */ + type?: ComponentProperty; + + /** + *

The URL to the location to open. Specify this value for a navigation action.

+ */ + url?: ComponentProperty; + + /** + *

The HTML anchor link to the location to open. Specify this value for a navigation action.

+ */ + anchor?: ComponentProperty; + + /** + *

The element within the same component to modify when the action occurs.

+ */ + target?: ComponentProperty; + + /** + *

Specifies whether the user should be signed out globally. Specify this value for an auth sign out action.

+ */ + global?: ComponentProperty; + + /** + *

The name of the data model. Use when the action performs an operation on an Amplify DataStore + * model.

+ */ + model?: string; + + /** + *

The unique ID of the component that the ActionParameters apply to.

+ */ + id?: ComponentProperty; + + /** + *

A dictionary of key-value pairs mapping Amplify Studio properties to fields in a data model. Use when the action + * performs an operation on an Amplify DataStore model.

+ */ + fields?: { [key: string]: ComponentProperty }; + + /** + *

A key-value pair that specifies the state property name and its initial value.

+ */ + state?: MutationActionSetStateParameter; +} + +export namespace ActionParameters { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ActionParameters): any => ({ + ...obj, + }); +} + +/** + *

Describes the configuration of an event. You can bind an event and a corresponding + * action to a Component or a ComponentChild. A button click + * is an example of an event.

+ */ +export interface ComponentEvent { + /** + *

The action to perform when a specific event is raised.

+ */ + action?: string; + + /** + *

Describes information about the action.

+ */ + parameters?: ActionParameters; +} + +export namespace ComponentEvent { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ComponentEvent): any => ({ + ...obj, + }); +} + /** *

A nested UI configuration within a parent Component.

*/ @@ -1282,7 +1447,8 @@ export interface ComponentChild { name: string | undefined; /** - *

Describes the properties of the child component.

+ *

Describes the properties of the child component. You can't specify tags as a + * valid property for properties.

*/ properties: { [key: string]: ComponentProperty } | undefined; @@ -1290,6 +1456,12 @@ export interface ComponentChild { *

The list of ComponentChild instances for this component.

*/ children?: ComponentChild[]; + + /** + *

Describes the events that can be raised on the child component. Use for the workflow feature in Amplify Studio that allows you to + * bind events and actions to components.

+ */ + events?: { [key: string]: ComponentEvent }; } export namespace ComponentChild { @@ -1302,10 +1474,10 @@ export namespace ComponentChild { } /** - *

Contains the configuration settings for a user interface (UI) element for an Amplify app. A - * component is configured as a primary, stand-alone UI element. Use ComponentChild - * to configure an instance of a Component. A ComponentChild instance - * inherits the configuration of the main Component.

+ *

Contains the configuration settings for a user interface (UI) element for an Amplify app. A component is configured as a primary, stand-alone UI element. Use + * ComponentChild to configure an instance of a Component. A + * ComponentChild instance inherits the configuration of the main + * Component.

*/ export interface Component { /** @@ -1314,7 +1486,8 @@ export interface Component { appId: string | undefined; /** - *

The name of the backend environment that is a part of the Amplify app.

+ *

The name of the backend environment that is a part of the Amplify + * app.

*/ environmentName: string | undefined; @@ -1334,13 +1507,14 @@ export interface Component { name: string | undefined; /** - *

The type of the component. This can be an Amplify custom UI component or another custom - * component.

+ *

The type of the component. This can be an Amplify custom UI component or + * another custom component.

*/ componentType: string | undefined; /** - *

Describes the component's properties.

+ *

Describes the component's properties. You can't specify tags as a valid + * property for properties.

*/ properties: { [key: string]: ComponentProperty } | undefined; @@ -1350,24 +1524,29 @@ export interface Component { children?: ComponentChild[]; /** - *

A list of the component's variants. A variant is a unique style configuration of a - * main component.

+ *

A list of the component's variants. A variant is a unique style configuration of a main + * component.

*/ variants: ComponentVariant[] | undefined; /** *

Describes the component's properties that can be overriden in a customized instance of the - * component.

+ * component. You can't specify tags as a valid property for + * overrides.

*/ overrides: { [key: string]: { [key: string]: string } } | undefined; /** - *

The information to connect a component's properties to data at runtime.

+ *

The information to connect a component's properties to data at runtime. You can't specify + * tags as a valid property for bindingProperties.

+ *

*/ bindingProperties: { [key: string]: ComponentBindingPropertiesValue } | undefined; /** - *

The data binding configuration for the component's properties. Use this for a collection component.

+ *

The data binding configuration for the component's properties. Use this for a collection + * component. You can't specify tags as a valid property for + * collectionProperties.

*/ collectionProperties?: { [key: string]: ComponentDataConfiguration }; @@ -1385,6 +1564,17 @@ export interface Component { *

One or more key-value pairs to use when tagging the component.

*/ tags?: { [key: string]: string }; + + /** + *

Describes the events that can be raised on the component. Use for the workflow feature in Amplify Studio that allows you to + * bind events and actions to components.

+ */ + events?: { [key: string]: ComponentEvent }; + + /** + *

The schema version of the component when it was imported.

+ */ + schemaVersion?: string; } export namespace Component { @@ -1411,8 +1601,8 @@ export interface CreateComponentData { sourceId?: string; /** - *

The component type. This can be an Amplify custom UI component or another custom - * component.

+ *

The component type. This can be an Amplify custom UI component or another + * custom component.

*/ componentType: string | undefined; @@ -1443,7 +1633,8 @@ export interface CreateComponentData { bindingProperties: { [key: string]: ComponentBindingPropertiesValue } | undefined; /** - *

The data binding configuration for customizing a component's properties. Use this for a collection component.

+ *

The data binding configuration for customizing a component's properties. Use this for a + * collection component.

*/ collectionProperties?: { [key: string]: ComponentDataConfiguration }; @@ -1451,6 +1642,16 @@ export interface CreateComponentData { *

One or more key-value pairs to use when tagging the component data.

*/ tags?: { [key: string]: string }; + + /** + *

The event configuration for the component. Use for the workflow feature in Amplify Studio that allows you to bind events and actions to components.

+ */ + events?: { [key: string]: ComponentEvent }; + + /** + *

The schema version of the component when it was imported.

+ */ + schemaVersion?: string; } export namespace CreateComponentData { @@ -1482,8 +1683,8 @@ export interface UpdateComponentData { sourceId?: string; /** - *

The type of the component. This can be an Amplify custom UI component or another custom - * component.

+ *

The type of the component. This can be an Amplify custom UI component or + * another custom component.

*/ componentType?: string; @@ -1513,9 +1714,20 @@ export interface UpdateComponentData { bindingProperties?: { [key: string]: ComponentBindingPropertiesValue }; /** - *

The configuration for binding a component's properties to a data model. Use this for a collection component.

+ *

The configuration for binding a component's properties to a data model. Use this for a + * collection component.

*/ collectionProperties?: { [key: string]: ComponentDataConfiguration }; + + /** + *

The event configuration for the component. Use for the workflow feature in Amplify Studio that allows you to bind events and actions to components.

+ */ + events?: { [key: string]: ComponentEvent }; + + /** + *

The schema version of the component when it was imported.

+ */ + schemaVersion?: string; } export namespace UpdateComponentData { @@ -1534,7 +1746,8 @@ export interface CreateComponentRequest { appId: string | undefined; /** - *

The name of the backend environment that is a part of the Amplify app.

+ *

The name of the backend environment that is a part of the Amplify + * app.

*/ environmentName: string | undefined; @@ -1647,6 +1860,11 @@ export interface ExportComponentsResponse { *

Represents the configuration of the exported components.

*/ entities: Component[] | undefined; + + /** + *

The pagination token that's included if more results are available.

+ */ + nextToken?: string; } export namespace ExportComponentsResponse { diff --git a/clients/client-amplifyuibuilder/src/pagination/ExportComponentsPaginator.ts b/clients/client-amplifyuibuilder/src/pagination/ExportComponentsPaginator.ts new file mode 100644 index 000000000000..e7186e0d130b --- /dev/null +++ b/clients/client-amplifyuibuilder/src/pagination/ExportComponentsPaginator.ts @@ -0,0 +1,58 @@ +import { Paginator } from "@aws-sdk/types"; + +import { AmplifyUIBuilder } from "../AmplifyUIBuilder"; +import { AmplifyUIBuilderClient } from "../AmplifyUIBuilderClient"; +import { + ExportComponentsCommand, + ExportComponentsCommandInput, + ExportComponentsCommandOutput, +} from "../commands/ExportComponentsCommand"; +import { AmplifyUIBuilderPaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: AmplifyUIBuilderClient, + input: ExportComponentsCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new ExportComponentsCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: AmplifyUIBuilder, + input: ExportComponentsCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.exportComponents(input, ...args); +}; +export async function* paginateExportComponents( + config: AmplifyUIBuilderPaginationConfiguration, + input: ExportComponentsCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.nextToken + let token: typeof input.nextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: ExportComponentsCommandOutput; + while (hasNext) { + input.nextToken = token; + if (config.client instanceof AmplifyUIBuilder) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof AmplifyUIBuilderClient) { + page = await makePagedClientRequest(config.client, 
input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected AmplifyUIBuilder | AmplifyUIBuilderClient"); + } + yield page; + token = page.nextToken; + hasNext = !!token; + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-amplifyuibuilder/src/pagination/ExportThemesPaginator.ts b/clients/client-amplifyuibuilder/src/pagination/ExportThemesPaginator.ts new file mode 100644 index 000000000000..4a3092803752 --- /dev/null +++ b/clients/client-amplifyuibuilder/src/pagination/ExportThemesPaginator.ts @@ -0,0 +1,58 @@ +import { Paginator } from "@aws-sdk/types"; + +import { AmplifyUIBuilder } from "../AmplifyUIBuilder"; +import { AmplifyUIBuilderClient } from "../AmplifyUIBuilderClient"; +import { + ExportThemesCommand, + ExportThemesCommandInput, + ExportThemesCommandOutput, +} from "../commands/ExportThemesCommand"; +import { AmplifyUIBuilderPaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: AmplifyUIBuilderClient, + input: ExportThemesCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new ExportThemesCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: AmplifyUIBuilder, + input: ExportThemesCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.exportThemes(input, ...args); +}; +export async function* paginateExportThemes( + config: AmplifyUIBuilderPaginationConfiguration, + input: ExportThemesCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.nextToken + let token: typeof input.nextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: ExportThemesCommandOutput; + while (hasNext) { + input.nextToken = token; + if (config.client instanceof AmplifyUIBuilder) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else 
if (config.client instanceof AmplifyUIBuilderClient) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected AmplifyUIBuilder | AmplifyUIBuilderClient"); + } + yield page; + token = page.nextToken; + hasNext = !!token; + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-amplifyuibuilder/src/pagination/index.ts b/clients/client-amplifyuibuilder/src/pagination/index.ts index 21bdcabbcc66..9072e9cc95ba 100644 --- a/clients/client-amplifyuibuilder/src/pagination/index.ts +++ b/clients/client-amplifyuibuilder/src/pagination/index.ts @@ -1,3 +1,5 @@ +export * from "./ExportComponentsPaginator"; +export * from "./ExportThemesPaginator"; export * from "./Interfaces"; export * from "./ListComponentsPaginator"; export * from "./ListThemesPaginator"; diff --git a/clients/client-amplifyuibuilder/src/protocols/Aws_restJson1.ts b/clients/client-amplifyuibuilder/src/protocols/Aws_restJson1.ts index 98827dc6120d..1e5f379272a7 100644 --- a/clients/client-amplifyuibuilder/src/protocols/Aws_restJson1.ts +++ b/clients/client-amplifyuibuilder/src/protocols/Aws_restJson1.ts @@ -35,12 +35,14 @@ import { UpdateComponentCommandInput, UpdateComponentCommandOutput } from "../co import { UpdateThemeCommandInput, UpdateThemeCommandOutput } from "../commands/UpdateThemeCommand"; import { AmplifyUIBuilderServiceException as __BaseException } from "../models/AmplifyUIBuilderServiceException"; import { + ActionParameters, Component, ComponentBindingPropertiesValue, ComponentBindingPropertiesValueProperties, ComponentChild, ComponentConditionProperty, ComponentDataConfiguration, + ComponentEvent, ComponentProperty, ComponentPropertyBindingProperties, ComponentSummary, @@ -51,6 +53,7 @@ import { FormBindingElement, InternalServerException, InvalidParameterException, + MutationActionSetStateParameter, Predicate, RefreshTokenRequestBody, ResourceConflictException, @@ -329,6 +332,9 @@ export const 
serializeAws_restJson1ExportComponentsCommand = async ( } else { throw new Error("No value provided for input HTTP label: environmentName."); } + const query: any = { + ...(input.nextToken !== undefined && { nextToken: input.nextToken }), + }; let body: any; return new __HttpRequest({ protocol, @@ -337,6 +343,7 @@ export const serializeAws_restJson1ExportComponentsCommand = async ( method: "GET", headers, path: resolvedPath, + query, body, }); }; @@ -368,6 +375,9 @@ export const serializeAws_restJson1ExportThemesCommand = async ( } else { throw new Error("No value provided for input HTTP label: environmentName."); } + const query: any = { + ...(input.nextToken !== undefined && { nextToken: input.nextToken }), + }; let body: any; return new __HttpRequest({ protocol, @@ -376,6 +386,7 @@ export const serializeAws_restJson1ExportThemesCommand = async ( method: "GET", headers, path: resolvedPath, + query, body, }); }; @@ -980,11 +991,15 @@ export const deserializeAws_restJson1ExportComponentsCommand = async ( const contents: ExportComponentsCommandOutput = { $metadata: deserializeMetadata(output), entities: undefined, + nextToken: undefined, }; const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); if (data.entities !== undefined && data.entities !== null) { contents.entities = deserializeAws_restJson1ComponentList(data.entities, context); } + if (data.nextToken !== undefined && data.nextToken !== null) { + contents.nextToken = __expectString(data.nextToken); + } return Promise.resolve(contents); }; @@ -1027,11 +1042,15 @@ export const deserializeAws_restJson1ExportThemesCommand = async ( const contents: ExportThemesCommandOutput = { $metadata: deserializeMetadata(output), entities: undefined, + nextToken: undefined, }; const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); if (data.entities !== undefined && data.entities !== null) { 
contents.entities = deserializeAws_restJson1ThemeList(data.entities, context); } + if (data.nextToken !== undefined && data.nextToken !== null) { + contents.nextToken = __expectString(data.nextToken); + } return Promise.resolve(contents); }; @@ -1486,6 +1505,28 @@ const deserializeAws_restJson1ServiceQuotaExceededExceptionResponse = async ( return __decorateServiceException(exception, parsedOutput.body); }; +const serializeAws_restJson1ActionParameters = (input: ActionParameters, context: __SerdeContext): any => { + return { + ...(input.anchor !== undefined && + input.anchor !== null && { anchor: serializeAws_restJson1ComponentProperty(input.anchor, context) }), + ...(input.fields !== undefined && + input.fields !== null && { fields: serializeAws_restJson1ComponentProperties(input.fields, context) }), + ...(input.global !== undefined && + input.global !== null && { global: serializeAws_restJson1ComponentProperty(input.global, context) }), + ...(input.id !== undefined && + input.id !== null && { id: serializeAws_restJson1ComponentProperty(input.id, context) }), + ...(input.model !== undefined && input.model !== null && { model: input.model }), + ...(input.state !== undefined && + input.state !== null && { state: serializeAws_restJson1MutationActionSetStateParameter(input.state, context) }), + ...(input.target !== undefined && + input.target !== null && { target: serializeAws_restJson1ComponentProperty(input.target, context) }), + ...(input.type !== undefined && + input.type !== null && { type: serializeAws_restJson1ComponentProperty(input.type, context) }), + ...(input.url !== undefined && + input.url !== null && { url: serializeAws_restJson1ComponentProperty(input.url, context) }), + }; +}; + const serializeAws_restJson1ComponentBindingProperties = ( input: { [key: string]: ComponentBindingPropertiesValue }, context: __SerdeContext @@ -1539,6 +1580,8 @@ const serializeAws_restJson1ComponentChild = (input: ComponentChild, context: __ ...(input.children !== undefined 
&& input.children !== null && { children: serializeAws_restJson1ComponentChildList(input.children, context) }), ...(input.componentType !== undefined && input.componentType !== null && { componentType: input.componentType }), + ...(input.events !== undefined && + input.events !== null && { events: serializeAws_restJson1ComponentEvents(input.events, context) }), ...(input.name !== undefined && input.name !== null && { name: input.name }), ...(input.properties !== undefined && input.properties !== null && { @@ -1582,6 +1625,7 @@ const serializeAws_restJson1ComponentConditionProperty = ( input.else !== null && { else: serializeAws_restJson1ComponentProperty(input.else, context) }), ...(input.field !== undefined && input.field !== null && { field: input.field }), ...(input.operand !== undefined && input.operand !== null && { operand: input.operand }), + ...(input.operandType !== undefined && input.operandType !== null && { operandType: input.operandType }), ...(input.operator !== undefined && input.operator !== null && { operator: input.operator }), ...(input.property !== undefined && input.property !== null && { property: input.property }), ...(input.then !== undefined && @@ -1604,6 +1648,29 @@ const serializeAws_restJson1ComponentDataConfiguration = ( }; }; +const serializeAws_restJson1ComponentEvent = (input: ComponentEvent, context: __SerdeContext): any => { + return { + ...(input.action !== undefined && input.action !== null && { action: input.action }), + ...(input.parameters !== undefined && + input.parameters !== null && { parameters: serializeAws_restJson1ActionParameters(input.parameters, context) }), + }; +}; + +const serializeAws_restJson1ComponentEvents = ( + input: { [key: string]: ComponentEvent }, + context: __SerdeContext +): any => { + return Object.entries(input).reduce((acc: { [key: string]: any }, [key, value]: [string, any]) => { + if (value === null) { + return acc; + } + return { + ...acc, + [key]: serializeAws_restJson1ComponentEvent(value, 
context), + }; + }, {}); +}; + const serializeAws_restJson1ComponentOverrides = ( input: { [key: string]: { [key: string]: string } }, context: __SerdeContext @@ -1664,6 +1731,7 @@ const serializeAws_restJson1ComponentProperty = (input: ComponentProperty, conte context ), }), + ...(input.componentName !== undefined && input.componentName !== null && { componentName: input.componentName }), ...(input.concat !== undefined && input.concat !== null && { concat: serializeAws_restJson1ComponentPropertyList(input.concat, context) }), ...(input.condition !== undefined && @@ -1675,6 +1743,7 @@ const serializeAws_restJson1ComponentProperty = (input: ComponentProperty, conte ...(input.event !== undefined && input.event !== null && { event: input.event }), ...(input.importedValue !== undefined && input.importedValue !== null && { importedValue: input.importedValue }), ...(input.model !== undefined && input.model !== null && { model: input.model }), + ...(input.property !== undefined && input.property !== null && { property: input.property }), ...(input.type !== undefined && input.type !== null && { type: input.type }), ...(input.userAttribute !== undefined && input.userAttribute !== null && { userAttribute: input.userAttribute }), ...(input.value !== undefined && input.value !== null && { value: input.value }), @@ -1752,6 +1821,8 @@ const serializeAws_restJson1CreateComponentData = (input: CreateComponentData, c collectionProperties: serializeAws_restJson1ComponentCollectionProperties(input.collectionProperties, context), }), ...(input.componentType !== undefined && input.componentType !== null && { componentType: input.componentType }), + ...(input.events !== undefined && + input.events !== null && { events: serializeAws_restJson1ComponentEvents(input.events, context) }), ...(input.name !== undefined && input.name !== null && { name: input.name }), ...(input.overrides !== undefined && input.overrides !== null && { overrides: 
serializeAws_restJson1ComponentOverrides(input.overrides, context) }), @@ -1759,6 +1830,7 @@ const serializeAws_restJson1CreateComponentData = (input: CreateComponentData, c input.properties !== null && { properties: serializeAws_restJson1ComponentProperties(input.properties, context), }), + ...(input.schemaVersion !== undefined && input.schemaVersion !== null && { schemaVersion: input.schemaVersion }), ...(input.sourceId !== undefined && input.sourceId !== null && { sourceId: input.sourceId }), ...(input.tags !== undefined && input.tags !== null && { tags: serializeAws_restJson1Tags(input.tags, context) }), ...(input.variants !== undefined && @@ -1820,6 +1892,18 @@ const serializeAws_restJson1IdentifierList = (input: string[], context: __SerdeC }); }; +const serializeAws_restJson1MutationActionSetStateParameter = ( + input: MutationActionSetStateParameter, + context: __SerdeContext +): any => { + return { + ...(input.componentName !== undefined && input.componentName !== null && { componentName: input.componentName }), + ...(input.property !== undefined && input.property !== null && { property: input.property }), + ...(input.set !== undefined && + input.set !== null && { set: serializeAws_restJson1ComponentProperty(input.set, context) }), + }; +}; + const serializeAws_restJson1Predicate = (input: Predicate, context: __SerdeContext): any => { return { ...(input.and !== undefined && @@ -1921,6 +2005,8 @@ const serializeAws_restJson1UpdateComponentData = (input: UpdateComponentData, c collectionProperties: serializeAws_restJson1ComponentCollectionProperties(input.collectionProperties, context), }), ...(input.componentType !== undefined && input.componentType !== null && { componentType: input.componentType }), + ...(input.events !== undefined && + input.events !== null && { events: serializeAws_restJson1ComponentEvents(input.events, context) }), ...(input.id !== undefined && input.id !== null && { id: input.id }), ...(input.name !== undefined && input.name !== null 
&& { name: input.name }), ...(input.overrides !== undefined && @@ -1929,6 +2015,7 @@ const serializeAws_restJson1UpdateComponentData = (input: UpdateComponentData, c input.properties !== null && { properties: serializeAws_restJson1ComponentProperties(input.properties, context), }), + ...(input.schemaVersion !== undefined && input.schemaVersion !== null && { schemaVersion: input.schemaVersion }), ...(input.sourceId !== undefined && input.sourceId !== null && { sourceId: input.sourceId }), ...(input.variants !== undefined && input.variants !== null && { variants: serializeAws_restJson1ComponentVariants(input.variants, context) }), @@ -1946,6 +2033,44 @@ const serializeAws_restJson1UpdateThemeData = (input: UpdateThemeData, context: }; }; +const deserializeAws_restJson1ActionParameters = (output: any, context: __SerdeContext): ActionParameters => { + return { + anchor: + output.anchor !== undefined && output.anchor !== null + ? deserializeAws_restJson1ComponentProperty(output.anchor, context) + : undefined, + fields: + output.fields !== undefined && output.fields !== null + ? deserializeAws_restJson1ComponentProperties(output.fields, context) + : undefined, + global: + output.global !== undefined && output.global !== null + ? deserializeAws_restJson1ComponentProperty(output.global, context) + : undefined, + id: + output.id !== undefined && output.id !== null + ? deserializeAws_restJson1ComponentProperty(output.id, context) + : undefined, + model: __expectString(output.model), + state: + output.state !== undefined && output.state !== null + ? deserializeAws_restJson1MutationActionSetStateParameter(output.state, context) + : undefined, + target: + output.target !== undefined && output.target !== null + ? deserializeAws_restJson1ComponentProperty(output.target, context) + : undefined, + type: + output.type !== undefined && output.type !== null + ? 
deserializeAws_restJson1ComponentProperty(output.type, context) + : undefined, + url: + output.url !== undefined && output.url !== null + ? deserializeAws_restJson1ComponentProperty(output.url, context) + : undefined, + } as any; +}; + const deserializeAws_restJson1Component = (output: any, context: __SerdeContext): Component => { return { appId: __expectString(output.appId), @@ -1967,6 +2092,10 @@ const deserializeAws_restJson1Component = (output: any, context: __SerdeContext) ? __expectNonNull(__parseRfc3339DateTime(output.createdAt)) : undefined, environmentName: __expectString(output.environmentName), + events: + output.events !== undefined && output.events !== null + ? deserializeAws_restJson1ComponentEvents(output.events, context) + : undefined, id: __expectString(output.id), modifiedAt: output.modifiedAt !== undefined && output.modifiedAt !== null @@ -1981,6 +2110,7 @@ const deserializeAws_restJson1Component = (output: any, context: __SerdeContext) output.properties !== undefined && output.properties !== null ? deserializeAws_restJson1ComponentProperties(output.properties, context) : undefined, + schemaVersion: __expectString(output.schemaVersion), sourceId: __expectString(output.sourceId), tags: output.tags !== undefined && output.tags !== null @@ -2050,6 +2180,10 @@ const deserializeAws_restJson1ComponentChild = (output: any, context: __SerdeCon ? deserializeAws_restJson1ComponentChildList(output.children, context) : undefined, componentType: __expectString(output.componentType), + events: + output.events !== undefined && output.events !== null + ? 
deserializeAws_restJson1ComponentEvents(output.events, context) + : undefined, name: __expectString(output.name), properties: output.properties !== undefined && output.properties !== null @@ -2099,6 +2233,7 @@ const deserializeAws_restJson1ComponentConditionProperty = ( : undefined, field: __expectString(output.field), operand: __expectString(output.operand), + operandType: __expectString(output.operandType), operator: __expectString(output.operator), property: __expectString(output.property), then: @@ -2129,6 +2264,31 @@ const deserializeAws_restJson1ComponentDataConfiguration = ( } as any; }; +const deserializeAws_restJson1ComponentEvent = (output: any, context: __SerdeContext): ComponentEvent => { + return { + action: __expectString(output.action), + parameters: + output.parameters !== undefined && output.parameters !== null + ? deserializeAws_restJson1ActionParameters(output.parameters, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1ComponentEvents = ( + output: any, + context: __SerdeContext +): { [key: string]: ComponentEvent } => { + return Object.entries(output).reduce((acc: { [key: string]: ComponentEvent }, [key, value]: [string, any]) => { + if (value === null) { + return acc; + } + return { + ...acc, + [key]: deserializeAws_restJson1ComponentEvent(value, context), + }; + }, {}); +}; + const deserializeAws_restJson1ComponentList = (output: any, context: __SerdeContext): Component[] => { const retVal = (output || []) .filter((e: any) => e != null) @@ -2203,6 +2363,7 @@ const deserializeAws_restJson1ComponentProperty = (output: any, context: __Serde output.collectionBindingProperties !== undefined && output.collectionBindingProperties !== null ? deserializeAws_restJson1ComponentPropertyBindingProperties(output.collectionBindingProperties, context) : undefined, + componentName: __expectString(output.componentName), concat: output.concat !== undefined && output.concat !== null ? 
deserializeAws_restJson1ComponentPropertyList(output.concat, context) @@ -2216,6 +2377,7 @@ const deserializeAws_restJson1ComponentProperty = (output: any, context: __Serde event: __expectString(output.event), importedValue: __expectString(output.importedValue), model: __expectString(output.model), + property: __expectString(output.property), type: __expectString(output.type), userAttribute: __expectString(output.userAttribute), value: __expectString(output.value), @@ -2340,6 +2502,20 @@ const deserializeAws_restJson1IdentifierList = (output: any, context: __SerdeCon return retVal; }; +const deserializeAws_restJson1MutationActionSetStateParameter = ( + output: any, + context: __SerdeContext +): MutationActionSetStateParameter => { + return { + componentName: __expectString(output.componentName), + property: __expectString(output.property), + set: + output.set !== undefined && output.set !== null + ? deserializeAws_restJson1ComponentProperty(output.set, context) + : undefined, + } as any; +}; + const deserializeAws_restJson1Predicate = (output: any, context: __SerdeContext): Predicate => { return { and: diff --git a/clients/client-appflow/src/models/models_0.ts b/clients/client-appflow/src/models/models_0.ts index 4e92eb561b7b..1c97696f78e1 100644 --- a/clients/client-appflow/src/models/models_0.ts +++ b/clients/client-appflow/src/models/models_0.ts @@ -3660,6 +3660,34 @@ export namespace LookoutMetricsDestinationProperties { }); } +/** + *

The properties that Amazon AppFlow applies when you use Marketo as a flow destination.

+ */ +export interface MarketoDestinationProperties { + /** + *

The object specified in the Marketo flow destination.

+ */ + object: string | undefined; + + /** + *

The settings that determine how Amazon AppFlow handles an error when placing data in the + * destination. For example, this setting would determine if the flow should fail after one + * insertion error, or continue and attempt to insert every record regardless of the initial + * failure. ErrorHandlingConfig is a part of the destination connector details. + *

+ */ + errorHandlingConfig?: ErrorHandlingConfig; +} + +export namespace MarketoDestinationProperties { + /** + * @internal + */ + export const filterSensitiveLog = (obj: MarketoDestinationProperties): any => ({ + ...obj, + }); +} + /** *

The properties that are applied when Amazon Redshift is being used as a destination. *

@@ -4124,6 +4152,11 @@ export interface DestinationConnectorProperties { */ Zendesk?: ZendeskDestinationProperties; + /** + *

The properties required to query Marketo.

+ */ + Marketo?: MarketoDestinationProperties; + /** *

The properties that are required to query the custom Connector.

*/ diff --git a/clients/client-appflow/src/protocols/Aws_restJson1.ts b/clients/client-appflow/src/protocols/Aws_restJson1.ts index b06faa1914f5..c8c94db02475 100644 --- a/clients/client-appflow/src/protocols/Aws_restJson1.ts +++ b/clients/client-appflow/src/protocols/Aws_restJson1.ts @@ -142,6 +142,7 @@ import { LookoutMetricsDestinationProperties, MarketoConnectorProfileCredentials, MarketoConnectorProfileProperties, + MarketoDestinationProperties, MarketoMetadata, MarketoSourceProperties, OAuth2Credentials, @@ -2752,6 +2753,10 @@ const serializeAws_restJson1DestinationConnectorProperties = ( input.LookoutMetrics !== null && { LookoutMetrics: serializeAws_restJson1LookoutMetricsDestinationProperties(input.LookoutMetrics, context), }), + ...(input.Marketo !== undefined && + input.Marketo !== null && { + Marketo: serializeAws_restJson1MarketoDestinationProperties(input.Marketo, context), + }), ...(input.Redshift !== undefined && input.Redshift !== null && { Redshift: serializeAws_restJson1RedshiftDestinationProperties(input.Redshift, context), @@ -3015,6 +3020,19 @@ const serializeAws_restJson1MarketoConnectorProfileProperties = ( }; }; +const serializeAws_restJson1MarketoDestinationProperties = ( + input: MarketoDestinationProperties, + context: __SerdeContext +): any => { + return { + ...(input.errorHandlingConfig !== undefined && + input.errorHandlingConfig !== null && { + errorHandlingConfig: serializeAws_restJson1ErrorHandlingConfig(input.errorHandlingConfig, context), + }), + ...(input.object !== undefined && input.object !== null && { object: input.object }), + }; +}; + const serializeAws_restJson1MarketoSourceProperties = ( input: MarketoSourceProperties, context: __SerdeContext @@ -4525,6 +4543,10 @@ const deserializeAws_restJson1DestinationConnectorProperties = ( output.LookoutMetrics !== undefined && output.LookoutMetrics !== null ? 
deserializeAws_restJson1LookoutMetricsDestinationProperties(output.LookoutMetrics, context) : undefined, + Marketo: + output.Marketo !== undefined && output.Marketo !== null + ? deserializeAws_restJson1MarketoDestinationProperties(output.Marketo, context) + : undefined, Redshift: output.Redshift !== undefined && output.Redshift !== null ? deserializeAws_restJson1RedshiftDestinationProperties(output.Redshift, context) @@ -4918,6 +4940,19 @@ const deserializeAws_restJson1MarketoConnectorProfileProperties = ( } as any; }; +const deserializeAws_restJson1MarketoDestinationProperties = ( + output: any, + context: __SerdeContext +): MarketoDestinationProperties => { + return { + errorHandlingConfig: + output.errorHandlingConfig !== undefined && output.errorHandlingConfig !== null + ? deserializeAws_restJson1ErrorHandlingConfig(output.errorHandlingConfig, context) + : undefined, + object: __expectString(output.object), + } as any; +}; + const deserializeAws_restJson1MarketoMetadata = (output: any, context: __SerdeContext): MarketoMetadata => { return {} as any; }; diff --git a/clients/client-apprunner/src/endpoints.ts b/clients/client-apprunner/src/endpoints.ts index 9a055bee25aa..a83c5122d048 100644 --- a/clients/client-apprunner/src/endpoints.ts +++ b/clients/client-apprunner/src/endpoints.ts @@ -1,7 +1,32 @@ import { getRegionInfo, PartitionHash, RegionHash } from "@aws-sdk/config-resolver"; import { RegionInfoProvider, RegionInfoProviderOptions } from "@aws-sdk/types"; -const regionHash: RegionHash = {}; +const regionHash: RegionHash = { + "us-east-1": { + variants: [ + { + hostname: "apprunner-fips.us-east-1.amazonaws.com", + tags: ["fips"], + }, + ], + }, + "us-east-2": { + variants: [ + { + hostname: "apprunner-fips.us-east-2.amazonaws.com", + tags: ["fips"], + }, + ], + }, + "us-west-2": { + variants: [ + { + hostname: "apprunner-fips.us-west-2.amazonaws.com", + tags: ["fips"], + }, + ], + }, +}; const partitionHash: PartitionHash = { aws: { @@ -22,6 +47,9 @@ const 
partitionHash: PartitionHash = { "eu-west-1", "eu-west-2", "eu-west-3", + "fips-us-east-1", + "fips-us-east-2", + "fips-us-west-2", "me-south-1", "sa-east-1", "us-east-1", diff --git a/clients/client-athena/src/Athena.ts b/clients/client-athena/src/Athena.ts index 7b9790cdbf16..b15c5e9fa1c6 100644 --- a/clients/client-athena/src/Athena.ts +++ b/clients/client-athena/src/Athena.ts @@ -153,6 +153,11 @@ import { UpdateDataCatalogCommandInput, UpdateDataCatalogCommandOutput, } from "./commands/UpdateDataCatalogCommand"; +import { + UpdateNamedQueryCommand, + UpdateNamedQueryCommandInput, + UpdateNamedQueryCommandOutput, +} from "./commands/UpdateNamedQueryCommand"; import { UpdatePreparedStatementCommand, UpdatePreparedStatementCommandInput, @@ -1264,6 +1269,38 @@ export class Athena extends AthenaClient { } } + /** + *

Updates a NamedQuery object. The database or workgroup cannot be updated.

+ */ + public updateNamedQuery( + args: UpdateNamedQueryCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public updateNamedQuery( + args: UpdateNamedQueryCommandInput, + cb: (err: any, data?: UpdateNamedQueryCommandOutput) => void + ): void; + public updateNamedQuery( + args: UpdateNamedQueryCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: UpdateNamedQueryCommandOutput) => void + ): void; + public updateNamedQuery( + args: UpdateNamedQueryCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: UpdateNamedQueryCommandOutput) => void), + cb?: (err: any, data?: UpdateNamedQueryCommandOutput) => void + ): Promise | void { + const command = new UpdateNamedQueryCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

Updates a prepared statement.

*/ diff --git a/clients/client-athena/src/AthenaClient.ts b/clients/client-athena/src/AthenaClient.ts index 09ea8780a054..f36790d67c51 100644 --- a/clients/client-athena/src/AthenaClient.ts +++ b/clients/client-athena/src/AthenaClient.ts @@ -107,6 +107,7 @@ import { StopQueryExecutionCommandInput, StopQueryExecutionCommandOutput } from import { TagResourceCommandInput, TagResourceCommandOutput } from "./commands/TagResourceCommand"; import { UntagResourceCommandInput, UntagResourceCommandOutput } from "./commands/UntagResourceCommand"; import { UpdateDataCatalogCommandInput, UpdateDataCatalogCommandOutput } from "./commands/UpdateDataCatalogCommand"; +import { UpdateNamedQueryCommandInput, UpdateNamedQueryCommandOutput } from "./commands/UpdateNamedQueryCommand"; import { UpdatePreparedStatementCommandInput, UpdatePreparedStatementCommandOutput, @@ -147,6 +148,7 @@ export type ServiceInputTypes = | TagResourceCommandInput | UntagResourceCommandInput | UpdateDataCatalogCommandInput + | UpdateNamedQueryCommandInput | UpdatePreparedStatementCommandInput | UpdateWorkGroupCommandInput; @@ -183,6 +185,7 @@ export type ServiceOutputTypes = | TagResourceCommandOutput | UntagResourceCommandOutput | UpdateDataCatalogCommandOutput + | UpdateNamedQueryCommandOutput | UpdatePreparedStatementCommandOutput | UpdateWorkGroupCommandOutput; diff --git a/clients/client-athena/src/commands/UpdateNamedQueryCommand.ts b/clients/client-athena/src/commands/UpdateNamedQueryCommand.ts new file mode 100644 index 000000000000..15808dbc2a1e --- /dev/null +++ b/clients/client-athena/src/commands/UpdateNamedQueryCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + 
MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { AthenaClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../AthenaClient"; +import { UpdateNamedQueryInput, UpdateNamedQueryOutput } from "../models/models_0"; +import { + deserializeAws_json1_1UpdateNamedQueryCommand, + serializeAws_json1_1UpdateNamedQueryCommand, +} from "../protocols/Aws_json1_1"; + +export interface UpdateNamedQueryCommandInput extends UpdateNamedQueryInput {} +export interface UpdateNamedQueryCommandOutput extends UpdateNamedQueryOutput, __MetadataBearer {} + +/** + *

Updates a NamedQuery object. The database or workgroup cannot be updated.

+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { AthenaClient, UpdateNamedQueryCommand } from "@aws-sdk/client-athena"; // ES Modules import + * // const { AthenaClient, UpdateNamedQueryCommand } = require("@aws-sdk/client-athena"); // CommonJS import + * const client = new AthenaClient(config); + * const command = new UpdateNamedQueryCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link UpdateNamedQueryCommandInput} for command's `input` shape. + * @see {@link UpdateNamedQueryCommandOutput} for command's `response` shape. + * @see {@link AthenaClientResolvedConfig | config} for AthenaClient's `config` shape. + * + */ +export class UpdateNamedQueryCommand extends $Command< + UpdateNamedQueryCommandInput, + UpdateNamedQueryCommandOutput, + AthenaClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: UpdateNamedQueryCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: AthenaClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "AthenaClient"; + const commandName = "UpdateNamedQueryCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: UpdateNamedQueryInput.filterSensitiveLog, + outputFilterSensitiveLog: UpdateNamedQueryOutput.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), 
+ handlerExecutionContext + ); + } + + private serialize(input: UpdateNamedQueryCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_1UpdateNamedQueryCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_json1_1UpdateNamedQueryCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-athena/src/commands/index.ts b/clients/client-athena/src/commands/index.ts index 30907772a33c..bc5321ac84cb 100644 --- a/clients/client-athena/src/commands/index.ts +++ b/clients/client-athena/src/commands/index.ts @@ -30,5 +30,6 @@ export * from "./StopQueryExecutionCommand"; export * from "./TagResourceCommand"; export * from "./UntagResourceCommand"; export * from "./UpdateDataCatalogCommand"; +export * from "./UpdateNamedQueryCommand"; export * from "./UpdatePreparedStatementCommand"; export * from "./UpdateWorkGroupCommand"; diff --git a/clients/client-athena/src/models/models_0.ts b/clients/client-athena/src/models/models_0.ts index 9ae1687fab58..390aa2fc63e2 100644 --- a/clients/client-athena/src/models/models_0.ts +++ b/clients/client-athena/src/models/models_0.ts @@ -3,6 +3,39 @@ import { MetadataBearer as $MetadataBearer } from "@aws-sdk/types"; import { AthenaServiceException as __BaseException } from "./AthenaServiceException"; +export enum S3AclOption { + BUCKET_OWNER_FULL_CONTROL = "BUCKET_OWNER_FULL_CONTROL", +} + +/** + *

Indicates that an Amazon S3 canned ACL should be set to control ownership of + * stored query results. When Athena stores query results in Amazon S3, + * the canned ACL is set with the x-amz-acl request header. For more + * information about S3 Object Ownership, see Object Ownership settings in the Amazon S3 User + * Guide.

+ */ +export interface AclConfiguration { + /** + *

The Amazon S3 canned ACL that Athena should specify when storing + * query results. Currently the only supported canned ACL is + * BUCKET_OWNER_FULL_CONTROL. If a query runs in a workgroup and the + * workgroup overrides client-side settings, then the Amazon S3 canned ACL + * specified in the workgroup's settings is used for all queries that run in the workgroup. + * For more information about Amazon S3 canned ACLs, see Canned ACL in the Amazon S3 User + * Guide.

+ */ + S3AclOption: S3AclOption | string | undefined; +} + +export namespace AclConfiguration { + /** + * @internal + */ + export const filterSensitiveLog = (obj: AclConfiguration): any => ({ + ...obj, + }); +} + export interface BatchGetNamedQueryInput { /** *

An array of query IDs.

@@ -20,8 +53,8 @@ export namespace BatchGetNamedQueryInput { } /** - *

A query, where QueryString is the list of SQL query statements that - * comprise the query.

+ *

A query, where QueryString contains the SQL statements that + * make up the query.

*/ export interface NamedQuery { /** @@ -40,7 +73,7 @@ export interface NamedQuery { Database: string | undefined; /** - *

The SQL query statements that comprise the query.

+ *

The SQL statements that make up the query.

*/ QueryString: string | undefined; @@ -317,6 +350,16 @@ export interface ResultConfiguration { * and Workgroup Settings Override Client-Side Settings.

*/ ExpectedBucketOwner?: string; + + /** + *

Indicates that an Amazon S3 canned ACL should be set to control ownership of + * stored query results. Currently the only supported canned ACL is + * BUCKET_OWNER_FULL_CONTROL. This is a client-side setting. If workgroup + * settings override client-side settings, then the query uses the ACL configuration that + * is specified for the workgroup, and also uses the location for storing query results + * specified in the workgroup. For more information, see WorkGroupConfiguration$EnforceWorkGroupConfiguration and Workgroup Settings Override Client-Side Settings.

+ */ + AclConfiguration?: AclConfiguration; } export namespace ResultConfiguration { @@ -401,7 +444,7 @@ export namespace QueryExecutionStatistics { * AthenaError feature provides standardized error information to help you * understand failed queries and take steps after a query failure occurs. * AthenaError includes an ErrorCategory field that specifies - * whether the cause of the failed query is due to system error, user error, or unknown + * whether the cause of the failed query is due to system error, user error, or other * error.

*/ export interface AthenaError { @@ -413,7 +456,7 @@ export interface AthenaError { *

* 2 - User

*

- * 3 - Unknown

+ * 3 - Other

*/ ErrorCategory?: number; @@ -1645,7 +1688,7 @@ export namespace Datum { } /** - *

The rows that comprise a query result table.

+ *

The rows that make up a query result table.

*/ export interface Row { /** @@ -1664,7 +1707,7 @@ export namespace Row { } /** - *

The metadata and rows that comprise a query result set. The metadata describes the + *

The metadata and rows that make up a query result set. The metadata describes the * column structure and data types. To return a ResultSet object, use GetQueryResults.

*/ export interface ResultSet { @@ -2751,6 +2794,48 @@ export namespace UpdateDataCatalogOutput { }); } +export interface UpdateNamedQueryInput { + /** + *

The unique identifier (UUID) of the query.

+ */ + NamedQueryId: string | undefined; + + /** + *

The name of the query.

+ */ + Name: string | undefined; + + /** + *

The query description.

+ */ + Description?: string; + + /** + *

The contents of the query with all query statements.

+ */ + QueryString: string | undefined; +} + +export namespace UpdateNamedQueryInput { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdateNamedQueryInput): any => ({ + ...obj, + }); +} + +export interface UpdateNamedQueryOutput {} + +export namespace UpdateNamedQueryOutput { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdateNamedQueryOutput): any => ({ + ...obj, + }); +} + export interface UpdatePreparedStatementInput { /** *

The name of the prepared statement.

@@ -2862,6 +2947,22 @@ export interface ResultConfigurationUpdates { * Client-Side Settings.

*/ RemoveExpectedBucketOwner?: boolean; + + /** + *

The ACL configuration for the query results.

+ */ + AclConfiguration?: AclConfiguration; + + /** + *

If set to true, indicates that the previously-specified ACL configuration + * for queries in this workgroup should be ignored and set to null. If set to + * false or not set, and a value is present in the + * AclConfiguration of ResultConfigurationUpdates, the + * AclConfiguration in the workgroup's ResultConfiguration is + * updated with the new value. For more information, see Workgroup Settings Override + * Client-Side Settings.

+ */ + RemoveAclConfiguration?: boolean; } export namespace ResultConfigurationUpdates { diff --git a/clients/client-athena/src/protocols/Aws_json1_1.ts b/clients/client-athena/src/protocols/Aws_json1_1.ts index f554fa3013dd..b9b68d47ebb2 100644 --- a/clients/client-athena/src/protocols/Aws_json1_1.ts +++ b/clients/client-athena/src/protocols/Aws_json1_1.ts @@ -73,6 +73,7 @@ import { StopQueryExecutionCommandInput, StopQueryExecutionCommandOutput } from import { TagResourceCommandInput, TagResourceCommandOutput } from "../commands/TagResourceCommand"; import { UntagResourceCommandInput, UntagResourceCommandOutput } from "../commands/UntagResourceCommand"; import { UpdateDataCatalogCommandInput, UpdateDataCatalogCommandOutput } from "../commands/UpdateDataCatalogCommand"; +import { UpdateNamedQueryCommandInput, UpdateNamedQueryCommandOutput } from "../commands/UpdateNamedQueryCommand"; import { UpdatePreparedStatementCommandInput, UpdatePreparedStatementCommandOutput, @@ -80,6 +81,7 @@ import { import { UpdateWorkGroupCommandInput, UpdateWorkGroupCommandOutput } from "../commands/UpdateWorkGroupCommand"; import { AthenaServiceException as __BaseException } from "../models/AthenaServiceException"; import { + AclConfiguration, AthenaError, BatchGetNamedQueryInput, BatchGetNamedQueryOutput, @@ -174,6 +176,8 @@ import { UntagResourceOutput, UpdateDataCatalogInput, UpdateDataCatalogOutput, + UpdateNamedQueryInput, + UpdateNamedQueryOutput, UpdatePreparedStatementInput, UpdatePreparedStatementOutput, UpdateWorkGroupInput, @@ -600,6 +604,19 @@ export const serializeAws_json1_1UpdateDataCatalogCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_json1_1UpdateNamedQueryCommand = async ( + input: UpdateNamedQueryCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.1", + "x-amz-target": "AmazonAthena.UpdateNamedQuery", + 
}; + let body: any; + body = JSON.stringify(serializeAws_json1_1UpdateNamedQueryInput(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_json1_1UpdatePreparedStatementCommand = async ( input: UpdatePreparedStatementCommandInput, context: __SerdeContext @@ -2128,6 +2145,52 @@ const deserializeAws_json1_1UpdateDataCatalogCommandError = async ( } }; +export const deserializeAws_json1_1UpdateNamedQueryCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_1UpdateNamedQueryCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_1UpdateNamedQueryOutput(data, context); + const response: UpdateNamedQueryCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_1UpdateNamedQueryCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __BaseException; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "InternalServerException": + case "com.amazonaws.athena#InternalServerException": + throw await deserializeAws_json1_1InternalServerExceptionResponse(parsedOutput, context); + case "InvalidRequestException": + case "com.amazonaws.athena#InvalidRequestException": + throw await deserializeAws_json1_1InvalidRequestExceptionResponse(parsedOutput, context); + default: + const parsedBody = parsedOutput.body; + response = new __BaseException({ + name: parsedBody.code || parsedBody.Code || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + }); + throw __decorateServiceException(response, 
parsedBody); + } +}; + export const deserializeAws_json1_1UpdatePreparedStatementCommand = async ( output: __HttpResponse, context: __SerdeContext @@ -2288,6 +2351,12 @@ const deserializeAws_json1_1TooManyRequestsExceptionResponse = async ( return __decorateServiceException(exception, body); }; +const serializeAws_json1_1AclConfiguration = (input: AclConfiguration, context: __SerdeContext): any => { + return { + ...(input.S3AclOption !== undefined && input.S3AclOption !== null && { S3AclOption: input.S3AclOption }), + }; +}; + const serializeAws_json1_1BatchGetNamedQueryInput = (input: BatchGetNamedQueryInput, context: __SerdeContext): any => { return { ...(input.NamedQueryIds !== undefined && @@ -2585,6 +2654,10 @@ const serializeAws_json1_1QueryExecutionIdList = (input: string[], context: __Se const serializeAws_json1_1ResultConfiguration = (input: ResultConfiguration, context: __SerdeContext): any => { return { + ...(input.AclConfiguration !== undefined && + input.AclConfiguration !== null && { + AclConfiguration: serializeAws_json1_1AclConfiguration(input.AclConfiguration, context), + }), ...(input.EncryptionConfiguration !== undefined && input.EncryptionConfiguration !== null && { EncryptionConfiguration: serializeAws_json1_1EncryptionConfiguration(input.EncryptionConfiguration, context), @@ -2601,6 +2674,10 @@ const serializeAws_json1_1ResultConfigurationUpdates = ( context: __SerdeContext ): any => { return { + ...(input.AclConfiguration !== undefined && + input.AclConfiguration !== null && { + AclConfiguration: serializeAws_json1_1AclConfiguration(input.AclConfiguration, context), + }), ...(input.EncryptionConfiguration !== undefined && input.EncryptionConfiguration !== null && { EncryptionConfiguration: serializeAws_json1_1EncryptionConfiguration(input.EncryptionConfiguration, context), @@ -2609,6 +2686,8 @@ const serializeAws_json1_1ResultConfigurationUpdates = ( input.ExpectedBucketOwner !== null && { ExpectedBucketOwner: input.ExpectedBucketOwner }), 
...(input.OutputLocation !== undefined && input.OutputLocation !== null && { OutputLocation: input.OutputLocation }), + ...(input.RemoveAclConfiguration !== undefined && + input.RemoveAclConfiguration !== null && { RemoveAclConfiguration: input.RemoveAclConfiguration }), ...(input.RemoveEncryptionConfiguration !== undefined && input.RemoveEncryptionConfiguration !== null && { RemoveEncryptionConfiguration: input.RemoveEncryptionConfiguration, @@ -2699,6 +2778,15 @@ const serializeAws_json1_1UpdateDataCatalogInput = (input: UpdateDataCatalogInpu }; }; +const serializeAws_json1_1UpdateNamedQueryInput = (input: UpdateNamedQueryInput, context: __SerdeContext): any => { + return { + ...(input.Description !== undefined && input.Description !== null && { Description: input.Description }), + ...(input.Name !== undefined && input.Name !== null && { Name: input.Name }), + ...(input.NamedQueryId !== undefined && input.NamedQueryId !== null && { NamedQueryId: input.NamedQueryId }), + ...(input.QueryString !== undefined && input.QueryString !== null && { QueryString: input.QueryString }), + }; +}; + const serializeAws_json1_1UpdatePreparedStatementInput = ( input: UpdatePreparedStatementInput, context: __SerdeContext @@ -2784,6 +2872,12 @@ const serializeAws_json1_1WorkGroupConfigurationUpdates = ( }; }; +const deserializeAws_json1_1AclConfiguration = (output: any, context: __SerdeContext): AclConfiguration => { + return { + S3AclOption: __expectString(output.S3AclOption), + } as any; +}; + const deserializeAws_json1_1AthenaError = (output: any, context: __SerdeContext): AthenaError => { return { ErrorCategory: __expectInt32(output.ErrorCategory), @@ -3421,6 +3515,10 @@ const deserializeAws_json1_1ResourceNotFoundException = ( const deserializeAws_json1_1ResultConfiguration = (output: any, context: __SerdeContext): ResultConfiguration => { return { + AclConfiguration: + output.AclConfiguration !== undefined && output.AclConfiguration !== null + ? 
deserializeAws_json1_1AclConfiguration(output.AclConfiguration, context) + : undefined, EncryptionConfiguration: output.EncryptionConfiguration !== undefined && output.EncryptionConfiguration !== null ? deserializeAws_json1_1EncryptionConfiguration(output.EncryptionConfiguration, context) @@ -3624,6 +3722,10 @@ const deserializeAws_json1_1UpdateDataCatalogOutput = ( return {} as any; }; +const deserializeAws_json1_1UpdateNamedQueryOutput = (output: any, context: __SerdeContext): UpdateNamedQueryOutput => { + return {} as any; +}; + const deserializeAws_json1_1UpdatePreparedStatementOutput = ( output: any, context: __SerdeContext diff --git a/clients/client-auto-scaling/src/endpoints.ts b/clients/client-auto-scaling/src/endpoints.ts index 0464377927dd..78e8d7ab0ed3 100644 --- a/clients/client-auto-scaling/src/endpoints.ts +++ b/clients/client-auto-scaling/src/endpoints.ts @@ -108,7 +108,7 @@ const partitionHash: PartitionHash = { tags: [], }, { - hostname: "autoscaling-fips.{region}.amazonaws.com", + hostname: "autoscaling.{region}.amazonaws.com", tags: ["fips"], }, { diff --git a/clients/client-chime-sdk-meetings/README.md b/clients/client-chime-sdk-meetings/README.md index 18a84ae16cc5..fbaef462172a 100644 --- a/clients/client-chime-sdk-meetings/README.md +++ b/clients/client-chime-sdk-meetings/README.md @@ -8,8 +8,7 @@ AWS SDK for JavaScript ChimeSDKMeetings Client for Node.js, Browser and React Native.

The Amazon Chime SDK meetings APIs in this section allow software developers to create Amazon Chime SDK meetings, set the AWS Regions for meetings, create and manage users, and send and -receive meeting notifications. For more information -about the meeting APIs, see Amazon Chime SDK meetings.

+receive meeting notifications. For more information about the meeting APIs, see Amazon Chime SDK meetings.

## Installing diff --git a/clients/client-chime-sdk-meetings/src/ChimeSDKMeetings.ts b/clients/client-chime-sdk-meetings/src/ChimeSDKMeetings.ts index 2b909fcbd01c..b1392a6bcebd 100644 --- a/clients/client-chime-sdk-meetings/src/ChimeSDKMeetings.ts +++ b/clients/client-chime-sdk-meetings/src/ChimeSDKMeetings.ts @@ -51,8 +51,7 @@ import { /** *

The Amazon Chime SDK meetings APIs in this section allow software developers to create Amazon Chime SDK meetings, set the AWS Regions for meetings, create and manage users, and send and - * receive meeting notifications. For more information - * about the meeting APIs, see Amazon Chime SDK meetings.

+ * receive meeting notifications. For more information about the meeting APIs, see Amazon Chime SDK meetings.

*/ export class ChimeSDKMeetings extends ChimeSDKMeetingsClient { /** diff --git a/clients/client-chime-sdk-meetings/src/ChimeSDKMeetingsClient.ts b/clients/client-chime-sdk-meetings/src/ChimeSDKMeetingsClient.ts index 31341ada678a..0a7e42b3e013 100644 --- a/clients/client-chime-sdk-meetings/src/ChimeSDKMeetingsClient.ts +++ b/clients/client-chime-sdk-meetings/src/ChimeSDKMeetingsClient.ts @@ -257,8 +257,7 @@ export interface ChimeSDKMeetingsClientResolvedConfig extends ChimeSDKMeetingsCl /** *

The Amazon Chime SDK meetings APIs in this section allow software developers to create Amazon Chime SDK meetings, set the AWS Regions for meetings, create and manage users, and send and - * receive meeting notifications. For more information - * about the meeting APIs, see Amazon Chime SDK meetings.

+ * receive meeting notifications. For more information about the meeting APIs, see Amazon Chime SDK meetings.

*/ export class ChimeSDKMeetingsClient extends __Client< __HttpHandlerOptions, diff --git a/clients/client-chime-sdk-meetings/src/models/models_0.ts b/clients/client-chime-sdk-meetings/src/models/models_0.ts index 3b6524d70e07..6e05a30ba9ea 100644 --- a/clients/client-chime-sdk-meetings/src/models/models_0.ts +++ b/clients/client-chime-sdk-meetings/src/models/models_0.ts @@ -52,7 +52,7 @@ export enum MeetingFeatureStatus { } /** - *

An optional category of meeting features that contains audio-specific configurations, such as operating parameters for Amazon Voice Focus.

+ *

An optional category of meeting features that contains audio-specific configurations, such as operating parameters for Amazon Voice Focus.

*/ export interface AudioFeatures { /** @@ -409,7 +409,7 @@ export class UnprocessableEntityException extends __BaseException { } /** - *

The configuration settings of the features available to a meeting.

+ *

The configuration settings of the features available to a meeting.

*/ export interface MeetingFeaturesConfiguration { /** @@ -470,43 +470,26 @@ export interface CreateMeetingRequest { * *

* Available values: - * af-south-1 - * , - * ap-northeast-1 - * , - * ap-northeast-2 - * , - * ap-south-1 - * , - * ap-southeast-1 - * , - * ap-southeast-2 - * , - * ca-central-1 - * , - * eu-central-1 - * , - * eu-north-1 - * , - * eu-south-1 - * , - * eu-west-1 - * , - * eu-west-2 - * , - * eu-west-3 - * , - * sa-east-1 - * , - * us-east-1 - * , - * us-east-2 - * , - * us-west-1 - * , - * us-west-2 - * . + * af-south-1, + * ap-northeast-1, + * ap-northeast-2, + * ap-south-1, + * ap-southeast-1, + * ap-southeast-2, + * ca-central-1, + * eu-central-1, + * eu-north-1, + * eu-south-1, + * eu-west-1, + * eu-west-2, + * eu-west-3, + * sa-east-1, + * us-east-1, + * us-east-2, + * us-west-1, + * us-west-2. *

+ *

Available values in AWS GovCloud (US) Regions: us-gov-east-1, us-gov-west-1.

*/ MediaRegion: string | undefined; @@ -626,6 +609,7 @@ export interface Meeting { * eu-west-1, eu-west-2, eu-west-3, * sa-east-1, us-east-1, us-east-2, * us-west-1, us-west-2.

+ *

Available values in AWS GovCloud (US) Regions: us-gov-east-1, us-gov-west-1.

*/ MediaRegion?: string; @@ -677,6 +661,29 @@ export interface CreateMeetingWithAttendeesRequest { /** *

The Region in which to create the meeting.

+ * + *

+ * Available values: + * af-south-1, + * ap-northeast-1, + * ap-northeast-2, + * ap-south-1, + * ap-southeast-1, + * ap-southeast-2, + * ca-central-1, + * eu-central-1, + * eu-north-1, + * eu-south-1, + * eu-west-1, + * eu-west-2, + * eu-west-3, + * sa-east-1, + * us-east-1, + * us-east-2, + * us-west-1, + * us-west-2. + *

+ *

Available values in AWS GovCloud (US) Regions: us-gov-east-1, us-gov-west-1.

*/ MediaRegion: string | undefined; @@ -1043,7 +1050,7 @@ export interface EngineTranscribeSettings { /** *

The language code specified for the Amazon Transcribe engine.

*/ - LanguageCode: TranscribeLanguageCode | string | undefined; + LanguageCode?: TranscribeLanguageCode | string; /** *

The filtering method passed to Amazon Transcribe.

@@ -1090,11 +1097,12 @@ export interface EngineTranscribeSettings { /** *

Lists the PII entity types you want to identify or redact. To specify entity types, you must enable ContentIdentificationType or ContentRedactionType.

* - *

PIIEntityTypes must be comma-separated. The available values are: - * BANK_ACCOUNT_NUMBER, BANK_ROUTING, CREDIT_DEBIT_NUMBER, CREDIT_DEBIT_CVV, CREDIT_DEBIT_EXPIRY, PIN, EMAIL, - * ADDRESS, NAME, PHONE, SSN, and ALL.

+ *

+ * PIIEntityTypes must be comma-separated. The available values are: + * BANK_ACCOUNT_NUMBER, BANK_ROUTING, CREDIT_DEBIT_NUMBER, CREDIT_DEBIT_CVV, CREDIT_DEBIT_EXPIRY, PIN, EMAIL, + * ADDRESS, NAME, PHONE, SSN, and ALL.

* - *

+ *

* PiiEntityTypes is an optional parameter with a default value of ALL.

*/ PiiEntityTypes?: string; @@ -1103,6 +1111,21 @@ export interface EngineTranscribeSettings { *

The name of the language model used during transcription.

*/ LanguageModelName?: string; + + /** + *

Automatically identifies the language spoken in media files.

+ */ + IdentifyLanguage?: boolean; + + /** + *

Language codes for the languages that you want to identify. You must provide at least 2 codes.

+ */ + LanguageOptions?: string; + + /** + *

Language code for the preferred language.

+ */ + PreferredLanguage?: TranscribeLanguageCode | string; } export namespace EngineTranscribeSettings { diff --git a/clients/client-chime-sdk-meetings/src/protocols/Aws_restJson1.ts b/clients/client-chime-sdk-meetings/src/protocols/Aws_restJson1.ts index 3ff68e0081a2..30a51dad838e 100644 --- a/clients/client-chime-sdk-meetings/src/protocols/Aws_restJson1.ts +++ b/clients/client-chime-sdk-meetings/src/protocols/Aws_restJson1.ts @@ -1287,13 +1287,19 @@ const serializeAws_restJson1EngineTranscribeSettings = ( input.EnablePartialResultsStabilization !== null && { EnablePartialResultsStabilization: input.EnablePartialResultsStabilization, }), + ...(input.IdentifyLanguage !== undefined && + input.IdentifyLanguage !== null && { IdentifyLanguage: input.IdentifyLanguage }), ...(input.LanguageCode !== undefined && input.LanguageCode !== null && { LanguageCode: input.LanguageCode }), ...(input.LanguageModelName !== undefined && input.LanguageModelName !== null && { LanguageModelName: input.LanguageModelName }), + ...(input.LanguageOptions !== undefined && + input.LanguageOptions !== null && { LanguageOptions: input.LanguageOptions }), ...(input.PartialResultsStability !== undefined && input.PartialResultsStability !== null && { PartialResultsStability: input.PartialResultsStability }), ...(input.PiiEntityTypes !== undefined && input.PiiEntityTypes !== null && { PiiEntityTypes: input.PiiEntityTypes }), + ...(input.PreferredLanguage !== undefined && + input.PreferredLanguage !== null && { PreferredLanguage: input.PreferredLanguage }), ...(input.Region !== undefined && input.Region !== null && { Region: input.Region }), ...(input.VocabularyFilterMethod !== undefined && input.VocabularyFilterMethod !== null && { VocabularyFilterMethod: input.VocabularyFilterMethod }), diff --git a/clients/client-cloudtrail/src/CloudTrail.ts b/clients/client-cloudtrail/src/CloudTrail.ts index af0212b8f5e1..56b4bf44cf92 100644 --- a/clients/client-cloudtrail/src/CloudTrail.ts +++ 
b/clients/client-cloudtrail/src/CloudTrail.ts @@ -148,7 +148,7 @@ export class CloudTrail extends CloudTrailClient { } /** - *

Cancels a query if the query is not in a terminated state, such as CANCELLED, FAILED or FINISHED. You must specify an ARN value for EventDataStore. + *

Cancels a query if the query is not in a terminated state, such as CANCELLED, FAILED, TIMED_OUT, or FINISHED. You must specify an ARN value for EventDataStore. * The ID of the query that you want to cancel is also required. When you run CancelQuery, the query status might * show as CANCELLED even if the operation is not yet finished.

*/ @@ -648,7 +648,7 @@ export class CloudTrail extends CloudTrailClient { * EventDataStore. Optionally, to shorten the list of results, you can specify a time range, * formatted as timestamps, by adding StartTime and EndTime parameters, and a * QueryStatus value. Valid values for QueryStatus include QUEUED, RUNNING, - * FINISHED, FAILED, or CANCELLED.

+ * FINISHED, FAILED, TIMED_OUT, or CANCELLED.

*/ public listQueries(args: ListQueriesCommandInput, options?: __HttpHandlerOptions): Promise; public listQueries(args: ListQueriesCommandInput, cb: (err: any, data?: ListQueriesCommandOutput) => void): void; diff --git a/clients/client-cloudtrail/src/commands/CancelQueryCommand.ts b/clients/client-cloudtrail/src/commands/CancelQueryCommand.ts index 767926d2bd96..60a4ffec4ea1 100644 --- a/clients/client-cloudtrail/src/commands/CancelQueryCommand.ts +++ b/clients/client-cloudtrail/src/commands/CancelQueryCommand.ts @@ -22,7 +22,7 @@ export interface CancelQueryCommandInput extends CancelQueryRequest {} export interface CancelQueryCommandOutput extends CancelQueryResponse, __MetadataBearer {} /** - *

Cancels a query if the query is not in a terminated state, such as CANCELLED, FAILED or FINISHED. You must specify an ARN value for EventDataStore. + *

Cancels a query if the query is not in a terminated state, such as CANCELLED, FAILED, TIMED_OUT, or FINISHED. You must specify an ARN value for EventDataStore. * The ID of the query that you want to cancel is also required. When you run CancelQuery, the query status might * show as CANCELLED even if the operation is not yet finished.

* @example diff --git a/clients/client-cloudtrail/src/commands/ListQueriesCommand.ts b/clients/client-cloudtrail/src/commands/ListQueriesCommand.ts index 7d3ecda1b295..50f64eaced12 100644 --- a/clients/client-cloudtrail/src/commands/ListQueriesCommand.ts +++ b/clients/client-cloudtrail/src/commands/ListQueriesCommand.ts @@ -26,7 +26,7 @@ export interface ListQueriesCommandOutput extends ListQueriesResponse, __Metadat * EventDataStore. Optionally, to shorten the list of results, you can specify a time range, * formatted as timestamps, by adding StartTime and EndTime parameters, and a * QueryStatus value. Valid values for QueryStatus include QUEUED, RUNNING, - * FINISHED, FAILED, or CANCELLED.

+ * FINISHED, FAILED, TIMED_OUT, or CANCELLED.

* @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-cloudtrail/src/endpoints.ts b/clients/client-cloudtrail/src/endpoints.ts index f736c71ff03a..030185eb0561 100644 --- a/clients/client-cloudtrail/src/endpoints.ts +++ b/clients/client-cloudtrail/src/endpoints.ts @@ -22,19 +22,17 @@ const regionHash: RegionHash = { variants: [ { hostname: "cloudtrail.us-gov-east-1.amazonaws.com", - tags: [], + tags: ["fips"], }, ], - signingRegion: "us-gov-east-1", }, "us-gov-west-1": { variants: [ { hostname: "cloudtrail.us-gov-west-1.amazonaws.com", - tags: [], + tags: ["fips"], }, ], - signingRegion: "us-gov-west-1", }, "us-west-1": { variants: [ @@ -155,7 +153,7 @@ const partitionHash: PartitionHash = { ], }, "aws-us-gov": { - regions: ["us-gov-east-1", "us-gov-west-1"], + regions: ["fips-us-gov-east-1", "fips-us-gov-west-1", "us-gov-east-1", "us-gov-west-1"], regionRegex: "^us\\-gov\\-\\w+\\-\\d+$", variants: [ { diff --git a/clients/client-cloudtrail/src/models/models_0.ts b/clients/client-cloudtrail/src/models/models_0.ts index e68ed923f28e..3c04bae4d83b 100644 --- a/clients/client-cloudtrail/src/models/models_0.ts +++ b/clients/client-cloudtrail/src/models/models_0.ts @@ -751,6 +751,7 @@ export enum QueryStatus { FINISHED = "FINISHED", QUEUED = "QUEUED", RUNNING = "RUNNING", + TIMED_OUT = "TIMED_OUT", } export interface CancelQueryResponse { @@ -800,7 +801,7 @@ export class EventDataStoreARNInvalidException extends __BaseException { } /** - *

The specified query cannot be canceled because it is in the FINISHED, FAILED, or + *

The specified query cannot be canceled because it is in the FINISHED, FAILED, TIMED_OUT, or * CANCELLED state.

*/ export class InactiveQueryException extends __BaseException { @@ -1997,6 +1998,12 @@ export interface QueryStatisticsForDescribeQuery { */ EventsScanned?: number; + /** + *

The total bytes that the query scanned in the event data store. This value matches the number of + * bytes for which your account is billed for the query, unless the query is still running.

+ */ + BytesScanned?: number; + /** *

The query's run time, in milliseconds.

*/ @@ -2030,7 +2037,7 @@ export interface DescribeQueryResponse { /** *

The status of a query. Values for QueryStatus include QUEUED, RUNNING, - * FINISHED, FAILED, or CANCELLED + * FINISHED, FAILED, TIMED_OUT, or CANCELLED *

*/ QueryStatus?: QueryStatus | string; @@ -2771,6 +2778,12 @@ export interface QueryStatistics { *

The total number of results returned by a query.

*/ TotalResultsCount?: number; + + /** + *

The total bytes that the query scanned in the event data store. This value matches the number of + * bytes for which your account is billed for the query, unless the query is still running.

+ */ + BytesScanned?: number; } export namespace QueryStatistics { @@ -2785,7 +2798,7 @@ export namespace QueryStatistics { export interface GetQueryResultsResponse { /** *

The status of the query. Values include QUEUED, RUNNING, FINISHED, FAILED, - * or CANCELLED.

+ * TIMED_OUT, or CANCELLED.

*/ QueryStatus?: QueryStatus | string; @@ -3368,7 +3381,7 @@ export interface ListQueriesRequest { /** *

The status of queries that you want to return in results. Valid values for QueryStatus include QUEUED, RUNNING, - * FINISHED, FAILED, or CANCELLED.

+ * FINISHED, FAILED, TIMED_OUT, or CANCELLED.

*/ QueryStatus?: QueryStatus | string; } @@ -3393,7 +3406,7 @@ export interface Query { /** *

The status of the query. This can be QUEUED, RUNNING, FINISHED, FAILED, - * or CANCELLED.

+ * TIMED_OUT, or CANCELLED.

*/ QueryStatus?: QueryStatus | string; diff --git a/clients/client-cloudtrail/src/protocols/Aws_json1_1.ts b/clients/client-cloudtrail/src/protocols/Aws_json1_1.ts index 71f0c34381d4..58c35bf4db48 100644 --- a/clients/client-cloudtrail/src/protocols/Aws_json1_1.ts +++ b/clients/client-cloudtrail/src/protocols/Aws_json1_1.ts @@ -4912,6 +4912,7 @@ const deserializeAws_json1_1QueryResultRows = (output: any, context: __SerdeCont const deserializeAws_json1_1QueryStatistics = (output: any, context: __SerdeContext): QueryStatistics => { return { + BytesScanned: __expectLong(output.BytesScanned), ResultsCount: __expectInt32(output.ResultsCount), TotalResultsCount: __expectInt32(output.TotalResultsCount), } as any; @@ -4922,6 +4923,7 @@ const deserializeAws_json1_1QueryStatisticsForDescribeQuery = ( context: __SerdeContext ): QueryStatisticsForDescribeQuery => { return { + BytesScanned: __expectLong(output.BytesScanned), CreationTime: output.CreationTime !== undefined && output.CreationTime !== null ? 
__expectNonNull(__parseEpochTimestamp(__expectNumber(output.CreationTime))) diff --git a/clients/client-cloudwatch-events/src/endpoints.ts b/clients/client-cloudwatch-events/src/endpoints.ts index ead0160761ed..d420f4722c13 100644 --- a/clients/client-cloudwatch-events/src/endpoints.ts +++ b/clients/client-cloudwatch-events/src/endpoints.ts @@ -22,19 +22,17 @@ const regionHash: RegionHash = { variants: [ { hostname: "events.us-gov-east-1.amazonaws.com", - tags: [], + tags: ["fips"], }, ], - signingRegion: "us-gov-east-1", }, "us-gov-west-1": { variants: [ { hostname: "events.us-gov-west-1.amazonaws.com", - tags: [], + tags: ["fips"], }, ], - signingRegion: "us-gov-west-1", }, "us-west-1": { variants: [ @@ -155,7 +153,7 @@ const partitionHash: PartitionHash = { ], }, "aws-us-gov": { - regions: ["us-gov-east-1", "us-gov-west-1"], + regions: ["fips-us-gov-east-1", "fips-us-gov-west-1", "us-gov-east-1", "us-gov-west-1"], regionRegex: "^us\\-gov\\-\\w+\\-\\d+$", variants: [ { diff --git a/clients/client-cloudwatch-logs/src/endpoints.ts b/clients/client-cloudwatch-logs/src/endpoints.ts index 9ccc59bbf3cc..b7fd7af15468 100644 --- a/clients/client-cloudwatch-logs/src/endpoints.ts +++ b/clients/client-cloudwatch-logs/src/endpoints.ts @@ -22,19 +22,17 @@ const regionHash: RegionHash = { variants: [ { hostname: "logs.us-gov-east-1.amazonaws.com", - tags: [], + tags: ["fips"], }, ], - signingRegion: "us-gov-east-1", }, "us-gov-west-1": { variants: [ { hostname: "logs.us-gov-west-1.amazonaws.com", - tags: [], + tags: ["fips"], }, ], - signingRegion: "us-gov-west-1", }, "us-west-1": { variants: [ @@ -155,7 +153,7 @@ const partitionHash: PartitionHash = { ], }, "aws-us-gov": { - regions: ["us-gov-east-1", "us-gov-west-1"], + regions: ["fips-us-gov-east-1", "fips-us-gov-west-1", "us-gov-east-1", "us-gov-west-1"], regionRegex: "^us\\-gov\\-\\w+\\-\\d+$", variants: [ { diff --git a/clients/client-comprehend/src/Comprehend.ts b/clients/client-comprehend/src/Comprehend.ts index 
caaeb6c85e9d..405fe07670d7 100644 --- a/clients/client-comprehend/src/Comprehend.ts +++ b/clients/client-comprehend/src/Comprehend.ts @@ -125,6 +125,11 @@ import { DescribeSentimentDetectionJobCommandInput, DescribeSentimentDetectionJobCommandOutput, } from "./commands/DescribeSentimentDetectionJobCommand"; +import { + DescribeTargetedSentimentDetectionJobCommand, + DescribeTargetedSentimentDetectionJobCommandInput, + DescribeTargetedSentimentDetectionJobCommandOutput, +} from "./commands/DescribeTargetedSentimentDetectionJobCommand"; import { DescribeTopicsDetectionJobCommand, DescribeTopicsDetectionJobCommandInput, @@ -226,6 +231,11 @@ import { ListTagsForResourceCommandInput, ListTagsForResourceCommandOutput, } from "./commands/ListTagsForResourceCommand"; +import { + ListTargetedSentimentDetectionJobsCommand, + ListTargetedSentimentDetectionJobsCommandInput, + ListTargetedSentimentDetectionJobsCommandOutput, +} from "./commands/ListTargetedSentimentDetectionJobsCommand"; import { ListTopicsDetectionJobsCommand, ListTopicsDetectionJobsCommandInput, @@ -271,6 +281,11 @@ import { StartSentimentDetectionJobCommandInput, StartSentimentDetectionJobCommandOutput, } from "./commands/StartSentimentDetectionJobCommand"; +import { + StartTargetedSentimentDetectionJobCommand, + StartTargetedSentimentDetectionJobCommandInput, + StartTargetedSentimentDetectionJobCommandOutput, +} from "./commands/StartTargetedSentimentDetectionJobCommand"; import { StartTopicsDetectionJobCommand, StartTopicsDetectionJobCommandInput, @@ -306,6 +321,11 @@ import { StopSentimentDetectionJobCommandInput, StopSentimentDetectionJobCommandOutput, } from "./commands/StopSentimentDetectionJobCommand"; +import { + StopTargetedSentimentDetectionJobCommand, + StopTargetedSentimentDetectionJobCommandInput, + StopTargetedSentimentDetectionJobCommandOutput, +} from "./commands/StopTargetedSentimentDetectionJobCommand"; import { StopTrainingDocumentClassifierCommand, 
StopTrainingDocumentClassifierCommandInput, @@ -1174,6 +1194,41 @@ export class Comprehend extends ComprehendClient { } } + /** + *

Gets the properties associated with a targeted sentiment detection job. Use this operation + * to get the status of the job.

+ */ + public describeTargetedSentimentDetectionJob( + args: DescribeTargetedSentimentDetectionJobCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public describeTargetedSentimentDetectionJob( + args: DescribeTargetedSentimentDetectionJobCommandInput, + cb: (err: any, data?: DescribeTargetedSentimentDetectionJobCommandOutput) => void + ): void; + public describeTargetedSentimentDetectionJob( + args: DescribeTargetedSentimentDetectionJobCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: DescribeTargetedSentimentDetectionJobCommandOutput) => void + ): void; + public describeTargetedSentimentDetectionJob( + args: DescribeTargetedSentimentDetectionJobCommandInput, + optionsOrCb?: + | __HttpHandlerOptions + | ((err: any, data?: DescribeTargetedSentimentDetectionJobCommandOutput) => void), + cb?: (err: any, data?: DescribeTargetedSentimentDetectionJobCommandOutput) => void + ): Promise | void { + const command = new DescribeTargetedSentimentDetectionJobCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

Gets the properties associated with a topic detection job. Use this operation to get * the status of a detection job.

@@ -1853,6 +1908,38 @@ export class Comprehend extends ComprehendClient { } } + /** + *

Gets a list of targeted sentiment detection jobs that you have submitted.

+ */ + public listTargetedSentimentDetectionJobs( + args: ListTargetedSentimentDetectionJobsCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public listTargetedSentimentDetectionJobs( + args: ListTargetedSentimentDetectionJobsCommandInput, + cb: (err: any, data?: ListTargetedSentimentDetectionJobsCommandOutput) => void + ): void; + public listTargetedSentimentDetectionJobs( + args: ListTargetedSentimentDetectionJobsCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ListTargetedSentimentDetectionJobsCommandOutput) => void + ): void; + public listTargetedSentimentDetectionJobs( + args: ListTargetedSentimentDetectionJobsCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: ListTargetedSentimentDetectionJobsCommandOutput) => void), + cb?: (err: any, data?: ListTargetedSentimentDetectionJobsCommandOutput) => void + ): Promise | void { + const command = new ListTargetedSentimentDetectionJobsCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

Gets a list of the topic detection jobs that you have submitted.

*/ @@ -2121,7 +2208,7 @@ export class Comprehend extends ComprehendClient { } /** - *

Starts an asynchronous sentiment detection job for a collection of documents. use the + *

Starts an asynchronous sentiment detection job for a collection of documents. Use the * operation to track the status of a * job.

*/ @@ -2154,6 +2241,40 @@ export class Comprehend extends ComprehendClient { } } + /** + *

Starts an asynchronous targeted sentiment detection job for a collection of documents. Use the + * operation to track the status of a + * job.

+ */ + public startTargetedSentimentDetectionJob( + args: StartTargetedSentimentDetectionJobCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public startTargetedSentimentDetectionJob( + args: StartTargetedSentimentDetectionJobCommandInput, + cb: (err: any, data?: StartTargetedSentimentDetectionJobCommandOutput) => void + ): void; + public startTargetedSentimentDetectionJob( + args: StartTargetedSentimentDetectionJobCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: StartTargetedSentimentDetectionJobCommandOutput) => void + ): void; + public startTargetedSentimentDetectionJob( + args: StartTargetedSentimentDetectionJobCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: StartTargetedSentimentDetectionJobCommandOutput) => void), + cb?: (err: any, data?: StartTargetedSentimentDetectionJobCommandOutput) => void + ): Promise | void { + const command = new StartTargetedSentimentDetectionJobCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

Starts an asynchronous topic detection job. Use the * DescribeTopicDetectionJob operation to track the status of a job.

@@ -2415,6 +2536,47 @@ export class Comprehend extends ComprehendClient { } } + /** + *

Stops a targeted sentiment detection job in progress.

+ *

If the job state is IN_PROGRESS the job is marked for termination and put + * into the STOP_REQUESTED state. If the job completes before it can be stopped, it + * is put into the COMPLETED state; otherwise the job is stopped and put into the + * STOPPED state.

+ *

If the job is in the COMPLETED or FAILED state when you call the + * StopTargetedSentimentDetectionJob operation, the operation returns a 400 + * Internal Request Exception.

+ *

When a job is stopped, any documents already processed are written to the output + * location.

+ */ + public stopTargetedSentimentDetectionJob( + args: StopTargetedSentimentDetectionJobCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public stopTargetedSentimentDetectionJob( + args: StopTargetedSentimentDetectionJobCommandInput, + cb: (err: any, data?: StopTargetedSentimentDetectionJobCommandOutput) => void + ): void; + public stopTargetedSentimentDetectionJob( + args: StopTargetedSentimentDetectionJobCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: StopTargetedSentimentDetectionJobCommandOutput) => void + ): void; + public stopTargetedSentimentDetectionJob( + args: StopTargetedSentimentDetectionJobCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: StopTargetedSentimentDetectionJobCommandOutput) => void), + cb?: (err: any, data?: StopTargetedSentimentDetectionJobCommandOutput) => void + ): Promise | void { + const command = new StopTargetedSentimentDetectionJobCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

Stops a document classifier training job while in progress.

*

If the training job state is TRAINING, the job is marked for termination and diff --git a/clients/client-comprehend/src/ComprehendClient.ts b/clients/client-comprehend/src/ComprehendClient.ts index 989fb74a8540..9ebfe1c243a8 100644 --- a/clients/client-comprehend/src/ComprehendClient.ts +++ b/clients/client-comprehend/src/ComprehendClient.ts @@ -136,6 +136,10 @@ import { DescribeSentimentDetectionJobCommandInput, DescribeSentimentDetectionJobCommandOutput, } from "./commands/DescribeSentimentDetectionJobCommand"; +import { + DescribeTargetedSentimentDetectionJobCommandInput, + DescribeTargetedSentimentDetectionJobCommandOutput, +} from "./commands/DescribeTargetedSentimentDetectionJobCommand"; import { DescribeTopicsDetectionJobCommandInput, DescribeTopicsDetectionJobCommandOutput, @@ -199,6 +203,10 @@ import { ListTagsForResourceCommandInput, ListTagsForResourceCommandOutput, } from "./commands/ListTagsForResourceCommand"; +import { + ListTargetedSentimentDetectionJobsCommandInput, + ListTargetedSentimentDetectionJobsCommandOutput, +} from "./commands/ListTargetedSentimentDetectionJobsCommand"; import { ListTopicsDetectionJobsCommandInput, ListTopicsDetectionJobsCommandOutput, @@ -232,6 +240,10 @@ import { StartSentimentDetectionJobCommandInput, StartSentimentDetectionJobCommandOutput, } from "./commands/StartSentimentDetectionJobCommand"; +import { + StartTargetedSentimentDetectionJobCommandInput, + StartTargetedSentimentDetectionJobCommandOutput, +} from "./commands/StartTargetedSentimentDetectionJobCommand"; import { StartTopicsDetectionJobCommandInput, StartTopicsDetectionJobCommandOutput, @@ -260,6 +272,10 @@ import { StopSentimentDetectionJobCommandInput, StopSentimentDetectionJobCommandOutput, } from "./commands/StopSentimentDetectionJobCommand"; +import { + StopTargetedSentimentDetectionJobCommandInput, + StopTargetedSentimentDetectionJobCommandOutput, +} from "./commands/StopTargetedSentimentDetectionJobCommand"; import { 
StopTrainingDocumentClassifierCommandInput, StopTrainingDocumentClassifierCommandOutput, @@ -299,6 +315,7 @@ export type ServiceInputTypes = | DescribePiiEntitiesDetectionJobCommandInput | DescribeResourcePolicyCommandInput | DescribeSentimentDetectionJobCommandInput + | DescribeTargetedSentimentDetectionJobCommandInput | DescribeTopicsDetectionJobCommandInput | DetectDominantLanguageCommandInput | DetectEntitiesCommandInput @@ -320,6 +337,7 @@ export type ServiceInputTypes = | ListPiiEntitiesDetectionJobsCommandInput | ListSentimentDetectionJobsCommandInput | ListTagsForResourceCommandInput + | ListTargetedSentimentDetectionJobsCommandInput | ListTopicsDetectionJobsCommandInput | PutResourcePolicyCommandInput | StartDocumentClassificationJobCommandInput @@ -329,6 +347,7 @@ export type ServiceInputTypes = | StartKeyPhrasesDetectionJobCommandInput | StartPiiEntitiesDetectionJobCommandInput | StartSentimentDetectionJobCommandInput + | StartTargetedSentimentDetectionJobCommandInput | StartTopicsDetectionJobCommandInput | StopDominantLanguageDetectionJobCommandInput | StopEntitiesDetectionJobCommandInput @@ -336,6 +355,7 @@ export type ServiceInputTypes = | StopKeyPhrasesDetectionJobCommandInput | StopPiiEntitiesDetectionJobCommandInput | StopSentimentDetectionJobCommandInput + | StopTargetedSentimentDetectionJobCommandInput | StopTrainingDocumentClassifierCommandInput | StopTrainingEntityRecognizerCommandInput | TagResourceCommandInput @@ -368,6 +388,7 @@ export type ServiceOutputTypes = | DescribePiiEntitiesDetectionJobCommandOutput | DescribeResourcePolicyCommandOutput | DescribeSentimentDetectionJobCommandOutput + | DescribeTargetedSentimentDetectionJobCommandOutput | DescribeTopicsDetectionJobCommandOutput | DetectDominantLanguageCommandOutput | DetectEntitiesCommandOutput @@ -389,6 +410,7 @@ export type ServiceOutputTypes = | ListPiiEntitiesDetectionJobsCommandOutput | ListSentimentDetectionJobsCommandOutput | ListTagsForResourceCommandOutput + | 
ListTargetedSentimentDetectionJobsCommandOutput | ListTopicsDetectionJobsCommandOutput | PutResourcePolicyCommandOutput | StartDocumentClassificationJobCommandOutput @@ -398,6 +420,7 @@ export type ServiceOutputTypes = | StartKeyPhrasesDetectionJobCommandOutput | StartPiiEntitiesDetectionJobCommandOutput | StartSentimentDetectionJobCommandOutput + | StartTargetedSentimentDetectionJobCommandOutput | StartTopicsDetectionJobCommandOutput | StopDominantLanguageDetectionJobCommandOutput | StopEntitiesDetectionJobCommandOutput @@ -405,6 +428,7 @@ export type ServiceOutputTypes = | StopKeyPhrasesDetectionJobCommandOutput | StopPiiEntitiesDetectionJobCommandOutput | StopSentimentDetectionJobCommandOutput + | StopTargetedSentimentDetectionJobCommandOutput | StopTrainingDocumentClassifierCommandOutput | StopTrainingEntityRecognizerCommandOutput | TagResourceCommandOutput diff --git a/clients/client-comprehend/src/commands/DescribeTargetedSentimentDetectionJobCommand.ts b/clients/client-comprehend/src/commands/DescribeTargetedSentimentDetectionJobCommand.ts new file mode 100644 index 000000000000..5b9256941c2f --- /dev/null +++ b/clients/client-comprehend/src/commands/DescribeTargetedSentimentDetectionJobCommand.ts @@ -0,0 +1,108 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { ComprehendClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../ComprehendClient"; +import { + DescribeTargetedSentimentDetectionJobRequest, + DescribeTargetedSentimentDetectionJobResponse, +} from "../models/models_0"; +import { + 
deserializeAws_json1_1DescribeTargetedSentimentDetectionJobCommand, + serializeAws_json1_1DescribeTargetedSentimentDetectionJobCommand, +} from "../protocols/Aws_json1_1"; + +export interface DescribeTargetedSentimentDetectionJobCommandInput + extends DescribeTargetedSentimentDetectionJobRequest {} +export interface DescribeTargetedSentimentDetectionJobCommandOutput + extends DescribeTargetedSentimentDetectionJobResponse, + __MetadataBearer {} + +/** + *

Gets the properties associated with a targeted sentiment detection job. Use this operation + * to get the status of the job.

+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { ComprehendClient, DescribeTargetedSentimentDetectionJobCommand } from "@aws-sdk/client-comprehend"; // ES Modules import + * // const { ComprehendClient, DescribeTargetedSentimentDetectionJobCommand } = require("@aws-sdk/client-comprehend"); // CommonJS import + * const client = new ComprehendClient(config); + * const command = new DescribeTargetedSentimentDetectionJobCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DescribeTargetedSentimentDetectionJobCommandInput} for command's `input` shape. + * @see {@link DescribeTargetedSentimentDetectionJobCommandOutput} for command's `response` shape. + * @see {@link ComprehendClientResolvedConfig | config} for ComprehendClient's `config` shape. + * + */ +export class DescribeTargetedSentimentDetectionJobCommand extends $Command< + DescribeTargetedSentimentDetectionJobCommandInput, + DescribeTargetedSentimentDetectionJobCommandOutput, + ComprehendClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DescribeTargetedSentimentDetectionJobCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: ComprehendClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "ComprehendClient"; + const commandName = "DescribeTargetedSentimentDetectionJobCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: 
DescribeTargetedSentimentDetectionJobRequest.filterSensitiveLog, + outputFilterSensitiveLog: DescribeTargetedSentimentDetectionJobResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize( + input: DescribeTargetedSentimentDetectionJobCommandInput, + context: __SerdeContext + ): Promise<__HttpRequest> { + return serializeAws_json1_1DescribeTargetedSentimentDetectionJobCommand(input, context); + } + + private deserialize( + output: __HttpResponse, + context: __SerdeContext + ): Promise { + return deserializeAws_json1_1DescribeTargetedSentimentDetectionJobCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-comprehend/src/commands/ListTargetedSentimentDetectionJobsCommand.ts b/clients/client-comprehend/src/commands/ListTargetedSentimentDetectionJobsCommand.ts new file mode 100644 index 000000000000..6c867e406ff0 --- /dev/null +++ b/clients/client-comprehend/src/commands/ListTargetedSentimentDetectionJobsCommand.ts @@ -0,0 +1,106 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { ComprehendClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../ComprehendClient"; +import { + ListTargetedSentimentDetectionJobsRequest, + ListTargetedSentimentDetectionJobsResponse, +} from "../models/models_0"; +import { + 
deserializeAws_json1_1ListTargetedSentimentDetectionJobsCommand, + serializeAws_json1_1ListTargetedSentimentDetectionJobsCommand, +} from "../protocols/Aws_json1_1"; + +export interface ListTargetedSentimentDetectionJobsCommandInput extends ListTargetedSentimentDetectionJobsRequest {} +export interface ListTargetedSentimentDetectionJobsCommandOutput + extends ListTargetedSentimentDetectionJobsResponse, + __MetadataBearer {} + +/** + *

Gets a list of targeted sentiment detection jobs that you have submitted.

+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { ComprehendClient, ListTargetedSentimentDetectionJobsCommand } from "@aws-sdk/client-comprehend"; // ES Modules import + * // const { ComprehendClient, ListTargetedSentimentDetectionJobsCommand } = require("@aws-sdk/client-comprehend"); // CommonJS import + * const client = new ComprehendClient(config); + * const command = new ListTargetedSentimentDetectionJobsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListTargetedSentimentDetectionJobsCommandInput} for command's `input` shape. + * @see {@link ListTargetedSentimentDetectionJobsCommandOutput} for command's `response` shape. + * @see {@link ComprehendClientResolvedConfig | config} for ComprehendClient's `config` shape. + * + */ +export class ListTargetedSentimentDetectionJobsCommand extends $Command< + ListTargetedSentimentDetectionJobsCommandInput, + ListTargetedSentimentDetectionJobsCommandOutput, + ComprehendClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListTargetedSentimentDetectionJobsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: ComprehendClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "ComprehendClient"; + const commandName = "ListTargetedSentimentDetectionJobsCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ListTargetedSentimentDetectionJobsRequest.filterSensitiveLog, + 
outputFilterSensitiveLog: ListTargetedSentimentDetectionJobsResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize( + input: ListTargetedSentimentDetectionJobsCommandInput, + context: __SerdeContext + ): Promise<__HttpRequest> { + return serializeAws_json1_1ListTargetedSentimentDetectionJobsCommand(input, context); + } + + private deserialize( + output: __HttpResponse, + context: __SerdeContext + ): Promise { + return deserializeAws_json1_1ListTargetedSentimentDetectionJobsCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-comprehend/src/commands/StartSentimentDetectionJobCommand.ts b/clients/client-comprehend/src/commands/StartSentimentDetectionJobCommand.ts index d72e720480a7..a4d4244dfe00 100644 --- a/clients/client-comprehend/src/commands/StartSentimentDetectionJobCommand.ts +++ b/clients/client-comprehend/src/commands/StartSentimentDetectionJobCommand.ts @@ -22,7 +22,7 @@ export interface StartSentimentDetectionJobCommandInput extends StartSentimentDe export interface StartSentimentDetectionJobCommandOutput extends StartSentimentDetectionJobResponse, __MetadataBearer {} /** - *

Starts an asynchronous sentiment detection job for a collection of documents. use the + *

Starts an asynchronous sentiment detection job for a collection of documents. Use the * operation to track the status of a * job.

* @example diff --git a/clients/client-comprehend/src/commands/StartTargetedSentimentDetectionJobCommand.ts b/clients/client-comprehend/src/commands/StartTargetedSentimentDetectionJobCommand.ts new file mode 100644 index 000000000000..f5344a64da29 --- /dev/null +++ b/clients/client-comprehend/src/commands/StartTargetedSentimentDetectionJobCommand.ts @@ -0,0 +1,108 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { ComprehendClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../ComprehendClient"; +import { + StartTargetedSentimentDetectionJobRequest, + StartTargetedSentimentDetectionJobResponse, +} from "../models/models_0"; +import { + deserializeAws_json1_1StartTargetedSentimentDetectionJobCommand, + serializeAws_json1_1StartTargetedSentimentDetectionJobCommand, +} from "../protocols/Aws_json1_1"; + +export interface StartTargetedSentimentDetectionJobCommandInput extends StartTargetedSentimentDetectionJobRequest {} +export interface StartTargetedSentimentDetectionJobCommandOutput + extends StartTargetedSentimentDetectionJobResponse, + __MetadataBearer {} + +/** + *

Starts an asynchronous targeted sentiment detection job for a collection of documents. Use the + * operation to track the status of a + * job.

+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { ComprehendClient, StartTargetedSentimentDetectionJobCommand } from "@aws-sdk/client-comprehend"; // ES Modules import + * // const { ComprehendClient, StartTargetedSentimentDetectionJobCommand } = require("@aws-sdk/client-comprehend"); // CommonJS import + * const client = new ComprehendClient(config); + * const command = new StartTargetedSentimentDetectionJobCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link StartTargetedSentimentDetectionJobCommandInput} for command's `input` shape. + * @see {@link StartTargetedSentimentDetectionJobCommandOutput} for command's `response` shape. + * @see {@link ComprehendClientResolvedConfig | config} for ComprehendClient's `config` shape. + * + */ +export class StartTargetedSentimentDetectionJobCommand extends $Command< + StartTargetedSentimentDetectionJobCommandInput, + StartTargetedSentimentDetectionJobCommandOutput, + ComprehendClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: StartTargetedSentimentDetectionJobCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: ComprehendClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "ComprehendClient"; + const commandName = "StartTargetedSentimentDetectionJobCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: StartTargetedSentimentDetectionJobRequest.filterSensitiveLog, + 
outputFilterSensitiveLog: StartTargetedSentimentDetectionJobResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize( + input: StartTargetedSentimentDetectionJobCommandInput, + context: __SerdeContext + ): Promise<__HttpRequest> { + return serializeAws_json1_1StartTargetedSentimentDetectionJobCommand(input, context); + } + + private deserialize( + output: __HttpResponse, + context: __SerdeContext + ): Promise { + return deserializeAws_json1_1StartTargetedSentimentDetectionJobCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-comprehend/src/commands/StopTargetedSentimentDetectionJobCommand.ts b/clients/client-comprehend/src/commands/StopTargetedSentimentDetectionJobCommand.ts new file mode 100644 index 000000000000..40f673e147cb --- /dev/null +++ b/clients/client-comprehend/src/commands/StopTargetedSentimentDetectionJobCommand.ts @@ -0,0 +1,115 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { ComprehendClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../ComprehendClient"; +import { + StopTargetedSentimentDetectionJobRequest, + StopTargetedSentimentDetectionJobResponse, +} from "../models/models_0"; +import { + deserializeAws_json1_1StopTargetedSentimentDetectionJobCommand, + 
serializeAws_json1_1StopTargetedSentimentDetectionJobCommand, +} from "../protocols/Aws_json1_1"; + +export interface StopTargetedSentimentDetectionJobCommandInput extends StopTargetedSentimentDetectionJobRequest {} +export interface StopTargetedSentimentDetectionJobCommandOutput + extends StopTargetedSentimentDetectionJobResponse, + __MetadataBearer {} + +/** + *

Stops a targeted sentiment detection job in progress.

+ *

If the job state is IN_PROGRESS the job is marked for termination and put + * into the STOP_REQUESTED state. If the job completes before it can be stopped, it + * is put into the COMPLETED state; otherwise the job is stopped and put into the + * STOPPED state.

+ *

If the job is in the COMPLETED or FAILED state when you call the + * StopTargetedSentimentDetectionJob operation, the operation returns a 400 + * Internal Request Exception.

+ *

When a job is stopped, any documents already processed are written to the output + * location.

+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { ComprehendClient, StopTargetedSentimentDetectionJobCommand } from "@aws-sdk/client-comprehend"; // ES Modules import + * // const { ComprehendClient, StopTargetedSentimentDetectionJobCommand } = require("@aws-sdk/client-comprehend"); // CommonJS import + * const client = new ComprehendClient(config); + * const command = new StopTargetedSentimentDetectionJobCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link StopTargetedSentimentDetectionJobCommandInput} for command's `input` shape. + * @see {@link StopTargetedSentimentDetectionJobCommandOutput} for command's `response` shape. + * @see {@link ComprehendClientResolvedConfig | config} for ComprehendClient's `config` shape. + * + */ +export class StopTargetedSentimentDetectionJobCommand extends $Command< + StopTargetedSentimentDetectionJobCommandInput, + StopTargetedSentimentDetectionJobCommandOutput, + ComprehendClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: StopTargetedSentimentDetectionJobCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: ComprehendClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "ComprehendClient"; + const commandName = "StopTargetedSentimentDetectionJobCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: StopTargetedSentimentDetectionJobRequest.filterSensitiveLog, + outputFilterSensitiveLog: 
StopTargetedSentimentDetectionJobResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize( + input: StopTargetedSentimentDetectionJobCommandInput, + context: __SerdeContext + ): Promise<__HttpRequest> { + return serializeAws_json1_1StopTargetedSentimentDetectionJobCommand(input, context); + } + + private deserialize( + output: __HttpResponse, + context: __SerdeContext + ): Promise { + return deserializeAws_json1_1StopTargetedSentimentDetectionJobCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-comprehend/src/commands/index.ts b/clients/client-comprehend/src/commands/index.ts index 93f36d353514..a1dfe8f5d700 100644 --- a/clients/client-comprehend/src/commands/index.ts +++ b/clients/client-comprehend/src/commands/index.ts @@ -23,6 +23,7 @@ export * from "./DescribeKeyPhrasesDetectionJobCommand"; export * from "./DescribePiiEntitiesDetectionJobCommand"; export * from "./DescribeResourcePolicyCommand"; export * from "./DescribeSentimentDetectionJobCommand"; +export * from "./DescribeTargetedSentimentDetectionJobCommand"; export * from "./DescribeTopicsDetectionJobCommand"; export * from "./DetectDominantLanguageCommand"; export * from "./DetectEntitiesCommand"; @@ -44,6 +45,7 @@ export * from "./ListKeyPhrasesDetectionJobsCommand"; export * from "./ListPiiEntitiesDetectionJobsCommand"; export * from "./ListSentimentDetectionJobsCommand"; export * from "./ListTagsForResourceCommand"; +export * from "./ListTargetedSentimentDetectionJobsCommand"; export * from "./ListTopicsDetectionJobsCommand"; export * from "./PutResourcePolicyCommand"; export * from "./StartDocumentClassificationJobCommand"; @@ -53,6 +55,7 @@ export * from "./StartEventsDetectionJobCommand"; export * 
from "./StartKeyPhrasesDetectionJobCommand"; export * from "./StartPiiEntitiesDetectionJobCommand"; export * from "./StartSentimentDetectionJobCommand"; +export * from "./StartTargetedSentimentDetectionJobCommand"; export * from "./StartTopicsDetectionJobCommand"; export * from "./StopDominantLanguageDetectionJobCommand"; export * from "./StopEntitiesDetectionJobCommand"; @@ -60,6 +63,7 @@ export * from "./StopEventsDetectionJobCommand"; export * from "./StopKeyPhrasesDetectionJobCommand"; export * from "./StopPiiEntitiesDetectionJobCommand"; export * from "./StopSentimentDetectionJobCommand"; +export * from "./StopTargetedSentimentDetectionJobCommand"; export * from "./StopTrainingDocumentClassifierCommand"; export * from "./StopTrainingEntityRecognizerCommand"; export * from "./TagResourceCommand"; diff --git a/clients/client-comprehend/src/models/models_0.ts b/clients/client-comprehend/src/models/models_0.ts index 839a7f707ee8..cce38b65f298 100644 --- a/clients/client-comprehend/src/models/models_0.ts +++ b/clients/client-comprehend/src/models/models_0.ts @@ -2374,7 +2374,7 @@ export enum JobStatus { } /** - *

Provides configuration parameters for the output of topic detection jobs.

+ *

Provides configuration parameters for the output of inference jobs.

*

*/ export interface OutputDataConfig { @@ -2387,6 +2387,10 @@ export interface OutputDataConfig { * directory specific to the job. The S3Uri field contains the location of the * output file, called output.tar.gz. It is a compressed archive that contains the * ouput of the operation.

+ *

+ * For a PII entity detection job, the output file is plain text, not a compressed archive. + * The output file name is the same as the input file, with .out appended at the end. + *

*/ S3Uri: string | undefined; @@ -3265,7 +3269,10 @@ export interface EntityRecognizerEvaluationMetrics { /** *

A measure of how accurate the recognizer results are for the test data. It is derived from * the Precision and Recall values. The F1Score is the - * harmonic average of the two scores. The highest score is 1, and the worst score is 0.

+ * harmonic average of the two scores. For plain text entity recognizer models, the range is 0 to 100, + * where 100 is the best score. For PDF/Word entity recognizer models, the range is 0 to 1, + * where 1 is the best score. + *

*/ F1Score?: number; } @@ -3747,6 +3754,10 @@ export interface PiiOutputDataConfig { /** *

When you use the PiiOutputDataConfig object with asynchronous operations, * you specify the Amazon S3 location where you want to write the output data.

+ *

+ * For a PII entity detection job, the output file is plain text, not a compressed archive. + * The output file name is the same as the input file, with .out appended at the end. + *

*/ S3Uri: string | undefined; @@ -4101,6 +4112,145 @@ export namespace DescribeSentimentDetectionJobResponse { }); } +export interface DescribeTargetedSentimentDetectionJobRequest { + /** + *

The identifier that Amazon Comprehend generated for the job. The operation returns this identifier in its + * response.

+ */ + JobId: string | undefined; +} + +export namespace DescribeTargetedSentimentDetectionJobRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DescribeTargetedSentimentDetectionJobRequest): any => ({ + ...obj, + }); +} + +/** + *

Provides information about a targeted sentiment detection job.

+ */ +export interface TargetedSentimentDetectionJobProperties { + /** + *

The identifier assigned to the targeted sentiment detection job.

+ */ + JobId?: string; + + /** + *

The Amazon Resource Name (ARN) of the targeted sentiment detection job. It is a unique, fully + * qualified identifier for the job. It includes the AWS account, Region, and the job ID. The + * format of the ARN is as follows:

+ *

+ * arn:<partition>:comprehend:<region>:<account-id>:targeted-sentiment-detection-job/<job-id> + *

+ *

The following is an example job ARN:

+ *

+ * arn:aws:comprehend:us-west-2:111122223333:targeted-sentiment-detection-job/1234abcd12ab34cd56ef1234567890ab + *

+ */ + JobArn?: string; + + /** + *

The name that you assigned to the targeted sentiment detection job.

+ */ + JobName?: string; + + /** + *

The current status of the targeted sentiment detection job. If the status is FAILED, + * the Messages field shows the reason for the failure.

+ */ + JobStatus?: JobStatus | string; + + /** + *

A description of the status of a job.

+ */ + Message?: string; + + /** + *

The time that the targeted sentiment detection job was submitted for processing.

+ */ + SubmitTime?: Date; + + /** + *

The time that the targeted sentiment detection job ended.

+ */ + EndTime?: Date; + + /** + *

The input properties for an inference job.

+ */ + InputDataConfig?: InputDataConfig; + + /** + *

Provides configuration parameters for the output of inference jobs.

+ *

+ */ + OutputDataConfig?: OutputDataConfig; + + /** + *

The language code of the input documents.

+ */ + LanguageCode?: LanguageCode | string; + + /** + *

The Amazon Resource Name (ARN) that gives Amazon Comprehend read access to your input + * data.

+ */ + DataAccessRoleArn?: string; + + /** + *

ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt + * data on the storage volume attached to the ML compute instance(s) that process the + * targeted sentiment detection job. The VolumeKmsKeyId can be either of the following formats:

+ *
    + *
  • + *

    KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab" + *

    + *
  • + *
  • + *

    Amazon Resource Name (ARN) of a KMS Key: + * "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab" + *

    + *
  • + *
+ */ + VolumeKmsKeyId?: string; + + /** + *

Configuration parameters for an optional private Virtual Private Cloud (VPC) containing + * the resources you are using for the job. For more information, see Amazon + * VPC.

+ */ + VpcConfig?: VpcConfig; +} + +export namespace TargetedSentimentDetectionJobProperties { + /** + * @internal + */ + export const filterSensitiveLog = (obj: TargetedSentimentDetectionJobProperties): any => ({ + ...obj, + }); +} + +export interface DescribeTargetedSentimentDetectionJobResponse { + /** + *

An object that contains the properties associated with a targeted sentiment detection job.

+ */ + TargetedSentimentDetectionJobProperties?: TargetedSentimentDetectionJobProperties; +} + +export namespace DescribeTargetedSentimentDetectionJobResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DescribeTargetedSentimentDetectionJobResponse): any => ({ + ...obj, + }); +} + export interface DescribeTopicsDetectionJobRequest { /** *

The identifier assigned by the user to the detection job.

@@ -5741,6 +5891,94 @@ export namespace ListTagsForResourceResponse { }); } +/** + *

Provides information for filtering a list of targeted sentiment detection jobs. For more + information, see the operation.

+ */ +export interface TargetedSentimentDetectionJobFilter { + /** + *

Filters on the name of the job.

+ */ + JobName?: string; + + /** + *

Filters the list of jobs based on job status. Returns only jobs with the specified + * status.

+ */ + JobStatus?: JobStatus | string; + + /** + *

Filters the list of jobs based on the time that the job was submitted for processing. + * Returns only jobs submitted before the specified time. Jobs are returned in ascending order, + * oldest to newest.

+ */ + SubmitTimeBefore?: Date; + + /** + *

Filters the list of jobs based on the time that the job was submitted for processing. + * Returns only jobs submitted after the specified time. Jobs are returned in descending order, + * newest to oldest.

+ */ + SubmitTimeAfter?: Date; +} + +export namespace TargetedSentimentDetectionJobFilter { + /** + * @internal + */ + export const filterSensitiveLog = (obj: TargetedSentimentDetectionJobFilter): any => ({ + ...obj, + }); +} + +export interface ListTargetedSentimentDetectionJobsRequest { + /** + *

Filters the jobs that are returned. You can filter jobs on their name, status, or the date + * and time that they were submitted. You can only set one filter at a time.

+ */ + Filter?: TargetedSentimentDetectionJobFilter; + + /** + *

Identifies the next page of results to return.

+ */ + NextToken?: string; + + /** + *

The maximum number of results to return in each page. The default is 100.

+ */ + MaxResults?: number; +} + +export namespace ListTargetedSentimentDetectionJobsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListTargetedSentimentDetectionJobsRequest): any => ({ + ...obj, + }); +} + +export interface ListTargetedSentimentDetectionJobsResponse { + /** + *

A list containing the properties of each job that is returned.

+ */ + TargetedSentimentDetectionJobPropertiesList?: TargetedSentimentDetectionJobProperties[]; + + /** + *

Identifies the next page of results to return.

+ */ + NextToken?: string; +} + +export namespace ListTargetedSentimentDetectionJobsResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListTargetedSentimentDetectionJobsResponse): any => ({ + ...obj, + }); +} + /** *

Provides information for filtering topic detection jobs. For more information, see * .

@@ -6724,6 +6962,132 @@ export namespace StartSentimentDetectionJobResponse { }); } +export interface StartTargetedSentimentDetectionJobRequest { + /** + *

The input properties for an inference job.

+ */ + InputDataConfig: InputDataConfig | undefined; + + /** + *

Specifies where to send the output files.

+ */ + OutputDataConfig: OutputDataConfig | undefined; + + /** + *

The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that + * grants Amazon Comprehend read access to your input data. For more information, see Role-based permissions.

+ */ + DataAccessRoleArn: string | undefined; + + /** + *

The identifier of the job.

+ */ + JobName?: string; + + /** + *

The language of the input documents. You can specify any of the primary languages + * supported by Amazon Comprehend. All documents must be in the same language.

+ */ + LanguageCode: LanguageCode | string | undefined; + + /** + *

A unique identifier for the request. If you don't set the client request token, Amazon + * Comprehend generates one.

+ */ + ClientRequestToken?: string; + + /** + *

ID for the KMS key that Amazon Comprehend uses to encrypt + * data on the storage volume attached to the ML compute instance(s) that process the analysis + * job. The VolumeKmsKeyId can be either of the following formats:

+ *
    + *
  • + *

    KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab" + *

    + *
  • + *
  • + *

    Amazon Resource Name (ARN) of a KMS Key: + * "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab" + *

    + *
  • + *
+ */ + VolumeKmsKeyId?: string; + + /** + *

Configuration parameters for an optional private Virtual Private Cloud (VPC) containing + * the resources you are using for the job. For more information, see Amazon + * VPC.

+ */ + VpcConfig?: VpcConfig; + + /** + *

Tags to be associated with the targeted sentiment detection job. A tag is a key-value pair that + * adds metadata to a resource used by Amazon Comprehend. For example, a tag with "Sales" as the + * key might be added to a resource to indicate its use by the sales department.

+ */ + Tags?: Tag[]; +} + +export namespace StartTargetedSentimentDetectionJobRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: StartTargetedSentimentDetectionJobRequest): any => ({ + ...obj, + }); +} + +export interface StartTargetedSentimentDetectionJobResponse { + /** + *

The identifier generated for the job. To get the status of a job, use this identifier with + * the operation.

+ */ + JobId?: string; + + /** + *

The Amazon Resource Name (ARN) of the targeted sentiment detection job. It is a unique, fully + * qualified identifier for the job. It includes the AWS account, Region, and the job ID. The + * format of the ARN is as follows:

+ *

+ * arn:<partition>:comprehend:<region>:<account-id>:targeted-sentiment-detection-job/<job-id> + *

+ *

The following is an example job ARN:

+ *

+ * arn:aws:comprehend:us-west-2:111122223333:targeted-sentiment-detection-job/1234abcd12ab34cd56ef1234567890ab + *

+ */ + JobArn?: string; + + /** + *

The status of the job.

+ *
    + *
  • + *

    SUBMITTED - The job has been received and is queued for processing.

    + *
  • + *
  • + *

    IN_PROGRESS - Amazon Comprehend is processing the job.

    + *
  • + *
  • + *

    COMPLETED - The job was successfully completed and the output is available.

    + *
  • + *
  • + *

    FAILED - The job did not complete. To get details, use the operation.

    + *
  • + *
+ */ + JobStatus?: JobStatus | string; +} + +export namespace StartTargetedSentimentDetectionJobResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: StartTargetedSentimentDetectionJobResponse): any => ({ + ...obj, + }); +} + export interface StartTopicsDetectionJobRequest { /** *

Specifies the format and location of the input data for the job.

@@ -7083,6 +7447,45 @@ export namespace StopSentimentDetectionJobResponse { }); } +export interface StopTargetedSentimentDetectionJobRequest { + /** + *

The identifier of the targeted sentiment detection job to stop.

+ */ + JobId: string | undefined; +} + +export namespace StopTargetedSentimentDetectionJobRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: StopTargetedSentimentDetectionJobRequest): any => ({ + ...obj, + }); +} + +export interface StopTargetedSentimentDetectionJobResponse { + /** + *

The identifier of the targeted sentiment detection job to stop.

+ */ + JobId?: string; + + /** + *

Either STOP_REQUESTED if the job is currently running, or + * STOPPED if the job was previously stopped with the + * StopTargetedSentimentDetectionJob operation.

+ */ + JobStatus?: JobStatus | string; +} + +export namespace StopTargetedSentimentDetectionJobResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: StopTargetedSentimentDetectionJobResponse): any => ({ + ...obj, + }); +} + export interface StopTrainingDocumentClassifierRequest { /** *

The Amazon Resource Name (ARN) that identifies the document classifier currently being diff --git a/clients/client-comprehend/src/pagination/ListTargetedSentimentDetectionJobsPaginator.ts b/clients/client-comprehend/src/pagination/ListTargetedSentimentDetectionJobsPaginator.ts new file mode 100644 index 000000000000..f632f1b85e1e --- /dev/null +++ b/clients/client-comprehend/src/pagination/ListTargetedSentimentDetectionJobsPaginator.ts @@ -0,0 +1,59 @@ +import { Paginator } from "@aws-sdk/types"; + +import { + ListTargetedSentimentDetectionJobsCommand, + ListTargetedSentimentDetectionJobsCommandInput, + ListTargetedSentimentDetectionJobsCommandOutput, +} from "../commands/ListTargetedSentimentDetectionJobsCommand"; +import { Comprehend } from "../Comprehend"; +import { ComprehendClient } from "../ComprehendClient"; +import { ComprehendPaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: ComprehendClient, + input: ListTargetedSentimentDetectionJobsCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new ListTargetedSentimentDetectionJobsCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: Comprehend, + input: ListTargetedSentimentDetectionJobsCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.listTargetedSentimentDetectionJobs(input, ...args); +}; +export async function* paginateListTargetedSentimentDetectionJobs( + config: ComprehendPaginationConfiguration, + input: ListTargetedSentimentDetectionJobsCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.NextToken + let token: typeof input.NextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: ListTargetedSentimentDetectionJobsCommandOutput; + while (hasNext) { + input.NextToken = token; + input["MaxResults"] = config.pageSize; + if 
(config.client instanceof Comprehend) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof ComprehendClient) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected Comprehend | ComprehendClient"); + } + yield page; + token = page.NextToken; + hasNext = !!token; + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-comprehend/src/pagination/index.ts b/clients/client-comprehend/src/pagination/index.ts index 55d79c0e3fe5..38a62d992dc3 100644 --- a/clients/client-comprehend/src/pagination/index.ts +++ b/clients/client-comprehend/src/pagination/index.ts @@ -9,4 +9,5 @@ export * from "./ListEntityRecognizersPaginator"; export * from "./ListEventsDetectionJobsPaginator"; export * from "./ListKeyPhrasesDetectionJobsPaginator"; export * from "./ListSentimentDetectionJobsPaginator"; +export * from "./ListTargetedSentimentDetectionJobsPaginator"; export * from "./ListTopicsDetectionJobsPaginator"; diff --git a/clients/client-comprehend/src/protocols/Aws_json1_1.ts b/clients/client-comprehend/src/protocols/Aws_json1_1.ts index e3b086ef227d..47d9726b9fdb 100644 --- a/clients/client-comprehend/src/protocols/Aws_json1_1.ts +++ b/clients/client-comprehend/src/protocols/Aws_json1_1.ts @@ -102,6 +102,10 @@ import { DescribeSentimentDetectionJobCommandInput, DescribeSentimentDetectionJobCommandOutput, } from "../commands/DescribeSentimentDetectionJobCommand"; +import { + DescribeTargetedSentimentDetectionJobCommandInput, + DescribeTargetedSentimentDetectionJobCommandOutput, +} from "../commands/DescribeTargetedSentimentDetectionJobCommand"; import { DescribeTopicsDetectionJobCommandInput, DescribeTopicsDetectionJobCommandOutput, @@ -165,6 +169,10 @@ import { ListTagsForResourceCommandInput, ListTagsForResourceCommandOutput, } from "../commands/ListTagsForResourceCommand"; +import { + 
ListTargetedSentimentDetectionJobsCommandInput, + ListTargetedSentimentDetectionJobsCommandOutput, +} from "../commands/ListTargetedSentimentDetectionJobsCommand"; import { ListTopicsDetectionJobsCommandInput, ListTopicsDetectionJobsCommandOutput, @@ -198,6 +206,10 @@ import { StartSentimentDetectionJobCommandInput, StartSentimentDetectionJobCommandOutput, } from "../commands/StartSentimentDetectionJobCommand"; +import { + StartTargetedSentimentDetectionJobCommandInput, + StartTargetedSentimentDetectionJobCommandOutput, +} from "../commands/StartTargetedSentimentDetectionJobCommand"; import { StartTopicsDetectionJobCommandInput, StartTopicsDetectionJobCommandOutput, @@ -226,6 +238,10 @@ import { StopSentimentDetectionJobCommandInput, StopSentimentDetectionJobCommandOutput, } from "../commands/StopSentimentDetectionJobCommand"; +import { + StopTargetedSentimentDetectionJobCommandInput, + StopTargetedSentimentDetectionJobCommandOutput, +} from "../commands/StopTargetedSentimentDetectionJobCommand"; import { StopTrainingDocumentClassifierCommandInput, StopTrainingDocumentClassifierCommandOutput, @@ -300,6 +316,8 @@ import { DescribeResourcePolicyResponse, DescribeSentimentDetectionJobRequest, DescribeSentimentDetectionJobResponse, + DescribeTargetedSentimentDetectionJobRequest, + DescribeTargetedSentimentDetectionJobResponse, DescribeTopicsDetectionJobRequest, DescribeTopicsDetectionJobResponse, DetectDominantLanguageRequest, @@ -385,6 +403,8 @@ import { ListSentimentDetectionJobsResponse, ListTagsForResourceRequest, ListTagsForResourceResponse, + ListTargetedSentimentDetectionJobsRequest, + ListTargetedSentimentDetectionJobsResponse, ListTopicsDetectionJobsRequest, ListTopicsDetectionJobsResponse, OutputDataConfig, @@ -418,6 +438,8 @@ import { StartPiiEntitiesDetectionJobResponse, StartSentimentDetectionJobRequest, StartSentimentDetectionJobResponse, + StartTargetedSentimentDetectionJobRequest, + StartTargetedSentimentDetectionJobResponse, 
StartTopicsDetectionJobRequest, StartTopicsDetectionJobResponse, StopDominantLanguageDetectionJobRequest, @@ -432,6 +454,8 @@ import { StopPiiEntitiesDetectionJobResponse, StopSentimentDetectionJobRequest, StopSentimentDetectionJobResponse, + StopTargetedSentimentDetectionJobRequest, + StopTargetedSentimentDetectionJobResponse, StopTrainingDocumentClassifierRequest, StopTrainingDocumentClassifierResponse, StopTrainingEntityRecognizerRequest, @@ -440,6 +464,8 @@ import { Tag, TagResourceRequest, TagResourceResponse, + TargetedSentimentDetectionJobFilter, + TargetedSentimentDetectionJobProperties, TextSizeLimitExceededException, TooManyRequestsException, TooManyTagKeysException, @@ -779,6 +805,19 @@ export const serializeAws_json1_1DescribeSentimentDetectionJobCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_json1_1DescribeTargetedSentimentDetectionJobCommand = async ( + input: DescribeTargetedSentimentDetectionJobCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.1", + "x-amz-target": "Comprehend_20171127.DescribeTargetedSentimentDetectionJob", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_1DescribeTargetedSentimentDetectionJobRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_json1_1DescribeTopicsDetectionJobCommand = async ( input: DescribeTopicsDetectionJobCommandInput, context: __SerdeContext @@ -1052,6 +1091,19 @@ export const serializeAws_json1_1ListTagsForResourceCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_json1_1ListTargetedSentimentDetectionJobsCommand = async ( + input: ListTargetedSentimentDetectionJobsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": 
"application/x-amz-json-1.1", + "x-amz-target": "Comprehend_20171127.ListTargetedSentimentDetectionJobs", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_1ListTargetedSentimentDetectionJobsRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_json1_1ListTopicsDetectionJobsCommand = async ( input: ListTopicsDetectionJobsCommandInput, context: __SerdeContext @@ -1169,6 +1221,19 @@ export const serializeAws_json1_1StartSentimentDetectionJobCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_json1_1StartTargetedSentimentDetectionJobCommand = async ( + input: StartTargetedSentimentDetectionJobCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.1", + "x-amz-target": "Comprehend_20171127.StartTargetedSentimentDetectionJob", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_1StartTargetedSentimentDetectionJobRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_json1_1StartTopicsDetectionJobCommand = async ( input: StartTopicsDetectionJobCommandInput, context: __SerdeContext @@ -1260,6 +1325,19 @@ export const serializeAws_json1_1StopSentimentDetectionJobCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_json1_1StopTargetedSentimentDetectionJobCommand = async ( + input: StopTargetedSentimentDetectionJobCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.1", + "x-amz-target": "Comprehend_20171127.StopTargetedSentimentDetectionJob", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_1StopTargetedSentimentDetectionJobRequest(input, context)); + return 
buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_json1_1StopTrainingDocumentClassifierCommand = async ( input: StopTrainingDocumentClassifierCommandInput, context: __SerdeContext @@ -2682,6 +2760,58 @@ const deserializeAws_json1_1DescribeSentimentDetectionJobCommandError = async ( } }; +export const deserializeAws_json1_1DescribeTargetedSentimentDetectionJobCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_1DescribeTargetedSentimentDetectionJobCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_1DescribeTargetedSentimentDetectionJobResponse(data, context); + const response: DescribeTargetedSentimentDetectionJobCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_1DescribeTargetedSentimentDetectionJobCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __BaseException; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "InternalServerException": + case "com.amazonaws.comprehend#InternalServerException": + throw await deserializeAws_json1_1InternalServerExceptionResponse(parsedOutput, context); + case "InvalidRequestException": + case "com.amazonaws.comprehend#InvalidRequestException": + throw await deserializeAws_json1_1InvalidRequestExceptionResponse(parsedOutput, context); + case "JobNotFoundException": + case "com.amazonaws.comprehend#JobNotFoundException": + throw await deserializeAws_json1_1JobNotFoundExceptionResponse(parsedOutput, context); + case "TooManyRequestsException": + case 
"com.amazonaws.comprehend#TooManyRequestsException": + throw await deserializeAws_json1_1TooManyRequestsExceptionResponse(parsedOutput, context); + default: + const parsedBody = parsedOutput.body; + response = new __BaseException({ + name: parsedBody.code || parsedBody.Code || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + }); + throw __decorateServiceException(response, parsedBody); + } +}; + export const deserializeAws_json1_1DescribeTopicsDetectionJobCommand = async ( output: __HttpResponse, context: __SerdeContext @@ -3777,6 +3907,58 @@ const deserializeAws_json1_1ListTagsForResourceCommandError = async ( } }; +export const deserializeAws_json1_1ListTargetedSentimentDetectionJobsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_1ListTargetedSentimentDetectionJobsCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_1ListTargetedSentimentDetectionJobsResponse(data, context); + const response: ListTargetedSentimentDetectionJobsCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_1ListTargetedSentimentDetectionJobsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __BaseException; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "InternalServerException": + case "com.amazonaws.comprehend#InternalServerException": + throw await deserializeAws_json1_1InternalServerExceptionResponse(parsedOutput, context); + case "InvalidFilterException": + case "com.amazonaws.comprehend#InvalidFilterException": + throw await 
deserializeAws_json1_1InvalidFilterExceptionResponse(parsedOutput, context); + case "InvalidRequestException": + case "com.amazonaws.comprehend#InvalidRequestException": + throw await deserializeAws_json1_1InvalidRequestExceptionResponse(parsedOutput, context); + case "TooManyRequestsException": + case "com.amazonaws.comprehend#TooManyRequestsException": + throw await deserializeAws_json1_1TooManyRequestsExceptionResponse(parsedOutput, context); + default: + const parsedBody = parsedOutput.body; + response = new __BaseException({ + name: parsedBody.code || parsedBody.Code || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + }); + throw __decorateServiceException(response, parsedBody); + } +}; + export const deserializeAws_json1_1ListTopicsDetectionJobsCommand = async ( output: __HttpResponse, context: __SerdeContext @@ -4275,6 +4457,61 @@ const deserializeAws_json1_1StartSentimentDetectionJobCommandError = async ( } }; +export const deserializeAws_json1_1StartTargetedSentimentDetectionJobCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_1StartTargetedSentimentDetectionJobCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_1StartTargetedSentimentDetectionJobResponse(data, context); + const response: StartTargetedSentimentDetectionJobCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_1StartTargetedSentimentDetectionJobCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __BaseException; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch 
(errorCode) { + case "InternalServerException": + case "com.amazonaws.comprehend#InternalServerException": + throw await deserializeAws_json1_1InternalServerExceptionResponse(parsedOutput, context); + case "InvalidRequestException": + case "com.amazonaws.comprehend#InvalidRequestException": + throw await deserializeAws_json1_1InvalidRequestExceptionResponse(parsedOutput, context); + case "KmsKeyValidationException": + case "com.amazonaws.comprehend#KmsKeyValidationException": + throw await deserializeAws_json1_1KmsKeyValidationExceptionResponse(parsedOutput, context); + case "TooManyRequestsException": + case "com.amazonaws.comprehend#TooManyRequestsException": + throw await deserializeAws_json1_1TooManyRequestsExceptionResponse(parsedOutput, context); + case "TooManyTagsException": + case "com.amazonaws.comprehend#TooManyTagsException": + throw await deserializeAws_json1_1TooManyTagsExceptionResponse(parsedOutput, context); + default: + const parsedBody = parsedOutput.body; + response = new __BaseException({ + name: parsedBody.code || parsedBody.Code || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + }); + throw __decorateServiceException(response, parsedBody); + } +}; + export const deserializeAws_json1_1StartTopicsDetectionJobCommand = async ( output: __HttpResponse, context: __SerdeContext @@ -4624,6 +4861,55 @@ const deserializeAws_json1_1StopSentimentDetectionJobCommandError = async ( } }; +export const deserializeAws_json1_1StopTargetedSentimentDetectionJobCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_1StopTargetedSentimentDetectionJobCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_1StopTargetedSentimentDetectionJobResponse(data, context); + const response: StopTargetedSentimentDetectionJobCommandOutput = { + $metadata: 
deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_1StopTargetedSentimentDetectionJobCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __BaseException; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "InternalServerException": + case "com.amazonaws.comprehend#InternalServerException": + throw await deserializeAws_json1_1InternalServerExceptionResponse(parsedOutput, context); + case "InvalidRequestException": + case "com.amazonaws.comprehend#InvalidRequestException": + throw await deserializeAws_json1_1InvalidRequestExceptionResponse(parsedOutput, context); + case "JobNotFoundException": + case "com.amazonaws.comprehend#JobNotFoundException": + throw await deserializeAws_json1_1JobNotFoundExceptionResponse(parsedOutput, context); + default: + const parsedBody = parsedOutput.body; + response = new __BaseException({ + name: parsedBody.code || parsedBody.Code || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + }); + throw __decorateServiceException(response, parsedBody); + } +}; + export const deserializeAws_json1_1StopTrainingDocumentClassifierCommand = async ( output: __HttpResponse, context: __SerdeContext @@ -5424,6 +5710,15 @@ const serializeAws_json1_1DescribeSentimentDetectionJobRequest = ( }; }; +const serializeAws_json1_1DescribeTargetedSentimentDetectionJobRequest = ( + input: DescribeTargetedSentimentDetectionJobRequest, + context: __SerdeContext +): any => { + return { + ...(input.JobId !== undefined && input.JobId !== null && { JobId: input.JobId }), + }; +}; + const serializeAws_json1_1DescribeTopicsDetectionJobRequest = ( input: DescribeTopicsDetectionJobRequest, context: __SerdeContext @@ -5936,6 +6231,20 @@ const 
serializeAws_json1_1ListTagsForResourceRequest = ( }; }; +const serializeAws_json1_1ListTargetedSentimentDetectionJobsRequest = ( + input: ListTargetedSentimentDetectionJobsRequest, + context: __SerdeContext +): any => { + return { + ...(input.Filter !== undefined && + input.Filter !== null && { + Filter: serializeAws_json1_1TargetedSentimentDetectionJobFilter(input.Filter, context), + }), + ...(input.MaxResults !== undefined && input.MaxResults !== null && { MaxResults: input.MaxResults }), + ...(input.NextToken !== undefined && input.NextToken !== null && { NextToken: input.NextToken }), + }; +}; + const serializeAws_json1_1ListTopicsDetectionJobsRequest = ( input: ListTopicsDetectionJobsRequest, context: __SerdeContext @@ -6203,6 +6512,32 @@ const serializeAws_json1_1StartSentimentDetectionJobRequest = ( }; }; +const serializeAws_json1_1StartTargetedSentimentDetectionJobRequest = ( + input: StartTargetedSentimentDetectionJobRequest, + context: __SerdeContext +): any => { + return { + ClientRequestToken: input.ClientRequestToken ?? 
generateIdempotencyToken(), + ...(input.DataAccessRoleArn !== undefined && + input.DataAccessRoleArn !== null && { DataAccessRoleArn: input.DataAccessRoleArn }), + ...(input.InputDataConfig !== undefined && + input.InputDataConfig !== null && { + InputDataConfig: serializeAws_json1_1InputDataConfig(input.InputDataConfig, context), + }), + ...(input.JobName !== undefined && input.JobName !== null && { JobName: input.JobName }), + ...(input.LanguageCode !== undefined && input.LanguageCode !== null && { LanguageCode: input.LanguageCode }), + ...(input.OutputDataConfig !== undefined && + input.OutputDataConfig !== null && { + OutputDataConfig: serializeAws_json1_1OutputDataConfig(input.OutputDataConfig, context), + }), + ...(input.Tags !== undefined && input.Tags !== null && { Tags: serializeAws_json1_1TagList(input.Tags, context) }), + ...(input.VolumeKmsKeyId !== undefined && + input.VolumeKmsKeyId !== null && { VolumeKmsKeyId: input.VolumeKmsKeyId }), + ...(input.VpcConfig !== undefined && + input.VpcConfig !== null && { VpcConfig: serializeAws_json1_1VpcConfig(input.VpcConfig, context) }), + }; +}; + const serializeAws_json1_1StartTopicsDetectionJobRequest = ( input: StartTopicsDetectionJobRequest, context: __SerdeContext @@ -6284,6 +6619,15 @@ const serializeAws_json1_1StopSentimentDetectionJobRequest = ( }; }; +const serializeAws_json1_1StopTargetedSentimentDetectionJobRequest = ( + input: StopTargetedSentimentDetectionJobRequest, + context: __SerdeContext +): any => { + return { + ...(input.JobId !== undefined && input.JobId !== null && { JobId: input.JobId }), + }; +}; + const serializeAws_json1_1StopTrainingDocumentClassifierRequest = ( input: StopTrainingDocumentClassifierRequest, context: __SerdeContext @@ -6351,6 +6695,20 @@ const serializeAws_json1_1TagResourceRequest = (input: TagResourceRequest, conte }; }; +const serializeAws_json1_1TargetedSentimentDetectionJobFilter = ( + input: TargetedSentimentDetectionJobFilter, + context: __SerdeContext +): any => 
{ + return { + ...(input.JobName !== undefined && input.JobName !== null && { JobName: input.JobName }), + ...(input.JobStatus !== undefined && input.JobStatus !== null && { JobStatus: input.JobStatus }), + ...(input.SubmitTimeAfter !== undefined && + input.SubmitTimeAfter !== null && { SubmitTimeAfter: Math.round(input.SubmitTimeAfter.getTime() / 1000) }), + ...(input.SubmitTimeBefore !== undefined && + input.SubmitTimeBefore !== null && { SubmitTimeBefore: Math.round(input.SubmitTimeBefore.getTime() / 1000) }), + }; +}; + const serializeAws_json1_1TargetEventTypes = (input: string[], context: __SerdeContext): any => { return input .filter((e: any) => e != null) @@ -6867,6 +7225,22 @@ const deserializeAws_json1_1DescribeSentimentDetectionJobResponse = ( } as any; }; +const deserializeAws_json1_1DescribeTargetedSentimentDetectionJobResponse = ( + output: any, + context: __SerdeContext +): DescribeTargetedSentimentDetectionJobResponse => { + return { + TargetedSentimentDetectionJobProperties: + output.TargetedSentimentDetectionJobProperties !== undefined && + output.TargetedSentimentDetectionJobProperties !== null + ? deserializeAws_json1_1TargetedSentimentDetectionJobProperties( + output.TargetedSentimentDetectionJobProperties, + context + ) + : undefined, + } as any; +}; + const deserializeAws_json1_1DescribeTopicsDetectionJobResponse = ( output: any, context: __SerdeContext @@ -8119,6 +8493,23 @@ const deserializeAws_json1_1ListTagsForResourceResponse = ( } as any; }; +const deserializeAws_json1_1ListTargetedSentimentDetectionJobsResponse = ( + output: any, + context: __SerdeContext +): ListTargetedSentimentDetectionJobsResponse => { + return { + NextToken: __expectString(output.NextToken), + TargetedSentimentDetectionJobPropertiesList: + output.TargetedSentimentDetectionJobPropertiesList !== undefined && + output.TargetedSentimentDetectionJobPropertiesList !== null + ? 
deserializeAws_json1_1TargetedSentimentDetectionJobPropertiesList( + output.TargetedSentimentDetectionJobPropertiesList, + context + ) + : undefined, + } as any; +}; + const deserializeAws_json1_1ListTopicsDetectionJobsResponse = ( output: any, context: __SerdeContext @@ -8415,6 +8806,17 @@ const deserializeAws_json1_1StartSentimentDetectionJobResponse = ( } as any; }; +const deserializeAws_json1_1StartTargetedSentimentDetectionJobResponse = ( + output: any, + context: __SerdeContext +): StartTargetedSentimentDetectionJobResponse => { + return { + JobArn: __expectString(output.JobArn), + JobId: __expectString(output.JobId), + JobStatus: __expectString(output.JobStatus), + } as any; +}; + const deserializeAws_json1_1StartTopicsDetectionJobResponse = ( output: any, context: __SerdeContext @@ -8486,6 +8888,16 @@ const deserializeAws_json1_1StopSentimentDetectionJobResponse = ( } as any; }; +const deserializeAws_json1_1StopTargetedSentimentDetectionJobResponse = ( + output: any, + context: __SerdeContext +): StopTargetedSentimentDetectionJobResponse => { + return { + JobId: __expectString(output.JobId), + JobStatus: __expectString(output.JobStatus), + } as any; +}; + const deserializeAws_json1_1StopTrainingDocumentClassifierResponse = ( output: any, context: __SerdeContext @@ -8548,6 +8960,57 @@ const deserializeAws_json1_1TagResourceResponse = (output: any, context: __Serde return {} as any; }; +const deserializeAws_json1_1TargetedSentimentDetectionJobProperties = ( + output: any, + context: __SerdeContext +): TargetedSentimentDetectionJobProperties => { + return { + DataAccessRoleArn: __expectString(output.DataAccessRoleArn), + EndTime: + output.EndTime !== undefined && output.EndTime !== null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.EndTime))) + : undefined, + InputDataConfig: + output.InputDataConfig !== undefined && output.InputDataConfig !== null + ? 
deserializeAws_json1_1InputDataConfig(output.InputDataConfig, context) + : undefined, + JobArn: __expectString(output.JobArn), + JobId: __expectString(output.JobId), + JobName: __expectString(output.JobName), + JobStatus: __expectString(output.JobStatus), + LanguageCode: __expectString(output.LanguageCode), + Message: __expectString(output.Message), + OutputDataConfig: + output.OutputDataConfig !== undefined && output.OutputDataConfig !== null + ? deserializeAws_json1_1OutputDataConfig(output.OutputDataConfig, context) + : undefined, + SubmitTime: + output.SubmitTime !== undefined && output.SubmitTime !== null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.SubmitTime))) + : undefined, + VolumeKmsKeyId: __expectString(output.VolumeKmsKeyId), + VpcConfig: + output.VpcConfig !== undefined && output.VpcConfig !== null + ? deserializeAws_json1_1VpcConfig(output.VpcConfig, context) + : undefined, + } as any; +}; + +const deserializeAws_json1_1TargetedSentimentDetectionJobPropertiesList = ( + output: any, + context: __SerdeContext +): TargetedSentimentDetectionJobProperties[] => { + const retVal = (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_json1_1TargetedSentimentDetectionJobProperties(entry, context); + }); + return retVal; +}; + const deserializeAws_json1_1TargetEventTypes = (output: any, context: __SerdeContext): string[] => { const retVal = (output || []) .filter((e: any) => e != null) diff --git a/clients/client-connect/src/models/models_0.ts b/clients/client-connect/src/models/models_0.ts index 59748db9c821..c8753af0e6f3 100644 --- a/clients/client-connect/src/models/models_0.ts +++ b/clients/client-connect/src/models/models_0.ts @@ -489,6 +489,7 @@ export enum InstanceStorageResourceType { CHAT_TRANSCRIPTS = "CHAT_TRANSCRIPTS", CONTACT_TRACE_RECORDS = "CONTACT_TRACE_RECORDS", MEDIA_STREAMS = "MEDIA_STREAMS", + REAL_TIME_CONTACT_ANALYSIS_SEGMENTS 
= "REAL_TIME_CONTACT_ANALYSIS_SEGMENTS", SCHEDULED_REPORTS = "SCHEDULED_REPORTS", } diff --git a/clients/client-devops-guru/src/DevOpsGuru.ts b/clients/client-devops-guru/src/DevOpsGuru.ts index 97045d465e5d..080700d2a43f 100644 --- a/clients/client-devops-guru/src/DevOpsGuru.ts +++ b/clients/client-devops-guru/src/DevOpsGuru.ts @@ -20,6 +20,11 @@ import { DescribeAnomalyCommandInput, DescribeAnomalyCommandOutput, } from "./commands/DescribeAnomalyCommand"; +import { + DescribeEventSourcesConfigCommand, + DescribeEventSourcesConfigCommandInput, + DescribeEventSourcesConfigCommandOutput, +} from "./commands/DescribeEventSourcesConfigCommand"; import { DescribeFeedbackCommand, DescribeFeedbackCommandInput, @@ -112,6 +117,11 @@ import { StartCostEstimationCommandInput, StartCostEstimationCommandOutput, } from "./commands/StartCostEstimationCommand"; +import { + UpdateEventSourcesConfigCommand, + UpdateEventSourcesConfigCommandInput, + UpdateEventSourcesConfigCommandOutput, +} from "./commands/UpdateEventSourcesConfigCommand"; import { UpdateResourceCollectionCommand, UpdateResourceCollectionCommandInput, @@ -281,6 +291,38 @@ export class DevOpsGuru extends DevOpsGuruClient { } } + /** + *

This operation lists details about a DevOps Guru event source that is shared with your
 account.

+ */ + public describeEventSourcesConfig( + args: DescribeEventSourcesConfigCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public describeEventSourcesConfig( + args: DescribeEventSourcesConfigCommandInput, + cb: (err: any, data?: DescribeEventSourcesConfigCommandOutput) => void + ): void; + public describeEventSourcesConfig( + args: DescribeEventSourcesConfigCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: DescribeEventSourcesConfigCommandOutput) => void + ): void; + public describeEventSourcesConfig( + args: DescribeEventSourcesConfigCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: DescribeEventSourcesConfigCommandOutput) => void), + cb?: (err: any, data?: DescribeEventSourcesConfigCommandOutput) => void + ): Promise | void { + const command = new DescribeEventSourcesConfigCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

Returns the most recent feedback submitted in the current Amazon Web Services account and Region. *

@@ -882,12 +924,12 @@ export class DevOpsGuru extends DevOpsGuruClient { /** *

Returns a list of insights in your organization. You can specify which insights are * returned by their start time, one or more statuses (ONGOING, - * CLOSED, and CLOSED), one or more severities - * (LOW, MEDIUM, and HIGH), and type - * (REACTIVE or PROACTIVE).

+ * CLOSED, and CLOSED), one or more severities + * (LOW, MEDIUM, and HIGH), and type + * (REACTIVE or PROACTIVE).

*

Use the Filters parameter to specify status and severity search * parameters. Use the Type parameter to specify REACTIVE or - * PROACTIVE in your search.

+ * PROACTIVE in your search.

*/ public searchOrganizationInsights( args: SearchOrganizationInsightsCommandInput, @@ -951,6 +993,38 @@ export class DevOpsGuru extends DevOpsGuruClient { } } + /** + *

Updates the event source configuration.

+ */ + public updateEventSourcesConfig( + args: UpdateEventSourcesConfigCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public updateEventSourcesConfig( + args: UpdateEventSourcesConfigCommandInput, + cb: (err: any, data?: UpdateEventSourcesConfigCommandOutput) => void + ): void; + public updateEventSourcesConfig( + args: UpdateEventSourcesConfigCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: UpdateEventSourcesConfigCommandOutput) => void + ): void; + public updateEventSourcesConfig( + args: UpdateEventSourcesConfigCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: UpdateEventSourcesConfigCommandOutput) => void), + cb?: (err: any, data?: UpdateEventSourcesConfigCommandOutput) => void + ): Promise | void { + const command = new UpdateEventSourcesConfigCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

Updates the collection of resources that DevOps Guru analyzes. * The two types of Amazon Web Services resource collections supported are Amazon Web Services CloudFormation stacks and diff --git a/clients/client-devops-guru/src/DevOpsGuruClient.ts b/clients/client-devops-guru/src/DevOpsGuruClient.ts index 0984359cfc81..8e2f5350c256 100644 --- a/clients/client-devops-guru/src/DevOpsGuruClient.ts +++ b/clients/client-devops-guru/src/DevOpsGuruClient.ts @@ -64,6 +64,10 @@ import { DescribeAccountOverviewCommandOutput, } from "./commands/DescribeAccountOverviewCommand"; import { DescribeAnomalyCommandInput, DescribeAnomalyCommandOutput } from "./commands/DescribeAnomalyCommand"; +import { + DescribeEventSourcesConfigCommandInput, + DescribeEventSourcesConfigCommandOutput, +} from "./commands/DescribeEventSourcesConfigCommand"; import { DescribeFeedbackCommandInput, DescribeFeedbackCommandOutput } from "./commands/DescribeFeedbackCommand"; import { DescribeInsightCommandInput, DescribeInsightCommandOutput } from "./commands/DescribeInsightCommand"; import { @@ -123,6 +127,10 @@ import { StartCostEstimationCommandInput, StartCostEstimationCommandOutput, } from "./commands/StartCostEstimationCommand"; +import { + UpdateEventSourcesConfigCommandInput, + UpdateEventSourcesConfigCommandOutput, +} from "./commands/UpdateEventSourcesConfigCommand"; import { UpdateResourceCollectionCommandInput, UpdateResourceCollectionCommandOutput, @@ -138,6 +146,7 @@ export type ServiceInputTypes = | DescribeAccountHealthCommandInput | DescribeAccountOverviewCommandInput | DescribeAnomalyCommandInput + | DescribeEventSourcesConfigCommandInput | DescribeFeedbackCommandInput | DescribeInsightCommandInput | DescribeOrganizationHealthCommandInput @@ -158,6 +167,7 @@ export type ServiceInputTypes = | SearchInsightsCommandInput | SearchOrganizationInsightsCommandInput | StartCostEstimationCommandInput + | UpdateEventSourcesConfigCommandInput | UpdateResourceCollectionCommandInput | 
UpdateServiceIntegrationCommandInput; @@ -166,6 +176,7 @@ export type ServiceOutputTypes = | DescribeAccountHealthCommandOutput | DescribeAccountOverviewCommandOutput | DescribeAnomalyCommandOutput + | DescribeEventSourcesConfigCommandOutput | DescribeFeedbackCommandOutput | DescribeInsightCommandOutput | DescribeOrganizationHealthCommandOutput @@ -186,6 +197,7 @@ export type ServiceOutputTypes = | SearchInsightsCommandOutput | SearchOrganizationInsightsCommandOutput | StartCostEstimationCommandOutput + | UpdateEventSourcesConfigCommandOutput | UpdateResourceCollectionCommandOutput | UpdateServiceIntegrationCommandOutput; diff --git a/clients/client-devops-guru/src/commands/DescribeEventSourcesConfigCommand.ts b/clients/client-devops-guru/src/commands/DescribeEventSourcesConfigCommand.ts new file mode 100644 index 000000000000..93728a76c465 --- /dev/null +++ b/clients/client-devops-guru/src/commands/DescribeEventSourcesConfigCommand.ts @@ -0,0 +1,98 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { DevOpsGuruClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../DevOpsGuruClient"; +import { DescribeEventSourcesConfigRequest, DescribeEventSourcesConfigResponse } from "../models/models_0"; +import { + deserializeAws_restJson1DescribeEventSourcesConfigCommand, + serializeAws_restJson1DescribeEventSourcesConfigCommand, +} from "../protocols/Aws_restJson1"; + +export interface DescribeEventSourcesConfigCommandInput extends DescribeEventSourcesConfigRequest {} +export interface DescribeEventSourcesConfigCommandOutput extends 
DescribeEventSourcesConfigResponse, __MetadataBearer {} + +/** + *

This operation lists details about a DevOps Guru event source that is shared with your
 account.

+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { DevOpsGuruClient, DescribeEventSourcesConfigCommand } from "@aws-sdk/client-devops-guru"; // ES Modules import + * // const { DevOpsGuruClient, DescribeEventSourcesConfigCommand } = require("@aws-sdk/client-devops-guru"); // CommonJS import + * const client = new DevOpsGuruClient(config); + * const command = new DescribeEventSourcesConfigCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DescribeEventSourcesConfigCommandInput} for command's `input` shape. + * @see {@link DescribeEventSourcesConfigCommandOutput} for command's `response` shape. + * @see {@link DevOpsGuruClientResolvedConfig | config} for DevOpsGuruClient's `config` shape. + * + */ +export class DescribeEventSourcesConfigCommand extends $Command< + DescribeEventSourcesConfigCommandInput, + DescribeEventSourcesConfigCommandOutput, + DevOpsGuruClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DescribeEventSourcesConfigCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: DevOpsGuruClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "DevOpsGuruClient"; + const commandName = "DescribeEventSourcesConfigCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: DescribeEventSourcesConfigRequest.filterSensitiveLog, + outputFilterSensitiveLog: DescribeEventSourcesConfigResponse.filterSensitiveLog, + }; + const { 
requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: DescribeEventSourcesConfigCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1DescribeEventSourcesConfigCommand(input, context); + } + + private deserialize( + output: __HttpResponse, + context: __SerdeContext + ): Promise { + return deserializeAws_restJson1DescribeEventSourcesConfigCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-devops-guru/src/commands/SearchOrganizationInsightsCommand.ts b/clients/client-devops-guru/src/commands/SearchOrganizationInsightsCommand.ts index 6dd3afc75d8d..3b3af2d45e84 100644 --- a/clients/client-devops-guru/src/commands/SearchOrganizationInsightsCommand.ts +++ b/clients/client-devops-guru/src/commands/SearchOrganizationInsightsCommand.ts @@ -24,12 +24,12 @@ export interface SearchOrganizationInsightsCommandOutput extends SearchOrganizat /** *

Returns a list of insights in your organization. You can specify which insights are * returned by their start time, one or more statuses (ONGOING, - * CLOSED, and CLOSED), one or more severities - * (LOW, MEDIUM, and HIGH), and type - * (REACTIVE or PROACTIVE).

+ * CLOSED, and CLOSED), one or more severities + * (LOW, MEDIUM, and HIGH), and type + * (REACTIVE or PROACTIVE).

*

Use the Filters parameter to specify status and severity search * parameters. Use the Type parameter to specify REACTIVE or - * PROACTIVE in your search.

+ * PROACTIVE in your search.

* @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-devops-guru/src/commands/UpdateEventSourcesConfigCommand.ts b/clients/client-devops-guru/src/commands/UpdateEventSourcesConfigCommand.ts new file mode 100644 index 000000000000..4db12b10b963 --- /dev/null +++ b/clients/client-devops-guru/src/commands/UpdateEventSourcesConfigCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { DevOpsGuruClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../DevOpsGuruClient"; +import { UpdateEventSourcesConfigRequest, UpdateEventSourcesConfigResponse } from "../models/models_0"; +import { + deserializeAws_restJson1UpdateEventSourcesConfigCommand, + serializeAws_restJson1UpdateEventSourcesConfigCommand, +} from "../protocols/Aws_restJson1"; + +export interface UpdateEventSourcesConfigCommandInput extends UpdateEventSourcesConfigRequest {} +export interface UpdateEventSourcesConfigCommandOutput extends UpdateEventSourcesConfigResponse, __MetadataBearer {} + +/** + *

Updates the event source configuration.

+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { DevOpsGuruClient, UpdateEventSourcesConfigCommand } from "@aws-sdk/client-devops-guru"; // ES Modules import + * // const { DevOpsGuruClient, UpdateEventSourcesConfigCommand } = require("@aws-sdk/client-devops-guru"); // CommonJS import + * const client = new DevOpsGuruClient(config); + * const command = new UpdateEventSourcesConfigCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link UpdateEventSourcesConfigCommandInput} for command's `input` shape. + * @see {@link UpdateEventSourcesConfigCommandOutput} for command's `response` shape. + * @see {@link DevOpsGuruClientResolvedConfig | config} for DevOpsGuruClient's `config` shape. + * + */ +export class UpdateEventSourcesConfigCommand extends $Command< + UpdateEventSourcesConfigCommandInput, + UpdateEventSourcesConfigCommandOutput, + DevOpsGuruClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: UpdateEventSourcesConfigCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: DevOpsGuruClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "DevOpsGuruClient"; + const commandName = "UpdateEventSourcesConfigCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: UpdateEventSourcesConfigRequest.filterSensitiveLog, + outputFilterSensitiveLog: UpdateEventSourcesConfigResponse.filterSensitiveLog, + }; + const { requestHandler } = 
configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: UpdateEventSourcesConfigCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1UpdateEventSourcesConfigCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1UpdateEventSourcesConfigCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-devops-guru/src/commands/index.ts b/clients/client-devops-guru/src/commands/index.ts index 629c5d78bab2..cd45a69fc521 100644 --- a/clients/client-devops-guru/src/commands/index.ts +++ b/clients/client-devops-guru/src/commands/index.ts @@ -2,6 +2,7 @@ export * from "./AddNotificationChannelCommand"; export * from "./DescribeAccountHealthCommand"; export * from "./DescribeAccountOverviewCommand"; export * from "./DescribeAnomalyCommand"; +export * from "./DescribeEventSourcesConfigCommand"; export * from "./DescribeFeedbackCommand"; export * from "./DescribeInsightCommand"; export * from "./DescribeOrganizationHealthCommand"; @@ -22,5 +23,6 @@ export * from "./RemoveNotificationChannelCommand"; export * from "./SearchInsightsCommand"; export * from "./SearchOrganizationInsightsCommand"; export * from "./StartCostEstimationCommand"; +export * from "./UpdateEventSourcesConfigCommand"; export * from "./UpdateResourceCollectionCommand"; export * from "./UpdateServiceIntegrationCommand"; diff --git a/clients/client-devops-guru/src/models/models_0.ts b/clients/client-devops-guru/src/models/models_0.ts index 1fb89492bf21..94b08800fc9e 100644 --- a/clients/client-devops-guru/src/models/models_0.ts +++ b/clients/client-devops-guru/src/models/models_0.ts @@ -393,6 +393,30 @@ export class ValidationException extends 
__BaseException { } } +export enum EventSourceOptInStatus { + DISABLED = "DISABLED", + ENABLED = "ENABLED", +} + +/** + *

Information about your account's integration with Amazon CodeGuru Profiler.

+ */ +export interface AmazonCodeGuruProfilerIntegration { + /** + *

The status of the CodeGuru Profiler integration.

+ */ + Status?: EventSourceOptInStatus | string; +} + +export namespace AmazonCodeGuruProfilerIntegration { + /** + * @internal + */ + export const filterSensitiveLog = (obj: AmazonCodeGuruProfilerIntegration): any => ({ + ...obj, + }); +} + /** *

A time range that specifies when DevOps Guru opens and then closes an anomaly. This * is different from AnomalyTimeRange, which specifies the time range when @@ -420,10 +444,10 @@ export namespace AnomalyReportedTimeRange { } /** - *

The Amazon Web Services resources in which DevOps Guru detected unusual behavior that resulted in - * the generation of an anomaly. When DevOps Guru detects multiple related anomalies, it creates - * and insight with details about the anomalous behavior and suggestions about how to correct the - * problem.

+ *

The Amazon Web Services resources in which DevOps Guru detected unusual behavior that resulted in the + * generation of an anomaly. When DevOps Guru detects multiple related anomalies, it creates and + * insight with details about the anomalous behavior and suggestions about how to correct + * the problem.

*/ export interface AnomalyResource { /** @@ -520,8 +544,8 @@ export interface CloudWatchMetricsDataSummary { TimestampMetricValuePairList?: TimestampMetricValuePair[]; /** - *

This is an enum of the status showing whether the metric value pair list has partial or - * complete data, or if there was an error.

+ *

This is an enum of the status showing whether the metric value pair list has partial + * or complete data, or if there was an error.

*/ StatusCode?: CloudWatchMetricDataStatusCode | string; } @@ -600,15 +624,14 @@ export namespace CloudWatchMetricsDetail { /** *

A logical grouping of Performance Insights metrics for a related subject area. For example, the - * db.sql dimension group consists of the following dimensions: - * db.sql.id, db.sql.db_id, db.sql.statement, and - * db.sql.tokenized_id.

- * - *

Each response element returns a maximum of 500 bytes. For larger elements, such as SQL statements, - * only the first 500 bytes are returned.

- *
- * - *

Amazon RDS Performance Insights enables you to monitor and explore different + * db.sql dimension group consists of the following dimensions: + * db.sql.id, db.sql.db_id, db.sql.statement, + * and db.sql.tokenized_id.

+ * + *

Each response element returns a maximum of 500 bytes. For larger elements, such as + * SQL statements, only the first 500 bytes are returned.

+ *
+ *

Amazon RDS Performance Insights enables you to monitor and explore different * dimensions of database load based on data captured from a running DB instance. * DB load is measured as average active sessions. Performance Insights provides the * data to API consumers as a two-dimensional time-series dataset. The time dimension @@ -631,128 +654,144 @@ export interface PerformanceInsightsMetricDimensionGroup { /** *

The name of the dimension group. Its valid values are:

* - *
    + *
      *
    • - *

      - * db - The name of the database to which the client is connected (only Aurora PostgreSQL, Amazon RDS PostgreSQL, - * Aurora MySQL, Amazon RDS MySQL, and MariaDB)

      - *
    • + *

      + * db - The name of the database to which the client is connected + * (only Aurora PostgreSQL, Amazon RDS PostgreSQL, Aurora MySQL, Amazon RDS MySQL, and MariaDB)

      + * *
    • - *

      - * db.application - The name of the application that is connected to the database (only Aurora - * PostgreSQL and RDS PostgreSQL)

      - *
    • + *

      + * db.application - The name of the application that is connected to + * the database (only Aurora PostgreSQL and RDS PostgreSQL)

      + * *
    • - *

      - * db.host - The host name of the connected client (all engines)

      - *
    • + *

      + * db.host - The host name of the connected client (all + * engines)

      + * *
    • - *

      - * db.session_type - The type of the current session (only Aurora PostgreSQL and RDS PostgreSQL)

      - *
    • + *

      + * db.session_type - The type of the current session (only Aurora PostgreSQL + * and RDS PostgreSQL)

      + * *
    • - *

      - * db.sql - The SQL that is currently executing (all engines)

      - *
    • + *

      + * db.sql - The SQL that is currently executing (all engines)

      + * *
    • - *

      - * db.sql_tokenized - The SQL digest (all engines)

      - *
    • + *

      + * db.sql_tokenized - The SQL digest (all engines)

      + * *
    • - *

      - * db.wait_event - The event for which the database backend is waiting (all engines)

      - *
    • + *

      + * db.wait_event - The event for which the database backend is waiting + * (all engines)

      + * *
    • - *

      - * db.wait_event_type - The type of event for which the database backend is waiting (all engines)

      - *
    • + *

      + * db.wait_event_type - The type of event for which the database + * backend is waiting (all engines)

      + * *
    • - *

      - * db.user - The user logged in to the database (all engines)

      - *
    • + *

      + * db.user - The user logged in to the database (all engines)

      + * *
    */ Group?: string; /** - *

    A list of specific dimensions from a dimension group. If this parameter is not present, - * then it signifies that all of the dimensions in the group were requested or are present in - * the response.

    - *

    Valid values for elements in the Dimensions array are:

    + *

    A list of specific dimensions from a dimension group. If this parameter is not + * present, then it signifies that all of the dimensions in the group were requested or are + * present in the response.

    + *

    Valid values for elements in the Dimensions array are:

    * - *
      + *
        *
      • - *

        - * db.application.name - The name of the application that is connected to the database (only - * Aurora PostgreSQL and RDS PostgreSQL)

        - *
      • + *

        + * db.application.name - The name of the application that is connected + * to the database (only Aurora PostgreSQL and RDS PostgreSQL)

        + * *
      • - *

        - * db.host.id - The host ID of the connected client (all engines)

        - *
      • + *

        + * db.host.id - The host ID of the connected client (all + * engines)

        + * *
      • - *

        - * db.host.name - The host name of the connected client (all engines)

        - *
      • + *

        + * db.host.name - The host name of the connected client (all + * engines)

        + * *
      • - *

        - * db.name - The name of the database to which the client is connected (only Aurora PostgreSQL, Amazon RDS - * PostgreSQL, Aurora MySQL, Amazon RDS MySQL, and MariaDB)

        - *
      • + *

        + * db.name - The name of the database to which the client is connected + * (only Aurora PostgreSQL, Amazon RDS PostgreSQL, Aurora MySQL, Amazon RDS MySQL, and MariaDB)

        + * *
      • - *

        - * db.session_type.name - The type of the current session (only Aurora PostgreSQL and RDS PostgreSQL)

        - *
      • + *

        + * db.session_type.name - The type of the current session (only Aurora + * PostgreSQL and RDS PostgreSQL)

        + * *
      • - *

        - * db.sql.id - The SQL ID generated by Performance Insights (all engines)

        - *
      • + *

        + * db.sql.id - The SQL ID generated by Performance Insights (all engines)

        + * *
      • - *

        - * db.sql.db_id - The SQL ID generated by the database (all engines)

        - *
      • + *

        + * db.sql.db_id - The SQL ID generated by the database (all + * engines)

        + * *
      • - *

        - * db.sql.statement - The SQL text that is being executed (all engines)

        - *
      • + *

        + * db.sql.statement - The SQL text that is being executed (all + * engines)

        + * *
      • - *

        - * db.sql.tokenized_id - *

        - *
      • + *

        + * db.sql.tokenized_id + *

        + * *
      • - *

        - * db.sql_tokenized.id - The SQL digest ID generated by Performance Insights (all engines)

        - *
      • + *

        + * db.sql_tokenized.id - The SQL digest ID generated by Performance Insights (all + * engines)

        + * *
      • - *

        - * db.sql_tokenized.db_id - SQL digest ID generated by the database (all engines)

        - *
      • + *

        + * db.sql_tokenized.db_id - SQL digest ID generated by the database + * (all engines)

        + * *
      • - *

        - * db.sql_tokenized.statement - The SQL digest text (all engines)

        - *
      • + *

        + * db.sql_tokenized.statement - The SQL digest text (all + * engines)

        + * *
      • - *

        - * db.user.id - The ID of the user logged in to the database (all engines)

        - *
      • + *

        + * db.user.id - The ID of the user logged in to the database (all + * engines)

        + * *
      • - *

        - * db.user.name - The name of the user logged in to the database (all engines)

        - *
      • + *

        + * db.user.name - The name of the user logged in to the database (all + * engines)

        + * *
      • - *

        - * db.wait_event.name - The event for which the backend is waiting (all engines)

        - *
      • + *

        + * db.wait_event.name - The event for which the backend is waiting + * (all engines)

        + * *
      • - *

        - * db.wait_event.type - The type of event for which the backend is waiting (all engines)

        - *
      • + *

        + * db.wait_event.type - The type of event for which the backend is + * waiting (all engines)

        + * *
      • - *

        - * db.wait_event_type.name - The name of the event type for which the backend is waiting (all - * engines)

        - *
      • + *

        + * db.wait_event_type.name - The name of the event type for which the + * backend is waiting (all engines)

        + * *
      */ Dimensions?: string[]; @@ -773,14 +812,13 @@ export namespace PerformanceInsightsMetricDimensionGroup { } /** - *

      A single query to be processed. Use these parameters to - * query the Performance Insights GetResourceMetrics API to retrieve the metrics - * for an anomaly. For more information, see + *

      A single query to be processed. Use these parameters to query the Performance Insights + * GetResourceMetrics API to retrieve the metrics for an anomaly. For more + * information, see * GetResourceMetrics - * - * in the Amazon RDS Performance Insights API Reference.

      - * - *

      Amazon RDS Performance Insights enables you to monitor and explore different + * in the Amazon RDS Performance Insights API + * Reference.

      + *

      Amazon RDS Performance Insights enables you to monitor and explore different * dimensions of database load based on data captured from a running DB instance. * DB load is measured as average active sessions. Performance Insights provides the * data to API consumers as a two-dimensional time-series dataset. The time dimension @@ -801,47 +839,53 @@ export namespace PerformanceInsightsMetricDimensionGroup { */ export interface PerformanceInsightsMetricQuery { /** - *

      The name of the meteric used used when querying an Performance Insights GetResourceMetrics API for - * anomaly metrics.

      + *

The name of the metric used when querying the Performance Insights + * GetResourceMetrics API for anomaly metrics.

      * - *

      Valid values for Metric are:

      + *

      Valid values for Metric are:

      * - *
        + *
          *
        • - *

          - * db.load.avg - a scaled representation of the number of active sessions - * for the database engine.

          - *
        • + *

          + * db.load.avg - a scaled representation of the number of active sessions for the + * database engine.

          + * *
        • - *

          - * db.sampledload.avg - the raw number of active sessions for the - * database engine.

          - *
        • + *

          + * db.sampledload.avg - the raw number of active sessions for the database + * engine.

          + * *
        - *

        If the number of active sessions is less than an internal Performance Insights threshold, db.load.avg and db.sampledload.avg - * are the same value. If the number of active sessions is greater than the internal threshold, Performance Insights samples the active sessions, with db.load.avg - * showing the scaled values, db.sampledload.avg showing the raw values, and db.sampledload.avg less than db.load.avg. - * For most use cases, you can query db.load.avg only.

        + *

        If the number of active sessions is less than an internal Performance Insights threshold, + * db.load.avg and db.sampledload.avg are the same value. If + * the number of active sessions is greater than the internal threshold, Performance Insights samples the active sessions, with + * db.load.avg showing the scaled values, db.sampledload.avg + * showing the raw values, and db.sampledload.avg less than + * db.load.avg. For most use cases, you can query db.load.avg + * only.

        */ Metric?: string; /** - *

        The specification for how to aggregate the data points from a Performance Insights GetResourceMetrics API query. The - * Performance Insights query returns all of the dimensions within that group, - * unless you provide the names of specific dimensions within that group. You can also request - * that Performance Insights return a limited number of values for a dimension.

        + *

        The specification for how to aggregate the data points from a Performance Insights + * GetResourceMetrics API query. The Performance Insights query returns all of the + * dimensions within that group, unless you provide the names of specific dimensions within + * that group. You can also request that Performance Insights return a limited number of values for a + * dimension.

        */ GroupBy?: PerformanceInsightsMetricDimensionGroup; /** - *

        One or more filters to apply to a Performance Insights GetResourceMetrics API query. Restrictions:

        - *
          + *

          One or more filters to apply to a Performance Insights GetResourceMetrics API query. + * Restrictions:

          + *
            *
          • - *

            Any number of filters by the same dimension, as specified in the GroupBy parameter.

            - *
          • + *

            Any number of filters by the same dimension, as specified in the + * GroupBy parameter.

            + * *
          • - *

            A single filter for any other dimension in this dimension group.

            - *
          • + *

            A single filter for any other dimension in this dimension group.

            + * *
          */ Filter?: { [key: string]: string }; @@ -877,7 +921,7 @@ export namespace PerformanceInsightsReferenceMetric { /** *

          A reference value to compare Performance Insights metrics against to determine if the metrics - * demonstrate anomalous behavior.

          + * demonstrate anomalous behavior.

          */ export interface PerformanceInsightsReferenceScalar { /** @@ -896,19 +940,22 @@ export namespace PerformanceInsightsReferenceScalar { } /** - *

          Reference scalar values and other metrics that DevOps Guru displays on a graph in its console along with the actual metrics it - * analyzed. Compare these reference values to your actual metrics to help you understand anomalous behavior that DevOps Guru detected.

          + *

          Reference scalar values and other metrics that DevOps Guru displays on a graph in its + * console along with the actual metrics it analyzed. Compare these reference values to + * your actual metrics to help you understand anomalous behavior that DevOps Guru + * detected.

          */ export interface PerformanceInsightsReferenceComparisonValues { /** - *

          A scalar value DevOps Guru for a metric that DevOps Guru compares to actual metric values. This reference value is used - * to determine if an actual metric value should be considered anomalous.

          + *

A scalar value for a metric that DevOps Guru compares to actual metric values. This + * reference value is used to determine if an actual metric value should be considered + * anomalous.

          */ ReferenceScalar?: PerformanceInsightsReferenceScalar; /** - *

          A metric that DevOps Guru compares to actual metric values. This reference metric is used - * to determine if an actual metric should be considered anomalous.

          + *

          A metric that DevOps Guru compares to actual metric values. This reference metric is used to + * determine if an actual metric should be considered anomalous.

          */ ReferenceMetric?: PerformanceInsightsReferenceMetric; } @@ -923,8 +970,8 @@ export namespace PerformanceInsightsReferenceComparisonValues { } /** - *

          Reference data used to evaluate Performance Insights to determine if its performance - * is anomalous or not.

          + *

          Reference data used to evaluate Performance Insights to determine if its performance is anomalous or + * not.

          */ export interface PerformanceInsightsReferenceData { /** @@ -934,10 +981,9 @@ export interface PerformanceInsightsReferenceData { /** *

          The specific reference values used to evaluate the Performance Insights. For more information, see - * + * * PerformanceInsightsReferenceComparisonValues - * . - *

          + * .

          */ ComparisonValues?: PerformanceInsightsReferenceComparisonValues; } @@ -977,8 +1023,7 @@ export namespace PerformanceInsightsStat { /** *

          Details about Performance Insights metrics.

          - * - *

          Amazon RDS Performance Insights enables you to monitor and explore different + *

          Amazon RDS Performance Insights enables you to monitor and explore different * dimensions of database load based on data captured from a running DB instance. * DB load is measured as average active sessions. Performance Insights provides the * data to API consumers as a two-dimensional time-series dataset. The time dimension @@ -1009,20 +1054,16 @@ export interface PerformanceInsightsMetricsDetail { Unit?: string; /** - *

          A single query to be processed for the metric. For more information, see - * + *

          A single query to be processed for the metric. For more information, see * PerformanceInsightsMetricQuery * .

          */ MetricQuery?: PerformanceInsightsMetricQuery; /** - *

          - * For more information, see - * + *

          For more information, see * PerformanceInsightsReferenceData - * . - *

          + * .

          */ ReferenceData?: PerformanceInsightsReferenceData[]; @@ -1060,8 +1101,8 @@ export interface AnomalySourceDetails { CloudWatchMetrics?: CloudWatchMetricsDetail[]; /** - *

          An array of PerformanceInsightsMetricsDetail objects that contain information - * about analyzed Performance Insights metrics that show anomalous behavior.

          + *

          An array of PerformanceInsightsMetricsDetail objects that contain + * information about analyzed Performance Insights metrics that show anomalous behavior.

          */ PerformanceInsightsMetrics?: PerformanceInsightsMetricsDetail[]; } @@ -1075,6 +1116,35 @@ export namespace AnomalySourceDetails { }); } +/** + *

          Metadata about an anomaly. The anomaly is detected using analysis of the metric data
 over a period of time.

          + */ +export interface AnomalySourceMetadata { + /** + *

          The source of the anomaly.

          + */ + Source?: string; + + /** + *

          The name of the anomaly's resource.

          + */ + SourceResourceName?: string; + + /** + *

          The anomaly's resource type.

          + */ + SourceResourceType?: string; +} + +export namespace AnomalySourceMetadata { + /** + * @internal + */ + export const filterSensitiveLog = (obj: AnomalySourceMetadata): any => ({ + ...obj, + }); +} + export enum AnomalyStatus { CLOSED = "CLOSED", ONGOING = "ONGOING", @@ -1282,7 +1352,7 @@ export namespace CloudFormationCollection { /** *

A collection of Amazon Web Services tags.

          - *

          Tags help you identify and organize your Amazon Web Services resources. Many Amazon Web Services services support + *

          Tags help you identify and organize your Amazon Web Services resources. Many Amazon Web Services services support * tagging, so you can assign the same tag to resources from different services to indicate * that the resources are related. For example, you can assign the same tag to an Amazon DynamoDB * table resource that you assign to an Lambda function. For more information about @@ -1337,7 +1407,7 @@ export interface TagCollection { /** *

          The values in an Amazon Web Services tag collection.

          - *

          The tag's value is an optional field used to associate a string with + *

          The tag's value is an optional field used to associate a string with * the tag key (for example, 111122223333, Production, or a team * name). The key and value are the tag's key pair. * Omitting the tag value is the same as using an empty @@ -1371,7 +1441,7 @@ export interface ResourceCollection { /** *

          The Amazon Web Services tags that are used by resources in the resource collection.

          - *

          Tags help you identify and organize your Amazon Web Services resources. Many Amazon Web Services services support + *

          Tags help you identify and organize your Amazon Web Services resources. Many Amazon Web Services services support * tagging, so you can assign the same tag to resources from different services to indicate * that the resources are related. For example, you can assign the same tag to an Amazon DynamoDB * table resource that you assign to an Lambda function. For more information about @@ -1490,6 +1560,16 @@ export interface ProactiveAnomaly { * threshold is related to the anomalous behavior that generated this anomaly.

          */ Limit?: number; + + /** + *

          The metadata for the anomaly.

          + */ + SourceMetadata?: AnomalySourceMetadata; + + /** + *

          Information about a resource in which DevOps Guru detected anomalous behavior.

          + */ + AnomalyResources?: AnomalyResource[]; } export namespace ProactiveAnomaly { @@ -1625,6 +1705,52 @@ export namespace DescribeAnomalyResponse { }); } +export interface DescribeEventSourcesConfigRequest {} + +export namespace DescribeEventSourcesConfigRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DescribeEventSourcesConfigRequest): any => ({ + ...obj, + }); +} + +/** + *

          Describes the event sources.

          + */ +export interface EventSourcesConfig { + /** + *

          + */ + AmazonCodeGuruProfiler?: AmazonCodeGuruProfilerIntegration; +} + +export namespace EventSourcesConfig { + /** + * @internal + */ + export const filterSensitiveLog = (obj: EventSourcesConfig): any => ({ + ...obj, + }); +} + +export interface DescribeEventSourcesConfigResponse { + /** + *

          The name of the event source.

          + */ + EventSources?: EventSourcesConfig; +} + +export namespace DescribeEventSourcesConfigResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DescribeEventSourcesConfigResponse): any => ({ + ...obj, + }); +} + export interface DescribeFeedbackRequest { /** *

          The ID of the insight for which the feedback was provided.

          @@ -1798,6 +1924,11 @@ export interface ProactiveInsight { * the creation of OpstItems insights before they are created for each insight.

          */ SsmOpsItemId?: string; + + /** + *

          Describes the proactive insight.

          + */ + Description?: string; } export namespace ProactiveInsight { @@ -1855,6 +1986,11 @@ export interface ReactiveInsight { * the creation of OpstItems insights before they are created for each insight.

          */ SsmOpsItemId?: string; + + /** + *

          Describes the reactive insight.

          + */ + Description?: string; } export namespace ReactiveInsight { @@ -2187,7 +2323,7 @@ export namespace ServiceHealth { /** *

          Information about the health of Amazon Web Services resources in your account that are specified by - * an Amazon Web Services tag key.

          + * an Amazon Web Services tag key.

          */ export interface TagHealth { /** @@ -2210,7 +2346,7 @@ export interface TagHealth { /** *

          The value in an Amazon Web Services tag.

          - *

          The tag's value is an optional field used to associate a string with + *

          The tag's value is an optional field used to associate a string with * the tag key (for example, 111122223333, Production, or a team * name). The key and value are the tag's key pair. * Omitting the tag value is the same as using an empty @@ -2220,9 +2356,9 @@ export interface TagHealth { TagValue?: string; /** - *

          Information about the health of the Amazon Web Services resources in your account that are - * specified by an Amazon Web Services tag, including the number of open proactive, open reactive - * insights, and the Mean Time to Recover (MTTR) of closed insights.

          + *

          Information about the health of the Amazon Web Services resources in your account that are specified + * by an Amazon Web Services tag, including the number of open proactive, open reactive insights, and the + * Mean Time to Recover (MTTR) of closed insights.

          */ Insight?: InsightHealth; } @@ -2239,7 +2375,7 @@ export namespace TagHealth { export interface DescribeOrganizationResourceCollectionHealthResponse { /** *

          The returned CloudFormationHealthOverview object that contains an - * InsightHealthOverview object with the requested system health + * InsightHealthOverview object with the requested system health * information.

          */ CloudFormation?: CloudFormationHealth[]; @@ -2361,7 +2497,7 @@ export interface DescribeResourceCollectionHealthResponse { /** *

          The Amazon Web Services tags that are used by resources in the resource collection.

          - *

          Tags help you identify and organize your Amazon Web Services resources. Many Amazon Web Services services support + *

          Tags help you identify and organize your Amazon Web Services resources. Many Amazon Web Services services support * tagging, so you can assign the same tag to resources from different services to indicate * that the resources are related. For example, you can assign the same tag to an Amazon DynamoDB * table resource that you assign to an Lambda function. For more information about @@ -2580,12 +2716,12 @@ export namespace CloudFormationCostEstimationResourceCollectionFilter { } /** - *

          Information about a collection of Amazon Web Services resources that are identified by an - * Amazon Web Services tag. This collection of resources is used to create a monthly cost estimate - * for DevOps Guru to analyze Amazon Web Services resources. The maximum number of tags you can specify for a - * cost estimate is one. The estimate created is for the cost to analyze the Amazon Web Services - * resources defined by the tag. For more information, see Stacks in the - * Amazon Web Services CloudFormation User Guide.

          + *

          Information about a collection of Amazon Web Services resources that are identified by an Amazon Web Services tag. + * This collection of resources is used to create a monthly cost estimate for DevOps Guru to + * analyze Amazon Web Services resources. The maximum number of tags you can specify for a cost estimate + * is one. The estimate created is for the cost to analyze the Amazon Web Services resources defined by + * the tag. For more information, see Stacks in the + * Amazon Web Services CloudFormation User Guide.

          */ export interface TagCostEstimationResourceCollectionFilter { /** @@ -2608,7 +2744,7 @@ export interface TagCostEstimationResourceCollectionFilter { /** *

          The values in an Amazon Web Services tag collection.

          - *

          The tag's value is an optional field used to associate a string with + *

          The tag's value is an optional field used to associate a string with * the tag key (for example, 111122223333, Production, or a team * name). The key and value are the tag's key pair. * Omitting the tag value is the same as using an empty @@ -2642,9 +2778,9 @@ export interface CostEstimationResourceCollectionFilter { CloudFormation?: CloudFormationCostEstimationResourceCollectionFilter; /** - *

          The Amazon Web Services tags used to filter the resource collection that is used for - * a cost estimate.

          - *

          Tags help you identify and organize your Amazon Web Services resources. Many Amazon Web Services services support + *

          The Amazon Web Services tags used to filter the resource collection that is used for a cost + * estimate.

          + *

          Tags help you identify and organize your Amazon Web Services resources. Many Amazon Web Services services support * tagging, so you can assign the same tag to resources from different services to indicate * that the resources are related. For example, you can assign the same tag to an Amazon DynamoDB * table resource that you assign to an Lambda function. For more information about @@ -2813,8 +2949,8 @@ export namespace CloudFormationCollectionFilter { } /** - *

          A collection of Amazon Web Services tags used to filter insights. This is used to return insights generated from - * only resources that contain the tags in the tag collection.

          + *

          A collection of Amazon Web Services tags used to filter insights. This is used to return insights + * generated from only resources that contain the tags in the tag collection.

          */ export interface TagCollectionFilter { /** @@ -2837,7 +2973,7 @@ export interface TagCollectionFilter { /** *

          The values in an Amazon Web Services tag collection.

          - *

          The tag's value is an optional field used to associate a string with + *

          The tag's value is an optional field used to associate a string with * the tag key (for example, 111122223333, Production, or a team * name). The key and value are the tag's key pair. * Omitting the tag value is the same as using an empty @@ -2871,7 +3007,7 @@ export interface ResourceCollectionFilter { /** *

          The Amazon Web Services tags used to filter the resources in the resource collection.

          - *

          Tags help you identify and organize your Amazon Web Services resources. Many Amazon Web Services services support + *

          Tags help you identify and organize your Amazon Web Services resources. Many Amazon Web Services services support * tagging, so you can assign the same tag to resources from different services to indicate * that the resources are related. For example, you can assign the same tag to an Amazon DynamoDB * table resource that you assign to an Lambda function. For more information about @@ -3080,6 +3216,16 @@ export interface ProactiveAnomalySummary { * threshold is related to the anomalous behavior that generated this anomaly.

          */ Limit?: number; + + /** + *

          Returns the metadata of the source.

          + */ + SourceMetadata?: AnomalySourceMetadata; + + /** + *

          Information about a resource in which DevOps Guru detected anomalous behavior.

          + */ + AnomalyResources?: AnomalyResource[]; } export namespace ProactiveAnomalySummary { @@ -3918,7 +4064,7 @@ export namespace ListOrganizationInsightsRequest { /** *

          Details about a proactive insight. This object is returned by - * DescribeInsight.

          + * DescribeInsight.

          */ export interface ProactiveOrganizationInsightSummary { /** @@ -3942,7 +4088,8 @@ export interface ProactiveOrganizationInsightSummary { Name?: string; /** - *

          An array of severity values used to search for insights. For more information, see + *

          An array of severity values used to search for insights. + * For more information, see * Understanding * insight severities in the Amazon DevOps Guru User Guide.

          */ @@ -3990,7 +4137,7 @@ export namespace ProactiveOrganizationInsightSummary { /** *

          Information about a reactive insight. This object is returned by - * DescribeInsight.

          + * DescribeInsight.

          */ export interface ReactiveOrganizationInsightSummary { /** @@ -4014,7 +4161,8 @@ export interface ReactiveOrganizationInsightSummary { Name?: string; /** - *

          An array of severity values used to search for insights. For more information, see + *

          An array of severity values used to search for insights. + * For more information, see * Understanding * insight severities in the Amazon DevOps Guru User Guide.

          */ @@ -4139,11 +4287,11 @@ export interface RecommendationRelatedAnomalyResource { Name?: string; /** - *

          The type of the resource. Resource types take the same form that is - * used by Amazon Web Services CloudFormation resource type identifiers, service-provider::service-name::data-type-name. - * For example, AWS::RDS::DBCluster. For more information, see - * Amazon Web Services resource and - * property types reference in the Amazon Web Services CloudFormation User Guide.

          + *

          The type of the resource. Resource types take the same form that is used by Amazon Web Services CloudFormation + * resource type identifiers, service-provider::service-name::data-type-name. + * For example, AWS::RDS::DBCluster. For more information, see Amazon Web Services + * resource and property types reference in the Amazon Web Services CloudFormation User + * Guide.

          */ Type?: string; } @@ -4323,6 +4471,11 @@ export interface Recommendation { * what's happening and to help address the issue.

          */ RelatedAnomalies?: RecommendationRelatedAnomaly[]; + + /** + *

          The category type of the recommendation.

          + */ + Category?: string; } export namespace Recommendation { @@ -4586,7 +4739,7 @@ export interface SearchOrganizationInsightsRequest { /** *

          The type of insights you are searching for (REACTIVE or - * PROACTIVE).

          + * PROACTIVE).

          */ Type: InsightType | string | undefined; } @@ -4661,6 +4814,33 @@ export namespace StartCostEstimationResponse { }); } +export interface UpdateEventSourcesConfigRequest { + /** + *

          The name of the event source.

          + */ + EventSources?: EventSourcesConfig; +} + +export namespace UpdateEventSourcesConfigRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdateEventSourcesConfigRequest): any => ({ + ...obj, + }); +} + +export interface UpdateEventSourcesConfigResponse {} + +export namespace UpdateEventSourcesConfigResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdateEventSourcesConfigResponse): any => ({ + ...obj, + }); +} + export enum UpdateResourceCollectionAction { ADD = "ADD", REMOVE = "REMOVE", @@ -4689,7 +4869,7 @@ export namespace UpdateCloudFormationCollectionFilter { /** *

          A new collection of Amazon Web Services resources that are defined by an Amazon Web Services tag or tag - * key/value pair.

          + * key/value pair.

          */ export interface UpdateTagCollectionFilter { /** @@ -4712,7 +4892,7 @@ export interface UpdateTagCollectionFilter { /** *

          The values in an Amazon Web Services tag collection.

          - *

          The tag's value is an optional field used to associate a string with + *

          The tag's value is an optional field used to associate a string with * the tag key (for example, 111122223333, Production, or a team * name). The key and value are the tag's key pair. * Omitting the tag value is the same as using an empty @@ -4742,7 +4922,7 @@ export interface UpdateResourceCollectionFilter { /** *

          The updated Amazon Web Services tags used to filter the resources in the resource collection.

          - *

          Tags help you identify and organize your Amazon Web Services resources. Many Amazon Web Services services support + *

          Tags help you identify and organize your Amazon Web Services resources. Many Amazon Web Services services support * tagging, so you can assign the same tag to resources from different services to indicate * that the resources are related. For example, you can assign the same tag to an Amazon DynamoDB * table resource that you assign to an Lambda function. For more information about diff --git a/clients/client-devops-guru/src/protocols/Aws_restJson1.ts b/clients/client-devops-guru/src/protocols/Aws_restJson1.ts index 18571a6835c5..56b2edc2aca6 100644 --- a/clients/client-devops-guru/src/protocols/Aws_restJson1.ts +++ b/clients/client-devops-guru/src/protocols/Aws_restJson1.ts @@ -32,6 +32,10 @@ import { DescribeAccountOverviewCommandOutput, } from "../commands/DescribeAccountOverviewCommand"; import { DescribeAnomalyCommandInput, DescribeAnomalyCommandOutput } from "../commands/DescribeAnomalyCommand"; +import { + DescribeEventSourcesConfigCommandInput, + DescribeEventSourcesConfigCommandOutput, +} from "../commands/DescribeEventSourcesConfigCommand"; import { DescribeFeedbackCommandInput, DescribeFeedbackCommandOutput } from "../commands/DescribeFeedbackCommand"; import { DescribeInsightCommandInput, DescribeInsightCommandOutput } from "../commands/DescribeInsightCommand"; import { @@ -91,6 +95,10 @@ import { StartCostEstimationCommandInput, StartCostEstimationCommandOutput, } from "../commands/StartCostEstimationCommand"; +import { + UpdateEventSourcesConfigCommandInput, + UpdateEventSourcesConfigCommandOutput, +} from "../commands/UpdateEventSourcesConfigCommand"; import { UpdateResourceCollectionCommandInput, UpdateResourceCollectionCommandOutput, @@ -104,9 +112,11 @@ import { AccessDeniedException, AccountHealth, AccountInsightHealth, + AmazonCodeGuruProfilerIntegration, AnomalyReportedTimeRange, AnomalyResource, AnomalySourceDetails, + AnomalySourceMetadata, AnomalyTimeRange, CloudFormationCollection, CloudFormationCollectionFilter, @@ -121,6 +131,7 
@@ import { EndTimeRange, Event, EventResource, + EventSourcesConfig, EventTimeRange, InsightFeedback, InsightHealth, @@ -296,6 +307,28 @@ export const serializeAws_restJson1DescribeAnomalyCommand = async ( }); }; +export const serializeAws_restJson1DescribeEventSourcesConfigCommand = async ( + input: DescribeEventSourcesConfigCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/event-sources"; + let body: any; + body = ""; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + export const serializeAws_restJson1DescribeFeedbackCommand = async ( input: DescribeFeedbackCommandInput, context: __SerdeContext @@ -907,6 +940,33 @@ export const serializeAws_restJson1StartCostEstimationCommand = async ( }); }; +export const serializeAws_restJson1UpdateEventSourcesConfigCommand = async ( + input: UpdateEventSourcesConfigCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/event-sources"; + let body: any; + body = JSON.stringify({ + ...(input.EventSources !== undefined && + input.EventSources !== null && { + EventSources: serializeAws_restJson1EventSourcesConfig(input.EventSources, context), + }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "PUT", + headers, + path: resolvedPath, + body, + }); +}; + export const serializeAws_restJson1UpdateResourceCollectionCommand = async ( input: UpdateResourceCollectionCommandInput, context: __SerdeContext @@ -1210,6 +1270,59 @@ const deserializeAws_restJson1DescribeAnomalyCommandError = async ( } }; +export const deserializeAws_restJson1DescribeEventSourcesConfigCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1DescribeEventSourcesConfigCommandError(output, context); + } + const contents: DescribeEventSourcesConfigCommandOutput = { + $metadata: deserializeMetadata(output), + EventSources: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.EventSources !== undefined && data.EventSources !== null) { + contents.EventSources = deserializeAws_restJson1EventSourcesConfig(data.EventSources, context); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1DescribeEventSourcesConfigCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __BaseException; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.devopsguru#AccessDeniedException": + throw await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, 
context); + case "InternalServerException": + case "com.amazonaws.devopsguru#InternalServerException": + throw await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context); + case "ThrottlingException": + case "com.amazonaws.devopsguru#ThrottlingException": + throw await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context); + case "ValidationException": + case "com.amazonaws.devopsguru#ValidationException": + throw await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context); + default: + const parsedBody = parsedOutput.body; + response = new __BaseException({ + name: parsedBody.code || parsedBody.Code || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + }); + throw __decorateServiceException(response, parsedBody); + } +}; + export const deserializeAws_restJson1DescribeFeedbackCommand = async ( output: __HttpResponse, context: __SerdeContext @@ -2424,6 +2537,55 @@ const deserializeAws_restJson1StartCostEstimationCommandError = async ( } }; +export const deserializeAws_restJson1UpdateEventSourcesConfigCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1UpdateEventSourcesConfigCommandError(output, context); + } + const contents: UpdateEventSourcesConfigCommandOutput = { + $metadata: deserializeMetadata(output), + }; + await collectBody(output.body, context); + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1UpdateEventSourcesConfigCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __BaseException; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case 
"com.amazonaws.devopsguru#AccessDeniedException": + throw await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context); + case "InternalServerException": + case "com.amazonaws.devopsguru#InternalServerException": + throw await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context); + case "ThrottlingException": + case "com.amazonaws.devopsguru#ThrottlingException": + throw await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context); + case "ValidationException": + case "com.amazonaws.devopsguru#ValidationException": + throw await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context); + default: + const parsedBody = parsedOutput.body; + response = new __BaseException({ + name: parsedBody.code || parsedBody.Code || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + }); + throw __decorateServiceException(response, parsedBody); + } +}; + export const deserializeAws_restJson1UpdateResourceCollectionCommand = async ( output: __HttpResponse, context: __SerdeContext @@ -2681,6 +2843,15 @@ const serializeAws_restJson1AccountIdList = (input: string[], context: __SerdeCo }); }; +const serializeAws_restJson1AmazonCodeGuruProfilerIntegration = ( + input: AmazonCodeGuruProfilerIntegration, + context: __SerdeContext +): any => { + return { + ...(input.Status !== undefined && input.Status !== null && { Status: input.Status }), + }; +}; + const serializeAws_restJson1CloudFormationCollection = ( input: CloudFormationCollection, context: __SerdeContext @@ -2752,6 +2923,18 @@ const serializeAws_restJson1EndTimeRange = (input: EndTimeRange, context: __Serd }; }; +const serializeAws_restJson1EventSourcesConfig = (input: EventSourcesConfig, context: __SerdeContext): any => { + return { + ...(input.AmazonCodeGuruProfiler !== undefined && + input.AmazonCodeGuruProfiler !== null && { + AmazonCodeGuruProfiler: serializeAws_restJson1AmazonCodeGuruProfilerIntegration( + 
input.AmazonCodeGuruProfiler, + context + ), + }), + }; +}; + const serializeAws_restJson1EventTimeRange = (input: EventTimeRange, context: __SerdeContext): any => { return { ...(input.FromTime !== undefined && @@ -3181,6 +3364,15 @@ const deserializeAws_restJson1AccountInsightHealth = (output: any, context: __Se } as any; }; +const deserializeAws_restJson1AmazonCodeGuruProfilerIntegration = ( + output: any, + context: __SerdeContext +): AmazonCodeGuruProfilerIntegration => { + return { + Status: __expectString(output.Status), + } as any; +}; + const deserializeAws_restJson1AnomalyReportedTimeRange = ( output: any, context: __SerdeContext @@ -3229,6 +3421,14 @@ const deserializeAws_restJson1AnomalySourceDetails = (output: any, context: __Se } as any; }; +const deserializeAws_restJson1AnomalySourceMetadata = (output: any, context: __SerdeContext): AnomalySourceMetadata => { + return { + Source: __expectString(output.Source), + SourceResourceName: __expectString(output.SourceResourceName), + SourceResourceType: __expectString(output.SourceResourceType), + } as any; +}; + const deserializeAws_restJson1AnomalyTimeRange = (output: any, context: __SerdeContext): AnomalyTimeRange => { return { EndTime: @@ -3511,6 +3711,15 @@ const deserializeAws_restJson1Events = (output: any, context: __SerdeContext): E return retVal; }; +const deserializeAws_restJson1EventSourcesConfig = (output: any, context: __SerdeContext): EventSourcesConfig => { + return { + AmazonCodeGuruProfiler: + output.AmazonCodeGuruProfiler !== undefined && output.AmazonCodeGuruProfiler !== null + ? 
deserializeAws_restJson1AmazonCodeGuruProfilerIntegration(output.AmazonCodeGuruProfiler, context) + : undefined, + } as any; +}; + const deserializeAws_restJson1InsightFeedback = (output: any, context: __SerdeContext): InsightFeedback => { return { Feedback: __expectString(output.Feedback), @@ -3793,6 +4002,10 @@ const deserializeAws_restJson1ProactiveAnomaly = (output: any, context: __SerdeC output.AnomalyReportedTimeRange !== undefined && output.AnomalyReportedTimeRange !== null ? deserializeAws_restJson1AnomalyReportedTimeRange(output.AnomalyReportedTimeRange, context) : undefined, + AnomalyResources: + output.AnomalyResources !== undefined && output.AnomalyResources !== null + ? deserializeAws_restJson1AnomalyResources(output.AnomalyResources, context) + : undefined, AnomalyTimeRange: output.AnomalyTimeRange !== undefined && output.AnomalyTimeRange !== null ? deserializeAws_restJson1AnomalyTimeRange(output.AnomalyTimeRange, context) @@ -3813,6 +4026,10 @@ const deserializeAws_restJson1ProactiveAnomaly = (output: any, context: __SerdeC output.SourceDetails !== undefined && output.SourceDetails !== null ? deserializeAws_restJson1AnomalySourceDetails(output.SourceDetails, context) : undefined, + SourceMetadata: + output.SourceMetadata !== undefined && output.SourceMetadata !== null + ? deserializeAws_restJson1AnomalySourceMetadata(output.SourceMetadata, context) + : undefined, Status: __expectString(output.Status), UpdateTime: output.UpdateTime !== undefined && output.UpdateTime !== null @@ -3830,6 +4047,10 @@ const deserializeAws_restJson1ProactiveAnomalySummary = ( output.AnomalyReportedTimeRange !== undefined && output.AnomalyReportedTimeRange !== null ? deserializeAws_restJson1AnomalyReportedTimeRange(output.AnomalyReportedTimeRange, context) : undefined, + AnomalyResources: + output.AnomalyResources !== undefined && output.AnomalyResources !== null + ? 
deserializeAws_restJson1AnomalyResources(output.AnomalyResources, context) + : undefined, AnomalyTimeRange: output.AnomalyTimeRange !== undefined && output.AnomalyTimeRange !== null ? deserializeAws_restJson1AnomalyTimeRange(output.AnomalyTimeRange, context) @@ -3850,6 +4071,10 @@ const deserializeAws_restJson1ProactiveAnomalySummary = ( output.SourceDetails !== undefined && output.SourceDetails !== null ? deserializeAws_restJson1AnomalySourceDetails(output.SourceDetails, context) : undefined, + SourceMetadata: + output.SourceMetadata !== undefined && output.SourceMetadata !== null + ? deserializeAws_restJson1AnomalySourceMetadata(output.SourceMetadata, context) + : undefined, Status: __expectString(output.Status), UpdateTime: output.UpdateTime !== undefined && output.UpdateTime !== null @@ -3860,6 +4085,7 @@ const deserializeAws_restJson1ProactiveAnomalySummary = ( const deserializeAws_restJson1ProactiveInsight = (output: any, context: __SerdeContext): ProactiveInsight => { return { + Description: __expectString(output.Description), Id: __expectString(output.Id), InsightTimeRange: output.InsightTimeRange !== undefined && output.InsightTimeRange !== null @@ -4052,6 +4278,7 @@ const deserializeAws_restJson1ReactiveAnomalySummary = ( const deserializeAws_restJson1ReactiveInsight = (output: any, context: __SerdeContext): ReactiveInsight => { return { + Description: __expectString(output.Description), Id: __expectString(output.Id), InsightTimeRange: output.InsightTimeRange !== undefined && output.InsightTimeRange !== null @@ -4151,6 +4378,7 @@ const deserializeAws_restJson1ReactiveOrganizationInsightSummary = ( const deserializeAws_restJson1Recommendation = (output: any, context: __SerdeContext): Recommendation => { return { + Category: __expectString(output.Category), Description: __expectString(output.Description), Link: __expectString(output.Link), Name: __expectString(output.Name), diff --git a/clients/client-ec2/src/models/models_0.ts 
b/clients/client-ec2/src/models/models_0.ts index 5dfadcf78086..33f260e7cb9e 100644 --- a/clients/client-ec2/src/models/models_0.ts +++ b/clients/client-ec2/src/models/models_0.ts @@ -2089,6 +2089,7 @@ export type ResourceType = | "spot-fleet-request" | "spot-instances-request" | "subnet" + | "subnet-cidr-reservation" | "traffic-mirror-filter" | "traffic-mirror-session" | "traffic-mirror-target" diff --git a/clients/client-ec2/src/models/models_1.ts b/clients/client-ec2/src/models/models_1.ts index c5e0291ca381..5104be27f196 100644 --- a/clients/client-ec2/src/models/models_1.ts +++ b/clients/client-ec2/src/models/models_1.ts @@ -261,6 +261,11 @@ export interface InstanceRequirementsRequest { *

          The parameter accepts an integer, which Amazon EC2 interprets as a percentage.

          *

          To turn off price protection, specify a high value, such as 999999.

          *

          This parameter is not supported for GetSpotPlacementScores and GetInstanceTypesFromInstanceRequirements.

          + * + *

          If you set TargetCapacityUnitType to vcpu or + * memory-mib, the price protection threshold is applied based on the + * per-vCPU or per-memory price instead of the per-instance price.

          + *
          *

          Default: 100 *

          */ @@ -274,6 +279,11 @@ export interface InstanceRequirementsRequest { *

          The parameter accepts an integer, which Amazon EC2 interprets as a percentage.

          *

          To turn off price protection, specify a high value, such as 999999.

          *

          This parameter is not supported for GetSpotPlacementScores and GetInstanceTypesFromInstanceRequirements.

          + * + *

          If you set TargetCapacityUnitType to vcpu or + * memory-mib, the price protection threshold is applied based on the + * per-vCPU or per-memory price instead of the per-instance price.

          + *
          *

          Default: 20 *

          */ @@ -1065,7 +1075,7 @@ export interface CreateFleetRequest { * launched.

          * *
        - *

        For more information, see EC2 Fleet + *

        For more information, see EC2 Fleet * request types in the Amazon EC2 User Guide.

        */ Type?: FleetType | string; @@ -1400,6 +1410,11 @@ export interface InstanceRequirements { *

        The parameter accepts an integer, which Amazon EC2 interprets as a percentage.

        *

        To turn off price protection, specify a high value, such as 999999.

        *

        This parameter is not supported for GetSpotPlacementScores and GetInstanceTypesFromInstanceRequirements.

        + * + *

        If you set TargetCapacityUnitType to vcpu or + * memory-mib, the price protection threshold is applied based on the + * per-vCPU or per-memory price instead of the per-instance price.

        + *
        *

        Default: 100 *

        */ @@ -1413,6 +1428,11 @@ export interface InstanceRequirements { *

        The parameter accepts an integer, which Amazon EC2 interprets as a percentage.

        *

        To turn off price protection, specify a high value, such as 999999.

        *

        This parameter is not supported for GetSpotPlacementScores and GetInstanceTypesFromInstanceRequirements.

        + * + *

        If you set TargetCapacityUnitType to vcpu or + * memory-mib, the price protection threshold is applied based on the + * per-vCPU or per-memory price instead of the per-instance price.

        + *
        *

        Default: 20 *

        */ @@ -3000,7 +3020,7 @@ export interface CreateIpamPoolRequest { /** *

        The IP protocol assigned to this IPAM pool. You must choose either IPv4 or IPv6 protocol for a pool.

        */ - AddressFamily?: AddressFamily | string; + AddressFamily: AddressFamily | string | undefined; /** *

        If selected, IPAM will continuously look for resources within the CIDR range of this pool diff --git a/clients/client-ec2/src/models/models_3.ts b/clients/client-ec2/src/models/models_3.ts index 3666e68f0117..3d8cad6dfb5d 100644 --- a/clients/client-ec2/src/models/models_3.ts +++ b/clients/client-ec2/src/models/models_3.ts @@ -3704,6 +3704,7 @@ export type ImageAttributeName = | "bootMode" | "description" | "kernel" + | "lastLaunchedTime" | "launchPermission" | "productCodes" | "ramdisk" @@ -3824,9 +3825,20 @@ export interface ImageAttribute { SriovNetSupport?: AttributeValue; /** - *

        Describes a value for a resource attribute that is a String.

        + *

        The boot mode.

        */ BootMode?: AttributeValue; + + /** + *

        The date and time, in ISO 8601 date-time + * format, when the AMI was last used to launch an EC2 instance. When the AMI is used, + * there is a 24-hour delay before that usage is reported.

        + * + *

        + * lastLaunchedTime data is available starting April 2017.

        + *
        + */ + LastLaunchedTime?: AttributeValue; } export namespace ImageAttribute { diff --git a/clients/client-ec2/src/protocols/Aws_ec2.ts b/clients/client-ec2/src/protocols/Aws_ec2.ts index 87192b12dfa1..daf8918e7a4a 100644 --- a/clients/client-ec2/src/protocols/Aws_ec2.ts +++ b/clients/client-ec2/src/protocols/Aws_ec2.ts @@ -65878,6 +65878,7 @@ const deserializeAws_ec2ImageAttribute = (output: any, context: __SerdeContext): RamdiskId: undefined, SriovNetSupport: undefined, BootMode: undefined, + LastLaunchedTime: undefined, }; if (output.blockDeviceMapping === "") { contents.BlockDeviceMappings = []; @@ -65924,6 +65925,9 @@ const deserializeAws_ec2ImageAttribute = (output: any, context: __SerdeContext): if (output["bootMode"] !== undefined) { contents.BootMode = deserializeAws_ec2AttributeValue(output["bootMode"], context); } + if (output["lastLaunchedTime"] !== undefined) { + contents.LastLaunchedTime = deserializeAws_ec2AttributeValue(output["lastLaunchedTime"], context); + } return contents; }; diff --git a/clients/client-ecr/src/ECR.ts b/clients/client-ecr/src/ECR.ts index 8498e0c160aa..9e66f1658bc4 100644 --- a/clients/client-ecr/src/ECR.ts +++ b/clients/client-ecr/src/ECR.ts @@ -212,7 +212,7 @@ export class ECR extends ECRClient { *

        When an image is pushed to a repository, each image layer is checked to verify if it * has been uploaded before. If it has been uploaded, then the image layer is * skipped.

        - * + * *

        This operation is used by the Amazon ECR proxy and is not generally used by * customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images.

        *
        @@ -358,7 +358,7 @@ export class ECR extends ECRClient { * of the image layer for data validation purposes.

        *

        When an image is pushed, the CompleteLayerUpload API is called once per each new image * layer to verify that the upload has completed.

        - * + * *

        This operation is used by the Amazon ECR proxy and is not generally used by * customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images.

        *
        @@ -864,7 +864,7 @@ export class ECR extends ECRClient { * only get URLs for image layers that are referenced in an image.

        *

        When an image is pulled, the GetDownloadUrlForLayer API is called once per image layer * that is not already cached.

        - * + * *

        This operation is used by the Amazon ECR proxy and is not generally used by * customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images.

        *
        @@ -1064,7 +1064,7 @@ export class ECR extends ECRClient { *

        When an image is pushed, the InitiateLayerUpload API is called once per image layer * that has not already been uploaded. Whether or not an image layer has been uploaded is * determined by the BatchCheckLayerAvailability API action.

        - * + * *

        This operation is used by the Amazon ECR proxy and is not generally used by * customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images.

        *
        @@ -1197,7 +1197,12 @@ export class ECR extends ECRClient { } /** - *

        Updates the image scanning configuration for the specified repository.

        + * + *

        The PutImageScanningConfiguration API is being deprecated, in favor + * of specifying the image scanning configuration at the registry level. For more + * information, see PutRegistryScanningConfiguration.

        + *
        + *

        Updates the image scanning configuration for the specified repository.

        */ public putImageScanningConfiguration( args: PutImageScanningConfigurationCommandInput, @@ -1570,7 +1575,7 @@ export class ECR extends ECRClient { *

        When an image is pushed, each new image layer is uploaded in parts. The maximum size * of each image layer part can be 20971520 bytes (or about 20MB). The UploadLayerPart API * is called once per each new image layer part.

        - * + * *

        This operation is used by the Amazon ECR proxy and is not generally used by * customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images.

        *
        diff --git a/clients/client-ecr/src/commands/BatchCheckLayerAvailabilityCommand.ts b/clients/client-ecr/src/commands/BatchCheckLayerAvailabilityCommand.ts index 5059598c57df..0499143ee556 100644 --- a/clients/client-ecr/src/commands/BatchCheckLayerAvailabilityCommand.ts +++ b/clients/client-ecr/src/commands/BatchCheckLayerAvailabilityCommand.ts @@ -28,7 +28,7 @@ export interface BatchCheckLayerAvailabilityCommandOutput *

        When an image is pushed to a repository, each image layer is checked to verify if it * has been uploaded before. If it has been uploaded, then the image layer is * skipped.

        - * + * *

        This operation is used by the Amazon ECR proxy and is not generally used by * customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images.

        *
        diff --git a/clients/client-ecr/src/commands/CompleteLayerUploadCommand.ts b/clients/client-ecr/src/commands/CompleteLayerUploadCommand.ts index 9a2ed95cc8b2..1c81c1e280df 100644 --- a/clients/client-ecr/src/commands/CompleteLayerUploadCommand.ts +++ b/clients/client-ecr/src/commands/CompleteLayerUploadCommand.ts @@ -27,7 +27,7 @@ export interface CompleteLayerUploadCommandOutput extends CompleteLayerUploadRes * of the image layer for data validation purposes.

        *

        When an image is pushed, the CompleteLayerUpload API is called once per each new image * layer to verify that the upload has completed.

        - * + * *

        This operation is used by the Amazon ECR proxy and is not generally used by * customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images.

        *
        diff --git a/clients/client-ecr/src/commands/GetDownloadUrlForLayerCommand.ts b/clients/client-ecr/src/commands/GetDownloadUrlForLayerCommand.ts index daac0804e82a..25efcf71e2d8 100644 --- a/clients/client-ecr/src/commands/GetDownloadUrlForLayerCommand.ts +++ b/clients/client-ecr/src/commands/GetDownloadUrlForLayerCommand.ts @@ -26,7 +26,7 @@ export interface GetDownloadUrlForLayerCommandOutput extends GetDownloadUrlForLa * only get URLs for image layers that are referenced in an image.

        *

        When an image is pulled, the GetDownloadUrlForLayer API is called once per image layer * that is not already cached.

        - * + * *

        This operation is used by the Amazon ECR proxy and is not generally used by * customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images.

        *
        diff --git a/clients/client-ecr/src/commands/InitiateLayerUploadCommand.ts b/clients/client-ecr/src/commands/InitiateLayerUploadCommand.ts index ffdfff9055c4..36a28d9aa5ec 100644 --- a/clients/client-ecr/src/commands/InitiateLayerUploadCommand.ts +++ b/clients/client-ecr/src/commands/InitiateLayerUploadCommand.ts @@ -26,7 +26,7 @@ export interface InitiateLayerUploadCommandOutput extends InitiateLayerUploadRes *

        When an image is pushed, the InitiateLayerUpload API is called once per image layer * that has not already been uploaded. Whether or not an image layer has been uploaded is * determined by the BatchCheckLayerAvailability API action.

        - * + * *

        This operation is used by the Amazon ECR proxy and is not generally used by * customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images.

        *
        diff --git a/clients/client-ecr/src/commands/PutImageScanningConfigurationCommand.ts b/clients/client-ecr/src/commands/PutImageScanningConfigurationCommand.ts index 79048f824a21..90d0fdad399b 100644 --- a/clients/client-ecr/src/commands/PutImageScanningConfigurationCommand.ts +++ b/clients/client-ecr/src/commands/PutImageScanningConfigurationCommand.ts @@ -24,7 +24,12 @@ export interface PutImageScanningConfigurationCommandOutput __MetadataBearer {} /** - *

        Updates the image scanning configuration for the specified repository.

        + * + *

        The PutImageScanningConfiguration API is being deprecated, in favor + * of specifying the image scanning configuration at the registry level. For more + * information, see PutRegistryScanningConfiguration.

        + *
        + *

        Updates the image scanning configuration for the specified repository.

        * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-ecr/src/commands/UploadLayerPartCommand.ts b/clients/client-ecr/src/commands/UploadLayerPartCommand.ts index 64cfb12a296c..c6fc403ac8ae 100644 --- a/clients/client-ecr/src/commands/UploadLayerPartCommand.ts +++ b/clients/client-ecr/src/commands/UploadLayerPartCommand.ts @@ -26,7 +26,7 @@ export interface UploadLayerPartCommandOutput extends UploadLayerPartResponse, _ *

        When an image is pushed, each new image layer is uploaded in parts. The maximum size * of each image layer part can be 20971520 bytes (or about 20MB). The UploadLayerPart API * is called once per each new image layer part.

        - * + * *

        This operation is used by the Amazon ECR proxy and is not generally used by * customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images.

        *
        diff --git a/clients/client-ecr/src/models/models_0.ts b/clients/client-ecr/src/models/models_0.ts index 80f2e34b6aa1..a139b4f9cb81 100644 --- a/clients/client-ecr/src/models/models_0.ts +++ b/clients/client-ecr/src/models/models_0.ts @@ -951,9 +951,8 @@ export enum ImageTagMutability { } /** - *

        The metadata that you apply to a resource to help you categorize and organize them. - * Each tag consists of a key and an optional value, both of which you define. - * Tag keys can have a maximum character length of 128 characters, and tag values can have + *

        The metadata to apply to a resource to help you categorize and organize them. Each tag + * consists of a key and a value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have * a maximum length of 256 characters.

        */ export interface Tag { @@ -964,8 +963,7 @@ export interface Tag { Key?: string; /** - *

        The optional part of a key-value pair that make up a tag. A value acts as - * a descriptor within a tag category (key).

        + *

        A value acts as a descriptor within a tag category (key).

        */ Value?: string; } @@ -1825,6 +1823,20 @@ export interface ImageDetail { *

        The artifact media type of the image.

        */ artifactMediaType?: string; + + /** + *

        The date and time, expressed in standard JavaScript date format, when Amazon ECR recorded + * the last image pull.

        + * + *

        Amazon ECR refreshes the last image pull timestamp at least once every 24 hours. For + * example, if you pull an image once a day then the lastRecordedPullTime + * timestamp will indicate the exact time that the image was last pulled. However, if + * you pull an image once an hour, because Amazon ECR refreshes the + * lastRecordedPullTime timestamp at least once every 24 hours, the + * result may not be the exact time that the image was last pulled.

        + *
        + */ + lastRecordedPullTime?: Date; } export namespace ImageDetail { @@ -3373,7 +3385,11 @@ export namespace GetRegistryScanningConfigurationRequest { */ export interface RegistryScanningRule { /** - *

        The frequency that scans are performed at for a private registry.

        + *

        The frequency that scans are performed at for a private registry. When the + * ENHANCED scan type is specified, the supported scan frequencies are + * CONTINUOUS_SCAN and SCAN_ON_PUSH. When the + * BASIC scan type is specified, the SCAN_ON_PUSH and + * MANUAL scan frequencies are supported.

        */ scanFrequency: ScanFrequency | string | undefined; @@ -4016,12 +4032,15 @@ export namespace PutRegistryPolicyResponse { export interface PutRegistryScanningConfigurationRequest { /** *

        The scanning type to set for the registry.

        - *

        By default, the BASIC scan type is used. When basic scanning is set, you - * may specify filters to determine which individual repositories, or all repositories, are - * scanned when new images are pushed. Alternatively, you can do manual scans of images - * with basic scanning.

        - *

        When the ENHANCED scan type is set, Amazon Inspector provides automated, continuous - * scanning of all repositories in your registry.

        + *

        When a registry scanning configuration is not defined, by default the + * BASIC scan type is used. When basic scanning is used, you may specify + * filters to determine which individual repositories, or all repositories, are scanned + * when new images are pushed to those repositories. Alternatively, you can do manual scans + * of images with basic scanning.

        + *

        When the ENHANCED scan type is set, Amazon Inspector provides automated + * vulnerability scanning. You may choose between continuous scanning or scan on push and + * you may specify filters to determine which individual repositories, or all repositories, + * are scanned.

        */ scanType?: ScanType | string; diff --git a/clients/client-ecr/src/protocols/Aws_json1_1.ts b/clients/client-ecr/src/protocols/Aws_json1_1.ts index 773ba253e510..85b00879149a 100644 --- a/clients/client-ecr/src/protocols/Aws_json1_1.ts +++ b/clients/client-ecr/src/protocols/Aws_json1_1.ts @@ -4807,6 +4807,10 @@ const deserializeAws_json1_1ImageDetail = (output: any, context: __SerdeContext) output.imageTags !== undefined && output.imageTags !== null ? deserializeAws_json1_1ImageTagList(output.imageTags, context) : undefined, + lastRecordedPullTime: + output.lastRecordedPullTime !== undefined && output.lastRecordedPullTime !== null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.lastRecordedPullTime))) + : undefined, registryId: __expectString(output.registryId), repositoryName: __expectString(output.repositoryName), } as any; diff --git a/clients/client-ecs/src/ECS.ts b/clients/client-ecs/src/ECS.ts index e96d9fd05b5b..9cecee68d38d 100644 --- a/clients/client-ecs/src/ECS.ts +++ b/clients/client-ecs/src/ECS.ts @@ -351,8 +351,7 @@ export class ECS extends ECSClient { * Amazon Elastic Container Service Developer Guide.

        *

        Tasks for services that don't use a load balancer are considered healthy if they're in * the RUNNING state. Tasks for services that use a load balancer are - * considered healthy if they're in the RUNNING state and the container - * instance that they're hosted on is reported as healthy by the load balancer.

        + * considered healthy if they're in the RUNNING state and are reported as healthy by the load balancer.

        *

        There are two service scheduler strategies available:

        *
          *
        • @@ -1448,7 +1447,7 @@ export class ECS extends ECSClient { * the root user for an account is affected. The opt-in and opt-out account setting must be * set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is * defined by the opt-in status of the IAM user or role that created the resource. You must - * enable this setting to use Amazon ECS features such as resource tagging.

          + * turn on this setting to use Amazon ECS features such as resource tagging.

          *

          When awsvpcTrunking is specified, the elastic network interface (ENI) * limit for any new container instances that support the feature is changed. If * awsvpcTrunking is enabled, any new container instances that support the @@ -2152,8 +2151,7 @@ export class ECS extends ECSClient { * replacement tasks are considered healthy. Tasks for services that do not use a * load balancer are considered healthy if they're in the RUNNING * state. Tasks for services that use a load balancer are considered healthy if - * they're in the RUNNING state and the container instance they're - * hosted on is reported as healthy by the load balancer.

          + * they're in the RUNNING state and are reported as healthy by the load balancer.

          *
        • *
        • *

          The maximumPercent parameter represents an upper limit on the @@ -2210,19 +2208,21 @@ export class ECS extends ECSClient { * apply to your participation in this preview.

          * *

          Modifies the parameters of a service.

          - *

          For services using the rolling update (ECS) deployment controller, the - * desired count, deployment configuration, network configuration, task placement - * constraints and strategies, or task definition used can be updated.

          - *

          For services using the blue/green (CODE_DEPLOY) deployment controller, - * only the desired count, deployment configuration, task placement constraints and - * strategies, and health check grace period can be updated using this API. If the network - * configuration, platform version, or task definition need to be updated, a new CodeDeploy - * deployment is created. For more information, see CreateDeployment in the CodeDeploy API Reference.

          - *

          For services using an external deployment controller, you can update only the desired - * count, task placement constraints and strategies, and health check grace period using - * this API. If the launch type, load balancer, network configuration, platform version, or - * task definition need to be updated, create a new task set. For more information, see - * CreateTaskSet.

          + *

          For services using the rolling update (ECS) you can update the desired count, + * the deployment configuration, the network configuration, load balancers, service + * registries, enable ECS managed tags option, propagate tags option, task placement + * constraints and strategies, and the task definition. When you update any of these + * parameters, Amazon ECS starts new tasks with the new configuration.

          + *

          For services using the blue/green (CODE_DEPLOY) deployment controller, only the + * desired count, deployment configuration, task placement constraints and strategies, + * enable ECS managed tags option, and propagate tags can be updated using this API. If the + * network configuration, platform version, task definition, or load balancer need to be + * updated, create a new CodeDeploy deployment. For more information, see CreateDeployment in the CodeDeploy API Reference.

          + *

          For services using an external deployment controller, you can update only the desired count, + * task placement constraints and strategies, health check grace period, enable ECS managed + * tags option, and propagate tags option, using this API. If the launch type, load + * balancer, network configuration, platform version, or task definition need to be + * updated, create a new task set. For more information, see CreateTaskSet.

          *

          You can add to or subtract from the number of instantiations of a task definition in a * service by specifying the cluster that the service is running in and a new * desiredCount parameter.

          @@ -2250,9 +2250,8 @@ export class ECS extends ECSClient { * scheduler to stop two existing tasks before starting two new tasks. Tasks for * services that don't use a load balancer are considered healthy if they're in the * RUNNING state. Tasks for services that use a load balancer are - * considered healthy if they're in the RUNNING state and the - * container instance they're hosted on is reported as healthy by the load - * balancer.

          + * considered healthy if they're in the RUNNING state and are reported + * as healthy by the load balancer.

          *
        • *
        • *

          The maximumPercent parameter represents an upper limit on the @@ -2297,6 +2296,7 @@ export class ECS extends ECSClient { *

        * *
      + * *

      When the service scheduler stops running tasks, it attempts to maintain balance across * the Availability Zones in your cluster using the following logic:

      *
        @@ -2312,6 +2312,25 @@ export class ECS extends ECSClient { * running tasks for this service.

        * *
      + * + *

      You must have a service-linked role when you update any of the following service properties. + * If you specified a custom IAM role when you created the service, Amazon ECS automatically + * replaces the roleARN associated with the service with the ARN of your service-linked + * role. For more information, see Service-linked + * roles in the Amazon Elastic Container Service Developer Guide.

      + *
        + *
      • + *

        + * loadBalancers, + *

        + *
      • + *
      • + *

        + * serviceRegistries + *

        + *
      • + *
      + *
      */ public updateService( args: UpdateServiceCommandInput, diff --git a/clients/client-ecs/src/commands/CreateServiceCommand.ts b/clients/client-ecs/src/commands/CreateServiceCommand.ts index 15957efdd155..945382a9431e 100644 --- a/clients/client-ecs/src/commands/CreateServiceCommand.ts +++ b/clients/client-ecs/src/commands/CreateServiceCommand.ts @@ -33,8 +33,7 @@ export interface CreateServiceCommandOutput extends CreateServiceResponse, __Met * Amazon Elastic Container Service Developer Guide.

      *

      Tasks for services that don't use a load balancer are considered healthy if they're in * the RUNNING state. Tasks for services that use a load balancer are - * considered healthy if they're in the RUNNING state and the container - * instance that they're hosted on is reported as healthy by the load balancer.

      + * considered healthy if they're in the RUNNING state and are reported as healthy by the load balancer.

      *

      There are two service scheduler strategies available:

      *
        *
      • diff --git a/clients/client-ecs/src/commands/PutAccountSettingCommand.ts b/clients/client-ecs/src/commands/PutAccountSettingCommand.ts index c0a0a8e2aec1..62c621355968 100644 --- a/clients/client-ecs/src/commands/PutAccountSettingCommand.ts +++ b/clients/client-ecs/src/commands/PutAccountSettingCommand.ts @@ -33,7 +33,7 @@ export interface PutAccountSettingCommandOutput extends PutAccountSettingRespons * the root user for an account is affected. The opt-in and opt-out account setting must be * set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is * defined by the opt-in status of the IAM user or role that created the resource. You must - * enable this setting to use Amazon ECS features such as resource tagging.

        + * turn on this setting to use Amazon ECS features such as resource tagging.

        *

        When awsvpcTrunking is specified, the elastic network interface (ENI) * limit for any new container instances that support the feature is changed. If * awsvpcTrunking is enabled, any new container instances that support the diff --git a/clients/client-ecs/src/commands/UpdateContainerInstancesStateCommand.ts b/clients/client-ecs/src/commands/UpdateContainerInstancesStateCommand.ts index 7a4128678453..b6ee6bc51b9a 100644 --- a/clients/client-ecs/src/commands/UpdateContainerInstancesStateCommand.ts +++ b/clients/client-ecs/src/commands/UpdateContainerInstancesStateCommand.ts @@ -53,8 +53,7 @@ export interface UpdateContainerInstancesStateCommandOutput * replacement tasks are considered healthy. Tasks for services that do not use a * load balancer are considered healthy if they're in the RUNNING * state. Tasks for services that use a load balancer are considered healthy if - * they're in the RUNNING state and the container instance they're - * hosted on is reported as healthy by the load balancer.

        + * they're in the RUNNING state and are reported as healthy by the load balancer.

        *
      • *
      • *

        The maximumPercent parameter represents an upper limit on the diff --git a/clients/client-ecs/src/commands/UpdateServiceCommand.ts b/clients/client-ecs/src/commands/UpdateServiceCommand.ts index abfdbc1d1d83..459264fd5be2 100644 --- a/clients/client-ecs/src/commands/UpdateServiceCommand.ts +++ b/clients/client-ecs/src/commands/UpdateServiceCommand.ts @@ -29,19 +29,21 @@ export interface UpdateServiceCommandOutput extends UpdateServiceResponse, __Met * apply to your participation in this preview.

        * *

        Modifies the parameters of a service.

        - *

        For services using the rolling update (ECS) deployment controller, the - * desired count, deployment configuration, network configuration, task placement - * constraints and strategies, or task definition used can be updated.

        - *

        For services using the blue/green (CODE_DEPLOY) deployment controller, - * only the desired count, deployment configuration, task placement constraints and - * strategies, and health check grace period can be updated using this API. If the network - * configuration, platform version, or task definition need to be updated, a new CodeDeploy - * deployment is created. For more information, see CreateDeployment in the CodeDeploy API Reference.

        - *

        For services using an external deployment controller, you can update only the desired - * count, task placement constraints and strategies, and health check grace period using - * this API. If the launch type, load balancer, network configuration, platform version, or - * task definition need to be updated, create a new task set. For more information, see - * CreateTaskSet.

        + *

        For services using the rolling update (ECS) you can update the desired count, + * the deployment configuration, the network configuration, load balancers, service + * registries, enable ECS managed tags option, propagate tags option, task placement + * constraints and strategies, and the task definition. When you update any of these + * parameters, Amazon ECS starts new tasks with the new configuration.

        + *

        For services using the blue/green (CODE_DEPLOY) deployment controller, only the + * desired count, deployment configuration, task placement constraints and strategies, + * enable ECS managed tags option, and propagate tags can be updated using this API. If the + * network configuration, platform version, task definition, or load balancer need to be + * updated, create a new CodeDeploy deployment. For more information, see CreateDeployment in the CodeDeploy API Reference.

        + *

        For services using an external deployment controller, you can update only the desired count, + * task placement constraints and strategies, health check grace period, enable ECS managed + * tags option, and propagate tags option, using this API. If the launch type, load + * balancer, network configuration, platform version, or task definition need to be + * updated, create a new task set. For more information, see CreateTaskSet.

        *

        You can add to or subtract from the number of instantiations of a task definition in a * service by specifying the cluster that the service is running in and a new * desiredCount parameter.

        @@ -69,9 +71,8 @@ export interface UpdateServiceCommandOutput extends UpdateServiceResponse, __Met * scheduler to stop two existing tasks before starting two new tasks. Tasks for * services that don't use a load balancer are considered healthy if they're in the * RUNNING state. Tasks for services that use a load balancer are - * considered healthy if they're in the RUNNING state and the - * container instance they're hosted on is reported as healthy by the load - * balancer.

        + * considered healthy if they're in the RUNNING state and are reported + * as healthy by the load balancer.

        *
      • *
      • *

        The maximumPercent parameter represents an upper limit on the @@ -116,6 +117,7 @@ export interface UpdateServiceCommandOutput extends UpdateServiceResponse, __Met *

      * *
    + * *

    When the service scheduler stops running tasks, it attempts to maintain balance across * the Availability Zones in your cluster using the following logic:

    *
      @@ -131,6 +133,25 @@ export interface UpdateServiceCommandOutput extends UpdateServiceResponse, __Met * running tasks for this service.

      * *
    + * + *

    You must have a service-linked role when you update any of the following service properties. + * If you specified a custom IAM role when you created the service, Amazon ECS automatically + * replaces the roleARN associated with the service with the ARN of your service-linked + * role. For more information, see Service-linked + * roles in the Amazon Elastic Container Service Developer Guide.

    + *
      + *
    • + *

      + * loadBalancers, + *

      + *
    • + *
    • + *

      + * serviceRegistries + *

      + *
    • + *
    + *
    * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-ecs/src/models/models_0.ts b/clients/client-ecs/src/models/models_0.ts index 3134a5cccce0..98eaeb4d97a5 100644 --- a/clients/client-ecs/src/models/models_0.ts +++ b/clients/client-ecs/src/models/models_0.ts @@ -68,7 +68,7 @@ export enum ManagedScalingStatus { */ export interface ManagedScaling { /** - *

    Determines whether to enable managed scaling for the capacity provider.

    + *

    Determines whether to use managed scaling for the capacity provider.

    */ status?: ManagedScalingStatus | string; @@ -502,7 +502,7 @@ export interface ExecuteCommandLogConfiguration { cloudWatchLogGroupName?: string; /** - *

    Determines whether to enable encryption on the CloudWatch logs. If not specified, + *

    Determines whether to use encryption on the CloudWatch logs. If not specified, * encryption will be disabled.

    */ cloudWatchEncryptionEnabled?: boolean; @@ -682,7 +682,7 @@ export enum ClusterSettingName { } /** - *

    The settings to use when creating a cluster. This parameter is used to enable CloudWatch + *

    The settings to use when creating a cluster. This parameter is used to turn on CloudWatch * Container Insights for a cluster.

    */ export interface ClusterSetting { @@ -757,7 +757,7 @@ export interface CreateClusterRequest { tags?: Tag[]; /** - *

    The setting to use when creating a cluster. This parameter is used to enable CloudWatch + *

    The setting to use when creating a cluster. This parameter is used to turn on CloudWatch * Container Insights for a cluster. If this value is specified, it overrides the * containerInsights value set with PutAccountSetting or * PutAccountSettingDefault.

    @@ -1120,19 +1120,19 @@ export class ClusterNotFoundException extends __BaseException { *

    The deployment circuit breaker determines whether a * service deployment will fail if the service can't reach a steady state. If enabled, a * service deployment will transition to a failed state and stop launching new tasks. You - * can also enable Amazon ECS to roll back your service to the last completed deployment after a + * can also configure Amazon ECS to roll back your service to the last completed deployment after a * failure. For more information, see Rolling * update in the Amazon Elastic Container Service Developer Guide.

    */ export interface DeploymentCircuitBreaker { /** - *

    Determines whether to enable the deployment circuit breaker logic for the + *

    Determines whether to use the deployment circuit breaker logic for the * service.

    */ enable: boolean | undefined; /** - *

    Determines whether to enable Amazon ECS to roll back the service if a service deployment + *

    Determines whether to configure Amazon ECS to roll back the service if a service deployment * fails. If rollback is enabled, when a service deployment fails, the service is rolled * back to the last deployment that completed successfully.

    */ @@ -1283,6 +1283,14 @@ export enum LaunchType { *

    The load balancer configuration to use with a service or task set.

    *

    For specific notes and restrictions regarding the use of load balancers with services * and task sets, see the CreateService and CreateTaskSet actions.

    + *

    When you add, update, or remove a load balancer configuration, Amazon ECS starts a new + * deployment with the updated Elastic Load Balancing configuration. This causes tasks to register to and + * deregister from load balancers.

    + *

    We recommend that you verify this on a test environment before you update the Elastic Load Balancing + * configuration.

    + *

    A service-linked role is required for services that use multiple target groups. For + * more information, see Service-linked + * roles in the Amazon Elastic Container Service Developer Guide.

    */ export interface LoadBalancer { /** @@ -1493,6 +1501,7 @@ export namespace PlacementStrategy { } export enum PropagateTags { + NONE = "NONE", SERVICE = "SERVICE", TASK_DEFINITION = "TASK_DEFINITION", } @@ -1504,6 +1513,11 @@ export enum SchedulingStrategy { /** *

    The details for the service registry.

    + *

    Each service may be associated with one service registry. Multiple service registries for + * each service are not supported.

    + *

    When you add, update, or remove the service registries configuration, Amazon ECS starts a + * new deployment. New tasks are registered and deregistered to the updated service + * registry configuration.

    */ export interface ServiceRegistry { /** @@ -1594,10 +1608,8 @@ export interface CreateServiceRequest { * also have up to two listeners: a required listener for production traffic and an * optional listener that you can use to perform validation tests with Lambda functions * before routing production traffic to it.

    - *

    After you create a service using the ECS deployment controller, the load - * balancer name or target group ARN, container name, and container port that's specified - * in the service definition are immutable. If you use the CODE_DEPLOY - * deployment controller, these values can be changed when updating the service.

    + *

    If you use the CODE_DEPLOY deployment controller, these values can be changed + * when updating the service.

    *

    For Application Load Balancers and Network Load Balancers, this object must contain the load balancer target group ARN, * the container name, and the container port to access from the load balancer. The * container name must be as it appears in a container definition. The load balancer name @@ -1741,6 +1753,7 @@ export interface CreateServiceRequest { * service is configured to use a load balancer. If your service has a load balancer * defined and you don't specify a health check grace period value, the default value of * 0 is used.

    + *

    If you do not use Elastic Load Balancing, we recommend that you use the startPeriod in the task definition health check parameters. For more information, see Health check.

    *

    If your service's tasks take a while to start and respond to Elastic Load Balancing health checks, you * can specify a health check grace period of up to * 2,147,483,647 @@ -1828,7 +1841,7 @@ export interface CreateServiceRequest { tags?: Tag[]; /** - *

    Specifies whether to enable Amazon ECS managed tags for the tasks within the service. For + *

    Specifies whether to turn on Amazon ECS managed tags for the tasks within the service. For * more information, see Tagging Your Amazon ECS * Resources in the Amazon Elastic Container Service Developer Guide.

    */ @@ -2464,13 +2477,11 @@ export interface Service { * *
  • *

    - * DAEMON-The daemon scheduling strategy deploys exactly one - * task on each active container - * instance. - * This taskmeets all of the task placement constraints that you - * specify in your cluster. The service scheduler also evaluates the task placement - * constraints for running tasks. It stop tasks that don't meet the placement - * constraints.

    + * DAEMON-The daemon scheduling strategy deploys exactly one task on each + * active container instance. This task meets all of the task placement constraints + * that you specify in your cluster. The service scheduler also evaluates the task + * placement constraints for running tasks. It stop tasks that don't meet the + * placement constraints.

    * *

    Fargate tasks don't support the DAEMON * scheduling strategy.

    @@ -2531,7 +2542,7 @@ export interface Service { createdBy?: string; /** - *

    Determines whether to enable Amazon ECS managed tags for the tasks in the service. For more + *

    Determines whether to use Amazon ECS managed tags for the tasks in the service. For more * information, see Tagging Your Amazon ECS * Resources in the Amazon Elastic Container Service Developer Guide.

    */ @@ -2935,8 +2946,8 @@ export enum TargetType { } /** - *

    An attribute is a name-value pair that's associated with an Amazon ECS object. Attributes - * enable you to extend the Amazon ECS data model by adding custom metadata to your resources. + *

    An attribute is a name-value pair that's associated with an Amazon ECS object. Use attributes + * to extend the Amazon ECS data model by adding custom metadata to your resources. * For more information, see Attributes in the Amazon Elastic Container Service Developer Guide.

    */ export interface Attribute { @@ -3701,7 +3712,7 @@ export enum ContainerCondition { * multiple dependencies. When a dependency is defined for container startup, for container * shutdown it is reversed.

    *

    Your Amazon ECS container instances require at least version 1.26.0 of the container agent - * to enable container dependencies. However, we recommend using the latest container agent + * to use container dependencies. However, we recommend using the latest container agent * version. For information about checking your agent version and updating to the latest * version, see Updating the Amazon ECS * Container Agent in the Amazon Elastic Container Service Developer Guide. If you're using @@ -4286,6 +4297,7 @@ export interface Secret { /** *

    The secret to expose to the container. The supported values are either the full ARN of * the Secrets Manager secret or the full ARN of the parameter in the SSM Parameter Store.

    + *

    For information about the required Identity and Access Management permissions, see Required IAM permissions for Amazon ECS secrets (for Secrets Manager) or Required IAM permissions for Amazon ECS secrets (for Systems Manager Parameter store) in the Amazon Elastic Container Service Developer Guide.

    * *

    If the SSM Parameter Store parameter exists in the same Region as the task * you're launching, then you can use either the full ARN or name of the parameter. If @@ -5015,7 +5027,7 @@ export interface ContainerDefinition { * multiple dependencies. When a dependency is defined for container startup, for container * shutdown it is reversed.

    *

    For tasks using the EC2 launch type, the container instances require at - * least version 1.26.0 of the container agent to enable container dependencies. However, + * least version 1.26.0 of the container agent to turn on container dependencies. However, * we recommend using the latest container agent version. For information about checking * your agent version and updating to the latest version, see Updating the Amazon ECS * Container Agent in the Amazon Elastic Container Service Developer Guide. If you're using @@ -5059,7 +5071,7 @@ export interface ContainerDefinition { *

  • *
*

For tasks using the EC2 launch type, your container instances require at - * least version 1.26.0 of the container agent to enable a container start + * least version 1.26.0 of the container agent to use a container start * timeout value. However, we recommend using the latest container agent version. For * information about checking your agent version and updating to the latest version, see * Updating the Amazon ECS @@ -5092,7 +5104,7 @@ export interface ContainerDefinition { * stopTimeout parameter or the ECS_CONTAINER_STOP_TIMEOUT * agent configuration variable are set, then the default values of 30 seconds for Linux * containers and 30 seconds on Windows containers are used. Your container instances - * require at least version 1.26.0 of the container agent to enable a container stop + * require at least version 1.26.0 of the container agent to use a container stop * timeout value. However, we recommend using the latest container agent version. For * information about checking your agent version and updating to the latest version, see * Updating the Amazon ECS @@ -5394,16 +5406,9 @@ export namespace ContainerDefinition { * tasks hosted on Fargate. For more information, see Fargate task * storage in the Amazon ECS User Guide for Fargate.

* - *

This parameter is only supported for tasks hosted on Fargate using - * the following platform versions:

- *
    - *
  • - *

    Linux platform version 1.4.0 or later.

    - *
  • - *
  • - *

    Windows platform version 1.0.0 or later.

    - *
  • - *
+ *

This parameter is only supported for tasks hosted on Fargate using Linux + * platform version 1.4.0 or later. This parameter is not supported for + * Windows containers on Fargate.

*
*/ export interface EphemeralStorage { @@ -5512,7 +5517,7 @@ export enum ProxyConfigurationType { *

The configuration details for the App Mesh proxy.

*

For tasks that use the EC2 launch type, the container instances require * at least version 1.26.0 of the container agent and at least version 1.26.0-1 of the - * ecs-init package to enable a proxy configuration. If your container + * ecs-init package to use a proxy configuration. If your container * instances are launched from the Amazon ECS optimized AMI version 20190301 or * later, then they contain the required versions of the container agent and * ecs-init. For more information, see Amazon ECS-optimized Linux AMI @@ -5772,7 +5777,7 @@ export interface EFSVolumeConfiguration { rootDirectory?: string; /** - *

Determines whether to enable encryption for Amazon EFS data in transit between the Amazon ECS + *

Determines whether to use encryption for Amazon EFS data in transit between the Amazon ECS * host and the Amazon EFS server. Transit encryption must be enabled if Amazon EFS IAM authorization * is used. If this parameter is omitted, the default value of DISABLED is * used. For more information, see Encrypting Data in Transit in @@ -6227,7 +6232,7 @@ export interface TaskDefinition { /** *

The configuration details for the App Mesh proxy.

*

Your Amazon ECS container instances require at least version 1.26.0 of the container agent - * and at least version 1.26.0-1 of the ecs-init package to enable a proxy + * and at least version 1.26.0-1 of the ecs-init package to use a proxy * configuration. If your container instances are launched from the Amazon ECS optimized AMI * version 20190301 or later, they contain the required versions of the * container agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.

@@ -7309,6 +7314,39 @@ export interface Task { /** *

The stop code indicating why a task was stopped. The stoppedReason might * contain additional details.

+ *

The following are valid values:

+ *
    + *
  • + *

    + * TaskFailedToStart + *

    + *
  • + *
  • + *

    + * EssentialContainerExited + *

    + *
  • + *
  • + *

    + * UserInitiated + *

    + *
  • + *
  • + *

    + * TerminationNotice + *

    + *
  • + *
  • + *

    + * ServiceSchedulerInitiated + *

    + *
  • + *
  • + *

    + * SpotInterruption + *

    + *
  • + *
*/ stopCode?: TaskStopCode | string; @@ -7578,8 +7616,7 @@ export interface Session { /** *

A URL - * back - * to managed agent on the container that the SSM Session Manager client + * to the managed agent on the container that the SSM Session Manager client * uses to send commands and receive output from the container.

*/ streamUrl?: string; @@ -9060,7 +9097,7 @@ export interface RegisterTaskDefinitionRequest { *

The configuration details for the App Mesh proxy.

*

For tasks hosted on Amazon EC2 instances, the container instances require at least version * 1.26.0 of the container agent and at least version - * 1.26.0-1 of the ecs-init package to enable a proxy + * 1.26.0-1 of the ecs-init package to use a proxy * configuration. If your container instances are launched from the Amazon ECS-optimized * AMI version 20190301 or later, then they contain the required versions of * the container agent and ecs-init. For more information, see Amazon ECS-optimized AMI versions in the @@ -9178,14 +9215,14 @@ export interface RunTaskRequest { count?: number; /** - *

Specifies whether to enable Amazon ECS managed tags for the task. For more information, see + *

Specifies whether to use Amazon ECS managed tags for the task. For more information, see * Tagging Your Amazon ECS * Resources in the Amazon Elastic Container Service Developer Guide.

*/ enableECSManagedTags?: boolean; /** - *

Determines whether to enable the execute command functionality for the containers in + *

Determines whether to use the execute command functionality for the containers in * this task. If true, this enables execute command functionality on all * containers in the task.

*/ @@ -9386,7 +9423,7 @@ export interface StartTaskRequest { containerInstances: string[] | undefined; /** - *

Specifies whether to enable Amazon ECS managed tags for the task. For more information, see + *

Specifies whether to use Amazon ECS managed tags for the task. For more information, see * Tagging Your Amazon ECS * Resources in the Amazon Elastic Container Service Developer Guide.

*/ @@ -10101,7 +10138,7 @@ export interface UpdateClusterSettingsRequest { cluster: string | undefined; /** - *

The setting to use by default for a cluster. This parameter is used to enable CloudWatch + *

The setting to use by default for a cluster. This parameter is used to turn on CloudWatch * Container Insights for a cluster. If this value is specified, it overrides the * containerInsights value set with PutAccountSetting or * PutAccountSettingDefault.

@@ -10389,6 +10426,48 @@ export interface UpdateServiceRequest { * you can set this to null when performing this action.

*/ enableExecuteCommand?: boolean; + + /** + *

Determines whether to turn on Amazon ECS managed tags for the tasks in the service. For more + * information, see Tagging Your Amazon ECS + * Resources in the Amazon Elastic Container Service Developer Guide.

+ *

Only tasks launched after the update will reflect the update. To update the tags on + * all tasks, set forceNewDeployment to true, so that Amazon ECS + * starts new tasks with the updated tags.

+ */ + enableECSManagedTags?: boolean; + + /** + *

A list of Elastic Load Balancing load balancer objects. It contains the load balancer name, the + * container name, and the container port to access from the load balancer. The container + * name is as it appears in a container definition.

+ *

When you add, update, or remove a load balancer configuration, Amazon ECS starts new tasks with + * the updated Elastic Load Balancing configuration, and then stops the old tasks when the new tasks are + * running.

+ *

You can remove existing loadBalancers by passing an empty list.

+ */ + loadBalancers?: LoadBalancer[]; + + /** + *

Determines whether to propagate the tags from the task definition or the service to + * the task. If no value is specified, the tags aren't propagated.

+ *

Only tasks launched after the update will reflect the update. To update the tags on + * all tasks, set forceNewDeployment to true, so that Amazon ECS + * starts new tasks with the updated tags.

+ */ + propagateTags?: PropagateTags | string; + + /** + *

The details for the service discovery registries to assign to this service. For more + * information, see Service + * Discovery.

+ *

When you add, update, or remove the service registries configuration, Amazon ECS starts new tasks + * with the updated service registries configuration, and then stops the old tasks when the + * new tasks are running.

+ *

You can remove existing serviceRegistries by passing an empty + * list.

+ */ + serviceRegistries?: ServiceRegistry[]; } export namespace UpdateServiceRequest { @@ -10446,7 +10525,7 @@ export namespace UpdateServicePrimaryTaskSetRequest { export interface UpdateServicePrimaryTaskSetResponse { /** - *

Details about the task set.

+ *

Details about the task set.

*/ taskSet?: TaskSet; } diff --git a/clients/client-ecs/src/protocols/Aws_json1_1.ts b/clients/client-ecs/src/protocols/Aws_json1_1.ts index 01f55a6fb81a..ea9f0e0b365a 100644 --- a/clients/client-ecs/src/protocols/Aws_json1_1.ts +++ b/clients/client-ecs/src/protocols/Aws_json1_1.ts @@ -6203,6 +6203,8 @@ const serializeAws_json1_1UpdateServiceRequest = (input: UpdateServiceRequest, c deploymentConfiguration: serializeAws_json1_1DeploymentConfiguration(input.deploymentConfiguration, context), }), ...(input.desiredCount !== undefined && input.desiredCount !== null && { desiredCount: input.desiredCount }), + ...(input.enableECSManagedTags !== undefined && + input.enableECSManagedTags !== null && { enableECSManagedTags: input.enableECSManagedTags }), ...(input.enableExecuteCommand !== undefined && input.enableExecuteCommand !== null && { enableExecuteCommand: input.enableExecuteCommand }), ...(input.forceNewDeployment !== undefined && @@ -6211,6 +6213,10 @@ const serializeAws_json1_1UpdateServiceRequest = (input: UpdateServiceRequest, c input.healthCheckGracePeriodSeconds !== null && { healthCheckGracePeriodSeconds: input.healthCheckGracePeriodSeconds, }), + ...(input.loadBalancers !== undefined && + input.loadBalancers !== null && { + loadBalancers: serializeAws_json1_1LoadBalancers(input.loadBalancers, context), + }), ...(input.networkConfiguration !== undefined && input.networkConfiguration !== null && { networkConfiguration: serializeAws_json1_1NetworkConfiguration(input.networkConfiguration, context), @@ -6225,7 +6231,12 @@ const serializeAws_json1_1UpdateServiceRequest = (input: UpdateServiceRequest, c }), ...(input.platformVersion !== undefined && input.platformVersion !== null && { platformVersion: input.platformVersion }), + ...(input.propagateTags !== undefined && input.propagateTags !== null && { propagateTags: input.propagateTags }), ...(input.service !== undefined && input.service !== null && { service: input.service }), + ...(input.serviceRegistries !== 
undefined && + input.serviceRegistries !== null && { + serviceRegistries: serializeAws_json1_1ServiceRegistries(input.serviceRegistries, context), + }), ...(input.taskDefinition !== undefined && input.taskDefinition !== null && { taskDefinition: input.taskDefinition }), }; diff --git a/clients/client-eks/src/models/models_0.ts b/clients/client-eks/src/models/models_0.ts index a53cddc24e9a..4d84897cb7b8 100644 --- a/clients/client-eks/src/models/models_0.ts +++ b/clients/client-eks/src/models/models_0.ts @@ -2235,6 +2235,7 @@ export enum NodegroupIssueCode { EC2_SECURITY_GROUP_DELETION_FAILURE = "Ec2SecurityGroupDeletionFailure", EC2_SECURITY_GROUP_NOT_FOUND = "Ec2SecurityGroupNotFound", EC2_SUBNET_INVALID_CONFIGURATION = "Ec2SubnetInvalidConfiguration", + EC2_SUBNET_MISSING_IPV6_ASSIGNMENT = "Ec2SubnetMissingIpv6Assignment", EC2_SUBNET_NOT_FOUND = "Ec2SubnetNotFound", IAM_INSTANCE_PROFILE_NOT_FOUND = "IamInstanceProfileNotFound", IAM_LIMIT_EXCEEDED = "IamLimitExceeded", diff --git a/clients/client-elasticache/src/models/models_0.ts b/clients/client-elasticache/src/models/models_0.ts index 948e4ca0a4f0..56bdbf068cf5 100644 --- a/clients/client-elasticache/src/models/models_0.ts +++ b/clients/client-elasticache/src/models/models_0.ts @@ -2424,37 +2424,6 @@ export interface CreateCacheClusterMessage { * * *
  • - *

    Memory optimized with data tiering:

    - *
      - *
    • - *

      Current generation:

      - * - *

      - * R6gd node types (available only for Redis engine version 6.2 onward).

      - * - * - * - * - *

      - * - * cache.r6gd.xlarge, - * cache.r6gd.2xlarge, - * cache.r6gd.4xlarge, - * cache.r6gd.8xlarge, - * cache.r6gd.12xlarge, - * cache.r6gd.16xlarge - * - * - * - * - * - * - *

      - * - *
    • - *
    - *
  • - *
  • *

    Memory optimized:

    *
      *
    • diff --git a/clients/client-eventbridge/src/endpoints.ts b/clients/client-eventbridge/src/endpoints.ts index ead0160761ed..d420f4722c13 100644 --- a/clients/client-eventbridge/src/endpoints.ts +++ b/clients/client-eventbridge/src/endpoints.ts @@ -22,19 +22,17 @@ const regionHash: RegionHash = { variants: [ { hostname: "events.us-gov-east-1.amazonaws.com", - tags: [], + tags: ["fips"], }, ], - signingRegion: "us-gov-east-1", }, "us-gov-west-1": { variants: [ { hostname: "events.us-gov-west-1.amazonaws.com", - tags: [], + tags: ["fips"], }, ], - signingRegion: "us-gov-west-1", }, "us-west-1": { variants: [ @@ -155,7 +153,7 @@ const partitionHash: PartitionHash = { ], }, "aws-us-gov": { - regions: ["us-gov-east-1", "us-gov-west-1"], + regions: ["fips-us-gov-east-1", "fips-us-gov-west-1", "us-gov-east-1", "us-gov-west-1"], regionRegex: "^us\\-gov\\-\\w+\\-\\d+$", variants: [ { diff --git a/clients/client-finspace-data/src/FinspaceData.ts b/clients/client-finspace-data/src/FinspaceData.ts index 504470093287..1db206f724e7 100644 --- a/clients/client-finspace-data/src/FinspaceData.ts +++ b/clients/client-finspace-data/src/FinspaceData.ts @@ -15,11 +15,24 @@ import { CreateDataViewCommandInput, CreateDataViewCommandOutput, } from "./commands/CreateDataViewCommand"; +import { + CreatePermissionGroupCommand, + CreatePermissionGroupCommandInput, + CreatePermissionGroupCommandOutput, +} from "./commands/CreatePermissionGroupCommand"; +import { CreateUserCommand, CreateUserCommandInput, CreateUserCommandOutput } from "./commands/CreateUserCommand"; import { DeleteDatasetCommand, DeleteDatasetCommandInput, DeleteDatasetCommandOutput, } from "./commands/DeleteDatasetCommand"; +import { + DeletePermissionGroupCommand, + DeletePermissionGroupCommandInput, + DeletePermissionGroupCommandOutput, +} from "./commands/DeletePermissionGroupCommand"; +import { DisableUserCommand, DisableUserCommandInput, DisableUserCommandOutput } from "./commands/DisableUserCommand"; +import { 
EnableUserCommand, EnableUserCommandInput, EnableUserCommandOutput } from "./commands/EnableUserCommand"; import { GetChangesetCommand, GetChangesetCommandInput, @@ -32,6 +45,7 @@ import { GetProgrammaticAccessCredentialsCommandInput, GetProgrammaticAccessCredentialsCommandOutput, } from "./commands/GetProgrammaticAccessCredentialsCommand"; +import { GetUserCommand, GetUserCommandInput, GetUserCommandOutput } from "./commands/GetUserCommand"; import { GetWorkingLocationCommand, GetWorkingLocationCommandInput, @@ -52,6 +66,17 @@ import { ListDataViewsCommandInput, ListDataViewsCommandOutput, } from "./commands/ListDataViewsCommand"; +import { + ListPermissionGroupsCommand, + ListPermissionGroupsCommandInput, + ListPermissionGroupsCommandOutput, +} from "./commands/ListPermissionGroupsCommand"; +import { ListUsersCommand, ListUsersCommandInput, ListUsersCommandOutput } from "./commands/ListUsersCommand"; +import { + ResetUserPasswordCommand, + ResetUserPasswordCommandInput, + ResetUserPasswordCommandOutput, +} from "./commands/ResetUserPasswordCommand"; import { UpdateChangesetCommand, UpdateChangesetCommandInput, @@ -62,6 +87,12 @@ import { UpdateDatasetCommandInput, UpdateDatasetCommandOutput, } from "./commands/UpdateDatasetCommand"; +import { + UpdatePermissionGroupCommand, + UpdatePermissionGroupCommandInput, + UpdatePermissionGroupCommandOutput, +} from "./commands/UpdatePermissionGroupCommand"; +import { UpdateUserCommand, UpdateUserCommandInput, UpdateUserCommandOutput } from "./commands/UpdateUserCommand"; import { FinspaceDataClient } from "./FinspaceDataClient"; /** @@ -164,6 +195,64 @@ export class FinspaceData extends FinspaceDataClient { } } + /** + *

      Creates a group of permissions for various actions that a user can perform in FinSpace.

      + */ + public createPermissionGroup( + args: CreatePermissionGroupCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public createPermissionGroup( + args: CreatePermissionGroupCommandInput, + cb: (err: any, data?: CreatePermissionGroupCommandOutput) => void + ): void; + public createPermissionGroup( + args: CreatePermissionGroupCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: CreatePermissionGroupCommandOutput) => void + ): void; + public createPermissionGroup( + args: CreatePermissionGroupCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: CreatePermissionGroupCommandOutput) => void), + cb?: (err: any, data?: CreatePermissionGroupCommandOutput) => void + ): Promise | void { + const command = new CreatePermissionGroupCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

      Creates a new user in FinSpace.

      + */ + public createUser(args: CreateUserCommandInput, options?: __HttpHandlerOptions): Promise; + public createUser(args: CreateUserCommandInput, cb: (err: any, data?: CreateUserCommandOutput) => void): void; + public createUser( + args: CreateUserCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: CreateUserCommandOutput) => void + ): void; + public createUser( + args: CreateUserCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: CreateUserCommandOutput) => void), + cb?: (err: any, data?: CreateUserCommandOutput) => void + ): Promise | void { + const command = new CreateUserCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

      Deletes a FinSpace Dataset.

      */ @@ -196,6 +285,90 @@ export class FinspaceData extends FinspaceDataClient { } } + /** + *

      Deletes a permission group. This action is irreversible.

      + */ + public deletePermissionGroup( + args: DeletePermissionGroupCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public deletePermissionGroup( + args: DeletePermissionGroupCommandInput, + cb: (err: any, data?: DeletePermissionGroupCommandOutput) => void + ): void; + public deletePermissionGroup( + args: DeletePermissionGroupCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: DeletePermissionGroupCommandOutput) => void + ): void; + public deletePermissionGroup( + args: DeletePermissionGroupCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: DeletePermissionGroupCommandOutput) => void), + cb?: (err: any, data?: DeletePermissionGroupCommandOutput) => void + ): Promise | void { + const command = new DeletePermissionGroupCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

      Denies access to the FinSpace web application and API for the specified user.

      + */ + public disableUser(args: DisableUserCommandInput, options?: __HttpHandlerOptions): Promise; + public disableUser(args: DisableUserCommandInput, cb: (err: any, data?: DisableUserCommandOutput) => void): void; + public disableUser( + args: DisableUserCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: DisableUserCommandOutput) => void + ): void; + public disableUser( + args: DisableUserCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: DisableUserCommandOutput) => void), + cb?: (err: any, data?: DisableUserCommandOutput) => void + ): Promise | void { + const command = new DisableUserCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

      Allows the specified user to access the FinSpace web application and API.

      + */ + public enableUser(args: EnableUserCommandInput, options?: __HttpHandlerOptions): Promise; + public enableUser(args: EnableUserCommandInput, cb: (err: any, data?: EnableUserCommandOutput) => void): void; + public enableUser( + args: EnableUserCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: EnableUserCommandOutput) => void + ): void; + public enableUser( + args: EnableUserCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: EnableUserCommandOutput) => void), + cb?: (err: any, data?: EnableUserCommandOutput) => void + ): Promise | void { + const command = new EnableUserCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

      Get information about a Changeset.

      */ @@ -309,6 +482,32 @@ export class FinspaceData extends FinspaceDataClient { } } + /** + *

      Retrieves details for a specific user.

      + */ + public getUser(args: GetUserCommandInput, options?: __HttpHandlerOptions): Promise; + public getUser(args: GetUserCommandInput, cb: (err: any, data?: GetUserCommandOutput) => void): void; + public getUser( + args: GetUserCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: GetUserCommandOutput) => void + ): void; + public getUser( + args: GetUserCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: GetUserCommandOutput) => void), + cb?: (err: any, data?: GetUserCommandOutput) => void + ): Promise | void { + const command = new GetUserCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

      A temporary Amazon S3 location, where you can copy your files from a source location to stage or use * as a scratch space in FinSpace notebook.

      @@ -435,6 +634,96 @@ export class FinspaceData extends FinspaceDataClient { } } + /** + *

      Lists all available permission groups in FinSpace.

      + */ + public listPermissionGroups( + args: ListPermissionGroupsCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public listPermissionGroups( + args: ListPermissionGroupsCommandInput, + cb: (err: any, data?: ListPermissionGroupsCommandOutput) => void + ): void; + public listPermissionGroups( + args: ListPermissionGroupsCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ListPermissionGroupsCommandOutput) => void + ): void; + public listPermissionGroups( + args: ListPermissionGroupsCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: ListPermissionGroupsCommandOutput) => void), + cb?: (err: any, data?: ListPermissionGroupsCommandOutput) => void + ): Promise | void { + const command = new ListPermissionGroupsCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

      Lists all available user accounts in FinSpace.

      + */ + public listUsers(args: ListUsersCommandInput, options?: __HttpHandlerOptions): Promise; + public listUsers(args: ListUsersCommandInput, cb: (err: any, data?: ListUsersCommandOutput) => void): void; + public listUsers( + args: ListUsersCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ListUsersCommandOutput) => void + ): void; + public listUsers( + args: ListUsersCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: ListUsersCommandOutput) => void), + cb?: (err: any, data?: ListUsersCommandOutput) => void + ): Promise | void { + const command = new ListUsersCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Resets the password for a specified user ID and generates a temporary one. Only a superuser can reset the password for other users. Resetting the password immediately invalidates the previous password associated with the user.

      + */ + public resetUserPassword( + args: ResetUserPasswordCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public resetUserPassword( + args: ResetUserPasswordCommandInput, + cb: (err: any, data?: ResetUserPasswordCommandOutput) => void + ): void; + public resetUserPassword( + args: ResetUserPasswordCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ResetUserPasswordCommandOutput) => void + ): void; + public resetUserPassword( + args: ResetUserPasswordCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: ResetUserPasswordCommandOutput) => void), + cb?: (err: any, data?: ResetUserPasswordCommandOutput) => void + ): Promise | void { + const command = new ResetUserPasswordCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

      Updates a FinSpace Changeset.

      */ @@ -498,4 +787,62 @@ export class FinspaceData extends FinspaceDataClient { return this.send(command, optionsOrCb); } } + + /** + *

      Modifies the details of a permission group. You cannot modify a permissionGroupID.

      + */ + public updatePermissionGroup( + args: UpdatePermissionGroupCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public updatePermissionGroup( + args: UpdatePermissionGroupCommandInput, + cb: (err: any, data?: UpdatePermissionGroupCommandOutput) => void + ): void; + public updatePermissionGroup( + args: UpdatePermissionGroupCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: UpdatePermissionGroupCommandOutput) => void + ): void; + public updatePermissionGroup( + args: UpdatePermissionGroupCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: UpdatePermissionGroupCommandOutput) => void), + cb?: (err: any, data?: UpdatePermissionGroupCommandOutput) => void + ): Promise | void { + const command = new UpdatePermissionGroupCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

      Modifies the details of the specified user account. You cannot update the userId for a user.

      + */ + public updateUser(args: UpdateUserCommandInput, options?: __HttpHandlerOptions): Promise; + public updateUser(args: UpdateUserCommandInput, cb: (err: any, data?: UpdateUserCommandOutput) => void): void; + public updateUser( + args: UpdateUserCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: UpdateUserCommandOutput) => void + ): void; + public updateUser( + args: UpdateUserCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: UpdateUserCommandOutput) => void), + cb?: (err: any, data?: UpdateUserCommandOutput) => void + ): Promise | void { + const command = new UpdateUserCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } } diff --git a/clients/client-finspace-data/src/FinspaceDataClient.ts b/clients/client-finspace-data/src/FinspaceDataClient.ts index 99b7fa077f05..db99e80ec51b 100644 --- a/clients/client-finspace-data/src/FinspaceDataClient.ts +++ b/clients/client-finspace-data/src/FinspaceDataClient.ts @@ -54,7 +54,18 @@ import { import { CreateChangesetCommandInput, CreateChangesetCommandOutput } from "./commands/CreateChangesetCommand"; import { CreateDatasetCommandInput, CreateDatasetCommandOutput } from "./commands/CreateDatasetCommand"; import { CreateDataViewCommandInput, CreateDataViewCommandOutput } from "./commands/CreateDataViewCommand"; +import { + CreatePermissionGroupCommandInput, + CreatePermissionGroupCommandOutput, +} from "./commands/CreatePermissionGroupCommand"; +import { CreateUserCommandInput, CreateUserCommandOutput } from "./commands/CreateUserCommand"; import { DeleteDatasetCommandInput, DeleteDatasetCommandOutput } from "./commands/DeleteDatasetCommand"; +import { + 
DeletePermissionGroupCommandInput, + DeletePermissionGroupCommandOutput, +} from "./commands/DeletePermissionGroupCommand"; +import { DisableUserCommandInput, DisableUserCommandOutput } from "./commands/DisableUserCommand"; +import { EnableUserCommandInput, EnableUserCommandOutput } from "./commands/EnableUserCommand"; import { GetChangesetCommandInput, GetChangesetCommandOutput } from "./commands/GetChangesetCommand"; import { GetDatasetCommandInput, GetDatasetCommandOutput } from "./commands/GetDatasetCommand"; import { GetDataViewCommandInput, GetDataViewCommandOutput } from "./commands/GetDataViewCommand"; @@ -62,45 +73,79 @@ import { GetProgrammaticAccessCredentialsCommandInput, GetProgrammaticAccessCredentialsCommandOutput, } from "./commands/GetProgrammaticAccessCredentialsCommand"; +import { GetUserCommandInput, GetUserCommandOutput } from "./commands/GetUserCommand"; import { GetWorkingLocationCommandInput, GetWorkingLocationCommandOutput } from "./commands/GetWorkingLocationCommand"; import { ListChangesetsCommandInput, ListChangesetsCommandOutput } from "./commands/ListChangesetsCommand"; import { ListDatasetsCommandInput, ListDatasetsCommandOutput } from "./commands/ListDatasetsCommand"; import { ListDataViewsCommandInput, ListDataViewsCommandOutput } from "./commands/ListDataViewsCommand"; +import { + ListPermissionGroupsCommandInput, + ListPermissionGroupsCommandOutput, +} from "./commands/ListPermissionGroupsCommand"; +import { ListUsersCommandInput, ListUsersCommandOutput } from "./commands/ListUsersCommand"; +import { ResetUserPasswordCommandInput, ResetUserPasswordCommandOutput } from "./commands/ResetUserPasswordCommand"; import { UpdateChangesetCommandInput, UpdateChangesetCommandOutput } from "./commands/UpdateChangesetCommand"; import { UpdateDatasetCommandInput, UpdateDatasetCommandOutput } from "./commands/UpdateDatasetCommand"; +import { + UpdatePermissionGroupCommandInput, + UpdatePermissionGroupCommandOutput, +} from 
"./commands/UpdatePermissionGroupCommand"; +import { UpdateUserCommandInput, UpdateUserCommandOutput } from "./commands/UpdateUserCommand"; import { getRuntimeConfig as __getRuntimeConfig } from "./runtimeConfig"; export type ServiceInputTypes = | CreateChangesetCommandInput | CreateDataViewCommandInput | CreateDatasetCommandInput + | CreatePermissionGroupCommandInput + | CreateUserCommandInput | DeleteDatasetCommandInput + | DeletePermissionGroupCommandInput + | DisableUserCommandInput + | EnableUserCommandInput | GetChangesetCommandInput | GetDataViewCommandInput | GetDatasetCommandInput | GetProgrammaticAccessCredentialsCommandInput + | GetUserCommandInput | GetWorkingLocationCommandInput | ListChangesetsCommandInput | ListDataViewsCommandInput | ListDatasetsCommandInput + | ListPermissionGroupsCommandInput + | ListUsersCommandInput + | ResetUserPasswordCommandInput | UpdateChangesetCommandInput - | UpdateDatasetCommandInput; + | UpdateDatasetCommandInput + | UpdatePermissionGroupCommandInput + | UpdateUserCommandInput; export type ServiceOutputTypes = | CreateChangesetCommandOutput | CreateDataViewCommandOutput | CreateDatasetCommandOutput + | CreatePermissionGroupCommandOutput + | CreateUserCommandOutput | DeleteDatasetCommandOutput + | DeletePermissionGroupCommandOutput + | DisableUserCommandOutput + | EnableUserCommandOutput | GetChangesetCommandOutput | GetDataViewCommandOutput | GetDatasetCommandOutput | GetProgrammaticAccessCredentialsCommandOutput + | GetUserCommandOutput | GetWorkingLocationCommandOutput | ListChangesetsCommandOutput | ListDataViewsCommandOutput | ListDatasetsCommandOutput + | ListPermissionGroupsCommandOutput + | ListUsersCommandOutput + | ResetUserPasswordCommandOutput | UpdateChangesetCommandOutput - | UpdateDatasetCommandOutput; + | UpdateDatasetCommandOutput + | UpdatePermissionGroupCommandOutput + | UpdateUserCommandOutput; export interface ClientDefaults extends Partial<__SmithyResolvedConfiguration<__HttpHandlerOptions>> { /** 
diff --git a/clients/client-finspace-data/src/commands/CreatePermissionGroupCommand.ts b/clients/client-finspace-data/src/commands/CreatePermissionGroupCommand.ts new file mode 100644 index 000000000000..61d3c634924f --- /dev/null +++ b/clients/client-finspace-data/src/commands/CreatePermissionGroupCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { FinspaceDataClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../FinspaceDataClient"; +import { CreatePermissionGroupRequest, CreatePermissionGroupResponse } from "../models/models_0"; +import { + deserializeAws_restJson1CreatePermissionGroupCommand, + serializeAws_restJson1CreatePermissionGroupCommand, +} from "../protocols/Aws_restJson1"; + +export interface CreatePermissionGroupCommandInput extends CreatePermissionGroupRequest {} +export interface CreatePermissionGroupCommandOutput extends CreatePermissionGroupResponse, __MetadataBearer {} + +/** + *

      Creates a group of permissions for various actions that a user can perform in FinSpace.

      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { FinspaceDataClient, CreatePermissionGroupCommand } from "@aws-sdk/client-finspace-data"; // ES Modules import + * // const { FinspaceDataClient, CreatePermissionGroupCommand } = require("@aws-sdk/client-finspace-data"); // CommonJS import + * const client = new FinspaceDataClient(config); + * const command = new CreatePermissionGroupCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link CreatePermissionGroupCommandInput} for command's `input` shape. + * @see {@link CreatePermissionGroupCommandOutput} for command's `response` shape. + * @see {@link FinspaceDataClientResolvedConfig | config} for FinspaceDataClient's `config` shape. + * + */ +export class CreatePermissionGroupCommand extends $Command< + CreatePermissionGroupCommandInput, + CreatePermissionGroupCommandOutput, + FinspaceDataClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: CreatePermissionGroupCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: FinspaceDataClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "FinspaceDataClient"; + const commandName = "CreatePermissionGroupCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: CreatePermissionGroupRequest.filterSensitiveLog, + outputFilterSensitiveLog: CreatePermissionGroupResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + 
return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: CreatePermissionGroupCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1CreatePermissionGroupCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1CreatePermissionGroupCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-finspace-data/src/commands/CreateUserCommand.ts b/clients/client-finspace-data/src/commands/CreateUserCommand.ts new file mode 100644 index 000000000000..bdf3397da0d7 --- /dev/null +++ b/clients/client-finspace-data/src/commands/CreateUserCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { FinspaceDataClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../FinspaceDataClient"; +import { CreateUserRequest, CreateUserResponse } from "../models/models_0"; +import { + deserializeAws_restJson1CreateUserCommand, + serializeAws_restJson1CreateUserCommand, +} from "../protocols/Aws_restJson1"; + +export interface CreateUserCommandInput extends CreateUserRequest {} +export interface CreateUserCommandOutput extends CreateUserResponse, __MetadataBearer {} + +/** + *

      Creates a new user in FinSpace.

      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { FinspaceDataClient, CreateUserCommand } from "@aws-sdk/client-finspace-data"; // ES Modules import + * // const { FinspaceDataClient, CreateUserCommand } = require("@aws-sdk/client-finspace-data"); // CommonJS import + * const client = new FinspaceDataClient(config); + * const command = new CreateUserCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link CreateUserCommandInput} for command's `input` shape. + * @see {@link CreateUserCommandOutput} for command's `response` shape. + * @see {@link FinspaceDataClientResolvedConfig | config} for FinspaceDataClient's `config` shape. + * + */ +export class CreateUserCommand extends $Command< + CreateUserCommandInput, + CreateUserCommandOutput, + FinspaceDataClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: CreateUserCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: FinspaceDataClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "FinspaceDataClient"; + const commandName = "CreateUserCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: CreateUserRequest.filterSensitiveLog, + outputFilterSensitiveLog: CreateUserResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), 
+ handlerExecutionContext + ); + } + + private serialize(input: CreateUserCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1CreateUserCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1CreateUserCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-finspace-data/src/commands/DeletePermissionGroupCommand.ts b/clients/client-finspace-data/src/commands/DeletePermissionGroupCommand.ts new file mode 100644 index 000000000000..36cf49abe5da --- /dev/null +++ b/clients/client-finspace-data/src/commands/DeletePermissionGroupCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { FinspaceDataClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../FinspaceDataClient"; +import { DeletePermissionGroupRequest, DeletePermissionGroupResponse } from "../models/models_0"; +import { + deserializeAws_restJson1DeletePermissionGroupCommand, + serializeAws_restJson1DeletePermissionGroupCommand, +} from "../protocols/Aws_restJson1"; + +export interface DeletePermissionGroupCommandInput extends DeletePermissionGroupRequest {} +export interface DeletePermissionGroupCommandOutput extends DeletePermissionGroupResponse, __MetadataBearer {} + +/** + *

      Deletes a permission group. This action is irreversible.

      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { FinspaceDataClient, DeletePermissionGroupCommand } from "@aws-sdk/client-finspace-data"; // ES Modules import + * // const { FinspaceDataClient, DeletePermissionGroupCommand } = require("@aws-sdk/client-finspace-data"); // CommonJS import + * const client = new FinspaceDataClient(config); + * const command = new DeletePermissionGroupCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DeletePermissionGroupCommandInput} for command's `input` shape. + * @see {@link DeletePermissionGroupCommandOutput} for command's `response` shape. + * @see {@link FinspaceDataClientResolvedConfig | config} for FinspaceDataClient's `config` shape. + * + */ +export class DeletePermissionGroupCommand extends $Command< + DeletePermissionGroupCommandInput, + DeletePermissionGroupCommandOutput, + FinspaceDataClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DeletePermissionGroupCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: FinspaceDataClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "FinspaceDataClient"; + const commandName = "DeletePermissionGroupCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: DeletePermissionGroupRequest.filterSensitiveLog, + outputFilterSensitiveLog: DeletePermissionGroupResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + 
return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: DeletePermissionGroupCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1DeletePermissionGroupCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1DeletePermissionGroupCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-finspace-data/src/commands/DisableUserCommand.ts b/clients/client-finspace-data/src/commands/DisableUserCommand.ts new file mode 100644 index 000000000000..0549d6b8457d --- /dev/null +++ b/clients/client-finspace-data/src/commands/DisableUserCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { FinspaceDataClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../FinspaceDataClient"; +import { DisableUserRequest, DisableUserResponse } from "../models/models_0"; +import { + deserializeAws_restJson1DisableUserCommand, + serializeAws_restJson1DisableUserCommand, +} from "../protocols/Aws_restJson1"; + +export interface DisableUserCommandInput extends DisableUserRequest {} +export interface DisableUserCommandOutput extends DisableUserResponse, __MetadataBearer {} + +/** + *

      Denies access to the FinSpace web application and API for the specified user.

      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { FinspaceDataClient, DisableUserCommand } from "@aws-sdk/client-finspace-data"; // ES Modules import + * // const { FinspaceDataClient, DisableUserCommand } = require("@aws-sdk/client-finspace-data"); // CommonJS import + * const client = new FinspaceDataClient(config); + * const command = new DisableUserCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DisableUserCommandInput} for command's `input` shape. + * @see {@link DisableUserCommandOutput} for command's `response` shape. + * @see {@link FinspaceDataClientResolvedConfig | config} for FinspaceDataClient's `config` shape. + * + */ +export class DisableUserCommand extends $Command< + DisableUserCommandInput, + DisableUserCommandOutput, + FinspaceDataClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DisableUserCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: FinspaceDataClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "FinspaceDataClient"; + const commandName = "DisableUserCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: DisableUserRequest.filterSensitiveLog, + outputFilterSensitiveLog: DisableUserResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, 
options || {}), + handlerExecutionContext + ); + } + + private serialize(input: DisableUserCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1DisableUserCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1DisableUserCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-finspace-data/src/commands/EnableUserCommand.ts b/clients/client-finspace-data/src/commands/EnableUserCommand.ts new file mode 100644 index 000000000000..6db62afb0e5a --- /dev/null +++ b/clients/client-finspace-data/src/commands/EnableUserCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { FinspaceDataClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../FinspaceDataClient"; +import { EnableUserRequest, EnableUserResponse } from "../models/models_0"; +import { + deserializeAws_restJson1EnableUserCommand, + serializeAws_restJson1EnableUserCommand, +} from "../protocols/Aws_restJson1"; + +export interface EnableUserCommandInput extends EnableUserRequest {} +export interface EnableUserCommandOutput extends EnableUserResponse, __MetadataBearer {} + +/** + *

      Allows the specified user to access the FinSpace web application and API.

      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { FinspaceDataClient, EnableUserCommand } from "@aws-sdk/client-finspace-data"; // ES Modules import + * // const { FinspaceDataClient, EnableUserCommand } = require("@aws-sdk/client-finspace-data"); // CommonJS import + * const client = new FinspaceDataClient(config); + * const command = new EnableUserCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link EnableUserCommandInput} for command's `input` shape. + * @see {@link EnableUserCommandOutput} for command's `response` shape. + * @see {@link FinspaceDataClientResolvedConfig | config} for FinspaceDataClient's `config` shape. + * + */ +export class EnableUserCommand extends $Command< + EnableUserCommandInput, + EnableUserCommandOutput, + FinspaceDataClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: EnableUserCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: FinspaceDataClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "FinspaceDataClient"; + const commandName = "EnableUserCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: EnableUserRequest.filterSensitiveLog, + outputFilterSensitiveLog: EnableUserResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), 
+ handlerExecutionContext + ); + } + + private serialize(input: EnableUserCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1EnableUserCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1EnableUserCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-finspace-data/src/commands/GetUserCommand.ts b/clients/client-finspace-data/src/commands/GetUserCommand.ts new file mode 100644 index 000000000000..1b0ebbdf81ee --- /dev/null +++ b/clients/client-finspace-data/src/commands/GetUserCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { FinspaceDataClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../FinspaceDataClient"; +import { GetUserRequest, GetUserResponse } from "../models/models_0"; +import { + deserializeAws_restJson1GetUserCommand, + serializeAws_restJson1GetUserCommand, +} from "../protocols/Aws_restJson1"; + +export interface GetUserCommandInput extends GetUserRequest {} +export interface GetUserCommandOutput extends GetUserResponse, __MetadataBearer {} + +/** + *

      Retrieves details for a specific user.

      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { FinspaceDataClient, GetUserCommand } from "@aws-sdk/client-finspace-data"; // ES Modules import + * // const { FinspaceDataClient, GetUserCommand } = require("@aws-sdk/client-finspace-data"); // CommonJS import + * const client = new FinspaceDataClient(config); + * const command = new GetUserCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link GetUserCommandInput} for command's `input` shape. + * @see {@link GetUserCommandOutput} for command's `response` shape. + * @see {@link FinspaceDataClientResolvedConfig | config} for FinspaceDataClient's `config` shape. + * + */ +export class GetUserCommand extends $Command< + GetUserCommandInput, + GetUserCommandOutput, + FinspaceDataClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: GetUserCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: FinspaceDataClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "FinspaceDataClient"; + const commandName = "GetUserCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: GetUserRequest.filterSensitiveLog, + outputFilterSensitiveLog: GetUserResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } 
+ + private serialize(input: GetUserCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1GetUserCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1GetUserCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-finspace-data/src/commands/ListPermissionGroupsCommand.ts b/clients/client-finspace-data/src/commands/ListPermissionGroupsCommand.ts new file mode 100644 index 000000000000..4507f66794e0 --- /dev/null +++ b/clients/client-finspace-data/src/commands/ListPermissionGroupsCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { FinspaceDataClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../FinspaceDataClient"; +import { ListPermissionGroupsRequest, ListPermissionGroupsResponse } from "../models/models_0"; +import { + deserializeAws_restJson1ListPermissionGroupsCommand, + serializeAws_restJson1ListPermissionGroupsCommand, +} from "../protocols/Aws_restJson1"; + +export interface ListPermissionGroupsCommandInput extends ListPermissionGroupsRequest {} +export interface ListPermissionGroupsCommandOutput extends ListPermissionGroupsResponse, __MetadataBearer {} + +/** + *

      Lists all available permission groups in FinSpace.

      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { FinspaceDataClient, ListPermissionGroupsCommand } from "@aws-sdk/client-finspace-data"; // ES Modules import + * // const { FinspaceDataClient, ListPermissionGroupsCommand } = require("@aws-sdk/client-finspace-data"); // CommonJS import + * const client = new FinspaceDataClient(config); + * const command = new ListPermissionGroupsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListPermissionGroupsCommandInput} for command's `input` shape. + * @see {@link ListPermissionGroupsCommandOutput} for command's `response` shape. + * @see {@link FinspaceDataClientResolvedConfig | config} for FinspaceDataClient's `config` shape. + * + */ +export class ListPermissionGroupsCommand extends $Command< + ListPermissionGroupsCommandInput, + ListPermissionGroupsCommandOutput, + FinspaceDataClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListPermissionGroupsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: FinspaceDataClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "FinspaceDataClient"; + const commandName = "ListPermissionGroupsCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ListPermissionGroupsRequest.filterSensitiveLog, + outputFilterSensitiveLog: ListPermissionGroupsResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return 
stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: ListPermissionGroupsCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1ListPermissionGroupsCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1ListPermissionGroupsCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-finspace-data/src/commands/ListUsersCommand.ts b/clients/client-finspace-data/src/commands/ListUsersCommand.ts new file mode 100644 index 000000000000..f3c348922851 --- /dev/null +++ b/clients/client-finspace-data/src/commands/ListUsersCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { FinspaceDataClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../FinspaceDataClient"; +import { ListUsersRequest, ListUsersResponse } from "../models/models_0"; +import { + deserializeAws_restJson1ListUsersCommand, + serializeAws_restJson1ListUsersCommand, +} from "../protocols/Aws_restJson1"; + +export interface ListUsersCommandInput extends ListUsersRequest {} +export interface ListUsersCommandOutput extends ListUsersResponse, __MetadataBearer {} + +/** + *

      Lists all available user accounts in FinSpace.

      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { FinspaceDataClient, ListUsersCommand } from "@aws-sdk/client-finspace-data"; // ES Modules import + * // const { FinspaceDataClient, ListUsersCommand } = require("@aws-sdk/client-finspace-data"); // CommonJS import + * const client = new FinspaceDataClient(config); + * const command = new ListUsersCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListUsersCommandInput} for command's `input` shape. + * @see {@link ListUsersCommandOutput} for command's `response` shape. + * @see {@link FinspaceDataClientResolvedConfig | config} for FinspaceDataClient's `config` shape. + * + */ +export class ListUsersCommand extends $Command< + ListUsersCommandInput, + ListUsersCommandOutput, + FinspaceDataClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListUsersCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: FinspaceDataClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "FinspaceDataClient"; + const commandName = "ListUsersCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ListUsersRequest.filterSensitiveLog, + outputFilterSensitiveLog: ListUsersResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + 
handlerExecutionContext + ); + } + + private serialize(input: ListUsersCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1ListUsersCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1ListUsersCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-finspace-data/src/commands/ResetUserPasswordCommand.ts b/clients/client-finspace-data/src/commands/ResetUserPasswordCommand.ts new file mode 100644 index 000000000000..1d68347b3de1 --- /dev/null +++ b/clients/client-finspace-data/src/commands/ResetUserPasswordCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { FinspaceDataClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../FinspaceDataClient"; +import { ResetUserPasswordRequest, ResetUserPasswordResponse } from "../models/models_0"; +import { + deserializeAws_restJson1ResetUserPasswordCommand, + serializeAws_restJson1ResetUserPasswordCommand, +} from "../protocols/Aws_restJson1"; + +export interface ResetUserPasswordCommandInput extends ResetUserPasswordRequest {} +export interface ResetUserPasswordCommandOutput extends ResetUserPasswordResponse, __MetadataBearer {} + +/** + *

      Resets the password for a specified user ID and generates a temporary one. Only a superuser can reset password for other users. Resetting the password immediately invalidates the previous password associated with the user.

      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { FinspaceDataClient, ResetUserPasswordCommand } from "@aws-sdk/client-finspace-data"; // ES Modules import + * // const { FinspaceDataClient, ResetUserPasswordCommand } = require("@aws-sdk/client-finspace-data"); // CommonJS import + * const client = new FinspaceDataClient(config); + * const command = new ResetUserPasswordCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ResetUserPasswordCommandInput} for command's `input` shape. + * @see {@link ResetUserPasswordCommandOutput} for command's `response` shape. + * @see {@link FinspaceDataClientResolvedConfig | config} for FinspaceDataClient's `config` shape. + * + */ +export class ResetUserPasswordCommand extends $Command< + ResetUserPasswordCommandInput, + ResetUserPasswordCommandOutput, + FinspaceDataClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ResetUserPasswordCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: FinspaceDataClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "FinspaceDataClient"; + const commandName = "ResetUserPasswordCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ResetUserPasswordRequest.filterSensitiveLog, + outputFilterSensitiveLog: ResetUserPasswordResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: 
FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: ResetUserPasswordCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1ResetUserPasswordCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1ResetUserPasswordCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-finspace-data/src/commands/UpdatePermissionGroupCommand.ts b/clients/client-finspace-data/src/commands/UpdatePermissionGroupCommand.ts new file mode 100644 index 000000000000..93b05f598bb5 --- /dev/null +++ b/clients/client-finspace-data/src/commands/UpdatePermissionGroupCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { FinspaceDataClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../FinspaceDataClient"; +import { UpdatePermissionGroupRequest, UpdatePermissionGroupResponse } from "../models/models_0"; +import { + deserializeAws_restJson1UpdatePermissionGroupCommand, + serializeAws_restJson1UpdatePermissionGroupCommand, +} from "../protocols/Aws_restJson1"; + +export interface UpdatePermissionGroupCommandInput extends UpdatePermissionGroupRequest {} +export interface UpdatePermissionGroupCommandOutput extends UpdatePermissionGroupResponse, __MetadataBearer {} + +/** + *

      Modifies the details of a permission group. You cannot modify a permissionGroupID.

      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { FinspaceDataClient, UpdatePermissionGroupCommand } from "@aws-sdk/client-finspace-data"; // ES Modules import + * // const { FinspaceDataClient, UpdatePermissionGroupCommand } = require("@aws-sdk/client-finspace-data"); // CommonJS import + * const client = new FinspaceDataClient(config); + * const command = new UpdatePermissionGroupCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link UpdatePermissionGroupCommandInput} for command's `input` shape. + * @see {@link UpdatePermissionGroupCommandOutput} for command's `response` shape. + * @see {@link FinspaceDataClientResolvedConfig | config} for FinspaceDataClient's `config` shape. + * + */ +export class UpdatePermissionGroupCommand extends $Command< + UpdatePermissionGroupCommandInput, + UpdatePermissionGroupCommandOutput, + FinspaceDataClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: UpdatePermissionGroupCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: FinspaceDataClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "FinspaceDataClient"; + const commandName = "UpdatePermissionGroupCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: UpdatePermissionGroupRequest.filterSensitiveLog, + outputFilterSensitiveLog: UpdatePermissionGroupResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + 
return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: UpdatePermissionGroupCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1UpdatePermissionGroupCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1UpdatePermissionGroupCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-finspace-data/src/commands/UpdateUserCommand.ts b/clients/client-finspace-data/src/commands/UpdateUserCommand.ts new file mode 100644 index 000000000000..af081b50f8c7 --- /dev/null +++ b/clients/client-finspace-data/src/commands/UpdateUserCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { FinspaceDataClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../FinspaceDataClient"; +import { UpdateUserRequest, UpdateUserResponse } from "../models/models_0"; +import { + deserializeAws_restJson1UpdateUserCommand, + serializeAws_restJson1UpdateUserCommand, +} from "../protocols/Aws_restJson1"; + +export interface UpdateUserCommandInput extends UpdateUserRequest {} +export interface UpdateUserCommandOutput extends UpdateUserResponse, __MetadataBearer {} + +/** + *

      Modifies the details of the specified user account. You cannot update the userId for a user.

      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { FinspaceDataClient, UpdateUserCommand } from "@aws-sdk/client-finspace-data"; // ES Modules import + * // const { FinspaceDataClient, UpdateUserCommand } = require("@aws-sdk/client-finspace-data"); // CommonJS import + * const client = new FinspaceDataClient(config); + * const command = new UpdateUserCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link UpdateUserCommandInput} for command's `input` shape. + * @see {@link UpdateUserCommandOutput} for command's `response` shape. + * @see {@link FinspaceDataClientResolvedConfig | config} for FinspaceDataClient's `config` shape. + * + */ +export class UpdateUserCommand extends $Command< + UpdateUserCommandInput, + UpdateUserCommandOutput, + FinspaceDataClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: UpdateUserCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: FinspaceDataClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "FinspaceDataClient"; + const commandName = "UpdateUserCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: UpdateUserRequest.filterSensitiveLog, + outputFilterSensitiveLog: UpdateUserResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), 
+ handlerExecutionContext + ); + } + + private serialize(input: UpdateUserCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1UpdateUserCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1UpdateUserCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-finspace-data/src/commands/index.ts b/clients/client-finspace-data/src/commands/index.ts index 753a56795430..ba967db13a75 100644 --- a/clients/client-finspace-data/src/commands/index.ts +++ b/clients/client-finspace-data/src/commands/index.ts @@ -1,14 +1,25 @@ export * from "./CreateChangesetCommand"; export * from "./CreateDataViewCommand"; export * from "./CreateDatasetCommand"; +export * from "./CreatePermissionGroupCommand"; +export * from "./CreateUserCommand"; export * from "./DeleteDatasetCommand"; +export * from "./DeletePermissionGroupCommand"; +export * from "./DisableUserCommand"; +export * from "./EnableUserCommand"; export * from "./GetChangesetCommand"; export * from "./GetDataViewCommand"; export * from "./GetDatasetCommand"; export * from "./GetProgrammaticAccessCredentialsCommand"; +export * from "./GetUserCommand"; export * from "./GetWorkingLocationCommand"; export * from "./ListChangesetsCommand"; export * from "./ListDataViewsCommand"; export * from "./ListDatasetsCommand"; +export * from "./ListPermissionGroupsCommand"; +export * from "./ListUsersCommand"; +export * from "./ResetUserPasswordCommand"; export * from "./UpdateChangesetCommand"; export * from "./UpdateDatasetCommand"; +export * from "./UpdatePermissionGroupCommand"; +export * from "./UpdateUserCommand"; diff --git a/clients/client-finspace-data/src/models/models_0.ts b/clients/client-finspace-data/src/models/models_0.ts index ef7fb49515e2..f4bf43aea280 100644 --- 
a/clients/client-finspace-data/src/models/models_0.ts +++ b/clients/client-finspace-data/src/models/models_0.ts @@ -1,4 +1,4 @@ -import { ExceptionOptionType as __ExceptionOptionType } from "@aws-sdk/smithy-client"; +import { ExceptionOptionType as __ExceptionOptionType, SENSITIVE_STRING } from "@aws-sdk/smithy-client"; import { MetadataBearer as $MetadataBearer } from "@aws-sdk/types"; import { FinspaceDataServiceException as __BaseException } from "./FinspaceDataServiceException"; @@ -22,12 +22,28 @@ export class AccessDeniedException extends __BaseException { } } +export enum ApiAccess { + DISABLED = "DISABLED", + ENABLED = "ENABLED", +} + +export enum ApplicationPermission { + AccessNotebooks = "AccessNotebooks", + CreateDataset = "CreateDataset", + GetTemporaryCredentials = "GetTemporaryCredentials", + ManageAttributeSets = "ManageAttributeSets", + ManageClusters = "ManageClusters", + ManageUsersAndGroups = "ManageUsersAndGroups", + ViewAuditData = "ViewAuditData", +} + /** *

      The request conflicts with an existing resource.

      */ export class ConflictException extends __BaseException { readonly name: "ConflictException" = "ConflictException"; readonly $fault: "client" = "client"; + reason?: string; /** * @internal */ @@ -38,6 +54,7 @@ export class ConflictException extends __BaseException { ...opts, }); Object.setPrototypeOf(this, ConflictException.prototype); + this.reason = opts.reason; } } @@ -63,21 +80,21 @@ export interface CreateChangesetRequest { datasetId: string | undefined; /** - *

      Option to indicate how a Changeset will be applied to a Dataset.

      + *

      The option to indicate how a Changeset will be applied to a Dataset.

      *
        *
      • *

        - * REPLACE - Changeset will be considered as a replacement to all prior + * REPLACE – Changeset will be considered as a replacement to all prior * loaded Changesets.

        *
      • *
      • *

        - * APPEND - Changeset will be considered as an addition to the end of all + * APPEND – Changeset will be considered as an addition to the end of all * prior loaded Changesets.

        *
      • *
      • *

        - * MODIFY - Changeset is considered as a replacement to a specific prior ingested Changeset.

        + * MODIFY – Changeset is considered as a replacement to a specific prior ingested Changeset.

        *
      • *
      */ @@ -96,7 +113,7 @@ export interface CreateChangesetRequest { * } *
      *

      - *

      The S3 path that you specify must allow the FinSpace role access. To do that, you first need to configure the IAM policy on S3 bucket. For more information, see Loading data from an Amazon S3 Bucket using the FinSpace APIsection.

      + *

      The S3 path that you specify must allow the FinSpace role access. To do that, you first need to configure the IAM policy on S3 bucket. For more information, see Loading data from an Amazon S3 Bucket using the FinSpace API section.

      */ sourceParams: { [key: string]: string } | undefined; @@ -109,19 +126,19 @@ export interface CreateChangesetRequest { *
        *
      • *

        - * PARQUET - Parquet source file format.

        + * PARQUET – Parquet source file format.

        *
      • *
      • *

        - * CSV - CSV source file format.

        + * CSV – CSV source file format.

        *
      • *
      • *

        - * JSON - JSON source file format.

        + * JSON – JSON source file format.

        *
      • *
      • *

        - * XML - XML source file format.

        + * XML – XML source file format.

        *
      • *
      * @@ -229,6 +246,7 @@ export class LimitExceededException extends __BaseException { export class ResourceNotFoundException extends __BaseException { readonly name: "ResourceNotFoundException" = "ResourceNotFoundException"; readonly $fault: "client" = "client"; + reason?: string; /** * @internal */ @@ -239,6 +257,7 @@ export class ResourceNotFoundException extends __BaseException { ...opts, }); Object.setPrototypeOf(this, ResourceNotFoundException.prototype); + this.reason = opts.reason; } } @@ -267,6 +286,7 @@ export class ThrottlingException extends __BaseException { export class ValidationException extends __BaseException { readonly name: "ValidationException" = "ValidationException"; readonly $fault: "client" = "client"; + reason?: string; /** * @internal */ @@ -277,6 +297,7 @@ export class ValidationException extends __BaseException { ...opts, }); Object.setPrototypeOf(this, ValidationException.prototype); + this.reason = opts.reason; } } @@ -290,7 +311,7 @@ export enum DatasetKind { */ export interface DatasetOwnerInfo { /** - *

      Name of the Dataset owner.

      + *

      The name of the Dataset owner.

      */ name?: string; @@ -311,6 +332,7 @@ export namespace DatasetOwnerInfo { */ export const filterSensitiveLog = (obj: DatasetOwnerInfo): any => ({ ...obj, + ...(obj.email && { email: SENSITIVE_STRING }), }); } @@ -351,7 +373,7 @@ export namespace DatasetOwnerInfo { *

      *
    • *
    - *

    For more information on the ataset permissions, see Supported Dataset Permissions in the FinSpace User Guide.

    + *

    For more information on the dataset permissions, see Supported Dataset Permissions in the FinSpace User Guide.

    */ export interface ResourcePermission { /** @@ -388,7 +410,7 @@ export namespace ResourcePermission { */ export interface PermissionGroupParams { /** - *

    The unique identifier of the PermissionGroup.

    + *

    The unique identifier for the PermissionGroup.

    */ permissionGroupId?: string; @@ -431,36 +453,36 @@ export interface ColumnDefinition { *
      *
    • *

      - * STRING - A String data type.

      + * STRING – A String data type.

      *

      - * CHAR - A char data type.

      + * CHAR – A char data type.

      *

      - * INTEGER - An integer data type.

      + * INTEGER – An integer data type.

      *

      - * TINYINT - A tinyint data type.

      + * TINYINT – A tinyint data type.

      *

      - * SMALLINT - A smallint data type.

      + * SMALLINT – A smallint data type.

      *

      - * BIGINT - A bigint data type.

      + * BIGINT – A bigint data type.

      *

      - * FLOAT - A float data type.

      + * FLOAT – A float data type.

      *

      - * DOUBLE - A double data type.

      + * DOUBLE – A double data type.

      *

      - * DATE - A date data type.

      + * DATE – A date data type.

      *

      - * DATETIME - A datetime data type.

      + * DATETIME – A datetime data type.

      *

      - * BOOLEAN - A boolean data type.

      + * BOOLEAN – A boolean data type.

      *

      - * BINARY - A binary data type.

      + * BINARY – A binary data type.

      *
    • *
    */ dataType?: ColumnDataType | string; /** - *

    Name for a column.

    + *

    The name of a column.

    */ columnName?: string; @@ -541,11 +563,11 @@ export interface CreateDatasetRequest { *
      *
    • *

      - * TABULAR - Data is structured in a tabular format.

      + * TABULAR – Data is structured in a tabular format.

      *
    • *
    • *

      - * NON_TABULAR - Data is structured in a non-tabular format.

      + * NON_TABULAR – Data is structured in a non-tabular format.

      *
    • *
    */ @@ -583,6 +605,7 @@ export namespace CreateDatasetRequest { */ export const filterSensitiveLog = (obj: CreateDatasetRequest): any => ({ ...obj, + ...(obj.ownerInfo && { ownerInfo: DatasetOwnerInfo.filterSensitiveLog(obj.ownerInfo) }), }); } @@ -619,11 +642,11 @@ export interface DataViewDestinationTypeParams { *
      *
    • *

      - * GLUE_TABLE - Glue table destination type.

      + * GLUE_TABLE – Glue table destination type.

      *
    • *
    • *

      - * S3 - S3 destination type.

      + * S3 – S3 destination type.

      *
    • *
    */ @@ -634,11 +657,11 @@ export interface DataViewDestinationTypeParams { *
      *
    • *

      - * PARQUET - Parquet export file format.

      + * PARQUET – Parquet export file format.

      *
    • *
    • *

      - * DELIMITED_TEXT - Delimited text export file format.

      + * DELIMITED_TEXT – Delimited text export file format.

      *
    • *
    */ @@ -699,7 +722,7 @@ export interface CreateDataViewRequest { partitionColumns?: string[]; /** - *

    Beginning time to use for the Dataview. The value is determined as Epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

    + *

    Beginning time to use for the Dataview. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

    */ asOfTimestamp?: number; @@ -742,6 +765,175 @@ export namespace CreateDataViewResponse { }); } +export interface CreatePermissionGroupRequest { + /** + *

    The name of the permission group.

    + */ + name: string | undefined; + + /** + *

    A brief description for the permission group.

    + */ + description?: string; + + /** + *

    The option to indicate FinSpace application permissions that are granted to a specific group.

    + *
      + *
    • + *

      + * CreateDataset – Group members can create new datasets.

      + *
    • + *
    • + *

      + * ManageClusters – Group members can manage Apache Spark clusters from FinSpace notebooks.

      + *
    • + *
    • + *

      + * ManageUsersAndGroups – Group members can manage users and permission groups.

      + *
    • + *
    • + *

      + * ManageAttributeSets – Group members can manage attribute sets.

      + *
    • + *
    • + *

      + * ViewAuditData – Group members can view audit data.

      + *
    • + *
    • + *

      + * AccessNotebooks – Group members will have access to FinSpace notebooks.

      + *
    • + *
    • + *

      + * GetTemporaryCredentials – Group members can get temporary API credentials.

      + *
    • + *
    + */ + applicationPermissions: (ApplicationPermission | string)[] | undefined; + + /** + *

    A token that ensures idempotency. This token expires in 10 minutes.

    + */ + clientToken?: string; +} + +export namespace CreatePermissionGroupRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreatePermissionGroupRequest): any => ({ + ...obj, + ...(obj.name && { name: SENSITIVE_STRING }), + ...(obj.description && { description: SENSITIVE_STRING }), + }); +} + +export interface CreatePermissionGroupResponse { + /** + *

    The unique identifier for the permission group.

    + */ + permissionGroupId?: string; +} + +export namespace CreatePermissionGroupResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreatePermissionGroupResponse): any => ({ + ...obj, + }); +} + +export enum UserType { + APP_USER = "APP_USER", + SUPER_USER = "SUPER_USER", +} + +export interface CreateUserRequest { + /** + *

The email address of the user that you want to register. The email address serves as a unique identifier for each user and cannot be changed after it's created.

    + */ + emailAddress: string | undefined; + + /** + *

    The option to indicate the type of user. Use one of the following options to specify this parameter:

    + *
      + *
    • + *

      + * SUPER_USER – A user with permission to all the functionality and data in FinSpace.

      + *
    • + *
    • + *

      + * APP_USER – A user with specific permissions in FinSpace. The users are assigned permissions by adding them to a permission group.

      + *
    • + *
    + */ + type: UserType | string | undefined; + + /** + *

    The first name of the user that you want to register.

    + */ + firstName?: string; + + /** + *

    The last name of the user that you want to register.

    + */ + lastName?: string; + + /** + *

    The option to indicate whether the user can use the GetProgrammaticAccessCredentials API to obtain credentials that can then be used to access other FinSpace Data API operations.

    + *
      + *
    • + *

      + * ENABLED – The user has permissions to use the APIs.

      + *
    • + *
    • + *

      + * DISABLED – The user does not have permissions to use any APIs.

      + *
    • + *
    + */ + ApiAccess?: ApiAccess | string; + + /** + *

    The ARN identifier of an AWS user or role that is allowed to call the GetProgrammaticAccessCredentials API to obtain a credentials token for a specific FinSpace user. This must be an IAM role within your FinSpace account.

    + */ + apiAccessPrincipalArn?: string; + + /** + *

    A token that ensures idempotency. This token expires in 10 minutes.

    + */ + clientToken?: string; +} + +export namespace CreateUserRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateUserRequest): any => ({ + ...obj, + ...(obj.emailAddress && { emailAddress: SENSITIVE_STRING }), + ...(obj.firstName && { firstName: SENSITIVE_STRING }), + ...(obj.lastName && { lastName: SENSITIVE_STRING }), + }); +} + +export interface CreateUserResponse { + /** + *

    The unique identifier for the user.

    + */ + userId?: string; +} + +export namespace CreateUserResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateUserResponse): any => ({ + ...obj, + }); +} + /** * The request for a DeleteDataset operation. */ @@ -785,6 +977,117 @@ export namespace DeleteDatasetResponse { }); } +export interface DeletePermissionGroupRequest { + /** + *

    The unique identifier for the permission group that you want to delete.

    + */ + permissionGroupId: string | undefined; + + /** + *

    A token that ensures idempotency. This token expires in 10 minutes.

    + */ + clientToken?: string; +} + +export namespace DeletePermissionGroupRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeletePermissionGroupRequest): any => ({ + ...obj, + }); +} + +export interface DeletePermissionGroupResponse { + /** + *

    The unique identifier for the deleted permission group.

    + */ + permissionGroupId?: string; +} + +export namespace DeletePermissionGroupResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeletePermissionGroupResponse): any => ({ + ...obj, + }); +} + +export interface DisableUserRequest { + /** + *

    The unique identifier for the user account that you want to disable.

    + */ + userId: string | undefined; + + /** + *

    A token that ensures idempotency. This token expires in 10 minutes.

    + */ + clientToken?: string; +} + +export namespace DisableUserRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DisableUserRequest): any => ({ + ...obj, + }); +} + +export interface DisableUserResponse { + /** + *

    The unique identifier for the disabled user account.

    + */ + userId?: string; +} + +export namespace DisableUserResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DisableUserResponse): any => ({ + ...obj, + }); +} + +export interface EnableUserRequest { + /** + *

    The unique identifier for the user account that you want to enable.

    + */ + userId: string | undefined; + + /** + *

    A token that ensures idempotency. This token expires in 10 minutes.

    + */ + clientToken?: string; +} + +export namespace EnableUserRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: EnableUserRequest): any => ({ + ...obj, + }); +} + +export interface EnableUserResponse { + /** + *

    The unique identifier for the enabled user account.

    + */ + userId?: string; +} + +export namespace EnableUserResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: EnableUserResponse): any => ({ + ...obj, + }); +} + /** * Request to describe a changeset. */ @@ -834,40 +1137,40 @@ export interface ChangesetErrorInfo { *
      *
    • *

      - * VALIDATION -The inputs to this request are invalid.

      + * VALIDATION – The inputs to this request are invalid.

      *
    • *
    • *

      - * SERVICE_QUOTA_EXCEEDED - Service quotas have been exceeded. Please + * SERVICE_QUOTA_EXCEEDED – Service quotas have been exceeded. Please * contact AWS support to increase quotas.

      *
    • *
    • *

      - * ACCESS_DENIED - Missing required permission to perform this + * ACCESS_DENIED – Missing required permission to perform this * request.

      *
    • *
    • *

      - * RESOURCE_NOT_FOUND - One or more inputs to this request were not + * RESOURCE_NOT_FOUND – One or more inputs to this request were not * found.

      *
    • *
    • *

      - * THROTTLING - The system temporarily lacks sufficient resources to process + * THROTTLING – The system temporarily lacks sufficient resources to process * the request.

      *
    • *
    • *

      - * INTERNAL_SERVICE_EXCEPTION - An internal service error has + * INTERNAL_SERVICE_EXCEPTION – An internal service error has * occurred.

      *
    • *
    • *

      - * CANCELLED - Cancelled.

      + * CANCELLED – Cancelled.

      *
    • *
    • *

      - * USER_RECOVERABLE - A user recoverable error has occurred.

      + * USER_RECOVERABLE – A user recoverable error has occurred.

      *
    • *
    */ @@ -915,15 +1218,15 @@ export interface GetChangesetResponse { *
      *
    • *

      - * REPLACE - Changeset is considered as a replacement to all prior loaded Changesets.

      + * REPLACE – Changeset is considered as a replacement to all prior loaded Changesets.

      *
    • *
    • *

      - * APPEND - Changeset is considered as an addition to the end of all prior loaded Changesets.

      + * APPEND – Changeset is considered as an addition to the end of all prior loaded Changesets.

      *
    • *
    • *

      - * MODIFY - Changeset is considered as a replacement to a specific prior ingested Changeset.

      + * MODIFY – Changeset is considered as a replacement to a specific prior ingested Changeset.

      *
    • *
    */ @@ -940,7 +1243,7 @@ export interface GetChangesetResponse { formatParams?: { [key: string]: string }; /** - *

    The timestamp at which the Changeset was created in FinSpace. The value is determined as Epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

    + *

    The timestamp at which the Changeset was created in FinSpace. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

    */ createTime?: number; @@ -955,12 +1258,12 @@ export interface GetChangesetResponse { errorInfo?: ChangesetErrorInfo; /** - *

    Time until which the Changeset is active. The value is determined as Epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

    + *

    Time until which the Changeset is active. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

    */ activeUntilTimestamp?: number; /** - *

    Beginning time from which the Changeset is active. The value is determined as Epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

    + *

    Beginning time from which the Changeset is active. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

    */ activeFromTimestamp?: number; @@ -1034,11 +1337,11 @@ export interface GetDatasetResponse { *
      *
    • *

      - * TABULAR - Data is structured in a tabular format.

      + * TABULAR – Data is structured in a tabular format.

      *
    • *
    • *

      - * NON_TABULAR - Data is structured in a non-tabular format.

      + * NON_TABULAR – Data is structured in a non-tabular format.

      *
    • *
    */ @@ -1050,12 +1353,12 @@ export interface GetDatasetResponse { datasetDescription?: string; /** - *

    The timestamp at which the Dataset was created in FinSpace. The value is determined as Epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

    + *

    The timestamp at which the Dataset was created in FinSpace. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

    */ createTime?: number; /** - *

    The last time that the Dataset was modified. The value is determined as Epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

    + *

    The last time that the Dataset was modified. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

    */ lastModifiedTime?: number; @@ -1074,19 +1377,19 @@ export interface GetDatasetResponse { *
      *
    • *

      - * PENDING - Dataset is pending creation.

      + * PENDING – Dataset is pending creation.

      *
    • *
    • *

      - * FAILED - Dataset creation has failed.

      + * FAILED – Dataset creation has failed.

      *
    • *
    • *

      - * SUCCESS - Dataset creation has succeeded.

      + * SUCCESS – Dataset creation has succeeded.

      *
    • *
    • *

      - * RUNNING - Dataset creation is running.

      + * RUNNING – Dataset creation is running.

      *
    • *
    */ @@ -1140,40 +1443,40 @@ export interface DataViewErrorInfo { *
      *
    • *

      - * VALIDATION -The inputs to this request are invalid.

      + * VALIDATION – The inputs to this request are invalid.

      *
    • *
    • *

      - * SERVICE_QUOTA_EXCEEDED - Service quotas have been exceeded. Please + * SERVICE_QUOTA_EXCEEDED – Service quotas have been exceeded. Please * contact AWS support to increase quotas.

      *
    • *
    • *

      - * ACCESS_DENIED - Missing required permission to perform this + * ACCESS_DENIED – Missing required permission to perform this * request.

      *
    • *
    • *

      - * RESOURCE_NOT_FOUND - One or more inputs to this request were not + * RESOURCE_NOT_FOUND – One or more inputs to this request were not * found.

      *
    • *
    • *

      - * THROTTLING - The system temporarily lacks sufficient resources to process + * THROTTLING – The system temporarily lacks sufficient resources to process * the request.

      *
    • *
    • *

      - * INTERNAL_SERVICE_EXCEPTION - An internal service error has + * INTERNAL_SERVICE_EXCEPTION – An internal service error has * occurred.

      *
    • *
    • *

      - * CANCELLED - Cancelled.

      + * CANCELLED – Cancelled.

      *
    • *
    • *

      - * USER_RECOVERABLE - A user recoverable error has occurred.

      + * USER_RECOVERABLE – A user recoverable error has occurred.

      *
    • *
    */ @@ -1220,7 +1523,7 @@ export interface GetDataViewResponse { datasetId?: string; /** - *

    Time range to use for the Dataview. The value is determined as Epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

    + *

    Time range to use for the Dataview. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

    */ asOfTimestamp?: number; @@ -1230,12 +1533,12 @@ export interface GetDataViewResponse { errorInfo?: DataViewErrorInfo; /** - *

    The last time that a Dataview was modified. The value is determined as Epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

    + *

    The last time that a Dataview was modified. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

    */ lastModifiedTime?: number; /** - *

    The timestamp at which the Dataview was created in FinSpace. The value is determined as Epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

    + *

    The timestamp at which the Dataview was created in FinSpace. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

    */ createTime?: number; @@ -1264,35 +1567,35 @@ export interface GetDataViewResponse { *
      *
    • *

      - * RUNNING - Dataview creation is running.

      + * RUNNING – Dataview creation is running.

      *
    • *
    • *

      - * STARTING - Dataview creation is starting.

      + * STARTING – Dataview creation is starting.

      *
    • *
    • *

      - * FAILED - Dataview creation has failed.

      + * FAILED – Dataview creation has failed.

      *
    • *
    • *

      - * CANCELLED - Dataview creation has been cancelled.

      + * CANCELLED – Dataview creation has been cancelled.

      *
    • *
    • *

      - * TIMEOUT - Dataview creation has timed out.

      + * TIMEOUT – Dataview creation has timed out.

      *
    • *
    • *

      - * SUCCESS - Dataview creation has succeeded.

      + * SUCCESS – Dataview creation has succeeded.

      *
    • *
    • *

      - * PENDING - Dataview creation is pending.

      + * PENDING – Dataview creation is pending.

      *
    • *
    • *

      - * FAILED_CLEANUP_FAILED - Dataview creation failed and resource cleanup failed.

      + * FAILED_CLEANUP_FAILED – Dataview creation failed and resource cleanup failed.

      *
    • *
    */ @@ -1385,59 +1688,196 @@ export namespace GetProgrammaticAccessCredentialsResponse { }); } -export enum LocationType { - INGESTION = "INGESTION", - SAGEMAKER = "SAGEMAKER", +export interface GetUserRequest { + /** + *

    The unique identifier of the user to get data for.

    + */ + userId: string | undefined; } -export interface GetWorkingLocationRequest { +export namespace GetUserRequest { /** - *

    Specify the type of the working location.

    + * @internal + */ + export const filterSensitiveLog = (obj: GetUserRequest): any => ({ + ...obj, + }); +} + +export enum UserStatus { + CREATING = "CREATING", + DISABLED = "DISABLED", + ENABLED = "ENABLED", +} + +export interface GetUserResponse { + /** + *

    The unique identifier for the user account that is retrieved.

    + */ + userId?: string; + + /** + *

    The current status of the user account.

    *
      *
    • *

      - * SAGEMAKER - Use the Amazon S3 location as a temporary location to store data content when - * working with FinSpace Notebooks that run on SageMaker studio.

      + * CREATING – The user account creation is in progress.

      *
    • *
    • *

      - * INGESTION - Use the Amazon S3 location as a staging location to copy your - * data content and then use the location with the Changeset creation operation.

      + * ENABLED – The user account is created and is currently active.

      + *
    • + *
    • + *

      + * DISABLED – The user account is currently inactive.

      *
    • *
    */ - locationType?: LocationType | string; -} + status?: UserStatus | string; -export namespace GetWorkingLocationRequest { /** - * @internal + *

    The first name of the user.

    */ - export const filterSensitiveLog = (obj: GetWorkingLocationRequest): any => ({ - ...obj, - }); -} + firstName?: string; -export interface GetWorkingLocationResponse { /** - *

    Returns the Amazon S3 URI for the working location.

    + *

    The last name of the user.

    */ - s3Uri?: string; + lastName?: string; /** - *

    Returns the Amazon S3 Path for the working location.

    + *

    The email address that is associated with the user.

    */ - s3Path?: string; + emailAddress?: string; /** - *

    Returns the Amazon S3 bucket name for the working location.

    + *

    Indicates the type of user.

    + *
      + *
    • + *

      + * SUPER_USER – A user with permission to all the functionality and data in FinSpace.

      + *
    • + *
    + *
      + *
    • + *

      + * APP_USER – A user with specific permissions in FinSpace. The users are assigned permissions by adding them to a permissions group.

      + *
    • + *
    */ - s3Bucket?: string; -} + type?: UserType | string; -export namespace GetWorkingLocationResponse { /** - * @internal + *

    Indicates whether the user can use the GetProgrammaticAccessCredentials API to obtain credentials that can then be used to access other FinSpace Data API operations.

    + *
      + *
    • + *

      + * ENABLED – The user has permissions to use the APIs.

      + *
    • + *
    • + *

      + * DISABLED – The user does not have permissions to use any APIs.

      + *
    • + *
    + */ + apiAccess?: ApiAccess | string; + + /** + *

    The ARN identifier of an AWS user or role that is allowed to call the GetProgrammaticAccessCredentials API to obtain a credentials token for a specific FinSpace user. This must be an IAM role within your FinSpace account.

    + */ + apiAccessPrincipalArn?: string; + + /** + *

    The timestamp at which the user account was created in FinSpace. The value is determined as epoch time in milliseconds.

    + */ + createTime?: number; + + /** + *

    Describes the last time the user account was enabled. The value is determined as epoch time in milliseconds.

    + */ + lastEnabledTime?: number; + + /** + *

    Describes the last time the user account was disabled. The value is determined as epoch time in milliseconds.

    + */ + lastDisabledTime?: number; + + /** + *

    Describes the last time the user account was updated. The value is determined as epoch time in milliseconds.

    + */ + lastModifiedTime?: number; + + /** + *

    Describes the last time that the user logged into their account. The value is determined as epoch time in milliseconds.

    + */ + lastLoginTime?: number; +} + +export namespace GetUserResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetUserResponse): any => ({ + ...obj, + ...(obj.firstName && { firstName: SENSITIVE_STRING }), + ...(obj.lastName && { lastName: SENSITIVE_STRING }), + ...(obj.emailAddress && { emailAddress: SENSITIVE_STRING }), + }); +} + +export enum LocationType { + INGESTION = "INGESTION", + SAGEMAKER = "SAGEMAKER", +} + +export interface GetWorkingLocationRequest { + /** + *

    Specify the type of the working location.

    + *
      + *
    • + *

      + * SAGEMAKER – Use the Amazon S3 location as a temporary location to store data content when + * working with FinSpace Notebooks that run on SageMaker studio.

      + *
    • + *
    • + *

      + * INGESTION – Use the Amazon S3 location as a staging location to copy your + * data content and then use the location with the Changeset creation operation.

      + *
    • + *
    + */ + locationType?: LocationType | string; +} + +export namespace GetWorkingLocationRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetWorkingLocationRequest): any => ({ + ...obj, + }); +} + +export interface GetWorkingLocationResponse { + /** + *

    Returns the Amazon S3 URI for the working location.

    + */ + s3Uri?: string; + + /** + *

    Returns the Amazon S3 Path for the working location.

    + */ + s3Path?: string; + + /** + *

    Returns the Amazon S3 bucket name for the working location.

    + */ + s3Bucket?: string; +} + +export namespace GetWorkingLocationResponse { + /** + * @internal */ export const filterSensitiveLog = (obj: GetWorkingLocationResponse): any => ({ ...obj, @@ -1459,7 +1899,7 @@ export interface ListChangesetsRequest { maxResults?: number; /** - *

    A token indicating where a results page should begin.

    + *

    A token that indicates where a results page should begin.

    */ nextToken?: string; } @@ -1497,17 +1937,17 @@ export interface ChangesetSummary { *
      *
    • *

      - * REPLACE - Changeset is considered as a replacement to all prior loaded + * REPLACE – Changeset is considered as a replacement to all prior loaded * Changesets.

      *
    • *
    • *

      - * APPEND - Changeset is considered as an addition to the end of all prior + * APPEND – Changeset is considered as an addition to the end of all prior * loaded Changesets.

      *
    • *
    • *

      - * MODIFY - Changeset is considered as a replacement to a specific prior + * MODIFY – Changeset is considered as a replacement to a specific prior * ingested Changeset.

      *
    • *
    @@ -1525,7 +1965,7 @@ export interface ChangesetSummary { formatParams?: { [key: string]: string }; /** - *

    The timestamp at which the Changeset was created in FinSpace. The value is determined as Epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

    + *

    The timestamp at which the Changeset was created in FinSpace. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

    */ createTime?: number; @@ -1534,23 +1974,23 @@ export interface ChangesetSummary { *
      *
    • *

      - * PENDING - Changeset is pending creation.

      + * PENDING – Changeset is pending creation.

      *
    • *
    • *

      - * FAILED - Changeset creation has failed.

      + * FAILED – Changeset creation has failed.

      *
    • *
    • *

      - * SUCCESS - Changeset creation has succeeded.

      + * SUCCESS – Changeset creation has succeeded.

      *
    • *
    • *

      - * RUNNING - Changeset creation is running.

      + * RUNNING – Changeset creation is running.

      *
    • *
    • *

      - * STOP_REQUESTED - User requested Changeset creation to stop.

      + * STOP_REQUESTED – User requested Changeset creation to stop.

      *
    • *
    */ @@ -1562,12 +2002,12 @@ export interface ChangesetSummary { errorInfo?: ChangesetErrorInfo; /** - *

    Time until which the Changeset is active. The value is determined as Epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

    + *

    Time until which the Changeset is active. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

    */ activeUntilTimestamp?: number; /** - *

    Beginning time from which the Changeset is active. The value is determined as Epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

    + *

    Beginning time from which the Changeset is active. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

    */ activeFromTimestamp?: number; @@ -1601,7 +2041,7 @@ export interface ListChangesetsResponse { changesets?: ChangesetSummary[]; /** - *

    A token indicating where a results page should begin.

    + *

    A token that indicates where a results page should begin.

    */ nextToken?: string; } @@ -1620,7 +2060,7 @@ export namespace ListChangesetsResponse { */ export interface ListDatasetsRequest { /** - *

    A token indicating where a results page should begin.

    + *

    A token that indicates where a results page should begin.

    */ nextToken?: string; @@ -1663,11 +2103,11 @@ export interface Dataset { *
      *
    • *

      - * TABULAR - Data is structured in a tabular format.

      + * TABULAR – Data is structured in a tabular format.

      *
    • *
    • *

      - * NON_TABULAR - Data is structured in a non-tabular format.

      + * NON_TABULAR – Data is structured in a non-tabular format.

      *
    • *
    */ @@ -1684,12 +2124,12 @@ export interface Dataset { ownerInfo?: DatasetOwnerInfo; /** - *

    The timestamp at which the Dataset was created in FinSpace. The value is determined as Epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

    + *

    The timestamp at which the Dataset was created in FinSpace. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

    */ createTime?: number; /** - *

    The last time that the Dataset was modified. The value is determined as Epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

    + *

    The last time that the Dataset was modified. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

    */ lastModifiedTime?: number; @@ -1710,6 +2150,7 @@ export namespace Dataset { */ export const filterSensitiveLog = (obj: Dataset): any => ({ ...obj, + ...(obj.ownerInfo && { ownerInfo: DatasetOwnerInfo.filterSensitiveLog(obj.ownerInfo) }), }); } @@ -1723,7 +2164,7 @@ export interface ListDatasetsResponse { datasets?: Dataset[]; /** - *

    A token indicating where a results page should begin.

    + *

    A token that indicates where a results page should begin.

    */ nextToken?: string; } @@ -1734,6 +2175,7 @@ export namespace ListDatasetsResponse { */ export const filterSensitiveLog = (obj: ListDatasetsResponse): any => ({ ...obj, + ...(obj.datasets && { datasets: obj.datasets.map((item) => Dataset.filterSensitiveLog(item)) }), }); } @@ -1747,7 +2189,7 @@ export interface ListDataViewsRequest { datasetId: string | undefined; /** - *

    A token indicating where a results page should begin.

    + *

    A token that indicates where a results page should begin.

    */ nextToken?: string; @@ -1786,7 +2228,7 @@ export interface DataViewSummary { datasetId?: string; /** - *

    Time range to use for the Dataview. The value is determined as Epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

    + *

    Time range to use for the Dataview. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

    */ asOfTimestamp?: number; @@ -1805,35 +2247,35 @@ export interface DataViewSummary { *
      *
    • *

      - * RUNNING - Dataview creation is running.

      + * RUNNING – Dataview creation is running.

      *
    • *
    • *

      - * STARTING - Dataview creation is starting.

      + * STARTING – Dataview creation is starting.

      *
    • *
    • *

      - * FAILED - Dataview creation has failed.

      + * FAILED – Dataview creation has failed.

      *
    • *
    • *

      - * CANCELLED - Dataview creation has been cancelled.

      + * CANCELLED – Dataview creation has been cancelled.

      *
    • *
    • *

      - * TIMEOUT - Dataview creation has timed out.

      + * TIMEOUT – Dataview creation has timed out.

      *
    • *
    • *

      - * SUCCESS - Dataview creation has succeeded.

      + * SUCCESS – Dataview creation has succeeded.

      *
    • *
    • *

      - * PENDING - Dataview creation is pending.

      + * PENDING – Dataview creation is pending.

      *
    • *
    • *

      - * FAILED_CLEANUP_FAILED - Dataview creation failed and resource cleanup failed.

      + * FAILED_CLEANUP_FAILED – Dataview creation failed and resource cleanup failed.

      *
    • *
    */ @@ -1855,12 +2297,12 @@ export interface DataViewSummary { autoUpdate?: boolean; /** - *

    The timestamp at which the Dataview was created in FinSpace. The value is determined as Epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

    + *

    The timestamp at which the Dataview was created in FinSpace. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

    */ createTime?: number; /** - *

    The last time that a Dataview was modified. The value is determined as Epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

    + *

    The last time that a Dataview was modified. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

    */ lastModifiedTime?: number; } @@ -1876,7 +2318,7 @@ export namespace DataViewSummary { export interface ListDataViewsResponse { /** - *

    A token indicating where a results page should begin.

    + *

    A token that indicates where a results page should begin.

    */ nextToken?: string; @@ -1895,6 +2337,334 @@ export namespace ListDataViewsResponse { }); } +export interface ListPermissionGroupsRequest { + /** + *

    A token that indicates where a results page should begin.

    + */ + nextToken?: string; + + /** + *

    The maximum number of results per page.

    + */ + maxResults: number | undefined; +} + +export namespace ListPermissionGroupsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListPermissionGroupsRequest): any => ({ + ...obj, + }); +} + +/** + *

    The structure for a permission group.

    + */ +export interface PermissionGroup { + /** + *

    The unique identifier for the permission group.

    + */ + permissionGroupId?: string; + + /** + *

    The name of the permission group.

    + */ + name?: string; + + /** + *

    A brief description for the permission group.

    + */ + description?: string; + + /** + *

    Indicates the permissions that are granted to a specific group for accessing the FinSpace application.

    + *
      + *
    • + *

      + * CreateDataset – Group members can create new datasets.

      + *
    • + *
    • + *

      + * ManageClusters – Group members can manage Apache Spark clusters from FinSpace notebooks.

      + *
    • + *
    • + *

      + * ManageUsersAndGroups – Group members can manage users and permission groups.

      + *
    • + *
    • + *

      + * ManageAttributeSets – Group members can manage attribute sets.

      + *
    • + *
    • + *

      + * ViewAuditData – Group members can view audit data.

      + *
    • + *
    • + *

      + * AccessNotebooks – Group members will have access to FinSpace notebooks.

      + *
    • + *
    • + *

      + * GetTemporaryCredentials – Group members can get temporary API credentials.

      + *
    • + *
    + */ + applicationPermissions?: (ApplicationPermission | string)[]; + + /** + *

    The timestamp at which the group was created in FinSpace. The value is determined as epoch time in milliseconds. + *

    + */ + createTime?: number; + + /** + *

    Describes the last time the permission group was updated. The value is determined as epoch time in milliseconds. + *

    + */ + lastModifiedTime?: number; +} + +export namespace PermissionGroup { + /** + * @internal + */ + export const filterSensitiveLog = (obj: PermissionGroup): any => ({ + ...obj, + ...(obj.name && { name: SENSITIVE_STRING }), + ...(obj.description && { description: SENSITIVE_STRING }), + }); +} + +export interface ListPermissionGroupsResponse { + /** + *

    A list of all the permission groups.

    + */ + permissionGroups?: PermissionGroup[]; + + /** + *

    A token that indicates where a results page should begin.

    + */ + nextToken?: string; +} + +export namespace ListPermissionGroupsResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListPermissionGroupsResponse): any => ({ + ...obj, + ...(obj.permissionGroups && { + permissionGroups: obj.permissionGroups.map((item) => PermissionGroup.filterSensitiveLog(item)), + }), + }); +} + +export interface ListUsersRequest { + /** + *

    A token that indicates where a results page should begin.

    + */ + nextToken?: string; + + /** + *

    The maximum number of results per page.

    + */ + maxResults: number | undefined; +} + +export namespace ListUsersRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListUsersRequest): any => ({ + ...obj, + }); +} + +/** + *

    The details of the user account.

    + */ +export interface User { + /** + *

    The unique identifier for the user.

    + */ + userId?: string; + + /** + *

    The current status of the user account.

    + *
      + *
    • + *

      + * CREATING – The user account creation is in progress.

      + *
    • + *
    • + *

      + * ENABLED – The user account is created and is currently active.

      + *
    • + *
    • + *

      + * DISABLED – The user account is currently inactive.

      + *
    • + *
    + */ + status?: UserStatus | string; + + /** + *

    The first name of the user.

    + */ + firstName?: string; + + /** + *

    The last name of the user.

    + */ + lastName?: string; + + /** + *

    The email address of the user. The email address serves as a unique identifier for each user and cannot be changed after it's created.

    + */ + emailAddress?: string; + + /** + *

    Indicates the type of user.

    + *
      + *
    • + *

      + * SUPER_USER – A user with permission to all the functionality and data in FinSpace.

      + *
    • + *
    • + *

      + * APP_USER – A user with specific permissions in FinSpace. The users are assigned permissions by adding them to a permissions group.

      + *
    • + *
    + */ + type?: UserType | string; + + /** + *

    Indicates whether the user can use the GetProgrammaticAccessCredentials API to obtain credentials that can then be used to access other FinSpace Data API operations.

    + *
      + *
    • + *

      + * ENABLED – The user has permissions to use the APIs.

      + *
    • + *
    • + *

      + * DISABLED – The user does not have permissions to use any APIs.

      + *
    • + *
    + */ + apiAccess?: ApiAccess | string; + + /** + *

    The ARN identifier of an AWS user or role that is allowed to call the GetProgrammaticAccessCredentials API to obtain a credentials token for a specific FinSpace user. This must be an IAM role within your FinSpace account.

    + */ + apiAccessPrincipalArn?: string; + + /** + *

    The timestamp at which the user account was created in FinSpace. The value is determined as epoch time in milliseconds.

    + */ + createTime?: number; + + /** + *

    Describes the last time the user account was enabled. The value is determined as epoch time in milliseconds. + *

    + */ + lastEnabledTime?: number; + + /** + *

    Describes the last time the user account was disabled. The value is determined as epoch time in milliseconds.

    + */ + lastDisabledTime?: number; + + /** + *

    Describes the last time the user account was updated. The value is determined as epoch time in milliseconds. + *

    + */ + lastModifiedTime?: number; + + /** + *

    Describes the last time that the user logged into their account. The value is determined as epoch time in milliseconds. + *

    + */ + lastLoginTime?: number; +} + +export namespace User { + /** + * @internal + */ + export const filterSensitiveLog = (obj: User): any => ({ + ...obj, + ...(obj.firstName && { firstName: SENSITIVE_STRING }), + ...(obj.lastName && { lastName: SENSITIVE_STRING }), + ...(obj.emailAddress && { emailAddress: SENSITIVE_STRING }), + }); +} + +export interface ListUsersResponse { + /** + *

    A list of all the user accounts.

    + */ + users?: User[]; + + /** + *

    A token that indicates where a results page should begin.

    + */ + nextToken?: string; +} + +export namespace ListUsersResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListUsersResponse): any => ({ + ...obj, + ...(obj.users && { users: obj.users.map((item) => User.filterSensitiveLog(item)) }), + }); +} + +export interface ResetUserPasswordRequest { + /** + *

    The unique identifier of the user that a temporary password is requested for.

    + */ + userId: string | undefined; + + /** + *

    A token that ensures idempotency. This token expires in 10 minutes.

    + */ + clientToken?: string; +} + +export namespace ResetUserPasswordRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ResetUserPasswordRequest): any => ({ + ...obj, + }); +} + +export interface ResetUserPasswordResponse { + /** + *

    The unique identifier of the user that a new password is generated for.

    + */ + userId?: string; + + /** + *

    A randomly generated temporary password for the requested user account. This password expires in 7 days.

    + */ + temporaryPassword?: string; +} + +export namespace ResetUserPasswordResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ResetUserPasswordResponse): any => ({ + ...obj, + ...(obj.temporaryPassword && { temporaryPassword: SENSITIVE_STRING }), + }); +} + /** * Request to update an existing changeset. */ @@ -1940,19 +2710,19 @@ export interface UpdateChangesetRequest { *
      *
    • *

      - * PARQUET - Parquet source file format.

      + * PARQUET – Parquet source file format.

      *
    • *
    • *

      - * CSV - CSV source file format.

      + * CSV – CSV source file format.

      *
    • *
    • *

      - * JSON - JSON source file format.

      + * JSON – JSON source file format.

      *
    • *
    • *

      - * XML - XML source file format.

      + * XML – XML source file format.

      *
    • *
    * @@ -2039,11 +2809,11 @@ export interface UpdateDatasetRequest { *
      *
    • *

      - * TABULAR - Data is structured in a tabular format.

      + * TABULAR – Data is structured in a tabular format.

      *
    • *
    • *

      - * NON_TABULAR - Data is structured in a non-tabular format.

      + * NON_TABULAR – Data is structured in a non-tabular format.

      *
    • *
    */ @@ -2092,3 +2862,171 @@ export namespace UpdateDatasetResponse { ...obj, }); } + +export interface UpdatePermissionGroupRequest { + /** + *

    The unique identifier for the permission group to update.

    + */ + permissionGroupId: string | undefined; + + /** + *

    The name of the permission group.

    + */ + name?: string; + + /** + *

    A brief description for the permission group.

    + */ + description?: string; + + /** + *

    The permissions that are granted to a specific group for accessing the FinSpace application.

    + *
      + *
    • + *

      + * CreateDataset – Group members can create new datasets.

      + *
    • + *
    • + *

      + * ManageClusters – Group members can manage Apache Spark clusters from FinSpace notebooks.

      + *
    • + *
    • + *

      + * ManageUsersAndGroups – Group members can manage users and permission groups.

      + *
    • + *
    • + *

      + * ManageAttributeSets – Group members can manage attribute sets.

      + *
    • + *
    • + *

      + * ViewAuditData – Group members can view audit data.

      + *
    • + *
    • + *

      + * AccessNotebooks – Group members will have access to FinSpace notebooks.

      + *
    • + *
    • + *

      + * GetTemporaryCredentials – Group members can get temporary API credentials.

      + *
    • + *
    + */ + applicationPermissions?: (ApplicationPermission | string)[]; + + /** + *

    A token that ensures idempotency. This token expires in 10 minutes.

    + */ + clientToken?: string; +} + +export namespace UpdatePermissionGroupRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdatePermissionGroupRequest): any => ({ + ...obj, + ...(obj.name && { name: SENSITIVE_STRING }), + ...(obj.description && { description: SENSITIVE_STRING }), + }); +} + +export interface UpdatePermissionGroupResponse { + /** + *

    The unique identifier for the updated permission group.

    + */ + permissionGroupId?: string; +} + +export namespace UpdatePermissionGroupResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdatePermissionGroupResponse): any => ({ + ...obj, + }); +} + +export interface UpdateUserRequest { + /** + *

    The unique identifier for the user account to update.

    + */ + userId: string | undefined; + + /** + *

    The option to indicate the type of user.

    + *
      + *
    • + *

      + * SUPER_USER – A user with permission to all the functionality and data in FinSpace.

      + *
    • + *
    • + *

      + * APP_USER – A user with specific permissions in FinSpace. The users are assigned permissions by adding them to a permissions group.

      + *
    • + *
    + */ + type?: UserType | string; + + /** + *

    The first name of the user.

    + */ + firstName?: string; + + /** + *

    The last name of the user.

    + */ + lastName?: string; + + /** + *

    The option to indicate whether the user can use the GetProgrammaticAccessCredentials API to obtain credentials that can then be used to access other FinSpace Data API operations.

    + *
      + *
    • + *

      + * ENABLED – The user has permissions to use the APIs.

      + *
    • + *
    • + *

      + * DISABLED – The user does not have permissions to use any APIs.

      + *
    • + *
    + */ + apiAccess?: ApiAccess | string; + + /** + *

    The ARN identifier of an AWS user or role that is allowed to call the GetProgrammaticAccessCredentials API to obtain a credentials token for a specific FinSpace user. This must be an IAM role within your FinSpace account.

    + */ + apiAccessPrincipalArn?: string; + + /** + *

    A token that ensures idempotency. This token expires in 10 minutes.

    + */ + clientToken?: string; +} + +export namespace UpdateUserRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdateUserRequest): any => ({ + ...obj, + ...(obj.firstName && { firstName: SENSITIVE_STRING }), + ...(obj.lastName && { lastName: SENSITIVE_STRING }), + }); +} + +export interface UpdateUserResponse { + /** + *

    The unique identifier of the updated user account.

    + */ + userId?: string; +} + +export namespace UpdateUserResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdateUserResponse): any => ({ + ...obj, + }); +} diff --git a/clients/client-finspace-data/src/pagination/ListPermissionGroupsPaginator.ts b/clients/client-finspace-data/src/pagination/ListPermissionGroupsPaginator.ts new file mode 100644 index 000000000000..4b245d4dcd0c --- /dev/null +++ b/clients/client-finspace-data/src/pagination/ListPermissionGroupsPaginator.ts @@ -0,0 +1,59 @@ +import { Paginator } from "@aws-sdk/types"; + +import { + ListPermissionGroupsCommand, + ListPermissionGroupsCommandInput, + ListPermissionGroupsCommandOutput, +} from "../commands/ListPermissionGroupsCommand"; +import { FinspaceData } from "../FinspaceData"; +import { FinspaceDataClient } from "../FinspaceDataClient"; +import { FinspaceDataPaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: FinspaceDataClient, + input: ListPermissionGroupsCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new ListPermissionGroupsCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: FinspaceData, + input: ListPermissionGroupsCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.listPermissionGroups(input, ...args); +}; +export async function* paginateListPermissionGroups( + config: FinspaceDataPaginationConfiguration, + input: ListPermissionGroupsCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.nextToken + let token: typeof input.nextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: ListPermissionGroupsCommandOutput; + while (hasNext) { + input.nextToken = token; + input["maxResults"] = config.pageSize; + if (config.client instanceof FinspaceData) { + page = await 
makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof FinspaceDataClient) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected FinspaceData | FinspaceDataClient"); + } + yield page; + token = page.nextToken; + hasNext = !!token; + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-finspace-data/src/pagination/ListUsersPaginator.ts b/clients/client-finspace-data/src/pagination/ListUsersPaginator.ts new file mode 100644 index 000000000000..6411e50ba987 --- /dev/null +++ b/clients/client-finspace-data/src/pagination/ListUsersPaginator.ts @@ -0,0 +1,55 @@ +import { Paginator } from "@aws-sdk/types"; + +import { ListUsersCommand, ListUsersCommandInput, ListUsersCommandOutput } from "../commands/ListUsersCommand"; +import { FinspaceData } from "../FinspaceData"; +import { FinspaceDataClient } from "../FinspaceDataClient"; +import { FinspaceDataPaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: FinspaceDataClient, + input: ListUsersCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new ListUsersCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: FinspaceData, + input: ListUsersCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.listUsers(input, ...args); +}; +export async function* paginateListUsers( + config: FinspaceDataPaginationConfiguration, + input: ListUsersCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.nextToken + let token: typeof input.nextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: ListUsersCommandOutput; + while (hasNext) { + input.nextToken = token; + input["maxResults"] = config.pageSize; + if (config.client 
instanceof FinspaceData) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof FinspaceDataClient) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected FinspaceData | FinspaceDataClient"); + } + yield page; + token = page.nextToken; + hasNext = !!token; + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-finspace-data/src/pagination/index.ts b/clients/client-finspace-data/src/pagination/index.ts index 4180238bba3e..3c596188016e 100644 --- a/clients/client-finspace-data/src/pagination/index.ts +++ b/clients/client-finspace-data/src/pagination/index.ts @@ -2,3 +2,5 @@ export * from "./Interfaces"; export * from "./ListChangesetsPaginator"; export * from "./ListDataViewsPaginator"; export * from "./ListDatasetsPaginator"; +export * from "./ListPermissionGroupsPaginator"; +export * from "./ListUsersPaginator"; diff --git a/clients/client-finspace-data/src/protocols/Aws_restJson1.ts b/clients/client-finspace-data/src/protocols/Aws_restJson1.ts index 0a42ff6c236b..4886570e2f1f 100644 --- a/clients/client-finspace-data/src/protocols/Aws_restJson1.ts +++ b/clients/client-finspace-data/src/protocols/Aws_restJson1.ts @@ -18,7 +18,18 @@ import { v4 as generateIdempotencyToken } from "uuid"; import { CreateChangesetCommandInput, CreateChangesetCommandOutput } from "../commands/CreateChangesetCommand"; import { CreateDatasetCommandInput, CreateDatasetCommandOutput } from "../commands/CreateDatasetCommand"; import { CreateDataViewCommandInput, CreateDataViewCommandOutput } from "../commands/CreateDataViewCommand"; +import { + CreatePermissionGroupCommandInput, + CreatePermissionGroupCommandOutput, +} from "../commands/CreatePermissionGroupCommand"; +import { CreateUserCommandInput, CreateUserCommandOutput } from "../commands/CreateUserCommand"; import { DeleteDatasetCommandInput, DeleteDatasetCommandOutput } 
from "../commands/DeleteDatasetCommand"; +import { + DeletePermissionGroupCommandInput, + DeletePermissionGroupCommandOutput, +} from "../commands/DeletePermissionGroupCommand"; +import { DisableUserCommandInput, DisableUserCommandOutput } from "../commands/DisableUserCommand"; +import { EnableUserCommandInput, EnableUserCommandOutput } from "../commands/EnableUserCommand"; import { GetChangesetCommandInput, GetChangesetCommandOutput } from "../commands/GetChangesetCommand"; import { GetDatasetCommandInput, GetDatasetCommandOutput } from "../commands/GetDatasetCommand"; import { GetDataViewCommandInput, GetDataViewCommandOutput } from "../commands/GetDataViewCommand"; @@ -26,15 +37,28 @@ import { GetProgrammaticAccessCredentialsCommandInput, GetProgrammaticAccessCredentialsCommandOutput, } from "../commands/GetProgrammaticAccessCredentialsCommand"; +import { GetUserCommandInput, GetUserCommandOutput } from "../commands/GetUserCommand"; import { GetWorkingLocationCommandInput, GetWorkingLocationCommandOutput } from "../commands/GetWorkingLocationCommand"; import { ListChangesetsCommandInput, ListChangesetsCommandOutput } from "../commands/ListChangesetsCommand"; import { ListDatasetsCommandInput, ListDatasetsCommandOutput } from "../commands/ListDatasetsCommand"; import { ListDataViewsCommandInput, ListDataViewsCommandOutput } from "../commands/ListDataViewsCommand"; +import { + ListPermissionGroupsCommandInput, + ListPermissionGroupsCommandOutput, +} from "../commands/ListPermissionGroupsCommand"; +import { ListUsersCommandInput, ListUsersCommandOutput } from "../commands/ListUsersCommand"; +import { ResetUserPasswordCommandInput, ResetUserPasswordCommandOutput } from "../commands/ResetUserPasswordCommand"; import { UpdateChangesetCommandInput, UpdateChangesetCommandOutput } from "../commands/UpdateChangesetCommand"; import { UpdateDatasetCommandInput, UpdateDatasetCommandOutput } from "../commands/UpdateDatasetCommand"; +import { + 
UpdatePermissionGroupCommandInput, + UpdatePermissionGroupCommandOutput, +} from "../commands/UpdatePermissionGroupCommand"; +import { UpdateUserCommandInput, UpdateUserCommandOutput } from "../commands/UpdateUserCommand"; import { FinspaceDataServiceException as __BaseException } from "../models/FinspaceDataServiceException"; import { AccessDeniedException, + ApplicationPermission, ChangesetErrorInfo, ChangesetSummary, ColumnDefinition, @@ -47,12 +71,14 @@ import { DataViewSummary, InternalServerException, LimitExceededException, + PermissionGroup, PermissionGroupParams, ResourceNotFoundException, ResourcePermission, SchemaDefinition, SchemaUnion, ThrottlingException, + User, ValidationException, } from "../models/models_0"; @@ -183,6 +209,67 @@ export const serializeAws_restJson1CreateDataViewCommand = async ( }); }; +export const serializeAws_restJson1CreatePermissionGroupCommand = async ( + input: CreatePermissionGroupCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/permission-group"; + let body: any; + body = JSON.stringify({ + ...(input.applicationPermissions !== undefined && + input.applicationPermissions !== null && { + applicationPermissions: serializeAws_restJson1ApplicationPermissionList(input.applicationPermissions, context), + }), + clientToken: input.clientToken ?? 
generateIdempotencyToken(), + ...(input.description !== undefined && input.description !== null && { description: input.description }), + ...(input.name !== undefined && input.name !== null && { name: input.name }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1CreateUserCommand = async ( + input: CreateUserCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/user"; + let body: any; + body = JSON.stringify({ + ...(input.ApiAccess !== undefined && input.ApiAccess !== null && { ApiAccess: input.ApiAccess }), + ...(input.apiAccessPrincipalArn !== undefined && + input.apiAccessPrincipalArn !== null && { apiAccessPrincipalArn: input.apiAccessPrincipalArn }), + clientToken: input.clientToken ?? 
generateIdempotencyToken(), + ...(input.emailAddress !== undefined && input.emailAddress !== null && { emailAddress: input.emailAddress }), + ...(input.firstName !== undefined && input.firstName !== null && { firstName: input.firstName }), + ...(input.lastName !== undefined && input.lastName !== null && { lastName: input.lastName }), + ...(input.type !== undefined && input.type !== null && { type: input.type }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + export const serializeAws_restJson1DeleteDatasetCommand = async ( input: DeleteDatasetCommandInput, context: __SerdeContext @@ -215,6 +302,105 @@ export const serializeAws_restJson1DeleteDatasetCommand = async ( }); }; +export const serializeAws_restJson1DeletePermissionGroupCommand = async ( + input: DeletePermissionGroupCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = + `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/permission-group/{permissionGroupId}"; + if (input.permissionGroupId !== undefined) { + const labelValue: string = input.permissionGroupId; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: permissionGroupId."); + } + resolvedPath = resolvedPath.replace("{permissionGroupId}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: permissionGroupId."); + } + const query: any = { + ...(input.clientToken !== undefined && { clientToken: input.clientToken }), + }; + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "DELETE", + headers, + path: resolvedPath, + query, + body, + }); +}; + +export const serializeAws_restJson1DisableUserCommand = async ( + input: DisableUserCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + let resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/user/{userId}/disable"; + if (input.userId !== undefined) { + const labelValue: string = input.userId; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: userId."); + } + resolvedPath = resolvedPath.replace("{userId}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: userId."); + } + let body: any; + body = JSON.stringify({ + clientToken: input.clientToken ?? 
generateIdempotencyToken(), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1EnableUserCommand = async ( + input: EnableUserCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + let resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/user/{userId}/enable"; + if (input.userId !== undefined) { + const labelValue: string = input.userId; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: userId."); + } + resolvedPath = resolvedPath.replace("{userId}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: userId."); + } + let body: any; + body = JSON.stringify({ + clientToken: input.clientToken ?? generateIdempotencyToken(), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + export const serializeAws_restJson1GetChangesetCommand = async ( input: GetChangesetCommandInput, context: __SerdeContext @@ -346,6 +532,34 @@ export const serializeAws_restJson1GetProgrammaticAccessCredentialsCommand = asy }); }; +export const serializeAws_restJson1GetUserCommand = async ( + input: GetUserCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/user/{userId}"; + if (input.userId !== undefined) { + const labelValue: string = input.userId; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: userId."); + } + resolvedPath = resolvedPath.replace("{userId}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: userId."); + } + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "GET", + headers, + path: resolvedPath, + body, + }); +}; + export const serializeAws_restJson1GetWorkingLocationCommand = async ( input: GetWorkingLocationCommandInput, context: __SerdeContext @@ -462,6 +676,87 @@ export const serializeAws_restJson1ListDataViewsCommand = async ( }); }; +export const serializeAws_restJson1ListPermissionGroupsCommand = async ( + input: ListPermissionGroupsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/permission-group"; + const query: any = { + ...(input.nextToken !== undefined && { nextToken: input.nextToken }), + ...(input.maxResults !== undefined && { maxResults: input.maxResults.toString() }), + }; + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "GET", + headers, + path: resolvedPath, + query, + body, + }); +}; + +export const serializeAws_restJson1ListUsersCommand = async ( + input: ListUsersCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + const resolvedPath = `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/user"; + const query: any = { + ...(input.nextToken !== undefined && { nextToken: input.nextToken }), + ...(input.maxResults !== undefined && { maxResults: input.maxResults.toString() }), + }; + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "GET", + headers, + path: resolvedPath, + query, + body, + }); +}; + +export const serializeAws_restJson1ResetUserPasswordCommand = async ( + input: ResetUserPasswordCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + let resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/user/{userId}/password"; + if (input.userId !== undefined) { + const labelValue: string = input.userId; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: userId."); + } + resolvedPath = resolvedPath.replace("{userId}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: userId."); + } + let body: any; + body = JSON.stringify({ + clientToken: input.clientToken ?? 
generateIdempotencyToken(), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + export const serializeAws_restJson1UpdateChangesetCommand = async ( input: UpdateChangesetCommandInput, context: __SerdeContext @@ -552,35 +847,114 @@ export const serializeAws_restJson1UpdateDatasetCommand = async ( }); }; -export const deserializeAws_restJson1CreateChangesetCommand = async ( - output: __HttpResponse, +export const serializeAws_restJson1UpdatePermissionGroupCommand = async ( + input: UpdatePermissionGroupCommandInput, context: __SerdeContext -): Promise => { - if (output.statusCode !== 200 && output.statusCode >= 300) { - return deserializeAws_restJson1CreateChangesetCommandError(output, context); - } - const contents: CreateChangesetCommandOutput = { - $metadata: deserializeMetadata(output), - changesetId: undefined, - datasetId: undefined, +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", }; - const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); - if (data.changesetId !== undefined && data.changesetId !== null) { - contents.changesetId = __expectString(data.changesetId); - } - if (data.datasetId !== undefined && data.datasetId !== null) { - contents.datasetId = __expectString(data.datasetId); + let resolvedPath = + `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/permission-group/{permissionGroupId}"; + if (input.permissionGroupId !== undefined) { + const labelValue: string = input.permissionGroupId; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: permissionGroupId."); + } + resolvedPath = resolvedPath.replace("{permissionGroupId}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: permissionGroupId."); } - return Promise.resolve(contents); + let body: any; + body = JSON.stringify({ + ...(input.applicationPermissions !== undefined && + input.applicationPermissions !== null && { + applicationPermissions: serializeAws_restJson1ApplicationPermissionList(input.applicationPermissions, context), + }), + clientToken: input.clientToken ?? generateIdempotencyToken(), + ...(input.description !== undefined && input.description !== null && { description: input.description }), + ...(input.name !== undefined && input.name !== null && { name: input.name }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "PUT", + headers, + path: resolvedPath, + body, + }); }; -const deserializeAws_restJson1CreateChangesetCommandError = async ( - output: __HttpResponse, +export const serializeAws_restJson1UpdateUserCommand = async ( + input: UpdateUserCommandInput, context: __SerdeContext -): Promise => { - const parsedOutput: any = { - ...output, - body: await parseBody(output.body, context), +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + let resolvedPath = `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/user/{userId}"; + if (input.userId !== undefined) { + const labelValue: string = input.userId; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: userId."); + } + resolvedPath = resolvedPath.replace("{userId}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: userId."); + } + let body: any; + body = JSON.stringify({ + ...(input.apiAccess !== undefined && input.apiAccess !== null && { apiAccess: input.apiAccess }), + ...(input.apiAccessPrincipalArn !== undefined && + input.apiAccessPrincipalArn !== null && { apiAccessPrincipalArn: input.apiAccessPrincipalArn }), + clientToken: input.clientToken ?? generateIdempotencyToken(), + ...(input.firstName !== undefined && input.firstName !== null && { firstName: input.firstName }), + ...(input.lastName !== undefined && input.lastName !== null && { lastName: input.lastName }), + ...(input.type !== undefined && input.type !== null && { type: input.type }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "PUT", + headers, + path: resolvedPath, + body, + }); +}; + +export const deserializeAws_restJson1CreateChangesetCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1CreateChangesetCommandError(output, context); + } + const contents: CreateChangesetCommandOutput = { + $metadata: deserializeMetadata(output), + changesetId: undefined, + datasetId: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.changesetId !== undefined && data.changesetId !== null) { + contents.changesetId = __expectString(data.changesetId); + } + if (data.datasetId !== undefined && data.datasetId !== null) { + contents.datasetId = 
__expectString(data.datasetId); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1CreateChangesetCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), }; let response: __BaseException; let errorCode = "UnknownError"; @@ -743,6 +1117,124 @@ const deserializeAws_restJson1CreateDataViewCommandError = async ( } }; +export const deserializeAws_restJson1CreatePermissionGroupCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1CreatePermissionGroupCommandError(output, context); + } + const contents: CreatePermissionGroupCommandOutput = { + $metadata: deserializeMetadata(output), + permissionGroupId: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.permissionGroupId !== undefined && data.permissionGroupId !== null) { + contents.permissionGroupId = __expectString(data.permissionGroupId); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1CreatePermissionGroupCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __BaseException; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.finspacedata#AccessDeniedException": + throw await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context); + case "ConflictException": + case "com.amazonaws.finspacedata#ConflictException": + throw await deserializeAws_restJson1ConflictExceptionResponse(parsedOutput, context); + case "InternalServerException": 
+ case "com.amazonaws.finspacedata#InternalServerException": + throw await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context); + case "LimitExceededException": + case "com.amazonaws.finspacedata#LimitExceededException": + throw await deserializeAws_restJson1LimitExceededExceptionResponse(parsedOutput, context); + case "ThrottlingException": + case "com.amazonaws.finspacedata#ThrottlingException": + throw await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context); + case "ValidationException": + case "com.amazonaws.finspacedata#ValidationException": + throw await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context); + default: + const parsedBody = parsedOutput.body; + response = new __BaseException({ + name: parsedBody.code || parsedBody.Code || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + }); + throw __decorateServiceException(response, parsedBody); + } +}; + +export const deserializeAws_restJson1CreateUserCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1CreateUserCommandError(output, context); + } + const contents: CreateUserCommandOutput = { + $metadata: deserializeMetadata(output), + userId: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.userId !== undefined && data.userId !== null) { + contents.userId = __expectString(data.userId); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1CreateUserCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __BaseException; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + 
switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.finspacedata#AccessDeniedException": + throw await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context); + case "ConflictException": + case "com.amazonaws.finspacedata#ConflictException": + throw await deserializeAws_restJson1ConflictExceptionResponse(parsedOutput, context); + case "InternalServerException": + case "com.amazonaws.finspacedata#InternalServerException": + throw await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context); + case "LimitExceededException": + case "com.amazonaws.finspacedata#LimitExceededException": + throw await deserializeAws_restJson1LimitExceededExceptionResponse(parsedOutput, context); + case "ThrottlingException": + case "com.amazonaws.finspacedata#ThrottlingException": + throw await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context); + case "ValidationException": + case "com.amazonaws.finspacedata#ValidationException": + throw await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context); + default: + const parsedBody = parsedOutput.body; + response = new __BaseException({ + name: parsedBody.code || parsedBody.Code || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + }); + throw __decorateServiceException(response, parsedBody); + } +}; + export const deserializeAws_restJson1DeleteDatasetCommand = async ( output: __HttpResponse, context: __SerdeContext @@ -805,76 +1297,28 @@ const deserializeAws_restJson1DeleteDatasetCommandError = async ( } }; -export const deserializeAws_restJson1GetChangesetCommand = async ( +export const deserializeAws_restJson1DeletePermissionGroupCommand = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { if (output.statusCode !== 200 && output.statusCode >= 300) { - return deserializeAws_restJson1GetChangesetCommandError(output, context); + return 
deserializeAws_restJson1DeletePermissionGroupCommandError(output, context); } - const contents: GetChangesetCommandOutput = { + const contents: DeletePermissionGroupCommandOutput = { $metadata: deserializeMetadata(output), - activeFromTimestamp: undefined, - activeUntilTimestamp: undefined, - changeType: undefined, - changesetArn: undefined, - changesetId: undefined, - createTime: undefined, - datasetId: undefined, - errorInfo: undefined, - formatParams: undefined, - sourceParams: undefined, - status: undefined, - updatedByChangesetId: undefined, - updatesChangesetId: undefined, + permissionGroupId: undefined, }; const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); - if (data.activeFromTimestamp !== undefined && data.activeFromTimestamp !== null) { - contents.activeFromTimestamp = __expectLong(data.activeFromTimestamp); - } - if (data.activeUntilTimestamp !== undefined && data.activeUntilTimestamp !== null) { - contents.activeUntilTimestamp = __expectLong(data.activeUntilTimestamp); - } - if (data.changeType !== undefined && data.changeType !== null) { - contents.changeType = __expectString(data.changeType); - } - if (data.changesetArn !== undefined && data.changesetArn !== null) { - contents.changesetArn = __expectString(data.changesetArn); - } - if (data.changesetId !== undefined && data.changesetId !== null) { - contents.changesetId = __expectString(data.changesetId); - } - if (data.createTime !== undefined && data.createTime !== null) { - contents.createTime = __expectLong(data.createTime); - } - if (data.datasetId !== undefined && data.datasetId !== null) { - contents.datasetId = __expectString(data.datasetId); - } - if (data.errorInfo !== undefined && data.errorInfo !== null) { - contents.errorInfo = deserializeAws_restJson1ChangesetErrorInfo(data.errorInfo, context); - } - if (data.formatParams !== undefined && data.formatParams !== null) { - contents.formatParams = 
deserializeAws_restJson1FormatParams(data.formatParams, context); - } - if (data.sourceParams !== undefined && data.sourceParams !== null) { - contents.sourceParams = deserializeAws_restJson1SourceParams(data.sourceParams, context); - } - if (data.status !== undefined && data.status !== null) { - contents.status = __expectString(data.status); - } - if (data.updatedByChangesetId !== undefined && data.updatedByChangesetId !== null) { - contents.updatedByChangesetId = __expectString(data.updatedByChangesetId); - } - if (data.updatesChangesetId !== undefined && data.updatesChangesetId !== null) { - contents.updatesChangesetId = __expectString(data.updatesChangesetId); + if (data.permissionGroupId !== undefined && data.permissionGroupId !== null) { + contents.permissionGroupId = __expectString(data.permissionGroupId); } return Promise.resolve(contents); }; -const deserializeAws_restJson1GetChangesetCommandError = async ( +const deserializeAws_restJson1DeletePermissionGroupCommandError = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { const parsedOutput: any = { ...output, body: await parseBody(output.body, context), @@ -892,6 +1336,9 @@ const deserializeAws_restJson1GetChangesetCommandError = async ( case "InternalServerException": case "com.amazonaws.finspacedata#InternalServerException": throw await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context); + case "LimitExceededException": + case "com.amazonaws.finspacedata#LimitExceededException": + throw await deserializeAws_restJson1LimitExceededExceptionResponse(parsedOutput, context); case "ResourceNotFoundException": case "com.amazonaws.finspacedata#ResourceNotFoundException": throw await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context); @@ -912,56 +1359,284 @@ const deserializeAws_restJson1GetChangesetCommandError = async ( } }; -export const deserializeAws_restJson1GetDatasetCommand = async ( +export const 
deserializeAws_restJson1DisableUserCommand = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { if (output.statusCode !== 200 && output.statusCode >= 300) { - return deserializeAws_restJson1GetDatasetCommandError(output, context); + return deserializeAws_restJson1DisableUserCommandError(output, context); } - const contents: GetDatasetCommandOutput = { + const contents: DisableUserCommandOutput = { $metadata: deserializeMetadata(output), - alias: undefined, - createTime: undefined, - datasetArn: undefined, - datasetDescription: undefined, - datasetId: undefined, - datasetTitle: undefined, - kind: undefined, - lastModifiedTime: undefined, - schemaDefinition: undefined, - status: undefined, + userId: undefined, }; const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); - if (data.alias !== undefined && data.alias !== null) { - contents.alias = __expectString(data.alias); - } - if (data.createTime !== undefined && data.createTime !== null) { - contents.createTime = __expectLong(data.createTime); - } - if (data.datasetArn !== undefined && data.datasetArn !== null) { - contents.datasetArn = __expectString(data.datasetArn); - } - if (data.datasetDescription !== undefined && data.datasetDescription !== null) { - contents.datasetDescription = __expectString(data.datasetDescription); - } - if (data.datasetId !== undefined && data.datasetId !== null) { - contents.datasetId = __expectString(data.datasetId); - } - if (data.datasetTitle !== undefined && data.datasetTitle !== null) { - contents.datasetTitle = __expectString(data.datasetTitle); - } - if (data.kind !== undefined && data.kind !== null) { - contents.kind = __expectString(data.kind); - } - if (data.lastModifiedTime !== undefined && data.lastModifiedTime !== null) { - contents.lastModifiedTime = __expectLong(data.lastModifiedTime); - } - if (data.schemaDefinition !== undefined && data.schemaDefinition !== null) { - 
contents.schemaDefinition = deserializeAws_restJson1SchemaUnion(data.schemaDefinition, context); - } - if (data.status !== undefined && data.status !== null) { - contents.status = __expectString(data.status); + if (data.userId !== undefined && data.userId !== null) { + contents.userId = __expectString(data.userId); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1DisableUserCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __BaseException; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.finspacedata#AccessDeniedException": + throw await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context); + case "ConflictException": + case "com.amazonaws.finspacedata#ConflictException": + throw await deserializeAws_restJson1ConflictExceptionResponse(parsedOutput, context); + case "InternalServerException": + case "com.amazonaws.finspacedata#InternalServerException": + throw await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context); + case "ResourceNotFoundException": + case "com.amazonaws.finspacedata#ResourceNotFoundException": + throw await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context); + case "ThrottlingException": + case "com.amazonaws.finspacedata#ThrottlingException": + throw await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context); + case "ValidationException": + case "com.amazonaws.finspacedata#ValidationException": + throw await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context); + default: + const parsedBody = parsedOutput.body; + response = new __BaseException({ + name: parsedBody.code || parsedBody.Code || errorCode, + $fault: 
"client", + $metadata: deserializeMetadata(output), + }); + throw __decorateServiceException(response, parsedBody); + } +}; + +export const deserializeAws_restJson1EnableUserCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1EnableUserCommandError(output, context); + } + const contents: EnableUserCommandOutput = { + $metadata: deserializeMetadata(output), + userId: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.userId !== undefined && data.userId !== null) { + contents.userId = __expectString(data.userId); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1EnableUserCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __BaseException; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.finspacedata#AccessDeniedException": + throw await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context); + case "ConflictException": + case "com.amazonaws.finspacedata#ConflictException": + throw await deserializeAws_restJson1ConflictExceptionResponse(parsedOutput, context); + case "InternalServerException": + case "com.amazonaws.finspacedata#InternalServerException": + throw await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context); + case "LimitExceededException": + case "com.amazonaws.finspacedata#LimitExceededException": + throw await deserializeAws_restJson1LimitExceededExceptionResponse(parsedOutput, context); + case "ResourceNotFoundException": + case "com.amazonaws.finspacedata#ResourceNotFoundException": 
+ throw await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context); + case "ThrottlingException": + case "com.amazonaws.finspacedata#ThrottlingException": + throw await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context); + case "ValidationException": + case "com.amazonaws.finspacedata#ValidationException": + throw await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context); + default: + const parsedBody = parsedOutput.body; + response = new __BaseException({ + name: parsedBody.code || parsedBody.Code || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + }); + throw __decorateServiceException(response, parsedBody); + } +}; + +export const deserializeAws_restJson1GetChangesetCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1GetChangesetCommandError(output, context); + } + const contents: GetChangesetCommandOutput = { + $metadata: deserializeMetadata(output), + activeFromTimestamp: undefined, + activeUntilTimestamp: undefined, + changeType: undefined, + changesetArn: undefined, + changesetId: undefined, + createTime: undefined, + datasetId: undefined, + errorInfo: undefined, + formatParams: undefined, + sourceParams: undefined, + status: undefined, + updatedByChangesetId: undefined, + updatesChangesetId: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.activeFromTimestamp !== undefined && data.activeFromTimestamp !== null) { + contents.activeFromTimestamp = __expectLong(data.activeFromTimestamp); + } + if (data.activeUntilTimestamp !== undefined && data.activeUntilTimestamp !== null) { + contents.activeUntilTimestamp = __expectLong(data.activeUntilTimestamp); + } + if (data.changeType !== undefined && data.changeType !== null) { + 
contents.changeType = __expectString(data.changeType); + } + if (data.changesetArn !== undefined && data.changesetArn !== null) { + contents.changesetArn = __expectString(data.changesetArn); + } + if (data.changesetId !== undefined && data.changesetId !== null) { + contents.changesetId = __expectString(data.changesetId); + } + if (data.createTime !== undefined && data.createTime !== null) { + contents.createTime = __expectLong(data.createTime); + } + if (data.datasetId !== undefined && data.datasetId !== null) { + contents.datasetId = __expectString(data.datasetId); + } + if (data.errorInfo !== undefined && data.errorInfo !== null) { + contents.errorInfo = deserializeAws_restJson1ChangesetErrorInfo(data.errorInfo, context); + } + if (data.formatParams !== undefined && data.formatParams !== null) { + contents.formatParams = deserializeAws_restJson1FormatParams(data.formatParams, context); + } + if (data.sourceParams !== undefined && data.sourceParams !== null) { + contents.sourceParams = deserializeAws_restJson1SourceParams(data.sourceParams, context); + } + if (data.status !== undefined && data.status !== null) { + contents.status = __expectString(data.status); + } + if (data.updatedByChangesetId !== undefined && data.updatedByChangesetId !== null) { + contents.updatedByChangesetId = __expectString(data.updatedByChangesetId); + } + if (data.updatesChangesetId !== undefined && data.updatesChangesetId !== null) { + contents.updatesChangesetId = __expectString(data.updatesChangesetId); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1GetChangesetCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __BaseException; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case 
"com.amazonaws.finspacedata#AccessDeniedException": + throw await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context); + case "ConflictException": + case "com.amazonaws.finspacedata#ConflictException": + throw await deserializeAws_restJson1ConflictExceptionResponse(parsedOutput, context); + case "InternalServerException": + case "com.amazonaws.finspacedata#InternalServerException": + throw await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context); + case "ResourceNotFoundException": + case "com.amazonaws.finspacedata#ResourceNotFoundException": + throw await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context); + case "ThrottlingException": + case "com.amazonaws.finspacedata#ThrottlingException": + throw await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context); + case "ValidationException": + case "com.amazonaws.finspacedata#ValidationException": + throw await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context); + default: + const parsedBody = parsedOutput.body; + response = new __BaseException({ + name: parsedBody.code || parsedBody.Code || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + }); + throw __decorateServiceException(response, parsedBody); + } +}; + +export const deserializeAws_restJson1GetDatasetCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1GetDatasetCommandError(output, context); + } + const contents: GetDatasetCommandOutput = { + $metadata: deserializeMetadata(output), + alias: undefined, + createTime: undefined, + datasetArn: undefined, + datasetDescription: undefined, + datasetId: undefined, + datasetTitle: undefined, + kind: undefined, + lastModifiedTime: undefined, + schemaDefinition: undefined, + status: undefined, + }; + const data: { [key: string]: any } = 
__expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.alias !== undefined && data.alias !== null) { + contents.alias = __expectString(data.alias); + } + if (data.createTime !== undefined && data.createTime !== null) { + contents.createTime = __expectLong(data.createTime); + } + if (data.datasetArn !== undefined && data.datasetArn !== null) { + contents.datasetArn = __expectString(data.datasetArn); + } + if (data.datasetDescription !== undefined && data.datasetDescription !== null) { + contents.datasetDescription = __expectString(data.datasetDescription); + } + if (data.datasetId !== undefined && data.datasetId !== null) { + contents.datasetId = __expectString(data.datasetId); + } + if (data.datasetTitle !== undefined && data.datasetTitle !== null) { + contents.datasetTitle = __expectString(data.datasetTitle); + } + if (data.kind !== undefined && data.kind !== null) { + contents.kind = __expectString(data.kind); + } + if (data.lastModifiedTime !== undefined && data.lastModifiedTime !== null) { + contents.lastModifiedTime = __expectLong(data.lastModifiedTime); + } + if (data.schemaDefinition !== undefined && data.schemaDefinition !== null) { + contents.schemaDefinition = deserializeAws_restJson1SchemaUnion(data.schemaDefinition, context); + } + if (data.status !== undefined && data.status !== null) { + contents.status = __expectString(data.status); } return Promise.resolve(contents); }; @@ -969,7 +1644,458 @@ export const deserializeAws_restJson1GetDatasetCommand = async ( const deserializeAws_restJson1GetDatasetCommandError = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __BaseException; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case 
"com.amazonaws.finspacedata#AccessDeniedException": + throw await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context); + case "ConflictException": + case "com.amazonaws.finspacedata#ConflictException": + throw await deserializeAws_restJson1ConflictExceptionResponse(parsedOutput, context); + case "InternalServerException": + case "com.amazonaws.finspacedata#InternalServerException": + throw await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context); + case "ResourceNotFoundException": + case "com.amazonaws.finspacedata#ResourceNotFoundException": + throw await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context); + case "ThrottlingException": + case "com.amazonaws.finspacedata#ThrottlingException": + throw await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context); + case "ValidationException": + case "com.amazonaws.finspacedata#ValidationException": + throw await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context); + default: + const parsedBody = parsedOutput.body; + response = new __BaseException({ + name: parsedBody.code || parsedBody.Code || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + }); + throw __decorateServiceException(response, parsedBody); + } +}; + +export const deserializeAws_restJson1GetDataViewCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1GetDataViewCommandError(output, context); + } + const contents: GetDataViewCommandOutput = { + $metadata: deserializeMetadata(output), + asOfTimestamp: undefined, + autoUpdate: undefined, + createTime: undefined, + dataViewArn: undefined, + dataViewId: undefined, + datasetId: undefined, + destinationTypeParams: undefined, + errorInfo: undefined, + lastModifiedTime: undefined, + partitionColumns: undefined, + sortColumns: undefined, 
+ status: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.asOfTimestamp !== undefined && data.asOfTimestamp !== null) { + contents.asOfTimestamp = __expectLong(data.asOfTimestamp); + } + if (data.autoUpdate !== undefined && data.autoUpdate !== null) { + contents.autoUpdate = __expectBoolean(data.autoUpdate); + } + if (data.createTime !== undefined && data.createTime !== null) { + contents.createTime = __expectLong(data.createTime); + } + if (data.dataViewArn !== undefined && data.dataViewArn !== null) { + contents.dataViewArn = __expectString(data.dataViewArn); + } + if (data.dataViewId !== undefined && data.dataViewId !== null) { + contents.dataViewId = __expectString(data.dataViewId); + } + if (data.datasetId !== undefined && data.datasetId !== null) { + contents.datasetId = __expectString(data.datasetId); + } + if (data.destinationTypeParams !== undefined && data.destinationTypeParams !== null) { + contents.destinationTypeParams = deserializeAws_restJson1DataViewDestinationTypeParams( + data.destinationTypeParams, + context + ); + } + if (data.errorInfo !== undefined && data.errorInfo !== null) { + contents.errorInfo = deserializeAws_restJson1DataViewErrorInfo(data.errorInfo, context); + } + if (data.lastModifiedTime !== undefined && data.lastModifiedTime !== null) { + contents.lastModifiedTime = __expectLong(data.lastModifiedTime); + } + if (data.partitionColumns !== undefined && data.partitionColumns !== null) { + contents.partitionColumns = deserializeAws_restJson1PartitionColumnList(data.partitionColumns, context); + } + if (data.sortColumns !== undefined && data.sortColumns !== null) { + contents.sortColumns = deserializeAws_restJson1SortColumnList(data.sortColumns, context); + } + if (data.status !== undefined && data.status !== null) { + contents.status = __expectString(data.status); + } + return Promise.resolve(contents); +}; + +const 
deserializeAws_restJson1GetDataViewCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __BaseException; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "ConflictException": + case "com.amazonaws.finspacedata#ConflictException": + throw await deserializeAws_restJson1ConflictExceptionResponse(parsedOutput, context); + case "InternalServerException": + case "com.amazonaws.finspacedata#InternalServerException": + throw await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context); + case "ResourceNotFoundException": + case "com.amazonaws.finspacedata#ResourceNotFoundException": + throw await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context); + case "ThrottlingException": + case "com.amazonaws.finspacedata#ThrottlingException": + throw await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context); + case "ValidationException": + case "com.amazonaws.finspacedata#ValidationException": + throw await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context); + default: + const parsedBody = parsedOutput.body; + response = new __BaseException({ + name: parsedBody.code || parsedBody.Code || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + }); + throw __decorateServiceException(response, parsedBody); + } +}; + +export const deserializeAws_restJson1GetProgrammaticAccessCredentialsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1GetProgrammaticAccessCredentialsCommandError(output, context); + } + const contents: GetProgrammaticAccessCredentialsCommandOutput = { + $metadata: deserializeMetadata(output), + 
credentials: undefined, + durationInMinutes: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.credentials !== undefined && data.credentials !== null) { + contents.credentials = deserializeAws_restJson1Credentials(data.credentials, context); + } + if (data.durationInMinutes !== undefined && data.durationInMinutes !== null) { + contents.durationInMinutes = __expectLong(data.durationInMinutes); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1GetProgrammaticAccessCredentialsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __BaseException; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.finspacedata#AccessDeniedException": + throw await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context); + case "InternalServerException": + case "com.amazonaws.finspacedata#InternalServerException": + throw await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context); + case "ThrottlingException": + case "com.amazonaws.finspacedata#ThrottlingException": + throw await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context); + case "ValidationException": + case "com.amazonaws.finspacedata#ValidationException": + throw await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context); + default: + const parsedBody = parsedOutput.body; + response = new __BaseException({ + name: parsedBody.code || parsedBody.Code || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + }); + throw __decorateServiceException(response, parsedBody); + } +}; + +export const deserializeAws_restJson1GetUserCommand 
= async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1GetUserCommandError(output, context); + } + const contents: GetUserCommandOutput = { + $metadata: deserializeMetadata(output), + apiAccess: undefined, + apiAccessPrincipalArn: undefined, + createTime: undefined, + emailAddress: undefined, + firstName: undefined, + lastDisabledTime: undefined, + lastEnabledTime: undefined, + lastLoginTime: undefined, + lastModifiedTime: undefined, + lastName: undefined, + status: undefined, + type: undefined, + userId: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.apiAccess !== undefined && data.apiAccess !== null) { + contents.apiAccess = __expectString(data.apiAccess); + } + if (data.apiAccessPrincipalArn !== undefined && data.apiAccessPrincipalArn !== null) { + contents.apiAccessPrincipalArn = __expectString(data.apiAccessPrincipalArn); + } + if (data.createTime !== undefined && data.createTime !== null) { + contents.createTime = __expectLong(data.createTime); + } + if (data.emailAddress !== undefined && data.emailAddress !== null) { + contents.emailAddress = __expectString(data.emailAddress); + } + if (data.firstName !== undefined && data.firstName !== null) { + contents.firstName = __expectString(data.firstName); + } + if (data.lastDisabledTime !== undefined && data.lastDisabledTime !== null) { + contents.lastDisabledTime = __expectLong(data.lastDisabledTime); + } + if (data.lastEnabledTime !== undefined && data.lastEnabledTime !== null) { + contents.lastEnabledTime = __expectLong(data.lastEnabledTime); + } + if (data.lastLoginTime !== undefined && data.lastLoginTime !== null) { + contents.lastLoginTime = __expectLong(data.lastLoginTime); + } + if (data.lastModifiedTime !== undefined && data.lastModifiedTime !== null) { + contents.lastModifiedTime = 
__expectLong(data.lastModifiedTime); + } + if (data.lastName !== undefined && data.lastName !== null) { + contents.lastName = __expectString(data.lastName); + } + if (data.status !== undefined && data.status !== null) { + contents.status = __expectString(data.status); + } + if (data.type !== undefined && data.type !== null) { + contents.type = __expectString(data.type); + } + if (data.userId !== undefined && data.userId !== null) { + contents.userId = __expectString(data.userId); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1GetUserCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __BaseException; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.finspacedata#AccessDeniedException": + throw await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context); + case "InternalServerException": + case "com.amazonaws.finspacedata#InternalServerException": + throw await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context); + case "ResourceNotFoundException": + case "com.amazonaws.finspacedata#ResourceNotFoundException": + throw await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context); + case "ThrottlingException": + case "com.amazonaws.finspacedata#ThrottlingException": + throw await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context); + case "ValidationException": + case "com.amazonaws.finspacedata#ValidationException": + throw await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context); + default: + const parsedBody = parsedOutput.body; + response = new __BaseException({ + name: parsedBody.code || parsedBody.Code || errorCode, + $fault: "client", + 
$metadata: deserializeMetadata(output), + }); + throw __decorateServiceException(response, parsedBody); + } +}; + +export const deserializeAws_restJson1GetWorkingLocationCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1GetWorkingLocationCommandError(output, context); + } + const contents: GetWorkingLocationCommandOutput = { + $metadata: deserializeMetadata(output), + s3Bucket: undefined, + s3Path: undefined, + s3Uri: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.s3Bucket !== undefined && data.s3Bucket !== null) { + contents.s3Bucket = __expectString(data.s3Bucket); + } + if (data.s3Path !== undefined && data.s3Path !== null) { + contents.s3Path = __expectString(data.s3Path); + } + if (data.s3Uri !== undefined && data.s3Uri !== null) { + contents.s3Uri = __expectString(data.s3Uri); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1GetWorkingLocationCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __BaseException; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.finspacedata#AccessDeniedException": + throw await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context); + case "InternalServerException": + case "com.amazonaws.finspacedata#InternalServerException": + throw await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context); + case "ThrottlingException": + case "com.amazonaws.finspacedata#ThrottlingException": + throw await 
deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context); + case "ValidationException": + case "com.amazonaws.finspacedata#ValidationException": + throw await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context); + default: + const parsedBody = parsedOutput.body; + response = new __BaseException({ + name: parsedBody.code || parsedBody.Code || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + }); + throw __decorateServiceException(response, parsedBody); + } +}; + +export const deserializeAws_restJson1ListChangesetsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1ListChangesetsCommandError(output, context); + } + const contents: ListChangesetsCommandOutput = { + $metadata: deserializeMetadata(output), + changesets: undefined, + nextToken: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.changesets !== undefined && data.changesets !== null) { + contents.changesets = deserializeAws_restJson1ChangesetList(data.changesets, context); + } + if (data.nextToken !== undefined && data.nextToken !== null) { + contents.nextToken = __expectString(data.nextToken); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1ListChangesetsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __BaseException; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.finspacedata#AccessDeniedException": + throw await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context); + case 
"ConflictException": + case "com.amazonaws.finspacedata#ConflictException": + throw await deserializeAws_restJson1ConflictExceptionResponse(parsedOutput, context); + case "InternalServerException": + case "com.amazonaws.finspacedata#InternalServerException": + throw await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context); + case "ResourceNotFoundException": + case "com.amazonaws.finspacedata#ResourceNotFoundException": + throw await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context); + case "ThrottlingException": + case "com.amazonaws.finspacedata#ThrottlingException": + throw await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context); + case "ValidationException": + case "com.amazonaws.finspacedata#ValidationException": + throw await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context); + default: + const parsedBody = parsedOutput.body; + response = new __BaseException({ + name: parsedBody.code || parsedBody.Code || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + }); + throw __decorateServiceException(response, parsedBody); + } +}; + +export const deserializeAws_restJson1ListDatasetsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1ListDatasetsCommandError(output, context); + } + const contents: ListDatasetsCommandOutput = { + $metadata: deserializeMetadata(output), + datasets: undefined, + nextToken: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.datasets !== undefined && data.datasets !== null) { + contents.datasets = deserializeAws_restJson1DatasetList(data.datasets, context); + } + if (data.nextToken !== undefined && data.nextToken !== null) { + contents.nextToken = __expectString(data.nextToken); + } + 
return Promise.resolve(contents); +}; + +const deserializeAws_restJson1ListDatasetsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { const parsedOutput: any = { ...output, body: await parseBody(output.body, context), @@ -978,9 +2104,6 @@ const deserializeAws_restJson1GetDatasetCommandError = async ( let errorCode = "UnknownError"; errorCode = loadRestJsonErrorCode(output, parsedOutput.body); switch (errorCode) { - case "AccessDeniedException": - case "com.amazonaws.finspacedata#AccessDeniedException": - throw await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context); case "ConflictException": case "com.amazonaws.finspacedata#ConflictException": throw await deserializeAws_restJson1ConflictExceptionResponse(parsedOutput, context); @@ -1007,75 +2130,32 @@ const deserializeAws_restJson1GetDatasetCommandError = async ( } }; -export const deserializeAws_restJson1GetDataViewCommand = async ( +export const deserializeAws_restJson1ListDataViewsCommand = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { if (output.statusCode !== 200 && output.statusCode >= 300) { - return deserializeAws_restJson1GetDataViewCommandError(output, context); + return deserializeAws_restJson1ListDataViewsCommandError(output, context); } - const contents: GetDataViewCommandOutput = { + const contents: ListDataViewsCommandOutput = { $metadata: deserializeMetadata(output), - asOfTimestamp: undefined, - autoUpdate: undefined, - createTime: undefined, - dataViewArn: undefined, - dataViewId: undefined, - datasetId: undefined, - destinationTypeParams: undefined, - errorInfo: undefined, - lastModifiedTime: undefined, - partitionColumns: undefined, - sortColumns: undefined, - status: undefined, + dataViews: undefined, + nextToken: undefined, }; const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); - if (data.asOfTimestamp !== undefined && 
data.asOfTimestamp !== null) { - contents.asOfTimestamp = __expectLong(data.asOfTimestamp); - } - if (data.autoUpdate !== undefined && data.autoUpdate !== null) { - contents.autoUpdate = __expectBoolean(data.autoUpdate); - } - if (data.createTime !== undefined && data.createTime !== null) { - contents.createTime = __expectLong(data.createTime); - } - if (data.dataViewArn !== undefined && data.dataViewArn !== null) { - contents.dataViewArn = __expectString(data.dataViewArn); - } - if (data.dataViewId !== undefined && data.dataViewId !== null) { - contents.dataViewId = __expectString(data.dataViewId); - } - if (data.datasetId !== undefined && data.datasetId !== null) { - contents.datasetId = __expectString(data.datasetId); - } - if (data.destinationTypeParams !== undefined && data.destinationTypeParams !== null) { - contents.destinationTypeParams = deserializeAws_restJson1DataViewDestinationTypeParams( - data.destinationTypeParams, - context - ); - } - if (data.errorInfo !== undefined && data.errorInfo !== null) { - contents.errorInfo = deserializeAws_restJson1DataViewErrorInfo(data.errorInfo, context); - } - if (data.lastModifiedTime !== undefined && data.lastModifiedTime !== null) { - contents.lastModifiedTime = __expectLong(data.lastModifiedTime); - } - if (data.partitionColumns !== undefined && data.partitionColumns !== null) { - contents.partitionColumns = deserializeAws_restJson1PartitionColumnList(data.partitionColumns, context); - } - if (data.sortColumns !== undefined && data.sortColumns !== null) { - contents.sortColumns = deserializeAws_restJson1SortColumnList(data.sortColumns, context); + if (data.dataViews !== undefined && data.dataViews !== null) { + contents.dataViews = deserializeAws_restJson1DataViewList(data.dataViews, context); } - if (data.status !== undefined && data.status !== null) { - contents.status = __expectString(data.status); + if (data.nextToken !== undefined && data.nextToken !== null) { + contents.nextToken = 
__expectString(data.nextToken); } return Promise.resolve(contents); }; -const deserializeAws_restJson1GetDataViewCommandError = async ( +const deserializeAws_restJson1ListDataViewsCommandError = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { const parsedOutput: any = { ...output, body: await parseBody(output.body, context), @@ -1110,32 +2190,32 @@ const deserializeAws_restJson1GetDataViewCommandError = async ( } }; -export const deserializeAws_restJson1GetProgrammaticAccessCredentialsCommand = async ( +export const deserializeAws_restJson1ListPermissionGroupsCommand = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { if (output.statusCode !== 200 && output.statusCode >= 300) { - return deserializeAws_restJson1GetProgrammaticAccessCredentialsCommandError(output, context); + return deserializeAws_restJson1ListPermissionGroupsCommandError(output, context); } - const contents: GetProgrammaticAccessCredentialsCommandOutput = { + const contents: ListPermissionGroupsCommandOutput = { $metadata: deserializeMetadata(output), - credentials: undefined, - durationInMinutes: undefined, + nextToken: undefined, + permissionGroups: undefined, }; const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); - if (data.credentials !== undefined && data.credentials !== null) { - contents.credentials = deserializeAws_restJson1Credentials(data.credentials, context); + if (data.nextToken !== undefined && data.nextToken !== null) { + contents.nextToken = __expectString(data.nextToken); } - if (data.durationInMinutes !== undefined && data.durationInMinutes !== null) { - contents.durationInMinutes = __expectLong(data.durationInMinutes); + if (data.permissionGroups !== undefined && data.permissionGroups !== null) { + contents.permissionGroups = deserializeAws_restJson1PermissionGroupList(data.permissionGroups, context); } return Promise.resolve(contents); 
}; -const deserializeAws_restJson1GetProgrammaticAccessCredentialsCommandError = async ( +const deserializeAws_restJson1ListPermissionGroupsCommandError = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { const parsedOutput: any = { ...output, body: await parseBody(output.body, context), @@ -1167,36 +2247,32 @@ const deserializeAws_restJson1GetProgrammaticAccessCredentialsCommandError = asy } }; -export const deserializeAws_restJson1GetWorkingLocationCommand = async ( +export const deserializeAws_restJson1ListUsersCommand = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { if (output.statusCode !== 200 && output.statusCode >= 300) { - return deserializeAws_restJson1GetWorkingLocationCommandError(output, context); + return deserializeAws_restJson1ListUsersCommandError(output, context); } - const contents: GetWorkingLocationCommandOutput = { + const contents: ListUsersCommandOutput = { $metadata: deserializeMetadata(output), - s3Bucket: undefined, - s3Path: undefined, - s3Uri: undefined, + nextToken: undefined, + users: undefined, }; const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); - if (data.s3Bucket !== undefined && data.s3Bucket !== null) { - contents.s3Bucket = __expectString(data.s3Bucket); - } - if (data.s3Path !== undefined && data.s3Path !== null) { - contents.s3Path = __expectString(data.s3Path); + if (data.nextToken !== undefined && data.nextToken !== null) { + contents.nextToken = __expectString(data.nextToken); } - if (data.s3Uri !== undefined && data.s3Uri !== null) { - contents.s3Uri = __expectString(data.s3Uri); + if (data.users !== undefined && data.users !== null) { + contents.users = deserializeAws_restJson1UserList(data.users, context); } return Promise.resolve(contents); }; -const deserializeAws_restJson1GetWorkingLocationCommandError = async ( +const deserializeAws_restJson1ListUsersCommandError = 
async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { const parsedOutput: any = { ...output, body: await parseBody(output.body, context), @@ -1228,32 +2304,32 @@ const deserializeAws_restJson1GetWorkingLocationCommandError = async ( } }; -export const deserializeAws_restJson1ListChangesetsCommand = async ( +export const deserializeAws_restJson1ResetUserPasswordCommand = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { if (output.statusCode !== 200 && output.statusCode >= 300) { - return deserializeAws_restJson1ListChangesetsCommandError(output, context); + return deserializeAws_restJson1ResetUserPasswordCommandError(output, context); } - const contents: ListChangesetsCommandOutput = { + const contents: ResetUserPasswordCommandOutput = { $metadata: deserializeMetadata(output), - changesets: undefined, - nextToken: undefined, + temporaryPassword: undefined, + userId: undefined, }; const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); - if (data.changesets !== undefined && data.changesets !== null) { - contents.changesets = deserializeAws_restJson1ChangesetList(data.changesets, context); + if (data.temporaryPassword !== undefined && data.temporaryPassword !== null) { + contents.temporaryPassword = __expectString(data.temporaryPassword); } - if (data.nextToken !== undefined && data.nextToken !== null) { - contents.nextToken = __expectString(data.nextToken); + if (data.userId !== undefined && data.userId !== null) { + contents.userId = __expectString(data.userId); } return Promise.resolve(contents); }; -const deserializeAws_restJson1ListChangesetsCommandError = async ( +const deserializeAws_restJson1ResetUserPasswordCommandError = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { const parsedOutput: any = { ...output, body: await parseBody(output.body, context), @@ -1291,32 +2367,32 @@ const 
deserializeAws_restJson1ListChangesetsCommandError = async ( } }; -export const deserializeAws_restJson1ListDatasetsCommand = async ( +export const deserializeAws_restJson1UpdateChangesetCommand = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { if (output.statusCode !== 200 && output.statusCode >= 300) { - return deserializeAws_restJson1ListDatasetsCommandError(output, context); + return deserializeAws_restJson1UpdateChangesetCommandError(output, context); } - const contents: ListDatasetsCommandOutput = { + const contents: UpdateChangesetCommandOutput = { $metadata: deserializeMetadata(output), - datasets: undefined, - nextToken: undefined, + changesetId: undefined, + datasetId: undefined, }; const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); - if (data.datasets !== undefined && data.datasets !== null) { - contents.datasets = deserializeAws_restJson1DatasetList(data.datasets, context); + if (data.changesetId !== undefined && data.changesetId !== null) { + contents.changesetId = __expectString(data.changesetId); } - if (data.nextToken !== undefined && data.nextToken !== null) { - contents.nextToken = __expectString(data.nextToken); + if (data.datasetId !== undefined && data.datasetId !== null) { + contents.datasetId = __expectString(data.datasetId); } return Promise.resolve(contents); }; -const deserializeAws_restJson1ListDatasetsCommandError = async ( +const deserializeAws_restJson1UpdateChangesetCommandError = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { const parsedOutput: any = { ...output, body: await parseBody(output.body, context), @@ -1325,6 +2401,9 @@ const deserializeAws_restJson1ListDatasetsCommandError = async ( let errorCode = "UnknownError"; errorCode = loadRestJsonErrorCode(output, parsedOutput.body); switch (errorCode) { + case "AccessDeniedException": + case 
"com.amazonaws.finspacedata#AccessDeniedException": + throw await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context); case "ConflictException": case "com.amazonaws.finspacedata#ConflictException": throw await deserializeAws_restJson1ConflictExceptionResponse(parsedOutput, context); @@ -1351,32 +2430,28 @@ const deserializeAws_restJson1ListDatasetsCommandError = async ( } }; -export const deserializeAws_restJson1ListDataViewsCommand = async ( +export const deserializeAws_restJson1UpdateDatasetCommand = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { if (output.statusCode !== 200 && output.statusCode >= 300) { - return deserializeAws_restJson1ListDataViewsCommandError(output, context); + return deserializeAws_restJson1UpdateDatasetCommandError(output, context); } - const contents: ListDataViewsCommandOutput = { + const contents: UpdateDatasetCommandOutput = { $metadata: deserializeMetadata(output), - dataViews: undefined, - nextToken: undefined, + datasetId: undefined, }; const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); - if (data.dataViews !== undefined && data.dataViews !== null) { - contents.dataViews = deserializeAws_restJson1DataViewList(data.dataViews, context); - } - if (data.nextToken !== undefined && data.nextToken !== null) { - contents.nextToken = __expectString(data.nextToken); + if (data.datasetId !== undefined && data.datasetId !== null) { + contents.datasetId = __expectString(data.datasetId); } return Promise.resolve(contents); }; -const deserializeAws_restJson1ListDataViewsCommandError = async ( +const deserializeAws_restJson1UpdateDatasetCommandError = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { const parsedOutput: any = { ...output, body: await parseBody(output.body, context), @@ -1385,6 +2460,9 @@ const deserializeAws_restJson1ListDataViewsCommandError = async ( let 
errorCode = "UnknownError"; errorCode = loadRestJsonErrorCode(output, parsedOutput.body); switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.finspacedata#AccessDeniedException": + throw await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context); case "ConflictException": case "com.amazonaws.finspacedata#ConflictException": throw await deserializeAws_restJson1ConflictExceptionResponse(parsedOutput, context); @@ -1411,32 +2489,28 @@ const deserializeAws_restJson1ListDataViewsCommandError = async ( } }; -export const deserializeAws_restJson1UpdateChangesetCommand = async ( +export const deserializeAws_restJson1UpdatePermissionGroupCommand = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { if (output.statusCode !== 200 && output.statusCode >= 300) { - return deserializeAws_restJson1UpdateChangesetCommandError(output, context); + return deserializeAws_restJson1UpdatePermissionGroupCommandError(output, context); } - const contents: UpdateChangesetCommandOutput = { + const contents: UpdatePermissionGroupCommandOutput = { $metadata: deserializeMetadata(output), - changesetId: undefined, - datasetId: undefined, + permissionGroupId: undefined, }; const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); - if (data.changesetId !== undefined && data.changesetId !== null) { - contents.changesetId = __expectString(data.changesetId); - } - if (data.datasetId !== undefined && data.datasetId !== null) { - contents.datasetId = __expectString(data.datasetId); + if (data.permissionGroupId !== undefined && data.permissionGroupId !== null) { + contents.permissionGroupId = __expectString(data.permissionGroupId); } return Promise.resolve(contents); }; -const deserializeAws_restJson1UpdateChangesetCommandError = async ( +const deserializeAws_restJson1UpdatePermissionGroupCommandError = async ( output: __HttpResponse, context: __SerdeContext -): 
Promise => { +): Promise => { const parsedOutput: any = { ...output, body: await parseBody(output.body, context), @@ -1474,28 +2548,28 @@ const deserializeAws_restJson1UpdateChangesetCommandError = async ( } }; -export const deserializeAws_restJson1UpdateDatasetCommand = async ( +export const deserializeAws_restJson1UpdateUserCommand = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { if (output.statusCode !== 200 && output.statusCode >= 300) { - return deserializeAws_restJson1UpdateDatasetCommandError(output, context); + return deserializeAws_restJson1UpdateUserCommandError(output, context); } - const contents: UpdateDatasetCommandOutput = { + const contents: UpdateUserCommandOutput = { $metadata: deserializeMetadata(output), - datasetId: undefined, + userId: undefined, }; const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); - if (data.datasetId !== undefined && data.datasetId !== null) { - contents.datasetId = __expectString(data.datasetId); + if (data.userId !== undefined && data.userId !== null) { + contents.userId = __expectString(data.userId); } return Promise.resolve(contents); }; -const deserializeAws_restJson1UpdateDatasetCommandError = async ( +const deserializeAws_restJson1UpdateUserCommandError = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { const parsedOutput: any = { ...output, body: await parseBody(output.body, context), @@ -1558,6 +2632,9 @@ const deserializeAws_restJson1ConflictExceptionResponse = async ( if (data.message !== undefined && data.message !== null) { contents.message = __expectString(data.message); } + if (data.reason !== undefined && data.reason !== null) { + contents.reason = __expectString(data.reason); + } const exception = new ConflictException({ $metadata: deserializeMetadata(parsedOutput), ...contents, @@ -1606,6 +2683,9 @@ const 
deserializeAws_restJson1ResourceNotFoundExceptionResponse = async ( if (data.message !== undefined && data.message !== null) { contents.message = __expectString(data.message); } + if (data.reason !== undefined && data.reason !== null) { + contents.reason = __expectString(data.reason); + } const exception = new ResourceNotFoundException({ $metadata: deserializeMetadata(parsedOutput), ...contents, @@ -1635,6 +2715,9 @@ const deserializeAws_restJson1ValidationExceptionResponse = async ( if (data.message !== undefined && data.message !== null) { contents.message = __expectString(data.message); } + if (data.reason !== undefined && data.reason !== null) { + contents.reason = __expectString(data.reason); + } const exception = new ValidationException({ $metadata: deserializeMetadata(parsedOutput), ...contents, @@ -1642,6 +2725,20 @@ const deserializeAws_restJson1ValidationExceptionResponse = async ( return __decorateServiceException(exception, parsedOutput.body); }; +const serializeAws_restJson1ApplicationPermissionList = ( + input: (ApplicationPermission | string)[], + context: __SerdeContext +): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return entry; + }); +}; + const serializeAws_restJson1ColumnDefinition = (input: ColumnDefinition, context: __SerdeContext): any => { return { ...(input.columnDescription !== undefined && @@ -1811,6 +2908,21 @@ const serializeAws_restJson1SourceParams = (input: { [key: string]: string }, co }, {}); }; +const deserializeAws_restJson1ApplicationPermissionList = ( + output: any, + context: __SerdeContext +): (ApplicationPermission | string)[] => { + const retVal = (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return __expectString(entry) as any; + }); + return retVal; +}; + const deserializeAws_restJson1ChangesetErrorInfo = (output: any, context: __SerdeContext): 
ChangesetErrorInfo => { return { errorCategory: __expectString(output.errorCategory), @@ -2024,6 +3136,32 @@ const deserializeAws_restJson1PartitionColumnList = (output: any, context: __Ser return retVal; }; +const deserializeAws_restJson1PermissionGroup = (output: any, context: __SerdeContext): PermissionGroup => { + return { + applicationPermissions: + output.applicationPermissions !== undefined && output.applicationPermissions !== null + ? deserializeAws_restJson1ApplicationPermissionList(output.applicationPermissions, context) + : undefined, + createTime: __expectLong(output.createTime), + description: __expectString(output.description), + lastModifiedTime: __expectLong(output.lastModifiedTime), + name: __expectString(output.name), + permissionGroupId: __expectString(output.permissionGroupId), + } as any; +}; + +const deserializeAws_restJson1PermissionGroupList = (output: any, context: __SerdeContext): PermissionGroup[] => { + const retVal = (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1PermissionGroup(entry, context); + }); + return retVal; +}; + const deserializeAws_restJson1S3DestinationFormatOptions = ( output: any, context: __SerdeContext @@ -2085,6 +3223,36 @@ const deserializeAws_restJson1SourceParams = (output: any, context: __SerdeConte }, {}); }; +const deserializeAws_restJson1User = (output: any, context: __SerdeContext): User => { + return { + apiAccess: __expectString(output.apiAccess), + apiAccessPrincipalArn: __expectString(output.apiAccessPrincipalArn), + createTime: __expectLong(output.createTime), + emailAddress: __expectString(output.emailAddress), + firstName: __expectString(output.firstName), + lastDisabledTime: __expectLong(output.lastDisabledTime), + lastEnabledTime: __expectLong(output.lastEnabledTime), + lastLoginTime: __expectLong(output.lastLoginTime), + lastModifiedTime: __expectLong(output.lastModifiedTime), + lastName: 
__expectString(output.lastName), + status: __expectString(output.status), + type: __expectString(output.type), + userId: __expectString(output.userId), + } as any; +}; + +const deserializeAws_restJson1UserList = (output: any, context: __SerdeContext): User[] => { + const retVal = (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1User(entry, context); + }); + return retVal; +}; + const deserializeMetadata = (output: __HttpResponse): __ResponseMetadata => ({ httpStatusCode: output.statusCode, requestId: output.headers["x-amzn-requestid"] ?? output.headers["x-amzn-request-id"], diff --git a/clients/client-fis/src/models/models_0.ts b/clients/client-fis/src/models/models_0.ts index 2b4c7e948ef7..d284ae7bcdd3 100644 --- a/clients/client-fis/src/models/models_0.ts +++ b/clients/client-fis/src/models/models_0.ts @@ -180,6 +180,78 @@ export namespace CreateExperimentTemplateActionInput { }); } +/** + *

    Specifies the configuration for experiment logging to Amazon CloudWatch Logs.

    + */ +export interface ExperimentTemplateCloudWatchLogsLogConfigurationInput { + /** + *

    The Amazon Resource Name (ARN) of the destination Amazon CloudWatch Logs log group.

    + */ + logGroupArn: string | undefined; +} + +export namespace ExperimentTemplateCloudWatchLogsLogConfigurationInput { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ExperimentTemplateCloudWatchLogsLogConfigurationInput): any => ({ + ...obj, + }); +} + +/** + *

    Specifies the configuration for experiment logging to Amazon S3.

    + */ +export interface ExperimentTemplateS3LogConfigurationInput { + /** + *

    The name of the destination bucket.

    + */ + bucketName: string | undefined; + + /** + *

    The bucket prefix.

    + */ + prefix?: string; +} + +export namespace ExperimentTemplateS3LogConfigurationInput { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ExperimentTemplateS3LogConfigurationInput): any => ({ + ...obj, + }); +} + +/** + *

    Specifies the configuration for experiment logging.

    + */ +export interface CreateExperimentTemplateLogConfigurationInput { + /** + *

    The configuration for experiment logging to Amazon CloudWatch Logs.

    + */ + cloudWatchLogsConfiguration?: ExperimentTemplateCloudWatchLogsLogConfigurationInput; + + /** + *

    The configuration for experiment logging to Amazon S3.

    + */ + s3Configuration?: ExperimentTemplateS3LogConfigurationInput; + + /** + *

    The schema version.

    + */ + logSchemaVersion: number | undefined; +} + +export namespace CreateExperimentTemplateLogConfigurationInput { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateExperimentTemplateLogConfigurationInput): any => ({ + ...obj, + }); +} + /** *

    Specifies a stop condition for an experiment template.

    */ @@ -328,6 +400,11 @@ export interface CreateExperimentTemplateRequest { *

    The tags to apply to the experiment template.

    */ tags?: { [key: string]: string }; + + /** + *

    The configuration for experiment logging.

    + */ + logConfiguration?: CreateExperimentTemplateLogConfigurationInput; } export namespace CreateExperimentTemplateRequest { @@ -378,6 +455,78 @@ export namespace ExperimentTemplateAction { }); } +/** + *

    Describes the configuration for experiment logging to Amazon CloudWatch Logs.

    + */ +export interface ExperimentTemplateCloudWatchLogsLogConfiguration { + /** + *

    The Amazon Resource Name (ARN) of the destination Amazon CloudWatch Logs log group.

    + */ + logGroupArn?: string; +} + +export namespace ExperimentTemplateCloudWatchLogsLogConfiguration { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ExperimentTemplateCloudWatchLogsLogConfiguration): any => ({ + ...obj, + }); +} + +/** + *

    Describes the configuration for experiment logging to Amazon S3.

    + */ +export interface ExperimentTemplateS3LogConfiguration { + /** + *

    The name of the destination bucket.

    + */ + bucketName?: string; + + /** + *

    The bucket prefix.

    + */ + prefix?: string; +} + +export namespace ExperimentTemplateS3LogConfiguration { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ExperimentTemplateS3LogConfiguration): any => ({ + ...obj, + }); +} + +/** + *

    Describes the configuration for experiment logging.

    + */ +export interface ExperimentTemplateLogConfiguration { + /** + *

    The configuration for experiment logging to Amazon CloudWatch Logs.

    + */ + cloudWatchLogsConfiguration?: ExperimentTemplateCloudWatchLogsLogConfiguration; + + /** + *

    The configuration for experiment logging to Amazon S3.

    + */ + s3Configuration?: ExperimentTemplateS3LogConfiguration; + + /** + *

    The schema version.

    + */ + logSchemaVersion?: number; +} + +export namespace ExperimentTemplateLogConfiguration { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ExperimentTemplateLogConfiguration): any => ({ + ...obj, + }); +} + /** *

    Describes a stop condition for an experiment template.

    */ @@ -518,6 +667,11 @@ export interface ExperimentTemplate { *

    The tags for the experiment template.

    */ tags?: { [key: string]: string }; + + /** + *

    The configuration for experiment logging.

    + */ + logConfiguration?: ExperimentTemplateLogConfiguration; } export namespace ExperimentTemplate { @@ -723,6 +877,78 @@ export namespace ExperimentAction { }); } +/** + *

    Describes the configuration for experiment logging to Amazon CloudWatch Logs.

    + */ +export interface ExperimentCloudWatchLogsLogConfiguration { + /** + *

    The Amazon Resource Name (ARN) of the destination Amazon CloudWatch Logs log group.

    + */ + logGroupArn?: string; +} + +export namespace ExperimentCloudWatchLogsLogConfiguration { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ExperimentCloudWatchLogsLogConfiguration): any => ({ + ...obj, + }); +} + +/** + *

    Describes the configuration for experiment logging to Amazon S3.

    + */ +export interface ExperimentS3LogConfiguration { + /** + *

    The name of the destination bucket.

    + */ + bucketName?: string; + + /** + *

    The bucket prefix.

    + */ + prefix?: string; +} + +export namespace ExperimentS3LogConfiguration { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ExperimentS3LogConfiguration): any => ({ + ...obj, + }); +} + +/** + *

    Describes the configuration for experiment logging.

    + */ +export interface ExperimentLogConfiguration { + /** + *

    The configuration for experiment logging to Amazon CloudWatch Logs.

    + */ + cloudWatchLogsConfiguration?: ExperimentCloudWatchLogsLogConfiguration; + + /** + *

    The configuration for experiment logging to Amazon S3.

    + */ + s3Configuration?: ExperimentS3LogConfiguration; + + /** + *

    The schema version.

    + */ + logSchemaVersion?: number; +} + +export namespace ExperimentLogConfiguration { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ExperimentLogConfiguration): any => ({ + ...obj, + }); +} + export enum ExperimentStatus { completed = "completed", failed = "failed", @@ -907,6 +1133,11 @@ export interface Experiment { *

    The tags for the experiment.

    */ tags?: { [key: string]: string }; + + /** + *

    The configuration for experiment logging.

    + */ + logConfiguration?: ExperimentLogConfiguration; } export namespace Experiment { @@ -1579,6 +1810,35 @@ export namespace UpdateExperimentTemplateActionInputItem { }); } +/** + *

    Specifies the configuration for experiment logging.

    + */ +export interface UpdateExperimentTemplateLogConfigurationInput { + /** + *

    The configuration for experiment logging to Amazon CloudWatch Logs.

    + */ + cloudWatchLogsConfiguration?: ExperimentTemplateCloudWatchLogsLogConfigurationInput; + + /** + *

    The configuration for experiment logging to Amazon S3.

    + */ + s3Configuration?: ExperimentTemplateS3LogConfigurationInput; + + /** + *

    The schema version.

    + */ + logSchemaVersion?: number; +} + +export namespace UpdateExperimentTemplateLogConfigurationInput { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdateExperimentTemplateLogConfigurationInput): any => ({ + ...obj, + }); +} + /** *

    Specifies a stop condition for an experiment. You can define a stop condition as a CloudWatch alarm.

    */ @@ -1679,6 +1939,11 @@ export interface UpdateExperimentTemplateRequest { *

    The Amazon Resource Name (ARN) of an IAM role that grants the FIS service permission to perform service actions on your behalf.

    */ roleArn?: string; + + /** + *

    The configuration for experiment logging.

    + */ + logConfiguration?: UpdateExperimentTemplateLogConfigurationInput; } export namespace UpdateExperimentTemplateRequest { diff --git a/clients/client-fis/src/protocols/Aws_restJson1.ts b/clients/client-fis/src/protocols/Aws_restJson1.ts index 487645ff9fb7..0900b791dbe5 100644 --- a/clients/client-fis/src/protocols/Aws_restJson1.ts +++ b/clients/client-fis/src/protocols/Aws_restJson1.ts @@ -2,6 +2,7 @@ import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@a import { decorateServiceException as __decorateServiceException, expectBoolean as __expectBoolean, + expectInt32 as __expectInt32, expectNonNull as __expectNonNull, expectNumber as __expectNumber, expectObject as __expectObject, @@ -64,11 +65,15 @@ import { ActionTarget, ConflictException, CreateExperimentTemplateActionInput, + CreateExperimentTemplateLogConfigurationInput, CreateExperimentTemplateStopConditionInput, CreateExperimentTemplateTargetInput, Experiment, ExperimentAction, ExperimentActionState, + ExperimentCloudWatchLogsLogConfiguration, + ExperimentLogConfiguration, + ExperimentS3LogConfiguration, ExperimentState, ExperimentStopCondition, ExperimentSummary, @@ -76,6 +81,11 @@ import { ExperimentTargetFilter, ExperimentTemplate, ExperimentTemplateAction, + ExperimentTemplateCloudWatchLogsLogConfiguration, + ExperimentTemplateCloudWatchLogsLogConfigurationInput, + ExperimentTemplateLogConfiguration, + ExperimentTemplateS3LogConfiguration, + ExperimentTemplateS3LogConfigurationInput, ExperimentTemplateStopCondition, ExperimentTemplateSummary, ExperimentTemplateTarget, @@ -87,6 +97,7 @@ import { TargetResourceTypeParameter, TargetResourceTypeSummary, UpdateExperimentTemplateActionInputItem, + UpdateExperimentTemplateLogConfigurationInput, UpdateExperimentTemplateStopConditionInput, UpdateExperimentTemplateTargetInput, ValidationException, @@ -109,6 +120,13 @@ export const serializeAws_restJson1CreateExperimentTemplateCommand = async ( }), clientToken: input.clientToken ?? 
generateIdempotencyToken(), ...(input.description !== undefined && input.description !== null && { description: input.description }), + ...(input.logConfiguration !== undefined && + input.logConfiguration !== null && { + logConfiguration: serializeAws_restJson1CreateExperimentTemplateLogConfigurationInput( + input.logConfiguration, + context + ), + }), ...(input.roleArn !== undefined && input.roleArn !== null && { roleArn: input.roleArn }), ...(input.stopConditions !== undefined && input.stopConditions !== null && { @@ -547,6 +565,13 @@ export const serializeAws_restJson1UpdateExperimentTemplateCommand = async ( actions: serializeAws_restJson1UpdateExperimentTemplateActionInputMap(input.actions, context), }), ...(input.description !== undefined && input.description !== null && { description: input.description }), + ...(input.logConfiguration !== undefined && + input.logConfiguration !== null && { + logConfiguration: serializeAws_restJson1UpdateExperimentTemplateLogConfigurationInput( + input.logConfiguration, + context + ), + }), ...(input.roleArn !== undefined && input.roleArn !== null && { roleArn: input.roleArn }), ...(input.stopConditions !== undefined && input.stopConditions !== null && { @@ -1423,6 +1448,30 @@ const serializeAws_restJson1CreateExperimentTemplateActionInputMap = ( }, {}); }; +const serializeAws_restJson1CreateExperimentTemplateLogConfigurationInput = ( + input: CreateExperimentTemplateLogConfigurationInput, + context: __SerdeContext +): any => { + return { + ...(input.cloudWatchLogsConfiguration !== undefined && + input.cloudWatchLogsConfiguration !== null && { + cloudWatchLogsConfiguration: serializeAws_restJson1ExperimentTemplateCloudWatchLogsLogConfigurationInput( + input.cloudWatchLogsConfiguration, + context + ), + }), + ...(input.logSchemaVersion !== undefined && + input.logSchemaVersion !== null && { logSchemaVersion: input.logSchemaVersion }), + ...(input.s3Configuration !== undefined && + input.s3Configuration !== null && { + 
s3Configuration: serializeAws_restJson1ExperimentTemplateS3LogConfigurationInput( + input.s3Configuration, + context + ), + }), + }; +}; + const serializeAws_restJson1CreateExperimentTemplateStopConditionInput = ( input: CreateExperimentTemplateStopConditionInput, context: __SerdeContext @@ -1530,6 +1579,25 @@ const serializeAws_restJson1ExperimentTemplateActionTargetMap = ( }, {}); }; +const serializeAws_restJson1ExperimentTemplateCloudWatchLogsLogConfigurationInput = ( + input: ExperimentTemplateCloudWatchLogsLogConfigurationInput, + context: __SerdeContext +): any => { + return { + ...(input.logGroupArn !== undefined && input.logGroupArn !== null && { logGroupArn: input.logGroupArn }), + }; +}; + +const serializeAws_restJson1ExperimentTemplateS3LogConfigurationInput = ( + input: ExperimentTemplateS3LogConfigurationInput, + context: __SerdeContext +): any => { + return { + ...(input.bucketName !== undefined && input.bucketName !== null && { bucketName: input.bucketName }), + ...(input.prefix !== undefined && input.prefix !== null && { prefix: input.prefix }), + }; +}; + const serializeAws_restJson1ExperimentTemplateTargetFilterInputList = ( input: ExperimentTemplateTargetInputFilter[], context: __SerdeContext @@ -1643,6 +1711,30 @@ const serializeAws_restJson1UpdateExperimentTemplateActionInputMap = ( }, {}); }; +const serializeAws_restJson1UpdateExperimentTemplateLogConfigurationInput = ( + input: UpdateExperimentTemplateLogConfigurationInput, + context: __SerdeContext +): any => { + return { + ...(input.cloudWatchLogsConfiguration !== undefined && + input.cloudWatchLogsConfiguration !== null && { + cloudWatchLogsConfiguration: serializeAws_restJson1ExperimentTemplateCloudWatchLogsLogConfigurationInput( + input.cloudWatchLogsConfiguration, + context + ), + }), + ...(input.logSchemaVersion !== undefined && + input.logSchemaVersion !== null && { logSchemaVersion: input.logSchemaVersion }), + ...(input.s3Configuration !== undefined && + input.s3Configuration !== 
null && { + s3Configuration: serializeAws_restJson1ExperimentTemplateS3LogConfigurationInput( + input.s3Configuration, + context + ), + }), + }; +}; + const serializeAws_restJson1UpdateExperimentTemplateStopConditionInput = ( input: UpdateExperimentTemplateStopConditionInput, context: __SerdeContext @@ -1811,6 +1903,10 @@ const deserializeAws_restJson1Experiment = (output: any, context: __SerdeContext : undefined, experimentTemplateId: __expectString(output.experimentTemplateId), id: __expectString(output.id), + logConfiguration: + output.logConfiguration !== undefined && output.logConfiguration !== null + ? deserializeAws_restJson1ExperimentLogConfiguration(output.logConfiguration, context) + : undefined, roleArn: __expectString(output.roleArn), startTime: output.startTime !== undefined && output.startTime !== null @@ -1930,6 +2026,42 @@ const deserializeAws_restJson1ExperimentActionTargetMap = ( }, {}); }; +const deserializeAws_restJson1ExperimentCloudWatchLogsLogConfiguration = ( + output: any, + context: __SerdeContext +): ExperimentCloudWatchLogsLogConfiguration => { + return { + logGroupArn: __expectString(output.logGroupArn), + } as any; +}; + +const deserializeAws_restJson1ExperimentLogConfiguration = ( + output: any, + context: __SerdeContext +): ExperimentLogConfiguration => { + return { + cloudWatchLogsConfiguration: + output.cloudWatchLogsConfiguration !== undefined && output.cloudWatchLogsConfiguration !== null + ? deserializeAws_restJson1ExperimentCloudWatchLogsLogConfiguration(output.cloudWatchLogsConfiguration, context) + : undefined, + logSchemaVersion: __expectInt32(output.logSchemaVersion), + s3Configuration: + output.s3Configuration !== undefined && output.s3Configuration !== null + ? 
deserializeAws_restJson1ExperimentS3LogConfiguration(output.s3Configuration, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1ExperimentS3LogConfiguration = ( + output: any, + context: __SerdeContext +): ExperimentS3LogConfiguration => { + return { + bucketName: __expectString(output.bucketName), + prefix: __expectString(output.prefix), + } as any; +}; + const deserializeAws_restJson1ExperimentState = (output: any, context: __SerdeContext): ExperimentState => { return { reason: __expectString(output.reason), @@ -2102,6 +2234,10 @@ const deserializeAws_restJson1ExperimentTemplate = (output: any, context: __Serd output.lastUpdateTime !== undefined && output.lastUpdateTime !== null ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.lastUpdateTime))) : undefined, + logConfiguration: + output.logConfiguration !== undefined && output.logConfiguration !== null + ? deserializeAws_restJson1ExperimentTemplateLogConfiguration(output.logConfiguration, context) + : undefined, roleArn: __expectString(output.roleArn), stopConditions: output.stopConditions !== undefined && output.stopConditions !== null @@ -2203,6 +2339,45 @@ const deserializeAws_restJson1ExperimentTemplateActionTargetMap = ( }, {}); }; +const deserializeAws_restJson1ExperimentTemplateCloudWatchLogsLogConfiguration = ( + output: any, + context: __SerdeContext +): ExperimentTemplateCloudWatchLogsLogConfiguration => { + return { + logGroupArn: __expectString(output.logGroupArn), + } as any; +}; + +const deserializeAws_restJson1ExperimentTemplateLogConfiguration = ( + output: any, + context: __SerdeContext +): ExperimentTemplateLogConfiguration => { + return { + cloudWatchLogsConfiguration: + output.cloudWatchLogsConfiguration !== undefined && output.cloudWatchLogsConfiguration !== null + ? 
deserializeAws_restJson1ExperimentTemplateCloudWatchLogsLogConfiguration( + output.cloudWatchLogsConfiguration, + context + ) + : undefined, + logSchemaVersion: __expectInt32(output.logSchemaVersion), + s3Configuration: + output.s3Configuration !== undefined && output.s3Configuration !== null + ? deserializeAws_restJson1ExperimentTemplateS3LogConfiguration(output.s3Configuration, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1ExperimentTemplateS3LogConfiguration = ( + output: any, + context: __SerdeContext +): ExperimentTemplateS3LogConfiguration => { + return { + bucketName: __expectString(output.bucketName), + prefix: __expectString(output.prefix), + } as any; +}; + const deserializeAws_restJson1ExperimentTemplateStopCondition = ( output: any, context: __SerdeContext diff --git a/clients/client-fsx/src/FSx.ts b/clients/client-fsx/src/FSx.ts index 0e1e98178a2e..50f0c1e32de3 100644 --- a/clients/client-fsx/src/FSx.ts +++ b/clients/client-fsx/src/FSx.ts @@ -577,8 +577,8 @@ export class FSx extends FSxClient { * Server, or Amazon FSx for OpenZFS file system from an existing Amazon FSx backup.

    * *

    If a file system with the specified client request token exists and the parameters - * match, this operation returns the description of the file system. If a client request - * token with the specified by the file system exists and the parameters don't match, this + * match, this operation returns the description of the file system. If a file system + * with the specified client request token exists but the parameters don't match, this * call returns IncompatibleParameterError. If a file system with the * specified client request token doesn't exist, this operation does the following:

    * @@ -641,7 +641,7 @@ export class FSx extends FSxClient { } /** - *

    Creates a snapshot of an existing Amazon FSx for OpenZFS file system. With + *

    Creates a snapshot of an existing Amazon FSx for OpenZFS volume. With * snapshots, you can easily undo file changes and compare file versions by restoring the * volume to a previous version.

    *

    If a snapshot with the specified client request token exists, and the parameters @@ -649,7 +649,7 @@ export class FSx extends FSxClient { * with the specified client request token exists, and the parameters don't match, this * operation returns IncompatibleParameterError. If a snapshot with the * specified client request token doesn't exist, CreateSnapshot does the - * following:

    + * following:

    *
      *
    • *

      Creates a new OpenZFS snapshot with an assigned ID, and an initial lifecycle @@ -668,7 +668,7 @@ export class FSx extends FSxClient { *

      The CreateSnapshot operation returns while the snapshot's lifecycle state * is still CREATING. You can check the snapshot creation status by calling * the DescribeSnapshots operation, which returns the snapshot state along with - * other information.

      + * other information.

      */ public createSnapshot( args: CreateSnapshotCommandInput, @@ -732,8 +732,7 @@ export class FSx extends FSxClient { } /** - *

      Creates an Amazon FSx for NetApp ONTAP or Amazon FSx for OpenZFS storage - * volume.

      + *

      Creates an FSx for ONTAP or Amazon FSx for OpenZFS storage volume.

      */ public createVolume( args: CreateVolumeCommandInput, @@ -927,7 +926,7 @@ export class FSx extends FSxClient { } /** - *

      Deletes the Amazon FSx snapshot. After deletion, the snapshot no longer + *

      Deletes an Amazon FSx for OpenZFS snapshot. After deletion, the snapshot no longer * exists, and its data is gone. Deleting a snapshot doesn't affect snapshots stored in a * file system backup.

      *

      The DeleteSnapshot operation returns instantly. The snapshot appears with @@ -1275,7 +1274,7 @@ export class FSx extends FSxClient { } /** - *

      Returns the description of specific Amazon FSx snapshots, if a + *

      Returns the description of specific Amazon FSx for OpenZFS snapshots, if a * SnapshotIds value is provided. Otherwise, this operation returns all * snapshots owned by your Amazon Web Services account in the Amazon Web Services Region of * the endpoint that you're calling.

      @@ -1830,7 +1829,7 @@ export class FSx extends FSxClient { } /** - *

      Updates the name of a snapshot.

      + *

      Updates the name of an Amazon FSx for OpenZFS snapshot.

      */ public updateSnapshot( args: UpdateSnapshotCommandInput, diff --git a/clients/client-fsx/src/commands/CreateFileSystemFromBackupCommand.ts b/clients/client-fsx/src/commands/CreateFileSystemFromBackupCommand.ts index cfee43a97b5c..10ea35d7fc18 100644 --- a/clients/client-fsx/src/commands/CreateFileSystemFromBackupCommand.ts +++ b/clients/client-fsx/src/commands/CreateFileSystemFromBackupCommand.ts @@ -26,8 +26,8 @@ export interface CreateFileSystemFromBackupCommandOutput extends CreateFileSyste * Server, or Amazon FSx for OpenZFS file system from an existing Amazon FSx backup.

      * *

      If a file system with the specified client request token exists and the parameters - * match, this operation returns the description of the file system. If a client request - * token with the specified by the file system exists and the parameters don't match, this + * match, this operation returns the description of the file system. If a file system + * with the specified client request token exists but the parameters don't match, this * call returns IncompatibleParameterError. If a file system with the * specified client request token doesn't exist, this operation does the following:

      * diff --git a/clients/client-fsx/src/commands/CreateSnapshotCommand.ts b/clients/client-fsx/src/commands/CreateSnapshotCommand.ts index 05c963ff826f..4502726c42e1 100644 --- a/clients/client-fsx/src/commands/CreateSnapshotCommand.ts +++ b/clients/client-fsx/src/commands/CreateSnapshotCommand.ts @@ -22,7 +22,7 @@ export interface CreateSnapshotCommandInput extends CreateSnapshotRequest {} export interface CreateSnapshotCommandOutput extends CreateSnapshotResponse, __MetadataBearer {} /** - *

      Creates a snapshot of an existing Amazon FSx for OpenZFS file system. With + *

      Creates a snapshot of an existing Amazon FSx for OpenZFS volume. With * snapshots, you can easily undo file changes and compare file versions by restoring the * volume to a previous version.

      *

      If a snapshot with the specified client request token exists, and the parameters @@ -30,7 +30,7 @@ export interface CreateSnapshotCommandOutput extends CreateSnapshotResponse, __M * with the specified client request token exists, and the parameters don't match, this * operation returns IncompatibleParameterError. If a snapshot with the * specified client request token doesn't exist, CreateSnapshot does the - * following:

      + * following:

      *
        *
      • *

        Creates a new OpenZFS snapshot with an assigned ID, and an initial lifecycle @@ -49,7 +49,7 @@ export interface CreateSnapshotCommandOutput extends CreateSnapshotResponse, __M *

        The CreateSnapshot operation returns while the snapshot's lifecycle state * is still CREATING. You can check the snapshot creation status by calling * the DescribeSnapshots operation, which returns the snapshot state along with - * other information.

        + * other information.

        * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-fsx/src/commands/CreateVolumeCommand.ts b/clients/client-fsx/src/commands/CreateVolumeCommand.ts index 724c5dc2356f..cc9f4819a4d6 100644 --- a/clients/client-fsx/src/commands/CreateVolumeCommand.ts +++ b/clients/client-fsx/src/commands/CreateVolumeCommand.ts @@ -22,8 +22,7 @@ export interface CreateVolumeCommandInput extends CreateVolumeRequest {} export interface CreateVolumeCommandOutput extends CreateVolumeResponse, __MetadataBearer {} /** - *

        Creates an Amazon FSx for NetApp ONTAP or Amazon FSx for OpenZFS storage - * volume.

        + *

        Creates an FSx for ONTAP or Amazon FSx for OpenZFS storage volume.

        * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-fsx/src/commands/DeleteSnapshotCommand.ts b/clients/client-fsx/src/commands/DeleteSnapshotCommand.ts index 6622571da7c4..0c14b04c789a 100644 --- a/clients/client-fsx/src/commands/DeleteSnapshotCommand.ts +++ b/clients/client-fsx/src/commands/DeleteSnapshotCommand.ts @@ -22,7 +22,7 @@ export interface DeleteSnapshotCommandInput extends DeleteSnapshotRequest {} export interface DeleteSnapshotCommandOutput extends DeleteSnapshotResponse, __MetadataBearer {} /** - *

        Deletes the Amazon FSx snapshot. After deletion, the snapshot no longer + *

        Deletes an Amazon FSx for OpenZFS snapshot. After deletion, the snapshot no longer * exists, and its data is gone. Deleting a snapshot doesn't affect snapshots stored in a * file system backup.

        *

        The DeleteSnapshot operation returns instantly. The snapshot appears with diff --git a/clients/client-fsx/src/commands/DescribeSnapshotsCommand.ts b/clients/client-fsx/src/commands/DescribeSnapshotsCommand.ts index 56aa6a9584ce..566576da509f 100644 --- a/clients/client-fsx/src/commands/DescribeSnapshotsCommand.ts +++ b/clients/client-fsx/src/commands/DescribeSnapshotsCommand.ts @@ -22,7 +22,7 @@ export interface DescribeSnapshotsCommandInput extends DescribeSnapshotsRequest export interface DescribeSnapshotsCommandOutput extends DescribeSnapshotsResponse, __MetadataBearer {} /** - *

        Returns the description of specific Amazon FSx snapshots, if a + *

        Returns the description of specific Amazon FSx for OpenZFS snapshots, if a * SnapshotIds value is provided. Otherwise, this operation returns all * snapshots owned by your Amazon Web Services account in the Amazon Web Services Region of * the endpoint that you're calling.

        diff --git a/clients/client-fsx/src/commands/UpdateSnapshotCommand.ts b/clients/client-fsx/src/commands/UpdateSnapshotCommand.ts index 7fbaeadccbf1..d7ecbf30f805 100644 --- a/clients/client-fsx/src/commands/UpdateSnapshotCommand.ts +++ b/clients/client-fsx/src/commands/UpdateSnapshotCommand.ts @@ -22,7 +22,7 @@ export interface UpdateSnapshotCommandInput extends UpdateSnapshotRequest {} export interface UpdateSnapshotCommandOutput extends UpdateSnapshotResponse, __MetadataBearer {} /** - *

        Updates the name of a snapshot.

        + *

        Updates the name of an Amazon FSx for OpenZFS snapshot.

        * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-fsx/src/models/models_0.ts b/clients/client-fsx/src/models/models_0.ts index 348a99494160..da2af7fa3d92 100644 --- a/clients/client-fsx/src/models/models_0.ts +++ b/clients/client-fsx/src/models/models_0.ts @@ -642,6 +642,12 @@ export interface OntapFileSystemConfiguration { /** *

        The IP address range in which the endpoints to access your file system * are created.

        + * + *

        The Endpoint IP address range you select for your file system + * must exist outside the VPC's CIDR range and must be at least /30 or larger. + * If you do not specify this optional parameter, Amazon FSx will automatically + * select a CIDR block for you.

        + *
        */ EndpointIpAddressRange?: string; @@ -752,7 +758,7 @@ export interface OpenZFSFileSystemConfiguration { /** *

        The throughput of an Amazon FSx file system, measured in megabytes per second - * (MBps), in 2 to the nth increments, between 2^3 (8) and 2^11 (2048).

        + * (MBps). Valid values are 64, 128, 256, 512, 1024, 2048, 3072, or 4096 MB/s.

        */ ThroughputCapacity?: number; @@ -1182,16 +1188,6 @@ export enum SnapshotLifecycle { PENDING = "PENDING", } -export enum VolumeLifecycle { - AVAILABLE = "AVAILABLE", - CREATED = "CREATED", - CREATING = "CREATING", - DELETING = "DELETING", - FAILED = "FAILED", - MISCONFIGURED = "MISCONFIGURED", - PENDING = "PENDING", -} - /** *

        Describes why a resource lifecycle state changed.

        */ @@ -1211,6 +1207,16 @@ export namespace LifecycleTransitionReason { }); } +export enum VolumeLifecycle { + AVAILABLE = "AVAILABLE", + CREATED = "CREATED", + CREATING = "CREATING", + DELETING = "DELETING", + FAILED = "FAILED", + MISCONFIGURED = "MISCONFIGURED", + PENDING = "PENDING", +} + export enum FlexCacheEndpointType { CACHE = "CACHE", NONE = "NONE", @@ -1423,6 +1429,7 @@ export namespace OntapVolumeConfiguration { } export enum OpenZFSDataCompressionType { + LZ4 = "LZ4", NONE = "NONE", ZSTD = "ZSTD", } @@ -1435,7 +1442,7 @@ export interface OpenZFSClientConfiguration { /** *

        A value that specifies who can mount the file system. You can provide a wildcard * character (*), an IP address (0.0.0.0), or a CIDR address - * (192.0.2.0/24. By default, Amazon FSx uses the wildcard + * (192.0.2.0/24). By default, Amazon FSx uses the wildcard * character when specifying the client.

        */ Clients: string | undefined; @@ -1447,8 +1454,8 @@ export interface OpenZFSClientConfiguration { *
          *
        • *

          - * crossmount is used by default. If you don't specify - * crossmount when changing the client configuration, you won't be + * crossmnt is used by default. If you don't specify + * crossmnt when changing the client configuration, you won't be * able to see or access snapshots in your file system's snapshot directory.

          *
        • *
        • @@ -1473,7 +1480,7 @@ export namespace OpenZFSClientConfiguration { } /** - *

          The Network File System NFS) configurations for mounting an Amazon FSx for + *

          The Network File System (NFS) configurations for mounting an Amazon FSx for * OpenZFS file system.

          */ export interface OpenZFSNfsExport { @@ -1602,19 +1609,34 @@ export interface OpenZFSVolumeConfiguration { StorageCapacityQuotaGiB?: number; /** - *

          The method used to compress the data on the volume. Unless a compression type is - * specified, volumes inherit the DataCompressionType value of their parent - * volume.

          + *

          The record size of an OpenZFS volume, in kibibytes (KiB). Valid values are 4, 8, + * 16, 32, 64, 128, 256, 512, or 1024 KiB. The default is 128 KiB. + * Most workloads should use the default record size. For guidance on when + * to set a custom record size, see the + * Amazon FSx for OpenZFS User Guide.

          + */ + RecordSizeKiB?: number; + + /** + *

          Specifies the method used to compress the data on the volume. The compression + * type is NONE by default.

          *
            *
          • *

            - * NONE - Doesn't compress the data on the volume.

            + * NONE - Doesn't compress the data on the volume. + * NONE is the default.

            *
          • *
          • *

            * ZSTD - Compresses the data in the volume using the Zstandard - * (ZSTD) compression algorithm. This algorithm reduces the amount of space used on - * your volume and has very little impact on compute resources.

            + * (ZSTD) compression algorithm. Compared to LZ4, Z-Standard provides a better + * compression ratio to minimize on-disk storage utilization.

            + *
          • + *
          • + *

            + * LZ4 - Compresses the data in the volume using the LZ4 + * compression algorithm. Compared to Z-Standard, LZ4 is less compute-intensive + * and delivers higher write throughput speeds.

            *
          • *
          */ @@ -1626,7 +1648,7 @@ export interface OpenZFSVolumeConfiguration { * for the volume are copied to snapshots where the user doesn't specify tags. If this * value is true and you specify one or more tags, only the specified tags are * copied to snapshots. If you specify one or more tags when creating the snapshot, no tags - * are copied from the volume, regardless of this value.

          + * are copied from the volume, regardless of this value.

          */ CopyTagsToSnapshots?: boolean; @@ -1642,13 +1664,13 @@ export interface OpenZFSVolumeConfiguration { ReadOnly?: boolean; /** - *

          The configuration object for mounting a Network File System (NFS) file - * system.

          + *

          The configuration object for mounting a Network File System (NFS) + * file system.

          */ NfsExports?: OpenZFSNfsExport[]; /** - *

          An object specifying how much storage users or groups can use on the volume.

          + *

          An object specifying how much storage users or groups can use on the volume.

          */ UserAndGroupQuotas?: OpenZFSUserOrGroupQuota[]; } @@ -2451,7 +2473,7 @@ export namespace CreateBackupRequest { } /** - *

          No Amazon FSx for NetApp ONTAP volumes were found based upon the supplied parameters.

          + *

          No Amazon FSx volumes were found based upon the supplied parameters.

          */ export class VolumeNotFound extends __BaseException { readonly name: "VolumeNotFound" = "VolumeNotFound"; @@ -2528,6 +2550,11 @@ export interface CreateDataRepositoryAssociationRequest { *

          This path specifies where in your file system files will be exported * from or imported to. This file system directory can be linked to only one * Amazon S3 bucket, and no other S3 bucket can be linked to the directory.

          + * + *

          If you specify only a forward slash (/) as the file system + * path, you can link only 1 data repository to the file system. You can only specify + * "/" as the file system path for the first data repository associated with a file system.

          + *
          */ FileSystemPath: string | undefined; @@ -2690,7 +2717,12 @@ export interface DataRepositoryAssociation { * path /ns1/ns2.

          *

          This path specifies where in your file system files will be exported * from or imported to. This file system directory can be linked to only one - * Amazon S3 bucket, and no other S3 bucket can be linked to the directory.

          + * Amazon S3 bucket, and no other S3 bucket can be linked to the directory.

          + * + *

          If you specify only a forward slash (/) as the file system + * path, you can link only 1 data repository to the file system. You can only specify + * "/" as the file system path for the first data repository associated with a file system.

          + *
          */ FileSystemPath?: string; @@ -3348,11 +3380,11 @@ export interface CreateFileSystemLustreConfiguration { * in the S3 bucket.

          *
        • *
        - *

        For more information, see + *

        For more information, see * Automatically import updates from your S3 bucket.

        * *

        This parameter is not supported for file systems with the Persistent_2 deployment type. - * Instead, use CreateDataRepositoryAssociation" to create + * Instead, use CreateDataRepositoryAssociation to create * a data repository association to link your Lustre file system to a data repository.

        *
        */ @@ -3485,6 +3517,10 @@ export interface CreateFileSystemOntapConfiguration { *

        Specifies the IP address range in which the endpoints to access your file system * will be created. By default, Amazon FSx selects an unused IP address range for you * from the 198.19.* range.

        + * + *

        The Endpoint IP address range you select for your file system + * must exist outside the VPC's CIDR range and must be at least /30 or larger.

        + *
        */ EndpointIpAddressRange?: string; @@ -3547,19 +3583,36 @@ export namespace CreateFileSystemOntapConfiguration { */ export interface OpenZFSCreateRootVolumeConfiguration { /** - *

        Specifies the method used to compress the data on the volume. Unless the compression - * type is specified, volumes inherit the DataCompressionType value of their - * parent volume.

        + *

        Specifies the record size of an OpenZFS root volume, in kibibytes (KiB). Valid values are 4, 8, + * 16, 32, 64, 128, 256, 512, or 1024 KiB. The default is 128 KiB. Most workloads should use the + * default record size. Database workflows can benefit from a smaller record size, while streaming + * workflows can benefit from a larger record size. For additional guidance on setting a custom record + * size, see + * Tips for maximizing performance in the + * Amazon FSx for OpenZFS User Guide.

        + */ + RecordSizeKiB?: number; + + /** + *

        Specifies the method used to compress the data on the volume. The compression + * type is NONE by default.

        *
          *
        • *

          - * NONE - Doesn't compress the data on the volume.

          + * NONE - Doesn't compress the data on the volume. + * NONE is the default.

          *
        • *
        • *

          - * ZSTD - Compresses the data in the volume using the ZStandard - * (ZSTD) compression algorithm. This algorithm reduces the amount of space used on - * your volume and has very little impact on compute resources.

          + * ZSTD - Compresses the data in the volume using the Zstandard + * (ZSTD) compression algorithm. Compared to LZ4, Z-Standard provides a better + * compression ratio to minimize on-disk storage utilization.

          + *
        • + *
        • + *

          + * LZ4 - Compresses the data in the volume using the LZ4 + * compression algorithm. Compared to Z-Standard, LZ4 is less compute-intensive + * and delivers higher write throughput speeds.

          *
        • *
        */ @@ -3576,9 +3629,9 @@ export interface OpenZFSCreateRootVolumeConfiguration { UserAndGroupQuotas?: OpenZFSUserOrGroupQuota[]; /** - *

        A Boolean value indicating whether tags for the volume should be copied to snapshots. - * This value defaults to false. If it's set to true, all tags - * for the volume are copied to snapshots where the user doesn't specify tags. If this + *

        A Boolean value indicating whether tags for the volume should be copied to snapshots + * of the volume. This value defaults to false. If it's set to true, + * all tags for the volume are copied to snapshots where the user doesn't specify tags. If this * value is true and you specify one or more tags, only the specified tags are * copied to snapshots. If you specify one or more tags when creating the snapshot, no tags * are copied from the volume, regardless of this value.

        @@ -3603,7 +3656,7 @@ export namespace OpenZFSCreateRootVolumeConfiguration { } /** - *

        The OpenZFS configuration properties for the file system that you are creating.

        + *

        The Amazon FSx for OpenZFS configuration properties for the file system that you are creating.

        */ export interface CreateFileSystemOpenZFSConfiguration { /** @@ -3643,8 +3696,8 @@ export interface CreateFileSystemOpenZFSConfiguration { /** *

        Specifies the file system deployment type. Amazon FSx for OpenZFS supports - * SINGLE_AZ_1. SINGLE_AZ_1 is a file system configured for a - * single Availability Zone (AZ) of redundancy.

        + * SINGLE_AZ_1. SINGLE_AZ_1 deployment type is configured for redundancy + * within a single Availability Zone.

        */ DeploymentType: OpenZFSDeploymentType | string | undefined; @@ -5017,42 +5070,77 @@ export namespace CreateOpenZFSOriginSnapshotConfiguration { } /** - *

        Specifies the configuration of the OpenZFS volume that you are creating.

        + *

        Specifies the configuration of the Amazon FSx for OpenZFS volume that you are creating.

        */ export interface CreateOpenZFSVolumeConfiguration { /** - *

        The ID of the volume to use as the parent volume.

        + *

        The ID of the volume to use as the parent volume of the volume that you are creating.

        */ ParentVolumeId: string | undefined; /** - *

        The amount of storage in gibibytes (GiB) to reserve from the parent volume. You can't - * reserve more storage than the parent volume has reserved.

        + *

        Specifies the amount of storage in gibibytes (GiB) to reserve from the parent volume. Setting + * StorageCapacityReservationGiB guarantees that the specified amount of storage space + * on the parent volume will always be available for the volume. + * You can't reserve more storage than the parent volume has. To not specify a storage capacity + * reservation, set this to 0 or -1. For more information, see + * Volume properties + * in the Amazon FSx for OpenZFS User Guide.

        */ StorageCapacityReservationGiB?: number; /** - *

        The maximum amount of storage in gibibytes (GiB) that the volume can use from its - * parent. You can specify a quota larger than the storage on the parent volume.

        + *

        Sets the maximum storage size in gibibytes (GiB) for the volume. You can specify + * a quota that is larger than the storage on the parent volume. A volume quota limits + * the amount of storage that the volume can consume to the configured amount, but does not + * guarantee the space will be available on the parent volume. To guarantee quota space, you must also set + * StorageCapacityReservationGiB. To not specify a storage capacity quota, set this to -1. + *

        + *

        For more information, see + * Volume properties + * in the Amazon FSx for OpenZFS User Guide.

        */ StorageCapacityQuotaGiB?: number; /** - *

        Specifies the method used to compress the data on the volume. Unless the compression - * type is specified, volumes inherit the DataCompressionType value of their - * parent volume.

        + *

        Specifies the suggested block size for a volume in a ZFS dataset, in kibibytes (KiB). Valid values are 4, 8, + * 16, 32, 64, 128, 256, 512, or 1024 KiB. The default is 128 KiB. + * We recommend using the default setting for the majority of use cases. + * Generally, workloads that write in fixed small or large record sizes + * may benefit from setting a custom record size, like database workloads + * (small record size) or media streaming workloads (large record size). + * For additional guidance on when + * to set a custom record size, see + * + * ZFS Record size in the Amazon FSx for OpenZFS User Guide.

        + */ + RecordSizeKiB?: number; + + /** + *

        Specifies the method used to compress the data on the volume. The compression + * type is NONE by default.

        *
          *
        • *

          - * NONE - Doesn't compress the data on the volume.

          + * NONE - Doesn't compress the data on the volume. + * NONE is the default.

          *
        • *
        • *

          * ZSTD - Compresses the data in the volume using the Zstandard - * (ZSTD) compression algorithm. This algorithm reduces the amount of space used on - * your volume and has very little impact on compute resources.

          + * (ZSTD) compression algorithm. ZSTD compression provides a higher level of + * data compression and higher read throughput performance than LZ4 compression.

          + *
        • + *
        • + *

          + * LZ4 - Compresses the data in the volume using the LZ4 + * compression algorithm. LZ4 compression provides a lower level of compression + * and higher write throughput performance than ZSTD compression.

          *
        • *
        + *

        For more information about volume compression types and the performance of your Amazon FSx for OpenZFS file system, + * see + * Tips for maximizing performance File system and volume settings in the Amazon FSx for OpenZFS User Guide.

        */ DataCompressionType?: OpenZFSDataCompressionType | string; @@ -5062,7 +5150,7 @@ export interface CreateOpenZFSVolumeConfiguration { * for the volume are copied to snapshots where the user doesn't specify tags. If this * value is true, and you specify one or more tags, only the specified tags * are copied to snapshots. If you specify one or more tags when creating the snapshot, no - * tags are copied from the volume, regardless of this value.

        + * tags are copied from the volume, regardless of this value.

        */ CopyTagsToSnapshots?: boolean; @@ -5073,17 +5161,17 @@ export interface CreateOpenZFSVolumeConfiguration { OriginSnapshot?: CreateOpenZFSOriginSnapshotConfiguration; /** - *

        A Boolean value indicating whether the volume is read-only.

        + *

        A Boolean value indicating whether the volume is read-only.

        */ ReadOnly?: boolean; /** - *

        The configuration object for mounting a Network File System (NFS) file system.

        + *

        The configuration object for mounting a Network File System (NFS) file system.

        */ NfsExports?: OpenZFSNfsExport[]; /** - *

        An object specifying how much storage users or groups can use on the volume.

        + *

        An object specifying how much storage users or groups can use on the volume.

        */ UserAndGroupQuotas?: OpenZFSUserOrGroupQuota[]; } @@ -5166,7 +5254,7 @@ export class MissingVolumeConfiguration extends __BaseException { } /** - *

        No Amazon FSx for NetApp ONTAP SVMs were found based upon the supplied parameters.

        + *

        No FSx for ONTAP SVMs were found based upon the supplied parameters.

        */ export class StorageVirtualMachineNotFound extends __BaseException { readonly name: "StorageVirtualMachineNotFound" = "StorageVirtualMachineNotFound"; @@ -5452,8 +5540,12 @@ export namespace DeleteFileSystemLustreConfiguration { }); } +export enum DeleteFileSystemOpenZFSOption { + DELETE_CHILD_VOLUMES_AND_SNAPSHOTS = "DELETE_CHILD_VOLUMES_AND_SNAPSHOTS", +} + /** - *

        The configuration object for the OpenZFS file system used in the + *

        The configuration object for the Amazon FSx for OpenZFS file system used in the * DeleteFileSystem operation.

        */ export interface DeleteFileSystemOpenZFSConfiguration { @@ -5461,16 +5553,21 @@ export interface DeleteFileSystemOpenZFSConfiguration { *

        By default, Amazon FSx for OpenZFS takes a final backup on your behalf when * the DeleteFileSystem operation is invoked. Doing this helps protect you * from data loss, and we highly recommend taking the final backup. If you want to skip - * this backup, use this - * value - * to do so.

        + * taking a final backup, set this value to true.

        */ SkipFinalBackup?: boolean; /** - *

        A list of Tag values, with a maximum of 50 elements.

        + *

        A list of tags to apply to the file system's final backup.

        */ FinalBackupTags?: Tag[]; + + /** + *

        To delete a file system if there are child volumes present below the root volume, + * use the string DELETE_CHILD_VOLUMES_AND_SNAPSHOTS. If your file system + * has child volumes and you don't use this option, the delete request will fail.

        + */ + Options?: (DeleteFileSystemOpenZFSOption | string)[]; } export namespace DeleteFileSystemOpenZFSConfiguration { @@ -7122,7 +7219,7 @@ export interface UpdateFileSystemOpenZFSConfiguration { * where the user doesn't specify tags. If this value is true and you specify * one or more tags, only the specified tags are copied to backups. If you specify one or * more tags when creating a user-initiated backup, no tags are copied from the file - * system, regardless of this value.

        + * system, regardless of this value.

        */ CopyTagsToBackups?: boolean; @@ -7132,7 +7229,7 @@ export interface UpdateFileSystemOpenZFSConfiguration { * for the volume are copied to snapshots where the user doesn't specify tags. If this * value is true and you specify one or more tags, only the specified tags are * copied to snapshots. If you specify one or more tags when creating the snapshot, no tags - * are copied from the volume, regardless of this value.

        + * are copied from the volume, regardless of this value.

        */ CopyTagsToVolumes?: boolean; @@ -7145,7 +7242,7 @@ export interface UpdateFileSystemOpenZFSConfiguration { /** *

        The throughput of an Amazon FSx file system, measured in megabytes per second - * (MBps), in 2 to the nth increments, between 2^3 (8) and 2^12 (4096).

        + * (MBps). Valid values are 64, 128, 256, 512, 1024, 2048, 3072, or 4096 MB/s.

        */ ThroughputCapacity?: number; @@ -7376,7 +7473,7 @@ export interface UpdateSnapshotRequest { ClientRequestToken?: string; /** - *

        The name of the snapshot to update.

        + *

        The name of the snapshot to update.

        */ Name: string | undefined; @@ -7520,38 +7617,55 @@ export namespace UpdateOntapVolumeConfiguration { } /** - *

        Used to specify changes to the OpenZFS configuration for the volume that you are - * updating.

        + *

        Used to specify changes to the OpenZFS configuration for the volume + * that you are updating.

        */ export interface UpdateOpenZFSVolumeConfiguration { /** - *

        The amount of storage in gibibytes (GiB) to reserve from the parent volume. You can't - * reserve more storage than the parent volume has reserved.

        + *

        The amount of storage in gibibytes (GiB) to reserve from the parent volume. + * You can't reserve more storage than the parent volume has reserved. You can specify + * a value of -1 to unset a volume's storage capacity reservation.

        */ StorageCapacityReservationGiB?: number; /** - *

        - *

        The maximum amount of storage in gibibytes (GiB) that the volume can use from its - * parent. You can specify a quota larger than the storage on the parent volume.

        + *

        The maximum amount of storage in gibibytes (GiB) that the volume can use from its + * parent. You can specify a quota larger than the storage on the parent volume. You + * can specify a value of -1 to unset a volume's storage capacity quota.

        */ StorageCapacityQuotaGiB?: number; /** - *

        - *

        Specifies the method used to compress the data on the volume. Unless the compression - * type is specified, volumes inherit the DataCompressionType value of their - * parent volume.

        + *

        Specifies the record size of an OpenZFS volume, in kibibytes (KiB). Valid values are 4, 8, + * 16, 32, 64, 128, 256, 512, or 1024 KiB. The default is 128 KiB. + * Most workloads should use the default record size. Database workflows can benefit from a smaller + * record size, while streaming workflows can benefit from a larger record size. For additional guidance on when + * to set a custom record size, see + * Tips for maximizing performance in the + * Amazon FSx for OpenZFS User Guide.

        + */ + RecordSizeKiB?: number; + + /** + *

        Specifies the method used to compress the data on the volume. The compression + * type is NONE by default.

        *
          *
        • *

          - * NONE - Doesn't compress the data on the volume.

          + * NONE - Doesn't compress the data on the volume. + * NONE is the default.

          *
        • *
        • *

          * ZSTD - Compresses the data in the volume using the Zstandard - * (ZSTD) compression algorithm. This algorithm reduces the amount of space used on - * your volume and has very little impact on compute resources.

          + * (ZSTD) compression algorithm. Compared to LZ4, Z-Standard provides a better + * compression ratio to minimize on-disk storage utilization.

          + *
        • + *
        • + *

          + * LZ4 - Compresses the data in the volume using the LZ4 + * compression algorithm. Compared to Z-Standard, LZ4 is less compute-intensive + * and delivers higher write throughput speeds.

          *
        • *
        */ @@ -8012,6 +8126,11 @@ export interface Snapshot { */ Lifecycle?: SnapshotLifecycle | string; + /** + *

        Describes why a resource lifecycle state changed.

        + */ + LifecycleTransitionReason?: LifecycleTransitionReason; + /** *

        A list of Tag values, with a maximum of 50 elements.

        */ diff --git a/clients/client-fsx/src/protocols/Aws_json1_1.ts b/clients/client-fsx/src/protocols/Aws_json1_1.ts index 53471cb4406f..0640d475aa8c 100644 --- a/clients/client-fsx/src/protocols/Aws_json1_1.ts +++ b/clients/client-fsx/src/protocols/Aws_json1_1.ts @@ -181,6 +181,7 @@ import { DeleteFileSystemLustreConfiguration, DeleteFileSystemLustreResponse, DeleteFileSystemOpenZFSConfiguration, + DeleteFileSystemOpenZFSOption, DeleteFileSystemOpenZFSResponse, DeleteFileSystemRequest, DeleteFileSystemResponse, @@ -3693,6 +3694,7 @@ const serializeAws_json1_1CreateOpenZFSVolumeConfiguration = ( ...(input.ParentVolumeId !== undefined && input.ParentVolumeId !== null && { ParentVolumeId: input.ParentVolumeId }), ...(input.ReadOnly !== undefined && input.ReadOnly !== null && { ReadOnly: input.ReadOnly }), + ...(input.RecordSizeKiB !== undefined && input.RecordSizeKiB !== null && { RecordSizeKiB: input.RecordSizeKiB }), ...(input.StorageCapacityQuotaGiB !== undefined && input.StorageCapacityQuotaGiB !== null && { StorageCapacityQuotaGiB: input.StorageCapacityQuotaGiB }), ...(input.StorageCapacityReservationGiB !== undefined && @@ -3883,11 +3885,29 @@ const serializeAws_json1_1DeleteFileSystemOpenZFSConfiguration = ( return { ...(input.FinalBackupTags !== undefined && input.FinalBackupTags !== null && { FinalBackupTags: serializeAws_json1_1Tags(input.FinalBackupTags, context) }), + ...(input.Options !== undefined && + input.Options !== null && { + Options: serializeAws_json1_1DeleteFileSystemOpenZFSOptions(input.Options, context), + }), ...(input.SkipFinalBackup !== undefined && input.SkipFinalBackup !== null && { SkipFinalBackup: input.SkipFinalBackup }), }; }; +const serializeAws_json1_1DeleteFileSystemOpenZFSOptions = ( + input: (DeleteFileSystemOpenZFSOption | string)[], + context: __SerdeContext +): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return entry; + }); +}; + const 
serializeAws_json1_1DeleteFileSystemRequest = (input: DeleteFileSystemRequest, context: __SerdeContext): any => { return { ClientRequestToken: input.ClientRequestToken ?? generateIdempotencyToken(), @@ -4245,6 +4265,7 @@ const serializeAws_json1_1OpenZFSCreateRootVolumeConfiguration = ( ...(input.NfsExports !== undefined && input.NfsExports !== null && { NfsExports: serializeAws_json1_1OpenZFSNfsExports(input.NfsExports, context) }), ...(input.ReadOnly !== undefined && input.ReadOnly !== null && { ReadOnly: input.ReadOnly }), + ...(input.RecordSizeKiB !== undefined && input.RecordSizeKiB !== null && { RecordSizeKiB: input.RecordSizeKiB }), ...(input.UserAndGroupQuotas !== undefined && input.UserAndGroupQuotas !== null && { UserAndGroupQuotas: serializeAws_json1_1OpenZFSUserAndGroupQuotas(input.UserAndGroupQuotas, context), @@ -4765,6 +4786,7 @@ const serializeAws_json1_1UpdateOpenZFSVolumeConfiguration = ( ...(input.NfsExports !== undefined && input.NfsExports !== null && { NfsExports: serializeAws_json1_1OpenZFSNfsExports(input.NfsExports, context) }), ...(input.ReadOnly !== undefined && input.ReadOnly !== null && { ReadOnly: input.ReadOnly }), + ...(input.RecordSizeKiB !== undefined && input.RecordSizeKiB !== null && { RecordSizeKiB: input.RecordSizeKiB }), ...(input.StorageCapacityQuotaGiB !== undefined && input.StorageCapacityQuotaGiB !== null && { StorageCapacityQuotaGiB: input.StorageCapacityQuotaGiB }), ...(input.StorageCapacityReservationGiB !== undefined && @@ -6177,6 +6199,7 @@ const deserializeAws_json1_1OpenZFSVolumeConfiguration = ( : undefined, ParentVolumeId: __expectString(output.ParentVolumeId), ReadOnly: __expectBoolean(output.ReadOnly), + RecordSizeKiB: __expectInt32(output.RecordSizeKiB), StorageCapacityQuotaGiB: __expectInt32(output.StorageCapacityQuotaGiB), StorageCapacityReservationGiB: __expectInt32(output.StorageCapacityReservationGiB), UserAndGroupQuotas: @@ -6288,6 +6311,10 @@ const deserializeAws_json1_1Snapshot = (output: any, context: 
__SerdeContext): S ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.CreationTime))) : undefined, Lifecycle: __expectString(output.Lifecycle), + LifecycleTransitionReason: + output.LifecycleTransitionReason !== undefined && output.LifecycleTransitionReason !== null + ? deserializeAws_json1_1LifecycleTransitionReason(output.LifecycleTransitionReason, context) + : undefined, Name: __expectString(output.Name), ResourceARN: __expectString(output.ResourceARN), SnapshotId: __expectString(output.SnapshotId), diff --git a/clients/client-gamelift/src/GameLift.ts b/clients/client-gamelift/src/GameLift.ts index 86601aaa74b1..840dd74fcbf6 100644 --- a/clients/client-gamelift/src/GameLift.ts +++ b/clients/client-gamelift/src/GameLift.ts @@ -2432,7 +2432,7 @@ export class GameLift extends GameLiftClient { } /** - *

        The GameLift service limits and current utilization for an Amazon Web Services Region or location. + *

        Retrieves the instance limits and current utilization for an Amazon Web Services Region or location. * Instance limits control the number of instances, per instance type, per location, that * your Amazon Web Services account can use. Learn more at Amazon EC2 Instance Types. The information * returned includes the maximum number of instances allowed and your account's current @@ -3659,7 +3659,7 @@ export class GameLift extends GameLiftClient { *

        This operation is not designed to be continually called to track matchmaking ticket * status. This practice can cause you to exceed your API limit, which results in errors. * Instead, as a best practice, set up an Amazon Simple Notification Service to receive notifications, and provide - * the topic ARN in the matchmaking configuration. Continuously poling ticket status with + * the topic ARN in the matchmaking configuration. Continuously polling ticket status with * DescribeMatchmaking should only be used for games in development * with low matchmaking usage.

        *

        diff --git a/clients/client-gamelift/src/commands/DescribeEC2InstanceLimitsCommand.ts b/clients/client-gamelift/src/commands/DescribeEC2InstanceLimitsCommand.ts index 0d09c2ff15cd..0e204ccf6c99 100644 --- a/clients/client-gamelift/src/commands/DescribeEC2InstanceLimitsCommand.ts +++ b/clients/client-gamelift/src/commands/DescribeEC2InstanceLimitsCommand.ts @@ -22,7 +22,7 @@ export interface DescribeEC2InstanceLimitsCommandInput extends DescribeEC2Instan export interface DescribeEC2InstanceLimitsCommandOutput extends DescribeEC2InstanceLimitsOutput, __MetadataBearer {} /** - *

        The GameLift service limits and current utilization for an Amazon Web Services Region or location. + *

        Retrieves the instance limits and current utilization for an Amazon Web Services Region or location. * Instance limits control the number of instances, per instance type, per location, that * your Amazon Web Services account can use. Learn more at Amazon EC2 Instance Types. The information * returned includes the maximum number of instances allowed and your account's current diff --git a/clients/client-gamelift/src/commands/DescribeMatchmakingCommand.ts b/clients/client-gamelift/src/commands/DescribeMatchmakingCommand.ts index cc6fa4b138be..98c7d3dc641d 100644 --- a/clients/client-gamelift/src/commands/DescribeMatchmakingCommand.ts +++ b/clients/client-gamelift/src/commands/DescribeMatchmakingCommand.ts @@ -31,7 +31,7 @@ export interface DescribeMatchmakingCommandOutput extends DescribeMatchmakingOut *

        This operation is not designed to be continually called to track matchmaking ticket * status. This practice can cause you to exceed your API limit, which results in errors. * Instead, as a best practice, set up an Amazon Simple Notification Service to receive notifications, and provide - * the topic ARN in the matchmaking configuration. Continuously poling ticket status with + * the topic ARN in the matchmaking configuration. Continuously polling ticket status with * DescribeMatchmaking should only be used for games in development * with low matchmaking usage.

        *

        diff --git a/clients/client-greengrassv2/src/GreengrassV2.ts b/clients/client-greengrassv2/src/GreengrassV2.ts index 4f0cf794390c..7782a8dd418a 100644 --- a/clients/client-greengrassv2/src/GreengrassV2.ts +++ b/clients/client-greengrassv2/src/GreengrassV2.ts @@ -400,8 +400,8 @@ export class GreengrassV2 extends GreengrassV2Client { * target that has an existing deployment, you replace the previous deployment. IoT Greengrass applies the * new deployment to the target devices.

        *

        Every deployment has a revision number that indicates how many deployment revisions you - * define for a target. Use this operation to create a new revision of an existing deployment. - * This operation returns the revision number of the new deployment when you create it.

        + * define for a target. Use this operation to create a new revision of an existing + * deployment.

        *

        For more information, see the Create deployments in the * IoT Greengrass V2 Developer Guide.

        */ @@ -641,7 +641,7 @@ export class GreengrassV2 extends GreengrassV2Client { *

        Retrieves connectivity information for a Greengrass core device.

        *

        Connectivity information includes endpoints and ports where client devices * can connect to an MQTT broker on the core device. When a client device - * calls the Greengrass discovery API, + * calls the IoT Greengrass discovery API, * IoT Greengrass returns connectivity information for all of the core devices where the client device can * connect. For more information, see Connect client devices to * core devices in the IoT Greengrass Version 2 Developer Guide.

        @@ -1144,7 +1144,7 @@ export class GreengrassV2 extends GreengrassV2Client { *

        Updates connectivity information for a Greengrass core device.

        *

        Connectivity information includes endpoints and ports where client devices * can connect to an MQTT broker on the core device. When a client device - * calls the Greengrass discovery API, + * calls the IoT Greengrass discovery API, * IoT Greengrass returns connectivity information for all of the core devices where the client device can * connect. For more information, see Connect client devices to * core devices in the IoT Greengrass Version 2 Developer Guide.

        diff --git a/clients/client-greengrassv2/src/commands/CreateDeploymentCommand.ts b/clients/client-greengrassv2/src/commands/CreateDeploymentCommand.ts index d12a77b2f034..3ce6a00e0503 100644 --- a/clients/client-greengrassv2/src/commands/CreateDeploymentCommand.ts +++ b/clients/client-greengrassv2/src/commands/CreateDeploymentCommand.ts @@ -29,8 +29,8 @@ export interface CreateDeploymentCommandOutput extends CreateDeploymentResponse, * target that has an existing deployment, you replace the previous deployment. IoT Greengrass applies the * new deployment to the target devices.

        *

        Every deployment has a revision number that indicates how many deployment revisions you - * define for a target. Use this operation to create a new revision of an existing deployment. - * This operation returns the revision number of the new deployment when you create it.

        + * define for a target. Use this operation to create a new revision of an existing + * deployment.

        *

        For more information, see the Create deployments in the * IoT Greengrass V2 Developer Guide.

        * @example diff --git a/clients/client-greengrassv2/src/commands/GetConnectivityInfoCommand.ts b/clients/client-greengrassv2/src/commands/GetConnectivityInfoCommand.ts index 5ea4e90b976c..b92dcb887b41 100644 --- a/clients/client-greengrassv2/src/commands/GetConnectivityInfoCommand.ts +++ b/clients/client-greengrassv2/src/commands/GetConnectivityInfoCommand.ts @@ -25,7 +25,7 @@ export interface GetConnectivityInfoCommandOutput extends GetConnectivityInfoRes *

        Retrieves connectivity information for a Greengrass core device.

        *

        Connectivity information includes endpoints and ports where client devices * can connect to an MQTT broker on the core device. When a client device - * calls the Greengrass discovery API, + * calls the IoT Greengrass discovery API, * IoT Greengrass returns connectivity information for all of the core devices where the client device can * connect. For more information, see Connect client devices to * core devices in the IoT Greengrass Version 2 Developer Guide.

        diff --git a/clients/client-greengrassv2/src/commands/UpdateConnectivityInfoCommand.ts b/clients/client-greengrassv2/src/commands/UpdateConnectivityInfoCommand.ts index 3bd6c669d47f..5820ab873754 100644 --- a/clients/client-greengrassv2/src/commands/UpdateConnectivityInfoCommand.ts +++ b/clients/client-greengrassv2/src/commands/UpdateConnectivityInfoCommand.ts @@ -25,7 +25,7 @@ export interface UpdateConnectivityInfoCommandOutput extends UpdateConnectivityI *

        Updates connectivity information for a Greengrass core device.

        *

        Connectivity information includes endpoints and ports where client devices * can connect to an MQTT broker on the core device. When a client device - * calls the Greengrass discovery API, + * calls the IoT Greengrass discovery API, * IoT Greengrass returns connectivity information for all of the core devices where the client device can * connect. For more information, see Connect client devices to * core devices in the IoT Greengrass Version 2 Developer Guide.

        diff --git a/clients/client-kafkaconnect/src/KafkaConnect.ts b/clients/client-kafkaconnect/src/KafkaConnect.ts index 54f5e8cbf625..b9588b0e747b 100644 --- a/clients/client-kafkaconnect/src/KafkaConnect.ts +++ b/clients/client-kafkaconnect/src/KafkaConnect.ts @@ -20,6 +20,11 @@ import { DeleteConnectorCommandInput, DeleteConnectorCommandOutput, } from "./commands/DeleteConnectorCommand"; +import { + DeleteCustomPluginCommand, + DeleteCustomPluginCommandInput, + DeleteCustomPluginCommandOutput, +} from "./commands/DeleteCustomPluginCommand"; import { DescribeConnectorCommand, DescribeConnectorCommandInput, @@ -189,6 +194,38 @@ export class KafkaConnect extends KafkaConnectClient { } } + /** + *

        Deletes a custom plugin.

        + */ + public deleteCustomPlugin( + args: DeleteCustomPluginCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public deleteCustomPlugin( + args: DeleteCustomPluginCommandInput, + cb: (err: any, data?: DeleteCustomPluginCommandOutput) => void + ): void; + public deleteCustomPlugin( + args: DeleteCustomPluginCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: DeleteCustomPluginCommandOutput) => void + ): void; + public deleteCustomPlugin( + args: DeleteCustomPluginCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: DeleteCustomPluginCommandOutput) => void), + cb?: (err: any, data?: DeleteCustomPluginCommandOutput) => void + ): Promise | void { + const command = new DeleteCustomPluginCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

        Returns summary information about the connector.

        */ @@ -286,7 +323,9 @@ export class KafkaConnect extends KafkaConnectClient { } /** - *

        Returns a list of all the connectors in this account and Region. The list is limited to connectors whose name starts with the specified prefix. The response also includes a description of each of the listed connectors.

        + *

        Returns a list of all the connectors in this account and Region. The list is limited to + * connectors whose name starts with the specified prefix. The response also includes a + * description of each of the listed connectors.

        */ public listConnectors( args: ListConnectorsCommandInput, diff --git a/clients/client-kafkaconnect/src/KafkaConnectClient.ts b/clients/client-kafkaconnect/src/KafkaConnectClient.ts index 7e20139e1440..500ef570ad92 100644 --- a/clients/client-kafkaconnect/src/KafkaConnectClient.ts +++ b/clients/client-kafkaconnect/src/KafkaConnectClient.ts @@ -58,6 +58,7 @@ import { CreateWorkerConfigurationCommandOutput, } from "./commands/CreateWorkerConfigurationCommand"; import { DeleteConnectorCommandInput, DeleteConnectorCommandOutput } from "./commands/DeleteConnectorCommand"; +import { DeleteCustomPluginCommandInput, DeleteCustomPluginCommandOutput } from "./commands/DeleteCustomPluginCommand"; import { DescribeConnectorCommandInput, DescribeConnectorCommandOutput } from "./commands/DescribeConnectorCommand"; import { DescribeCustomPluginCommandInput, @@ -81,6 +82,7 @@ export type ServiceInputTypes = | CreateCustomPluginCommandInput | CreateWorkerConfigurationCommandInput | DeleteConnectorCommandInput + | DeleteCustomPluginCommandInput | DescribeConnectorCommandInput | DescribeCustomPluginCommandInput | DescribeWorkerConfigurationCommandInput @@ -94,6 +96,7 @@ export type ServiceOutputTypes = | CreateCustomPluginCommandOutput | CreateWorkerConfigurationCommandOutput | DeleteConnectorCommandOutput + | DeleteCustomPluginCommandOutput | DescribeConnectorCommandOutput | DescribeCustomPluginCommandOutput | DescribeWorkerConfigurationCommandOutput diff --git a/clients/client-kafkaconnect/src/commands/DeleteCustomPluginCommand.ts b/clients/client-kafkaconnect/src/commands/DeleteCustomPluginCommand.ts new file mode 100644 index 000000000000..93637f7acbc1 --- /dev/null +++ b/clients/client-kafkaconnect/src/commands/DeleteCustomPluginCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from 
"@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { KafkaConnectClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../KafkaConnectClient"; +import { DeleteCustomPluginRequest, DeleteCustomPluginResponse } from "../models/models_0"; +import { + deserializeAws_restJson1DeleteCustomPluginCommand, + serializeAws_restJson1DeleteCustomPluginCommand, +} from "../protocols/Aws_restJson1"; + +export interface DeleteCustomPluginCommandInput extends DeleteCustomPluginRequest {} +export interface DeleteCustomPluginCommandOutput extends DeleteCustomPluginResponse, __MetadataBearer {} + +/** + *

        Deletes a custom plugin.

        + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { KafkaConnectClient, DeleteCustomPluginCommand } from "@aws-sdk/client-kafkaconnect"; // ES Modules import + * // const { KafkaConnectClient, DeleteCustomPluginCommand } = require("@aws-sdk/client-kafkaconnect"); // CommonJS import + * const client = new KafkaConnectClient(config); + * const command = new DeleteCustomPluginCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DeleteCustomPluginCommandInput} for command's `input` shape. + * @see {@link DeleteCustomPluginCommandOutput} for command's `response` shape. + * @see {@link KafkaConnectClientResolvedConfig | config} for KafkaConnectClient's `config` shape. + * + */ +export class DeleteCustomPluginCommand extends $Command< + DeleteCustomPluginCommandInput, + DeleteCustomPluginCommandOutput, + KafkaConnectClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DeleteCustomPluginCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: KafkaConnectClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "KafkaConnectClient"; + const commandName = "DeleteCustomPluginCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: DeleteCustomPluginRequest.filterSensitiveLog, + outputFilterSensitiveLog: DeleteCustomPluginResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: 
FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: DeleteCustomPluginCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1DeleteCustomPluginCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1DeleteCustomPluginCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-kafkaconnect/src/commands/ListConnectorsCommand.ts b/clients/client-kafkaconnect/src/commands/ListConnectorsCommand.ts index dd688da59748..0cc59e97dcb2 100644 --- a/clients/client-kafkaconnect/src/commands/ListConnectorsCommand.ts +++ b/clients/client-kafkaconnect/src/commands/ListConnectorsCommand.ts @@ -22,7 +22,9 @@ export interface ListConnectorsCommandInput extends ListConnectorsRequest {} export interface ListConnectorsCommandOutput extends ListConnectorsResponse, __MetadataBearer {} /** - *

        Returns a list of all the connectors in this account and Region. The list is limited to connectors whose name starts with the specified prefix. The response also includes a description of each of the listed connectors.

        + *

        Returns a list of all the connectors in this account and Region. The list is limited to + * connectors whose name starts with the specified prefix. The response also includes a + * description of each of the listed connectors.

        * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-kafkaconnect/src/commands/index.ts b/clients/client-kafkaconnect/src/commands/index.ts index 2b848ed670fb..8fc3e2d1748a 100644 --- a/clients/client-kafkaconnect/src/commands/index.ts +++ b/clients/client-kafkaconnect/src/commands/index.ts @@ -2,6 +2,7 @@ export * from "./CreateConnectorCommand"; export * from "./CreateCustomPluginCommand"; export * from "./CreateWorkerConfigurationCommand"; export * from "./DeleteConnectorCommand"; +export * from "./DeleteCustomPluginCommand"; export * from "./DescribeConnectorCommand"; export * from "./DescribeCustomPluginCommand"; export * from "./DescribeWorkerConfigurationCommand"; diff --git a/clients/client-kafkaconnect/src/models/models_0.ts b/clients/client-kafkaconnect/src/models/models_0.ts index 2098c0c7f27e..875f2b7e2b89 100644 --- a/clients/client-kafkaconnect/src/models/models_0.ts +++ b/clients/client-kafkaconnect/src/models/models_0.ts @@ -1,4 +1,4 @@ -import { ExceptionOptionType as __ExceptionOptionType } from "@aws-sdk/smithy-client"; +import { ExceptionOptionType as __ExceptionOptionType, SENSITIVE_STRING } from "@aws-sdk/smithy-client"; import { MetadataBearer as $MetadataBearer } from "@aws-sdk/types"; import { KafkaConnectServiceException as __BaseException } from "./KafkaConnectServiceException"; @@ -8,7 +8,8 @@ import { KafkaConnectServiceException as __BaseException } from "./KafkaConnectS */ export interface ScaleInPolicyDescription { /** - *

        Specifies the CPU utilization percentage threshold at which you want connector scale in to be triggered.

        + *

        Specifies the CPU utilization percentage threshold at which you want connector scale in + * to be triggered.

        */ cpuUtilizationPercentage?: number; } @@ -27,7 +28,8 @@ export namespace ScaleInPolicyDescription { */ export interface ScaleOutPolicyDescription { /** - *

        The CPU utilization percentage threshold at which you want connector scale out to be triggered.

        + *

        The CPU utilization percentage threshold at which you want connector scale out to be + * triggered.

        */ cpuUtilizationPercentage?: number; } @@ -51,7 +53,8 @@ export interface AutoScalingDescription { maxWorkerCount?: number; /** - *

        The number of microcontroller units (MCUs) allocated to each connector worker. The valid values are 1,2,4,8.

        + *

        The number of microcontroller units (MCUs) allocated to each connector worker. The valid + * values are 1,2,4,8.

        */ mcuCount?: number; @@ -85,7 +88,8 @@ export namespace AutoScalingDescription { */ export interface ProvisionedCapacityDescription { /** - *

        The number of microcontroller units (MCUs) allocated to each connector worker. The valid values are 1,2,4,8.

        + *

        The number of microcontroller units (MCUs) allocated to each connector worker. The valid + * values are 1,2,4,8.

        */ mcuCount?: number; @@ -170,7 +174,8 @@ export interface ApacheKafkaClusterDescription { bootstrapServers?: string; /** - *

        Details of an Amazon VPC which has network connectivity to the Apache Kafka cluster.

        + *

        Details of an Amazon VPC which has network connectivity to the Apache Kafka + * cluster.

        */ vpc?: VpcDescription; } @@ -209,11 +214,13 @@ export enum KafkaClusterClientAuthenticationType { } /** - *

        The client authentication information used in order to authenticate with the Apache Kafka cluster.

        + *

        The client authentication information used in order to authenticate with the Apache + * Kafka cluster.

        */ export interface KafkaClusterClientAuthenticationDescription { /** - *

        The type of client authentication used to connect to the Apache Kafka cluster. Value NONE means that no client authentication is used.

        + *

        The type of client authentication used to connect to the Apache Kafka cluster. Value + * NONE means that no client authentication is used.

        */ authenticationType?: KafkaClusterClientAuthenticationType | string; } @@ -276,11 +283,13 @@ export namespace CloudWatchLogsLogDeliveryDescription { } /** - *

        A description of the settings for delivering logs to Amazon Kinesis Data Firehose.

        + *

        A description of the settings for delivering logs to Amazon Kinesis Data + * Firehose.

        */ export interface FirehoseLogDeliveryDescription { /** - *

        The name of the Kinesis Data Firehose delivery stream that is the destination for log delivery.

        + *

        The name of the Kinesis Data Firehose delivery stream that is the destination for log + * delivery.

        */ deliveryStream?: string; @@ -329,7 +338,8 @@ export namespace S3LogDeliveryDescription { } /** - *

        Workers can send worker logs to different destination types. This configuration specifies the details of these destinations.

        + *

        Workers can send worker logs to different destination types. This configuration + * specifies the details of these destinations.

        */ export interface WorkerLogDeliveryDescription { /** @@ -362,7 +372,8 @@ export namespace WorkerLogDeliveryDescription { */ export interface LogDeliveryDescription { /** - *

        The workers can send worker logs to different destination types. This configuration specifies the details of these destinations.

        + *

        The workers can send worker logs to different destination types. This configuration + * specifies the details of these destinations.

        */ workerLogDelivery?: WorkerLogDeliveryDescription; } @@ -488,7 +499,8 @@ export interface ConnectorSummary { kafkaCluster?: KafkaClusterDescription; /** - *

        The type of client authentication used to connect to the Apache Kafka cluster. The value is NONE when no client authentication is used.

        + *

        The type of client authentication used to connect to the Apache Kafka cluster. The value + * is NONE when no client authentication is used.

        */ kafkaClusterClientAuthentication?: KafkaClusterClientAuthenticationDescription; @@ -498,7 +510,8 @@ export interface ConnectorSummary { kafkaClusterEncryptionInTransit?: KafkaClusterEncryptionInTransitDescription; /** - *

        The version of Kafka Connect. It has to be compatible with both the Apache Kafka cluster's version and the plugins.

        + *

        The version of Kafka Connect. It has to be compatible with both the Apache Kafka + * cluster's version and the plugins.

        */ kafkaConnectVersion?: string; @@ -513,7 +526,8 @@ export interface ConnectorSummary { plugins?: PluginDescription[]; /** - *

        The Amazon Resource Name (ARN) of the IAM role used by the connector to access Amazon Web Services resources.

        + *

        The Amazon Resource Name (ARN) of the IAM role used by the connector to access Amazon + * Web Services resources.

        */ serviceExecutionRoleArn?: string; @@ -551,7 +565,8 @@ export enum CustomPluginContentType { */ export interface CustomPluginFileDescription { /** - *

        The hex-encoded MD5 checksum of the custom plugin file. You can use it to validate the file.

        + *

        The hex-encoded MD5 checksum of the custom plugin file. You can use it to validate the + * file.

        */ fileMd5?: string; @@ -604,7 +619,8 @@ export namespace S3LocationDescription { */ export interface CustomPluginLocationDescription { /** - *

        The S3 bucket Amazon Resource Name (ARN), file key, and object version of the plugin file stored in Amazon S3.

        + *

        The S3 bucket Amazon Resource Name (ARN), file key, and object version of the plugin + * file stored in Amazon S3.

        */ s3Location?: S3LocationDescription; } @@ -707,7 +723,8 @@ export namespace CustomPluginSummary { } /** - *

        A plugin is an AWS resource that contains the code that defines a connector's logic.

        + *

        A plugin is an AWS resource that contains the code that defines a connector's + * logic.

        */ export interface CustomPlugin { /** @@ -731,7 +748,8 @@ export namespace CustomPlugin { } /** - *

        A plugin is an AWS resource that contains the code that defines your connector logic.

        + *

        A plugin is an AWS resource that contains the code that defines your connector logic. + *

        */ export interface Plugin { /** @@ -851,7 +869,8 @@ export interface ApacheKafkaCluster { bootstrapServers: string | undefined; /** - *

        Details of an Amazon VPC which has network connectivity to the Apache Kafka cluster.

        + *

        Details of an Amazon VPC which has network connectivity to the Apache Kafka + * cluster.

        */ vpc: Vpc | undefined; } @@ -870,7 +889,8 @@ export namespace ApacheKafkaCluster { */ export interface ScaleInPolicy { /** - *

        Specifies the CPU utilization percentage threshold at which you want connector scale in to be triggered.

        + *

        Specifies the CPU utilization percentage threshold at which you want connector scale in + * to be triggered.

        */ cpuUtilizationPercentage: number | undefined; } @@ -889,7 +909,8 @@ export namespace ScaleInPolicy { */ export interface ScaleOutPolicy { /** - *

        The CPU utilization percentage threshold at which you want connector scale out to be triggered.

        + *

        The CPU utilization percentage threshold at which you want connector scale out to be + * triggered.

        */ cpuUtilizationPercentage: number | undefined; } @@ -913,7 +934,8 @@ export interface AutoScaling { maxWorkerCount: number | undefined; /** - *

        The number of microcontroller units (MCUs) allocated to each connector worker. The valid values are 1,2,4,8.

        + *

        The number of microcontroller units (MCUs) allocated to each connector worker. The valid + * values are 1,2,4,8.

        */ mcuCount: number | undefined; @@ -947,7 +969,8 @@ export namespace AutoScaling { */ export interface ScaleInPolicyUpdate { /** - *

        The target CPU utilization percentage threshold at which you want connector scale in to be triggered.

        + *

        The target CPU utilization percentage threshold at which you want connector scale in to + * be triggered.

        */ cpuUtilizationPercentage: number | undefined; } @@ -966,7 +989,8 @@ export namespace ScaleInPolicyUpdate { */ export interface ScaleOutPolicyUpdate { /** - *

        The target CPU utilization percentage threshold at which you want connector scale out to be triggered.

        + *

        The target CPU utilization percentage threshold at which you want connector scale out to + * be triggered.

        */ cpuUtilizationPercentage: number | undefined; } @@ -990,7 +1014,8 @@ export interface AutoScalingUpdate { maxWorkerCount: number | undefined; /** - *

        The target number of microcontroller units (MCUs) allocated to each connector worker. The valid values are 1,2,4,8.

        + *

        The target number of microcontroller units (MCUs) allocated to each connector worker. + * The valid values are 1,2,4,8.

        */ mcuCount: number | undefined; @@ -1020,7 +1045,8 @@ export namespace AutoScalingUpdate { } /** - *

        HTTP Status Code 400: Bad request due to incorrect input. Correct your request and then retry it.

        + *

        HTTP Status Code 400: Bad request due to incorrect input. Correct your request and then + * retry it.

        */ export class BadRequestException extends __BaseException { readonly name: "BadRequestException" = "BadRequestException"; @@ -1043,7 +1069,8 @@ export class BadRequestException extends __BaseException { */ export interface ProvisionedCapacity { /** - *

        The number of microcontroller units (MCUs) allocated to each connector worker. The valid values are 1,2,4,8.

        + *

        The number of microcontroller units (MCUs) allocated to each connector worker. The valid + * values are 1,2,4,8.

        */ mcuCount: number | undefined; @@ -1063,7 +1090,8 @@ export namespace ProvisionedCapacity { } /** - *

        Information about the capacity of the connector, whether it is auto scaled or provisioned.

        + *

        Information about the capacity of the connector, whether it is auto scaled or + * provisioned.

        */ export interface Capacity { /** @@ -1091,7 +1119,8 @@ export namespace Capacity { */ export interface ProvisionedCapacityUpdate { /** - *

        The number of microcontroller units (MCUs) allocated to each connector worker. The valid values are 1,2,4,8.

        + *

        The number of microcontroller units (MCUs) allocated to each connector worker. The valid + * values are 1,2,4,8.

        */ mcuCount: number | undefined; @@ -1111,7 +1140,8 @@ export namespace ProvisionedCapacityUpdate { } /** - *

        The target capacity for the connector. The capacity can be auto scaled or provisioned.

        + *

        The target capacity for the connector. The capacity can be auto scaled or + * provisioned.

        */ export interface CapacityUpdate { /** @@ -1159,7 +1189,8 @@ export namespace CloudWatchLogsLogDelivery { } /** - *

        HTTP Status Code 409: Conflict. A resource with this name already exists. Retry your request with another name.

        + *

        HTTP Status Code 409: Conflict. A resource with this name already exists. Retry your + * request with another name.

        */ export class ConflictException extends __BaseException { readonly name: "ConflictException" = "ConflictException"; @@ -1197,11 +1228,13 @@ export namespace KafkaCluster { } /** - *

        The client authentication information used in order to authenticate with the Apache Kafka cluster.

        + *

        The client authentication information used in order to authenticate with the Apache + * Kafka cluster.

        */ export interface KafkaClusterClientAuthentication { /** - *

        The type of client authentication used to connect to the Apache Kafka cluster. Value NONE means that no client authentication is used.

        + *

        The type of client authentication used to connect to the Apache Kafka cluster. Value + * NONE means that no client authentication is used.

        */ authenticationType: KafkaClusterClientAuthenticationType | string | undefined; } @@ -1239,7 +1272,8 @@ export namespace KafkaClusterEncryptionInTransit { */ export interface FirehoseLogDelivery { /** - *

        The name of the Kinesis Data Firehose delivery stream that is the destination for log delivery.

        + *

        The name of the Kinesis Data Firehose delivery stream that is the destination for log + * delivery.

        */ deliveryStream?: string; @@ -1288,7 +1322,8 @@ export namespace S3LogDelivery { } /** - *

        Workers can send worker logs to different destination types. This configuration specifies the details of these destinations.

        + *

        Workers can send worker logs to different destination types. This configuration + * specifies the details of these destinations.

        */ export interface WorkerLogDelivery { /** @@ -1321,7 +1356,8 @@ export namespace WorkerLogDelivery { */ export interface LogDelivery { /** - *

        The workers can send worker logs to different destination types. This configuration specifies the details of these destinations.

        + *

        The workers can send worker logs to different destination types. This configuration + * specifies the details of these destinations.

        */ workerLogDelivery: WorkerLogDelivery | undefined; } @@ -1336,7 +1372,8 @@ export namespace LogDelivery { } /** - *

        The configuration of the workers, which are the processes that run the connector logic.

        + *

        The configuration of the workers, which are the processes that run the connector + * logic.

        */ export interface WorkerConfiguration { /** @@ -1361,7 +1398,8 @@ export namespace WorkerConfiguration { export interface CreateConnectorRequest { /** - *

        Information about the capacity allocated to the connector. Exactly one of the two properties must be specified.

        + *

        Information about the capacity allocated to the connector. Exactly one of the two + * properties must be specified.

        */ capacity: Capacity | undefined; @@ -1396,7 +1434,8 @@ export interface CreateConnectorRequest { kafkaClusterEncryptionInTransit: KafkaClusterEncryptionInTransit | undefined; /** - *

        The version of Kafka Connect. It has to be compatible with both the Apache Kafka cluster's version and the plugins.

        + *

        The version of Kafka Connect. It has to be compatible with both the Apache Kafka + * cluster's version and the plugins.

        */ kafkaConnectVersion: string | undefined; @@ -1411,7 +1450,10 @@ export interface CreateConnectorRequest { plugins: Plugin[] | undefined; /** - *

        The Amazon Resource Name (ARN) of the IAM role used by the connector to access the Amazon Web Services resources that it needs. The types of resources depends on the logic of the connector. For example, a connector that has Amazon S3 as a destination must have permissions that allow it to write to the S3 destination bucket.

        + *

        The Amazon Resource Name (ARN) of the IAM role used by the connector to access the + * Amazon Web Services resources that it needs. The types of resources depends on the logic of + * the connector. For example, a connector that has Amazon S3 as a destination must have + * permissions that allow it to write to the S3 destination bucket.

        */ serviceExecutionRoleArn: string | undefined; @@ -1427,6 +1469,7 @@ export namespace CreateConnectorRequest { */ export const filterSensitiveLog = (obj: CreateConnectorRequest): any => ({ ...obj, + ...(obj.connectorConfiguration && { connectorConfiguration: SENSITIVE_STRING }), }); } @@ -1457,7 +1500,8 @@ export namespace CreateConnectorResponse { } /** - *

        HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request.

        + *

        HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your + * request.

        */ export class ForbiddenException extends __BaseException { readonly name: "ForbiddenException" = "ForbiddenException"; @@ -1476,7 +1520,8 @@ export class ForbiddenException extends __BaseException { } /** - *

        HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue.

        + *

        HTTP Status Code 500: Unexpected internal server error. Retrying your request might + * resolve the issue.

        */ export class InternalServerErrorException extends __BaseException { readonly name: "InternalServerErrorException" = "InternalServerErrorException"; @@ -1495,7 +1540,8 @@ export class InternalServerErrorException extends __BaseException { } /** - *

        HTTP Status Code 404: Resource not found due to incorrect input. Correct your request and then retry it.

        + *

        HTTP Status Code 404: Resource not found due to incorrect input. Correct your request + * and then retry it.

        */ export class NotFoundException extends __BaseException { readonly name: "NotFoundException" = "NotFoundException"; @@ -1514,7 +1560,8 @@ export class NotFoundException extends __BaseException { } /** - *

        HTTP Status Code 503: Service Unavailable. Retrying your request in some time might resolve the issue.

        + *

        HTTP Status Code 503: Service Unavailable. Retrying your request in some time might + * resolve the issue.

        */ export class ServiceUnavailableException extends __BaseException { readonly name: "ServiceUnavailableException" = "ServiceUnavailableException"; @@ -1552,7 +1599,8 @@ export class TooManyRequestsException extends __BaseException { } /** - *

        HTTP Status Code 401: Unauthorized request. The provided credentials couldn't be validated.

        + *

        HTTP Status Code 401: Unauthorized request. The provided credentials couldn't be + * validated.

        */ export class UnauthorizedException extends __BaseException { readonly name: "UnauthorizedException" = "UnauthorizedException"; @@ -1604,7 +1652,8 @@ export namespace S3Location { */ export interface CustomPluginLocation { /** - *

        The S3 bucket Amazon Resource Name (ARN), file key, and object version of the plugin file stored in Amazon S3.

        + *

        The S3 bucket Amazon Resource Name (ARN), file key, and object version of the plugin + * file stored in Amazon S3.

        */ s3Location: S3Location | undefined; } @@ -1703,6 +1752,7 @@ export namespace CreateWorkerConfigurationRequest { */ export const filterSensitiveLog = (obj: CreateWorkerConfigurationRequest): any => ({ ...obj, + ...(obj.propertiesFileContent && { propertiesFileContent: SENSITIVE_STRING }), }); } @@ -1779,6 +1829,43 @@ export namespace DeleteConnectorResponse { }); } +export interface DeleteCustomPluginRequest { + /** + *

        The Amazon Resource Name (ARN) of the custom plugin that you want to delete.

        + */ + customPluginArn: string | undefined; +} + +export namespace DeleteCustomPluginRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteCustomPluginRequest): any => ({ + ...obj, + }); +} + +export interface DeleteCustomPluginResponse { + /** + *

        The Amazon Resource Name (ARN) of the custom plugin that you requested to delete.

        + */ + customPluginArn?: string; + + /** + *

        The state of the custom plugin.

        + */ + customPluginState?: CustomPluginState | string; +} + +export namespace DeleteCustomPluginResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteCustomPluginResponse): any => ({ + ...obj, + }); +} + export interface DescribeConnectorRequest { /** *

        The Amazon Resource Name (ARN) of the connector that you want to describe.

        @@ -1795,9 +1882,34 @@ export namespace DescribeConnectorRequest { }); } +/** + *

        Details about the state of a resource.

        + */ +export interface StateDescription { + /** + *

        A code that describes the state of a resource.

        + */ + code?: string; + + /** + *

        A message that describes the state of a resource.

        + */ + message?: string; +} + +export namespace StateDescription { + /** + * @internal + */ + export const filterSensitiveLog = (obj: StateDescription): any => ({ + ...obj, + }); +} + export interface DescribeConnectorResponse { /** - *

        Information about the capacity of the connector, whether it is auto scaled or provisioned.

        + *

        Information about the capacity of the connector, whether it is auto scaled or + * provisioned.

        */ capacity?: CapacityDescription; @@ -1842,7 +1954,8 @@ export interface DescribeConnectorResponse { kafkaCluster?: KafkaClusterDescription; /** - *

        The type of client authentication used to connect to the Apache Kafka cluster. The value is NONE when no client authentication is used.

        + *

        The type of client authentication used to connect to the Apache Kafka cluster. The value + * is NONE when no client authentication is used.

        */ kafkaClusterClientAuthentication?: KafkaClusterClientAuthenticationDescription; @@ -1852,7 +1965,8 @@ export interface DescribeConnectorResponse { kafkaClusterEncryptionInTransit?: KafkaClusterEncryptionInTransitDescription; /** - *

        The version of Kafka Connect. It has to be compatible with both the Apache Kafka cluster's version and the plugins.

        + *

        The version of Kafka Connect. It has to be compatible with both the Apache Kafka + * cluster's version and the plugins.

        */ kafkaConnectVersion?: string; @@ -1867,7 +1981,8 @@ export interface DescribeConnectorResponse { plugins?: PluginDescription[]; /** - *

        The Amazon Resource Name (ARN) of the IAM role used by the connector to access Amazon Web Services resources.

        + *

        The Amazon Resource Name (ARN) of the IAM role used by the connector to access Amazon + * Web Services resources.

        */ serviceExecutionRoleArn?: string; @@ -1875,6 +1990,11 @@ export interface DescribeConnectorResponse { *

        Specifies which worker configuration was used for the connector.

        */ workerConfiguration?: WorkerConfigurationDescription; + + /** + *

        Details about the state of a connector.

        + */ + stateDescription?: StateDescription; } export namespace DescribeConnectorResponse { @@ -1883,6 +2003,7 @@ export namespace DescribeConnectorResponse { */ export const filterSensitiveLog = (obj: DescribeConnectorResponse): any => ({ ...obj, + ...(obj.connectorConfiguration && { connectorConfiguration: SENSITIVE_STRING }), }); } @@ -1924,7 +2045,8 @@ export interface DescribeCustomPluginResponse { description?: string; /** - *

        The latest successfully created revision of the custom plugin. If there are no successfully created revisions, this field will be absent.

        + *

        The latest successfully created revision of the custom plugin. If there are no + * successfully created revisions, this field will be absent.

        */ latestRevision?: CustomPluginRevisionSummary; @@ -1932,6 +2054,11 @@ export interface DescribeCustomPluginResponse { *

        The name of the custom plugin.

        */ name?: string; + + /** + *

        Details about the state of a custom plugin.

        + */ + stateDescription?: StateDescription; } export namespace DescribeCustomPluginResponse { @@ -1945,7 +2072,8 @@ export namespace DescribeCustomPluginResponse { export interface DescribeWorkerConfigurationRequest { /** - *

        The Amazon Resource Name (ARN) of the worker configuration that you want to get information about.

        + *

        The Amazon Resource Name (ARN) of the worker configuration that you want to get + * information about.

        */ workerConfigurationArn: string | undefined; } @@ -1990,6 +2118,7 @@ export namespace WorkerConfigurationRevisionDescription { */ export const filterSensitiveLog = (obj: WorkerConfigurationRevisionDescription): any => ({ ...obj, + ...(obj.propertiesFileContent && { propertiesFileContent: SENSITIVE_STRING }), }); } @@ -2026,6 +2155,9 @@ export namespace DescribeWorkerConfigurationResponse { */ export const filterSensitiveLog = (obj: DescribeWorkerConfigurationResponse): any => ({ ...obj, + ...(obj.latestRevision && { + latestRevision: WorkerConfigurationRevisionDescription.filterSensitiveLog(obj.latestRevision), + }), }); } @@ -2041,7 +2173,9 @@ export interface ListConnectorsRequest { maxResults?: number; /** - *

        If the response of a ListConnectors operation is truncated, it will include a NextToken. Send this NextToken in a subsequent request to continue listing from where the previous operation left off.

        + *

        If the response of a ListConnectors operation is truncated, it will include a NextToken. + * Send this NextToken in a subsequent request to continue listing from where the previous + * operation left off.

        */ nextToken?: string; } @@ -2062,7 +2196,9 @@ export interface ListConnectorsResponse { connectors?: ConnectorSummary[]; /** - *

        If the response of a ListConnectors operation is truncated, it will include a NextToken. Send this NextToken in a subsequent request to continue listing from where it left off.

        + *

        If the response of a ListConnectors operation is truncated, it will include a NextToken. + * Send this NextToken in a subsequent request to continue listing from where it left + * off.

        */ nextToken?: string; } @@ -2083,7 +2219,9 @@ export interface ListCustomPluginsRequest { maxResults?: number; /** - *

        If the response of a ListCustomPlugins operation is truncated, it will include a NextToken. Send this NextToken in a subsequent request to continue listing from where the previous operation left off.

        + *

        If the response of a ListCustomPlugins operation is truncated, it will include a + * NextToken. Send this NextToken in a subsequent request to continue listing from where the + * previous operation left off.

        */ nextToken?: string; } @@ -2104,7 +2242,9 @@ export interface ListCustomPluginsResponse { customPlugins?: CustomPluginSummary[]; /** - *

        If the response of a ListCustomPlugins operation is truncated, it will include a NextToken. Send this NextToken in a subsequent request to continue listing from where the previous operation left off.

        + *

        If the response of a ListCustomPlugins operation is truncated, it will include a + * NextToken. Send this NextToken in a subsequent request to continue listing from where the + * previous operation left off.

        */ nextToken?: string; } @@ -2125,7 +2265,9 @@ export interface ListWorkerConfigurationsRequest { maxResults?: number; /** - *

        If the response of a ListWorkerConfigurations operation is truncated, it will include a NextToken. Send this NextToken in a subsequent request to continue listing from where the previous operation left off.

        + *

        If the response of a ListWorkerConfigurations operation is truncated, it will include a + * NextToken. Send this NextToken in a subsequent request to continue listing from where the + * previous operation left off.

        */ nextToken?: string; } @@ -2141,7 +2283,9 @@ export namespace ListWorkerConfigurationsRequest { export interface ListWorkerConfigurationsResponse { /** - *

        If the response of a ListWorkerConfigurations operation is truncated, it will include a NextToken. Send this NextToken in a subsequent request to continue listing from where the previous operation left off.

        + *

        If the response of a ListWorkerConfigurations operation is truncated, it will include a + * NextToken. Send this NextToken in a subsequent request to continue listing from where the + * previous operation left off.

        */ nextToken?: string; diff --git a/clients/client-kafkaconnect/src/protocols/Aws_restJson1.ts b/clients/client-kafkaconnect/src/protocols/Aws_restJson1.ts index 94319d782cae..38f472510aae 100644 --- a/clients/client-kafkaconnect/src/protocols/Aws_restJson1.ts +++ b/clients/client-kafkaconnect/src/protocols/Aws_restJson1.ts @@ -23,6 +23,7 @@ import { CreateWorkerConfigurationCommandOutput, } from "../commands/CreateWorkerConfigurationCommand"; import { DeleteConnectorCommandInput, DeleteConnectorCommandOutput } from "../commands/DeleteConnectorCommand"; +import { DeleteCustomPluginCommandInput, DeleteCustomPluginCommandOutput } from "../commands/DeleteCustomPluginCommand"; import { DescribeConnectorCommandInput, DescribeConnectorCommandOutput } from "../commands/DescribeConnectorCommand"; import { DescribeCustomPluginCommandInput, @@ -90,6 +91,7 @@ import { ScaleOutPolicyDescription, ScaleOutPolicyUpdate, ServiceUnavailableException, + StateDescription, TooManyRequestsException, UnauthorizedException, Vpc, @@ -252,6 +254,35 @@ export const serializeAws_restJson1DeleteConnectorCommand = async ( }); }; +export const serializeAws_restJson1DeleteCustomPluginCommand = async ( + input: DeleteCustomPluginCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = + `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/v1/custom-plugins/{customPluginArn}"; + if (input.customPluginArn !== undefined) { + const labelValue: string = input.customPluginArn; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: customPluginArn."); + } + resolvedPath = resolvedPath.replace("{customPluginArn}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: customPluginArn."); + } + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "DELETE", + headers, + path: resolvedPath, + body, + }); +}; + export const serializeAws_restJson1DescribeConnectorCommand = async ( input: DescribeConnectorCommandInput, context: __SerdeContext @@ -746,6 +777,72 @@ const deserializeAws_restJson1DeleteConnectorCommandError = async ( } }; +export const deserializeAws_restJson1DeleteCustomPluginCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1DeleteCustomPluginCommandError(output, context); + } + const contents: DeleteCustomPluginCommandOutput = { + $metadata: deserializeMetadata(output), + customPluginArn: undefined, + customPluginState: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.customPluginArn !== undefined && data.customPluginArn !== null) { + contents.customPluginArn = __expectString(data.customPluginArn); + } + if (data.customPluginState !== undefined && data.customPluginState !== null) { + contents.customPluginState = __expectString(data.customPluginState); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1DeleteCustomPluginCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await 
parseBody(output.body, context), + }; + let response: __BaseException; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "BadRequestException": + case "com.amazonaws.kafkaconnect#BadRequestException": + throw await deserializeAws_restJson1BadRequestExceptionResponse(parsedOutput, context); + case "ForbiddenException": + case "com.amazonaws.kafkaconnect#ForbiddenException": + throw await deserializeAws_restJson1ForbiddenExceptionResponse(parsedOutput, context); + case "InternalServerErrorException": + case "com.amazonaws.kafkaconnect#InternalServerErrorException": + throw await deserializeAws_restJson1InternalServerErrorExceptionResponse(parsedOutput, context); + case "NotFoundException": + case "com.amazonaws.kafkaconnect#NotFoundException": + throw await deserializeAws_restJson1NotFoundExceptionResponse(parsedOutput, context); + case "ServiceUnavailableException": + case "com.amazonaws.kafkaconnect#ServiceUnavailableException": + throw await deserializeAws_restJson1ServiceUnavailableExceptionResponse(parsedOutput, context); + case "TooManyRequestsException": + case "com.amazonaws.kafkaconnect#TooManyRequestsException": + throw await deserializeAws_restJson1TooManyRequestsExceptionResponse(parsedOutput, context); + case "UnauthorizedException": + case "com.amazonaws.kafkaconnect#UnauthorizedException": + throw await deserializeAws_restJson1UnauthorizedExceptionResponse(parsedOutput, context); + default: + const parsedBody = parsedOutput.body; + response = new __BaseException({ + name: parsedBody.code || parsedBody.Code || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + }); + throw __decorateServiceException(response, parsedBody); + } +}; + export const deserializeAws_restJson1DescribeConnectorCommand = async ( output: __HttpResponse, context: __SerdeContext @@ -770,6 +867,7 @@ export const deserializeAws_restJson1DescribeConnectorCommand = async ( logDelivery: 
undefined, plugins: undefined, serviceExecutionRoleArn: undefined, + stateDescription: undefined, workerConfiguration: undefined, }; const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); @@ -824,6 +922,9 @@ export const deserializeAws_restJson1DescribeConnectorCommand = async ( if (data.serviceExecutionRoleArn !== undefined && data.serviceExecutionRoleArn !== null) { contents.serviceExecutionRoleArn = __expectString(data.serviceExecutionRoleArn); } + if (data.stateDescription !== undefined && data.stateDescription !== null) { + contents.stateDescription = deserializeAws_restJson1StateDescription(data.stateDescription, context); + } if (data.workerConfiguration !== undefined && data.workerConfiguration !== null) { contents.workerConfiguration = deserializeAws_restJson1WorkerConfigurationDescription( data.workerConfiguration, @@ -892,6 +993,7 @@ export const deserializeAws_restJson1DescribeCustomPluginCommand = async ( description: undefined, latestRevision: undefined, name: undefined, + stateDescription: undefined, }; const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); if (data.creationTime !== undefined && data.creationTime !== null) { @@ -912,6 +1014,9 @@ export const deserializeAws_restJson1DescribeCustomPluginCommand = async ( if (data.name !== undefined && data.name !== null) { contents.name = __expectString(data.name); } + if (data.stateDescription !== undefined && data.stateDescription !== null) { + contents.stateDescription = deserializeAws_restJson1StateDescription(data.stateDescription, context); + } return Promise.resolve(contents); }; @@ -2073,6 +2178,13 @@ const deserializeAws_restJson1ScaleOutPolicyDescription = ( } as any; }; +const deserializeAws_restJson1StateDescription = (output: any, context: __SerdeContext): StateDescription => { + return { + code: __expectString(output.code), + message: __expectString(output.message), 
+ } as any; +}; + const deserializeAws_restJson1VpcDescription = (output: any, context: __SerdeContext): VpcDescription => { return { securityGroups: diff --git a/clients/client-kendra/src/models/models_0.ts b/clients/client-kendra/src/models/models_0.ts index 9e33f8a89efb..2d0061c2df87 100644 --- a/clients/client-kendra/src/models/models_0.ts +++ b/clients/client-kendra/src/models/models_0.ts @@ -198,7 +198,7 @@ export enum EntityType { } /** - *

        Provides the configuration information of users or groups in + *

        Provides the configuration information for users or groups in + * your Amazon Web Services SSO identity source to grant access to your Amazon Kendra * experience.

        */ @@ -406,7 +406,7 @@ export enum Persona { } /** - *

        Provides the configuration information of users or groups in your + *

        Provides the configuration information for users or groups in your * Amazon Web Services SSO identity source for access to your Amazon Kendra experience. * Specific permissions are defined for each user or group once they are * granted access to your Amazon Kendra experience.

        @@ -1844,7 +1844,7 @@ export enum ConfluenceVersion { } /** - *

        Provides information for connecting to an Amazon VPC.

        + *

        Provides the configuration information to connect to an Amazon VPC.

        */ export interface DataSourceVpcConfiguration { /** @@ -1873,8 +1873,8 @@ export namespace DataSourceVpcConfiguration { } /** - *

        Provides configuration information for data sources that connect - * to Confluence.

        + *

        Provides the configuration information to connect to Confluence + * as your data source.

        */ export interface ConfluenceConfiguration { /** @@ -2045,7 +2045,7 @@ export namespace ColumnConfiguration { } /** - *

        Provides the information necessary to connect to a + *

        Provides the configuration information that's required to connect to a * database.

        */ export interface ConnectionConfiguration { @@ -2104,7 +2104,7 @@ export enum QueryIdentifiersEnclosingOption { } /** - *

        Provides information that configures Amazon Kendra to use a SQL + *

        Provides the configuration information to use a SQL * database.

        */ export interface SqlConfiguration { @@ -2135,7 +2135,7 @@ export namespace SqlConfiguration { } /** - *

        Provides the information necessary to connect a database to an + *

        Provides the configuration information to connect to an
 * index.

        */ export interface DatabaseConfiguration { @@ -2145,12 +2145,12 @@ export interface DatabaseConfiguration { DatabaseEngineType: DatabaseEngineType | string | undefined; /** - *

        The information necessary to connect to a database.

        + *

        Configuration information that's required to connect to a database.

        */ ConnectionConfiguration: ConnectionConfiguration | undefined; /** - *

        Provides information for connecting to an Amazon VPC.

        + *

        Provides the configuration information to connect to an Amazon VPC.

        */ VpcConfiguration?: DataSourceVpcConfiguration; @@ -2208,7 +2208,7 @@ export interface FsxConfiguration { FileSystemType: FsxFileSystemType | string | undefined; /** - *

        Provides the configuration information for connecting to an + *

        Configuration information for connecting to an * Amazon Virtual Private Cloud for your Amazon FSx. Your Amazon FSx * instance must reside inside your VPC.

        */ @@ -2228,8 +2228,8 @@ export interface FsxConfiguration { * Amazon FSx file system for Windows.

        *
      • *
      • - *

        password—The password of the active directory user with - * read and mounting access Amazon FSx Windows file system.

        + *

        password—The password of the Active Directory user account with + * read and mounting access to the Amazon FSx Windows file system.

        *
      • *
      */ @@ -2258,7 +2258,7 @@ export interface FsxConfiguration { /** *

      A list of DataSourceToIndexFieldMapping objects that * map Amazon FSx data source attributes or field names to Amazon Kendra - * index field names in Amazon Kendra. To create custom fields, use the + * index field names. To create custom fields, use the * UpdateIndex API before you map to Amazon FSx fields. * For more information, see Mapping * data source fields. The Amazon FSx data source field names @@ -2277,8 +2277,8 @@ export namespace FsxConfiguration { } /** - *

      Provides configuration information for data sources that connect - * to Google Drive.

      + *

      Provides the configuration information to connect to + * Google Drive as your data source.

      */ export interface GoogleDriveConfiguration { /** @@ -2382,8 +2382,8 @@ export namespace OneDriveUsers { } /** - *

      Provides configuration information for data sources that connect - * to OneDrive.

      + *

      Provides the configuration information to connect + * to OneDrive as your data source.

      */ export interface OneDriveConfiguration { /** @@ -2475,8 +2475,8 @@ export namespace DocumentsMetadataConfiguration { } /** - *

      Provides configuration information for a data source to index - * documents in an Amazon S3 bucket.

      + *

      Provides the configuration information to connect to + * an Amazon S3 bucket.

      */ export interface S3DataSourceConfiguration { /** @@ -2577,7 +2577,7 @@ export enum SalesforceChatterFeedIncludeFilterType { } /** - *

      Defines configuration for syncing a Salesforce chatter feed. The + *

      The configuration information for syncing a Salesforce chatter feed. The * contents of the object comes from the Salesforce FeedItem * table.

      */ @@ -2666,7 +2666,7 @@ export enum SalesforceKnowledgeArticleState { } /** - *

      Provides configuration information for standard Salesforce + *

      Configuration information for standard Salesforce * knowledge articles.

      */ export interface SalesforceStandardKnowledgeArticleTypeConfiguration { @@ -2699,7 +2699,7 @@ export namespace SalesforceStandardKnowledgeArticleTypeConfiguration { } /** - *

      Specifies configuration information for the knowledge article + *

      Provides the configuration information for the knowledge article * types that Amazon Kendra indexes. Amazon Kendra indexes standard knowledge * articles and the standard fields of knowledge articles, or the * custom fields of custom knowledge articles, but not both

      @@ -2713,13 +2713,13 @@ export interface SalesforceKnowledgeArticleConfiguration { IncludedStates: (SalesforceKnowledgeArticleState | string)[] | undefined; /** - *

      Provides configuration information for standard Salesforce + *

      Configuration information for standard Salesforce * knowledge articles.

      */ StandardKnowledgeArticleTypeConfiguration?: SalesforceStandardKnowledgeArticleTypeConfiguration; /** - *

      Provides configuration information for custom Salesforce knowledge + *

      Configuration information for custom Salesforce knowledge * articles.

      */ CustomKnowledgeArticleTypeConfigurations?: SalesforceCustomKnowledgeArticleTypeConfiguration[]; @@ -2735,7 +2735,7 @@ export namespace SalesforceKnowledgeArticleConfiguration { } /** - *

      Provides configuration information for processing attachments to + *

      Provides the configuration information for processing attachments to * Salesforce standard objects.

      */ export interface SalesforceStandardObjectAttachmentConfiguration { @@ -2781,7 +2781,7 @@ export enum SalesforceStandardObjectName { } /** - *

      Specifies configuration information for indexing a single standard + *

      Provides the configuration information for indexing a single standard * object.

      */ export interface SalesforceStandardObjectConfiguration { @@ -2891,7 +2891,7 @@ export interface SalesforceConfiguration { CrawlAttachments?: boolean; /** - *

      Provides configuration information for processing attachments to + *

      Configuration information for processing attachments to * Salesforce standard objects.

      */ StandardObjectAttachmentConfiguration?: SalesforceStandardObjectAttachmentConfiguration; @@ -2932,7 +2932,7 @@ export enum ServiceNowAuthenticationType { } /** - *

      Provides configuration information for crawling knowledge articles + *

      Provides the configuration information for crawling knowledge articles * in the ServiceNow site.

      */ export interface ServiceNowKnowledgeArticleConfiguration { @@ -2998,7 +2998,7 @@ export namespace ServiceNowKnowledgeArticleConfiguration { } /** - *

      Provides configuration information for crawling service catalog + *

      Provides the configuration information for crawling service catalog * items in the ServiceNow site

      */ export interface ServiceNowServiceCatalogConfiguration { @@ -3062,8 +3062,8 @@ export enum ServiceNowBuildVersionType { } /** - *

      Provides configuration information required to connect to a - * ServiceNow data source.

      + *

      Provides the configuration information to connect to + * ServiceNow as your data source.

      */ export interface ServiceNowConfiguration { /** @@ -3089,13 +3089,13 @@ export interface ServiceNowConfiguration { ServiceNowBuildVersion: ServiceNowBuildVersionType | string | undefined; /** - *

      Provides configuration information for crawling knowledge articles + *

      Configuration information for crawling knowledge articles * in the ServiceNow site.

      */ KnowledgeArticleConfiguration?: ServiceNowKnowledgeArticleConfiguration; /** - *

      Provides configuration information for crawling service catalogs + *

      Configuration information for crawling service catalogs * in the ServiceNow site.

      */ ServiceCatalogConfiguration?: ServiceNowServiceCatalogConfiguration; @@ -3133,8 +3133,8 @@ export enum SharePointVersion { } /** - *

      Provides configuration information for connecting to a Microsoft - * SharePoint data source.

      + *

      Provides the configuration information to connect to Microsoft + * SharePoint as your data source.

      */ export interface SharePointConfiguration { /** @@ -3203,7 +3203,7 @@ export interface SharePointConfiguration { ExclusionPatterns?: string[]; /** - *

      Provides information for connecting to an Amazon VPC.

      + *

      Provides the configuration information to connect to an Amazon VPC.

      */ VpcConfiguration?: DataSourceVpcConfiguration; @@ -3294,7 +3294,7 @@ export enum WebCrawlerMode { } /** - *

      Provides the configuration information of the seed or starting point URLs to crawl.

      + *

      Provides the configuration information for the seed or starting point URLs to crawl.

      *

      * When selecting websites to index, you must adhere to * the Amazon Acceptable Use Policy @@ -3347,7 +3347,7 @@ export namespace SeedUrlConfiguration { } /** - *

      Provides the configuration information of the sitemap URLs to crawl.

      + *

      Provides the configuration information for the sitemap URLs to crawl.

      *

      * When selecting websites to index, you must adhere to * the Amazon Acceptable Use Policy @@ -3389,7 +3389,7 @@ export namespace SiteMapsConfiguration { */ export interface Urls { /** - *

      Provides the configuration of the seed or starting point URLs of the websites + *

      Configuration of the seed or starting point URLs of the websites * you want to crawl.

      *

      You can choose to crawl only the website host names, or the website host names * with subdomains, or the website host names with subdomains and other domains @@ -3399,7 +3399,7 @@ export interface Urls { SeedUrlConfiguration?: SeedUrlConfiguration; /** - *

      Provides the configuration of the sitemap URLs of the websites you want to crawl.

      + *

      Configuration of the sitemap URLs of the websites you want to crawl.

      *

      Only URLs belonging to the same website host names are crawled. You can list up to * three sitemap URLs.

      */ @@ -3487,7 +3487,7 @@ export interface WebCrawlerConfiguration { UrlExclusionPatterns?: string[]; /** - *

      Provides configuration information required to connect to your internal + *

      Configuration information required to connect to your internal * websites via a web proxy.

      *

      You must provide the website host name and port number. For example, the * host name of https://a.example.com/page1.html is "a.example.com" and the @@ -3499,7 +3499,7 @@ export interface WebCrawlerConfiguration { ProxyConfiguration?: ProxyConfiguration; /** - *

      Provides configuration information required to connect to websites using + *

      Configuration information required to connect to websites using * authentication.

      *

      You can connect to websites using basic authentication of user name and password.

      *

      You must provide the website host name and port number. For example, the host name @@ -3602,42 +3602,42 @@ export namespace WorkDocsConfiguration { } /** - *

      Configuration information for an Amazon Kendra data source.

      + *

      Provides the configuration information for an Amazon Kendra data source.

      */ export interface DataSourceConfiguration { /** - *

      Provides information to create a data source connector for a - * document repository in an Amazon S3 bucket.

      + *

      Provides the configuration information to connect to an Amazon S3 + * bucket as your data source.

      */ S3Configuration?: S3DataSourceConfiguration; /** - *

      Provides information necessary to create a data source connector - * for a Microsoft SharePoint site.

      + *

      Provides the configuration information to connect to Microsoft SharePoint + * as your data source.

      */ SharePointConfiguration?: SharePointConfiguration; /** - *

      Provides information necessary to create a data source connector - * for a database.

      + *

      Provides the configuration information to connect to a database as + * your data source.

      */ DatabaseConfiguration?: DatabaseConfiguration; /** - *

      Provides configuration information for data sources that connect - * to a Salesforce site.

      + *

      Provides the configuration information to connect to + * Salesforce as your data source.

      */ SalesforceConfiguration?: SalesforceConfiguration; /** - *

      Provides configuration for data sources that connect to Microsoft - * OneDrive.

      + *

      Provides the configuration information to connect to Microsoft + * OneDrive as your data source.

      */ OneDriveConfiguration?: OneDriveConfiguration; /** - *

      Provides configuration for data sources that connect to ServiceNow - * instances.

      + *

      Provides the configuration information to connect to ServiceNow + * as your data source.

      */ ServiceNowConfiguration?: ServiceNowConfiguration; @@ -3648,8 +3648,8 @@ export interface DataSourceConfiguration { ConfluenceConfiguration?: ConfluenceConfiguration; /** - *

      Provides configuration for data sources that connect to Google - * Drive.

      + *

      Provides the configuration information to connect to Google + * Drive as your data source.

      */ GoogleDriveConfiguration?: GoogleDriveConfiguration; @@ -3743,7 +3743,7 @@ export interface CreateDataSourceRequest { Type: DataSourceType | string | undefined; /** - *

      The connector configuration information that is required to access the + *

      Configuration information that is required to access the data source * repository.

      *

      You can't specify the Configuration parameter when the * Type parameter is set to CUSTOM. If you do, @@ -3842,7 +3842,7 @@ export namespace CreateDataSourceResponse { } /** - *

      Configuration information for your content sources, such as data sources, + *

      Provides the configuration information for your content sources, such as data sources, * FAQs, and content indexed directly via BatchPutDocument.

      */ export interface ContentSourceConfiguration { @@ -3874,7 +3874,7 @@ export namespace ContentSourceConfiguration { } /** - *

      Configuration information for the identifiers of your users.

      + *

      Provides the configuration information for the identifiers of your users.

      */ export interface UserIdentityConfiguration { /** @@ -3899,7 +3899,7 @@ export namespace UserIdentityConfiguration { } /** - *

      Specifies the configuration information for your Amazon Kendra experience. This includes + *

      Provides the configuration information for your Amazon Kendra experience. This includes * the data source IDs and/or FAQ IDs, and user or group information to grant access * to your Amazon Kendra experience.

      */ @@ -3947,7 +3947,7 @@ export interface CreateExperienceRequest { RoleArn?: string; /** - *

      Provides the configuration information for your Amazon Kendra experience. This includes + *

      Configuration information for your Amazon Kendra experience. This includes * ContentSourceConfiguration, which specifies the data source IDs * and/or FAQ IDs, and UserIdentityConfiguration, which specifies the * user or group information to grant access to your Amazon Kendra experience.

      @@ -4162,7 +4162,7 @@ export namespace UserGroupResolutionConfiguration { } /** - *

      Configuration information for the JSON token type.

      + *

      Provides the configuration information for the JSON token type.

      */ export interface JsonTokenTypeConfiguration { /** @@ -4191,7 +4191,7 @@ export enum KeyLocation { } /** - *

      Configuration information for the JWT token type.

      + *

      Provides the configuration information for the JWT token type.

      */ export interface JwtTokenTypeConfiguration { /** @@ -4240,7 +4240,7 @@ export namespace JwtTokenTypeConfiguration { } /** - *

      Provides configuration information for a token configuration.

      + *

      Provides the configuration information for a token.

      */ export interface UserTokenConfiguration { /** @@ -4772,8 +4772,7 @@ export interface DescribeDataSourceResponse { Type?: DataSourceType | string; /** - *

      Information that describes where the data source is located and how - * the data source is configured. The specific information in the description + *

      Describes how the data source is configured. The specific information in the description * depends on the data source provider.

      */ Configuration?: DataSourceConfiguration; @@ -4875,7 +4874,7 @@ export enum EndpointType { } /** - *

      Provides the configuration information of the endpoint for your Amazon Kendra + *

      Provides the configuration information for the endpoint for your Amazon Kendra * experience.

      */ export interface ExperienceEndpoint { @@ -7265,7 +7264,7 @@ export namespace ListIndicesRequest { } /** - *

      A summary of information about an index.

      + *

      A summary of information on the configuration of an index.

      */ export interface IndexConfigurationSummary { /** @@ -7316,7 +7315,7 @@ export namespace IndexConfigurationSummary { export interface ListIndicesResponse { /** - *

      An array of summary information for one or more indexes.

      + *

      An array of summary information on the configuration of one or more indexes.

      */ IndexConfigurationSummaryItems?: IndexConfigurationSummary[]; @@ -7878,9 +7877,33 @@ export namespace SortingConfiguration { } /** - *

      - * Data source information for user context filtering. - *

      + *

      Provides the configuration information for suggested query spell corrections.

      + *

      Suggested spell corrections are based on words that appear in your indexed documents + * and how closely a corrected word matches a misspelled word.

      + *

      This feature is designed with certain defaults or limits. For information on the + * current limits and how to request more support for some limits, see the + * Spell + * Checker documentation.

      + */ +export interface SpellCorrectionConfiguration { + /** + *

      + * TRUE to suggest spell corrections for queries.

      + */ + IncludeQuerySpellCheckSuggestions: boolean | undefined; +} + +export namespace SpellCorrectionConfiguration { + /** + * @internal + */ + export const filterSensitiveLog = (obj: SpellCorrectionConfiguration): any => ({ + ...obj, + }); +} + +/** + *

      Data source information for user context filtering.

      */ export interface DataSourceGroup { /** @@ -8132,6 +8155,66 @@ export namespace QueryResultItem { }); } +/** + *

      A corrected misspelled word in a query.

      + */ +export interface Correction { + /** + *

      The zero-based location in the response string or text where + * the corrected word starts.

      + */ + BeginOffset?: number; + + /** + *

      The zero-based location in the response string or text where + * the corrected word ends.

      + */ + EndOffset?: number; + + /** + *

      The string or text of a misspelled word in a query.

      + */ + Term?: string; + + /** + *

      The string or text of a corrected misspelled word in a query.

      + */ + CorrectedTerm?: string; +} + +export namespace Correction { + /** + * @internal + */ + export const filterSensitiveLog = (obj: Correction): any => ({ + ...obj, + }); +} + +/** + *

      A query with suggested spell corrections.

      + */ +export interface SpellCorrectedQuery { + /** + *

      The query with the suggested spell corrections.

      + */ + SuggestedQueryText?: string; + + /** + *

      The corrected misspelled word or words in a query.

      + */ + Corrections?: Correction[]; +} + +export namespace SpellCorrectedQuery { + /** + * @internal + */ + export const filterSensitiveLog = (obj: SpellCorrectedQuery): any => ({ + ...obj, + }); +} + export enum WarningCode { QUERY_LANGUAGE_INVALID_SYNTAX = "QUERY_LANGUAGE_INVALID_SYNTAX", } @@ -8194,6 +8277,11 @@ export interface QueryResult { * with advanced query syntax.

      */ Warnings?: Warning[]; + + /** + *

      A list of information related to suggested spell corrections for a query.

      + */ + SpellCorrectedQueries?: SpellCorrectedQuery[]; } export namespace QueryResult { @@ -8469,7 +8557,7 @@ export interface UpdateDataSourceRequest { IndexId: string | undefined; /** - *

      Configuration information for an Amazon Kendra data source.

      + *

      Configuration information for an Amazon Kendra data source you want to update.

      */ Configuration?: DataSourceConfiguration; @@ -8543,8 +8631,7 @@ export interface UpdateExperienceRequest { RoleArn?: string; /** - *

      Provides the user configuration information. This includes the Amazon Web Services SSO - * field name that contains the identifiers of your users, such as their emails.

      + *

      Configuration information for your Amazon Kendra experience you want to update.

      */ Configuration?: ExperienceConfiguration; @@ -8586,7 +8673,7 @@ export interface UpdateIndexRequest { Description?: string; /** - *

      The document metadata to update.

      + *

      The document metadata you want to update.

      */ DocumentMetadataConfigurationUpdates?: DocumentMetadataConfiguration[]; @@ -8989,6 +9076,11 @@ export interface QueryRequest { * email address, as the VisitorId.

      */ VisitorId?: string; + + /** + *

      Enables suggested spell corrections for queries.

      + */ + SpellCorrectionConfiguration?: SpellCorrectionConfiguration; } export namespace QueryRequest { diff --git a/clients/client-kendra/src/protocols/Aws_json1_1.ts b/clients/client-kendra/src/protocols/Aws_json1_1.ts index 462c0d1c13e3..3cf8534a2521 100644 --- a/clients/client-kendra/src/protocols/Aws_json1_1.ts +++ b/clients/client-kendra/src/protocols/Aws_json1_1.ts @@ -185,6 +185,7 @@ import { ConfluenceSpaceToIndexFieldMapping, ConnectionConfiguration, ContentSourceConfiguration, + Correction, CreateDataSourceRequest, CreateDataSourceResponse, CreateExperienceRequest, @@ -338,6 +339,8 @@ import { SharePointConfiguration, SiteMapsConfiguration, SortingConfiguration, + SpellCorrectedQuery, + SpellCorrectionConfiguration, SqlConfiguration, StartDataSourceSyncJobRequest, StartDataSourceSyncJobResponse, @@ -6139,6 +6142,13 @@ const serializeAws_json1_1QueryRequest = (input: QueryRequest, context: __SerdeC input.SortingConfiguration !== null && { SortingConfiguration: serializeAws_json1_1SortingConfiguration(input.SortingConfiguration, context), }), + ...(input.SpellCorrectionConfiguration !== undefined && + input.SpellCorrectionConfiguration !== null && { + SpellCorrectionConfiguration: serializeAws_json1_1SpellCorrectionConfiguration( + input.SpellCorrectionConfiguration, + context + ), + }), ...(input.UserContext !== undefined && input.UserContext !== null && { UserContext: serializeAws_json1_1UserContext(input.UserContext, context) }), ...(input.VisitorId !== undefined && input.VisitorId !== null && { VisitorId: input.VisitorId }), @@ -6654,6 +6664,18 @@ const serializeAws_json1_1SortingConfiguration = (input: SortingConfiguration, c }; }; +const serializeAws_json1_1SpellCorrectionConfiguration = ( + input: SpellCorrectionConfiguration, + context: __SerdeContext +): any => { + return { + ...(input.IncludeQuerySpellCheckSuggestions !== undefined && + input.IncludeQuerySpellCheckSuggestions !== null && { + IncludeQuerySpellCheckSuggestions: 
input.IncludeQuerySpellCheckSuggestions, + }), + }; +}; + const serializeAws_json1_1SqlConfiguration = (input: SqlConfiguration, context: __SerdeContext): any => { return { ...(input.QueryIdentifiersEnclosingOption !== undefined && @@ -7580,6 +7602,27 @@ const deserializeAws_json1_1ContentSourceConfiguration = ( } as any; }; +const deserializeAws_json1_1Correction = (output: any, context: __SerdeContext): Correction => { + return { + BeginOffset: __expectInt32(output.BeginOffset), + CorrectedTerm: __expectString(output.CorrectedTerm), + EndOffset: __expectInt32(output.EndOffset), + Term: __expectString(output.Term), + } as any; +}; + +const deserializeAws_json1_1CorrectionList = (output: any, context: __SerdeContext): Correction[] => { + const retVal = (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_json1_1Correction(entry, context); + }); + return retVal; +}; + const deserializeAws_json1_1CreateDataSourceResponse = ( output: any, context: __SerdeContext @@ -9052,6 +9095,10 @@ const deserializeAws_json1_1QueryResult = (output: any, context: __SerdeContext) output.ResultItems !== undefined && output.ResultItems !== null ? deserializeAws_json1_1QueryResultItemList(output.ResultItems, context) : undefined, + SpellCorrectedQueries: + output.SpellCorrectedQueries !== undefined && output.SpellCorrectedQueries !== null + ? 
deserializeAws_json1_1SpellCorrectedQueryList(output.SpellCorrectedQueries, context) + : undefined, TotalNumberOfResults: __expectInt32(output.TotalNumberOfResults), Warnings: output.Warnings !== undefined && output.Warnings !== null @@ -9662,6 +9709,28 @@ const deserializeAws_json1_1SnapshotsDataRecords = (output: any, context: __Serd return retVal; }; +const deserializeAws_json1_1SpellCorrectedQuery = (output: any, context: __SerdeContext): SpellCorrectedQuery => { + return { + Corrections: + output.Corrections !== undefined && output.Corrections !== null + ? deserializeAws_json1_1CorrectionList(output.Corrections, context) + : undefined, + SuggestedQueryText: __expectString(output.SuggestedQueryText), + } as any; +}; + +const deserializeAws_json1_1SpellCorrectedQueryList = (output: any, context: __SerdeContext): SpellCorrectedQuery[] => { + const retVal = (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_json1_1SpellCorrectedQuery(entry, context); + }); + return retVal; +}; + const deserializeAws_json1_1SqlConfiguration = (output: any, context: __SerdeContext): SqlConfiguration => { return { QueryIdentifiersEnclosingOption: __expectString(output.QueryIdentifiersEnclosingOption), diff --git a/clients/client-keyspaces/.gitignore b/clients/client-keyspaces/.gitignore new file mode 100644 index 000000000000..54f14c9aef25 --- /dev/null +++ b/clients/client-keyspaces/.gitignore @@ -0,0 +1,9 @@ +/node_modules/ +/build/ +/coverage/ +/docs/ +/dist-* +*.tsbuildinfo +*.tgz +*.log +package-lock.json diff --git a/clients/client-keyspaces/LICENSE b/clients/client-keyspaces/LICENSE new file mode 100644 index 000000000000..8efcd8d5c5b7 --- /dev/null +++ b/clients/client-keyspaces/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2018-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/clients/client-keyspaces/README.md b/clients/client-keyspaces/README.md new file mode 100644 index 000000000000..7353f4ad36cc --- /dev/null +++ b/clients/client-keyspaces/README.md @@ -0,0 +1,219 @@ +# @aws-sdk/client-keyspaces + +[![NPM version](https://img.shields.io/npm/v/@aws-sdk/client-keyspaces/latest.svg)](https://www.npmjs.com/package/@aws-sdk/client-keyspaces) +[![NPM downloads](https://img.shields.io/npm/dm/@aws-sdk/client-keyspaces.svg)](https://www.npmjs.com/package/@aws-sdk/client-keyspaces) + +## Description + +AWS SDK for JavaScript Keyspaces Client for Node.js, Browser and React Native. + +

      Amazon Keyspaces (for Apache Cassandra) is a scalable, +highly available, and managed Apache Cassandra-compatible database service. Amazon Keyspaces makes it easy to migrate, +run, and scale Cassandra workloads in the Amazon Web Services Cloud. With just a few clicks on the Amazon Web Services Management Console or a few lines of code, +you can create keyspaces and tables in Amazon Keyspaces, without deploying any infrastructure or installing software.

      + +

      In addition to supporting Cassandra Query Language (CQL) requests via open-source Cassandra drivers, +Amazon Keyspaces supports data definition language (DDL) operations to manage keyspaces and tables using the Amazon Web Services SDK and CLI. This API reference describes +the supported DDL operations in detail.

      + +

      For the list of all supported CQL APIs, see Supported Cassandra APIs, operations, and data types +in Amazon Keyspaces in the Amazon Keyspaces Developer +Guide.

      + +

      To learn how Amazon Keyspaces API actions are tracked in CloudTrail, see Amazon Keyspaces information in CloudTrail in the Amazon Keyspaces Developer +Guide.

      + +

      For more information about Amazon Web Services APIs, for example how to implement retry logic or how to sign Amazon Web Services API requests, see Amazon Web Services APIs in the General Reference.

      + +## Installing + +To install the this package, simply type add or install @aws-sdk/client-keyspaces +using your favorite package manager: + +- `npm install @aws-sdk/client-keyspaces` +- `yarn add @aws-sdk/client-keyspaces` +- `pnpm add @aws-sdk/client-keyspaces` + +## Getting Started + +### Import + +The AWS SDK is modulized by clients and commands. +To send a request, you only need to import the `KeyspacesClient` and +the commands you need, for example `CreateKeyspaceCommand`: + +```js +// ES5 example +const { KeyspacesClient, CreateKeyspaceCommand } = require("@aws-sdk/client-keyspaces"); +``` + +```ts +// ES6+ example +import { KeyspacesClient, CreateKeyspaceCommand } from "@aws-sdk/client-keyspaces"; +``` + +### Usage + +To send a request, you: + +- Initiate client with configuration (e.g. credentials, region). +- Initiate command with input parameters. +- Call `send` operation on client with command object as input. +- If you are using a custom http handler, you may call `destroy()` to close open connections. + +```js +// a client can be shared by different commands. +const client = new KeyspacesClient({ region: "REGION" }); + +const params = { + /** input parameters */ +}; +const command = new CreateKeyspaceCommand(params); +``` + +#### Async/await + +We recommend using [await](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Operators/await) +operator to wait for the promise returned by send operation as follows: + +```js +// async/await. +try { + const data = await client.send(command); + // process data. +} catch (error) { + // error handling. +} finally { + // finally. +} +``` + +Async-await is clean, concise, intuitive, easy to debug and has better error handling +as compared to using Promise chains or callbacks. + +#### Promises + +You can also use [Promise chaining](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Using_promises#chaining) +to execute send operation. 
+ +```js +client.send(command).then( + (data) => { + // process data. + }, + (error) => { + // error handling. + } +); +``` + +Promises can also be called using `.catch()` and `.finally()` as follows: + +```js +client + .send(command) + .then((data) => { + // process data. + }) + .catch((error) => { + // error handling. + }) + .finally(() => { + // finally. + }); +``` + +#### Callbacks + +We do not recommend using callbacks because of [callback hell](http://callbackhell.com/), +but they are supported by the send operation. + +```js +// callbacks. +client.send(command, (err, data) => { + // proccess err and data. +}); +``` + +#### v2 compatible style + +The client can also send requests using v2 compatible style. +However, it results in a bigger bundle size and may be dropped in next major version. More details in the blog post +on [modular packages in AWS SDK for JavaScript](https://aws.amazon.com/blogs/developer/modular-packages-in-aws-sdk-for-javascript/) + +```ts +import * as AWS from "@aws-sdk/client-keyspaces"; +const client = new AWS.Keyspaces({ region: "REGION" }); + +// async/await. +try { + const data = await client.createKeyspace(params); + // process data. +} catch (error) { + // error handling. +} + +// Promises. +client + .createKeyspace(params) + .then((data) => { + // process data. + }) + .catch((error) => { + // error handling. + }); + +// callbacks. +client.createKeyspace(params, (err, data) => { + // proccess err and data. +}); +``` + +### Troubleshooting + +When the service returns an exception, the error will include the exception information, +as well as response metadata (e.g. request id). + +```js +try { + const data = await client.send(command); + // process data. +} catch (error) { + const { requestId, cfId, extendedRequestId } = error.$metadata; + console.log({ requestId, cfId, extendedRequestId }); + /** + * The keys within exceptions are also parsed. 
+ * You can access them by specifying exception names: + * if (error.name === 'SomeServiceException') { + * const value = error.specialKeyInException; + * } + */ +} +``` + +## Getting Help + +Please use these community resources for getting help. +We use the GitHub issues for tracking bugs and feature requests, but have limited bandwidth to address them. + +- Visit [Developer Guide](https://docs.aws.amazon.com/sdk-for-javascript/v3/developer-guide/welcome.html) + or [API Reference](https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/index.html). +- Check out the blog posts tagged with [`aws-sdk-js`](https://aws.amazon.com/blogs/developer/tag/aws-sdk-js/) + on AWS Developer Blog. +- Ask a question on [StackOverflow](https://stackoverflow.com/questions/tagged/aws-sdk-js) and tag it with `aws-sdk-js`. +- Join the AWS JavaScript community on [gitter](https://gitter.im/aws/aws-sdk-js-v3). +- If it turns out that you may have found a bug, please [open an issue](https://github.com/aws/aws-sdk-js-v3/issues/new/choose). + +To test your universal JavaScript code in Node.js, browser and react-native environments, +visit our [code samples repo](https://github.com/aws-samples/aws-sdk-js-tests). + +## Contributing + +This client code is generated automatically. Any modifications will be overwritten the next time the `@aws-sdk/client-keyspaces` package is updated. +To contribute to client you can check our [generate clients scripts](https://github.com/aws/aws-sdk-js-v3/tree/main/scripts/generate-clients). + +## License + +This SDK is distributed under the +[Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0), +see LICENSE for more information. 
diff --git a/clients/client-keyspaces/package.json b/clients/client-keyspaces/package.json new file mode 100644 index 000000000000..7b2445184587 --- /dev/null +++ b/clients/client-keyspaces/package.json @@ -0,0 +1,93 @@ +{ + "name": "@aws-sdk/client-keyspaces", + "description": "AWS SDK for JavaScript Keyspaces Client for Node.js, Browser and React Native", + "version": "3.0.0", + "scripts": { + "build": "concurrently 'yarn:build:cjs' 'yarn:build:es' 'yarn:build:types'", + "build:cjs": "tsc -p tsconfig.cjs.json", + "build:docs": "typedoc", + "build:es": "tsc -p tsconfig.es.json", + "build:types": "tsc -p tsconfig.types.json", + "build:types:downlevel": "downlevel-dts dist-types dist-types/ts3.4", + "clean": "rimraf ./dist-* && rimraf *.tsbuildinfo" + }, + "main": "./dist-cjs/index.js", + "types": "./dist-types/index.d.ts", + "module": "./dist-es/index.js", + "sideEffects": false, + "dependencies": { + "@aws-crypto/sha256-browser": "2.0.0", + "@aws-crypto/sha256-js": "2.0.0", + "@aws-sdk/client-sts": "*", + "@aws-sdk/config-resolver": "*", + "@aws-sdk/credential-provider-node": "*", + "@aws-sdk/fetch-http-handler": "*", + "@aws-sdk/hash-node": "*", + "@aws-sdk/invalid-dependency": "*", + "@aws-sdk/middleware-content-length": "*", + "@aws-sdk/middleware-host-header": "*", + "@aws-sdk/middleware-logger": "*", + "@aws-sdk/middleware-retry": "*", + "@aws-sdk/middleware-serde": "*", + "@aws-sdk/middleware-signing": "*", + "@aws-sdk/middleware-stack": "*", + "@aws-sdk/middleware-user-agent": "*", + "@aws-sdk/node-config-provider": "*", + "@aws-sdk/node-http-handler": "*", + "@aws-sdk/protocol-http": "*", + "@aws-sdk/smithy-client": "*", + "@aws-sdk/types": "*", + "@aws-sdk/url-parser": "*", + "@aws-sdk/util-base64-browser": "*", + "@aws-sdk/util-base64-node": "*", + "@aws-sdk/util-body-length-browser": "*", + "@aws-sdk/util-body-length-node": "*", + "@aws-sdk/util-defaults-mode-browser": "*", + "@aws-sdk/util-defaults-mode-node": "*", + "@aws-sdk/util-user-agent-browser": 
"*", + "@aws-sdk/util-user-agent-node": "*", + "@aws-sdk/util-utf8-browser": "*", + "@aws-sdk/util-utf8-node": "*", + "tslib": "^2.3.0" + }, + "devDependencies": { + "@aws-sdk/service-client-documentation-generator": "*", + "@tsconfig/recommended": "1.0.1", + "@types/node": "^12.7.5", + "concurrently": "7.0.0", + "downlevel-dts": "0.7.0", + "rimraf": "3.0.2", + "typedoc": "0.19.2", + "typescript": "~4.3.5" + }, + "engines": { + "node": ">=12.0.0" + }, + "typesVersions": { + "<4.0": { + "dist-types/*": [ + "dist-types/ts3.4/*" + ] + } + }, + "files": [ + "dist-*" + ], + "author": { + "name": "AWS SDK for JavaScript Team", + "url": "https://aws.amazon.com/javascript/" + }, + "license": "Apache-2.0", + "browser": { + "./dist-es/runtimeConfig": "./dist-es/runtimeConfig.browser" + }, + "react-native": { + "./dist-es/runtimeConfig": "./dist-es/runtimeConfig.native" + }, + "homepage": "https://github.com/aws/aws-sdk-js-v3/tree/main/clients/client-keyspaces", + "repository": { + "type": "git", + "url": "https://github.com/aws/aws-sdk-js-v3.git", + "directory": "clients/client-keyspaces" + } +} diff --git a/clients/client-keyspaces/src/Keyspaces.ts b/clients/client-keyspaces/src/Keyspaces.ts new file mode 100644 index 000000000000..450698616777 --- /dev/null +++ b/clients/client-keyspaces/src/Keyspaces.ts @@ -0,0 +1,505 @@ +import { HttpHandlerOptions as __HttpHandlerOptions } from "@aws-sdk/types"; + +import { + CreateKeyspaceCommand, + CreateKeyspaceCommandInput, + CreateKeyspaceCommandOutput, +} from "./commands/CreateKeyspaceCommand"; +import { CreateTableCommand, CreateTableCommandInput, CreateTableCommandOutput } from "./commands/CreateTableCommand"; +import { + DeleteKeyspaceCommand, + DeleteKeyspaceCommandInput, + DeleteKeyspaceCommandOutput, +} from "./commands/DeleteKeyspaceCommand"; +import { DeleteTableCommand, DeleteTableCommandInput, DeleteTableCommandOutput } from "./commands/DeleteTableCommand"; +import { GetKeyspaceCommand, GetKeyspaceCommandInput, 
GetKeyspaceCommandOutput } from "./commands/GetKeyspaceCommand"; +import { GetTableCommand, GetTableCommandInput, GetTableCommandOutput } from "./commands/GetTableCommand"; +import { + ListKeyspacesCommand, + ListKeyspacesCommandInput, + ListKeyspacesCommandOutput, +} from "./commands/ListKeyspacesCommand"; +import { ListTablesCommand, ListTablesCommandInput, ListTablesCommandOutput } from "./commands/ListTablesCommand"; +import { + ListTagsForResourceCommand, + ListTagsForResourceCommandInput, + ListTagsForResourceCommandOutput, +} from "./commands/ListTagsForResourceCommand"; +import { + RestoreTableCommand, + RestoreTableCommandInput, + RestoreTableCommandOutput, +} from "./commands/RestoreTableCommand"; +import { TagResourceCommand, TagResourceCommandInput, TagResourceCommandOutput } from "./commands/TagResourceCommand"; +import { + UntagResourceCommand, + UntagResourceCommandInput, + UntagResourceCommandOutput, +} from "./commands/UntagResourceCommand"; +import { UpdateTableCommand, UpdateTableCommandInput, UpdateTableCommandOutput } from "./commands/UpdateTableCommand"; +import { KeyspacesClient } from "./KeyspacesClient"; + +/** + *

      Amazon Keyspaces (for Apache Cassandra) is a scalable, + * highly available, and managed Apache Cassandra-compatible database service. Amazon Keyspaces makes it easy to migrate, + * run, and scale Cassandra workloads in the Amazon Web Services Cloud. With just a few clicks on the Amazon Web Services Management Console or a few lines of code, + * you can create keyspaces and tables in Amazon Keyspaces, without deploying any infrastructure or installing software.

      + * + *

      In addition to supporting Cassandra Query Language (CQL) requests via open-source Cassandra drivers, + * Amazon Keyspaces supports data definition language (DDL) operations to manage keyspaces and tables using the Amazon Web Services SDK and CLI. This API reference describes + * the supported DDL operations in detail.

      + * + *

      For the list of all supported CQL APIs, see Supported Cassandra APIs, operations, and data types + * in Amazon Keyspaces in the Amazon Keyspaces Developer + * Guide.

      + * + *

      To learn how Amazon Keyspaces API actions are tracked in CloudTrail, see Amazon Keyspaces information in CloudTrail in the Amazon Keyspaces Developer + * Guide.

      + * + *

      For more information about Amazon Web Services APIs, for example how to implement retry logic or how to sign Amazon Web Services API requests, see Amazon Web Services APIs in the General Reference.

      + */ +export class Keyspaces extends KeyspacesClient { + /** + *

      The CreateKeyspace operation adds a new keyspace to your account. In an Amazon Web Services account, keyspace names + * must be unique within each Region.

      + *

      + * CreateKeyspace is an asynchronous operation. You can monitor the creation status of the new keyspace + * by using the GetKeyspace operation.

      + *

      For more information, see Creating keyspaces in the Amazon Keyspaces Developer + * Guide.

      + */ + public createKeyspace( + args: CreateKeyspaceCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public createKeyspace( + args: CreateKeyspaceCommandInput, + cb: (err: any, data?: CreateKeyspaceCommandOutput) => void + ): void; + public createKeyspace( + args: CreateKeyspaceCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: CreateKeyspaceCommandOutput) => void + ): void; + public createKeyspace( + args: CreateKeyspaceCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: CreateKeyspaceCommandOutput) => void), + cb?: (err: any, data?: CreateKeyspaceCommandOutput) => void + ): Promise | void { + const command = new CreateKeyspaceCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

      The CreateTable operation adds a new table to the specified keyspace. Within a keyspace, table names + * must be unique.

      + *

      + * CreateTable is an asynchronous operation. When the request is received, the status of the table is set to CREATING. + * You can monitor the creation status of the new table by using the GetTable + * operation, which returns the current status of the table. You can start using a table when the status is ACTIVE.

      + *

      For more information, see Creating tables in the Amazon Keyspaces Developer + * Guide.

      + */ + public createTable(args: CreateTableCommandInput, options?: __HttpHandlerOptions): Promise; + public createTable(args: CreateTableCommandInput, cb: (err: any, data?: CreateTableCommandOutput) => void): void; + public createTable( + args: CreateTableCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: CreateTableCommandOutput) => void + ): void; + public createTable( + args: CreateTableCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: CreateTableCommandOutput) => void), + cb?: (err: any, data?: CreateTableCommandOutput) => void + ): Promise | void { + const command = new CreateTableCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

      The DeleteKeyspace operation deletes a keyspace and all of its tables.

      + */ + public deleteKeyspace( + args: DeleteKeyspaceCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public deleteKeyspace( + args: DeleteKeyspaceCommandInput, + cb: (err: any, data?: DeleteKeyspaceCommandOutput) => void + ): void; + public deleteKeyspace( + args: DeleteKeyspaceCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: DeleteKeyspaceCommandOutput) => void + ): void; + public deleteKeyspace( + args: DeleteKeyspaceCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: DeleteKeyspaceCommandOutput) => void), + cb?: (err: any, data?: DeleteKeyspaceCommandOutput) => void + ): Promise | void { + const command = new DeleteKeyspaceCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

      + * The DeleteTable operation deletes a table and all of its data. After a DeleteTable request is received, + * the specified table is in the DELETING state until Amazon Keyspaces completes the deletion. If the table + * is in the ACTIVE state, you can delete it. If a table is either in the CREATING or UPDATING states, then + * Amazon Keyspaces returns a ResourceInUseException. If the specified table does not exist, Amazon Keyspaces returns + * a ResourceNotFoundException. If the table is already in the DELETING state, no error is returned.

      + */ + public deleteTable(args: DeleteTableCommandInput, options?: __HttpHandlerOptions): Promise; + public deleteTable(args: DeleteTableCommandInput, cb: (err: any, data?: DeleteTableCommandOutput) => void): void; + public deleteTable( + args: DeleteTableCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: DeleteTableCommandOutput) => void + ): void; + public deleteTable( + args: DeleteTableCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: DeleteTableCommandOutput) => void), + cb?: (err: any, data?: DeleteTableCommandOutput) => void + ): Promise | void { + const command = new DeleteTableCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

      Returns the name and the Amazon Resource Name (ARN) of the specified keyspace.

      + */ + public getKeyspace(args: GetKeyspaceCommandInput, options?: __HttpHandlerOptions): Promise; + public getKeyspace(args: GetKeyspaceCommandInput, cb: (err: any, data?: GetKeyspaceCommandOutput) => void): void; + public getKeyspace( + args: GetKeyspaceCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: GetKeyspaceCommandOutput) => void + ): void; + public getKeyspace( + args: GetKeyspaceCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: GetKeyspaceCommandOutput) => void), + cb?: (err: any, data?: GetKeyspaceCommandOutput) => void + ): Promise | void { + const command = new GetKeyspaceCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

      Returns information about the table, including the table's name and current status, the keyspace name, + * configuration settings, and metadata.

      + *

      To read table metadata using GetTable, Select action + * permissions for the table and system tables are required to complete the operation.

      + */ + public getTable(args: GetTableCommandInput, options?: __HttpHandlerOptions): Promise; + public getTable(args: GetTableCommandInput, cb: (err: any, data?: GetTableCommandOutput) => void): void; + public getTable( + args: GetTableCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: GetTableCommandOutput) => void + ): void; + public getTable( + args: GetTableCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: GetTableCommandOutput) => void), + cb?: (err: any, data?: GetTableCommandOutput) => void + ): Promise | void { + const command = new GetTableCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

      Returns a list of keyspaces.

      + */ + public listKeyspaces( + args: ListKeyspacesCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public listKeyspaces( + args: ListKeyspacesCommandInput, + cb: (err: any, data?: ListKeyspacesCommandOutput) => void + ): void; + public listKeyspaces( + args: ListKeyspacesCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ListKeyspacesCommandOutput) => void + ): void; + public listKeyspaces( + args: ListKeyspacesCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: ListKeyspacesCommandOutput) => void), + cb?: (err: any, data?: ListKeyspacesCommandOutput) => void + ): Promise | void { + const command = new ListKeyspacesCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

      Returns a list of tables for a specified keyspace.

      + */ + public listTables(args: ListTablesCommandInput, options?: __HttpHandlerOptions): Promise; + public listTables(args: ListTablesCommandInput, cb: (err: any, data?: ListTablesCommandOutput) => void): void; + public listTables( + args: ListTablesCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ListTablesCommandOutput) => void + ): void; + public listTables( + args: ListTablesCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: ListTablesCommandOutput) => void), + cb?: (err: any, data?: ListTablesCommandOutput) => void + ): Promise | void { + const command = new ListTablesCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

      Returns a list of all tags associated with the specified Amazon Keyspaces resource.

      + */ + public listTagsForResource( + args: ListTagsForResourceCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public listTagsForResource( + args: ListTagsForResourceCommandInput, + cb: (err: any, data?: ListTagsForResourceCommandOutput) => void + ): void; + public listTagsForResource( + args: ListTagsForResourceCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ListTagsForResourceCommandOutput) => void + ): void; + public listTagsForResource( + args: ListTagsForResourceCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: ListTagsForResourceCommandOutput) => void), + cb?: (err: any, data?: ListTagsForResourceCommandOutput) => void + ): Promise | void { + const command = new ListTagsForResourceCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

      Restores the specified table to the specified point in time within the + * earliest_restorable_timestamp and the current time. For more information about restore points, see + * + * Time window for PITR continuous backups in the Amazon Keyspaces Developer Guide. + *

      + *

      + * Any number of users can execute up to 4 concurrent restores (any type of restore) in a given account.

      + *

      When you restore using point in time recovery, Amazon Keyspaces restores your source table's schema and data to the state + * based on the selected timestamp (day:hour:minute:second) to a new table. The Time to Live (TTL) settings + * are also restored to the state based on the selected timestamp.

      + *

      In addition to the table's schema, data, and TTL settings, RestoreTable restores the capacity mode, encryption, and + * point-in-time recovery settings from the source table. + * Unlike the table's schema, data, and TTL settings, which are restored based on the selected timestamp, + * these settings are always restored based on the table's settings as of the current time or when the table was deleted.

      + *

      You can also overwrite these settings during restore:

      + *
        + *
      • + *

        Read/write capacity mode

        + *
      • + *
      • + *

        Provisioned throughput capacity settings

        + *
      • + *
      • + *

        Point-in-time (PITR) settings

        + *
      • + *
      • + *

        Tags

        + *
      • + *
      + *

      For more information, see PITR restore settings in the Amazon Keyspaces Developer + * Guide.

      + *

      The following settings are not restored, and you must configure them manually for the + * new table.

      + *
        + *
      • + *

        Automatic scaling policies (for tables that use provisioned capacity + * mode)

        + *
      • + *
      • + *

        Identity and Access Management (IAM) policies

        + *
      • + *
      • + *

        Amazon CloudWatch metrics and alarms

        + *
      • + *
      + */ + public restoreTable( + args: RestoreTableCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public restoreTable(args: RestoreTableCommandInput, cb: (err: any, data?: RestoreTableCommandOutput) => void): void; + public restoreTable( + args: RestoreTableCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: RestoreTableCommandOutput) => void + ): void; + public restoreTable( + args: RestoreTableCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: RestoreTableCommandOutput) => void), + cb?: (err: any, data?: RestoreTableCommandOutput) => void + ): Promise | void { + const command = new RestoreTableCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

      Associates a set of tags with a Amazon Keyspaces resource. You can then + * activate these user-defined tags so that they appear on the Cost Management Console for cost allocation tracking. + * For more information, see Adding tags and labels to Amazon Keyspaces resources in the Amazon Keyspaces Developer + * Guide.

      + *

      For IAM policy examples that show how to control access to Amazon Keyspaces resources based on tags, + * see Amazon Keyspaces resource access based on tags + * in the Amazon Keyspaces Developer Guide.

      + */ + public tagResource(args: TagResourceCommandInput, options?: __HttpHandlerOptions): Promise; + public tagResource(args: TagResourceCommandInput, cb: (err: any, data?: TagResourceCommandOutput) => void): void; + public tagResource( + args: TagResourceCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: TagResourceCommandOutput) => void + ): void; + public tagResource( + args: TagResourceCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: TagResourceCommandOutput) => void), + cb?: (err: any, data?: TagResourceCommandOutput) => void + ): Promise | void { + const command = new TagResourceCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

      Removes the association of tags from a Amazon Keyspaces resource.

      + */ + public untagResource( + args: UntagResourceCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public untagResource( + args: UntagResourceCommandInput, + cb: (err: any, data?: UntagResourceCommandOutput) => void + ): void; + public untagResource( + args: UntagResourceCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: UntagResourceCommandOutput) => void + ): void; + public untagResource( + args: UntagResourceCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: UntagResourceCommandOutput) => void), + cb?: (err: any, data?: UntagResourceCommandOutput) => void + ): Promise | void { + const command = new UntagResourceCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

      Adds new columns to the table or updates one of the table's settings, for example + * capacity mode, encryption, point-in-time recovery, or ttl settings. + * Note that you can only update one specific table setting per update operation.

      + */ + public updateTable(args: UpdateTableCommandInput, options?: __HttpHandlerOptions): Promise; + public updateTable(args: UpdateTableCommandInput, cb: (err: any, data?: UpdateTableCommandOutput) => void): void; + public updateTable( + args: UpdateTableCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: UpdateTableCommandOutput) => void + ): void; + public updateTable( + args: UpdateTableCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: UpdateTableCommandOutput) => void), + cb?: (err: any, data?: UpdateTableCommandOutput) => void + ): Promise | void { + const command = new UpdateTableCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } +} diff --git a/clients/client-keyspaces/src/KeyspacesClient.ts b/clients/client-keyspaces/src/KeyspacesClient.ts new file mode 100644 index 000000000000..d2f954762e98 --- /dev/null +++ b/clients/client-keyspaces/src/KeyspacesClient.ts @@ -0,0 +1,311 @@ +import { + EndpointsInputConfig, + EndpointsResolvedConfig, + RegionInputConfig, + RegionResolvedConfig, + resolveEndpointsConfig, + resolveRegionConfig, +} from "@aws-sdk/config-resolver"; +import { getContentLengthPlugin } from "@aws-sdk/middleware-content-length"; +import { + getHostHeaderPlugin, + HostHeaderInputConfig, + HostHeaderResolvedConfig, + resolveHostHeaderConfig, +} from "@aws-sdk/middleware-host-header"; +import { getLoggerPlugin } from "@aws-sdk/middleware-logger"; +import { getRetryPlugin, resolveRetryConfig, RetryInputConfig, RetryResolvedConfig } from "@aws-sdk/middleware-retry"; +import { + AwsAuthInputConfig, + AwsAuthResolvedConfig, + getAwsAuthPlugin, + resolveAwsAuthConfig, +} from "@aws-sdk/middleware-signing"; +import 
{ + getUserAgentPlugin, + resolveUserAgentConfig, + UserAgentInputConfig, + UserAgentResolvedConfig, +} from "@aws-sdk/middleware-user-agent"; +import { HttpHandler as __HttpHandler } from "@aws-sdk/protocol-http"; +import { + Client as __Client, + DefaultsMode, + SmithyConfiguration as __SmithyConfiguration, + SmithyResolvedConfiguration as __SmithyResolvedConfiguration, +} from "@aws-sdk/smithy-client"; +import { + BodyLengthCalculator as __BodyLengthCalculator, + Credentials as __Credentials, + Decoder as __Decoder, + Encoder as __Encoder, + Hash as __Hash, + HashConstructor as __HashConstructor, + HttpHandlerOptions as __HttpHandlerOptions, + Logger as __Logger, + Provider as __Provider, + Provider, + RegionInfoProvider, + StreamCollector as __StreamCollector, + UrlParser as __UrlParser, + UserAgent as __UserAgent, +} from "@aws-sdk/types"; + +import { CreateKeyspaceCommandInput, CreateKeyspaceCommandOutput } from "./commands/CreateKeyspaceCommand"; +import { CreateTableCommandInput, CreateTableCommandOutput } from "./commands/CreateTableCommand"; +import { DeleteKeyspaceCommandInput, DeleteKeyspaceCommandOutput } from "./commands/DeleteKeyspaceCommand"; +import { DeleteTableCommandInput, DeleteTableCommandOutput } from "./commands/DeleteTableCommand"; +import { GetKeyspaceCommandInput, GetKeyspaceCommandOutput } from "./commands/GetKeyspaceCommand"; +import { GetTableCommandInput, GetTableCommandOutput } from "./commands/GetTableCommand"; +import { ListKeyspacesCommandInput, ListKeyspacesCommandOutput } from "./commands/ListKeyspacesCommand"; +import { ListTablesCommandInput, ListTablesCommandOutput } from "./commands/ListTablesCommand"; +import { + ListTagsForResourceCommandInput, + ListTagsForResourceCommandOutput, +} from "./commands/ListTagsForResourceCommand"; +import { RestoreTableCommandInput, RestoreTableCommandOutput } from "./commands/RestoreTableCommand"; +import { TagResourceCommandInput, TagResourceCommandOutput } from 
"./commands/TagResourceCommand"; +import { UntagResourceCommandInput, UntagResourceCommandOutput } from "./commands/UntagResourceCommand"; +import { UpdateTableCommandInput, UpdateTableCommandOutput } from "./commands/UpdateTableCommand"; +import { getRuntimeConfig as __getRuntimeConfig } from "./runtimeConfig"; + +export type ServiceInputTypes = + | CreateKeyspaceCommandInput + | CreateTableCommandInput + | DeleteKeyspaceCommandInput + | DeleteTableCommandInput + | GetKeyspaceCommandInput + | GetTableCommandInput + | ListKeyspacesCommandInput + | ListTablesCommandInput + | ListTagsForResourceCommandInput + | RestoreTableCommandInput + | TagResourceCommandInput + | UntagResourceCommandInput + | UpdateTableCommandInput; + +export type ServiceOutputTypes = + | CreateKeyspaceCommandOutput + | CreateTableCommandOutput + | DeleteKeyspaceCommandOutput + | DeleteTableCommandOutput + | GetKeyspaceCommandOutput + | GetTableCommandOutput + | ListKeyspacesCommandOutput + | ListTablesCommandOutput + | ListTagsForResourceCommandOutput + | RestoreTableCommandOutput + | TagResourceCommandOutput + | UntagResourceCommandOutput + | UpdateTableCommandOutput; + +export interface ClientDefaults extends Partial<__SmithyResolvedConfiguration<__HttpHandlerOptions>> { + /** + * The HTTP handler to use. Fetch in browser and Https in Nodejs. + */ + requestHandler?: __HttpHandler; + + /** + * A constructor for a class implementing the {@link __Hash} interface + * that computes the SHA-256 HMAC or checksum of a string or binary buffer. + * @internal + */ + sha256?: __HashConstructor; + + /** + * The function that will be used to convert strings into HTTP endpoints. + * @internal + */ + urlParser?: __UrlParser; + + /** + * A function that can calculate the length of a request body. + * @internal + */ + bodyLengthChecker?: __BodyLengthCalculator; + + /** + * A function that converts a stream into an array of bytes. 
+ * @internal + */ + streamCollector?: __StreamCollector; + + /** + * The function that will be used to convert a base64-encoded string to a byte array. + * @internal + */ + base64Decoder?: __Decoder; + + /** + * The function that will be used to convert binary data to a base64-encoded string. + * @internal + */ + base64Encoder?: __Encoder; + + /** + * The function that will be used to convert a UTF8-encoded string to a byte array. + * @internal + */ + utf8Decoder?: __Decoder; + + /** + * The function that will be used to convert binary data to a UTF-8 encoded string. + * @internal + */ + utf8Encoder?: __Encoder; + + /** + * The runtime environment. + * @internal + */ + runtime?: string; + + /** + * Disable dyanamically changing the endpoint of the client based on the hostPrefix + * trait of an operation. + */ + disableHostPrefix?: boolean; + + /** + * Value for how many times a request will be made at most in case of retry. + */ + maxAttempts?: number | __Provider; + + /** + * Specifies which retry algorithm to use. + */ + retryMode?: string | __Provider; + + /** + * Optional logger for logging debug/info/warn/error. + */ + logger?: __Logger; + + /** + * Enables IPv6/IPv4 dualstack endpoint. + */ + useDualstackEndpoint?: boolean | __Provider; + + /** + * Enables FIPS compatible endpoints. + */ + useFipsEndpoint?: boolean | __Provider; + + /** + * Unique service identifier. + * @internal + */ + serviceId?: string; + + /** + * The AWS region to which this client will send requests + */ + region?: string | __Provider; + + /** + * Default credentials provider; Not available in browser runtime. + * @internal + */ + credentialDefaultProvider?: (input: any) => __Provider<__Credentials>; + + /** + * Fetch related hostname, signing name or signing region with given region. 
+ * @internal + */ + regionInfoProvider?: RegionInfoProvider; + + /** + * The provider populating default tracking information to be sent with `user-agent`, `x-amz-user-agent` header + * @internal + */ + defaultUserAgentProvider?: Provider<__UserAgent>; + + /** + * The {@link DefaultsMode} that will be used to determine how certain default configuration options are resolved in the SDK. + */ + defaultsMode?: DefaultsMode | Provider; +} + +type KeyspacesClientConfigType = Partial<__SmithyConfiguration<__HttpHandlerOptions>> & + ClientDefaults & + RegionInputConfig & + EndpointsInputConfig & + RetryInputConfig & + HostHeaderInputConfig & + AwsAuthInputConfig & + UserAgentInputConfig; +/** + * The configuration interface of KeyspacesClient class constructor that set the region, credentials and other options. + */ +export interface KeyspacesClientConfig extends KeyspacesClientConfigType {} + +type KeyspacesClientResolvedConfigType = __SmithyResolvedConfiguration<__HttpHandlerOptions> & + Required & + RegionResolvedConfig & + EndpointsResolvedConfig & + RetryResolvedConfig & + HostHeaderResolvedConfig & + AwsAuthResolvedConfig & + UserAgentResolvedConfig; +/** + * The resolved configuration interface of KeyspacesClient class. This is resolved and normalized from the {@link KeyspacesClientConfig | constructor configuration interface}. + */ +export interface KeyspacesClientResolvedConfig extends KeyspacesClientResolvedConfigType {} + +/** + *

      Amazon Keyspaces (for Apache Cassandra) is a scalable, + * highly available, and managed Apache Cassandra-compatible database service. Amazon Keyspaces makes it easy to migrate, + * run, and scale Cassandra workloads in the Amazon Web Services Cloud. With just a few clicks on the Amazon Web Services Management Console or a few lines of code, + * you can create keyspaces and tables in Amazon Keyspaces, without deploying any infrastructure or installing software.

      + * + *

      In addition to supporting Cassandra Query Language (CQL) requests via open-source Cassandra drivers, + * Amazon Keyspaces supports data definition language (DDL) operations to manage keyspaces and tables using the Amazon Web Services SDK and CLI. This API reference describes + * the supported DDL operations in detail.

      + * + *

      For the list of all supported CQL APIs, see Supported Cassandra APIs, operations, and data types + * in Amazon Keyspaces in the Amazon Keyspaces Developer + * Guide.

      + * + *

      To learn how Amazon Keyspaces API actions are tracked in CloudTrail, see Amazon Keyspaces information in CloudTrail in the Amazon Keyspaces Developer + * Guide.

      + * + *

      For more information about Amazon Web Services APIs, for example how to implement retry logic or how to sign Amazon Web Services API requests, see Amazon Web Services APIs in the General Reference.

      + */ +export class KeyspacesClient extends __Client< + __HttpHandlerOptions, + ServiceInputTypes, + ServiceOutputTypes, + KeyspacesClientResolvedConfig +> { + /** + * The resolved configuration of KeyspacesClient class. This is resolved and normalized from the {@link KeyspacesClientConfig | constructor configuration interface}. + */ + readonly config: KeyspacesClientResolvedConfig; + + constructor(configuration: KeyspacesClientConfig) { + const _config_0 = __getRuntimeConfig(configuration); + const _config_1 = resolveRegionConfig(_config_0); + const _config_2 = resolveEndpointsConfig(_config_1); + const _config_3 = resolveRetryConfig(_config_2); + const _config_4 = resolveHostHeaderConfig(_config_3); + const _config_5 = resolveAwsAuthConfig(_config_4); + const _config_6 = resolveUserAgentConfig(_config_5); + super(_config_6); + this.config = _config_6; + this.middlewareStack.use(getRetryPlugin(this.config)); + this.middlewareStack.use(getContentLengthPlugin(this.config)); + this.middlewareStack.use(getHostHeaderPlugin(this.config)); + this.middlewareStack.use(getLoggerPlugin(this.config)); + this.middlewareStack.use(getAwsAuthPlugin(this.config)); + this.middlewareStack.use(getUserAgentPlugin(this.config)); + } + + /** + * Destroy underlying resources, like sockets. It's usually not necessary to do this. + * However in Node.js, it's best to explicitly shut down the client's agent when it is no longer needed. + * Otherwise, sockets might stay open for quite a long time before the server terminates them. 
+ */ + destroy(): void { + super.destroy(); + } +} diff --git a/clients/client-keyspaces/src/commands/CreateKeyspaceCommand.ts b/clients/client-keyspaces/src/commands/CreateKeyspaceCommand.ts new file mode 100644 index 000000000000..0511e489171a --- /dev/null +++ b/clients/client-keyspaces/src/commands/CreateKeyspaceCommand.ts @@ -0,0 +1,101 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { KeyspacesClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../KeyspacesClient"; +import { CreateKeyspaceRequest, CreateKeyspaceResponse } from "../models/models_0"; +import { + deserializeAws_json1_0CreateKeyspaceCommand, + serializeAws_json1_0CreateKeyspaceCommand, +} from "../protocols/Aws_json1_0"; + +export interface CreateKeyspaceCommandInput extends CreateKeyspaceRequest {} +export interface CreateKeyspaceCommandOutput extends CreateKeyspaceResponse, __MetadataBearer {} + +/** + *

      The CreateKeyspace operation adds a new keyspace to your account. In an Amazon Web Services account, keyspace names + * must be unique within each Region.

      + *

      + * CreateKeyspace is an asynchronous operation. You can monitor the creation status of the new keyspace + * by using the GetKeyspace operation.

      + *

      For more information, see Creating keyspaces in the Amazon Keyspaces Developer + * Guide.

      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { KeyspacesClient, CreateKeyspaceCommand } from "@aws-sdk/client-keyspaces"; // ES Modules import + * // const { KeyspacesClient, CreateKeyspaceCommand } = require("@aws-sdk/client-keyspaces"); // CommonJS import + * const client = new KeyspacesClient(config); + * const command = new CreateKeyspaceCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link CreateKeyspaceCommandInput} for command's `input` shape. + * @see {@link CreateKeyspaceCommandOutput} for command's `response` shape. + * @see {@link KeyspacesClientResolvedConfig | config} for KeyspacesClient's `config` shape. + * + */ +export class CreateKeyspaceCommand extends $Command< + CreateKeyspaceCommandInput, + CreateKeyspaceCommandOutput, + KeyspacesClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: CreateKeyspaceCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: KeyspacesClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "KeyspacesClient"; + const commandName = "CreateKeyspaceCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: CreateKeyspaceRequest.filterSensitiveLog, + outputFilterSensitiveLog: CreateKeyspaceResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, 
options || {}), + handlerExecutionContext + ); + } + + private serialize(input: CreateKeyspaceCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_0CreateKeyspaceCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_json1_0CreateKeyspaceCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-keyspaces/src/commands/CreateTableCommand.ts b/clients/client-keyspaces/src/commands/CreateTableCommand.ts new file mode 100644 index 000000000000..952ba9b48925 --- /dev/null +++ b/clients/client-keyspaces/src/commands/CreateTableCommand.ts @@ -0,0 +1,102 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { KeyspacesClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../KeyspacesClient"; +import { CreateTableRequest, CreateTableResponse } from "../models/models_0"; +import { + deserializeAws_json1_0CreateTableCommand, + serializeAws_json1_0CreateTableCommand, +} from "../protocols/Aws_json1_0"; + +export interface CreateTableCommandInput extends CreateTableRequest {} +export interface CreateTableCommandOutput extends CreateTableResponse, __MetadataBearer {} + +/** + *

      The CreateTable operation adds a new table to the specified keyspace. Within a keyspace, table names + * must be unique.

      + *

      + * CreateTable is an asynchronous operation. When the request is received, the status of the table is set to CREATING. + * You can monitor the creation status of the new table by using the GetTable + * operation, which returns the current status of the table. You can start using a table when the status is ACTIVE.

      + *

      For more information, see Creating tables in the Amazon Keyspaces Developer + * Guide.

      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { KeyspacesClient, CreateTableCommand } from "@aws-sdk/client-keyspaces"; // ES Modules import + * // const { KeyspacesClient, CreateTableCommand } = require("@aws-sdk/client-keyspaces"); // CommonJS import + * const client = new KeyspacesClient(config); + * const command = new CreateTableCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link CreateTableCommandInput} for command's `input` shape. + * @see {@link CreateTableCommandOutput} for command's `response` shape. + * @see {@link KeyspacesClientResolvedConfig | config} for KeyspacesClient's `config` shape. + * + */ +export class CreateTableCommand extends $Command< + CreateTableCommandInput, + CreateTableCommandOutput, + KeyspacesClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: CreateTableCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: KeyspacesClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "KeyspacesClient"; + const commandName = "CreateTableCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: CreateTableRequest.filterSensitiveLog, + outputFilterSensitiveLog: CreateTableResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + 
handlerExecutionContext + ); + } + + private serialize(input: CreateTableCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_0CreateTableCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_json1_0CreateTableCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-keyspaces/src/commands/DeleteKeyspaceCommand.ts b/clients/client-keyspaces/src/commands/DeleteKeyspaceCommand.ts new file mode 100644 index 000000000000..9ba4b5f16481 --- /dev/null +++ b/clients/client-keyspaces/src/commands/DeleteKeyspaceCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { KeyspacesClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../KeyspacesClient"; +import { DeleteKeyspaceRequest, DeleteKeyspaceResponse } from "../models/models_0"; +import { + deserializeAws_json1_0DeleteKeyspaceCommand, + serializeAws_json1_0DeleteKeyspaceCommand, +} from "../protocols/Aws_json1_0"; + +export interface DeleteKeyspaceCommandInput extends DeleteKeyspaceRequest {} +export interface DeleteKeyspaceCommandOutput extends DeleteKeyspaceResponse, __MetadataBearer {} + +/** + *

      The DeleteKeyspace operation deletes a keyspace and all of its tables.

      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { KeyspacesClient, DeleteKeyspaceCommand } from "@aws-sdk/client-keyspaces"; // ES Modules import + * // const { KeyspacesClient, DeleteKeyspaceCommand } = require("@aws-sdk/client-keyspaces"); // CommonJS import + * const client = new KeyspacesClient(config); + * const command = new DeleteKeyspaceCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DeleteKeyspaceCommandInput} for command's `input` shape. + * @see {@link DeleteKeyspaceCommandOutput} for command's `response` shape. + * @see {@link KeyspacesClientResolvedConfig | config} for KeyspacesClient's `config` shape. + * + */ +export class DeleteKeyspaceCommand extends $Command< + DeleteKeyspaceCommandInput, + DeleteKeyspaceCommandOutput, + KeyspacesClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DeleteKeyspaceCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: KeyspacesClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "KeyspacesClient"; + const commandName = "DeleteKeyspaceCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: DeleteKeyspaceRequest.filterSensitiveLog, + outputFilterSensitiveLog: DeleteKeyspaceResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, 
options || {}), + handlerExecutionContext + ); + } + + private serialize(input: DeleteKeyspaceCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_0DeleteKeyspaceCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_json1_0DeleteKeyspaceCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-keyspaces/src/commands/DeleteTableCommand.ts b/clients/client-keyspaces/src/commands/DeleteTableCommand.ts new file mode 100644 index 000000000000..c54535fd082d --- /dev/null +++ b/clients/client-keyspaces/src/commands/DeleteTableCommand.ts @@ -0,0 +1,100 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { KeyspacesClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../KeyspacesClient"; +import { DeleteTableRequest, DeleteTableResponse } from "../models/models_0"; +import { + deserializeAws_json1_0DeleteTableCommand, + serializeAws_json1_0DeleteTableCommand, +} from "../protocols/Aws_json1_0"; + +export interface DeleteTableCommandInput extends DeleteTableRequest {} +export interface DeleteTableCommandOutput extends DeleteTableResponse, __MetadataBearer {} + +/** + *

      + * The DeleteTable operation deletes a table and all of its data. After a DeleteTable request is received, + * the specified table is in the DELETING state until Amazon Keyspaces completes the deletion. If the table + * is in the ACTIVE state, you can delete it. If a table is either in the CREATING or UPDATING states, then + * Amazon Keyspaces returns a ResourceInUseException. If the specified table does not exist, Amazon Keyspaces returns + * a ResourceNotFoundException. If the table is already in the DELETING state, no error is returned.

      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { KeyspacesClient, DeleteTableCommand } from "@aws-sdk/client-keyspaces"; // ES Modules import + * // const { KeyspacesClient, DeleteTableCommand } = require("@aws-sdk/client-keyspaces"); // CommonJS import + * const client = new KeyspacesClient(config); + * const command = new DeleteTableCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DeleteTableCommandInput} for command's `input` shape. + * @see {@link DeleteTableCommandOutput} for command's `response` shape. + * @see {@link KeyspacesClientResolvedConfig | config} for KeyspacesClient's `config` shape. + * + */ +export class DeleteTableCommand extends $Command< + DeleteTableCommandInput, + DeleteTableCommandOutput, + KeyspacesClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DeleteTableCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: KeyspacesClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "KeyspacesClient"; + const commandName = "DeleteTableCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: DeleteTableRequest.filterSensitiveLog, + outputFilterSensitiveLog: DeleteTableResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + 
handlerExecutionContext + ); + } + + private serialize(input: DeleteTableCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_0DeleteTableCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_json1_0DeleteTableCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-keyspaces/src/commands/GetKeyspaceCommand.ts b/clients/client-keyspaces/src/commands/GetKeyspaceCommand.ts new file mode 100644 index 000000000000..4b7145852d61 --- /dev/null +++ b/clients/client-keyspaces/src/commands/GetKeyspaceCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { KeyspacesClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../KeyspacesClient"; +import { GetKeyspaceRequest, GetKeyspaceResponse } from "../models/models_0"; +import { + deserializeAws_json1_0GetKeyspaceCommand, + serializeAws_json1_0GetKeyspaceCommand, +} from "../protocols/Aws_json1_0"; + +export interface GetKeyspaceCommandInput extends GetKeyspaceRequest {} +export interface GetKeyspaceCommandOutput extends GetKeyspaceResponse, __MetadataBearer {} + +/** + *

      Returns the name and the Amazon Resource Name (ARN) of the specified keyspace.

      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { KeyspacesClient, GetKeyspaceCommand } from "@aws-sdk/client-keyspaces"; // ES Modules import + * // const { KeyspacesClient, GetKeyspaceCommand } = require("@aws-sdk/client-keyspaces"); // CommonJS import + * const client = new KeyspacesClient(config); + * const command = new GetKeyspaceCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link GetKeyspaceCommandInput} for command's `input` shape. + * @see {@link GetKeyspaceCommandOutput} for command's `response` shape. + * @see {@link KeyspacesClientResolvedConfig | config} for KeyspacesClient's `config` shape. + * + */ +export class GetKeyspaceCommand extends $Command< + GetKeyspaceCommandInput, + GetKeyspaceCommandOutput, + KeyspacesClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: GetKeyspaceCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: KeyspacesClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "KeyspacesClient"; + const commandName = "GetKeyspaceCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: GetKeyspaceRequest.filterSensitiveLog, + outputFilterSensitiveLog: GetKeyspaceResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + 
handlerExecutionContext + ); + } + + private serialize(input: GetKeyspaceCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_0GetKeyspaceCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_json1_0GetKeyspaceCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-keyspaces/src/commands/GetTableCommand.ts b/clients/client-keyspaces/src/commands/GetTableCommand.ts new file mode 100644 index 000000000000..d8112c7c9d97 --- /dev/null +++ b/clients/client-keyspaces/src/commands/GetTableCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { KeyspacesClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../KeyspacesClient"; +import { GetTableRequest, GetTableResponse } from "../models/models_0"; +import { deserializeAws_json1_0GetTableCommand, serializeAws_json1_0GetTableCommand } from "../protocols/Aws_json1_0"; + +export interface GetTableCommandInput extends GetTableRequest {} +export interface GetTableCommandOutput extends GetTableResponse, __MetadataBearer {} + +/** + *

      Returns information about the table, including the table's name and current status, the keyspace name, + * configuration settings, and metadata.

      + *

      To read table metadata using GetTable, Select action + * permissions for the table and system tables are required to complete the operation.

      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { KeyspacesClient, GetTableCommand } from "@aws-sdk/client-keyspaces"; // ES Modules import + * // const { KeyspacesClient, GetTableCommand } = require("@aws-sdk/client-keyspaces"); // CommonJS import + * const client = new KeyspacesClient(config); + * const command = new GetTableCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link GetTableCommandInput} for command's `input` shape. + * @see {@link GetTableCommandOutput} for command's `response` shape. + * @see {@link KeyspacesClientResolvedConfig | config} for KeyspacesClient's `config` shape. + * + */ +export class GetTableCommand extends $Command< + GetTableCommandInput, + GetTableCommandOutput, + KeyspacesClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: GetTableCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: KeyspacesClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "KeyspacesClient"; + const commandName = "GetTableCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: GetTableRequest.filterSensitiveLog, + outputFilterSensitiveLog: GetTableResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private 
serialize(input: GetTableCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_0GetTableCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_json1_0GetTableCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-keyspaces/src/commands/ListKeyspacesCommand.ts b/clients/client-keyspaces/src/commands/ListKeyspacesCommand.ts new file mode 100644 index 000000000000..c5575acec5d4 --- /dev/null +++ b/clients/client-keyspaces/src/commands/ListKeyspacesCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { KeyspacesClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../KeyspacesClient"; +import { ListKeyspacesRequest, ListKeyspacesResponse } from "../models/models_0"; +import { + deserializeAws_json1_0ListKeyspacesCommand, + serializeAws_json1_0ListKeyspacesCommand, +} from "../protocols/Aws_json1_0"; + +export interface ListKeyspacesCommandInput extends ListKeyspacesRequest {} +export interface ListKeyspacesCommandOutput extends ListKeyspacesResponse, __MetadataBearer {} + +/** + *

      Returns a list of keyspaces.

      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { KeyspacesClient, ListKeyspacesCommand } from "@aws-sdk/client-keyspaces"; // ES Modules import + * // const { KeyspacesClient, ListKeyspacesCommand } = require("@aws-sdk/client-keyspaces"); // CommonJS import + * const client = new KeyspacesClient(config); + * const command = new ListKeyspacesCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListKeyspacesCommandInput} for command's `input` shape. + * @see {@link ListKeyspacesCommandOutput} for command's `response` shape. + * @see {@link KeyspacesClientResolvedConfig | config} for KeyspacesClient's `config` shape. + * + */ +export class ListKeyspacesCommand extends $Command< + ListKeyspacesCommandInput, + ListKeyspacesCommandOutput, + KeyspacesClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListKeyspacesCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: KeyspacesClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "KeyspacesClient"; + const commandName = "ListKeyspacesCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ListKeyspacesRequest.filterSensitiveLog, + outputFilterSensitiveLog: ListKeyspacesResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || 
{}), + handlerExecutionContext + ); + } + + private serialize(input: ListKeyspacesCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_0ListKeyspacesCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_json1_0ListKeyspacesCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-keyspaces/src/commands/ListTablesCommand.ts b/clients/client-keyspaces/src/commands/ListTablesCommand.ts new file mode 100644 index 000000000000..f502a7f08dfc --- /dev/null +++ b/clients/client-keyspaces/src/commands/ListTablesCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { KeyspacesClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../KeyspacesClient"; +import { ListTablesRequest, ListTablesResponse } from "../models/models_0"; +import { + deserializeAws_json1_0ListTablesCommand, + serializeAws_json1_0ListTablesCommand, +} from "../protocols/Aws_json1_0"; + +export interface ListTablesCommandInput extends ListTablesRequest {} +export interface ListTablesCommandOutput extends ListTablesResponse, __MetadataBearer {} + +/** + *

      Returns a list of tables for a specified keyspace.

      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { KeyspacesClient, ListTablesCommand } from "@aws-sdk/client-keyspaces"; // ES Modules import + * // const { KeyspacesClient, ListTablesCommand } = require("@aws-sdk/client-keyspaces"); // CommonJS import + * const client = new KeyspacesClient(config); + * const command = new ListTablesCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListTablesCommandInput} for command's `input` shape. + * @see {@link ListTablesCommandOutput} for command's `response` shape. + * @see {@link KeyspacesClientResolvedConfig | config} for KeyspacesClient's `config` shape. + * + */ +export class ListTablesCommand extends $Command< + ListTablesCommandInput, + ListTablesCommandOutput, + KeyspacesClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListTablesCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: KeyspacesClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "KeyspacesClient"; + const commandName = "ListTablesCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ListTablesRequest.filterSensitiveLog, + outputFilterSensitiveLog: ListTablesResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); 
+ } + + private serialize(input: ListTablesCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_0ListTablesCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_json1_0ListTablesCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-keyspaces/src/commands/ListTagsForResourceCommand.ts b/clients/client-keyspaces/src/commands/ListTagsForResourceCommand.ts new file mode 100644 index 000000000000..d3a51f55f4e5 --- /dev/null +++ b/clients/client-keyspaces/src/commands/ListTagsForResourceCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { KeyspacesClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../KeyspacesClient"; +import { ListTagsForResourceRequest, ListTagsForResourceResponse } from "../models/models_0"; +import { + deserializeAws_json1_0ListTagsForResourceCommand, + serializeAws_json1_0ListTagsForResourceCommand, +} from "../protocols/Aws_json1_0"; + +export interface ListTagsForResourceCommandInput extends ListTagsForResourceRequest {} +export interface ListTagsForResourceCommandOutput extends ListTagsForResourceResponse, __MetadataBearer {} + +/** + *

      Returns a list of all tags associated with the specified Amazon Keyspaces resource.

      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { KeyspacesClient, ListTagsForResourceCommand } from "@aws-sdk/client-keyspaces"; // ES Modules import + * // const { KeyspacesClient, ListTagsForResourceCommand } = require("@aws-sdk/client-keyspaces"); // CommonJS import + * const client = new KeyspacesClient(config); + * const command = new ListTagsForResourceCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListTagsForResourceCommandInput} for command's `input` shape. + * @see {@link ListTagsForResourceCommandOutput} for command's `response` shape. + * @see {@link KeyspacesClientResolvedConfig | config} for KeyspacesClient's `config` shape. + * + */ +export class ListTagsForResourceCommand extends $Command< + ListTagsForResourceCommandInput, + ListTagsForResourceCommandOutput, + KeyspacesClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListTagsForResourceCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: KeyspacesClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "KeyspacesClient"; + const commandName = "ListTagsForResourceCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ListTagsForResourceRequest.filterSensitiveLog, + outputFilterSensitiveLog: ListTagsForResourceResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) 
=> + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: ListTagsForResourceCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_0ListTagsForResourceCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_json1_0ListTagsForResourceCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-keyspaces/src/commands/RestoreTableCommand.ts b/clients/client-keyspaces/src/commands/RestoreTableCommand.ts new file mode 100644 index 000000000000..f0e5442a2065 --- /dev/null +++ b/clients/client-keyspaces/src/commands/RestoreTableCommand.ts @@ -0,0 +1,139 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { KeyspacesClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../KeyspacesClient"; +import { RestoreTableRequest, RestoreTableResponse } from "../models/models_0"; +import { + deserializeAws_json1_0RestoreTableCommand, + serializeAws_json1_0RestoreTableCommand, +} from "../protocols/Aws_json1_0"; + +export interface RestoreTableCommandInput extends RestoreTableRequest {} +export interface RestoreTableCommandOutput extends RestoreTableResponse, __MetadataBearer {} + +/** + *

      Restores the specified table to the specified point in time within the + * earliest_restorable_timestamp and the current time. For more information about restore points, see + * + * Time window for PITR continuous backups in the Amazon Keyspaces Developer Guide. + *

      + *

      + * Any number of users can execute up to 4 concurrent restores (any type of restore) in a given account.

      + *

      When you restore using point in time recovery, Amazon Keyspaces restores your source table's schema and data to the state + * based on the selected timestamp (day:hour:minute:second) to a new table. The Time to Live (TTL) settings + * are also restored to the state based on the selected timestamp.

      + *

      In addition to the table's schema, data, and TTL settings, RestoreTable restores the capacity mode, encryption, and + * point-in-time recovery settings from the source table. + * Unlike the table's schema data and TTL settings, which are restored based on the selected timestamp, + * these settings are always restored based on the table's settings as of the current time or when the table was deleted.

      + *

      You can also overwrite these settings during restore:

      + *
        + *
      • + *

        Read/write capacity mode

        + *
      • + *
      • + *

        Provisioned throughput capacity settings

        + *
      • + *
      • + *

        Point-in-time (PITR) settings

        + *
      • + *
      • + *

        Tags

        + *
      • + *
      + *

      For more information, see PITR restore settings in the Amazon Keyspaces Developer + * Guide.

      + *

      The following settings are not restored, and you must configure them manually for the + * new table.

      + *
        + *
      • + *

        Automatic scaling policies (for tables that use provisioned capacity + * mode)

        + *
      • + *
      • + *

        Identity and Access Management (IAM) policies

        + *
      • + *
      • + *

        Amazon CloudWatch metrics and alarms

        + *
      • + *
      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { KeyspacesClient, RestoreTableCommand } from "@aws-sdk/client-keyspaces"; // ES Modules import + * // const { KeyspacesClient, RestoreTableCommand } = require("@aws-sdk/client-keyspaces"); // CommonJS import + * const client = new KeyspacesClient(config); + * const command = new RestoreTableCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link RestoreTableCommandInput} for command's `input` shape. + * @see {@link RestoreTableCommandOutput} for command's `response` shape. + * @see {@link KeyspacesClientResolvedConfig | config} for KeyspacesClient's `config` shape. + * + */ +export class RestoreTableCommand extends $Command< + RestoreTableCommandInput, + RestoreTableCommandOutput, + KeyspacesClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: RestoreTableCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: KeyspacesClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "KeyspacesClient"; + const commandName = "RestoreTableCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: RestoreTableRequest.filterSensitiveLog, + outputFilterSensitiveLog: RestoreTableResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + 
handlerExecutionContext + ); + } + + private serialize(input: RestoreTableCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_0RestoreTableCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_json1_0RestoreTableCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-keyspaces/src/commands/TagResourceCommand.ts b/clients/client-keyspaces/src/commands/TagResourceCommand.ts new file mode 100644 index 000000000000..8ea3ff3d61ed --- /dev/null +++ b/clients/client-keyspaces/src/commands/TagResourceCommand.ts @@ -0,0 +1,101 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { KeyspacesClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../KeyspacesClient"; +import { TagResourceRequest, TagResourceResponse } from "../models/models_0"; +import { + deserializeAws_json1_0TagResourceCommand, + serializeAws_json1_0TagResourceCommand, +} from "../protocols/Aws_json1_0"; + +export interface TagResourceCommandInput extends TagResourceRequest {} +export interface TagResourceCommandOutput extends TagResourceResponse, __MetadataBearer {} + +/** + *

      Associates a set of tags with an Amazon Keyspaces resource. You can then + * activate these user-defined tags so that they appear on the Cost Management Console for cost allocation tracking. + * For more information, see Adding tags and labels to Amazon Keyspaces resources in the Amazon Keyspaces Developer + * Guide.

      + *

      For IAM policy examples that show how to control access to Amazon Keyspaces resources based on tags, + * see Amazon Keyspaces resource access based on tags + * in the Amazon Keyspaces Developer Guide.

      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { KeyspacesClient, TagResourceCommand } from "@aws-sdk/client-keyspaces"; // ES Modules import + * // const { KeyspacesClient, TagResourceCommand } = require("@aws-sdk/client-keyspaces"); // CommonJS import + * const client = new KeyspacesClient(config); + * const command = new TagResourceCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link TagResourceCommandInput} for command's `input` shape. + * @see {@link TagResourceCommandOutput} for command's `response` shape. + * @see {@link KeyspacesClientResolvedConfig | config} for KeyspacesClient's `config` shape. + * + */ +export class TagResourceCommand extends $Command< + TagResourceCommandInput, + TagResourceCommandOutput, + KeyspacesClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: TagResourceCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: KeyspacesClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "KeyspacesClient"; + const commandName = "TagResourceCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: TagResourceRequest.filterSensitiveLog, + outputFilterSensitiveLog: TagResourceResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + 
handlerExecutionContext + ); + } + + private serialize(input: TagResourceCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_0TagResourceCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_json1_0TagResourceCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-keyspaces/src/commands/UntagResourceCommand.ts b/clients/client-keyspaces/src/commands/UntagResourceCommand.ts new file mode 100644 index 000000000000..024e7b0c6f44 --- /dev/null +++ b/clients/client-keyspaces/src/commands/UntagResourceCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { KeyspacesClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../KeyspacesClient"; +import { UntagResourceRequest, UntagResourceResponse } from "../models/models_0"; +import { + deserializeAws_json1_0UntagResourceCommand, + serializeAws_json1_0UntagResourceCommand, +} from "../protocols/Aws_json1_0"; + +export interface UntagResourceCommandInput extends UntagResourceRequest {} +export interface UntagResourceCommandOutput extends UntagResourceResponse, __MetadataBearer {} + +/** + *

      Removes the association of tags from an Amazon Keyspaces resource.

      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { KeyspacesClient, UntagResourceCommand } from "@aws-sdk/client-keyspaces"; // ES Modules import + * // const { KeyspacesClient, UntagResourceCommand } = require("@aws-sdk/client-keyspaces"); // CommonJS import + * const client = new KeyspacesClient(config); + * const command = new UntagResourceCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link UntagResourceCommandInput} for command's `input` shape. + * @see {@link UntagResourceCommandOutput} for command's `response` shape. + * @see {@link KeyspacesClientResolvedConfig | config} for KeyspacesClient's `config` shape. + * + */ +export class UntagResourceCommand extends $Command< + UntagResourceCommandInput, + UntagResourceCommandOutput, + KeyspacesClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: UntagResourceCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: KeyspacesClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "KeyspacesClient"; + const commandName = "UntagResourceCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: UntagResourceRequest.filterSensitiveLog, + outputFilterSensitiveLog: UntagResourceResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || 
{}), + handlerExecutionContext + ); + } + + private serialize(input: UntagResourceCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_0UntagResourceCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_json1_0UntagResourceCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-keyspaces/src/commands/UpdateTableCommand.ts b/clients/client-keyspaces/src/commands/UpdateTableCommand.ts new file mode 100644 index 000000000000..597255a1b0e1 --- /dev/null +++ b/clients/client-keyspaces/src/commands/UpdateTableCommand.ts @@ -0,0 +1,97 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { KeyspacesClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../KeyspacesClient"; +import { UpdateTableRequest, UpdateTableResponse } from "../models/models_0"; +import { + deserializeAws_json1_0UpdateTableCommand, + serializeAws_json1_0UpdateTableCommand, +} from "../protocols/Aws_json1_0"; + +export interface UpdateTableCommandInput extends UpdateTableRequest {} +export interface UpdateTableCommandOutput extends UpdateTableResponse, __MetadataBearer {} + +/** + *

Adds new columns to the table or updates one of the table's settings, for example + * capacity mode, encryption, point-in-time recovery, or Time to Live (TTL) settings. + * Note that you can only update one specific table setting per update operation.

      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { KeyspacesClient, UpdateTableCommand } from "@aws-sdk/client-keyspaces"; // ES Modules import + * // const { KeyspacesClient, UpdateTableCommand } = require("@aws-sdk/client-keyspaces"); // CommonJS import + * const client = new KeyspacesClient(config); + * const command = new UpdateTableCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link UpdateTableCommandInput} for command's `input` shape. + * @see {@link UpdateTableCommandOutput} for command's `response` shape. + * @see {@link KeyspacesClientResolvedConfig | config} for KeyspacesClient's `config` shape. + * + */ +export class UpdateTableCommand extends $Command< + UpdateTableCommandInput, + UpdateTableCommandOutput, + KeyspacesClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: UpdateTableCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: KeyspacesClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "KeyspacesClient"; + const commandName = "UpdateTableCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: UpdateTableRequest.filterSensitiveLog, + outputFilterSensitiveLog: UpdateTableResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + 
handlerExecutionContext + ); + } + + private serialize(input: UpdateTableCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_0UpdateTableCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_json1_0UpdateTableCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-keyspaces/src/commands/index.ts b/clients/client-keyspaces/src/commands/index.ts new file mode 100644 index 000000000000..17ff86ac5f51 --- /dev/null +++ b/clients/client-keyspaces/src/commands/index.ts @@ -0,0 +1,13 @@ +export * from "./CreateKeyspaceCommand"; +export * from "./CreateTableCommand"; +export * from "./DeleteKeyspaceCommand"; +export * from "./DeleteTableCommand"; +export * from "./GetKeyspaceCommand"; +export * from "./GetTableCommand"; +export * from "./ListKeyspacesCommand"; +export * from "./ListTablesCommand"; +export * from "./ListTagsForResourceCommand"; +export * from "./RestoreTableCommand"; +export * from "./TagResourceCommand"; +export * from "./UntagResourceCommand"; +export * from "./UpdateTableCommand"; diff --git a/clients/client-keyspaces/src/endpoints.ts b/clients/client-keyspaces/src/endpoints.ts new file mode 100644 index 000000000000..76a759780075 --- /dev/null +++ b/clients/client-keyspaces/src/endpoints.ts @@ -0,0 +1,135 @@ +import { getRegionInfo, PartitionHash, RegionHash } from "@aws-sdk/config-resolver"; +import { RegionInfoProvider, RegionInfoProviderOptions } from "@aws-sdk/types"; + +const regionHash: RegionHash = {}; + +const partitionHash: PartitionHash = { + aws: { + regions: [ + "af-south-1", + "ap-east-1", + "ap-northeast-1", + "ap-northeast-2", + "ap-northeast-3", + "ap-south-1", + "ap-southeast-1", + "ap-southeast-2", + "ap-southeast-3", + "ca-central-1", + "eu-central-1", + "eu-north-1", + "eu-south-1", + "eu-west-1", + "eu-west-2", + "eu-west-3", + 
"me-south-1", + "sa-east-1", + "us-east-1", + "us-east-2", + "us-west-1", + "us-west-2", + ], + regionRegex: "^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$", + variants: [ + { + hostname: "cassandra.{region}.amazonaws.com", + tags: [], + }, + { + hostname: "cassandra-fips.{region}.amazonaws.com", + tags: ["fips"], + }, + { + hostname: "cassandra-fips.{region}.api.aws", + tags: ["dualstack", "fips"], + }, + { + hostname: "cassandra.{region}.api.aws", + tags: ["dualstack"], + }, + ], + }, + "aws-cn": { + regions: ["cn-north-1", "cn-northwest-1"], + regionRegex: "^cn\\-\\w+\\-\\d+$", + variants: [ + { + hostname: "cassandra.{region}.amazonaws.com.cn", + tags: [], + }, + { + hostname: "cassandra-fips.{region}.amazonaws.com.cn", + tags: ["fips"], + }, + { + hostname: "cassandra-fips.{region}.api.amazonwebservices.com.cn", + tags: ["dualstack", "fips"], + }, + { + hostname: "cassandra.{region}.api.amazonwebservices.com.cn", + tags: ["dualstack"], + }, + ], + }, + "aws-iso": { + regions: ["us-iso-east-1", "us-iso-west-1"], + regionRegex: "^us\\-iso\\-\\w+\\-\\d+$", + variants: [ + { + hostname: "cassandra.{region}.c2s.ic.gov", + tags: [], + }, + { + hostname: "cassandra-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, + ], + }, + "aws-iso-b": { + regions: ["us-isob-east-1"], + regionRegex: "^us\\-isob\\-\\w+\\-\\d+$", + variants: [ + { + hostname: "cassandra.{region}.sc2s.sgov.gov", + tags: [], + }, + { + hostname: "cassandra-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, + ], + }, + "aws-us-gov": { + regions: ["us-gov-east-1", "us-gov-west-1"], + regionRegex: "^us\\-gov\\-\\w+\\-\\d+$", + variants: [ + { + hostname: "cassandra.{region}.amazonaws.com", + tags: [], + }, + { + hostname: "cassandra-fips.{region}.amazonaws.com", + tags: ["fips"], + }, + { + hostname: "cassandra-fips.{region}.api.aws", + tags: ["dualstack", "fips"], + }, + { + hostname: "cassandra.{region}.api.aws", + tags: ["dualstack"], + }, + ], + }, +}; + +export const defaultRegionInfoProvider: 
RegionInfoProvider = async ( + region: string, + options?: RegionInfoProviderOptions +) => + getRegionInfo(region, { + ...options, + signingService: "cassandra", + regionHash, + partitionHash, + }); diff --git a/clients/client-keyspaces/src/index.ts b/clients/client-keyspaces/src/index.ts new file mode 100644 index 000000000000..e1980db6dbc5 --- /dev/null +++ b/clients/client-keyspaces/src/index.ts @@ -0,0 +1,6 @@ +export * from "./Keyspaces"; +export * from "./KeyspacesClient"; +export * from "./commands"; +export * from "./models"; +export * from "./pagination"; +export { KeyspacesServiceException } from "./models/KeyspacesServiceException"; diff --git a/clients/client-keyspaces/src/models/KeyspacesServiceException.ts b/clients/client-keyspaces/src/models/KeyspacesServiceException.ts new file mode 100644 index 000000000000..f1733b993699 --- /dev/null +++ b/clients/client-keyspaces/src/models/KeyspacesServiceException.ts @@ -0,0 +1,17 @@ +import { + ServiceException as __ServiceException, + ServiceExceptionOptions as __ServiceExceptionOptions, +} from "@aws-sdk/smithy-client"; + +/** + * Base exception class for all service exceptions from Keyspaces service. 
+ */ +export class KeyspacesServiceException extends __ServiceException { + /** + * @internal + */ + constructor(options: __ServiceExceptionOptions) { + super(options); + Object.setPrototypeOf(this, KeyspacesServiceException.prototype); + } +} diff --git a/clients/client-keyspaces/src/models/index.ts b/clients/client-keyspaces/src/models/index.ts new file mode 100644 index 000000000000..09c5d6e09b8c --- /dev/null +++ b/clients/client-keyspaces/src/models/index.ts @@ -0,0 +1 @@ +export * from "./models_0"; diff --git a/clients/client-keyspaces/src/models/models_0.ts b/clients/client-keyspaces/src/models/models_0.ts new file mode 100644 index 000000000000..a56864bc7b96 --- /dev/null +++ b/clients/client-keyspaces/src/models/models_0.ts @@ -0,0 +1,1590 @@ +import { ExceptionOptionType as __ExceptionOptionType } from "@aws-sdk/smithy-client"; +import { MetadataBearer as $MetadataBearer } from "@aws-sdk/types"; + +import { KeyspacesServiceException as __BaseException } from "./KeyspacesServiceException"; + +/** + *

      You do not have sufficient access to perform this action.

      + */ +export class AccessDeniedException extends __BaseException { + readonly name: "AccessDeniedException" = "AccessDeniedException"; + readonly $fault: "client" = "client"; + /** + * @internal + */ + constructor(opts: __ExceptionOptionType) { + super({ + name: "AccessDeniedException", + $fault: "client", + ...opts, + }); + Object.setPrototypeOf(this, AccessDeniedException.prototype); + } +} + +export enum ThroughputMode { + PAY_PER_REQUEST = "PAY_PER_REQUEST", + PROVISIONED = "PROVISIONED", +} + +/** + *

      Amazon Keyspaces has two read/write capacity modes for processing reads and writes on your tables:

      + *
        + *
      • + *

        + * On-demand (default) + *

        + *
      • + *
      • + *

        + * Provisioned + *

        + *
      • + *
      + * + *

      The read/write capacity mode that you choose controls how you are charged for read and + * write throughput and how table throughput capacity is managed.

      + *

      For more information, see Read/write capacity modes in the Amazon Keyspaces Developer + * Guide.

      + */ +export interface CapacitySpecification { + /** + *

      The read/write throughput capacity mode for a table. The options are:

      + *
        + *
      • + *

        + * throughputMode:PAY_PER_REQUEST and

        + *
      • + *
      • + *

        + * throughputMode:PROVISIONED. The provisioned capacity mode requires + * readCapacityUnits and writeCapacityUnits as inputs.

        + *
      • + *
      + *

      The default is throughput_mode:PAY_PER_REQUEST.

      + *

      For more information, see Read/write capacity modes in the Amazon Keyspaces Developer + * Guide.

      + */ + throughputMode: ThroughputMode | string | undefined; + + /** + *

      The throughput capacity specified for read operations defined in read capacity units + * (RCUs).

      + */ + readCapacityUnits?: number; + + /** + *

      The throughput capacity specified for write operations defined in write capacity units + * (WCUs).

      + */ + writeCapacityUnits?: number; +} + +export namespace CapacitySpecification { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CapacitySpecification): any => ({ + ...obj, + }); +} + +/** + *

      The read/write throughput capacity mode for a table. The options are:

      + *
        + *
      • + *

        + * throughputMode:PAY_PER_REQUEST and

        + *
      • + *
      • + *

        + * throughputMode:PROVISIONED.

        + *
      • + *
      + *

      For more information, see Read/write capacity modes in the Amazon Keyspaces Developer + * Guide.

      + */ +export interface CapacitySpecificationSummary { + /** + *

      The read/write throughput capacity mode for a table. The options are:

      + *
        + *
      • + *

        + * throughputMode:PAY_PER_REQUEST and

        + *
      • + *
      • + *

        + * throughputMode:PROVISIONED. The provisioned capacity mode requires + * readCapacityUnits and writeCapacityUnits as inputs.

        + *
      • + *
      + *

      The default is throughput_mode:PAY_PER_REQUEST.

      + *

      For more information, see Read/write capacity modes in the Amazon Keyspaces Developer + * Guide.

      + */ + throughputMode: ThroughputMode | string | undefined; + + /** + *

      The throughput capacity specified for read operations defined in read capacity units + * (RCUs).

      + */ + readCapacityUnits?: number; + + /** + *

      The throughput capacity specified for write operations defined in write capacity units + * (WCUs).

      + */ + writeCapacityUnits?: number; + + /** + *

      The timestamp of the last operation that changed the provisioned throughput capacity of a table.

      + */ + lastUpdateToPayPerRequestTimestamp?: Date; +} + +export namespace CapacitySpecificationSummary { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CapacitySpecificationSummary): any => ({ + ...obj, + }); +} + +export enum SortOrder { + ASC = "ASC", + DESC = "DESC", +} + +/** + *

      The optional clustering column portion of your primary key determines how the data is clustered and sorted within each partition.

      + */ +export interface ClusteringKey { + /** + *

      The name(s) of the clustering column(s).

      + */ + name: string | undefined; + + /** + *

Sets the ascending (ASC) or descending (DESC) order modifier.

      + */ + orderBy: SortOrder | string | undefined; +} + +export namespace ClusteringKey { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ClusteringKey): any => ({ + ...obj, + }); +} + +/** + *

      The names and data types of regular columns.

      + */ +export interface ColumnDefinition { + /** + *

      The name of the column.

      + */ + name: string | undefined; + + /** + *

      The data type of the column. For a list of available data types, see Data types in the Amazon Keyspaces Developer + * Guide.

      + */ + type: string | undefined; +} + +export namespace ColumnDefinition { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ColumnDefinition): any => ({ + ...obj, + }); +} + +/** + *

      An optional comment that describes the table.

      + */ +export interface Comment { + /** + *

      An optional description of the table.

      + */ + message: string | undefined; +} + +export namespace Comment { + /** + * @internal + */ + export const filterSensitiveLog = (obj: Comment): any => ({ + ...obj, + }); +} + +/** + *

      Amazon Keyspaces could not complete the requested action. This error may occur if you try to + * perform an action and the same or a different action is already + * in progress, or if you try to create a resource that already exists.

      + */ +export class ConflictException extends __BaseException { + readonly name: "ConflictException" = "ConflictException"; + readonly $fault: "client" = "client"; + /** + * @internal + */ + constructor(opts: __ExceptionOptionType) { + super({ + name: "ConflictException", + $fault: "client", + ...opts, + }); + Object.setPrototypeOf(this, ConflictException.prototype); + } +} + +/** + *

      Describes a tag. A tag is a key-value pair. You can add up to 50 tags to a single Amazon Keyspaces resource.

      + *

      Amazon Web Services-assigned tag names and values are automatically assigned the aws: prefix, which the user cannot assign. + * Amazon Web Services-assigned tag names do not count towards the tag limit of 50. User-assigned tag names have the + * prefix user: in the Cost Allocation Report. You cannot backdate the application of a tag.

      + *

      For more information, see Adding tags and labels to Amazon Keyspaces resources in the Amazon Keyspaces Developer + * Guide.

      + */ +export interface Tag { + /** + *

The key of the tag. Tag keys are case-sensitive. Each Amazon Keyspaces resource can only have up to one tag with the same key. If you try to add an + * existing tag (same key), the existing tag value will be updated to the new value.

      + */ + key: string | undefined; + + /** + *

      The value of the tag. Tag values are case-sensitive and can be null.

      + */ + value: string | undefined; +} + +export namespace Tag { + /** + * @internal + */ + export const filterSensitiveLog = (obj: Tag): any => ({ + ...obj, + }); +} + +export interface CreateKeyspaceRequest { + /** + *

      The name of the keyspace to be created.

      + */ + keyspaceName: string | undefined; + + /** + *

      A list of key-value pair tags to be attached to the keyspace.

      + *

      For more information, see Adding tags and labels to Amazon Keyspaces resources in the Amazon Keyspaces Developer + * Guide.

      + */ + tags?: Tag[]; +} + +export namespace CreateKeyspaceRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateKeyspaceRequest): any => ({ + ...obj, + }); +} + +export interface CreateKeyspaceResponse { + /** + *

      The unique identifier of the keyspace in the format of an Amazon Resource Name (ARN).

      + */ + resourceArn: string | undefined; +} + +export namespace CreateKeyspaceResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateKeyspaceResponse): any => ({ + ...obj, + }); +} + +/** + *

      Amazon Keyspaces was unable to fully process this request because of an internal server error.

      + */ +export class InternalServerException extends __BaseException { + readonly name: "InternalServerException" = "InternalServerException"; + readonly $fault: "server" = "server"; + /** + * @internal + */ + constructor(opts: __ExceptionOptionType) { + super({ + name: "InternalServerException", + $fault: "server", + ...opts, + }); + Object.setPrototypeOf(this, InternalServerException.prototype); + } +} + +/** + *

      The operation exceeded the service quota for this resource. For more information on service quotas, see Quotas in the Amazon Keyspaces Developer + * Guide.

      + */ +export class ServiceQuotaExceededException extends __BaseException { + readonly name: "ServiceQuotaExceededException" = "ServiceQuotaExceededException"; + readonly $fault: "client" = "client"; + /** + * @internal + */ + constructor(opts: __ExceptionOptionType) { + super({ + name: "ServiceQuotaExceededException", + $fault: "client", + ...opts, + }); + Object.setPrototypeOf(this, ServiceQuotaExceededException.prototype); + } +} + +/** + *

      The operation failed due to an invalid or malformed request.

      + */ +export class ValidationException extends __BaseException { + readonly name: "ValidationException" = "ValidationException"; + readonly $fault: "client" = "client"; + /** + * @internal + */ + constructor(opts: __ExceptionOptionType) { + super({ + name: "ValidationException", + $fault: "client", + ...opts, + }); + Object.setPrototypeOf(this, ValidationException.prototype); + } +} + +export enum EncryptionType { + AWS_OWNED_KMS_KEY = "AWS_OWNED_KMS_KEY", + CUSTOMER_MANAGED_KMS_KEY = "CUSTOMER_MANAGED_KMS_KEY", +} + +/** + *

      + * Amazon Keyspaces encrypts and decrypts the table data at rest transparently and integrates with Key Management Service for storing and managing the encryption key. + * You can choose one of the following KMS keys (KMS keys):

      + *
        + *
      • + *

        + * Amazon Web Services owned key - This is the default encryption type. The key is owned by Amazon Keyspaces (no additional charge).

        + *
      • + *
      • + *

        Customer managed key - This key is stored in your account and is created, owned, and managed by you. You have full control over the customer + * managed key (KMS charges apply).

        + *
      • + *
      + *

      For more information about encryption at rest in Amazon Keyspaces, see Encryption at rest in the Amazon Keyspaces Developer + * Guide.

      + *

      For more information about KMS, see KMS management service concepts in the Key Management Service Developer Guide.

      + */ +export interface EncryptionSpecification { + /** + *

      + * The encryption option specified for the table. You can choose one of the following KMS keys (KMS keys):

      + *
        + *
      • + *

        + * type:AWS_OWNED_KMS_KEY - This key is owned by Amazon Keyspaces.

        + *
      • + *
      • + *

        + * type:CUSTOMER_MANAGED_KMS_KEY - This key is stored in your account and is created, owned, and managed by you. + * This option + * requires the kms_key_identifier of the KMS key in Amazon Resource Name (ARN) format as input.

        + *
      • + *
      + *

      The default is type:AWS_OWNED_KMS_KEY.

      + *

      For more information, see Encryption at rest in the Amazon Keyspaces Developer + * Guide.

      + */ + type: EncryptionType | string | undefined; + + /** + *

      The Amazon Resource Name (ARN) of the customer managed KMS key, for example kms_key_identifier:ARN. + *

      + */ + kmsKeyIdentifier?: string; +} + +export namespace EncryptionSpecification { + /** + * @internal + */ + export const filterSensitiveLog = (obj: EncryptionSpecification): any => ({ + ...obj, + }); +} + +export enum PointInTimeRecoveryStatus { + DISABLED = "DISABLED", + ENABLED = "ENABLED", +} + +/** + *

      Point-in-time recovery (PITR) helps protect your Amazon Keyspaces tables from accidental write or delete operations by providing you continuous backups of your table data.

      + *

      For more information, see Point-in-time recovery in the Amazon Keyspaces Developer + * Guide.

      + */ +export interface PointInTimeRecovery { + /** + *

      The options are:

      + *
        + *
      • + *

        + * ENABLED + *

        + *
      • + *
      • + *

        + * DISABLED + *

        + *
      • + *
      + */ + status: PointInTimeRecoveryStatus | string | undefined; +} + +export namespace PointInTimeRecovery { + /** + * @internal + */ + export const filterSensitiveLog = (obj: PointInTimeRecovery): any => ({ + ...obj, + }); +} + +/** + *

      The partition key portion of the primary key is required + * and determines how Amazon Keyspaces stores the data. + * The partition key can be a single column, or it can be a compound value composed of two or more columns.

      + */ +export interface PartitionKey { + /** + *

      The name(s) of the partition key column(s).

      + */ + name: string | undefined; +} + +export namespace PartitionKey { + /** + * @internal + */ + export const filterSensitiveLog = (obj: PartitionKey): any => ({ + ...obj, + }); +} + +/** + *

      The static columns of the table. Static columns store values that are shared by all rows in the same partition.

      + */ +export interface StaticColumn { + /** + *

      The name of the static column.

      + */ + name: string | undefined; +} + +export namespace StaticColumn { + /** + * @internal + */ + export const filterSensitiveLog = (obj: StaticColumn): any => ({ + ...obj, + }); +} + +/** + *

      Describes the schema of the table.

      + */ +export interface SchemaDefinition { + /** + *

      The regular columns of the table.

      + */ + allColumns: ColumnDefinition[] | undefined; + + /** + *

The columns that are part of the partition key of the table.

      + */ + partitionKeys: PartitionKey[] | undefined; + + /** + *

      The columns that are part of the clustering key of the table.

      + */ + clusteringKeys?: ClusteringKey[]; + + /** + *

      The columns that have been defined as STATIC. Static columns store values that are shared by all rows in the same partition.

      + */ + staticColumns?: StaticColumn[]; +} + +export namespace SchemaDefinition { + /** + * @internal + */ + export const filterSensitiveLog = (obj: SchemaDefinition): any => ({ + ...obj, + }); +} + +export enum TimeToLiveStatus { + ENABLED = "ENABLED", +} + +/** + *

      Enable custom Time to Live (TTL) settings for rows and columns without setting a TTL default for the specified table.

      + *

      For more information, see Enabling TTL on tables in the Amazon Keyspaces Developer + * Guide.

      + */ +export interface TimeToLive { + /** + *

      Shows how to enable custom Time to Live (TTL) settings for the specified table.

      + */ + status: TimeToLiveStatus | string | undefined; +} + +export namespace TimeToLive { + /** + * @internal + */ + export const filterSensitiveLog = (obj: TimeToLive): any => ({ + ...obj, + }); +} + +export interface CreateTableRequest { + /** + *

      The name of the keyspace that the table is going to be created in.

      + */ + keyspaceName: string | undefined; + + /** + *

      The name of the table.

      + */ + tableName: string | undefined; + + /** + *

      The schemaDefinition consists of the + * following parameters.

      + *

      For each column to be created:

      + *
        + *
      • + *

        + * + * name + * - The name + * of the column.

        + *
      • + *
      • + *

        + * + * type + * - An Amazon Keyspaces + * data type. For more information, see Data types in the Amazon Keyspaces Developer + * Guide.

        + *
      • + *
      + * + *

      The primary key of the table consists of the + * following columns:

      + *
        + *
      • + *

        + * partitionKeys - The partition key can be a single column, or it can be a + * compound value composed of two or more columns. The partition + * key portion of the primary key is required and determines how + * Amazon Keyspaces stores your data.

        + *
          + *
        • + *

          + * + * name + * - The name of each partition key column.

          + *
        • + *
        + *
      • + *
      • + *

        + * clusteringKeys - The optional clustering column portion of your primary key + * determines how the data is clustered and sorted within each + * partition.

        + *
          + *
        • + *

          + * + * name + * - The name of the clustering column.

          + *
        • + *
        • + *

          + * + * orderBy + * - Sets the + * ascendant (ASC) or descendant (DESC) order modifier.

          + *
        • + *
        + *
      • + *
      + *

      To define a column as static use + * staticColumns + * - + * Static columns store values that are shared by all rows in the same partition:

      + *
        + *
      • + *

        + * + * name + * - The name + * of the column.

        + *
      • + *
      • + *

        + * + * type + * - An Amazon Keyspaces + * data type.

        + *
      • + *
      + */ + schemaDefinition: SchemaDefinition | undefined; + + /** + *

This parameter allows you to enter a description of the table.

      + */ + comment?: Comment; + + /** + *

      Specifies the read/write throughput capacity mode for the table. The options are:

      + *
        + *
      • + *

        + * throughputMode:PAY_PER_REQUEST and

        + *
      • + *
      • + *

        + * throughputMode:PROVISIONED. The provisioned capacity mode requires + * readCapacityUnits and writeCapacityUnits as inputs.

        + *
      • + *
      + *

      The default is + * throughput_mode:PAY_PER_REQUEST.

      + *

      For more information, see Read/write capacity modes in the Amazon Keyspaces Developer + * Guide.

      + */ + capacitySpecification?: CapacitySpecification; + + /** + *

      Specifies how the encryption key for encryption at rest is managed for the table. You can choose one of the following KMS key (KMS key):

      + * + *
        + *
      • + *

        + * type:AWS_OWNED_KMS_KEY - This key is owned by Amazon Keyspaces.

        + *
      • + *
      • + *

        + * type:CUSTOMER_MANAGED_KMS_KEY - This key is stored in your account and is created, owned, and managed by you. + * This option + * requires the kms_key_identifier of the KMS key in Amazon Resource Name (ARN) format as input.

        + *
      • + *
      + *

      The default is type:AWS_OWNED_KMS_KEY.

      + *

      For more information, see Encryption at rest in the Amazon Keyspaces Developer + * Guide.

      + */ + encryptionSpecification?: EncryptionSpecification; + + /** + *

      + * Specifies if pointInTimeRecovery is enabled or disabled for the + * table. The options are:

      + *
        + *
      • + *

        + * ENABLED + *

        + *
      • + *
      • + *

        + * DISABLED + *

        + *
      • + *
      + *

      If it's not specified, the default is DISABLED.

      + *

      For more information, see Point-in-time recovery in the Amazon Keyspaces Developer + * Guide.

      + */ + pointInTimeRecovery?: PointInTimeRecovery; + + /** + *

      + * Enables Time to Live custom settings for the + * table. The options are:

      + *
        + *
      • + *

        + * status:enabled + *

        + *
      • + *
      • + *

        + * status:disabled + *

        + *
      • + *
      + *

      The default is status:disabled. After + * ttl is enabled, you can't disable it + * for the table.

      + *

      For more information, see Expiring data by using Amazon Keyspaces Time to Live (TTL) in the Amazon Keyspaces Developer + * Guide.

      + */ + ttl?: TimeToLive; + + /** + *

      The default Time to Live setting in seconds for the + * table.

      + *

      For more information, see Setting the default TTL value for a table in the Amazon Keyspaces Developer + * Guide.

      + */ + defaultTimeToLive?: number; + + /** + *

      A list of key-value pair tags to be + * attached to the resource.

      + *

      For more information, see Adding tags and labels to Amazon Keyspaces resources in the Amazon Keyspaces Developer + * Guide.

      + */ + tags?: Tag[]; +} + +export namespace CreateTableRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateTableRequest): any => ({ + ...obj, + }); +} + +export interface CreateTableResponse { + /** + *

      The unique identifier of the table in the format of an Amazon Resource Name (ARN).

      + */ + resourceArn: string | undefined; +} + +export namespace CreateTableResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateTableResponse): any => ({ + ...obj, + }); +} + +/** + *

      The operation tried to access a keyspace or table that doesn't exist. The resource might not be specified correctly, or its status might not be ACTIVE.

      + */ +export class ResourceNotFoundException extends __BaseException { + readonly name: "ResourceNotFoundException" = "ResourceNotFoundException"; + readonly $fault: "client" = "client"; + /** + *

      The unique identifier in the format of Amazon Resource Name (ARN), for the resource not found.

      + */ + resourceArn?: string; + + /** + * @internal + */ + constructor(opts: __ExceptionOptionType) { + super({ + name: "ResourceNotFoundException", + $fault: "client", + ...opts, + }); + Object.setPrototypeOf(this, ResourceNotFoundException.prototype); + this.resourceArn = opts.resourceArn; + } +} + +export interface DeleteKeyspaceRequest { + /** + *

      The name of the keyspace to be deleted.

      + */ + keyspaceName: string | undefined; +} + +export namespace DeleteKeyspaceRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteKeyspaceRequest): any => ({ + ...obj, + }); +} + +export interface DeleteKeyspaceResponse {} + +export namespace DeleteKeyspaceResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteKeyspaceResponse): any => ({ + ...obj, + }); +} + +export interface DeleteTableRequest { + /** + *

      The name of the keyspace of the to be deleted table.

      + */ + keyspaceName: string | undefined; + + /** + *

      The name of the table to be deleted.

      + */ + tableName: string | undefined; +} + +export namespace DeleteTableRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteTableRequest): any => ({ + ...obj, + }); +} + +export interface DeleteTableResponse {} + +export namespace DeleteTableResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteTableResponse): any => ({ + ...obj, + }); +} + +export interface GetKeyspaceRequest { + /** + *

      The name of the keyspace.

      + */ + keyspaceName: string | undefined; +} + +export namespace GetKeyspaceRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetKeyspaceRequest): any => ({ + ...obj, + }); +} + +export interface GetKeyspaceResponse { + /** + *

      The name of the keyspace.

      + */ + keyspaceName: string | undefined; + + /** + *

      The ARN of the keyspace.

      + */ + resourceArn: string | undefined; +} + +export namespace GetKeyspaceResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetKeyspaceResponse): any => ({ + ...obj, + }); +} + +export interface GetTableRequest { + /** + *

      The name of the keyspace that the table is stored in.

      + */ + keyspaceName: string | undefined; + + /** + *

      The name of the table.

      + */ + tableName: string | undefined; +} + +export namespace GetTableRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetTableRequest): any => ({ + ...obj, + }); +} + +/** + *

      The point-in-time recovery status of the specified table.

      + */ +export interface PointInTimeRecoverySummary { + /** + *

      Shows if point-in-time recovery is enabled or disabled for the specified table.

      + */ + status: PointInTimeRecoveryStatus | string | undefined; + + /** + *

      Specifies the earliest possible restore point of the table in ISO 8601 format.

      + */ + earliestRestorableTimestamp?: Date; +} + +export namespace PointInTimeRecoverySummary { + /** + * @internal + */ + export const filterSensitiveLog = (obj: PointInTimeRecoverySummary): any => ({ + ...obj, + }); +} + +export enum TableStatus { + ACTIVE = "ACTIVE", + CREATING = "CREATING", + DELETED = "DELETED", + DELETING = "DELETING", + INACCESSIBLE_ENCRYPTION_CREDENTIALS = "INACCESSIBLE_ENCRYPTION_CREDENTIALS", + RESTORING = "RESTORING", + UPDATING = "UPDATING", +} + +export interface GetTableResponse { + /** + *

      The name of the keyspace that the specified table is stored in.

      + */ + keyspaceName: string | undefined; + + /** + *

      The name of the specified table.

      + */ + tableName: string | undefined; + + /** + *

      The Amazon Resource Name (ARN) of the specified table.

      + */ + resourceArn: string | undefined; + + /** + *

      The creation timestamp of the specified table.

      + */ + creationTimestamp?: Date; + + /** + *

      The current status of the specified table.

      + */ + status?: TableStatus | string; + + /** + *

      The schema definition of the specified table.

      + */ + schemaDefinition?: SchemaDefinition; + + /** + *

      The read/write throughput capacity mode for a table. The options are:

      + *
        + *
      • + *

        + * throughputMode:PAY_PER_REQUEST and

        + *
      • + *
      • + *

        + * throughputMode:PROVISIONED.

        + *
      • + *
      + */ + capacitySpecification?: CapacitySpecificationSummary; + + /** + *

      The encryption settings of the specified table.

      + */ + encryptionSpecification?: EncryptionSpecification; + + /** + *

      The point-in-time recovery status of the specified table.

      + */ + pointInTimeRecovery?: PointInTimeRecoverySummary; + + /** + *

      The custom Time to Live settings of the specified table.

      + */ + ttl?: TimeToLive; + + /** + *

      The default Time to Live settings of the specified table.

      + */ + defaultTimeToLive?: number; + + /** + *

      The the description of the specified table.

      + */ + comment?: Comment; +} + +export namespace GetTableResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetTableResponse): any => ({ + ...obj, + }); +} + +export interface ListKeyspacesRequest { + /** + *

      The pagination token. To resume pagination, provide the NextToken value as argument of a subsequent API invocation.

      + */ + nextToken?: string; + + /** + *

      The total number of keyspaces to return in the output. If the total number of keyspaces available + * is more than the value specified, a NextToken is provided in the output. To resume pagination, + * provide the NextToken value as an argument of a subsequent API invocation.

      + */ + maxResults?: number; +} + +export namespace ListKeyspacesRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListKeyspacesRequest): any => ({ + ...obj, + }); +} + +/** + *

      Represents the properties of a keyspace.

      + */ +export interface KeyspaceSummary { + /** + *

      The name of the keyspace.

      + */ + keyspaceName: string | undefined; + + /** + *

      The unique identifier of the keyspace in the format of an Amazon Resource Name (ARN).

      + */ + resourceArn: string | undefined; +} + +export namespace KeyspaceSummary { + /** + * @internal + */ + export const filterSensitiveLog = (obj: KeyspaceSummary): any => ({ + ...obj, + }); +} + +export interface ListKeyspacesResponse { + /** + *

      A token to specify where to start paginating. This is the NextToken from a previously truncated response.

      + */ + nextToken?: string; + + /** + *

      A list of keyspaces.

      + */ + keyspaces: KeyspaceSummary[] | undefined; +} + +export namespace ListKeyspacesResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListKeyspacesResponse): any => ({ + ...obj, + }); +} + +export interface ListTablesRequest { + /** + *

      The pagination token. To resume pagination, provide the NextToken value as an argument of a subsequent API invocation.

      + */ + nextToken?: string; + + /** + *

      The total number of tables to return in the output. If the total number of tables available + * is more than the value specified, a NextToken is provided in the output. To resume pagination, + * provide the NextToken value as an argument of a subsequent API invocation.

      + */ + maxResults?: number; + + /** + *

      The name of the keyspace.

      + */ + keyspaceName: string | undefined; +} + +export namespace ListTablesRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListTablesRequest): any => ({ + ...obj, + }); +} + +/** + *

      Returns the name of the specified table, the keyspace it is stored in, and the unique identifier in the format of an Amazon Resource Name (ARN).

      + */ +export interface TableSummary { + /** + *

      The name of the keyspace that the table is stored in.

      + */ + keyspaceName: string | undefined; + + /** + *

      The name of the table.

      + */ + tableName: string | undefined; + + /** + *

      The unique identifier of the table in the format of an Amazon Resource Name (ARN).

      + */ + resourceArn: string | undefined; +} + +export namespace TableSummary { + /** + * @internal + */ + export const filterSensitiveLog = (obj: TableSummary): any => ({ + ...obj, + }); +} + +export interface ListTablesResponse { + /** + *

      A token to specify where to start paginating. This is the NextToken from a previously truncated response.

      + */ + nextToken?: string; + + /** + *

      A list of tables.

      + */ + tables?: TableSummary[]; +} + +export namespace ListTablesResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListTablesResponse): any => ({ + ...obj, + }); +} + +export interface ListTagsForResourceRequest { + /** + *

      The Amazon Resource Name (ARN) of the Amazon Keyspaces resource.

      + */ + resourceArn: string | undefined; + + /** + *

      The pagination token. To resume pagination, provide the NextToken value as argument of a subsequent API invocation.

      + */ + nextToken?: string; + + /** + *

      The total number of tags to return in the output. If the total number of tags available + * is more than the value specified, a NextToken is provided in the output. To resume pagination, + * provide the NextToken value as an argument of a subsequent API invocation.

      + */ + maxResults?: number; +} + +export namespace ListTagsForResourceRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListTagsForResourceRequest): any => ({ + ...obj, + }); +} + +export interface ListTagsForResourceResponse { + /** + *

      A token to specify where to start paginating. This is the NextToken from a previously truncated response.

      + */ + nextToken?: string; + + /** + *

      A list of tags.

      + */ + tags?: Tag[]; +} + +export namespace ListTagsForResourceResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListTagsForResourceResponse): any => ({ + ...obj, + }); +} + +export interface RestoreTableRequest { + /** + *

      The keyspace name of the source table.

      + */ + sourceKeyspaceName: string | undefined; + + /** + *

      The name of the source table.

      + */ + sourceTableName: string | undefined; + + /** + *

      The name of the target keyspace.

      + */ + targetKeyspaceName: string | undefined; + + /** + *

      The name of the target table.

      + */ + targetTableName: string | undefined; + + /** + *

      The restore timestamp in ISO 8601 format.

      + */ + restoreTimestamp?: Date; + + /** + *

      Specifies the read/write throughput capacity mode for the target table. The options are:

      + *
        + *
      • + *

        + * throughputMode:PAY_PER_REQUEST and

        + *
      • + *
      • + *

        + * throughputMode:PROVISIONED. The provisioned capacity mode requires + * readCapacityUnits and writeCapacityUnits as inputs.

        + *
      • + *
      + *

      The default is throughput_mode:PAY_PER_REQUEST.

      + *

      For more information, see Read/write capacity modes in the Amazon Keyspaces Developer + * Guide.

      + */ + capacitySpecificationOverride?: CapacitySpecification; + + /** + *

      + * Specifies the encryption settings for the target table. You can choose one of the following KMS key (KMS key):

      + * + *
        + *
      • + *

        + * type:AWS_OWNED_KMS_KEY - This key is owned by Amazon Keyspaces.

        + *
      • + *
      • + *

        + * type:CUSTOMER_MANAGED_KMS_KEY - This key is stored in your account and is created, owned, and managed by you. + * This option + * requires the kms_key_identifier of the KMS key in Amazon Resource Name (ARN) format as input.

        + *
      • + *
      + *

      The default is type:AWS_OWNED_KMS_KEY.

      + *

      For more information, see Encryption at rest in the Amazon Keyspaces Developer + * Guide.

      + */ + encryptionSpecificationOverride?: EncryptionSpecification; + + /** + *

      + * Specifies the pointInTimeRecovery settings for the target + * table. The options are:

      + *
        + *
      • + *

        + * ENABLED + *

        + *
      • + *
      • + *

        + * DISABLED + *

        + *
      • + *
      + *

      If it's not specified, the default is DISABLED.

      + *

      For more information, see Point-in-time recovery in the Amazon Keyspaces Developer + * Guide.

      + */ + pointInTimeRecoveryOverride?: PointInTimeRecovery; + + /** + *

      A list of key-value pair tags to be + * attached to the restored table.

      + *

      For more information, see Adding tags and labels to Amazon Keyspaces resources in the Amazon Keyspaces Developer + * Guide.

      + */ + tagsOverride?: Tag[]; +} + +export namespace RestoreTableRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: RestoreTableRequest): any => ({ + ...obj, + }); +} + +export interface RestoreTableResponse { + /** + *

      The Amazon Resource Name (ARN) of the restored table.

      + */ + restoredTableARN: string | undefined; +} + +export namespace RestoreTableResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: RestoreTableResponse): any => ({ + ...obj, + }); +} + +export interface TagResourceRequest { + /** + *

      The Amazon Resource Name (ARN) of the Amazon Keyspaces resource to which to add tags.

      + */ + resourceArn: string | undefined; + + /** + *

      The tags to be assigned to the Amazon Keyspaces resource.

      + */ + tags: Tag[] | undefined; +} + +export namespace TagResourceRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: TagResourceRequest): any => ({ + ...obj, + }); +} + +export interface TagResourceResponse {} + +export namespace TagResourceResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: TagResourceResponse): any => ({ + ...obj, + }); +} + +export interface UntagResourceRequest { + /** + *

      The Amazon Keyspaces resource that the tags will be removed from. This value is an Amazon Resource Name (ARN).

      + */ + resourceArn: string | undefined; + + /** + *

      A list of existing tags to be removed from the Amazon Keyspaces resource.

      + */ + tags: Tag[] | undefined; +} + +export namespace UntagResourceRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UntagResourceRequest): any => ({ + ...obj, + }); +} + +export interface UntagResourceResponse {} + +export namespace UntagResourceResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UntagResourceResponse): any => ({ + ...obj, + }); +} + +export interface UpdateTableRequest { + /** + *

      The name of the keyspace the specified table is stored in.

      + */ + keyspaceName: string | undefined; + + /** + *

      The name of the table.

      + */ + tableName: string | undefined; + + /** + *

      For each column to be added to the specified table:

      + *
        + *
      • + *

        + * + * name + * - The name + * of the column.

        + *
      • + *
      • + *

        + * + * type + * - An Amazon Keyspaces + * data type. For more information, see Data types in the Amazon Keyspaces Developer + * Guide.

        + *
      • + *
      + */ + addColumns?: ColumnDefinition[]; + + /** + *

      Modifies the read/write throughput capacity mode for the table. The options are:

      + *
        + *
      • + *

        + * throughputMode:PAY_PER_REQUEST and

        + *
      • + *
      • + *

        + * throughputMode:PROVISIONED. The provisioned capacity mode requires + * readCapacityUnits and writeCapacityUnits as inputs.

        + *
      • + *
      + *

      The default is throughput_mode:PAY_PER_REQUEST.

      + *

      For more information, see Read/write capacity modes in the Amazon Keyspaces Developer + * Guide.

      + */ + capacitySpecification?: CapacitySpecification; + + /** + *

      + * Modifies the encryption settings of the table. You can choose one of the following KMS key (KMS key):

      + * + *
        + *
      • + *

        + * type:AWS_OWNED_KMS_KEY - This key is owned by Amazon Keyspaces.

        + *
      • + *
      • + *

        + * type:CUSTOMER_MANAGED_KMS_KEY - This key is stored in your account and is created, owned, and managed by you. + * This option + * requires the kms_key_identifier of the KMS key in Amazon Resource Name (ARN) format as input.

        + *
      • + *
      + *

      The default is AWS_OWNED_KMS_KEY.

      + *

      For more information, see Encryption at rest in the Amazon Keyspaces Developer + * Guide.

      + */ + encryptionSpecification?: EncryptionSpecification; + + /** + *

      + * Modifies the pointInTimeRecovery settings of the table. The options are:

      + *
        + *
      • + *

        + * ENABLED + *

        + *
      • + *
      • + *

        + * DISABLED + *

        + *
      • + *
      + *

      If it's not specified, the default is DISABLED.

      + *

      For more information, see Point-in-time recovery in the Amazon Keyspaces Developer + * Guide.

      + */ + pointInTimeRecovery?: PointInTimeRecovery; + + /** + *

      Modifies Time to Live custom settings for the table. The options are:

      + *
        + *
      • + *

        + * status:enabled + *

        + *
      • + *
      • + *

        + * status:disabled + *

        + *
      • + *
      + *

      The default is status:disabled. After + * ttl is enabled, you can't disable it + * for the table.

      + *

      For more information, see Expiring data by using Amazon Keyspaces Time to Live (TTL) in the Amazon Keyspaces Developer + * Guide.

      + */ + ttl?: TimeToLive; + + /** + *

      The default Time to Live setting in seconds for the table.

      + *

      For more information, see Setting the default TTL value for a table in the Amazon Keyspaces Developer + * Guide.

      + */ + defaultTimeToLive?: number; +} + +export namespace UpdateTableRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdateTableRequest): any => ({ + ...obj, + }); +} + +export interface UpdateTableResponse { + /** + *

      The Amazon Resource Name (ARN) of the modified table.

      + */ + resourceArn: string | undefined; +} + +export namespace UpdateTableResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdateTableResponse): any => ({ + ...obj, + }); +} diff --git a/clients/client-keyspaces/src/pagination/Interfaces.ts b/clients/client-keyspaces/src/pagination/Interfaces.ts new file mode 100644 index 000000000000..3f5c65fc815a --- /dev/null +++ b/clients/client-keyspaces/src/pagination/Interfaces.ts @@ -0,0 +1,8 @@ +import { PaginationConfiguration } from "@aws-sdk/types"; + +import { Keyspaces } from "../Keyspaces"; +import { KeyspacesClient } from "../KeyspacesClient"; + +export interface KeyspacesPaginationConfiguration extends PaginationConfiguration { + client: Keyspaces | KeyspacesClient; +} diff --git a/clients/client-keyspaces/src/pagination/ListKeyspacesPaginator.ts b/clients/client-keyspaces/src/pagination/ListKeyspacesPaginator.ts new file mode 100644 index 000000000000..3b93acdcad05 --- /dev/null +++ b/clients/client-keyspaces/src/pagination/ListKeyspacesPaginator.ts @@ -0,0 +1,59 @@ +import { Paginator } from "@aws-sdk/types"; + +import { + ListKeyspacesCommand, + ListKeyspacesCommandInput, + ListKeyspacesCommandOutput, +} from "../commands/ListKeyspacesCommand"; +import { Keyspaces } from "../Keyspaces"; +import { KeyspacesClient } from "../KeyspacesClient"; +import { KeyspacesPaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: KeyspacesClient, + input: ListKeyspacesCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new ListKeyspacesCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: Keyspaces, + input: ListKeyspacesCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.listKeyspaces(input, ...args); +}; +export async function* paginateListKeyspaces( + config: KeyspacesPaginationConfiguration, + input: 
ListKeyspacesCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.nextToken + let token: typeof input.nextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: ListKeyspacesCommandOutput; + while (hasNext) { + input.nextToken = token; + input["maxResults"] = config.pageSize; + if (config.client instanceof Keyspaces) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof KeyspacesClient) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected Keyspaces | KeyspacesClient"); + } + yield page; + token = page.nextToken; + hasNext = !!token; + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-keyspaces/src/pagination/ListTablesPaginator.ts b/clients/client-keyspaces/src/pagination/ListTablesPaginator.ts new file mode 100644 index 000000000000..3977f487963c --- /dev/null +++ b/clients/client-keyspaces/src/pagination/ListTablesPaginator.ts @@ -0,0 +1,55 @@ +import { Paginator } from "@aws-sdk/types"; + +import { ListTablesCommand, ListTablesCommandInput, ListTablesCommandOutput } from "../commands/ListTablesCommand"; +import { Keyspaces } from "../Keyspaces"; +import { KeyspacesClient } from "../KeyspacesClient"; +import { KeyspacesPaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: KeyspacesClient, + input: ListTablesCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new ListTablesCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: Keyspaces, + input: ListTablesCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.listTables(input, ...args); +}; +export async function* paginateListTables( + config: 
KeyspacesPaginationConfiguration, + input: ListTablesCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.nextToken + let token: typeof input.nextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: ListTablesCommandOutput; + while (hasNext) { + input.nextToken = token; + input["maxResults"] = config.pageSize; + if (config.client instanceof Keyspaces) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof KeyspacesClient) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected Keyspaces | KeyspacesClient"); + } + yield page; + token = page.nextToken; + hasNext = !!token; + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-keyspaces/src/pagination/ListTagsForResourcePaginator.ts b/clients/client-keyspaces/src/pagination/ListTagsForResourcePaginator.ts new file mode 100644 index 000000000000..b5062602d314 --- /dev/null +++ b/clients/client-keyspaces/src/pagination/ListTagsForResourcePaginator.ts @@ -0,0 +1,59 @@ +import { Paginator } from "@aws-sdk/types"; + +import { + ListTagsForResourceCommand, + ListTagsForResourceCommandInput, + ListTagsForResourceCommandOutput, +} from "../commands/ListTagsForResourceCommand"; +import { Keyspaces } from "../Keyspaces"; +import { KeyspacesClient } from "../KeyspacesClient"; +import { KeyspacesPaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: KeyspacesClient, + input: ListTagsForResourceCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new ListTagsForResourceCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: Keyspaces, + input: ListTagsForResourceCommandInput, + ...args: any +): Promise => { + // @ts-ignore + 
return await client.listTagsForResource(input, ...args); +}; +export async function* paginateListTagsForResource( + config: KeyspacesPaginationConfiguration, + input: ListTagsForResourceCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.nextToken + let token: typeof input.nextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: ListTagsForResourceCommandOutput; + while (hasNext) { + input.nextToken = token; + input["maxResults"] = config.pageSize; + if (config.client instanceof Keyspaces) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof KeyspacesClient) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected Keyspaces | KeyspacesClient"); + } + yield page; + token = page.nextToken; + hasNext = !!token; + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-keyspaces/src/pagination/index.ts b/clients/client-keyspaces/src/pagination/index.ts new file mode 100644 index 000000000000..0b4d41dfca77 --- /dev/null +++ b/clients/client-keyspaces/src/pagination/index.ts @@ -0,0 +1,4 @@ +export * from "./Interfaces"; +export * from "./ListKeyspacesPaginator"; +export * from "./ListTablesPaginator"; +export * from "./ListTagsForResourcePaginator"; diff --git a/clients/client-keyspaces/src/protocols/Aws_json1_0.ts b/clients/client-keyspaces/src/protocols/Aws_json1_0.ts new file mode 100644 index 000000000000..bae56829fc45 --- /dev/null +++ b/clients/client-keyspaces/src/protocols/Aws_json1_0.ts @@ -0,0 +1,1823 @@ +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { + decorateServiceException as __decorateServiceException, + expectInt32 as __expectInt32, + expectLong as __expectLong, + expectNonNull as __expectNonNull, + expectNumber as 
__expectNumber, + expectString as __expectString, + parseEpochTimestamp as __parseEpochTimestamp, +} from "@aws-sdk/smithy-client"; +import { + Endpoint as __Endpoint, + HeaderBag as __HeaderBag, + ResponseMetadata as __ResponseMetadata, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { CreateKeyspaceCommandInput, CreateKeyspaceCommandOutput } from "../commands/CreateKeyspaceCommand"; +import { CreateTableCommandInput, CreateTableCommandOutput } from "../commands/CreateTableCommand"; +import { DeleteKeyspaceCommandInput, DeleteKeyspaceCommandOutput } from "../commands/DeleteKeyspaceCommand"; +import { DeleteTableCommandInput, DeleteTableCommandOutput } from "../commands/DeleteTableCommand"; +import { GetKeyspaceCommandInput, GetKeyspaceCommandOutput } from "../commands/GetKeyspaceCommand"; +import { GetTableCommandInput, GetTableCommandOutput } from "../commands/GetTableCommand"; +import { ListKeyspacesCommandInput, ListKeyspacesCommandOutput } from "../commands/ListKeyspacesCommand"; +import { ListTablesCommandInput, ListTablesCommandOutput } from "../commands/ListTablesCommand"; +import { + ListTagsForResourceCommandInput, + ListTagsForResourceCommandOutput, +} from "../commands/ListTagsForResourceCommand"; +import { RestoreTableCommandInput, RestoreTableCommandOutput } from "../commands/RestoreTableCommand"; +import { TagResourceCommandInput, TagResourceCommandOutput } from "../commands/TagResourceCommand"; +import { UntagResourceCommandInput, UntagResourceCommandOutput } from "../commands/UntagResourceCommand"; +import { UpdateTableCommandInput, UpdateTableCommandOutput } from "../commands/UpdateTableCommand"; +import { KeyspacesServiceException as __BaseException } from "../models/KeyspacesServiceException"; +import { + AccessDeniedException, + CapacitySpecification, + CapacitySpecificationSummary, + ClusteringKey, + ColumnDefinition, + Comment, + ConflictException, + CreateKeyspaceRequest, + CreateKeyspaceResponse, + CreateTableRequest, 
+ CreateTableResponse, + DeleteKeyspaceRequest, + DeleteKeyspaceResponse, + DeleteTableRequest, + DeleteTableResponse, + EncryptionSpecification, + GetKeyspaceRequest, + GetKeyspaceResponse, + GetTableRequest, + GetTableResponse, + InternalServerException, + KeyspaceSummary, + ListKeyspacesRequest, + ListKeyspacesResponse, + ListTablesRequest, + ListTablesResponse, + ListTagsForResourceRequest, + ListTagsForResourceResponse, + PartitionKey, + PointInTimeRecovery, + PointInTimeRecoverySummary, + ResourceNotFoundException, + RestoreTableRequest, + RestoreTableResponse, + SchemaDefinition, + ServiceQuotaExceededException, + StaticColumn, + TableSummary, + Tag, + TagResourceRequest, + TagResourceResponse, + TimeToLive, + UntagResourceRequest, + UntagResourceResponse, + UpdateTableRequest, + UpdateTableResponse, + ValidationException, +} from "../models/models_0"; + +export const serializeAws_json1_0CreateKeyspaceCommand = async ( + input: CreateKeyspaceCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.0", + "x-amz-target": "KeyspacesService.CreateKeyspace", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_0CreateKeyspaceRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + +export const serializeAws_json1_0CreateTableCommand = async ( + input: CreateTableCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.0", + "x-amz-target": "KeyspacesService.CreateTable", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_0CreateTableRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + +export const serializeAws_json1_0DeleteKeyspaceCommand = async ( + input: DeleteKeyspaceCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: 
__HeaderBag = { + "content-type": "application/x-amz-json-1.0", + "x-amz-target": "KeyspacesService.DeleteKeyspace", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_0DeleteKeyspaceRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + +export const serializeAws_json1_0DeleteTableCommand = async ( + input: DeleteTableCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.0", + "x-amz-target": "KeyspacesService.DeleteTable", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_0DeleteTableRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + +export const serializeAws_json1_0GetKeyspaceCommand = async ( + input: GetKeyspaceCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.0", + "x-amz-target": "KeyspacesService.GetKeyspace", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_0GetKeyspaceRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + +export const serializeAws_json1_0GetTableCommand = async ( + input: GetTableCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.0", + "x-amz-target": "KeyspacesService.GetTable", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_0GetTableRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + +export const serializeAws_json1_0ListKeyspacesCommand = async ( + input: ListKeyspacesCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.0", + "x-amz-target": "KeyspacesService.ListKeyspaces", + }; + let body: any; + 
body = JSON.stringify(serializeAws_json1_0ListKeyspacesRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + +export const serializeAws_json1_0ListTablesCommand = async ( + input: ListTablesCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.0", + "x-amz-target": "KeyspacesService.ListTables", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_0ListTablesRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + +export const serializeAws_json1_0ListTagsForResourceCommand = async ( + input: ListTagsForResourceCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.0", + "x-amz-target": "KeyspacesService.ListTagsForResource", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_0ListTagsForResourceRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + +export const serializeAws_json1_0RestoreTableCommand = async ( + input: RestoreTableCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.0", + "x-amz-target": "KeyspacesService.RestoreTable", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_0RestoreTableRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + +export const serializeAws_json1_0TagResourceCommand = async ( + input: TagResourceCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.0", + "x-amz-target": "KeyspacesService.TagResource", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_0TagResourceRequest(input, context)); + return 
buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + +export const serializeAws_json1_0UntagResourceCommand = async ( + input: UntagResourceCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.0", + "x-amz-target": "KeyspacesService.UntagResource", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_0UntagResourceRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + +export const serializeAws_json1_0UpdateTableCommand = async ( + input: UpdateTableCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.0", + "x-amz-target": "KeyspacesService.UpdateTable", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_0UpdateTableRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + +export const deserializeAws_json1_0CreateKeyspaceCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_0CreateKeyspaceCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_0CreateKeyspaceResponse(data, context); + const response: CreateKeyspaceCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_0CreateKeyspaceCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __BaseException; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case 
"com.amazonaws.keyspaces#AccessDeniedException": + throw await deserializeAws_json1_0AccessDeniedExceptionResponse(parsedOutput, context); + case "ConflictException": + case "com.amazonaws.keyspaces#ConflictException": + throw await deserializeAws_json1_0ConflictExceptionResponse(parsedOutput, context); + case "InternalServerException": + case "com.amazonaws.keyspaces#InternalServerException": + throw await deserializeAws_json1_0InternalServerExceptionResponse(parsedOutput, context); + case "ServiceQuotaExceededException": + case "com.amazonaws.keyspaces#ServiceQuotaExceededException": + throw await deserializeAws_json1_0ServiceQuotaExceededExceptionResponse(parsedOutput, context); + case "ValidationException": + case "com.amazonaws.keyspaces#ValidationException": + throw await deserializeAws_json1_0ValidationExceptionResponse(parsedOutput, context); + default: + const parsedBody = parsedOutput.body; + response = new __BaseException({ + name: parsedBody.code || parsedBody.Code || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + }); + throw __decorateServiceException(response, parsedBody); + } +}; + +export const deserializeAws_json1_0CreateTableCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_0CreateTableCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_0CreateTableResponse(data, context); + const response: CreateTableCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_0CreateTableCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __BaseException; + let errorCode = "UnknownError"; + errorCode 
= loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.keyspaces#AccessDeniedException": + throw await deserializeAws_json1_0AccessDeniedExceptionResponse(parsedOutput, context); + case "ConflictException": + case "com.amazonaws.keyspaces#ConflictException": + throw await deserializeAws_json1_0ConflictExceptionResponse(parsedOutput, context); + case "InternalServerException": + case "com.amazonaws.keyspaces#InternalServerException": + throw await deserializeAws_json1_0InternalServerExceptionResponse(parsedOutput, context); + case "ResourceNotFoundException": + case "com.amazonaws.keyspaces#ResourceNotFoundException": + throw await deserializeAws_json1_0ResourceNotFoundExceptionResponse(parsedOutput, context); + case "ServiceQuotaExceededException": + case "com.amazonaws.keyspaces#ServiceQuotaExceededException": + throw await deserializeAws_json1_0ServiceQuotaExceededExceptionResponse(parsedOutput, context); + case "ValidationException": + case "com.amazonaws.keyspaces#ValidationException": + throw await deserializeAws_json1_0ValidationExceptionResponse(parsedOutput, context); + default: + const parsedBody = parsedOutput.body; + response = new __BaseException({ + name: parsedBody.code || parsedBody.Code || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + }); + throw __decorateServiceException(response, parsedBody); + } +}; + +export const deserializeAws_json1_0DeleteKeyspaceCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_0DeleteKeyspaceCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_0DeleteKeyspaceResponse(data, context); + const response: DeleteKeyspaceCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return 
Promise.resolve(response); +}; + +const deserializeAws_json1_0DeleteKeyspaceCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __BaseException; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.keyspaces#AccessDeniedException": + throw await deserializeAws_json1_0AccessDeniedExceptionResponse(parsedOutput, context); + case "ConflictException": + case "com.amazonaws.keyspaces#ConflictException": + throw await deserializeAws_json1_0ConflictExceptionResponse(parsedOutput, context); + case "InternalServerException": + case "com.amazonaws.keyspaces#InternalServerException": + throw await deserializeAws_json1_0InternalServerExceptionResponse(parsedOutput, context); + case "ResourceNotFoundException": + case "com.amazonaws.keyspaces#ResourceNotFoundException": + throw await deserializeAws_json1_0ResourceNotFoundExceptionResponse(parsedOutput, context); + case "ServiceQuotaExceededException": + case "com.amazonaws.keyspaces#ServiceQuotaExceededException": + throw await deserializeAws_json1_0ServiceQuotaExceededExceptionResponse(parsedOutput, context); + case "ValidationException": + case "com.amazonaws.keyspaces#ValidationException": + throw await deserializeAws_json1_0ValidationExceptionResponse(parsedOutput, context); + default: + const parsedBody = parsedOutput.body; + response = new __BaseException({ + name: parsedBody.code || parsedBody.Code || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + }); + throw __decorateServiceException(response, parsedBody); + } +}; + +export const deserializeAws_json1_0DeleteTableCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return 
deserializeAws_json1_0DeleteTableCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_0DeleteTableResponse(data, context); + const response: DeleteTableCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_0DeleteTableCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __BaseException; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.keyspaces#AccessDeniedException": + throw await deserializeAws_json1_0AccessDeniedExceptionResponse(parsedOutput, context); + case "ConflictException": + case "com.amazonaws.keyspaces#ConflictException": + throw await deserializeAws_json1_0ConflictExceptionResponse(parsedOutput, context); + case "InternalServerException": + case "com.amazonaws.keyspaces#InternalServerException": + throw await deserializeAws_json1_0InternalServerExceptionResponse(parsedOutput, context); + case "ResourceNotFoundException": + case "com.amazonaws.keyspaces#ResourceNotFoundException": + throw await deserializeAws_json1_0ResourceNotFoundExceptionResponse(parsedOutput, context); + case "ServiceQuotaExceededException": + case "com.amazonaws.keyspaces#ServiceQuotaExceededException": + throw await deserializeAws_json1_0ServiceQuotaExceededExceptionResponse(parsedOutput, context); + case "ValidationException": + case "com.amazonaws.keyspaces#ValidationException": + throw await deserializeAws_json1_0ValidationExceptionResponse(parsedOutput, context); + default: + const parsedBody = parsedOutput.body; + response = new __BaseException({ + name: parsedBody.code || parsedBody.Code || errorCode, + 
$fault: "client", + $metadata: deserializeMetadata(output), + }); + throw __decorateServiceException(response, parsedBody); + } +}; + +export const deserializeAws_json1_0GetKeyspaceCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_0GetKeyspaceCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_0GetKeyspaceResponse(data, context); + const response: GetKeyspaceCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_0GetKeyspaceCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __BaseException; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.keyspaces#AccessDeniedException": + throw await deserializeAws_json1_0AccessDeniedExceptionResponse(parsedOutput, context); + case "InternalServerException": + case "com.amazonaws.keyspaces#InternalServerException": + throw await deserializeAws_json1_0InternalServerExceptionResponse(parsedOutput, context); + case "ResourceNotFoundException": + case "com.amazonaws.keyspaces#ResourceNotFoundException": + throw await deserializeAws_json1_0ResourceNotFoundExceptionResponse(parsedOutput, context); + case "ServiceQuotaExceededException": + case "com.amazonaws.keyspaces#ServiceQuotaExceededException": + throw await deserializeAws_json1_0ServiceQuotaExceededExceptionResponse(parsedOutput, context); + case "ValidationException": + case "com.amazonaws.keyspaces#ValidationException": + throw await deserializeAws_json1_0ValidationExceptionResponse(parsedOutput, context); + 
default: + const parsedBody = parsedOutput.body; + response = new __BaseException({ + name: parsedBody.code || parsedBody.Code || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + }); + throw __decorateServiceException(response, parsedBody); + } +}; + +export const deserializeAws_json1_0GetTableCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_0GetTableCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_0GetTableResponse(data, context); + const response: GetTableCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_0GetTableCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __BaseException; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.keyspaces#AccessDeniedException": + throw await deserializeAws_json1_0AccessDeniedExceptionResponse(parsedOutput, context); + case "InternalServerException": + case "com.amazonaws.keyspaces#InternalServerException": + throw await deserializeAws_json1_0InternalServerExceptionResponse(parsedOutput, context); + case "ResourceNotFoundException": + case "com.amazonaws.keyspaces#ResourceNotFoundException": + throw await deserializeAws_json1_0ResourceNotFoundExceptionResponse(parsedOutput, context); + case "ServiceQuotaExceededException": + case "com.amazonaws.keyspaces#ServiceQuotaExceededException": + throw await deserializeAws_json1_0ServiceQuotaExceededExceptionResponse(parsedOutput, context); + case "ValidationException": + case 
"com.amazonaws.keyspaces#ValidationException": + throw await deserializeAws_json1_0ValidationExceptionResponse(parsedOutput, context); + default: + const parsedBody = parsedOutput.body; + response = new __BaseException({ + name: parsedBody.code || parsedBody.Code || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + }); + throw __decorateServiceException(response, parsedBody); + } +}; + +export const deserializeAws_json1_0ListKeyspacesCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_0ListKeyspacesCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_0ListKeyspacesResponse(data, context); + const response: ListKeyspacesCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_0ListKeyspacesCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __BaseException; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.keyspaces#AccessDeniedException": + throw await deserializeAws_json1_0AccessDeniedExceptionResponse(parsedOutput, context); + case "InternalServerException": + case "com.amazonaws.keyspaces#InternalServerException": + throw await deserializeAws_json1_0InternalServerExceptionResponse(parsedOutput, context); + case "ResourceNotFoundException": + case "com.amazonaws.keyspaces#ResourceNotFoundException": + throw await deserializeAws_json1_0ResourceNotFoundExceptionResponse(parsedOutput, context); + case "ServiceQuotaExceededException": + case 
"com.amazonaws.keyspaces#ServiceQuotaExceededException": + throw await deserializeAws_json1_0ServiceQuotaExceededExceptionResponse(parsedOutput, context); + case "ValidationException": + case "com.amazonaws.keyspaces#ValidationException": + throw await deserializeAws_json1_0ValidationExceptionResponse(parsedOutput, context); + default: + const parsedBody = parsedOutput.body; + response = new __BaseException({ + name: parsedBody.code || parsedBody.Code || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + }); + throw __decorateServiceException(response, parsedBody); + } +}; + +export const deserializeAws_json1_0ListTablesCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_0ListTablesCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_0ListTablesResponse(data, context); + const response: ListTablesCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_0ListTablesCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __BaseException; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.keyspaces#AccessDeniedException": + throw await deserializeAws_json1_0AccessDeniedExceptionResponse(parsedOutput, context); + case "InternalServerException": + case "com.amazonaws.keyspaces#InternalServerException": + throw await deserializeAws_json1_0InternalServerExceptionResponse(parsedOutput, context); + case "ResourceNotFoundException": + case "com.amazonaws.keyspaces#ResourceNotFoundException": + throw 
await deserializeAws_json1_0ResourceNotFoundExceptionResponse(parsedOutput, context); + case "ServiceQuotaExceededException": + case "com.amazonaws.keyspaces#ServiceQuotaExceededException": + throw await deserializeAws_json1_0ServiceQuotaExceededExceptionResponse(parsedOutput, context); + case "ValidationException": + case "com.amazonaws.keyspaces#ValidationException": + throw await deserializeAws_json1_0ValidationExceptionResponse(parsedOutput, context); + default: + const parsedBody = parsedOutput.body; + response = new __BaseException({ + name: parsedBody.code || parsedBody.Code || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + }); + throw __decorateServiceException(response, parsedBody); + } +}; + +export const deserializeAws_json1_0ListTagsForResourceCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_0ListTagsForResourceCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_0ListTagsForResourceResponse(data, context); + const response: ListTagsForResourceCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_0ListTagsForResourceCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __BaseException; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.keyspaces#AccessDeniedException": + throw await deserializeAws_json1_0AccessDeniedExceptionResponse(parsedOutput, context); + case "InternalServerException": + case "com.amazonaws.keyspaces#InternalServerException": + throw await 
deserializeAws_json1_0InternalServerExceptionResponse(parsedOutput, context); + case "ResourceNotFoundException": + case "com.amazonaws.keyspaces#ResourceNotFoundException": + throw await deserializeAws_json1_0ResourceNotFoundExceptionResponse(parsedOutput, context); + case "ServiceQuotaExceededException": + case "com.amazonaws.keyspaces#ServiceQuotaExceededException": + throw await deserializeAws_json1_0ServiceQuotaExceededExceptionResponse(parsedOutput, context); + case "ValidationException": + case "com.amazonaws.keyspaces#ValidationException": + throw await deserializeAws_json1_0ValidationExceptionResponse(parsedOutput, context); + default: + const parsedBody = parsedOutput.body; + response = new __BaseException({ + name: parsedBody.code || parsedBody.Code || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + }); + throw __decorateServiceException(response, parsedBody); + } +}; + +export const deserializeAws_json1_0RestoreTableCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_0RestoreTableCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_0RestoreTableResponse(data, context); + const response: RestoreTableCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_0RestoreTableCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __BaseException; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.keyspaces#AccessDeniedException": + throw await 
deserializeAws_json1_0AccessDeniedExceptionResponse(parsedOutput, context); + case "ConflictException": + case "com.amazonaws.keyspaces#ConflictException": + throw await deserializeAws_json1_0ConflictExceptionResponse(parsedOutput, context); + case "InternalServerException": + case "com.amazonaws.keyspaces#InternalServerException": + throw await deserializeAws_json1_0InternalServerExceptionResponse(parsedOutput, context); + case "ResourceNotFoundException": + case "com.amazonaws.keyspaces#ResourceNotFoundException": + throw await deserializeAws_json1_0ResourceNotFoundExceptionResponse(parsedOutput, context); + case "ServiceQuotaExceededException": + case "com.amazonaws.keyspaces#ServiceQuotaExceededException": + throw await deserializeAws_json1_0ServiceQuotaExceededExceptionResponse(parsedOutput, context); + case "ValidationException": + case "com.amazonaws.keyspaces#ValidationException": + throw await deserializeAws_json1_0ValidationExceptionResponse(parsedOutput, context); + default: + const parsedBody = parsedOutput.body; + response = new __BaseException({ + name: parsedBody.code || parsedBody.Code || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + }); + throw __decorateServiceException(response, parsedBody); + } +}; + +export const deserializeAws_json1_0TagResourceCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_0TagResourceCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_0TagResourceResponse(data, context); + const response: TagResourceCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_0TagResourceCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + 
body: await parseBody(output.body, context), + }; + let response: __BaseException; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.keyspaces#AccessDeniedException": + throw await deserializeAws_json1_0AccessDeniedExceptionResponse(parsedOutput, context); + case "InternalServerException": + case "com.amazonaws.keyspaces#InternalServerException": + throw await deserializeAws_json1_0InternalServerExceptionResponse(parsedOutput, context); + case "ResourceNotFoundException": + case "com.amazonaws.keyspaces#ResourceNotFoundException": + throw await deserializeAws_json1_0ResourceNotFoundExceptionResponse(parsedOutput, context); + case "ServiceQuotaExceededException": + case "com.amazonaws.keyspaces#ServiceQuotaExceededException": + throw await deserializeAws_json1_0ServiceQuotaExceededExceptionResponse(parsedOutput, context); + case "ValidationException": + case "com.amazonaws.keyspaces#ValidationException": + throw await deserializeAws_json1_0ValidationExceptionResponse(parsedOutput, context); + default: + const parsedBody = parsedOutput.body; + response = new __BaseException({ + name: parsedBody.code || parsedBody.Code || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + }); + throw __decorateServiceException(response, parsedBody); + } +}; + +export const deserializeAws_json1_0UntagResourceCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_0UntagResourceCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_0UntagResourceResponse(data, context); + const response: UntagResourceCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const 
deserializeAws_json1_0UntagResourceCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __BaseException; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.keyspaces#AccessDeniedException": + throw await deserializeAws_json1_0AccessDeniedExceptionResponse(parsedOutput, context); + case "ConflictException": + case "com.amazonaws.keyspaces#ConflictException": + throw await deserializeAws_json1_0ConflictExceptionResponse(parsedOutput, context); + case "InternalServerException": + case "com.amazonaws.keyspaces#InternalServerException": + throw await deserializeAws_json1_0InternalServerExceptionResponse(parsedOutput, context); + case "ResourceNotFoundException": + case "com.amazonaws.keyspaces#ResourceNotFoundException": + throw await deserializeAws_json1_0ResourceNotFoundExceptionResponse(parsedOutput, context); + case "ServiceQuotaExceededException": + case "com.amazonaws.keyspaces#ServiceQuotaExceededException": + throw await deserializeAws_json1_0ServiceQuotaExceededExceptionResponse(parsedOutput, context); + case "ValidationException": + case "com.amazonaws.keyspaces#ValidationException": + throw await deserializeAws_json1_0ValidationExceptionResponse(parsedOutput, context); + default: + const parsedBody = parsedOutput.body; + response = new __BaseException({ + name: parsedBody.code || parsedBody.Code || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + }); + throw __decorateServiceException(response, parsedBody); + } +}; + +export const deserializeAws_json1_0UpdateTableCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_0UpdateTableCommandError(output, context); + } + const 
data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_0UpdateTableResponse(data, context); + const response: UpdateTableCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_0UpdateTableCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __BaseException; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.keyspaces#AccessDeniedException": + throw await deserializeAws_json1_0AccessDeniedExceptionResponse(parsedOutput, context); + case "ConflictException": + case "com.amazonaws.keyspaces#ConflictException": + throw await deserializeAws_json1_0ConflictExceptionResponse(parsedOutput, context); + case "InternalServerException": + case "com.amazonaws.keyspaces#InternalServerException": + throw await deserializeAws_json1_0InternalServerExceptionResponse(parsedOutput, context); + case "ResourceNotFoundException": + case "com.amazonaws.keyspaces#ResourceNotFoundException": + throw await deserializeAws_json1_0ResourceNotFoundExceptionResponse(parsedOutput, context); + case "ServiceQuotaExceededException": + case "com.amazonaws.keyspaces#ServiceQuotaExceededException": + throw await deserializeAws_json1_0ServiceQuotaExceededExceptionResponse(parsedOutput, context); + case "ValidationException": + case "com.amazonaws.keyspaces#ValidationException": + throw await deserializeAws_json1_0ValidationExceptionResponse(parsedOutput, context); + default: + const parsedBody = parsedOutput.body; + response = new __BaseException({ + name: parsedBody.code || parsedBody.Code || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + }); + throw 
__decorateServiceException(response, parsedBody); + } +}; + +const deserializeAws_json1_0AccessDeniedExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const body = parsedOutput.body; + const deserialized: any = deserializeAws_json1_0AccessDeniedException(body, context); + const exception = new AccessDeniedException({ + $metadata: deserializeMetadata(parsedOutput), + ...deserialized, + }); + return __decorateServiceException(exception, body); +}; + +const deserializeAws_json1_0ConflictExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const body = parsedOutput.body; + const deserialized: any = deserializeAws_json1_0ConflictException(body, context); + const exception = new ConflictException({ + $metadata: deserializeMetadata(parsedOutput), + ...deserialized, + }); + return __decorateServiceException(exception, body); +}; + +const deserializeAws_json1_0InternalServerExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const body = parsedOutput.body; + const deserialized: any = deserializeAws_json1_0InternalServerException(body, context); + const exception = new InternalServerException({ + $metadata: deserializeMetadata(parsedOutput), + ...deserialized, + }); + return __decorateServiceException(exception, body); +}; + +const deserializeAws_json1_0ResourceNotFoundExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const body = parsedOutput.body; + const deserialized: any = deserializeAws_json1_0ResourceNotFoundException(body, context); + const exception = new ResourceNotFoundException({ + $metadata: deserializeMetadata(parsedOutput), + ...deserialized, + }); + return __decorateServiceException(exception, body); +}; + +const deserializeAws_json1_0ServiceQuotaExceededExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const body = parsedOutput.body; + const 
deserialized: any = deserializeAws_json1_0ServiceQuotaExceededException(body, context); + const exception = new ServiceQuotaExceededException({ + $metadata: deserializeMetadata(parsedOutput), + ...deserialized, + }); + return __decorateServiceException(exception, body); +}; + +const deserializeAws_json1_0ValidationExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const body = parsedOutput.body; + const deserialized: any = deserializeAws_json1_0ValidationException(body, context); + const exception = new ValidationException({ + $metadata: deserializeMetadata(parsedOutput), + ...deserialized, + }); + return __decorateServiceException(exception, body); +}; + +const serializeAws_json1_0CapacitySpecification = (input: CapacitySpecification, context: __SerdeContext): any => { + return { + ...(input.readCapacityUnits !== undefined && + input.readCapacityUnits !== null && { readCapacityUnits: input.readCapacityUnits }), + ...(input.throughputMode !== undefined && + input.throughputMode !== null && { throughputMode: input.throughputMode }), + ...(input.writeCapacityUnits !== undefined && + input.writeCapacityUnits !== null && { writeCapacityUnits: input.writeCapacityUnits }), + }; +}; + +const serializeAws_json1_0ClusteringKey = (input: ClusteringKey, context: __SerdeContext): any => { + return { + ...(input.name !== undefined && input.name !== null && { name: input.name }), + ...(input.orderBy !== undefined && input.orderBy !== null && { orderBy: input.orderBy }), + }; +}; + +const serializeAws_json1_0ClusteringKeyList = (input: ClusteringKey[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return serializeAws_json1_0ClusteringKey(entry, context); + }); +}; + +const serializeAws_json1_0ColumnDefinition = (input: ColumnDefinition, context: __SerdeContext): any => { + return { + ...(input.name !== undefined && input.name !== 
null && { name: input.name }), + ...(input.type !== undefined && input.type !== null && { type: input.type }), + }; +}; + +const serializeAws_json1_0ColumnDefinitionList = (input: ColumnDefinition[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return serializeAws_json1_0ColumnDefinition(entry, context); + }); +}; + +const serializeAws_json1_0Comment = (input: Comment, context: __SerdeContext): any => { + return { + ...(input.message !== undefined && input.message !== null && { message: input.message }), + }; +}; + +const serializeAws_json1_0CreateKeyspaceRequest = (input: CreateKeyspaceRequest, context: __SerdeContext): any => { + return { + ...(input.keyspaceName !== undefined && input.keyspaceName !== null && { keyspaceName: input.keyspaceName }), + ...(input.tags !== undefined && input.tags !== null && { tags: serializeAws_json1_0TagList(input.tags, context) }), + }; +}; + +const serializeAws_json1_0CreateTableRequest = (input: CreateTableRequest, context: __SerdeContext): any => { + return { + ...(input.capacitySpecification !== undefined && + input.capacitySpecification !== null && { + capacitySpecification: serializeAws_json1_0CapacitySpecification(input.capacitySpecification, context), + }), + ...(input.comment !== undefined && + input.comment !== null && { comment: serializeAws_json1_0Comment(input.comment, context) }), + ...(input.defaultTimeToLive !== undefined && + input.defaultTimeToLive !== null && { defaultTimeToLive: input.defaultTimeToLive }), + ...(input.encryptionSpecification !== undefined && + input.encryptionSpecification !== null && { + encryptionSpecification: serializeAws_json1_0EncryptionSpecification(input.encryptionSpecification, context), + }), + ...(input.keyspaceName !== undefined && input.keyspaceName !== null && { keyspaceName: input.keyspaceName }), + ...(input.pointInTimeRecovery !== undefined && + 
input.pointInTimeRecovery !== null && { + pointInTimeRecovery: serializeAws_json1_0PointInTimeRecovery(input.pointInTimeRecovery, context), + }), + ...(input.schemaDefinition !== undefined && + input.schemaDefinition !== null && { + schemaDefinition: serializeAws_json1_0SchemaDefinition(input.schemaDefinition, context), + }), + ...(input.tableName !== undefined && input.tableName !== null && { tableName: input.tableName }), + ...(input.tags !== undefined && input.tags !== null && { tags: serializeAws_json1_0TagList(input.tags, context) }), + ...(input.ttl !== undefined && input.ttl !== null && { ttl: serializeAws_json1_0TimeToLive(input.ttl, context) }), + }; +}; + +const serializeAws_json1_0DeleteKeyspaceRequest = (input: DeleteKeyspaceRequest, context: __SerdeContext): any => { + return { + ...(input.keyspaceName !== undefined && input.keyspaceName !== null && { keyspaceName: input.keyspaceName }), + }; +}; + +const serializeAws_json1_0DeleteTableRequest = (input: DeleteTableRequest, context: __SerdeContext): any => { + return { + ...(input.keyspaceName !== undefined && input.keyspaceName !== null && { keyspaceName: input.keyspaceName }), + ...(input.tableName !== undefined && input.tableName !== null && { tableName: input.tableName }), + }; +}; + +const serializeAws_json1_0EncryptionSpecification = (input: EncryptionSpecification, context: __SerdeContext): any => { + return { + ...(input.kmsKeyIdentifier !== undefined && + input.kmsKeyIdentifier !== null && { kmsKeyIdentifier: input.kmsKeyIdentifier }), + ...(input.type !== undefined && input.type !== null && { type: input.type }), + }; +}; + +const serializeAws_json1_0GetKeyspaceRequest = (input: GetKeyspaceRequest, context: __SerdeContext): any => { + return { + ...(input.keyspaceName !== undefined && input.keyspaceName !== null && { keyspaceName: input.keyspaceName }), + }; +}; + +const serializeAws_json1_0GetTableRequest = (input: GetTableRequest, context: __SerdeContext): any => { + return { + 
...(input.keyspaceName !== undefined && input.keyspaceName !== null && { keyspaceName: input.keyspaceName }), + ...(input.tableName !== undefined && input.tableName !== null && { tableName: input.tableName }), + }; +}; + +const serializeAws_json1_0ListKeyspacesRequest = (input: ListKeyspacesRequest, context: __SerdeContext): any => { + return { + ...(input.maxResults !== undefined && input.maxResults !== null && { maxResults: input.maxResults }), + ...(input.nextToken !== undefined && input.nextToken !== null && { nextToken: input.nextToken }), + }; +}; + +const serializeAws_json1_0ListTablesRequest = (input: ListTablesRequest, context: __SerdeContext): any => { + return { + ...(input.keyspaceName !== undefined && input.keyspaceName !== null && { keyspaceName: input.keyspaceName }), + ...(input.maxResults !== undefined && input.maxResults !== null && { maxResults: input.maxResults }), + ...(input.nextToken !== undefined && input.nextToken !== null && { nextToken: input.nextToken }), + }; +}; + +const serializeAws_json1_0ListTagsForResourceRequest = ( + input: ListTagsForResourceRequest, + context: __SerdeContext +): any => { + return { + ...(input.maxResults !== undefined && input.maxResults !== null && { maxResults: input.maxResults }), + ...(input.nextToken !== undefined && input.nextToken !== null && { nextToken: input.nextToken }), + ...(input.resourceArn !== undefined && input.resourceArn !== null && { resourceArn: input.resourceArn }), + }; +}; + +const serializeAws_json1_0PartitionKey = (input: PartitionKey, context: __SerdeContext): any => { + return { + ...(input.name !== undefined && input.name !== null && { name: input.name }), + }; +}; + +const serializeAws_json1_0PartitionKeyList = (input: PartitionKey[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return serializeAws_json1_0PartitionKey(entry, context); + }); +}; + +const 
serializeAws_json1_0PointInTimeRecovery = (input: PointInTimeRecovery, context: __SerdeContext): any => { + return { + ...(input.status !== undefined && input.status !== null && { status: input.status }), + }; +}; + +const serializeAws_json1_0RestoreTableRequest = (input: RestoreTableRequest, context: __SerdeContext): any => { + return { + ...(input.capacitySpecificationOverride !== undefined && + input.capacitySpecificationOverride !== null && { + capacitySpecificationOverride: serializeAws_json1_0CapacitySpecification( + input.capacitySpecificationOverride, + context + ), + }), + ...(input.encryptionSpecificationOverride !== undefined && + input.encryptionSpecificationOverride !== null && { + encryptionSpecificationOverride: serializeAws_json1_0EncryptionSpecification( + input.encryptionSpecificationOverride, + context + ), + }), + ...(input.pointInTimeRecoveryOverride !== undefined && + input.pointInTimeRecoveryOverride !== null && { + pointInTimeRecoveryOverride: serializeAws_json1_0PointInTimeRecovery( + input.pointInTimeRecoveryOverride, + context + ), + }), + ...(input.restoreTimestamp !== undefined && + input.restoreTimestamp !== null && { restoreTimestamp: Math.round(input.restoreTimestamp.getTime() / 1000) }), + ...(input.sourceKeyspaceName !== undefined && + input.sourceKeyspaceName !== null && { sourceKeyspaceName: input.sourceKeyspaceName }), + ...(input.sourceTableName !== undefined && + input.sourceTableName !== null && { sourceTableName: input.sourceTableName }), + ...(input.tagsOverride !== undefined && + input.tagsOverride !== null && { tagsOverride: serializeAws_json1_0TagList(input.tagsOverride, context) }), + ...(input.targetKeyspaceName !== undefined && + input.targetKeyspaceName !== null && { targetKeyspaceName: input.targetKeyspaceName }), + ...(input.targetTableName !== undefined && + input.targetTableName !== null && { targetTableName: input.targetTableName }), + }; +}; + +const serializeAws_json1_0SchemaDefinition = (input: 
SchemaDefinition, context: __SerdeContext): any => { + return { + ...(input.allColumns !== undefined && + input.allColumns !== null && { allColumns: serializeAws_json1_0ColumnDefinitionList(input.allColumns, context) }), + ...(input.clusteringKeys !== undefined && + input.clusteringKeys !== null && { + clusteringKeys: serializeAws_json1_0ClusteringKeyList(input.clusteringKeys, context), + }), + ...(input.partitionKeys !== undefined && + input.partitionKeys !== null && { + partitionKeys: serializeAws_json1_0PartitionKeyList(input.partitionKeys, context), + }), + ...(input.staticColumns !== undefined && + input.staticColumns !== null && { + staticColumns: serializeAws_json1_0StaticColumnList(input.staticColumns, context), + }), + }; +}; + +const serializeAws_json1_0StaticColumn = (input: StaticColumn, context: __SerdeContext): any => { + return { + ...(input.name !== undefined && input.name !== null && { name: input.name }), + }; +}; + +const serializeAws_json1_0StaticColumnList = (input: StaticColumn[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return serializeAws_json1_0StaticColumn(entry, context); + }); +}; + +const serializeAws_json1_0Tag = (input: Tag, context: __SerdeContext): any => { + return { + ...(input.key !== undefined && input.key !== null && { key: input.key }), + ...(input.value !== undefined && input.value !== null && { value: input.value }), + }; +}; + +const serializeAws_json1_0TagList = (input: Tag[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return serializeAws_json1_0Tag(entry, context); + }); +}; + +const serializeAws_json1_0TagResourceRequest = (input: TagResourceRequest, context: __SerdeContext): any => { + return { + ...(input.resourceArn !== undefined && input.resourceArn !== null && { resourceArn: input.resourceArn 
}), + ...(input.tags !== undefined && input.tags !== null && { tags: serializeAws_json1_0TagList(input.tags, context) }), + }; +}; + +const serializeAws_json1_0TimeToLive = (input: TimeToLive, context: __SerdeContext): any => { + return { + ...(input.status !== undefined && input.status !== null && { status: input.status }), + }; +}; + +const serializeAws_json1_0UntagResourceRequest = (input: UntagResourceRequest, context: __SerdeContext): any => { + return { + ...(input.resourceArn !== undefined && input.resourceArn !== null && { resourceArn: input.resourceArn }), + ...(input.tags !== undefined && input.tags !== null && { tags: serializeAws_json1_0TagList(input.tags, context) }), + }; +}; + +const serializeAws_json1_0UpdateTableRequest = (input: UpdateTableRequest, context: __SerdeContext): any => { + return { + ...(input.addColumns !== undefined && + input.addColumns !== null && { addColumns: serializeAws_json1_0ColumnDefinitionList(input.addColumns, context) }), + ...(input.capacitySpecification !== undefined && + input.capacitySpecification !== null && { + capacitySpecification: serializeAws_json1_0CapacitySpecification(input.capacitySpecification, context), + }), + ...(input.defaultTimeToLive !== undefined && + input.defaultTimeToLive !== null && { defaultTimeToLive: input.defaultTimeToLive }), + ...(input.encryptionSpecification !== undefined && + input.encryptionSpecification !== null && { + encryptionSpecification: serializeAws_json1_0EncryptionSpecification(input.encryptionSpecification, context), + }), + ...(input.keyspaceName !== undefined && input.keyspaceName !== null && { keyspaceName: input.keyspaceName }), + ...(input.pointInTimeRecovery !== undefined && + input.pointInTimeRecovery !== null && { + pointInTimeRecovery: serializeAws_json1_0PointInTimeRecovery(input.pointInTimeRecovery, context), + }), + ...(input.tableName !== undefined && input.tableName !== null && { tableName: input.tableName }), + ...(input.ttl !== undefined && input.ttl !== null 
&& { ttl: serializeAws_json1_0TimeToLive(input.ttl, context) }), + }; +}; + +const deserializeAws_json1_0AccessDeniedException = (output: any, context: __SerdeContext): AccessDeniedException => { + return { + message: __expectString(output.message), + } as any; +}; + +const deserializeAws_json1_0CapacitySpecificationSummary = ( + output: any, + context: __SerdeContext +): CapacitySpecificationSummary => { + return { + lastUpdateToPayPerRequestTimestamp: + output.lastUpdateToPayPerRequestTimestamp !== undefined && output.lastUpdateToPayPerRequestTimestamp !== null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.lastUpdateToPayPerRequestTimestamp))) + : undefined, + readCapacityUnits: __expectLong(output.readCapacityUnits), + throughputMode: __expectString(output.throughputMode), + writeCapacityUnits: __expectLong(output.writeCapacityUnits), + } as any; +}; + +const deserializeAws_json1_0ClusteringKey = (output: any, context: __SerdeContext): ClusteringKey => { + return { + name: __expectString(output.name), + orderBy: __expectString(output.orderBy), + } as any; +}; + +const deserializeAws_json1_0ClusteringKeyList = (output: any, context: __SerdeContext): ClusteringKey[] => { + const retVal = (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_json1_0ClusteringKey(entry, context); + }); + return retVal; +}; + +const deserializeAws_json1_0ColumnDefinition = (output: any, context: __SerdeContext): ColumnDefinition => { + return { + name: __expectString(output.name), + type: __expectString(output.type), + } as any; +}; + +const deserializeAws_json1_0ColumnDefinitionList = (output: any, context: __SerdeContext): ColumnDefinition[] => { + const retVal = (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_json1_0ColumnDefinition(entry, context); + }); + return retVal; 
+}; + +const deserializeAws_json1_0Comment = (output: any, context: __SerdeContext): Comment => { + return { + message: __expectString(output.message), + } as any; +}; + +const deserializeAws_json1_0ConflictException = (output: any, context: __SerdeContext): ConflictException => { + return { + message: __expectString(output.message), + } as any; +}; + +const deserializeAws_json1_0CreateKeyspaceResponse = (output: any, context: __SerdeContext): CreateKeyspaceResponse => { + return { + resourceArn: __expectString(output.resourceArn), + } as any; +}; + +const deserializeAws_json1_0CreateTableResponse = (output: any, context: __SerdeContext): CreateTableResponse => { + return { + resourceArn: __expectString(output.resourceArn), + } as any; +}; + +const deserializeAws_json1_0DeleteKeyspaceResponse = (output: any, context: __SerdeContext): DeleteKeyspaceResponse => { + return {} as any; +}; + +const deserializeAws_json1_0DeleteTableResponse = (output: any, context: __SerdeContext): DeleteTableResponse => { + return {} as any; +}; + +const deserializeAws_json1_0EncryptionSpecification = ( + output: any, + context: __SerdeContext +): EncryptionSpecification => { + return { + kmsKeyIdentifier: __expectString(output.kmsKeyIdentifier), + type: __expectString(output.type), + } as any; +}; + +const deserializeAws_json1_0GetKeyspaceResponse = (output: any, context: __SerdeContext): GetKeyspaceResponse => { + return { + keyspaceName: __expectString(output.keyspaceName), + resourceArn: __expectString(output.resourceArn), + } as any; +}; + +const deserializeAws_json1_0GetTableResponse = (output: any, context: __SerdeContext): GetTableResponse => { + return { + capacitySpecification: + output.capacitySpecification !== undefined && output.capacitySpecification !== null + ? deserializeAws_json1_0CapacitySpecificationSummary(output.capacitySpecification, context) + : undefined, + comment: + output.comment !== undefined && output.comment !== null + ? 
deserializeAws_json1_0Comment(output.comment, context) + : undefined, + creationTimestamp: + output.creationTimestamp !== undefined && output.creationTimestamp !== null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.creationTimestamp))) + : undefined, + defaultTimeToLive: __expectInt32(output.defaultTimeToLive), + encryptionSpecification: + output.encryptionSpecification !== undefined && output.encryptionSpecification !== null + ? deserializeAws_json1_0EncryptionSpecification(output.encryptionSpecification, context) + : undefined, + keyspaceName: __expectString(output.keyspaceName), + pointInTimeRecovery: + output.pointInTimeRecovery !== undefined && output.pointInTimeRecovery !== null + ? deserializeAws_json1_0PointInTimeRecoverySummary(output.pointInTimeRecovery, context) + : undefined, + resourceArn: __expectString(output.resourceArn), + schemaDefinition: + output.schemaDefinition !== undefined && output.schemaDefinition !== null + ? deserializeAws_json1_0SchemaDefinition(output.schemaDefinition, context) + : undefined, + status: __expectString(output.status), + tableName: __expectString(output.tableName), + ttl: + output.ttl !== undefined && output.ttl !== null + ? 
deserializeAws_json1_0TimeToLive(output.ttl, context) + : undefined, + } as any; +}; + +const deserializeAws_json1_0InternalServerException = ( + output: any, + context: __SerdeContext +): InternalServerException => { + return { + message: __expectString(output.message), + } as any; +}; + +const deserializeAws_json1_0KeyspaceSummary = (output: any, context: __SerdeContext): KeyspaceSummary => { + return { + keyspaceName: __expectString(output.keyspaceName), + resourceArn: __expectString(output.resourceArn), + } as any; +}; + +const deserializeAws_json1_0KeyspaceSummaryList = (output: any, context: __SerdeContext): KeyspaceSummary[] => { + const retVal = (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_json1_0KeyspaceSummary(entry, context); + }); + return retVal; +}; + +const deserializeAws_json1_0ListKeyspacesResponse = (output: any, context: __SerdeContext): ListKeyspacesResponse => { + return { + keyspaces: + output.keyspaces !== undefined && output.keyspaces !== null + ? deserializeAws_json1_0KeyspaceSummaryList(output.keyspaces, context) + : undefined, + nextToken: __expectString(output.nextToken), + } as any; +}; + +const deserializeAws_json1_0ListTablesResponse = (output: any, context: __SerdeContext): ListTablesResponse => { + return { + nextToken: __expectString(output.nextToken), + tables: + output.tables !== undefined && output.tables !== null + ? deserializeAws_json1_0TableSummaryList(output.tables, context) + : undefined, + } as any; +}; + +const deserializeAws_json1_0ListTagsForResourceResponse = ( + output: any, + context: __SerdeContext +): ListTagsForResourceResponse => { + return { + nextToken: __expectString(output.nextToken), + tags: + output.tags !== undefined && output.tags !== null + ? 
deserializeAws_json1_0TagList(output.tags, context) + : undefined, + } as any; +}; + +const deserializeAws_json1_0PartitionKey = (output: any, context: __SerdeContext): PartitionKey => { + return { + name: __expectString(output.name), + } as any; +}; + +const deserializeAws_json1_0PartitionKeyList = (output: any, context: __SerdeContext): PartitionKey[] => { + const retVal = (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_json1_0PartitionKey(entry, context); + }); + return retVal; +}; + +const deserializeAws_json1_0PointInTimeRecoverySummary = ( + output: any, + context: __SerdeContext +): PointInTimeRecoverySummary => { + return { + earliestRestorableTimestamp: + output.earliestRestorableTimestamp !== undefined && output.earliestRestorableTimestamp !== null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.earliestRestorableTimestamp))) + : undefined, + status: __expectString(output.status), + } as any; +}; + +const deserializeAws_json1_0ResourceNotFoundException = ( + output: any, + context: __SerdeContext +): ResourceNotFoundException => { + return { + message: __expectString(output.message), + resourceArn: __expectString(output.resourceArn), + } as any; +}; + +const deserializeAws_json1_0RestoreTableResponse = (output: any, context: __SerdeContext): RestoreTableResponse => { + return { + restoredTableARN: __expectString(output.restoredTableARN), + } as any; +}; + +const deserializeAws_json1_0SchemaDefinition = (output: any, context: __SerdeContext): SchemaDefinition => { + return { + allColumns: + output.allColumns !== undefined && output.allColumns !== null + ? deserializeAws_json1_0ColumnDefinitionList(output.allColumns, context) + : undefined, + clusteringKeys: + output.clusteringKeys !== undefined && output.clusteringKeys !== null + ? 
deserializeAws_json1_0ClusteringKeyList(output.clusteringKeys, context) + : undefined, + partitionKeys: + output.partitionKeys !== undefined && output.partitionKeys !== null + ? deserializeAws_json1_0PartitionKeyList(output.partitionKeys, context) + : undefined, + staticColumns: + output.staticColumns !== undefined && output.staticColumns !== null + ? deserializeAws_json1_0StaticColumnList(output.staticColumns, context) + : undefined, + } as any; +}; + +const deserializeAws_json1_0ServiceQuotaExceededException = ( + output: any, + context: __SerdeContext +): ServiceQuotaExceededException => { + return { + message: __expectString(output.message), + } as any; +}; + +const deserializeAws_json1_0StaticColumn = (output: any, context: __SerdeContext): StaticColumn => { + return { + name: __expectString(output.name), + } as any; +}; + +const deserializeAws_json1_0StaticColumnList = (output: any, context: __SerdeContext): StaticColumn[] => { + const retVal = (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_json1_0StaticColumn(entry, context); + }); + return retVal; +}; + +const deserializeAws_json1_0TableSummary = (output: any, context: __SerdeContext): TableSummary => { + return { + keyspaceName: __expectString(output.keyspaceName), + resourceArn: __expectString(output.resourceArn), + tableName: __expectString(output.tableName), + } as any; +}; + +const deserializeAws_json1_0TableSummaryList = (output: any, context: __SerdeContext): TableSummary[] => { + const retVal = (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_json1_0TableSummary(entry, context); + }); + return retVal; +}; + +const deserializeAws_json1_0Tag = (output: any, context: __SerdeContext): Tag => { + return { + key: __expectString(output.key), + value: __expectString(output.value), + } as any; +}; + +const 
deserializeAws_json1_0TagList = (output: any, context: __SerdeContext): Tag[] => { + const retVal = (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_json1_0Tag(entry, context); + }); + return retVal; +}; + +const deserializeAws_json1_0TagResourceResponse = (output: any, context: __SerdeContext): TagResourceResponse => { + return {} as any; +}; + +const deserializeAws_json1_0TimeToLive = (output: any, context: __SerdeContext): TimeToLive => { + return { + status: __expectString(output.status), + } as any; +}; + +const deserializeAws_json1_0UntagResourceResponse = (output: any, context: __SerdeContext): UntagResourceResponse => { + return {} as any; +}; + +const deserializeAws_json1_0UpdateTableResponse = (output: any, context: __SerdeContext): UpdateTableResponse => { + return { + resourceArn: __expectString(output.resourceArn), + } as any; +}; + +const deserializeAws_json1_0ValidationException = (output: any, context: __SerdeContext): ValidationException => { + return { + message: __expectString(output.message), + } as any; +}; + +const deserializeMetadata = (output: __HttpResponse): __ResponseMetadata => ({ + httpStatusCode: output.statusCode, + requestId: output.headers["x-amzn-requestid"] ?? output.headers["x-amzn-request-id"], + extendedRequestId: output.headers["x-amz-id-2"], + cfId: output.headers["x-amz-cf-id"], +}); + +// Collect low-level response body stream to Uint8Array. +const collectBody = (streamBody: any = new Uint8Array(), context: __SerdeContext): Promise => { + if (streamBody instanceof Uint8Array) { + return Promise.resolve(streamBody); + } + return context.streamCollector(streamBody) || Promise.resolve(new Uint8Array()); +}; + +// Encode Uint8Array data into string with utf-8. 
+const collectBodyString = (streamBody: any, context: __SerdeContext): Promise => + collectBody(streamBody, context).then((body) => context.utf8Encoder(body)); + +const buildHttpRpcRequest = async ( + context: __SerdeContext, + headers: __HeaderBag, + path: string, + resolvedHostname: string | undefined, + body: any +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const contents: any = { + protocol, + hostname, + port, + method: "POST", + path: basePath.endsWith("/") ? basePath.slice(0, -1) + path : basePath + path, + headers, + }; + if (resolvedHostname !== undefined) { + contents.hostname = resolvedHostname; + } + if (body !== undefined) { + contents.body = body; + } + return new __HttpRequest(contents); +}; + +const parseBody = (streamBody: any, context: __SerdeContext): any => + collectBodyString(streamBody, context).then((encoded) => { + if (encoded.length) { + return JSON.parse(encoded); + } + return {}; + }); + +/** + * Load an error code for the aws.rest-json-1.1 protocol. 
+ */ +const loadRestJsonErrorCode = (output: __HttpResponse, data: any): string => { + const findKey = (object: any, key: string) => Object.keys(object).find((k) => k.toLowerCase() === key.toLowerCase()); + + const sanitizeErrorCode = (rawValue: string): string => { + let cleanValue = rawValue; + if (cleanValue.indexOf(":") >= 0) { + cleanValue = cleanValue.split(":")[0]; + } + if (cleanValue.indexOf("#") >= 0) { + cleanValue = cleanValue.split("#")[1]; + } + return cleanValue; + }; + + const headerKey = findKey(output.headers, "x-amzn-errortype"); + if (headerKey !== undefined) { + return sanitizeErrorCode(output.headers[headerKey]); + } + + if (data.code !== undefined) { + return sanitizeErrorCode(data.code); + } + + if (data["__type"] !== undefined) { + return sanitizeErrorCode(data["__type"]); + } + + return ""; +}; diff --git a/clients/client-keyspaces/src/runtimeConfig.browser.ts b/clients/client-keyspaces/src/runtimeConfig.browser.ts new file mode 100644 index 000000000000..bf808b547fce --- /dev/null +++ b/clients/client-keyspaces/src/runtimeConfig.browser.ts @@ -0,0 +1,49 @@ +// @ts-ignore: package.json will be imported from dist folders +import packageInfo from "../package.json"; // eslint-disable-line + +import { Sha256 } from "@aws-crypto/sha256-browser"; +import { DEFAULT_USE_DUALSTACK_ENDPOINT, DEFAULT_USE_FIPS_ENDPOINT } from "@aws-sdk/config-resolver"; +import { FetchHttpHandler as RequestHandler, streamCollector } from "@aws-sdk/fetch-http-handler"; +import { invalidProvider } from "@aws-sdk/invalid-dependency"; +import { DEFAULT_MAX_ATTEMPTS, DEFAULT_RETRY_MODE } from "@aws-sdk/middleware-retry"; +import { fromBase64, toBase64 } from "@aws-sdk/util-base64-browser"; +import { calculateBodyLength } from "@aws-sdk/util-body-length-browser"; +import { defaultUserAgent } from "@aws-sdk/util-user-agent-browser"; +import { fromUtf8, toUtf8 } from "@aws-sdk/util-utf8-browser"; +import { KeyspacesClientConfig } from "./KeyspacesClient"; +import { 
getRuntimeConfig as getSharedRuntimeConfig } from "./runtimeConfig.shared"; +import { loadConfigsForDefaultMode } from "@aws-sdk/smithy-client"; +import { resolveDefaultsModeConfig } from "@aws-sdk/util-defaults-mode-browser"; + +/** + * @internal + */ +export const getRuntimeConfig = (config: KeyspacesClientConfig) => { + const defaultsMode = resolveDefaultsModeConfig(config); + const defaultConfigProvider = () => defaultsMode().then(loadConfigsForDefaultMode); + const clientSharedValues = getSharedRuntimeConfig(config); + return { + ...clientSharedValues, + ...config, + runtime: "browser", + defaultsMode, + base64Decoder: config?.base64Decoder ?? fromBase64, + base64Encoder: config?.base64Encoder ?? toBase64, + bodyLengthChecker: config?.bodyLengthChecker ?? calculateBodyLength, + credentialDefaultProvider: + config?.credentialDefaultProvider ?? ((_: unknown) => () => Promise.reject(new Error("Credential is missing"))), + defaultUserAgentProvider: + config?.defaultUserAgentProvider ?? + defaultUserAgent({ serviceId: clientSharedValues.serviceId, clientVersion: packageInfo.version }), + maxAttempts: config?.maxAttempts ?? DEFAULT_MAX_ATTEMPTS, + region: config?.region ?? invalidProvider("Region is missing"), + requestHandler: config?.requestHandler ?? new RequestHandler(defaultConfigProvider), + retryMode: config?.retryMode ?? (async () => (await defaultConfigProvider()).retryMode || DEFAULT_RETRY_MODE), + sha256: config?.sha256 ?? Sha256, + streamCollector: config?.streamCollector ?? streamCollector, + useDualstackEndpoint: config?.useDualstackEndpoint ?? (() => Promise.resolve(DEFAULT_USE_DUALSTACK_ENDPOINT)), + useFipsEndpoint: config?.useFipsEndpoint ?? (() => Promise.resolve(DEFAULT_USE_FIPS_ENDPOINT)), + utf8Decoder: config?.utf8Decoder ?? fromUtf8, + utf8Encoder: config?.utf8Encoder ?? 
toUtf8, + }; +}; diff --git a/clients/client-keyspaces/src/runtimeConfig.native.ts b/clients/client-keyspaces/src/runtimeConfig.native.ts new file mode 100644 index 000000000000..ad6ff5c1c3bc --- /dev/null +++ b/clients/client-keyspaces/src/runtimeConfig.native.ts @@ -0,0 +1,17 @@ +import { Sha256 } from "@aws-crypto/sha256-js"; + +import { KeyspacesClientConfig } from "./KeyspacesClient"; +import { getRuntimeConfig as getBrowserRuntimeConfig } from "./runtimeConfig.browser"; + +/** + * @internal + */ +export const getRuntimeConfig = (config: KeyspacesClientConfig) => { + const browserDefaults = getBrowserRuntimeConfig(config); + return { + ...browserDefaults, + ...config, + runtime: "react-native", + sha256: config?.sha256 ?? Sha256, + }; +}; diff --git a/clients/client-keyspaces/src/runtimeConfig.shared.ts b/clients/client-keyspaces/src/runtimeConfig.shared.ts new file mode 100644 index 000000000000..c3a635909a34 --- /dev/null +++ b/clients/client-keyspaces/src/runtimeConfig.shared.ts @@ -0,0 +1,17 @@ +import { Logger as __Logger } from "@aws-sdk/types"; +import { parseUrl } from "@aws-sdk/url-parser"; + +import { defaultRegionInfoProvider } from "./endpoints"; +import { KeyspacesClientConfig } from "./KeyspacesClient"; + +/** + * @internal + */ +export const getRuntimeConfig = (config: KeyspacesClientConfig) => ({ + apiVersion: "2022-02-10", + disableHostPrefix: config?.disableHostPrefix ?? false, + logger: config?.logger ?? ({} as __Logger), + regionInfoProvider: config?.regionInfoProvider ?? defaultRegionInfoProvider, + serviceId: config?.serviceId ?? "Keyspaces", + urlParser: config?.urlParser ?? 
parseUrl, +}); diff --git a/clients/client-keyspaces/src/runtimeConfig.ts b/clients/client-keyspaces/src/runtimeConfig.ts new file mode 100644 index 000000000000..314ae50a7eb9 --- /dev/null +++ b/clients/client-keyspaces/src/runtimeConfig.ts @@ -0,0 +1,65 @@ +// @ts-ignore: package.json will be imported from dist folders +import packageInfo from "../package.json"; // eslint-disable-line + +import { decorateDefaultCredentialProvider } from "@aws-sdk/client-sts"; +import { + NODE_REGION_CONFIG_FILE_OPTIONS, + NODE_REGION_CONFIG_OPTIONS, + NODE_USE_DUALSTACK_ENDPOINT_CONFIG_OPTIONS, + NODE_USE_FIPS_ENDPOINT_CONFIG_OPTIONS, +} from "@aws-sdk/config-resolver"; +import { defaultProvider as credentialDefaultProvider } from "@aws-sdk/credential-provider-node"; +import { Hash } from "@aws-sdk/hash-node"; +import { + DEFAULT_RETRY_MODE, + NODE_MAX_ATTEMPT_CONFIG_OPTIONS, + NODE_RETRY_MODE_CONFIG_OPTIONS, +} from "@aws-sdk/middleware-retry"; +import { loadConfig as loadNodeConfig } from "@aws-sdk/node-config-provider"; +import { NodeHttpHandler as RequestHandler, streamCollector } from "@aws-sdk/node-http-handler"; +import { fromBase64, toBase64 } from "@aws-sdk/util-base64-node"; +import { calculateBodyLength } from "@aws-sdk/util-body-length-node"; +import { defaultUserAgent } from "@aws-sdk/util-user-agent-node"; +import { fromUtf8, toUtf8 } from "@aws-sdk/util-utf8-node"; +import { KeyspacesClientConfig } from "./KeyspacesClient"; +import { getRuntimeConfig as getSharedRuntimeConfig } from "./runtimeConfig.shared"; +import { loadConfigsForDefaultMode } from "@aws-sdk/smithy-client"; +import { resolveDefaultsModeConfig } from "@aws-sdk/util-defaults-mode-node"; + +/** + * @internal + */ +export const getRuntimeConfig = (config: KeyspacesClientConfig) => { + const defaultsMode = resolveDefaultsModeConfig(config); + const defaultConfigProvider = () => defaultsMode().then(loadConfigsForDefaultMode); + const clientSharedValues = getSharedRuntimeConfig(config); + return { + 
...clientSharedValues, + ...config, + runtime: "node", + defaultsMode, + base64Decoder: config?.base64Decoder ?? fromBase64, + base64Encoder: config?.base64Encoder ?? toBase64, + bodyLengthChecker: config?.bodyLengthChecker ?? calculateBodyLength, + credentialDefaultProvider: + config?.credentialDefaultProvider ?? decorateDefaultCredentialProvider(credentialDefaultProvider), + defaultUserAgentProvider: + config?.defaultUserAgentProvider ?? + defaultUserAgent({ serviceId: clientSharedValues.serviceId, clientVersion: packageInfo.version }), + maxAttempts: config?.maxAttempts ?? loadNodeConfig(NODE_MAX_ATTEMPT_CONFIG_OPTIONS), + region: config?.region ?? loadNodeConfig(NODE_REGION_CONFIG_OPTIONS, NODE_REGION_CONFIG_FILE_OPTIONS), + requestHandler: config?.requestHandler ?? new RequestHandler(defaultConfigProvider), + retryMode: + config?.retryMode ?? + loadNodeConfig({ + ...NODE_RETRY_MODE_CONFIG_OPTIONS, + default: async () => (await defaultConfigProvider()).retryMode || DEFAULT_RETRY_MODE, + }), + sha256: config?.sha256 ?? Hash.bind(null, "sha256"), + streamCollector: config?.streamCollector ?? streamCollector, + useDualstackEndpoint: config?.useDualstackEndpoint ?? loadNodeConfig(NODE_USE_DUALSTACK_ENDPOINT_CONFIG_OPTIONS), + useFipsEndpoint: config?.useFipsEndpoint ?? loadNodeConfig(NODE_USE_FIPS_ENDPOINT_CONFIG_OPTIONS), + utf8Decoder: config?.utf8Decoder ?? fromUtf8, + utf8Encoder: config?.utf8Encoder ?? 
toUtf8, + }; +}; diff --git a/clients/client-keyspaces/tsconfig.cjs.json b/clients/client-keyspaces/tsconfig.cjs.json new file mode 100644 index 000000000000..3bf80b3f4ae5 --- /dev/null +++ b/clients/client-keyspaces/tsconfig.cjs.json @@ -0,0 +1,8 @@ +{ + "extends": "./tsconfig", + "compilerOptions": { + "target": "ES2018", + "module": "commonjs", + "outDir": "dist-cjs" + } +} diff --git a/clients/client-keyspaces/tsconfig.es.json b/clients/client-keyspaces/tsconfig.es.json new file mode 100644 index 000000000000..4c72364cd1a0 --- /dev/null +++ b/clients/client-keyspaces/tsconfig.es.json @@ -0,0 +1,10 @@ +{ + "extends": "./tsconfig", + "compilerOptions": { + "target": "es5", + "module": "esnext", + "moduleResolution": "node", + "lib": ["dom", "es5", "es2015.promise", "es2015.collection", "es2015.iterable", "es2015.symbol.wellknown"], + "outDir": "dist-es" + } +} diff --git a/clients/client-keyspaces/tsconfig.json b/clients/client-keyspaces/tsconfig.json new file mode 100644 index 000000000000..07964b54c33b --- /dev/null +++ b/clients/client-keyspaces/tsconfig.json @@ -0,0 +1,12 @@ +{ + "extends": "@tsconfig/recommended/tsconfig.json", + "compilerOptions": { + "downlevelIteration": true, + "importHelpers": true, + "incremental": true, + "removeComments": true, + "resolveJsonModule": true, + "rootDir": "src" + }, + "exclude": ["test/"] +} diff --git a/clients/client-keyspaces/tsconfig.types.json b/clients/client-keyspaces/tsconfig.types.json new file mode 100644 index 000000000000..4c3dfa7b3d25 --- /dev/null +++ b/clients/client-keyspaces/tsconfig.types.json @@ -0,0 +1,10 @@ +{ + "extends": "./tsconfig", + "compilerOptions": { + "removeComments": false, + "declaration": true, + "declarationDir": "dist-types", + "emitDeclarationOnly": true + }, + "exclude": ["test/**/*", "dist-types/**/*"] +} diff --git a/clients/client-keyspaces/typedoc.json b/clients/client-keyspaces/typedoc.json new file mode 100644 index 000000000000..1b5cb0146719 --- /dev/null +++ 
b/clients/client-keyspaces/typedoc.json @@ -0,0 +1,3 @@ +{ + "extends": "../../typedoc.client.json" +} diff --git a/clients/client-lex-models-v2/src/models/models_0.ts b/clients/client-lex-models-v2/src/models/models_0.ts index 2568e3f4a300..928fa6511574 100644 --- a/clients/client-lex-models-v2/src/models/models_0.ts +++ b/clients/client-lex-models-v2/src/models/models_0.ts @@ -3611,7 +3611,7 @@ export interface CreateSlotRequest { * The slot type determines the values that can be entered into the * slot.

      */ - slotTypeId: string | undefined; + slotTypeId?: string; /** *

      Specifies prompts that Amazon Lex sends to the user to elicit a response @@ -9625,7 +9625,7 @@ export interface UpdateSlotRequest { *

      The unique identifier of the new slot type to associate with this * slot.

      */ - slotTypeId: string | undefined; + slotTypeId?: string; /** *

      A new set of prompts that Amazon Lex sends to the user to elicit a diff --git a/clients/client-macie/README.md b/clients/client-macie/README.md index afbf4555b410..4335fef4ba91 100644 --- a/clients/client-macie/README.md +++ b/clients/client-macie/README.md @@ -9,12 +9,12 @@ AWS SDK for JavaScript Macie Client for Node.js, Browser and React Native. Amazon Macie Classic -

      Amazon Macie Classic is a security service that uses machine learning to automatically -discover, classify, and protect sensitive data in AWS. Macie Classic recognizes sensitive data -such as personally identifiable information (PII) or intellectual property, and provides you -with dashboards and alerts that give visibility into how this data is being accessed or moved. -For more information, see the Amazon Macie -Classic User Guide.

      +

      Amazon Macie Classic has been discontinued and is no longer available.

      + +

      A new Amazon Macie is now available with significant design improvements and additional +features, at a lower price and in most Amazon Web Services Regions. We encourage you to take advantage of the +new and improved features, and benefit from the reduced cost. To learn about features and pricing for the new Macie, see Amazon Macie. To learn how to use the new Macie, see the Amazon Macie User +Guide.

      ## Installing diff --git a/clients/client-macie/src/Macie.ts b/clients/client-macie/src/Macie.ts index 38a41db062e3..35b5137d71af 100644 --- a/clients/client-macie/src/Macie.ts +++ b/clients/client-macie/src/Macie.ts @@ -39,16 +39,16 @@ import { MacieClient } from "./MacieClient"; /** * Amazon Macie Classic - *

      Amazon Macie Classic is a security service that uses machine learning to automatically - * discover, classify, and protect sensitive data in AWS. Macie Classic recognizes sensitive data - * such as personally identifiable information (PII) or intellectual property, and provides you - * with dashboards and alerts that give visibility into how this data is being accessed or moved. - * For more information, see the Amazon Macie - * Classic User Guide.

      + *

      Amazon Macie Classic has been discontinued and is no longer available.

      + * + *

      A new Amazon Macie is now available with significant design improvements and additional + * features, at a lower price and in most Amazon Web Services Regions. We encourage you to take advantage of the + * new and improved features, and benefit from the reduced cost. To learn about features and pricing for the new Macie, see Amazon Macie. To learn how to use the new Macie, see the Amazon Macie User + * Guide.

      */ export class Macie extends MacieClient { /** - *

      Associates a specified AWS account with Amazon Macie Classic as a member + *

      (Discontinued) Associates a specified Amazon Web Services account with Amazon Macie Classic as a member * account.

      */ public associateMemberAccount( @@ -81,11 +81,11 @@ export class Macie extends MacieClient { } /** - *

      Associates specified S3 resources with Amazon Macie Classic for monitoring and data - * classification. If memberAccountId isn't specified, the action associates specified S3 - * resources with Macie Classic for the current Macie Classic administrator account. If memberAccountId is specified, - * the action associates specified S3 resources with Macie Classic for the specified member - * account.

      + *

      (Discontinued) Associates specified S3 resources with Amazon Macie Classic for + * monitoring and data classification. If memberAccountId isn't specified, the action associates + * specified S3 resources with Macie Classic for the current Macie Classic administrator account. + * If memberAccountId is specified, the action associates specified S3 resources with Macie + * Classic for the specified member account.

      */ public associateS3Resources( args: AssociateS3ResourcesCommandInput, @@ -117,7 +117,8 @@ export class Macie extends MacieClient { } /** - *

      Removes the specified member account from Amazon Macie Classic.

      + *

      (Discontinued) Removes the specified member account from Amazon Macie + * Classic.

      */ public disassociateMemberAccount( args: DisassociateMemberAccountCommandInput, @@ -149,10 +150,11 @@ export class Macie extends MacieClient { } /** - *

      Removes specified S3 resources from being monitored by Amazon Macie Classic. If - * memberAccountId isn't specified, the action removes specified S3 resources from Macie Classic - * for the current Macie Classic administrator account. If memberAccountId is specified, the action removes specified - * S3 resources from Macie Classic for the specified member account.

      + *

      (Discontinued) Removes specified S3 resources from being monitored by Amazon Macie + * Classic. If memberAccountId isn't specified, the action removes specified S3 resources from + * Macie Classic for the current Macie Classic administrator account. If memberAccountId is + * specified, the action removes specified S3 resources from Macie Classic for the specified + * member account.

      */ public disassociateS3Resources( args: DisassociateS3ResourcesCommandInput, @@ -184,7 +186,8 @@ export class Macie extends MacieClient { } /** - *

      Lists all Amazon Macie Classic member accounts for the current Macie Classic administrator account.

      + *

      (Discontinued) Lists all Amazon Macie Classic member accounts for the current Macie + * Classic administrator account.

      */ public listMemberAccounts( args: ListMemberAccountsCommandInput, @@ -216,10 +219,11 @@ export class Macie extends MacieClient { } /** - *

      Lists all the S3 resources associated with Amazon Macie Classic. If memberAccountId - * isn't specified, the action lists the S3 resources associated with Macie Classic for - * the current Macie Classic administrator account. If memberAccountId is specified, the action lists the S3 resources - * associated with Macie Classic for the specified member account.

      + *

      (Discontinued) Lists all the S3 resources associated with Amazon Macie Classic. If + * memberAccountId isn't specified, the action lists the S3 resources associated with Macie + * Classic for the current Macie Classic administrator account. If memberAccountId is specified, + * the action lists the S3 resources associated with Macie Classic for the specified member + * account.

      */ public listS3Resources( args: ListS3ResourcesCommandInput, @@ -251,11 +255,11 @@ export class Macie extends MacieClient { } /** - *

      Updates the classification types for the specified S3 resources. If memberAccountId - * isn't specified, the action updates the classification types of the S3 resources associated - * with Amazon Macie Classic for the current Macie Classic administrator account. If memberAccountId is specified, the - * action updates the classification types of the S3 resources associated with Macie - * Classic for the specified member account.

      + *

      (Discontinued) Updates the classification types for the specified S3 resources. If + * memberAccountId isn't specified, the action updates the classification types of the S3 + * resources associated with Amazon Macie Classic for the current Macie Classic administrator + * account. If memberAccountId is specified, the action updates the classification types of the + * S3 resources associated with Macie Classic for the specified member account.

      */ public updateS3Resources( args: UpdateS3ResourcesCommandInput, diff --git a/clients/client-macie/src/MacieClient.ts b/clients/client-macie/src/MacieClient.ts index a80c5410b3c2..9ba3b9a2539d 100644 --- a/clients/client-macie/src/MacieClient.ts +++ b/clients/client-macie/src/MacieClient.ts @@ -245,12 +245,12 @@ export interface MacieClientResolvedConfig extends MacieClientResolvedConfigType /** * Amazon Macie Classic - *

      Amazon Macie Classic is a security service that uses machine learning to automatically - * discover, classify, and protect sensitive data in AWS. Macie Classic recognizes sensitive data - * such as personally identifiable information (PII) or intellectual property, and provides you - * with dashboards and alerts that give visibility into how this data is being accessed or moved. - * For more information, see the Amazon Macie - * Classic User Guide.

      + *

      Amazon Macie Classic has been discontinued and is no longer available.

      + * + *

      A new Amazon Macie is now available with significant design improvements and additional + * features, at a lower price and in most Amazon Web Services Regions. We encourage you to take advantage of the + * new and improved features, and benefit from the reduced cost. To learn about features and pricing for the new Macie, see Amazon Macie. To learn how to use the new Macie, see the Amazon Macie User + * Guide.

      */ export class MacieClient extends __Client< __HttpHandlerOptions, diff --git a/clients/client-macie/src/commands/AssociateMemberAccountCommand.ts b/clients/client-macie/src/commands/AssociateMemberAccountCommand.ts index c19a45710e86..6ef148b0a207 100644 --- a/clients/client-macie/src/commands/AssociateMemberAccountCommand.ts +++ b/clients/client-macie/src/commands/AssociateMemberAccountCommand.ts @@ -22,7 +22,7 @@ export interface AssociateMemberAccountCommandInput extends AssociateMemberAccou export interface AssociateMemberAccountCommandOutput extends __MetadataBearer {} /** - *

      Associates a specified AWS account with Amazon Macie Classic as a member + *

      (Discontinued) Associates a specified Amazon Web Services account with Amazon Macie Classic as a member * account.

      * @example * Use a bare-bones client and the command you need to make an API call. diff --git a/clients/client-macie/src/commands/AssociateS3ResourcesCommand.ts b/clients/client-macie/src/commands/AssociateS3ResourcesCommand.ts index f5ab1a1f8f19..b6a8f293e0de 100644 --- a/clients/client-macie/src/commands/AssociateS3ResourcesCommand.ts +++ b/clients/client-macie/src/commands/AssociateS3ResourcesCommand.ts @@ -22,11 +22,11 @@ export interface AssociateS3ResourcesCommandInput extends AssociateS3ResourcesRe export interface AssociateS3ResourcesCommandOutput extends AssociateS3ResourcesResult, __MetadataBearer {} /** - *

      Associates specified S3 resources with Amazon Macie Classic for monitoring and data - * classification. If memberAccountId isn't specified, the action associates specified S3 - * resources with Macie Classic for the current Macie Classic administrator account. If memberAccountId is specified, - * the action associates specified S3 resources with Macie Classic for the specified member - * account.

      + *

      (Discontinued) Associates specified S3 resources with Amazon Macie Classic for + * monitoring and data classification. If memberAccountId isn't specified, the action associates + * specified S3 resources with Macie Classic for the current Macie Classic administrator account. + * If memberAccountId is specified, the action associates specified S3 resources with Macie + * Classic for the specified member account.

      * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-macie/src/commands/DisassociateMemberAccountCommand.ts b/clients/client-macie/src/commands/DisassociateMemberAccountCommand.ts index aebe5783d97d..58616bbd0c3b 100644 --- a/clients/client-macie/src/commands/DisassociateMemberAccountCommand.ts +++ b/clients/client-macie/src/commands/DisassociateMemberAccountCommand.ts @@ -22,7 +22,8 @@ export interface DisassociateMemberAccountCommandInput extends DisassociateMembe export interface DisassociateMemberAccountCommandOutput extends __MetadataBearer {} /** - *

      Removes the specified member account from Amazon Macie Classic.

      + *

      (Discontinued) Removes the specified member account from Amazon Macie + * Classic.

      * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-macie/src/commands/DisassociateS3ResourcesCommand.ts b/clients/client-macie/src/commands/DisassociateS3ResourcesCommand.ts index 19b4f28a4d00..41c5b243b262 100644 --- a/clients/client-macie/src/commands/DisassociateS3ResourcesCommand.ts +++ b/clients/client-macie/src/commands/DisassociateS3ResourcesCommand.ts @@ -22,10 +22,11 @@ export interface DisassociateS3ResourcesCommandInput extends DisassociateS3Resou export interface DisassociateS3ResourcesCommandOutput extends DisassociateS3ResourcesResult, __MetadataBearer {} /** - *

      Removes specified S3 resources from being monitored by Amazon Macie Classic. If - * memberAccountId isn't specified, the action removes specified S3 resources from Macie Classic - * for the current Macie Classic administrator account. If memberAccountId is specified, the action removes specified - * S3 resources from Macie Classic for the specified member account.

      + *

      (Discontinued) Removes specified S3 resources from being monitored by Amazon Macie + * Classic. If memberAccountId isn't specified, the action removes specified S3 resources from + * Macie Classic for the current Macie Classic administrator account. If memberAccountId is + * specified, the action removes specified S3 resources from Macie Classic for the specified + * member account.

      * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-macie/src/commands/ListMemberAccountsCommand.ts b/clients/client-macie/src/commands/ListMemberAccountsCommand.ts index 3e4f83c3059d..ca077182eb93 100644 --- a/clients/client-macie/src/commands/ListMemberAccountsCommand.ts +++ b/clients/client-macie/src/commands/ListMemberAccountsCommand.ts @@ -22,7 +22,8 @@ export interface ListMemberAccountsCommandInput extends ListMemberAccountsReques export interface ListMemberAccountsCommandOutput extends ListMemberAccountsResult, __MetadataBearer {} /** - *

      Lists all Amazon Macie Classic member accounts for the current Macie Classic administrator account.

      + *

      (Discontinued) Lists all Amazon Macie Classic member accounts for the current Macie + * Classic administrator account.

      * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-macie/src/commands/ListS3ResourcesCommand.ts b/clients/client-macie/src/commands/ListS3ResourcesCommand.ts index 2787531b56fb..4953a7463692 100644 --- a/clients/client-macie/src/commands/ListS3ResourcesCommand.ts +++ b/clients/client-macie/src/commands/ListS3ResourcesCommand.ts @@ -22,10 +22,11 @@ export interface ListS3ResourcesCommandInput extends ListS3ResourcesRequest {} export interface ListS3ResourcesCommandOutput extends ListS3ResourcesResult, __MetadataBearer {} /** - *

      Lists all the S3 resources associated with Amazon Macie Classic. If memberAccountId - * isn't specified, the action lists the S3 resources associated with Macie Classic for - * the current Macie Classic administrator account. If memberAccountId is specified, the action lists the S3 resources - * associated with Macie Classic for the specified member account.

      + *

      (Discontinued) Lists all the S3 resources associated with Amazon Macie Classic. If + * memberAccountId isn't specified, the action lists the S3 resources associated with Macie + * Classic for the current Macie Classic administrator account. If memberAccountId is specified, + * the action lists the S3 resources associated with Macie Classic for the specified member + * account.

      * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-macie/src/commands/UpdateS3ResourcesCommand.ts b/clients/client-macie/src/commands/UpdateS3ResourcesCommand.ts index 67f554a590ad..e691d623e7a8 100644 --- a/clients/client-macie/src/commands/UpdateS3ResourcesCommand.ts +++ b/clients/client-macie/src/commands/UpdateS3ResourcesCommand.ts @@ -22,11 +22,11 @@ export interface UpdateS3ResourcesCommandInput extends UpdateS3ResourcesRequest export interface UpdateS3ResourcesCommandOutput extends UpdateS3ResourcesResult, __MetadataBearer {} /** - *

      Updates the classification types for the specified S3 resources. If memberAccountId - * isn't specified, the action updates the classification types of the S3 resources associated - * with Amazon Macie Classic for the current Macie Classic administrator account. If memberAccountId is specified, the - * action updates the classification types of the S3 resources associated with Macie - * Classic for the specified member account.

      + *

      (Discontinued) Updates the classification types for the specified S3 resources. If + * memberAccountId isn't specified, the action updates the classification types of the S3 + * resources associated with Amazon Macie Classic for the current Macie Classic administrator + * account. If memberAccountId is specified, the action updates the classification types of the + * S3 resources associated with Macie Classic for the specified member account.

      * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-macie/src/models/models_0.ts b/clients/client-macie/src/models/models_0.ts index 0c72228cf919..bded759c4a9f 100644 --- a/clients/client-macie/src/models/models_0.ts +++ b/clients/client-macie/src/models/models_0.ts @@ -4,7 +4,7 @@ import { MetadataBearer as $MetadataBearer } from "@aws-sdk/types"; import { MacieServiceException as __BaseException } from "./MacieServiceException"; /** - *

      You do not have required permissions to access the requested resource.

      + *

      (Discontinued) You do not have required permissions to access the requested resource.

      */ export class AccessDeniedException extends __BaseException { readonly name: "AccessDeniedException" = "AccessDeniedException"; @@ -30,8 +30,8 @@ export class AccessDeniedException extends __BaseException { export interface AssociateMemberAccountRequest { /** - *

      The ID of the AWS account that you want to associate with Amazon Macie Classic as a - * member account.

      + *

      (Discontinued) The ID of the Amazon Web Services account that you want to associate with Amazon Macie + * Classic as a member account.

      */ memberAccountId: string | undefined; } @@ -46,7 +46,7 @@ export namespace AssociateMemberAccountRequest { } /** - *

      Internal server error.

      + *

      (Discontinued) Internal server error.

      */ export class InternalException extends __BaseException { readonly name: "InternalException" = "InternalException"; @@ -71,8 +71,8 @@ export class InternalException extends __BaseException { } /** - *

      The request was rejected because an invalid or out-of-range value was supplied for an - * input parameter.

      + *

      (Discontinued) The request was rejected because an invalid or out-of-range value was supplied for an + * input parameter.

      */ export class InvalidInputException extends __BaseException { readonly name: "InvalidInputException" = "InvalidInputException"; @@ -103,8 +103,8 @@ export class InvalidInputException extends __BaseException { } /** - *

      The request was rejected because it attempted to create resources beyond the current - * AWS account limits. The error code describes the limit exceeded.

      + *

      (Discontinued) The request was rejected because it attempted to create resources beyond the current + * Amazon Web Services account quotas. The error code describes the quota exceeded.

      */ export class LimitExceededException extends __BaseException { readonly name: "LimitExceededException" = "LimitExceededException"; @@ -144,20 +144,20 @@ export enum S3OneTimeClassificationType { } /** - *

      The classification type that Amazon Macie Classic applies to the associated S3 - * resources.

      + *

      (Discontinued) The classification type that Amazon Macie Classic applies to the + * associated S3 resources.

      */ export interface ClassificationType { /** - *

      A one-time classification of all of the existing objects in a specified S3 bucket. - *

      + *

      (Discontinued) A one-time classification of all of the existing objects in a specified + * S3 bucket.

      */ oneTime: S3OneTimeClassificationType | string | undefined; /** - *

      A continuous classification of the objects that are added to a specified S3 bucket. - * Amazon Macie Classic begins performing continuous classification after a bucket is - * successfully associated with Macie Classic.

      + *

      (Discontinued) A continuous classification of the objects that are added to a specified + * S3 bucket. Amazon Macie Classic begins performing continuous classification after a bucket is + * successfully associated with Macie Classic.

      */ continuous: S3ContinuousClassificationType | string | undefined; } @@ -172,26 +172,26 @@ export namespace ClassificationType { } /** - *

      The S3 resources that you want to associate with Amazon Macie Classic for monitoring - * and data classification. This data type is used as a request parameter in the - * AssociateS3Resources action and a response parameter in the ListS3Resources action.

      + *

      (Discontinued) The S3 resources that you want to associate with Amazon Macie Classic + * for monitoring and data classification. This data type is used as a request parameter in the + * AssociateS3Resources action and a response parameter in the ListS3Resources action.

      */ export interface S3ResourceClassification { /** - *

      The name of the S3 bucket that you want to associate with Amazon Macie + *

      (Discontinued) The name of the S3 bucket that you want to associate with Amazon Macie * Classic.

      */ bucketName: string | undefined; /** - *

      The prefix of the S3 bucket that you want to associate with Amazon Macie + *

      (Discontinued) The prefix of the S3 bucket that you want to associate with Amazon Macie * Classic.

      */ prefix?: string; /** - *

      The classification type that you want to specify for the resource associated with - * Amazon Macie Classic.

      + *

      (Discontinued) The classification type that you want to specify for the resource + * associated with Amazon Macie Classic.

      */ classificationType: ClassificationType | undefined; } @@ -207,14 +207,14 @@ export namespace S3ResourceClassification { export interface AssociateS3ResourcesRequest { /** - *

      The ID of the Amazon Macie Classic member account whose resources you want to associate - * with Macie Classic.

      + *

      (Discontinued) The ID of the Amazon Macie Classic member account whose resources you + * want to associate with Macie Classic.

      */ memberAccountId?: string; /** - *

      The S3 resources that you want to associate with Amazon Macie Classic for monitoring - * and data classification.

      + *

      (Discontinued) The S3 resources that you want to associate with Amazon Macie Classic + * for monitoring and data classification.

      */ s3Resources: S3ResourceClassification[] | undefined; } @@ -229,18 +229,18 @@ export namespace AssociateS3ResourcesRequest { } /** - *

      Contains information about the S3 resource. This data type is used as a request - * parameter in the DisassociateS3Resources action and can be used as a response parameter in the - * AssociateS3Resources and UpdateS3Resources actions.

      + *

      (Discontinued) Contains information about the S3 resource. This data type is used as a + * request parameter in the DisassociateS3Resources action and can be used as a response + * parameter in the AssociateS3Resources and UpdateS3Resources actions.

      */ export interface S3Resource { /** - *

      The name of the S3 bucket.

      + *

      (Discontinued) The name of the S3 bucket.

      */ bucketName: string | undefined; /** - *

      The prefix of the S3 bucket.

      + *

      (Discontinued) The prefix of the S3 bucket.

      */ prefix?: string; } @@ -255,21 +255,21 @@ export namespace S3Resource { } /** - *

      Includes details about the failed S3 resources.

      + *

      (Discontinued) Includes details about the failed S3 resources.

      */ export interface FailedS3Resource { /** - *

      The failed S3 resources.

      + *

      (Discontinued) The failed S3 resources.

      */ failedItem?: S3Resource; /** - *

      The status code of a failed item.

      + *

      (Discontinued) The status code of a failed item.

      */ errorCode?: string; /** - *

      The error message of a failed item.

      + *

      (Discontinued) The error message of a failed item.

      */ errorMessage?: string; } @@ -285,8 +285,8 @@ export namespace FailedS3Resource { export interface AssociateS3ResourcesResult { /** - *

      S3 resources that couldn't be associated with Amazon Macie Classic. An error code and - * an error message are provided for each failed item.

      + *

      (Discontinued) S3 resources that couldn't be associated with Amazon Macie Classic. An + * error code and an error message are provided for each failed item.

      */ failedS3Resources?: FailedS3Resource[]; } @@ -301,20 +301,20 @@ export namespace AssociateS3ResourcesResult { } /** - *

      The classification type that Amazon Macie Classic applies to the associated S3 - * resources. At least one of the classification types (oneTime or continuous) must be specified. - *

      + *

      (Discontinued) The classification type that Amazon Macie Classic applies to the + * associated S3 resources. At least one of the classification types (oneTime or continuous) must + * be specified.

      */ export interface ClassificationTypeUpdate { /** - *

      A one-time classification of all of the existing objects in a specified S3 bucket. - *

      + *

      (Discontinued) A one-time classification of all of the existing objects in a specified + * S3 bucket.

      */ oneTime?: S3OneTimeClassificationType | string; /** - *

      A continuous classification of the objects that are added to a specified S3 bucket. - * Amazon Macie Classic begins performing continuous classification after a bucket is + *

      (Discontinued) A continuous classification of the objects that are added to a specified + * S3 bucket. Amazon Macie Classic begins performing continuous classification after a bucket is * successfully associated with Macie Classic.

      */ continuous?: S3ContinuousClassificationType | string; @@ -331,7 +331,7 @@ export namespace ClassificationTypeUpdate { export interface DisassociateMemberAccountRequest { /** - *

      The ID of the member account that you want to remove from Amazon Macie + *

      (Discontinued) The ID of the member account that you want to remove from Amazon Macie * Classic.

      */ memberAccountId: string | undefined; @@ -348,14 +348,14 @@ export namespace DisassociateMemberAccountRequest { export interface DisassociateS3ResourcesRequest { /** - *

      The ID of the Amazon Macie Classic member account whose resources you want to remove - * from being monitored by Macie Classic.

      + *

      (Discontinued) The ID of the Amazon Macie Classic member account whose resources you + * want to remove from being monitored by Macie Classic.

      */ memberAccountId?: string; /** - *

      The S3 resources (buckets or prefixes) that you want to remove from being monitored and - * classified by Amazon Macie Classic.

      + *

      (Discontinued) The S3 resources (buckets or prefixes) that you want to remove from + * being monitored and classified by Amazon Macie Classic.

      */ associatedS3Resources: S3Resource[] | undefined; } @@ -371,9 +371,9 @@ export namespace DisassociateS3ResourcesRequest { export interface DisassociateS3ResourcesResult { /** - *

      S3 resources that couldn't be removed from being monitored and classified by Amazon - * Macie Classic. An error code and an error message are provided for each failed item. - *

      + *

      (Discontinued) S3 resources that couldn't be removed from being monitored and + * classified by Amazon Macie Classic. An error code and an error message are provided for each + * failed item.

      */ failedS3Resources?: FailedS3Resource[]; } @@ -389,16 +389,16 @@ export namespace DisassociateS3ResourcesResult { export interface ListMemberAccountsRequest { /** - *

      Use this parameter when paginating results. Set the value of this parameter to null on - * your first call to the ListMemberAccounts action. Subsequent calls to the action fill - * nextToken in the request with the value of nextToken from the previous response to continue - * listing data.

      + *

      (Discontinued) Use this parameter when paginating results. Set the value of this + * parameter to null on your first call to the ListMemberAccounts action. Subsequent calls to the + * action fill nextToken in the request with the value of nextToken from the previous response to + * continue listing data.

      */ nextToken?: string; /** - *

      Use this parameter to indicate the maximum number of items that you want in the - * response. The default value is 250.

      + *

      (Discontinued) Use this parameter to indicate the maximum number of items that you want + * in the response. The default value is 250.

      */ maxResults?: number; } @@ -413,11 +413,12 @@ export namespace ListMemberAccountsRequest { } /** - *

      Contains information about the Amazon Macie Classic member account.

      + *

      (Discontinued) Contains information about the Amazon Macie Classic member + * account.

      */ export interface MemberAccount { /** - *

      The AWS account ID of the Amazon Macie Classic member account.

      + *

      (Discontinued) The Amazon Web Services account ID of the Amazon Macie Classic member account.

      */ accountId?: string; } @@ -433,16 +434,17 @@ export namespace MemberAccount { export interface ListMemberAccountsResult { /** - *

      A list of the Amazon Macie Classic member accounts returned by the action. The current - * Macie Classic administrator account is also included in this list.

      + *

      (Discontinued) A list of the Amazon Macie Classic member accounts returned by the + * action. The current Macie Classic administrator account is also included in this + * list.

      */ memberAccounts?: MemberAccount[]; /** - *

      When a response is generated, if there is more data to be listed, this parameter is - * present in the response and contains the value to use for the nextToken parameter in a - * subsequent pagination request. If there is no more data to be listed, this parameter is set to - * null.

      + *

      (Discontinued) When a response is generated, if there is more data to be listed, this + * parameter is present in the response and contains the value to use for the nextToken parameter + * in a subsequent pagination request. If there is no more data to be listed, this parameter is + * set to null.

      */ nextToken?: string; } @@ -458,21 +460,22 @@ export namespace ListMemberAccountsResult { export interface ListS3ResourcesRequest { /** - *

      The Amazon Macie Classic member account ID whose associated S3 resources you want to - * list.

      + *

      (Discontinued) The Amazon Macie Classic member account ID whose associated S3 resources + * you want to list.

      */ memberAccountId?: string; /** - *

      Use this parameter when paginating results. Set its value to null on your first call to - * the ListS3Resources action. Subsequent calls to the action fill nextToken in the request with - * the value of nextToken from the previous response to continue listing data.

      + *

      (Discontinued) Use this parameter when paginating results. Set its value to null on + * your first call to the ListS3Resources action. Subsequent calls to the action fill nextToken + * in the request with the value of nextToken from the previous response to continue listing + * data.

      */ nextToken?: string; /** - *

      Use this parameter to indicate the maximum number of items that you want in the - * response. The default value is 250.

      + *

      (Discontinued) Use this parameter to indicate the maximum number of items that you want + * in the response. The default value is 250.

      */ maxResults?: number; } @@ -488,15 +491,15 @@ export namespace ListS3ResourcesRequest { export interface ListS3ResourcesResult { /** - *

      A list of the associated S3 resources returned by the action.

      + *

      (Discontinued) A list of the associated S3 resources returned by the action.

      */ s3Resources?: S3ResourceClassification[]; /** - *

      When a response is generated, if there is more data to be listed, this parameter is - * present in the response and contains the value to use for the nextToken parameter in a - * subsequent pagination request. If there is no more data to be listed, this parameter is set to - * null.

      + *

      (Discontinued) When a response is generated, if there is more data to be listed, this + * parameter is present in the response and contains the value to use for the nextToken parameter + * in a subsequent pagination request. If there is no more data to be listed, this parameter is + * set to null.

      */ nextToken?: string; } @@ -511,23 +514,25 @@ export namespace ListS3ResourcesResult { } /** - *

      The S3 resources whose classification types you want to update. This data type is used - * as a request parameter in the UpdateS3Resources action.

      + *

      (Discontinued) The S3 resources whose classification types you want to update. This + * data type is used as a request parameter in the UpdateS3Resources action.

      */ export interface S3ResourceClassificationUpdate { /** - *

      The name of the S3 bucket whose classification types you want to update.

      + *

      (Discontinued) The name of the S3 bucket whose classification types you want to + * update.

      */ bucketName: string | undefined; /** - *

      The prefix of the S3 bucket whose classification types you want to update.

      + *

      (Discontinued) The prefix of the S3 bucket whose classification types you want to + * update.

      */ prefix?: string; /** - *

      The classification type that you want to update for the resource associated with Amazon - * Macie Classic.

      + *

      (Discontinued) The classification type that you want to update for the resource + * associated with Amazon Macie Classic.

      */ classificationTypeUpdate: ClassificationTypeUpdate | undefined; } @@ -543,13 +548,14 @@ export namespace S3ResourceClassificationUpdate { export interface UpdateS3ResourcesRequest { /** - *

      The AWS ID of the Amazon Macie Classic member account whose S3 resources' - * classification types you want to update.

      + *

      (Discontinued) The Amazon Web Services account ID of the Amazon Macie Classic member account whose S3 + * resources' classification types you want to update.

      */ memberAccountId?: string; /** - *

      The S3 resources whose classification types you want to update.

      + *

      (Discontinued) The S3 resources whose classification types you want to + * update.

      */ s3ResourcesUpdate: S3ResourceClassificationUpdate[] | undefined; } @@ -565,8 +571,8 @@ export namespace UpdateS3ResourcesRequest { export interface UpdateS3ResourcesResult { /** - *

      The S3 resources whose classification types can't be updated. An error code and an - * error message are provided for each failed item.

      + *

      (Discontinued) The S3 resources whose classification types can't be updated. An error + * code and an error message are provided for each failed item.

      */ failedS3Resources?: FailedS3Resource[]; } diff --git a/clients/client-mediaconvert/src/commands/TagResourceCommand.ts b/clients/client-mediaconvert/src/commands/TagResourceCommand.ts index 8c64a06890d2..2e6132f86ad8 100644 --- a/clients/client-mediaconvert/src/commands/TagResourceCommand.ts +++ b/clients/client-mediaconvert/src/commands/TagResourceCommand.ts @@ -12,7 +12,7 @@ import { } from "@aws-sdk/types"; import { MediaConvertClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../MediaConvertClient"; -import { TagResourceRequest, TagResourceResponse } from "../models/models_1"; +import { TagResourceRequest, TagResourceResponse } from "../models/models_2"; import { deserializeAws_restJson1TagResourceCommand, serializeAws_restJson1TagResourceCommand, diff --git a/clients/client-mediaconvert/src/models/models_0.ts b/clients/client-mediaconvert/src/models/models_0.ts index 6f6e282c13a5..d6f4e2b3fd91 100644 --- a/clients/client-mediaconvert/src/models/models_0.ts +++ b/clients/client-mediaconvert/src/models/models_0.ts @@ -1784,7 +1784,7 @@ export enum ImscStylePassthrough { */ export interface ImscDestinationSettings { /** - * Specify whether to flag this caption track as accessibility in your HLS/CMAF parent manifest. When you choose ENABLED, MediaConvert includes the parameters CHARACTERISTICS="public.accessibility.describes-spoken-dialog,public.accessibility.describes-music-and-sound" and AUTOSELECT="YES" in the EXT-X-MEDIA entry for this track. When you keep the default choice, DISABLED, MediaConvert leaves this parameter out. + * Set Accessibility subtitles (Accessibility) to Enabled (ENABLED) if the ISMC or WebVTT captions track is intended to provide accessibility for people who are deaf or hard of hearing. 
When you enable this feature, MediaConvert adds the following attributes under EXT-X-MEDIA in the HLS or CMAF manifest for this track: CHARACTERISTICS="public.accessibility.describes-spoken-dialog,public.accessibility.describes-music-and-sound" and AUTOSELECT="YES". Keep the default value, Disabled (DISABLED), if the captions track is not intended to provide such accessibility. MediaConvert will not add the above attributes. */ Accessibility?: ImscAccessibilitySubs | string; @@ -1925,7 +1925,7 @@ export enum WebvttStylePassthrough { */ export interface WebvttDestinationSettings { /** - * Specify whether to flag this caption track as accessibility in your HLS/CMAF parent manifest. When you choose ENABLED, MediaConvert includes the parameters CHARACTERISTICS="public.accessibility.describes-spoken-dialog,public.accessibility.describes-music-and-sound" and AUTOSELECT="YES" in the EXT-X-MEDIA entry for this track. When you keep the default choice, DISABLED, MediaConvert leaves this parameter out. + * Set Accessibility subtitles (Accessibility) to Enabled (ENABLED) if the ISMC or WebVTT captions track is intended to provide accessibility for people who are deaf or hard of hearing. When you enable this feature, MediaConvert adds the following attributes under EXT-X-MEDIA in the HLS or CMAF manifest for this track: CHARACTERISTICS="public.accessibility.describes-spoken-dialog,public.accessibility.describes-music-and-sound" and AUTOSELECT="YES". Keep the default value, Disabled (DISABLED), if the captions track is not intended to provide such accessibility. MediaConvert will not add the above attributes. */ Accessibility?: WebvttAccessibilitySubs | string; @@ -2245,7 +2245,7 @@ export namespace HopDestination { */ export interface Id3Insertion { /** - * Use ID3 tag (Id3) to provide a tag value in base64-encode format. + * Use ID3 tag (Id3) to provide a fully formed ID3 tag in base64-encode format. 
*/ Id3?: string; @@ -2971,6 +2971,11 @@ export enum ColorSpaceUsage { FORCE = "FORCE", } +export enum EmbeddedTimecodeOverride { + NONE = "NONE", + USE_MDPM = "USE_MDPM", +} + /** * Use these settings to specify static color calibration metadata, as defined by SMPTE ST 2086. These values don't affect the pixel values that are encoded in the video stream. They are intended to help the downstream video player display content in a way that reflects the intentions of the the content creator. */ @@ -3078,6 +3083,11 @@ export interface VideoSelector { */ ColorSpaceUsage?: ColorSpaceUsage | string; + /** + * Set Embedded timecode override (embeddedTimecodeOverride) to Use MDPM (USE_MDPM) when your AVCHD input contains timecode tag data in the Modified Digital Video Pack Metadata (MDPM). When you do, we recommend you also set Timecode source (inputTimecodeSource) to Embedded (EMBEDDED). Leave Embedded timecode override blank, or set to None (NONE), when your input does not contain MDPM timecode. + */ + EmbeddedTimecodeOverride?: EmbeddedTimecodeOverride | string; + /** * Use these settings to provide HDR 10 metadata that is missing or inaccurate in your input video. Appropriate values vary depending on the input video and must be provided by a color grader. The color grader generates these values during the HDR 10 mastering process. The valid range for each of these settings is 0 to 50,000. Each increment represents 0.00002 in CIE1931 color coordinate. Related settings - When you specify these values, you must also set Color space (ColorSpace) to HDR 10 (HDR10). To specify whether the the values you specify here take precedence over the values in the metadata of your input file, set Color space usage (ColorSpaceUsage). To specify whether color metadata is included in an output, set Color metadata (ColorMetadata). For more information about MediaConvert HDR jobs, see https://docs.aws.amazon.com/console/mediaconvert/hdr. 
*/ @@ -4743,6 +4753,11 @@ export enum HlsCaptionLanguageSetting { OMIT = "OMIT", } +export enum HlsCaptionSegmentLengthControl { + LARGE_SEGMENTS = "LARGE_SEGMENTS", + MATCH_VIDEO = "MATCH_VIDEO", +} + export enum HlsClientCache { DISABLED = "DISABLED", ENABLED = "ENABLED", @@ -4963,6 +4978,11 @@ export interface HlsGroupSettings { */ CaptionLanguageSetting?: HlsCaptionLanguageSetting | string; + /** + * Set Caption segment length control (CaptionSegmentLengthControl) to Match video (MATCH_VIDEO) to create caption segments that align with the video segments from the first video output in this output group. For example, if the video segments are 2 seconds long, your WebVTT segments will also be 2 seconds long. Keep the default setting, Large segments (LARGE_SEGMENTS) to create caption segments that are 300 seconds long. + */ + CaptionSegmentLengthControl?: HlsCaptionSegmentLengthControl | string; + /** * Disable this setting only when your workflow requires the #EXT-X-ALLOW-CACHE:no tag. Otherwise, keep the default value Enabled (ENABLED) and control caching in your video distribution set up. For example, use the Cache-Control http header. */ @@ -5069,12 +5089,12 @@ export interface HlsGroupSettings { TargetDurationCompatibilityMode?: HlsTargetDurationCompatibilityMode | string; /** - * Indicates ID3 frame that has the timecode. + * Specify the type of the ID3 frame (timedMetadataId3Frame) to use for ID3 timestamps (timedMetadataId3Period) in your output. To include ID3 timestamps: Specify PRIV (PRIV) or TDRL (TDRL) and set ID3 metadata (timedMetadata) to Passthrough (PASSTHROUGH). To exclude ID3 timestamps: Set ID3 timestamp frame type to None (NONE). */ TimedMetadataId3Frame?: HlsTimedMetadataId3Frame | string; /** - * Timed Metadata interval in seconds. + * Specify the interval in seconds to write ID3 timestamps in your output. The first timestamp starts at the output timecode and date, and increases incrementally with each ID3 timestamp. 
To use the default interval of 10 seconds: Leave blank. To include this metadata in your output: Set ID3 timestamp frame type (timedMetadataId3Frame) to PRIV (PRIV) or TDRL (TDRL), and set ID3 metadata (timedMetadata) to Passthrough (PASSTHROUGH). */ TimedMetadataId3Period?: number; @@ -5338,7 +5358,7 @@ export interface CmfcSettings { Scte35Source?: CmfcScte35Source | string; /** - * Applies to CMAF outputs. Use this setting to specify whether the service inserts the ID3 timed metadata from the input in this output. + * To include ID3 metadata in this output: Set ID3 metadata (timedMetadata) to Passthrough (PASSTHROUGH). Specify this ID3 metadata in Custom ID3 metadata inserter (timedMetadataInsertion). MediaConvert writes each instance of ID3 metadata in a separate Event Message (eMSG) box. To exclude this ID3 metadata: Set ID3 metadata to None (NONE) or leave blank. */ TimedMetadata?: CmfcTimedMetadata | string; } @@ -5790,13 +5810,3 @@ export enum M3u8NielsenId3 { INSERT = "INSERT", NONE = "NONE", } - -export enum M3u8PcrControl { - CONFIGURED_PCR_PERIOD = "CONFIGURED_PCR_PERIOD", - PCR_EVERY_PES_PACKET = "PCR_EVERY_PES_PACKET", -} - -export enum M3u8Scte35Source { - NONE = "NONE", - PASSTHROUGH = "PASSTHROUGH", -} diff --git a/clients/client-mediaconvert/src/models/models_1.ts b/clients/client-mediaconvert/src/models/models_1.ts index 374a25d102a0..691977033d44 100644 --- a/clients/client-mediaconvert/src/models/models_1.ts +++ b/clients/client-mediaconvert/src/models/models_1.ts @@ -31,8 +31,6 @@ import { M3u8AudioDuration, M3u8DataPtsControl, M3u8NielsenId3, - M3u8PcrControl, - M3u8Scte35Source, MotionImageInserter, NielsenConfiguration, NielsenNonLinearWatermarkSettings, @@ -42,6 +40,16 @@ import { Rectangle, } from "./models_0"; +export enum M3u8PcrControl { + CONFIGURED_PCR_PERIOD = "CONFIGURED_PCR_PERIOD", + PCR_EVERY_PES_PACKET = "PCR_EVERY_PES_PACKET", +} + +export enum M3u8Scte35Source { + NONE = "NONE", + PASSTHROUGH = "PASSTHROUGH", +} + export enum 
TimedMetadata { NONE = "NONE", PASSTHROUGH = "PASSTHROUGH", @@ -127,12 +135,12 @@ export interface M3u8Settings { Scte35Source?: M3u8Scte35Source | string; /** - * Applies to HLS outputs. Use this setting to specify whether the service inserts the ID3 timed metadata from the input in this output. + * Set ID3 metadata (timedMetadata) to Passthrough (PASSTHROUGH) to include ID3 metadata in this output. This includes ID3 metadata from the following features: ID3 timestamp period (timedMetadataId3Period), and Custom ID3 metadata inserter (timedMetadataInsertion). To exclude this ID3 metadata in this output: set ID3 metadata to None (NONE) or leave blank. */ TimedMetadata?: TimedMetadata | string; /** - * Packet Identifier (PID) of the timed metadata stream in the transport stream. + * Packet Identifier (PID) of the ID3 metadata stream in the transport stream. */ TimedMetadataPid?: number; @@ -339,7 +347,7 @@ export interface MpdSettings { Scte35Source?: MpdScte35Source | string; /** - * Applies to DASH outputs. Use this setting to specify whether the service inserts the ID3 timed metadata from the input in this output. + * To include ID3 metadata in this output: Set ID3 metadata (timedMetadata) to Passthrough (PASSTHROUGH). Specify this ID3 metadata in Custom ID3 metadata inserter (timedMetadataInsertion). MediaConvert writes each instance of ID3 metadata in a separate Event Message (eMSG) box. To exclude this ID3 metadata: Set ID3 metadata to None (NONE) or leave blank. */ TimedMetadata?: MpdTimedMetadata | string; } @@ -3174,12 +3182,12 @@ export interface NoiseReducerTemporalFilterSettings { AggressiveMode?: number; /** - * When you set Noise reducer (noiseReducer) to Temporal (TEMPORAL), the sharpness of your output is reduced. You can optionally use Post temporal sharpening (PostTemporalSharpening) to apply sharpening to the edges of your output. 
The default behavior, Auto (AUTO), allows the transcoder to determine whether to apply sharpening, depending on your input type and quality. When you set Post temporal sharpening to Enabled (ENABLED), specify how much sharpening is applied using Post temporal sharpening strength (PostTemporalSharpeningStrength). Set Post temporal sharpening to Disabled (DISABLED) to not apply sharpening. + * When you set Noise reducer (noiseReducer) to Temporal (TEMPORAL), the bandwidth and sharpness of your output is reduced. You can optionally use Post temporal sharpening (postTemporalSharpening) to apply sharpening to the edges of your output. Note that Post temporal sharpening will also make the bandwidth reduction from the Noise reducer smaller. The default behavior, Auto (AUTO), allows the transcoder to determine whether to apply sharpening, depending on your input type and quality. When you set Post temporal sharpening to Enabled (ENABLED), specify how much sharpening is applied using Post temporal sharpening strength (postTemporalSharpeningStrength). Set Post temporal sharpening to Disabled (DISABLED) to not apply sharpening. */ PostTemporalSharpening?: NoiseFilterPostTemporalSharpening | string; /** - * Use Post temporal sharpening strength (PostTemporalSharpeningStrength) to define the amount of sharpening the transcoder applies to your output. Set Post temporal sharpening strength to Low (LOW), or leave blank, to apply a low amount of sharpening. Set Post temporal sharpening strength to Medium (MEDIUM) to apply medium amount of sharpening. Set Post temporal sharpening strength to High (HIGH) to apply a high amount of sharpening. + * Use Post temporal sharpening strength (postTemporalSharpeningStrength) to define the amount of sharpening the transcoder applies to your output. Set Post temporal sharpening strength to Low (LOW), Medium (MEDIUM), or High (HIGH) to indicate the amount of sharpening. 
*/ PostTemporalSharpeningStrength?: NoiseFilterPostTemporalSharpeningStrength | string; @@ -3616,7 +3624,7 @@ export namespace TimecodeConfig { } /** - * Enable Timed metadata insertion (TimedMetadataInsertion) to include ID3 tags in any HLS outputs. To include timed metadata, you must enable it here, enable it in each output container, and specify tags and timecodes in ID3 insertion (Id3Insertion) objects. + * Insert user-defined custom ID3 metadata (id3) at timecodes (timecode) that you specify. In each output that you want to include this metadata, you must set ID3 metadata (timedMetadata) to Passthrough (PASSTHROUGH). */ export interface TimedMetadataInsertion { /** @@ -3694,7 +3702,7 @@ export interface JobSettings { TimecodeConfig?: TimecodeConfig; /** - * Enable Timed metadata insertion (TimedMetadataInsertion) to include ID3 tags in any HLS outputs. To include timed metadata, you must enable it here, enable it in each output container, and specify tags and timecodes in ID3 insertion (Id3Insertion) objects. + * Insert user-defined custom ID3 metadata (id3) at timecodes (timecode) that you specify. In each output that you want to include this metadata, you must set ID3 metadata (timedMetadata) to Passthrough (PASSTHROUGH). */ TimedMetadataInsertion?: TimedMetadataInsertion; } @@ -3967,7 +3975,7 @@ export interface JobTemplateSettings { TimecodeConfig?: TimecodeConfig; /** - * Enable Timed metadata insertion (TimedMetadataInsertion) to include ID3 tags in any HLS outputs. To include timed metadata, you must enable it here, enable it in each output container, and specify tags and timecodes in ID3 insertion (Id3Insertion) objects. + * Insert user-defined custom ID3 metadata (id3) at timecodes (timecode) that you specify. In each output that you want to include this metadata, you must set ID3 metadata (timedMetadata) to Passthrough (PASSTHROUGH). 
*/ TimedMetadataInsertion?: TimedMetadataInsertion; } @@ -5483,35 +5491,3 @@ export namespace PutPolicyResponse { ...obj, }); } - -export interface TagResourceRequest { - /** - * The Amazon Resource Name (ARN) of the resource that you want to tag. To get the ARN, send a GET request with the resource name. - */ - Arn: string | undefined; - - /** - * The tags that you want to add to the resource. You can tag resources with a key-value pair or with only a key. - */ - Tags: { [key: string]: string } | undefined; -} - -export namespace TagResourceRequest { - /** - * @internal - */ - export const filterSensitiveLog = (obj: TagResourceRequest): any => ({ - ...obj, - }); -} - -export interface TagResourceResponse {} - -export namespace TagResourceResponse { - /** - * @internal - */ - export const filterSensitiveLog = (obj: TagResourceResponse): any => ({ - ...obj, - }); -} diff --git a/clients/client-mediaconvert/src/models/models_2.ts b/clients/client-mediaconvert/src/models/models_2.ts index fd16b38cf8c8..11663e44cb4c 100644 --- a/clients/client-mediaconvert/src/models/models_2.ts +++ b/clients/client-mediaconvert/src/models/models_2.ts @@ -10,6 +10,38 @@ import { StatusUpdateInterval, } from "./models_1"; +export interface TagResourceRequest { + /** + * The Amazon Resource Name (ARN) of the resource that you want to tag. To get the ARN, send a GET request with the resource name. + */ + Arn: string | undefined; + + /** + * The tags that you want to add to the resource. You can tag resources with a key-value pair or with only a key. 
+ */ + Tags: { [key: string]: string } | undefined; +} + +export namespace TagResourceRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: TagResourceRequest): any => ({ + ...obj, + }); +} + +export interface TagResourceResponse {} + +export namespace TagResourceResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: TagResourceResponse): any => ({ + ...obj, + }); +} + export interface UntagResourceRequest { /** * The Amazon Resource Name (ARN) of the resource that you want to remove tags from. To get the ARN, send a GET request with the resource name. diff --git a/clients/client-mediaconvert/src/protocols/Aws_restJson1.ts b/clients/client-mediaconvert/src/protocols/Aws_restJson1.ts index 29e33d84162d..5bca0c679db3 100644 --- a/clients/client-mediaconvert/src/protocols/Aws_restJson1.ts +++ b/clients/client-mediaconvert/src/protocols/Aws_restJson1.ts @@ -4566,6 +4566,8 @@ const serializeAws_restJson1HlsGroupSettings = (input: HlsGroupSettings, context }), ...(input.CaptionLanguageSetting !== undefined && input.CaptionLanguageSetting !== null && { captionLanguageSetting: input.CaptionLanguageSetting }), + ...(input.CaptionSegmentLengthControl !== undefined && + input.CaptionSegmentLengthControl !== null && { captionSegmentLengthControl: input.CaptionSegmentLengthControl }), ...(input.ClientCache !== undefined && input.ClientCache !== null && { clientCache: input.ClientCache }), ...(input.CodecSpecification !== undefined && input.CodecSpecification !== null && { codecSpecification: input.CodecSpecification }), @@ -5906,6 +5908,8 @@ const serializeAws_restJson1VideoSelector = (input: VideoSelector, context: __Se ...(input.ColorSpace !== undefined && input.ColorSpace !== null && { colorSpace: input.ColorSpace }), ...(input.ColorSpaceUsage !== undefined && input.ColorSpaceUsage !== null && { colorSpaceUsage: input.ColorSpaceUsage }), + ...(input.EmbeddedTimecodeOverride !== undefined && + input.EmbeddedTimecodeOverride !== 
null && { embeddedTimecodeOverride: input.EmbeddedTimecodeOverride }), ...(input.Hdr10Metadata !== undefined && input.Hdr10Metadata !== null && { hdr10Metadata: serializeAws_restJson1Hdr10Metadata(input.Hdr10Metadata, context), @@ -7773,6 +7777,7 @@ const deserializeAws_restJson1HlsGroupSettings = (output: any, context: __SerdeC ? deserializeAws_restJson1__listOfHlsCaptionLanguageMapping(output.captionLanguageMappings, context) : undefined, CaptionLanguageSetting: __expectString(output.captionLanguageSetting), + CaptionSegmentLengthControl: __expectString(output.captionSegmentLengthControl), ClientCache: __expectString(output.clientCache), CodecSpecification: __expectString(output.codecSpecification), Destination: __expectString(output.destination), @@ -9219,6 +9224,7 @@ const deserializeAws_restJson1VideoSelector = (output: any, context: __SerdeCont AlphaBehavior: __expectString(output.alphaBehavior), ColorSpace: __expectString(output.colorSpace), ColorSpaceUsage: __expectString(output.colorSpaceUsage), + EmbeddedTimecodeOverride: __expectString(output.embeddedTimecodeOverride), Hdr10Metadata: output.hdr10Metadata !== undefined && output.hdr10Metadata !== null ? deserializeAws_restJson1Hdr10Metadata(output.hdr10Metadata, context) diff --git a/clients/client-mediapackage/src/models/models_0.ts b/clients/client-mediapackage/src/models/models_0.ts index cee98ef8c4e9..aab7bf534307 100644 --- a/clients/client-mediapackage/src/models/models_0.ts +++ b/clients/client-mediapackage/src/models/models_0.ts @@ -682,6 +682,7 @@ export enum ManifestLayout { export enum Profile { HBBTV_1_5 = "HBBTV_1_5", + HYBRIDCAST = "HYBRIDCAST", NONE = "NONE", } diff --git a/clients/client-mgn/src/Mgn.ts b/clients/client-mgn/src/Mgn.ts index 7ae7a3adcb96..a231a2e5c588 100644 --- a/clients/client-mgn/src/Mgn.ts +++ b/clients/client-mgn/src/Mgn.ts @@ -298,7 +298,7 @@ export class Mgn extends MgnClient { } /** - *

      Deletes a single vCenter client by ID.

      + *

      Deletes a given vCenter client by ID.

      */ public deleteVcenterClient( args: DeleteVcenterClientCommandInput, @@ -330,7 +330,7 @@ export class Mgn extends MgnClient { } /** - *

      Retrieves detailed Job log with paging.

      + *

      Retrieves detailed job log items with paging.

      */ public describeJobLogItems( args: DescribeJobLogItemsCommandInput, @@ -362,7 +362,7 @@ export class Mgn extends MgnClient { } /** - *

      Returns a list of Jobs. Use the JobsID and fromDate and toData filters to limit which jobs are returned. The response is sorted by creationDataTime - latest date first. Jobs are normaly created by the StartTest, StartCutover, and TerminateTargetInstances APIs. Jobs are also created by DiagnosticLaunch and TerminateDiagnosticInstances, which are APIs available only to *Support* and only used in response to relevant support tickets.

      + *

      Returns a list of Jobs. Use the JobsID and fromDate and toData filters to limit which jobs are returned. The response is sorted by creationDataTime - latest date first. Jobs are normally created by the StartTest, StartCutover, and TerminateTargetInstances APIs. Jobs are also created by DiagnosticLaunch and TerminateDiagnosticInstances, which are APIs available only to *Support* and only used in response to relevant support tickets.

      */ public describeJobs( args: DescribeJobsCommandInput, @@ -457,7 +457,7 @@ export class Mgn extends MgnClient { } /** - *

      Lists all vCenter clients.

      + *

      Returns a list of the installed vCenter clients.

      */ public describeVcenterClients( args: DescribeVcenterClientsCommandInput, @@ -489,7 +489,7 @@ export class Mgn extends MgnClient { } /** - *

      Disconnects specific Source Servers from Application Migration Service. Data replication is stopped immediately. All AWS resources created by Application Migration Service for enabling the replication of these source servers will be terminated / deleted within 90 minutes. Launched Test or Cutover instances will NOT be terminated. If the agent on the source server has not been prevented from communciating with the Application Migration Service service, then it will receive a command to uninstall itself (within approximately 10 minutes). The following properties of the SourceServer will be changed immediately: dataReplicationInfo.dataReplicationState will be set to DISCONNECTED; The totalStorageBytes property for each of dataReplicationInfo.replicatedDisks will be set to zero; dataReplicationInfo.lagDuration and dataReplicationInfo.lagDurationwill be nullified.

      + *

      Disconnects specific Source Servers from Application Migration Service. Data replication is stopped immediately. All AWS resources created by Application Migration Service for enabling the replication of these source servers will be terminated / deleted within 90 minutes. Launched Test or Cutover instances will NOT be terminated. If the agent on the source server has not been prevented from communicating with the Application Migration Service service, then it will receive a command to uninstall itself (within approximately 10 minutes). The following properties of the SourceServer will be changed immediately: dataReplicationInfo.dataReplicationState will be set to DISCONNECTED; The totalStorageBytes property for each of dataReplicationInfo.replicatedDisks will be set to zero; dataReplicationInfo.lagDuration and dataReplicationInfo.lagDuration will be nullified.

      */ public disconnectFromService( args: DisconnectFromServiceCommandInput, @@ -521,7 +521,7 @@ export class Mgn extends MgnClient { } /** - *

      Finalizes the cutover immediately for specific Source Servers. All AWS resources created by Application Migration Service for enabling the replication of these source servers will be terminated / deleted within 90 minutes. Launched Test or Cutover instances will NOT be terminated. The AWS Replication Agent will receive a command to uninstall itself (within 10 minutes). The following properties of the SourceServer will be changed immediately: dataReplicationInfo.dataReplicationState will be to DISCONNECTED; The SourceServer.lifeCycle.state will be changed to CUTOVER; The totalStorageBytes property fo each of dataReplicationInfo.replicatedDisks will be set to zero; dataReplicationInfo.lagDuration and dataReplicationInfo.lagDurationwill be nullified.

      + *

      Finalizes the cutover immediately for specific Source Servers. All AWS resources created by Application Migration Service for enabling the replication of these source servers will be terminated / deleted within 90 minutes. Launched Test or Cutover instances will NOT be terminated. The AWS Replication Agent will receive a command to uninstall itself (within 10 minutes). The following properties of the SourceServer will be changed immediately: dataReplicationInfo.dataReplicationState will be changed to DISCONNECTED; The SourceServer.lifeCycle.state will be changed to CUTOVER; The totalStorageBytes property for each of dataReplicationInfo.replicatedDisks will be set to zero; dataReplicationInfo.lagDuration and dataReplicationInfo.lagDuration will be nullified.

      */ public finalizeCutover( args: FinalizeCutoverCommandInput, @@ -681,7 +681,7 @@ export class Mgn extends MgnClient { } /** - *

      Archives specific Source Servers by setting the SourceServer.isArchived property to true for specified SourceServers by ID. This command only works for SourceServers with a lifecycle.state which equals DISCONNECTED or CUTOVER.

      + *

      Archives specific Source Servers by setting the SourceServer.isArchived property to true for specified SourceServers by ID. This command only works for SourceServers with a lifecycle.state which equals DISCONNECTED or CUTOVER.

      */ public markAsArchived( args: MarkAsArchivedCommandInput, @@ -774,7 +774,7 @@ export class Mgn extends MgnClient { } /** - *

      Starts replication on source server by ID.

      + *

      Starts replication for SNAPSHOT_SHIPPING agents.

      */ public startReplication( args: StartReplicationCommandInput, @@ -806,7 +806,7 @@ export class Mgn extends MgnClient { } /** - *

      Lauches a Test Instance for specific Source Servers. This command starts a LAUNCH job whose initiatedBy property is StartTest and changes the SourceServer.lifeCycle.state property to TESTING.

      + *

      Launches a Test Instance for specific Source Servers. This command starts a LAUNCH job whose initiatedBy property is StartTest and changes the SourceServer.lifeCycle.state property to TESTING.

      */ public startTest(args: StartTestCommandInput, options?: __HttpHandlerOptions): Promise; public startTest(args: StartTestCommandInput, cb: (err: any, data?: StartTestCommandOutput) => void): void; @@ -1020,7 +1020,7 @@ export class Mgn extends MgnClient { } /** - *

      Updates source server Replication Type by ID.

      + *

      Allows you to change between the AGENT_BASED replication type and the SNAPSHOT_SHIPPING replication type.

      */ public updateSourceServerReplicationType( args: UpdateSourceServerReplicationTypeCommandInput, diff --git a/clients/client-mgn/src/commands/DeleteVcenterClientCommand.ts b/clients/client-mgn/src/commands/DeleteVcenterClientCommand.ts index f56592dcd291..d6dacf026343 100644 --- a/clients/client-mgn/src/commands/DeleteVcenterClientCommand.ts +++ b/clients/client-mgn/src/commands/DeleteVcenterClientCommand.ts @@ -22,7 +22,7 @@ export interface DeleteVcenterClientCommandInput extends DeleteVcenterClientRequ export interface DeleteVcenterClientCommandOutput extends __MetadataBearer {} /** - *

      Deletes a single vCenter client by ID.

      + *

      Deletes a given vCenter client by ID.

      * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-mgn/src/commands/DescribeJobLogItemsCommand.ts b/clients/client-mgn/src/commands/DescribeJobLogItemsCommand.ts index 3f2cc24d86d3..e6c8134b999b 100644 --- a/clients/client-mgn/src/commands/DescribeJobLogItemsCommand.ts +++ b/clients/client-mgn/src/commands/DescribeJobLogItemsCommand.ts @@ -22,7 +22,7 @@ export interface DescribeJobLogItemsCommandInput extends DescribeJobLogItemsRequ export interface DescribeJobLogItemsCommandOutput extends DescribeJobLogItemsResponse, __MetadataBearer {} /** - *

      Retrieves detailed Job log with paging.

      + *

      Retrieves detailed job log items with paging.

      * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-mgn/src/commands/DescribeJobsCommand.ts b/clients/client-mgn/src/commands/DescribeJobsCommand.ts index a45efe020398..33aa9923cf22 100644 --- a/clients/client-mgn/src/commands/DescribeJobsCommand.ts +++ b/clients/client-mgn/src/commands/DescribeJobsCommand.ts @@ -22,7 +22,7 @@ export interface DescribeJobsCommandInput extends DescribeJobsRequest {} export interface DescribeJobsCommandOutput extends DescribeJobsResponse, __MetadataBearer {} /** - *

      Returns a list of Jobs. Use the JobsID and fromDate and toData filters to limit which jobs are returned. The response is sorted by creationDataTime - latest date first. Jobs are normaly created by the StartTest, StartCutover, and TerminateTargetInstances APIs. Jobs are also created by DiagnosticLaunch and TerminateDiagnosticInstances, which are APIs available only to *Support* and only used in response to relevant support tickets.

      + *

      Returns a list of Jobs. Use the JobsID and fromDate and toDate filters to limit which jobs are returned. The response is sorted by creationDateTime - latest date first. Jobs are normally created by the StartTest, StartCutover, and TerminateTargetInstances APIs. Jobs are also created by DiagnosticLaunch and TerminateDiagnosticInstances, which are APIs available only to *Support* and only used in response to relevant support tickets.

      * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-mgn/src/commands/DescribeVcenterClientsCommand.ts b/clients/client-mgn/src/commands/DescribeVcenterClientsCommand.ts index 11ca89a6f8da..6f432512f90e 100644 --- a/clients/client-mgn/src/commands/DescribeVcenterClientsCommand.ts +++ b/clients/client-mgn/src/commands/DescribeVcenterClientsCommand.ts @@ -22,7 +22,7 @@ export interface DescribeVcenterClientsCommandInput extends DescribeVcenterClien export interface DescribeVcenterClientsCommandOutput extends DescribeVcenterClientsResponse, __MetadataBearer {} /** - *

      Lists all vCenter clients.

      + *

      Returns a list of the installed vCenter clients.

      * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-mgn/src/commands/DisconnectFromServiceCommand.ts b/clients/client-mgn/src/commands/DisconnectFromServiceCommand.ts index f529557f74e4..dd0bb653dbb7 100644 --- a/clients/client-mgn/src/commands/DisconnectFromServiceCommand.ts +++ b/clients/client-mgn/src/commands/DisconnectFromServiceCommand.ts @@ -22,7 +22,7 @@ export interface DisconnectFromServiceCommandInput extends DisconnectFromService export interface DisconnectFromServiceCommandOutput extends SourceServer, __MetadataBearer {} /** - *

      Disconnects specific Source Servers from Application Migration Service. Data replication is stopped immediately. All AWS resources created by Application Migration Service for enabling the replication of these source servers will be terminated / deleted within 90 minutes. Launched Test or Cutover instances will NOT be terminated. If the agent on the source server has not been prevented from communciating with the Application Migration Service service, then it will receive a command to uninstall itself (within approximately 10 minutes). The following properties of the SourceServer will be changed immediately: dataReplicationInfo.dataReplicationState will be set to DISCONNECTED; The totalStorageBytes property for each of dataReplicationInfo.replicatedDisks will be set to zero; dataReplicationInfo.lagDuration and dataReplicationInfo.lagDurationwill be nullified.

      + *

      Disconnects specific Source Servers from Application Migration Service. Data replication is stopped immediately. All AWS resources created by Application Migration Service for enabling the replication of these source servers will be terminated / deleted within 90 minutes. Launched Test or Cutover instances will NOT be terminated. If the agent on the source server has not been prevented from communicating with the Application Migration Service service, then it will receive a command to uninstall itself (within approximately 10 minutes). The following properties of the SourceServer will be changed immediately: dataReplicationInfo.dataReplicationState will be set to DISCONNECTED; The totalStorageBytes property for each of dataReplicationInfo.replicatedDisks will be set to zero; dataReplicationInfo.lagDuration and dataReplicationInfo.lagDuration will be nullified.

      * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-mgn/src/commands/FinalizeCutoverCommand.ts b/clients/client-mgn/src/commands/FinalizeCutoverCommand.ts index 36cb84807a4a..254624e74682 100644 --- a/clients/client-mgn/src/commands/FinalizeCutoverCommand.ts +++ b/clients/client-mgn/src/commands/FinalizeCutoverCommand.ts @@ -22,7 +22,7 @@ export interface FinalizeCutoverCommandInput extends FinalizeCutoverRequest {} export interface FinalizeCutoverCommandOutput extends SourceServer, __MetadataBearer {} /** - *

      Finalizes the cutover immediately for specific Source Servers. All AWS resources created by Application Migration Service for enabling the replication of these source servers will be terminated / deleted within 90 minutes. Launched Test or Cutover instances will NOT be terminated. The AWS Replication Agent will receive a command to uninstall itself (within 10 minutes). The following properties of the SourceServer will be changed immediately: dataReplicationInfo.dataReplicationState will be to DISCONNECTED; The SourceServer.lifeCycle.state will be changed to CUTOVER; The totalStorageBytes property fo each of dataReplicationInfo.replicatedDisks will be set to zero; dataReplicationInfo.lagDuration and dataReplicationInfo.lagDurationwill be nullified.

      + *

      Finalizes the cutover immediately for specific Source Servers. All AWS resources created by Application Migration Service for enabling the replication of these source servers will be terminated / deleted within 90 minutes. Launched Test or Cutover instances will NOT be terminated. The AWS Replication Agent will receive a command to uninstall itself (within 10 minutes). The following properties of the SourceServer will be changed immediately: dataReplicationInfo.dataReplicationState will be changed to DISCONNECTED; The SourceServer.lifeCycle.state will be changed to CUTOVER; The totalStorageBytes property for each of dataReplicationInfo.replicatedDisks will be set to zero; dataReplicationInfo.lagDuration and dataReplicationInfo.lagDuration will be nullified.

      * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-mgn/src/commands/MarkAsArchivedCommand.ts b/clients/client-mgn/src/commands/MarkAsArchivedCommand.ts index 3d6ab2a3da07..089531f412d0 100644 --- a/clients/client-mgn/src/commands/MarkAsArchivedCommand.ts +++ b/clients/client-mgn/src/commands/MarkAsArchivedCommand.ts @@ -22,7 +22,7 @@ export interface MarkAsArchivedCommandInput extends MarkAsArchivedRequest {} export interface MarkAsArchivedCommandOutput extends SourceServer, __MetadataBearer {} /** - *

      Archives specific Source Servers by setting the SourceServer.isArchived property to true for specified SourceServers by ID. This command only works for SourceServers with a lifecycle.state which equals DISCONNECTED or CUTOVER.

      + *

      Archives specific Source Servers by setting the SourceServer.isArchived property to true for specified SourceServers by ID. This command only works for SourceServers with a lifecycle.state which equals DISCONNECTED or CUTOVER.

      * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-mgn/src/commands/StartReplicationCommand.ts b/clients/client-mgn/src/commands/StartReplicationCommand.ts index 91cac6bbc8b5..f3f912976a50 100644 --- a/clients/client-mgn/src/commands/StartReplicationCommand.ts +++ b/clients/client-mgn/src/commands/StartReplicationCommand.ts @@ -22,7 +22,7 @@ export interface StartReplicationCommandInput extends StartReplicationRequest {} export interface StartReplicationCommandOutput extends SourceServer, __MetadataBearer {} /** - *

      Starts replication on source server by ID.

      + *

      Starts replication for SNAPSHOT_SHIPPING agents.

      * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-mgn/src/commands/StartTestCommand.ts b/clients/client-mgn/src/commands/StartTestCommand.ts index 27cfbe6f3651..ebbc5058a388 100644 --- a/clients/client-mgn/src/commands/StartTestCommand.ts +++ b/clients/client-mgn/src/commands/StartTestCommand.ts @@ -22,7 +22,7 @@ export interface StartTestCommandInput extends StartTestRequest {} export interface StartTestCommandOutput extends StartTestResponse, __MetadataBearer {} /** - *

      Lauches a Test Instance for specific Source Servers. This command starts a LAUNCH job whose initiatedBy property is StartTest and changes the SourceServer.lifeCycle.state property to TESTING.

      + *

      Launches a Test Instance for specific Source Servers. This command starts a LAUNCH job whose initiatedBy property is StartTest and changes the SourceServer.lifeCycle.state property to TESTING.

      * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-mgn/src/commands/UpdateSourceServerReplicationTypeCommand.ts b/clients/client-mgn/src/commands/UpdateSourceServerReplicationTypeCommand.ts index 2fa4071e76f7..e05c981fe0be 100644 --- a/clients/client-mgn/src/commands/UpdateSourceServerReplicationTypeCommand.ts +++ b/clients/client-mgn/src/commands/UpdateSourceServerReplicationTypeCommand.ts @@ -22,7 +22,7 @@ export interface UpdateSourceServerReplicationTypeCommandInput extends UpdateSou export interface UpdateSourceServerReplicationTypeCommandOutput extends SourceServer, __MetadataBearer {} /** - *

      Updates source server Replication Type by ID.

      + *

      Allows you to change between the AGENT_BASED replication type and the SNAPSHOT_SHIPPING replication type.

      * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-mgn/src/models/models_0.ts b/clients/client-mgn/src/models/models_0.ts index ae769e85da85..e0bfae795804 100644 --- a/clients/client-mgn/src/models/models_0.ts +++ b/clients/client-mgn/src/models/models_0.ts @@ -118,12 +118,12 @@ export class ConflictException extends __BaseException { readonly $fault: "client" = "client"; code?: string; /** - *

      A conflict occured when prompting for the Resource ID.

      + *

      A conflict occurred when prompting for the Resource ID.

      */ resourceId?: string; /** - *

      A conflict occured when prompting for resource type.

      + *

      A conflict occurred when prompting for resource type.

      */ resourceType?: string; @@ -204,7 +204,7 @@ export class ResourceNotFoundException extends __BaseException { } /** - *

      Unitialized account exception.

      + *

      Uninitialized account exception.

      */ export class UninitializedAccountException extends __BaseException { readonly name: "UninitializedAccountException" = "UninitializedAccountException"; @@ -368,7 +368,7 @@ export interface DescribeJobsRequestFilters { fromDate?: string; /** - *

      Request to describe Job log by last date.

      + *

      Request to describe job log items by last date.

      */ toDate?: string; } @@ -389,12 +389,12 @@ export interface DescribeJobsRequest { filters: DescribeJobsRequestFilters | undefined; /** - *

      Request to describe Job log by max results.

      + *

      Request to describe job log items by max results.

      */ maxResults?: number; /** - *

      Request to describe Job logby next token.

      + *

      Request to describe job log items by next token.

      */ nextToken?: string; } @@ -503,7 +503,7 @@ export interface Job { participatingServers?: ParticipatingServer[]; /** - *

      Tags associated with spcific Job.

      + *

      Tags associated with specific Job.

      */ tags?: { [key: string]: string }; } @@ -642,6 +642,7 @@ export enum ReplicationConfigurationDataPlaneRouting { export enum ReplicationConfigurationDefaultLargeStagingDiskType { GP2 = "GP2", + GP3 = "GP3", ST1 = "ST1", } @@ -662,7 +663,7 @@ export interface CreateReplicationConfigurationTemplateRequest { associateDefaultSecurityGroup: boolean | undefined; /** - *

      Request to configure the Replication Server Secuirity group ID during Replication Settings template creation.

      + *

      Request to configure the Replication Server Security group ID during Replication Settings template creation.

      */ replicationServersSecurityGroupsIDs: string[] | undefined; @@ -677,17 +678,17 @@ export interface CreateReplicationConfigurationTemplateRequest { useDedicatedReplicationServer: boolean | undefined; /** - *

      Request to configure the Staging Disk EBS volume type to "gp2" during Replication Settings template creation.

      + *

      Request to configure the default large staging disk EBS volume type during Replication Settings template creation.

      */ defaultLargeStagingDiskType: ReplicationConfigurationDefaultLargeStagingDiskType | string | undefined; /** - *

      Request to configure EBS enryption during Replication Settings template creation.

      + *

      Request to configure EBS encryption during Replication Settings template creation.

      */ ebsEncryption: ReplicationConfigurationEbsEncryption | string | undefined; /** - *

      Request to configure an EBS enryption key during Replication Settings template creation.

      + *

      Request to configure an EBS encryption key during Replication Settings template creation.

      */ ebsEncryptionKeyArn?: string; @@ -707,7 +708,7 @@ export interface CreateReplicationConfigurationTemplateRequest { createPublicIP: boolean | undefined; /** - *

      Request to configure Staiging Area tags during Replication Settings template creation.

      + *

      Request to configure Staging Area tags during Replication Settings template creation.

      */ stagingAreaTags: { [key: string]: string } | undefined; @@ -730,7 +731,7 @@ export namespace CreateReplicationConfigurationTemplateRequest { export interface ReplicationConfigurationTemplate { /** - *

      Replication Configuration template template ID.

      + *

      Replication Configuration template ID.

      */ replicationConfigurationTemplateID: string | undefined; @@ -765,7 +766,7 @@ export interface ReplicationConfigurationTemplate { useDedicatedReplicationServer?: boolean; /** - *

      Replication Configuration template use dedault large Staging Disk type.

      + *

      Replication Configuration template use default large Staging Disk type.

      */ defaultLargeStagingDiskType?: ReplicationConfigurationDefaultLargeStagingDiskType | string; @@ -780,7 +781,7 @@ export interface ReplicationConfigurationTemplate { ebsEncryptionKeyArn?: string; /** - *

      Replication Configuration template bandwidth throtting.

      + *

      Replication Configuration template bandwidth throttling.

      */ bandwidthThrottling?: number; @@ -1085,7 +1086,7 @@ export enum DataReplicationInitiationStepStatus { } /** - *

      Data replication intiation step.

      + *

      Data replication initiation step.

      */ export interface DataReplicationInitiationStep { /** @@ -1196,7 +1197,7 @@ export namespace DataReplicationInfoReplicatedDisk { */ export interface DataReplicationInfo { /** - *

      Request to query data replication lag durating.

      + *

      Request to query data replication lag duration.

      */ lagDuration?: string; @@ -1248,21 +1249,21 @@ export enum FirstBoot { } /** - *

      Configure launced instance.

      + *

      Launched instance.

      */ export interface LaunchedInstance { /** - *

      Configure launced instance EC2 ID.

      + *

      Launched instance EC2 ID.

      */ ec2InstanceID?: string; /** - *

      Configure launced instance Job ID.

      + *

      Launched instance Job ID.

      */ jobID?: string; /** - *

      Configure launced instance first boot.

      + *

      Launched instance first boot.

      */ firstBoot?: FirstBoot | string; } @@ -1368,7 +1369,7 @@ export namespace LifeCycleLastCutover { } /** - *

      Lifecycle last Test finlized.

      + *

      Lifecycle last Test finalized.

      */ export interface LifeCycleLastTestFinalized { /** @@ -1444,7 +1445,7 @@ export interface LifeCycleLastTest { reverted?: LifeCycleLastTestReverted; /** - *

      Lifecycle last Test finlized.

      + *

      Lifecycle last Test finalized.

      */ finalized?: LifeCycleLastTestFinalized; } @@ -1902,7 +1903,7 @@ export namespace DisconnectFromServiceRequest { export interface FinalizeCutoverRequest { /** - *

      Request to finalize Cutover by Soure Server ID.

      + *

      Request to finalize Cutover by Source Server ID.

      */ sourceServerID: string | undefined; } @@ -1932,6 +1933,11 @@ export namespace GetLaunchConfigurationRequest { }); } +export enum BootMode { + LEGACY_BIOS = "LEGACY_BIOS", + UEFI = "UEFI", +} + export enum LaunchDisposition { STARTED = "STARTED", STOPPED = "STOPPED", @@ -1963,27 +1969,27 @@ export enum TargetInstanceTypeRightSizingMethod { export interface LaunchConfiguration { /** - *

      Configure launch configuration Source Server ID.

      + *

      Launch configuration Source Server ID.

      */ sourceServerID?: string; /** - *

      Configure launch configuration name.

      + *

      Launch configuration name.

      */ name?: string; /** - *

      Configure EC2 lauch configuration template ID.

      + *

      Launch configuration EC2 Launch template ID.

      */ ec2LaunchTemplateID?: string; /** - *

      Configure launch dispostion for launch configuration.

      + *

      Launch disposition for launch configuration.

      */ launchDisposition?: LaunchDisposition | string; /** - *

      Configure launch configuration Target instance type right sizing method.

      + *

      Launch configuration Target instance type right sizing method.

      */ targetInstanceTypeRightSizingMethod?: TargetInstanceTypeRightSizingMethod | string; @@ -1998,9 +2004,14 @@ export interface LaunchConfiguration { copyTags?: boolean; /** - *

      Configure launch configuration OS licensing.

      + *

      Launch configuration OS licensing.

      */ licensing?: Licensing; + + /** + *

      Launch configuration boot mode.

      + */ + bootMode?: BootMode | string; } export namespace LaunchConfiguration { @@ -2014,7 +2025,7 @@ export namespace LaunchConfiguration { export interface GetReplicationConfigurationRequest { /** - *

      Request to get Replication Configuaration by Source Server ID.

      + *

      Request to get Replication Configuration by Source Server ID.

      */ sourceServerID: string | undefined; } @@ -2031,7 +2042,9 @@ export namespace GetReplicationConfigurationRequest { export enum ReplicationConfigurationReplicatedDiskStagingDiskType { AUTO = "AUTO", GP2 = "GP2", + GP3 = "GP3", IO1 = "IO1", + IO2 = "IO2", SC1 = "SC1", ST1 = "ST1", STANDARD = "STANDARD", @@ -2060,6 +2073,11 @@ export interface ReplicationConfigurationReplicatedDisk { *

      Replication Configuration replicated disk IOPs.

      */ iops?: number; + + /** + *

      Replication Configuration replicated disk throughput.

      + */ + throughput?: number; } export namespace ReplicationConfigurationReplicatedDisk { @@ -2237,7 +2255,7 @@ export class ServiceQuotaExceededException extends __BaseException { readonly $fault: "client" = "client"; code?: string; /** - *

      Exceeded the service quota resource Id.

      + *

      Exceeded the service quota resource ID.

      */ resourceId?: string; @@ -2403,6 +2421,11 @@ export interface UpdateLaunchConfigurationRequest { *

      Update Launch configuration licensing request.

      */ licensing?: Licensing; + + /** + *

      Update Launch configuration boot mode request.

      + */ + bootMode?: BootMode | string; } export namespace UpdateLaunchConfigurationRequest { diff --git a/clients/client-mgn/src/protocols/Aws_restJson1.ts b/clients/client-mgn/src/protocols/Aws_restJson1.ts index cdb1b43e6970..a12f15c08a28 100644 --- a/clients/client-mgn/src/protocols/Aws_restJson1.ts +++ b/clients/client-mgn/src/protocols/Aws_restJson1.ts @@ -877,6 +877,7 @@ export const serializeAws_restJson1UpdateLaunchConfigurationCommand = async ( `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/UpdateLaunchConfiguration"; let body: any; body = JSON.stringify({ + ...(input.bootMode !== undefined && input.bootMode !== null && { bootMode: input.bootMode }), ...(input.copyPrivateIp !== undefined && input.copyPrivateIp !== null && { copyPrivateIp: input.copyPrivateIp }), ...(input.copyTags !== undefined && input.copyTags !== null && { copyTags: input.copyTags }), ...(input.launchDisposition !== undefined && @@ -1889,6 +1890,7 @@ export const deserializeAws_restJson1GetLaunchConfigurationCommand = async ( } const contents: GetLaunchConfigurationCommandOutput = { $metadata: deserializeMetadata(output), + bootMode: undefined, copyPrivateIp: undefined, copyTags: undefined, ec2LaunchTemplateID: undefined, @@ -1899,6 +1901,9 @@ export const deserializeAws_restJson1GetLaunchConfigurationCommand = async ( targetInstanceTypeRightSizingMethod: undefined, }; const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.bootMode !== undefined && data.bootMode !== null) { + contents.bootMode = __expectString(data.bootMode); + } if (data.copyPrivateIp !== undefined && data.copyPrivateIp !== null) { contents.copyPrivateIp = __expectBoolean(data.copyPrivateIp); } @@ -2690,6 +2695,7 @@ export const deserializeAws_restJson1UpdateLaunchConfigurationCommand = async ( } const contents: UpdateLaunchConfigurationCommandOutput = { $metadata: deserializeMetadata(output), + bootMode: undefined, 
copyPrivateIp: undefined, copyTags: undefined, ec2LaunchTemplateID: undefined, @@ -2700,6 +2706,9 @@ export const deserializeAws_restJson1UpdateLaunchConfigurationCommand = async ( targetInstanceTypeRightSizingMethod: undefined, }; const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.bootMode !== undefined && data.bootMode !== null) { + contents.bootMode = __expectString(data.bootMode); + } if (data.copyPrivateIp !== undefined && data.copyPrivateIp !== null) { contents.copyPrivateIp = __expectBoolean(data.copyPrivateIp); } @@ -3365,6 +3374,7 @@ const serializeAws_restJson1ReplicationConfigurationReplicatedDisk = ( ...(input.isBootDisk !== undefined && input.isBootDisk !== null && { isBootDisk: input.isBootDisk }), ...(input.stagingDiskType !== undefined && input.stagingDiskType !== null && { stagingDiskType: input.stagingDiskType }), + ...(input.throughput !== undefined && input.throughput !== null && { throughput: input.throughput }), }; }; @@ -3861,6 +3871,7 @@ const deserializeAws_restJson1ReplicationConfigurationReplicatedDisk = ( iops: __expectLong(output.iops), isBootDisk: __expectBoolean(output.isBootDisk), stagingDiskType: __expectString(output.stagingDiskType), + throughput: __expectLong(output.throughput), } as any; }; diff --git a/clients/client-migration-hub-refactor-spaces/README.md b/clients/client-migration-hub-refactor-spaces/README.md index b6347c1857cc..fc03df624502 100644 --- a/clients/client-migration-hub-refactor-spaces/README.md +++ b/clients/client-migration-hub-refactor-spaces/README.md @@ -15,6 +15,9 @@ request parameters and the response. Alternatively, you can use one of the Amazo access an API that is tailored to the programming language or platform that you're using. For more information, see Amazon Web Services SDKs.

      +

      To share Refactor Spaces environments with other Amazon Web Services accounts or with Organizations +and their OUs, use Resource Access Manager's CreateResourceShare API. See CreateResourceShare in the Amazon Web Services RAM API Reference.

      + ## Installing To install the this package, simply type add or install @aws-sdk/client-migration-hub-refactor-spaces diff --git a/clients/client-migration-hub-refactor-spaces/src/MigrationHubRefactorSpaces.ts b/clients/client-migration-hub-refactor-spaces/src/MigrationHubRefactorSpaces.ts index 10cdb971afd4..49f682591b58 100644 --- a/clients/client-migration-hub-refactor-spaces/src/MigrationHubRefactorSpaces.ts +++ b/clients/client-migration-hub-refactor-spaces/src/MigrationHubRefactorSpaces.ts @@ -101,13 +101,16 @@ import { MigrationHubRefactorSpacesClient } from "./MigrationHubRefactorSpacesCl * request parameters and the response. Alternatively, you can use one of the Amazon Web Services SDKs to * access an API that is tailored to the programming language or platform that you're using. For * more information, see Amazon Web Services SDKs.

      + * + *

      To share Refactor Spaces environments with other Amazon Web Services accounts or with Organizations + * and their OUs, use Resource Access Manager's CreateResourceShare API. See CreateResourceShare in the Amazon Web Services RAM API Reference.

      */ export class MigrationHubRefactorSpaces extends MigrationHubRefactorSpacesClient { /** *

      Creates an Amazon Web Services Migration Hub Refactor Spaces application. The account that owns the environment also owns the * applications created inside the environment, regardless of the account that creates the - * application. Refactor Spaces provisions the Amazon API Gateway and Network Load Balancer for - * the application proxy inside your account.

      + * application. Refactor Spaces provisions an Amazon API Gateway, API Gateway VPC link, and + * Network Load Balancer for the application proxy inside your account.

      */ public createApplication( args: CreateApplicationCommandInput, @@ -139,10 +142,11 @@ export class MigrationHubRefactorSpaces extends MigrationHubRefactorSpacesClient } /** - *

      Creates an Amazon Web Services Migration Hub Refactor Spaces environment. The caller owns the environment resource, and they - * are referred to as the environment owner. The environment owner has - * cross-account visibility and control of Refactor Spaces resources that are added to the environment - * by other accounts that the environment is shared with. When creating an environment, Refactor Spaces + *

      Creates an Amazon Web Services Migration Hub Refactor Spaces environment. The caller owns the environment resource, and all + * Refactor Spaces applications, services, and routes created within the environment. They are referred + * to as the environment owner. The environment owner has cross-account + * visibility and control of Refactor Spaces resources that are added to the environment by other + * accounts that the environment is shared with. When creating an environment, Refactor Spaces * provisions a transit gateway in your account.

      */ public createEnvironment( @@ -192,25 +196,29 @@ export class MigrationHubRefactorSpaces extends MigrationHubRefactorSpacesClient * Refactor Spaces routes traffic over the public internet.

      *
    • *
    • - *

      If the service has an Lambda function endpoint, then Refactor Spaces uses - * the API Gateway - * Lambda integration.

      + *

      If the service has an Lambda function endpoint, then Refactor Spaces + * configures the Lambda function's resource policy to allow the application's + * API Gateway to invoke the function.

      *
    • *
    - *

    A health check is performed on the service when the route is created. If the health check - * fails, the route transitions to FAILED, and no traffic is sent to the service.

    - *

    For Lambda functions, the Lambda function state is checked. If - * the function is not active, the function configuration is updated so that Lambda + *

    A one-time health check is performed on the service when the route is created. If the + * health check fails, the route transitions to FAILED, and no traffic is sent to + * the service.

    + *

    For Lambda functions, the Lambda function state is checked. If the + * function is not active, the function configuration is updated so that Lambda * resources are provisioned. If the Lambda state is Failed, then the * route creation fails. For more information, see the GetFunctionConfiguration's State response parameter in the Lambda Developer Guide.

    - *

    For public URLs, a connection is opened to the public endpoint. If the URL is not reachable, - * the health check fails. For private URLs, a target group is created and the target group - * health check is run.

    + *

    For public URLs, a connection is opened to the public endpoint. If the URL is not + * reachable, the health check fails. For private URLs, a target group is created and the target + * group health check is run.

    *

    The HealthCheckProtocol, HealthCheckPort, and * HealthCheckPath are the same protocol, port, and path specified in the URL or * health URL, if used. All other settings use the default values, as described in Health checks * for your target groups. The health check is considered successful if at least one * target within the target group transitions to a healthy state.

    + *

    Services can have HTTP or HTTPS URL endpoints. For HTTPS URLs, publicly-signed + * certificates are supported. Private Certificate Authorities (CAs) are permitted only if the + * CA's domain is publicly resolvable.

    */ public createRoute(args: CreateRouteCommandInput, options?: __HttpHandlerOptions): Promise; public createRoute(args: CreateRouteCommandInput, cb: (err: any, data?: CreateRouteCommandOutput) => void): void; @@ -241,7 +249,7 @@ export class MigrationHubRefactorSpaces extends MigrationHubRefactorSpacesClient * Services have either a URL endpoint in a virtual private cloud (VPC), or a Lambda * function endpoint.

    * - *

    If an Amazon Web Services resourceis launched in a service VPC, and you want it to be + *

    If an Amazon Web Services resource is launched in a service VPC, and you want it to be * accessible to all of an environment’s services with VPCs and routes, apply the * RefactorSpacesSecurityGroup to the resource. Alternatively, to add more * cross-account constraints, apply your own security group.

    @@ -646,7 +654,8 @@ export class MigrationHubRefactorSpaces extends MigrationHubRefactorSpacesClient } /** - *

    Lists all the virtual private clouds (VPCs) that are part of an Amazon Web Services Migration Hub Refactor Spaces environment.

    + *

    Lists all Amazon Web Services Migration Hub Refactor Spaces service virtual private clouds (VPCs) that are part of the + * environment.

    */ public listEnvironmentVpcs( args: ListEnvironmentVpcsCommandInput, @@ -769,7 +778,8 @@ export class MigrationHubRefactorSpaces extends MigrationHubRefactorSpacesClient *

    Attaches a resource-based permission policy to the Amazon Web Services Migration Hub Refactor Spaces environment. The policy * must contain the same actions and condition statements as the * arn:aws:ram::aws:permission/AWSRAMDefaultPermissionRefactorSpacesEnvironment - * permission in Resource Access Manager. The policy must not contain new lines or blank lines.

    + * permission in Resource Access Manager. The policy must not contain new lines or blank lines. + *

    */ public putResourcePolicy( args: PutResourcePolicyCommandInput, diff --git a/clients/client-migration-hub-refactor-spaces/src/MigrationHubRefactorSpacesClient.ts b/clients/client-migration-hub-refactor-spaces/src/MigrationHubRefactorSpacesClient.ts index 840ccbf6d766..38873e80e196 100644 --- a/clients/client-migration-hub-refactor-spaces/src/MigrationHubRefactorSpacesClient.ts +++ b/clients/client-migration-hub-refactor-spaces/src/MigrationHubRefactorSpacesClient.ts @@ -297,6 +297,9 @@ export interface MigrationHubRefactorSpacesClientResolvedConfig * request parameters and the response. Alternatively, you can use one of the Amazon Web Services SDKs to * access an API that is tailored to the programming language or platform that you're using. For * more information, see Amazon Web Services SDKs.

    + * + *

    To share Refactor Spaces environments with other Amazon Web Services accounts or with Organizations + * and their OUs, use Resource Access Manager's CreateResourceShare API. See CreateResourceShare in the Amazon Web Services RAM API Reference.

    */ export class MigrationHubRefactorSpacesClient extends __Client< __HttpHandlerOptions, diff --git a/clients/client-migration-hub-refactor-spaces/src/commands/CreateApplicationCommand.ts b/clients/client-migration-hub-refactor-spaces/src/commands/CreateApplicationCommand.ts index 07ceacc6c82a..1266e324b585 100644 --- a/clients/client-migration-hub-refactor-spaces/src/commands/CreateApplicationCommand.ts +++ b/clients/client-migration-hub-refactor-spaces/src/commands/CreateApplicationCommand.ts @@ -28,8 +28,8 @@ export interface CreateApplicationCommandOutput extends CreateApplicationRespons /** *

    Creates an Amazon Web Services Migration Hub Refactor Spaces application. The account that owns the environment also owns the * applications created inside the environment, regardless of the account that creates the - * application. Refactor Spaces provisions the Amazon API Gateway and Network Load Balancer for - * the application proxy inside your account.

    + * application. Refactor Spaces provisions an Amazon API Gateway, API Gateway VPC link, and + * Network Load Balancer for the application proxy inside your account.

    * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-migration-hub-refactor-spaces/src/commands/CreateEnvironmentCommand.ts b/clients/client-migration-hub-refactor-spaces/src/commands/CreateEnvironmentCommand.ts index 34e373c1bdb8..b3e10ace59be 100644 --- a/clients/client-migration-hub-refactor-spaces/src/commands/CreateEnvironmentCommand.ts +++ b/clients/client-migration-hub-refactor-spaces/src/commands/CreateEnvironmentCommand.ts @@ -26,10 +26,11 @@ export interface CreateEnvironmentCommandInput extends CreateEnvironmentRequest export interface CreateEnvironmentCommandOutput extends CreateEnvironmentResponse, __MetadataBearer {} /** - *

    Creates an Amazon Web Services Migration Hub Refactor Spaces environment. The caller owns the environment resource, and they - * are referred to as the environment owner. The environment owner has - * cross-account visibility and control of Refactor Spaces resources that are added to the environment - * by other accounts that the environment is shared with. When creating an environment, Refactor Spaces + *

    Creates an Amazon Web Services Migration Hub Refactor Spaces environment. The caller owns the environment resource, and all + * Refactor Spaces applications, services, and routes created within the environment. They are referred + * to as the environment owner. The environment owner has cross-account + * visibility and control of Refactor Spaces resources that are added to the environment by other + * accounts that the environment is shared with. When creating an environment, Refactor Spaces * provisions a transit gateway in your account.

    * @example * Use a bare-bones client and the command you need to make an API call. diff --git a/clients/client-migration-hub-refactor-spaces/src/commands/CreateRouteCommand.ts b/clients/client-migration-hub-refactor-spaces/src/commands/CreateRouteCommand.ts index 3052f2874cae..9c89816391e6 100644 --- a/clients/client-migration-hub-refactor-spaces/src/commands/CreateRouteCommand.ts +++ b/clients/client-migration-hub-refactor-spaces/src/commands/CreateRouteCommand.ts @@ -43,25 +43,29 @@ export interface CreateRouteCommandOutput extends CreateRouteResponse, __Metadat * Refactor Spaces routes traffic over the public internet.

    *
  • *
  • - *

    If the service has an Lambda function endpoint, then Refactor Spaces uses - * the API Gateway - * Lambda integration.

    + *

    If the service has an Lambda function endpoint, then Refactor Spaces + * configures the Lambda function's resource policy to allow the application's + * API Gateway to invoke the function.

    *
  • * - *

    A health check is performed on the service when the route is created. If the health check - * fails, the route transitions to FAILED, and no traffic is sent to the service.

    - *

    For Lambda functions, the Lambda function state is checked. If - * the function is not active, the function configuration is updated so that Lambda + *

    A one-time health check is performed on the service when the route is created. If the + * health check fails, the route transitions to FAILED, and no traffic is sent to + * the service.

    + *

    For Lambda functions, the Lambda function state is checked. If the + * function is not active, the function configuration is updated so that Lambda * resources are provisioned. If the Lambda state is Failed, then the * route creation fails. For more information, see the GetFunctionConfiguration's State response parameter in the Lambda Developer Guide.

    - *

    For public URLs, a connection is opened to the public endpoint. If the URL is not reachable, - * the health check fails. For private URLs, a target group is created and the target group - * health check is run.

    + *

    For public URLs, a connection is opened to the public endpoint. If the URL is not + * reachable, the health check fails. For private URLs, a target group is created and the target + * group health check is run.

    *

    The HealthCheckProtocol, HealthCheckPort, and * HealthCheckPath are the same protocol, port, and path specified in the URL or * health URL, if used. All other settings use the default values, as described in Health checks * for your target groups. The health check is considered successful if at least one * target within the target group transitions to a healthy state.

    + *

    Services can have HTTP or HTTPS URL endpoints. For HTTPS URLs, publicly-signed + * certificates are supported. Private Certificate Authorities (CAs) are permitted only if the + * CA's domain is publicly resolvable.

    * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-migration-hub-refactor-spaces/src/commands/CreateServiceCommand.ts b/clients/client-migration-hub-refactor-spaces/src/commands/CreateServiceCommand.ts index d2ee35d4e480..070025392f48 100644 --- a/clients/client-migration-hub-refactor-spaces/src/commands/CreateServiceCommand.ts +++ b/clients/client-migration-hub-refactor-spaces/src/commands/CreateServiceCommand.ts @@ -31,7 +31,7 @@ export interface CreateServiceCommandOutput extends CreateServiceResponse, __Met * Services have either a URL endpoint in a virtual private cloud (VPC), or a Lambda * function endpoint.

    * - *

    If an Amazon Web Services resourceis launched in a service VPC, and you want it to be + *

    If an Amazon Web Services resource is launched in a service VPC, and you want it to be * accessible to all of an environment’s services with VPCs and routes, apply the * RefactorSpacesSecurityGroup to the resource. Alternatively, to add more * cross-account constraints, apply your own security group.

    diff --git a/clients/client-migration-hub-refactor-spaces/src/commands/ListEnvironmentVpcsCommand.ts b/clients/client-migration-hub-refactor-spaces/src/commands/ListEnvironmentVpcsCommand.ts index 0589b4919c5a..ba7fe5c72338 100644 --- a/clients/client-migration-hub-refactor-spaces/src/commands/ListEnvironmentVpcsCommand.ts +++ b/clients/client-migration-hub-refactor-spaces/src/commands/ListEnvironmentVpcsCommand.ts @@ -26,7 +26,8 @@ export interface ListEnvironmentVpcsCommandInput extends ListEnvironmentVpcsRequ export interface ListEnvironmentVpcsCommandOutput extends ListEnvironmentVpcsResponse, __MetadataBearer {} /** - *

    Lists all the virtual private clouds (VPCs) that are part of an Amazon Web Services Migration Hub Refactor Spaces environment.

    + *

    Lists all Amazon Web Services Migration Hub Refactor Spaces service virtual private clouds (VPCs) that are part of the + * environment.

    * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-migration-hub-refactor-spaces/src/commands/PutResourcePolicyCommand.ts b/clients/client-migration-hub-refactor-spaces/src/commands/PutResourcePolicyCommand.ts index 688e62705345..6b9a93e66abb 100644 --- a/clients/client-migration-hub-refactor-spaces/src/commands/PutResourcePolicyCommand.ts +++ b/clients/client-migration-hub-refactor-spaces/src/commands/PutResourcePolicyCommand.ts @@ -29,7 +29,8 @@ export interface PutResourcePolicyCommandOutput extends PutResourcePolicyRespons *

    Attaches a resource-based permission policy to the Amazon Web Services Migration Hub Refactor Spaces environment. The policy * must contain the same actions and condition statements as the * arn:aws:ram::aws:permission/AWSRAMDefaultPermissionRefactorSpacesEnvironment - * permission in Resource Access Manager. The policy must not contain new lines or blank lines.

    + * permission in Resource Access Manager. The policy must not contain new lines or blank lines. + *

    * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-migration-hub-refactor-spaces/src/models/models_0.ts b/clients/client-migration-hub-refactor-spaces/src/models/models_0.ts index 1d33eef6bd71..2f48da79aedd 100644 --- a/clients/client-migration-hub-refactor-spaces/src/models/models_0.ts +++ b/clients/client-migration-hub-refactor-spaces/src/models/models_0.ts @@ -54,7 +54,8 @@ export interface ApiGatewayProxyConfig { NlbArn?: string; /** - *

    The name of the Network Load Balancer that is configured by the API Gateway proxy.

    + *

    The name of the Network Load Balancer that is configured by the API Gateway proxy. + *

    */ NlbName?: string; @@ -132,7 +133,8 @@ export interface ApiGatewayProxySummary { NlbArn?: string; /** - *

    The name of the Network Load Balancer that is configured by the API Gateway proxy.

    + *

    The name of the Network Load Balancer that is configured by the API Gateway proxy. + *

    */ NlbName?: string; @@ -259,12 +261,13 @@ export interface ApplicationSummary { Name?: string; /** - *

    he Amazon Resource Name (ARN) of the application.

    + *

    The Amazon Resource Name (ARN) of the application.

    */ Arn?: string; /** - *

    The Amazon Web Services account ID of the application owner.

    + *

    The Amazon Web Services account ID of the application owner (which is always the same as + * the environment owner account ID).

    */ OwnerAccountId?: string; @@ -432,7 +435,8 @@ export interface CreateApplicationResponse { Arn?: string; /** - *

    The Amazon Web Services account ID of the application owner.

    + *

    The Amazon Web Services account ID of the application owner (which is always the same as + * the environment owner account ID).

    */ OwnerAccountId?: string; @@ -638,7 +642,8 @@ export class ThrottlingException extends __BaseException { } /** - *

    The input does not satisfy the constraints specified by an Amazon Web Service.

    + *

    The input does not satisfy the constraints specified by an Amazon Web Service. + *

    */ export class ValidationException extends __BaseException { readonly name: "ValidationException" = "ValidationException"; @@ -799,7 +804,8 @@ export interface UriPathRouteInput { SourcePath: string | undefined; /** - *

    Indicates whether traffic is forwarded to this route’s service after the route is created.

    + *

    Indicates whether traffic is forwarded to this route’s service after the route is created. + *

    */ ActivationState: RouteActivationState | string | undefined; @@ -919,7 +925,7 @@ export interface CreateRouteResponse { RouteType?: RouteType | string; /** - *

    The ID of service in which the rute iscreated. Traffic that matches this route is + *

    The ID of service in which the route is created. Traffic that matches this route is * forwarded to this service.

    */ ServiceId?: string; @@ -935,7 +941,7 @@ export interface CreateRouteResponse { UriPathRoute?: UriPathRouteInput; /** - *

    he current state of the route.

    + *

    The current state of the route.

    */ State?: RouteState | string; @@ -1364,7 +1370,7 @@ export interface DeleteRouteResponse { ServiceId?: string; /** - *

    he ID of the application that the route belongs to.

    + *

    The ID of the application that the route belongs to.

    */ ApplicationId?: string; @@ -1466,7 +1472,8 @@ export namespace DeleteServiceResponse { } /** - *

    The summary information for environments as a response to ListEnvironments.

    + *

    The summary information for environments as a response to ListEnvironments. + *

    */ export interface EnvironmentSummary { /** @@ -1623,7 +1630,8 @@ export interface GetApplicationResponse { Arn?: string; /** - *

    The Amazon Web Services account ID of the application owner.

    + *

    The Amazon Web Services account ID of the application owner (which is always the same as + * the environment owner account ID).

    */ OwnerAccountId?: string; diff --git a/clients/client-panorama/src/Panorama.ts b/clients/client-panorama/src/Panorama.ts index be6c49b5955f..e1cce9b3135d 100644 --- a/clients/client-panorama/src/Panorama.ts +++ b/clients/client-panorama/src/Panorama.ts @@ -356,6 +356,10 @@ export class Panorama extends PanoramaClient { /** *

    Deletes a package.

    + * + *

    To delete a package, you need permission to call s3:DeleteObject + * in addition to permissions for the AWS Panorama API.

    + *
    */ public deletePackage( args: DeletePackageCommandInput, diff --git a/clients/client-panorama/src/commands/DeletePackageCommand.ts b/clients/client-panorama/src/commands/DeletePackageCommand.ts index 6c41b11798ab..51f42bb33d38 100644 --- a/clients/client-panorama/src/commands/DeletePackageCommand.ts +++ b/clients/client-panorama/src/commands/DeletePackageCommand.ts @@ -23,6 +23,10 @@ export interface DeletePackageCommandOutput extends DeletePackageResponse, __Met /** *

    Deletes a package.

    + * + *

    To delete a package, you need permission to call s3:DeleteObject + * in addition to permissions for the AWS Panorama API.

    + *
    * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-panorama/src/models/models_0.ts b/clients/client-panorama/src/models/models_0.ts index e01ce7a68508..302487e39651 100644 --- a/clients/client-panorama/src/models/models_0.ts +++ b/clients/client-panorama/src/models/models_0.ts @@ -24,6 +24,25 @@ export class AccessDeniedException extends __BaseException { } } +/** + *

    Details about a beta appliance software update.

    + */ +export interface AlternateSoftwareMetadata { + /** + *

    The appliance software version.

    + */ + Version?: string; +} + +export namespace AlternateSoftwareMetadata { + /** + * @internal + */ + export const filterSensitiveLog = (obj: AlternateSoftwareMetadata): any => ({ + ...obj, + }); +} + export enum ApplicationInstanceHealthStatus { ERROR = "ERROR", NOT_AVAILABLE = "NOT_AVAILABLE", @@ -897,6 +916,7 @@ export namespace PackageImportJobInputConfig { } export enum PackageImportJobType { + MARKETPLACE_NODE_PACKAGE_VERSION = "MARKETPLACE_NODE_PACKAGE_VERSION", NODE_PACKAGE_VERSION = "NODE_PACKAGE_VERSION", } @@ -1298,6 +1318,7 @@ export namespace DescribeDeviceRequest { export enum NetworkConnectionStatus { CONNECTED = "CONNECTED", + CONNECTING = "CONNECTING", NOT_CONNECTED = "NOT_CONNECTED", } @@ -1330,6 +1351,35 @@ export namespace EthernetStatus { }); } +/** + *

    Details about an NTP server connection.

    + */ +export interface NtpStatus { + /** + *

    The connection's status.

    + */ + ConnectionStatus?: NetworkConnectionStatus | string; + + /** + *

    The IP address of the server.

    + */ + IpAddress?: string; + + /** + *

    The domain name of the server.

    + */ + NtpServerName?: string; +} + +export namespace NtpStatus { + /** + * @internal + */ + export const filterSensitiveLog = (obj: NtpStatus): any => ({ + ...obj, + }); +} + /** *

    The network status of a device.

    */ @@ -1343,6 +1393,16 @@ export interface NetworkStatus { *

    The status of Ethernet port 1.

    */ Ethernet1Status?: EthernetStatus; + + /** + *

    Details about a network time protocol (NTP) server connection.

    + */ + NtpStatus?: NtpStatus; + + /** + *

    When the network status changed.

    + */ + LastUpdatedTime?: Date; } export namespace NetworkStatus { @@ -1420,6 +1480,26 @@ export namespace EthernetPayload { }); } +/** + *

    Network time protocol (NTP) server settings. Use this option to connect to local NTP + * servers instead of pool.ntp.org.

    + */ +export interface NtpPayload { + /** + *

    NTP servers to use, in order of preference.

    + */ + NtpServers: string[] | undefined; +} + +export namespace NtpPayload { + /** + * @internal + */ + export const filterSensitiveLog = (obj: NtpPayload): any => ({ + ...obj, + }); +} + /** *

    The network configuration for a device.

    */ @@ -1433,6 +1513,11 @@ export interface NetworkPayload { *

    Settings for Ethernet port 1.

    */ Ethernet1?: EthernetPayload; + + /** + *

    Network time protocol (NTP) server settings.

    + */ + Ntp?: NtpPayload; } export namespace NetworkPayload { @@ -1533,6 +1618,16 @@ export interface DescribeDeviceResponse { *

    The device's lease expiration time.

    */ LeaseExpirationTime?: Date; + + /** + *

    Beta software releases available for the device.

    + */ + AlternateSoftwares?: AlternateSoftwareMetadata[]; + + /** + *

    The most recent beta software release.

    + */ + LatestAlternateSoftware?: string; } export namespace DescribeDeviceResponse { diff --git a/clients/client-panorama/src/protocols/Aws_restJson1.ts b/clients/client-panorama/src/protocols/Aws_restJson1.ts index 81ff8c25dad7..cf6f8b690b81 100644 --- a/clients/client-panorama/src/protocols/Aws_restJson1.ts +++ b/clients/client-panorama/src/protocols/Aws_restJson1.ts @@ -110,6 +110,7 @@ import { } from "../commands/UpdateDeviceMetadataCommand"; import { AccessDeniedException, + AlternateSoftwareMetadata, ApplicationInstance, ConflictException, ConflictExceptionErrorArgument, @@ -131,6 +132,8 @@ import { NodeInstance, NodeInterface, NodeOutputPort, + NtpPayload, + NtpStatus, OTAJobConfig, OutPutS3Location, PackageImportJob, @@ -1844,6 +1847,7 @@ export const deserializeAws_restJson1DescribeDeviceCommand = async ( } const contents: DescribeDeviceCommandOutput = { $metadata: deserializeMetadata(output), + AlternateSoftwares: undefined, Arn: undefined, CreatedTime: undefined, CurrentNetworkingStatus: undefined, @@ -1851,6 +1855,7 @@ export const deserializeAws_restJson1DescribeDeviceCommand = async ( Description: undefined, DeviceConnectionStatus: undefined, DeviceId: undefined, + LatestAlternateSoftware: undefined, LatestSoftware: undefined, LeaseExpirationTime: undefined, Name: undefined, @@ -1861,6 +1866,9 @@ export const deserializeAws_restJson1DescribeDeviceCommand = async ( Type: undefined, }; const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.AlternateSoftwares !== undefined && data.AlternateSoftwares !== null) { + contents.AlternateSoftwares = deserializeAws_restJson1AlternateSoftwares(data.AlternateSoftwares, context); + } if (data.Arn !== undefined && data.Arn !== null) { contents.Arn = __expectString(data.Arn); } @@ -1882,6 +1890,9 @@ export const deserializeAws_restJson1DescribeDeviceCommand = async ( if (data.DeviceId !== undefined && data.DeviceId !== null) { 
contents.DeviceId = __expectString(data.DeviceId); } + if (data.LatestAlternateSoftware !== undefined && data.LatestAlternateSoftware !== null) { + contents.LatestAlternateSoftware = __expectString(data.LatestAlternateSoftware); + } if (data.LatestSoftware !== undefined && data.LatestSoftware !== null) { contents.LatestSoftware = __expectString(data.LatestSoftware); } @@ -3599,9 +3610,28 @@ const serializeAws_restJson1NetworkPayload = (input: NetworkPayload, context: __ input.Ethernet0 !== null && { Ethernet0: serializeAws_restJson1EthernetPayload(input.Ethernet0, context) }), ...(input.Ethernet1 !== undefined && input.Ethernet1 !== null && { Ethernet1: serializeAws_restJson1EthernetPayload(input.Ethernet1, context) }), + ...(input.Ntp !== undefined && input.Ntp !== null && { Ntp: serializeAws_restJson1NtpPayload(input.Ntp, context) }), + }; +}; + +const serializeAws_restJson1NtpPayload = (input: NtpPayload, context: __SerdeContext): any => { + return { + ...(input.NtpServers !== undefined && + input.NtpServers !== null && { NtpServers: serializeAws_restJson1NtpServerList(input.NtpServers, context) }), }; }; +const serializeAws_restJson1NtpServerList = (input: string[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return entry; + }); +}; + const serializeAws_restJson1OTAJobConfig = (input: OTAJobConfig, context: __SerdeContext): any => { return { ...(input.ImageVersion !== undefined && input.ImageVersion !== null && { ImageVersion: input.ImageVersion }), @@ -3705,6 +3735,30 @@ const serializeAws_restJson1TemplateParametersMap = ( }, {}); }; +const deserializeAws_restJson1AlternateSoftwareMetadata = ( + output: any, + context: __SerdeContext +): AlternateSoftwareMetadata => { + return { + Version: __expectString(output.Version), + } as any; +}; + +const deserializeAws_restJson1AlternateSoftwares = ( + output: any, + context: __SerdeContext +): 
AlternateSoftwareMetadata[] => { + const retVal = (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1AlternateSoftwareMetadata(entry, context); + }); + return retVal; +}; + const deserializeAws_restJson1ApplicationInstance = (output: any, context: __SerdeContext): ApplicationInstance => { return { ApplicationInstanceId: __expectString(output.ApplicationInstanceId), @@ -3930,6 +3984,10 @@ const deserializeAws_restJson1NetworkPayload = (output: any, context: __SerdeCon output.Ethernet1 !== undefined && output.Ethernet1 !== null ? deserializeAws_restJson1EthernetPayload(output.Ethernet1, context) : undefined, + Ntp: + output.Ntp !== undefined && output.Ntp !== null + ? deserializeAws_restJson1NtpPayload(output.Ntp, context) + : undefined, } as any; }; @@ -3943,6 +4001,14 @@ const deserializeAws_restJson1NetworkStatus = (output: any, context: __SerdeCont output.Ethernet1Status !== undefined && output.Ethernet1Status !== null ? deserializeAws_restJson1EthernetStatus(output.Ethernet1Status, context) : undefined, + LastUpdatedTime: + output.LastUpdatedTime !== undefined && output.LastUpdatedTime !== null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.LastUpdatedTime))) + : undefined, + NtpStatus: + output.NtpStatus !== undefined && output.NtpStatus !== null + ? deserializeAws_restJson1NtpStatus(output.NtpStatus, context) + : undefined, } as any; }; @@ -4061,6 +4127,35 @@ const deserializeAws_restJson1NodesList = (output: any, context: __SerdeContext) return retVal; }; +const deserializeAws_restJson1NtpPayload = (output: any, context: __SerdeContext): NtpPayload => { + return { + NtpServers: + output.NtpServers !== undefined && output.NtpServers !== null + ? 
deserializeAws_restJson1NtpServerList(output.NtpServers, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1NtpServerList = (output: any, context: __SerdeContext): string[] => { + const retVal = (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return __expectString(entry) as any; + }); + return retVal; +}; + +const deserializeAws_restJson1NtpStatus = (output: any, context: __SerdeContext): NtpStatus => { + return { + ConnectionStatus: __expectString(output.ConnectionStatus), + IpAddress: __expectString(output.IpAddress), + NtpServerName: __expectString(output.NtpServerName), + } as any; +}; + const deserializeAws_restJson1OutputPortList = (output: any, context: __SerdeContext): NodeOutputPort[] => { const retVal = (output || []) .filter((e: any) => e != null) diff --git a/clients/client-rds/src/RDS.ts b/clients/client-rds/src/RDS.ts index 0ead44368eb8..9ad8806f53c9 100644 --- a/clients/client-rds/src/RDS.ts +++ b/clients/client-rds/src/RDS.ts @@ -1158,9 +1158,6 @@ export class RDS extends RDSClient { * * Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide. *

    - * - *

    The Multi-AZ DB clusters feature is in preview and is subject to change.

    - *
    */ public copyDBClusterSnapshot( args: CopyDBClusterSnapshotCommandInput, @@ -1411,9 +1408,6 @@ export class RDS extends RDSClient { * * Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide. *

    - * - *

    The Multi-AZ DB clusters feature is in preview and is subject to change.

    - *
    */ public createDBCluster( args: CreateDBClusterCommandInput, @@ -1515,9 +1509,6 @@ export class RDS extends RDSClient { * * Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide. *

    - * - *

    The Multi-AZ DB clusters feature is in preview and is subject to change.

    - *
    */ public createDBClusterParameterGroup( args: CreateDBClusterParameterGroupCommandInput, @@ -1557,9 +1548,6 @@ export class RDS extends RDSClient { * * Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide. *

    - * - *

    The Multi-AZ DB clusters feature is in preview and is subject to change.

    - *
    */ public createDBClusterSnapshot( args: CreateDBClusterSnapshotCommandInput, @@ -2126,9 +2114,6 @@ export class RDS extends RDSClient { * * Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide. *

    - * - *

    The Multi-AZ DB clusters feature is in preview and is subject to change.

    - *
    */ public deleteDBCluster( args: DeleteDBClusterCommandInput, @@ -2204,9 +2189,6 @@ export class RDS extends RDSClient { * * Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide. *

    - * - *

    The Multi-AZ DB clusters feature is in preview and is subject to change.

    - *
    */ public deleteDBClusterParameterGroup( args: DeleteDBClusterParameterGroupCommandInput, @@ -2251,9 +2233,6 @@ export class RDS extends RDSClient { * * Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide. *

    - * - *

    The Multi-AZ DB clusters feature is in preview and is subject to change.

    - *
    */ public deleteDBClusterSnapshot( args: DeleteDBClusterSnapshotCommandInput, @@ -2934,9 +2913,6 @@ export class RDS extends RDSClient { * * Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide. *

    - * - *

    The Multi-AZ DB clusters feature is in preview and is subject to change.

    - *
    */ public describeDBClusterParameterGroups( args: DescribeDBClusterParameterGroupsCommandInput, @@ -2977,9 +2953,6 @@ export class RDS extends RDSClient { * * Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide. *

    - * - *

    The Multi-AZ DB clusters feature is in preview and is subject to change.

    - *
    */ public describeDBClusterParameters( args: DescribeDBClusterParametersCommandInput, @@ -3020,9 +2993,6 @@ export class RDS extends RDSClient { * * Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide. *

    - * - *

    The Multi-AZ DB clusters feature is in preview and is subject to change.

    - *
    *

    This operation can also return information for Amazon Neptune DB instances and Amazon DocumentDB instances.

    */ public describeDBClusters( @@ -3103,9 +3073,6 @@ export class RDS extends RDSClient { * * Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide. *

    - * - *

    The Multi-AZ DB clusters feature is in preview and is subject to change.

    - *
    */ public describeDBClusterSnapshots( args: DescribeDBClusterSnapshotsCommandInput, @@ -4210,9 +4177,6 @@ export class RDS extends RDSClient { * * Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide. *

    - * - *

    The Multi-AZ DB clusters feature is in preview and is subject to change.

    - *
    */ public failoverDBCluster( args: FailoverDBClusterCommandInput, @@ -4516,9 +4480,6 @@ export class RDS extends RDSClient { * * Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide. *

    - * - *

    The Multi-AZ DB clusters feature is in preview and is subject to change.

    - *
    */ public modifyDBCluster( args: ModifyDBClusterCommandInput, @@ -4614,9 +4575,6 @@ export class RDS extends RDSClient { * * Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide. *

    - * - *

    The Multi-AZ DB clusters feature is in preview and is subject to change.

    - *
    */ public modifyDBClusterParameterGroup( args: ModifyDBClusterParameterGroupCommandInput, @@ -5229,8 +5187,7 @@ export class RDS extends RDSClient { * cluster results in a momentary outage, during which the DB cluster status is set to rebooting. *

    * - *

    Use this operation only for a non-Aurora Multi-AZ DB cluster. - * The Multi-AZ DB clusters feature is in preview and is subject to change.

    + *

    Use this operation only for a non-Aurora Multi-AZ DB cluster.

    * *

    For more information on Multi-AZ DB clusters, see * @@ -5394,9 +5351,6 @@ export class RDS extends RDSClient { * * Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide. *

    - * - *

    The Multi-AZ DB clusters feature is in preview and is subject to change.

    - *
    */ public removeRoleFromDBCluster( args: RemoveRoleFromDBClusterCommandInput, @@ -5550,9 +5504,6 @@ export class RDS extends RDSClient { * * Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide. *

    - * - *

    The Multi-AZ DB clusters feature is in preview and is subject to change.

    - *
    */ public resetDBClusterParameterGroup( args: ResetDBClusterParameterGroupCommandInput, @@ -5694,9 +5645,6 @@ export class RDS extends RDSClient { * * Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide. *

    - * - *

    The Multi-AZ DB clusters feature is in preview and is subject to change.

    - *
    */ public restoreDBClusterFromSnapshot( args: RestoreDBClusterFromSnapshotCommandInput, @@ -5749,9 +5697,6 @@ export class RDS extends RDSClient { * * Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide. *

    - * - *

    The Multi-AZ DB clusters feature is in preview and is subject to change.

    - *
    */ public restoreDBClusterToPointInTime( args: RestoreDBClusterToPointInTimeCommandInput, diff --git a/clients/client-rds/src/commands/CopyDBClusterSnapshotCommand.ts b/clients/client-rds/src/commands/CopyDBClusterSnapshotCommand.ts index 997f182df86d..829934b08f67 100644 --- a/clients/client-rds/src/commands/CopyDBClusterSnapshotCommand.ts +++ b/clients/client-rds/src/commands/CopyDBClusterSnapshotCommand.ts @@ -98,9 +98,6 @@ export interface CopyDBClusterSnapshotCommandOutput extends CopyDBClusterSnapsho * * Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide. *

    - * - *

    The Multi-AZ DB clusters feature is in preview and is subject to change.

    - *
    * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-rds/src/commands/CreateDBClusterCommand.ts b/clients/client-rds/src/commands/CreateDBClusterCommand.ts index ec4c4dc83742..5d0f087c8fcf 100644 --- a/clients/client-rds/src/commands/CreateDBClusterCommand.ts +++ b/clients/client-rds/src/commands/CreateDBClusterCommand.ts @@ -37,9 +37,6 @@ export interface CreateDBClusterCommandOutput extends CreateDBClusterResult, __M * * Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide. *

    - * - *

    The Multi-AZ DB clusters feature is in preview and is subject to change.

    - *
    * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-rds/src/commands/CreateDBClusterParameterGroupCommand.ts b/clients/client-rds/src/commands/CreateDBClusterParameterGroupCommand.ts index b46ac9958755..4ce861a21d70 100644 --- a/clients/client-rds/src/commands/CreateDBClusterParameterGroupCommand.ts +++ b/clients/client-rds/src/commands/CreateDBClusterParameterGroupCommand.ts @@ -59,9 +59,6 @@ export interface CreateDBClusterParameterGroupCommandOutput * * Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide. *

    - * - *

    The Multi-AZ DB clusters feature is in preview and is subject to change.

    - *
    * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-rds/src/commands/CreateDBClusterSnapshotCommand.ts b/clients/client-rds/src/commands/CreateDBClusterSnapshotCommand.ts index 3f137a4c2450..18a2db45db59 100644 --- a/clients/client-rds/src/commands/CreateDBClusterSnapshotCommand.ts +++ b/clients/client-rds/src/commands/CreateDBClusterSnapshotCommand.ts @@ -30,9 +30,6 @@ export interface CreateDBClusterSnapshotCommandOutput extends CreateDBClusterSna * * Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide. *

    - * - *

    The Multi-AZ DB clusters feature is in preview and is subject to change.

    - *
    * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-rds/src/commands/DeleteDBClusterCommand.ts b/clients/client-rds/src/commands/DeleteDBClusterCommand.ts index 3cccfaba3f1d..0c006cdfb03f 100644 --- a/clients/client-rds/src/commands/DeleteDBClusterCommand.ts +++ b/clients/client-rds/src/commands/DeleteDBClusterCommand.ts @@ -33,9 +33,6 @@ export interface DeleteDBClusterCommandOutput extends DeleteDBClusterResult, __M * * Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide. *

    - * - *

    The Multi-AZ DB clusters feature is in preview and is subject to change.

    - *
    * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-rds/src/commands/DeleteDBClusterParameterGroupCommand.ts b/clients/client-rds/src/commands/DeleteDBClusterParameterGroupCommand.ts index cd00555fe045..ccffdf2ada13 100644 --- a/clients/client-rds/src/commands/DeleteDBClusterParameterGroupCommand.ts +++ b/clients/client-rds/src/commands/DeleteDBClusterParameterGroupCommand.ts @@ -31,9 +31,6 @@ export interface DeleteDBClusterParameterGroupCommandOutput extends __MetadataBe * * Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide. *

    - * - *

    The Multi-AZ DB clusters feature is in preview and is subject to change.

    - *
    * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-rds/src/commands/DeleteDBClusterSnapshotCommand.ts b/clients/client-rds/src/commands/DeleteDBClusterSnapshotCommand.ts index 4c144a84ae6a..da00fea2c815 100644 --- a/clients/client-rds/src/commands/DeleteDBClusterSnapshotCommand.ts +++ b/clients/client-rds/src/commands/DeleteDBClusterSnapshotCommand.ts @@ -35,9 +35,6 @@ export interface DeleteDBClusterSnapshotCommandOutput extends DeleteDBClusterSna * * Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide. *

    - * - *

    The Multi-AZ DB clusters feature is in preview and is subject to change.

    - *
    * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-rds/src/commands/DescribeDBClusterParameterGroupsCommand.ts b/clients/client-rds/src/commands/DescribeDBClusterParameterGroupsCommand.ts index 1f938dd1da0d..3ba197a62d66 100644 --- a/clients/client-rds/src/commands/DescribeDBClusterParameterGroupsCommand.ts +++ b/clients/client-rds/src/commands/DescribeDBClusterParameterGroupsCommand.ts @@ -37,9 +37,6 @@ export interface DescribeDBClusterParameterGroupsCommandOutput * * Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide. *

    - * - *

    The Multi-AZ DB clusters feature is in preview and is subject to change.

    - *
    * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-rds/src/commands/DescribeDBClusterParametersCommand.ts b/clients/client-rds/src/commands/DescribeDBClusterParametersCommand.ts index 3a5e1c5fb8ba..ec509d11e583 100644 --- a/clients/client-rds/src/commands/DescribeDBClusterParametersCommand.ts +++ b/clients/client-rds/src/commands/DescribeDBClusterParametersCommand.ts @@ -31,9 +31,6 @@ export interface DescribeDBClusterParametersCommandOutput extends DBClusterParam * * Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide. *

    - * - *

    The Multi-AZ DB clusters feature is in preview and is subject to change.

    - *
    * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-rds/src/commands/DescribeDBClusterSnapshotsCommand.ts b/clients/client-rds/src/commands/DescribeDBClusterSnapshotsCommand.ts index f3f1827f70bb..9169d1d195ab 100644 --- a/clients/client-rds/src/commands/DescribeDBClusterSnapshotsCommand.ts +++ b/clients/client-rds/src/commands/DescribeDBClusterSnapshotsCommand.ts @@ -31,9 +31,6 @@ export interface DescribeDBClusterSnapshotsCommandOutput extends DBClusterSnapsh * * Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide. *

    - * - *

    The Multi-AZ DB clusters feature is in preview and is subject to change.

    - *
    * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-rds/src/commands/DescribeDBClustersCommand.ts b/clients/client-rds/src/commands/DescribeDBClustersCommand.ts index 5cf56310f3e4..f9a3d962056f 100644 --- a/clients/client-rds/src/commands/DescribeDBClustersCommand.ts +++ b/clients/client-rds/src/commands/DescribeDBClustersCommand.ts @@ -31,9 +31,6 @@ export interface DescribeDBClustersCommandOutput extends DBClusterMessage, __Met * * Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide. *

    - * - *

    The Multi-AZ DB clusters feature is in preview and is subject to change.

    - *
    *

    This operation can also return information for Amazon Neptune DB instances and Amazon DocumentDB instances.

    * @example * Use a bare-bones client and the command you need to make an API call. diff --git a/clients/client-rds/src/commands/FailoverDBClusterCommand.ts b/clients/client-rds/src/commands/FailoverDBClusterCommand.ts index 30184128974f..e25f6e6cd280 100644 --- a/clients/client-rds/src/commands/FailoverDBClusterCommand.ts +++ b/clients/client-rds/src/commands/FailoverDBClusterCommand.ts @@ -41,9 +41,6 @@ export interface FailoverDBClusterCommandOutput extends FailoverDBClusterResult, * * Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide. *

    - * - *

    The Multi-AZ DB clusters feature is in preview and is subject to change.

    - *
    * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-rds/src/commands/ModifyDBClusterCommand.ts b/clients/client-rds/src/commands/ModifyDBClusterCommand.ts index 31cbbb31ee0d..e839f547abde 100644 --- a/clients/client-rds/src/commands/ModifyDBClusterCommand.ts +++ b/clients/client-rds/src/commands/ModifyDBClusterCommand.ts @@ -33,9 +33,6 @@ export interface ModifyDBClusterCommandOutput extends ModifyDBClusterResult, __M * * Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide. *

    - * - *

    The Multi-AZ DB clusters feature is in preview and is subject to change.

    - *
    * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-rds/src/commands/ModifyDBClusterParameterGroupCommand.ts b/clients/client-rds/src/commands/ModifyDBClusterParameterGroupCommand.ts index 2621219fdfef..55f9fa3e38e2 100644 --- a/clients/client-rds/src/commands/ModifyDBClusterParameterGroupCommand.ts +++ b/clients/client-rds/src/commands/ModifyDBClusterParameterGroupCommand.ts @@ -53,9 +53,6 @@ export interface ModifyDBClusterParameterGroupCommandOutput * * Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide. *

    - * - *

    The Multi-AZ DB clusters feature is in preview and is subject to change.

    - *
    * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-rds/src/commands/RebootDBClusterCommand.ts b/clients/client-rds/src/commands/RebootDBClusterCommand.ts index 798c8c3f3442..04ae4a511e8c 100644 --- a/clients/client-rds/src/commands/RebootDBClusterCommand.ts +++ b/clients/client-rds/src/commands/RebootDBClusterCommand.ts @@ -32,8 +32,7 @@ export interface RebootDBClusterCommandOutput extends RebootDBClusterResult, __M * cluster results in a momentary outage, during which the DB cluster status is set to rebooting. *

    * - *

    Use this operation only for a non-Aurora Multi-AZ DB cluster. - * The Multi-AZ DB clusters feature is in preview and is subject to change.

    + *

    Use this operation only for a non-Aurora Multi-AZ DB cluster.

    * *

    For more information on Multi-AZ DB clusters, see * diff --git a/clients/client-rds/src/commands/RemoveRoleFromDBClusterCommand.ts b/clients/client-rds/src/commands/RemoveRoleFromDBClusterCommand.ts index 3022395e7a18..80a55ce21ce0 100644 --- a/clients/client-rds/src/commands/RemoveRoleFromDBClusterCommand.ts +++ b/clients/client-rds/src/commands/RemoveRoleFromDBClusterCommand.ts @@ -32,9 +32,6 @@ export interface RemoveRoleFromDBClusterCommandOutput extends __MetadataBearer { * * Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide. *

    - * - *

    The Multi-AZ DB clusters feature is in preview and is subject to change.

    - *
    * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-rds/src/commands/ResetDBClusterParameterGroupCommand.ts b/clients/client-rds/src/commands/ResetDBClusterParameterGroupCommand.ts index 531417f108ee..1250e26cb939 100644 --- a/clients/client-rds/src/commands/ResetDBClusterParameterGroupCommand.ts +++ b/clients/client-rds/src/commands/ResetDBClusterParameterGroupCommand.ts @@ -44,9 +44,6 @@ export interface ResetDBClusterParameterGroupCommandOutput * * Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide. *

    - * - *

    The Multi-AZ DB clusters feature is in preview and is subject to change.

    - *
    * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-rds/src/commands/RestoreDBClusterFromSnapshotCommand.ts b/clients/client-rds/src/commands/RestoreDBClusterFromSnapshotCommand.ts index fdc555593776..60ff95b06d65 100644 --- a/clients/client-rds/src/commands/RestoreDBClusterFromSnapshotCommand.ts +++ b/clients/client-rds/src/commands/RestoreDBClusterFromSnapshotCommand.ts @@ -44,9 +44,6 @@ export interface RestoreDBClusterFromSnapshotCommandOutput * * Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide. *

    - * - *

    The Multi-AZ DB clusters feature is in preview and is subject to change.

    - *
    * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-rds/src/commands/RestoreDBClusterToPointInTimeCommand.ts b/clients/client-rds/src/commands/RestoreDBClusterToPointInTimeCommand.ts index 43ef3abfbf8b..f767d465c5b5 100644 --- a/clients/client-rds/src/commands/RestoreDBClusterToPointInTimeCommand.ts +++ b/clients/client-rds/src/commands/RestoreDBClusterToPointInTimeCommand.ts @@ -45,9 +45,6 @@ export interface RestoreDBClusterToPointInTimeCommandOutput * * Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide. *

    - * - *

    The Multi-AZ DB clusters feature is in preview and is subject to change.

    - *
    * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-rds/src/models/models_0.ts b/clients/client-rds/src/models/models_0.ts index 47fd49b596ed..d6a6e39caef5 100644 --- a/clients/client-rds/src/models/models_0.ts +++ b/clients/client-rds/src/models/models_0.ts @@ -4125,8 +4125,15 @@ export interface CreateDBClusterMessage { /** *

    The list of log types that need to be enabled for exporting to CloudWatch Logs. The values - * in the list depend on the DB engine being used. For more information, see - * Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.

    + * in the list depend on the DB engine being used.

    + *

    + * RDS for MySQL + *

    + *

    Possible values are error, general, and slowquery.

    + *

    + * RDS for PostgreSQL + *

    + *

    Possible values are postgresql and upgrade.

    *

    * Aurora MySQL *

    @@ -4135,7 +4142,10 @@ export interface CreateDBClusterMessage { * Aurora PostgreSQL *

    *

    Possible value is postgresql.

    - *

    Valid for: Aurora DB clusters only

    + *

    For more information about exporting CloudWatch Logs for Amazon RDS, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Relational Database + * Service User Guide.

    + *

    For more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.

    + *

    Valid for: Aurora DB clusters and Multi-AZ DB clusters

    */ EnableCloudwatchLogsExports?: string[]; @@ -4219,7 +4229,7 @@ export interface CreateDBClusterMessage { /** *

    A value that indicates whether to copy all tags from the DB cluster to snapshots of the DB cluster. * The default is not to copy them.

    - *

    Valid for: Aurora DB clusters only

    + *

    Valid for: Aurora DB clusters and Multi-AZ DB clusters

    */ CopyTagsToSnapshot?: boolean; @@ -4686,9 +4696,6 @@ export namespace ScalingConfigurationInfo { * * Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide. *

    - * - *

    The Multi-AZ DB clusters feature is in preview and is subject to change.

    - *
    */ export interface DBCluster { /** @@ -5124,9 +5131,6 @@ export interface CreateDBClusterResult { * * Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide. *

    - * - *

    The Multi-AZ DB clusters feature is in preview and is subject to change.

    - *
    */ DBCluster?: DBCluster; } @@ -10486,9 +10490,6 @@ export interface DeleteDBClusterResult { * * Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide. *

    - * - *

    The Multi-AZ DB clusters feature is in preview and is subject to change.

    - *
    */ DBCluster?: DBCluster; } diff --git a/clients/client-rds/src/models/models_1.ts b/clients/client-rds/src/models/models_1.ts index 48904fd0321d..f83f831e0eb4 100644 --- a/clients/client-rds/src/models/models_1.ts +++ b/clients/client-rds/src/models/models_1.ts @@ -2243,7 +2243,6 @@ export interface OrderableDBInstanceOption { /** *

    Whether DB instances can be configured as a Multi-AZ DB cluster.

    - *

    The Multi-AZ DB clusters feature is in preview and is subject to change.

    *

    For more information on Multi-AZ DB clusters, see * * Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide. @@ -3305,9 +3304,6 @@ export interface FailoverDBClusterResult { * * Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide. *

    - * - *

    The Multi-AZ DB clusters feature is in preview and is subject to change.

    - *
    */ DBCluster?: DBCluster; } @@ -3928,8 +3924,28 @@ export interface ModifyDBClusterMessage { BacktrackWindow?: number; /** - *

    The configuration setting for the log types to be enabled for export to CloudWatch Logs for a specific DB cluster.

    - *

    Valid for: Aurora DB clusters only

    + *

    The configuration setting for the log types to be enabled for export to CloudWatch Logs for a specific DB cluster. The values + * in the list depend on the DB engine being used.

    + *

    + * RDS for MySQL + *

    + *

    Possible values are error, general, and slowquery.

    + *

    + * RDS for PostgreSQL + *

    + *

    Possible values are postgresql and upgrade.

    + *

    + * Aurora MySQL + *

    + *

    Possible values are audit, error, general, and slowquery.

    + *

    + * Aurora PostgreSQL + *

    + *

    Possible value is postgresql.

    + *

    For more information about exporting CloudWatch Logs for Amazon RDS, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Relational Database + * Service User Guide.

    + *

    For more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.

    + *

    Valid for: Aurora DB clusters and Multi-AZ DB clusters

    */ CloudwatchLogsExportConfiguration?: CloudwatchLogsExportConfiguration; @@ -4039,7 +4055,7 @@ export interface ModifyDBClusterMessage { /** *

    A value that indicates whether to copy all tags from the DB cluster to snapshots of the DB cluster. * The default is not to copy them.

    - *

    Valid for: Aurora DB clusters only

    + *

    Valid for: Aurora DB clusters and Multi-AZ DB clusters

    */ CopyTagsToSnapshot?: boolean; @@ -4182,9 +4198,6 @@ export interface ModifyDBClusterResult { * * Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide. *

    - * - *

    The Multi-AZ DB clusters feature is in preview and is subject to change.

    - *
    */ DBCluster?: DBCluster; } @@ -6120,9 +6133,6 @@ export interface PromoteReadReplicaDBClusterResult { * * Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide. *

    - * - *

    The Multi-AZ DB clusters feature is in preview and is subject to change.

    - *
    */ DBCluster?: DBCluster; } @@ -6277,9 +6287,6 @@ export interface RebootDBClusterResult { * * Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide. *

    - * - *

    The Multi-AZ DB clusters feature is in preview and is subject to change.

    - *
    */ DBCluster?: DBCluster; } @@ -7038,8 +7045,16 @@ export interface RestoreDBClusterFromS3Message { /** *

    The list of logs that the restored DB cluster is to export to CloudWatch Logs. The values - * in the list depend on the DB engine being used. For more information, see - * Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.

    + * in the list depend on the DB engine being used.

    + *

    + * Aurora MySQL + *

    + *

    Possible values are audit, error, general, and slowquery.

    + *

    + * Aurora PostgreSQL + *

    + *

    Possible value is postgresql.

    + *

    For more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.

    */ EnableCloudwatchLogsExports?: string[]; @@ -7104,9 +7119,6 @@ export interface RestoreDBClusterFromS3Result { * * Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide. *

    - * - *

    The Multi-AZ DB clusters feature is in preview and is subject to change.

    - *
    */ DBCluster?: DBCluster; } @@ -7360,9 +7372,26 @@ export interface RestoreDBClusterFromSnapshotMessage { /** *

    The list of logs that the restored DB cluster is to export to Amazon CloudWatch Logs. * The values in the list depend on the DB engine being used.

    - *

    For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon - * Aurora User Guide.

    - *

    Valid for: Aurora DB clusters only

    + *

    + * RDS for MySQL + *

    + *

    Possible values are error, general, and slowquery.

    + *

    + * RDS for PostgreSQL + *

    + *

    Possible values are postgresql and upgrade.

    + *

    + * Aurora MySQL + *

    + *

    Possible values are audit, error, general, and slowquery.

    + *

    + * Aurora PostgreSQL + *

    + *

    Possible value is postgresql.

    + *

    For more information about exporting CloudWatch Logs for Amazon RDS, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Relational Database + * Service User Guide.

    + *

    For more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.

    + *

    Valid for: Aurora DB clusters and Multi-AZ DB clusters

    */ EnableCloudwatchLogsExports?: string[]; @@ -7414,7 +7443,7 @@ export interface RestoreDBClusterFromSnapshotMessage { /** *

    A value that indicates whether to copy all tags from the restored DB cluster to snapshots of the restored DB cluster. The default is not to copy them.

    - *

    Valid for: Aurora DB clusters only

    + *

    Valid for: Aurora DB clusters and Multi-AZ DB clusters

    */ CopyTagsToSnapshot?: boolean; @@ -7530,9 +7559,6 @@ export interface RestoreDBClusterFromSnapshotResult { * * Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide. *

    - * - *

    The Multi-AZ DB clusters feature is in preview and is subject to change.

    - *
    */ DBCluster?: DBCluster; } @@ -7724,8 +7750,26 @@ export interface RestoreDBClusterToPointInTimeMessage { /** *

    The list of logs that the restored DB cluster is to export to CloudWatch Logs. The values * in the list depend on the DB engine being used.

    - *

    For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.

    - *

    Valid for: Aurora DB clusters only

    + *

    + * RDS for MySQL + *

    + *

    Possible values are error, general, and slowquery.

    + *

    + * RDS for PostgreSQL + *

    + *

    Possible values are postgresql and upgrade.

    + *

    + * Aurora MySQL + *

    + *

    Possible values are audit, error, general, and slowquery.

    + *

    + * Aurora PostgreSQL + *

    + *

    Possible value is postgresql.

    + *

    For more information about exporting CloudWatch Logs for Amazon RDS, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Relational Database + * Service User Guide.

    + *

    For more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.

    + *

    Valid for: Aurora DB clusters and Multi-AZ DB clusters

    */ EnableCloudwatchLogsExports?: string[]; @@ -7761,7 +7805,7 @@ export interface RestoreDBClusterToPointInTimeMessage { /** *

    A value that indicates whether to copy all tags from the restored DB cluster to snapshots of the restored DB cluster. The default is not to copy them.

    - *

    Valid for: Aurora DB clusters only

    + *

    Valid for: Aurora DB clusters and Multi-AZ DB clusters

    */ CopyTagsToSnapshot?: boolean; @@ -7898,9 +7942,6 @@ export interface RestoreDBClusterToPointInTimeResult { * * Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide. *

    - * - *

    The Multi-AZ DB clusters feature is in preview and is subject to change.

    - *
    */ DBCluster?: DBCluster; } @@ -9505,9 +9546,6 @@ export interface StartDBClusterResult { * * Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide. *

    - * - *

    The Multi-AZ DB clusters feature is in preview and is subject to change.

    - *
    */ DBCluster?: DBCluster; } @@ -9906,9 +9944,6 @@ export interface StopDBClusterResult { * * Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide. *

    - * - *

    The Multi-AZ DB clusters feature is in preview and is subject to change.

    - *
    */ DBCluster?: DBCluster; } diff --git a/clients/client-route53-recovery-cluster/README.md b/clients/client-route53-recovery-cluster/README.md index a916a7989367..426cdcb7090d 100644 --- a/clients/client-route53-recovery-cluster/README.md +++ b/clients/client-route53-recovery-cluster/README.md @@ -7,26 +7,43 @@ AWS SDK for JavaScript Route53RecoveryCluster Client for Node.js, Browser and React Native. -

    Welcome to the Amazon Route 53 Application Recovery Controller API Reference Guide for Recovery Control Data Plane .

    -

    Recovery control in Route 53 Application Recovery Controller includes extremely reliable routing controls that enable you to recover applications -by rerouting traffic, for example, across Availability Zones or AWS Regions. Routing controls are simple on/off switches -hosted on a cluster. A cluster is a set of five redundant regional endpoints against which you can execute API calls to update or -get the state of routing controls. You use routing controls to failover traffic to recover your application -across Availability Zones or Regions.

    -

    This API guide includes information about how to get and update routing control states in Route 53 Application Recovery Controller.

    -

    For more information about Route 53 Application Recovery Controller, see the following:

    +

    Welcome to the Routing Control (Recovery Cluster) API Reference Guide for Amazon Route 53 Application Recovery Controller.

    +

    With Amazon Route 53 Application Recovery Controller, you can use routing control with extreme reliability to +recover applications by rerouting traffic across +Availability Zones or AWS Regions. Routing controls are simple on/off switches hosted +on a highly available cluster in Application Recovery Controller. A cluster provides a set of five redundant Regional endpoints against which you +can run API calls to get or update the state of routing controls. To implement failover, you set +one routing control on and another one off, to reroute traffic from one Availability Zone or Amazon Web Services Region +to another.

    +

    +Be aware that you must specify the Regional endpoints for a cluster when you work with API cluster operations +to get or update routing control states in Application Recovery Controller. In addition, you must specify the US West (Oregon) Region +for Application Recovery Controller API calls. For example, use the parameter region us-west-2 with AWS CLI commands. +For more information, see + +Get and update routing control states using the API in the Amazon Route 53 Application Recovery Controller Developer Guide.

    +

    This API guide includes information about the API operations for how to get and update routing control states +in Application Recovery Controller. You also must set up the structures to support routing controls: clusters and control panels.

    +

    For more information about working with routing control in Application Recovery Controller, see the following:

    diff --git a/clients/client-route53-recovery-cluster/src/Route53RecoveryCluster.ts b/clients/client-route53-recovery-cluster/src/Route53RecoveryCluster.ts index 8bb1cc79f63d..2f1f4ff92be0 100644 --- a/clients/client-route53-recovery-cluster/src/Route53RecoveryCluster.ts +++ b/clients/client-route53-recovery-cluster/src/Route53RecoveryCluster.ts @@ -18,41 +18,78 @@ import { import { Route53RecoveryClusterClient } from "./Route53RecoveryClusterClient"; /** - *

    Welcome to the Amazon Route 53 Application Recovery Controller API Reference Guide for Recovery Control Data Plane .

    - *

    Recovery control in Route 53 Application Recovery Controller includes extremely reliable routing controls that enable you to recover applications - * by rerouting traffic, for example, across Availability Zones or AWS Regions. Routing controls are simple on/off switches - * hosted on a cluster. A cluster is a set of five redundant regional endpoints against which you can execute API calls to update or - * get the state of routing controls. You use routing controls to failover traffic to recover your application - * across Availability Zones or Regions.

    - *

    This API guide includes information about how to get and update routing control states in Route 53 Application Recovery Controller.

    - *

    For more information about Route 53 Application Recovery Controller, see the following:

    - *
      + *

      Welcome to the Routing Control (Recovery Cluster) API Reference Guide for Amazon Route 53 Application Recovery Controller.

      + *

      With Amazon Route 53 Application Recovery Controller, you can use routing control with extreme reliability to + * recover applications by rerouting traffic across + * Availability Zones or AWS Regions. Routing controls are simple on/off switches hosted + * on a highly available cluster in Application Recovery Controller. A cluster provides a set of five redundant Regional endpoints against which you + * can run API calls to get or update the state of routing controls. To implement failover, you set + * one routing control on and another one off, to reroute traffic from one Availability Zone or Amazon Web Services Region + * to another.

      + *

      + * Be aware that you must specify the Regional endpoints for a cluster when you work with API cluster operations + * to get or update routing control states in Application Recovery Controller. In addition, you must specify the US West (Oregon) Region + * for Application Recovery Controller API calls. For example, use the parameter region us-west-2 with AWS CLI commands. + * For more information, see + * + * Get and update routing control states using the API in the Amazon Route 53 Application Recovery Controller Developer Guide.

      + *

      This API guide includes information about the API operations for how to get and update routing control states + * in Application Recovery Controller. You also must set up the structures to support routing controls: clusters and control panels.

      + *

      For more information about working with routing control in Application Recovery Controller, see the following:

      + * */ export class Route53RecoveryCluster extends Route53RecoveryClusterClient { /** - *

      Get the state for a routing control. A routing control is a simple on/off switch - * that you can use to route traffic to cells. When the state is On, traffic flows to a cell. When it's off, traffic does not flow.

      - *

      Before you can create a routing control, you first must create a cluster to host the control. - * For more information, see - * CreateCluster. - * Access one of the endpoints for the cluster to get or update the routing control state to - * redirect traffic.

      - *

      For more information about working with routing controls, see - * Routing control - * in the Route 53 Application Recovery Controller Developer Guide.

      + *

      Get the state for a routing control. A routing control is a simple on/off switch that you + * can use to route traffic to cells. When the state is On, traffic flows to a cell. When + * it's Off, traffic does not flow.

      + *

      Before you can create a routing control, you must first create a cluster to host the control + * in a control panel. For more information, see + * Create routing control structures in the Amazon Route 53 Application Recovery Controller Developer Guide. + * Then you access one of the endpoints for the cluster to get or update the routing control state to + * redirect traffic.

      + *

      + * You must specify Regional endpoints when you work with API cluster operations + * to get or update routing control states in Application Recovery Controller. + *

      + *

      To see a code example for getting a routing control state, including accessing Regional cluster endpoints + * in sequence, see API examples + * in the Amazon Route 53 Application Recovery Controller Developer Guide.

      + *

      Learn more about working with routing controls in the following topics in the + * Amazon Route 53 Application Recovery Controller Developer Guide:

      + * */ public getRoutingControlState( args: GetRoutingControlStateCommandInput, @@ -84,11 +121,40 @@ export class Route53RecoveryCluster extends Route53RecoveryClusterClient { } /** - *

      Set the state of the routing control to reroute traffic. You can set the value to be On or Off. - * When the state is On, traffic flows to a cell. When it's off, traffic does not flow.

      - *

      For more information about working with routing controls, see - * Routing control - * in the Route 53 Application Recovery Controller Developer Guide.

      + *

      Set the state of the routing control to reroute traffic. You can set the value to be On or + * Off. When the state is On, traffic flows to a cell. When it's Off, traffic does not + * flow.

      + *

      With Application Recovery Controller, you can add safety rules for routing controls, which are safeguards for routing + * control state updates that help prevent unexpected outcomes, like fail open traffic routing. However, + * there are scenarios when you might want to bypass the routing control safeguards that are enforced with + * safety rules that you've configured. For example, you might want to fail over quickly for disaster recovery, + * and one or more safety rules might be unexpectedly preventing you from updating a routing control state to + * reroute traffic. In a "break glass" scenario like this, you can override one or more safety rules to change + * a routing control state and fail over your application.

      + *

      The SafetyRulesToOverride property enables you to override one or more safety rules and + * update routing control states. For more information, see + * + * Override safety rules to reroute traffic in the Amazon Route 53 Application Recovery Controller Developer Guide.

      + *

      + * You must specify Regional endpoints when you work with API cluster operations + * to get or update routing control states in Application Recovery Controller. + *

      + *

      To see a code example for getting a routing control state, including accessing Regional cluster endpoints + * in sequence, see API examples + * in the Amazon Route 53 Application Recovery Controller Developer Guide.

      + * */ public updateRoutingControlState( args: UpdateRoutingControlStateCommandInput, @@ -121,10 +187,39 @@ export class Route53RecoveryCluster extends Route53RecoveryClusterClient { /** *

      Set multiple routing control states. You can set the value for each state to be On or Off. - * When the state is On, traffic flows to a cell. When it's off, traffic does not flow.

      - *

      For more information about working with routing controls, see - * Routing control - * in the Route 53 Application Recovery Controller Developer Guide.

      + * When the state is On, traffic flows to a cell. When it's Off, traffic does not + * flow.

      + *

      With Application Recovery Controller, you can add safety rules for routing controls, which are safeguards for routing + * control state updates that help prevent unexpected outcomes, like fail open traffic routing. However, + * there are scenarios when you might want to bypass the routing control safeguards that are enforced with + * safety rules that you've configured. For example, you might want to fail over quickly for disaster recovery, + * and one or more safety rules might be unexpectedly preventing you from updating a routing control state to + * reroute traffic. In a "break glass" scenario like this, you can override one or more safety rules to change + * a routing control state and fail over your application.

      + *

      The SafetyRulesToOverride property enables you to override one or more safety rules and + * update routing control states. For more information, see + * + * Override safety rules to reroute traffic in the Amazon Route 53 Application Recovery Controller Developer Guide.

      + *

      + * You must specify Regional endpoints when you work with API cluster operations + * to get or update routing control states in Application Recovery Controller. + *

      + *

      To see a code example for getting a routing control state, including accessing Regional cluster endpoints + * in sequence, see API examples + * in the Amazon Route 53 Application Recovery Controller Developer Guide.

      + * */ public updateRoutingControlStates( args: UpdateRoutingControlStatesCommandInput, diff --git a/clients/client-route53-recovery-cluster/src/Route53RecoveryClusterClient.ts b/clients/client-route53-recovery-cluster/src/Route53RecoveryClusterClient.ts index 7f3c4b1d17ed..8597d7501ad7 100644 --- a/clients/client-route53-recovery-cluster/src/Route53RecoveryClusterClient.ts +++ b/clients/client-route53-recovery-cluster/src/Route53RecoveryClusterClient.ts @@ -229,27 +229,44 @@ type Route53RecoveryClusterClientResolvedConfigType = __SmithyResolvedConfigurat export interface Route53RecoveryClusterClientResolvedConfig extends Route53RecoveryClusterClientResolvedConfigType {} /** - *

      Welcome to the Amazon Route 53 Application Recovery Controller API Reference Guide for Recovery Control Data Plane .

      - *

      Recovery control in Route 53 Application Recovery Controller includes extremely reliable routing controls that enable you to recover applications - * by rerouting traffic, for example, across Availability Zones or AWS Regions. Routing controls are simple on/off switches - * hosted on a cluster. A cluster is a set of five redundant regional endpoints against which you can execute API calls to update or - * get the state of routing controls. You use routing controls to failover traffic to recover your application - * across Availability Zones or Regions.

      - *

      This API guide includes information about how to get and update routing control states in Route 53 Application Recovery Controller.

      - *

      For more information about Route 53 Application Recovery Controller, see the following:

      - *
        + *

        Welcome to the Routing Control (Recovery Cluster) API Reference Guide for Amazon Route 53 Application Recovery Controller.

        + *

        With Amazon Route 53 Application Recovery Controller, you can use routing control with extreme reliability to + * recover applications by rerouting traffic across + * Availability Zones or AWS Regions. Routing controls are simple on/off switches hosted + * on a highly available cluster in Application Recovery Controller. A cluster provides a set of five redundant Regional endpoints against which you + * can run API calls to get or update the state of routing controls. To implement failover, you set + * one routing control on and another one off, to reroute traffic from one Availability Zone or Amazon Web Services Region + * to another.

        + *

        + * Be aware that you must specify the Regional endpoints for a cluster when you work with API cluster operations + * to get or update routing control states in Application Recovery Controller. In addition, you must specify the US West (Oregon) Region + * for Application Recovery Controller API calls. For example, use the parameter region us-west-2 with AWS CLI commands. + * For more information, see + * + * Get and update routing control states using the API in the Amazon Route 53 Application Recovery Controller Developer Guide.

        + *

        This API guide includes information about the API operations for how to get and update routing control states + * in Application Recovery Controller. You also must set up the structures to support routing controls: clusters and control panels.

        + *

        For more information about working with routing control in Application Recovery Controller, see the following:

        + * */ export class Route53RecoveryClusterClient extends __Client< diff --git a/clients/client-route53-recovery-cluster/src/commands/GetRoutingControlStateCommand.ts b/clients/client-route53-recovery-cluster/src/commands/GetRoutingControlStateCommand.ts index 8b0fca90ae8b..521e8074207b 100644 --- a/clients/client-route53-recovery-cluster/src/commands/GetRoutingControlStateCommand.ts +++ b/clients/client-route53-recovery-cluster/src/commands/GetRoutingControlStateCommand.ts @@ -26,16 +26,36 @@ export interface GetRoutingControlStateCommandInput extends GetRoutingControlSta export interface GetRoutingControlStateCommandOutput extends GetRoutingControlStateResponse, __MetadataBearer {} /** - *

        Get the state for a routing control. A routing control is a simple on/off switch - * that you can use to route traffic to cells. When the state is On, traffic flows to a cell. When it's off, traffic does not flow.

        - *

        Before you can create a routing control, you first must create a cluster to host the control. - * For more information, see - * CreateCluster. - * Access one of the endpoints for the cluster to get or update the routing control state to - * redirect traffic.

        - *

        For more information about working with routing controls, see - * Routing control - * in the Route 53 Application Recovery Controller Developer Guide.

        + *

        Get the state for a routing control. A routing control is a simple on/off switch that you + * can use to route traffic to cells. When the state is On, traffic flows to a cell. When + * it's Off, traffic does not flow.

        + *

        Before you can create a routing control, you must first create a cluster to host the control + * in a control panel. For more information, see + * Create routing control structures in the Amazon Route 53 Application Recovery Controller Developer Guide. + * Then you access one of the endpoints for the cluster to get or update the routing control state to + * redirect traffic.

        + *

        + * You must specify Regional endpoints when you work with API cluster operations + * to get or update routing control states in Application Recovery Controller. + *

        + *

        To see a code example for getting a routing control state, including accessing Regional cluster endpoints + * in sequence, see API examples + * in the Amazon Route 53 Application Recovery Controller Developer Guide.

        + *

        Learn more about working with routing controls in the following topics in the + * Amazon Route 53 Application Recovery Controller Developer Guide:

        + * * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-route53-recovery-cluster/src/commands/UpdateRoutingControlStateCommand.ts b/clients/client-route53-recovery-cluster/src/commands/UpdateRoutingControlStateCommand.ts index 83ee1000f07d..d673541421b0 100644 --- a/clients/client-route53-recovery-cluster/src/commands/UpdateRoutingControlStateCommand.ts +++ b/clients/client-route53-recovery-cluster/src/commands/UpdateRoutingControlStateCommand.ts @@ -26,11 +26,40 @@ export interface UpdateRoutingControlStateCommandInput extends UpdateRoutingCont export interface UpdateRoutingControlStateCommandOutput extends UpdateRoutingControlStateResponse, __MetadataBearer {} /** - *

        Set the state of the routing control to reroute traffic. You can set the value to be On or Off. - * When the state is On, traffic flows to a cell. When it's off, traffic does not flow.

        - *

        For more information about working with routing controls, see - * Routing control - * in the Route 53 Application Recovery Controller Developer Guide.

        + *

        Set the state of the routing control to reroute traffic. You can set the value to be On or + * Off. When the state is On, traffic flows to a cell. When it's Off, traffic does not + * flow.

        + *

        With Application Recovery Controller, you can add safety rules for routing controls, which are safeguards for routing + * control state updates that help prevent unexpected outcomes, like fail open traffic routing. However, + * there are scenarios when you might want to bypass the routing control safeguards that are enforced with + * safety rules that you've configured. For example, you might want to fail over quickly for disaster recovery, + * and one or more safety rules might be unexpectedly preventing you from updating a routing control state to + * reroute traffic. In a "break glass" scenario like this, you can override one or more safety rules to change + * a routing control state and fail over your application.

        + *

        The SafetyRulesToOverride property enables you to override one or more safety rules and + * update routing control states. For more information, see + * + * Override safety rules to reroute traffic in the Amazon Route 53 Application Recovery Controller Developer Guide.

        + *

        + * You must specify Regional endpoints when you work with API cluster operations + * to get or update routing control states in Application Recovery Controller. + *

        + *

        To see a code example for getting a routing control state, including accessing Regional cluster endpoints + * in sequence, see API examples + * in the Amazon Route 53 Application Recovery Controller Developer Guide.

        + * * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-route53-recovery-cluster/src/commands/UpdateRoutingControlStatesCommand.ts b/clients/client-route53-recovery-cluster/src/commands/UpdateRoutingControlStatesCommand.ts index 9b4e3ec64745..62ffa8029f91 100644 --- a/clients/client-route53-recovery-cluster/src/commands/UpdateRoutingControlStatesCommand.ts +++ b/clients/client-route53-recovery-cluster/src/commands/UpdateRoutingControlStatesCommand.ts @@ -27,10 +27,39 @@ export interface UpdateRoutingControlStatesCommandOutput extends UpdateRoutingCo /** *

        Set multiple routing control states. You can set the value for each state to be On or Off. - * When the state is On, traffic flows to a cell. When it's off, traffic does not flow.

        - *

        For more information about working with routing controls, see - * Routing control - * in the Route 53 Application Recovery Controller Developer Guide.

        + * When the state is On, traffic flows to a cell. When it's Off, traffic does not + * flow.

        + *

        With Application Recovery Controller, you can add safety rules for routing controls, which are safeguards for routing + * control state updates that help prevent unexpected outcomes, like fail open traffic routing. However, + * there are scenarios when you might want to bypass the routing control safeguards that are enforced with + * safety rules that you've configured. For example, you might want to fail over quickly for disaster recovery, + * and one or more safety rules might be unexpectedly preventing you from updating a routing control state to + * reroute traffic. In a "break glass" scenario like this, you can override one or more safety rules to change + * a routing control state and fail over your application.

        + *

        The SafetyRulesToOverride property enables you to override one or more safety rules and + * update routing control states. For more information, see + * + * Override safety rules to reroute traffic in the Amazon Route 53 Application Recovery Controller Developer Guide.

        + *

        + * You must specify Regional endpoints when you work with API cluster operations + * to get or update routing control states in Application Recovery Controller. + *

        + *

        To see a code example for getting a routing control state, including accessing Regional cluster endpoints + * in sequence, see API examples + * in the Amazon Route 53 Application Recovery Controller Developer Guide.

        + * * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-route53-recovery-cluster/src/models/models_0.ts b/clients/client-route53-recovery-cluster/src/models/models_0.ts index b96fc451565e..f55bc7486610 100644 --- a/clients/client-route53-recovery-cluster/src/models/models_0.ts +++ b/clients/client-route53-recovery-cluster/src/models/models_0.ts @@ -267,6 +267,15 @@ export interface UpdateRoutingControlStateRequest { *

        The state of the routing control. You can set the value to be On or Off.

        */ RoutingControlState: RoutingControlState | string | undefined; + + /** + *

        The Amazon Resource Names (ARNs) for the safety rules that you want to override when you're updating the state of + * a routing control. You can override one safety rule or multiple safety rules by including one or more ARNs, separated + * by commas.

        + *

        For more information, see + * Override safety rules to reroute traffic in the Amazon Route 53 Application Recovery Controller Developer Guide.

        + */ + SafetyRulesToOverride?: string[]; } export namespace UpdateRoutingControlStateRequest { @@ -290,11 +299,11 @@ export namespace UpdateRoutingControlStateResponse { } /** - *

        A routing control state.

        + *

        A routing control state entry.

        */ export interface UpdateRoutingControlStateEntry { /** - *

        The Amazon Resource Number (ARN) for the routing control state entry.

        + *

        The Amazon Resource Name (ARN) for a routing control state entry.

        */ RoutingControlArn: string | undefined; @@ -318,6 +327,15 @@ export interface UpdateRoutingControlStatesRequest { *

        A set of routing control entries that you want to update.

        */ UpdateRoutingControlStateEntries: UpdateRoutingControlStateEntry[] | undefined; + + /** + *

        The Amazon Resource Names (ARNs) for the safety rules that you want to override when you're updating routing + * control states. You can override one safety rule or multiple safety rules by including one or more ARNs, separated + * by commas.

        + *

        For more information, see + * Override safety rules to reroute traffic in the Amazon Route 53 Application Recovery Controller Developer Guide.

        + */ + SafetyRulesToOverride?: string[]; } export namespace UpdateRoutingControlStatesRequest { diff --git a/clients/client-route53-recovery-cluster/src/protocols/Aws_json1_0.ts b/clients/client-route53-recovery-cluster/src/protocols/Aws_json1_0.ts index df7cb7d54455..60535cc35116 100644 --- a/clients/client-route53-recovery-cluster/src/protocols/Aws_json1_0.ts +++ b/clients/client-route53-recovery-cluster/src/protocols/Aws_json1_0.ts @@ -352,6 +352,17 @@ const deserializeAws_json1_0ValidationExceptionResponse = async ( return __decorateServiceException(exception, body); }; +const serializeAws_json1_0Arns = (input: string[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return entry; + }); +}; + const serializeAws_json1_0GetRoutingControlStateRequest = ( input: GetRoutingControlStateRequest, context: __SerdeContext @@ -397,6 +408,10 @@ const serializeAws_json1_0UpdateRoutingControlStateRequest = ( input.RoutingControlArn !== null && { RoutingControlArn: input.RoutingControlArn }), ...(input.RoutingControlState !== undefined && input.RoutingControlState !== null && { RoutingControlState: input.RoutingControlState }), + ...(input.SafetyRulesToOverride !== undefined && + input.SafetyRulesToOverride !== null && { + SafetyRulesToOverride: serializeAws_json1_0Arns(input.SafetyRulesToOverride, context), + }), }; }; @@ -405,6 +420,10 @@ const serializeAws_json1_0UpdateRoutingControlStatesRequest = ( context: __SerdeContext ): any => { return { + ...(input.SafetyRulesToOverride !== undefined && + input.SafetyRulesToOverride !== null && { + SafetyRulesToOverride: serializeAws_json1_0Arns(input.SafetyRulesToOverride, context), + }), ...(input.UpdateRoutingControlStateEntries !== undefined && input.UpdateRoutingControlStateEntries !== null && { UpdateRoutingControlStateEntries: serializeAws_json1_0UpdateRoutingControlStateEntries( diff --git 
a/clients/client-service-catalog-appregistry/src/models/models_0.ts b/clients/client-service-catalog-appregistry/src/models/models_0.ts index 0dfe6fdcfdae..bd7aa13a992d 100644 --- a/clients/client-service-catalog-appregistry/src/models/models_0.ts +++ b/clients/client-service-catalog-appregistry/src/models/models_0.ts @@ -1298,6 +1298,8 @@ export interface UpdateApplicationRequest { application: string | undefined; /** + * @deprecated + * *

        The new name of the application. The name must be unique in the region in which you are updating the application.

        */ name?: string; @@ -1340,6 +1342,8 @@ export interface UpdateAttributeGroupRequest { attributeGroup: string | undefined; /** + * @deprecated + * *

        The new name of the attribute group. The name must be unique in the region in which you are * updating the attribute group.

        */ diff --git a/clients/client-sns/src/endpoints.ts b/clients/client-sns/src/endpoints.ts index eb2846e54a4e..d6d6e61e635a 100644 --- a/clients/client-sns/src/endpoints.ts +++ b/clients/client-sns/src/endpoints.ts @@ -22,19 +22,17 @@ const regionHash: RegionHash = { variants: [ { hostname: "sns.us-gov-east-1.amazonaws.com", - tags: [], + tags: ["fips"], }, ], - signingRegion: "us-gov-east-1", }, "us-gov-west-1": { variants: [ { hostname: "sns.us-gov-west-1.amazonaws.com", - tags: [], + tags: ["fips"], }, ], - signingRegion: "us-gov-west-1", }, "us-west-1": { variants: [ @@ -155,7 +153,7 @@ const partitionHash: PartitionHash = { ], }, "aws-us-gov": { - regions: ["us-gov-east-1", "us-gov-west-1"], + regions: ["fips-us-gov-east-1", "fips-us-gov-west-1", "us-gov-east-1", "us-gov-west-1"], regionRegex: "^us\\-gov\\-\\w+\\-\\d+$", variants: [ { diff --git a/clients/client-sqs/src/endpoints.ts b/clients/client-sqs/src/endpoints.ts index 6aae25eeefac..4cc428cd6da2 100644 --- a/clients/client-sqs/src/endpoints.ts +++ b/clients/client-sqs/src/endpoints.ts @@ -163,7 +163,7 @@ const partitionHash: PartitionHash = { tags: [], }, { - hostname: "sqs-fips.{region}.amazonaws.com", + hostname: "sqs.{region}.amazonaws.com", tags: ["fips"], }, { diff --git a/clients/client-sts/src/STS.ts b/clients/client-sts/src/STS.ts index 3c7a3c6beaf8..da44cb814198 100644 --- a/clients/client-sts/src/STS.ts +++ b/clients/client-sts/src/STS.ts @@ -75,8 +75,8 @@ export class STS extends STSClient { * Session * Policies in the IAM User Guide.

        *

        When you create a role, you create two policies: A role trust policy that specifies - * who can assume the role and a permissions policy that specifies - * what can be done with the role. You specify the trusted principal + * who can assume the role and a permissions policy that specifies + * what can be done with the role. You specify the trusted principal * who is allowed to assume the role in the role trust policy.

        *

        To assume a role from a different account, your Amazon Web Services account must be trusted by the * role. The trust relationship is defined in the role's trust policy when the role is @@ -90,8 +90,8 @@ export class STS extends STSClient { * following:

        *
          *
        • - *

          Attach a policy to the user that allows the user to call - * AssumeRole (as long as the role's trust policy trusts the account).

          + *

          Attach a policy to the user that allows the user to call AssumeRole + * (as long as the role's trust policy trusts the account).

          *
        • *
        • *

          Add the user as a principal directly in the role's trust policy.

          @@ -326,9 +326,9 @@ export class STS extends STSClient { /** *

          Returns a set of temporary security credentials for users who have been authenticated in - * a mobile or web application with a web identity provider. Example providers include Amazon Cognito, - * Login with Amazon, Facebook, Google, or any OpenID Connect-compatible identity - * provider.

          + * a mobile or web application with a web identity provider. Example providers include the + * OAuth 2.0 providers Login with Amazon and Facebook, or any OpenID Connect-compatible + * identity provider such as Google or Amazon Cognito federated identities.

          * *

          For mobile applications, we recommend that you use Amazon Cognito. You can use Amazon Cognito with the * Amazon Web Services SDK for iOS Developer Guide and the Amazon Web Services SDK for Android Developer Guide to uniquely diff --git a/clients/client-sts/src/commands/AssumeRoleCommand.ts b/clients/client-sts/src/commands/AssumeRoleCommand.ts index b2ba1a369099..83c3d70d2018 100644 --- a/clients/client-sts/src/commands/AssumeRoleCommand.ts +++ b/clients/client-sts/src/commands/AssumeRoleCommand.ts @@ -48,8 +48,8 @@ export interface AssumeRoleCommandOutput extends AssumeRoleResponse, __MetadataB * Session * Policies in the IAM User Guide.

          *

          When you create a role, you create two policies: A role trust policy that specifies - * who can assume the role and a permissions policy that specifies - * what can be done with the role. You specify the trusted principal + * who can assume the role and a permissions policy that specifies + * what can be done with the role. You specify the trusted principal * who is allowed to assume the role in the role trust policy.

          *

          To assume a role from a different account, your Amazon Web Services account must be trusted by the * role. The trust relationship is defined in the role's trust policy when the role is @@ -63,8 +63,8 @@ export interface AssumeRoleCommandOutput extends AssumeRoleResponse, __MetadataB * following:

          *
            *
          • - *

            Attach a policy to the user that allows the user to call - * AssumeRole (as long as the role's trust policy trusts the account).

            + *

            Attach a policy to the user that allows the user to call AssumeRole + * (as long as the role's trust policy trusts the account).

            *
          • *
          • *

            Add the user as a principal directly in the role's trust policy.

            diff --git a/clients/client-sts/src/commands/AssumeRoleWithWebIdentityCommand.ts b/clients/client-sts/src/commands/AssumeRoleWithWebIdentityCommand.ts index 3f8214dab2c3..33d4282dc6f1 100644 --- a/clients/client-sts/src/commands/AssumeRoleWithWebIdentityCommand.ts +++ b/clients/client-sts/src/commands/AssumeRoleWithWebIdentityCommand.ts @@ -23,9 +23,9 @@ export interface AssumeRoleWithWebIdentityCommandOutput extends AssumeRoleWithWe /** *

            Returns a set of temporary security credentials for users who have been authenticated in - * a mobile or web application with a web identity provider. Example providers include Amazon Cognito, - * Login with Amazon, Facebook, Google, or any OpenID Connect-compatible identity - * provider.

            + * a mobile or web application with a web identity provider. Example providers include the + * OAuth 2.0 providers Login with Amazon and Facebook, or any OpenID Connect-compatible + * identity provider such as Google or Amazon Cognito federated identities.

            * *

            For mobile applications, we recommend that you use Amazon Cognito. You can use Amazon Cognito with the * Amazon Web Services SDK for iOS Developer Guide and the Amazon Web Services SDK for Android Developer Guide to uniquely diff --git a/clients/client-sts/src/models/models_0.ts b/clients/client-sts/src/models/models_0.ts index 1d7865af3389..6a57d214c6bd 100644 --- a/clients/client-sts/src/models/models_0.ts +++ b/clients/client-sts/src/models/models_0.ts @@ -219,7 +219,7 @@ export interface AssumeRoleRequest { *

            Additionally, if you used temporary credentials to perform this operation, the new * session inherits any transitive session tags from the calling session. If you pass a * session tag with the same key as an inherited tag, the operation fails. To view the - * inherited tags for a session, see the CloudTrail logs. For more information, see Viewing Session Tags in CloudTrail in the + * inherited tags for a session, see the CloudTrail logs. For more information, see Viewing Session Tags in CloudTrail in the * IAM User Guide.

            */ Tags?: Tag[]; @@ -775,11 +775,11 @@ export interface AssumeRoleWithWebIdentityRequest { WebIdentityToken: string | undefined; /** - *

            The fully qualified host component of the domain name of the identity provider.

            - *

            Specify this value only for OAuth 2.0 access tokens. Currently - * www.amazon.com and graph.facebook.com are the only supported - * identity providers for OAuth 2.0 access tokens. Do not include URL schemes and port - * numbers.

            + *

            The fully qualified host component of the domain name of the OAuth 2.0 identity + * provider. Do not specify this value for an OpenID Connect identity provider.

            + *

            Currently www.amazon.com and graph.facebook.com are the only + * supported identity providers for OAuth 2.0 access tokens. Do not include URL schemes and + * port numbers.

            *

            Do not specify this value for OpenID Connect ID tokens.

            */ ProviderId?: string; diff --git a/clients/client-synthetics/src/models/models_0.ts b/clients/client-synthetics/src/models/models_0.ts index f2ae35e5db12..170b2b06c41f 100644 --- a/clients/client-synthetics/src/models/models_0.ts +++ b/clients/client-synthetics/src/models/models_0.ts @@ -622,8 +622,19 @@ export interface CanaryCodeInput { ZipFile?: Uint8Array; /** - *

            The entry point to use for the source code when running the canary. This value must end - * with the string .handler. The string is limited to 29 characters or fewer.

            + *

            The entry point to use for the source code when running the canary. For canaries that use the + * syn-python-selenium-1.0 runtime + * or a syn-nodejs.puppeteer runtime earlier than syn-nodejs.puppeteer-3.4, + * the handler must be specified as + * fileName.handler. For + * syn-python-selenium-1.1, syn-nodejs.puppeteer-3.4, and later runtimes, the handler can be specified as + * + * fileName.functionName + * , or + * you can specify a folder where canary scripts reside as + * + * folder/fileName.functionName + * .

            */ Handler: string | undefined; } diff --git a/clients/client-timestream-query/src/TimestreamQuery.ts b/clients/client-timestream-query/src/TimestreamQuery.ts index bbab031440ff..ffeb6af0239f 100644 --- a/clients/client-timestream-query/src/TimestreamQuery.ts +++ b/clients/client-timestream-query/src/TimestreamQuery.ts @@ -66,7 +66,7 @@ export class TimestreamQuery extends TimestreamQueryClient { * not completed running before the cancellation request was issued. Because cancellation * is an idempotent operation, subsequent cancellation requests will return a * CancellationMessage, indicating that the query has already been - * canceled. See code + * canceled. See code * sample for details.

            */ public cancelQuery(args: CancelQueryCommandInput, options?: __HttpHandlerOptions): Promise; @@ -168,7 +168,7 @@ export class TimestreamQuery extends TimestreamQueryClient { * it is not recommended that you use this API unless:

            * *

            For detailed information on how and when to use and implement DescribeEndpoints, see - * The Endpoint Discovery Pattern.

            + * The Endpoint Discovery Pattern.

            */ public describeEndpoints( args: DescribeEndpointsCommandInput, @@ -376,7 +376,7 @@ export class TimestreamQuery extends TimestreamQueryClient { * Query is a synchronous operation that enables you to run a query against * your Amazon Timestream data. Query will time out after 60 seconds. * You must update the default timeout in the SDK to support a timeout of 60 seconds. See - * the code + * the code * sample for details.

            *

            Your query request will fail in the following cases:

            *
              diff --git a/clients/client-timestream-query/src/commands/CancelQueryCommand.ts b/clients/client-timestream-query/src/commands/CancelQueryCommand.ts index f94ec7365fa2..fadd4be769e1 100644 --- a/clients/client-timestream-query/src/commands/CancelQueryCommand.ts +++ b/clients/client-timestream-query/src/commands/CancelQueryCommand.ts @@ -27,7 +27,7 @@ export interface CancelQueryCommandOutput extends CancelQueryResponse, __Metadat * not completed running before the cancellation request was issued. Because cancellation * is an idempotent operation, subsequent cancellation requests will return a * CancellationMessage, indicating that the query has already been - * canceled. See code + * canceled. See code * sample for details.

              * @example * Use a bare-bones client and the command you need to make an API call. diff --git a/clients/client-timestream-query/src/commands/DescribeEndpointsCommand.ts b/clients/client-timestream-query/src/commands/DescribeEndpointsCommand.ts index eeb4ba261843..f868f0814943 100644 --- a/clients/client-timestream-query/src/commands/DescribeEndpointsCommand.ts +++ b/clients/client-timestream-query/src/commands/DescribeEndpointsCommand.ts @@ -29,7 +29,7 @@ export interface DescribeEndpointsCommandOutput extends DescribeEndpointsRespons * it is not recommended that you use this API unless:

              * *

              For detailed information on how and when to use and implement DescribeEndpoints, see - * The Endpoint Discovery Pattern.

              + * The Endpoint Discovery Pattern.

              * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-timestream-query/src/commands/QueryCommand.ts b/clients/client-timestream-query/src/commands/QueryCommand.ts index b9c1b1940c2c..083160553458 100644 --- a/clients/client-timestream-query/src/commands/QueryCommand.ts +++ b/clients/client-timestream-query/src/commands/QueryCommand.ts @@ -24,7 +24,7 @@ export interface QueryCommandOutput extends QueryResponse, __MetadataBearer {} * Query is a synchronous operation that enables you to run a query against * your Amazon Timestream data. Query will time out after 60 seconds. * You must update the default timeout in the SDK to support a timeout of 60 seconds. See - * the code + * the code * sample for details.

              *

              Your query request will fail in the following cases:

              *
                diff --git a/clients/client-transcribe/src/Transcribe.ts b/clients/client-transcribe/src/Transcribe.ts index 37ed1b27689e..70fb6ca9eab0 100644 --- a/clients/client-transcribe/src/Transcribe.ts +++ b/clients/client-transcribe/src/Transcribe.ts @@ -198,11 +198,13 @@ import { TranscribeClient } from "./TranscribeClient"; */ export class Transcribe extends TranscribeClient { /** - *

                Creates an analytics category. Amazon Transcribe applies the conditions specified by your - * analytics categories to your call analytics jobs. For each analytics category, you specify one - * or more rules. For example, you can specify a rule that the customer sentiment was neutral - * or negative within that category. If you start a call analytics job, Amazon Transcribe applies the - * category to the analytics job that you've specified.

                + *

                Creates a call analytics category. Amazon Transcribe applies the conditions specified by your + * call analytics categories to your call analytics jobs. For each analytics category, you + * must create between 1 and 20 rules. For example, you can create a 'greeting' + * category with a rule that flags calls in which your agent does not use a specified + * phrase (for example: "Please note this call may be recorded.") in the first 15 seconds + * of the call. When you start a call analytics job, Amazon Transcribe applies all your existing call + * analytics categories to that job.

                */ public createCallAnalyticsCategory( args: CreateCallAnalyticsCategoryCommandInput, @@ -234,9 +236,11 @@ export class Transcribe extends TranscribeClient { } /** - *

                Creates a new custom language model. Use Amazon S3 prefixes to provide the location of - * your input files. The time it takes to create your model depends on the size of your training - * data.

                + *

                Creates a new custom language model. When creating a new language model, + * you must specify if you want a Wideband (audio sample rates over 16,000 Hz) or + * Narrowband (audio sample rates under 16,000 Hz) base model. You then include the + * S3 URI location of your training and tuning files, the language for the model, a + * unique name, and any tags you want associated with your model.

                */ public createLanguageModel( args: CreateLanguageModelCommandInput, @@ -268,8 +272,16 @@ export class Transcribe extends TranscribeClient { } /** - *

                Creates a new custom vocabulary that you can use to modify how Amazon Transcribe Medical transcribes - * your audio file.

                + *

                Creates a new custom medical vocabulary.

                + *

                When creating a new medical vocabulary, you must upload a text file that contains + * your new entries, phrases, and terms into an S3 bucket. Note that this differs from + * , where you can include a list of terms within + * your request using the Phrases flag, as + * CreateMedicalVocabulary does not support the Phrases + * flag.

                + *

                For more information on creating a custom vocabulary text file, see + * Creating a custom + * vocabulary.

                */ public createMedicalVocabulary( args: CreateMedicalVocabularyCommandInput, @@ -301,8 +313,13 @@ export class Transcribe extends TranscribeClient { } /** - *

                Creates a new custom vocabulary that you can use to change the way Amazon Transcribe handles transcription of an - * audio file.

                + *

                Creates a new custom vocabulary.

                + *

                When creating a new medical vocabulary, you can either upload a text file that + * contains your new entries, phrases, and terms into an S3 bucket or include a list of + * terms directly in your request using the Phrases flag.

                + *

                For more information on creating a custom vocabulary, see + * Creating a custom + * vocabulary.

                */ public createVocabulary( args: CreateVocabularyCommandInput, @@ -334,8 +351,9 @@ export class Transcribe extends TranscribeClient { } /** - *

                Creates a new vocabulary filter that you can use to filter words, such as profane words, - * from the output of a transcription job.

                + *

                Creates a new vocabulary filter that you can use to filter words from your transcription + * output. For example, you can use this operation to remove profanity from your + * transcript.

                */ public createVocabularyFilter( args: CreateVocabularyFilterCommandInput, @@ -367,7 +385,8 @@ export class Transcribe extends TranscribeClient { } /** - *

                Deletes a call analytics category using its name.

                + *

                Deletes a call analytics category. To use this operation, specify the name of the + * category you want to delete using CategoryName.

                */ public deleteCallAnalyticsCategory( args: DeleteCallAnalyticsCategoryCommandInput, @@ -399,7 +418,8 @@ export class Transcribe extends TranscribeClient { } /** - *

                Deletes a call analytics job using its name.

                + *

                Deletes a call analytics job. To use this operation, specify the name of the + * job you want to delete using CallAnalyticsJobName.

                */ public deleteCallAnalyticsJob( args: DeleteCallAnalyticsJobCommandInput, @@ -431,7 +451,8 @@ export class Transcribe extends TranscribeClient { } /** - *

                Deletes a custom language model using its name.

                + *

                Deletes a custom language model. To use this operation, specify the name of the + * language model you want to delete using ModelName.

                */ public deleteLanguageModel( args: DeleteLanguageModelCommandInput, @@ -463,7 +484,9 @@ export class Transcribe extends TranscribeClient { } /** - *

                Deletes a transcription job generated by Amazon Transcribe Medical and any related information.

                + *

                Deletes a medical transcription job, along with any related information. To use this + * operation, specify the name of the job you want to delete using + * MedicalTranscriptionJobName.

                */ public deleteMedicalTranscriptionJob( args: DeleteMedicalTranscriptionJobCommandInput, @@ -495,7 +518,8 @@ export class Transcribe extends TranscribeClient { } /** - *

                Deletes a vocabulary from Amazon Transcribe Medical.

                + *

                Deletes a custom medical vocabulary. To use this operation, specify the name of the + * vocabulary you want to delete using VocabularyName.

                */ public deleteMedicalVocabulary( args: DeleteMedicalVocabularyCommandInput, @@ -527,8 +551,9 @@ export class Transcribe extends TranscribeClient { } /** - *

                Deletes a previously submitted transcription job along with any other generated results such as the - * transcription, models, and so on.

                + *

                Deletes a transcription job, along with any related information. To use this operation, + * specify the name of the job you want to delete using + * TranscriptionJobName.

                */ public deleteTranscriptionJob( args: DeleteTranscriptionJobCommandInput, @@ -560,7 +585,8 @@ export class Transcribe extends TranscribeClient { } /** - *

                Deletes a vocabulary from Amazon Transcribe.

                + *

                Deletes a custom vocabulary. To use this operation, specify the name of the vocabulary + * you want to delete using VocabularyName.

                */ public deleteVocabulary( args: DeleteVocabularyCommandInput, @@ -592,7 +618,8 @@ export class Transcribe extends TranscribeClient { } /** - *

                Removes a vocabulary filter.

                + *

                Deletes a vocabulary filter. To use this operation, specify the name of the vocabulary + * filter you want to delete using VocabularyFilterName.

                */ public deleteVocabularyFilter( args: DeleteVocabularyFilterCommandInput, @@ -624,11 +651,13 @@ export class Transcribe extends TranscribeClient { } /** - *

                Gets information about a single custom language model. Use this information to see details about the - * language model in your Amazon Web Services account. You can also see whether the base language model used - * to create your custom language model has been updated. If Amazon Transcribe has updated the base model, you can create a - * new custom language model using the updated base model. If the language model wasn't created, you can use this - * operation to understand why Amazon Transcribe couldn't create it.

                + *

                Provides information about a specific custom language model in your + * Amazon Web Services account.

                + *

                This operation also shows if the base language model you used to create your custom + * language model has been updated. If Amazon Transcribe has updated the base model, you can create a + * new custom language model using the updated base model.

                + *

                If you tried to create a new custom language model and the request wasn't successful, + * you can use this operation to help identify the reason.

                */ public describeLanguageModel( args: DescribeLanguageModelCommandInput, @@ -692,11 +721,12 @@ export class Transcribe extends TranscribeClient { } /** - *

                Returns information about a call analytics job. To see the status of the job, check the - * CallAnalyticsJobStatus field. If the status is COMPLETED, the job - * is finished and you can find the results at the location specified in the TranscriptFileUri - * field. If you enable personally identifiable information (PII) redaction, the redacted transcript appears - * in the RedactedTranscriptFileUri field.

                + *

                Retrieves information about a call analytics job.

                + *

                To view the job's status, refer to the CallAnalyticsJobStatus field. If the + * status is COMPLETED, the job is finished. You can then find your transcript at + * the URI specified in the TranscriptFileUri field. If you enabled personally + * identifiable information (PII) redaction, the redacted transcript appears in the + * RedactedTranscriptFileUri field.

                */ public getCallAnalyticsJob( args: GetCallAnalyticsJobCommandInput, @@ -728,9 +758,10 @@ export class Transcribe extends TranscribeClient { } /** - *

                Returns information about a transcription job from Amazon Transcribe Medical. To see the status of the job, check the - * TranscriptionJobStatus field. If the status is COMPLETED, the job is finished. You - * find the results of the completed job in the TranscriptFileUri field.

                + *

                Retrieves information about a medical transcription job.

                + *

                To view the job's status, refer to the TranscriptionJobStatus field. If the + * status is COMPLETED, the job is finished. You can then find your transcript at + * the URI specified in the TranscriptFileUri field.

                */ public getMedicalTranscriptionJob( args: GetMedicalTranscriptionJobCommandInput, @@ -795,9 +826,10 @@ export class Transcribe extends TranscribeClient { /** *

                Returns information about a transcription job. To see the status of the job, check the - * TranscriptionJobStatus field. If the status is COMPLETED, the job is finished and - * you can find the results at the location specified in the TranscriptFileUri field. If you enable content - * redaction, the redacted transcript appears in RedactedTranscriptFileUri.

                + * TranscriptionJobStatus field. If the status is COMPLETED, + * the job is finished and you can find the results at the location specified in the + * TranscriptFileUri field. If you enable content redaction, the redacted + * transcript appears in RedactedTranscriptFileUri.

                */ public getTranscriptionJob( args: GetTranscriptionJobCommandInput, diff --git a/clients/client-transcribe/src/commands/CreateCallAnalyticsCategoryCommand.ts b/clients/client-transcribe/src/commands/CreateCallAnalyticsCategoryCommand.ts index 2257a0a5058c..c1326f8b98eb 100644 --- a/clients/client-transcribe/src/commands/CreateCallAnalyticsCategoryCommand.ts +++ b/clients/client-transcribe/src/commands/CreateCallAnalyticsCategoryCommand.ts @@ -24,11 +24,13 @@ export interface CreateCallAnalyticsCategoryCommandOutput __MetadataBearer {} /** - *

                Creates an analytics category. Amazon Transcribe applies the conditions specified by your - * analytics categories to your call analytics jobs. For each analytics category, you specify one - * or more rules. For example, you can specify a rule that the customer sentiment was neutral - * or negative within that category. If you start a call analytics job, Amazon Transcribe applies the - * category to the analytics job that you've specified.

                + *

                Creates a call analytics category. Amazon Transcribe applies the conditions specified by your + * call analytics categories to your call analytics jobs. For each analytics category, you + * must create between 1 and 20 rules. For example, you can create a 'greeting' + * category with a rule that flags calls in which your agent does not use a specified + * phrase (for example: "Please note this call may be recorded.") in the first 15 seconds + * of the call. When you start a call analytics job, Amazon Transcribe applies all your existing call + * analytics categories to that job.

                * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-transcribe/src/commands/CreateLanguageModelCommand.ts b/clients/client-transcribe/src/commands/CreateLanguageModelCommand.ts index 96347e91a495..71352ea975b0 100644 --- a/clients/client-transcribe/src/commands/CreateLanguageModelCommand.ts +++ b/clients/client-transcribe/src/commands/CreateLanguageModelCommand.ts @@ -22,9 +22,11 @@ export interface CreateLanguageModelCommandInput extends CreateLanguageModelRequ export interface CreateLanguageModelCommandOutput extends CreateLanguageModelResponse, __MetadataBearer {} /** - *

                Creates a new custom language model. Use Amazon S3 prefixes to provide the location of - * your input files. The time it takes to create your model depends on the size of your training - * data.

                + *

                Creates a new custom language model. When creating a new language model, + * you must specify if you want a Wideband (audio sample rates over 16,000 Hz) or + * Narrowband (audio sample rates under 16,000 Hz) base model. You then include the + * S3 URI location of your training and tuning files, the language for the model, a + * unique name, and any tags you want associated with your model.

                * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-transcribe/src/commands/CreateMedicalVocabularyCommand.ts b/clients/client-transcribe/src/commands/CreateMedicalVocabularyCommand.ts index 00970bf48eca..928ca57753f1 100644 --- a/clients/client-transcribe/src/commands/CreateMedicalVocabularyCommand.ts +++ b/clients/client-transcribe/src/commands/CreateMedicalVocabularyCommand.ts @@ -22,8 +22,16 @@ export interface CreateMedicalVocabularyCommandInput extends CreateMedicalVocabu export interface CreateMedicalVocabularyCommandOutput extends CreateMedicalVocabularyResponse, __MetadataBearer {} /** - *

                Creates a new custom vocabulary that you can use to modify how Amazon Transcribe Medical transcribes - * your audio file.

                + *

                Creates a new custom medical vocabulary.

                + *

                When creating a new medical vocabulary, you must upload a text file that contains + * your new entries, phrases, and terms into an S3 bucket. Note that this differs from + * , where you can include a list of terms within + * your request using the Phrases flag, as + * CreateMedicalVocabulary does not support the Phrases + * flag.

                + *

                For more information on creating a custom vocabulary text file, see + * Creating a custom + * vocabulary.

                * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-transcribe/src/commands/CreateVocabularyCommand.ts b/clients/client-transcribe/src/commands/CreateVocabularyCommand.ts index 4a269dc95068..470b067a4d63 100644 --- a/clients/client-transcribe/src/commands/CreateVocabularyCommand.ts +++ b/clients/client-transcribe/src/commands/CreateVocabularyCommand.ts @@ -22,8 +22,13 @@ export interface CreateVocabularyCommandInput extends CreateVocabularyRequest {} export interface CreateVocabularyCommandOutput extends CreateVocabularyResponse, __MetadataBearer {} /** - *

                Creates a new custom vocabulary that you can use to change the way Amazon Transcribe handles transcription of an - * audio file.

                + *

                Creates a new custom vocabulary.

                + *

                When creating a new medical vocabulary, you can either upload a text file that + * contains your new entries, phrases, and terms into an S3 bucket or include a list of + * terms directly in your request using the Phrases flag.

                + *

                For more information on creating a custom vocabulary, see + * Creating a custom + * vocabulary.

                * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-transcribe/src/commands/CreateVocabularyFilterCommand.ts b/clients/client-transcribe/src/commands/CreateVocabularyFilterCommand.ts index 978161bf28cc..dd1ca604b5fa 100644 --- a/clients/client-transcribe/src/commands/CreateVocabularyFilterCommand.ts +++ b/clients/client-transcribe/src/commands/CreateVocabularyFilterCommand.ts @@ -22,8 +22,9 @@ export interface CreateVocabularyFilterCommandInput extends CreateVocabularyFilt export interface CreateVocabularyFilterCommandOutput extends CreateVocabularyFilterResponse, __MetadataBearer {} /** - *

                Creates a new vocabulary filter that you can use to filter words, such as profane words, - * from the output of a transcription job.

                + *

                Creates a new vocabulary filter that you can use to filter words from your transcription + * output. For example, you can use this operation to remove profanity from your + * transcript.

                * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-transcribe/src/commands/DeleteCallAnalyticsCategoryCommand.ts b/clients/client-transcribe/src/commands/DeleteCallAnalyticsCategoryCommand.ts index b26fc1c0b55b..36bb46decc9c 100644 --- a/clients/client-transcribe/src/commands/DeleteCallAnalyticsCategoryCommand.ts +++ b/clients/client-transcribe/src/commands/DeleteCallAnalyticsCategoryCommand.ts @@ -24,7 +24,8 @@ export interface DeleteCallAnalyticsCategoryCommandOutput __MetadataBearer {} /** - *

                Deletes a call analytics category using its name.

                + *

                Deletes a call analytics category. To use this operation, specify the name of the + * category you want to delete using CategoryName.

                * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-transcribe/src/commands/DeleteCallAnalyticsJobCommand.ts b/clients/client-transcribe/src/commands/DeleteCallAnalyticsJobCommand.ts index 1d604a941cb2..0259681a4b1e 100644 --- a/clients/client-transcribe/src/commands/DeleteCallAnalyticsJobCommand.ts +++ b/clients/client-transcribe/src/commands/DeleteCallAnalyticsJobCommand.ts @@ -22,7 +22,8 @@ export interface DeleteCallAnalyticsJobCommandInput extends DeleteCallAnalyticsJ export interface DeleteCallAnalyticsJobCommandOutput extends DeleteCallAnalyticsJobResponse, __MetadataBearer {} /** - *

                Deletes a call analytics job using its name.

                + *

                Deletes a call analytics job. To use this operation, specify the name of the + * job you want to delete using CallAnalyticsJobName.

                * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-transcribe/src/commands/DeleteLanguageModelCommand.ts b/clients/client-transcribe/src/commands/DeleteLanguageModelCommand.ts index da84f4a84d61..29fba40f1e49 100644 --- a/clients/client-transcribe/src/commands/DeleteLanguageModelCommand.ts +++ b/clients/client-transcribe/src/commands/DeleteLanguageModelCommand.ts @@ -22,7 +22,8 @@ export interface DeleteLanguageModelCommandInput extends DeleteLanguageModelRequ export interface DeleteLanguageModelCommandOutput extends __MetadataBearer {} /** - *

                Deletes a custom language model using its name.

                + *

                Deletes a custom language model. To use this operation, specify the name of the + * language model you want to delete using ModelName.

                * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-transcribe/src/commands/DeleteMedicalTranscriptionJobCommand.ts b/clients/client-transcribe/src/commands/DeleteMedicalTranscriptionJobCommand.ts index 3704799b5070..b3eb8c9150a1 100644 --- a/clients/client-transcribe/src/commands/DeleteMedicalTranscriptionJobCommand.ts +++ b/clients/client-transcribe/src/commands/DeleteMedicalTranscriptionJobCommand.ts @@ -22,7 +22,9 @@ export interface DeleteMedicalTranscriptionJobCommandInput extends DeleteMedical export interface DeleteMedicalTranscriptionJobCommandOutput extends __MetadataBearer {} /** - *

                Deletes a transcription job generated by Amazon Transcribe Medical and any related information.

                + *

                Deletes a medical transcription job, along with any related information. To use this + * operation, specify the name of the job you want to delete using + * MedicalTranscriptionJobName.

                * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-transcribe/src/commands/DeleteMedicalVocabularyCommand.ts b/clients/client-transcribe/src/commands/DeleteMedicalVocabularyCommand.ts index bc1dea6fab7f..50de4c23a00d 100644 --- a/clients/client-transcribe/src/commands/DeleteMedicalVocabularyCommand.ts +++ b/clients/client-transcribe/src/commands/DeleteMedicalVocabularyCommand.ts @@ -22,7 +22,8 @@ export interface DeleteMedicalVocabularyCommandInput extends DeleteMedicalVocabu export interface DeleteMedicalVocabularyCommandOutput extends __MetadataBearer {} /** - *

                Deletes a vocabulary from Amazon Transcribe Medical.

                + *

                Deletes a custom medical vocabulary. To use this operation, specify the name of the + * vocabulary you want to delete using VocabularyName.

                * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-transcribe/src/commands/DeleteTranscriptionJobCommand.ts b/clients/client-transcribe/src/commands/DeleteTranscriptionJobCommand.ts index 1c3130a05eb8..3532296cb73d 100644 --- a/clients/client-transcribe/src/commands/DeleteTranscriptionJobCommand.ts +++ b/clients/client-transcribe/src/commands/DeleteTranscriptionJobCommand.ts @@ -22,8 +22,9 @@ export interface DeleteTranscriptionJobCommandInput extends DeleteTranscriptionJ export interface DeleteTranscriptionJobCommandOutput extends __MetadataBearer {} /** - *

                Deletes a previously submitted transcription job along with any other generated results such as the - * transcription, models, and so on.

                + *

                Deletes a transcription job, along with any related information. To use this operation, + * specify the name of the job you want to delete using + * TranscriptionJobName.

                * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-transcribe/src/commands/DeleteVocabularyCommand.ts b/clients/client-transcribe/src/commands/DeleteVocabularyCommand.ts index 8abdd2afb56b..cb469c44a213 100644 --- a/clients/client-transcribe/src/commands/DeleteVocabularyCommand.ts +++ b/clients/client-transcribe/src/commands/DeleteVocabularyCommand.ts @@ -22,7 +22,8 @@ export interface DeleteVocabularyCommandInput extends DeleteVocabularyRequest {} export interface DeleteVocabularyCommandOutput extends __MetadataBearer {} /** - *

                Deletes a vocabulary from Amazon Transcribe.

                + *

                Deletes a custom vocabulary. To use this operation, specify the name of the vocabulary + * you want to delete using VocabularyName.

                * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-transcribe/src/commands/DeleteVocabularyFilterCommand.ts b/clients/client-transcribe/src/commands/DeleteVocabularyFilterCommand.ts index 88862f58f6f0..7aff6e0004d5 100644 --- a/clients/client-transcribe/src/commands/DeleteVocabularyFilterCommand.ts +++ b/clients/client-transcribe/src/commands/DeleteVocabularyFilterCommand.ts @@ -22,7 +22,8 @@ export interface DeleteVocabularyFilterCommandInput extends DeleteVocabularyFilt export interface DeleteVocabularyFilterCommandOutput extends __MetadataBearer {} /** - *

                Removes a vocabulary filter.

                + *

                Deletes a vocabulary filter. To use this operation, specify the name of the vocabulary + * filter you want to delete using VocabularyFilterName.

                * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-transcribe/src/commands/DescribeLanguageModelCommand.ts b/clients/client-transcribe/src/commands/DescribeLanguageModelCommand.ts index 1825c704c51d..d61480009b4c 100644 --- a/clients/client-transcribe/src/commands/DescribeLanguageModelCommand.ts +++ b/clients/client-transcribe/src/commands/DescribeLanguageModelCommand.ts @@ -22,11 +22,13 @@ export interface DescribeLanguageModelCommandInput extends DescribeLanguageModel export interface DescribeLanguageModelCommandOutput extends DescribeLanguageModelResponse, __MetadataBearer {} /** - *

                Gets information about a single custom language model. Use this information to see details about the - * language model in your Amazon Web Services account. You can also see whether the base language model used - * to create your custom language model has been updated. If Amazon Transcribe has updated the base model, you can create a - * new custom language model using the updated base model. If the language model wasn't created, you can use this - * operation to understand why Amazon Transcribe couldn't create it.

                + *

                Provides information about a specific custom language model in your + * Amazon Web Services account.

                + *

                This operation also shows if the base language model you used to create your custom + * language model has been updated. If Amazon Transcribe has updated the base model, you can create a + * new custom language model using the updated base model.

                + *

                If you tried to create a new custom language model and the request wasn't successful, + * you can use this operation to help identify the reason.

                * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-transcribe/src/commands/GetCallAnalyticsJobCommand.ts b/clients/client-transcribe/src/commands/GetCallAnalyticsJobCommand.ts index ad2662e1ce10..7bd6c224c0cf 100644 --- a/clients/client-transcribe/src/commands/GetCallAnalyticsJobCommand.ts +++ b/clients/client-transcribe/src/commands/GetCallAnalyticsJobCommand.ts @@ -22,11 +22,12 @@ export interface GetCallAnalyticsJobCommandInput extends GetCallAnalyticsJobRequ export interface GetCallAnalyticsJobCommandOutput extends GetCallAnalyticsJobResponse, __MetadataBearer {} /** - *

                Returns information about a call analytics job. To see the status of the job, check the - * CallAnalyticsJobStatus field. If the status is COMPLETED, the job - * is finished and you can find the results at the location specified in the TranscriptFileUri - * field. If you enable personally identifiable information (PII) redaction, the redacted transcript appears - * in the RedactedTranscriptFileUri field.

                + *

                Retrieves information about a call analytics job.

                + *

                To view the job's status, refer to the CallAnalyticsJobStatus field. If the + * status is COMPLETED, the job is finished. You can then find your transcript at + * the URI specified in the TranscriptFileUri field. If you enabled personally + * identifiable information (PII) redaction, the redacted transcript appears in the + * RedactedTranscriptFileUri field.

                * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-transcribe/src/commands/GetMedicalTranscriptionJobCommand.ts b/clients/client-transcribe/src/commands/GetMedicalTranscriptionJobCommand.ts index 6f6197caef28..6fa0def45ece 100644 --- a/clients/client-transcribe/src/commands/GetMedicalTranscriptionJobCommand.ts +++ b/clients/client-transcribe/src/commands/GetMedicalTranscriptionJobCommand.ts @@ -22,9 +22,10 @@ export interface GetMedicalTranscriptionJobCommandInput extends GetMedicalTransc export interface GetMedicalTranscriptionJobCommandOutput extends GetMedicalTranscriptionJobResponse, __MetadataBearer {} /** - *

                Returns information about a transcription job from Amazon Transcribe Medical. To see the status of the job, check the - * TranscriptionJobStatus field. If the status is COMPLETED, the job is finished. You - * find the results of the completed job in the TranscriptFileUri field.

                + *

                Retrieves information about a medical transcription job.

                + *

                To view the job's status, refer to the TranscriptionJobStatus field. If the + * status is COMPLETED, the job is finished. You can then find your transcript at + * the URI specified in the TranscriptFileUri field.

                * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-transcribe/src/commands/GetTranscriptionJobCommand.ts b/clients/client-transcribe/src/commands/GetTranscriptionJobCommand.ts index 60f84d4e8de2..5dfab8b51773 100644 --- a/clients/client-transcribe/src/commands/GetTranscriptionJobCommand.ts +++ b/clients/client-transcribe/src/commands/GetTranscriptionJobCommand.ts @@ -23,9 +23,10 @@ export interface GetTranscriptionJobCommandOutput extends GetTranscriptionJobRes /** *

                Returns information about a transcription job. To see the status of the job, check the - * TranscriptionJobStatus field. If the status is COMPLETED, the job is finished and - * you can find the results at the location specified in the TranscriptFileUri field. If you enable content - * redaction, the redacted transcript appears in RedactedTranscriptFileUri.

                + * TranscriptionJobStatus field. If the status is COMPLETED, + * the job is finished and you can find the results at the location specified in the + * TranscriptFileUri field. If you enable content redaction, the redacted + * transcript appears in RedactedTranscriptFileUri.

                * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-transcribe/src/models/models_0.ts b/clients/client-transcribe/src/models/models_0.ts index 0a56ee0b2e66..6057a5fb9ffd 100644 --- a/clients/client-transcribe/src/models/models_0.ts +++ b/clients/client-transcribe/src/models/models_0.ts @@ -182,14 +182,14 @@ export interface Media { *

                The S3 object location of the input media file. The URI must be in the same region as * the API endpoint that you are calling. The general form is:

                *

                - * s3://// + * s3://DOC-EXAMPLE-BUCKET/keyprefix/objectkey *

                *

                For example:

                *

                - * s3://AWSDOC-EXAMPLE-BUCKET/example.mp4 + * s3://DOC-EXAMPLE-BUCKET/example.flac *

                *

                - * s3://AWSDOC-EXAMPLE-BUCKET/mediadocs/example.mp4 + * s3://DOC-EXAMPLE-BUCKET/mediafiles/example.flac *

                *

                For more information about S3 object names, see Object Keys in the * Amazon S3 Developer Guide.

                @@ -1049,14 +1049,17 @@ export class ConflictException extends __BaseException { export interface CreateCallAnalyticsCategoryRequest { /** - *

                The name that you choose for your category when you create it.

                + *

                A unique name, chosen by you, for your call analytics category. For example, + * sentiment-positive-last30seconds.

                */ CategoryName: string | undefined; /** - *

                To create a category, you must specify between 1 and 20 rules. For each rule, you - * specify a filter to be applied to the attributes of the call. For example, you can specify a - * sentiment filter to detect if the customer's sentiment was negative or neutral.

                + *

                Rules make up a call analytics category. When creating a call analytics category, + * you must create between 1 and 20 rules for your category. For each rule, you + * specify a filter you want applied to the attributes of a call. For example, you can choose + * a sentiment filter that detects if a customer's sentiment was positive during the last + * 30 seconds of the call.

                */ Rules: Rule[] | undefined; } @@ -1073,7 +1076,13 @@ export namespace CreateCallAnalyticsCategoryRequest { export interface CreateCallAnalyticsCategoryResponse { /** - *

                The rules and associated metadata used to create a category.

                + *

                If your audio matches one of your categories, this field contains data on that + * category and its associated rules. This parameter shows which category is flagged + * (CategoryName) along with metadata for the rules that match your + * audio. Metadata includes the rule filter (such as InterruptionFilter, + * NonTalkTimeFilter, SentimentFilter, and + * TranscriptFilter) and where in your audio (StartTime + * and EndTime) the rule has a match.

                */ CategoryProperties?: CategoryProperties; } @@ -1198,35 +1207,39 @@ export namespace Tag { export interface CreateLanguageModelRequest { /** - *

                The language of the input text you're using to train your custom language - * model.

                + *

                The language of your custom language model; note that the language code you + * select must match the language of your training and tuning data.

                */ LanguageCode: CLMLanguageCode | string | undefined; /** - *

                The Amazon Transcribe standard language model, or base model used to create your custom - * language model.

                - *

                If you want to use your custom language model to transcribe audio with a sample rate - * of 16,000 Hz or greater, choose Wideband.

                - *

                If you want to use your custom language model to transcribe audio with a sample rate - * that is less than 16,000 Hz, choose Narrowband.

                + *

                The Amazon Transcribe standard language model, or base model, used to create your + * custom language model. Amazon Transcribe offers two options for base models: Wideband and + * Narrowband.

                + *

                If the audio you want to transcribe has a sample rate of 16,000 Hz or greater, + * choose WideBand. To transcribe audio with a sample rate less than + * 16,000 Hz, choose NarrowBand.

                */ BaseModelName: BaseModelName | string | undefined; /** - *

                The name you choose for your custom language model when you create it.

                + *

                The name of your new custom language model.

                + *

                This name is case sensitive, cannot contain spaces, and must be unique within an + * Amazon Web Services account. If you try to create a language model with the same name as a + * previous language model, you get a ConflictException error.

                */ ModelName: string | undefined; /** - *

                Contains the data access role and the Amazon S3 prefixes to read the required input files to - * create a custom language model.

                + *

                Contains your data access role ARN (Amazon Resource Name) and the Amazon S3 + * locations of your training (S3Uri) and tuning + * (TuningDataS3Uri) data.

                */ InputDataConfig: InputDataConfig | undefined; /** - *

                Adds one or more tags, each in the form of a key:value pair, to a new language model - * at the time you create this new model.

                + *

                Optionally add tags, each in the form of a key:value pair, to your new language + * model. See also: .

                */ Tags?: Tag[]; } @@ -1248,29 +1261,34 @@ export enum ModelStatus { export interface CreateLanguageModelResponse { /** - *

                The language code of the text you've used to create a custom language model.

                + *

                The language code you selected for your custom language model.

                */ LanguageCode?: CLMLanguageCode | string; /** - *

                The Amazon Transcribe standard language model, or base model you've used to create a custom - * language model.

                + *

                The Amazon Transcribe standard language model, or base model, you used when creating your + * custom language model.

                + *

                If your audio has a sample rate of 16,000 Hz or greater, this value should be + * WideBand. If your audio has a sample rate of less than + * 16,000 Hz, this value should be NarrowBand.

                */ BaseModelName?: BaseModelName | string; /** - *

                The name you've chosen for your custom language model.

                + *

                The unique name you chose for your custom language model.

                */ ModelName?: string; /** - *

                The data access role and Amazon S3 prefixes you've chosen to create your custom language model.

                + *

                Lists your data access role ARN (Amazon Resource Name) and the Amazon S3 + * locations your provided for your training (S3Uri) and tuning + * (TuningDataS3Uri) data.

                */ InputDataConfig?: InputDataConfig; /** - *

                The status of the custom language model. When the status is - * COMPLETED the model is ready to use.

                + *

                The status of your custom language model. When the status shows as + * COMPLETED, your model is ready to use.

                */ ModelStatus?: ModelStatus | string; } @@ -1286,40 +1304,35 @@ export namespace CreateLanguageModelResponse { export interface CreateMedicalVocabularyRequest { /** - *

                The name of the custom vocabulary. This case-sensitive name must be unique within - * an Amazon Web Services account. If you try to create a vocabulary with the same name - * as a previous vocabulary, you get a ConflictException error.

                + *

                The name of your new vocabulary.

                + *

                This name is case sensitive, cannot contain spaces, and must be unique within an + * Amazon Web Services account. If you try to create a vocabulary with the same name as a + * previous vocabulary, you get a ConflictException error.

                */ VocabularyName: string | undefined; /** - *

                The language code for the language used for the entries in your custom vocabulary. - * The language code of your custom vocabulary must match the language code of your - * transcription job. US English (en-US) is the only language code available for Amazon Transcribe Medical.

                + *

                The language code that represents the language of the entries in your custom + * vocabulary. Note that U.S. English (en-US) is the only language + * supported with Amazon Transcribe Medical.

                */ LanguageCode: LanguageCode | string | undefined; /** - *

                The location in Amazon S3 of the text file you use to define your custom vocabulary. The URI - * must be in the same Amazon Web Services Region as the resource that you're calling. Enter - * information about your VocabularyFileUri in the following format:

                + *

                The Amazon S3 location (URI) of the text file that contains your custom vocabulary. + * The URI must be in the same Amazon Web Services Region as the resource that you're + * calling.

                + *

                Here's an example URI path:

                *

                - * https://s3..amazonaws.com/// - *

                - *

                The following is an example URI for a vocabulary file that is stored in Amazon S3:

                - *

                - * https://s3.us-east-1.amazonaws.com/AWSDOC-EXAMPLE-BUCKET/vocab.txt + * https://s3.us-east-1.amazonaws.com/my-s3-bucket/my-vocab-file.txt *

                - *

                For more information about Amazon S3 object names, see Object Keys in - * the Amazon S3 Developer Guide.

                - *

                For more information about custom vocabularies, see Medical Custom - * Vocabularies.

                */ VocabularyFileUri: string | undefined; /** *

                Adds one or more tags, each in the form of a key:value pair, to a new medical - * vocabulary at the time you create this new vocabulary.

                + * vocabulary at the time you create the new vocabulary.

                + *

                To learn more about using tags with Amazon Transcribe, refer to Tagging resources.

                */ Tags?: Tag[]; } @@ -1341,32 +1354,31 @@ export enum VocabularyState { export interface CreateMedicalVocabularyResponse { /** - *

                The name of the vocabulary. The name must be unique within an Amazon Web Services - * account and is case sensitive.

                + *

                The name you chose for your vocabulary.

                */ VocabularyName?: string; /** - *

                The language code for the entries in your custom vocabulary. US English (en-US) is the - * only valid language code for Amazon Transcribe Medical.

                + *

                The language code you selected for your medical vocabulary. Note that U.S. English + * (en-US) is the only language supported with Amazon Transcribe Medical.

                */ LanguageCode?: LanguageCode | string; /** - *

                The processing state of your custom vocabulary in Amazon Transcribe Medical. If the state is + *

                The processing state of your custom medical vocabulary. If the state is * READY, you can use the vocabulary in a * StartMedicalTranscriptionJob request.

                */ VocabularyState?: VocabularyState | string; /** - *

                The date and time that you created the vocabulary.

                + *

                The date and time you created your custom medical vocabulary.

                */ LastModifiedTime?: Date; /** - *

                If the VocabularyState field is FAILED, this field contains - * information about why the job failed.

                + *

                If the VocabularyState field is FAILED, + * FailureReason contains information about why the job failed.

                */ FailureReason?: string; } @@ -1382,44 +1394,43 @@ export namespace CreateMedicalVocabularyResponse { export interface CreateVocabularyRequest { /** - *

                The name of the vocabulary. The name must be unique within an - * Amazon Web Services account. The name is case sensitive. If you try to create a vocabulary - * with the same name as a previous vocabulary you will receive a - * ConflictException error.

                + *

                The name of your new vocabulary.

                + *

                This name is case sensitive, cannot contain spaces, and must be unique within an + * Amazon Web Services account. If you try to create a vocabulary with the same name as a + * previous vocabulary, you get a ConflictException error.

                */ VocabularyName: string | undefined; /** - *

                The language code of the vocabulary entries. For a list of languages and their - * corresponding language codes, see table-language-matrix.

                + *

                The language code that represents the language of the entries in your custom + * vocabulary. Each vocabulary must contain terms in only one language. For a list of + * languages and their corresponding language codes, see Supported + * languages.

                */ LanguageCode: LanguageCode | string | undefined; /** - *

                An array of strings that contains the vocabulary entries.

                + *

                Use this flag to include a list of terms within your request.

                + *

                Note that if you include Phrases in your request, you cannot + * use VocabularyFileUri; you must choose one or the other.

                */ Phrases?: string[]; /** - *

                The S3 location of the text file that contains the definition of the custom vocabulary. The - * URI must be in the same region as the API endpoint that you are calling. The general form - * is:

                - *

                - * https://s3..amazonaws.com/// - *

                - *

                For example:

                - *

                - * https://s3.us-east-1.amazonaws.com/AWSDOC-EXAMPLE-BUCKET/vocab.txt + *

                The S3 location of the text file that contains your custom vocabulary. The + * URI must be located in the same region as the API endpoint you're calling.

                + *

                Here's an example URI path:

                + *

                + * https://s3.us-east-1.amazonaws.com/my-s3-bucket/my-vocab-file.txt *

                - *

                For more information about S3 object names, see Object Keys in the - * Amazon S3 Developer Guide.

                - *

                For more information about custom vocabularies, see Custom vocabularies.

                + *

                Note that if you include VocabularyFileUri in your request, you + * cannot use the Phrases flag; you must choose one or the other.

                */ VocabularyFileUri?: string; /** - *

                Adds one or more tags, each in the form of a key:value pair, to a new Amazon Transcribe vocabulary at - * the time you create this new vocabulary.

                + *

                Adds one or more tags, each in the form of a key:value pair, to a new + * custom vocabulary at the time you create this new vocabulary.

                */ Tags?: Tag[]; } @@ -1435,30 +1446,29 @@ export namespace CreateVocabularyRequest { export interface CreateVocabularyResponse { /** - *

                The name of the vocabulary.

                + *

                The name you chose for your vocabulary.

                */ VocabularyName?: string; /** - *

                The language code of the vocabulary entries.

                + *

                The language code you selected for your vocabulary.

                */ LanguageCode?: LanguageCode | string; /** - *

                The processing state of the vocabulary. When the VocabularyState field - * contains READY the vocabulary is ready to be used in a StartTranscriptionJob - * request.

                + *

                The processing state of your vocabulary. If the state is READY, you can + * use the vocabulary in a StartTranscriptionJob request.

                */ VocabularyState?: VocabularyState | string; /** - *

                The date and time that the vocabulary was created.

                + *

                The date and time you created your custom vocabulary.

                */ LastModifiedTime?: Date; /** - *

                If the VocabularyState field is FAILED, this field contains - * information about why the job failed.

                + *

                If the VocabularyState field is FAILED, + * FailureReason contains information about why the job failed.

                */ FailureReason?: string; } @@ -1474,9 +1484,10 @@ export namespace CreateVocabularyResponse { export interface CreateVocabularyFilterRequest { /** - *

                The vocabulary filter name. The name must be unique within the account that contains - * it. If you try to create a vocabulary filter with the same name as another vocabulary filter, you - * get a ConflictException error.

                + *

                The name of your new vocabulary filter.

                + *

                This name is case sensitive, cannot contain spaces, and must be unique within an + * Amazon Web Services account. If you try to create a vocabulary filter with the same name + * as a previous vocabulary filter, you get a ConflictException error.

                */ VocabularyFilterName: string | undefined; @@ -1488,27 +1499,27 @@ export interface CreateVocabularyFilterRequest { LanguageCode: LanguageCode | string | undefined; /** - *

                The words to use in the vocabulary filter. Only use characters from the character set - * defined for custom vocabularies. For a list of character sets, see Character Sets for Custom - * Vocabularies.

                - *

                If you provide a list of words in the Words parameter, you can't use the - * VocabularyFilterFileUri parameter.

                + *

                The words you want in your vocabulary filter. Only use characters specified in the + * Character + * sets for the language you're transcribing.

                + *

                Note that if you include Words in your request, you cannot use + * VocabularyFilterFileUri; you must choose one or the other.

                */ Words?: string[]; /** *

                The Amazon S3 location of a text file used as input to create the vocabulary filter. Only - * use characters from the character set defined for custom vocabularies. For a list of character - * sets, see Character Sets for Custom + * use characters from the character set defined for custom vocabularies. For a list of + * character sets, see Character Sets for Custom * Vocabularies.

                - *

                The specified file must be less than 50 KB of UTF-8 characters.

                - *

                If you provide the location of a list of words in the VocabularyFilterFileUri - * parameter, you can't use the Words parameter.

                + *

                Your vocabulary filter file must be less than 50 KB in size.

                + *

                Note that if you include VocabularyFilterFileUri in your request, you + * cannot use Words; you must choose one or the other.

                */ VocabularyFilterFileUri?: string; /** - *

                Adds one or more tags, each in the form of a key:value pair, to a new Amazon Transcribe vocabulary + *

                Adds one or more tags, each in the form of a key:value pair, to a new vocabulary * filter at the time you create this new vocabulary filter.

                */ Tags?: Tag[]; @@ -1530,12 +1541,12 @@ export interface CreateVocabularyFilterResponse { VocabularyFilterName?: string; /** - *

                The language code of the words in the collection.

                + *

                The language code associated with your vocabulary filter.

                */ LanguageCode?: LanguageCode | string; /** - *

                The date and time that the vocabulary filter was modified.

                + *

                The date and time the vocabulary filter was modified.

                */ LastModifiedTime?: Date; } @@ -1551,8 +1562,8 @@ export namespace CreateVocabularyFilterResponse { export interface DeleteCallAnalyticsCategoryRequest { /** - *

                The name of the call analytics category that you're choosing to delete. The value is case - * sensitive.

                + *

                The name of the call analytics category you want to delete. Category names are + * case-sensitive.

                */ CategoryName: string | undefined; } @@ -1600,7 +1611,8 @@ export class NotFoundException extends __BaseException { export interface DeleteCallAnalyticsJobRequest { /** - *

                The name of the call analytics job you want to delete.

                + *

                The name of the call analytics job you want to delete. Job names are + * case-sensitive.

                */ CallAnalyticsJobName: string | undefined; } @@ -1627,7 +1639,7 @@ export namespace DeleteCallAnalyticsJobResponse { export interface DeleteLanguageModelRequest { /** - *

                The name of the model you're choosing to delete.

                + *

                The name of the model you want to delete. Model names are case-sensitive.

                */ ModelName: string | undefined; } @@ -1643,8 +1655,8 @@ export namespace DeleteLanguageModelRequest { export interface DeleteMedicalTranscriptionJobRequest { /** - *

                The name you provide to the DeleteMedicalTranscriptionJob object to - * delete a transcription job.

                + *

                The name of the medical transcription job you want to delete. Job names are + * case-sensitive.

                */ MedicalTranscriptionJobName: string | undefined; } @@ -1660,7 +1672,8 @@ export namespace DeleteMedicalTranscriptionJobRequest { export interface DeleteMedicalVocabularyRequest { /** - *

                The name of the vocabulary that you want to delete.

                + *

                The name of the vocabulary that you want to delete. Vocabulary names are + * case-sensitive.

                */ VocabularyName: string | undefined; } @@ -1676,7 +1689,8 @@ export namespace DeleteMedicalVocabularyRequest { export interface DeleteTranscriptionJobRequest { /** - *

                The name of the transcription job to be deleted.

                + *

                The name of the transcription job you want to delete. Job names are + * case-sensitive.

                */ TranscriptionJobName: string | undefined; } @@ -1692,7 +1706,8 @@ export namespace DeleteTranscriptionJobRequest { export interface DeleteVocabularyRequest { /** - *

                The name of the vocabulary to delete.

                + *

                The name of the vocabulary you want to delete. Vocabulary names are + * case-sensitive.

                */ VocabularyName: string | undefined; } @@ -1708,7 +1723,8 @@ export namespace DeleteVocabularyRequest { export interface DeleteVocabularyFilterRequest { /** - *

                The name of the vocabulary filter to remove.

                + *

                The name of the vocabulary filter you want to delete. Vocabulary filter names are + * case-sensitive.

                */ VocabularyFilterName: string | undefined; } @@ -1724,7 +1740,8 @@ export namespace DeleteVocabularyFilterRequest { export interface DescribeLanguageModelRequest { /** - *

                The name of the custom language model you submit to get more information.

                + *

                The name of the custom language model you want described. Model names are + * case-sensitive.

                */ ModelName: string | undefined; } @@ -1776,7 +1793,7 @@ export interface LanguageModel { /** *

                Whether the base model used for the custom language model is up to date. If this field - * is true then you are running the most up-to-date version of the base model + * is false then you are running the most up-to-date version of the base model * in your custom language model.

                */ UpgradeAvailability?: boolean; @@ -1820,7 +1837,8 @@ export namespace DescribeLanguageModelResponse { export interface GetCallAnalyticsCategoryRequest { /** - *

                The name of the category you want information about. This value is case sensitive.

                + *

                The name of the category you want information about. Category names are case + * sensitive.

                */ CategoryName: string | undefined; } @@ -1836,7 +1854,8 @@ export namespace GetCallAnalyticsCategoryRequest { export interface GetCallAnalyticsCategoryResponse { /** - *

                The rules you've defined for a category.

                + *

                Provides you with the rules associated with the category you specified in your + * GetCallAnalyticsCategory request.

                */ CategoryProperties?: CategoryProperties; } @@ -1856,7 +1875,7 @@ export namespace GetCallAnalyticsCategoryResponse { export interface GetCallAnalyticsJobRequest { /** *

                The name of the analytics job you want information about. This value is case - * sensitive.

                + * sensitive.

                */ CallAnalyticsJobName: string | undefined; } @@ -1872,7 +1891,14 @@ export namespace GetCallAnalyticsJobRequest { export interface GetCallAnalyticsJobResponse { /** - *

                An object that contains the results of your call analytics job.

                + *

                An object that contains detailed information about your call analytics job. Returned fields + * include: CallAnalyticsJobName, CallAnalyticsJobStatus, + * ChannelDefinitions, CompletionTime, + * CreationTime, DataAccessRoleArn, + * FailureReason, IdentifiedLanguageScore, + * LanguageCode, Media, MediaFormat, + * MediaSampleRateHertz, Settings, StartTime, + * and Transcript.

                */ CallAnalyticsJob?: CallAnalyticsJob; } @@ -1888,7 +1914,8 @@ export namespace GetCallAnalyticsJobResponse { export interface GetMedicalTranscriptionJobRequest { /** - *

                The name of the medical transcription job.

                + *

                The name of the medical transcription job you want information about. This value is case + * sensitive.

                */ MedicalTranscriptionJobName: string | undefined; } @@ -2162,7 +2189,15 @@ export namespace MedicalTranscriptionJob { export interface GetMedicalTranscriptionJobResponse { /** - *

                An object that contains the results of the medical transcription job.

                + *

                An object that contains detailed information about your medical transcription job. + * Returned fields include: CompletionTime, + * ContentIdentificationType, CreationTime, + * FailureReason, LanguageCode, Media, + * MediaFormat, MediaSampleRateHertz, + * MedicalTranscriptionJobName, Settings, + * Specialty, StartTime, Tags, + * Transcript, TranscriptionJobStatus, and + * Type.

                */ MedicalTranscriptionJob?: MedicalTranscriptionJob; } @@ -2178,7 +2213,8 @@ export namespace GetMedicalTranscriptionJobResponse { export interface GetMedicalVocabularyRequest { /** - *

                The name of the vocabulary that you want information about. The value is case sensitive.

                + *

                The name of the medical vocabulary you want information about. This value is case + * sensitive.

                */ VocabularyName: string | undefined; } @@ -2216,14 +2252,17 @@ export interface GetMedicalVocabularyResponse { LastModifiedTime?: Date; /** - *

                If the VocabularyState is FAILED, this field contains information about why - * the job failed.

                + *

                If your request returns a VocabularyState that is FAILED, + * the FailureReason field contains information about why the request + * failed.

                + *

                For more information, refer to the Common Errors + * section.

                */ FailureReason?: string; /** - *

                The location in Amazon S3 where the vocabulary is stored. Use this URI to get the contents of the vocabulary. You - * can download your vocabulary from the URI for a limited time.

                + *

                The S3 location where the vocabulary is stored; use this URI to view or download the + * vocabulary.

                */ DownloadUri?: string; } @@ -2394,7 +2433,8 @@ export enum SubtitleFormat { } /** - *

                Specify the output format for your subtitle file.

                + *

                Choose the output format for your subtitle file and the S3 location where you want + * your file saved.

                */ export interface SubtitlesOutput { /** @@ -2404,7 +2444,7 @@ export interface SubtitlesOutput { Formats?: (SubtitleFormat | string)[]; /** - *

                Choose the output location for your subtitle file. This location must be an S3 + *

                Contains the output location for your subtitle file. This location must be an S3 * bucket.

                */ SubtitleFileUris?: string[]; @@ -3050,14 +3090,15 @@ export namespace ListMedicalTranscriptionJobsResponse { export interface ListMedicalVocabulariesRequest { /** *

                If the result of your previous request to ListMedicalVocabularies was - * truncated, include the NextToken to fetch the next set of vocabularies.

                + * truncated, include the NextToken to fetch the next set of + * vocabularies.

                */ NextToken?: string; /** - *

                The maximum number of vocabularies to return in each page of results. If there are fewer - * results than the value you specify, only the actual results are returned. If you do not specify - * a value, the default of 5 is used.

                + *

                The maximum number of vocabularies to return in each page of results. If there are + * fewer results than the value you specify, only the actual results are returned. If you do not + * specify a value, the default of 5 is used.

                */ MaxResults?: number; @@ -3127,12 +3168,12 @@ export interface ListMedicalVocabulariesResponse { Status?: VocabularyState | string; /** - *

                The ListMedicalVocabularies operation returns a page of vocabularies at a - * time. You set the maximum number of vocabularies to return on a page with the + *

                The ListMedicalVocabularies operation returns a page of vocabularies at + * a time. You set the maximum number of vocabularies to return on a page with the * MaxResults parameter. If there are more jobs in the list will fit on a page, * Amazon Transcribe Medical returns the NextPage token. To return the next page of vocabularies, * include the token in the next request to the ListMedicalVocabularies - * operation .

                + * operation.

                */ NextToken?: string; @@ -3759,7 +3800,7 @@ export interface StartMedicalTranscriptionJobRequest { Type: Type | string | undefined; /** - *

                Add tags to an Amazon Transcribe medical transcription job.

                + *

                Add tags to an Amazon Transcribe Medical transcription job.

                */ Tags?: Tag[]; } @@ -4146,11 +4187,11 @@ export interface UpdateMedicalVocabularyRequest { * be in the same Amazon Web Services Region as the resource that you are calling. The following * is the format for a URI:

                *

                - * https://s3..amazonaws.com/// + * https://s3.aws-region.amazonaws.com/bucket-name/keyprefix/objectkey *

                *

                For example:

                *

                - * https://s3.us-east-1.amazonaws.com/AWSDOC-EXAMPLE-BUCKET/vocab.txt + * https://s3.us-east-1.amazonaws.com/DOC-EXAMPLE-BUCKET/vocab.txt *

                *

                For more information about Amazon S3 object names, see Object Keys in the * Amazon S3 Developer Guide.

                @@ -4227,11 +4268,11 @@ export interface UpdateVocabularyRequest { * The URI must be in the same region as the API endpoint that you are calling. The general form * is:

                *

                - * https://s3..amazonaws.com/// + * https://s3.aws-region.amazonaws.com/bucket-name/keyprefix/objectkey *

                *

                For example:

                *

                - * https://s3.us-east-1.amazonaws.com/AWSDOC-EXAMPLE-BUCKET/vocab.txt + * https://s3.us-east-1.amazonaws.com/DOC-EXAMPLE-BUCKET/vocab.txt *

                *

                For more information about S3 object names, see Object Keys in the * Amazon S3 Developer Guide.

                diff --git a/clients/client-transfer/package.json b/clients/client-transfer/package.json index 23fe5bd2a916..7c1e5ac46535 100644 --- a/clients/client-transfer/package.json +++ b/clients/client-transfer/package.json @@ -48,6 +48,7 @@ "@aws-sdk/util-user-agent-node": "*", "@aws-sdk/util-utf8-browser": "*", "@aws-sdk/util-utf8-node": "*", + "@aws-sdk/util-waiter": "*", "tslib": "^2.3.0" }, "devDependencies": { diff --git a/clients/client-transfer/src/index.ts b/clients/client-transfer/src/index.ts index 4ecf9c95ec36..d8f08fc338a1 100644 --- a/clients/client-transfer/src/index.ts +++ b/clients/client-transfer/src/index.ts @@ -3,4 +3,5 @@ export * from "./TransferClient"; export * from "./commands"; export * from "./models"; export * from "./pagination"; +export * from "./waiters"; export { TransferServiceException } from "./models/TransferServiceException"; diff --git a/clients/client-transfer/src/models/models_0.ts b/clients/client-transfer/src/models/models_0.ts index 2a02ce46d604..7e39471d650d 100644 --- a/clients/client-transfer/src/models/models_0.ts +++ b/clients/client-transfer/src/models/models_0.ts @@ -928,8 +928,25 @@ export interface CreateServerRequest { */ LoggingRole?: string; + /** + *

                Specify a string to display when users connect to a server. This string is displayed after the user authenticates.

                + * + *

                The SFTP protocol does not support post-authentication display banners.

                + *
                + */ PostAuthenticationLoginBanner?: string; + + /** + *

                Specify a string to display when users connect to a server. This string is displayed before the user authenticates. + * For example, the following banner displays details about using the system.

                + *

                + * This system is for the use of authorized users only. Individuals using this computer system without authority, + * or in excess of their authority, are subject to having all of their activities on this system monitored and recorded by + * system personnel. + *

                + */ PreAuthenticationLoginBanner?: string; + /** *

                Specifies the file transfer protocol or protocols over which your file transfer protocol * client can connect to your server's endpoint. The available protocols are:

                @@ -1848,7 +1865,14 @@ export namespace LoggingConfiguration { } export enum ExecutionErrorType { + ALREADY_EXISTS = "ALREADY_EXISTS", + BAD_REQUEST = "BAD_REQUEST", + CUSTOM_STEP_FAILED = "CUSTOM_STEP_FAILED", + INTERNAL_SERVER_ERROR = "INTERNAL_SERVER_ERROR", + NOT_FOUND = "NOT_FOUND", PERMISSION_DENIED = "PERMISSION_DENIED", + THROTTLED = "THROTTLED", + TIMEOUT = "TIMEOUT", } /** @@ -1856,8 +1880,49 @@ export enum ExecutionErrorType { */ export interface ExecutionError { /** - *

                Specifies the error type: currently, the only valid value is PERMISSION_DENIED, which occurs - * if your policy does not contain the correct permissions to complete one or more of the steps in the workflow.

                + *

                Specifies the error type.

                + *
                  + *
                • + *

                  + * ALREADY_EXISTS: occurs for a copy step, if the overwrite option is not selected and a file with the same name already exists in the target location.

                  + *
                • + *
                • + *

                  + * BAD_REQUEST: a general bad request: for example, a step that attempts to + * tag an EFS file returns BAD_REQUEST, as only S3 files can be tagged.

                  + *
                • + *
                • + *

                  + * CUSTOM_STEP_FAILED: occurs when the custom step provided a callback that indicates failure.

                  + *
                • + *
                • + *

                  + * INTERNAL_SERVER_ERROR: a catch-all error that can occur for a variety of + * reasons.

                  + *
                • + *
                • + *

                  + * NOT_FOUND: occurs when a requested entity, for example a source file for + * a copy step, does not exist.

                  + *
                • + *
                • + *

                  + * PERMISSION_DENIED: occurs if your policy does not contain the correct + * permissions to complete one or more of the steps in the workflow.

                  + *
                • + *
                • + *

                  + * TIMEOUT: occurs when the execution times out.

                  + * + *

                  You can set the TimeoutSeconds for a custom step, anywhere from 1 second to 1800 seconds (30 minutes).

                  + *
                  + *
                • + *
                • + *

                  + * THROTTLED: occurs if you exceed the new execution refill rate of one + * workflow per second.

                  + *
                • + *
                */ Type: ExecutionErrorType | string | undefined; @@ -2208,8 +2273,25 @@ export interface DescribedServer { */ LoggingRole?: string; + /** + *

                Specify a string to display when users connect to a server. This string is displayed after the user authenticates.

                + * + *

                The SFTP protocol does not support post-authentication display banners.

                + *
                + */ PostAuthenticationLoginBanner?: string; + + /** + *

                Specify a string to display when users connect to a server. This string is displayed before the user authenticates. + * For example, the following banner displays details about using the system.

                + *

                + * This system is for the use of authorized users only. Individuals using this computer system without authority, + * or in excess of their authority, are subject to having all of their activities on this system monitored and recorded by + * system personnel. + *

                + */ PreAuthenticationLoginBanner?: string; + /** *

                Specifies the file transfer protocol or protocols over which your file transfer protocol * client can connect to your server's endpoint. The available protocols are:

                @@ -3848,8 +3930,25 @@ export interface UpdateServerRequest { */ LoggingRole?: string; + /** + *

                Specify a string to display when users connect to a server. This string is displayed after the user authenticates.

                + * + *

                The SFTP protocol does not support post-authentication display banners.

                + *
                + */ PostAuthenticationLoginBanner?: string; + + /** + *

                Specify a string to display when users connect to a server. This string is displayed before the user authenticates. + * For example, the following banner displays details about using the system.

                + *

                + * This system is for the use of authorized users only. Individuals using this computer system without authority, + * or in excess of their authority, are subject to having all of their activities on this system monitored and recorded by + * system personnel. + *

                + */ PreAuthenticationLoginBanner?: string; + /** *

                Specifies the file transfer protocol or protocols over which your file transfer protocol * client can connect to your server's endpoint. The available protocols are:

                diff --git a/clients/client-transfer/src/waiters/index.ts b/clients/client-transfer/src/waiters/index.ts new file mode 100644 index 000000000000..e182fb2c6536 --- /dev/null +++ b/clients/client-transfer/src/waiters/index.ts @@ -0,0 +1,2 @@ +export * from "./waitForServerOffline"; +export * from "./waitForServerOnline"; diff --git a/clients/client-transfer/src/waiters/waitForServerOffline.ts b/clients/client-transfer/src/waiters/waitForServerOffline.ts new file mode 100644 index 000000000000..d552abfa6f46 --- /dev/null +++ b/clients/client-transfer/src/waiters/waitForServerOffline.ts @@ -0,0 +1,55 @@ +import { checkExceptions, createWaiter, WaiterConfiguration, WaiterResult, WaiterState } from "@aws-sdk/util-waiter"; + +import { DescribeServerCommand, DescribeServerCommandInput } from "../commands/DescribeServerCommand"; +import { TransferClient } from "../TransferClient"; + +const checkState = async (client: TransferClient, input: DescribeServerCommandInput): Promise => { + let reason; + try { + const result: any = await client.send(new DescribeServerCommand(input)); + reason = result; + try { + const returnComparator = () => { + return result.Server.State; + }; + if (returnComparator() === "OFFLINE") { + return { state: WaiterState.SUCCESS, reason }; + } + } catch (e) {} + try { + const returnComparator = () => { + return result.Server.State; + }; + if (returnComparator() === "STOP_FAILED") { + return { state: WaiterState.FAILURE, reason }; + } + } catch (e) {} + } catch (exception) { + reason = exception; + } + return { state: WaiterState.RETRY, reason }; +}; +/** + * + * @deprecated Use waitUntilServerOffline instead. waitForServerOffline does not throw error in non-success cases. 
+ */ +export const waitForServerOffline = async ( + params: WaiterConfiguration, + input: DescribeServerCommandInput +): Promise => { + const serviceDefaults = { minDelay: 30, maxDelay: 120 }; + return createWaiter({ ...serviceDefaults, ...params }, input, checkState); +}; +/** + * + * @param params - Waiter configuration options. + * @param input - The input to DescribeServerCommand for polling. + */ +export const waitUntilServerOffline = async ( + params: WaiterConfiguration, + input: DescribeServerCommandInput +): Promise => { + const serviceDefaults = { minDelay: 30, maxDelay: 120 }; + const result = await createWaiter({ ...serviceDefaults, ...params }, input, checkState); + return checkExceptions(result); +}; diff --git a/clients/client-transfer/src/waiters/waitForServerOnline.ts b/clients/client-transfer/src/waiters/waitForServerOnline.ts new file mode 100644 index 000000000000..73718712c718 --- /dev/null +++ b/clients/client-transfer/src/waiters/waitForServerOnline.ts @@ -0,0 +1,55 @@ +import { checkExceptions, createWaiter, WaiterConfiguration, WaiterResult, WaiterState } from "@aws-sdk/util-waiter"; + +import { DescribeServerCommand, DescribeServerCommandInput } from "../commands/DescribeServerCommand"; +import { TransferClient } from "../TransferClient"; + +const checkState = async (client: TransferClient, input: DescribeServerCommandInput): Promise => { + let reason; + try { + const result: any = await client.send(new DescribeServerCommand(input)); + reason = result; + try { + const returnComparator = () => { + return result.Server.State; + }; + if (returnComparator() === "ONLINE") { + return { state: WaiterState.SUCCESS, reason }; + } + } catch (e) {} + try { + const returnComparator = () => { + return result.Server.State; + }; + if (returnComparator() === "START_FAILED") { + return { state: WaiterState.FAILURE, reason }; + } + } catch (e) {} + } catch (exception) { + reason = exception; + } + return { state: WaiterState.RETRY, reason }; +}; +/** + * + 
* @deprecated Use waitUntilServerOnline instead. waitForServerOnline does not throw error in non-success cases. + */ +export const waitForServerOnline = async ( + params: WaiterConfiguration, + input: DescribeServerCommandInput +): Promise => { + const serviceDefaults = { minDelay: 30, maxDelay: 120 }; + return createWaiter({ ...serviceDefaults, ...params }, input, checkState); +}; +/** + * + * @param params - Waiter configuration options. + * @param input - The input to DescribeServerCommand for polling. + */ +export const waitUntilServerOnline = async ( + params: WaiterConfiguration, + input: DescribeServerCommandInput +): Promise => { + const serviceDefaults = { minDelay: 30, maxDelay: 120 }; + const result = await createWaiter({ ...serviceDefaults, ...params }, input, checkState); + return checkExceptions(result); +}; diff --git a/codegen/sdk-codegen/aws-models/amplify.json b/codegen/sdk-codegen/aws-models/amplify.json index d0e2e115335c..b0417cb9c93d 100644 --- a/codegen/sdk-codegen/aws-models/amplify.json +++ b/codegen/sdk-codegen/aws-models/amplify.json @@ -36,6 +36,7 @@ "min": 1, "max": 255 }, + "smithy.api#pattern": "^(?s).+$", "smithy.api#sensitive": {} } }, @@ -223,7 +224,7 @@ "repository": { "target": "com.amazonaws.amplify#Repository", "traits": { - "smithy.api#documentation": "

                The repository for the Amplify app.

                ", + "smithy.api#documentation": "

                The Git repository for the Amplify app.

                ", "smithy.api#required": {} } }, @@ -291,7 +292,7 @@ "basicAuthCredentials": { "target": "com.amazonaws.amplify#BasicAuthCredentials", "traits": { - "smithy.api#documentation": "

                The basic authorization credentials for branches for the Amplify app. You must base64-encode the authorization credentials and provide them in the format user:password.

                " + "smithy.api#documentation": "

                The basic authorization credentials for branches for the Amplify app. You must\n base64-encode the authorization credentials and provide them in the format\n user:password.

                " } }, "customRules": { @@ -335,6 +336,12 @@ "traits": { "smithy.api#documentation": "

                Describes the automated branch creation configuration for the Amplify app.

                " } + }, + "repositoryCloneMethod": { + "target": "com.amazonaws.amplify#RepositoryCloneMethod", + "traits": { + "smithy.api#documentation": "

                The authentication protocol to use to access the Git repository for an Amplify app.\n For a GitHub repository, specify TOKEN. For an Amazon Web Services CodeCommit repository,\n specify SIGV4. For GitLab and Bitbucket repositories, specify\n SSH.

                " + } } }, "traits": { @@ -403,7 +410,8 @@ "smithy.api#length": { "min": 0, "max": 255 - } + }, + "smithy.api#pattern": "^(?s)" } }, "com.amazonaws.amplify#ArtifactUrl": { @@ -475,7 +483,7 @@ "basicAuthCredentials": { "target": "com.amazonaws.amplify#BasicAuthCredentials", "traits": { - "smithy.api#documentation": "

                The basic authorization credentials for the autocreated branch. You must base64-encode the authorization credentials and provide them in the format user:password.

                " + "smithy.api#documentation": "

                The basic authorization credentials for the autocreated branch. You must\n base64-encode the authorization credentials and provide them in the format\n user:password.

                " } }, "enableBasicAuth": { @@ -487,7 +495,7 @@ "enablePerformanceMode": { "target": "com.amazonaws.amplify#EnablePerformanceMode", "traits": { - "smithy.api#documentation": "

                Enables performance mode for the branch.

                \n

                Performance mode optimizes for faster hosting performance by keeping content cached at the edge for a longer interval. When performance mode is enabled, hosting configuration or code changes can take up to 10 minutes to roll out.

                " + "smithy.api#documentation": "

                Enables performance mode for the branch.

                \n

                Performance mode optimizes for faster hosting performance by keeping content cached at\n the edge for a longer interval. When performance mode is enabled, hosting configuration\n or code changes can take up to 10 minutes to roll out.

                " } }, "buildSpec": { @@ -519,7 +527,8 @@ "smithy.api#length": { "min": 1, "max": 2048 - } + }, + "smithy.api#pattern": "^(?s).+$" } }, "com.amazonaws.amplify#AutoBranchCreationPatterns": { @@ -534,7 +543,8 @@ "smithy.api#length": { "min": 1, "max": 2048 - } + }, + "smithy.api#pattern": "^(?s).+$" } }, "com.amazonaws.amplify#AutoSubDomainCreationPatterns": { @@ -605,9 +615,10 @@ "type": "string", "traits": { "smithy.api#length": { - "min": 1, + "min": 0, "max": 1000 - } + }, + "smithy.api#pattern": "^(?s)" } }, "com.amazonaws.amplify#BackendEnvironments": { @@ -636,6 +647,7 @@ "min": 0, "max": 2000 }, + "smithy.api#pattern": "^(?s)", "smithy.api#sensitive": {} } }, @@ -756,7 +768,7 @@ "enablePerformanceMode": { "target": "com.amazonaws.amplify#EnablePerformanceMode", "traits": { - "smithy.api#documentation": "

                Enables performance mode for the branch.

                \n

                Performance mode optimizes for faster hosting performance by keeping content cached at the edge for a longer interval. When performance mode is enabled, hosting configuration or code changes can take up to 10 minutes to roll out.

                " + "smithy.api#documentation": "

                Enables performance mode for the branch.

                \n

                Performance mode optimizes for faster hosting performance by keeping content cached at\n the edge for a longer interval. When performance mode is enabled, hosting configuration\n or code changes can take up to 10 minutes to roll out.

                " } }, "thumbnailUrl": { @@ -768,7 +780,7 @@ "basicAuthCredentials": { "target": "com.amazonaws.amplify#BasicAuthCredentials", "traits": { - "smithy.api#documentation": "

                The basic authorization credentials for a branch of an Amplify app. You must base64-encode the authorization credentials and provide them in the format user:password.

                " + "smithy.api#documentation": "

                The basic authorization credentials for a branch of an Amplify app. You must\n base64-encode the authorization credentials and provide them in the format\n user:password.

                " } }, "buildSpec": { @@ -832,7 +844,8 @@ "smithy.api#length": { "min": 0, "max": 1000 - } + }, + "smithy.api#pattern": "^(?s)" } }, "com.amazonaws.amplify#BranchName": { @@ -841,7 +854,8 @@ "smithy.api#length": { "min": 1, "max": 255 - } + }, + "smithy.api#pattern": "^(?s).+$" } }, "com.amazonaws.amplify#Branches": { @@ -863,7 +877,9 @@ "smithy.api#length": { "min": 1, "max": 25000 - } + }, + "smithy.api#pattern": "^(?s).+$", + "smithy.api#sensitive": {} } }, "com.amazonaws.amplify#CertificateVerificationDNSRecord": { @@ -884,7 +900,8 @@ "smithy.api#length": { "min": 0, "max": 255 - } + }, + "smithy.api#pattern": "^(?s)" } }, "com.amazonaws.amplify#CommitMessage": { @@ -893,7 +910,8 @@ "smithy.api#length": { "min": 0, "max": 10000 - } + }, + "smithy.api#pattern": "^(?s)" } }, "com.amazonaws.amplify#CommitTime": { @@ -903,9 +921,10 @@ "type": "string", "traits": { "smithy.api#length": { - "min": 1, + "min": 0, "max": 2048 - } + }, + "smithy.api#pattern": "^(?s)" } }, "com.amazonaws.amplify#Context": { @@ -1018,7 +1037,7 @@ "basicAuthCredentials": { "target": "com.amazonaws.amplify#BasicAuthCredentials", "traits": { - "smithy.api#documentation": "

                The credentials for basic authorization for an Amplify app. You must base64-encode the authorization credentials and provide them in the format user:password.

                " + "smithy.api#documentation": "

                The credentials for basic authorization for an Amplify app. You must base64-encode\n the authorization credentials and provide them in the format\n user:password.

                " } }, "customRules": { @@ -1257,7 +1276,7 @@ "basicAuthCredentials": { "target": "com.amazonaws.amplify#BasicAuthCredentials", "traits": { - "smithy.api#documentation": "

                The basic authorization credentials for the branch. You must base64-encode the authorization credentials and provide them in the format user:password.

                " + "smithy.api#documentation": "

                The basic authorization credentials for the branch. You must base64-encode the\n authorization credentials and provide them in the format\n user:password.

                " } }, "enableBasicAuth": { @@ -1269,7 +1288,7 @@ "enablePerformanceMode": { "target": "com.amazonaws.amplify#EnablePerformanceMode", "traits": { - "smithy.api#documentation": "

                Enables performance mode for the branch.

                \n

                Performance mode optimizes for faster hosting performance by keeping content cached at the edge for a longer interval. When performance mode is enabled, hosting configuration or code changes can take up to 10 minutes to roll out.

                " + "smithy.api#documentation": "

                Enables performance mode for the branch.

                \n

                Performance mode optimizes for faster hosting performance by keeping content cached at\n the edge for a longer interval. When performance mode is enabled, hosting configuration\n or code changes can take up to 10 minutes to roll out.

                " } }, "tags": { @@ -1632,9 +1651,10 @@ "type": "string", "traits": { "smithy.api#length": { - "min": 1, + "min": 0, "max": 25000 - } + }, + "smithy.api#pattern": "^(?s)" } }, "com.amazonaws.amplify#CustomRule": { @@ -1657,7 +1677,7 @@ "status": { "target": "com.amazonaws.amplify#Status", "traits": { - "smithy.api#documentation": "

                The status code for a URL rewrite or redirect rule.

                \n \n
                \n
                200
                \n
                \n

                Represents a 200 rewrite rule.

                \n
                \n
                301
                \n
                \n

                Represents a 301 (moved pemanently) redirect rule. This and all future requests should be directed to the target URL.

                \n
                \n
                302
                \n
                \n

                Represents a 302 temporary redirect rule.

                \n
                \n
                404
                \n
                \n

                Represents a 404 redirect rule.

                \n
                \n
                404-200
                \n
                \n

                Represents a 404 rewrite rule.

                \n
                \n
                " + "smithy.api#documentation": "

                The status code for a URL rewrite or redirect rule.

                \n\n
                \n
                200
                \n
                \n

                Represents a 200 rewrite rule.

                \n
                \n
                301
                \n
                \n

                Represents a 301 (moved permanently) redirect rule. This and all future\n requests should be directed to the target URL.

                \n
                \n
                302
                \n
                \n

                Represents a 302 temporary redirect rule.

                \n
                \n
                404
                \n
                \n

                Represents a 404 redirect rule.

                \n
                \n
                404-200
                \n
                \n

                Represents a 404 rewrite rule.

                \n
                \n
                " } }, "condition": { @@ -2138,7 +2158,8 @@ "smithy.api#length": { "min": 1, "max": 1000 - } + }, + "smithy.api#pattern": "^(?s).+$" } }, "com.amazonaws.amplify#Description": { @@ -2147,7 +2168,8 @@ "smithy.api#length": { "min": 0, "max": 1000 - } + }, + "smithy.api#pattern": "^(?s)" } }, "com.amazonaws.amplify#DisplayName": { @@ -2156,7 +2178,8 @@ "smithy.api#length": { "min": 0, "max": 255 - } + }, + "smithy.api#pattern": "^(?s)" } }, "com.amazonaws.amplify#DomainAssociation": { @@ -2253,8 +2276,9 @@ "traits": { "smithy.api#length": { "min": 0, - "max": 255 - } + "max": 64 + }, + "smithy.api#pattern": "^(((?!-)[A-Za-z0-9-]{0,62}[A-Za-z0-9])\\.)+((?!-)[A-Za-z0-9-]{1,62}[A-Za-z0-9])(\\.)?$" } }, "com.amazonaws.amplify#DomainPrefix": { @@ -2263,7 +2287,8 @@ "smithy.api#length": { "min": 0, "max": 255 - } + }, + "smithy.api#pattern": "^(?s)" } }, "com.amazonaws.amplify#DomainStatus": { @@ -2368,7 +2393,8 @@ "smithy.api#length": { "min": 0, "max": 255 - } + }, + "smithy.api#pattern": "^(?s)" } }, "com.amazonaws.amplify#EnvValue": { @@ -2376,8 +2402,9 @@ "traits": { "smithy.api#length": { "min": 0, - "max": 1000 - } + "max": 5500 + }, + "smithy.api#pattern": "^(?s)" } }, "com.amazonaws.amplify#EnvironmentName": { @@ -2386,7 +2413,8 @@ "smithy.api#length": { "min": 1, "max": 255 - } + }, + "smithy.api#pattern": "^(?s).+$" } }, "com.amazonaws.amplify#EnvironmentVariables": { @@ -2422,7 +2450,8 @@ "smithy.api#length": { "min": 0, "max": 255 - } + }, + "smithy.api#pattern": "^(?s)" } }, "com.amazonaws.amplify#FileUploadUrls": { @@ -2440,7 +2469,8 @@ "smithy.api#length": { "min": 0, "max": 255 - } + }, + "smithy.api#pattern": "^(?s)" } }, "com.amazonaws.amplify#GenerateAccessLogs": { @@ -3051,7 +3081,8 @@ "smithy.api#length": { "min": 0, "max": 255 - } + }, + "smithy.api#pattern": "^[0-9]+$" } }, "com.amazonaws.amplify#JobReason": { @@ -3060,7 +3091,8 @@ "smithy.api#length": { "min": 0, "max": 255 - } + }, + "smithy.api#pattern": "^(?s)" } }, 
"com.amazonaws.amplify#JobStatus": { @@ -3875,7 +3907,8 @@ "smithy.api#length": { "min": 0, "max": 32 - } + }, + "smithy.api#pattern": "^(?s)" } }, "com.amazonaws.amplify#MaxResults": { @@ -3893,7 +3926,8 @@ "smithy.api#length": { "min": 1, "max": 255 - } + }, + "smithy.api#pattern": "^(?s).+$" } }, "com.amazonaws.amplify#NextToken": { @@ -3902,7 +3936,8 @@ "smithy.api#length": { "min": 0, "max": 2000 - } + }, + "smithy.api#pattern": "^(?s)" } }, "com.amazonaws.amplify#NotFoundException": { @@ -3925,6 +3960,7 @@ "min": 0, "max": 1000 }, + "smithy.api#pattern": "^(?s)", "smithy.api#sensitive": {} } }, @@ -3935,6 +3971,10 @@ { "value": "WEB", "name": "WEB" + }, + { + "value": "WEB_DYNAMIC", + "name": "WEB_DYNAMIC" } ] } @@ -3977,7 +4017,8 @@ "smithy.api#length": { "min": 0, "max": 20 - } + }, + "smithy.api#pattern": "^(?s)" } }, "com.amazonaws.amplify#Repository": { @@ -3986,12 +4027,36 @@ "smithy.api#length": { "min": 0, "max": 1000 - } + }, + "smithy.api#pattern": "^(?s)" + } + }, + "com.amazonaws.amplify#RepositoryCloneMethod": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "SSH", + "name": "SSH" + }, + { + "value": "TOKEN", + "name": "TOKEN" + }, + { + "value": "SIGV4", + "name": "SIGV4" + } + ] } }, "com.amazonaws.amplify#ResourceArn": { "type": "string", "traits": { + "smithy.api#length": { + "min": 0, + "max": 2048 + }, "smithy.api#pattern": "^arn:aws:amplify:" } }, @@ -4030,9 +4095,10 @@ "type": "string", "traits": { "smithy.api#length": { - "min": 1, + "min": 0, "max": 1000 - } + }, + "smithy.api#pattern": "^(?s)" } }, "com.amazonaws.amplify#Source": { @@ -4041,7 +4107,8 @@ "smithy.api#length": { "min": 1, "max": 2048 - } + }, + "smithy.api#pattern": "^(?s).+$" } }, "com.amazonaws.amplify#SourceUrl": { @@ -4049,8 +4116,9 @@ "traits": { "smithy.api#length": { "min": 0, - "max": 1000 - } + "max": 3000 + }, + "smithy.api#pattern": "^(?s)" } }, "com.amazonaws.amplify#StackName": { @@ -4059,7 +4127,8 @@ "smithy.api#length": { "min": 1, 
"max": 255 - } + }, + "smithy.api#pattern": "^(?s).+$" } }, "com.amazonaws.amplify#Stage": { @@ -4293,7 +4362,8 @@ "smithy.api#length": { "min": 3, "max": 7 - } + }, + "smithy.api#pattern": "^.{3,7}$" } }, "com.amazonaws.amplify#StatusReason": { @@ -4557,7 +4627,12 @@ "com.amazonaws.amplify#TTL": { "type": "string", "traits": { - "smithy.api#documentation": "

                The content Time to Live (TTL) for the website in seconds.

                " + "smithy.api#documentation": "

                The content Time to Live (TTL) for the website in seconds.

                ", + "smithy.api#length": { + "min": 0, + "max": 32 + }, + "smithy.api#pattern": "^\\d*$" } }, "com.amazonaws.amplify#TagKey": { @@ -4592,7 +4667,7 @@ }, "traits": { "smithy.api#length": { - "min": 1, + "min": 0, "max": 50 } } @@ -4661,7 +4736,8 @@ "smithy.api#length": { "min": 0, "max": 256 - } + }, + "smithy.api#pattern": "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" } }, "com.amazonaws.amplify#Target": { @@ -4670,7 +4746,8 @@ "smithy.api#length": { "min": 1, "max": 2048 - } + }, + "smithy.api#pattern": "^(?s).+$" } }, "com.amazonaws.amplify#TestArtifactsUrl": { @@ -4883,7 +4960,7 @@ "basicAuthCredentials": { "target": "com.amazonaws.amplify#BasicAuthCredentials", "traits": { - "smithy.api#documentation": "

                The basic authorization credentials for an Amplify app. You must base64-encode the authorization credentials and provide them in the format user:password.

                " + "smithy.api#documentation": "

                The basic authorization credentials for an Amplify app. You must base64-encode the\n authorization credentials and provide them in the format\n user:password.

                " } }, "customRules": { @@ -5052,7 +5129,7 @@ "basicAuthCredentials": { "target": "com.amazonaws.amplify#BasicAuthCredentials", "traits": { - "smithy.api#documentation": "

                The basic authorization credentials for the branch. You must base64-encode the authorization credentials and provide them in the format user:password.

                " + "smithy.api#documentation": "

                The basic authorization credentials for the branch. You must base64-encode the\n authorization credentials and provide them in the format\n user:password.

                " } }, "enableBasicAuth": { @@ -5064,7 +5141,7 @@ "enablePerformanceMode": { "target": "com.amazonaws.amplify#EnablePerformanceMode", "traits": { - "smithy.api#documentation": "

                Enables performance mode for the branch.

                \n

                Performance mode optimizes for faster hosting performance by keeping content cached at the edge for a longer interval. When performance mode is enabled, hosting configuration or code changes can take up to 10 minutes to roll out.

                " + "smithy.api#documentation": "

                Enables performance mode for the branch.

                \n

                Performance mode optimizes for faster hosting performance by keeping content cached at\n the edge for a longer interval. When performance mode is enabled, hosting configuration\n or code changes can take up to 10 minutes to roll out.

                " } }, "buildSpec": { @@ -5185,8 +5262,7 @@ "subDomainSettings": { "target": "com.amazonaws.amplify#SubDomainSettings", "traits": { - "smithy.api#documentation": "

                Describes the settings for the subdomain.

                ", - "smithy.api#required": {} + "smithy.api#documentation": "

                Describes the settings for the subdomain.

                " } }, "autoSubDomainCreationPatterns": { @@ -5388,7 +5464,8 @@ "smithy.api#length": { "min": 0, "max": 255 - } + }, + "smithy.api#pattern": "^(?s)" } }, "com.amazonaws.amplify#WebhookUrl": { diff --git a/codegen/sdk-codegen/aws-models/amplifyuibuilder.json b/codegen/sdk-codegen/aws-models/amplifyuibuilder.json index a6f7ed19acec..bc970f109d28 100644 --- a/codegen/sdk-codegen/aws-models/amplifyuibuilder.json +++ b/codegen/sdk-codegen/aws-models/amplifyuibuilder.json @@ -1,6 +1,68 @@ { "smithy": "1.0", "shapes": { + "com.amazonaws.amplifyuibuilder#ActionParameters": { + "type": "structure", + "members": { + "type": { + "target": "com.amazonaws.amplifyuibuilder#ComponentProperty", + "traits": { + "smithy.api#documentation": "

                The type of navigation action. Valid values are url and anchor. This value is required for a navigation action.

                " + } + }, + "url": { + "target": "com.amazonaws.amplifyuibuilder#ComponentProperty", + "traits": { + "smithy.api#documentation": "

                The URL to the location to open. Specify this value for a navigation action.

                " + } + }, + "anchor": { + "target": "com.amazonaws.amplifyuibuilder#ComponentProperty", + "traits": { + "smithy.api#documentation": "

                The HTML anchor link to the location to open. Specify this value for a navigation action.

                " + } + }, + "target": { + "target": "com.amazonaws.amplifyuibuilder#ComponentProperty", + "traits": { + "smithy.api#documentation": "

                The element within the same component to modify when the action occurs.

                " + } + }, + "global": { + "target": "com.amazonaws.amplifyuibuilder#ComponentProperty", + "traits": { + "smithy.api#documentation": "

                Specifies whether the user should be signed out globally. Specify this value for an auth sign out action.

                " + } + }, + "model": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

                The name of the data model. Use when the action performs an operation on an Amplify DataStore\n model.

                " + } + }, + "id": { + "target": "com.amazonaws.amplifyuibuilder#ComponentProperty", + "traits": { + "smithy.api#documentation": "

                The unique ID of the component that the ActionParameters apply to.

                " + } + }, + "fields": { + "target": "com.amazonaws.amplifyuibuilder#ComponentProperties", + "traits": { + "smithy.api#documentation": "

                A dictionary of key-value pairs mapping Amplify Studio properties to fields in a data model. Use when the action\n performs an operation on an Amplify DataStore model.

                " + } + }, + "state": { + "target": "com.amazonaws.amplifyuibuilder#MutationActionSetStateParameter", + "traits": { + "smithy.api#documentation": "

                A key-value pair that specifies the state property name and its initial value.

                " + } + } + }, + "traits": { + "smithy.api#documentation": "

                Represents the event action configuration for an element of a Component\n or ComponentChild. Use for the workflow feature in Amplify Studio\n that allows you to bind events and actions to components. ActionParameters\n defines the action that is performed when an event occurs on the component.

                " + } + }, "com.amazonaws.amplifyuibuilder#AmplifyUIBuilder": { "type": "service", "traits": { @@ -8,15 +70,14 @@ "sdkId": "AmplifyUIBuilder", "arnNamespace": "amplifyuibuilder", "cloudFormationName": "AmplifyUIBuilder", - "cloudTrailEventSource": "amplifyuibuilder.amazonaws.com", - "endpointPrefix": "amplifyuibuilder" + "cloudTrailEventSource": "amplifyuibuilder.amazonaws.com" }, "aws.auth#sigv4": { "name": "amplifyuibuilder" }, "aws.protocols#restJson1": {}, "smithy.api#cors": {}, - "smithy.api#documentation": "

                The Amplify UI Builder API provides a programmatic interface for creating and configuring\n user interface (UI) component libraries and themes for use in your Amplify applications. You\n can then connect these UI components to an application's backend Amazon Web Services\n resources.

                \n

                You can also use the Amplify Studio visual designer to create UI components and model data\n for an app. For more information, see Introduction in the\n Amplify Docs.

                \n

                The Amplify Framework is a comprehensive set of SDKs, libraries, tools, and documentation\n for client app development. For more information, see the Amplify Framework. For more information about\n deploying an Amplify application to Amazon Web Services, see the Amplify Console User Guide.

                ", + "smithy.api#documentation": "

                The Amplify UI Builder API provides a programmatic interface for creating\n and configuring user interface (UI) component libraries and themes for use in your Amplify applications. You can then connect these UI components to an application's\n backend Amazon Web Services resources.

                \n

                You can also use the Amplify Studio visual designer to create UI components\n and model data for an app. For more information, see Introduction in the\n Amplify Docs.

                \n

                The Amplify Framework is a comprehensive set of SDKs, libraries, tools, and\n documentation for client app development. For more information, see the Amplify Framework. For more information about\n deploying an Amplify application to Amazon Web Services, see the Amplify User Guide.

                ", "smithy.api#title": "AWS Amplify UI Builder" }, "version": "2021-08-11", @@ -50,7 +111,7 @@ "environmentName": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

                The name of the backend environment that is a part of the Amplify app.

                ", + "smithy.api#documentation": "

                The name of the backend environment that is a part of the Amplify\n app.

                ", "smithy.api#required": {} } }, @@ -77,14 +138,14 @@ "componentType": { "target": "com.amazonaws.amplifyuibuilder#ComponentType", "traits": { - "smithy.api#documentation": "

                The type of the component. This can be an Amplify custom UI component or another custom\n component.

                ", + "smithy.api#documentation": "

                The type of the component. This can be an Amplify custom UI component or\n another custom component.

                ", "smithy.api#required": {} } }, "properties": { "target": "com.amazonaws.amplifyuibuilder#ComponentProperties", "traits": { - "smithy.api#documentation": "

                Describes the component's properties.

                ", + "smithy.api#documentation": "

                Describes the component's properties. You can't specify tags as a valid\n property for properties.

                ", "smithy.api#required": {} } }, @@ -97,34 +158,34 @@ "variants": { "target": "com.amazonaws.amplifyuibuilder#ComponentVariants", "traits": { - "smithy.api#documentation": "

                A list of the component's variants. A variant is a unique style configuration of a\n main component.

                ", + "smithy.api#documentation": "

                A list of the component's variants. A variant is a unique style configuration of a main\n component.

                ", "smithy.api#required": {} } }, "overrides": { "target": "com.amazonaws.amplifyuibuilder#ComponentOverrides", "traits": { - "smithy.api#documentation": "

                Describes the component's properties that can be overriden in a customized instance of the\n component.

                ", + "smithy.api#documentation": "

                Describes the component's properties that can be overridden in a customized instance of the\n component. You can't specify tags as a valid property for\n overrides.

                ", "smithy.api#required": {} } }, "bindingProperties": { "target": "com.amazonaws.amplifyuibuilder#ComponentBindingProperties", "traits": { - "smithy.api#documentation": "

                The information to connect a component's properties to data at runtime.

                ", + "smithy.api#documentation": "

                The information to connect a component's properties to data at runtime. You can't specify\n tags as a valid property for bindingProperties.

                \n

                ", "smithy.api#required": {} } }, "collectionProperties": { "target": "com.amazonaws.amplifyuibuilder#ComponentCollectionProperties", "traits": { - "smithy.api#documentation": "

                The data binding configuration for the component's properties. Use this for a collection component.

                " + "smithy.api#documentation": "

                The data binding configuration for the component's properties. Use this for a collection\n component. You can't specify tags as a valid property for\n collectionProperties.

                " } }, "createdAt": { "target": "smithy.api#Timestamp", "traits": { - "aws.cloudformation#cfnMutability": "read", + "aws.cloudformation#cfnExcludeProperty": {}, "smithy.api#documentation": "

                The time that the component was created.

                ", "smithy.api#required": {}, "smithy.api#timestampFormat": "date-time" @@ -133,7 +194,7 @@ "modifiedAt": { "target": "smithy.api#Timestamp", "traits": { - "aws.cloudformation#cfnMutability": "read", + "aws.cloudformation#cfnExcludeProperty": {}, "smithy.api#documentation": "

                The time that the component was modified.

                ", "smithy.api#timestampFormat": "date-time" } @@ -144,10 +205,22 @@ "aws.cloudformation#cfnMutability": "create-and-read", "smithy.api#documentation": "

                One or more key-value pairs to use when tagging the component.

                " } + }, + "events": { + "target": "com.amazonaws.amplifyuibuilder#ComponentEvents", + "traits": { + "smithy.api#documentation": "

                Describes the events that can be raised on the component. Use for the workflow feature in Amplify Studio that allows you to\n bind events and actions to components.

                " + } + }, + "schemaVersion": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

                The schema version of the component when it was imported.

                " + } } }, "traits": { - "smithy.api#documentation": "

                Contains the configuration settings for a user interface (UI) element for an Amplify app. A\n component is configured as a primary, stand-alone UI element. Use ComponentChild\n to configure an instance of a Component. A ComponentChild instance\n inherits the configuration of the main Component.

                ", + "smithy.api#documentation": "

                Contains the configuration settings for a user interface (UI) element for an Amplify app. A component is configured as a primary, stand-alone UI element. Use\n ComponentChild to configure an instance of a Component. A\n ComponentChild instance inherits the configuration of the main\n Component.

                ", "smithy.api#references": [ { "resource": "com.amazonaws.amplifyuibuilder#ComponentResource" @@ -260,7 +333,7 @@ "properties": { "target": "com.amazonaws.amplifyuibuilder#ComponentProperties", "traits": { - "smithy.api#documentation": "

                Describes the properties of the child component.

                ", + "smithy.api#documentation": "

                Describes the properties of the child component. You can't specify tags as a\n valid property for properties.

                ", "smithy.api#required": {} } }, @@ -269,6 +342,12 @@ "traits": { "smithy.api#documentation": "

                The list of ComponentChild instances for this component.

                " } + }, + "events": { + "target": "com.amazonaws.amplifyuibuilder#ComponentEvents", + "traits": { + "smithy.api#documentation": "

                Describes the events that can be raised on the child component. Use for the workflow feature in Amplify Studio that allows you to\n bind events and actions to components.

                " + } } }, "traits": { @@ -328,6 +407,12 @@ "traits": { "smithy.api#documentation": "

                The value to assign to the property if the condition is not met.

                " } + }, + "operandType": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

                The type of the property to evaluate.

                " + } } }, "traits": { @@ -367,6 +452,35 @@ "smithy.api#documentation": "

                Describes the configuration for binding a component's properties to data.

                " } }, + "com.amazonaws.amplifyuibuilder#ComponentEvent": { + "type": "structure", + "members": { + "action": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

                The action to perform when a specific event is raised.

                " + } + }, + "parameters": { + "target": "com.amazonaws.amplifyuibuilder#ActionParameters", + "traits": { + "smithy.api#documentation": "

                Describes information about the action.

                " + } + } + }, + "traits": { + "smithy.api#documentation": "

                Describes the configuration of an event. You can bind an event and a corresponding\n action to a Component or a ComponentChild. A button click\n is an example of an event.

                " + } + }, + "com.amazonaws.amplifyuibuilder#ComponentEvents": { + "type": "map", + "key": { + "target": "smithy.api#String" + }, + "value": { + "target": "com.amazonaws.amplifyuibuilder#ComponentEvent" + } + }, "com.amazonaws.amplifyuibuilder#ComponentList": { "type": "list", "member": { @@ -427,7 +541,7 @@ "collectionBindingProperties": { "target": "com.amazonaws.amplifyuibuilder#ComponentPropertyBindingProperties", "traits": { - "smithy.api#documentation": "

                The information to bind the component property to data at runtime. Use this for collection components.

                " + "smithy.api#documentation": "

                The information to bind the component property to data at runtime. Use this for collection\n components.

                " } }, "defaultValue": { @@ -463,19 +577,19 @@ "concat": { "target": "com.amazonaws.amplifyuibuilder#ComponentPropertyList", "traits": { - "smithy.api#documentation": "

                A list of component properties to concatenate to create the value to assign to this component property.

                " + "smithy.api#documentation": "

                A list of component properties to concatenate to create the value to assign to this\n component property.

                " } }, "condition": { "target": "com.amazonaws.amplifyuibuilder#ComponentConditionProperty", "traits": { - "smithy.api#documentation": "

                The conditional expression to use to assign a value to the component property..

                " + "smithy.api#documentation": "

                The conditional expression to use to assign a value to the component property.

                " } }, "configured": { "target": "smithy.api#Boolean", "traits": { - "smithy.api#documentation": "

                Specifies whether the user configured the property in Amplify Studio after importing it.

                " + "smithy.api#documentation": "

                Specifies whether the user configured the property in Amplify Studio after\n importing it.

                " } }, "type": { @@ -487,12 +601,24 @@ "importedValue": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

                The default value assigned to property when the component is imported into an app.

                " + "smithy.api#documentation": "

                The default value assigned to the property when the component is imported into an\n app.

                " + } + }, + "componentName": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

                The name of the component that is affected by an event.

                " + } + }, + "property": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

                The name of the component's property that is affected by an event.

                " } } }, "traits": { - "smithy.api#documentation": "

                Describes the configuration for all of a component's properties. Use\n ComponentProperty to specify the values to render or bind by\n default.

                " + "smithy.api#documentation": "

                Describes the configuration for all of a component's properties. Use\n ComponentProperty to specify the values to render or bind by default.

                " } }, "com.amazonaws.amplifyuibuilder#ComponentPropertyBindingProperties": { @@ -557,14 +683,11 @@ ], "traits": { "aws.api#arn": { - "template": "app/{appId}/environment/{environmentName}/components/{id}", - "absolute": false, - "noAccount": false, - "noRegion": false + "template": "app/{appId}/environment/{environmentName}/components/{id}" }, "aws.cloudformation#cfnResource": { - "additionalSchemas": ["com.amazonaws.amplifyuibuilder#Component"], - "name": "Component" + "name": "Component", + "additionalSchemas": ["com.amazonaws.amplifyuibuilder#Component"] } } }, @@ -581,7 +704,7 @@ "environmentName": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

                The name of the backend environment that is a part of the Amplify app.

                ", + "smithy.api#documentation": "

                The name of the backend environment that is a part of the Amplify\n app.

                ", "smithy.api#required": {} } }, @@ -632,13 +755,13 @@ "variantValues": { "target": "com.amazonaws.amplifyuibuilder#ComponentVariantValues", "traits": { - "smithy.api#documentation": "

                The combination of variants that comprise this variant.

                " + "smithy.api#documentation": "

                The combination of variants that comprise this variant. You can't specify\n tags as a valid property for variantValues.

                " } }, "overrides": { "target": "com.amazonaws.amplifyuibuilder#ComponentOverrides", "traits": { - "smithy.api#documentation": "

                The properties of the component variant that can be overriden when customizing an instance\n of the component.

                " + "smithy.api#documentation": "

                The properties of the component variant that can be overridden when customizing an instance\n of the component. You can't specify tags as a valid property for\n overrides.

                " } } }, @@ -684,11 +807,11 @@ } ], "traits": { + "aws.iam#requiredActions": ["amplify:GetApp"], "smithy.api#documentation": "

                Creates a new component for an Amplify app.

                ", "smithy.api#http": { - "method": "POST", "uri": "/app/{appId}/environment/{environmentName}/components", - "code": 200 + "method": "POST" }, "smithy.api#idempotent": {} } @@ -712,7 +835,7 @@ "componentType": { "target": "com.amazonaws.amplifyuibuilder#ComponentType", "traits": { - "smithy.api#documentation": "

                The component type. This can be an Amplify custom UI component or another custom\n component.

                ", + "smithy.api#documentation": "

                The component type. This can be an Amplify custom UI component or another\n custom component.

                ", "smithy.api#required": {} } }, @@ -753,7 +876,7 @@ "collectionProperties": { "target": "com.amazonaws.amplifyuibuilder#ComponentCollectionProperties", "traits": { - "smithy.api#documentation": "

                The data binding configuration for customizing a component's properties. Use this for a collection component.

                " + "smithy.api#documentation": "

                The data binding configuration for customizing a component's properties. Use this for a\n collection component.

                " } }, "tags": { @@ -761,6 +884,18 @@ "traits": { "smithy.api#documentation": "

                One or more key-value pairs to use when tagging the component data.

                " } + }, + "events": { + "target": "com.amazonaws.amplifyuibuilder#ComponentEvents", + "traits": { + "smithy.api#documentation": "

                The event configuration for the component. Use for the workflow feature in Amplify Studio that allows you to bind events and actions to components.

                " + } + }, + "schemaVersion": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

                The schema version of the component when it was imported.

                " + } } }, "traits": { @@ -781,7 +916,7 @@ "environmentName": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

                The name of the backend environment that is a part of the Amplify app.

                ", + "smithy.api#documentation": "

                The name of the backend environment that is a part of the Amplify\n app.

                ", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -841,11 +976,11 @@ } ], "traits": { + "aws.iam#requiredActions": ["amplify:GetApp"], "smithy.api#documentation": "

                Creates a theme to apply to the components in an Amplify app.

                ", "smithy.api#http": { - "method": "POST", "uri": "/app/{appId}/environment/{environmentName}/themes", - "code": 200 + "method": "POST" }, "smithy.api#idempotent": {} } @@ -898,7 +1033,7 @@ "environmentName": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

                The name of the backend environment that is a part of the Amplify app.

                ", + "smithy.api#documentation": "

                The name of the backend environment that is a part of the Amplify\n app.

                ", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -940,6 +1075,9 @@ "input": { "target": "com.amazonaws.amplifyuibuilder#DeleteComponentRequest" }, + "output": { + "target": "smithy.api#Unit" + }, "errors": [ { "target": "com.amazonaws.amplifyuibuilder#InternalServerException" @@ -952,11 +1090,11 @@ } ], "traits": { + "aws.iam#requiredActions": ["amplify:GetApp"], "smithy.api#documentation": "

                Deletes a component from an Amplify app.

                ", "smithy.api#http": { - "method": "DELETE", "uri": "/app/{appId}/environment/{environmentName}/components/{id}", - "code": 200 + "method": "DELETE" }, "smithy.api#idempotent": {} } @@ -967,7 +1105,7 @@ "appId": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

                The unique ID of the Amplify app associated with the component to delete.

                ", + "smithy.api#documentation": "

                The unique ID of the Amplify app associated with the component to\n delete.

                ", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -975,7 +1113,7 @@ "environmentName": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

                The name of the backend environment that is a part of the Amplify app.

                ", + "smithy.api#documentation": "

                The name of the backend environment that is a part of the Amplify\n app.

                ", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -995,6 +1133,9 @@ "input": { "target": "com.amazonaws.amplifyuibuilder#DeleteThemeRequest" }, + "output": { + "target": "smithy.api#Unit" + }, "errors": [ { "target": "com.amazonaws.amplifyuibuilder#InternalServerException" @@ -1007,11 +1148,11 @@ } ], "traits": { + "aws.iam#requiredActions": ["amplify:GetApp"], "smithy.api#documentation": "

                Deletes a theme from an Amplify app.

                ", "smithy.api#http": { - "method": "DELETE", "uri": "/app/{appId}/environment/{environmentName}/themes/{id}", - "code": 200 + "method": "DELETE" }, "smithy.api#idempotent": {} } @@ -1022,7 +1163,7 @@ "appId": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

                The unique ID of the Amplify app associated with the theme to delete.

                ", + "smithy.api#documentation": "

                The unique ID of the Amplify app associated with the theme to\n delete.

                ", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -1030,7 +1171,7 @@ "environmentName": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

                The name of the backend environment that is a part of the Amplify app.

                ", + "smithy.api#documentation": "

                The name of the backend environment that is a part of the Amplify\n app.

                ", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -1061,9 +1202,8 @@ "traits": { "smithy.api#documentation": "

                Exchanges an access code for a token.

                ", "smithy.api#http": { - "method": "POST", "uri": "/tokens/{provider}", - "code": 200 + "method": "POST" } } }, @@ -1156,11 +1296,15 @@ } ], "traits": { - "smithy.api#documentation": "

                Exports component configurations to code that is ready to integrate into an Amplify\n app.

                ", + "smithy.api#documentation": "

                Exports component configurations to code that is ready to integrate into an Amplify app.

                ", "smithy.api#http": { - "method": "GET", "uri": "/export/app/{appId}/environment/{environmentName}/components", - "code": 200 + "method": "GET" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "items": "entities" }, "smithy.api#readonly": {} } @@ -1179,10 +1323,17 @@ "environmentName": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

                The name of the backend environment that is a part of the Amplify app.

                ", + "smithy.api#documentation": "

                The name of the backend environment that is a part of the Amplify\n app.

                ", "smithy.api#httpLabel": {}, "smithy.api#required": {} } + }, + "nextToken": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

                The token to request the next page of results.

                ", + "smithy.api#httpQuery": "nextToken" + } } } }, @@ -1195,6 +1346,12 @@ "smithy.api#documentation": "

                Represents the configuration of the exported components.

                ", "smithy.api#required": {} } + }, + "nextToken": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

                The pagination token that's included if more results are available.

                " + } } } }, @@ -1215,11 +1372,15 @@ } ], "traits": { - "smithy.api#documentation": "

                Exports theme configurations to code that is ready to integrate into an Amplify\n app.

                ", + "smithy.api#documentation": "

                Exports theme configurations to code that is ready to integrate into an Amplify app.

                ", "smithy.api#http": { - "method": "GET", "uri": "/export/app/{appId}/environment/{environmentName}/themes", - "code": 200 + "method": "GET" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "items": "entities" }, "smithy.api#readonly": {} } @@ -1242,6 +1403,13 @@ "smithy.api#httpLabel": {}, "smithy.api#required": {} } + }, + "nextToken": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

                The token to request the next page of results.

                ", + "smithy.api#httpQuery": "nextToken" + } } } }, @@ -1254,6 +1422,12 @@ "smithy.api#documentation": "

                Represents the configuration of the exported themes.

                ", "smithy.api#required": {} } + }, + "nextToken": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

                The pagination token that's included if more results are available.

                " + } } } }, @@ -1308,11 +1482,11 @@ } ], "traits": { + "aws.iam#requiredActions": ["amplify:GetApp"], "smithy.api#documentation": "

                Returns an existing component for an Amplify app.

                ", "smithy.api#http": { - "method": "GET", "uri": "/app/{appId}/environment/{environmentName}/components/{id}", - "code": 200 + "method": "GET" }, "smithy.api#readonly": {} } @@ -1379,11 +1553,11 @@ } ], "traits": { + "aws.iam#requiredActions": ["amplify:GetApp"], "smithy.api#documentation": "

                Returns an existing theme for an Amplify app.

                ", "smithy.api#http": { - "method": "GET", "uri": "/app/{appId}/environment/{environmentName}/themes/{id}", - "code": 200 + "method": "GET" }, "smithy.api#readonly": {} } @@ -1479,17 +1653,17 @@ } ], "traits": { - "smithy.api#documentation": "

                Retrieves a list of components for a specified Amplify app and backend environment.

                ", + "aws.iam#requiredActions": ["amplify:GetApp"], + "smithy.api#documentation": "

                Retrieves a list of components for a specified Amplify app and backend\n environment.

                ", "smithy.api#http": { - "method": "GET", "uri": "/app/{appId}/environment/{environmentName}/components", - "code": 200 + "method": "GET" }, "smithy.api#paginated": { "inputToken": "nextToken", "outputToken": "nextToken", - "items": "entities", - "pageSize": "maxResults" + "pageSize": "maxResults", + "items": "entities" }, "smithy.api#readonly": {} } @@ -1517,7 +1691,7 @@ "environmentName": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

                The name of the backend environment that is a part of the Amplify app.

                ", + "smithy.api#documentation": "

                The name of the backend environment that is a part of the Amplify\n app.

                ", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -1573,17 +1747,17 @@ } ], "traits": { - "smithy.api#documentation": "

                Retrieves a list of themes for a specified Amplify app and backend environment.

                ", + "aws.iam#requiredActions": ["amplify:GetApp"], + "smithy.api#documentation": "

                Retrieves a list of themes for a specified Amplify app and backend\n environment.

                ", "smithy.api#http": { - "method": "GET", "uri": "/app/{appId}/environment/{environmentName}/themes", - "code": 200 + "method": "GET" }, "smithy.api#paginated": { "inputToken": "nextToken", "outputToken": "nextToken", - "items": "entities", - "pageSize": "maxResults" + "pageSize": "maxResults", + "items": "entities" }, "smithy.api#readonly": {} } @@ -1611,7 +1785,7 @@ "environmentName": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

                The name of the backend environment that is a part of the Amplify app.

                ", + "smithy.api#documentation": "

                The name of the backend environment that is a part of the Amplify\n app.

                ", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -1650,6 +1824,35 @@ } } }, + "com.amazonaws.amplifyuibuilder#MutationActionSetStateParameter": { + "type": "structure", + "members": { + "componentName": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

                The name of the component that is being modified.

                ", + "smithy.api#required": {} + } + }, + "property": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

                The name of the component property to apply the state configuration to.

                ", + "smithy.api#required": {} + } + }, + "set": { + "target": "com.amazonaws.amplifyuibuilder#ComponentProperty", + "traits": { + "smithy.api#documentation": "

                The state configuration to assign to the property.

                ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                Represents the state configuration when an action modifies a property of another\n element within the same component.

                " + } + }, "com.amazonaws.amplifyuibuilder#Predicate": { "type": "structure", "members": { @@ -1685,7 +1888,7 @@ } }, "traits": { - "smithy.api#documentation": "

                Stores information for generating Amplify DataStore queries. Use a Predicate\n to retrieve a subset of the data in a collection.

                " + "smithy.api#documentation": "

                Stores information for generating Amplify DataStore queries. Use a\n Predicate to retrieve a subset of the data in a collection.

                " } }, "com.amazonaws.amplifyuibuilder#PredicateList": { @@ -1710,9 +1913,8 @@ "traits": { "smithy.api#documentation": "

                Refreshes a previously issued access token that might have expired.

                ", "smithy.api#http": { - "method": "POST", "uri": "/tokens/{provider}/refresh", - "code": 200 + "method": "POST" } } }, @@ -1894,7 +2096,7 @@ "environmentName": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

                The name of the backend environment that is a part of the Amplify app.

                ", + "smithy.api#documentation": "

                The name of the backend environment that is a part of the Amplify\n app.

                ", "smithy.api#required": {} } }, @@ -1915,7 +2117,7 @@ "createdAt": { "target": "smithy.api#Timestamp", "traits": { - "aws.cloudformation#cfnMutability": "read", + "aws.cloudformation#cfnExcludeProperty": {}, "smithy.api#documentation": "

                The time that the theme was created.

                ", "smithy.api#required": {}, "smithy.api#timestampFormat": "date-time" @@ -1924,7 +2126,7 @@ "modifiedAt": { "target": "smithy.api#Timestamp", "traits": { - "aws.cloudformation#cfnMutability": "read", + "aws.cloudformation#cfnExcludeProperty": {}, "smithy.api#documentation": "

                The time that the theme was modified.

                ", "smithy.api#timestampFormat": "date-time" } @@ -2009,14 +2211,11 @@ ], "traits": { "aws.api#arn": { - "template": "app/{appId}/environment/{environmentName}/themes/{id}", - "absolute": false, - "noAccount": false, - "noRegion": false + "template": "app/{appId}/environment/{environmentName}/themes/{id}" }, "aws.cloudformation#cfnResource": { - "additionalSchemas": ["com.amazonaws.amplifyuibuilder#Theme"], - "name": "Theme" + "name": "Theme", + "additionalSchemas": ["com.amazonaws.amplifyuibuilder#Theme"] } } }, @@ -2140,11 +2339,11 @@ } ], "traits": { + "aws.iam#requiredActions": ["amplify:GetApp"], "smithy.api#documentation": "

                Updates an existing component.

                ", "smithy.api#http": { - "method": "PATCH", "uri": "/app/{appId}/environment/{environmentName}/components/{id}", - "code": 200 + "method": "PATCH" }, "smithy.api#idempotent": {} } @@ -2173,7 +2372,7 @@ "componentType": { "target": "com.amazonaws.amplifyuibuilder#ComponentType", "traits": { - "smithy.api#documentation": "

                The type of the component. This can be an Amplify custom UI component or another custom\n component.

                " + "smithy.api#documentation": "

                The type of the component. This can be an Amplify custom UI component or\n another custom component.

                " } }, "properties": { @@ -2209,7 +2408,19 @@ "collectionProperties": { "target": "com.amazonaws.amplifyuibuilder#ComponentCollectionProperties", "traits": { - "smithy.api#documentation": "

                The configuration for binding a component's properties to a data model. Use this for a collection component.

                " + "smithy.api#documentation": "

                The configuration for binding a component's properties to a data model. Use this for a\n collection component.

                " + } + }, + "events": { + "target": "com.amazonaws.amplifyuibuilder#ComponentEvents", + "traits": { + "smithy.api#documentation": "

                The event configuration for the component. Use for the workflow feature in Amplify Studio that allows you to bind events and actions to components.

                " + } + }, + "schemaVersion": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

                The schema version of the component when it was imported.

                " } } }, @@ -2296,11 +2507,11 @@ } ], "traits": { + "aws.iam#requiredActions": ["amplify:GetApp"], "smithy.api#documentation": "

                Updates an existing theme.

                ", "smithy.api#http": { - "method": "PATCH", "uri": "/app/{appId}/environment/{environmentName}/themes/{id}", - "code": 200 + "method": "PATCH" }, "smithy.api#idempotent": {} } diff --git a/codegen/sdk-codegen/aws-models/appflow.json b/codegen/sdk-codegen/aws-models/appflow.json index c0f2ea4d6fd1..9f7022845618 100644 --- a/codegen/sdk-codegen/aws-models/appflow.json +++ b/codegen/sdk-codegen/aws-models/appflow.json @@ -3173,6 +3173,12 @@ "smithy.api#documentation": "

                The properties required to query Zendesk.

                " } }, + "Marketo": { + "target": "com.amazonaws.appflow#MarketoDestinationProperties", + "traits": { + "smithy.api#documentation": "

                The properties required to query Marketo.

                " + } + }, "CustomConnector": { "target": "com.amazonaws.appflow#CustomConnectorDestinationProperties", "traits": { @@ -4750,6 +4756,24 @@ "smithy.api#documentation": "

                The connector-specific profile properties required when using Marketo.

                " } }, + "com.amazonaws.appflow#MarketoDestinationProperties": { + "type": "structure", + "members": { + "object": { + "target": "com.amazonaws.appflow#Object", + "traits": { + "smithy.api#documentation": "

                The object specified in the Marketo flow destination.

                ", + "smithy.api#required": {} + } + }, + "errorHandlingConfig": { + "target": "com.amazonaws.appflow#ErrorHandlingConfig" + } + }, + "traits": { + "smithy.api#documentation": "

                The properties that Amazon AppFlow applies when you use Marketo as a flow destination.

                " + } + }, "com.amazonaws.appflow#MarketoMetadata": { "type": "structure", "members": {}, diff --git a/codegen/sdk-codegen/aws-models/athena.json b/codegen/sdk-codegen/aws-models/athena.json index 31af73ed89f9..257d9917a516 100644 --- a/codegen/sdk-codegen/aws-models/athena.json +++ b/codegen/sdk-codegen/aws-models/athena.json @@ -29,6 +29,21 @@ ] }, "shapes": { + "com.amazonaws.athena#AclConfiguration": { + "type": "structure", + "members": { + "S3AclOption": { + "target": "com.amazonaws.athena#S3AclOption", + "traits": { + "smithy.api#documentation": "

                The Amazon S3 canned ACL that Athena should specify when storing\n query results. Currently the only supported canned ACL is\n BUCKET_OWNER_FULL_CONTROL. If a query runs in a workgroup and the\n workgroup overrides client-side settings, then the Amazon S3 canned ACL\n specified in the workgroup's settings is used for all queries that run in the workgroup.\n For more information about Amazon S3 canned ACLs, see Canned ACL in the Amazon S3 User\n Guide.

                ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                Indicates that an Amazon S3 canned ACL should be set to control ownership of\n stored query results. When Athena stores query results in Amazon S3,\n the canned ACL is set with the x-amz-acl request header. For more\n information about S3 Object Ownership, see Object Ownership settings in the Amazon S3 User\n Guide.

                " + } + }, "com.amazonaws.athena#AmazonAthena": { "type": "service", "traits": { @@ -144,6 +159,9 @@ { "target": "com.amazonaws.athena#UpdateDataCatalog" }, + { + "target": "com.amazonaws.athena#UpdateNamedQuery" + }, { "target": "com.amazonaws.athena#UpdatePreparedStatement" }, @@ -167,7 +185,7 @@ "ErrorCategory": { "target": "com.amazonaws.athena#ErrorCategory", "traits": { - "smithy.api#documentation": "

                An integer value that specifies the category of a query failure error. The following\n list shows the category for each integer value.

                \n

                \n 1 - System

                \n

                \n 2 - User

                \n

                \n 3 - Unknown

                " + "smithy.api#documentation": "

                An integer value that specifies the category of a query failure error. The following\n list shows the category for each integer value.

                \n

                \n 1 - System

                \n

                \n 2 - User

                \n

                \n 3 - Other

                " } }, "ErrorType": { @@ -178,7 +196,7 @@ } }, "traits": { - "smithy.api#documentation": "

                Provides information about an Athena query error. The\n AthenaError feature provides standardized error information to help you\n understand failed queries and take steps after a query failure occurs.\n AthenaError includes an ErrorCategory field that specifies\n whether the cause of the failed query is due to system error, user error, or unknown\n error.

                " + "smithy.api#documentation": "

                Provides information about an Athena query error. The\n AthenaError feature provides standardized error information to help you\n understand failed queries and take steps after a query failure occurs.\n AthenaError includes an ErrorCategory field that specifies\n whether the cause of the failed query is due to system error, user error, or other\n error.

                " } }, "com.amazonaws.athena#BatchGetNamedQuery": { @@ -2320,7 +2338,7 @@ "QueryString": { "target": "com.amazonaws.athena#QueryString", "traits": { - "smithy.api#documentation": "

                The SQL query statements that comprise the query.

                ", + "smithy.api#documentation": "

                The SQL statements that make up the query.

                ", "smithy.api#required": {} } }, @@ -2338,7 +2356,16 @@ } }, "traits": { - "smithy.api#documentation": "

                A query, where QueryString is the list of SQL query statements that\n comprise the query.

                " + "smithy.api#documentation": "

                A query, where QueryString contains the SQL statements that\n make up the query.

                " + } + }, + "com.amazonaws.athena#NamedQueryDescriptionString": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 1024 + } } }, "com.amazonaws.athena#NamedQueryId": { @@ -2712,6 +2739,12 @@ "traits": { "smithy.api#documentation": "

                The Amazon Web Services account ID that you expect to be the owner of the Amazon S3 bucket specified by ResultConfiguration$OutputLocation.\n If set, Athena uses the value for ExpectedBucketOwner when it\n makes Amazon S3 calls to your specified output location. If the\n ExpectedBucketOwner\n Amazon Web Services account ID does not match the actual owner of the Amazon S3\n bucket, the call fails with a permissions error.

                \n

                This is a client-side setting. If workgroup settings override client-side settings,\n then the query uses the ExpectedBucketOwner setting that is specified for\n the workgroup, and also uses the location for storing query results specified in the\n workgroup. See WorkGroupConfiguration$EnforceWorkGroupConfiguration\n and Workgroup Settings Override Client-Side Settings.

                " } + }, + "AclConfiguration": { + "target": "com.amazonaws.athena#AclConfiguration", + "traits": { + "smithy.api#documentation": "

                Indicates that an Amazon S3 canned ACL should be set to control ownership of\n stored query results. Currently the only supported canned ACL is\n BUCKET_OWNER_FULL_CONTROL. This is a client-side setting. If workgroup\n settings override client-side settings, then the query uses the ACL configuration that\n is specified for the workgroup, and also uses the location for storing query results\n specified in the workgroup. For more information, see WorkGroupConfiguration$EnforceWorkGroupConfiguration and Workgroup Settings Override Client-Side Settings.

                " + } } }, "traits": { @@ -2756,6 +2789,18 @@ "traits": { "smithy.api#documentation": "

                If set to \"true\", removes the Amazon Web Services account ID previously specified for\n ResultConfiguration$ExpectedBucketOwner. If set to \"false\" or not\n set, and a value is present in the ExpectedBucketOwner in\n ResultConfigurationUpdates (the client-side setting), the\n ExpectedBucketOwner in the workgroup's ResultConfiguration\n is updated with the new value. For more information, see Workgroup Settings Override\n Client-Side Settings.

                " } + }, + "AclConfiguration": { + "target": "com.amazonaws.athena#AclConfiguration", + "traits": { + "smithy.api#documentation": "

                The ACL configuration for the query results.

                " + } + }, + "RemoveAclConfiguration": { + "target": "com.amazonaws.athena#BoxedBoolean", + "traits": { + "smithy.api#documentation": "

                If set to true, indicates that the previously-specified ACL configuration\n for queries in this workgroup should be ignored and set to null. If set to\n false or not set, and a value is present in the\n AclConfiguration of ResultConfigurationUpdates, the\n AclConfiguration in the workgroup's ResultConfiguration is\n updated with the new value. For more information, see Workgroup Settings Override\n Client-Side Settings.

                " + } } }, "traits": { @@ -2779,7 +2824,7 @@ } }, "traits": { - "smithy.api#documentation": "

                The metadata and rows that comprise a query result set. The metadata describes the\n column structure and data types. To return a ResultSet object, use GetQueryResults.

                " + "smithy.api#documentation": "

                The metadata and rows that make up a query result set. The metadata describes the\n column structure and data types. To return a ResultSet object, use GetQueryResults.

                " } }, "com.amazonaws.athena#ResultSetMetadata": { @@ -2807,7 +2852,7 @@ } }, "traits": { - "smithy.api#documentation": "

                The rows that comprise a query result table.

                " + "smithy.api#documentation": "

                The rows that make up a query result table.

                " } }, "com.amazonaws.athena#RowList": { @@ -2816,6 +2861,17 @@ "target": "com.amazonaws.athena#Row" } }, + "com.amazonaws.athena#S3AclOption": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "BUCKET_OWNER_FULL_CONTROL", + "name": "BUCKET_OWNER_FULL_CONTROL" + } + ] + } + }, "com.amazonaws.athena#StartQueryExecution": { "type": "operation", "input": { @@ -3334,6 +3390,63 @@ "type": "structure", "members": {} }, + "com.amazonaws.athena#UpdateNamedQuery": { + "type": "operation", + "input": { + "target": "com.amazonaws.athena#UpdateNamedQueryInput" + }, + "output": { + "target": "com.amazonaws.athena#UpdateNamedQueryOutput" + }, + "errors": [ + { + "target": "com.amazonaws.athena#InternalServerException" + }, + { + "target": "com.amazonaws.athena#InvalidRequestException" + } + ], + "traits": { + "smithy.api#documentation": "

                Updates a NamedQuery object. The database or workgroup cannot be updated.

                ", + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.athena#UpdateNamedQueryInput": { + "type": "structure", + "members": { + "NamedQueryId": { + "target": "com.amazonaws.athena#NamedQueryId", + "traits": { + "smithy.api#documentation": "

                The unique identifier (UUID) of the query.

                ", + "smithy.api#required": {} + } + }, + "Name": { + "target": "com.amazonaws.athena#NameString", + "traits": { + "smithy.api#documentation": "

                The name of the query.

                ", + "smithy.api#required": {} + } + }, + "Description": { + "target": "com.amazonaws.athena#NamedQueryDescriptionString", + "traits": { + "smithy.api#documentation": "

                The query description.

                " + } + }, + "QueryString": { + "target": "com.amazonaws.athena#QueryString", + "traits": { + "smithy.api#documentation": "

                The contents of the query with all query statements.

                ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.athena#UpdateNamedQueryOutput": { + "type": "structure", + "members": {} + }, "com.amazonaws.athena#UpdatePreparedStatement": { "type": "operation", "input": { diff --git a/codegen/sdk-codegen/aws-models/chime-sdk-meetings.json b/codegen/sdk-codegen/aws-models/chime-sdk-meetings.json index 43ad5207e261..0a787f996fad 100644 --- a/codegen/sdk-codegen/aws-models/chime-sdk-meetings.json +++ b/codegen/sdk-codegen/aws-models/chime-sdk-meetings.json @@ -83,7 +83,7 @@ } }, "traits": { - "smithy.api#documentation": "

                An optional category of meeting features that contains audio-specific configurations, such as operating parameters for Amazon Voice Focus.

                " + "smithy.api#documentation": "

                An optional category of meeting features that contains audio-specific configurations, such as operating parameters for Amazon Voice Focus.

                " } }, "com.amazonaws.chimesdkmeetings#BadRequestException": { @@ -205,7 +205,7 @@ "name": "chime" }, "aws.protocols#restJson1": {}, - "smithy.api#documentation": "

                The Amazon Chime SDK meetings APIs in this section allow software developers to create Amazon Chime SDK meetings, set the AWS Regions for meetings, create and manage users, and send and \n receive meeting notifications. For more information \n about the meeting APIs, see Amazon Chime SDK meetings.

                ", + "smithy.api#documentation": "

                The Amazon Chime SDK meetings APIs in this section allow software developers to create Amazon Chime SDK meetings, set the AWS Regions for meetings, create and manage users, and send and \n receive meeting notifications. For more information about the meeting APIs, see Amazon Chime SDK meetings.

                ", "smithy.api#title": "Amazon Chime SDK Meetings" }, "version": "2021-07-15", @@ -425,7 +425,7 @@ "MediaRegion": { "target": "com.amazonaws.chimesdkmeetings#MediaRegion", "traits": { - "smithy.api#documentation": "

                The Region in which to create the meeting.

                \n \n

                \n Available values: \n af-south-1\n , \n ap-northeast-1\n , \n ap-northeast-2\n , \n ap-south-1\n , \n ap-southeast-1\n , \n ap-southeast-2\n , \n ca-central-1\n , \n eu-central-1\n , \n eu-north-1\n , \n eu-south-1\n , \n eu-west-1\n , \n eu-west-2\n , \n eu-west-3\n , \n sa-east-1\n , \n us-east-1\n , \n us-east-2\n , \n us-west-1\n , \n us-west-2\n .\n

                ", + "smithy.api#documentation": "

                The Region in which to create the meeting.

                \n \n

                \n Available values: \n af-south-1, \n ap-northeast-1, \n ap-northeast-2, \n ap-south-1, \n ap-southeast-1, \n ap-southeast-2, \n ca-central-1, \n eu-central-1, \n eu-north-1, \n eu-south-1, \n eu-west-1, \n eu-west-2, \n eu-west-3, \n sa-east-1, \n us-east-1, \n us-east-2, \n us-west-1, \n us-west-2.\n

                \n

                Available values in AWS GovCloud (US) Regions: us-gov-east-1, us-gov-west-1.

                ", "smithy.api#required": {} } }, @@ -512,7 +512,7 @@ "MediaRegion": { "target": "com.amazonaws.chimesdkmeetings#MediaRegion", "traits": { - "smithy.api#documentation": "

                The Region in which to create the meeting.

                ", + "smithy.api#documentation": "

                The Region in which to create the meeting.

                \n \n

                \n Available values: \n af-south-1, \n ap-northeast-1, \n ap-northeast-2, \n ap-south-1, \n ap-southeast-1, \n ap-southeast-2, \n ca-central-1, \n eu-central-1, \n eu-north-1, \n eu-south-1, \n eu-west-1, \n eu-west-2, \n eu-west-3, \n sa-east-1, \n us-east-1, \n us-east-2, \n us-west-1, \n us-west-2.\n

                \n

                Available values in AWS GovCloud (US) Regions: us-gov-east-1, us-gov-west-1.

                ", "smithy.api#required": {} } }, @@ -716,8 +716,7 @@ "LanguageCode": { "target": "com.amazonaws.chimesdkmeetings#TranscribeLanguageCode", "traits": { - "smithy.api#documentation": "

                The language code specified for the Amazon Transcribe engine.

                ", - "smithy.api#required": {} + "smithy.api#documentation": "

                The language code specified for the Amazon Transcribe engine.

                " } }, "VocabularyFilterMethod": { @@ -771,7 +770,7 @@ "PiiEntityTypes": { "target": "com.amazonaws.chimesdkmeetings#TranscribePiiEntityTypes", "traits": { - "smithy.api#documentation": "

                Lists the PII entity types you want to identify or redact. To specify entity types, you must enable ContentIdentificationType or ContentRedactionType.

                \n \n

                PIIEntityTypes must be comma-separated. The available values are: \n BANK_ACCOUNT_NUMBER, BANK_ROUTING, CREDIT_DEBIT_NUMBER, CREDIT_DEBIT_CVV, CREDIT_DEBIT_EXPIRY, PIN, EMAIL, \n ADDRESS, NAME, PHONE, SSN, and ALL.

                \n \n

                \n PiiEntityTypes is an optional parameter with a default value of ALL.

                " + "smithy.api#documentation": "

                Lists the PII entity types you want to identify or redact. To specify entity types, you must enable ContentIdentificationType or ContentRedactionType.

                \n \n

                \n PIIEntityTypes must be comma-separated. The available values are: \n BANK_ACCOUNT_NUMBER, BANK_ROUTING, CREDIT_DEBIT_NUMBER, CREDIT_DEBIT_CVV, CREDIT_DEBIT_EXPIRY, PIN, EMAIL, \n ADDRESS, NAME, PHONE, SSN, and ALL.

                \n \n

                \n PiiEntityTypes is an optional parameter with a default value of ALL.

                " } }, "LanguageModelName": { @@ -779,6 +778,24 @@ "traits": { "smithy.api#documentation": "

                The name of the language model used during transcription.

                " } + }, + "IdentifyLanguage": { + "target": "com.amazonaws.chimesdkmeetings#Boolean", + "traits": { + "smithy.api#documentation": "

                Automatically identifies the language spoken in media files.

                " + } + }, + "LanguageOptions": { + "target": "com.amazonaws.chimesdkmeetings#TranscribeLanguageOptions", + "traits": { + "smithy.api#documentation": "

                Language codes for the languages that you want to identify. You must provide at least 2 codes.

                " + } + }, + "PreferredLanguage": { + "target": "com.amazonaws.chimesdkmeetings#TranscribeLanguageCode", + "traits": { + "smithy.api#documentation": "

                Language code for the preferred language.

                " + } } }, "traits": { @@ -1143,7 +1160,7 @@ "MediaRegion": { "target": "com.amazonaws.chimesdkmeetings#MediaRegion", "traits": { - "smithy.api#documentation": "

                The Region in which you create the meeting. Available values: af-south-1, ap-northeast-1, \n ap-northeast-2, ap-south-1, ap-southeast-1, ap-southeast-2, ca-central-1, \n eu-central-1, eu-north-1, eu-south-1,\n eu-west-1, eu-west-2, eu-west-3,\n sa-east-1, us-east-1, us-east-2,\n us-west-1, us-west-2.

                " + "smithy.api#documentation": "

                The Region in which you create the meeting. Available values: af-south-1, ap-northeast-1, \n ap-northeast-2, ap-south-1, ap-southeast-1, ap-southeast-2, ca-central-1, \n eu-central-1, eu-north-1, eu-south-1,\n eu-west-1, eu-west-2, eu-west-3,\n sa-east-1, us-east-1, us-east-2,\n us-west-1, us-west-2.

                \n

                Available values in AWS GovCloud (US) Regions: us-gov-east-1, us-gov-west-1.

                " } }, "MediaPlacement": { @@ -1189,7 +1206,7 @@ } }, "traits": { - "smithy.api#documentation": "

                The configuration settings of the features available to a meeting.

                " + "smithy.api#documentation": "

                The configuration settings of the features available to a meeting.

                " } }, "com.amazonaws.chimesdkmeetings#NotFoundException": { @@ -1482,6 +1499,16 @@ "smithy.api#pattern": "^[0-9a-zA-Z._-]+$" } }, + "com.amazonaws.chimesdkmeetings#TranscribeLanguageOptions": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 200 + }, + "smithy.api#pattern": "^[a-zA-Z-,]+$" + } + }, "com.amazonaws.chimesdkmeetings#TranscribeMedicalContentIdentificationType": { "type": "string", "traits": { diff --git a/codegen/sdk-codegen/aws-models/cloudtrail.json b/codegen/sdk-codegen/aws-models/cloudtrail.json index 1d0a0a57eb63..a1219d705f09 100644 --- a/codegen/sdk-codegen/aws-models/cloudtrail.json +++ b/codegen/sdk-codegen/aws-models/cloudtrail.json @@ -245,7 +245,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Cancels a query if the query is not in a terminated state, such as CANCELLED, FAILED or FINISHED. You must specify an ARN value for EventDataStore. \n The ID of the query that you want to cancel is also required. When you run CancelQuery, the query status might \n show as CANCELLED even if the operation is not yet finished.

                ", + "smithy.api#documentation": "

                Cancels a query if the query is not in a terminated state, such as CANCELLED, FAILED, TIMED_OUT, or FINISHED. You must specify an ARN value for EventDataStore.\n The ID of the query that you want to cancel is also required. When you run CancelQuery, the query status might \n show as CANCELLED even if the operation is not yet finished.

                ", "smithy.api#idempotent": {} } }, @@ -1152,7 +1152,7 @@ "QueryStatus": { "target": "com.amazonaws.cloudtrail#QueryStatus", "traits": { - "smithy.api#documentation": "

                The status of a query. Values for QueryStatus include QUEUED, RUNNING, \n FINISHED, FAILED, or CANCELLED\n

                " + "smithy.api#documentation": "

                The status of a query. Values for QueryStatus include QUEUED, RUNNING, \n FINISHED, FAILED, TIMED_OUT, or CANCELLED\n

                " } }, "QueryStatistics": { @@ -1883,7 +1883,7 @@ "QueryStatus": { "target": "com.amazonaws.cloudtrail#QueryStatus", "traits": { - "smithy.api#documentation": "

                The status of the query. Values include QUEUED, RUNNING, FINISHED, FAILED, \n or CANCELLED.

                " + "smithy.api#documentation": "

                The status of the query. Values include QUEUED, RUNNING, FINISHED, FAILED, \n TIMED_OUT, or CANCELLED.

                " } }, "QueryStatistics": { @@ -2146,7 +2146,7 @@ "code": "InactiveQuery", "httpResponseCode": 400 }, - "smithy.api#documentation": "

                The specified query cannot be canceled because it is in the FINISHED, FAILED, or \n CANCELLED state.

                ", + "smithy.api#documentation": "

                The specified query cannot be canceled because it is in the FINISHED, FAILED, TIMED_OUT, or\n CANCELLED state.

                ", "smithy.api#error": "client", "smithy.api#httpError": 400 } @@ -3008,7 +3008,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Returns a list of queries and query statuses for the past seven days. You must specify an ARN value for \n EventDataStore. Optionally, to shorten the list of results, you can specify a time range, \n formatted as timestamps, by adding StartTime and EndTime parameters, and a \n QueryStatus value. Valid values for QueryStatus include QUEUED, RUNNING, \n FINISHED, FAILED, or CANCELLED.

                ", + "smithy.api#documentation": "

                Returns a list of queries and query statuses for the past seven days. You must specify an ARN value for \n EventDataStore. Optionally, to shorten the list of results, you can specify a time range, \n formatted as timestamps, by adding StartTime and EndTime parameters, and a \n QueryStatus value. Valid values for QueryStatus include QUEUED, RUNNING, \n FINISHED, FAILED, TIMED_OUT, or CANCELLED.

                ", "smithy.api#idempotent": {}, "smithy.api#paginated": { "inputToken": "NextToken", @@ -3064,7 +3064,7 @@ "QueryStatus": { "target": "com.amazonaws.cloudtrail#QueryStatus", "traits": { - "smithy.api#documentation": "

                The status of queries that you want to return in results. Valid values for QueryStatus include QUEUED, RUNNING, \n FINISHED, FAILED, or CANCELLED.

                " + "smithy.api#documentation": "

                The status of queries that you want to return in results. Valid values for QueryStatus include QUEUED, RUNNING, \n FINISHED, FAILED, TIMED_OUT, or CANCELLED.

                " } } } @@ -3807,7 +3807,7 @@ "QueryStatus": { "target": "com.amazonaws.cloudtrail#QueryStatus", "traits": { - "smithy.api#documentation": "

                The status of the query. This can be QUEUED, RUNNING, FINISHED, FAILED, \n or CANCELLED.

                " + "smithy.api#documentation": "

                The status of the query. This can be QUEUED, RUNNING, FINISHED, FAILED, \n TIMED_OUT, or CANCELLED.

                " } }, "CreationTime": { @@ -3892,6 +3892,12 @@ "traits": { "smithy.api#documentation": "

                The total number of results returned by a query.

                " } + }, + "BytesScanned": { + "target": "com.amazonaws.cloudtrail#Long", + "traits": { + "smithy.api#documentation": "

                The total bytes that the query scanned in the event data store. This value matches the number of\n bytes for which your account is billed for the query, unless the query is still running.

                " + } } }, "traits": { @@ -3913,6 +3919,12 @@ "smithy.api#documentation": "

                The number of events that the query scanned in the event data store.

                " } }, + "BytesScanned": { + "target": "com.amazonaws.cloudtrail#Long", + "traits": { + "smithy.api#documentation": "

                The total bytes that the query scanned in the event data store. This value matches the number of \n bytes for which your account is billed for the query, unless the query is still running.

                " + } + }, "ExecutionTimeInMillis": { "target": "com.amazonaws.cloudtrail#Integer", "traits": { @@ -3953,6 +3965,10 @@ { "value": "CANCELLED", "name": "CANCELLED" + }, + { + "value": "TIMED_OUT", + "name": "TIMED_OUT" } ] } diff --git a/codegen/sdk-codegen/aws-models/comprehend.json b/codegen/sdk-codegen/aws-models/comprehend.json index 66d5408aa765..20106b954321 100644 --- a/codegen/sdk-codegen/aws-models/comprehend.json +++ b/codegen/sdk-codegen/aws-models/comprehend.json @@ -908,6 +908,9 @@ { "target": "com.amazonaws.comprehend#DescribeSentimentDetectionJob" }, + { + "target": "com.amazonaws.comprehend#DescribeTargetedSentimentDetectionJob" + }, { "target": "com.amazonaws.comprehend#DescribeTopicsDetectionJob" }, @@ -971,6 +974,9 @@ { "target": "com.amazonaws.comprehend#ListTagsForResource" }, + { + "target": "com.amazonaws.comprehend#ListTargetedSentimentDetectionJobs" + }, { "target": "com.amazonaws.comprehend#ListTopicsDetectionJobs" }, @@ -998,6 +1004,9 @@ { "target": "com.amazonaws.comprehend#StartSentimentDetectionJob" }, + { + "target": "com.amazonaws.comprehend#StartTargetedSentimentDetectionJob" + }, { "target": "com.amazonaws.comprehend#StartTopicsDetectionJob" }, @@ -1019,6 +1028,9 @@ { "target": "com.amazonaws.comprehend#StopSentimentDetectionJob" }, + { + "target": "com.amazonaws.comprehend#StopTargetedSentimentDetectionJob" + }, { "target": "com.amazonaws.comprehend#StopTrainingDocumentClassifier" }, @@ -2216,6 +2228,55 @@ } } }, + "com.amazonaws.comprehend#DescribeTargetedSentimentDetectionJob": { + "type": "operation", + "input": { + "target": "com.amazonaws.comprehend#DescribeTargetedSentimentDetectionJobRequest" + }, + "output": { + "target": "com.amazonaws.comprehend#DescribeTargetedSentimentDetectionJobResponse" + }, + "errors": [ + { + "target": "com.amazonaws.comprehend#InternalServerException" + }, + { + "target": "com.amazonaws.comprehend#InvalidRequestException" + }, + { + "target": 
"com.amazonaws.comprehend#JobNotFoundException" + }, + { + "target": "com.amazonaws.comprehend#TooManyRequestsException" + } + ], + "traits": { + "smithy.api#documentation": "

                Gets the properties associated with a targeted sentiment detection job. Use this operation \n to get the status of the job.

                " + } + }, + "com.amazonaws.comprehend#DescribeTargetedSentimentDetectionJobRequest": { + "type": "structure", + "members": { + "JobId": { + "target": "com.amazonaws.comprehend#JobId", + "traits": { + "smithy.api#documentation": "

                The identifier that Amazon Comprehend generated for the job. The operation returns this identifier in its\n response.

                ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.comprehend#DescribeTargetedSentimentDetectionJobResponse": { + "type": "structure", + "members": { + "TargetedSentimentDetectionJobProperties": { + "target": "com.amazonaws.comprehend#TargetedSentimentDetectionJobProperties", + "traits": { + "smithy.api#documentation": "

                An object that contains the properties associated with a targeted sentiment detection job.

                " + } + } + } + }, "com.amazonaws.comprehend#DescribeTopicsDetectionJob": { "type": "operation", "input": { @@ -3762,7 +3823,7 @@ "F1Score": { "target": "com.amazonaws.comprehend#Double", "traits": { - "smithy.api#documentation": "

                A measure of how accurate the recognizer results are for the test data. It is derived from\n the Precision and Recall values. The F1Score is the\n harmonic average of the two scores. The highest score is 1, and the worst score is 0.

                " + "smithy.api#documentation": "

                A measure of how accurate the recognizer results are for the test data. It is derived from\n the Precision and Recall values. The F1Score is the\n harmonic average of the two scores. For plain text entity recognizer models, the range is 0 to 100, \n where 100 is the best score. For PDF/Word entity recognizer models, the range is 0 to 1, \n where 1 is the best score. \n

                " } } }, @@ -4727,7 +4788,7 @@ "min": 0, "max": 2048 }, - "smithy.api#pattern": ".*" + "smithy.api#pattern": "^\\p{ASCII}+$" } }, "com.amazonaws.comprehend#KmsKeyValidationException": { @@ -5777,6 +5838,77 @@ } } }, + "com.amazonaws.comprehend#ListTargetedSentimentDetectionJobs": { + "type": "operation", + "input": { + "target": "com.amazonaws.comprehend#ListTargetedSentimentDetectionJobsRequest" + }, + "output": { + "target": "com.amazonaws.comprehend#ListTargetedSentimentDetectionJobsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.comprehend#InternalServerException" + }, + { + "target": "com.amazonaws.comprehend#InvalidFilterException" + }, + { + "target": "com.amazonaws.comprehend#InvalidRequestException" + }, + { + "target": "com.amazonaws.comprehend#TooManyRequestsException" + } + ], + "traits": { + "smithy.api#documentation": "

                Gets a list of targeted sentiment detection jobs that you have submitted.

                ", + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "pageSize": "MaxResults" + } + } + }, + "com.amazonaws.comprehend#ListTargetedSentimentDetectionJobsRequest": { + "type": "structure", + "members": { + "Filter": { + "target": "com.amazonaws.comprehend#TargetedSentimentDetectionJobFilter", + "traits": { + "smithy.api#documentation": "

                Filters the jobs that are returned. You can filter jobs on their name, status, or the date\n and time that they were submitted. You can only set one filter at a time.

                " + } + }, + "NextToken": { + "target": "com.amazonaws.comprehend#String", + "traits": { + "smithy.api#documentation": "

                Identifies the next page of results to return.

                " + } + }, + "MaxResults": { + "target": "com.amazonaws.comprehend#MaxResultsInteger", + "traits": { + "smithy.api#documentation": "

                The maximum number of results to return in each page. The default is 100.

                " + } + } + } + }, + "com.amazonaws.comprehend#ListTargetedSentimentDetectionJobsResponse": { + "type": "structure", + "members": { + "TargetedSentimentDetectionJobPropertiesList": { + "target": "com.amazonaws.comprehend#TargetedSentimentDetectionJobPropertiesList", + "traits": { + "smithy.api#documentation": "

                A list containing the properties of each job that is returned.

                " + } + }, + "NextToken": { + "target": "com.amazonaws.comprehend#String", + "traits": { + "smithy.api#documentation": "

                Identifies the next page of results to return.

                " + } + } + } + }, "com.amazonaws.comprehend#ListTopicsDetectionJobs": { "type": "operation", "input": { @@ -5919,7 +6051,7 @@ "S3Uri": { "target": "com.amazonaws.comprehend#S3Uri", "traits": { - "smithy.api#documentation": "

                When you use the OutputDataConfig object with asynchronous operations, you\n specify the Amazon S3 location where you want to write the output data. The URI must be in the\n same region as the API endpoint that you are calling. The location is used as the prefix for\n the actual location of the output file.

                \n

                When the topic detection job is finished, the service creates an output file in a\n directory specific to the job. The S3Uri field contains the location of the\n output file, called output.tar.gz. It is a compressed archive that contains the\n ouput of the operation.

                ", + "smithy.api#documentation": "

                When you use the OutputDataConfig object with asynchronous operations, you\n specify the Amazon S3 location where you want to write the output data. The URI must be in the\n same region as the API endpoint that you are calling. The location is used as the prefix for\n the actual location of the output file.

                \n

                When the topic detection job is finished, the service creates an output file in a\n directory specific to the job. The S3Uri field contains the location of the\n output file, called output.tar.gz. It is a compressed archive that contains the\n output of the operation.

                \n

                \n For a PII entity detection job, the output file is plain text, not a compressed archive. \n The output file name is the same as the input file, with .out appended at the end.\n

                ", "smithy.api#required": {} } }, @@ -5931,7 +6063,7 @@ } }, "traits": { - "smithy.api#documentation": "

                Provides configuration parameters for the output of topic detection jobs.

                \n

                " + "smithy.api#documentation": "

                Provides configuration parameters for the output of inference jobs.

                \n

                " } }, "com.amazonaws.comprehend#PartOfSpeechTag": { @@ -6324,7 +6456,7 @@ "S3Uri": { "target": "com.amazonaws.comprehend#S3Uri", "traits": { - "smithy.api#documentation": "

                When you use the PiiOutputDataConfig object with asynchronous operations,\n you specify the Amazon S3 location where you want to write the output data.

                ", + "smithy.api#documentation": "

                When you use the PiiOutputDataConfig object with asynchronous operations,\n you specify the Amazon S3 location where you want to write the output data.

                \n

                \n For a PII entity detection job, the output file is plain text, not a compressed archive. \n The output file name is the same as the input file, with .out appended at the end.\n

                ", "smithy.api#required": {} } }, @@ -7451,7 +7583,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Starts an asynchronous sentiment detection job for a collection of documents. use the\n operation to track the status of a\n job.

                " + "smithy.api#documentation": "

                Starts an asynchronous sentiment detection job for a collection of documents. Use the\n operation to track the status of a\n job.

                " } }, "com.amazonaws.comprehend#StartSentimentDetectionJobRequest": { @@ -7541,6 +7673,118 @@ } } }, + "com.amazonaws.comprehend#StartTargetedSentimentDetectionJob": { + "type": "operation", + "input": { + "target": "com.amazonaws.comprehend#StartTargetedSentimentDetectionJobRequest" + }, + "output": { + "target": "com.amazonaws.comprehend#StartTargetedSentimentDetectionJobResponse" + }, + "errors": [ + { + "target": "com.amazonaws.comprehend#InternalServerException" + }, + { + "target": "com.amazonaws.comprehend#InvalidRequestException" + }, + { + "target": "com.amazonaws.comprehend#KmsKeyValidationException" + }, + { + "target": "com.amazonaws.comprehend#TooManyRequestsException" + }, + { + "target": "com.amazonaws.comprehend#TooManyTagsException" + } + ], + "traits": { + "smithy.api#documentation": "

                Starts an asynchronous targeted sentiment detection job for a collection of documents. Use the\n operation to track the status of a\n job.

                " + } + }, + "com.amazonaws.comprehend#StartTargetedSentimentDetectionJobRequest": { + "type": "structure", + "members": { + "InputDataConfig": { + "target": "com.amazonaws.comprehend#InputDataConfig", + "traits": { + "smithy.api#required": {} + } + }, + "OutputDataConfig": { + "target": "com.amazonaws.comprehend#OutputDataConfig", + "traits": { + "smithy.api#documentation": "

                Specifies where to send the output files.

                ", + "smithy.api#required": {} + } + }, + "DataAccessRoleArn": { + "target": "com.amazonaws.comprehend#IamRoleArn", + "traits": { + "smithy.api#documentation": "

                The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that\n grants Amazon Comprehend read access to your input data. For more information, see Role-based permissions.

                ", + "smithy.api#required": {} + } + }, + "JobName": { + "target": "com.amazonaws.comprehend#JobName", + "traits": { + "smithy.api#documentation": "

                The identifier of the job.

                " + } + }, + "LanguageCode": { + "target": "com.amazonaws.comprehend#LanguageCode", + "traits": { + "smithy.api#documentation": "

                The language of the input documents. You can specify any of the primary languages\n supported by Amazon Comprehend. All documents must be in the same language.

                ", + "smithy.api#required": {} + } + }, + "ClientRequestToken": { + "target": "com.amazonaws.comprehend#ClientRequestTokenString", + "traits": { + "smithy.api#documentation": "

                A unique identifier for the request. If you don't set the client request token, Amazon\n Comprehend generates one.

                ", + "smithy.api#idempotencyToken": {} + } + }, + "VolumeKmsKeyId": { + "target": "com.amazonaws.comprehend#KmsKeyId", + "traits": { + "smithy.api#documentation": "

                ID for the KMS key that Amazon Comprehend uses to encrypt\n data on the storage volume attached to the ML compute instance(s) that process the analysis\n job. The VolumeKmsKeyId can be either of the following formats:

                \n
                  \n
                • \n

                  KMS Key ID: \"1234abcd-12ab-34cd-56ef-1234567890ab\"\n

                  \n
                • \n
                • \n

                  Amazon Resource Name (ARN) of a KMS Key:\n \"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab\"\n

                  \n
                • \n
                " + } + }, + "VpcConfig": { + "target": "com.amazonaws.comprehend#VpcConfig" + }, + "Tags": { + "target": "com.amazonaws.comprehend#TagList", + "traits": { + "smithy.api#documentation": "

                Tags to be associated with the targeted sentiment detection job. A tag is a key-value pair that\n adds metadata to a resource used by Amazon Comprehend. For example, a tag with \"Sales\" as the\n key might be added to a resource to indicate its use by the sales department.

                " + } + } + } + }, + "com.amazonaws.comprehend#StartTargetedSentimentDetectionJobResponse": { + "type": "structure", + "members": { + "JobId": { + "target": "com.amazonaws.comprehend#JobId", + "traits": { + "smithy.api#documentation": "

                The identifier generated for the job. To get the status of a job, use this identifier with\n the operation.

                " + } + }, + "JobArn": { + "target": "com.amazonaws.comprehend#ComprehendArn", + "traits": { + "smithy.api#documentation": "

                The Amazon Resource Name (ARN) of the targeted sentiment detection job. It is a unique, fully\n qualified identifier for the job. It includes the AWS account, Region, and the job ID. The\n format of the ARN is as follows:

                \n

                \n arn::comprehend:::targeted-sentiment-detection-job/\n

                \n

                The following is an example job ARN:

                \n

                \n arn:aws:comprehend:us-west-2:111122223333:targeted-sentiment-detection-job/1234abcd12ab34cd56ef1234567890ab\n

                " + } + }, + "JobStatus": { + "target": "com.amazonaws.comprehend#JobStatus", + "traits": { + "smithy.api#documentation": "

                The status of the job.

                \n
                  \n
                • \n

                  SUBMITTED - The job has been received and is queued for processing.

                  \n
                • \n
                • \n

                  IN_PROGRESS - Amazon Comprehend is processing the job.

                  \n
                • \n
                • \n

                  COMPLETED - The job was successfully completed and the output is available.

                  \n
                • \n
                • \n

                  FAILED - The job did not complete. To get details, use the operation.

                  \n
                • \n
                " + } + } + } + }, "com.amazonaws.comprehend#StartTopicsDetectionJob": { "type": "operation", "input": { @@ -7968,6 +8212,58 @@ } } }, + "com.amazonaws.comprehend#StopTargetedSentimentDetectionJob": { + "type": "operation", + "input": { + "target": "com.amazonaws.comprehend#StopTargetedSentimentDetectionJobRequest" + }, + "output": { + "target": "com.amazonaws.comprehend#StopTargetedSentimentDetectionJobResponse" + }, + "errors": [ + { + "target": "com.amazonaws.comprehend#InternalServerException" + }, + { + "target": "com.amazonaws.comprehend#InvalidRequestException" + }, + { + "target": "com.amazonaws.comprehend#JobNotFoundException" + } + ], + "traits": { + "smithy.api#documentation": "

                Stops a targeted sentiment detection job in progress.

                \n

                If the job state is IN_PROGRESS the job is marked for termination and put\n into the STOP_REQUESTED state. If the job completes before it can be stopped, it\n is put into the COMPLETED state; otherwise the job is stopped and put into the\n STOPPED state.

                \n

                If the job is in the COMPLETED or FAILED state when you call the\n StopTargetedSentimentDetectionJob operation, the operation returns a 400\n Internal Request Exception.

                \n

                When a job is stopped, any documents already processed are written to the output\n location.

                " + } + }, + "com.amazonaws.comprehend#StopTargetedSentimentDetectionJobRequest": { + "type": "structure", + "members": { + "JobId": { + "target": "com.amazonaws.comprehend#JobId", + "traits": { + "smithy.api#documentation": "

                The identifier of the targeted sentiment detection job to stop.

                ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.comprehend#StopTargetedSentimentDetectionJobResponse": { + "type": "structure", + "members": { + "JobId": { + "target": "com.amazonaws.comprehend#JobId", + "traits": { + "smithy.api#documentation": "

                The identifier of the targeted sentiment detection job to stop.

                " + } + }, + "JobStatus": { + "target": "com.amazonaws.comprehend#JobStatus", + "traits": { + "smithy.api#documentation": "

                Either STOP_REQUESTED if the job is currently running, or\n STOPPED if the job was previously stopped with the\n StopTargetedSentimentDetectionJob operation.

                " + } + } + } + }, "com.amazonaws.comprehend#StopTrainingDocumentClassifier": { "type": "operation", "input": { @@ -8265,6 +8561,121 @@ } } }, + "com.amazonaws.comprehend#TargetedSentimentDetectionJobFilter": { + "type": "structure", + "members": { + "JobName": { + "target": "com.amazonaws.comprehend#JobName", + "traits": { + "smithy.api#documentation": "

                Filters on the name of the job.

                " + } + }, + "JobStatus": { + "target": "com.amazonaws.comprehend#JobStatus", + "traits": { + "smithy.api#documentation": "

                Filters the list of jobs based on job status. Returns only jobs with the specified\n status.

                " + } + }, + "SubmitTimeBefore": { + "target": "com.amazonaws.comprehend#Timestamp", + "traits": { + "smithy.api#documentation": "

                Filters the list of jobs based on the time that the job was submitted for processing.\n Returns only jobs submitted before the specified time. Jobs are returned in ascending order,\n oldest to newest.

                " + } + }, + "SubmitTimeAfter": { + "target": "com.amazonaws.comprehend#Timestamp", + "traits": { + "smithy.api#documentation": "

                Filters the list of jobs based on the time that the job was submitted for processing.\n Returns only jobs submitted after the specified time. Jobs are returned in descending order,\n newest to oldest.

                " + } + } + }, + "traits": { + "smithy.api#documentation": "

                Provides information for filtering a list of targeted sentiment detection jobs. For more\n information, see the operation.

                " + } + }, + "com.amazonaws.comprehend#TargetedSentimentDetectionJobProperties": { + "type": "structure", + "members": { + "JobId": { + "target": "com.amazonaws.comprehend#JobId", + "traits": { + "smithy.api#documentation": "

                The identifier assigned to the targeted sentiment detection job.

                " + } + }, + "JobArn": { + "target": "com.amazonaws.comprehend#ComprehendArn", + "traits": { + "smithy.api#documentation": "

                The Amazon Resource Name (ARN) of the targeted sentiment detection job. It is a unique, fully\n qualified identifier for the job. It includes the AWS account, Region, and the job ID. The\n format of the ARN is as follows:

                \n

                \n arn::comprehend:::targeted-sentiment-detection-job/\n

                \n

                The following is an example job ARN:

                \n

                \n arn:aws:comprehend:us-west-2:111122223333:targeted-sentiment-detection-job/1234abcd12ab34cd56ef1234567890ab\n

                " + } + }, + "JobName": { + "target": "com.amazonaws.comprehend#JobName", + "traits": { + "smithy.api#documentation": "

                The name that you assigned to the targeted sentiment detection job.

                " + } + }, + "JobStatus": { + "target": "com.amazonaws.comprehend#JobStatus", + "traits": { + "smithy.api#documentation": "

                The current status of the targeted sentiment detection job. If the status is FAILED,\n the Messages field shows the reason for the failure.

                " + } + }, + "Message": { + "target": "com.amazonaws.comprehend#AnyLengthString", + "traits": { + "smithy.api#documentation": "

                A description of the status of a job.

                " + } + }, + "SubmitTime": { + "target": "com.amazonaws.comprehend#Timestamp", + "traits": { + "smithy.api#documentation": "

                The time that the targeted sentiment detection job was submitted for processing.

                " + } + }, + "EndTime": { + "target": "com.amazonaws.comprehend#Timestamp", + "traits": { + "smithy.api#documentation": "

                The time that the targeted sentiment detection job ended.

                " + } + }, + "InputDataConfig": { + "target": "com.amazonaws.comprehend#InputDataConfig" + }, + "OutputDataConfig": { + "target": "com.amazonaws.comprehend#OutputDataConfig" + }, + "LanguageCode": { + "target": "com.amazonaws.comprehend#LanguageCode", + "traits": { + "smithy.api#documentation": "

                The language code of the input documents.

                " + } + }, + "DataAccessRoleArn": { + "target": "com.amazonaws.comprehend#IamRoleArn", + "traits": { + "smithy.api#documentation": "

                The Amazon Resource Name (ARN) that gives Amazon Comprehend read access to your input\n data.

                " + } + }, + "VolumeKmsKeyId": { + "target": "com.amazonaws.comprehend#KmsKeyId", + "traits": { + "smithy.api#documentation": "

                ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt\n data on the storage volume attached to the ML compute instance(s) that process the \n targeted sentiment detection job. The VolumeKmsKeyId can be either of the following formats:

                \n
                  \n
                • \n

                  KMS Key ID: \"1234abcd-12ab-34cd-56ef-1234567890ab\"\n

                  \n
                • \n
                • \n

                  Amazon Resource Name (ARN) of a KMS Key:\n \"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab\"\n

                  \n
                • \n
                " + } + }, + "VpcConfig": { + "target": "com.amazonaws.comprehend#VpcConfig" + } + }, + "traits": { + "smithy.api#documentation": "

                Provides information about a targeted sentiment detection job.

                " + } + }, + "com.amazonaws.comprehend#TargetedSentimentDetectionJobPropertiesList": { + "type": "list", + "member": { + "target": "com.amazonaws.comprehend#TargetedSentimentDetectionJobProperties" + } + }, "com.amazonaws.comprehend#TextSizeLimitExceededException": { "type": "structure", "members": { diff --git a/codegen/sdk-codegen/aws-models/connect.json b/codegen/sdk-codegen/aws-models/connect.json index 38bcb491efc6..4ce5bd9ce393 100644 --- a/codegen/sdk-codegen/aws-models/connect.json +++ b/codegen/sdk-codegen/aws-models/connect.json @@ -7337,6 +7337,10 @@ { "value": "AGENT_EVENTS", "name": "AGENT_EVENTS" + }, + { + "value": "REAL_TIME_CONTACT_ANALYSIS_SEGMENTS", + "name": "REAL_TIME_CONTACT_ANALYSIS_SEGMENTS" } ] } diff --git a/codegen/sdk-codegen/aws-models/devops-guru.json b/codegen/sdk-codegen/aws-models/devops-guru.json index d0f2c1ef953b..9867a0dcc7b4 100644 --- a/codegen/sdk-codegen/aws-models/devops-guru.json +++ b/codegen/sdk-codegen/aws-models/devops-guru.json @@ -167,6 +167,20 @@ } } }, + "com.amazonaws.devopsguru#AmazonCodeGuruProfilerIntegration": { + "type": "structure", + "members": { + "Status": { + "target": "com.amazonaws.devopsguru#EventSourceOptInStatus", + "traits": { + "smithy.api#documentation": "

                The status of the CodeGuru Profiler integration.

                " + } + } + }, + "traits": { + "smithy.api#documentation": "

                Information about your account's integration with Amazon CodeGuru Profiler.

                " + } + }, "com.amazonaws.devopsguru#AnomalyDescription": { "type": "string" }, @@ -227,7 +241,7 @@ } }, "traits": { - "smithy.api#documentation": "

                The Amazon Web Services resources in which DevOps Guru detected unusual behavior that resulted in \n \tthe generation of an anomaly. When DevOps Guru detects multiple related anomalies, it creates \n and insight with details about the anomalous behavior and suggestions about how to correct the \n problem.

                " + "smithy.api#documentation": "

                The Amazon Web Services resources in which DevOps Guru detected unusual behavior that resulted in the\n\t\t\tgeneration of an anomaly. When DevOps Guru detects multiple related anomalies, it creates and\n\t\t\tinsight with details about the anomalous behavior and suggestions about how to correct\n\t\t\tthe problem.

                " } }, "com.amazonaws.devopsguru#AnomalyResources": { @@ -255,6 +269,9 @@ ] } }, + "com.amazonaws.devopsguru#AnomalySource": { + "type": "string" + }, "com.amazonaws.devopsguru#AnomalySourceDetails": { "type": "structure", "members": { @@ -267,7 +284,7 @@ "PerformanceInsightsMetrics": { "target": "com.amazonaws.devopsguru#PerformanceInsightsMetricsDetails", "traits": { - "smithy.api#documentation": "

                An array of PerformanceInsightsMetricsDetail objects that contain information\n \t\tabout analyzed Performance Insights metrics that show anomalous behavior.

                " + "smithy.api#documentation": "

                An array of PerformanceInsightsMetricsDetail objects that contain\n\t\t\tinformation about analyzed Performance Insights metrics that show anomalous behavior.

                " } } }, @@ -275,6 +292,32 @@ "smithy.api#documentation": "

                Details about the source of the anomalous operational data that triggered the\n\t\t\tanomaly.

                " } }, + "com.amazonaws.devopsguru#AnomalySourceMetadata": { + "type": "structure", + "members": { + "Source": { + "target": "com.amazonaws.devopsguru#AnomalySource", + "traits": { + "smithy.api#documentation": "

                The source of the anomaly.

                " + } + }, + "SourceResourceName": { + "target": "com.amazonaws.devopsguru#ResourceName", + "traits": { + "smithy.api#documentation": "

                The name of the anomaly's resource.

                " + } + }, + "SourceResourceType": { + "target": "com.amazonaws.devopsguru#ResourceType", + "traits": { + "smithy.api#documentation": "

                The anomaly's resource type.

                " + } + } + }, + "traits": { + "smithy.api#documentation": "

                Metadata about an anomaly. The anomaly is detected using analysis of the metric data\u2028 over a period of time

                " + } + }, "com.amazonaws.devopsguru#AnomalyStatus": { "type": "string", "traits": { @@ -383,6 +426,9 @@ { "target": "com.amazonaws.devopsguru#DescribeAnomaly" }, + { + "target": "com.amazonaws.devopsguru#DescribeEventSourcesConfig" + }, { "target": "com.amazonaws.devopsguru#DescribeFeedback" }, @@ -443,6 +489,9 @@ { "target": "com.amazonaws.devopsguru#StartCostEstimation" }, + { + "target": "com.amazonaws.devopsguru#UpdateEventSourcesConfig" + }, { "target": "com.amazonaws.devopsguru#UpdateResourceCollection" }, @@ -566,7 +615,7 @@ "StatusCode": { "target": "com.amazonaws.devopsguru#CloudWatchMetricDataStatusCode", "traits": { - "smithy.api#documentation": "

                This is an enum of the status showing whether the metric value pair list has partial or\n\t\t\tcomplete data, or if there was an error.

                " + "smithy.api#documentation": "

                This is an enum of the status showing whether the metric value pair list has partial\n\t\t\tor complete data, or if there was an error.

                " } } }, @@ -758,7 +807,7 @@ "Tags": { "target": "com.amazonaws.devopsguru#TagCostEstimationResourceCollectionFilters", "traits": { - "smithy.api#documentation": "

                The Amazon Web Services tags used to filter the resource collection that is used for \n \t\ta cost estimate.

                \n \t

                Tags help you identify and organize your Amazon Web Services resources. Many Amazon Web Services services support\n \t\ttagging, so you can assign the same tag to resources from different services to indicate\n \t\tthat the resources are related. For example, you can assign the same tag to an Amazon DynamoDB\n \t\ttable resource that you assign to an Lambda function. For more information about\n \t\tusing tags, see the Tagging\n \t\t\tbest practices whitepaper.

                \n \t

                Each Amazon Web Services tag has two parts.

                \n \t
                  \n
                • \n \t\t\t

                  A tag key (for example, CostCenter,\n \t\t\t\tEnvironment, Project, or Secret). Tag\n \t\t\t\tkeys are case-sensitive.

                  \n \t\t
                • \n
                • \n \t\t\t

                  An optional field known as a tag value (for example,\n \t\t\t\t111122223333, Production, or a team\n \t\t\t\tname). Omitting the tag value is the same as using an empty\n \t\t\t\tstring. Like tag keys, tag values are\n \t\t\t\tcase-sensitive.

                  \n \t\t
                • \n
                \n \t

                Together these are known as key-value pairs.

                \n \t \n\t\t

                The string used for a key in a tag that you use to define your resource coverage must begin with the\n\t\t\tprefix Devops-guru-. The tag key might be\n\t\t\tDevops-guru-deployment-application or\n\t\t\tDevops-guru-rds-application. While keys are case-sensitive, the\n\t\t\tcase of key characters don't matter to DevOps Guru. For example, DevOps Guru works with a\n\t\t\tkey named devops-guru-rds and a key named\n\t\t\tDevOps-Guru-RDS. Possible key/value pairs in your\n\t\t\tapplication might be Devops-Guru-production-application/RDS or\n\t\t\tDevops-Guru-production-application/containers.

                \n\t
                " + "smithy.api#documentation": "

                The Amazon Web Services tags used to filter the resource collection that is used for a cost\n\t\t\testimate.

                \n

                Tags help you identify and organize your Amazon Web Services resources. Many Amazon Web Services services support\n \t\ttagging, so you can assign the same tag to resources from different services to indicate\n \t\tthat the resources are related. For example, you can assign the same tag to an Amazon DynamoDB\n \t\ttable resource that you assign to an Lambda function. For more information about\n \t\tusing tags, see the Tagging\n \t\t\tbest practices whitepaper.

                \n \t

                Each Amazon Web Services tag has two parts.

                \n \t
                  \n
                • \n \t\t\t

                  A tag key (for example, CostCenter,\n \t\t\t\tEnvironment, Project, or Secret). Tag\n \t\t\t\tkeys are case-sensitive.

                  \n \t\t
                • \n
                • \n \t\t\t

                  An optional field known as a tag value (for example,\n \t\t\t\t111122223333, Production, or a team\n \t\t\t\tname). Omitting the tag value is the same as using an empty\n \t\t\t\tstring. Like tag keys, tag values are\n \t\t\t\tcase-sensitive.

                  \n \t\t
                • \n
                \n \t

                Together these are known as key-value pairs.

                \n \t \n\t\t

                The string used for a key in a tag that you use to define your resource coverage must begin with the\n\t\t\tprefix Devops-guru-. The tag key might be\n\t\t\tDevops-guru-deployment-application or\n\t\t\tDevops-guru-rds-application. While keys are case-sensitive, the\n\t\t\tcase of key characters don't matter to DevOps Guru. For example, DevOps Guru works with a\n\t\t\tkey named devops-guru-rds and a key named\n\t\t\tDevOps-Guru-RDS. Possible key/value pairs in your\n\t\t\tapplication might be Devops-Guru-production-application/RDS or\n\t\t\tDevops-Guru-production-application/containers.

                \n\t
                " } } }, @@ -1057,6 +1106,52 @@ } } }, + "com.amazonaws.devopsguru#DescribeEventSourcesConfig": { + "type": "operation", + "input": { + "target": "com.amazonaws.devopsguru#DescribeEventSourcesConfigRequest" + }, + "output": { + "target": "com.amazonaws.devopsguru#DescribeEventSourcesConfigResponse" + }, + "errors": [ + { + "target": "com.amazonaws.devopsguru#AccessDeniedException" + }, + { + "target": "com.amazonaws.devopsguru#InternalServerException" + }, + { + "target": "com.amazonaws.devopsguru#ThrottlingException" + }, + { + "target": "com.amazonaws.devopsguru#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                This operation lists details about a DevOps Guru event source that is shared with your\u2028 account.

                ", + "smithy.api#http": { + "method": "POST", + "uri": "/event-sources", + "code": 200 + } + } + }, + "com.amazonaws.devopsguru#DescribeEventSourcesConfigRequest": { + "type": "structure", + "members": {} + }, + "com.amazonaws.devopsguru#DescribeEventSourcesConfigResponse": { + "type": "structure", + "members": { + "EventSources": { + "target": "com.amazonaws.devopsguru#EventSourcesConfig", + "traits": { + "smithy.api#documentation": "

                The name of the event source.

                " + } + } + } + }, "com.amazonaws.devopsguru#DescribeFeedback": { "type": "operation", "input": { @@ -1419,7 +1514,7 @@ "CloudFormation": { "target": "com.amazonaws.devopsguru#CloudFormationHealths", "traits": { - "smithy.api#documentation": "

                The returned CloudFormationHealthOverview object that contains an\n\t\t\tInsightHealthOverview object with the requested system health\n\t\t\tinformation.

                " + "smithy.api#documentation": "

                The returned CloudFormationHealthOverview object that contains an\n\t\t\t\tInsightHealthOverview object with the requested system health\n\t\t\tinformation.

                " } }, "Service": { @@ -1527,7 +1622,7 @@ "Tags": { "target": "com.amazonaws.devopsguru#TagHealths", "traits": { - "smithy.api#documentation": "

                The Amazon Web Services tags that are used by resources in the resource collection.

                \n \t

                Tags help you identify and organize your Amazon Web Services resources. Many Amazon Web Services services support\n \t\ttagging, so you can assign the same tag to resources from different services to indicate\n \t\tthat the resources are related. For example, you can assign the same tag to an Amazon DynamoDB\n \t\ttable resource that you assign to an Lambda function. For more information about\n \t\tusing tags, see the Tagging\n \t\t\tbest practices whitepaper.

                \n \t

                Each Amazon Web Services tag has two parts.

                \n \t
                  \n
                • \n \t\t\t

                  A tag key (for example, CostCenter,\n \t\t\t\tEnvironment, Project, or Secret). Tag\n \t\t\t\tkeys are case-sensitive.

                  \n \t\t
                • \n
                • \n \t\t\t

                  An optional field known as a tag value (for example,\n \t\t\t\t111122223333, Production, or a team\n \t\t\t\tname). Omitting the tag value is the same as using an empty\n \t\t\t\tstring. Like tag keys, tag values are\n \t\t\t\tcase-sensitive.

                  \n \t\t
                • \n
                \n \t

                Together these are known as key-value pairs.

                \n \t \n\t\t

                The string used for a key in a tag that you use to define your resource coverage must begin with the\n\t\t\tprefix Devops-guru-. The tag key might be\n\t\t\tDevops-guru-deployment-application or\n\t\t\tDevops-guru-rds-application. While keys are case-sensitive, the\n\t\t\tcase of key characters don't matter to DevOps Guru. For example, DevOps Guru works with a\n\t\t\tkey named devops-guru-rds and a key named\n\t\t\tDevOps-Guru-RDS. Possible key/value pairs in your\n\t\t\tapplication might be Devops-Guru-production-application/RDS or\n\t\t\tDevops-Guru-production-application/containers.

                \n\t
                " + "smithy.api#documentation": "

                The Amazon Web Services tags that are used by resources in the resource collection.

                \n\t\t

                Tags help you identify and organize your Amazon Web Services resources. Many Amazon Web Services services support\n \t\ttagging, so you can assign the same tag to resources from different services to indicate\n \t\tthat the resources are related. For example, you can assign the same tag to an Amazon DynamoDB\n \t\ttable resource that you assign to an Lambda function. For more information about\n \t\tusing tags, see the Tagging\n \t\t\tbest practices whitepaper.

                \n \t

                Each Amazon Web Services tag has two parts.

                \n \t
                  \n
                • \n \t\t\t

                  A tag key (for example, CostCenter,\n \t\t\t\tEnvironment, Project, or Secret). Tag\n \t\t\t\tkeys are case-sensitive.

                  \n \t\t
                • \n
                • \n \t\t\t

                  An optional field known as a tag value (for example,\n \t\t\t\t111122223333, Production, or a team\n \t\t\t\tname). Omitting the tag value is the same as using an empty\n \t\t\t\tstring. Like tag keys, tag values are\n \t\t\t\tcase-sensitive.

                  \n \t\t
                • \n
                \n \t

                Together these are known as key-value pairs.

                \n \t \n\t\t

                The string used for a key in a tag that you use to define your resource coverage must begin with the\n\t\t\tprefix Devops-guru-. The tag key might be\n\t\t\tDevops-guru-deployment-application or\n\t\t\tDevops-guru-rds-application. While keys are case-sensitive, the\n\t\t\tcase of key characters don't matter to DevOps Guru. For example, DevOps Guru works with a\n\t\t\tkey named devops-guru-rds and a key named\n\t\t\tDevOps-Guru-RDS. Possible key/value pairs in your\n\t\t\tapplication might be Devops-Guru-production-application/RDS or\n\t\t\tDevops-Guru-production-application/containers.

                \n\t
                " } } } @@ -1786,6 +1881,35 @@ "smithy.api#pattern": "^[a-z]+[a-z0-9]*\\.amazonaws\\.com|aws\\.events$" } }, + "com.amazonaws.devopsguru#EventSourceOptInStatus": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "ENABLED", + "name": "ENABLED" + }, + { + "value": "DISABLED", + "name": "DISABLED" + } + ] + } + }, + "com.amazonaws.devopsguru#EventSourcesConfig": { + "type": "structure", + "members": { + "AmazonCodeGuruProfiler": { + "target": "com.amazonaws.devopsguru#AmazonCodeGuruProfilerIntegration", + "traits": { + "smithy.api#documentation": "

                " + } + } + }, + "traits": { + "smithy.api#documentation": "

                Describes the event sources.

                " + } + }, "com.amazonaws.devopsguru#EventTimeRange": { "type": "structure", "members": { @@ -1980,6 +2104,9 @@ } } }, + "com.amazonaws.devopsguru#InsightDescription": { + "type": "string" + }, "com.amazonaws.devopsguru#InsightFeedback": { "type": "structure", "members": { @@ -3115,13 +3242,13 @@ "Group": { "target": "com.amazonaws.devopsguru#PerformanceInsightsMetricGroup", "traits": { - "smithy.api#documentation": "

                The name of the dimension group. Its valid values are:

                \n \t\n \t
                  \n
                • \n \t\t\t

                  \n \t\t\t\t db - The name of the database to which the client is connected (only Aurora PostgreSQL, Amazon RDS PostgreSQL,\n \t\t\t\tAurora MySQL, Amazon RDS MySQL, and MariaDB)

                  \n \t\t
                • \n
                • \n \t\t\t

                  \n \t\t\t\t db.application - The name of the application that is connected to the database (only Aurora\n \t\t\t\tPostgreSQL and RDS PostgreSQL)

                  \n \t\t
                • \n
                • \n \t\t\t

                  \n \t\t\t\t db.host - The host name of the connected client (all engines)

                  \n \t\t
                • \n
                • \n \t\t\t

                  \n \t\t\t\t db.session_type - The type of the current session (only Aurora PostgreSQL and RDS PostgreSQL)

                  \n \t\t
                • \n
                • \n \t\t\t

                  \n \t\t\t\t db.sql - The SQL that is currently executing (all engines)

                  \n \t\t
                • \n
                • \n \t\t\t

                  \n \t\t\t\t db.sql_tokenized - The SQL digest (all engines)

                  \n \t\t
                • \n
                • \n \t\t\t

                  \n \t\t\t\t db.wait_event - The event for which the database backend is waiting (all engines)

                  \n \t\t
                • \n
                • \n \t\t\t

                  \n \t\t\t\t db.wait_event_type - The type of event for which the database backend is waiting (all engines)

                  \n \t\t
                • \n
                • \n \t\t\t

                  \n \t\t\t\t db.user - The user logged in to the database (all engines)

                  \n \t\t
                • \n
                " + "smithy.api#documentation": "

                The name of the dimension group. Its valid values are:

                \n\n\t\t
                  \n
                • \n\t\t\t\t

                  \n\t\t\t\t\t db - The name of the database to which the client is connected\n\t\t\t\t\t(only Aurora PostgreSQL, Amazon RDS PostgreSQL, Aurora MySQL, Amazon RDS MySQL, and MariaDB)

                  \n\t\t\t
                • \n
                • \n\t\t\t\t

                  \n\t\t\t\t\t db.application - The name of the application that is connected to\n\t\t\t\t\tthe database (only Aurora PostgreSQL and RDS PostgreSQL)

                  \n\t\t\t
                • \n
                • \n\t\t\t\t

                  \n\t\t\t\t\t db.host - The host name of the connected client (all\n\t\t\t\t\tengines)

                  \n\t\t\t
                • \n
                • \n\t\t\t\t

                  \n\t\t\t\t\t db.session_type - The type of the current session (only Aurora PostgreSQL\n\t\t\t\t\tand RDS PostgreSQL)

                  \n\t\t\t
                • \n
                • \n\t\t\t\t

                  \n\t\t\t\t\t db.sql - The SQL that is currently executing (all engines)

                  \n\t\t\t
                • \n
                • \n\t\t\t\t

                  \n\t\t\t\t\t db.sql_tokenized - The SQL digest (all engines)

                  \n\t\t\t
                • \n
                • \n\t\t\t\t

                  \n\t\t\t\t\t db.wait_event - The event for which the database backend is waiting\n\t\t\t\t\t(all engines)

                  \n\t\t\t
                • \n
                • \n\t\t\t\t

                  \n\t\t\t\t\t db.wait_event_type - The type of event for which the database\n\t\t\t\t\tbackend is waiting (all engines)

                  \n\t\t\t
                • \n
                • \n\t\t\t\t

                  \n\t\t\t\t\t db.user - The user logged in to the database (all engines)

                  \n\t\t\t
                • \n
                " } }, "Dimensions": { "target": "com.amazonaws.devopsguru#PerformanceInsightsMetricDimensions", "traits": { - "smithy.api#documentation": "

                A list of specific dimensions from a dimension group. If this parameter is not present,\n \t\tthen it signifies that all of the dimensions in the group were requested or are present in\n \t\tthe response.

                \n \t

                Valid values for elements in the Dimensions array are:

                \n \t\n \t
                  \n
                • \n \t\t\t

                  \n \t\t\t\t db.application.name - The name of the application that is connected to the database (only\n \t\t\t\tAurora PostgreSQL and RDS PostgreSQL)

                  \n \t\t
                • \n
                • \n \t\t\t

                  \n \t\t\t\t db.host.id - The host ID of the connected client (all engines)

                  \n \t\t
                • \n
                • \n \t\t\t

                  \n \t\t\t\t db.host.name - The host name of the connected client (all engines)

                  \n \t\t
                • \n
                • \n \t\t\t

                  \n \t\t\t\t db.name - The name of the database to which the client is connected (only Aurora PostgreSQL, Amazon RDS\n \t\t\t\tPostgreSQL, Aurora MySQL, Amazon RDS MySQL, and MariaDB)

                  \n \t\t
                • \n
                • \n \t\t\t

                  \n \t\t\t\t db.session_type.name - The type of the current session (only Aurora PostgreSQL and RDS PostgreSQL)

                  \n \t\t
                • \n
                • \n \t\t\t

                  \n \t\t\t\t db.sql.id - The SQL ID generated by Performance Insights (all engines)

                  \n \t\t
                • \n
                • \n \t\t\t

                  \n \t\t\t\t db.sql.db_id - The SQL ID generated by the database (all engines)

                  \n \t\t
                • \n
                • \n \t\t\t

                  \n \t\t\t\t db.sql.statement - The SQL text that is being executed (all engines)

                  \n \t\t
                • \n
                • \n \t\t\t

                  \n \t\t\t\t db.sql.tokenized_id\n \t\t\t

                  \n \t\t
                • \n
                • \n \t\t\t

                  \n \t\t\t\t db.sql_tokenized.id - The SQL digest ID generated by Performance Insights (all engines)

                  \n \t\t
                • \n
                • \n \t\t\t

                  \n \t\t\t\t db.sql_tokenized.db_id - SQL digest ID generated by the database (all engines)

                  \n \t\t
                • \n
                • \n \t\t\t

                  \n \t\t\t\t db.sql_tokenized.statement - The SQL digest text (all engines)

                  \n \t\t
                • \n
                • \n \t\t\t

                  \n \t\t\t\t db.user.id - The ID of the user logged in to the database (all engines)

                  \n \t\t
                • \n
                • \n \t\t\t

                  \n \t\t\t\t db.user.name - The name of the user logged in to the database (all engines)

                  \n \t\t
                • \n
                • \n \t\t\t

                  \n \t\t\t\t db.wait_event.name - The event for which the backend is waiting (all engines)

                  \n \t\t
                • \n
                • \n \t\t\t

                  \n \t\t\t\t db.wait_event.type - The type of event for which the backend is waiting (all engines)

                  \n \t\t
                • \n
                • \n \t\t\t

                  \n \t\t\t\t db.wait_event_type.name - The name of the event type for which the backend is waiting (all\n \t\t\t\tengines)

                  \n \t\t
                • \n
                " + "smithy.api#documentation": "

                A list of specific dimensions from a dimension group. If this parameter is not\n\t\t\tpresent, then it signifies that all of the dimensions in the group were requested or are\n\t\t\tpresent in the response.

                \n\t\t

                Valid values for elements in the Dimensions array are:

                \n\n\t\t
                  \n
                • \n\t\t\t\t

                  \n\t\t\t\t\t db.application.name - The name of the application that is connected\n\t\t\t\t\tto the database (only Aurora PostgreSQL and RDS PostgreSQL)

                  \n\t\t\t
                • \n
                • \n\t\t\t\t

                  \n\t\t\t\t\t db.host.id - The host ID of the connected client (all\n\t\t\t\t\tengines)

                  \n\t\t\t
                • \n
                • \n\t\t\t\t

                  \n\t\t\t\t\t db.host.name - The host name of the connected client (all\n\t\t\t\t\tengines)

                  \n\t\t\t
                • \n
                • \n\t\t\t\t

                  \n\t\t\t\t\t db.name - The name of the database to which the client is connected\n\t\t\t\t\t(only Aurora PostgreSQL, Amazon RDS PostgreSQL, Aurora MySQL, Amazon RDS MySQL, and MariaDB)

                  \n\t\t\t
                • \n
                • \n\t\t\t\t

                  \n\t\t\t\t\t db.session_type.name - The type of the current session (only Aurora\n\t\t\t\t\tPostgreSQL and RDS PostgreSQL)

                  \n\t\t\t
                • \n
                • \n\t\t\t\t

                  \n\t\t\t\t\t db.sql.id - The SQL ID generated by Performance Insights (all engines)

                  \n\t\t\t
                • \n
                • \n\t\t\t\t

                  \n\t\t\t\t\t db.sql.db_id - The SQL ID generated by the database (all\n\t\t\t\t\tengines)

                  \n\t\t\t
                • \n
                • \n\t\t\t\t

                  \n\t\t\t\t\t db.sql.statement - The SQL text that is being executed (all\n\t\t\t\t\tengines)

                  \n\t\t\t
                • \n
                • \n\t\t\t\t

                  \n\t\t\t\t\t db.sql.tokenized_id\n\t\t\t\t

                  \n\t\t\t
                • \n
                • \n\t\t\t\t

                  \n\t\t\t\t\t db.sql_tokenized.id - The SQL digest ID generated by Performance Insights (all\n\t\t\t\t\tengines)

                  \n\t\t\t
                • \n
                • \n\t\t\t\t

                  \n\t\t\t\t\t db.sql_tokenized.db_id - SQL digest ID generated by the database\n\t\t\t\t\t(all engines)

                  \n\t\t\t
                • \n
                • \n\t\t\t\t

                  \n\t\t\t\t\t db.sql_tokenized.statement - The SQL digest text (all\n\t\t\t\t\tengines)

                  \n\t\t\t
                • \n
                • \n\t\t\t\t

                  \n\t\t\t\t\t db.user.id - The ID of the user logged in to the database (all\n\t\t\t\t\tengines)

                  \n\t\t\t
                • \n
                • \n\t\t\t\t

                  \n\t\t\t\t\t db.user.name - The name of the user logged in to the database (all\n\t\t\t\t\tengines)

                  \n\t\t\t
                • \n
                • \n\t\t\t\t

                  \n\t\t\t\t\t db.wait_event.name - The event for which the backend is waiting\n\t\t\t\t\t(all engines)

                  \n\t\t\t
                • \n
                • \n\t\t\t\t

                  \n\t\t\t\t\t db.wait_event.type - The type of event for which the backend is\n\t\t\t\t\twaiting (all engines)

                  \n\t\t\t
                • \n
                • \n\t\t\t\t

                  \n\t\t\t\t\t db.wait_event_type.name - The name of the event type for which the\n\t\t\t\t\tbackend is waiting (all engines)

                  \n\t\t\t
                • \n
                " } }, "Limit": { @@ -3132,7 +3259,7 @@ } }, "traits": { - "smithy.api#documentation": "

                A logical grouping of Performance Insights metrics for a related subject area. For example, the\n \t\tdb.sql dimension group consists of the following dimensions:\n \t\tdb.sql.id, db.sql.db_id, db.sql.statement, and\n \t\tdb.sql.tokenized_id.

                \n \t \n \t\t

                Each response element returns a maximum of 500 bytes. For larger elements, such as SQL statements, \n \t\t\tonly the first 500 bytes are returned.

                \n \t
                \n \t\n \t

                Amazon RDS Performance Insights enables you to monitor and explore different \n \t\tdimensions of database load based on data captured from a running DB instance. \n \t\tDB load is measured as average active sessions. Performance Insights provides the \n \t\tdata to API consumers as a two-dimensional time-series dataset. The time dimension \n \t\tprovides DB load data for each time point in the queried time range. Each time point \n \t\tdecomposes overall load in relation to the requested dimensions, measured at that \n \t\ttime point. Examples include SQL, Wait event, User, and Host.

                \n \t\n \t
                  \n
                • \n \t\t\t

                  To learn more about Performance Insights and Amazon Aurora DB instances, go to the Amazon Aurora User Guide.\n \t\t\t

                  \n \t\t
                • \n
                • \n \t\t\t

                  To learn more about Performance Insights and Amazon RDS DB instances, go to the Amazon RDS User Guide.\n \t\t\t

                  \n \t\t
                • \n
                " + "smithy.api#documentation": "

                A logical grouping of Performance Insights metrics for a related subject area. For example, the\n\t\t\t\tdb.sql dimension group consists of the following dimensions:\n\t\t\t\tdb.sql.id, db.sql.db_id, db.sql.statement,\n\t\t\tand db.sql.tokenized_id.

                \n\t\t \n\t\t\t

                Each response element returns a maximum of 500 bytes. For larger elements, such as\n\t\t\t\tSQL statements, only the first 500 bytes are returned.

                \n\t\t
                \n

                Amazon RDS Performance Insights enables you to monitor and explore different \n \t\tdimensions of database load based on data captured from a running DB instance. \n \t\tDB load is measured as average active sessions. Performance Insights provides the \n \t\tdata to API consumers as a two-dimensional time-series dataset. The time dimension \n \t\tprovides DB load data for each time point in the queried time range. Each time point \n \t\tdecomposes overall load in relation to the requested dimensions, measured at that \n \t\ttime point. Examples include SQL, Wait event, User, and Host.

                \n \t\n \t
                  \n
                • \n \t\t\t

                  To learn more about Performance Insights and Amazon Aurora DB instances, go to the Amazon Aurora User Guide.\n \t\t\t

                  \n \t\t
                • \n
                • \n \t\t\t

                  To learn more about Performance Insights and Amazon RDS DB instances, go to the Amazon RDS User Guide.\n \t\t\t

                  \n \t\t
                • \n
                " } }, "com.amazonaws.devopsguru#PerformanceInsightsMetricDimensions": { @@ -3181,24 +3308,24 @@ "Metric": { "target": "com.amazonaws.devopsguru#PerformanceInsightsMetricName", "traits": { - "smithy.api#documentation": "

                The name of the meteric used used when querying an Performance Insights GetResourceMetrics API for \n \tanomaly metrics.

                \n \t\n \t

                Valid values for Metric are:

                \n \t\n \t
                  \n
                • \n \t\t\t

                  \n \t\t\t\t db.load.avg - a scaled representation of the number of active sessions\n \t\t\t\tfor the database engine.

                  \n \t\t
                • \n
                • \n \t\t\t

                  \n \t\t\t\t db.sampledload.avg - the raw number of active sessions for the\n \t\t\t\tdatabase engine.

                  \n \t\t
                • \n
                \n \t

                If the number of active sessions is less than an internal Performance Insights threshold, db.load.avg and db.sampledload.avg \n \t\tare the same value. If the number of active sessions is greater than the internal threshold, Performance Insights samples the active sessions, with db.load.avg \n \t\tshowing the scaled values, db.sampledload.avg showing the raw values, and db.sampledload.avg less than db.load.avg. \n \t\tFor most use cases, you can query db.load.avg only.

                " + "smithy.api#documentation": "

                The name of the meteric used used when querying an Performance Insights\n\t\t\t\tGetResourceMetrics API for anomaly metrics.

                \n\n\t\t

                Valid values for Metric are:

                \n\n\t\t
                  \n
                • \n\t\t\t\t

                  \n\t\t\t\t\t db.load.avg - a scaled representation of the number of active sessions for the\n\t\t\t\t\tdatabase engine.

                  \n\t\t\t
                • \n
                • \n\t\t\t\t

                  \n\t\t\t\t\t db.sampledload.avg - the raw number of active sessions for the database\n\t\t\t\t\tengine.

                  \n\t\t\t
                • \n
                \n\t\t

                If the number of active sessions is less than an internal Performance Insights threshold,\n\t\t\t\tdb.load.avg and db.sampledload.avg are the same value. If\n\t\t\tthe number of active sessions is greater than the internal threshold, Performance Insights samples the active sessions, with\n\t\t\t\tdb.load.avg showing the scaled values, db.sampledload.avg\n\t\t\tshowing the raw values, and db.sampledload.avg less than\n\t\t\t\tdb.load.avg. For most use cases, you can query db.load.avg\n\t\t\tonly.

                " } }, "GroupBy": { "target": "com.amazonaws.devopsguru#PerformanceInsightsMetricDimensionGroup", "traits": { - "smithy.api#documentation": "

                The specification for how to aggregate the data points from a Performance Insights GetResourceMetrics API query. The \n \t\tPerformance Insights query returns all of the dimensions within that group,\n \t\tunless you provide the names of specific dimensions within that group. You can also request\n \t\tthat Performance Insights return a limited number of values for a dimension.

                " + "smithy.api#documentation": "

                The specification for how to aggregate the data points from a Performance Insights\n\t\t\t\tGetResourceMetrics API query. The Performance Insights query returns all of the\n\t\t\tdimensions within that group, unless you provide the names of specific dimensions within\n\t\t\tthat group. You can also request that Performance Insights return a limited number of values for a\n\t\t\tdimension.

                " } }, "Filter": { "target": "com.amazonaws.devopsguru#PerformanceInsightsMetricFilterMap", "traits": { - "smithy.api#documentation": "

                One or more filters to apply to a Performance Insights GetResourceMetrics API query. Restrictions:

                \n \t
                  \n
                • \n \t\t\t

                  Any number of filters by the same dimension, as specified in the GroupBy parameter.

                  \n \t\t
                • \n
                • \n \t\t\t

                  A single filter for any other dimension in this dimension group.

                  \n \t\t
                • \n
                " + "smithy.api#documentation": "

                One or more filters to apply to a Performance Insights GetResourceMetrics API query.\n\t\t\tRestrictions:

                \n\t\t
                  \n
                • \n\t\t\t\t

                  Any number of filters by the same dimension, as specified in the\n\t\t\t\t\t\tGroupBy parameter.

                  \n\t\t\t
                • \n
                • \n\t\t\t\t

                  A single filter for any other dimension in this dimension group.

                  \n\t\t\t
                • \n
                " } } }, "traits": { - "smithy.api#documentation": "

                A single query to be processed. Use these parameters to \n \t\tquery the Performance Insights GetResourceMetrics API to retrieve the metrics \n \t\tfor an anomaly. For more information, see \n GetResourceMetrics\n \n \tin the Amazon RDS Performance Insights API Reference.

                \n \t\n \t

                Amazon RDS Performance Insights enables you to monitor and explore different \n \t\tdimensions of database load based on data captured from a running DB instance. \n \t\tDB load is measured as average active sessions. Performance Insights provides the \n \t\tdata to API consumers as a two-dimensional time-series dataset. The time dimension \n \t\tprovides DB load data for each time point in the queried time range. Each time point \n \t\tdecomposes overall load in relation to the requested dimensions, measured at that \n \t\ttime point. Examples include SQL, Wait event, User, and Host.

                \n \t\n \t
                  \n
                • \n \t\t\t

                  To learn more about Performance Insights and Amazon Aurora DB instances, go to the Amazon Aurora User Guide.\n \t\t\t

                  \n \t\t
                • \n
                • \n \t\t\t

                  To learn more about Performance Insights and Amazon RDS DB instances, go to the Amazon RDS User Guide.\n \t\t\t

                  \n \t\t
                • \n
                " + "smithy.api#documentation": "

                A single query to be processed. Use these parameters to query the Performance Insights\n\t\t\t\tGetResourceMetrics API to retrieve the metrics for an anomaly. For more\n\t\t\tinformation, see \n GetResourceMetrics\n in the Amazon RDS Performance Insights API\n\t\t\t\tReference.

                \n

                Amazon RDS Performance Insights enables you to monitor and explore different \n \t\tdimensions of database load based on data captured from a running DB instance. \n \t\tDB load is measured as average active sessions. Performance Insights provides the \n \t\tdata to API consumers as a two-dimensional time-series dataset. The time dimension \n \t\tprovides DB load data for each time point in the queried time range. Each time point \n \t\tdecomposes overall load in relation to the requested dimensions, measured at that \n \t\ttime point. Examples include SQL, Wait event, User, and Host.

                \n \t\n \t
                  \n
                • \n \t\t\t

                  To learn more about Performance Insights and Amazon Aurora DB instances, go to the Amazon Aurora User Guide.\n \t\t\t

                  \n \t\t
                • \n
                • \n \t\t\t

                  To learn more about Performance Insights and Amazon RDS DB instances, go to the Amazon RDS User Guide.\n \t\t\t

                  \n \t\t
                • \n
                " } }, "com.amazonaws.devopsguru#PerformanceInsightsMetricUnit": { @@ -3222,13 +3349,13 @@ "MetricQuery": { "target": "com.amazonaws.devopsguru#PerformanceInsightsMetricQuery", "traits": { - "smithy.api#documentation": "

                A single query to be processed for the metric. For more information, see \n \t\t\n PerformanceInsightsMetricQuery\n .

                " + "smithy.api#documentation": "

                A single query to be processed for the metric. For more information, see \n PerformanceInsightsMetricQuery\n .

                " } }, "ReferenceData": { "target": "com.amazonaws.devopsguru#PerformanceInsightsReferenceDataList", "traits": { - "smithy.api#documentation": "

                \n \tFor more information, see \n \t\n PerformanceInsightsReferenceData\n .\n

                " + "smithy.api#documentation": "

                For more information, see \n PerformanceInsightsReferenceData\n .

                " } }, "StatsAtAnomaly": { @@ -3245,7 +3372,7 @@ } }, "traits": { - "smithy.api#documentation": "

                Details about Performance Insights metrics.

                \n \t\n \t

                Amazon RDS Performance Insights enables you to monitor and explore different \n \t\tdimensions of database load based on data captured from a running DB instance. \n \t\tDB load is measured as average active sessions. Performance Insights provides the \n \t\tdata to API consumers as a two-dimensional time-series dataset. The time dimension \n \t\tprovides DB load data for each time point in the queried time range. Each time point \n \t\tdecomposes overall load in relation to the requested dimensions, measured at that \n \t\ttime point. Examples include SQL, Wait event, User, and Host.

                \n \t\n \t
                  \n
                • \n \t\t\t

                  To learn more about Performance Insights and Amazon Aurora DB instances, go to the Amazon Aurora User Guide.\n \t\t\t

                  \n \t\t
                • \n
                • \n \t\t\t

                  To learn more about Performance Insights and Amazon RDS DB instances, go to the Amazon RDS User Guide.\n \t\t\t

                  \n \t\t
                • \n
                " + "smithy.api#documentation": "

                Details about Performance Insights metrics.

                \n

                Amazon RDS Performance Insights enables you to monitor and explore different \n \t\tdimensions of database load based on data captured from a running DB instance. \n \t\tDB load is measured as average active sessions. Performance Insights provides the \n \t\tdata to API consumers as a two-dimensional time-series dataset. The time dimension \n \t\tprovides DB load data for each time point in the queried time range. Each time point \n \t\tdecomposes overall load in relation to the requested dimensions, measured at that \n \t\ttime point. Examples include SQL, Wait event, User, and Host.

                \n \t\n \t
                  \n
                • \n \t\t\t

                  To learn more about Performance Insights and Amazon Aurora DB instances, go to the Amazon Aurora User Guide.\n \t\t\t

                  \n \t\t
                • \n
                • \n \t\t\t

                  To learn more about Performance Insights and Amazon RDS DB instances, go to the Amazon RDS User Guide.\n \t\t\t

                  \n \t\t
                • \n
                " } }, "com.amazonaws.devopsguru#PerformanceInsightsMetricsDetails": { @@ -3260,18 +3387,18 @@ "ReferenceScalar": { "target": "com.amazonaws.devopsguru#PerformanceInsightsReferenceScalar", "traits": { - "smithy.api#documentation": "

                A scalar value for a metric that DevOps Guru compares to actual metric values. This reference value is used \n to determine if an actual metric value should be considered anomalous.

                " + "smithy.api#documentation": "

                A scalar value for a metric that DevOps Guru compares to actual metric values. This\n\t\t\treference value is used to determine if an actual metric value should be considered\n\t\t\tanomalous.

                " } }, "ReferenceMetric": { "target": "com.amazonaws.devopsguru#PerformanceInsightsReferenceMetric", "traits": { - "smithy.api#documentation": "

                A metric that DevOps Guru compares to actual metric values. This reference metric is used \n \t\tto determine if an actual metric should be considered anomalous.

                " + "smithy.api#documentation": "

                A metric that DevOps Guru compares to actual metric values. This reference metric is used to\n\t\t\tdetermine if an actual metric should be considered anomalous.

                " } } }, "traits": { - "smithy.api#documentation": "

                Reference scalar values and other metrics that DevOps Guru displays on a graph in its console along with the actual metrics it \n analyzed. Compare these reference values to your actual metrics to help you understand anomalous behavior that DevOps Guru detected.

                " + "smithy.api#documentation": "

                Reference scalar values and other metrics that DevOps Guru displays on a graph in its\n\t\t\tconsole along with the actual metrics it analyzed. Compare these reference values to\n\t\t\tyour actual metrics to help you understand anomalous behavior that DevOps Guru\n\t\t\tdetected.

                " } }, "com.amazonaws.devopsguru#PerformanceInsightsReferenceData": { @@ -3286,12 +3413,12 @@ "ComparisonValues": { "target": "com.amazonaws.devopsguru#PerformanceInsightsReferenceComparisonValues", "traits": { - "smithy.api#documentation": "

                The specific reference values used to evaluate the Performance Insights. For more information, see \n\t\t\t\n PerformanceInsightsReferenceComparisonValues\n .\n\t\t

                " + "smithy.api#documentation": "

                The specific reference values used to evaluate the Performance Insights. For more information, see\n\t\t\t\t\t\n PerformanceInsightsReferenceComparisonValues\n .

                " } } }, "traits": { - "smithy.api#documentation": "

                Reference data used to evaluate Performance Insights to determine if its performance \n is anomalous or not.

                " + "smithy.api#documentation": "

                Reference data used to evaluate Performance Insights to determine if its performance is anomalous or\n\t\t\tnot.

                " } }, "com.amazonaws.devopsguru#PerformanceInsightsReferenceDataList": { @@ -3328,7 +3455,7 @@ } }, "traits": { - "smithy.api#documentation": "

                A reference value to compare Performance Insights metrics against to determine if the metrics \n demonstrate anomalous behavior.

                " + "smithy.api#documentation": "

                A reference value to compare Performance Insights metrics against to determine if the metrics\n\t\t\tdemonstrate anomalous behavior.

                " } }, "com.amazonaws.devopsguru#PerformanceInsightsStat": { @@ -3452,6 +3579,18 @@ "traits": { "smithy.api#documentation": "

                A threshold that was exceeded by behavior in analyzed resources. Exceeding this\n\t\t\tthreshold is related to the anomalous behavior that generated this anomaly.

                " } + }, + "SourceMetadata": { + "target": "com.amazonaws.devopsguru#AnomalySourceMetadata", + "traits": { + "smithy.api#documentation": "

                The metadata for the anomaly.

                " + } + }, + "AnomalyResources": { + "target": "com.amazonaws.devopsguru#AnomalyResources", + "traits": { + "smithy.api#documentation": "

                Information about a resource in which DevOps Guru detected anomalous behavior.

                " + } } }, "traits": { @@ -3517,6 +3656,18 @@ "traits": { "smithy.api#documentation": "

                A threshold that was exceeded by behavior in analyzed resources. Exceeding this\n\t\t\tthreshold is related to the anomalous behavior that generated this anomaly.

                " } + }, + "SourceMetadata": { + "target": "com.amazonaws.devopsguru#AnomalySourceMetadata", + "traits": { + "smithy.api#documentation": "

                Returns the metadata of the source.

                " + } + }, + "AnomalyResources": { + "target": "com.amazonaws.devopsguru#AnomalyResources", + "traits": { + "smithy.api#documentation": "

                Information about a resource in which DevOps Guru detected anomalous behavior.

                " + } } }, "traits": { @@ -3564,6 +3715,12 @@ "traits": { "smithy.api#documentation": "

                The ID of the Amazon Web Services Systems Manager OpsItem created for this insight. You must enable\n\t\t\tthe creation of OpsItems insights before they are created for each insight.

                " } + }, + "Description": { + "target": "com.amazonaws.devopsguru#InsightDescription", + "traits": { + "smithy.api#documentation": "

                Describes the proactive insight.

                " + } } }, "traits": { @@ -3659,7 +3816,7 @@ "Severity": { "target": "com.amazonaws.devopsguru#InsightSeverity", "traits": { - "smithy.api#documentation": "

                An array of severity values used to search for insights. For more information, see \n\tUnderstanding \n\tinsight severities in the Amazon DevOps Guru User Guide.

                " + "smithy.api#documentation": "

                An array of severity values used to search for insights.\n\t\t\tFor more information, see \n\tUnderstanding \n\tinsight severities in the Amazon DevOps Guru User Guide.

                " } }, "Status": { @@ -3682,7 +3839,7 @@ } }, "traits": { - "smithy.api#documentation": "

                Details about a proactive insight. This object is returned by\n\t\t\tDescribeInsight.

                " + "smithy.api#documentation": "

                Details about a proactive insight. This object is returned by\n\t\t\t\tDescribeInsight.

                " } }, "com.amazonaws.devopsguru#ProactiveOrganizationInsights": { @@ -3947,6 +4104,12 @@ "traits": { "smithy.api#documentation": "

                The ID of the Amazon Web Services Systems Manager OpsItem created for this insight. You must enable\n\t\t\tthe creation of OpsItems insights before they are created for each insight.

                " } + }, + "Description": { + "target": "com.amazonaws.devopsguru#InsightDescription", + "traits": { + "smithy.api#documentation": "

                Describes the reactive insight.

                " + } } }, "traits": { @@ -4039,7 +4202,7 @@ "Severity": { "target": "com.amazonaws.devopsguru#InsightSeverity", "traits": { - "smithy.api#documentation": "

                An array of severity values used to search for insights. For more information, see \n\tUnderstanding \n\tinsight severities in the Amazon DevOps Guru User Guide.

                " + "smithy.api#documentation": "

                An array of severity values used to search for insights.\n\t\t\tFor more information, see \n\tUnderstanding \n\tinsight severities in the Amazon DevOps Guru User Guide.

                " } }, "Status": { @@ -4059,7 +4222,7 @@ } }, "traits": { - "smithy.api#documentation": "

                Information about a reactive insight. This object is returned by\n\t\t\tDescribeInsight.

                " + "smithy.api#documentation": "

                Information about a reactive insight. This object is returned by\n\t\t\t\tDescribeInsight.

                " } }, "com.amazonaws.devopsguru#ReactiveOrganizationInsights": { @@ -4106,12 +4269,21 @@ "traits": { "smithy.api#documentation": "

                Anomalies that are related to the problem. Use these Anomalies to learn more about\n\t\t\twhat's happening and to help address the issue.

                " } + }, + "Category": { + "target": "com.amazonaws.devopsguru#RecommendationCategory", + "traits": { + "smithy.api#documentation": "

                The category type of the recommendation.

                " + } } }, "traits": { "smithy.api#documentation": "

                Recommendation information to help you remediate detected anomalous behavior that\n\t\t\tgenerated an insight.

                " } }, + "com.amazonaws.devopsguru#RecommendationCategory": { + "type": "string" + }, "com.amazonaws.devopsguru#RecommendationDescription": { "type": "string" }, @@ -4168,7 +4340,7 @@ "Type": { "target": "com.amazonaws.devopsguru#RecommendationRelatedAnomalyResourceType", "traits": { - "smithy.api#documentation": "

                The type of the resource. Resource types take the same form that is \n\t\t\tused by Amazon Web Services CloudFormation resource type identifiers, service-provider::service-name::data-type-name. \n\t\t\tFor example, AWS::RDS::DBCluster. For more information, see \n\t\t\tAmazon Web Services resource and \n\t\t\t\tproperty types reference in the Amazon Web Services CloudFormation User Guide.

                " + "smithy.api#documentation": "

                The type of the resource. Resource types take the same form that is used by Amazon Web Services CloudFormation\n\t\t\tresource type identifiers, service-provider::service-name::data-type-name.\n\t\t\tFor example, AWS::RDS::DBCluster. For more information, see Amazon Web Services\n\t\t\t\tresource and property types reference in the Amazon Web Services CloudFormation User\n\t\t\t\tGuide.

                " } } }, @@ -4388,7 +4560,7 @@ "Tags": { "target": "com.amazonaws.devopsguru#TagCollections", "traits": { - "smithy.api#documentation": "

                The Amazon Web Services tags that are used by resources in the resource collection.

                \n \t

                Tags help you identify and organize your Amazon Web Services resources. Many Amazon Web Services services support\n \t\ttagging, so you can assign the same tag to resources from different services to indicate\n \t\tthat the resources are related. For example, you can assign the same tag to an Amazon DynamoDB\n \t\ttable resource that you assign to an Lambda function. For more information about\n \t\tusing tags, see the Tagging\n \t\t\tbest practices whitepaper.

                \n \t

                Each Amazon Web Services tag has two parts.

                \n \t
                  \n
                • \n \t\t\t

                  A tag key (for example, CostCenter,\n \t\t\t\tEnvironment, Project, or Secret). Tag\n \t\t\t\tkeys are case-sensitive.

                  \n \t\t
                • \n
                • \n \t\t\t

                  An optional field known as a tag value (for example,\n \t\t\t\t111122223333, Production, or a team\n \t\t\t\tname). Omitting the tag value is the same as using an empty\n \t\t\t\tstring. Like tag keys, tag values are\n \t\t\t\tcase-sensitive.

                  \n \t\t
                • \n
                \n \t

                Together these are known as key-value pairs.

                \n \t \n\t\t

                The string used for a key in a tag that you use to define your resource coverage must begin with the\n\t\t\tprefix Devops-guru-. The tag key might be\n\t\t\tDevops-guru-deployment-application or\n\t\t\tDevops-guru-rds-application. While keys are case-sensitive, the\n\t\t\tcase of key characters don't matter to DevOps Guru. For example, DevOps Guru works with a\n\t\t\tkey named devops-guru-rds and a key named\n\t\t\tDevOps-Guru-RDS. Possible key/value pairs in your\n\t\t\tapplication might be Devops-Guru-production-application/RDS or\n\t\t\tDevops-Guru-production-application/containers.

                \n\t
                " + "smithy.api#documentation": "

                The Amazon Web Services tags that are used by resources in the resource collection.

                \n\t\t

                Tags help you identify and organize your Amazon Web Services resources. Many Amazon Web Services services support\n \t\ttagging, so you can assign the same tag to resources from different services to indicate\n \t\tthat the resources are related. For example, you can assign the same tag to an Amazon DynamoDB\n \t\ttable resource that you assign to an Lambda function. For more information about\n \t\tusing tags, see the Tagging\n \t\t\tbest practices whitepaper.

                \n \t

                Each Amazon Web Services tag has two parts.

                \n \t
                  \n
                • \n \t\t\t

                  A tag key (for example, CostCenter,\n \t\t\t\tEnvironment, Project, or Secret). Tag\n \t\t\t\tkeys are case-sensitive.

                  \n \t\t
                • \n
                • \n \t\t\t

                  An optional field known as a tag value (for example,\n \t\t\t\t111122223333, Production, or a team\n \t\t\t\tname). Omitting the tag value is the same as using an empty\n \t\t\t\tstring. Like tag keys, tag values are\n \t\t\t\tcase-sensitive.

                  \n \t\t
                • \n
                \n \t

                Together these are known as key-value pairs.

                \n \t \n\t\t

                The string used for a key in a tag that you use to define your resource coverage must begin with the\n\t\t\tprefix Devops-guru-. The tag key might be\n\t\t\tDevops-guru-deployment-application or\n\t\t\tDevops-guru-rds-application. While keys are case-sensitive, the\n\t\t\tcase of key characters don't matter to DevOps Guru. For example, DevOps Guru works with a\n\t\t\tkey named devops-guru-rds and a key named\n\t\t\tDevOps-Guru-RDS. Possible key/value pairs in your\n\t\t\tapplication might be Devops-Guru-production-application/RDS or\n\t\t\tDevops-Guru-production-application/containers.

                \n\t
                " } } }, @@ -4408,7 +4580,7 @@ "Tags": { "target": "com.amazonaws.devopsguru#TagCollectionFilters", "traits": { - "smithy.api#documentation": "

                The Amazon Web Services tags used to filter the resources in the resource collection.

                \n \t

                Tags help you identify and organize your Amazon Web Services resources. Many Amazon Web Services services support\n \t\ttagging, so you can assign the same tag to resources from different services to indicate\n \t\tthat the resources are related. For example, you can assign the same tag to an Amazon DynamoDB\n \t\ttable resource that you assign to an Lambda function. For more information about\n \t\tusing tags, see the Tagging\n \t\t\tbest practices whitepaper.

                \n \t

                Each Amazon Web Services tag has two parts.

                \n \t
                  \n
                • \n \t\t\t

                  A tag key (for example, CostCenter,\n \t\t\t\tEnvironment, Project, or Secret). Tag\n \t\t\t\tkeys are case-sensitive.

                  \n \t\t
                • \n
                • \n \t\t\t

                  An optional field known as a tag value (for example,\n \t\t\t\t111122223333, Production, or a team\n \t\t\t\tname). Omitting the tag value is the same as using an empty\n \t\t\t\tstring. Like tag keys, tag values are\n \t\t\t\tcase-sensitive.

                  \n \t\t
                • \n
                \n \t

                Together these are known as key-value pairs.

                \n \t \n\t\t

                The string used for a key in a tag that you use to define your resource coverage must begin with the\n\t\t\tprefix Devops-guru-. The tag key might be\n\t\t\tDevops-guru-deployment-application or\n\t\t\tDevops-guru-rds-application. While keys are case-sensitive, the\n\t\t\tcase of key characters don't matter to DevOps Guru. For example, DevOps Guru works with a\n\t\t\tkey named devops-guru-rds and a key named\n\t\t\tDevOps-Guru-RDS. Possible key/value pairs in your\n\t\t\tapplication might be Devops-Guru-production-application/RDS or\n\t\t\tDevops-Guru-production-application/containers.

                \n\t
                " + "smithy.api#documentation": "

                The Amazon Web Services tags used to filter the resources in the resource collection.

                \n\t\t

                Tags help you identify and organize your Amazon Web Services resources. Many Amazon Web Services services support\n \t\ttagging, so you can assign the same tag to resources from different services to indicate\n \t\tthat the resources are related. For example, you can assign the same tag to an Amazon DynamoDB\n \t\ttable resource that you assign to an Lambda function. For more information about\n \t\tusing tags, see the Tagging\n \t\t\tbest practices whitepaper.

                \n \t

                Each Amazon Web Services tag has two parts.

                \n \t
                  \n
                • \n \t\t\t

                  A tag key (for example, CostCenter,\n \t\t\t\tEnvironment, Project, or Secret). Tag\n \t\t\t\tkeys are case-sensitive.

                  \n \t\t
                • \n
                • \n \t\t\t

                  An optional field known as a tag value (for example,\n \t\t\t\t111122223333, Production, or a team\n \t\t\t\tname). Omitting the tag value is the same as using an empty\n \t\t\t\tstring. Like tag keys, tag values are\n \t\t\t\tcase-sensitive.

                  \n \t\t
                • \n
                \n \t

                Together these are known as key-value pairs.

                \n \t \n\t\t

                The string used for a key in a tag that you use to define your resource coverage must begin with the\n\t\t\tprefix Devops-guru-. The tag key might be\n\t\t\tDevops-guru-deployment-application or\n\t\t\tDevops-guru-rds-application. While keys are case-sensitive, the\n\t\t\tcase of key characters don't matter to DevOps Guru. For example, DevOps Guru works with a\n\t\t\tkey named devops-guru-rds and a key named\n\t\t\tDevOps-Guru-RDS. Possible key/value pairs in your\n\t\t\tapplication might be Devops-Guru-production-application/RDS or\n\t\t\tDevops-Guru-production-application/containers.

                \n\t
                " } } }, @@ -4663,7 +4835,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Returns a list of insights in your organization. You can specify which insights are\n\t\t\treturned by their start time, one or more statuses (ONGOING and\n\t\t\tCLOSED), one or more severities\n\t\t\t(LOW, MEDIUM, and HIGH), and type\n\t\t\t(REACTIVE or PROACTIVE).

                \n\t\t

                Use the Filters parameter to specify status and severity search\n\t\t\tparameters. Use the Type parameter to specify REACTIVE or\n\t\t\tPROACTIVE in your search.

                ", + "smithy.api#documentation": "

                Returns a list of insights in your organization. You can specify which insights are\n\t\t\treturned by their start time, one or more statuses (ONGOING and\n\t\t\t\tCLOSED), one or more severities\n\t\t\t\t(LOW, MEDIUM, and HIGH), and type\n\t\t\t\t(REACTIVE or PROACTIVE).

                \n\t\t

                Use the Filters parameter to specify status and severity search\n\t\t\tparameters. Use the Type parameter to specify REACTIVE or\n\t\t\t\tPROACTIVE in your search.

                ", "smithy.api#http": { "method": "POST", "uri": "/organization/insights/search", @@ -4749,7 +4921,7 @@ "Type": { "target": "com.amazonaws.devopsguru#InsightType", "traits": { - "smithy.api#documentation": "

                The type of insights you are searching for (REACTIVE or\n\t\t\tPROACTIVE).

                ", + "smithy.api#documentation": "

                The type of insights you are searching for (REACTIVE or\n\t\t\t\tPROACTIVE).

                ", "smithy.api#required": {} } } @@ -5155,13 +5327,13 @@ "TagValues": { "target": "com.amazonaws.devopsguru#TagValues", "traits": { - "smithy.api#documentation": "

                The values in an Amazon Web Services tag collection.

                \n \t

                The tag's value is an optional field used to associate a string with \n\t\t\t\t\tthe tag key (for example, 111122223333, Production, or a team\n \t\t\t\tname). The key and value are the tag's key pair. \n \t\t\t\tOmitting the tag value is the same as using an empty\n \t\t\t\tstring. Like tag keys, tag values are\n \t\t\t\tcase-sensitive. You can specify a maximum of 256 characters for a tag value.

                ", + "smithy.api#documentation": "

                The values in an Amazon Web Services tag collection.

                \n

                The tag's value is an optional field used to associate a string with \n\t\t\t\t\tthe tag key (for example, 111122223333, Production, or a team\n \t\t\t\tname). The key and value are the tag's key pair. \n \t\t\t\tOmitting the tag value is the same as using an empty\n \t\t\t\tstring. Like tag keys, tag values are\n \t\t\t\tcase-sensitive. You can specify a maximum of 256 characters for a tag value.

                ", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

                A collection of Amazon Web Services tags.

                \n \t

                Tags help you identify and organize your Amazon Web Services resources. Many Amazon Web Services services support\n \t\ttagging, so you can assign the same tag to resources from different services to indicate\n \t\tthat the resources are related. For example, you can assign the same tag to an Amazon DynamoDB\n \t\ttable resource that you assign to an Lambda function. For more information about\n \t\tusing tags, see the Tagging\n \t\t\tbest practices whitepaper.

                \n \t

                Each Amazon Web Services tag has two parts.

                \n \t
                  \n
                • \n \t\t\t

                  A tag key (for example, CostCenter,\n \t\t\t\tEnvironment, Project, or Secret). Tag\n \t\t\t\tkeys are case-sensitive.

                  \n \t\t
                • \n
                • \n \t\t\t

                  An optional field known as a tag value (for example,\n \t\t\t\t111122223333, Production, or a team\n \t\t\t\tname). Omitting the tag value is the same as using an empty\n \t\t\t\tstring. Like tag keys, tag values are\n \t\t\t\tcase-sensitive.

                  \n \t\t
                • \n
                \n \t

                Together these are known as key-value pairs.

                \n \t \n\t\t

                The string used for a key in a tag that you use to define your resource coverage must begin with the\n\t\t\tprefix Devops-guru-. The tag key might be\n\t\t\tDevops-guru-deployment-application or\n\t\t\tDevops-guru-rds-application. While keys are case-sensitive, the\n\t\t\tcase of key characters don't matter to DevOps Guru. For example, DevOps Guru works with a\n\t\t\tkey named devops-guru-rds and a key named\n\t\t\tDevOps-Guru-RDS. Possible key/value pairs in your\n\t\t\tapplication might be Devops-Guru-production-application/RDS or\n\t\t\tDevops-Guru-production-application/containers.

                \n\t
                " + "smithy.api#documentation": "

                A collection of Amazon Web Services tags.

                \n

                Tags help you identify and organize your Amazon Web Services resources. Many Amazon Web Services services support\n \t\ttagging, so you can assign the same tag to resources from different services to indicate\n \t\tthat the resources are related. For example, you can assign the same tag to an Amazon DynamoDB\n \t\ttable resource that you assign to a Lambda function. For more information about\n \t\tusing tags, see the Tagging\n \t\t\tbest practices whitepaper.

                \n \t

                Each Amazon Web Services tag has two parts.

                \n \t
                  \n
                • \n \t\t\t

                  A tag key (for example, CostCenter,\n \t\t\t\tEnvironment, Project, or Secret). Tag\n \t\t\t\tkeys are case-sensitive.

                  \n \t\t
                • \n
                • \n \t\t\t

                  An optional field known as a tag value (for example,\n \t\t\t\t111122223333, Production, or a team\n \t\t\t\tname). Omitting the tag value is the same as using an empty\n \t\t\t\tstring. Like tag keys, tag values are\n \t\t\t\tcase-sensitive.

                  \n \t\t
                • \n
                \n \t

                Together these are known as key-value pairs.

                \n \t \n\t\t

                The string used for a key in a tag that you use to define your resource coverage must begin with the\n\t\t\tprefix Devops-guru-. The tag key might be\n\t\t\tDevops-guru-deployment-application or\n\t\t\tDevops-guru-rds-application. While keys are case-sensitive, the\n\t\t\tcase of key characters doesn't matter to DevOps Guru. For example, DevOps Guru works with a\n\t\t\tkey named devops-guru-rds and a key named\n\t\t\tDevOps-Guru-RDS. Possible key/value pairs in your\n\t\t\tapplication might be Devops-Guru-production-application/RDS or\n\t\t\tDevops-Guru-production-application/containers.

                \n\t
                " } }, "com.amazonaws.devopsguru#TagCollectionFilter": { @@ -5177,13 +5349,13 @@ "TagValues": { "target": "com.amazonaws.devopsguru#TagValues", "traits": { - "smithy.api#documentation": "

                The values in an Amazon Web Services tag collection.

                \n \t

                The tag's value is an optional field used to associate a string with \n\t\t\t\t\tthe tag key (for example, 111122223333, Production, or a team\n \t\t\t\tname). The key and value are the tag's key pair. \n \t\t\t\tOmitting the tag value is the same as using an empty\n \t\t\t\tstring. Like tag keys, tag values are\n \t\t\t\tcase-sensitive. You can specify a maximum of 256 characters for a tag value.

                ", + "smithy.api#documentation": "

                The values in an Amazon Web Services tag collection.

                \n

                The tag's value is an optional field used to associate a string with \n\t\t\t\t\tthe tag key (for example, 111122223333, Production, or a team\n \t\t\t\tname). The key and value are the tag's key pair. \n \t\t\t\tOmitting the tag value is the same as using an empty\n \t\t\t\tstring. Like tag keys, tag values are\n \t\t\t\tcase-sensitive. You can specify a maximum of 256 characters for a tag value.

                ", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

                A collection of Amazon Web Services tags used to filter insights. This is used to return insights generated from \n only resources that contain the tags in the tag collection.

                " + "smithy.api#documentation": "

                A collection of Amazon Web Services tags used to filter insights. This is used to return insights\n\t\t\tgenerated from only resources that contain the tags in the tag collection.

                " } }, "com.amazonaws.devopsguru#TagCollectionFilters": { @@ -5211,13 +5383,13 @@ "TagValues": { "target": "com.amazonaws.devopsguru#CostEstimationTagValues", "traits": { - "smithy.api#documentation": "

                The values in an Amazon Web Services tag collection.

                \n \t

                The tag's value is an optional field used to associate a string with \n\t\t\t\t\tthe tag key (for example, 111122223333, Production, or a team\n \t\t\t\tname). The key and value are the tag's key pair. \n \t\t\t\tOmitting the tag value is the same as using an empty\n \t\t\t\tstring. Like tag keys, tag values are\n \t\t\t\tcase-sensitive. You can specify a maximum of 256 characters for a tag value.

                ", + "smithy.api#documentation": "

                The values in an Amazon Web Services tag collection.

                \n

                The tag's value is an optional field used to associate a string with \n\t\t\t\t\tthe tag key (for example, 111122223333, Production, or a team\n \t\t\t\tname). The key and value are the tag's key pair. \n \t\t\t\tOmitting the tag value is the same as using an empty\n \t\t\t\tstring. Like tag keys, tag values are\n \t\t\t\tcase-sensitive. You can specify a maximum of 256 characters for a tag value.

                ", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

                Information about a collection of Amazon Web Services resources that are identified by an \n \t\tAmazon Web Services tag. This collection of resources is used to create a monthly cost estimate\n \t\tfor DevOps Guru to analyze Amazon Web Services resources. The maximum number of tags you can specify for a\n \t\tcost estimate is one. The estimate created is for the cost to analyze the Amazon Web Services\n \t\tresources defined by the tag. For more information, see Stacks in the\n \t\tAmazon Web Services CloudFormation User Guide.

                " + "smithy.api#documentation": "

                Information about a collection of Amazon Web Services resources that are identified by an Amazon Web Services tag.\n\t\t\tThis collection of resources is used to create a monthly cost estimate for DevOps Guru to\n\t\t\tanalyze Amazon Web Services resources. The maximum number of tags you can specify for a cost estimate\n\t\t\tis one. The estimate created is for the cost to analyze the Amazon Web Services resources defined by\n\t\t\tthe tag. For more information, see Stacks in the\n\t\t\t\tAmazon Web Services CloudFormation User Guide.

                " } }, "com.amazonaws.devopsguru#TagCostEstimationResourceCollectionFilters": { @@ -5238,18 +5410,18 @@ "TagValue": { "target": "com.amazonaws.devopsguru#TagValue", "traits": { - "smithy.api#documentation": "

                The value in an Amazon Web Services tag.

                \n \t

                The tag's value is an optional field used to associate a string with \n\t\t\t\t\tthe tag key (for example, 111122223333, Production, or a team\n \t\t\t\tname). The key and value are the tag's key pair. \n \t\t\t\tOmitting the tag value is the same as using an empty\n \t\t\t\tstring. Like tag keys, tag values are\n \t\t\t\tcase-sensitive. You can specify a maximum of 256 characters for a tag value.

                " + "smithy.api#documentation": "

                The value in an Amazon Web Services tag.

                \n

                The tag's value is an optional field used to associate a string with \n\t\t\t\t\tthe tag key (for example, 111122223333, Production, or a team\n \t\t\t\tname). The key and value are the tag's key pair. \n \t\t\t\tOmitting the tag value is the same as using an empty\n \t\t\t\tstring. Like tag keys, tag values are\n \t\t\t\tcase-sensitive. You can specify a maximum of 256 characters for a tag value.

                " } }, "Insight": { "target": "com.amazonaws.devopsguru#InsightHealth", "traits": { - "smithy.api#documentation": "

                Information about the health of the Amazon Web Services resources in your account that are\n\t\t\tspecified by an Amazon Web Services tag, including the number of open proactive, open reactive\n\t\t\tinsights, and the Mean Time to Recover (MTTR) of closed insights.

                " + "smithy.api#documentation": "

                Information about the health of the Amazon Web Services resources in your account that are specified\n\t\t\tby an Amazon Web Services tag, including the number of open proactive, open reactive insights, and the\n\t\t\tMean Time to Recover (MTTR) of closed insights.

                " } } }, "traits": { - "smithy.api#documentation": "

                Information about the health of Amazon Web Services resources in your account that are specified by\n \t\tan Amazon Web Services tag key.

                " + "smithy.api#documentation": "

                Information about the health of Amazon Web Services resources in your account that are specified by\n\t\t\tan Amazon Web Services tag key.

                " } }, "com.amazonaws.devopsguru#TagHealths": { @@ -5362,6 +5534,52 @@ "smithy.api#documentation": "

                Contains the names of Amazon Web Services CloudFormation stacks used to update a collection of stacks.\n\t\t\tYou can specify up to 500 Amazon Web Services CloudFormation stacks.

                " } }, + "com.amazonaws.devopsguru#UpdateEventSourcesConfig": { + "type": "operation", + "input": { + "target": "com.amazonaws.devopsguru#UpdateEventSourcesConfigRequest" + }, + "output": { + "target": "com.amazonaws.devopsguru#UpdateEventSourcesConfigResponse" + }, + "errors": [ + { + "target": "com.amazonaws.devopsguru#AccessDeniedException" + }, + { + "target": "com.amazonaws.devopsguru#InternalServerException" + }, + { + "target": "com.amazonaws.devopsguru#ThrottlingException" + }, + { + "target": "com.amazonaws.devopsguru#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                Updates the event source configuration.

                ", + "smithy.api#http": { + "method": "PUT", + "uri": "/event-sources", + "code": 200 + } + } + }, + "com.amazonaws.devopsguru#UpdateEventSourcesConfigRequest": { + "type": "structure", + "members": { + "EventSources": { + "target": "com.amazonaws.devopsguru#EventSourcesConfig", + "traits": { + "smithy.api#documentation": "

                The name of the event source.

                " + } + } + } + }, + "com.amazonaws.devopsguru#UpdateEventSourcesConfigResponse": { + "type": "structure", + "members": {} + }, "com.amazonaws.devopsguru#UpdateResourceCollection": { "type": "operation", "input": { @@ -5423,7 +5641,7 @@ "Tags": { "target": "com.amazonaws.devopsguru#UpdateTagCollectionFilters", "traits": { - "smithy.api#documentation": "

                The updated Amazon Web Services tags used to filter the resources in the resource collection.

                \n \t

                Tags help you identify and organize your Amazon Web Services resources. Many Amazon Web Services services support\n \t\ttagging, so you can assign the same tag to resources from different services to indicate\n \t\tthat the resources are related. For example, you can assign the same tag to an Amazon DynamoDB\n \t\ttable resource that you assign to an Lambda function. For more information about\n \t\tusing tags, see the Tagging\n \t\t\tbest practices whitepaper.

                \n \t

                Each Amazon Web Services tag has two parts.

                \n \t
                  \n
                • \n \t\t\t

                  A tag key (for example, CostCenter,\n \t\t\t\tEnvironment, Project, or Secret). Tag\n \t\t\t\tkeys are case-sensitive.

                  \n \t\t
                • \n
                • \n \t\t\t

                  An optional field known as a tag value (for example,\n \t\t\t\t111122223333, Production, or a team\n \t\t\t\tname). Omitting the tag value is the same as using an empty\n \t\t\t\tstring. Like tag keys, tag values are\n \t\t\t\tcase-sensitive.

                  \n \t\t
                • \n
                \n \t

                Together these are known as key-value pairs.

                \n \t \n\t\t

                The string used for a key in a tag that you use to define your resource coverage must begin with the\n\t\t\tprefix Devops-guru-. The tag key might be\n\t\t\tDevops-guru-deployment-application or\n\t\t\tDevops-guru-rds-application. While keys are case-sensitive, the\n\t\t\tcase of key characters don't matter to DevOps Guru. For example, DevOps Guru works with a\n\t\t\tkey named devops-guru-rds and a key named\n\t\t\tDevOps-Guru-RDS. Possible key/value pairs in your\n\t\t\tapplication might be Devops-Guru-production-application/RDS or\n\t\t\tDevops-Guru-production-application/containers.

                \n\t
                " + "smithy.api#documentation": "

                The updated Amazon Web Services tags used to filter the resources in the resource collection.

                \n\t\t

                Tags help you identify and organize your Amazon Web Services resources. Many Amazon Web Services services support\n \t\ttagging, so you can assign the same tag to resources from different services to indicate\n \t\tthat the resources are related. For example, you can assign the same tag to an Amazon DynamoDB\n \t\ttable resource that you assign to a Lambda function. For more information about\n \t\tusing tags, see the Tagging\n \t\t\tbest practices whitepaper.

                \n \t

                Each Amazon Web Services tag has two parts.

                \n \t
                  \n
                • \n \t\t\t

                  A tag key (for example, CostCenter,\n \t\t\t\tEnvironment, Project, or Secret). Tag\n \t\t\t\tkeys are case-sensitive.

                  \n \t\t
                • \n
                • \n \t\t\t

                  An optional field known as a tag value (for example,\n \t\t\t\t111122223333, Production, or a team\n \t\t\t\tname). Omitting the tag value is the same as using an empty\n \t\t\t\tstring. Like tag keys, tag values are\n \t\t\t\tcase-sensitive.

                  \n \t\t
                • \n
                \n \t

                Together these are known as key-value pairs.

                \n \t \n\t\t

                The string used for a key in a tag that you use to define your resource coverage must begin with the\n\t\t\tprefix Devops-guru-. The tag key might be\n\t\t\tDevops-guru-deployment-application or\n\t\t\tDevops-guru-rds-application. While keys are case-sensitive, the\n\t\t\tcase of key characters doesn't matter to DevOps Guru. For example, DevOps Guru works with a\n\t\t\tkey named devops-guru-rds and a key named\n\t\t\tDevOps-Guru-RDS. Possible key/value pairs in your\n\t\t\tapplication might be Devops-Guru-production-application/RDS or\n\t\t\tDevops-Guru-production-application/containers.

                \n\t
                " } } }, @@ -5539,13 +5757,13 @@ "TagValues": { "target": "com.amazonaws.devopsguru#UpdateTagValues", "traits": { - "smithy.api#documentation": "

                The values in an Amazon Web Services tag collection.

                \n \t

                The tag's value is an optional field used to associate a string with \n\t\t\t\t\tthe tag key (for example, 111122223333, Production, or a team\n \t\t\t\tname). The key and value are the tag's key pair. \n \t\t\t\tOmitting the tag value is the same as using an empty\n \t\t\t\tstring. Like tag keys, tag values are\n \t\t\t\tcase-sensitive. You can specify a maximum of 256 characters for a tag value.

                ", + "smithy.api#documentation": "

                The values in an Amazon Web Services tag collection.

                \n

                The tag's value is an optional field used to associate a string with \n\t\t\t\t\tthe tag key (for example, 111122223333, Production, or a team\n \t\t\t\tname). The key and value are the tag's key pair. \n \t\t\t\tOmitting the tag value is the same as using an empty\n \t\t\t\tstring. Like tag keys, tag values are\n \t\t\t\tcase-sensitive. You can specify a maximum of 256 characters for a tag value.

                ", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

                A new collection of Amazon Web Services resources that are defined by an Amazon Web Services tag or tag \n key/value pair.

                " + "smithy.api#documentation": "

                A new collection of Amazon Web Services resources that are defined by an Amazon Web Services tag or tag\n\t\t\tkey/value pair.

                " } }, "com.amazonaws.devopsguru#UpdateTagCollectionFilters": { diff --git a/codegen/sdk-codegen/aws-models/ec2.json b/codegen/sdk-codegen/aws-models/ec2.json index 40db29c5429c..81656826042e 100644 --- a/codegen/sdk-codegen/aws-models/ec2.json +++ b/codegen/sdk-codegen/aws-models/ec2.json @@ -10901,7 +10901,7 @@ "Type": { "target": "com.amazonaws.ec2#FleetType", "traits": { - "smithy.api#documentation": "

                The fleet type. The default value is maintain.

                \n
                  \n
                • \n

                  \n maintain - The EC2 Fleet places an asynchronous request for your desired\n capacity, and continues to maintain your desired Spot capacity by replenishing\n interrupted Spot Instances.

                  \n
                • \n
                • \n

                  \n request - The EC2 Fleet places an asynchronous one-time request for your\n desired capacity, but does submit Spot requests in alternative capacity pools if Spot\n capacity is unavailable, and does not maintain Spot capacity if Spot Instances are\n interrupted.

                  \n
                • \n
                • \n

                  \n instant - The EC2 Fleet places a synchronous one-time request for your\n desired capacity, and returns errors for any instances that could not be\n launched.

                  \n
                • \n
                \n

                For more information, see EC2 Fleet\n request types in the Amazon EC2 User Guide.

                " + "smithy.api#documentation": "

                The fleet type. The default value is maintain.

                \n
                  \n
                • \n

                  \n maintain - The EC2 Fleet places an asynchronous request for your desired\n capacity, and continues to maintain your desired Spot capacity by replenishing\n interrupted Spot Instances.

                  \n
                • \n
                • \n

                  \n request - The EC2 Fleet places an asynchronous one-time request for your\n desired capacity, but does submit Spot requests in alternative capacity pools if Spot\n capacity is unavailable, and does not maintain Spot capacity if Spot Instances are\n interrupted.

                  \n
                • \n
                • \n

                  \n instant - The EC2 Fleet places a synchronous one-time request for your\n desired capacity, and returns errors for any instances that could not be\n launched.

                  \n
                • \n
                \n

                For more information, see EC2 Fleet\n request types in the Amazon EC2 User Guide.

                " } }, "ValidFrom": { @@ -11504,7 +11504,8 @@ "AddressFamily": { "target": "com.amazonaws.ec2#AddressFamily", "traits": { - "smithy.api#documentation": "

                The IP protocol assigned to this IPAM pool. You must choose either IPv4 or IPv6 protocol for a pool.

                " + "smithy.api#documentation": "

                The IP protocol assigned to this IPAM pool. You must choose either IPv4 or IPv6 protocol for a pool.

                ", + "smithy.api#required": {} } }, "AutoImport": { @@ -40699,8 +40700,17 @@ "target": "com.amazonaws.ec2#AttributeValue", "traits": { "aws.protocols#ec2QueryName": "BootMode", + "smithy.api#documentation": "

                The boot mode.

                ", "smithy.api#xmlName": "bootMode" } + }, + "LastLaunchedTime": { + "target": "com.amazonaws.ec2#AttributeValue", + "traits": { + "aws.protocols#ec2QueryName": "LastLaunchedTime", + "smithy.api#documentation": "

                The date and time, in ISO 8601 date-time\n format, when the AMI was last used to launch an EC2 instance. When the AMI is used,\n there is a 24-hour delay before that usage is reported.

                \n \n

                \n lastLaunchedTime data is available starting April 2017.

                \n
                ", + "smithy.api#xmlName": "lastLaunchedTime" + } } }, "traits": { @@ -40742,6 +40752,10 @@ { "value": "bootMode", "name": "bootMode" + }, + { + "value": "lastLaunchedTime", + "name": "lastLaunchedTime" } ] } @@ -44381,7 +44395,7 @@ "target": "com.amazonaws.ec2#Integer", "traits": { "aws.protocols#ec2QueryName": "SpotMaxPricePercentageOverLowestPrice", - "smithy.api#documentation": "

                The price protection threshold for Spot Instances. This is the maximum you’ll pay for a Spot Instance,\n expressed as a percentage above the cheapest M, C, or R instance type with your specified\n attributes. When Amazon EC2 selects instance types with your attributes, it excludes instance\n types priced above your threshold.

                \n

                The parameter accepts an integer, which Amazon EC2 interprets as a percentage.

                \n

                To turn off price protection, specify a high value, such as 999999.

                \n

                This parameter is not supported for GetSpotPlacementScores and GetInstanceTypesFromInstanceRequirements.

                \n

                Default: 100\n

                ", + "smithy.api#documentation": "

                The price protection threshold for Spot Instances. This is the maximum you’ll pay for a Spot Instance,\n expressed as a percentage above the cheapest M, C, or R instance type with your specified\n attributes. When Amazon EC2 selects instance types with your attributes, it excludes instance\n types priced above your threshold.

                \n

                The parameter accepts an integer, which Amazon EC2 interprets as a percentage.

                \n

                To turn off price protection, specify a high value, such as 999999.

                \n

                This parameter is not supported for GetSpotPlacementScores and GetInstanceTypesFromInstanceRequirements.

                \n \n

                If you set TargetCapacityUnitType to vcpu or\n memory-mib, the price protection threshold is applied based on the\n per-vCPU or per-memory price instead of the per-instance price.

                \n
                \n

                Default: 100\n

                ", "smithy.api#xmlName": "spotMaxPricePercentageOverLowestPrice" } }, @@ -44389,7 +44403,7 @@ "target": "com.amazonaws.ec2#Integer", "traits": { "aws.protocols#ec2QueryName": "OnDemandMaxPricePercentageOverLowestPrice", - "smithy.api#documentation": "

                The price protection threshold for On-Demand Instances. This is the maximum you’ll pay for an On-Demand Instance,\n expressed as a percentage above the cheapest M, C, or R instance type with your specified\n attributes. When Amazon EC2 selects instance types with your attributes, it excludes instance\n types priced above your threshold.

                \n

                The parameter accepts an integer, which Amazon EC2 interprets as a percentage.

                \n

                To turn off price protection, specify a high value, such as 999999.

                \n

                This parameter is not supported for GetSpotPlacementScores and GetInstanceTypesFromInstanceRequirements.

                \n

                Default: 20\n

                ", + "smithy.api#documentation": "

                The price protection threshold for On-Demand Instances. This is the maximum you’ll pay for an On-Demand Instance,\n expressed as a percentage above the cheapest M, C, or R instance type with your specified\n attributes. When Amazon EC2 selects instance types with your attributes, it excludes instance\n types priced above your threshold.

                \n

                The parameter accepts an integer, which Amazon EC2 interprets as a percentage.

                \n

                To turn off price protection, specify a high value, such as 999999.

                \n

                This parameter is not supported for GetSpotPlacementScores and GetInstanceTypesFromInstanceRequirements.

                \n \n

                If you set TargetCapacityUnitType to vcpu or\n memory-mib, the price protection threshold is applied based on the\n per-vCPU or per-memory price instead of the per-instance price.

                \n
                \n

                Default: 20\n

                ", "smithy.api#xmlName": "onDemandMaxPricePercentageOverLowestPrice" } }, @@ -44549,13 +44563,13 @@ "SpotMaxPricePercentageOverLowestPrice": { "target": "com.amazonaws.ec2#Integer", "traits": { - "smithy.api#documentation": "

                The price protection threshold for Spot Instance. This is the maximum you’ll pay for an Spot Instance,\n expressed as a percentage above the cheapest M, C, or R instance type with your specified\n attributes. When Amazon EC2 selects instance types with your attributes, it excludes instance\n types priced above your threshold.

                \n

                The parameter accepts an integer, which Amazon EC2 interprets as a percentage.

                \n

                To turn off price protection, specify a high value, such as 999999.

                \n

                This parameter is not supported for GetSpotPlacementScores and GetInstanceTypesFromInstanceRequirements.

                \n

                Default: 100\n

                " + "smithy.api#documentation": "

                The price protection threshold for Spot Instances. This is the maximum you’ll pay for a Spot Instance,\n expressed as a percentage above the cheapest M, C, or R instance type with your specified\n attributes. When Amazon EC2 selects instance types with your attributes, it excludes instance\n types priced above your threshold.

                \n

                The parameter accepts an integer, which Amazon EC2 interprets as a percentage.

                \n

                To turn off price protection, specify a high value, such as 999999.

                \n

                This parameter is not supported for GetSpotPlacementScores and GetInstanceTypesFromInstanceRequirements.

                \n \n

                If you set TargetCapacityUnitType to vcpu or\n memory-mib, the price protection threshold is applied based on the\n per-vCPU or per-memory price instead of the per-instance price.

                \n
                \n

                Default: 100\n

                " } }, "OnDemandMaxPricePercentageOverLowestPrice": { "target": "com.amazonaws.ec2#Integer", "traits": { - "smithy.api#documentation": "

                The price protection threshold for On-Demand Instances. This is the maximum you’ll pay for an On-Demand Instance,\n expressed as a percentage above the cheapest M, C, or R instance type with your specified\n attributes. When Amazon EC2 selects instance types with your attributes, it excludes instance\n types priced above your threshold.

                \n

                The parameter accepts an integer, which Amazon EC2 interprets as a percentage.

                \n

                To turn off price protection, specify a high value, such as 999999.

                \n

                This parameter is not supported for GetSpotPlacementScores and GetInstanceTypesFromInstanceRequirements.

                \n

                Default: 20\n

                " + "smithy.api#documentation": "

                The price protection threshold for On-Demand Instances. This is the maximum you’ll pay for an On-Demand Instance,\n expressed as a percentage above the cheapest M, C, or R instance type with your specified\n attributes. When Amazon EC2 selects instance types with your attributes, it excludes instance\n types priced above your threshold.

                \n

                The parameter accepts an integer, which Amazon EC2 interprets as a percentage.

                \n

                To turn off price protection, specify a high value, such as 999999.

                \n

                This parameter is not supported for GetSpotPlacementScores and GetInstanceTypesFromInstanceRequirements.

                \n \n

                If you set TargetCapacityUnitType to vcpu or\n memory-mib, the price protection threshold is applied based on the\n per-vCPU or per-memory price instead of the per-instance price.

                \n
                \n

                Default: 20\n

                " } }, "BareMetal": { @@ -65382,6 +65396,10 @@ "value": "subnet", "name": "subnet" }, + { + "value": "subnet-cidr-reservation", + "name": "subnet_cidr_reservation" + }, { "value": "traffic-mirror-filter", "name": "traffic_mirror_filter" diff --git a/codegen/sdk-codegen/aws-models/ecr.json b/codegen/sdk-codegen/aws-models/ecr.json index fccc5d863834..c9c1d887318c 100644 --- a/codegen/sdk-codegen/aws-models/ecr.json +++ b/codegen/sdk-codegen/aws-models/ecr.json @@ -353,7 +353,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Checks the availability of one or more image layers in a repository.

                \n

                When an image is pushed to a repository, each image layer is checked to verify if it\n has been uploaded before. If it has been uploaded, then the image layer is\n skipped.

                \n \n

                This operation is used by the Amazon ECR proxy and is not generally used by\n customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images.

                \n
                " + "smithy.api#documentation": "

                Checks the availability of one or more image layers in a repository.

                \n

                When an image is pushed to a repository, each image layer is checked to verify if it\n has been uploaded before. If it has been uploaded, then the image layer is\n skipped.

                \n \n

                This operation is used by the Amazon ECR proxy and is not generally used by\n customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images.

                \n
                " } }, "com.amazonaws.ecr#BatchCheckLayerAvailabilityRequest": { @@ -651,7 +651,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Informs Amazon ECR that the image layer upload has completed for a specified registry,\n repository name, and upload ID. You can optionally provide a sha256 digest\n of the image layer for data validation purposes.

                \n

                When an image is pushed, the CompleteLayerUpload API is called once per each new image\n layer to verify that the upload has completed.

                \n \n

                This operation is used by the Amazon ECR proxy and is not generally used by\n customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images.

                \n
                " + "smithy.api#documentation": "

                Informs Amazon ECR that the image layer upload has completed for a specified registry,\n repository name, and upload ID. You can optionally provide a sha256 digest\n of the image layer for data validation purposes.

                \n

                When an image is pushed, the CompleteLayerUpload API is called once per each new image\n layer to verify that the upload has completed.

                \n \n

                This operation is used by the Amazon ECR proxy and is not generally used by\n customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images.

                \n
                " } }, "com.amazonaws.ecr#CompleteLayerUploadRequest": { @@ -2143,7 +2143,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Retrieves the pre-signed Amazon S3 download URL corresponding to an image layer. You can\n only get URLs for image layers that are referenced in an image.

                \n

                When an image is pulled, the GetDownloadUrlForLayer API is called once per image layer\n that is not already cached.

                \n \n

                This operation is used by the Amazon ECR proxy and is not generally used by\n customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images.

                \n
                " + "smithy.api#documentation": "

                Retrieves the pre-signed Amazon S3 download URL corresponding to an image layer. You can\n only get URLs for image layers that are referenced in an image.

                \n

                When an image is pulled, the GetDownloadUrlForLayer API is called once per image layer\n that is not already cached.

                \n \n

                This operation is used by the Amazon ECR proxy and is not generally used by\n customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images.

                \n
                " } }, "com.amazonaws.ecr#GetDownloadUrlForLayerRequest": { @@ -2703,6 +2703,12 @@ "traits": { "smithy.api#documentation": "

                The artifact media type of the image.

                " } + }, + "lastRecordedPullTime": { + "target": "com.amazonaws.ecr#RecordedPullTimestamp", + "traits": { + "smithy.api#documentation": "

                The date and time, expressed in standard JavaScript date format, when Amazon ECR recorded\n the last image pull.

                \n \n

                Amazon ECR refreshes the last image pull timestamp at least once every 24 hours. For\n example, if you pull an image once a day then the lastRecordedPullTime\n timestamp will indicate the exact time that the image was last pulled. However, if\n you pull an image once an hour, because Amazon ECR refreshes the\n lastRecordedPullTime timestamp at least once every 24 hours, the\n result may not be the exact time that the image was last pulled.

                \n
                " + } } }, "traits": { @@ -3116,7 +3122,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Notifies Amazon ECR that you intend to upload an image layer.

                \n

                When an image is pushed, the InitiateLayerUpload API is called once per image layer\n that has not already been uploaded. Whether or not an image layer has been uploaded is\n determined by the BatchCheckLayerAvailability API action.

                \n \n

                This operation is used by the Amazon ECR proxy and is not generally used by\n customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images.

                \n
                " + "smithy.api#documentation": "

                Notifies Amazon ECR that you intend to upload an image layer.

                \n

                When an image is pushed, the InitiateLayerUpload API is called once per image layer\n that has not already been uploaded. Whether or not an image layer has been uploaded is\n determined by the BatchCheckLayerAvailability API action.

                \n \n

                This operation is used by the Amazon ECR proxy and is not generally used by\n customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images.

                \n
                " } }, "com.amazonaws.ecr#InitiateLayerUploadRequest": { @@ -4116,7 +4122,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Updates the image scanning configuration for the specified repository.

                " + "smithy.api#documentation": "\n

                The PutImageScanningConfiguration API is being deprecated, in favor\n of specifying the image scanning configuration at the registry level. For more\n information, see PutRegistryScanningConfiguration.

                \n
                \n

                Updates the image scanning configuration for the specified repository.

                " } }, "com.amazonaws.ecr#PutImageScanningConfigurationRequest": { @@ -4390,7 +4396,7 @@ "scanType": { "target": "com.amazonaws.ecr#ScanType", "traits": { - "smithy.api#documentation": "

                The scanning type to set for the registry.

                \n

                By default, the BASIC scan type is used. When basic scanning is set, you\n may specify filters to determine which individual repositories, or all repositories, are\n scanned when new images are pushed. Alternatively, you can do manual scans of images\n with basic scanning.

                \n

                When the ENHANCED scan type is set, Amazon Inspector provides automated, continuous\n scanning of all repositories in your registry.

                " + "smithy.api#documentation": "

                The scanning type to set for the registry.

                \n

                When a registry scanning configuration is not defined, by default the\n BASIC scan type is used. When basic scanning is used, you may specify\n filters to determine which individual repositories, or all repositories, are scanned\n when new images are pushed to those repositories. Alternatively, you can do manual scans\n of images with basic scanning.

                \n

                When the ENHANCED scan type is set, Amazon Inspector provides automated\n vulnerability scanning. You may choose between continuous scanning or scan on push and\n you may specify filters to determine which individual repositories, or all repositories,\n are scanned.

                " } }, "rules": { @@ -4484,6 +4490,9 @@ "com.amazonaws.ecr#RecommendationText": { "type": "string" }, + "com.amazonaws.ecr#RecordedPullTimestamp": { + "type": "timestamp" + }, "com.amazonaws.ecr#ReferenceUrlsList": { "type": "list", "member": { @@ -4565,7 +4574,7 @@ "scanFrequency": { "target": "com.amazonaws.ecr#ScanFrequency", "traits": { - "smithy.api#documentation": "

                The frequency that scans are performed at for a private registry.

                ", + "smithy.api#documentation": "

                The frequency that scans are performed at for a private registry. When the\n ENHANCED scan type is specified, the supported scan frequencies are\n CONTINUOUS_SCAN and SCAN_ON_PUSH. When the\n BASIC scan type is specified, the SCAN_ON_PUSH and\n MANUAL scan frequencies are supported.

                ", "smithy.api#required": {} } }, @@ -5546,12 +5555,12 @@ "Value": { "target": "com.amazonaws.ecr#TagValue", "traits": { - "smithy.api#documentation": "

                The optional part of a key-value pair that make up a tag. A value acts as\n a descriptor within a tag category (key).

                " + "smithy.api#documentation": "

                A value acts as a descriptor within a tag category (key).

                " } } }, "traits": { - "smithy.api#documentation": "

                The metadata that you apply to a resource to help you categorize and organize them.\n Each tag consists of a key and an optional value, both of which you define.\n Tag keys can have a maximum character length of 128 characters, and tag values can have\n a maximum length of 256 characters.

                " + "smithy.api#documentation": "

                The metadata to apply to a resource to help you categorize and organize them. Each tag\n consists of a key and a value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have\n a maximum length of 256 characters.

                " } }, "com.amazonaws.ecr#TagKey": { @@ -5784,7 +5793,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Uploads an image layer part to Amazon ECR.

                \n

                When an image is pushed, each new image layer is uploaded in parts. The maximum size\n of each image layer part can be 20971520 bytes (or about 20MB). The UploadLayerPart API\n is called once per each new image layer part.

                \n \n

                This operation is used by the Amazon ECR proxy and is not generally used by\n customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images.

                \n
                " + "smithy.api#documentation": "

                Uploads an image layer part to Amazon ECR.

                \n

                When an image is pushed, each new image layer is uploaded in parts. The maximum size\n of each image layer part can be 20971520 bytes (or about 20MB). The UploadLayerPart API\n is called once per each new image layer part.

                \n \n

                This operation is used by the Amazon ECR proxy and is not generally used by\n customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images.

                \n
                " } }, "com.amazonaws.ecr#UploadLayerPartRequest": { diff --git a/codegen/sdk-codegen/aws-models/ecs.json b/codegen/sdk-codegen/aws-models/ecs.json index ed4ac8ab6c8f..95e9dd8de59b 100644 --- a/codegen/sdk-codegen/aws-models/ecs.json +++ b/codegen/sdk-codegen/aws-models/ecs.json @@ -369,7 +369,7 @@ } }, "traits": { - "smithy.api#documentation": "

                An attribute is a name-value pair that's associated with an Amazon ECS object. Attributes\n\t\t\tenable you to extend the Amazon ECS data model by adding custom metadata to your resources.\n\t\t\tFor more information, see Attributes in the Amazon Elastic Container Service Developer Guide.

                " + "smithy.api#documentation": "

                An attribute is a name-value pair that's associated with an Amazon ECS object. Use attributes\n\t\t\tto extend the Amazon ECS data model by adding custom metadata to your resources.\n\t\t\tFor more information, see Attributes in the Amazon Elastic Container Service Developer Guide.

                " } }, "com.amazonaws.ecs#AttributeLimitExceededException": { @@ -898,7 +898,7 @@ } }, "traits": { - "smithy.api#documentation": "

                The settings to use when creating a cluster. This parameter is used to enable CloudWatch\n\t\t\tContainer Insights for a cluster.

                " + "smithy.api#documentation": "

                The settings to use when creating a cluster. This parameter is used to turn on CloudWatch\n\t\t\tContainer Insights for a cluster.

                " } }, "com.amazonaws.ecs#ClusterSettingName": { @@ -1205,19 +1205,19 @@ "dependsOn": { "target": "com.amazonaws.ecs#ContainerDependencies", "traits": { - "smithy.api#documentation": "

                The dependencies defined for container startup and shutdown. A container can contain\n\t\t\tmultiple dependencies. When a dependency is defined for container startup, for container\n\t\t\tshutdown it is reversed.

                \n\t\t

                For tasks using the EC2 launch type, the container instances require at\n\t\t\tleast version 1.26.0 of the container agent to enable container dependencies. However,\n\t\t\twe recommend using the latest container agent version. For information about checking\n\t\t\tyour agent version and updating to the latest version, see Updating the Amazon ECS\n\t\t\t\tContainer Agent in the Amazon Elastic Container Service Developer Guide. If you're using\n\t\t\tan Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the\n\t\t\t\tecs-init package. If your container instances are launched from version\n\t\t\t\t20190301 or later, then they contain the required versions of the\n\t\t\tcontainer agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.

                \n\t\t

                For tasks using the Fargate launch type, the task or service requires\n\t\t\tthe following platforms:

                \n\t\t
                  \n
                • \n\t\t\t\t

                  Linux platform version 1.3.0 or later.

                  \n\t\t\t
                • \n
                • \n\t\t\t\t

                  Windows platform version 1.0.0 or later.

                  \n\t\t\t
                • \n
                " + "smithy.api#documentation": "

                The dependencies defined for container startup and shutdown. A container can contain\n\t\t\tmultiple dependencies. When a dependency is defined for container startup, for container\n\t\t\tshutdown it is reversed.

                \n\t\t

                For tasks using the EC2 launch type, the container instances require at\n\t\t\tleast version 1.26.0 of the container agent to turn on container dependencies. However,\n\t\t\twe recommend using the latest container agent version. For information about checking\n\t\t\tyour agent version and updating to the latest version, see Updating the Amazon ECS\n\t\t\t\tContainer Agent in the Amazon Elastic Container Service Developer Guide. If you're using\n\t\t\tan Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the\n\t\t\t\tecs-init package. If your container instances are launched from version\n\t\t\t\t20190301 or later, then they contain the required versions of the\n\t\t\tcontainer agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.

                \n\t\t

                For tasks using the Fargate launch type, the task or service requires\n\t\t\tthe following platforms:

                \n\t\t
                  \n
                • \n\t\t\t\t

                  Linux platform version 1.3.0 or later.

                  \n\t\t\t
                • \n
                • \n\t\t\t\t

                  Windows platform version 1.0.0 or later.

                  \n\t\t\t
                • \n
                " } }, "startTimeout": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "

                Time duration (in seconds) to wait before giving up on resolving dependencies for a\n\t\t\tcontainer. For example, you specify two containers in a task definition with containerA\n\t\t\thaving a dependency on containerB reaching a COMPLETE,\n\t\t\tSUCCESS, or HEALTHY status. If a startTimeout\n\t\t\tvalue is specified for containerB and it doesn't reach the desired status within that\n\t\t\ttime then containerA gives up and not start. This results in the task transitioning to a\n\t\t\t\tSTOPPED state.

                \n\t\t \n\t\t\t

                When the ECS_CONTAINER_START_TIMEOUT container agent configuration\n\t\t\t\tvariable is used, it's enforced independently from this start timeout value.

                \n\t\t
                \n\t\t

                For tasks using the Fargate launch type, the task or service requires\n\t\t\tthe following platforms:

                \n\t\t
                  \n
                • \n\t\t\t\t

                  Linux platform version 1.3.0 or later.

                  \n\t\t\t
                • \n
                • \n\t\t\t\t

                  Windows platform version 1.0.0 or later.

                  \n\t\t\t
                • \n
                \n\t\t

                For tasks using the EC2 launch type, your container instances require at\n\t\t\tleast version 1.26.0 of the container agent to enable a container start\n\t\t\ttimeout value. However, we recommend using the latest container agent version. For\n\t\t\tinformation about checking your agent version and updating to the latest version, see\n\t\t\t\tUpdating the Amazon ECS\n\t\t\t\tContainer Agent in the Amazon Elastic Container Service Developer Guide. If you're using\n\t\t\tan Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the\n\t\t\t\tecs-init package. If your container instances are launched from version\n\t\t\t\t20190301 or later, then they contain the required versions of the\n\t\t\tcontainer agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.

                " + "smithy.api#documentation": "

                Time duration (in seconds) to wait before giving up on resolving dependencies for a\n\t\t\tcontainer. For example, you specify two containers in a task definition with containerA\n\t\t\thaving a dependency on containerB reaching a COMPLETE,\n\t\t\tSUCCESS, or HEALTHY status. If a startTimeout\n\t\t\tvalue is specified for containerB and it doesn't reach the desired status within that\n\t\t\ttime then containerA gives up and not start. This results in the task transitioning to a\n\t\t\t\tSTOPPED state.

                \n\t\t \n\t\t\t

                When the ECS_CONTAINER_START_TIMEOUT container agent configuration\n\t\t\t\tvariable is used, it's enforced independently from this start timeout value.

                \n\t\t
                \n\t\t

                For tasks using the Fargate launch type, the task or service requires\n\t\t\tthe following platforms:

                \n\t\t
                  \n
                • \n\t\t\t\t

                  Linux platform version 1.3.0 or later.

                  \n\t\t\t
                • \n
                • \n\t\t\t\t

                  Windows platform version 1.0.0 or later.

                  \n\t\t\t
                • \n
                \n\t\t

                For tasks using the EC2 launch type, your container instances require at\n\t\t\tleast version 1.26.0 of the container agent to use a container start\n\t\t\ttimeout value. However, we recommend using the latest container agent version. For\n\t\t\tinformation about checking your agent version and updating to the latest version, see\n\t\t\t\tUpdating the Amazon ECS\n\t\t\t\tContainer Agent in the Amazon Elastic Container Service Developer Guide. If you're using\n\t\t\tan Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the\n\t\t\t\tecs-init package. If your container instances are launched from version\n\t\t\t\t20190301 or later, then they contain the required versions of the\n\t\t\tcontainer agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.

                " } }, "stopTimeout": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "

                Time duration (in seconds) to wait before the container is forcefully killed if it\n\t\t\tdoesn't exit normally on its own.

                \n\t\t

                For tasks using the Fargate launch type, the task or service requires\n\t\t\tthe following platforms:

                \n\t\t
                  \n
                • \n\t\t\t\t

                  Linux platform version 1.3.0 or later.

                  \n\t\t\t
                • \n
                • \n\t\t\t\t

                  Windows platform version 1.0.0 or later.

                  \n\t\t\t
                • \n
                \n\t\t

                The max stop timeout value is 120 seconds and if the parameter is not specified, the\n\t\t\tdefault value of 30 seconds is used.

                \n\t\t

                For tasks that use the EC2 launch type, if the stopTimeout\n\t\t\tparameter isn't specified, the value set for the Amazon ECS container agent configuration\n\t\t\tvariable ECS_CONTAINER_STOP_TIMEOUT is used. If neither the\n\t\t\t\tstopTimeout parameter or the ECS_CONTAINER_STOP_TIMEOUT\n\t\t\tagent configuration variable are set, then the default values of 30 seconds for Linux\n\t\t\tcontainers and 30 seconds on Windows containers are used. Your container instances\n\t\t\trequire at least version 1.26.0 of the container agent to enable a container stop\n\t\t\ttimeout value. However, we recommend using the latest container agent version. For\n\t\t\tinformation about checking your agent version and updating to the latest version, see\n\t\t\t\tUpdating the Amazon ECS\n\t\t\t\tContainer Agent in the Amazon Elastic Container Service Developer Guide. If you're using\n\t\t\tan Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the\n\t\t\t\tecs-init package. If your container instances are launched from version\n\t\t\t\t20190301 or later, then they contain the required versions of the\n\t\t\tcontainer agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.

                " + "smithy.api#documentation": "

                Time duration (in seconds) to wait before the container is forcefully killed if it\n\t\t\tdoesn't exit normally on its own.

                \n\t\t

                For tasks using the Fargate launch type, the task or service requires\n\t\t\tthe following platforms:

                \n\t\t
                  \n
                • \n\t\t\t\t

                  Linux platform version 1.3.0 or later.

                  \n\t\t\t
                • \n
                • \n\t\t\t\t

                  Windows platform version 1.0.0 or later.

                  \n\t\t\t
                • \n
                \n\t\t

                The max stop timeout value is 120 seconds and if the parameter is not specified, the\n\t\t\tdefault value of 30 seconds is used.

                \n\t\t

                For tasks that use the EC2 launch type, if the stopTimeout\n\t\t\tparameter isn't specified, the value set for the Amazon ECS container agent configuration\n\t\t\tvariable ECS_CONTAINER_STOP_TIMEOUT is used. If neither the\n\t\t\t\tstopTimeout parameter or the ECS_CONTAINER_STOP_TIMEOUT\n\t\t\tagent configuration variable are set, then the default values of 30 seconds for Linux\n\t\t\tcontainers and 30 seconds on Windows containers are used. Your container instances\n\t\t\trequire at least version 1.26.0 of the container agent to use a container stop\n\t\t\ttimeout value. However, we recommend using the latest container agent version. For\n\t\t\tinformation about checking your agent version and updating to the latest version, see\n\t\t\t\tUpdating the Amazon ECS\n\t\t\t\tContainer Agent in the Amazon Elastic Container Service Developer Guide. If you're using\n\t\t\tan Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the\n\t\t\t\tecs-init package. If your container instances are launched from version\n\t\t\t\t20190301 or later, then they contain the required versions of the\n\t\t\tcontainer agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.

                " } }, "hostname": { @@ -1370,7 +1370,7 @@ } }, "traits": { - "smithy.api#documentation": "

                The dependencies defined for container startup and shutdown. A container can contain\n\t\t\tmultiple dependencies. When a dependency is defined for container startup, for container\n\t\t\tshutdown it is reversed.

                \n\t\t

                Your Amazon ECS container instances require at least version 1.26.0 of the container agent\n\t\t\tto enable container dependencies. However, we recommend using the latest container agent\n\t\t\tversion. For information about checking your agent version and updating to the latest\n\t\t\tversion, see Updating the Amazon ECS\n\t\t\t\tContainer Agent in the Amazon Elastic Container Service Developer Guide. If you're using\n\t\t\tan Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the\n\t\t\t\tecs-init package. If your container instances are launched from version\n\t\t\t\t20190301 or later, then they contain the required versions of the\n\t\t\tcontainer agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.

                \n\t\t \n\t\t\t

                For tasks that use the Fargate launch type, the task or service\n\t\t\t\trequires the following platforms:

                \n\t\t\t
                  \n
                • \n\t\t\t\t\t

                  Linux platform version 1.3.0 or later.

                  \n\t\t\t\t
                • \n
                • \n\t\t\t\t\t

                  Windows platform version 1.0.0 or later.

                  \n\t\t\t\t
                • \n
                \n\t\t
                " + "smithy.api#documentation": "

                The dependencies defined for container startup and shutdown. A container can contain\n\t\t\tmultiple dependencies. When a dependency is defined for container startup, for container\n\t\t\tshutdown it is reversed.

                \n\t\t

                Your Amazon ECS container instances require at least version 1.26.0 of the container agent\n\t\t\tto use container dependencies. However, we recommend using the latest container agent\n\t\t\tversion. For information about checking your agent version and updating to the latest\n\t\t\tversion, see Updating the Amazon ECS\n\t\t\t\tContainer Agent in the Amazon Elastic Container Service Developer Guide. If you're using\n\t\t\tan Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the\n\t\t\t\tecs-init package. If your container instances are launched from version\n\t\t\t\t20190301 or later, then they contain the required versions of the\n\t\t\tcontainer agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.

                \n\t\t \n\t\t\t

                For tasks that use the Fargate launch type, the task or service\n\t\t\t\trequires the following platforms:

                \n\t\t\t
                  \n
                • \n\t\t\t\t\t

                  Linux platform version 1.3.0 or later.

                  \n\t\t\t\t
                • \n
                • \n\t\t\t\t\t

                  Windows platform version 1.0.0 or later.

                  \n\t\t\t\t
                • \n
                \n\t\t
                " } }, "com.amazonaws.ecs#ContainerInstance": { @@ -1793,7 +1793,7 @@ "settings": { "target": "com.amazonaws.ecs#ClusterSettings", "traits": { - "smithy.api#documentation": "

                The setting to use when creating a cluster. This parameter is used to enable CloudWatch\n\t\t\tContainer Insights for a cluster. If this value is specified, it overrides the\n\t\t\t\tcontainerInsights value set with PutAccountSetting or\n\t\t\t\tPutAccountSettingDefault.

                " + "smithy.api#documentation": "

                The setting to use when creating a cluster. This parameter is used to turn on CloudWatch\n\t\t\tContainer Insights for a cluster. If this value is specified, it overrides the\n\t\t\t\tcontainerInsights value set with PutAccountSetting or\n\t\t\t\tPutAccountSettingDefault.

                " } }, "configuration": { @@ -1862,7 +1862,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Runs and maintains your desired number of tasks from a specified task definition. If\n\t\t\tthe number of tasks running in a service drops below the desiredCount,\n\t\t\tAmazon ECS runs another copy of the task in the specified cluster. To update an existing\n\t\t\tservice, see the UpdateService action.

                \n\t\t

                In addition to maintaining the desired count of tasks in your service, you can\n\t\t\toptionally run your service behind one or more load balancers. The load balancers\n\t\t\tdistribute traffic across the tasks that are associated with the service. For more\n\t\t\tinformation, see Service Load Balancing in the\n\t\t\t\tAmazon Elastic Container Service Developer Guide.

                \n\t\t

                Tasks for services that don't use a load balancer are considered healthy if they're in\n\t\t\tthe RUNNING state. Tasks for services that use a load balancer are\n\t\t\tconsidered healthy if they're in the RUNNING state and the container\n\t\t\tinstance that they're hosted on is reported as healthy by the load balancer.

                \n\t\t

                There are two service scheduler strategies available:

                \n\t\t
                  \n
                • \n\t\t\t\t

                  \n REPLICA - The replica scheduling strategy places and\n\t\t\t\t\tmaintains your desired number of tasks across your cluster. By default, the\n\t\t\t\t\tservice scheduler spreads tasks across Availability Zones. You can use task\n\t\t\t\t\tplacement strategies and constraints to customize task placement decisions. For\n\t\t\t\t\tmore information, see Service Scheduler Concepts in the\n\t\t\t\t\t\tAmazon Elastic Container Service Developer Guide.

                  \n\t\t\t
                • \n
                • \n\t\t\t\t

                  \n DAEMON - The daemon scheduling strategy deploys exactly one\n\t\t\t\t\ttask on each active container instance that meets all of the task placement\n\t\t\t\t\tconstraints that you specify in your cluster. The service scheduler also\n\t\t\t\t\tevaluates the task placement constraints for running tasks. It also stops tasks\n\t\t\t\t\tthat don't meet the placement constraints. When using this strategy, you don't\n\t\t\t\t\tneed to specify a desired number of tasks, a task placement strategy, or use\n\t\t\t\t\tService Auto Scaling policies. For more information, see Service Scheduler Concepts in the\n\t\t\t\t\t\tAmazon Elastic Container Service Developer Guide.

                  \n\t\t\t
                • \n
                \n\t\t

                You can optionally specify a deployment configuration for your service. The deployment\n\t\t\tis initiated by changing properties. For example, the deployment might be initiated by\n\t\t\tthe task definition or by your desired count of a service. This is done with an UpdateService operation. The default value for a replica service for\n\t\t\t\tminimumHealthyPercent is 100%. The default value for a daemon service\n\t\t\tfor minimumHealthyPercent is 0%.

                \n\t\t

                If a service uses the ECS deployment controller, the minimum healthy\n\t\t\tpercent represents a lower limit on the number of tasks in a service that must remain in\n\t\t\tthe RUNNING state during a deployment. Specifically, it represents it as a\n\t\t\tpercentage of your desired number of tasks (rounded up to the nearest integer). This\n\t\t\thappens when any of your container instances are in the DRAINING state if\n\t\t\tthe service contains tasks using the EC2 launch type. Using this\n\t\t\tparameter, you can deploy without using additional cluster capacity. For example, if you\n\t\t\tset your service to have desired number of four tasks and a minimum healthy percent of\n\t\t\t50%, the scheduler might stop two existing tasks to free up cluster capacity before\n\t\t\tstarting two new tasks. If they're in the RUNNING state, tasks for services\n\t\t\tthat don't use a load balancer are considered healthy . If they're in the\n\t\t\t\tRUNNING state and reported as healthy by the load balancer, tasks for\n\t\t\tservices that do use a load balancer are considered healthy . The\n\t\t\tdefault value for minimum healthy percent is 100%.

                \n\t\t

                If a service uses the ECS deployment controller, the maximum percent parameter represents an upper limit on the\n\t\t\tnumber of tasks in a service that are allowed in the RUNNING or\n\t\t\t\tPENDING state during a deployment. Specifically, it represents it as a\n\t\t\tpercentage of the desired number of tasks (rounded down to the nearest integer). This\n\t\t\thappens when any of your container instances are in the DRAINING state if\n\t\t\tthe service contains tasks using the EC2 launch type. Using this\n\t\t\tparameter, you can define the deployment batch size. For example, if your service has a\n\t\t\tdesired number of four tasks and a maximum percent value of 200%, the scheduler may\n\t\t\tstart four new tasks before stopping the four older tasks (provided that the cluster\n\t\t\tresources required to do this are available). The default value for maximum percent is\n\t\t\t200%.

                \n\t\t

                If a service uses either the CODE_DEPLOY or EXTERNAL\n\t\t\tdeployment controller types and tasks that use the EC2 launch type, the\n\t\t\t\tminimum healthy percent and maximum percent values are used only to define the lower and upper limit\n\t\t\ton the number of the tasks in the service that remain in the RUNNING state.\n\t\t\tThis is while the container instances are in the DRAINING state. If the\n\t\t\ttasks in the service use the Fargate launch type, the minimum healthy\n\t\t\tpercent and maximum percent values aren't used. This is the case even if they're\n\t\t\tcurrently visible when describing your service.

                \n\t\t

                When creating a service that uses the EXTERNAL deployment controller, you\n\t\t\tcan specify only parameters that aren't controlled at the task set level. The only\n\t\t\trequired parameter is the service name. You control your services using the CreateTaskSet operation. For more information, see Amazon ECS Deployment Types in the Amazon Elastic Container Service Developer Guide.

                \n\t\t

                When the service scheduler launches new tasks, it determines task placement in your\n\t\t\tcluster using the following logic:

                \n\t\t
                  \n
                • \n\t\t\t\t

                  Determine which of the container instances in your cluster can support the\n\t\t\t\t\ttask definition of your service. For example, they have the required CPU,\n\t\t\t\t\tmemory, ports, and container instance attributes.

                  \n\t\t\t
                • \n
                • \n\t\t\t\t

                  By default, the service scheduler attempts to balance tasks across\n\t\t\t\t\tAvailability Zones in this manner. This is the case even if you can choose a\n\t\t\t\t\tdifferent placement strategy with the placementStrategy\n\t\t\t\t\tparameter.

                  \n\t\t\t\t
                    \n
                  • \n\t\t\t\t\t\t

                    Sort the valid container instances, giving priority to instances that\n\t\t\t\t\t\t\thave the fewest number of running tasks for this service in their\n\t\t\t\t\t\t\trespective Availability Zone. For example, if zone A has one running\n\t\t\t\t\t\t\tservice task and zones B and C each have zero, valid container instances\n\t\t\t\t\t\t\tin either zone B or C are considered optimal for placement.

                    \n\t\t\t\t\t
                  • \n
                  • \n\t\t\t\t\t\t

                    Place the new service task on a valid container instance in an optimal\n\t\t\t\t\t\t\tAvailability Zone based on the previous steps, favoring container\n\t\t\t\t\t\t\tinstances with the fewest number of running tasks for this\n\t\t\t\t\t\t\tservice.

                    \n\t\t\t\t\t
                  • \n
                  \n\t\t\t
                • \n
                " + "smithy.api#documentation": "

                Runs and maintains your desired number of tasks from a specified task definition. If\n\t\t\tthe number of tasks running in a service drops below the desiredCount,\n\t\t\tAmazon ECS runs another copy of the task in the specified cluster. To update an existing\n\t\t\tservice, see the UpdateService action.

                \n\t\t

                In addition to maintaining the desired count of tasks in your service, you can\n\t\t\toptionally run your service behind one or more load balancers. The load balancers\n\t\t\tdistribute traffic across the tasks that are associated with the service. For more\n\t\t\tinformation, see Service Load Balancing in the\n\t\t\t\tAmazon Elastic Container Service Developer Guide.

                \n\t\t

                Tasks for services that don't use a load balancer are considered healthy if they're in\n\t\t\tthe RUNNING state. Tasks for services that use a load balancer are\n\t\t\tconsidered healthy if they're in the RUNNING state and are reported as healthy by the load balancer.

                \n\t\t

                There are two service scheduler strategies available:

                \n\t\t
                  \n
                • \n\t\t\t\t

                  \n REPLICA - The replica scheduling strategy places and\n\t\t\t\t\tmaintains your desired number of tasks across your cluster. By default, the\n\t\t\t\t\tservice scheduler spreads tasks across Availability Zones. You can use task\n\t\t\t\t\tplacement strategies and constraints to customize task placement decisions. For\n\t\t\t\t\tmore information, see Service Scheduler Concepts in the\n\t\t\t\t\t\tAmazon Elastic Container Service Developer Guide.

                  \n\t\t\t
                • \n
                • \n\t\t\t\t

                  \n DAEMON - The daemon scheduling strategy deploys exactly one\n\t\t\t\t\ttask on each active container instance that meets all of the task placement\n\t\t\t\t\tconstraints that you specify in your cluster. The service scheduler also\n\t\t\t\t\tevaluates the task placement constraints for running tasks. It also stops tasks\n\t\t\t\t\tthat don't meet the placement constraints. When using this strategy, you don't\n\t\t\t\t\tneed to specify a desired number of tasks, a task placement strategy, or use\n\t\t\t\t\tService Auto Scaling policies. For more information, see Service Scheduler Concepts in the\n\t\t\t\t\t\tAmazon Elastic Container Service Developer Guide.

                  \n\t\t\t
                • \n
                \n\t\t

                You can optionally specify a deployment configuration for your service. The deployment\n\t\t\tis initiated by changing properties. For example, the deployment might be initiated by\n\t\t\tthe task definition or by your desired count of a service. This is done with an UpdateService operation. The default value for a replica service for\n\t\t\t\tminimumHealthyPercent is 100%. The default value for a daemon service\n\t\t\tfor minimumHealthyPercent is 0%.

                \n\t\t

                If a service uses the ECS deployment controller, the minimum healthy\n\t\t\tpercent represents a lower limit on the number of tasks in a service that must remain in\n\t\t\tthe RUNNING state during a deployment. Specifically, it represents it as a\n\t\t\tpercentage of your desired number of tasks (rounded up to the nearest integer). This\n\t\t\thappens when any of your container instances are in the DRAINING state if\n\t\t\tthe service contains tasks using the EC2 launch type. Using this\n\t\t\tparameter, you can deploy without using additional cluster capacity. For example, if you\n\t\t\tset your service to have desired number of four tasks and a minimum healthy percent of\n\t\t\t50%, the scheduler might stop two existing tasks to free up cluster capacity before\n\t\t\tstarting two new tasks. If they're in the RUNNING state, tasks for services\n\t\t\tthat don't use a load balancer are considered healthy . If they're in the\n\t\t\t\tRUNNING state and reported as healthy by the load balancer, tasks for\n\t\t\tservices that do use a load balancer are considered healthy . The\n\t\t\tdefault value for minimum healthy percent is 100%.

                \n\t\t

                If a service uses the ECS deployment controller, the maximum percent parameter represents an upper limit on the\n\t\t\tnumber of tasks in a service that are allowed in the RUNNING or\n\t\t\t\tPENDING state during a deployment. Specifically, it represents it as a\n\t\t\tpercentage of the desired number of tasks (rounded down to the nearest integer). This\n\t\t\thappens when any of your container instances are in the DRAINING state if\n\t\t\tthe service contains tasks using the EC2 launch type. Using this\n\t\t\tparameter, you can define the deployment batch size. For example, if your service has a\n\t\t\tdesired number of four tasks and a maximum percent value of 200%, the scheduler may\n\t\t\tstart four new tasks before stopping the four older tasks (provided that the cluster\n\t\t\tresources required to do this are available). The default value for maximum percent is\n\t\t\t200%.

                \n\t\t

                If a service uses either the CODE_DEPLOY or EXTERNAL\n\t\t\tdeployment controller types and tasks that use the EC2 launch type, the\n\t\t\t\tminimum healthy percent and maximum percent values are used only to define the lower and upper limit\n\t\t\ton the number of the tasks in the service that remain in the RUNNING state.\n\t\t\tThis is while the container instances are in the DRAINING state. If the\n\t\t\ttasks in the service use the Fargate launch type, the minimum healthy\n\t\t\tpercent and maximum percent values aren't used. This is the case even if they're\n\t\t\tcurrently visible when describing your service.

                \n\t\t

                When creating a service that uses the EXTERNAL deployment controller, you\n\t\t\tcan specify only parameters that aren't controlled at the task set level. The only\n\t\t\trequired parameter is the service name. You control your services using the CreateTaskSet operation. For more information, see Amazon ECS Deployment Types in the Amazon Elastic Container Service Developer Guide.

                \n\t\t

                When the service scheduler launches new tasks, it determines task placement in your\n\t\t\tcluster using the following logic:

                \n\t\t
                  \n
                • \n\t\t\t\t

                  Determine which of the container instances in your cluster can support the\n\t\t\t\t\ttask definition of your service. For example, they have the required CPU,\n\t\t\t\t\tmemory, ports, and container instance attributes.

                  \n\t\t\t
                • \n
                • \n\t\t\t\t

                  By default, the service scheduler attempts to balance tasks across\n\t\t\t\t\tAvailability Zones in this manner. This is the case even if you can choose a\n\t\t\t\t\tdifferent placement strategy with the placementStrategy\n\t\t\t\t\tparameter.

                  \n\t\t\t\t
                    \n
                  • \n\t\t\t\t\t\t

                    Sort the valid container instances, giving priority to instances that\n\t\t\t\t\t\t\thave the fewest number of running tasks for this service in their\n\t\t\t\t\t\t\trespective Availability Zone. For example, if zone A has one running\n\t\t\t\t\t\t\tservice task and zones B and C each have zero, valid container instances\n\t\t\t\t\t\t\tin either zone B or C are considered optimal for placement.

                    \n\t\t\t\t\t
                  • \n
                  • \n\t\t\t\t\t\t

                    Place the new service task on a valid container instance in an optimal\n\t\t\t\t\t\t\tAvailability Zone based on the previous steps, favoring container\n\t\t\t\t\t\t\tinstances with the fewest number of running tasks for this\n\t\t\t\t\t\t\tservice.

                    \n\t\t\t\t\t
                  • \n
                  \n\t\t\t
                • \n
                " } }, "com.amazonaws.ecs#CreateServiceRequest": { @@ -1890,7 +1890,7 @@ "loadBalancers": { "target": "com.amazonaws.ecs#LoadBalancers", "traits": { - "smithy.api#documentation": "

                A load balancer object representing the load balancers to use with your service. For\n\t\t\tmore information, see Service Load Balancing in the\n\t\t\t\tAmazon Elastic Container Service Developer Guide.

                \n\t\t

                If the service uses the rolling update (ECS) deployment controller and\n\t\t\tusing either an Application Load Balancer or Network Load Balancer, you must specify one or more target group ARNs to attach\n\t\t\tto the service. The service-linked role is required for services that use multiple\n\t\t\ttarget groups. For more information, see Using service-linked roles for Amazon ECS in the\n\t\t\t\tAmazon Elastic Container Service Developer Guide.

                \n\t\t

                If the service uses the CODE_DEPLOY deployment controller, the service is\n\t\t\trequired to use either an Application Load Balancer or Network Load Balancer. When creating an CodeDeploy deployment group, you\n\t\t\tspecify two target groups (referred to as a targetGroupPair). During a\n\t\t\tdeployment, CodeDeploy determines which task set in your service has the status\n\t\t\t\tPRIMARY, and it associates one target group with it. Then, it also\n\t\t\tassociates the other target group with the replacement task set. The load balancer can\n\t\t\talso have up to two listeners: a required listener for production traffic and an\n\t\t\toptional listener that you can use to perform validation tests with Lambda functions\n\t\t\tbefore routing production traffic to it.

                \n\t\t

                After you create a service using the ECS deployment controller, the load\n\t\t\tbalancer name or target group ARN, container name, and container port that's specified\n\t\t\tin the service definition are immutable. If you use the CODE_DEPLOY\n\t\t\tdeployment controller, these values can be changed when updating the service.

                \n\t\t

                For Application Load Balancers and Network Load Balancers, this object must contain the load balancer target group ARN,\n\t\t\tthe container name, and the container port to access from the load balancer. The\n\t\t\tcontainer name must be as it appears in a container definition. The load balancer name\n\t\t\tparameter must be omitted. When a task from this service is placed on a container\n\t\t\tinstance, the container instance and port combination is registered as a target in the\n\t\t\ttarget group that's specified here.

                \n\t\t

                For Classic Load Balancers, this object must contain the load balancer name, the container name , and\n\t\t\tthe container port to access from the load balancer. The container name must be as it\n\t\t\tappears in a container definition. The target group ARN parameter must be omitted. When\n\t\t\ta task from this service is placed on a container instance, the container instance is\n\t\t\tregistered with the load balancer that's specified here.

                \n\t\t

                Services with tasks that use the awsvpc network mode (for example, those\n\t\t\twith the Fargate launch type) only support Application Load Balancers and Network Load Balancers. Classic Load Balancers\n\t\t\taren't supported. Also, when you create any target groups for these services, you must\n\t\t\tchoose ip as the target type, not instance. This is because\n\t\t\ttasks that use the awsvpc network mode are associated with an elastic\n\t\t\tnetwork interface, not an Amazon EC2 instance.

                " + "smithy.api#documentation": "

                A load balancer object representing the load balancers to use with your service. For\n\t\t\tmore information, see Service Load Balancing in the\n\t\t\t\tAmazon Elastic Container Service Developer Guide.

                \n\t\t

                If the service uses the rolling update (ECS) deployment controller and\n\t\t\tusing either an Application Load Balancer or Network Load Balancer, you must specify one or more target group ARNs to attach\n\t\t\tto the service. The service-linked role is required for services that use multiple\n\t\t\ttarget groups. For more information, see Using service-linked roles for Amazon ECS in the\n\t\t\t\tAmazon Elastic Container Service Developer Guide.

                \n\t\t

                If the service uses the CODE_DEPLOY deployment controller, the service is\n\t\t\trequired to use either an Application Load Balancer or Network Load Balancer. When creating an CodeDeploy deployment group, you\n\t\t\tspecify two target groups (referred to as a targetGroupPair). During a\n\t\t\tdeployment, CodeDeploy determines which task set in your service has the status\n\t\t\t\tPRIMARY, and it associates one target group with it. Then, it also\n\t\t\tassociates the other target group with the replacement task set. The load balancer can\n\t\t\talso have up to two listeners: a required listener for production traffic and an\n\t\t\toptional listener that you can use to perform validation tests with Lambda functions\n\t\t\tbefore routing production traffic to it.

                \n\t\t

                If you use the CODE_DEPLOY deployment controller, these values can be changed\n\t\t\twhen updating the service.

                \n\t\t

                For Application Load Balancers and Network Load Balancers, this object must contain the load balancer target group ARN,\n\t\t\tthe container name, and the container port to access from the load balancer. The\n\t\t\tcontainer name must be as it appears in a container definition. The load balancer name\n\t\t\tparameter must be omitted. When a task from this service is placed on a container\n\t\t\tinstance, the container instance and port combination is registered as a target in the\n\t\t\ttarget group that's specified here.

                \n\t\t

                For Classic Load Balancers, this object must contain the load balancer name, the container name , and\n\t\t\tthe container port to access from the load balancer. The container name must be as it\n\t\t\tappears in a container definition. The target group ARN parameter must be omitted. When\n\t\t\ta task from this service is placed on a container instance, the container instance is\n\t\t\tregistered with the load balancer that's specified here.

                \n\t\t

                Services with tasks that use the awsvpc network mode (for example, those\n\t\t\twith the Fargate launch type) only support Application Load Balancers and Network Load Balancers. Classic Load Balancers\n\t\t\taren't supported. Also, when you create any target groups for these services, you must\n\t\t\tchoose ip as the target type, not instance. This is because\n\t\t\ttasks that use the awsvpc network mode are associated with an elastic\n\t\t\tnetwork interface, not an Amazon EC2 instance.

                " } }, "serviceRegistries": { @@ -1962,7 +1962,7 @@ "healthCheckGracePeriodSeconds": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "

                The period of time, in seconds, that the Amazon ECS service scheduler ignores unhealthy\n\t\t\tElastic Load Balancing target health checks after a task has first started. This is only used when your\n\t\t\tservice is configured to use a load balancer. If your service has a load balancer\n\t\t\tdefined and you don't specify a health check grace period value, the default value of\n\t\t\t\t0 is used.

                \n\t\t

                If your service's tasks take a while to start and respond to Elastic Load Balancing health checks, you\n\t\t\tcan specify a health check grace period of up to\n\t\t\t2,147,483,647\n\t\t\tseconds (about 69 years). During that time, the Amazon ECS service\n\t\t\tscheduler ignores health check status. This grace period can prevent the service\n\t\t\tscheduler from marking tasks as unhealthy and stopping them before they have time to\n\t\t\tcome up.

                " + "smithy.api#documentation": "

                The period of time, in seconds, that the Amazon ECS service scheduler ignores unhealthy\n\t\t\tElastic Load Balancing target health checks after a task has first started. This is only used when your\n\t\t\tservice is configured to use a load balancer. If your service has a load balancer\n\t\t\tdefined and you don't specify a health check grace period value, the default value of\n\t\t\t\t0 is used.

                \n\t\t

                If you do not use Elastic Load Balancing, we recommend that you use the startPeriod in the task definition health check parameters. For more information, see Health check.

                \n\t\t

                If your service's tasks take a while to start and respond to Elastic Load Balancing health checks, you\n\t\t\tcan specify a health check grace period of up to\n\t\t\t2,147,483,647\n\t\t\tseconds (about 69 years). During that time, the Amazon ECS service\n\t\t\tscheduler ignores health check status. This grace period can prevent the service\n\t\t\tscheduler from marking tasks as unhealthy and stopping them before they have time to\n\t\t\tcome up.

                " } }, "schedulingStrategy": { @@ -1986,7 +1986,7 @@ "enableECSManagedTags": { "target": "com.amazonaws.ecs#Boolean", "traits": { - "smithy.api#documentation": "

                Specifies whether to enable Amazon ECS managed tags for the tasks within the service. For\n\t\t\tmore information, see Tagging Your Amazon ECS\n\t\t\t\tResources in the Amazon Elastic Container Service Developer Guide.

                " + "smithy.api#documentation": "

                Specifies whether to turn on Amazon ECS managed tags for the tasks within the service. For\n\t\t\tmore information, see Tagging Your Amazon ECS\n\t\t\t\tResources in the Amazon Elastic Container Service Developer Guide.

                " } }, "propagateTags": { @@ -2624,20 +2624,20 @@ "enable": { "target": "com.amazonaws.ecs#Boolean", "traits": { - "smithy.api#documentation": "

                Determines whether to enable the deployment circuit breaker logic for the\n\t\t\tservice.

                ", + "smithy.api#documentation": "

                Determines whether to use the deployment circuit breaker logic for the\n\t\t\tservice.

                ", "smithy.api#required": {} } }, "rollback": { "target": "com.amazonaws.ecs#Boolean", "traits": { - "smithy.api#documentation": "

                Determines whether to enable Amazon ECS to roll back the service if a service deployment\n\t\t\tfails. If rollback is enabled, when a service deployment fails, the service is rolled\n\t\t\tback to the last deployment that completed successfully.

                ", + "smithy.api#documentation": "

                Determines whether to configure Amazon ECS to roll back the service if a service deployment\n\t\t\tfails. If rollback is enabled, when a service deployment fails, the service is rolled\n\t\t\tback to the last deployment that completed successfully.

                ", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "\n\t\t\t

                The deployment circuit breaker can only be used for services using the rolling\n\t\t\t\tupdate (ECS) deployment type that aren't behind a Classic Load Balancer.

                \n\t\t
                \n\t\t

                The deployment circuit breaker determines whether a\n\t\t\tservice deployment will fail if the service can't reach a steady state. If enabled, a\n\t\t\tservice deployment will transition to a failed state and stop launching new tasks. You\n\t\t\tcan also enable Amazon ECS to roll back your service to the last completed deployment after a\n\t\t\tfailure. For more information, see Rolling\n\t\t\t\tupdate in the Amazon Elastic Container Service Developer Guide.

                " + "smithy.api#documentation": "\n\t\t\t

                The deployment circuit breaker can only be used for services using the rolling\n\t\t\t\tupdate (ECS) deployment type that aren't behind a Classic Load Balancer.

                \n\t\t
                \n\t\t

                The deployment circuit breaker determines whether a\n\t\t\tservice deployment will fail if the service can't reach a steady state. If enabled, a\n\t\t\tservice deployment will transition to a failed state and stop launching new tasks. You\n\t\t\tcan also configure Amazon ECS to roll back your service to the last completed deployment after a\n\t\t\tfailure. For more information, see Rolling\n\t\t\t\tupdate in the Amazon Elastic Container Service Developer Guide.

                " } }, "com.amazonaws.ecs#DeploymentConfiguration": { @@ -3638,7 +3638,7 @@ "transitEncryption": { "target": "com.amazonaws.ecs#EFSTransitEncryption", "traits": { - "smithy.api#documentation": "

                Determines whether to enable encryption for Amazon EFS data in transit between the Amazon ECS\n\t\t\thost and the Amazon EFS server. Transit encryption must be enabled if Amazon EFS IAM authorization\n\t\t\tis used. If this parameter is omitted, the default value of DISABLED is\n\t\t\tused. For more information, see Encrypting Data in Transit in\n\t\t\tthe Amazon Elastic File System User Guide.

                " + "smithy.api#documentation": "

                Determines whether to use encryption for Amazon EFS data in transit between the Amazon ECS\n\t\t\thost and the Amazon EFS server. Transit encryption must be enabled if Amazon EFS IAM authorization\n\t\t\tis used. If this parameter is omitted, the default value of DISABLED is\n\t\t\tused. For more information, see Encrypting Data in Transit in\n\t\t\tthe Amazon Elastic File System User Guide.

                " } }, "transitEncryptionPort": { @@ -3715,7 +3715,7 @@ } }, "traits": { - "smithy.api#documentation": "

                The amount of ephemeral storage to allocate for the task. This parameter is used to\n\t\t\texpand the total amount of ephemeral storage available, beyond the default amount, for\n\t\t\ttasks hosted on Fargate. For more information, see Fargate task\n\t\t\t\tstorage in the Amazon ECS User Guide for Fargate.

                \n\t\t \n\t\t\t

                This parameter is only supported for tasks hosted on Fargate using\n\t\t\t\tthe following platform versions:

                \n\t\t\t
                  \n
                • \n\t\t\t\t\t

                  Linux platform version 1.4.0 or later.

                  \n\t\t\t\t
                • \n
                • \n\t\t\t\t\t

                  Windows platform version 1.0.0 or later.

                  \n\t\t\t\t
                • \n
                \n\t\t
                " + "smithy.api#documentation": "

                The amount of ephemeral storage to allocate for the task. This parameter is used to\n\t\t\texpand the total amount of ephemeral storage available, beyond the default amount, for\n\t\t\ttasks hosted on Fargate. For more information, see Fargate task\n\t\t\t\tstorage in the Amazon ECS User Guide for Fargate.

                \n\t\t \n\t\t\t

                This parameter is only supported for tasks hosted on Fargate using Linux\n\t\t\t\tplatform version 1.4.0 or later. This parameter is not supported for\n\t\t\t\tWindows containers on Fargate.

                \n\t\t
                " } }, "com.amazonaws.ecs#ExecuteCommand": { @@ -3788,7 +3788,7 @@ "cloudWatchEncryptionEnabled": { "target": "com.amazonaws.ecs#Boolean", "traits": { - "smithy.api#documentation": "

                Determines whether to enable encryption on the CloudWatch logs. If not specified,\n\t\t\tencryption will be disabled.

                " + "smithy.api#documentation": "

                Determines whether to use encryption on the CloudWatch logs. If not specified,\n\t\t\tencryption will be disabled.

                " } }, "s3BucketName": { @@ -5175,7 +5175,7 @@ } }, "traits": { - "smithy.api#documentation": "

                The load balancer configuration to use with a service or task set.

                \n\t\t

                For specific notes and restrictions regarding the use of load balancers with services\n\t\t\tand task sets, see the CreateService and CreateTaskSet actions.

                " + "smithy.api#documentation": "

                The load balancer configuration to use with a service or task set.

                \n\t\t

                For specific notes and restrictions regarding the use of load balancers with services\n\t\t\tand task sets, see the CreateService and CreateTaskSet actions.

                \n\t\t

                When you add, update, or remove a load balancer configuration, Amazon ECS starts a new\n\t\t\tdeployment with the updated Elastic Load Balancing configuration. This causes tasks to register to and\n\t\t\tderegister from load balancers.

                \n\t\t

                We recommend that you verify this on a test environment before you update the Elastic Load Balancing\n\t\t\tconfiguration.

                \n\t\t

                A service-linked role is required for services that use multiple target groups. For\n\t\t\tmore information, see Service-linked\n\t\t\t\troles in the Amazon Elastic Container Service Developer Guide.

                " } }, "com.amazonaws.ecs#LoadBalancers": { @@ -5358,7 +5358,7 @@ "status": { "target": "com.amazonaws.ecs#ManagedScalingStatus", "traits": { - "smithy.api#documentation": "

                Determines whether to enable managed scaling for the capacity provider.

                " + "smithy.api#documentation": "

                Determines whether to use managed scaling for the capacity provider.

                " } }, "targetCapacity": { @@ -5859,6 +5859,10 @@ { "value": "SERVICE", "name": "SERVICE" + }, + { + "value": "NONE", + "name": "NONE" } ] } @@ -5887,7 +5891,7 @@ } }, "traits": { - "smithy.api#documentation": "

                The configuration details for the App Mesh proxy.

                \n\t\t

                For tasks that use the EC2 launch type, the container instances require\n\t\t\tat least version 1.26.0 of the container agent and at least version 1.26.0-1 of the\n\t\t\t\tecs-init package to enable a proxy configuration. If your container\n\t\t\tinstances are launched from the Amazon ECS optimized AMI version 20190301 or\n\t\t\tlater, then they contain the required versions of the container agent and\n\t\t\t\tecs-init. For more information, see Amazon ECS-optimized Linux AMI\n\t\t

                " + "smithy.api#documentation": "

                The configuration details for the App Mesh proxy.

                \n\t\t

                For tasks that use the EC2 launch type, the container instances require\n\t\t\tat least version 1.26.0 of the container agent and at least version 1.26.0-1 of the\n\t\t\t\tecs-init package to use a proxy configuration. If your container\n\t\t\tinstances are launched from the Amazon ECS optimized AMI version 20190301 or\n\t\t\tlater, then they contain the required versions of the container agent and\n\t\t\t\tecs-init. For more information, see Amazon ECS-optimized Linux AMI\n\t\t

                " } }, "com.amazonaws.ecs#ProxyConfigurationProperties": { @@ -5927,7 +5931,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Modifies an account setting. Account settings are set on a per-Region basis.

                \n\t\t

                If you change the account setting for the root user, the default settings for all of\n\t\t\tthe IAM users and roles that no individual account setting was specified are reset for.\n\t\t\tFor more information, see Account\n\t\t\t\tSettings in the Amazon Elastic Container Service Developer Guide.

                \n\t\t

                When serviceLongArnFormat, taskLongArnFormat, or\n\t\t\t\tcontainerInstanceLongArnFormat are specified, the Amazon Resource Name\n\t\t\t(ARN) and resource ID format of the resource type for a specified IAM user, IAM role, or\n\t\t\tthe root user for an account is affected. The opt-in and opt-out account setting must be\n\t\t\tset for each Amazon ECS resource separately. The ARN and resource ID format of a resource is\n\t\t\tdefined by the opt-in status of the IAM user or role that created the resource. You must\n\t\t\tenable this setting to use Amazon ECS features such as resource tagging.

                \n\t\t

                When awsvpcTrunking is specified, the elastic network interface (ENI)\n\t\t\tlimit for any new container instances that support the feature is changed. If\n\t\t\t\tawsvpcTrunking is enabled, any new container instances that support the\n\t\t\tfeature are launched have the increased ENI limits available to them. For more\n\t\t\tinformation, see Elastic Network\n\t\t\t\tInterface Trunking in the Amazon Elastic Container Service Developer Guide.

                \n\t\t

                When containerInsights is specified, the default setting indicating\n\t\t\twhether CloudWatch Container Insights is enabled for your clusters is changed. If\n\t\t\t\tcontainerInsights is enabled, any new clusters that are created will\n\t\t\thave Container Insights enabled unless you disable it during cluster creation. For more\n\t\t\tinformation, see CloudWatch\n\t\t\t\tContainer Insights in the Amazon Elastic Container Service Developer Guide.

                " + "smithy.api#documentation": "

                Modifies an account setting. Account settings are set on a per-Region basis.

                \n\t\t

                If you change the account setting for the root user, the default settings for all of\n\t\t\tthe IAM users and roles that no individual account setting was specified are reset for.\n\t\t\tFor more information, see Account\n\t\t\t\tSettings in the Amazon Elastic Container Service Developer Guide.

                \n\t\t

                When serviceLongArnFormat, taskLongArnFormat, or\n\t\t\t\tcontainerInstanceLongArnFormat are specified, the Amazon Resource Name\n\t\t\t(ARN) and resource ID format of the resource type for a specified IAM user, IAM role, or\n\t\t\tthe root user for an account is affected. The opt-in and opt-out account setting must be\n\t\t\tset for each Amazon ECS resource separately. The ARN and resource ID format of a resource is\n\t\t\tdefined by the opt-in status of the IAM user or role that created the resource. You must\n\t\t\tturn on this setting to use Amazon ECS features such as resource tagging.

                \n\t\t

                When awsvpcTrunking is specified, the elastic network interface (ENI)\n\t\t\tlimit for any new container instances that support the feature is changed. If\n\t\t\t\tawsvpcTrunking is enabled, any new container instances that support the\n\t\t\tfeature are launched have the increased ENI limits available to them. For more\n\t\t\tinformation, see Elastic Network\n\t\t\t\tInterface Trunking in the Amazon Elastic Container Service Developer Guide.

                \n\t\t

                When containerInsights is specified, the default setting indicating\n\t\t\twhether CloudWatch Container Insights is enabled for your clusters is changed. If\n\t\t\t\tcontainerInsights is enabled, any new clusters that are created will\n\t\t\thave Container Insights enabled unless you disable it during cluster creation. For more\n\t\t\tinformation, see CloudWatch\n\t\t\t\tContainer Insights in the Amazon Elastic Container Service Developer Guide.

                " } }, "com.amazonaws.ecs#PutAccountSettingDefault": { @@ -6345,7 +6349,7 @@ "proxyConfiguration": { "target": "com.amazonaws.ecs#ProxyConfiguration", "traits": { - "smithy.api#documentation": "

                The configuration details for the App Mesh proxy.

                \n\t\t

                For tasks hosted on Amazon EC2 instances, the container instances require at least version\n\t\t\t\t1.26.0 of the container agent and at least version\n\t\t\t\t1.26.0-1 of the ecs-init package to enable a proxy\n\t\t\tconfiguration. If your container instances are launched from the Amazon ECS-optimized\n\t\t\tAMI version 20190301 or later, then they contain the required versions of\n\t\t\tthe container agent and ecs-init. For more information, see Amazon ECS-optimized AMI versions in the\n\t\t\tAmazon Elastic Container Service Developer Guide.

                " + "smithy.api#documentation": "

                The configuration details for the App Mesh proxy.

                \n\t\t

                For tasks hosted on Amazon EC2 instances, the container instances require at least version\n\t\t\t\t1.26.0 of the container agent and at least version\n\t\t\t\t1.26.0-1 of the ecs-init package to use a proxy\n\t\t\tconfiguration. If your container instances are launched from the Amazon ECS-optimized\n\t\t\tAMI version 20190301 or later, then they contain the required versions of\n\t\t\tthe container agent and ecs-init. For more information, see Amazon ECS-optimized AMI versions in the\n\t\t\tAmazon Elastic Container Service Developer Guide.

                " } }, "inferenceAccelerators": { @@ -6588,13 +6592,13 @@ "enableECSManagedTags": { "target": "com.amazonaws.ecs#Boolean", "traits": { - "smithy.api#documentation": "

                Specifies whether to enable Amazon ECS managed tags for the task. For more information, see\n\t\t\t\tTagging Your Amazon ECS\n\t\t\t\tResources in the Amazon Elastic Container Service Developer Guide.

                " + "smithy.api#documentation": "

                Specifies whether to use Amazon ECS managed tags for the task. For more information, see\n\t\t\t\tTagging Your Amazon ECS\n\t\t\t\tResources in the Amazon Elastic Container Service Developer Guide.

                " } }, "enableExecuteCommand": { "target": "com.amazonaws.ecs#Boolean", "traits": { - "smithy.api#documentation": "

                Determines whether to enable the execute command functionality for the containers in\n\t\t\tthis task. If true, this enables execute command functionality on all\n\t\t\tcontainers in the task.

                " + "smithy.api#documentation": "

                Determines whether to use the execute command functionality for the containers in\n\t\t\tthis task. If true, this enables execute command functionality on all\n\t\t\tcontainers in the task.

                " } }, "group": { @@ -6783,7 +6787,7 @@ "valueFrom": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

                The secret to expose to the container. The supported values are either the full ARN of\n\t\t\tthe Secrets Manager secret or the full ARN of the parameter in the SSM Parameter Store.

                \n\t\t \n\t\t\t

                If the SSM Parameter Store parameter exists in the same Region as the task\n\t\t\t\tyou're launching, then you can use either the full ARN or name of the parameter. If\n\t\t\t\tthe parameter exists in a different Region, then the full ARN must be\n\t\t\t\tspecified.

                \n\t\t
                ", + "smithy.api#documentation": "

                The secret to expose to the container. The supported values are either the full ARN of\n\t\t\tthe Secrets Manager secret or the full ARN of the parameter in the SSM Parameter Store.

                \n\t\t

                For information about the required Identity and Access Management permissions, see Required IAM permissions for Amazon ECS secrets (for Secrets Manager) or Required IAM permissions for Amazon ECS secrets (for Systems Manager Parameter store) in the Amazon Elastic Container Service Developer Guide.

                \n\t\t \n\t\t\t

                If the SSM Parameter Store parameter exists in the same Region as the task\n\t\t\t\tyou're launching, then you can use either the full ARN or name of the parameter. If\n\t\t\t\tthe parameter exists in a different Region, then the full ARN must be\n\t\t\t\tspecified.

                \n\t\t
                ", "smithy.api#required": {} } } @@ -6966,7 +6970,7 @@ "schedulingStrategy": { "target": "com.amazonaws.ecs#SchedulingStrategy", "traits": { - "smithy.api#documentation": "

                The scheduling strategy to use for the service. For more information, see Services.

                \n\t\t

                There are two service scheduler strategies available.

                \n\t\t
                  \n
                • \n\t\t\t\t

                  \n REPLICA-The replica scheduling strategy places and\n\t\t\t\t\tmaintains the desired number of tasks across your cluster. By default, the\n\t\t\t\t\tservice scheduler spreads tasks across Availability Zones. You can use task\n\t\t\t\t\tplacement strategies and constraints to customize task placement\n\t\t\t\t\tdecisions.

                  \n\t\t\t
                • \n
                • \n\t\t\t\t

                  \n DAEMON-The daemon scheduling strategy deploys exactly one\n\t\t\t\t\ttask on each active container\n\t\t\t\t\tinstance.\n\t\t\t\t\tThis taskmeets all of the task placement constraints that you\n\t\t\t\t\tspecify in your cluster. The service scheduler also evaluates the task placement\n\t\t\t\t\tconstraints for running tasks. It stop tasks that don't meet the placement\n\t\t\t\t\tconstraints.

                  \n\t\t\t\t \n\t\t\t\t\t

                  Fargate tasks don't support the DAEMON\n\t\t\t\t\t\tscheduling strategy.

                  \n\t\t\t\t
                  \n\t\t\t
                • \n
                " + "smithy.api#documentation": "

                The scheduling strategy to use for the service. For more information, see Services.

                \n\t\t

                There are two service scheduler strategies available.

                \n\t\t
                  \n
                • \n\t\t\t\t

                  \n REPLICA-The replica scheduling strategy places and\n\t\t\t\t\tmaintains the desired number of tasks across your cluster. By default, the\n\t\t\t\t\tservice scheduler spreads tasks across Availability Zones. You can use task\n\t\t\t\t\tplacement strategies and constraints to customize task placement\n\t\t\t\t\tdecisions.

                  \n\t\t\t
                • \n
                • \n\t\t\t\t

                  \n DAEMON-The daemon scheduling strategy deploys exactly one task on each\n\t\t\t\t\tactive container instance. This task meets all of the task placement constraints\n\t\t\t\t\tthat you specify in your cluster. The service scheduler also evaluates the task\n\t\t\t\t\tplacement constraints for running tasks. It stops tasks that don't meet the\n\t\t\t\t\tplacement constraints.

                  \n\t\t\t\t \n\t\t\t\t\t

                  Fargate tasks don't support the DAEMON\n\t\t\t\t\t\tscheduling strategy.

                  \n\t\t\t\t
                  \n\t\t\t
                • \n
                " } }, "deploymentController": { @@ -6990,7 +6994,7 @@ "enableECSManagedTags": { "target": "com.amazonaws.ecs#Boolean", "traits": { - "smithy.api#documentation": "

                Determines whether to enable Amazon ECS managed tags for the tasks in the service. For more\n\t\t\tinformation, see Tagging Your Amazon ECS\n\t\t\t\tResources in the Amazon Elastic Container Service Developer Guide.

                " + "smithy.api#documentation": "

                Determines whether to use Amazon ECS managed tags for the tasks in the service. For more\n\t\t\tinformation, see Tagging Your Amazon ECS\n\t\t\t\tResources in the Amazon Elastic Container Service Developer Guide.

                " } }, "propagateTags": { @@ -7118,7 +7122,7 @@ } }, "traits": { - "smithy.api#documentation": "

                The details for the service registry.

                " + "smithy.api#documentation": "

                The details for the service registry.

                \n\t\t

                Each service may be associated with one service registry. Multiple service registries for\n\t\t\teach service are not supported.

                \n\t\t

                When you add, update, or remove the service registries configuration, Amazon ECS starts a\n\t\t\tnew deployment. New tasks are registered and deregistered to the updated service\n\t\t\tregistry configuration.

                " } }, "com.amazonaws.ecs#Services": { @@ -7139,7 +7143,7 @@ "streamUrl": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

                A URL\n\t\t\tback\n\t\t\tto managed agent on the container that the SSM Session Manager client\n\t\t\tuses to send commands and receive output from the container.

                " + "smithy.api#documentation": "

                A URL\n\t\t\t\t\t\tto the managed agent on the container that the SSM Session Manager client\n\t\t\tuses to send commands and receive output from the container.

                " } }, "tokenValue": { @@ -7287,7 +7291,7 @@ "enableECSManagedTags": { "target": "com.amazonaws.ecs#Boolean", "traits": { - "smithy.api#documentation": "

                Specifies whether to enable Amazon ECS managed tags for the task. For more information, see\n\t\t\t\tTagging Your Amazon ECS\n\t\t\t\tResources in the Amazon Elastic Container Service Developer Guide.

                " + "smithy.api#documentation": "

                Specifies whether to use Amazon ECS managed tags for the task. For more information, see\n\t\t\t\tTagging Your Amazon ECS\n\t\t\t\tResources in the Amazon Elastic Container Service Developer Guide.

                " } }, "enableExecuteCommand": { @@ -8032,7 +8036,7 @@ "stopCode": { "target": "com.amazonaws.ecs#TaskStopCode", "traits": { - "smithy.api#documentation": "

                The stop code indicating why a task was stopped. The stoppedReason might\n\t\t\tcontain additional details.

                " + "smithy.api#documentation": "

                The stop code indicating why a task was stopped. The stoppedReason might\n\t\t\tcontain additional details.

                \n\t\t

                The following are valid values:

                \n\t\t
                  \n
                • \n\t\t\t\t

                  \n TaskFailedToStart\n

                  \n\t\t\t
                • \n
                • \n\t\t\t\t

                  \n EssentialContainerExited\n

                  \n\t\t\t
                • \n
                • \n\t\t\t\t

                  \n UserInitiated\n

                  \n\t\t\t
                • \n
                • \n\t\t\t\t

                  \n TerminationNotice\n

                  \n\t\t\t
                • \n
                • \n\t\t\t\t

                  \n ServiceSchedulerInitiated\n

                  \n\t\t\t
                • \n
                • \n\t\t\t\t

                  \n SpotInterruption\n

                  \n\t\t\t
                • \n
                " } }, "stoppedAt": { @@ -8208,7 +8212,7 @@ "proxyConfiguration": { "target": "com.amazonaws.ecs#ProxyConfiguration", "traits": { - "smithy.api#documentation": "

                The configuration details for the App Mesh proxy.

                \n\t\t

                Your Amazon ECS container instances require at least version 1.26.0 of the container agent\n\t\t\tand at least version 1.26.0-1 of the ecs-init package to enable a proxy\n\t\t\tconfiguration. If your container instances are launched from the Amazon ECS optimized AMI\n\t\t\tversion 20190301 or later, they contain the required versions of the\n\t\t\tcontainer agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.

                " + "smithy.api#documentation": "

                The configuration details for the App Mesh proxy.

                \n\t\t

                Your Amazon ECS container instances require at least version 1.26.0 of the container agent\n\t\t\tand at least version 1.26.0-1 of the ecs-init package to use a proxy\n\t\t\tconfiguration. If your container instances are launched from the Amazon ECS optimized AMI\n\t\t\tversion 20190301 or later, they contain the required versions of the\n\t\t\tcontainer agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.

                " } }, "registeredAt": { @@ -8978,7 +8982,7 @@ "settings": { "target": "com.amazonaws.ecs#ClusterSettings", "traits": { - "smithy.api#documentation": "

                The setting to use by default for a cluster. This parameter is used to enable CloudWatch\n\t\t\tContainer Insights for a cluster. If this value is specified, it overrides the\n\t\t\t\tcontainerInsights value set with PutAccountSetting or\n\t\t\t\tPutAccountSettingDefault.

                ", + "smithy.api#documentation": "

                The setting to use by default for a cluster. This parameter is used to turn on CloudWatch\n\t\t\tContainer Insights for a cluster. If this value is specified, it overrides the\n\t\t\t\tcontainerInsights value set with PutAccountSetting or\n\t\t\t\tPutAccountSettingDefault.

                ", "smithy.api#required": {} } } @@ -9082,7 +9086,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Modifies the status of an Amazon ECS container instance.

                \n\t\t

                Once a container instance has reached an ACTIVE state, you can change the\n\t\t\tstatus of a container instance to DRAINING to manually remove an instance\n\t\t\tfrom a cluster, for example to perform system updates, update the Docker daemon, or\n\t\t\tscale down the cluster size.

                \n\t\t \n\t\t\t

                A container instance can't be changed to DRAINING until it has\n\t\t\t\treached an ACTIVE status. If the instance is in any other status, an\n\t\t\t\terror will be received.

                \n\t\t
                \n\t\t

                When you set a container instance to DRAINING, Amazon ECS prevents new tasks\n\t\t\tfrom being scheduled for placement on the container instance and replacement service\n\t\t\ttasks are started on other container instances in the cluster if the resources are\n\t\t\tavailable. Service tasks on the container instance that are in the PENDING\n\t\t\tstate are stopped immediately.

                \n\t\t

                Service tasks on the container instance that are in the RUNNING state are\n\t\t\tstopped and replaced according to the service's deployment configuration parameters,\n\t\t\t\tminimumHealthyPercent and maximumPercent. You can change\n\t\t\tthe deployment configuration of your service using UpdateService.

                \n\t\t
                  \n
                • \n\t\t\t\t

                  If minimumHealthyPercent is below 100%, the scheduler can ignore\n\t\t\t\t\t\tdesiredCount temporarily during task replacement. For example,\n\t\t\t\t\t\tdesiredCount is four tasks, a minimum of 50% allows the\n\t\t\t\t\tscheduler to stop two existing tasks before starting two new tasks. If the\n\t\t\t\t\tminimum is 100%, the service scheduler can't remove existing tasks until the\n\t\t\t\t\treplacement tasks are considered healthy. Tasks for services that do not use a\n\t\t\t\t\tload balancer are considered healthy if they're in the RUNNING\n\t\t\t\t\tstate. Tasks for services that use a load balancer are considered healthy if\n\t\t\t\t\tthey're in the RUNNING state and the container instance they're\n\t\t\t\t\thosted on is reported as healthy by the load balancer.

                  \n\t\t\t
                • \n
                • \n\t\t\t\t

                  The maximumPercent parameter represents an upper limit on the\n\t\t\t\t\tnumber of running tasks during task replacement. You can use this to define the\n\t\t\t\t\treplacement batch size. For example, if desiredCount is four tasks,\n\t\t\t\t\ta maximum of 200% starts four new tasks before stopping the four tasks to be\n\t\t\t\t\tdrained, provided that the cluster resources required to do this are available.\n\t\t\t\t\tIf the maximum is 100%, then replacement tasks can't start until the draining\n\t\t\t\t\ttasks have stopped.

                  \n\t\t\t
                • \n
                \n\t\t

                Any PENDING or RUNNING tasks that do not belong to a service\n\t\t\taren't affected. You must wait for them to finish or stop them manually.

                \n\t\t

                A container instance has completed draining when it has no more RUNNING\n\t\t\ttasks. You can verify this using ListTasks.

                \n\t\t

                When a container instance has been drained, you can set a container instance to\n\t\t\t\tACTIVE status and once it has reached that status the Amazon ECS scheduler\n\t\t\tcan begin scheduling tasks on the instance again.

                " + "smithy.api#documentation": "

                Modifies the status of an Amazon ECS container instance.

                \n\t\t

                Once a container instance has reached an ACTIVE state, you can change the\n\t\t\tstatus of a container instance to DRAINING to manually remove an instance\n\t\t\tfrom a cluster, for example to perform system updates, update the Docker daemon, or\n\t\t\tscale down the cluster size.

                \n\t\t \n\t\t\t

                A container instance can't be changed to DRAINING until it has\n\t\t\t\treached an ACTIVE status. If the instance is in any other status, an\n\t\t\t\terror will be received.

                \n\t\t
                \n\t\t

                When you set a container instance to DRAINING, Amazon ECS prevents new tasks\n\t\t\tfrom being scheduled for placement on the container instance and replacement service\n\t\t\ttasks are started on other container instances in the cluster if the resources are\n\t\t\tavailable. Service tasks on the container instance that are in the PENDING\n\t\t\tstate are stopped immediately.

                \n\t\t

                Service tasks on the container instance that are in the RUNNING state are\n\t\t\tstopped and replaced according to the service's deployment configuration parameters,\n\t\t\t\tminimumHealthyPercent and maximumPercent. You can change\n\t\t\tthe deployment configuration of your service using UpdateService.

                \n\t\t
                  \n
                • \n\t\t\t\t

                  If minimumHealthyPercent is below 100%, the scheduler can ignore\n\t\t\t\t\t\tdesiredCount temporarily during task replacement. For example,\n\t\t\t\t\t\tdesiredCount is four tasks, a minimum of 50% allows the\n\t\t\t\t\tscheduler to stop two existing tasks before starting two new tasks. If the\n\t\t\t\t\tminimum is 100%, the service scheduler can't remove existing tasks until the\n\t\t\t\t\treplacement tasks are considered healthy. Tasks for services that do not use a\n\t\t\t\t\tload balancer are considered healthy if they're in the RUNNING\n\t\t\t\t\tstate. Tasks for services that use a load balancer are considered healthy if\n\t\t\t\t\tthey're in the RUNNING state and are reported as healthy by the load balancer.

                  \n\t\t\t
                • \n
                • \n\t\t\t\t

                  The maximumPercent parameter represents an upper limit on the\n\t\t\t\t\tnumber of running tasks during task replacement. You can use this to define the\n\t\t\t\t\treplacement batch size. For example, if desiredCount is four tasks,\n\t\t\t\t\ta maximum of 200% starts four new tasks before stopping the four tasks to be\n\t\t\t\t\tdrained, provided that the cluster resources required to do this are available.\n\t\t\t\t\tIf the maximum is 100%, then replacement tasks can't start until the draining\n\t\t\t\t\ttasks have stopped.

                  \n\t\t\t
                • \n
                \n\t\t

                Any PENDING or RUNNING tasks that do not belong to a service\n\t\t\taren't affected. You must wait for them to finish or stop them manually.

                \n\t\t

                A container instance has completed draining when it has no more RUNNING\n\t\t\ttasks. You can verify this using ListTasks.

                \n\t\t

                When a container instance has been drained, you can set a container instance to\n\t\t\t\tACTIVE status and once it has reached that status the Amazon ECS scheduler\n\t\t\tcan begin scheduling tasks on the instance again.

                " } }, "com.amazonaws.ecs#UpdateContainerInstancesStateRequest": { @@ -9177,7 +9181,7 @@ } ], "traits": { - "smithy.api#documentation": "\n\t\t\t

                Updating the task placement strategies and constraints on an Amazon ECS service remains\n\t\t\t\tin preview and is a Beta Service as defined by and subject to the Beta Service\n\t\t\t\tParticipation Service Terms located at https://aws.amazon.com/service-terms (\"Beta Terms\"). These Beta Terms\n\t\t\t\tapply to your participation in this preview.

                \n\t\t
                \n\t\t

                Modifies the parameters of a service.

                \n\t\t

                For services using the rolling update (ECS) deployment controller, the\n\t\t\tdesired count, deployment configuration, network configuration, task placement\n\t\t\tconstraints and strategies, or task definition used can be updated.

                \n\t\t

                For services using the blue/green (CODE_DEPLOY) deployment controller,\n\t\t\tonly the desired count, deployment configuration, task placement constraints and\n\t\t\tstrategies, and health check grace period can be updated using this API. If the network\n\t\t\tconfiguration, platform version, or task definition need to be updated, a new CodeDeploy\n\t\t\tdeployment is created. For more information, see CreateDeployment in the CodeDeploy API Reference.

                \n\t\t

                For services using an external deployment controller, you can update only the desired\n\t\t\tcount, task placement constraints and strategies, and health check grace period using\n\t\t\tthis API. If the launch type, load balancer, network configuration, platform version, or\n\t\t\ttask definition need to be updated, create a new task set. For more information, see\n\t\t\t\tCreateTaskSet.

                \n\t\t

                You can add to or subtract from the number of instantiations of a task definition in a\n\t\t\tservice by specifying the cluster that the service is running in and a new\n\t\t\t\tdesiredCount parameter.

                \n\t\t

                If you have updated the Docker image of your application, you can create a new task\n\t\t\tdefinition with that image and deploy it to your service. The service scheduler uses the\n\t\t\tminimum healthy percent and maximum percent parameters (in the service's deployment\n\t\t\tconfiguration) to determine the deployment strategy.

                \n\t\t \n\t\t\t

                If your updated Docker image uses the same tag as what is in the existing task\n\t\t\t\tdefinition for your service (for example, my_image:latest), you don't\n\t\t\t\tneed to create a new revision of your task definition. You can update the service\n\t\t\t\tusing the forceNewDeployment option. The new tasks launched by the\n\t\t\t\tdeployment pull the current image/tag combination from your repository when they\n\t\t\t\tstart.

                \n\t\t
                \n\t\t

                You can also update the deployment configuration of a service. When a deployment is\n\t\t\ttriggered by updating the task definition of a service, the service scheduler uses the\n\t\t\tdeployment configuration parameters, minimumHealthyPercent and\n\t\t\t\tmaximumPercent, to determine the deployment strategy.

                \n\t\t
                  \n
                • \n\t\t\t\t

                  If minimumHealthyPercent is below 100%, the scheduler can ignore\n\t\t\t\t\t\tdesiredCount temporarily during a deployment. For example, if\n\t\t\t\t\t\tdesiredCount is four tasks, a minimum of 50% allows the\n\t\t\t\t\tscheduler to stop two existing tasks before starting two new tasks. Tasks for\n\t\t\t\t\tservices that don't use a load balancer are considered healthy if they're in the\n\t\t\t\t\t\tRUNNING state. Tasks for services that use a load balancer are\n\t\t\t\t\tconsidered healthy if they're in the RUNNING state and the\n\t\t\t\t\tcontainer instance they're hosted on is reported as healthy by the load\n\t\t\t\t\tbalancer.

                  \n\t\t\t
                • \n
                • \n\t\t\t\t

                  The maximumPercent parameter represents an upper limit on the\n\t\t\t\t\tnumber of running tasks during a deployment. You can use it to define the\n\t\t\t\t\tdeployment batch size. For example, if desiredCount is four tasks,\n\t\t\t\t\ta maximum of 200% starts four new tasks before stopping the four older tasks\n\t\t\t\t\t(provided that the cluster resources required to do this are available).

                  \n\t\t\t
                • \n
                \n\t\t

                When UpdateService stops a task during a deployment, the equivalent\n\t\t\tof docker stop is issued to the containers running in the task. This\n\t\t\tresults in a SIGTERM and a 30-second timeout. After this,\n\t\t\t\tSIGKILL is sent and the containers are forcibly stopped. If the\n\t\t\tcontainer handles the SIGTERM gracefully and exits within 30 seconds from\n\t\t\treceiving it, no SIGKILL is sent.

                \n\t\t

                When the service scheduler launches new tasks, it determines task placement in your\n\t\t\tcluster with the following logic.

                \n\t\t
                  \n
                • \n\t\t\t\t

                  Determine which of the container instances in your cluster can support your\n\t\t\t\t\tservice's task definition. For example, they have the required CPU, memory,\n\t\t\t\t\tports, and container instance attributes.

                  \n\t\t\t
                • \n
                • \n\t\t\t\t

                  By default, the service scheduler attempts to balance tasks across\n\t\t\t\t\tAvailability Zones in this manner even though you can choose a different\n\t\t\t\t\tplacement strategy.

                  \n\t\t\t\t
                    \n
                  • \n\t\t\t\t\t\t

                    Sort the valid container instances by the fewest number of running\n\t\t\t\t\t\t\ttasks for this service in the same Availability Zone as the instance.\n\t\t\t\t\t\t\tFor example, if zone A has one running service task and zones B and C\n\t\t\t\t\t\t\teach have zero, valid container instances in either zone B or C are\n\t\t\t\t\t\t\tconsidered optimal for placement.

                    \n\t\t\t\t\t
                  • \n
                  • \n\t\t\t\t\t\t

                    Place the new service task on a valid container instance in an optimal\n\t\t\t\t\t\t\tAvailability Zone (based on the previous steps), favoring container\n\t\t\t\t\t\t\tinstances with the fewest number of running tasks for this\n\t\t\t\t\t\t\tservice.

                    \n\t\t\t\t\t
                  • \n
                  \n\t\t\t
                • \n
                \n\t\t

                When the service scheduler stops running tasks, it attempts to maintain balance across\n\t\t\tthe Availability Zones in your cluster using the following logic:

                \n\t\t
                  \n
                • \n\t\t\t\t

                  Sort the container instances by the largest number of running tasks for this\n\t\t\t\t\tservice in the same Availability Zone as the instance. For example, if zone A\n\t\t\t\t\thas one running service task and zones B and C each have two, container\n\t\t\t\t\tinstances in either zone B or C are considered optimal for termination.

                  \n\t\t\t
                • \n
                • \n\t\t\t\t

                  Stop the task on a container instance in an optimal Availability Zone (based\n\t\t\t\t\ton the previous steps), favoring container instances with the largest number of\n\t\t\t\t\trunning tasks for this service.

                  \n\t\t\t
                • \n
                " + "smithy.api#documentation": "\n\t\t\t

                Updating the task placement strategies and constraints on an Amazon ECS service remains\n\t\t\t\tin preview and is a Beta Service as defined by and subject to the Beta Service\n\t\t\t\tParticipation Service Terms located at https://aws.amazon.com/service-terms (\"Beta Terms\"). These Beta Terms\n\t\t\t\tapply to your participation in this preview.

                \n\t\t
                \n\t\t

                Modifies the parameters of a service.

                \n\t\t

                For services using the rolling update (ECS) you can update the desired count,\n\t\t\tthe deployment configuration, the network configuration, load balancers, service\n\t\t\tregistries, enable ECS managed tags option, propagate tags option, task placement\n\t\t\tconstraints and strategies, and the task definition. When you update any of these\n\t\t\tparameters, Amazon ECS starts new tasks with the new configuration.

                \n\t\t

                For services using the blue/green (CODE_DEPLOY) deployment controller, only the\n\t\t\tdesired count, deployment configuration, task placement constraints and strategies,\n\t\t\tenable ECS managed tags option, and propagate tags can be updated using this API. If the\n\t\t\tnetwork configuration, platform version, task definition, or load balancer need to be\n\t\t\tupdated, create a new CodeDeploy deployment. For more information, see CreateDeployment in the CodeDeploy API Reference.

                \n\t\t

                For services using an external deployment controller, you can update only the desired count,\n\t\t\ttask placement constraints and strategies, health check grace period, enable ECS managed\n\t\t\ttags option, and propagate tags option, using this API. If the launch type, load\n\t\t\tbalancer, network configuration, platform version, or task definition need to be\n\t\t\tupdated, create a new task set For more information, see CreateTaskSet.

                \n\t\t

                You can add to or subtract from the number of instantiations of a task definition in a\n\t\t\tservice by specifying the cluster that the service is running in and a new\n\t\t\t\tdesiredCount parameter.

                \n\t\t

                If you have updated the Docker image of your application, you can create a new task\n\t\t\tdefinition with that image and deploy it to your service. The service scheduler uses the\n\t\t\tminimum healthy percent and maximum percent parameters (in the service's deployment\n\t\t\tconfiguration) to determine the deployment strategy.

                \n\t\t \n\t\t\t

                If your updated Docker image uses the same tag as what is in the existing task\n\t\t\t\tdefinition for your service (for example, my_image:latest), you don't\n\t\t\t\tneed to create a new revision of your task definition. You can update the service\n\t\t\t\tusing the forceNewDeployment option. The new tasks launched by the\n\t\t\t\tdeployment pull the current image/tag combination from your repository when they\n\t\t\t\tstart.

                \n\t\t
                \n\t\t

                You can also update the deployment configuration of a service. When a deployment is\n\t\t\ttriggered by updating the task definition of a service, the service scheduler uses the\n\t\t\tdeployment configuration parameters, minimumHealthyPercent and\n\t\t\t\tmaximumPercent, to determine the deployment strategy.

                \n\t\t
                  \n
                • \n\t\t\t\t

                  If minimumHealthyPercent is below 100%, the scheduler can ignore\n\t\t\t\t\t\tdesiredCount temporarily during a deployment. For example, if\n\t\t\t\t\t\tdesiredCount is four tasks, a minimum of 50% allows the\n\t\t\t\t\tscheduler to stop two existing tasks before starting two new tasks. Tasks for\n\t\t\t\t\tservices that don't use a load balancer are considered healthy if they're in the\n\t\t\t\t\t\tRUNNING state. Tasks for services that use a load balancer are\n\t\t\t\t\tconsidered healthy if they're in the RUNNING state and are reported\n\t\t\t\t\tas healthy by the load balancer.

                  \n\t\t\t
                • \n
                • \n\t\t\t\t

                  The maximumPercent parameter represents an upper limit on the\n\t\t\t\t\tnumber of running tasks during a deployment. You can use it to define the\n\t\t\t\t\tdeployment batch size. For example, if desiredCount is four tasks,\n\t\t\t\t\ta maximum of 200% starts four new tasks before stopping the four older tasks\n\t\t\t\t\t(provided that the cluster resources required to do this are available).

                  \n\t\t\t
                • \n
                \n\t\t

                When UpdateService stops a task during a deployment, the equivalent\n\t\t\tof docker stop is issued to the containers running in the task. This\n\t\t\tresults in a SIGTERM and a 30-second timeout. After this,\n\t\t\t\tSIGKILL is sent and the containers are forcibly stopped. If the\n\t\t\tcontainer handles the SIGTERM gracefully and exits within 30 seconds from\n\t\t\treceiving it, no SIGKILL is sent.

                \n\t\t

                When the service scheduler launches new tasks, it determines task placement in your\n\t\t\tcluster with the following logic.

                \n\t\t
                  \n
                • \n\t\t\t\t

                  Determine which of the container instances in your cluster can support your\n\t\t\t\t\tservice's task definition. For example, they have the required CPU, memory,\n\t\t\t\t\tports, and container instance attributes.

                  \n\t\t\t
                • \n
                • \n\t\t\t\t

                  By default, the service scheduler attempts to balance tasks across\n\t\t\t\t\tAvailability Zones in this manner even though you can choose a different\n\t\t\t\t\tplacement strategy.

                  \n\t\t\t\t
                    \n
                  • \n\t\t\t\t\t\t

                    Sort the valid container instances by the fewest number of running\n\t\t\t\t\t\t\ttasks for this service in the same Availability Zone as the instance.\n\t\t\t\t\t\t\tFor example, if zone A has one running service task and zones B and C\n\t\t\t\t\t\t\teach have zero, valid container instances in either zone B or C are\n\t\t\t\t\t\t\tconsidered optimal for placement.

                    \n\t\t\t\t\t
                  • \n
                  • \n\t\t\t\t\t\t

                    Place the new service task on a valid container instance in an optimal\n\t\t\t\t\t\t\tAvailability Zone (based on the previous steps), favoring container\n\t\t\t\t\t\t\tinstances with the fewest number of running tasks for this\n\t\t\t\t\t\t\tservice.

                    \n\t\t\t\t\t
                  • \n
                  \n\t\t\t
                • \n
                \n\t\t\n\t\t

                When the service scheduler stops running tasks, it attempts to maintain balance across\n\t\t\tthe Availability Zones in your cluster using the following logic:

                \n\t\t
                  \n
                • \n\t\t\t\t

                  Sort the container instances by the largest number of running tasks for this\n\t\t\t\t\tservice in the same Availability Zone as the instance. For example, if zone A\n\t\t\t\t\thas one running service task and zones B and C each have two, container\n\t\t\t\t\tinstances in either zone B or C are considered optimal for termination.

                  \n\t\t\t
                • \n
                • \n\t\t\t\t

                  Stop the task on a container instance in an optimal Availability Zone (based\n\t\t\t\t\ton the previous steps), favoring container instances with the largest number of\n\t\t\t\t\trunning tasks for this service.

                  \n\t\t\t
                • \n
                \n\t\t \n\t\t

                You must have a service-linked role when you update any of the following service properties.\n\t\t\tIf you specified a custom IAM role when you created the service, Amazon ECS automatically\n\t\t\treplaces the roleARN associated with the service with the ARN of your service-linked\n\t\t\trole. For more information, see Service-linked\n\t\t\t\troles in the Amazon Elastic Container Service Developer Guide.

                \n\t\t
                  \n
                • \n\t\t\t\t

                  \n loadBalancers,\n

                  \n\t\t\t
                • \n
                • \n\t\t\t\t

                  \n serviceRegistries\n

                  \n\t\t\t
                • \n
                \n\t\t
                " } }, "com.amazonaws.ecs#UpdateServicePrimaryTaskSet": { @@ -9253,7 +9257,7 @@ "taskSet": { "target": "com.amazonaws.ecs#TaskSet", "traits": { - "smithy.api#documentation": "

                Details about the task set.

                " + "smithy.api#documentation": "

                etails about the task set.

                " } } } @@ -9339,6 +9343,30 @@ "traits": { "smithy.api#documentation": "

                If true, this enables execute command functionality on all task\n\t\t\tcontainers.

                \n\t\t

                If you do not want to override the value that was set when the service was created,\n\t\t\tyou can set this to null when performing this action.

                " } + }, + "enableECSManagedTags": { + "target": "com.amazonaws.ecs#BoxedBoolean", + "traits": { + "smithy.api#documentation": "

                Determines whether to turn on Amazon ECS managed tags for the tasks in the service. For more\n\t\t\tinformation, see Tagging Your Amazon ECS\n\t\t\t\tResources in the Amazon Elastic Container Service Developer Guide.

                \n\t\t

                Only tasks launched after the update will reflect the update. To update the tags on\n\t\t\tall tasks, set forceNewDeployment to true, so that Amazon ECS\n\t\t\tstarts new tasks with the updated tags.

                " + } + }, + "loadBalancers": { + "target": "com.amazonaws.ecs#LoadBalancers", + "traits": { + "smithy.api#documentation": "

                A list of Elastic Load Balancing load balancer objects. It contains the load balancer name, the\n\t\t\tcontainer name, and the container port to access from the load balancer. The container\n\t\t\tname is as it appears in a container definition.

                \n\t\t

                When you add, update, or remove a load balancer configuration, Amazon ECS starts new tasks with\n\t\t\tthe updated Elastic Load Balancing configuration, and then stops the old tasks when the new tasks are\n\t\t\trunning.

                \n\t\t

                You can remove existing loadBalancers by passing an empty list.

                " + } + }, + "propagateTags": { + "target": "com.amazonaws.ecs#PropagateTags", + "traits": { + "smithy.api#documentation": "

                Determines whether to propagate the tags from the task definition or the service to\n\t\t\tthe task. If no value is specified, the tags aren't propagated.

                \n\t\t

                Only tasks launched after the update will reflect the update. To update the tags on\n\t\t\tall tasks, set forceNewDeployment to true, so that Amazon ECS\n\t\t\tstarts new tasks with the updated tags.

                " + } + }, + "serviceRegistries": { + "target": "com.amazonaws.ecs#ServiceRegistries", + "traits": { + "smithy.api#documentation": "

                The details for the service discovery registries to assign to this service. For more\n\t\t\tinformation, see Service\n\t\t\t\tDiscovery.

                \n\t\t

                When you add, update, or remove the service registries configuration, Amazon ECS starts new tasks\n\t\t\twith the updated service registries configuration, and then stops the old tasks when the\n\t\t\tnew tasks are running.

                \n\t\t

                You can remove existing serviceRegistries by passing an empty\n\t\t\tlist.

                " + } } } }, diff --git a/codegen/sdk-codegen/aws-models/eks.json b/codegen/sdk-codegen/aws-models/eks.json index dcac720e44e3..9bd93798b39c 100644 --- a/codegen/sdk-codegen/aws-models/eks.json +++ b/codegen/sdk-codegen/aws-models/eks.json @@ -4020,6 +4020,10 @@ { "value": "ClusterUnreachable", "name": "CLUSTER_UNREACHABLE" + }, + { + "value": "Ec2SubnetMissingIpv6Assignment", + "name": "EC2_SUBNET_MISSING_IPV6_ASSIGNMENT" } ] } diff --git a/codegen/sdk-codegen/aws-models/elasticache.json b/codegen/sdk-codegen/aws-models/elasticache.json index e241ff490622..16a5b95b06db 100644 --- a/codegen/sdk-codegen/aws-models/elasticache.json +++ b/codegen/sdk-codegen/aws-models/elasticache.json @@ -2081,7 +2081,7 @@ "CacheNodeType": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

                The compute and memory capacity of the nodes in the node group (shard).

                \n

                The following node types are supported by ElastiCache. \n\t\t\t\tGenerally speaking, the current generation types provide more memory and computational power\n\t\t\tat lower cost when compared to their equivalent previous generation counterparts.

                \n\t\t
                  \n
                • \n

                  General purpose:

                  \n\t\t\t\t
                    \n
                  • \n

                    Current generation:

                    \n\t\t\t\t\t \n \t\t\t\t\t\n\t\t\t\t\t \n\t\t\t\t\t \n\t\t\t\t\t

                    \n M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t \n\t\t\t\t\t \tcache.m6g.large,\n\t\t\t\t\t\t\tcache.m6g.xlarge,\n\t\t\t\t\t\t\tcache.m6g.2xlarge,\n\t\t\t\t\t\t\tcache.m6g.4xlarge,\n\t\t\t\t\t\t\tcache.m6g.8xlarge,\n\t\t\t\t\t\t\tcache.m6g.12xlarge,\n\t\t\t\t\t\t\tcache.m6g.16xlarge\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t

                    \t\n\t\t\t\t\t\t\n\t\t\t\t\t\t \n

                    For region availability, see Supported Node Types\n

                    \n
                    \n \t\t\t\t\t\n \t\t\t\t\t\n \t\t\t\t\t

                    \n M5 node types:\n \t\t\t\t\t\t cache.m5.large,\n \t\t\t\t\t\tcache.m5.xlarge,\n \t\t\t\t\t\tcache.m5.2xlarge,\n \t\t\t\t\t\tcache.m5.4xlarge,\n \t\t\t\t\t\tcache.m5.12xlarge,\n \t\t\t\t\t\tcache.m5.24xlarge\n \t\t\t\t\t\t\n \t\t\t\t\t\t\n \t\t\t\t\t\t

                    \t\n \t\t\t\t\t\t\n\t\t\t\t\t\t\n\t\t\t\t\t\t

                    \n M4 node types:\n \t\t\t\t\t\t cache.m4.large,\n \t\t\t\t\t\tcache.m4.xlarge,\n \t\t\t\t\t\tcache.m4.2xlarge,\n \t\t\t\t\t\tcache.m4.4xlarge,\n \t\t\t\t\t\tcache.m4.10xlarge\n

                    \n \t\t\n\t\t\t\t\t

                    \n T4g node types (available only for Redis engine version 6.0 onward and Memcached engine version 1.5.16 onward):\n\t\t\t\t\t cache.t4g.micro,\n\t\t\t\t\t cache.t4g.small,\n\t\t\t\t\t cache.t4g.medium\n\t\t\t\t\t

                    \n\t\t\t\t\t \n\t\t\t\t\t \n\t\t\t\t\t

                    \n T3 node types:\n\t\t\t\t\t cache.t3.micro, \n \t\t\t\t\t\tcache.t3.small,\n \t\t\t\t\t\tcache.t3.medium\n

                    \n \t\t\t\t\t\t\t\t\n \t\t\t\t\t\t\n \t\t\t\t

                    \n T2 node types:\n\t\t\t\t\t cache.t2.micro, \n \t\t\t\t\t\tcache.t2.small,\n \t\t\t\t\t\tcache.t2.medium\n

                    \n \t\t\t\t\t\t\n \t\t\t\t\t\t\n \t\t\t\t\t\t\n \t\t\t\t\t\t\n \t\t\t\t\t\t
                  • \n
                  • \n

                    Previous generation: (not recommended)

                    \n\t\t\t\t\t\t

                    \n T1 node types:\n\t\t\t\t\t cache.t1.micro\n

                    \n\t\t\t\t\t \n\t\t\t\t\t\t

                    \n M1 node types:\n\t\t\t\t\t\t cache.m1.small, \n\t\t\t\t\t\t cache.m1.medium, \n\t\t\t\t\t\t cache.m1.large,\n\t\t\t\t\t\t cache.m1.xlarge\n

                    \n\t\t\t\t\t\t \n\t\t\t\t\t\t

                    \n M3 node types:\n \t\t\t\t\t\t cache.m3.medium,\n \t\t\t\t\t\tcache.m3.large, \n \t\t\t\t\t\tcache.m3.xlarge,\n \t\t\t\t\t\tcache.m3.2xlarge\n

                    \n\t\t\t\t\t\t \n\t\t\t\t\t\t
                  • \n
                  \n
                • \n
                • \n

                  Compute optimized:

                  \n\n\t\t\t\t
                    \n
                  • \n

                    Previous generation: (not recommended)

                    \n\t\t\t

                    \n C1 node types:\n\t\t\t cache.c1.xlarge\n

                    \n
                  • \n
                  \n
                • \n
                • \n

                  Memory optimized with data tiering:

                  \n\t\t
                    \n
                  • \n

                    Current generation:

                    \n\t\t \n\t\t

                    \n R6gd node types (available only for Redis engine version 6.2 onward).

                    \n\t\t \n\t\t \n\t\t \n\t\t \n\t\t

                    \t\n\t\t \n\t\t cache.r6gd.xlarge,\n\t\t cache.r6gd.2xlarge,\n\t\t cache.r6gd.4xlarge,\n\t\t cache.r6gd.8xlarge,\n\t\t cache.r6gd.12xlarge,\n\t\t cache.r6gd.16xlarge\n\t\t \n\t\t \n\t\t \n\t\t \n\t\t \n\t\t \n\t\t

                    \t\t\t\t\t \n\t\t \n\t\t
                  • \n
                  \n
                • \n
                • \n

                  Memory optimized:

                  \n\t\t\t\t
                    \n
                  • \n

                    Current generation:

                    \n\t\t\t\t\t \n\t\t\t\t\t \n\t\t\t\t\t \n\t\t\t\t\t \n\t\t\t\t\t\t\t\t\t\t\t

                    \n R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward).

                    \n\t\t\t\t\t\t\n\t\t\t\t\t \n\t\t\t\t\t\t\n\t\t\t\t\t\t

                    \t\n\t\t\t\t\t\t\t cache.r6g.large,\n\t\t\t\t\t\t\tcache.r6g.xlarge,\n\t\t\t\t\t\t\tcache.r6g.2xlarge,\n\t\t\t\t\t\t\tcache.r6g.4xlarge,\n\t\t\t\t\t\t\tcache.r6g.8xlarge,\n\t\t\t\t\t\t\tcache.r6g.12xlarge,\n\t\t\t\t\t\t\tcache.r6g.16xlarge\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t

                    \t\n\t\t\t\t\t\t \n

                    For region availability, see Supported Node Types\n

                    \n
                    \n\t\t\t\t\t

                    \n R5 node types:\n \t\t\t\t\t cache.r5.large,\n \t\t\t\t\t cache.r5.xlarge,\n \t\t\t\t\t cache.r5.2xlarge,\n \t\t\t\t\t cache.r5.4xlarge,\n \t\t\t\t\t cache.r5.12xlarge,\n \t\t\t\t\t cache.r5.24xlarge\n

                    \n\t\t\t\t\t\t\n \t\t\t\t\t

                    \n R4 node types:\n \t\t\t\t\t cache.r4.large,\n \t\t\t\t\t cache.r4.xlarge,\n \t\t\t\t\t cache.r4.2xlarge,\n \t\t\t\t\t cache.r4.4xlarge,\n \t\t\t\t\t cache.r4.8xlarge,\n \t\t\t\t\t cache.r4.16xlarge\n

                    \n \t\t\t\t\t \n \t\t\t\t\t \n \t\t\t\t\t \n\n \n \t\t\t\t\t \n \t\t\t\t\t \n \t\t\t\t\t \n \t\t\t\t\t \n \t\t\t\t\t \n \t\t\t\t\t \n \t\t\t\t\t
                  • \n
                  • \n

                    Previous generation: (not recommended)

                    \n

                    \n M2 node types:\t\t\t\t\t\t\n \t\t\t\t\t cache.m2.xlarge, \n \t\t\t\t\t\tcache.m2.2xlarge,\n \t\t\t\t\t\tcache.m2.4xlarge\n

                    \n \t\t\t\t\t\t\n \t\t\t\t\t\t

                    \n R3 node types:\n \t\t\t\t\t cache.r3.large, \n \t\t\t\t\t\tcache.r3.xlarge,\n \t\t\t\t\t\tcache.r3.2xlarge, \n \t\t\t\t\t\tcache.r3.4xlarge,\n \t\t\t\t\t\tcache.r3.8xlarge\n

                    \n \t\t\t\t\t\t\n \t\t\t\t\t\t
                  • \n
                  \n
                • \n
                \n\t\t\t\t\n\t\t

                \n Additional node type info\n

                \n\t\t
                  \n
                • \n

                  All current generation instance types are created in Amazon VPC by default.

                  \n
                • \n
                • \n

                  Redis append-only files (AOF) are not supported for T1 or T2 instances.

                  \n
                • \n
                • \n

                  Redis Multi-AZ with automatic failover is not supported on T1 instances.

                  \n
                • \n
                • \n

                  Redis configuration variables appendonly and \n\t\t\t\tappendfsync are not supported on Redis version 2.8.22 and later.

                  \n
                • \n
                " + "smithy.api#documentation": "

                The compute and memory capacity of the nodes in the node group (shard).

                \n

                The following node types are supported by ElastiCache. \n\t\t\t\tGenerally speaking, the current generation types provide more memory and computational power\n\t\t\tat lower cost when compared to their equivalent previous generation counterparts.

                \n\t\t
                  \n
                • \n

                  General purpose:

                  \n\t\t\t\t
                    \n
                  • \n

                    Current generation:

                    \n\t\t\t\t\t \n \t\t\t\t\t\n\t\t\t\t\t \n\t\t\t\t\t \n\t\t\t\t\t

                    \n M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t \n\t\t\t\t\t \tcache.m6g.large,\n\t\t\t\t\t\t\tcache.m6g.xlarge,\n\t\t\t\t\t\t\tcache.m6g.2xlarge,\n\t\t\t\t\t\t\tcache.m6g.4xlarge,\n\t\t\t\t\t\t\tcache.m6g.8xlarge,\n\t\t\t\t\t\t\tcache.m6g.12xlarge,\n\t\t\t\t\t\t\tcache.m6g.16xlarge\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t

                    \t\n\t\t\t\t\t\t\n\t\t\t\t\t\t \n

                    For region availability, see Supported Node Types\n

                    \n
                    \n \t\t\t\t\t\n \t\t\t\t\t\n \t\t\t\t\t

                    \n M5 node types:\n \t\t\t\t\t\t cache.m5.large,\n \t\t\t\t\t\tcache.m5.xlarge,\n \t\t\t\t\t\tcache.m5.2xlarge,\n \t\t\t\t\t\tcache.m5.4xlarge,\n \t\t\t\t\t\tcache.m5.12xlarge,\n \t\t\t\t\t\tcache.m5.24xlarge\n \t\t\t\t\t\t\n \t\t\t\t\t\t\n \t\t\t\t\t\t

                    \t\n \t\t\t\t\t\t\n\t\t\t\t\t\t\n\t\t\t\t\t\t

                    \n M4 node types:\n \t\t\t\t\t\t cache.m4.large,\n \t\t\t\t\t\tcache.m4.xlarge,\n \t\t\t\t\t\tcache.m4.2xlarge,\n \t\t\t\t\t\tcache.m4.4xlarge,\n \t\t\t\t\t\tcache.m4.10xlarge\n

                    \n \t\t\n\t\t\t\t\t

                    \n T4g node types (available only for Redis engine version 6.0 onward and Memcached engine version 1.5.16 onward):\n\t\t\t\t\t cache.t4g.micro,\n\t\t\t\t\t cache.t4g.small,\n\t\t\t\t\t cache.t4g.medium\n\t\t\t\t\t

                    \n\t\t\t\t\t \n\t\t\t\t\t \n\t\t\t\t\t

                    \n T3 node types:\n\t\t\t\t\t cache.t3.micro, \n \t\t\t\t\t\tcache.t3.small,\n \t\t\t\t\t\tcache.t3.medium\n

                    \n \t\t\t\t\t\t\t\t\n \t\t\t\t\t\t\n \t\t\t\t

                    \n T2 node types:\n\t\t\t\t\t cache.t2.micro, \n \t\t\t\t\t\tcache.t2.small,\n \t\t\t\t\t\tcache.t2.medium\n

                    \n \t\t\t\t\t\t\n \t\t\t\t\t\t\n \t\t\t\t\t\t\n \t\t\t\t\t\t\n \t\t\t\t\t\t
                  • \n
                  • \n

                    Previous generation: (not recommended)

                    \n\t\t\t\t\t\t

                    \n T1 node types:\n\t\t\t\t\t cache.t1.micro\n

                    \n\t\t\t\t\t \n\t\t\t\t\t\t

                    \n M1 node types:\n\t\t\t\t\t\t cache.m1.small, \n\t\t\t\t\t\t cache.m1.medium, \n\t\t\t\t\t\t cache.m1.large,\n\t\t\t\t\t\t cache.m1.xlarge\n

                    \n\t\t\t\t\t\t \n\t\t\t\t\t\t

                    \n M3 node types:\n \t\t\t\t\t\t cache.m3.medium,\n \t\t\t\t\t\tcache.m3.large, \n \t\t\t\t\t\tcache.m3.xlarge,\n \t\t\t\t\t\tcache.m3.2xlarge\n

                    \n\t\t\t\t\t\t \n\t\t\t\t\t\t
                  • \n
                  \n
                • \n
                • \n

                  Compute optimized:

                  \n\n\t\t\t\t
                    \n
                  • \n

                    Previous generation: (not recommended)

                    \n\t\t\t

                    \n C1 node types:\n\t\t\t cache.c1.xlarge\n

                    \n
                  • \n
                  \n
                • \n
                • \n

                  Memory optimized:

                  \n\t\t\t\t
                    \n
                  • \n

                    Current generation:

                    \n\t\t\t\t\t \n\t\t\t\t\t \n\t\t\t\t\t \n\t\t\t\t\t \n\t\t\t\t\t\t\t\t\t\t\t

                    \n R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward).

                    \n\t\t\t\t\t\t\n\t\t\t\t\t \n\t\t\t\t\t\t\n\t\t\t\t\t\t

                    \t\n\t\t\t\t\t\t\t cache.r6g.large,\n\t\t\t\t\t\t\tcache.r6g.xlarge,\n\t\t\t\t\t\t\tcache.r6g.2xlarge,\n\t\t\t\t\t\t\tcache.r6g.4xlarge,\n\t\t\t\t\t\t\tcache.r6g.8xlarge,\n\t\t\t\t\t\t\tcache.r6g.12xlarge,\n\t\t\t\t\t\t\tcache.r6g.16xlarge\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t

                    \t\n\t\t\t\t\t\t \n

                    For region availability, see Supported Node Types\n

                    \n
                    \n\t\t\t\t\t

                    \n R5 node types:\n \t\t\t\t\t cache.r5.large,\n \t\t\t\t\t cache.r5.xlarge,\n \t\t\t\t\t cache.r5.2xlarge,\n \t\t\t\t\t cache.r5.4xlarge,\n \t\t\t\t\t cache.r5.12xlarge,\n \t\t\t\t\t cache.r5.24xlarge\n

                    \n\t\t\t\t\t\t\n \t\t\t\t\t

                    \n R4 node types:\n \t\t\t\t\t cache.r4.large,\n \t\t\t\t\t cache.r4.xlarge,\n \t\t\t\t\t cache.r4.2xlarge,\n \t\t\t\t\t cache.r4.4xlarge,\n \t\t\t\t\t cache.r4.8xlarge,\n \t\t\t\t\t cache.r4.16xlarge\n

                    \n \t\t\t\t\t \n \t\t\t\t\t \n \t\t\t\t\t \n\n \n \t\t\t\t\t \n \t\t\t\t\t \n \t\t\t\t\t \n \t\t\t\t\t \n \t\t\t\t\t \n \t\t\t\t\t \n \t\t\t\t\t
                  • \n
                  • \n

                    Previous generation: (not recommended)

                    \n

                    \n M2 node types:\t\t\t\t\t\t\n \t\t\t\t\t cache.m2.xlarge, \n \t\t\t\t\t\tcache.m2.2xlarge,\n \t\t\t\t\t\tcache.m2.4xlarge\n

                    \n \t\t\t\t\t\t\n \t\t\t\t\t\t

                    \n R3 node types:\n \t\t\t\t\t cache.r3.large, \n \t\t\t\t\t\tcache.r3.xlarge,\n \t\t\t\t\t\tcache.r3.2xlarge, \n \t\t\t\t\t\tcache.r3.4xlarge,\n \t\t\t\t\t\tcache.r3.8xlarge\n

                    \n \t\t\t\t\t\t\n \t\t\t\t\t\t
                  • \n
                  \n
                • \n
                \n\t\t\t\t\n\t\t

                \n Additional node type info\n

                \n\t\t
                  \n
                • \n

                  All current generation instance types are created in Amazon VPC by default.

                  \n
                • \n
                • \n

                  Redis append-only files (AOF) are not supported for T1 or T2 instances.

                  \n
                • \n
                • \n

                  Redis Multi-AZ with automatic failover is not supported on T1 instances.

                  \n
                • \n
                • \n

                  Redis configuration variables appendonly and \n\t\t\t\tappendfsync are not supported on Redis version 2.8.22 and later.

                  \n
                • \n
                " } }, "Engine": { diff --git a/codegen/sdk-codegen/aws-models/finspace-data.json b/codegen/sdk-codegen/aws-models/finspace-data.json index c0a07474069a..07c33ad8c547 100644 --- a/codegen/sdk-codegen/aws-models/finspace-data.json +++ b/codegen/sdk-codegen/aws-models/finspace-data.json @@ -57,9 +57,24 @@ { "target": "com.amazonaws.finspacedata#CreateDataView" }, + { + "target": "com.amazonaws.finspacedata#CreatePermissionGroup" + }, + { + "target": "com.amazonaws.finspacedata#CreateUser" + }, { "target": "com.amazonaws.finspacedata#DeleteDataset" }, + { + "target": "com.amazonaws.finspacedata#DeletePermissionGroup" + }, + { + "target": "com.amazonaws.finspacedata#DisableUser" + }, + { + "target": "com.amazonaws.finspacedata#EnableUser" + }, { "target": "com.amazonaws.finspacedata#GetChangeset" }, @@ -72,6 +87,9 @@ { "target": "com.amazonaws.finspacedata#GetProgrammaticAccessCredentials" }, + { + "target": "com.amazonaws.finspacedata#GetUser" + }, { "target": "com.amazonaws.finspacedata#GetWorkingLocation" }, @@ -84,11 +102,26 @@ { "target": "com.amazonaws.finspacedata#ListDataViews" }, + { + "target": "com.amazonaws.finspacedata#ListPermissionGroups" + }, + { + "target": "com.amazonaws.finspacedata#ListUsers" + }, + { + "target": "com.amazonaws.finspacedata#ResetUserPassword" + }, { "target": "com.amazonaws.finspacedata#UpdateChangeset" }, { "target": "com.amazonaws.finspacedata#UpdateDataset" + }, + { + "target": "com.amazonaws.finspacedata#UpdatePermissionGroup" + }, + { + "target": "com.amazonaws.finspacedata#UpdateUser" } ] }, @@ -115,6 +148,62 @@ "smithy.api#pattern": "^alias\\/\\S+$" } }, + "com.amazonaws.finspacedata#ApiAccess": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "ENABLED", + "name": "ENABLED" + }, + { + "value": "DISABLED", + "name": "DISABLED" + } + ] + } + }, + "com.amazonaws.finspacedata#ApplicationPermission": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "CreateDataset", 
+ "name": "CreateDataset" + }, + { + "value": "ManageClusters", + "name": "ManageClusters" + }, + { + "value": "ManageUsersAndGroups", + "name": "ManageUsersAndGroups" + }, + { + "value": "ManageAttributeSets", + "name": "ManageAttributeSets" + }, + { + "value": "ViewAuditData", + "name": "ViewAuditData" + }, + { + "value": "AccessNotebooks", + "name": "AccessNotebooks" + }, + { + "value": "GetTemporaryCredentials", + "name": "GetTemporaryCredentials" + } + ] + } + }, + "com.amazonaws.finspacedata#ApplicationPermissionList": { + "type": "list", + "member": { + "target": "com.amazonaws.finspacedata#ApplicationPermission" + } + }, "com.amazonaws.finspacedata#Boolean": { "type": "boolean", "traits": { @@ -159,7 +248,7 @@ "errorCategory": { "target": "com.amazonaws.finspacedata#ErrorCategory", "traits": { - "smithy.api#documentation": "

                The category of the error.

                \n
                  \n
                • \n

                  \n VALIDATION -The inputs to this request are invalid.

                  \n
                • \n
                • \n

                  \n SERVICE_QUOTA_EXCEEDED - Service quotas have been exceeded. Please\n contact AWS support to increase quotas.

                  \n
                • \n
                • \n

                  \n ACCESS_DENIED - Missing required permission to perform this\n request.

                  \n
                • \n
                • \n

                  \n RESOURCE_NOT_FOUND - One or more inputs to this request were not\n found.

                  \n
                • \n
                • \n

                  \n THROTTLING - The system temporarily lacks sufficient resources to process\n the request.

                  \n
                • \n
                • \n

                  \n INTERNAL_SERVICE_EXCEPTION - An internal service error has\n occurred.

                  \n
                • \n
                • \n

                  \n CANCELLED - Cancelled.

                  \n
                • \n
                • \n

                  \n USER_RECOVERABLE - A user recoverable error has occurred.

                  \n
                • \n
                " + "smithy.api#documentation": "

                The category of the error.

                \n
                  \n
                • \n

                  \n VALIDATION – The inputs to this request are invalid.

                  \n
                • \n
                • \n

                  \n SERVICE_QUOTA_EXCEEDED – Service quotas have been exceeded. Please\n contact AWS support to increase quotas.

                  \n
                • \n
                • \n

                  \n ACCESS_DENIED – Missing required permission to perform this\n request.

                  \n
                • \n
                • \n

                  \n RESOURCE_NOT_FOUND – One or more inputs to this request were not\n found.

                  \n
                • \n
                • \n

                  \n THROTTLING – The system temporarily lacks sufficient resources to process\n the request.

                  \n
                • \n
                • \n

                  \n INTERNAL_SERVICE_EXCEPTION – An internal service error has\n occurred.

                  \n
                • \n
                • \n

                  \n CANCELLED – Cancelled.

                  \n
                • \n
                • \n

                  \n USER_RECOVERABLE – A user recoverable error has occurred.

                  \n
                • \n
                " } } }, @@ -210,7 +299,7 @@ "changeType": { "target": "com.amazonaws.finspacedata#ChangeType", "traits": { - "smithy.api#documentation": "

                Type that indicates how a Changeset is applied to a Dataset.

                \n
                  \n
                • \n

                  \n REPLACE - Changeset is considered as a replacement to all prior loaded\n Changesets.

                  \n
                • \n
                • \n

                  \n APPEND - Changeset is considered as an addition to the end of all prior\n loaded Changesets.

                  \n
                • \n
                • \n

                  \n MODIFY - Changeset is considered as a replacement to a specific prior\n ingested Changeset.

                  \n
                • \n
                " + "smithy.api#documentation": "

                Type that indicates how a Changeset is applied to a Dataset.

                \n
                  \n
                • \n

                  \n REPLACE – Changeset is considered as a replacement to all prior loaded\n Changesets.

                  \n
                • \n
                • \n

                  \n APPEND – Changeset is considered as an addition to the end of all prior\n loaded Changesets.

                  \n
                • \n
                • \n

                  \n MODIFY – Changeset is considered as a replacement to a specific prior\n ingested Changeset.

                  \n
                • \n
                " } }, "sourceParams": { @@ -228,13 +317,13 @@ "createTime": { "target": "com.amazonaws.finspacedata#TimestampEpoch", "traits": { - "smithy.api#documentation": "

                The timestamp at which the Changeset was created in FinSpace. The value is determined as Epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

                " + "smithy.api#documentation": "

                The timestamp at which the Changeset was created in FinSpace. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

                " } }, "status": { "target": "com.amazonaws.finspacedata#IngestionStatus", "traits": { - "smithy.api#documentation": "

                Status of the Changeset ingestion.

                \n
                  \n
                • \n

                  \n PENDING - Changeset is pending creation.

                  \n
                • \n
                • \n

                  \n FAILED - Changeset creation has failed.

                  \n
                • \n
                • \n

                  \n SUCCESS - Changeset creation has succeeded.

                  \n
                • \n
                • \n

                  \n RUNNING - Changeset creation is running.

                  \n
                • \n
                • \n

                  \n STOP_REQUESTED - User requested Changeset creation to stop.

                  \n
                • \n
                " + "smithy.api#documentation": "

                Status of the Changeset ingestion.

                \n
                  \n
                • \n

                  \n PENDING – Changeset is pending creation.

                  \n
                • \n
                • \n

                  \n FAILED – Changeset creation has failed.

                  \n
                • \n
                • \n

                  \n SUCCESS – Changeset creation has succeeded.

                  \n
                • \n
                • \n

                  \n RUNNING – Changeset creation is running.

                  \n
                • \n
                • \n

                  \n STOP_REQUESTED – User requested Changeset creation to stop.

                  \n
                • \n
                " } }, "errorInfo": { @@ -247,14 +336,14 @@ "target": "com.amazonaws.finspacedata#TimestampEpoch", "traits": { "smithy.api#box": {}, - "smithy.api#documentation": "

                Time until which the Changeset is active. The value is determined as Epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

                " + "smithy.api#documentation": "

                Time until which the Changeset is active. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

                " } }, "activeFromTimestamp": { "target": "com.amazonaws.finspacedata#TimestampEpoch", "traits": { "smithy.api#box": {}, - "smithy.api#documentation": "

                Beginning time from which the Changeset is active. The value is determined as Epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

                " + "smithy.api#documentation": "

                Beginning time from which the Changeset is active. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

                " } }, "updatesChangesetId": { @@ -347,13 +436,13 @@ "dataType": { "target": "com.amazonaws.finspacedata#ColumnDataType", "traits": { - "smithy.api#documentation": "

                Data type of a column.

                \n
                  \n
                • \n

                  \n STRING - A String data type.

                  \n

                  \n CHAR - A char data type.

                  \n

                  \n INTEGER - An integer data type.

                  \n

                  \n TINYINT - A tinyint data type.

                  \n

                  \n SMALLINT - A smallint data type.

                  \n

                  \n BIGINT - A bigint data type.

                  \n

                  \n FLOAT - A float data type.

                  \n

                  \n DOUBLE - A double data type.

                  \n

                  \n DATE - A date data type.

                  \n

                  \n DATETIME - A datetime data type.

                  \n

                  \n BOOLEAN - A boolean data type.

                  \n

                  \n BINARY - A binary data type.

                  \n
                • \n
                " + "smithy.api#documentation": "

                Data type of a column.

                \n
                  \n
                • \n

                  \n STRING – A String data type.

                  \n

                  \n CHAR – A char data type.

                  \n

                  \n INTEGER – An integer data type.

                  \n

                  \n TINYINT – A tinyint data type.

                  \n

                  \n SMALLINT – A smallint data type.

                  \n

                  \n BIGINT – A bigint data type.

                  \n

                  \n FLOAT – A float data type.

                  \n

                  \n DOUBLE – A double data type.

                  \n

                  \n DATE – A date data type.

                  \n

                  \n DATETIME – A datetime data type.

                  \n

                  \n BOOLEAN – A boolean data type.

                  \n

                  \n BINARY – A binary data type.

                  \n
                • \n
                " } }, "columnName": { "target": "com.amazonaws.finspacedata#ColumnName", "traits": { - "smithy.api#documentation": "

                Name for a column.

                " + "smithy.api#documentation": "

                The name of a column.

                " } }, "columnDescription": { @@ -412,6 +501,9 @@ "members": { "message": { "target": "com.amazonaws.finspacedata#ErrorMessage2" + }, + "reason": { + "target": "com.amazonaws.finspacedata#ErrorMessage2" } }, "traits": { @@ -481,21 +573,21 @@ "changeType": { "target": "com.amazonaws.finspacedata#ChangeType", "traits": { - "smithy.api#documentation": "

                Option to indicate how a Changeset will be applied to a Dataset.

                \n
                  \n
                • \n

                  \n REPLACE - Changeset will be considered as a replacement to all prior\n loaded Changesets.

                  \n
                • \n
                • \n

                  \n APPEND - Changeset will be considered as an addition to the end of all\n prior loaded Changesets.

                  \n
                • \n
                • \n

                  \n MODIFY - Changeset is considered as a replacement to a specific prior ingested Changeset.

                  \n
                • \n
                ", + "smithy.api#documentation": "

                The option to indicate how a Changeset will be applied to a Dataset.

                \n
                  \n
                • \n

                  \n REPLACE – Changeset will be considered as a replacement to all prior\n loaded Changesets.

                  \n
                • \n
                • \n

                  \n APPEND – Changeset will be considered as an addition to the end of all\n prior loaded Changesets.

                  \n
                • \n
                • \n

                  \n MODIFY – Changeset is considered as a replacement to a specific prior ingested Changeset.

                  \n
                • \n
                ", "smithy.api#required": {} } }, "sourceParams": { "target": "com.amazonaws.finspacedata#SourceParams", "traits": { - "smithy.api#documentation": "

                Options that define the location of the data being ingested (s3SourcePath) and the source of the changeset (sourceType).

                \n

                Both s3SourcePath and sourceType are required attributes.

                \n

                Here is an example of how you could specify the sourceParams:

                \n

                \n \n \"sourceParams\": \n {\n \"s3SourcePath\": \"s3://finspace-landing-us-east-2-bk7gcfvitndqa6ebnvys4d/scratch/wr5hh8pwkpqqkxa4sxrmcw/ingestion/equity.csv\",\n \"sourceType\": \"S3\"\n }\n \n

                \n

                The S3 path that you specify must allow the FinSpace role access. To do that, you first need to configure the IAM policy on S3 bucket. For more information, see Loading data from an Amazon S3 Bucket using the FinSpace APIsection.

                ", + "smithy.api#documentation": "

                Options that define the location of the data being ingested (s3SourcePath) and the source of the changeset (sourceType).

                \n

                Both s3SourcePath and sourceType are required attributes.

                \n

                Here is an example of how you could specify the sourceParams:

                \n

                \n \n \"sourceParams\": \n {\n \"s3SourcePath\": \"s3://finspace-landing-us-east-2-bk7gcfvitndqa6ebnvys4d/scratch/wr5hh8pwkpqqkxa4sxrmcw/ingestion/equity.csv\",\n \"sourceType\": \"S3\"\n }\n \n

                \n

                The S3 path that you specify must allow the FinSpace role access. To do that, you first need to configure the IAM policy on S3 bucket. For more information, see Loading data from an Amazon S3 Bucket using the FinSpace API section.

                ", "smithy.api#required": {} } }, "formatParams": { "target": "com.amazonaws.finspacedata#FormatParams", "traits": { - "smithy.api#documentation": "

                Options that define the structure of the source file(s) including the format type (formatType), header row (withHeader), data separation character (separator) and the type of compression (compression).\n

                \n

                \n formatType is a required attribute and can have the following values:\n

                \n
                  \n
                • \n

                  \n PARQUET - Parquet source file format.

                  \n
                • \n
                • \n

                  \n CSV - CSV source file format.

                  \n
                • \n
                • \n

                  \n JSON - JSON source file format.

                  \n
                • \n
                • \n

                  \n XML - XML source file format.

                  \n
                • \n
                \n \n

                Here is an example of how you could specify the formatParams:

                \n

                \n \n \"formatParams\": \n {\n \"formatType\": \"CSV\",\n \"withHeader\": \"true\",\n \"separator\": \",\",\n \"compression\":\"None\"\n } \n \n

                \n

                Note that if you only provide formatType as CSV, the rest of the attributes will automatically default to CSV values as following:

                \n

                \n \n {\n \"withHeader\": \"true\",\n \"separator\": \",\"\n }\n \n

                \n

                For more information about supported file formats, see Supported Data Types and File Formats in the FinSpace User Guide.

                ", + "smithy.api#documentation": "

                Options that define the structure of the source file(s) including the format type (formatType), header row (withHeader), data separation character (separator) and the type of compression (compression).\n

                \n

                \n formatType is a required attribute and can have the following values:\n

                \n
                  \n
                • \n

                  \n PARQUET – Parquet source file format.

                  \n
                • \n
                • \n

                  \n CSV – CSV source file format.

                  \n
                • \n
                • \n

                  \n JSON – JSON source file format.

                  \n
                • \n
                • \n

                  \n XML – XML source file format.

                  \n
                • \n
                \n \n

                Here is an example of how you could specify the formatParams:

                \n

                \n \n \"formatParams\": \n {\n \"formatType\": \"CSV\",\n \"withHeader\": \"true\",\n \"separator\": \",\",\n \"compression\":\"None\"\n } \n \n

                \n

                Note that if you only provide formatType as CSV, the rest of the attributes will automatically default to CSV values as following:

                \n

                \n \n {\n \"withHeader\": \"true\",\n \"separator\": \",\"\n }\n \n

                \n

                For more information about supported file formats, see Supported Data Types and File Formats in the FinSpace User Guide.

                ", "smithy.api#required": {} } } @@ -601,7 +693,7 @@ "target": "com.amazonaws.finspacedata#TimestampEpoch", "traits": { "smithy.api#box": {}, - "smithy.api#documentation": "

                Beginning time to use for the Dataview. The value is determined as Epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

                " + "smithy.api#documentation": "

                Beginning time to use for the Dataview. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

                " } }, "destinationTypeParams": { @@ -696,7 +788,7 @@ "kind": { "target": "com.amazonaws.finspacedata#DatasetKind", "traits": { - "smithy.api#documentation": "

                The format in which Dataset data is structured.

                \n
                  \n
                • \n

                  \n TABULAR - Data is structured in a tabular format.

                  \n
                • \n
                • \n

                  \n NON_TABULAR - Data is structured in a non-tabular format.

                  \n
                • \n
                ", + "smithy.api#documentation": "

                The format in which Dataset data is structured.

                \n
                  \n
                • \n

                  \n TABULAR – Data is structured in a tabular format.

                  \n
                • \n
                • \n

                  \n NON_TABULAR – Data is structured in a non-tabular format.

                  \n
                • \n
                ", "smithy.api#required": {} } }, @@ -750,6 +842,184 @@ "smithy.api#documentation": "The response from a CreateDataset operation" } }, + "com.amazonaws.finspacedata#CreatePermissionGroup": { + "type": "operation", + "input": { + "target": "com.amazonaws.finspacedata#CreatePermissionGroupRequest" + }, + "output": { + "target": "com.amazonaws.finspacedata#CreatePermissionGroupResponse" + }, + "errors": [ + { + "target": "com.amazonaws.finspacedata#AccessDeniedException" + }, + { + "target": "com.amazonaws.finspacedata#ConflictException" + }, + { + "target": "com.amazonaws.finspacedata#InternalServerException" + }, + { + "target": "com.amazonaws.finspacedata#LimitExceededException" + }, + { + "target": "com.amazonaws.finspacedata#ThrottlingException" + }, + { + "target": "com.amazonaws.finspacedata#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                Creates a group of permissions for various actions that a user can perform in FinSpace.

                ", + "smithy.api#http": { + "method": "POST", + "uri": "/permission-group", + "code": 200 + } + } + }, + "com.amazonaws.finspacedata#CreatePermissionGroupRequest": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.finspacedata#PermissionGroupName", + "traits": { + "smithy.api#documentation": "

                The name of the permission group.

                ", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.finspacedata#PermissionGroupDescription", + "traits": { + "smithy.api#documentation": "

                A brief description for the permission group.

                " + } + }, + "applicationPermissions": { + "target": "com.amazonaws.finspacedata#ApplicationPermissionList", + "traits": { + "smithy.api#documentation": "

                The option to indicate FinSpace application permissions that are granted to a specific group.

                \n
                  \n
                • \n

                  \n CreateDataset – Group members can create new datasets.

                  \n
                • \n
                • \n

                  \n ManageClusters – Group members can manage Apache Spark clusters from FinSpace notebooks.

                  \n
                • \n
                • \n

                  \n ManageUsersAndGroups – Group members can manage users and permission groups.

                  \n
                • \n
                • \n

                  \n ManageAttributeSets – Group members can manage attribute sets.

                  \n
                • \n
                • \n

                  \n ViewAuditData – Group members can view audit data.

                  \n
                • \n
                • \n

                  \n AccessNotebooks – Group members will have access to FinSpace notebooks.

                  \n
                • \n
                • \n

                  \n GetTemporaryCredentials – Group members can get temporary API credentials.

                  \n
                • \n
                ", + "smithy.api#required": {} + } + }, + "clientToken": { + "target": "com.amazonaws.finspacedata#ClientToken", + "traits": { + "smithy.api#documentation": "

                A token that ensures idempotency. This token expires in 10 minutes.

                ", + "smithy.api#idempotencyToken": {} + } + } + } + }, + "com.amazonaws.finspacedata#CreatePermissionGroupResponse": { + "type": "structure", + "members": { + "permissionGroupId": { + "target": "com.amazonaws.finspacedata#PermissionGroupId", + "traits": { + "smithy.api#documentation": "

                The unique identifier for the permission group.

                " + } + } + } + }, + "com.amazonaws.finspacedata#CreateUser": { + "type": "operation", + "input": { + "target": "com.amazonaws.finspacedata#CreateUserRequest" + }, + "output": { + "target": "com.amazonaws.finspacedata#CreateUserResponse" + }, + "errors": [ + { + "target": "com.amazonaws.finspacedata#AccessDeniedException" + }, + { + "target": "com.amazonaws.finspacedata#ConflictException" + }, + { + "target": "com.amazonaws.finspacedata#InternalServerException" + }, + { + "target": "com.amazonaws.finspacedata#LimitExceededException" + }, + { + "target": "com.amazonaws.finspacedata#ThrottlingException" + }, + { + "target": "com.amazonaws.finspacedata#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                Creates a new user in FinSpace.

                ", + "smithy.api#http": { + "method": "POST", + "uri": "/user", + "code": 200 + } + } + }, + "com.amazonaws.finspacedata#CreateUserRequest": { + "type": "structure", + "members": { + "emailAddress": { + "target": "com.amazonaws.finspacedata#Email", + "traits": { + "smithy.api#documentation": "

                The email address of the user that you want to register. The email address serves as a unique identifier for each user and cannot be changed after it's created.

                ", + "smithy.api#required": {} + } + }, + "type": { + "target": "com.amazonaws.finspacedata#UserType", + "traits": { + "smithy.api#documentation": "

                The option to indicate the type of user. Use one of the following options to specify this parameter:

                \n
                  \n
                • \n

                  \n SUPER_USER – A user with permission to all the functionality and data in FinSpace.

                  \n
                • \n
                • \n

                  \n APP_USER – A user with specific permissions in FinSpace. The users are assigned permissions by adding them to a permission group.

                  \n
                • \n
                ", + "smithy.api#required": {} + } + }, + "firstName": { + "target": "com.amazonaws.finspacedata#FirstName", + "traits": { + "smithy.api#documentation": "

                The first name of the user that you want to register.

                " + } + }, + "lastName": { + "target": "com.amazonaws.finspacedata#LastName", + "traits": { + "smithy.api#documentation": "

                The last name of the user that you want to register.

                " + } + }, + "ApiAccess": { + "target": "com.amazonaws.finspacedata#ApiAccess", + "traits": { + "smithy.api#documentation": "

                The option to indicate whether the user can use the GetProgrammaticAccessCredentials API to obtain credentials that can then be used to access other FinSpace Data API operations.

                \n
                  \n
                • \n

                  \n ENABLED – The user has permissions to use the APIs.

                  \n
                • \n
                • \n

                  \n DISABLED – The user does not have permissions to use any APIs.

                  \n
                • \n
                " + } + }, + "apiAccessPrincipalArn": { + "target": "com.amazonaws.finspacedata#RoleArn", + "traits": { + "smithy.api#documentation": "

                The ARN identifier of an AWS user or role that is allowed to call the GetProgrammaticAccessCredentials API to obtain a credentials token for a specific FinSpace user. This must be an IAM role within your FinSpace account.

                " + } + }, + "clientToken": { + "target": "com.amazonaws.finspacedata#ClientToken", + "traits": { + "smithy.api#documentation": "

                A token that ensures idempotency. This token expires in 10 minutes.

                ", + "smithy.api#idempotencyToken": {} + } + } + } + }, + "com.amazonaws.finspacedata#CreateUserResponse": { + "type": "structure", + "members": { + "userId": { + "target": "com.amazonaws.finspacedata#UserId", + "traits": { + "smithy.api#documentation": "

                The unique identifier for the user.

                " + } + } + } + }, "com.amazonaws.finspacedata#Credentials": { "type": "structure", "members": { @@ -794,14 +1064,14 @@ "destinationType": { "target": "com.amazonaws.finspacedata#DataViewDestinationType", "traits": { - "smithy.api#documentation": "

                Destination type for a Dataview.

                \n
                  \n
                • \n

                  \n GLUE_TABLE - Glue table destination type.

                  \n
                • \n
                • \n

                  \n S3 - S3 destination type.

                  \n
                • \n
                ", + "smithy.api#documentation": "

                Destination type for a Dataview.

                \n
                  \n
                • \n

                  \n GLUE_TABLE – Glue table destination type.

                  \n
                • \n
                • \n

                  \n S3 – S3 destination type.

                  \n
                • \n
                ", "smithy.api#required": {} } }, "s3DestinationExportFileFormat": { "target": "com.amazonaws.finspacedata#ExportFileFormat", "traits": { - "smithy.api#documentation": "

                Data view export file format.

                \n
                  \n
                • \n

                  \n PARQUET - Parquet export file format.

                  \n
                • \n
                • \n

                  \n DELIMITED_TEXT - Delimited text export file format.

                  \n
                • \n
                " + "smithy.api#documentation": "

                Data view export file format.

                \n
                  \n
                • \n

                  \n PARQUET – Parquet export file format.

                  \n
                • \n
                • \n

                  \n DELIMITED_TEXT – Delimited text export file format.

                  \n
                • \n
                " } }, "s3DestinationExportFileFormatOptions": { @@ -827,7 +1097,7 @@ "errorCategory": { "target": "com.amazonaws.finspacedata#ErrorCategory", "traits": { - "smithy.api#documentation": "

                The category of the error.

                \n
                  \n
                • \n

                  \n VALIDATION -The inputs to this request are invalid.

                  \n
                • \n
                • \n

                  \n SERVICE_QUOTA_EXCEEDED - Service quotas have been exceeded. Please\n contact AWS support to increase quotas.

                  \n
                • \n
                • \n

                  \n ACCESS_DENIED - Missing required permission to perform this\n request.

                  \n
                • \n
                • \n

                  \n RESOURCE_NOT_FOUND - One or more inputs to this request were not\n found.

                  \n
                • \n
                • \n

                  \n THROTTLING - The system temporarily lacks sufficient resources to process\n the request.

                  \n
                • \n
                • \n

                  \n INTERNAL_SERVICE_EXCEPTION - An internal service error has\n occurred.

                  \n
                • \n
                • \n

                  \n CANCELLED - Cancelled.

                  \n
                • \n
                • \n

                  \n USER_RECOVERABLE - A user recoverable error has occurred.

                  \n
                • \n
                " + "smithy.api#documentation": "

                The category of the error.

                \n
                  \n
                • \n

                  \n VALIDATION – The inputs to this request are invalid.

                  \n
                • \n
                • \n

                  \n SERVICE_QUOTA_EXCEEDED – Service quotas have been exceeded. Please\n contact AWS support to increase quotas.

                  \n
                • \n
                • \n

                  \n ACCESS_DENIED – Missing required permission to perform this\n request.

                  \n
                • \n
                • \n

                  \n RESOURCE_NOT_FOUND – One or more inputs to this request were not\n found.

                  \n
                • \n
                • \n

                  \n THROTTLING – The system temporarily lacks sufficient resources to process\n the request.

                  \n
                • \n
                • \n

                  \n INTERNAL_SERVICE_EXCEPTION – An internal service error has\n occurred.

                  \n
                • \n
                • \n

                  \n CANCELLED – Cancelled.

                  \n
                • \n
                • \n

                  \n USER_RECOVERABLE – A user recoverable error has occurred.

                  \n
                • \n
                " } } }, @@ -919,7 +1189,7 @@ "target": "com.amazonaws.finspacedata#TimestampEpoch", "traits": { "smithy.api#box": {}, - "smithy.api#documentation": "

                Time range to use for the Dataview. The value is determined as Epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

                " + "smithy.api#documentation": "

                Time range to use for the Dataview. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

                " } }, "partitionColumns": { @@ -937,7 +1207,7 @@ "status": { "target": "com.amazonaws.finspacedata#DataViewStatus", "traits": { - "smithy.api#documentation": "

                The status of a Dataview creation.

                \n
                  \n
                • \n

                  \n RUNNING - Dataview creation is running.

                  \n
                • \n
                • \n

                  \n STARTING - Dataview creation is starting.

                  \n
                • \n
                • \n

                  \n FAILED - Dataview creation has failed.

                  \n
                • \n
                • \n

                  \n CANCELLED - Dataview creation has been cancelled.

                  \n
                • \n
                • \n

                  \n TIMEOUT - Dataview creation has timed out.

                  \n
                • \n
                • \n

                  \n SUCCESS - Dataview creation has succeeded.

                  \n
                • \n
                • \n

                  \n PENDING - Dataview creation is pending.

                  \n
                • \n
                • \n

                  \n FAILED_CLEANUP_FAILED - Dataview creation failed and resource cleanup failed.

                  \n
                • \n
                " + "smithy.api#documentation": "

                The status of a Dataview creation.

                \n
                  \n
                • \n

                  \n RUNNING – Dataview creation is running.

                  \n
                • \n
                • \n

                  \n STARTING – Dataview creation is starting.

                  \n
                • \n
                • \n

                  \n FAILED – Dataview creation has failed.

                  \n
                • \n
                • \n

                  \n CANCELLED – Dataview creation has been cancelled.

                  \n
                • \n
                • \n

                  \n TIMEOUT – Dataview creation has timed out.

                  \n
                • \n
                • \n

                  \n SUCCESS – Dataview creation has succeeded.

                  \n
                • \n
                • \n

                  \n PENDING – Dataview creation is pending.

                  \n
                • \n
                • \n

                  \n FAILED_CLEANUP_FAILED – Dataview creation failed and resource cleanup failed.

                  \n
                • \n
                " } }, "errorInfo": { @@ -961,13 +1231,13 @@ "createTime": { "target": "com.amazonaws.finspacedata#TimestampEpoch", "traits": { - "smithy.api#documentation": "

                The timestamp at which the Dataview was created in FinSpace. The value is determined as Epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

                " + "smithy.api#documentation": "

                The timestamp at which the Dataview was created in FinSpace. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

                " } }, "lastModifiedTime": { "target": "com.amazonaws.finspacedata#TimestampEpoch", "traits": { - "smithy.api#documentation": "

                The last time that a Dataview was modified. The value is determined as Epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

                " + "smithy.api#documentation": "

                The last time that a Dataview was modified. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

                " } } }, @@ -999,7 +1269,7 @@ "kind": { "target": "com.amazonaws.finspacedata#DatasetKind", "traits": { - "smithy.api#documentation": "

                The format in which Dataset data is structured.

                \n
                  \n
                • \n

                  \n TABULAR - Data is structured in a tabular format.

                  \n
                • \n
                • \n

                  \n NON_TABULAR - Data is structured in a non-tabular format.

                  \n
                • \n
                " + "smithy.api#documentation": "

                The format in which Dataset data is structured.

                \n
                  \n
                • \n

                  \n TABULAR – Data is structured in a tabular format.

                  \n
                • \n
                • \n

                  \n NON_TABULAR – Data is structured in a non-tabular format.

                  \n
                • \n
                " } }, "datasetDescription": { @@ -1017,13 +1287,13 @@ "createTime": { "target": "com.amazonaws.finspacedata#TimestampEpoch", "traits": { - "smithy.api#documentation": "

                The timestamp at which the Dataset was created in FinSpace. The value is determined as Epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

                " + "smithy.api#documentation": "

                The timestamp at which the Dataset was created in FinSpace. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

                " } }, "lastModifiedTime": { "target": "com.amazonaws.finspacedata#TimestampEpoch", "traits": { - "smithy.api#documentation": "

                The last time that the Dataset was modified. The value is determined as Epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

                " + "smithy.api#documentation": "

                The last time that the Dataset was modified. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

                " } }, "schemaDefinition": { @@ -1101,7 +1371,7 @@ "name": { "target": "com.amazonaws.finspacedata#OwnerName", "traits": { - "smithy.api#documentation": "

                Name of the Dataset owner.

                " + "smithy.api#documentation": "

                The name of the Dataset owner.

                " } }, "phoneNumber": { @@ -1234,24 +1504,235 @@ "smithy.api#documentation": "The response from an DeleteDataset operation" } }, - "com.amazonaws.finspacedata#Email": { - "type": "string", + "com.amazonaws.finspacedata#DeletePermissionGroup": { + "type": "operation", + "input": { + "target": "com.amazonaws.finspacedata#DeletePermissionGroupRequest" + }, + "output": { + "target": "com.amazonaws.finspacedata#DeletePermissionGroupResponse" + }, + "errors": [ + { + "target": "com.amazonaws.finspacedata#AccessDeniedException" + }, + { + "target": "com.amazonaws.finspacedata#ConflictException" + }, + { + "target": "com.amazonaws.finspacedata#InternalServerException" + }, + { + "target": "com.amazonaws.finspacedata#LimitExceededException" + }, + { + "target": "com.amazonaws.finspacedata#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.finspacedata#ThrottlingException" + }, + { + "target": "com.amazonaws.finspacedata#ValidationException" + } + ], "traits": { - "smithy.api#documentation": "Email of Dataset Owner", - "smithy.api#length": { - "min": 4, - "max": 320 + "smithy.api#documentation": "

                Deletes a permission group. This action is irreversible.

                ", + "smithy.api#http": { + "method": "DELETE", + "uri": "/permission-group/{permissionGroupId}", + "code": 200 + } + } + }, + "com.amazonaws.finspacedata#DeletePermissionGroupRequest": { + "type": "structure", + "members": { + "permissionGroupId": { + "target": "com.amazonaws.finspacedata#PermissionGroupId", + "traits": { + "smithy.api#documentation": "

                The unique identifier for the permission group that you want to delete.

                ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } }, - "smithy.api#pattern": "^[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\\.[A-Za-z]{2,4}$" + "clientToken": { + "target": "com.amazonaws.finspacedata#ClientToken", + "traits": { + "smithy.api#documentation": "

                A token that ensures idempotency. This token expires in 10 minutes.

                ", + "smithy.api#httpQuery": "clientToken", + "smithy.api#idempotencyToken": {} + } + } } }, - "com.amazonaws.finspacedata#ErrorCategory": { - "type": "string", - "traits": { - "smithy.api#documentation": "Changeset Error Category", - "smithy.api#enum": [ - { - "value": "VALIDATION", + "com.amazonaws.finspacedata#DeletePermissionGroupResponse": { + "type": "structure", + "members": { + "permissionGroupId": { + "target": "com.amazonaws.finspacedata#PermissionGroupId", + "traits": { + "smithy.api#documentation": "

                The unique identifier for the deleted permission group.

                " + } + } + } + }, + "com.amazonaws.finspacedata#DisableUser": { + "type": "operation", + "input": { + "target": "com.amazonaws.finspacedata#DisableUserRequest" + }, + "output": { + "target": "com.amazonaws.finspacedata#DisableUserResponse" + }, + "errors": [ + { + "target": "com.amazonaws.finspacedata#AccessDeniedException" + }, + { + "target": "com.amazonaws.finspacedata#ConflictException" + }, + { + "target": "com.amazonaws.finspacedata#InternalServerException" + }, + { + "target": "com.amazonaws.finspacedata#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.finspacedata#ThrottlingException" + }, + { + "target": "com.amazonaws.finspacedata#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                Denies access to the FinSpace web application and API for the specified user.

                ", + "smithy.api#http": { + "method": "POST", + "uri": "/user/{userId}/disable", + "code": 200 + } + } + }, + "com.amazonaws.finspacedata#DisableUserRequest": { + "type": "structure", + "members": { + "userId": { + "target": "com.amazonaws.finspacedata#UserId", + "traits": { + "smithy.api#documentation": "

                The unique identifier for the user account that you want to disable.

                ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "clientToken": { + "target": "com.amazonaws.finspacedata#ClientToken", + "traits": { + "smithy.api#documentation": "

                A token that ensures idempotency. This token expires in 10 minutes.

                ", + "smithy.api#idempotencyToken": {} + } + } + } + }, + "com.amazonaws.finspacedata#DisableUserResponse": { + "type": "structure", + "members": { + "userId": { + "target": "com.amazonaws.finspacedata#UserId", + "traits": { + "smithy.api#documentation": "

                The unique identifier for the disabled user account.

                " + } + } + } + }, + "com.amazonaws.finspacedata#Email": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 4, + "max": 320 + }, + "smithy.api#pattern": "^[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\\.[A-Za-z]{2,4}$", + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.finspacedata#EnableUser": { + "type": "operation", + "input": { + "target": "com.amazonaws.finspacedata#EnableUserRequest" + }, + "output": { + "target": "com.amazonaws.finspacedata#EnableUserResponse" + }, + "errors": [ + { + "target": "com.amazonaws.finspacedata#AccessDeniedException" + }, + { + "target": "com.amazonaws.finspacedata#ConflictException" + }, + { + "target": "com.amazonaws.finspacedata#InternalServerException" + }, + { + "target": "com.amazonaws.finspacedata#LimitExceededException" + }, + { + "target": "com.amazonaws.finspacedata#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.finspacedata#ThrottlingException" + }, + { + "target": "com.amazonaws.finspacedata#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                Allows the specified user to access the FinSpace web application and API.

                ", + "smithy.api#http": { + "method": "POST", + "uri": "/user/{userId}/enable", + "code": 200 + } + } + }, + "com.amazonaws.finspacedata#EnableUserRequest": { + "type": "structure", + "members": { + "userId": { + "target": "com.amazonaws.finspacedata#UserId", + "traits": { + "smithy.api#documentation": "

                The unique identifier for the user account that you want to enable.

                ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "clientToken": { + "target": "com.amazonaws.finspacedata#ClientToken", + "traits": { + "smithy.api#documentation": "

                A token that ensures idempotency. This token expires in 10 minutes.

                ", + "smithy.api#idempotencyToken": {} + } + } + } + }, + "com.amazonaws.finspacedata#EnableUserResponse": { + "type": "structure", + "members": { + "userId": { + "target": "com.amazonaws.finspacedata#UserId", + "traits": { + "smithy.api#documentation": "

                The unique identifier for the enabled user account.

                " + } + } + } + }, + "com.amazonaws.finspacedata#ErrorCategory": { + "type": "string", + "traits": { + "smithy.api#documentation": "Changeset Error Category", + "smithy.api#enum": [ + { + "value": "VALIDATION", "name": "VALIDATION" }, { @@ -1314,6 +1795,17 @@ ] } }, + "com.amazonaws.finspacedata#FirstName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 50 + }, + "smithy.api#pattern": "\\S", + "smithy.api#sensitive": {} + } + }, "com.amazonaws.finspacedata#FormatParams": { "type": "map", "key": { @@ -1411,7 +1903,7 @@ "changeType": { "target": "com.amazonaws.finspacedata#ChangeType", "traits": { - "smithy.api#documentation": "

                Type that indicates how a Changeset is applied to a Dataset.

                \n
                  \n
                • \n

                  \n REPLACE - Changeset is considered as a replacement to all prior loaded Changesets.

                  \n
                • \n
                • \n

                  \n APPEND - Changeset is considered as an addition to the end of all prior loaded Changesets.

                  \n
                • \n
                • \n

                  \n MODIFY - Changeset is considered as a replacement to a specific prior ingested Changeset.

                  \n
                • \n
                " + "smithy.api#documentation": "

                Type that indicates how a Changeset is applied to a Dataset.

                \n
                  \n
                • \n

                  \n REPLACE – Changeset is considered as a replacement to all prior loaded Changesets.

                  \n
                • \n
                • \n

                  \n APPEND – Changeset is considered as an addition to the end of all prior loaded Changesets.

                  \n
                • \n
                • \n

                  \n MODIFY – Changeset is considered as a replacement to a specific prior ingested Changeset.

                  \n
                • \n
                " } }, "sourceParams": { @@ -1429,7 +1921,7 @@ "createTime": { "target": "com.amazonaws.finspacedata#TimestampEpoch", "traits": { - "smithy.api#documentation": "

                The timestamp at which the Changeset was created in FinSpace. The value is determined as Epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

                " + "smithy.api#documentation": "

                The timestamp at which the Changeset was created in FinSpace. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

                " } }, "status": { @@ -1448,14 +1940,14 @@ "target": "com.amazonaws.finspacedata#TimestampEpoch", "traits": { "smithy.api#box": {}, - "smithy.api#documentation": "

                Time until which the Changeset is active. The value is determined as Epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

                " + "smithy.api#documentation": "

                Time until which the Changeset is active. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

                " } }, "activeFromTimestamp": { "target": "com.amazonaws.finspacedata#TimestampEpoch", "traits": { "smithy.api#box": {}, - "smithy.api#documentation": "

                Beginning time from which the Changeset is active. The value is determined as Epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

                " + "smithy.api#documentation": "

                Beginning time from which the Changeset is active. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

                " } }, "updatesChangesetId": { @@ -1558,7 +2050,7 @@ "target": "com.amazonaws.finspacedata#TimestampEpoch", "traits": { "smithy.api#box": {}, - "smithy.api#documentation": "

                Time range to use for the Dataview. The value is determined as Epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

                " + "smithy.api#documentation": "

                Time range to use for the Dataview. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

                " } }, "errorInfo": { @@ -1570,13 +2062,13 @@ "lastModifiedTime": { "target": "com.amazonaws.finspacedata#TimestampEpoch", "traits": { - "smithy.api#documentation": "

                The last time that a Dataview was modified. The value is determined as Epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

                " + "smithy.api#documentation": "

                The last time that a Dataview was modified. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

                " } }, "createTime": { "target": "com.amazonaws.finspacedata#TimestampEpoch", "traits": { - "smithy.api#documentation": "

                The timestamp at which the Dataview was created in FinSpace. The value is determined as Epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

                " + "smithy.api#documentation": "

                The timestamp at which the Dataview was created in FinSpace. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

                " } }, "sortColumns": { @@ -1606,7 +2098,7 @@ "status": { "target": "com.amazonaws.finspacedata#DataViewStatus", "traits": { - "smithy.api#documentation": "

                The status of a Dataview creation.

                \n
                  \n
                • \n

                  \n RUNNING - Dataview creation is running.

                  \n
                • \n
                • \n

                  \n STARTING - Dataview creation is starting.

                  \n
                • \n
                • \n

                  \n FAILED - Dataview creation has failed.

                  \n
                • \n
                • \n

                  \n CANCELLED - Dataview creation has been cancelled.

                  \n
                • \n
                • \n

                  \n TIMEOUT - Dataview creation has timed out.

                  \n
                • \n
                • \n

                  \n SUCCESS - Dataview creation has succeeded.

                  \n
                • \n
                • \n

                  \n PENDING - Dataview creation is pending.

                  \n
                • \n
                • \n

                  \n FAILED_CLEANUP_FAILED - Dataview creation failed and resource cleanup failed.

                  \n
                • \n
                " + "smithy.api#documentation": "

                The status of a Dataview creation.

                \n
                  \n
                • \n

                  \n RUNNING – Dataview creation is running.

                  \n
                • \n
                • \n

                  \n STARTING – Dataview creation is starting.

                  \n
                • \n
                • \n

                  \n FAILED – Dataview creation has failed.

                  \n
                • \n
                • \n

                  \n CANCELLED – Dataview creation has been cancelled.

                  \n
                • \n
                • \n

                  \n TIMEOUT – Dataview creation has timed out.

                  \n
                • \n
                • \n

                  \n SUCCESS – Dataview creation has succeeded.

                  \n
                • \n
                • \n

                  \n PENDING – Dataview creation is pending.

                  \n
                • \n
                • \n

                  \n FAILED_CLEANUP_FAILED – Dataview creation failed and resource cleanup failed.

                  \n
                • \n
                " } } }, @@ -1691,7 +2183,7 @@ "kind": { "target": "com.amazonaws.finspacedata#DatasetKind", "traits": { - "smithy.api#documentation": "

                The format in which Dataset data is structured.

                \n
                  \n
                • \n

                  \n TABULAR - Data is structured in a tabular format.

                  \n
                • \n
                • \n

                  \n NON_TABULAR - Data is structured in a non-tabular format.

                  \n
                • \n
                " + "smithy.api#documentation": "

                The format in which Dataset data is structured.

                \n
                  \n
                • \n

                  \n TABULAR – Data is structured in a tabular format.

                  \n
                • \n
                • \n

                  \n NON_TABULAR – Data is structured in a non-tabular format.

                  \n
                • \n
                " } }, "datasetDescription": { @@ -1703,13 +2195,13 @@ "createTime": { "target": "com.amazonaws.finspacedata#TimestampEpoch", "traits": { - "smithy.api#documentation": "

                The timestamp at which the Dataset was created in FinSpace. The value is determined as Epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

                " + "smithy.api#documentation": "

                The timestamp at which the Dataset was created in FinSpace. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

                " } }, "lastModifiedTime": { "target": "com.amazonaws.finspacedata#TimestampEpoch", "traits": { - "smithy.api#documentation": "

                The last time that the Dataset was modified. The value is determined as Epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

                " + "smithy.api#documentation": "

                The last time that the Dataset was modified. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

                " } }, "schemaDefinition": { @@ -1727,7 +2219,7 @@ "status": { "target": "com.amazonaws.finspacedata#DatasetStatus", "traits": { - "smithy.api#documentation": "

                Status of the Dataset creation.

                \n
                  \n
                • \n

                  \n PENDING - Dataset is pending creation.

                  \n
                • \n
                • \n

                  \n FAILED - Dataset creation has failed.

                  \n
                • \n
                • \n

                  \n SUCCESS - Dataset creation has succeeded.

                  \n
                • \n
                • \n

                  \n RUNNING - Dataset creation is running.

                  \n
                • \n
                " + "smithy.api#documentation": "

                Status of the Dataset creation.

                \n
                  \n
                • \n

                  \n PENDING – Dataset is pending creation.

                  \n
                • \n
                • \n

                  \n FAILED – Dataset creation has failed.

                  \n
                • \n
                • \n

                  \n SUCCESS – Dataset creation has succeeded.

                  \n
                • \n
                • \n

                  \n RUNNING – Dataset creation is running.

                  \n
                • \n
                " } } }, @@ -1809,6 +2301,136 @@ "smithy.api#documentation": "Response for GetProgrammaticAccessCredentials operation" } }, + "com.amazonaws.finspacedata#GetUser": { + "type": "operation", + "input": { + "target": "com.amazonaws.finspacedata#GetUserRequest" + }, + "output": { + "target": "com.amazonaws.finspacedata#GetUserResponse" + }, + "errors": [ + { + "target": "com.amazonaws.finspacedata#AccessDeniedException" + }, + { + "target": "com.amazonaws.finspacedata#InternalServerException" + }, + { + "target": "com.amazonaws.finspacedata#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.finspacedata#ThrottlingException" + }, + { + "target": "com.amazonaws.finspacedata#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                Retrieves details for a specific user.

                ", + "smithy.api#http": { + "method": "GET", + "uri": "/user/{userId}", + "code": 200 + } + } + }, + "com.amazonaws.finspacedata#GetUserRequest": { + "type": "structure", + "members": { + "userId": { + "target": "com.amazonaws.finspacedata#UserId", + "traits": { + "smithy.api#documentation": "

                The unique identifier of the user to get data for.

                ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.finspacedata#GetUserResponse": { + "type": "structure", + "members": { + "userId": { + "target": "com.amazonaws.finspacedata#UserId", + "traits": { + "smithy.api#documentation": "

                The unique identifier for the user account that is retrieved.

                " + } + }, + "status": { + "target": "com.amazonaws.finspacedata#UserStatus", + "traits": { + "smithy.api#documentation": "

                The current status of the user account.

                \n
                  \n
                • \n

                  \n CREATING – The user account creation is in progress.

                  \n
                • \n
                • \n

                  \n ENABLED – The user account is created and is currently active.

                  \n
                • \n
                • \n

                  \n DISABLED – The user account is currently inactive.

                  \n
                • \n
                " + } + }, + "firstName": { + "target": "com.amazonaws.finspacedata#FirstName", + "traits": { + "smithy.api#documentation": "

                The first name of the user.

                " + } + }, + "lastName": { + "target": "com.amazonaws.finspacedata#LastName", + "traits": { + "smithy.api#documentation": "

                The last name of the user.

                " + } + }, + "emailAddress": { + "target": "com.amazonaws.finspacedata#Email", + "traits": { + "smithy.api#documentation": "

                The email address that is associated with the user.

                " + } + }, + "type": { + "target": "com.amazonaws.finspacedata#UserType", + "traits": { + "smithy.api#documentation": "

                Indicates the type of user.

                \n
                  \n
                • \n

                  \n SUPER_USER – A user with permission to all the functionality and data in FinSpace.

                  \n
                • \n
                \n
                  \n
                • \n

                  \n APP_USER – A user with specific permissions in FinSpace. The users are assigned permissions by adding them to a permissions group.

                  \n
                • \n
                " + } + }, + "apiAccess": { + "target": "com.amazonaws.finspacedata#ApiAccess", + "traits": { + "smithy.api#documentation": "

                Indicates whether the user can use the GetProgrammaticAccessCredentials API to obtain credentials that can then be used to access other FinSpace Data API operations.

                \n
                  \n
                • \n

                  \n ENABLED – The user has permissions to use the APIs.

                  \n
                • \n
                • \n

                  \n DISABLED – The user does not have permissions to use any APIs.

                  \n
                • \n
                " + } + }, + "apiAccessPrincipalArn": { + "target": "com.amazonaws.finspacedata#RoleArn", + "traits": { + "smithy.api#documentation": "

                The ARN identifier of an AWS user or role that is allowed to call the GetProgrammaticAccessCredentials API to obtain a credentials token for a specific FinSpace user. This must be an IAM role within your FinSpace account.

                " + } + }, + "createTime": { + "target": "com.amazonaws.finspacedata#TimestampEpoch", + "traits": { + "smithy.api#documentation": "

                The timestamp at which the user account was created in FinSpace. The value is determined as epoch time in milliseconds.

                " + } + }, + "lastEnabledTime": { + "target": "com.amazonaws.finspacedata#TimestampEpoch", + "traits": { + "smithy.api#documentation": "

                Describes the last time the user account was enabled. The value is determined as epoch time in milliseconds.

                " + } + }, + "lastDisabledTime": { + "target": "com.amazonaws.finspacedata#TimestampEpoch", + "traits": { + "smithy.api#documentation": "

                Describes the last time the user account was disabled. The value is determined as epoch time in milliseconds.

                " + } + }, + "lastModifiedTime": { + "target": "com.amazonaws.finspacedata#TimestampEpoch", + "traits": { + "smithy.api#documentation": "

                Describes the last time the user account was updated. The value is determined as epoch time in milliseconds.

                " + } + }, + "lastLoginTime": { + "target": "com.amazonaws.finspacedata#TimestampEpoch", + "traits": { + "smithy.api#documentation": "

                Describes the last time that the user logged into their account. The value is determined as epoch time in milliseconds.

                " + } + } + } + }, "com.amazonaws.finspacedata#GetWorkingLocation": { "type": "operation", "input": { @@ -1846,7 +2468,7 @@ "locationType": { "target": "com.amazonaws.finspacedata#locationType", "traits": { - "smithy.api#documentation": "

                Specify the type of the working location.

                \n
                  \n
                • \n

                  \n SAGEMAKER - Use the Amazon S3 location as a temporary location to store data content when\n working with FinSpace Notebooks that run on SageMaker studio.

                  \n
                • \n
                • \n

                  \n INGESTION - Use the Amazon S3 location as a staging location to copy your\n data content and then use the location with the Changeset creation operation.

                  \n
                • \n
                " + "smithy.api#documentation": "

                Specify the type of the working location.

                \n
                  \n
                • \n

                  \n SAGEMAKER – Use the Amazon S3 location as a temporary location to store data content when\n working with FinSpace Notebooks that run on SageMaker studio.

                  \n
                • \n
                • \n

                  \n INGESTION – Use the Amazon S3 location as a staging location to copy your\n data content and then use the location with the Changeset creation operation.

                  \n
                • \n
                " } } } @@ -1924,6 +2546,17 @@ "smithy.api#httpError": 500 } }, + "com.amazonaws.finspacedata#LastName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 50 + }, + "smithy.api#pattern": "\\S", + "smithy.api#sensitive": {} + } + }, "com.amazonaws.finspacedata#LimitExceededException": { "type": "structure", "members": { @@ -2001,7 +2634,7 @@ "nextToken": { "target": "com.amazonaws.finspacedata#PaginationToken", "traits": { - "smithy.api#documentation": "

                A token indicating where a results page should begin.

                ", + "smithy.api#documentation": "

                A token that indicates where a results page should begin.

                ", "smithy.api#httpQuery": "nextToken" } } @@ -2022,7 +2655,7 @@ "nextToken": { "target": "com.amazonaws.finspacedata#PaginationToken", "traits": { - "smithy.api#documentation": "

                A token indicating where a results page should begin.

                " + "smithy.api#documentation": "

                A token that indicates where a results page should begin.

                " } } }, @@ -2083,7 +2716,7 @@ "nextToken": { "target": "com.amazonaws.finspacedata#PaginationToken", "traits": { - "smithy.api#documentation": "

                A token indicating where a results page should begin.

                ", + "smithy.api#documentation": "

                A token that indicates where a results page should begin.

                ", "smithy.api#httpQuery": "nextToken" } }, @@ -2106,7 +2739,7 @@ "nextToken": { "target": "com.amazonaws.finspacedata#PaginationToken", "traits": { - "smithy.api#documentation": "

                A token indicating where a results page should begin.

                " + "smithy.api#documentation": "

                A token that indicates where a results page should begin.

                " } }, "dataViews": { @@ -2162,7 +2795,7 @@ "nextToken": { "target": "com.amazonaws.finspacedata#PaginationToken", "traits": { - "smithy.api#documentation": "

                A token indicating where a results page should begin.

                ", + "smithy.api#documentation": "

                A token that indicates where a results page should begin.

                ", "smithy.api#httpQuery": "nextToken" } }, @@ -2191,7 +2824,7 @@ "nextToken": { "target": "com.amazonaws.finspacedata#PaginationToken", "traits": { - "smithy.api#documentation": "

                A token indicating where a results page should begin.

                " + "smithy.api#documentation": "

                A token that indicates where a results page should begin.

                " } } }, @@ -2199,20 +2832,166 @@ "smithy.api#documentation": "Response for the ListDatasets operation" } }, - "com.amazonaws.finspacedata#OwnerName": { - "type": "string", - "traits": { - "smithy.api#documentation": "1 - 250 character String", - "smithy.api#length": { - "min": 1, - "max": 250 - }, - "smithy.api#pattern": "\\S" - } - }, - "com.amazonaws.finspacedata#PaginationToken": { - "type": "string", - "traits": { + "com.amazonaws.finspacedata#ListPermissionGroups": { + "type": "operation", + "input": { + "target": "com.amazonaws.finspacedata#ListPermissionGroupsRequest" + }, + "output": { + "target": "com.amazonaws.finspacedata#ListPermissionGroupsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.finspacedata#AccessDeniedException" + }, + { + "target": "com.amazonaws.finspacedata#InternalServerException" + }, + { + "target": "com.amazonaws.finspacedata#ThrottlingException" + }, + { + "target": "com.amazonaws.finspacedata#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                Lists all available permission groups in FinSpace.

                ", + "smithy.api#http": { + "method": "GET", + "uri": "/permission-group", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults" + } + } + }, + "com.amazonaws.finspacedata#ListPermissionGroupsRequest": { + "type": "structure", + "members": { + "nextToken": { + "target": "com.amazonaws.finspacedata#PaginationToken", + "traits": { + "smithy.api#documentation": "

                A token that indicates where a results page should begin.

                ", + "smithy.api#httpQuery": "nextToken" + } + }, + "maxResults": { + "target": "com.amazonaws.finspacedata#ResultLimit", + "traits": { + "smithy.api#documentation": "

                The maximum number of results per page.

                ", + "smithy.api#httpQuery": "maxResults", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.finspacedata#ListPermissionGroupsResponse": { + "type": "structure", + "members": { + "permissionGroups": { + "target": "com.amazonaws.finspacedata#PermissionGroupList", + "traits": { + "smithy.api#documentation": "

                A list of all the permission groups.

                " + } + }, + "nextToken": { + "target": "com.amazonaws.finspacedata#PaginationToken", + "traits": { + "smithy.api#documentation": "

                A token that indicates where a results page should begin.

                " + } + } + } + }, + "com.amazonaws.finspacedata#ListUsers": { + "type": "operation", + "input": { + "target": "com.amazonaws.finspacedata#ListUsersRequest" + }, + "output": { + "target": "com.amazonaws.finspacedata#ListUsersResponse" + }, + "errors": [ + { + "target": "com.amazonaws.finspacedata#AccessDeniedException" + }, + { + "target": "com.amazonaws.finspacedata#InternalServerException" + }, + { + "target": "com.amazonaws.finspacedata#ThrottlingException" + }, + { + "target": "com.amazonaws.finspacedata#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                Lists all available user accounts in FinSpace.

                ", + "smithy.api#http": { + "method": "GET", + "uri": "/user", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults" + } + } + }, + "com.amazonaws.finspacedata#ListUsersRequest": { + "type": "structure", + "members": { + "nextToken": { + "target": "com.amazonaws.finspacedata#PaginationToken", + "traits": { + "smithy.api#documentation": "

                A token that indicates where a results page should begin.

                ", + "smithy.api#httpQuery": "nextToken" + } + }, + "maxResults": { + "target": "com.amazonaws.finspacedata#ResultLimit", + "traits": { + "smithy.api#documentation": "

                The maximum number of results per page.

                ", + "smithy.api#httpQuery": "maxResults", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.finspacedata#ListUsersResponse": { + "type": "structure", + "members": { + "users": { + "target": "com.amazonaws.finspacedata#UserList", + "traits": { + "smithy.api#documentation": "

                A list of all the user accounts.

                " + } + }, + "nextToken": { + "target": "com.amazonaws.finspacedata#PaginationToken", + "traits": { + "smithy.api#documentation": "

                A token that indicates where a results page should begin.

                " + } + } + } + }, + "com.amazonaws.finspacedata#OwnerName": { + "type": "string", + "traits": { + "smithy.api#documentation": "1 - 250 character String", + "smithy.api#length": { + "min": 1, + "max": 250 + }, + "smithy.api#pattern": "\\S" + } + }, + "com.amazonaws.finspacedata#PaginationToken": { + "type": "string", + "traits": { "smithy.api#documentation": "Pagination token for list operations" } }, @@ -2225,14 +3004,97 @@ "smithy.api#documentation": "DataView Partition Column List" } }, + "com.amazonaws.finspacedata#Password": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 8, + "max": 20 + }, + "smithy.api#pattern": "\\S", + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.finspacedata#PermissionGroup": { + "type": "structure", + "members": { + "permissionGroupId": { + "target": "com.amazonaws.finspacedata#PermissionGroupId", + "traits": { + "smithy.api#documentation": "

                The unique identifier for the permission group.

                " + } + }, + "name": { + "target": "com.amazonaws.finspacedata#PermissionGroupName", + "traits": { + "smithy.api#documentation": "

                The name of the permission group.

                " + } + }, + "description": { + "target": "com.amazonaws.finspacedata#PermissionGroupDescription", + "traits": { + "smithy.api#documentation": "

                A brief description for the permission group.

                " + } + }, + "applicationPermissions": { + "target": "com.amazonaws.finspacedata#ApplicationPermissionList", + "traits": { + "smithy.api#documentation": "

                Indicates the permissions that are granted to a specific group for accessing the FinSpace application.

                \n
                  \n
                • \n

                  \n CreateDataset – Group members can create new datasets.

                  \n
                • \n
                • \n

                  \n ManageClusters – Group members can manage Apache Spark clusters from FinSpace notebooks.

                  \n
                • \n
                • \n

                  \n ManageUsersAndGroups – Group members can manage users and permission groups.

                  \n
                • \n
                • \n

                  \n ManageAttributeSets – Group members can manage attribute sets.

                  \n
                • \n
                • \n

                  \n ViewAuditData – Group members can view audit data.

                  \n
                • \n
                • \n

                  \n AccessNotebooks – Group members will have access to FinSpace notebooks.

                  \n
                • \n
                • \n

                  \n GetTemporaryCredentials – Group members can get temporary API credentials.

                  \n
                • \n
                " + } + }, + "createTime": { + "target": "com.amazonaws.finspacedata#TimestampEpoch", + "traits": { + "smithy.api#documentation": "

                The timestamp at which the group was created in FinSpace. The value is determined as epoch time in milliseconds.\n

                " + } + }, + "lastModifiedTime": { + "target": "com.amazonaws.finspacedata#TimestampEpoch", + "traits": { + "smithy.api#documentation": "

                Describes the last time the permission group was updated. The value is determined as epoch time in milliseconds.\n

                " + } + } + }, + "traits": { + "smithy.api#documentation": "

                The structure for a permission group.

                " + } + }, + "com.amazonaws.finspacedata#PermissionGroupDescription": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 4000 + }, + "smithy.api#pattern": "\\S", + "smithy.api#sensitive": {} + } + }, "com.amazonaws.finspacedata#PermissionGroupId": { "type": "string", "traits": { - "smithy.api#documentation": "Id of the associated PermissionGroup", "smithy.api#length": { "min": 1, "max": 26 - } + }, + "smithy.api#pattern": "\\S" + } + }, + "com.amazonaws.finspacedata#PermissionGroupList": { + "type": "list", + "member": { + "target": "com.amazonaws.finspacedata#PermissionGroup" + } + }, + "com.amazonaws.finspacedata#PermissionGroupName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 255 + }, + "smithy.api#pattern": "\\S", + "smithy.api#sensitive": {} } }, "com.amazonaws.finspacedata#PermissionGroupParams": { @@ -2241,7 +3103,7 @@ "permissionGroupId": { "target": "com.amazonaws.finspacedata#PermissionGroupId", "traits": { - "smithy.api#documentation": "

                The unique identifier of the PermissionGroup.

                " + "smithy.api#documentation": "

                The unique identifier for the PermissionGroup.

                " } }, "datasetPermissions": { @@ -2266,11 +3128,88 @@ "smithy.api#pattern": "^[\\+0-9\\#\\,\\(][\\+0-9\\-\\.\\/\\(\\)\\,\\#\\s]+$" } }, + "com.amazonaws.finspacedata#ResetUserPassword": { + "type": "operation", + "input": { + "target": "com.amazonaws.finspacedata#ResetUserPasswordRequest" + }, + "output": { + "target": "com.amazonaws.finspacedata#ResetUserPasswordResponse" + }, + "errors": [ + { + "target": "com.amazonaws.finspacedata#AccessDeniedException" + }, + { + "target": "com.amazonaws.finspacedata#ConflictException" + }, + { + "target": "com.amazonaws.finspacedata#InternalServerException" + }, + { + "target": "com.amazonaws.finspacedata#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.finspacedata#ThrottlingException" + }, + { + "target": "com.amazonaws.finspacedata#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                Resets the password for a specified user ID and generates a temporary one. Only a superuser can reset the password for other users. Resetting the password immediately invalidates the previous password associated with the user.

                ", + "smithy.api#http": { + "method": "POST", + "uri": "/user/{userId}/password", + "code": 200 + } + } + }, + "com.amazonaws.finspacedata#ResetUserPasswordRequest": { + "type": "structure", + "members": { + "userId": { + "target": "com.amazonaws.finspacedata#UserId", + "traits": { + "smithy.api#documentation": "

                The unique identifier of the user that a temporary password is requested for.

                ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "clientToken": { + "target": "com.amazonaws.finspacedata#ClientToken", + "traits": { + "smithy.api#documentation": "

                A token that ensures idempotency. This token expires in 10 minutes.

                ", + "smithy.api#idempotencyToken": {} + } + } + } + }, + "com.amazonaws.finspacedata#ResetUserPasswordResponse": { + "type": "structure", + "members": { + "userId": { + "target": "com.amazonaws.finspacedata#UserId", + "traits": { + "smithy.api#documentation": "

                The unique identifier of the user that a new password is generated for.

                " + } + }, + "temporaryPassword": { + "target": "com.amazonaws.finspacedata#Password", + "traits": { + "smithy.api#documentation": "

                A randomly generated temporary password for the requested user account. This password expires in 7 days.

                " + } + } + } + }, "com.amazonaws.finspacedata#ResourceNotFoundException": { "type": "structure", "members": { "message": { "target": "com.amazonaws.finspacedata#ErrorMessage2" + }, + "reason": { + "target": "com.amazonaws.finspacedata#ErrorMessage2" } }, "traits": { @@ -2290,7 +3229,7 @@ } }, "traits": { - "smithy.api#documentation": "

                Resource permission for a dataset. When you create a dataset, all the other members of the same user group inherit access to the dataset. You can only create a dataset if your user group has application permission for Create Datasets.

                \n

                The following is a list of valid dataset permissions that you can apply:\n \n

                \n
                  \n
                • \n

                  \n ViewDatasetDetails\n

                  \n
                • \n
                • \n

                  \n ReadDatasetDetails\n

                  \n
                • \n
                • \n

                  \n AddDatasetData\n

                  \n
                • \n
                • \n

                  \n CreateSnapshot\n

                  \n
                • \n
                • \n

                  \n EditDatasetMetadata\n

                  \n
                • \n
                • \n

                  \n DeleteDataset\n

                  \n
                • \n
                \n

                For more information on the ataset permissions, see Supported Dataset Permissions in the FinSpace User Guide.

                " + "smithy.api#documentation": "

                Resource permission for a dataset. When you create a dataset, all the other members of the same user group inherit access to the dataset. You can only create a dataset if your user group has application permission for Create Datasets.

                \n

                The following is a list of valid dataset permissions that you can apply:\n \n

                \n
                  \n
                • \n

                  \n ViewDatasetDetails\n

                  \n
                • \n
                • \n

                  \n ReadDatasetDetails\n

                  \n
                • \n
                • \n

                  \n AddDatasetData\n

                  \n
                • \n
                • \n

                  \n CreateSnapshot\n

                  \n
                • \n
                • \n

                  \n EditDatasetMetadata\n

                  \n
                • \n
                • \n

                  \n DeleteDataset\n

                  \n
                • \n
                \n

                For more information on the dataset permissions, see Supported Dataset Permissions in the FinSpace User Guide.

                " } }, "com.amazonaws.finspacedata#ResourcePermissionsList": { @@ -2312,6 +3251,16 @@ } } }, + "com.amazonaws.finspacedata#RoleArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 20, + "max": 2048 + }, + "smithy.api#pattern": "^arn:aws[a-z\\-]*:iam::\\d{12}:role/?[a-zA-Z_0-9+=,.@\\-_/]+$" + } + }, "com.amazonaws.finspacedata#S3DestinationFormatOptions": { "type": "map", "key": { @@ -2524,7 +3473,7 @@ "formatParams": { "target": "com.amazonaws.finspacedata#FormatParams", "traits": { - "smithy.api#documentation": "

                Options that define the structure of the source file(s) including the format type (formatType), header row (withHeader), data separation character (separator) and the type of compression (compression).\n

                \n

                \n formatType is a required attribute and can have the following values:\n

                \n
                  \n
                • \n

                  \n PARQUET - Parquet source file format.

                  \n
                • \n
                • \n

                  \n CSV - CSV source file format.

                  \n
                • \n
                • \n

                  \n JSON - JSON source file format.

                  \n
                • \n
                • \n

                  \n XML - XML source file format.

                  \n
                • \n
                \n \n

                Here is an example of how you could specify the formatParams:

                \n

                \n \n \"formatParams\": \n {\n \"formatType\": \"CSV\",\n \"withHeader\": \"true\",\n \"separator\": \",\",\n \"compression\":\"None\"\n } \n \n

                \n

                Note that if you only provide formatType as CSV, the rest of the attributes will automatically default to CSV values as following:

                \n

                \n \n {\n \"withHeader\": \"true\",\n \"separator\": \",\"\n }\n \n

                \n

                For more information about supported file formats, see Supported Data Types and File Formats in the FinSpace User Guide.

                ", + "smithy.api#documentation": "

                Options that define the structure of the source file(s) including the format type (formatType), header row (withHeader), data separation character (separator) and the type of compression (compression).\n

                \n

                \n formatType is a required attribute and can have the following values:\n

                \n
                  \n
                • \n

                  \n PARQUET – Parquet source file format.

                  \n
                • \n
                • \n

                  \n CSV – CSV source file format.

                  \n
                • \n
                • \n

                  \n JSON – JSON source file format.

                  \n
                • \n
                • \n

                  \n XML – XML source file format.

                  \n
                • \n
                \n \n

                Here is an example of how you could specify the formatParams:

                \n

                \n \n \"formatParams\": \n {\n \"formatType\": \"CSV\",\n \"withHeader\": \"true\",\n \"separator\": \",\",\n \"compression\":\"None\"\n } \n \n

                \n

                Note that if you only provide formatType as CSV, the rest of the attributes will automatically default to CSV values as following:

                \n

                \n \n {\n \"withHeader\": \"true\",\n \"separator\": \",\"\n }\n \n

                \n

                For more information about supported file formats, see Supported Data Types and File Formats in the FinSpace User Guide.

                ", "smithy.api#required": {} } } @@ -2618,7 +3567,7 @@ "kind": { "target": "com.amazonaws.finspacedata#DatasetKind", "traits": { - "smithy.api#documentation": "

                The format in which the Dataset data is structured.

                \n
                  \n
                • \n

                  \n TABULAR - Data is structured in a tabular format.

                  \n
                • \n
                • \n

                  \n NON_TABULAR - Data is structured in a non-tabular format.

                  \n
                • \n
                ", + "smithy.api#documentation": "

                The format in which the Dataset data is structured.

                \n
                  \n
                • \n

                  \n TABULAR – Data is structured in a tabular format.

                  \n
                • \n
                • \n

                  \n NON_TABULAR – Data is structured in a non-tabular format.

                  \n
                • \n
                ", "smithy.api#required": {} } }, @@ -2659,11 +3608,334 @@ "smithy.api#documentation": "The response from an UpdateDataset operation" } }, + "com.amazonaws.finspacedata#UpdatePermissionGroup": { + "type": "operation", + "input": { + "target": "com.amazonaws.finspacedata#UpdatePermissionGroupRequest" + }, + "output": { + "target": "com.amazonaws.finspacedata#UpdatePermissionGroupResponse" + }, + "errors": [ + { + "target": "com.amazonaws.finspacedata#AccessDeniedException" + }, + { + "target": "com.amazonaws.finspacedata#ConflictException" + }, + { + "target": "com.amazonaws.finspacedata#InternalServerException" + }, + { + "target": "com.amazonaws.finspacedata#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.finspacedata#ThrottlingException" + }, + { + "target": "com.amazonaws.finspacedata#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                Modifies the details of a permission group. You cannot modify a permissionGroupID.

                ", + "smithy.api#http": { + "method": "PUT", + "uri": "/permission-group/{permissionGroupId}", + "code": 200 + } + } + }, + "com.amazonaws.finspacedata#UpdatePermissionGroupRequest": { + "type": "structure", + "members": { + "permissionGroupId": { + "target": "com.amazonaws.finspacedata#PermissionGroupId", + "traits": { + "smithy.api#documentation": "

                The unique identifier for the permission group to update.

                ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.finspacedata#PermissionGroupName", + "traits": { + "smithy.api#documentation": "

                The name of the permission group.

                " + } + }, + "description": { + "target": "com.amazonaws.finspacedata#PermissionGroupDescription", + "traits": { + "smithy.api#documentation": "

                A brief description for the permission group.

                " + } + }, + "applicationPermissions": { + "target": "com.amazonaws.finspacedata#ApplicationPermissionList", + "traits": { + "smithy.api#documentation": "

                The permissions that are granted to a specific group for accessing the FinSpace application.

                \n
                  \n
                • \n

                  \n CreateDataset – Group members can create new datasets.

                  \n
                • \n
                • \n

                  \n ManageClusters – Group members can manage Apache Spark clusters from FinSpace notebooks.

                  \n
                • \n
                • \n

                  \n ManageUsersAndGroups – Group members can manage users and permission groups.

                  \n
                • \n
                • \n

                  \n ManageAttributeSets – Group members can manage attribute sets.

                  \n
                • \n
                • \n

                  \n ViewAuditData – Group members can view audit data.

                  \n
                • \n
                • \n

                  \n AccessNotebooks – Group members will have access to FinSpace notebooks.

                  \n
                • \n
                • \n

                  \n GetTemporaryCredentials – Group members can get temporary API credentials.

                  \n
                • \n
                " + } + }, + "clientToken": { + "target": "com.amazonaws.finspacedata#ClientToken", + "traits": { + "smithy.api#documentation": "

                A token that ensures idempotency. This token expires in 10 minutes.

                ", + "smithy.api#idempotencyToken": {} + } + } + } + }, + "com.amazonaws.finspacedata#UpdatePermissionGroupResponse": { + "type": "structure", + "members": { + "permissionGroupId": { + "target": "com.amazonaws.finspacedata#PermissionGroupId", + "traits": { + "smithy.api#documentation": "

                The unique identifier for the updated permission group.

                " + } + } + } + }, + "com.amazonaws.finspacedata#UpdateUser": { + "type": "operation", + "input": { + "target": "com.amazonaws.finspacedata#UpdateUserRequest" + }, + "output": { + "target": "com.amazonaws.finspacedata#UpdateUserResponse" + }, + "errors": [ + { + "target": "com.amazonaws.finspacedata#AccessDeniedException" + }, + { + "target": "com.amazonaws.finspacedata#ConflictException" + }, + { + "target": "com.amazonaws.finspacedata#InternalServerException" + }, + { + "target": "com.amazonaws.finspacedata#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.finspacedata#ThrottlingException" + }, + { + "target": "com.amazonaws.finspacedata#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                Modifies the details of the specified user account. You cannot update the userId for a user.

                ", + "smithy.api#http": { + "method": "PUT", + "uri": "/user/{userId}", + "code": 200 + } + } + }, + "com.amazonaws.finspacedata#UpdateUserRequest": { + "type": "structure", + "members": { + "userId": { + "target": "com.amazonaws.finspacedata#UserId", + "traits": { + "smithy.api#documentation": "

                The unique identifier for the user account to update.

                ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "type": { + "target": "com.amazonaws.finspacedata#UserType", + "traits": { + "smithy.api#documentation": "

                The option to indicate the type of user.

                \n
                  \n
                • \n

                  \n SUPER_USER – A user with permission to all the functionality and data in FinSpace.

                  \n
                • \n
                • \n

                  \n APP_USER – A user with specific permissions in FinSpace. The users are assigned permissions by adding them to a permissions group.

                  \n
                • \n
                " + } + }, + "firstName": { + "target": "com.amazonaws.finspacedata#FirstName", + "traits": { + "smithy.api#documentation": "

                The first name of the user.

                " + } + }, + "lastName": { + "target": "com.amazonaws.finspacedata#LastName", + "traits": { + "smithy.api#documentation": "

                The last name of the user.

                " + } + }, + "apiAccess": { + "target": "com.amazonaws.finspacedata#ApiAccess", + "traits": { + "smithy.api#documentation": "

                The option to indicate whether the user can use the GetProgrammaticAccessCredentials API to obtain credentials that can then be used to access other FinSpace Data API operations.

                \n
                  \n
                • \n

                  \n ENABLED – The user has permissions to use the APIs.

                  \n
                • \n
                • \n

                  \n DISABLED – The user does not have permissions to use any APIs.

                  \n
                • \n
                " + } + }, + "apiAccessPrincipalArn": { + "target": "com.amazonaws.finspacedata#RoleArn", + "traits": { + "smithy.api#documentation": "

                The ARN identifier of an AWS user or role that is allowed to call the GetProgrammaticAccessCredentials API to obtain a credentials token for a specific FinSpace user. This must be an IAM role within your FinSpace account.

                " + } + }, + "clientToken": { + "target": "com.amazonaws.finspacedata#ClientToken", + "traits": { + "smithy.api#documentation": "

                A token that ensures idempotency. This token expires in 10 minutes.

                ", + "smithy.api#idempotencyToken": {} + } + } + } + }, + "com.amazonaws.finspacedata#UpdateUserResponse": { + "type": "structure", + "members": { + "userId": { + "target": "com.amazonaws.finspacedata#UserId", + "traits": { + "smithy.api#documentation": "

                The unique identifier of the updated user account.

                " + } + } + } + }, + "com.amazonaws.finspacedata#User": { + "type": "structure", + "members": { + "userId": { + "target": "com.amazonaws.finspacedata#UserId", + "traits": { + "smithy.api#documentation": "

                The unique identifier for the user.

                " + } + }, + "status": { + "target": "com.amazonaws.finspacedata#UserStatus", + "traits": { + "smithy.api#documentation": "

                The current status of the user account.

                \n
                  \n
                • \n

                  \n CREATING – The user account creation is in progress.

                  \n
                • \n
                • \n

                  \n ENABLED – The user account is created and is currently active.

                  \n
                • \n
                • \n

                  \n DISABLED – The user account is currently inactive.

                  \n
                • \n
                " + } + }, + "firstName": { + "target": "com.amazonaws.finspacedata#FirstName", + "traits": { + "smithy.api#documentation": "

                The first name of the user.

                " + } + }, + "lastName": { + "target": "com.amazonaws.finspacedata#LastName", + "traits": { + "smithy.api#documentation": "

                The last name of the user.

                " + } + }, + "emailAddress": { + "target": "com.amazonaws.finspacedata#Email", + "traits": { + "smithy.api#documentation": "

                The email address of the user. The email address serves as a unique identifier for each user and cannot be changed after it's created.

                " + } + }, + "type": { + "target": "com.amazonaws.finspacedata#UserType", + "traits": { + "smithy.api#documentation": "

                Indicates the type of user.

                \n
                  \n
                • \n

                  \n SUPER_USER – A user with permission to all the functionality and data in FinSpace.

                  \n
                • \n
                • \n

                  \n APP_USER – A user with specific permissions in FinSpace. The users are assigned permissions by adding them to a permissions group.

                  \n
                • \n
                " + } + }, + "apiAccess": { + "target": "com.amazonaws.finspacedata#ApiAccess", + "traits": { + "smithy.api#documentation": "

                Indicates whether the user can use the GetProgrammaticAccessCredentials API to obtain credentials that can then be used to access other FinSpace Data API operations.

                \n
                  \n
                • \n

                  \n ENABLED – The user has permissions to use the APIs.

                  \n
                • \n
                • \n

                  \n DISABLED – The user does not have permissions to use any APIs.

                  \n
                • \n
                " + } + }, + "apiAccessPrincipalArn": { + "target": "com.amazonaws.finspacedata#RoleArn", + "traits": { + "smithy.api#documentation": "

                The ARN identifier of an AWS user or role that is allowed to call the GetProgrammaticAccessCredentials API to obtain a credentials token for a specific FinSpace user. This must be an IAM role within your FinSpace account.

                " + } + }, + "createTime": { + "target": "com.amazonaws.finspacedata#TimestampEpoch", + "traits": { + "smithy.api#documentation": "

                The timestamp at which the user account was created in FinSpace. The value is determined as epoch time in milliseconds.

                " + } + }, + "lastEnabledTime": { + "target": "com.amazonaws.finspacedata#TimestampEpoch", + "traits": { + "smithy.api#documentation": "

                Describes the last time the user account was enabled. The value is determined as epoch time in milliseconds.\n

                " + } + }, + "lastDisabledTime": { + "target": "com.amazonaws.finspacedata#TimestampEpoch", + "traits": { + "smithy.api#documentation": "

                Describes the last time the user account was disabled. The value is determined as epoch time in milliseconds.

                " + } + }, + "lastModifiedTime": { + "target": "com.amazonaws.finspacedata#TimestampEpoch", + "traits": { + "smithy.api#documentation": "

                Describes the last time the user account was updated. The value is determined as epoch time in milliseconds.\n

                " + } + }, + "lastLoginTime": { + "target": "com.amazonaws.finspacedata#TimestampEpoch", + "traits": { + "smithy.api#documentation": "

                Describes the last time that the user logged into their account. The value is determined as epoch time in milliseconds.\n

                " + } + } + }, + "traits": { + "smithy.api#documentation": "

                The details of the user account.

                " + } + }, + "com.amazonaws.finspacedata#UserId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 26 + }, + "smithy.api#pattern": "\\S" + } + }, + "com.amazonaws.finspacedata#UserList": { + "type": "list", + "member": { + "target": "com.amazonaws.finspacedata#User" + } + }, + "com.amazonaws.finspacedata#UserStatus": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "CREATING", + "name": "CREATING" + }, + { + "value": "ENABLED", + "name": "ENABLED" + }, + { + "value": "DISABLED", + "name": "DISABLED" + } + ] + } + }, + "com.amazonaws.finspacedata#UserType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "SUPER_USER", + "name": "SUPER_USER" + }, + { + "value": "APP_USER", + "name": "APP_USER" + } + ] + } + }, "com.amazonaws.finspacedata#ValidationException": { "type": "structure", "members": { "message": { "target": "com.amazonaws.finspacedata#ErrorMessage2" + }, + "reason": { + "target": "com.amazonaws.finspacedata#ErrorMessage2" } }, "traits": { diff --git a/codegen/sdk-codegen/aws-models/fis.json b/codegen/sdk-codegen/aws-models/fis.json index 9d2e3a7dfed8..ffc1fa5b2288 100644 --- a/codegen/sdk-codegen/aws-models/fis.json +++ b/codegen/sdk-codegen/aws-models/fis.json @@ -221,6 +221,16 @@ "smithy.api#pattern": "^[\\S]+$" } }, + "com.amazonaws.fis#CloudWatchLogGroupArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 20, + "max": 2048 + }, + "smithy.api#pattern": "^[\\S]+$" + } + }, "com.amazonaws.fis#ConflictException": { "type": "structure", "members": { @@ -317,6 +327,33 @@ "target": "com.amazonaws.fis#CreateExperimentTemplateActionInput" } }, + "com.amazonaws.fis#CreateExperimentTemplateLogConfigurationInput": { + "type": "structure", + "members": { + "cloudWatchLogsConfiguration": { + "target": "com.amazonaws.fis#ExperimentTemplateCloudWatchLogsLogConfigurationInput", + "traits": { + "smithy.api#documentation": "

                The configuration for experiment logging to Amazon CloudWatch Logs.

                " + } + }, + "s3Configuration": { + "target": "com.amazonaws.fis#ExperimentTemplateS3LogConfigurationInput", + "traits": { + "smithy.api#documentation": "

                The configuration for experiment logging to Amazon S3.

                " + } + }, + "logSchemaVersion": { + "target": "com.amazonaws.fis#LogSchemaVersion", + "traits": { + "smithy.api#documentation": "

                The schema version.

                ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                Specifies the configuration for experiment logging.

                " + } + }, "com.amazonaws.fis#CreateExperimentTemplateRequest": { "type": "structure", "members": { @@ -367,6 +404,12 @@ "traits": { "smithy.api#documentation": "

                The tags to apply to the experiment template.

                " } + }, + "logConfiguration": { + "target": "com.amazonaws.fis#CreateExperimentTemplateLogConfigurationInput", + "traits": { + "smithy.api#documentation": "

                The configuration for experiment logging.

                " + } } } }, @@ -593,6 +636,12 @@ "traits": { "smithy.api#documentation": "

                The tags for the experiment.

                " } + }, + "logConfiguration": { + "target": "com.amazonaws.fis#ExperimentLogConfiguration", + "traits": { + "smithy.api#documentation": "

                The configuration for experiment logging.

                " + } } }, "traits": { @@ -823,6 +872,20 @@ "smithy.api#pattern": "^[\\S]+$" } }, + "com.amazonaws.fis#ExperimentCloudWatchLogsLogConfiguration": { + "type": "structure", + "members": { + "logGroupArn": { + "target": "com.amazonaws.fis#CloudWatchLogGroupArn", + "traits": { + "smithy.api#documentation": "

                The Amazon Resource Name (ARN) of the destination Amazon CloudWatch Logs log group.

                " + } + } + }, + "traits": { + "smithy.api#documentation": "

                Describes the configuration for experiment logging to Amazon CloudWatch Logs.

                " + } + }, "com.amazonaws.fis#ExperimentEndTime": { "type": "timestamp" }, @@ -836,6 +899,52 @@ "smithy.api#pattern": "^[\\S]+$" } }, + "com.amazonaws.fis#ExperimentLogConfiguration": { + "type": "structure", + "members": { + "cloudWatchLogsConfiguration": { + "target": "com.amazonaws.fis#ExperimentCloudWatchLogsLogConfiguration", + "traits": { + "smithy.api#documentation": "

                The configuration for experiment logging to Amazon CloudWatch Logs.

                " + } + }, + "s3Configuration": { + "target": "com.amazonaws.fis#ExperimentS3LogConfiguration", + "traits": { + "smithy.api#documentation": "

                The configuration for experiment logging to Amazon S3.

                " + } + }, + "logSchemaVersion": { + "target": "com.amazonaws.fis#LogSchemaVersion", + "traits": { + "smithy.api#documentation": "

                The schema version.

                " + } + } + }, + "traits": { + "smithy.api#documentation": "

                Describes the configuration for experiment logging.

                " + } + }, + "com.amazonaws.fis#ExperimentS3LogConfiguration": { + "type": "structure", + "members": { + "bucketName": { + "target": "com.amazonaws.fis#S3BucketName", + "traits": { + "smithy.api#documentation": "

                The name of the destination bucket.

                " + } + }, + "prefix": { + "target": "com.amazonaws.fis#S3ObjectKey", + "traits": { + "smithy.api#documentation": "

                The bucket prefix.

                " + } + } + }, + "traits": { + "smithy.api#documentation": "

                Describes the configuration for experiment logging to Amazon S3.

                " + } + }, "com.amazonaws.fis#ExperimentStartTime": { "type": "timestamp" }, @@ -1183,6 +1292,12 @@ "traits": { "smithy.api#documentation": "

                The tags for the experiment template.

                " } + }, + "logConfiguration": { + "target": "com.amazonaws.fis#ExperimentTemplateLogConfiguration", + "traits": { + "smithy.api#documentation": "

                The configuration for experiment logging.

                " + } } }, "traits": { @@ -1320,6 +1435,35 @@ "smithy.api#pattern": "^[\\S]+$" } }, + "com.amazonaws.fis#ExperimentTemplateCloudWatchLogsLogConfiguration": { + "type": "structure", + "members": { + "logGroupArn": { + "target": "com.amazonaws.fis#CloudWatchLogGroupArn", + "traits": { + "smithy.api#documentation": "

                The Amazon Resource Name (ARN) of the destination Amazon CloudWatch Logs log group.

                " + } + } + }, + "traits": { + "smithy.api#documentation": "

                Describes the configuration for experiment logging to Amazon CloudWatch Logs.

                " + } + }, + "com.amazonaws.fis#ExperimentTemplateCloudWatchLogsLogConfigurationInput": { + "type": "structure", + "members": { + "logGroupArn": { + "target": "com.amazonaws.fis#CloudWatchLogGroupArn", + "traits": { + "smithy.api#documentation": "

                The Amazon Resource Name (ARN) of the destination Amazon CloudWatch Logs log group.

                ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                Specifies the configuration for experiment logging to Amazon CloudWatch Logs.

                " + } + }, "com.amazonaws.fis#ExperimentTemplateDescription": { "type": "string", "traits": { @@ -1340,6 +1484,73 @@ "smithy.api#pattern": "^[\\S]+$" } }, + "com.amazonaws.fis#ExperimentTemplateLogConfiguration": { + "type": "structure", + "members": { + "cloudWatchLogsConfiguration": { + "target": "com.amazonaws.fis#ExperimentTemplateCloudWatchLogsLogConfiguration", + "traits": { + "smithy.api#documentation": "

                The configuration for experiment logging to Amazon CloudWatch Logs.

                " + } + }, + "s3Configuration": { + "target": "com.amazonaws.fis#ExperimentTemplateS3LogConfiguration", + "traits": { + "smithy.api#documentation": "

                The configuration for experiment logging to Amazon S3.

                " + } + }, + "logSchemaVersion": { + "target": "com.amazonaws.fis#LogSchemaVersion", + "traits": { + "smithy.api#documentation": "

                The schema version.

                " + } + } + }, + "traits": { + "smithy.api#documentation": "

                Describes the configuration for experiment logging.

                " + } + }, + "com.amazonaws.fis#ExperimentTemplateS3LogConfiguration": { + "type": "structure", + "members": { + "bucketName": { + "target": "com.amazonaws.fis#S3BucketName", + "traits": { + "smithy.api#documentation": "

                The name of the destination bucket.

                " + } + }, + "prefix": { + "target": "com.amazonaws.fis#S3ObjectKey", + "traits": { + "smithy.api#documentation": "

                The bucket prefix.

                " + } + } + }, + "traits": { + "smithy.api#documentation": "

                Describes the configuration for experiment logging to Amazon S3.

                " + } + }, + "com.amazonaws.fis#ExperimentTemplateS3LogConfigurationInput": { + "type": "structure", + "members": { + "bucketName": { + "target": "com.amazonaws.fis#S3BucketName", + "traits": { + "smithy.api#documentation": "

                The name of the destination bucket.

                ", + "smithy.api#required": {} + } + }, + "prefix": { + "target": "com.amazonaws.fis#S3ObjectKey", + "traits": { + "smithy.api#documentation": "

                The bucket prefix.

                " + } + } + }, + "traits": { + "smithy.api#documentation": "

                Specifies the configuration for experiment logging to Amazon S3.

                " + } + }, "com.amazonaws.fis#ExperimentTemplateStopCondition": { "type": "structure", "members": { @@ -2193,6 +2404,12 @@ } } }, + "com.amazonaws.fis#LogSchemaVersion": { + "type": "integer", + "traits": { + "smithy.api#box": {} + } + }, "com.amazonaws.fis#NextToken": { "type": "string", "traits": { @@ -2252,6 +2469,26 @@ "smithy.api#pattern": "^[\\S]+$" } }, + "com.amazonaws.fis#S3BucketName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 3, + "max": 63 + }, + "smithy.api#pattern": "^[\\S]+$" + } + }, + "com.amazonaws.fis#S3ObjectKey": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1024 + }, + "smithy.api#pattern": "^[\\s\\S]+$" + } + }, "com.amazonaws.fis#ServiceQuotaExceededException": { "type": "structure", "members": { @@ -2729,6 +2966,32 @@ "target": "com.amazonaws.fis#UpdateExperimentTemplateActionInputItem" } }, + "com.amazonaws.fis#UpdateExperimentTemplateLogConfigurationInput": { + "type": "structure", + "members": { + "cloudWatchLogsConfiguration": { + "target": "com.amazonaws.fis#ExperimentTemplateCloudWatchLogsLogConfigurationInput", + "traits": { + "smithy.api#documentation": "

                The configuration for experiment logging to Amazon CloudWatch Logs.

                " + } + }, + "s3Configuration": { + "target": "com.amazonaws.fis#ExperimentTemplateS3LogConfigurationInput", + "traits": { + "smithy.api#documentation": "

                The configuration for experiment logging to Amazon S3.

                " + } + }, + "logSchemaVersion": { + "target": "com.amazonaws.fis#LogSchemaVersion", + "traits": { + "smithy.api#documentation": "

                The schema version.

                " + } + } + }, + "traits": { + "smithy.api#documentation": "

                Specifies the configuration for experiment logging.

                " + } + }, "com.amazonaws.fis#UpdateExperimentTemplateRequest": { "type": "structure", "members": { @@ -2769,6 +3032,12 @@ "traits": { "smithy.api#documentation": "

                The Amazon Resource Name (ARN) of an IAM role that grants the FIS service permission to perform service actions on your behalf.

                " } + }, + "logConfiguration": { + "target": "com.amazonaws.fis#UpdateExperimentTemplateLogConfigurationInput", + "traits": { + "smithy.api#documentation": "

                The configuration for experiment logging.

                " + } } } }, diff --git a/codegen/sdk-codegen/aws-models/fsx.json b/codegen/sdk-codegen/aws-models/fsx.json index 426ce4440a8d..36a6e38ed251 100644 --- a/codegen/sdk-codegen/aws-models/fsx.json +++ b/codegen/sdk-codegen/aws-models/fsx.json @@ -1208,7 +1208,7 @@ "FileSystemPath": { "target": "com.amazonaws.fsx#Namespace", "traits": { - "smithy.api#documentation": "

                A path on the file system that points to a high-level directory (such\n as /ns1/) or subdirectory (such as /ns1/subdir/)\n that will be mapped 1-1 with DataRepositoryPath.\n The leading forward slash in the name is required. Two data repository\n associations cannot have overlapping file system paths. For example, if\n a data repository is associated with file system path /ns1/,\n then you cannot link another data repository with file system\n path /ns1/ns2.

                \n

                This path specifies where in your file system files will be exported\n from or imported to. This file system directory can be linked to only one\n Amazon S3 bucket, and no other S3 bucket can be linked to the directory.

                ", + "smithy.api#documentation": "

                A path on the file system that points to a high-level directory (such\n as /ns1/) or subdirectory (such as /ns1/subdir/)\n that will be mapped 1-1 with DataRepositoryPath.\n The leading forward slash in the name is required. Two data repository\n associations cannot have overlapping file system paths. For example, if\n a data repository is associated with file system path /ns1/,\n then you cannot link another data repository with file system\n path /ns1/ns2.

                \n

                This path specifies where in your file system files will be exported\n from or imported to. This file system directory can be linked to only one\n Amazon S3 bucket, and no other S3 bucket can be linked to the directory.

                \n \n

                If you specify only a forward slash (/) as the file system\n path, you can link only 1 data repository to the file system. You can only specify\n \"/\" as the file system path for the first data repository associated with a file system.

                \n
                ", "smithy.api#required": {} } }, @@ -1428,7 +1428,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Creates a new Amazon FSx for Lustre, Amazon FSx for Windows File\n Server, or Amazon FSx for OpenZFS file system from an existing Amazon FSx backup.

                \n \n

                If a file system with the specified client request token exists and the parameters\n match, this operation returns the description of the file system. If a client request\n token with the specified by the file system exists and the parameters don't match, this\n call returns IncompatibleParameterError. If a file system with the\n specified client request token doesn't exist, this operation does the following:

                \n \n
                  \n
                • \n

                  Creates a new Amazon FSx file system from backup with an assigned ID,\n and an initial lifecycle state of CREATING.

                  \n
                • \n
                • \n

                  Returns the description of the file system.

                  \n
                • \n
                \n\n

                Parameters like the Active Directory, default share name, automatic backup, and backup\n settings default to the parameters of the file system that was backed up, unless\n overridden. You can explicitly supply other settings.

                \n\n

                By using the idempotent operation, you can retry a\n CreateFileSystemFromBackup call without the risk of creating an extra\n file system. This approach can be useful when an initial call fails in a way that makes\n it unclear whether a file system was created. Examples are if a transport level timeout\n occurred, or your connection was reset. If you use the same client request token and the\n initial call created a file system, the client receives a success message as long as the\n parameters are the same.

                \n \n

                The CreateFileSystemFromBackup call returns while the file system's\n lifecycle state is still CREATING. You can check the file-system\n creation status by calling the \n DescribeFileSystems operation, which returns the file system state along\n with other information.

                \n
                " + "smithy.api#documentation": "

                Creates a new Amazon FSx for Lustre, Amazon FSx for Windows File\n Server, or Amazon FSx for OpenZFS file system from an existing Amazon FSx backup.

                \n \n

                If a file system with the specified client request token exists and the parameters\n match, this operation returns the description of the file system. If a file system\n with the specified client request token exists but the parameters don't match, this\n call returns IncompatibleParameterError. If a file system with the\n specified client request token doesn't exist, this operation does the following:

                \n \n
                  \n
                • \n

                  Creates a new Amazon FSx file system from backup with an assigned ID,\n and an initial lifecycle state of CREATING.

                  \n
                • \n
                • \n

                  Returns the description of the file system.

                  \n
                • \n
                \n\n

                Parameters like the Active Directory, default share name, automatic backup, and backup\n settings default to the parameters of the file system that was backed up, unless\n overridden. You can explicitly supply other settings.

                \n\n

                By using the idempotent operation, you can retry a\n CreateFileSystemFromBackup call without the risk of creating an extra\n file system. This approach can be useful when an initial call fails in a way that makes\n it unclear whether a file system was created. Examples are if a transport level timeout\n occurred, or your connection was reset. If you use the same client request token and the\n initial call created a file system, the client receives a success message as long as the\n parameters are the same.

                \n \n

                The CreateFileSystemFromBackup call returns while the file system's\n lifecycle state is still CREATING. You can check the file-system\n creation status by calling the \n DescribeFileSystems operation, which returns the file system state along\n with other information.

                \n
                " } }, "com.amazonaws.fsx#CreateFileSystemFromBackupRequest": { @@ -1551,7 +1551,7 @@ "AutoImportPolicy": { "target": "com.amazonaws.fsx#AutoImportPolicyType", "traits": { - "smithy.api#documentation": "

                (Optional) Available with Scratch and Persistent_1 deployment types. When you \n create your file system, your existing S3 objects appear as file and directory listings. \n Use this property to choose how Amazon FSx keeps your file and directory listings up to date \n as you add or modify objects in your linked S3 bucket. AutoImportPolicy can\n have the following values:

                \n
                  \n
                • \n

                  \n NONE - (Default) AutoImport is off. Amazon FSx only updates \n file and directory listings from the linked S3 bucket \n when the file system is created. FSx does not update file and directory \n listings for any new or changed objects after choosing this option.

                  \n
                • \n
                • \n

                  \n NEW - AutoImport is on. Amazon FSx automatically imports\n directory listings of any new objects added to the linked S3 bucket that \n do not currently exist in the FSx file system.

                  \n
                • \n
                • \n

                  \n NEW_CHANGED - AutoImport is on. Amazon FSx automatically imports \n file and directory listings of any new objects added to the S3 bucket and any \n existing objects that are changed in the S3 bucket after you choose this option.

                  \n
                • \n
                • \n

                  \n NEW_CHANGED_DELETED - AutoImport is on. Amazon FSx automatically\n imports file and directory listings of any new objects added to the S3 bucket, any \n existing objects that are changed in the S3 bucket, and any objects that were deleted\n in the S3 bucket.

                  \n
                • \n
                \n

                For more information, see \n Automatically import updates from your S3 bucket.

                \n \n

                This parameter is not supported for file systems with the Persistent_2 deployment type.\n Instead, use CreateDataRepositoryAssociation\" to create\n a data repository association to link your Lustre file system to a data repository.

                \n
                " + "smithy.api#documentation": "

                (Optional) Available with Scratch and Persistent_1 deployment types. When you \n create your file system, your existing S3 objects appear as file and directory listings. \n Use this property to choose how Amazon FSx keeps your file and directory listings up to date \n as you add or modify objects in your linked S3 bucket. AutoImportPolicy can\n have the following values:

                \n
                  \n
                • \n

                  \n NONE - (Default) AutoImport is off. Amazon FSx only updates \n file and directory listings from the linked S3 bucket \n when the file system is created. FSx does not update file and directory \n listings for any new or changed objects after choosing this option.

                  \n
                • \n
                • \n

                  \n NEW - AutoImport is on. Amazon FSx automatically imports\n directory listings of any new objects added to the linked S3 bucket that \n do not currently exist in the FSx file system.

                  \n
                • \n
                • \n

                  \n NEW_CHANGED - AutoImport is on. Amazon FSx automatically imports \n file and directory listings of any new objects added to the S3 bucket and any \n existing objects that are changed in the S3 bucket after you choose this option.

                  \n
                • \n
                • \n

                  \n NEW_CHANGED_DELETED - AutoImport is on. Amazon FSx automatically\n imports file and directory listings of any new objects added to the S3 bucket, any \n existing objects that are changed in the S3 bucket, and any objects that were deleted\n in the S3 bucket.

                  \n
                • \n
                \n

                For more information, see \n Automatically import updates from your S3 bucket.

                \n \n

                This parameter is not supported for file systems with the Persistent_2 deployment type.\n Instead, use CreateDataRepositoryAssociation to create\n a data repository association to link your Lustre file system to a data repository.

                \n
                " } }, "PerUnitStorageThroughput": { @@ -1614,7 +1614,7 @@ "EndpointIpAddressRange": { "target": "com.amazonaws.fsx#IpAddressRange", "traits": { - "smithy.api#documentation": "

                Specifies the IP address range in which the endpoints to access your file system\n will be created. By default, Amazon FSx selects an unused IP address range for you\n from the 198.19.* range.

                " + "smithy.api#documentation": "

                Specifies the IP address range in which the endpoints to access your file system\n will be created. By default, Amazon FSx selects an unused IP address range for you\n from the 198.19.* range.

                \n \n

                The Endpoint IP address range you select for your file system\n must exist outside the VPC's CIDR range and must be at least /30 or larger.

                \n
                " } }, "FsxAdminPassword": { @@ -1680,7 +1680,7 @@ "DeploymentType": { "target": "com.amazonaws.fsx#OpenZFSDeploymentType", "traits": { - "smithy.api#documentation": "

                Specifies the file system deployment type. Amazon FSx for OpenZFS supports\n SINGLE_AZ_1. SINGLE_AZ_1 is a file system configured for a\n single Availability Zone (AZ) of redundancy.

                ", + "smithy.api#documentation": "

                Specifies the file system deployment type. Amazon FSx for OpenZFS supports\n SINGLE_AZ_1. SINGLE_AZ_1 deployment type is configured for redundancy\n within a single Availability Zone.

                ", "smithy.api#required": {} } }, @@ -1705,7 +1705,7 @@ } }, "traits": { - "smithy.api#documentation": "

                The OpenZFS configuration properties for the file system that you are creating.

                " + "smithy.api#documentation": "

                The Amazon FSx for OpenZFS configuration properties for the file system that you are creating.

                " } }, "com.amazonaws.fsx#CreateFileSystemRequest": { @@ -1947,32 +1947,38 @@ "ParentVolumeId": { "target": "com.amazonaws.fsx#VolumeId", "traits": { - "smithy.api#documentation": "

                The ID of the volume to use as the parent volume.

                ", + "smithy.api#documentation": "

                The ID of the volume to use as the parent volume of the volume that you are creating.

                ", "smithy.api#required": {} } }, "StorageCapacityReservationGiB": { - "target": "com.amazonaws.fsx#IntegerNoMax", + "target": "com.amazonaws.fsx#IntegerNoMaxFromNegativeOne", "traits": { - "smithy.api#documentation": "

                The amount of storage in gibibytes (GiB) to reserve from the parent volume. You can't\n reserve more storage than the parent volume has reserved.

                " + "smithy.api#documentation": "

                Specifies the amount of storage in gibibytes (GiB) to reserve from the parent volume. Setting\n StorageCapacityReservationGiB guarantees that the specified amount of storage space\n on the parent volume will always be available for the volume. \n You can't reserve more storage than the parent volume has. To not specify a storage capacity \n reservation, set this to 0 or -1. For more information, see \n Volume properties \n in the Amazon FSx for OpenZFS User Guide.

                " } }, "StorageCapacityQuotaGiB": { - "target": "com.amazonaws.fsx#IntegerNoMax", + "target": "com.amazonaws.fsx#IntegerNoMaxFromNegativeOne", + "traits": { + "smithy.api#documentation": "

                Sets the maximum storage size in gibibytes (GiB) for the volume. You can specify \n a quota that is larger than the storage on the parent volume. A volume quota limits \n the amount of storage that the volume can consume to the configured amount, but does not \n guarantee the space will be available on the parent volume. To guarantee quota space, you must also set \n StorageCapacityReservationGiB. To not specify a storage capacity quota, set this to -1.\n

                \n

                For more information, see \n Volume properties \n in the Amazon FSx for OpenZFS User Guide.

                " + } + }, + "RecordSizeKiB": { + "target": "com.amazonaws.fsx#IntegerRecordSizeKiB", "traits": { - "smithy.api#documentation": "

                The maximum amount of storage in gibibytes (GiB) that the volume can use from its\n parent. You can specify a quota larger than the storage on the parent volume.

                " + "smithy.api#documentation": "

                Specifies the suggested block size for a volume in a ZFS dataset, in kibibytes (KiB). Valid values are 4, 8,\n 16, 32, 64, 128, 256, 512, or 1024 KiB. The default is 128 KiB.\n We recommend using the default setting for the majority of use cases. \n Generally, workloads that write in fixed small or large record sizes \n may benefit from setting a custom record size, like database workloads \n (small record size) or media streaming workloads (large record size). \n For additional guidance on when\n to set a custom record size, see \n \n ZFS Record size in the Amazon FSx for OpenZFS User Guide.

                " } }, "DataCompressionType": { "target": "com.amazonaws.fsx#OpenZFSDataCompressionType", "traits": { - "smithy.api#documentation": "

                Specifies the method used to compress the data on the volume. Unless the compression\n type is specified, volumes inherit the DataCompressionType value of their\n parent volume.

                \n
                  \n
                • \n

                  \n NONE - Doesn't compress the data on the volume.

                  \n
                • \n
                • \n

                  \n ZSTD - Compresses the data in the volume using the Zstandard\n (ZSTD) compression algorithm. This algorithm reduces the amount of space used on\n your volume and has very little impact on compute resources.

                  \n
                • \n
                " + "smithy.api#documentation": "

                Specifies the method used to compress the data on the volume. The compression\n type is NONE by default.

                \n
                  \n
                • \n

                  \n NONE - Doesn't compress the data on the volume.\n NONE is the default.

                  \n
                • \n
                • \n

                  \n ZSTD - Compresses the data in the volume using the Zstandard\n (ZSTD) compression algorithm. ZSTD compression provides a higher level of \n data compression and higher read throughput performance than LZ4 compression.

                  \n
                • \n
                • \n

                  \n LZ4 - Compresses the data in the volume using the LZ4\n compression algorithm. LZ4 compression provides a lower level of compression \n and higher write throughput performance than ZSTD compression.

                  \n
                • \n
                \n

                For more information about volume compression types and the performance of your Amazon FSx for OpenZFS file system,\n see \n Tips for maximizing performance File system and volume settings in the Amazon FSx for OpenZFS User Guide.

                " } }, "CopyTagsToSnapshots": { "target": "com.amazonaws.fsx#Flag", "traits": { - "smithy.api#documentation": "

                A Boolean value indicating whether tags for the volume should be copied to snapshots.\n This value defaults to false. If it's set to true, all tags\n for the volume are copied to snapshots where the user doesn't specify tags. If this\n value is true, and you specify one or more tags, only the specified tags\n are copied to snapshots. If you specify one or more tags when creating the snapshot, no\n tags are copied from the volume, regardless of this value.

                " + "smithy.api#documentation": "

                A Boolean value indicating whether tags for the volume should be copied to snapshots.\n This value defaults to false. If it's set to true, all tags\n for the volume are copied to snapshots where the user doesn't specify tags. If this\n value is true, and you specify one or more tags, only the specified tags\n are copied to snapshots. If you specify one or more tags when creating the snapshot, no\n tags are copied from the volume, regardless of this value.

                " } }, "OriginSnapshot": { @@ -1984,24 +1990,24 @@ "ReadOnly": { "target": "com.amazonaws.fsx#ReadOnly", "traits": { - "smithy.api#documentation": "

                A Boolean value indicating whether the volume is read-only.

                " + "smithy.api#documentation": "

                A Boolean value indicating whether the volume is read-only.

                " } }, "NfsExports": { "target": "com.amazonaws.fsx#OpenZFSNfsExports", "traits": { - "smithy.api#documentation": "

                The configuration object for mounting a Network File System (NFS) file system.

                " + "smithy.api#documentation": "

                The configuration object for mounting a Network File System (NFS) file system.

                " } }, "UserAndGroupQuotas": { "target": "com.amazonaws.fsx#OpenZFSUserAndGroupQuotas", "traits": { - "smithy.api#documentation": "

                An object specifying how much storage users or groups can use on the volume.

                " + "smithy.api#documentation": "

                An object specifying how much storage users or groups can use on the volume.

                " } } }, "traits": { - "smithy.api#documentation": "

                Specifies the configuration of the OpenZFS volume that you are creating.

                " + "smithy.api#documentation": "

                Specifies the configuration of the Amazon FSx for OpenZFS volume that you are creating.

                " } }, "com.amazonaws.fsx#CreateSnapshot": { @@ -2027,7 +2033,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Creates a snapshot of an existing Amazon FSx for OpenZFS file system. With\n snapshots, you can easily undo file changes and compare file versions by restoring the\n volume to a previous version.

                \n

                If a snapshot with the specified client request token exists, and the parameters\n match, this operation returns the description of the existing snapshot. If a snapshot\n with the specified client request token exists, and the parameters don't match, this\n operation returns IncompatibleParameterError. If a snapshot with the\n specified client request token doesn't exist, CreateSnapshot does the\n following:

                \n
                  \n
                • \n

                  Creates a new OpenZFS snapshot with an assigned ID, and an initial lifecycle\n state of CREATING.

                  \n
                • \n
                • \n

                  Returns the description of the snapshot.

                  \n
                • \n
                \n

                By using the idempotent operation, you can retry a CreateSnapshot\n operation without the risk of creating an extra snapshot. This approach can be useful\n when an initial call fails in a way that makes it unclear whether a snapshot was\n created. If you use the same client request token and the initial call created a\n snapshot, the operation returns a successful result because all the parameters are the\n same.

                \n

                The CreateSnapshot operation returns while the snapshot's lifecycle state\n is still CREATING. You can check the snapshot creation status by calling\n the DescribeSnapshots operation, which returns the snapshot state along with\n other information.

                ", + "smithy.api#documentation": "

                Creates a snapshot of an existing Amazon FSx for OpenZFS volume. With\n snapshots, you can easily undo file changes and compare file versions by restoring the\n volume to a previous version.

                \n

                If a snapshot with the specified client request token exists, and the parameters\n match, this operation returns the description of the existing snapshot. If a snapshot\n with the specified client request token exists, and the parameters don't match, this\n operation returns IncompatibleParameterError. If a snapshot with the\n specified client request token doesn't exist, CreateSnapshot does the\n following:

                \n
                  \n
                • \n

                  Creates a new OpenZFS snapshot with an assigned ID, and an initial lifecycle\n state of CREATING.

                  \n
                • \n
                • \n

                  Returns the description of the snapshot.

                  \n
                • \n
                \n

                By using the idempotent operation, you can retry a CreateSnapshot\n operation without the risk of creating an extra snapshot. This approach can be useful\n when an initial call fails in a way that makes it unclear whether a snapshot was\n created. If you use the same client request token and the initial call created a\n snapshot, the operation returns a successful result because all the parameters are the\n same.

                \n

                The CreateSnapshot operation returns while the snapshot's lifecycle state\n is still CREATING. You can check the snapshot creation status by calling\n the DescribeSnapshots operation, which returns the snapshot state along with\n other information.

                ", "smithy.api#idempotent": {} } }, @@ -2214,7 +2220,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Creates an Amazon FSx for NetApp ONTAP or Amazon FSx for OpenZFS storage\n volume.

                " + "smithy.api#documentation": "

                Creates an FSx for ONTAP or Amazon FSx for OpenZFS storage volume.

                " } }, "com.amazonaws.fsx#CreateVolumeFromBackup": { @@ -2420,7 +2426,7 @@ "FileSystemPath": { "target": "com.amazonaws.fsx#Namespace", "traits": { - "smithy.api#documentation": "

                A path on the file system that points to a high-level directory (such\n as /ns1/) or subdirectory (such as /ns1/subdir/)\n that will be mapped 1-1 with DataRepositoryPath.\n The leading forward slash in the name is required. Two data repository\n associations cannot have overlapping file system paths. For example, if\n a data repository is associated with file system path /ns1/,\n then you cannot link another data repository with file system\n path /ns1/ns2.

                \n

                This path specifies where in your file system files will be exported\n from or imported to. This file system directory can be linked to only one\n Amazon S3 bucket, and no other S3 bucket can be linked to the directory.

                " + "smithy.api#documentation": "

                A path on the file system that points to a high-level directory (such\n as /ns1/) or subdirectory (such as /ns1/subdir/)\n that will be mapped 1-1 with DataRepositoryPath.\n The leading forward slash in the name is required. Two data repository\n associations cannot have overlapping file system paths. For example, if\n a data repository is associated with file system path /ns1/,\n then you cannot link another data repository with file system\n path /ns1/ns2.

                \n

                This path specifies where in your file system files will be exported\n from or imported to. This file system directory can be linked to only one\n Amazon S3 bucket, and no other S3 bucket can be linked to the directory.

                \n \n

                If you specify only a forward slash (/) as the file system\n path, you can link only 1 data repository to the file system. You can only specify\n \"/\" as the file system path for the first data repository associated with a file system.

                \n
                " } }, "DataRepositoryPath": { @@ -3137,15 +3143,47 @@ "SkipFinalBackup": { "target": "com.amazonaws.fsx#Flag", "traits": { - "smithy.api#documentation": "

                By default, Amazon FSx for OpenZFS takes a final backup on your behalf when\n the DeleteFileSystem operation is invoked. Doing this helps protect you\n from data loss, and we highly recommend taking the final backup. If you want to skip\n this backup, use this\n value\n to do so.

                " + "smithy.api#documentation": "

                By default, Amazon FSx for OpenZFS takes a final backup on your behalf when\n the DeleteFileSystem operation is invoked. Doing this helps protect you\n from data loss, and we highly recommend taking the final backup. If you want to skip\n taking a final backup, set this value to true.

                " } }, "FinalBackupTags": { - "target": "com.amazonaws.fsx#Tags" + "target": "com.amazonaws.fsx#Tags", + "traits": { + "smithy.api#documentation": "

                A list of tags to apply to the file system's final backup.

                " + } + }, + "Options": { + "target": "com.amazonaws.fsx#DeleteFileSystemOpenZFSOptions", + "traits": { + "smithy.api#documentation": "

                To delete a file system if there are child volumes present below the root volume,\n use the string DELETE_CHILD_VOLUMES_AND_SNAPSHOTS. If your file system\n has child volumes and you don't use this option, the delete request will fail.

                " + } } }, "traits": { - "smithy.api#documentation": "

                The configuration object for the OpenZFS file system used in the\n DeleteFileSystem operation.

                " + "smithy.api#documentation": "

                The configuration object for the Amazon FSx for OpenZFS file system used in the\n DeleteFileSystem operation.

                " + } + }, + "com.amazonaws.fsx#DeleteFileSystemOpenZFSOption": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "DELETE_CHILD_VOLUMES_AND_SNAPSHOTS", + "name": "DELETE_CHILD_VOLUMES_AND_SNAPSHOTS" + } + ] + } + }, + "com.amazonaws.fsx#DeleteFileSystemOpenZFSOptions": { + "type": "list", + "member": { + "target": "com.amazonaws.fsx#DeleteFileSystemOpenZFSOption" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 1 + } } }, "com.amazonaws.fsx#DeleteFileSystemOpenZFSResponse": { @@ -3311,7 +3349,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Deletes the Amazon FSx snapshot. After deletion, the snapshot no longer\n exists, and its data is gone. Deleting a snapshot doesn't affect snapshots stored in a\n file system backup.

                \n

                The DeleteSnapshot operation returns instantly. The snapshot appears with\n the lifecycle status of DELETING until the deletion is complete.

                ", + "smithy.api#documentation": "

                Deletes an Amazon FSx for OpenZFS snapshot. After deletion, the snapshot no longer\n exists, and its data is gone. Deleting a snapshot doesn't affect snapshots stored in a\n file system backup.

                \n

                The DeleteSnapshot operation returns instantly. The snapshot appears with\n the lifecycle status of DELETING until the deletion is complete.

                ", "smithy.api#idempotent": {} } }, @@ -3936,7 +3974,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Returns the description of specific Amazon FSx snapshots, if a\n SnapshotIds value is provided. Otherwise, this operation returns all\n snapshots owned by your Amazon Web Services account in the Amazon Web Services Region of\n the endpoint that you're calling.

                \n

                When retrieving all snapshots, you can optionally specify the MaxResults\n parameter to limit the number of snapshots in a response. If more backups remain,\n Amazon FSx returns a NextToken value in the response. In this\n case, send a later request with the NextToken request parameter set to the\n value of NextToken from the last response.

                \n

                Use this operation in an iterative process to retrieve a list of your snapshots.\n DescribeSnapshots is called first without a NextToken\n value. Then the operation continues to be called with the NextToken\n parameter set to the value of the last NextToken value until a response has\n no NextToken value.

                \n

                When using this operation, keep the following in mind:

                \n
                  \n
                • \n

                  The operation might return fewer than the MaxResults value of\n snapshot descriptions while still including a NextToken\n value.

                  \n
                • \n
                • \n

                  The order of snapshots returned in the response of one\n DescribeSnapshots call and the order of backups returned across\n the responses of a multi-call iteration is unspecified.

                  \n
                • \n
                ", + "smithy.api#documentation": "

                Returns the description of specific Amazon FSx for OpenZFS snapshots, if a\n SnapshotIds value is provided. Otherwise, this operation returns all\n snapshots owned by your Amazon Web Services account in the Amazon Web Services Region of\n the endpoint that you're calling.

                \n

                When retrieving all snapshots, you can optionally specify the MaxResults\n parameter to limit the number of snapshots in a response. If more backups remain,\n Amazon FSx returns a NextToken value in the response. In this\n case, send a later request with the NextToken request parameter set to the\n value of NextToken from the last response.

                \n

                Use this operation in an iterative process to retrieve a list of your snapshots.\n DescribeSnapshots is called first without a NextToken\n value. Then the operation continues to be called with the NextToken\n parameter set to the value of the last NextToken value until a response has\n no NextToken value.

                \n

                When using this operation, keep the following in mind:

                \n
                  \n
                • \n

                  The operation might return fewer than the MaxResults value of\n snapshot descriptions while still including a NextToken\n value.

                  \n
                • \n
                • \n

                  The order of snapshots returned in the response of one\n DescribeSnapshots call and the order of backups returned across\n the responses of a multi-call iteration is unspecified.

                  \n
                • \n
                ", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -4813,6 +4851,26 @@ } } }, + "com.amazonaws.fsx#IntegerNoMaxFromNegativeOne": { + "type": "integer", + "traits": { + "smithy.api#box": {}, + "smithy.api#range": { + "min": -1, + "max": 2147483647 + } + } + }, + "com.amazonaws.fsx#IntegerRecordSizeKiB": { + "type": "integer", + "traits": { + "smithy.api#box": {}, + "smithy.api#range": { + "min": 4, + "max": 1024 + } + } + }, "com.amazonaws.fsx#InternalServerError": { "type": "structure", "members": { @@ -5324,7 +5382,7 @@ "min": 1, "max": 4096 }, - "smithy.api#pattern": "^[^\\u0000\\u0085\\u2028\\u2029\\r\\n]{3,4096}$" + "smithy.api#pattern": "^[^\\u0000\\u0085\\u2028\\u2029\\r\\n]{1,4096}$" } }, "com.amazonaws.fsx#NetBiosAlias": { @@ -5432,7 +5490,7 @@ "EndpointIpAddressRange": { "target": "com.amazonaws.fsx#IpAddressRange", "traits": { - "smithy.api#documentation": "

                The IP address range in which the endpoints to access your file system\n are created.

                " + "smithy.api#documentation": "

                The IP address range in which the endpoints to access your file system\n are created.

                \n \n

                The Endpoint IP address range you select for your file system\n must exist outside the VPC's CIDR range and must be at least /30 or larger.\n If you do not specify this optional parameter, Amazon FSx will automatically\n select a CIDR block for you.

                \n
                " } }, "Endpoints": { @@ -5560,14 +5618,14 @@ "Clients": { "target": "com.amazonaws.fsx#OpenZFSClients", "traits": { - "smithy.api#documentation": "

                A value that specifies who can mount the file system. You can provide a wildcard\n character (*), an IP address (0.0.0.0), or a CIDR address\n (192.0.2.0/24. By default, Amazon FSx uses the wildcard\n character when specifying the client.

                ", + "smithy.api#documentation": "

                A value that specifies who can mount the file system. You can provide a wildcard\n character (*), an IP address (0.0.0.0), or a CIDR address\n (192.0.2.0/24). By default, Amazon FSx uses the wildcard\n character when specifying the client.

                ", "smithy.api#required": {} } }, "Options": { "target": "com.amazonaws.fsx#OpenZFSNfsExportOptions", "traits": { - "smithy.api#documentation": "

                The options to use when mounting the file system. For a list of options that you can\n use with Network File System (NFS), see the exports(5) - Linux man page. When\n choosing your options, consider the following:

                \n
                  \n
                • \n

                  \n crossmount is used by default. If you don't specify\n crossmount when changing the client configuration, you won't be\n able to see or access snapshots in your file system's snapshot directory.

                  \n
                • \n
                • \n

                  \n sync is used by default. If you instead specify\n async, the system acknowledges writes before writing to disk.\n If the system crashes before the writes are finished, you lose the unwritten\n data.

                  \n
                • \n
                ", + "smithy.api#documentation": "

                The options to use when mounting the file system. For a list of options that you can\n use with Network File System (NFS), see the exports(5) - Linux man page. When\n choosing your options, consider the following:

                \n
                  \n
                • \n

                  \n crossmnt is used by default. If you don't specify\n crossmnt when changing the client configuration, you won't be\n able to see or access snapshots in your file system's snapshot directory.

                  \n
                • \n
                • \n

                  \n sync is used by default. If you instead specify\n async, the system acknowledges writes before writing to disk.\n If the system crashes before the writes are finished, you lose the unwritten\n data.

                  \n
                • \n
                ", "smithy.api#required": {} } } @@ -5616,10 +5674,16 @@ "com.amazonaws.fsx#OpenZFSCreateRootVolumeConfiguration": { "type": "structure", "members": { + "RecordSizeKiB": { + "target": "com.amazonaws.fsx#IntegerRecordSizeKiB", + "traits": { + "smithy.api#documentation": "

                Specifies the record size of an OpenZFS root volume, in kibibytes (KiB). Valid values are 4, 8,\n 16, 32, 64, 128, 256, 512, or 1024 KiB. The default is 128 KiB. Most workloads should use the \n default record size. Database workflows can benefit from a smaller record size, while streaming \n workflows can benefit from a larger record size. For additional guidance on setting a custom record \n size, see \n Tips for maximizing performance in the\n Amazon FSx for OpenZFS User Guide.

                " + } + }, "DataCompressionType": { "target": "com.amazonaws.fsx#OpenZFSDataCompressionType", "traits": { - "smithy.api#documentation": "

                Specifies the method used to compress the data on the volume. Unless the compression\n type is specified, volumes inherit the DataCompressionType value of their\n parent volume.

                \n
                  \n
                • \n

                  \n NONE - Doesn't compress the data on the volume.

                  \n
                • \n
                • \n

                  \n ZSTD - Compresses the data in the volume using the ZStandard\n (ZSTD) compression algorithm. This algorithm reduces the amount of space used on\n your volume and has very little impact on compute resources.

                  \n
                • \n
                " + "smithy.api#documentation": "

                Specifies the method used to compress the data on the volume. The compression\n type is NONE by default.

                \n
                  \n
                • \n

                  \n NONE - Doesn't compress the data on the volume.\n NONE is the default.

                  \n
                • \n
                • \n

                  \n ZSTD - Compresses the data in the volume using the Zstandard\n (ZSTD) compression algorithm. Compared to LZ4, Z-Standard provides a better\n compression ratio to minimize on-disk storage utilization.

                  \n
                • \n
                • \n

                  \n LZ4 - Compresses the data in the volume using the LZ4\n compression algorithm. Compared to Z-Standard, LZ4 is less compute-intensive\n and delivers higher write throughput speeds.

                  \n
                • \n
                " } }, "NfsExports": { @@ -5637,7 +5701,7 @@ "CopyTagsToSnapshots": { "target": "com.amazonaws.fsx#Flag", "traits": { - "smithy.api#documentation": "

                A Boolean value indicating whether tags for the volume should be copied to snapshots.\n This value defaults to false. If it's set to true, all tags\n for the volume are copied to snapshots where the user doesn't specify tags. If this\n value is true and you specify one or more tags, only the specified tags are\n copied to snapshots. If you specify one or more tags when creating the snapshot, no tags\n are copied from the volume, regardless of this value.

                " + "smithy.api#documentation": "

                A Boolean value indicating whether tags for the volume should be copied to snapshots\n of the volume. This value defaults to false. If it's set to true,\n all tags for the volume are copied to snapshots where the user doesn't specify tags. If this\n value is true and you specify one or more tags, only the specified tags are\n copied to snapshots. If you specify one or more tags when creating the snapshot, no tags\n are copied from the volume, regardless of this value.

                " } }, "ReadOnly": { @@ -5662,6 +5726,10 @@ { "value": "ZSTD", "name": "ZSTD" + }, + { + "value": "LZ4", + "name": "LZ4" } ] } @@ -5707,7 +5775,7 @@ "ThroughputCapacity": { "target": "com.amazonaws.fsx#MegabytesPerSecond", "traits": { - "smithy.api#documentation": "

                The throughput of an Amazon FSx file system, measured in megabytes per second\n (MBps), in 2 to the nth increments, between 2^3 (8) and 2^11 (2048).

                " + "smithy.api#documentation": "

                The throughput of an Amazon FSx file system, measured in megabytes per second\n (MBps). Valid values are 64, 128, 256, 512, 1024, 2048, 3072, or 4096 MB/s.

                " } }, "WeeklyMaintenanceStartTime": { @@ -5739,7 +5807,7 @@ } }, "traits": { - "smithy.api#documentation": "

                The Network File System NFS) configurations for mounting an Amazon FSx for\n OpenZFS file system.

                " + "smithy.api#documentation": "

                The Network File System (NFS) configurations for mounting an Amazon FSx for\n OpenZFS file system.

                " } }, "com.amazonaws.fsx#OpenZFSNfsExportOption": { @@ -5876,16 +5944,22 @@ "smithy.api#documentation": "

                The maximum amount of storage in gibibytes (GiB) that the volume can use from its\n parent. You can specify a quota larger than the storage on the parent volume.

                " } }, + "RecordSizeKiB": { + "target": "com.amazonaws.fsx#IntegerRecordSizeKiB", + "traits": { + "smithy.api#documentation": "

                The record size of an OpenZFS volume, in kibibytes (KiB). Valid values are 4, 8,\n 16, 32, 64, 128, 256, 512, or 1024 KiB. The default is 128 KiB.\n Most workloads should use the default record size. For guidance on when\n to set a custom record size, see the\n Amazon FSx for OpenZFS User Guide.

                " + } + }, "DataCompressionType": { "target": "com.amazonaws.fsx#OpenZFSDataCompressionType", "traits": { - "smithy.api#documentation": "

                The method used to compress the data on the volume. Unless a compression type is\n specified, volumes inherit the DataCompressionType value of their parent\n volume.

                \n
                  \n
                • \n

                  \n NONE - Doesn't compress the data on the volume.

                  \n
                • \n
                • \n

                  \n ZSTD - Compresses the data in the volume using the Zstandard\n (ZSTD) compression algorithm. This algorithm reduces the amount of space used on\n your volume and has very little impact on compute resources.

                  \n
                • \n
                " + "smithy.api#documentation": "

                Specifies the method used to compress the data on the volume. The compression\n type is NONE by default.

                \n
                  \n
                • \n

                  \n NONE - Doesn't compress the data on the volume.\n NONE is the default.

                  \n
                • \n
                • \n

                  \n ZSTD - Compresses the data in the volume using the Zstandard\n (ZSTD) compression algorithm. Compared to LZ4, Z-Standard provides a better\n compression ratio to minimize on-disk storage utilization.

                  \n
                • \n
                • \n

                  \n LZ4 - Compresses the data in the volume using the LZ4\n compression algorithm. Compared to Z-Standard, LZ4 is less compute-intensive\n and delivers higher write throughput speeds.

                  \n
                • \n
                " } }, "CopyTagsToSnapshots": { "target": "com.amazonaws.fsx#Flag", "traits": { - "smithy.api#documentation": "

                A Boolean value indicating whether tags for the volume should be copied to snapshots.\n This value defaults to false. If it's set to true, all tags\n for the volume are copied to snapshots where the user doesn't specify tags. If this\n value is true and you specify one or more tags, only the specified tags are\n copied to snapshots. If you specify one or more tags when creating the snapshot, no tags\n are copied from the volume, regardless of this value.

                " + "smithy.api#documentation": "

                A Boolean value indicating whether tags for the volume should be copied to snapshots.\n This value defaults to false. If it's set to true, all tags\n for the volume are copied to snapshots where the user doesn't specify tags. If this\n value is true and you specify one or more tags, only the specified tags are\n copied to snapshots. If you specify one or more tags when creating the snapshot, no tags\n are copied from the volume, regardless of this value.

                " } }, "OriginSnapshot": { @@ -5903,13 +5977,13 @@ "NfsExports": { "target": "com.amazonaws.fsx#OpenZFSNfsExports", "traits": { - "smithy.api#documentation": "

                The configuration object for mounting a Network File System (NFS) file\n system.

                " + "smithy.api#documentation": "

                The configuration object for mounting a Network File System (NFS)\n file system.

                " } }, "UserAndGroupQuotas": { "target": "com.amazonaws.fsx#OpenZFSUserAndGroupQuotas", "traits": { - "smithy.api#documentation": "

                An object specifying how much storage users or groups can use on the volume.

                " + "smithy.api#documentation": "

                An object specifying how much storage users or groups can use on the volume.

                " } } }, @@ -6509,6 +6583,9 @@ "smithy.api#documentation": "

                The lifecycle status of the snapshot.

                \n
                  \n
                • \n

                  \n PENDING - Amazon FSx hasn't started creating the\n snapshot.

                  \n
                • \n
                • \n

                  \n CREATING - Amazon FSx is creating the snapshot.

                  \n
                • \n
                • \n

                  \n DELETING - Amazon FSx is deleting the snapshot.

                  \n
                • \n
                • \n

                  \n AVAILABLE - The snapshot is fully available.

                  \n
                • \n
                " } }, + "LifecycleTransitionReason": { + "target": "com.amazonaws.fsx#LifecycleTransitionReason" + }, "Tags": { "target": "com.amazonaws.fsx#Tags" }, @@ -6963,7 +7040,7 @@ } }, "traits": { - "smithy.api#documentation": "

                No Amazon FSx for NetApp ONTAP SVMs were found based upon the supplied parameters.

                ", + "smithy.api#documentation": "

                No FSx for ONTAP SVMs were found based upon the supplied parameters.

                ", "smithy.api#error": "client" } }, @@ -7580,13 +7657,13 @@ "CopyTagsToBackups": { "target": "com.amazonaws.fsx#Flag", "traits": { - "smithy.api#documentation": "

                A Boolean value indicating whether tags for the file system should be copied to\n backups. This value defaults to false. If it's set to true,\n all tags for the file system are copied to all automatic and user-initiated backups\n where the user doesn't specify tags. If this value is true and you specify\n one or more tags, only the specified tags are copied to backups. If you specify one or\n more tags when creating a user-initiated backup, no tags are copied from the file\n system, regardless of this value.

                " + "smithy.api#documentation": "

                A Boolean value indicating whether tags for the file system should be copied to\n backups. This value defaults to false. If it's set to true,\n all tags for the file system are copied to all automatic and user-initiated backups\n where the user doesn't specify tags. If this value is true and you specify\n one or more tags, only the specified tags are copied to backups. If you specify one or\n more tags when creating a user-initiated backup, no tags are copied from the file\n system, regardless of this value.

                " } }, "CopyTagsToVolumes": { "target": "com.amazonaws.fsx#Flag", "traits": { - "smithy.api#documentation": "

                A Boolean value indicating whether tags for the volume should be copied to snapshots.\n This value defaults to false. If it's set to true, all tags\n for the volume are copied to snapshots where the user doesn't specify tags. If this\n value is true and you specify one or more tags, only the specified tags are\n copied to snapshots. If you specify one or more tags when creating the snapshot, no tags\n are copied from the volume, regardless of this value.

                " + "smithy.api#documentation": "

                A Boolean value indicating whether tags for the volume should be copied to snapshots.\n This value defaults to false. If it's set to true, all tags\n for the volume are copied to snapshots where the user doesn't specify tags. If this\n value is true and you specify one or more tags, only the specified tags are\n copied to snapshots. If you specify one or more tags when creating the snapshot, no tags\n are copied from the volume, regardless of this value.

                " } }, "DailyAutomaticBackupStartTime": { @@ -7595,7 +7672,7 @@ "ThroughputCapacity": { "target": "com.amazonaws.fsx#MegabytesPerSecond", "traits": { - "smithy.api#documentation": "

                The throughput of an Amazon FSx file system, measured in megabytes per second\n (MBps), in 2 to the nth increments, between 2^3 (8) and 2^12 (4096).

                " + "smithy.api#documentation": "

                The throughput of an Amazon FSx file system, measured in megabytes per second\n (MBps). Valid values are 64, 128, 256, 512, 1024, 2048, 3072, or 4096 MB/s.

                " } }, "WeeklyMaintenanceStartTime": { @@ -7755,21 +7832,27 @@ "type": "structure", "members": { "StorageCapacityReservationGiB": { - "target": "com.amazonaws.fsx#IntegerNoMax", + "target": "com.amazonaws.fsx#IntegerNoMaxFromNegativeOne", "traits": { - "smithy.api#documentation": "

                The amount of storage in gibibytes (GiB) to reserve from the parent volume. You can't\n reserve more storage than the parent volume has reserved.

                " + "smithy.api#documentation": "

                The amount of storage in gibibytes (GiB) to reserve from the parent volume.\n You can't reserve more storage than the parent volume has reserved. You can specify\n a value of -1 to unset a volume's storage capacity reservation.

                " } }, "StorageCapacityQuotaGiB": { - "target": "com.amazonaws.fsx#IntegerNoMax", + "target": "com.amazonaws.fsx#IntegerNoMaxFromNegativeOne", + "traits": { + "smithy.api#documentation": "

                The maximum amount of storage in gibibytes (GiB) that the volume can use from its\n parent. You can specify a quota larger than the storage on the parent volume. You\n can specify a value of -1 to unset a volume's storage capacity quota.

                " + } + }, + "RecordSizeKiB": { + "target": "com.amazonaws.fsx#IntegerRecordSizeKiB", "traits": { - "smithy.api#documentation": "

                \n

                The maximum amount of storage in gibibytes (GiB) that the volume can use from its\n parent. You can specify a quota larger than the storage on the parent volume.

                " + "smithy.api#documentation": "

                Specifies the record size of an OpenZFS volume, in kibibytes (KiB). Valid values are 4, 8,\n 16, 32, 64, 128, 256, 512, or 1024 KiB. The default is 128 KiB.\n Most workloads should use the default record size. Database workflows can benefit from a smaller \n record size, while streaming workflows can benefit from a larger record size. For additional guidance on when\n to set a custom record size, see \n Tips for maximizing performance in the\n Amazon FSx for OpenZFS User Guide.

                " } }, "DataCompressionType": { "target": "com.amazonaws.fsx#OpenZFSDataCompressionType", "traits": { - "smithy.api#documentation": "

                \n

                Specifies the method used to compress the data on the volume. Unless the compression\n type is specified, volumes inherit the DataCompressionType value of their\n parent volume.

                \n
                  \n
                • \n

                  \n NONE - Doesn't compress the data on the volume.

                  \n
                • \n
                • \n

                  \n ZSTD - Compresses the data in the volume using the Zstandard\n (ZSTD) compression algorithm. This algorithm reduces the amount of space used on\n your volume and has very little impact on compute resources.

                  \n
                • \n
                " + "smithy.api#documentation": "

                Specifies the method used to compress the data on the volume. The compression\n type is NONE by default.

                \n
                  \n
                • \n

                  \n NONE - Doesn't compress the data on the volume.\n NONE is the default.

                  \n
                • \n
                • \n

                  \n ZSTD - Compresses the data in the volume using the Zstandard\n (ZSTD) compression algorithm. Compared to LZ4, Z-Standard provides a better\n compression ratio to minimize on-disk storage utilization.

                  \n
                • \n
                • \n

                  \n LZ4 - Compresses the data in the volume using the LZ4\n compression algorithm. Compared to Z-Standard, LZ4 is less compute-intensive\n and delivers higher write throughput speeds.

                  \n
                • \n
                " } }, "NfsExports": { @@ -7792,7 +7875,7 @@ } }, "traits": { - "smithy.api#documentation": "

                Used to specify changes to the OpenZFS configuration for the volume that you are\n updating.

                " + "smithy.api#documentation": "

                Used to specify changes to the OpenZFS configuration for the volume\n that you are updating.

                " } }, "com.amazonaws.fsx#UpdateSnapshot": { @@ -7815,7 +7898,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Updates the name of a snapshot.

                ", + "smithy.api#documentation": "

                Updates the name of an Amazon FSx for OpenZFS snapshot.

                ", "smithy.api#idempotent": {} } }, @@ -7831,7 +7914,7 @@ "Name": { "target": "com.amazonaws.fsx#SnapshotName", "traits": { - "smithy.api#documentation": "

                The name of the snapshot to update.

                ", + "smithy.api#documentation": "

                The name of the snapshot to update.

                ", "smithy.api#required": {} } }, @@ -8228,7 +8311,7 @@ } }, "traits": { - "smithy.api#documentation": "

                No Amazon FSx for NetApp ONTAP volumes were found based upon the supplied parameters.

                ", + "smithy.api#documentation": "

                No Amazon FSx volumes were found based upon the supplied parameters.

                ", "smithy.api#error": "client" } }, diff --git a/codegen/sdk-codegen/aws-models/gamelift.json b/codegen/sdk-codegen/aws-models/gamelift.json index 7a559c6bc5dd..9da83ebda132 100644 --- a/codegen/sdk-codegen/aws-models/gamelift.json +++ b/codegen/sdk-codegen/aws-models/gamelift.json @@ -2676,7 +2676,7 @@ } ], "traits": { - "smithy.api#documentation": "

                The GameLift service limits and current utilization for an Amazon Web Services Region or location.\n Instance limits control the number of instances, per instance type, per location, that\n your Amazon Web Services account can use. Learn more at Amazon EC2 Instance Types. The information\n returned includes the maximum number of instances allowed and your account's current\n usage across all fleets. This information can affect your ability to scale your GameLift\n fleets. You can request a limit increase for your account by using the Service limits page in the GameLift console.

                \n

                Instance limits differ based on whether the instances are deployed in a fleet's home\n Region or in a remote location. For remote locations, limits also differ based on the\n combination of home Region and remote location. All requests must specify an Amazon Web Services Region\n (either explicitly or as your default settings). To get the limit for a remote location,\n you must also specify the location. For example, the following requests all return\n different results:

                \n
                  \n
                • \n

                  Request specifies the Region ap-northeast-1 with no location. The\n result is limits and usage data on all instance types that are deployed in\n us-east-2, by all of the fleets that reside in\n ap-northeast-1.

                  \n
                • \n
                • \n

                  Request specifies the Region us-east-1 with location\n ca-central-1. The result is limits and usage data on all\n instance types that are deployed in ca-central-1, by all of the\n fleets that reside in us-east-2. These limits do not affect fleets\n in any other Regions that deploy instances to ca-central-1.

                  \n
                • \n
                • \n

                  Request specifies the Region eu-west-1 with location\n ca-central-1. The result is limits and usage data on all\n instance types that are deployed in ca-central-1, by all of the\n fleets that reside in eu-west-1.

                  \n
                • \n
                \n

                This operation can be used in the following ways:

                \n
                  \n
                • \n

                  To get limit and usage data for all instance types that are deployed in an Amazon Web Services\n Region by fleets that reside in the same Region: Specify the Region only.\n Optionally, specify a single instance type to retrieve information for.

                  \n
                • \n
                • \n

                  To get limit and usage data for all instance types that are deployed to a\n remote location by fleets that reside in different Amazon Web Services Region: Provide both the\n Amazon Web Services Region and the remote location. Optionally, specify a single instance type\n to retrieve information for.

                  \n
                • \n
                \n

                If successful, an EC2InstanceLimits object is returned with limits and\n usage data for each requested instance type.

                \n

                \n Learn more\n

                \n

                \n Setting up GameLift fleets\n

                \n \n

                \n Related actions\n

                \n

                \n CreateFleet | \n UpdateFleetCapacity | \n PutScalingPolicy | \n DescribeEC2InstanceLimits | \n DescribeFleetAttributes | \n DescribeFleetLocationAttributes | \n UpdateFleetAttributes | \n StopFleetActions | \n DeleteFleet | \n All APIs by task\n

                " + "smithy.api#documentation": "

                Retrieves the instance limits and current utilization for an Amazon Web Services Region or location.\n Instance limits control the number of instances, per instance type, per location, that\n your Amazon Web Services account can use. Learn more at Amazon EC2 Instance Types. The information\n returned includes the maximum number of instances allowed and your account's current\n usage across all fleets. This information can affect your ability to scale your GameLift\n fleets. You can request a limit increase for your account by using the Service limits page in the GameLift console.

                \n

                Instance limits differ based on whether the instances are deployed in a fleet's home\n Region or in a remote location. For remote locations, limits also differ based on the\n combination of home Region and remote location. All requests must specify an Amazon Web Services Region\n (either explicitly or as your default settings). To get the limit for a remote location,\n you must also specify the location. For example, the following requests all return\n different results:

                \n
                  \n
                • \n

                  Request specifies the Region ap-northeast-1 with no location. The\n result is limits and usage data on all instance types that are deployed in\n ap-northeast-1, by all of the fleets that reside in\n ap-northeast-1.

                  \n
                • \n
                • \n

                  Request specifies the Region us-east-1 with location\n ca-central-1. The result is limits and usage data on all\n instance types that are deployed in ca-central-1, by all of the\n fleets that reside in us-east-1. These limits do not affect fleets\n in any other Regions that deploy instances to ca-central-1.

                  \n
                • \n
                • \n

                  Request specifies the Region eu-west-1 with location\n ca-central-1. The result is limits and usage data on all\n instance types that are deployed in ca-central-1, by all of the\n fleets that reside in eu-west-1.

                  \n
                • \n
                \n

                This operation can be used in the following ways:

                \n
                  \n
                • \n

                  To get limit and usage data for all instance types that are deployed in an Amazon Web Services\n Region by fleets that reside in the same Region: Specify the Region only.\n Optionally, specify a single instance type to retrieve information for.

                  \n
                • \n
                • \n

                  To get limit and usage data for all instance types that are deployed to a\n remote location by fleets that reside in different Amazon Web Services Region: Provide both the\n Amazon Web Services Region and the remote location. Optionally, specify a single instance type\n to retrieve information for.

                  \n
                • \n
                \n

                If successful, an EC2InstanceLimits object is returned with limits and\n usage data for each requested instance type.

                \n

                \n Learn more\n

                \n

                \n Setting up GameLift fleets\n

                \n \n

                \n Related actions\n

                \n

                \n CreateFleet | \n UpdateFleetCapacity | \n PutScalingPolicy | \n DescribeEC2InstanceLimits | \n DescribeFleetAttributes | \n DescribeFleetLocationAttributes | \n UpdateFleetAttributes | \n StopFleetActions | \n DeleteFleet | \n All APIs by task\n

                " } }, "com.amazonaws.gamelift#DescribeEC2InstanceLimitsInput": { @@ -3990,7 +3990,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Retrieves one or more matchmaking tickets. Use this operation to retrieve ticket\n information, including--after a successful match is made--connection information for the\n resulting new game session.

                \n

                To request matchmaking tickets, provide a list of up to 10 ticket IDs. If the\n request is successful, a ticket object is returned for each requested ID that currently\n exists.

                \n

                This operation is not designed to be continually called to track matchmaking ticket\n status. This practice can cause you to exceed your API limit, which results in errors.\n Instead, as a best practice, set up an Amazon Simple Notification Service to receive notifications, and provide\n the topic ARN in the matchmaking configuration. Continuously poling ticket status with\n DescribeMatchmaking should only be used for games in development\n with low matchmaking usage.

                \n

                \n

                \n Learn more\n

                \n

                \n \n Add FlexMatch to a game client\n

                \n

                \n \n Set Up FlexMatch event notification\n

                \n

                \n Related actions\n

                \n

                \n StartMatchmaking | \n DescribeMatchmaking | \n StopMatchmaking | \n AcceptMatch | \n StartMatchBackfill | \n All APIs by task\n

                " + "smithy.api#documentation": "

                Retrieves one or more matchmaking tickets. Use this operation to retrieve ticket\n information, including--after a successful match is made--connection information for the\n resulting new game session.

                \n

                To request matchmaking tickets, provide a list of up to 10 ticket IDs. If the\n request is successful, a ticket object is returned for each requested ID that currently\n exists.

                \n

                This operation is not designed to be continually called to track matchmaking ticket\n status. This practice can cause you to exceed your API limit, which results in errors.\n Instead, as a best practice, set up an Amazon Simple Notification Service to receive notifications, and provide\n the topic ARN in the matchmaking configuration. Continuously polling ticket status with\n DescribeMatchmaking should only be used for games in development\n with low matchmaking usage.

                \n

                \n

                \n Learn more\n

                \n

                \n \n Add FlexMatch to a game client\n

                \n

                \n \n Set Up FlexMatch event notification\n

                \n

                \n Related actions\n

                \n

                \n StartMatchmaking | \n DescribeMatchmaking | \n StopMatchmaking | \n AcceptMatch | \n StartMatchBackfill | \n All APIs by task\n

                " } }, "com.amazonaws.gamelift#DescribeMatchmakingConfigurations": { diff --git a/codegen/sdk-codegen/aws-models/greengrassv2.json b/codegen/sdk-codegen/aws-models/greengrassv2.json index 7f9099cf860f..af4736897f3b 100644 --- a/codegen/sdk-codegen/aws-models/greengrassv2.json +++ b/codegen/sdk-codegen/aws-models/greengrassv2.json @@ -1101,7 +1101,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Creates a continuous deployment for a target, which is a Greengrass core device or group of core\n devices. When you add a new core device to a group of core devices that has a deployment, IoT Greengrass\n deploys that group's deployment to the new device.

                \n

                You can define one deployment for each target. When you create a new deployment for a\n target that has an existing deployment, you replace the previous deployment. IoT Greengrass applies the\n new deployment to the target devices.

                \n

                Every deployment has a revision number that indicates how many deployment revisions you\n define for a target. Use this operation to create a new revision of an existing deployment.\n This operation returns the revision number of the new deployment when you create it.

                \n

                For more information, see the Create deployments in the\n IoT Greengrass V2 Developer Guide.

                ", + "smithy.api#documentation": "

                Creates a continuous deployment for a target, which is a Greengrass core device or group of core\n devices. When you add a new core device to a group of core devices that has a deployment, IoT Greengrass\n deploys that group's deployment to the new device.

                \n

                You can define one deployment for each target. When you create a new deployment for a\n target that has an existing deployment, you replace the previous deployment. IoT Greengrass applies the\n new deployment to the target devices.

                \n

                Every deployment has a revision number that indicates how many deployment revisions you\n define for a target. Use this operation to create a new revision of an existing\n deployment.

                \n

                For more information, see the Create deployments in the\n IoT Greengrass V2 Developer Guide.

                ", "smithy.api#http": { "method": "POST", "uri": "/greengrass/v2/deployments", @@ -2023,7 +2023,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Retrieves connectivity information for a Greengrass core device.

                \n

                Connectivity information includes endpoints and ports where client devices\n can connect to an MQTT broker on the core device. When a client device\n calls the Greengrass discovery API,\n IoT Greengrass returns connectivity information for all of the core devices where the client device can\n connect. For more information, see Connect client devices to\n core devices in the IoT Greengrass Version 2 Developer Guide.

                ", + "smithy.api#documentation": "

                Retrieves connectivity information for a Greengrass core device.

                \n

                Connectivity information includes endpoints and ports where client devices\n can connect to an MQTT broker on the core device. When a client device\n calls the IoT Greengrass discovery API,\n IoT Greengrass returns connectivity information for all of the core devices where the client device can\n connect. For more information, see Connect client devices to\n core devices in the IoT Greengrass Version 2 Developer Guide.

                ", "smithy.api#http": { "method": "GET", "uri": "/greengrass/things/{thingName}/connectivityInfo", @@ -4347,7 +4347,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Updates connectivity information for a Greengrass core device.

                \n

                Connectivity information includes endpoints and ports where client devices\n can connect to an MQTT broker on the core device. When a client device\n calls the Greengrass discovery API,\n IoT Greengrass returns connectivity information for all of the core devices where the client device can\n connect. For more information, see Connect client devices to\n core devices in the IoT Greengrass Version 2 Developer Guide.

                ", + "smithy.api#documentation": "

                Updates connectivity information for a Greengrass core device.

                \n

                Connectivity information includes endpoints and ports where client devices\n can connect to an MQTT broker on the core device. When a client device\n calls the IoT Greengrass discovery API,\n IoT Greengrass returns connectivity information for all of the core devices where the client device can\n connect. For more information, see Connect client devices to\n core devices in the IoT Greengrass Version 2 Developer Guide.

                ", "smithy.api#http": { "method": "PUT", "uri": "/greengrass/things/{thingName}/connectivityInfo", diff --git a/codegen/sdk-codegen/aws-models/kafkaconnect.json b/codegen/sdk-codegen/aws-models/kafkaconnect.json index 085180443202..ccda05f60d96 100644 --- a/codegen/sdk-codegen/aws-models/kafkaconnect.json +++ b/codegen/sdk-codegen/aws-models/kafkaconnect.json @@ -14,7 +14,7 @@ "vpc": { "target": "com.amazonaws.kafkaconnect#Vpc", "traits": { - "smithy.api#documentation": "

                Details of an Amazon VPC which has network connectivity to the Apache Kafka cluster.

                ", + "smithy.api#documentation": "

                Details of an Amazon VPC which has network connectivity to the Apache Kafka\n cluster.

                ", "smithy.api#required": {} } } @@ -35,7 +35,7 @@ "vpc": { "target": "com.amazonaws.kafkaconnect#VpcDescription", "traits": { - "smithy.api#documentation": "

                Details of an Amazon VPC which has network connectivity to the Apache Kafka cluster.

                " + "smithy.api#documentation": "

                Details of an Amazon VPC which has network connectivity to the Apache Kafka\n cluster.

                " } } }, @@ -56,7 +56,7 @@ "mcuCount": { "target": "com.amazonaws.kafkaconnect#__integerMin1Max8", "traits": { - "smithy.api#documentation": "

                The number of microcontroller units (MCUs) allocated to each connector worker. The valid values are 1,2,4,8.

                ", + "smithy.api#documentation": "

                The number of microcontroller units (MCUs) allocated to each connector worker. The valid\n values are 1,2,4,8.

                ", "smithy.api#required": {} } }, @@ -96,7 +96,7 @@ "mcuCount": { "target": "com.amazonaws.kafkaconnect#__integer", "traits": { - "smithy.api#documentation": "

                The number of microcontroller units (MCUs) allocated to each connector worker. The valid values are 1,2,4,8.

                " + "smithy.api#documentation": "

                The number of microcontroller units (MCUs) allocated to each connector worker. The valid\n values are 1,2,4,8.

                " } }, "minWorkerCount": { @@ -135,7 +135,7 @@ "mcuCount": { "target": "com.amazonaws.kafkaconnect#__integerMin1Max8", "traits": { - "smithy.api#documentation": "

                The target number of microcontroller units (MCUs) allocated to each connector worker. The valid values are 1,2,4,8.

                ", + "smithy.api#documentation": "

                The target number of microcontroller units (MCUs) allocated to each connector worker.\n The valid values are 1,2,4,8.

                ", "smithy.api#required": {} } }, @@ -173,7 +173,7 @@ } }, "traits": { - "smithy.api#documentation": "

                HTTP Status Code 400: Bad request due to incorrect input. Correct your request and then retry it.

                ", + "smithy.api#documentation": "

                HTTP Status Code 400: Bad request due to incorrect input. Correct your request and then\n retry it.

                ", "smithy.api#error": "client", "smithy.api#httpError": 400 } @@ -195,7 +195,7 @@ } }, "traits": { - "smithy.api#documentation": "

                Information about the capacity of the connector, whether it is auto scaled or provisioned.

                " + "smithy.api#documentation": "

                Information about the capacity of the connector, whether it is auto scaled or\n provisioned.

                " } }, "com.amazonaws.kafkaconnect#CapacityDescription": { @@ -235,7 +235,7 @@ } }, "traits": { - "smithy.api#documentation": "

                The target capacity for the connector. The capacity can be auto scaled or provisioned.

                " + "smithy.api#documentation": "

                The target capacity for the connector. The capacity can be auto scaled or\n provisioned.

                " } }, "com.amazonaws.kafkaconnect#CloudWatchLogsLogDelivery": { @@ -287,7 +287,7 @@ } }, "traits": { - "smithy.api#documentation": "

                HTTP Status Code 409: Conflict. A resource with this name already exists. Retry your request with another name.

                ", + "smithy.api#documentation": "

                HTTP Status Code 409: Conflict. A resource with this name already exists. Retry your\n request with another name.

                ", "smithy.api#error": "client", "smithy.api#httpError": 409 } @@ -373,7 +373,7 @@ "kafkaClusterClientAuthentication": { "target": "com.amazonaws.kafkaconnect#KafkaClusterClientAuthenticationDescription", "traits": { - "smithy.api#documentation": "

                The type of client authentication used to connect to the Apache Kafka cluster. The value is NONE when no client authentication is used.

                " + "smithy.api#documentation": "

                The type of client authentication used to connect to the Apache Kafka cluster. The value\n is NONE when no client authentication is used.

                " } }, "kafkaClusterEncryptionInTransit": { @@ -385,7 +385,7 @@ "kafkaConnectVersion": { "target": "com.amazonaws.kafkaconnect#__string", "traits": { - "smithy.api#documentation": "

                The version of Kafka Connect. It has to be compatible with both the Apache Kafka cluster's version and the plugins.

                " + "smithy.api#documentation": "

                The version of Kafka Connect. It has to be compatible with both the Apache Kafka\n cluster's version and the plugins.

                " } }, "logDelivery": { @@ -403,7 +403,7 @@ "serviceExecutionRoleArn": { "target": "com.amazonaws.kafkaconnect#__string", "traits": { - "smithy.api#documentation": "

                The Amazon Resource Name (ARN) of the IAM role used by the connector to access Amazon Web Services resources.

                " + "smithy.api#documentation": "

                The Amazon Resource Name (ARN) of the IAM role used by the connector to access Amazon\n Web Services resources.

                " } }, "workerConfiguration": { @@ -466,7 +466,7 @@ "capacity": { "target": "com.amazonaws.kafkaconnect#Capacity", "traits": { - "smithy.api#documentation": "

                Information about the capacity allocated to the connector. Exactly one of the two properties must be specified.

                ", + "smithy.api#documentation": "

                Information about the capacity allocated to the connector. Exactly one of the two\n properties must be specified.

                ", "smithy.api#required": {} } }, @@ -474,7 +474,8 @@ "target": "com.amazonaws.kafkaconnect#__mapOf__string", "traits": { "smithy.api#documentation": "

                A map of keys to values that represent the configuration for the connector.

                ", - "smithy.api#required": {} + "smithy.api#required": {}, + "smithy.api#sensitive": {} } }, "connectorDescription": { @@ -514,7 +515,7 @@ "kafkaConnectVersion": { "target": "com.amazonaws.kafkaconnect#__string", "traits": { - "smithy.api#documentation": "

                The version of Kafka Connect. It has to be compatible with both the Apache Kafka cluster's version and the plugins.

                ", + "smithy.api#documentation": "

                The version of Kafka Connect. It has to be compatible with both the Apache Kafka\n cluster's version and the plugins.

                ", "smithy.api#required": {} } }, @@ -534,7 +535,7 @@ "serviceExecutionRoleArn": { "target": "com.amazonaws.kafkaconnect#__string", "traits": { - "smithy.api#documentation": "

                The Amazon Resource Name (ARN) of the IAM role used by the connector to access the Amazon Web Services resources that it needs. The types of resources depends on the logic of the connector. For example, a connector that has Amazon S3 as a destination must have permissions that allow it to write to the S3 destination bucket.

                ", + "smithy.api#documentation": "

                The Amazon Resource Name (ARN) of the IAM role used by the connector to access the\n Amazon Web Services resources that it needs. The types of resources depends on the logic of\n the connector. For example, a connector that has Amazon S3 as a destination must have\n permissions that allow it to write to the S3 destination bucket.

                ", "smithy.api#required": {} } }, @@ -736,7 +737,8 @@ "target": "com.amazonaws.kafkaconnect#__string", "traits": { "smithy.api#documentation": "

                Base64 encoded contents of connect-distributed.properties file.

                ", - "smithy.api#required": {} + "smithy.api#required": {}, + "smithy.api#sensitive": {} } } } @@ -789,7 +791,7 @@ } }, "traits": { - "smithy.api#documentation": "

                A plugin is an AWS resource that contains the code that defines a connector's logic.

                " + "smithy.api#documentation": "

                A plugin is an AWS resource that contains the code that defines a connector's\n logic.

                " } }, "com.amazonaws.kafkaconnect#CustomPluginContentType": { @@ -833,7 +835,7 @@ "fileMd5": { "target": "com.amazonaws.kafkaconnect#__string", "traits": { - "smithy.api#documentation": "

                The hex-encoded MD5 checksum of the custom plugin file. You can use it to validate the file.

                " + "smithy.api#documentation": "

                The hex-encoded MD5 checksum of the custom plugin file. You can use it to validate the\n file.

                " } }, "fileSize": { @@ -853,7 +855,7 @@ "s3Location": { "target": "com.amazonaws.kafkaconnect#S3Location", "traits": { - "smithy.api#documentation": "

                The S3 bucket Amazon Resource Name (ARN), file key, and object version of the plugin file stored in Amazon S3.

                ", + "smithy.api#documentation": "

                The S3 bucket Amazon Resource Name (ARN), file key, and object version of the plugin\n file stored in Amazon S3.

                ", "smithy.api#required": {} } } @@ -868,7 +870,7 @@ "s3Location": { "target": "com.amazonaws.kafkaconnect#S3LocationDescription", "traits": { - "smithy.api#documentation": "

                The S3 bucket Amazon Resource Name (ARN), file key, and object version of the plugin file stored in Amazon S3.

                " + "smithy.api#documentation": "

                The S3 bucket Amazon Resource Name (ARN), file key, and object version of the plugin\n file stored in Amazon S3.

                " } } }, @@ -1073,6 +1075,77 @@ } } }, + "com.amazonaws.kafkaconnect#DeleteCustomPlugin": { + "type": "operation", + "input": { + "target": "com.amazonaws.kafkaconnect#DeleteCustomPluginRequest" + }, + "output": { + "target": "com.amazonaws.kafkaconnect#DeleteCustomPluginResponse" + }, + "errors": [ + { + "target": "com.amazonaws.kafkaconnect#BadRequestException" + }, + { + "target": "com.amazonaws.kafkaconnect#ForbiddenException" + }, + { + "target": "com.amazonaws.kafkaconnect#InternalServerErrorException" + }, + { + "target": "com.amazonaws.kafkaconnect#NotFoundException" + }, + { + "target": "com.amazonaws.kafkaconnect#ServiceUnavailableException" + }, + { + "target": "com.amazonaws.kafkaconnect#TooManyRequestsException" + }, + { + "target": "com.amazonaws.kafkaconnect#UnauthorizedException" + } + ], + "traits": { + "smithy.api#documentation": "

                Deletes a custom plugin.

                ", + "smithy.api#http": { + "method": "DELETE", + "uri": "/v1/custom-plugins/{customPluginArn}", + "code": 200 + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.kafkaconnect#DeleteCustomPluginRequest": { + "type": "structure", + "members": { + "customPluginArn": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

                The Amazon Resource Name (ARN) of the custom plugin that you want to delete.

                ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.kafkaconnect#DeleteCustomPluginResponse": { + "type": "structure", + "members": { + "customPluginArn": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

                The Amazon Resource Name (ARN) of the custom plugin that you requested to delete.

                " + } + }, + "customPluginState": { + "target": "com.amazonaws.kafkaconnect#CustomPluginState", + "traits": { + "smithy.api#documentation": "

                The state of the custom plugin.

                " + } + } + } + }, "com.amazonaws.kafkaconnect#DescribeConnector": { "type": "operation", "input": { @@ -1133,7 +1206,7 @@ "capacity": { "target": "com.amazonaws.kafkaconnect#CapacityDescription", "traits": { - "smithy.api#documentation": "

                Information about the capacity of the connector, whether it is auto scaled or provisioned.

                " + "smithy.api#documentation": "

                Information about the capacity of the connector, whether it is auto scaled or\n provisioned.

                " } }, "connectorArn": { @@ -1145,7 +1218,8 @@ "connectorConfiguration": { "target": "com.amazonaws.kafkaconnect#__mapOf__string", "traits": { - "smithy.api#documentation": "

                A map of keys to values that represent the configuration for the connector.

                " + "smithy.api#documentation": "

                A map of keys to values that represent the configuration for the connector.

                ", + "smithy.api#sensitive": {} } }, "connectorDescription": { @@ -1187,7 +1261,7 @@ "kafkaClusterClientAuthentication": { "target": "com.amazonaws.kafkaconnect#KafkaClusterClientAuthenticationDescription", "traits": { - "smithy.api#documentation": "

                The type of client authentication used to connect to the Apache Kafka cluster. The value is NONE when no client authentication is used.

                " + "smithy.api#documentation": "

                The type of client authentication used to connect to the Apache Kafka cluster. The value\n is NONE when no client authentication is used.

                " } }, "kafkaClusterEncryptionInTransit": { @@ -1199,7 +1273,7 @@ "kafkaConnectVersion": { "target": "com.amazonaws.kafkaconnect#__string", "traits": { - "smithy.api#documentation": "

                The version of Kafka Connect. It has to be compatible with both the Apache Kafka cluster's version and the plugins.

                " + "smithy.api#documentation": "

                The version of Kafka Connect. It has to be compatible with both the Apache Kafka\n cluster's version and the plugins.

                " } }, "logDelivery": { @@ -1217,7 +1291,7 @@ "serviceExecutionRoleArn": { "target": "com.amazonaws.kafkaconnect#__string", "traits": { - "smithy.api#documentation": "

                The Amazon Resource Name (ARN) of the IAM role used by the connector to access Amazon Web Services resources.

                " + "smithy.api#documentation": "

                The Amazon Resource Name (ARN) of the IAM role used by the connector to access Amazon\n Web Services resources.

                " } }, "workerConfiguration": { @@ -1225,6 +1299,12 @@ "traits": { "smithy.api#documentation": "

                Specifies which worker configuration was used for the connector.

                " } + }, + "stateDescription": { + "target": "com.amazonaws.kafkaconnect#StateDescription", + "traits": { + "smithy.api#documentation": "

                Details about the state of a connector.

                " + } } } }, @@ -1312,7 +1392,7 @@ "latestRevision": { "target": "com.amazonaws.kafkaconnect#CustomPluginRevisionSummary", "traits": { - "smithy.api#documentation": "

                The latest successfully created revision of the custom plugin. If there are no successfully created revisions, this field will be absent.

                " + "smithy.api#documentation": "

                The latest successfully created revision of the custom plugin. If there are no\n successfully created revisions, this field will be absent.

                " } }, "name": { @@ -1320,6 +1400,12 @@ "traits": { "smithy.api#documentation": "

                The name of the custom plugin.

                " } + }, + "stateDescription": { + "target": "com.amazonaws.kafkaconnect#StateDescription", + "traits": { + "smithy.api#documentation": "

                Details about the state of a custom plugin.

                " + } } } }, @@ -1370,7 +1456,7 @@ "workerConfigurationArn": { "target": "com.amazonaws.kafkaconnect#__string", "traits": { - "smithy.api#documentation": "

                The Amazon Resource Name (ARN) of the worker configuration that you want to get information about.

                ", + "smithy.api#documentation": "

                The Amazon Resource Name (ARN) of the worker configuration that you want to get\n information about.

                ", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -1418,7 +1504,7 @@ "deliveryStream": { "target": "com.amazonaws.kafkaconnect#__string", "traits": { - "smithy.api#documentation": "

                The name of the Kinesis Data Firehose delivery stream that is the destination for log delivery.

                " + "smithy.api#documentation": "

                The name of the Kinesis Data Firehose delivery stream that is the destination for log\n delivery.

                " } }, "enabled": { @@ -1439,7 +1525,7 @@ "deliveryStream": { "target": "com.amazonaws.kafkaconnect#__string", "traits": { - "smithy.api#documentation": "

                The name of the Kinesis Data Firehose delivery stream that is the destination for log delivery.

                " + "smithy.api#documentation": "

                The name of the Kinesis Data Firehose delivery stream that is the destination for log\n delivery.

                " } }, "enabled": { @@ -1450,7 +1536,7 @@ } }, "traits": { - "smithy.api#documentation": "

                A description of the settings for delivering logs to Amazon Kinesis Data Firehose.

                " + "smithy.api#documentation": "

                A description of the settings for delivering logs to Amazon Kinesis Data\n Firehose.

                " } }, "com.amazonaws.kafkaconnect#ForbiddenException": { @@ -1461,7 +1547,7 @@ } }, "traits": { - "smithy.api#documentation": "

                HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request.

                ", + "smithy.api#documentation": "

                HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your\n request.

                ", "smithy.api#error": "client", "smithy.api#httpError": 403 } @@ -1474,7 +1560,7 @@ } }, "traits": { - "smithy.api#documentation": "

                HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue.

                ", + "smithy.api#documentation": "

                HTTP Status Code 500: Unexpected internal server error. Retrying your request might\n resolve the issue.

                ", "smithy.api#error": "server", "smithy.api#httpError": 500 } @@ -1500,13 +1586,13 @@ "authenticationType": { "target": "com.amazonaws.kafkaconnect#KafkaClusterClientAuthenticationType", "traits": { - "smithy.api#documentation": "

                The type of client authentication used to connect to the Apache Kafka cluster. Value NONE means that no client authentication is used.

                ", + "smithy.api#documentation": "

                The type of client authentication used to connect to the Apache Kafka cluster. Value\n NONE means that no client authentication is used.

                ", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

                The client authentication information used in order to authenticate with the Apache Kafka cluster.

                " + "smithy.api#documentation": "

                The client authentication information used in order to authenticate with the Apache\n Kafka cluster.

                " } }, "com.amazonaws.kafkaconnect#KafkaClusterClientAuthenticationDescription": { @@ -1515,12 +1601,12 @@ "authenticationType": { "target": "com.amazonaws.kafkaconnect#KafkaClusterClientAuthenticationType", "traits": { - "smithy.api#documentation": "

                The type of client authentication used to connect to the Apache Kafka cluster. Value NONE means that no client authentication is used.

                " + "smithy.api#documentation": "

                The type of client authentication used to connect to the Apache Kafka cluster. Value\n NONE means that no client authentication is used.

                " } } }, "traits": { - "smithy.api#documentation": "

                The client authentication information used in order to authenticate with the Apache Kafka cluster.

                " + "smithy.api#documentation": "

                The client authentication information used in order to authenticate with the Apache\n Kafka cluster.

                " } }, "com.amazonaws.kafkaconnect#KafkaClusterClientAuthenticationType": { @@ -1598,6 +1684,25 @@ }, "com.amazonaws.kafkaconnect#KafkaConnect": { "type": "service", + "traits": { + "aws.api#service": { + "sdkId": "KafkaConnect", + "name": "kafkaconnect", + "arnNamespace": "kafkaconnect", + "cloudFormationName": "KafkaConnect", + "cloudTrailEventSource": "kafkaconnect.amazonaws.com", + "endpointPrefix": "kafkaconnect" + }, + "aws.auth#sigv4": { + "name": "kafkaconnect" + }, + "aws.protocols#restJson1": {}, + "smithy.api#cors": { + "additionalAllowedHeaders": ["X-Api-Key", "Content-Type", "Content-Length"] + }, + "smithy.api#documentation": "

                ", + "smithy.api#title": "Managed Streaming for Kafka Connect" + }, "version": "2021-09-14", "operations": [ { @@ -1612,6 +1717,9 @@ { "target": "com.amazonaws.kafkaconnect#DeleteConnector" }, + { + "target": "com.amazonaws.kafkaconnect#DeleteCustomPlugin" + }, { "target": "com.amazonaws.kafkaconnect#DescribeConnector" }, @@ -1633,25 +1741,7 @@ { "target": "com.amazonaws.kafkaconnect#UpdateConnector" } - ], - "traits": { - "aws.api#service": { - "sdkId": "KafkaConnect", - "arnNamespace": "kafkaconnect", - "cloudFormationName": "KafkaConnect", - "cloudTrailEventSource": "kafkaconnect.amazonaws.com", - "endpointPrefix": "kafkaconnect" - }, - "aws.auth#sigv4": { - "name": "kafkaconnect" - }, - "aws.protocols#restJson1": {}, - "smithy.api#cors": { - "additionalAllowedHeaders": ["X-Api-Key"] - }, - "smithy.api#documentation": "

                ", - "smithy.api#title": "Managed Streaming for Kafka Connect" - } + ] }, "com.amazonaws.kafkaconnect#ListConnectors": { "type": "operation", @@ -1685,7 +1775,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Returns a list of all the connectors in this account and Region. The list is limited to connectors whose name starts with the specified prefix. The response also includes a description of each of the listed connectors.

                ", + "smithy.api#documentation": "

                Returns a list of all the connectors in this account and Region. The list is limited to\n connectors whose name starts with the specified prefix. The response also includes a\n description of each of the listed connectors.

                ", "smithy.api#http": { "method": "GET", "uri": "/v1/connectors", @@ -1694,8 +1784,8 @@ "smithy.api#paginated": { "inputToken": "nextToken", "outputToken": "nextToken", - "items": "connectors", - "pageSize": "maxResults" + "pageSize": "maxResults", + "items": "connectors" }, "smithy.api#readonly": {} } @@ -1720,7 +1810,7 @@ "nextToken": { "target": "com.amazonaws.kafkaconnect#__string", "traits": { - "smithy.api#documentation": "

                If the response of a ListConnectors operation is truncated, it will include a NextToken. Send this NextToken in a subsequent request to continue listing from where the previous operation left off.

                ", + "smithy.api#documentation": "

                If the response of a ListConnectors operation is truncated, it will include a NextToken.\n Send this NextToken in a subsequent request to continue listing from where the previous\n operation left off.

                ", "smithy.api#httpQuery": "nextToken" } } @@ -1738,7 +1828,7 @@ "nextToken": { "target": "com.amazonaws.kafkaconnect#__string", "traits": { - "smithy.api#documentation": "

                If the response of a ListConnectors operation is truncated, it will include a NextToken. Send this NextToken in a subsequent request to continue listing from where it left off.

                " + "smithy.api#documentation": "

                If the response of a ListConnectors operation is truncated, it will include a NextToken.\n Send this NextToken in a subsequent request to continue listing from where it left\n off.

                " } } } @@ -1784,8 +1874,8 @@ "smithy.api#paginated": { "inputToken": "nextToken", "outputToken": "nextToken", - "items": "customPlugins", - "pageSize": "maxResults" + "pageSize": "maxResults", + "items": "customPlugins" }, "smithy.api#readonly": {} } @@ -1803,7 +1893,7 @@ "nextToken": { "target": "com.amazonaws.kafkaconnect#__string", "traits": { - "smithy.api#documentation": "

                If the response of a ListCustomPlugins operation is truncated, it will include a NextToken. Send this NextToken in a subsequent request to continue listing from where the previous operation left off.

                ", + "smithy.api#documentation": "

                If the response of a ListCustomPlugins operation is truncated, it will include a\n NextToken. Send this NextToken in a subsequent request to continue listing from where the\n previous operation left off.

                ", "smithy.api#httpQuery": "nextToken" } } @@ -1821,7 +1911,7 @@ "nextToken": { "target": "com.amazonaws.kafkaconnect#__string", "traits": { - "smithy.api#documentation": "

                If the response of a ListCustomPlugins operation is truncated, it will include a NextToken. Send this NextToken in a subsequent request to continue listing from where the previous operation left off.

                " + "smithy.api#documentation": "

                If the response of a ListCustomPlugins operation is truncated, it will include a\n NextToken. Send this NextToken in a subsequent request to continue listing from where the\n previous operation left off.

                " } } } @@ -1867,8 +1957,8 @@ "smithy.api#paginated": { "inputToken": "nextToken", "outputToken": "nextToken", - "items": "workerConfigurations", - "pageSize": "maxResults" + "pageSize": "maxResults", + "items": "workerConfigurations" }, "smithy.api#readonly": {} } @@ -1886,7 +1976,7 @@ "nextToken": { "target": "com.amazonaws.kafkaconnect#__string", "traits": { - "smithy.api#documentation": "

                If the response of a ListWorkerConfigurations operation is truncated, it will include a NextToken. Send this NextToken in a subsequent request to continue listing from where the previous operation left off.

                ", + "smithy.api#documentation": "

                If the response of a ListWorkerConfigurations operation is truncated, it will include a\n NextToken. Send this NextToken in a subsequent request to continue listing from where the\n previous operation left off.

                ", "smithy.api#httpQuery": "nextToken" } } @@ -1898,7 +1988,7 @@ "nextToken": { "target": "com.amazonaws.kafkaconnect#__string", "traits": { - "smithy.api#documentation": "

                If the response of a ListWorkerConfigurations operation is truncated, it will include a NextToken. Send this NextToken in a subsequent request to continue listing from where the previous operation left off.

                " + "smithy.api#documentation": "

                If the response of a ListWorkerConfigurations operation is truncated, it will include a\n NextToken. Send this NextToken in a subsequent request to continue listing from where the\n previous operation left off.

                " } }, "workerConfigurations": { @@ -1915,7 +2005,7 @@ "workerLogDelivery": { "target": "com.amazonaws.kafkaconnect#WorkerLogDelivery", "traits": { - "smithy.api#documentation": "

                The workers can send worker logs to different destination types. This configuration specifies the details of these destinations.

                ", + "smithy.api#documentation": "

                The workers can send worker logs to different destination types. This configuration\n specifies the details of these destinations.

                ", "smithy.api#required": {} } } @@ -1930,7 +2020,7 @@ "workerLogDelivery": { "target": "com.amazonaws.kafkaconnect#WorkerLogDeliveryDescription", "traits": { - "smithy.api#documentation": "

                The workers can send worker logs to different destination types. This configuration specifies the details of these destinations.

                " + "smithy.api#documentation": "

                The workers can send worker logs to different destination types. This configuration\n specifies the details of these destinations.

                " } } }, @@ -1955,7 +2045,7 @@ } }, "traits": { - "smithy.api#documentation": "

                HTTP Status Code 404: Resource not found due to incorrect input. Correct your request and then retry it.

                ", + "smithy.api#documentation": "

                HTTP Status Code 404: Resource not found due to incorrect input. Correct your request\n and then retry it.

                ", "smithy.api#error": "client", "smithy.api#httpError": 404 } @@ -1972,7 +2062,7 @@ } }, "traits": { - "smithy.api#documentation": "

                A plugin is an AWS resource that contains the code that defines your connector logic.

                " + "smithy.api#documentation": "

                A plugin is an AWS resource that contains the code that defines your connector logic.\n

                " } }, "com.amazonaws.kafkaconnect#PluginDescription": { @@ -1995,7 +2085,7 @@ "mcuCount": { "target": "com.amazonaws.kafkaconnect#__integerMin1Max8", "traits": { - "smithy.api#documentation": "

                The number of microcontroller units (MCUs) allocated to each connector worker. The valid values are 1,2,4,8.

                ", + "smithy.api#documentation": "

                The number of microcontroller units (MCUs) allocated to each connector worker. The valid\n values are 1,2,4,8.

                ", "smithy.api#required": {} } }, @@ -2017,7 +2107,7 @@ "mcuCount": { "target": "com.amazonaws.kafkaconnect#__integer", "traits": { - "smithy.api#documentation": "

                The number of microcontroller units (MCUs) allocated to each connector worker. The valid values are 1,2,4,8.

                " + "smithy.api#documentation": "

                The number of microcontroller units (MCUs) allocated to each connector worker. The valid\n values are 1,2,4,8.

                " } }, "workerCount": { @@ -2037,7 +2127,7 @@ "mcuCount": { "target": "com.amazonaws.kafkaconnect#__integerMin1Max8", "traits": { - "smithy.api#documentation": "

                The number of microcontroller units (MCUs) allocated to each connector worker. The valid values are 1,2,4,8.

                ", + "smithy.api#documentation": "

                The number of microcontroller units (MCUs) allocated to each connector worker. The valid\n values are 1,2,4,8.

                ", "smithy.api#required": {} } }, @@ -2166,7 +2256,7 @@ "cpuUtilizationPercentage": { "target": "com.amazonaws.kafkaconnect#__integerMin1Max100", "traits": { - "smithy.api#documentation": "

                Specifies the CPU utilization percentage threshold at which you want connector scale in to be triggered.

                ", + "smithy.api#documentation": "

                Specifies the CPU utilization percentage threshold at which you want connector scale in\n to be triggered.

                ", "smithy.api#required": {} } } @@ -2181,7 +2271,7 @@ "cpuUtilizationPercentage": { "target": "com.amazonaws.kafkaconnect#__integer", "traits": { - "smithy.api#documentation": "

                Specifies the CPU utilization percentage threshold at which you want connector scale in to be triggered.

                " + "smithy.api#documentation": "

                Specifies the CPU utilization percentage threshold at which you want connector scale in\n to be triggered.

                " } } }, @@ -2195,7 +2285,7 @@ "cpuUtilizationPercentage": { "target": "com.amazonaws.kafkaconnect#__integerMin1Max100", "traits": { - "smithy.api#documentation": "

                The target CPU utilization percentage threshold at which you want connector scale in to be triggered.

                ", + "smithy.api#documentation": "

                The target CPU utilization percentage threshold at which you want connector scale in to\n be triggered.

                ", "smithy.api#required": {} } } @@ -2210,7 +2300,7 @@ "cpuUtilizationPercentage": { "target": "com.amazonaws.kafkaconnect#__integerMin1Max100", "traits": { - "smithy.api#documentation": "

                The CPU utilization percentage threshold at which you want connector scale out to be triggered.

                ", + "smithy.api#documentation": "

                The CPU utilization percentage threshold at which you want connector scale out to be\n triggered.

                ", "smithy.api#required": {} } } @@ -2225,7 +2315,7 @@ "cpuUtilizationPercentage": { "target": "com.amazonaws.kafkaconnect#__integer", "traits": { - "smithy.api#documentation": "

                The CPU utilization percentage threshold at which you want connector scale out to be triggered.

                " + "smithy.api#documentation": "

                The CPU utilization percentage threshold at which you want connector scale out to be\n triggered.

                " } } }, @@ -2239,7 +2329,7 @@ "cpuUtilizationPercentage": { "target": "com.amazonaws.kafkaconnect#__integerMin1Max100", "traits": { - "smithy.api#documentation": "

                The target CPU utilization percentage threshold at which you want connector scale out to be triggered.

                ", + "smithy.api#documentation": "

                The target CPU utilization percentage threshold at which you want connector scale out to\n be triggered.

                ", "smithy.api#required": {} } } @@ -2256,11 +2346,31 @@ } }, "traits": { - "smithy.api#documentation": "

                HTTP Status Code 503: Service Unavailable. Retrying your request in some time might resolve the issue.

                ", + "smithy.api#documentation": "

                HTTP Status Code 503: Service Unavailable. Retrying your request in some time might\n resolve the issue.

                ", "smithy.api#error": "server", "smithy.api#httpError": 503 } }, + "com.amazonaws.kafkaconnect#StateDescription": { + "type": "structure", + "members": { + "code": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

                A code that describes the state of a resource.

                " + } + }, + "message": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

                A message that describes the state of a resource.

                " + } + } + }, + "traits": { + "smithy.api#documentation": "

                Details about the state of a resource.

                " + } + }, "com.amazonaws.kafkaconnect#TooManyRequestsException": { "type": "structure", "members": { @@ -2282,7 +2392,7 @@ } }, "traits": { - "smithy.api#documentation": "

                HTTP Status Code 401: Unauthorized request. The provided credentials couldn't be validated.

                ", + "smithy.api#documentation": "

                HTTP Status Code 401: Unauthorized request. The provided credentials couldn't be\n validated.

                ", "smithy.api#error": "client", "smithy.api#httpError": 401 } @@ -2433,7 +2543,7 @@ } }, "traits": { - "smithy.api#documentation": "

                The configuration of the workers, which are the processes that run the connector logic.

                " + "smithy.api#documentation": "

                The configuration of the workers, which are the processes that run the connector\n logic.

                " } }, "com.amazonaws.kafkaconnect#WorkerConfigurationDescription": { @@ -2474,7 +2584,8 @@ "propertiesFileContent": { "target": "com.amazonaws.kafkaconnect#__string", "traits": { - "smithy.api#documentation": "

                Base64 encoded contents of the connect-distributed.properties file.

                " + "smithy.api#documentation": "

                Base64 encoded contents of the connect-distributed.properties file.

                ", + "smithy.api#sensitive": {} } }, "revision": { @@ -2575,7 +2686,7 @@ } }, "traits": { - "smithy.api#documentation": "

                Workers can send worker logs to different destination types. This configuration specifies the details of these destinations.

                " + "smithy.api#documentation": "

                Workers can send worker logs to different destination types. This configuration\n specifies the details of these destinations.

                " } }, "com.amazonaws.kafkaconnect#WorkerLogDeliveryDescription": { @@ -2601,7 +2712,7 @@ } }, "traits": { - "smithy.api#documentation": "

                Workers can send worker logs to different destination types. This configuration specifies the details of these destinations.

                " + "smithy.api#documentation": "

                Workers can send worker logs to different destination types. This configuration\n specifies the details of these destinations.

                " } }, "com.amazonaws.kafkaconnect#__boolean": { diff --git a/codegen/sdk-codegen/aws-models/kendra.json b/codegen/sdk-codegen/aws-models/kendra.json index a1421a3d5ad8..3d21ae0aef9a 100644 --- a/codegen/sdk-codegen/aws-models/kendra.json +++ b/codegen/sdk-codegen/aws-models/kendra.json @@ -1438,7 +1438,7 @@ } }, "traits": { - "smithy.api#documentation": "

                Provides configuration information for data sources that connect\n to Confluence.

                " + "smithy.api#documentation": "

                Provides the configuration information to connect to Confluence \n as your data source.

                " } }, "com.amazonaws.kendra#ConfluencePageConfiguration": { @@ -1723,7 +1723,7 @@ } }, "traits": { - "smithy.api#documentation": "

                Provides the information necessary to connect to a\n database.

                " + "smithy.api#documentation": "

                Provides the configuration information that's required to connect to a\n database.

                " } }, "com.amazonaws.kendra#ContentSourceConfiguration": { @@ -1749,7 +1749,7 @@ } }, "traits": { - "smithy.api#documentation": "

                Configuration information for your content sources, such as data sources, \n FAQs, and content indexed directly via BatchPutDocument.

                " + "smithy.api#documentation": "

                Provides the configuration information for your content sources, such as data sources, \n FAQs, and content indexed directly via BatchPutDocument.

                " } }, "com.amazonaws.kendra#ContentType": { @@ -1779,6 +1779,44 @@ ] } }, + "com.amazonaws.kendra#Correction": { + "type": "structure", + "members": { + "BeginOffset": { + "target": "com.amazonaws.kendra#Integer", + "traits": { + "smithy.api#documentation": "

                The zero-based location in the response string or text where \n the corrected word starts.

                " + } + }, + "EndOffset": { + "target": "com.amazonaws.kendra#Integer", + "traits": { + "smithy.api#documentation": "

                The zero-based location in the response string or text where \n the corrected word ends.

                " + } + }, + "Term": { + "target": "com.amazonaws.kendra#String", + "traits": { + "smithy.api#documentation": "

                The string or text of a misspelled word in a query.

                " + } + }, + "CorrectedTerm": { + "target": "com.amazonaws.kendra#String", + "traits": { + "smithy.api#documentation": "

                The string or text of a corrected misspelled word in a query.

                " + } + } + }, + "traits": { + "smithy.api#documentation": "

                A corrected misspelled word in a query.

                " + } + }, + "com.amazonaws.kendra#CorrectionList": { + "type": "list", + "member": { + "target": "com.amazonaws.kendra#Correction" + } + }, "com.amazonaws.kendra#CrawlDepth": { "type": "integer", "traits": { @@ -1854,7 +1892,7 @@ "Configuration": { "target": "com.amazonaws.kendra#DataSourceConfiguration", "traits": { - "smithy.api#documentation": "

                The connector configuration information that is required to access the\n repository.

                \n

                You can't specify the Configuration parameter when the\n Type parameter is set to CUSTOM. If you do,\n you receive a ValidationException exception.

                \n

                The Configuration parameter is required for all other\n data sources.

                " + "smithy.api#documentation": "

                Configuration information that is required to access the data source \n repository.

                \n

                You can't specify the Configuration parameter when the\n Type parameter is set to CUSTOM. If you do,\n you receive a ValidationException exception.

                \n

                The Configuration parameter is required for all other\n data sources.

                " } }, "Description": { @@ -1975,7 +2013,7 @@ "Configuration": { "target": "com.amazonaws.kendra#ExperienceConfiguration", "traits": { - "smithy.api#documentation": "

                Provides the configuration information for your Amazon Kendra experience. This includes\n ContentSourceConfiguration, which specifies the data source IDs \n and/or FAQ IDs, and UserIdentityConfiguration, which specifies the \n user or group information to grant access to your Amazon Kendra experience.

                " + "smithy.api#documentation": "

                Configuration information for your Amazon Kendra experience. This includes\n ContentSourceConfiguration, which specifies the data source IDs \n and/or FAQ IDs, and UserIdentityConfiguration, which specifies the \n user or group information to grant access to your Amazon Kendra experience.

                " } }, "Description": { @@ -2463,37 +2501,37 @@ "S3Configuration": { "target": "com.amazonaws.kendra#S3DataSourceConfiguration", "traits": { - "smithy.api#documentation": "

                Provides information to create a data source connector for a\n document repository in an Amazon S3 bucket.

                " + "smithy.api#documentation": "

                Provides the configuration information to connect to an Amazon S3 \n bucket as your data source.

                " } }, "SharePointConfiguration": { "target": "com.amazonaws.kendra#SharePointConfiguration", "traits": { - "smithy.api#documentation": "

                Provides information necessary to create a data source connector\n for a Microsoft SharePoint site.

                " + "smithy.api#documentation": "

                Provides the configuration information to connect to Microsoft SharePoint \n as your data source.

                " } }, "DatabaseConfiguration": { "target": "com.amazonaws.kendra#DatabaseConfiguration", "traits": { - "smithy.api#documentation": "

                Provides information necessary to create a data source connector\n for a database.

                " + "smithy.api#documentation": "

                Provides the configuration information to connect to a database as \n your data source.

                " } }, "SalesforceConfiguration": { "target": "com.amazonaws.kendra#SalesforceConfiguration", "traits": { - "smithy.api#documentation": "

                Provides configuration information for data sources that connect\n to a Salesforce site.

                " + "smithy.api#documentation": "

                Provides the configuration information to connect to \n Salesforce as your data source.

                " } }, "OneDriveConfiguration": { "target": "com.amazonaws.kendra#OneDriveConfiguration", "traits": { - "smithy.api#documentation": "

                Provides configuration for data sources that connect to Microsoft\n OneDrive.

                " + "smithy.api#documentation": "

                Provides the configuration information to connect to Microsoft\n OneDrive as your data source.

                " } }, "ServiceNowConfiguration": { "target": "com.amazonaws.kendra#ServiceNowConfiguration", "traits": { - "smithy.api#documentation": "

                Provides configuration for data sources that connect to ServiceNow\n instances.

                " + "smithy.api#documentation": "

                Provides the configuration information to connect to ServiceNow\n as your data source.

                " } }, "ConfluenceConfiguration": { @@ -2505,7 +2543,7 @@ "GoogleDriveConfiguration": { "target": "com.amazonaws.kendra#GoogleDriveConfiguration", "traits": { - "smithy.api#documentation": "

                Provides configuration for data sources that connect to Google\n Drive.

                " + "smithy.api#documentation": "

                Provides the configuration information to connect to Google\n Drive as your data source.

                " } }, "WebCrawlerConfiguration": { @@ -2525,7 +2563,7 @@ } }, "traits": { - "smithy.api#documentation": "

                Configuration information for an Amazon Kendra data source.

                " + "smithy.api#documentation": "

                Provides the configuration information for an Amazon Kendra data source.

                " } }, "com.amazonaws.kendra#DataSourceDateFieldFormat": { @@ -2567,7 +2605,7 @@ } }, "traits": { - "smithy.api#documentation": "

                \n Data source information for user context filtering.\n

                " + "smithy.api#documentation": "

                Data source information for user context filtering.

                " } }, "com.amazonaws.kendra#DataSourceGroups": { @@ -2998,7 +3036,7 @@ } }, "traits": { - "smithy.api#documentation": "

                Provides information for connecting to an Amazon VPC.

                " + "smithy.api#documentation": "

                Provides the configuration information to connect to an Amazon VPC.

                " } }, "com.amazonaws.kendra#DatabaseConfiguration": { @@ -3014,7 +3052,7 @@ "ConnectionConfiguration": { "target": "com.amazonaws.kendra#ConnectionConfiguration", "traits": { - "smithy.api#documentation": "

                The information necessary to connect to a database.

                ", + "smithy.api#documentation": "

                Configuration information that's required to connect to a database.

                ", "smithy.api#required": {} } }, @@ -3042,7 +3080,7 @@ } }, "traits": { - "smithy.api#documentation": "

                Provides the information necessary to connect a database to an\n index.

                " + "smithy.api#documentation": "

                Provides the configuration information to connect to an \n index.

                " } }, "com.amazonaws.kendra#DatabaseEngineType": { @@ -3523,7 +3561,7 @@ "Configuration": { "target": "com.amazonaws.kendra#DataSourceConfiguration", "traits": { - "smithy.api#documentation": "

                Information that describes where the data source is located and how\n the data source is configured. The specific information in the description\n depends on the data source provider.

                " + "smithy.api#documentation": "

                Describes how the data source is configured. The specific information in the description\n depends on the data source provider.

                " } }, "CreatedAt": { @@ -5070,7 +5108,7 @@ } }, "traits": { - "smithy.api#documentation": "

                Provides the configuration information of users or groups in \n your Amazon Web Services SSO identity source to grant access your Amazon Kendra \n experience.

                " + "smithy.api#documentation": "

                Provides the configuration information for users or groups in \n your Amazon Web Services SSO identity source to grant access to your Amazon Kendra \n experience.

                " } }, "com.amazonaws.kendra#EntityDisplayData": { @@ -5152,7 +5190,7 @@ } }, "traits": { - "smithy.api#documentation": "

                Provides the configuration information of users or groups in your \n Amazon Web Services SSO identity source for access to your Amazon Kendra experience. \n Specific permissions are defined for each user or group once they are \n granted access to your Amazon Kendra experience.

                " + "smithy.api#documentation": "

                Provides the configuration information for users or groups in your \n Amazon Web Services SSO identity source for access to your Amazon Kendra experience. \n Specific permissions are defined for each user or group once they are \n granted access to your Amazon Kendra experience.

                " } }, "com.amazonaws.kendra#EntityPersonaConfigurationList": { @@ -5260,7 +5298,7 @@ } }, "traits": { - "smithy.api#documentation": "

                Specifies the configuration information for your Amazon Kendra experience. This includes \n the data source IDs and/or FAQ IDs, and user or group information to grant access \n to your Amazon Kendra experience.

                " + "smithy.api#documentation": "

                Provides the configuration information for your Amazon Kendra experience. This includes \n the data source IDs and/or FAQ IDs, and user or group information to grant access \n to your Amazon Kendra experience.

                " } }, "com.amazonaws.kendra#ExperienceEndpoint": { @@ -5280,7 +5318,7 @@ } }, "traits": { - "smithy.api#documentation": "

                Provides the configuration information of the endpoint for your Amazon Kendra\n experience.

                " + "smithy.api#documentation": "

                Provides the configuration information for the endpoint for your Amazon Kendra\n experience.

                " } }, "com.amazonaws.kendra#ExperienceEndpoints": { @@ -5697,14 +5735,14 @@ "VpcConfiguration": { "target": "com.amazonaws.kendra#DataSourceVpcConfiguration", "traits": { - "smithy.api#documentation": "

                Provides the configuration information for connecting to an \n Amazon Virtual Private Cloud for your Amazon FSx. Your Amazon FSx \n instance must reside inside your VPC.

                ", + "smithy.api#documentation": "

                Configuration information for connecting to an \n Amazon Virtual Private Cloud for your Amazon FSx. Your Amazon FSx \n instance must reside inside your VPC.

                ", "smithy.api#required": {} } }, "SecretArn": { "target": "com.amazonaws.kendra#SecretArn", "traits": { - "smithy.api#documentation": "

                The Amazon Resource Name (ARN) of an Secrets Manager secret that \n contains the key-value pairs required to connect to your Amazon FSx \n file system. Windows is currently the only supported type. The secret must \n contain a JSON structure with the following keys:

                \n
                  \n
                • \n

                  username—The Active Directory user name, along with the \n Domain Name System (DNS) domain name. For example, \n user@corp.example.com. The Active Directory \n user account must have read and mounting access to the \n Amazon FSx file system for Windows.

                  \n
                • \n
                • \n

                  password—The password of the active directory user with \n read and mounting access Amazon FSx Windows file system.

                  \n
                • \n
                " + "smithy.api#documentation": "

                The Amazon Resource Name (ARN) of an Secrets Manager secret that \n contains the key-value pairs required to connect to your Amazon FSx \n file system. Windows is currently the only supported type. The secret must \n contain a JSON structure with the following keys:

                \n
                  \n
                • \n

                  username—The Active Directory user name, along with the \n Domain Name System (DNS) domain name. For example, \n user@corp.example.com. The Active Directory \n user account must have read and mounting access to the \n Amazon FSx file system for Windows.

                  \n
                • \n
                • \n

                  password—The password of the Active Directory user account with \n read and mounting access to the Amazon FSx Windows file system.

                  \n
                • \n
                " } }, "InclusionPatterns": { @@ -5722,7 +5760,7 @@ "FieldMappings": { "target": "com.amazonaws.kendra#DataSourceToIndexFieldMappingList", "traits": { - "smithy.api#documentation": "

                A list of DataSourceToIndexFieldMapping objects that \n map Amazon FSx data source attributes or field names to Amazon Kendra \n index field names in Amazon Kendra. To create custom fields, use the \n UpdateIndex API before you map to Amazon FSx fields. \n For more information, see Mapping \n data source fields. The Amazon FSx data source field names \n must exist in your Amazon FSx custom metadata.

                " + "smithy.api#documentation": "

                A list of DataSourceToIndexFieldMapping objects that \n map Amazon FSx data source attributes or field names to Amazon Kendra \n index field names. To create custom fields, use the \n UpdateIndex API before you map to Amazon FSx fields. \n For more information, see Mapping \n data source fields. The Amazon FSx data source field names \n must exist in your Amazon FSx custom metadata.

                " } } }, @@ -5964,7 +6002,7 @@ } }, "traits": { - "smithy.api#documentation": "

                Provides configuration information for data sources that connect\n to Google Drive.

                " + "smithy.api#documentation": "

                Provides the configuration information to connect to \n Google Drive as your data source.

                " } }, "com.amazonaws.kendra#GroupAttributeField": { @@ -6280,7 +6318,7 @@ } }, "traits": { - "smithy.api#documentation": "

                A summary of information about an index.

                " + "smithy.api#documentation": "

                A summary of information on the configuration of an index.

                " } }, "com.amazonaws.kendra#IndexConfigurationSummaryList": { @@ -6541,7 +6579,7 @@ } }, "traits": { - "smithy.api#documentation": "

                Configuration information for the JSON token type.

                " + "smithy.api#documentation": "

                Provides the configuration information for the JSON token type.

                " } }, "com.amazonaws.kendra#JwtTokenTypeConfiguration": { @@ -6592,7 +6630,7 @@ } }, "traits": { - "smithy.api#documentation": "

                Configuration information for the JWT token type.

                " + "smithy.api#documentation": "

                Provides the configuration information for the JWT token type.

                " } }, "com.amazonaws.kendra#KeyLocation": { @@ -7255,7 +7293,7 @@ "IndexConfigurationSummaryItems": { "target": "com.amazonaws.kendra#IndexConfigurationSummaryList", "traits": { - "smithy.api#documentation": "

                An array of summary information for one or more indexes.

                " + "smithy.api#documentation": "

                An array of summary information on the configuration of one or more indexes.

                " } }, "NextToken": { @@ -7808,7 +7846,7 @@ } }, "traits": { - "smithy.api#documentation": "

                Provides configuration information for data sources that connect\n to OneDrive.

                " + "smithy.api#documentation": "

                Provides the configuration information to connect\n to OneDrive as your data source.

                " } }, "com.amazonaws.kendra#OneDriveUser": { @@ -8292,6 +8330,12 @@ "traits": { "smithy.api#documentation": "

                Provides an identifier for a specific user. The\n VisitorId should be a unique identifier, such as a\n GUID. Don't use personally identifiable information, such as the user's\n email address, as the VisitorId.

                " } + }, + "SpellCorrectionConfiguration": { + "target": "com.amazonaws.kendra#SpellCorrectionConfiguration", + "traits": { + "smithy.api#documentation": "

                Enables suggested spell corrections for queries.

                " + } } } }, @@ -8327,6 +8371,12 @@ "traits": { "smithy.api#documentation": "

                A list of warning codes and their messages on problems with your query.

                \n

                Amazon Kendra currently only supports one type of warning, which is a warning \n on invalid syntax used in the query. For examples of invalid query syntax, \n see Searching \n with advanced query syntax.

                " } + }, + "SpellCorrectedQueries": { + "target": "com.amazonaws.kendra#SpellCorrectedQueryList", + "traits": { + "smithy.api#documentation": "

                A list of information related to suggested spell corrections for a query.

                " + } } } }, @@ -8773,7 +8823,7 @@ } }, "traits": { - "smithy.api#documentation": "

                Provides configuration information for a data source to index\n documents in an Amazon S3 bucket.

                " + "smithy.api#documentation": "

                Provides the configuration information to connect to \n an Amazon S3 bucket.

                " } }, "com.amazonaws.kendra#S3ObjectKey": { @@ -8837,7 +8887,7 @@ } }, "traits": { - "smithy.api#documentation": "

                Defines configuration for syncing a Salesforce chatter feed. The\n contents of the object comes from the Salesforce FeedItem\n table.

                " + "smithy.api#documentation": "

                The configuration information for syncing a Salesforce chatter feed. The\n contents of the object comes from the Salesforce FeedItem\n table.

                " } }, "com.amazonaws.kendra#SalesforceChatterFeedIncludeFilterType": { @@ -8911,7 +8961,7 @@ "StandardObjectAttachmentConfiguration": { "target": "com.amazonaws.kendra#SalesforceStandardObjectAttachmentConfiguration", "traits": { - "smithy.api#documentation": "

                Provides configuration information for processing attachments to\n Salesforce standard objects.

                " + "smithy.api#documentation": "

                Configuration information for processing attachments to\n Salesforce standard objects.

                " } }, "IncludeAttachmentFilePatterns": { @@ -9000,18 +9050,18 @@ "StandardKnowledgeArticleTypeConfiguration": { "target": "com.amazonaws.kendra#SalesforceStandardKnowledgeArticleTypeConfiguration", "traits": { - "smithy.api#documentation": "

                Provides configuration information for standard Salesforce\n knowledge articles.

                " + "smithy.api#documentation": "

                Configuration information for standard Salesforce\n knowledge articles.

                " } }, "CustomKnowledgeArticleTypeConfigurations": { "target": "com.amazonaws.kendra#SalesforceCustomKnowledgeArticleTypeConfigurationList", "traits": { - "smithy.api#documentation": "

                Provides configuration information for custom Salesforce knowledge\n articles.

                " + "smithy.api#documentation": "

                Configuration information for custom Salesforce knowledge\n articles.

                " } } }, "traits": { - "smithy.api#documentation": "

                Specifies configuration information for the knowledge article\n types that Amazon Kendra indexes. Amazon Kendra indexes standard knowledge\n articles and the standard fields of knowledge articles, or the\n custom fields of custom knowledge articles, but not both

                " + "smithy.api#documentation": "

                Provides the configuration information for the knowledge article\n types that Amazon Kendra indexes. Amazon Kendra indexes standard knowledge\n articles and the standard fields of knowledge articles, or the\n custom fields of custom knowledge articles, but not both.

                " } }, "com.amazonaws.kendra#SalesforceKnowledgeArticleState": { @@ -9069,7 +9119,7 @@ } }, "traits": { - "smithy.api#documentation": "

                Provides configuration information for standard Salesforce\n knowledge articles.

                " + "smithy.api#documentation": "

                Configuration information for standard Salesforce\n knowledge articles.

                " } }, "com.amazonaws.kendra#SalesforceStandardObjectAttachmentConfiguration": { @@ -9089,7 +9139,7 @@ } }, "traits": { - "smithy.api#documentation": "

                Provides configuration information for processing attachments to\n Salesforce standard objects.

                " + "smithy.api#documentation": "

                Provides the configuration information for processing attachments to\n Salesforce standard objects.

                " } }, "com.amazonaws.kendra#SalesforceStandardObjectConfiguration": { @@ -9123,7 +9173,7 @@ } }, "traits": { - "smithy.api#documentation": "

                Specifies configuration information for indexing a single standard\n object.

                " + "smithy.api#documentation": "

                Provides the configuration information for indexing a single standard\n object.

                " } }, "com.amazonaws.kendra#SalesforceStandardObjectConfigurationList": { @@ -9340,7 +9390,7 @@ } }, "traits": { - "smithy.api#documentation": "

                Provides the configuration information of the seed or starting point URLs to crawl.

                \n

                \n When selecting websites to index, you must adhere to \n the Amazon Acceptable Use Policy \n and all other Amazon terms. Remember that you must only use Amazon Kendra Web \n Crawler to index your own webpages, or webpages that you have authorization \n to index.\n

                " + "smithy.api#documentation": "

                Provides the configuration information for the seed or starting point URLs to crawl.

                \n

                \n When selecting websites to index, you must adhere to \n the Amazon Acceptable Use Policy \n and all other Amazon terms. Remember that you must only use Amazon Kendra Web \n Crawler to index your own webpages, or webpages that you have authorization \n to index.\n

                " } }, "com.amazonaws.kendra#SeedUrlList": { @@ -9426,13 +9476,13 @@ "KnowledgeArticleConfiguration": { "target": "com.amazonaws.kendra#ServiceNowKnowledgeArticleConfiguration", "traits": { - "smithy.api#documentation": "

                Provides configuration information for crawling knowledge articles\n in the ServiceNow site.

                " + "smithy.api#documentation": "

                Configuration information for crawling knowledge articles\n in the ServiceNow site.

                " } }, "ServiceCatalogConfiguration": { "target": "com.amazonaws.kendra#ServiceNowServiceCatalogConfiguration", "traits": { - "smithy.api#documentation": "

                Provides configuration information for crawling service catalogs\n in the ServiceNow site.

                " + "smithy.api#documentation": "

                Configuration information for crawling service catalogs\n in the ServiceNow site.

                " } }, "AuthenticationType": { @@ -9443,7 +9493,7 @@ } }, "traits": { - "smithy.api#documentation": "

                Provides configuration information required to connect to a\n ServiceNow data source.

                " + "smithy.api#documentation": "

                Provides the configuration information to connect to \n ServiceNow as your data source.

                " } }, "com.amazonaws.kendra#ServiceNowHostUrl": { @@ -9504,7 +9554,7 @@ } }, "traits": { - "smithy.api#documentation": "

                Provides configuration information for crawling knowledge articles\n in the ServiceNow site.

                " + "smithy.api#documentation": "

                Provides the configuration information for crawling knowledge articles\n in the ServiceNow site.

                " } }, "com.amazonaws.kendra#ServiceNowKnowledgeArticleFilterQuery": { @@ -9559,7 +9609,7 @@ } }, "traits": { - "smithy.api#documentation": "

                Provides configuration information for crawling service catalog\n items in the ServiceNow site

                " + "smithy.api#documentation": "

                Provides the configuration information for crawling service catalog\n items in the ServiceNow site.

                " } }, "com.amazonaws.kendra#ServiceQuotaExceededException": { @@ -9649,7 +9699,7 @@ } }, "traits": { - "smithy.api#documentation": "

                Provides configuration information for connecting to a Microsoft\n SharePoint data source.

                " + "smithy.api#documentation": "

                Provides the configuration information to connect to Microsoft\n SharePoint as your data source.

                " } }, "com.amazonaws.kendra#SharePointUrlList": { @@ -9715,7 +9765,7 @@ } }, "traits": { - "smithy.api#documentation": "

                Provides the configuration information of the sitemap URLs to crawl.

                \n

                \n When selecting websites to index, you must adhere to \n the Amazon Acceptable Use Policy \n and all other Amazon terms. Remember that you must only use Amazon Kendra Web \n Crawler to index your own webpages, or webpages that you have authorization \n to index.\n

                " + "smithy.api#documentation": "

                Provides the configuration information for the sitemap URLs to crawl.

                \n

                \n When selecting websites to index, you must adhere to \n the Amazon Acceptable Use Policy \n and all other Amazon terms. Remember that you must only use Amazon Kendra Web \n Crawler to index your own webpages, or webpages that you have authorization \n to index.\n

                " } }, "com.amazonaws.kendra#SiteMapsList": { @@ -9785,6 +9835,47 @@ "smithy.api#documentation": "

                Specifies the document attribute to use to sort the response to a\n Amazon Kendra query. You can specify a single attribute for sorting. The\n attribute must have the Sortable flag set to\n true, otherwise Amazon Kendra returns an exception.

                \n

                You can sort attributes of the following types.

                \n
                  \n
                • \n

                  Date value

                  \n
                • \n
                • \n

                  Long value

                  \n
                • \n
                • \n

                  String value

                  \n
                • \n
                \n

                You can't sort attributes of the following type.

                \n
                  \n
                • \n

                  String list value

                  \n
                • \n
                " } }, + "com.amazonaws.kendra#SpellCorrectedQuery": { + "type": "structure", + "members": { + "SuggestedQueryText": { + "target": "com.amazonaws.kendra#SuggestedQueryText", + "traits": { + "smithy.api#documentation": "

                The query with the suggested spell corrections.

                " + } + }, + "Corrections": { + "target": "com.amazonaws.kendra#CorrectionList", + "traits": { + "smithy.api#documentation": "

                The corrected misspelled word or words in a query.

                " + } + } + }, + "traits": { + "smithy.api#documentation": "

                A query with suggested spell corrections.

                " + } + }, + "com.amazonaws.kendra#SpellCorrectedQueryList": { + "type": "list", + "member": { + "target": "com.amazonaws.kendra#SpellCorrectedQuery" + } + }, + "com.amazonaws.kendra#SpellCorrectionConfiguration": { + "type": "structure", + "members": { + "IncludeQuerySpellCheckSuggestions": { + "target": "com.amazonaws.kendra#Boolean", + "traits": { + "smithy.api#documentation": "

                \n TRUE to suggest spell corrections for queries.

                ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                Provides the configuration information for suggested query spell corrections.

                \n

                Suggested spell corrections are based on words that appear in your indexed documents \n and how closely a corrected word matches a misspelled word.

                \n

                This feature is designed with certain defaults or limits. For information on the \n current limits and how to request more support for some limits, see the \n Spell \n Checker documentation.

                " + } + }, "com.amazonaws.kendra#SqlConfiguration": { "type": "structure", "members": { @@ -9796,7 +9887,7 @@ } }, "traits": { - "smithy.api#documentation": "

                Provides information that configures Amazon Kendra to use a SQL\n database.

                " + "smithy.api#documentation": "

                Provides the configuration information to use a SQL\n database.

                " } }, "com.amazonaws.kendra#StartDataSourceSyncJob": { @@ -10041,6 +10132,15 @@ } } }, + "com.amazonaws.kendra#SuggestedQueryText": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1000 + } + } + }, "com.amazonaws.kendra#Suggestion": { "type": "structure", "members": { @@ -10562,7 +10662,7 @@ "Configuration": { "target": "com.amazonaws.kendra#DataSourceConfiguration", "traits": { - "smithy.api#documentation": "

                Configuration information for an Amazon Kendra data source.

                " + "smithy.api#documentation": "

                Configuration information for an Amazon Kendra data source you want to update.

                " } }, "Description": { @@ -10658,7 +10758,7 @@ "Configuration": { "target": "com.amazonaws.kendra#ExperienceConfiguration", "traits": { - "smithy.api#documentation": "

                Provides the user configuration information. This includes the Amazon Web Services SSO\n field name that contains the identifiers of your users, such as their emails.

                " + "smithy.api#documentation": "

                Configuration information for your Amazon Kendra experience you want to update.

                " } }, "Description": { @@ -10732,7 +10832,7 @@ "DocumentMetadataConfigurationUpdates": { "target": "com.amazonaws.kendra#DocumentMetadataConfigurationList", "traits": { - "smithy.api#documentation": "

                The document metadata to update.

                " + "smithy.api#documentation": "

                The document metadata you want to update.

                " } }, "CapacityUnits": { @@ -10989,13 +11089,13 @@ "SeedUrlConfiguration": { "target": "com.amazonaws.kendra#SeedUrlConfiguration", "traits": { - "smithy.api#documentation": "

                Provides the configuration of the seed or starting point URLs of the websites \n you want to crawl.

                \n

                You can choose to crawl only the website host names, or the website host names \n with subdomains, or the website host names with subdomains and other domains \n that the webpages link to.

                \n

                You can list up to 100 seed URLs.

                " + "smithy.api#documentation": "

                Configuration of the seed or starting point URLs of the websites \n you want to crawl.

                \n

                You can choose to crawl only the website host names, or the website host names \n with subdomains, or the website host names with subdomains and other domains \n that the webpages link to.

                \n

                You can list up to 100 seed URLs.

                " } }, "SiteMapsConfiguration": { "target": "com.amazonaws.kendra#SiteMapsConfiguration", "traits": { - "smithy.api#documentation": "

                Provides the configuration of the sitemap URLs of the websites you want to crawl.

                \n

                Only URLs belonging to the same website host names are crawled. You can list up to \n three sitemap URLs.

                " + "smithy.api#documentation": "

                Configuration of the sitemap URLs of the websites you want to crawl.

                \n

                Only URLs belonging to the same website host names are crawled. You can list up to \n three sitemap URLs.

                " } } }, @@ -11111,7 +11211,7 @@ } }, "traits": { - "smithy.api#documentation": "

                Configuration information for the identifiers of your users.

                " + "smithy.api#documentation": "

                Provides the configuration information for the identifiers of your users.

                " } }, "com.amazonaws.kendra#UserNameAttributeField": { @@ -11141,7 +11241,7 @@ } }, "traits": { - "smithy.api#documentation": "

                Provides configuration information for a token configuration.

                " + "smithy.api#documentation": "

                Provides the configuration information for a token.

                " } }, "com.amazonaws.kendra#UserTokenConfigurationList": { @@ -11309,13 +11409,13 @@ "ProxyConfiguration": { "target": "com.amazonaws.kendra#ProxyConfiguration", "traits": { - "smithy.api#documentation": "

                Provides configuration information required to connect to your internal \n websites via a web proxy.

                \n

                You must provide the website host name and port number. For example, the \n host name of https://a.example.com/page1.html is \"a.example.com\" and the \n port is 443, the standard port for HTTPS.

                \n

                Web proxy credentials are optional and you can use them to connect to a \n web proxy server that requires basic authentication. To store web proxy \n credentials, you use a secret in Secrets Manager.

                " + "smithy.api#documentation": "

                Configuration information required to connect to your internal \n websites via a web proxy.

                \n

                You must provide the website host name and port number. For example, the \n host name of https://a.example.com/page1.html is \"a.example.com\" and the \n port is 443, the standard port for HTTPS.

                \n

                Web proxy credentials are optional and you can use them to connect to a \n web proxy server that requires basic authentication. To store web proxy \n credentials, you use a secret in Secrets Manager.

                " } }, "AuthenticationConfiguration": { "target": "com.amazonaws.kendra#AuthenticationConfiguration", "traits": { - "smithy.api#documentation": "

                Provides configuration information required to connect to websites using \n authentication.

                \n

                You can connect to websites using basic authentication of user name and password.

                \n

                You must provide the website host name and port number. For example, the host name \n of https://a.example.com/page1.html is \"a.example.com\" and the port is 443, the \n standard port for HTTPS. You use a secret in Secrets Manager to store \n your authentication credentials.

                " + "smithy.api#documentation": "

                Configuration information required to connect to websites using \n authentication.

                \n

                You can connect to websites using basic authentication of user name and password.

                \n

                You must provide the website host name and port number. For example, the host name \n of https://a.example.com/page1.html is \"a.example.com\" and the port is 443, the \n standard port for HTTPS. You use a secret in Secrets Manager to store \n your authentication credentials.

                " } } }, diff --git a/codegen/sdk-codegen/aws-models/keyspaces.json b/codegen/sdk-codegen/aws-models/keyspaces.json new file mode 100644 index 000000000000..d7cc8b53a302 --- /dev/null +++ b/codegen/sdk-codegen/aws-models/keyspaces.json @@ -0,0 +1,1762 @@ +{ + "smithy": "1.0", + "shapes": { + "com.amazonaws.keyspaces#ARN": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 20, + "max": 1000 + }, + "smithy.api#pattern": "^arn:(aws[a-zA-Z0-9-]*):cassandra:.+" + } + }, + "com.amazonaws.keyspaces#AccessDeniedException": { + "type": "structure", + "members": { + "message": { + "target": "smithy.api#String" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "AccessDeniedException", + "httpResponseCode": 403 + }, + "smithy.api#documentation": "

                You do not have sufficient access to perform this action.

                ", + "smithy.api#error": "client", + "smithy.api#httpError": 403 + } + }, + "com.amazonaws.keyspaces#CapacitySpecification": { + "type": "structure", + "members": { + "throughputMode": { + "target": "com.amazonaws.keyspaces#ThroughputMode", + "traits": { + "smithy.api#documentation": "

                The read/write throughput capacity mode for a table. The options are:

                \n
                  \n
                • \n

                  \n throughputMode:PAY_PER_REQUEST and

                  \n
                • \n
                • \n

                  \n throughputMode:PROVISIONED. The provisioned capacity mode requires\n readCapacityUnits and writeCapacityUnits as inputs.

                  \n
                • \n
                \n

                The default is throughput_mode:PAY_PER_REQUEST.

                \n

                For more information, see Read/write capacity modes in the Amazon Keyspaces Developer\n Guide.

                ", + "smithy.api#required": {} + } + }, + "readCapacityUnits": { + "target": "com.amazonaws.keyspaces#CapacityUnits", + "traits": { + "smithy.api#documentation": "

                The throughput capacity specified for read operations defined in read capacity units \n (RCUs).

                " + } + }, + "writeCapacityUnits": { + "target": "com.amazonaws.keyspaces#CapacityUnits", + "traits": { + "smithy.api#documentation": "

                The throughput capacity specified for write operations defined in write capacity units \n (WCUs).

                " + } + } + }, + "traits": { + "smithy.api#documentation": "

                Amazon Keyspaces has two read/write capacity modes for processing reads and writes on your tables:

                \n
                  \n
                • \n

                  \n On-demand (default)\n

                  \n
                • \n
                • \n

                  \n Provisioned\n

                  \n
                • \n
                \n \n

                The read/write capacity mode that you choose controls how you are charged for read and\n write throughput and how table throughput capacity is managed.

                \n

                For more information, see Read/write capacity modes in the Amazon Keyspaces Developer\n Guide.

                " + } + }, + "com.amazonaws.keyspaces#CapacitySpecificationSummary": { + "type": "structure", + "members": { + "throughputMode": { + "target": "com.amazonaws.keyspaces#ThroughputMode", + "traits": { + "smithy.api#documentation": "

                The read/write throughput capacity mode for a table. The options are:

                \n
                  \n
                • \n

                  \n throughputMode:PAY_PER_REQUEST and

                  \n
                • \n
                • \n

                  \n throughputMode:PROVISIONED. The provisioned capacity mode requires\n readCapacityUnits and writeCapacityUnits as inputs.

                  \n
                • \n
                \n

                The default is throughput_mode:PAY_PER_REQUEST.

                \n

                For more information, see Read/write capacity modes in the Amazon Keyspaces Developer\n Guide.

                ", + "smithy.api#required": {} + } + }, + "readCapacityUnits": { + "target": "com.amazonaws.keyspaces#CapacityUnits", + "traits": { + "smithy.api#documentation": "

                The throughput capacity specified for read operations defined in read capacity units \n (RCUs).

                " + } + }, + "writeCapacityUnits": { + "target": "com.amazonaws.keyspaces#CapacityUnits", + "traits": { + "smithy.api#documentation": "

                The throughput capacity specified for write operations defined in write capacity units \n (WCUs).

                " + } + }, + "lastUpdateToPayPerRequestTimestamp": { + "target": "com.amazonaws.keyspaces#Timestamp", + "traits": { + "smithy.api#documentation": "

                The timestamp of the last operation that changed the provisioned throughput capacity of a table.

                " + } + } + }, + "traits": { + "smithy.api#documentation": "

                The read/write throughput capacity mode for a table. The options are:

                \n
                  \n
                • \n

                  \n throughputMode:PAY_PER_REQUEST and

                  \n
                • \n
                • \n

                  \n throughputMode:PROVISIONED.

                  \n
                • \n
                \n

                For more information, see Read/write capacity modes in the Amazon Keyspaces Developer\n Guide.

                " + } + }, + "com.amazonaws.keyspaces#CapacityUnits": { + "type": "long", + "traits": { + "smithy.api#box": {}, + "smithy.api#range": { + "min": 1 + } + } + }, + "com.amazonaws.keyspaces#ClusteringKey": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.keyspaces#GenericString", + "traits": { + "smithy.api#documentation": "

                The name(s) of the clustering column(s).

                ", + "smithy.api#required": {} + } + }, + "orderBy": { + "target": "com.amazonaws.keyspaces#SortOrder", + "traits": { + "smithy.api#documentation": "

                Sets the ascendant (ASC) or descendant (DESC) order modifier.

                ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                The optional clustering column portion of your primary key determines how the data is clustered and sorted within each partition.

                " + } + }, + "com.amazonaws.keyspaces#ClusteringKeyList": { + "type": "list", + "member": { + "target": "com.amazonaws.keyspaces#ClusteringKey" + } + }, + "com.amazonaws.keyspaces#ColumnDefinition": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.keyspaces#GenericString", + "traits": { + "smithy.api#documentation": "

                The name of the column.

                ", + "smithy.api#required": {} + } + }, + "type": { + "target": "com.amazonaws.keyspaces#GenericString", + "traits": { + "smithy.api#documentation": "

                The data type of the column. For a list of available data types, see Data types in the Amazon Keyspaces Developer\n Guide.

                ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                The names and data types of regular columns.

                " + } + }, + "com.amazonaws.keyspaces#ColumnDefinitionList": { + "type": "list", + "member": { + "target": "com.amazonaws.keyspaces#ColumnDefinition" + }, + "traits": { + "smithy.api#length": { + "min": 1 + } + } + }, + "com.amazonaws.keyspaces#Comment": { + "type": "structure", + "members": { + "message": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

                An optional description of the table.

                ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                An optional comment that describes the table.

                " + } + }, + "com.amazonaws.keyspaces#ConflictException": { + "type": "structure", + "members": { + "message": { + "target": "smithy.api#String" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "ConflictException", + "httpResponseCode": 409 + }, + "smithy.api#documentation": "

                Amazon Keyspaces could not complete the requested action. This error may occur if you try to \n perform an action and the same or a different action is already\n in progress, or if you try to create a resource that already exists.

                ", + "smithy.api#error": "client", + "smithy.api#httpError": 409 + } + }, + "com.amazonaws.keyspaces#CreateKeyspace": { + "type": "operation", + "input": { + "target": "com.amazonaws.keyspaces#CreateKeyspaceRequest" + }, + "output": { + "target": "com.amazonaws.keyspaces#CreateKeyspaceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.keyspaces#AccessDeniedException" + }, + { + "target": "com.amazonaws.keyspaces#ConflictException" + }, + { + "target": "com.amazonaws.keyspaces#InternalServerException" + }, + { + "target": "com.amazonaws.keyspaces#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.keyspaces#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                The CreateKeyspace operation adds a new keyspace to your account. In an Amazon Web Services account, keyspace names\n must be unique within each Region.

                \n

                \n CreateKeyspace is an asynchronous operation. You can monitor the creation status of the new keyspace \n by using the GetKeyspace operation.

                \n

                For more information, see Creating keyspaces in the Amazon Keyspaces Developer\n Guide.

                " + } + }, + "com.amazonaws.keyspaces#CreateKeyspaceRequest": { + "type": "structure", + "members": { + "keyspaceName": { + "target": "com.amazonaws.keyspaces#KeyspaceName", + "traits": { + "smithy.api#documentation": "

                The name of the keyspace to be created.

                ", + "smithy.api#required": {} + } + }, + "tags": { + "target": "com.amazonaws.keyspaces#TagList", + "traits": { + "smithy.api#documentation": "

                A list of key-value pair tags to be attached to the keyspace.

                \n

                For more information, see Adding tags and labels to Amazon Keyspaces resources in the Amazon Keyspaces Developer\n Guide.

                " + } + } + } + }, + "com.amazonaws.keyspaces#CreateKeyspaceResponse": { + "type": "structure", + "members": { + "resourceArn": { + "target": "com.amazonaws.keyspaces#ARN", + "traits": { + "smithy.api#documentation": "

                The unique identifier of the keyspace in the format of an Amazon Resource Name (ARN).

                ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.keyspaces#CreateTable": { + "type": "operation", + "input": { + "target": "com.amazonaws.keyspaces#CreateTableRequest" + }, + "output": { + "target": "com.amazonaws.keyspaces#CreateTableResponse" + }, + "errors": [ + { + "target": "com.amazonaws.keyspaces#AccessDeniedException" + }, + { + "target": "com.amazonaws.keyspaces#ConflictException" + }, + { + "target": "com.amazonaws.keyspaces#InternalServerException" + }, + { + "target": "com.amazonaws.keyspaces#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.keyspaces#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.keyspaces#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                The CreateTable operation adds a new table to the specified keyspace. Within a keyspace, table names\n must be unique.

                \n

                \n CreateTable is an asynchronous operation. When the request is received, the status of the table is set to CREATING.\n You can monitor the creation status of the new table by using the GetTable \n operation, which returns the current status of the table. You can start using a table when the status is ACTIVE.

                \n

                For more information, see Creating tables in the Amazon Keyspaces Developer\n Guide.

                " + } + }, + "com.amazonaws.keyspaces#CreateTableRequest": { + "type": "structure", + "members": { + "keyspaceName": { + "target": "com.amazonaws.keyspaces#KeyspaceName", + "traits": { + "smithy.api#documentation": "

                The name of the keyspace that the table is going to be created in.

                ", + "smithy.api#required": {} + } + }, + "tableName": { + "target": "com.amazonaws.keyspaces#TableName", + "traits": { + "smithy.api#documentation": "

                The name of the table.

                ", + "smithy.api#required": {} + } + }, + "schemaDefinition": { + "target": "com.amazonaws.keyspaces#SchemaDefinition", + "traits": { + "smithy.api#documentation": "

                The schemaDefinition consists of the\n following parameters.

                \n

                For each column to be created:

                \n
                  \n
                • \n

                  \n \n name\n - The name\n of the column.

                  \n
                • \n
                • \n

                  \n \n type\n - An Amazon Keyspaces\n data type. For more information, see Data types in the Amazon Keyspaces Developer\n Guide.

                  \n
                • \n
                \n \n

                The primary key of the table consists of the\n following columns:

                \n
                  \n
                • \n

                  \n partitionKeys - The partition key can be a single column, or it can be a\n compound value composed of two or more columns. The partition\n key portion of the primary key is required and determines how\n Amazon Keyspaces stores your data.

                  \n
                    \n
                  • \n

                    \n \n name\n - The name of each partition key column.

                    \n
                  • \n
                  \n
                • \n
                • \n

                  \n clusteringKeys - The optional clustering column portion of your primary key\n determines how the data is clustered and sorted within each\n partition.

                  \n
                    \n
                  • \n

                    \n \n name\n - The name of the clustering column.

                    \n
                  • \n
                  • \n

                    \n \n orderBy\n - Sets the\n ascendant (ASC) or descendant (DESC) order modifier.

                    \n
                  • \n
                  \n
                • \n
                \n

                To define a column as static use \n staticColumns\n - \n Static columns store values that are shared by all rows in the same partition:

                \n
                  \n
                • \n

                  \n \n name\n - The name\n of the column.

                  \n
                • \n
                • \n

                  \n \n type\n - An Amazon Keyspaces\n data type.

                  \n
                • \n
                ", + "smithy.api#required": {} + } + }, + "comment": { + "target": "com.amazonaws.keyspaces#Comment", + "traits": { + "smithy.api#documentation": "

                This parameter allows you to enter a description of the table.

                " + } + }, + "capacitySpecification": { + "target": "com.amazonaws.keyspaces#CapacitySpecification", + "traits": { + "smithy.api#documentation": "

                Specifies the read/write throughput capacity mode for the table. The options are:

                \n
                  \n
                • \n

                  \n throughputMode:PAY_PER_REQUEST and

                  \n
                • \n
                • \n

                  \n throughputMode:PROVISIONED. The provisioned capacity mode requires\n readCapacityUnits and writeCapacityUnits as inputs.

                  \n
                • \n
                \n

                The default is\n throughput_mode:PAY_PER_REQUEST.

                \n

                For more information, see Read/write capacity modes in the Amazon Keyspaces Developer\n Guide.

                " + } + }, + "encryptionSpecification": { + "target": "com.amazonaws.keyspaces#EncryptionSpecification", + "traits": { + "smithy.api#documentation": "

                Specifies how the encryption key for encryption at rest is managed for the table. You can choose one of the following KMS keys (KMS keys):

                \n \n
                  \n
                • \n

                  \n type:AWS_OWNED_KMS_KEY - This key is owned by Amazon Keyspaces.

                  \n
                • \n
                • \n

                  \n type:CUSTOMER_MANAGED_KMS_KEY - This key is stored in your account and is created, owned, and managed by you. \n This option \n requires the kms_key_identifier of the KMS key in Amazon Resource Name (ARN) format as input.

                  \n
                • \n
                \n

                The default is type:AWS_OWNED_KMS_KEY.

                \n

                For more information, see Encryption at rest in the Amazon Keyspaces Developer\n Guide.

                " + } + }, + "pointInTimeRecovery": { + "target": "com.amazonaws.keyspaces#PointInTimeRecovery", + "traits": { + "smithy.api#documentation": "

                \n Specifies if pointInTimeRecovery is enabled or disabled for the\n table. The options are:

                \n
                  \n
                • \n

                  \n ENABLED \n

                  \n
                • \n
                • \n

                  \n DISABLED \n

                  \n
                • \n
                \n

                If it's not specified, the default is DISABLED.

                \n

                For more information, see Point-in-time recovery in the Amazon Keyspaces Developer\n Guide.

                " + } + }, + "ttl": { + "target": "com.amazonaws.keyspaces#TimeToLive", + "traits": { + "smithy.api#documentation": "

                \n Enables Time to Live custom settings for the\n table. The options are:

                \n
                  \n
                • \n

                  \n status:enabled \n

                  \n
                • \n
                • \n

                  \n status:disabled \n

                  \n
                • \n
                \n

                The default is status:disabled. After\n ttl is enabled, you can't disable it\n for the table.

                \n

                For more information, see Expiring data by using Amazon Keyspaces Time to Live (TTL) in the Amazon Keyspaces Developer\n Guide.

                " + } + }, + "defaultTimeToLive": { + "target": "com.amazonaws.keyspaces#DefaultTimeToLive", + "traits": { + "smithy.api#documentation": "

                The default Time to Live setting in seconds for the\n table.

                \n

                For more information, see Setting the default TTL value for a table in the Amazon Keyspaces Developer\n Guide.

                " + } + }, + "tags": { + "target": "com.amazonaws.keyspaces#TagList", + "traits": { + "smithy.api#documentation": "

                A list of key-value pair tags to be\n attached to the resource.

                \n

                For more information, see Adding tags and labels to Amazon Keyspaces resources in the Amazon Keyspaces Developer\n Guide.

                " + } + } + } + }, + "com.amazonaws.keyspaces#CreateTableResponse": { + "type": "structure", + "members": { + "resourceArn": { + "target": "com.amazonaws.keyspaces#ARN", + "traits": { + "smithy.api#documentation": "

                The unique identifier of the table in the format of an Amazon Resource Name (ARN).

                ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.keyspaces#DefaultTimeToLive": { + "type": "integer", + "traits": { + "smithy.api#box": {}, + "smithy.api#range": { + "min": 1, + "max": 630720000 + } + } + }, + "com.amazonaws.keyspaces#DeleteKeyspace": { + "type": "operation", + "input": { + "target": "com.amazonaws.keyspaces#DeleteKeyspaceRequest" + }, + "output": { + "target": "com.amazonaws.keyspaces#DeleteKeyspaceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.keyspaces#AccessDeniedException" + }, + { + "target": "com.amazonaws.keyspaces#ConflictException" + }, + { + "target": "com.amazonaws.keyspaces#InternalServerException" + }, + { + "target": "com.amazonaws.keyspaces#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.keyspaces#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.keyspaces#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                The DeleteKeyspace operation deletes a keyspace and all of its tables.

                " + } + }, + "com.amazonaws.keyspaces#DeleteKeyspaceRequest": { + "type": "structure", + "members": { + "keyspaceName": { + "target": "com.amazonaws.keyspaces#KeyspaceName", + "traits": { + "smithy.api#documentation": "

                The name of the keyspace to be deleted.

                ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.keyspaces#DeleteKeyspaceResponse": { + "type": "structure", + "members": {} + }, + "com.amazonaws.keyspaces#DeleteTable": { + "type": "operation", + "input": { + "target": "com.amazonaws.keyspaces#DeleteTableRequest" + }, + "output": { + "target": "com.amazonaws.keyspaces#DeleteTableResponse" + }, + "errors": [ + { + "target": "com.amazonaws.keyspaces#AccessDeniedException" + }, + { + "target": "com.amazonaws.keyspaces#ConflictException" + }, + { + "target": "com.amazonaws.keyspaces#InternalServerException" + }, + { + "target": "com.amazonaws.keyspaces#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.keyspaces#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.keyspaces#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                \n The DeleteTable operation deletes a table and all of its data. After a DeleteTable request is received, \n the specified table is in the DELETING state until Amazon Keyspaces completes the deletion. If the table \n is in the ACTIVE state, you can delete it. If a table is either in the CREATING or UPDATING states, then \n Amazon Keyspaces returns a ResourceInUseException. If the specified table does not exist, Amazon Keyspaces returns\n a ResourceNotFoundException. If the table is already in the DELETING state, no error is returned.

                " + } + }, + "com.amazonaws.keyspaces#DeleteTableRequest": { + "type": "structure", + "members": { + "keyspaceName": { + "target": "com.amazonaws.keyspaces#KeyspaceName", + "traits": { + "smithy.api#documentation": "

                The name of the keyspace that the table to be deleted is stored in.

                ", + "smithy.api#required": {} + } + }, + "tableName": { + "target": "com.amazonaws.keyspaces#TableName", + "traits": { + "smithy.api#documentation": "

                The name of the table to be deleted.

                ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.keyspaces#DeleteTableResponse": { + "type": "structure", + "members": {} + }, + "com.amazonaws.keyspaces#EncryptionSpecification": { + "type": "structure", + "members": { + "type": { + "target": "com.amazonaws.keyspaces#EncryptionType", + "traits": { + "smithy.api#documentation": "

                \n The encryption option specified for the table. You can choose one of the following KMS keys (KMS keys):

                \n
                  \n
                • \n

                  \n type:AWS_OWNED_KMS_KEY - This key is owned by Amazon Keyspaces.

                  \n
                • \n
                • \n

                  \n type:CUSTOMER_MANAGED_KMS_KEY - This key is stored in your account and is created, owned, and managed by you. \n This option \n requires the kms_key_identifier of the KMS key in Amazon Resource Name (ARN) format as input.

                  \n
                • \n
                \n

                The default is type:AWS_OWNED_KMS_KEY.

                \n

                For more information, see Encryption at rest in the Amazon Keyspaces Developer\n Guide.

                ", + "smithy.api#required": {} + } + }, + "kmsKeyIdentifier": { + "target": "com.amazonaws.keyspaces#kmsKeyARN", + "traits": { + "smithy.api#documentation": "

                The Amazon Resource Name (ARN) of the customer managed KMS key, for example kms_key_identifier:ARN.\n

                " + } + } + }, + "traits": { + "smithy.api#documentation": "

                \n Amazon Keyspaces encrypts and decrypts the table data at rest transparently and integrates with Key Management Service for storing and managing the encryption key. \n You can choose one of the following KMS keys (KMS keys):

                \n
                  \n
                • \n

                  \n Amazon Web Services owned key - This is the default encryption type. The key is owned by Amazon Keyspaces (no additional charge).

                  \n
                • \n
                • \n

                  Customer managed key - This key is stored in your account and is created, owned, and managed by you. You have full control over the customer \n managed key (KMS charges apply).

                  \n
                • \n
                \n

                For more information about encryption at rest in Amazon Keyspaces, see Encryption at rest in the Amazon Keyspaces Developer\n Guide.

                \n

                For more information about KMS, see KMS management service concepts in the Key Management Service Developer Guide.

                " + } + }, + "com.amazonaws.keyspaces#EncryptionType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "CUSTOMER_MANAGED_KMS_KEY", + "name": "CUSTOMER_MANAGED_KMS_KEY" + }, + { + "value": "AWS_OWNED_KMS_KEY", + "name": "AWS_OWNED_KMS_KEY" + } + ] + } + }, + "com.amazonaws.keyspaces#GenericString": { + "type": "string" + }, + "com.amazonaws.keyspaces#GetKeyspace": { + "type": "operation", + "input": { + "target": "com.amazonaws.keyspaces#GetKeyspaceRequest" + }, + "output": { + "target": "com.amazonaws.keyspaces#GetKeyspaceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.keyspaces#AccessDeniedException" + }, + { + "target": "com.amazonaws.keyspaces#InternalServerException" + }, + { + "target": "com.amazonaws.keyspaces#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.keyspaces#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.keyspaces#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                Returns the name and the Amazon Resource Name (ARN) of the specified keyspace.

                " + } + }, + "com.amazonaws.keyspaces#GetKeyspaceRequest": { + "type": "structure", + "members": { + "keyspaceName": { + "target": "com.amazonaws.keyspaces#KeyspaceName", + "traits": { + "smithy.api#documentation": "

                The name of the keyspace.

                ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.keyspaces#GetKeyspaceResponse": { + "type": "structure", + "members": { + "keyspaceName": { + "target": "com.amazonaws.keyspaces#KeyspaceName", + "traits": { + "smithy.api#documentation": "

                The name of the keyspace.

                ", + "smithy.api#required": {} + } + }, + "resourceArn": { + "target": "com.amazonaws.keyspaces#ARN", + "traits": { + "smithy.api#documentation": "

                The ARN of the keyspace.

                ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.keyspaces#GetTable": { + "type": "operation", + "input": { + "target": "com.amazonaws.keyspaces#GetTableRequest" + }, + "output": { + "target": "com.amazonaws.keyspaces#GetTableResponse" + }, + "errors": [ + { + "target": "com.amazonaws.keyspaces#AccessDeniedException" + }, + { + "target": "com.amazonaws.keyspaces#InternalServerException" + }, + { + "target": "com.amazonaws.keyspaces#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.keyspaces#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.keyspaces#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                Returns information about the table, including the table's name and current status, the keyspace name, \n configuration settings, and metadata.

                \n

                To read table metadata using GetTable, Select action \n permissions for the table and system tables are required to complete the operation.

                " + } + }, + "com.amazonaws.keyspaces#GetTableRequest": { + "type": "structure", + "members": { + "keyspaceName": { + "target": "com.amazonaws.keyspaces#KeyspaceName", + "traits": { + "smithy.api#documentation": "

                The name of the keyspace that the table is stored in.

                ", + "smithy.api#required": {} + } + }, + "tableName": { + "target": "com.amazonaws.keyspaces#TableName", + "traits": { + "smithy.api#documentation": "

                The name of the table.

                ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.keyspaces#GetTableResponse": { + "type": "structure", + "members": { + "keyspaceName": { + "target": "com.amazonaws.keyspaces#KeyspaceName", + "traits": { + "smithy.api#documentation": "

                The name of the keyspace that the specified table is stored in.

                ", + "smithy.api#required": {} + } + }, + "tableName": { + "target": "com.amazonaws.keyspaces#TableName", + "traits": { + "smithy.api#documentation": "

                The name of the specified table.

                ", + "smithy.api#required": {} + } + }, + "resourceArn": { + "target": "com.amazonaws.keyspaces#ARN", + "traits": { + "smithy.api#documentation": "

                The Amazon Resource Name (ARN) of the specified table.

                ", + "smithy.api#required": {} + } + }, + "creationTimestamp": { + "target": "com.amazonaws.keyspaces#Timestamp", + "traits": { + "smithy.api#documentation": "

                The creation timestamp of the specified table.

                " + } + }, + "status": { + "target": "com.amazonaws.keyspaces#TableStatus", + "traits": { + "smithy.api#documentation": "

                The current status of the specified table.

                " + } + }, + "schemaDefinition": { + "target": "com.amazonaws.keyspaces#SchemaDefinition", + "traits": { + "smithy.api#documentation": "

                The schema definition of the specified table.

                " + } + }, + "capacitySpecification": { + "target": "com.amazonaws.keyspaces#CapacitySpecificationSummary", + "traits": { + "smithy.api#documentation": "

                The read/write throughput capacity mode for a table. The options are:

                \n
                  \n
                • \n

                  \n throughputMode:PAY_PER_REQUEST and

                  \n
                • \n
                • \n

                  \n throughputMode:PROVISIONED.

                  \n
                • \n
                " + } + }, + "encryptionSpecification": { + "target": "com.amazonaws.keyspaces#EncryptionSpecification", + "traits": { + "smithy.api#documentation": "

                The encryption settings of the specified table.

                " + } + }, + "pointInTimeRecovery": { + "target": "com.amazonaws.keyspaces#PointInTimeRecoverySummary", + "traits": { + "smithy.api#documentation": "

                The point-in-time recovery status of the specified table.

                " + } + }, + "ttl": { + "target": "com.amazonaws.keyspaces#TimeToLive", + "traits": { + "smithy.api#documentation": "

                The custom Time to Live settings of the specified table.

                " + } + }, + "defaultTimeToLive": { + "target": "com.amazonaws.keyspaces#DefaultTimeToLive", + "traits": { + "smithy.api#documentation": "

                The default Time to Live settings of the specified table.

                " + } + }, + "comment": { + "target": "com.amazonaws.keyspaces#Comment", + "traits": { + "smithy.api#documentation": "

                The description of the specified table.

                " + } + } + } + }, + "com.amazonaws.keyspaces#InternalServerException": { + "type": "structure", + "members": { + "message": { + "target": "smithy.api#String" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "InternalServerException", + "httpResponseCode": 500 + }, + "smithy.api#documentation": "

                Amazon Keyspaces was unable to fully process this request because of an internal server error.

                ", + "smithy.api#error": "server", + "smithy.api#httpError": 500 + } + }, + "com.amazonaws.keyspaces#KeyspaceName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 48 + }, + "smithy.api#pattern": "^[a-zA-Z0-9][a-zA-Z0-9_]{1,47}$" + } + }, + "com.amazonaws.keyspaces#KeyspaceSummary": { + "type": "structure", + "members": { + "keyspaceName": { + "target": "com.amazonaws.keyspaces#KeyspaceName", + "traits": { + "smithy.api#documentation": "

                The name of the keyspace.

                ", + "smithy.api#required": {} + } + }, + "resourceArn": { + "target": "com.amazonaws.keyspaces#ARN", + "traits": { + "smithy.api#documentation": "

                The unique identifier of the keyspace in the format of an Amazon Resource Name (ARN).

                ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                Represents the properties of a keyspace.

                " + } + }, + "com.amazonaws.keyspaces#KeyspaceSummaryList": { + "type": "list", + "member": { + "target": "com.amazonaws.keyspaces#KeyspaceSummary" + } + }, + "com.amazonaws.keyspaces#KeyspacesService": { + "type": "service", + "traits": { + "aws.api#service": { + "sdkId": "Keyspaces", + "cloudFormationName": "Cassandra", + "arnNamespace": "cassandra", + "cloudTrailEventSource": "cassandra.amazonaws.com", + "endpointPrefix": "cassandra" + }, + "aws.auth#sigv4": { + "name": "cassandra" + }, + "aws.protocols#awsJson1_0": {}, + "smithy.api#documentation": "

                Amazon Keyspaces (for Apache Cassandra) is a scalable,\n highly available, and managed Apache Cassandra-compatible database service. Amazon Keyspaces makes it easy to migrate,\n run, and scale Cassandra workloads in the Amazon Web Services Cloud. With just a few clicks on the Amazon Web Services Management Console or a few lines of code, \n you can create keyspaces and tables in Amazon Keyspaces, without deploying any infrastructure or installing software.

                \n \n

                In addition to supporting Cassandra Query Language (CQL) requests via open-source Cassandra drivers, \n Amazon Keyspaces supports data definition language (DDL) operations to manage keyspaces and tables using the Amazon Web Services SDK and CLI. This API reference describes\n the supported DDL operations in detail.

                \n \n

                For the list of all supported CQL APIs, see Supported Cassandra APIs, operations, and data types \n in Amazon Keyspaces in the Amazon Keyspaces Developer\n Guide.

                \n \n

                To learn how Amazon Keyspaces API actions are tracked in CloudTrail, see Amazon Keyspaces information in CloudTrail in the Amazon Keyspaces Developer\n Guide.

                \n \n

                For more information about Amazon Web Services APIs, for example how to implement retry logic or how to sign Amazon Web Services API requests, see Amazon Web Services APIs in the General Reference.

                ", + "smithy.api#title": "Amazon Keyspaces" + }, + "version": "2022-02-10", + "operations": [ + { + "target": "com.amazonaws.keyspaces#CreateKeyspace" + }, + { + "target": "com.amazonaws.keyspaces#CreateTable" + }, + { + "target": "com.amazonaws.keyspaces#DeleteKeyspace" + }, + { + "target": "com.amazonaws.keyspaces#DeleteTable" + }, + { + "target": "com.amazonaws.keyspaces#GetKeyspace" + }, + { + "target": "com.amazonaws.keyspaces#GetTable" + }, + { + "target": "com.amazonaws.keyspaces#ListKeyspaces" + }, + { + "target": "com.amazonaws.keyspaces#ListTables" + }, + { + "target": "com.amazonaws.keyspaces#ListTagsForResource" + }, + { + "target": "com.amazonaws.keyspaces#RestoreTable" + }, + { + "target": "com.amazonaws.keyspaces#TagResource" + }, + { + "target": "com.amazonaws.keyspaces#UntagResource" + }, + { + "target": "com.amazonaws.keyspaces#UpdateTable" + } + ] + }, + "com.amazonaws.keyspaces#ListKeyspaces": { + "type": "operation", + "input": { + "target": "com.amazonaws.keyspaces#ListKeyspacesRequest" + }, + "output": { + "target": "com.amazonaws.keyspaces#ListKeyspacesResponse" + }, + "errors": [ + { + "target": "com.amazonaws.keyspaces#AccessDeniedException" + }, + { + "target": "com.amazonaws.keyspaces#InternalServerException" + }, + { + "target": "com.amazonaws.keyspaces#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.keyspaces#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.keyspaces#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                Returns a list of keyspaces.

                ", + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "keyspaces" + } + } + }, + "com.amazonaws.keyspaces#ListKeyspacesRequest": { + "type": "structure", + "members": { + "nextToken": { + "target": "com.amazonaws.keyspaces#NextToken", + "traits": { + "smithy.api#documentation": "

                The pagination token. To resume pagination, provide the NextToken value as an argument of a subsequent API invocation.

                " + } + }, + "maxResults": { + "target": "com.amazonaws.keyspaces#MaxResults", + "traits": { + "smithy.api#documentation": "

                The total number of keyspaces to return in the output. If the total number of keyspaces available \n is more than the value specified, a NextToken is provided in the output. To resume pagination, \n provide the NextToken value as an argument of a subsequent API invocation.

                " + } + } + } + }, + "com.amazonaws.keyspaces#ListKeyspacesResponse": { + "type": "structure", + "members": { + "nextToken": { + "target": "com.amazonaws.keyspaces#NextToken", + "traits": { + "smithy.api#documentation": "

                A token to specify where to start paginating. This is the NextToken from a previously truncated response.

                " + } + }, + "keyspaces": { + "target": "com.amazonaws.keyspaces#KeyspaceSummaryList", + "traits": { + "smithy.api#documentation": "

                A list of keyspaces.

                ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.keyspaces#ListTables": { + "type": "operation", + "input": { + "target": "com.amazonaws.keyspaces#ListTablesRequest" + }, + "output": { + "target": "com.amazonaws.keyspaces#ListTablesResponse" + }, + "errors": [ + { + "target": "com.amazonaws.keyspaces#AccessDeniedException" + }, + { + "target": "com.amazonaws.keyspaces#InternalServerException" + }, + { + "target": "com.amazonaws.keyspaces#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.keyspaces#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.keyspaces#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                Returns a list of tables for a specified keyspace.

                ", + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "tables" + } + } + }, + "com.amazonaws.keyspaces#ListTablesRequest": { + "type": "structure", + "members": { + "nextToken": { + "target": "com.amazonaws.keyspaces#NextToken", + "traits": { + "smithy.api#documentation": "

                The pagination token. To resume pagination, provide the NextToken value as an argument of a subsequent API invocation.

                " + } + }, + "maxResults": { + "target": "com.amazonaws.keyspaces#MaxResults", + "traits": { + "smithy.api#documentation": "

                The total number of tables to return in the output. If the total number of tables available \n is more than the value specified, a NextToken is provided in the output. To resume pagination, \n provide the NextToken value as an argument of a subsequent API invocation.

                " + } + }, + "keyspaceName": { + "target": "com.amazonaws.keyspaces#KeyspaceName", + "traits": { + "smithy.api#documentation": "

                The name of the keyspace.

                ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.keyspaces#ListTablesResponse": { + "type": "structure", + "members": { + "nextToken": { + "target": "com.amazonaws.keyspaces#NextToken", + "traits": { + "smithy.api#documentation": "

                A token to specify where to start paginating. This is the NextToken from a previously truncated response.

                " + } + }, + "tables": { + "target": "com.amazonaws.keyspaces#TableSummaryList", + "traits": { + "smithy.api#documentation": "

                A list of tables.

                " + } + } + } + }, + "com.amazonaws.keyspaces#ListTagsForResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.keyspaces#ListTagsForResourceRequest" + }, + "output": { + "target": "com.amazonaws.keyspaces#ListTagsForResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.keyspaces#AccessDeniedException" + }, + { + "target": "com.amazonaws.keyspaces#InternalServerException" + }, + { + "target": "com.amazonaws.keyspaces#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.keyspaces#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.keyspaces#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                Returns a list of all tags associated with the specified Amazon Keyspaces resource.

                ", + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "tags" + } + } + }, + "com.amazonaws.keyspaces#ListTagsForResourceRequest": { + "type": "structure", + "members": { + "resourceArn": { + "target": "com.amazonaws.keyspaces#ARN", + "traits": { + "smithy.api#documentation": "

                The Amazon Resource Name (ARN) of the Amazon Keyspaces resource.

                ", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.keyspaces#NextToken", + "traits": { + "smithy.api#documentation": "

                The pagination token. To resume pagination, provide the NextToken value as an argument of a subsequent API invocation.

                " + } + }, + "maxResults": { + "target": "com.amazonaws.keyspaces#MaxResults", + "traits": { + "smithy.api#documentation": "

                The total number of tags to return in the output. If the total number of tags available \n is more than the value specified, a NextToken is provided in the output. To resume pagination, \n provide the NextToken value as an argument of a subsequent API invocation.

                " + } + } + } + }, + "com.amazonaws.keyspaces#ListTagsForResourceResponse": { + "type": "structure", + "members": { + "nextToken": { + "target": "com.amazonaws.keyspaces#NextToken", + "traits": { + "smithy.api#documentation": "

                A token to specify where to start paginating. This is the NextToken from a previously truncated response.

                " + } + }, + "tags": { + "target": "com.amazonaws.keyspaces#TagList", + "traits": { + "smithy.api#documentation": "

                A list of tags.

                " + } + } + } + }, + "com.amazonaws.keyspaces#MaxResults": { + "type": "integer", + "traits": { + "smithy.api#box": {}, + "smithy.api#range": { + "min": 1, + "max": 1000 + } + } + }, + "com.amazonaws.keyspaces#NextToken": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 2048 + } + } + }, + "com.amazonaws.keyspaces#PartitionKey": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.keyspaces#GenericString", + "traits": { + "smithy.api#documentation": "

                The name(s) of the partition key column(s).

                ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                The partition key portion of the primary key is required \n and determines how Amazon Keyspaces stores the data. \n The partition key can be a single column, or it can be a compound value composed of two or more columns.

                " + } + }, + "com.amazonaws.keyspaces#PartitionKeyList": { + "type": "list", + "member": { + "target": "com.amazonaws.keyspaces#PartitionKey" + }, + "traits": { + "smithy.api#length": { + "min": 1 + } + } + }, + "com.amazonaws.keyspaces#PointInTimeRecovery": { + "type": "structure", + "members": { + "status": { + "target": "com.amazonaws.keyspaces#PointInTimeRecoveryStatus", + "traits": { + "smithy.api#documentation": "

                The options are:

                \n
                  \n
                • \n

                  \n ENABLED\n

                  \n
                • \n
                • \n

                  \n DISABLED\n

                  \n
                • \n
                ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                Point-in-time recovery (PITR) helps protect your Amazon Keyspaces tables from accidental write or delete operations by providing you continuous backups of your table data.

                \n

                For more information, see Point-in-time recovery in the Amazon Keyspaces Developer\n Guide.

                " + } + }, + "com.amazonaws.keyspaces#PointInTimeRecoveryStatus": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "ENABLED", + "name": "ENABLED" + }, + { + "value": "DISABLED", + "name": "DISABLED" + } + ] + } + }, + "com.amazonaws.keyspaces#PointInTimeRecoverySummary": { + "type": "structure", + "members": { + "status": { + "target": "com.amazonaws.keyspaces#PointInTimeRecoveryStatus", + "traits": { + "smithy.api#documentation": "

                Shows if point-in-time recovery is enabled or disabled for the specified table.

                ", + "smithy.api#required": {} + } + }, + "earliestRestorableTimestamp": { + "target": "com.amazonaws.keyspaces#Timestamp", + "traits": { + "smithy.api#documentation": "

                Specifies the earliest possible restore point of the table in ISO 8601 format.

                " + } + } + }, + "traits": { + "smithy.api#documentation": "

                The point-in-time recovery status of the specified table.

                " + } + }, + "com.amazonaws.keyspaces#ResourceNotFoundException": { + "type": "structure", + "members": { + "message": { + "target": "smithy.api#String" + }, + "resourceArn": { + "target": "com.amazonaws.keyspaces#ARN", + "traits": { + "smithy.api#documentation": "

                The unique identifier in the format of Amazon Resource Name (ARN), for the resource not found.

                " + } + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "ResourceNotFoundException", + "httpResponseCode": 404 + }, + "smithy.api#documentation": "

                The operation tried to access a keyspace or table that doesn't exist. The resource might not be specified correctly, or its status might not be ACTIVE.

                ", + "smithy.api#error": "client", + "smithy.api#httpError": 404 + } + }, + "com.amazonaws.keyspaces#RestoreTable": { + "type": "operation", + "input": { + "target": "com.amazonaws.keyspaces#RestoreTableRequest" + }, + "output": { + "target": "com.amazonaws.keyspaces#RestoreTableResponse" + }, + "errors": [ + { + "target": "com.amazonaws.keyspaces#AccessDeniedException" + }, + { + "target": "com.amazonaws.keyspaces#ConflictException" + }, + { + "target": "com.amazonaws.keyspaces#InternalServerException" + }, + { + "target": "com.amazonaws.keyspaces#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.keyspaces#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.keyspaces#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                Restores the specified table to the specified point in time within the\n earliest_restorable_timestamp and the current time. For more information about restore points, see \n \n Time window for PITR continuous backups in the Amazon Keyspaces Developer Guide. \n

                \n

                \n Any number of users can execute up to 4 concurrent restores (any type of restore) in a given account.

                \n

                When you restore using point in time recovery, Amazon Keyspaces restores your source table's schema and data to the state \n based on the selected timestamp (day:hour:minute:second) to a new table. The Time to Live (TTL) settings\n are also restored to the state based on the selected timestamp.

                \n

                In addition to the table's schema, data, and TTL settings, RestoreTable restores the capacity mode, encryption, and\n point-in-time recovery settings from the source table. \n Unlike the table's schema data and TTL settings, which are restored based on the selected timestamp, \n these settings are always restored based on the table's settings as of the current time or when the table was deleted.

                \n

                You can also overwrite these settings during restore:

                \n
                  \n
                • \n

                  Read/write capacity mode

                  \n
                • \n
                • \n

                  Provisioned throughput capacity settings

                  \n
                • \n
                • \n

                  Point-in-time (PITR) settings

                  \n
                • \n
                • \n

                  Tags

                  \n
                • \n
                \n

                For more information, see PITR restore settings in the Amazon Keyspaces Developer\n Guide.

                \n

                The following settings are not restored, and you must configure them manually for the\n new table.

                \n
                  \n
                • \n

                  Automatic scaling policies (for tables that use provisioned capacity\n mode)

                  \n
                • \n
                • \n

                  Identity and Access Management (IAM) policies

                  \n
                • \n
                • \n

                  Amazon CloudWatch metrics and alarms

                  \n
                • \n
                " + } + }, + "com.amazonaws.keyspaces#RestoreTableRequest": { + "type": "structure", + "members": { + "sourceKeyspaceName": { + "target": "com.amazonaws.keyspaces#KeyspaceName", + "traits": { + "smithy.api#documentation": "

                The keyspace name of the source table.

                ", + "smithy.api#required": {} + } + }, + "sourceTableName": { + "target": "com.amazonaws.keyspaces#TableName", + "traits": { + "smithy.api#documentation": "

                The name of the source table.

                ", + "smithy.api#required": {} + } + }, + "targetKeyspaceName": { + "target": "com.amazonaws.keyspaces#KeyspaceName", + "traits": { + "smithy.api#documentation": "

                The name of the target keyspace.

                ", + "smithy.api#required": {} + } + }, + "targetTableName": { + "target": "com.amazonaws.keyspaces#TableName", + "traits": { + "smithy.api#documentation": "

                The name of the target table.

                ", + "smithy.api#required": {} + } + }, + "restoreTimestamp": { + "target": "com.amazonaws.keyspaces#Timestamp", + "traits": { + "smithy.api#documentation": "

                The restore timestamp in ISO 8601 format.

                " + } + }, + "capacitySpecificationOverride": { + "target": "com.amazonaws.keyspaces#CapacitySpecification", + "traits": { + "smithy.api#documentation": "

                Specifies the read/write throughput capacity mode for the target table. The options are:

                \n
                  \n
                • \n

                  \n throughputMode:PAY_PER_REQUEST and

                  \n
                • \n
                • \n

                  \n throughputMode:PROVISIONED. The provisioned capacity mode requires\n readCapacityUnits and writeCapacityUnits as inputs.

                  \n
                • \n
                \n

                The default is throughput_mode:PAY_PER_REQUEST.

                \n

                For more information, see Read/write capacity modes in the Amazon Keyspaces Developer\n Guide.

                " + } + }, + "encryptionSpecificationOverride": { + "target": "com.amazonaws.keyspaces#EncryptionSpecification", + "traits": { + "smithy.api#documentation": "

                \n Specifies the encryption settings for the target table. You can choose one of the following KMS keys (KMS keys):

                \n \n
                  \n
                • \n

                  \n type:AWS_OWNED_KMS_KEY - This key is owned by Amazon Keyspaces.

                  \n
                • \n
                • \n

                  \n type:CUSTOMER_MANAGED_KMS_KEY - This key is stored in your account and is created, owned, and managed by you. \n This option \n requires the kms_key_identifier of the KMS key in Amazon Resource Name (ARN) format as input.

                  \n
                • \n
                \n

                The default is type:AWS_OWNED_KMS_KEY.

                \n

                For more information, see Encryption at rest in the Amazon Keyspaces Developer\n Guide.

                " + } + }, + "pointInTimeRecoveryOverride": { + "target": "com.amazonaws.keyspaces#PointInTimeRecovery", + "traits": { + "smithy.api#documentation": "

                \n Specifies the pointInTimeRecovery settings for the target\n table. The options are:

                \n
                  \n
                • \n

                  \n ENABLED \n

                  \n
                • \n
                • \n

                  \n DISABLED \n

                  \n
                • \n
                \n

                If it's not specified, the default is DISABLED.

                \n

                For more information, see Point-in-time recovery in the Amazon Keyspaces Developer\n Guide.

                " + } + }, + "tagsOverride": { + "target": "com.amazonaws.keyspaces#TagList", + "traits": { + "smithy.api#documentation": "

                A list of key-value pair tags to be\n attached to the restored table.

                \n

                For more information, see Adding tags and labels to Amazon Keyspaces resources in the Amazon Keyspaces Developer\n Guide.

                " + } + } + } + }, + "com.amazonaws.keyspaces#RestoreTableResponse": { + "type": "structure", + "members": { + "restoredTableARN": { + "target": "com.amazonaws.keyspaces#ARN", + "traits": { + "smithy.api#documentation": "

                The Amazon Resource Name (ARN) of the restored table.

                ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.keyspaces#SchemaDefinition": { + "type": "structure", + "members": { + "allColumns": { + "target": "com.amazonaws.keyspaces#ColumnDefinitionList", + "traits": { + "smithy.api#documentation": "

                The regular columns of the table.

                ", + "smithy.api#required": {} + } + }, + "partitionKeys": { + "target": "com.amazonaws.keyspaces#PartitionKeyList", + "traits": { + "smithy.api#documentation": "

                The columns that are part of the partition key of the table.

                ", + "smithy.api#required": {} + } + }, + "clusteringKeys": { + "target": "com.amazonaws.keyspaces#ClusteringKeyList", + "traits": { + "smithy.api#documentation": "

                The columns that are part of the clustering key of the table.

                " + } + }, + "staticColumns": { + "target": "com.amazonaws.keyspaces#StaticColumnList", + "traits": { + "smithy.api#documentation": "

                The columns that have been defined as STATIC. Static columns store values that are shared by all rows in the same partition.

                " + } + } + }, + "traits": { + "smithy.api#documentation": "

                Describes the schema of the table.

                " + } + }, + "com.amazonaws.keyspaces#ServiceQuotaExceededException": { + "type": "structure", + "members": { + "message": { + "target": "smithy.api#String" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "ServiceQuotaExceededException", + "httpResponseCode": 402 + }, + "smithy.api#documentation": "

                The operation exceeded the service quota for this resource. For more information on service quotas, see Quotas in the Amazon Keyspaces Developer\n Guide.

                ", + "smithy.api#error": "client", + "smithy.api#httpError": 402 + } + }, + "com.amazonaws.keyspaces#SortOrder": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "ASC", + "name": "ASC" + }, + { + "value": "DESC", + "name": "DESC" + } + ] + } + }, + "com.amazonaws.keyspaces#StaticColumn": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.keyspaces#GenericString", + "traits": { + "smithy.api#documentation": "

                The name of the static column.

                ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                The static columns of the table. Static columns store values that are shared by all rows in the same partition.

                " + } + }, + "com.amazonaws.keyspaces#StaticColumnList": { + "type": "list", + "member": { + "target": "com.amazonaws.keyspaces#StaticColumn" + } + }, + "com.amazonaws.keyspaces#TableName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 48 + }, + "smithy.api#pattern": "^[a-zA-Z0-9][a-zA-Z0-9_]{1,47}$" + } + }, + "com.amazonaws.keyspaces#TableStatus": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "ACTIVE", + "name": "ACTIVE" + }, + { + "value": "CREATING", + "name": "CREATING" + }, + { + "value": "UPDATING", + "name": "UPDATING" + }, + { + "value": "DELETING", + "name": "DELETING" + }, + { + "value": "DELETED", + "name": "DELETED" + }, + { + "value": "RESTORING", + "name": "RESTORING" + }, + { + "value": "INACCESSIBLE_ENCRYPTION_CREDENTIALS", + "name": "INACCESSIBLE_ENCRYPTION_CREDENTIALS" + } + ] + } + }, + "com.amazonaws.keyspaces#TableSummary": { + "type": "structure", + "members": { + "keyspaceName": { + "target": "com.amazonaws.keyspaces#KeyspaceName", + "traits": { + "smithy.api#documentation": "

                The name of the keyspace that the table is stored in.

                ", + "smithy.api#required": {} + } + }, + "tableName": { + "target": "com.amazonaws.keyspaces#TableName", + "traits": { + "smithy.api#documentation": "

                The name of the table.

                ", + "smithy.api#required": {} + } + }, + "resourceArn": { + "target": "com.amazonaws.keyspaces#ARN", + "traits": { + "smithy.api#documentation": "

                The unique identifier of the table in the format of an Amazon Resource Name (ARN).

                ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                Returns the name of the specified table, the keyspace it is stored in, and the unique identifier in the format of an Amazon Resource Name (ARN).

                " + } + }, + "com.amazonaws.keyspaces#TableSummaryList": { + "type": "list", + "member": { + "target": "com.amazonaws.keyspaces#TableSummary" + } + }, + "com.amazonaws.keyspaces#Tag": { + "type": "structure", + "members": { + "key": { + "target": "com.amazonaws.keyspaces#TagKey", + "traits": { + "smithy.api#documentation": "

                The key of the tag. Tag keys are case sensitive. Each Amazon Keyspaces resource can only have up to one tag with the same key. If you try to add an \n existing tag (same key), the existing tag value will be updated to the new value.

                ", + "smithy.api#required": {} + } + }, + "value": { + "target": "com.amazonaws.keyspaces#TagValue", + "traits": { + "smithy.api#documentation": "

                The value of the tag. Tag values are case-sensitive and can be null.

                ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                Describes a tag. A tag is a key-value pair. You can add up to 50 tags to a single Amazon Keyspaces resource.

                \n

                Amazon Web Services-assigned tag names and values are automatically assigned the aws: prefix, which the user cannot assign. \n Amazon Web Services-assigned tag names do not count towards the tag limit of 50. User-assigned tag names have the \n prefix user: in the Cost Allocation Report. You cannot backdate the application of a tag.

                \n

                For more information, see Adding tags and labels to Amazon Keyspaces resources in the Amazon Keyspaces Developer\n Guide.

                " + } + }, + "com.amazonaws.keyspaces#TagKey": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + } + } + }, + "com.amazonaws.keyspaces#TagList": { + "type": "list", + "member": { + "target": "com.amazonaws.keyspaces#Tag" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 60 + } + } + }, + "com.amazonaws.keyspaces#TagResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.keyspaces#TagResourceRequest" + }, + "output": { + "target": "com.amazonaws.keyspaces#TagResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.keyspaces#AccessDeniedException" + }, + { + "target": "com.amazonaws.keyspaces#InternalServerException" + }, + { + "target": "com.amazonaws.keyspaces#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.keyspaces#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.keyspaces#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                Associates a set of tags with an Amazon Keyspaces resource. You can then \n activate these user-defined tags so that they appear on the Cost Management Console for cost allocation tracking.\n For more information, see Adding tags and labels to Amazon Keyspaces resources in the Amazon Keyspaces Developer\n Guide.

                \n

                For IAM policy examples that show how to control access to Amazon Keyspaces resources based on tags, \n see Amazon Keyspaces resource access based on tags\n in the Amazon Keyspaces Developer Guide.

                " + } + }, + "com.amazonaws.keyspaces#TagResourceRequest": { + "type": "structure", + "members": { + "resourceArn": { + "target": "com.amazonaws.keyspaces#ARN", + "traits": { + "smithy.api#documentation": "

                The Amazon Resource Name (ARN) of the Amazon Keyspaces resource to which to add tags.

                ", + "smithy.api#required": {} + } + }, + "tags": { + "target": "com.amazonaws.keyspaces#TagList", + "traits": { + "smithy.api#documentation": "

                The tags to be assigned to the Amazon Keyspaces resource.

                ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.keyspaces#TagResourceResponse": { + "type": "structure", + "members": {} + }, + "com.amazonaws.keyspaces#TagValue": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + } + } + }, + "com.amazonaws.keyspaces#ThroughputMode": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "PAY_PER_REQUEST", + "name": "PAY_PER_REQUEST" + }, + { + "value": "PROVISIONED", + "name": "PROVISIONED" + } + ] + } + }, + "com.amazonaws.keyspaces#TimeToLive": { + "type": "structure", + "members": { + "status": { + "target": "com.amazonaws.keyspaces#TimeToLiveStatus", + "traits": { + "smithy.api#documentation": "

                Shows how to enable custom Time to Live (TTL) settings for the specified table.

                ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                Enable custom Time to Live (TTL) settings for rows and columns without setting a TTL default for the specified table.

                \n

                For more information, see Enabling TTL on tables in the Amazon Keyspaces Developer\n Guide.

                " + } + }, + "com.amazonaws.keyspaces#TimeToLiveStatus": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "ENABLED", + "name": "ENABLED" + } + ] + } + }, + "com.amazonaws.keyspaces#Timestamp": { + "type": "timestamp" + }, + "com.amazonaws.keyspaces#UntagResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.keyspaces#UntagResourceRequest" + }, + "output": { + "target": "com.amazonaws.keyspaces#UntagResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.keyspaces#AccessDeniedException" + }, + { + "target": "com.amazonaws.keyspaces#ConflictException" + }, + { + "target": "com.amazonaws.keyspaces#InternalServerException" + }, + { + "target": "com.amazonaws.keyspaces#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.keyspaces#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.keyspaces#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                Removes the association of tags from a Amazon Keyspaces resource.

                " + } + }, + "com.amazonaws.keyspaces#UntagResourceRequest": { + "type": "structure", + "members": { + "resourceArn": { + "target": "com.amazonaws.keyspaces#ARN", + "traits": { + "smithy.api#documentation": "

                The Amazon Keyspaces resource that the tags will be removed from. This value is an Amazon Resource Name (ARN).

                ", + "smithy.api#required": {} + } + }, + "tags": { + "target": "com.amazonaws.keyspaces#TagList", + "traits": { + "smithy.api#documentation": "

                A list of existing tags to be removed from the Amazon Keyspaces resource.

                ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.keyspaces#UntagResourceResponse": { + "type": "structure", + "members": {} + }, + "com.amazonaws.keyspaces#UpdateTable": { + "type": "operation", + "input": { + "target": "com.amazonaws.keyspaces#UpdateTableRequest" + }, + "output": { + "target": "com.amazonaws.keyspaces#UpdateTableResponse" + }, + "errors": [ + { + "target": "com.amazonaws.keyspaces#AccessDeniedException" + }, + { + "target": "com.amazonaws.keyspaces#ConflictException" + }, + { + "target": "com.amazonaws.keyspaces#InternalServerException" + }, + { + "target": "com.amazonaws.keyspaces#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.keyspaces#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.keyspaces#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                Adds new columns to the table or updates one of the table's settings, for example\n capacity mode, encryption, point-in-time recovery, or ttl settings.\n Note that you can only update one specific table setting per update operation.

                " + } + }, + "com.amazonaws.keyspaces#UpdateTableRequest": { + "type": "structure", + "members": { + "keyspaceName": { + "target": "com.amazonaws.keyspaces#KeyspaceName", + "traits": { + "smithy.api#documentation": "

                The name of the keyspace the specified table is stored in.

                ", + "smithy.api#required": {} + } + }, + "tableName": { + "target": "com.amazonaws.keyspaces#TableName", + "traits": { + "smithy.api#documentation": "

                The name of the table.

                ", + "smithy.api#required": {} + } + }, + "addColumns": { + "target": "com.amazonaws.keyspaces#ColumnDefinitionList", + "traits": { + "smithy.api#documentation": "

                For each column to be added to the specified table:

                \n
                  \n
                • \n

                  \n \n name\n - The name\n of the column.

                  \n
                • \n
                • \n

                  \n \n type\n - An Amazon Keyspaces\n data type. For more information, see Data types in the Amazon Keyspaces Developer\n Guide.

                  \n
                • \n
                " + } + }, + "capacitySpecification": { + "target": "com.amazonaws.keyspaces#CapacitySpecification", + "traits": { + "smithy.api#documentation": "

                Modifies the read/write throughput capacity mode for the table. The options are:

                \n
                  \n
                • \n

                  \n throughputMode:PAY_PER_REQUEST and

                  \n
                • \n
                • \n

                  \n throughputMode:PROVISIONED. The provisioned capacity mode requires\n readCapacityUnits and writeCapacityUnits as inputs.

                  \n
                • \n
                \n

                The default is throughput_mode:PAY_PER_REQUEST.

                \n

                For more information, see Read/write capacity modes in the Amazon Keyspaces Developer\n Guide.

                " + } + }, + "encryptionSpecification": { + "target": "com.amazonaws.keyspaces#EncryptionSpecification", + "traits": { + "smithy.api#documentation": "

                \n Modifies the encryption settings of the table. You can choose one of the following KMS key (KMS key):

                \n \n
                  \n
                • \n

                  \n type:AWS_OWNED_KMS_KEY - This key is owned by Amazon Keyspaces.

                  \n
                • \n
                • \n

                  \n type:CUSTOMER_MANAGED_KMS_KEY - This key is stored in your account and is created, owned, and managed by you. \n This option \n requires the kms_key_identifier of the KMS key in Amazon Resource Name (ARN) format as input.

                  \n
                • \n
                \n

                The default is AWS_OWNED_KMS_KEY.

                \n

                For more information, see Encryption at rest in the Amazon Keyspaces Developer\n Guide.

                " + } + }, + "pointInTimeRecovery": { + "target": "com.amazonaws.keyspaces#PointInTimeRecovery", + "traits": { + "smithy.api#documentation": "

                \n Modifies the pointInTimeRecovery settings of the table. The options are:

                \n
                  \n
                • \n

                  \n ENABLED \n

                  \n
                • \n
                • \n

                  \n DISABLED \n

                  \n
                • \n
                \n

                If it's not specified, the default is DISABLED.

                \n

                For more information, see Point-in-time recovery in the Amazon Keyspaces Developer\n Guide.

                " + } + }, + "ttl": { + "target": "com.amazonaws.keyspaces#TimeToLive", + "traits": { + "smithy.api#documentation": "

                Modifies Time to Live custom settings for the table. The options are:

                \n
                  \n
                • \n

                  \n status:enabled \n

                  \n
                • \n
                • \n

                  \n status:disabled \n

                  \n
                • \n
                \n

                The default is status:disabled. After\n ttl is enabled, you can't disable it\n for the table.

                \n

                For more information, see Expiring data by using Amazon Keyspaces Time to Live (TTL) in the Amazon Keyspaces Developer\n Guide.

                " + } + }, + "defaultTimeToLive": { + "target": "com.amazonaws.keyspaces#DefaultTimeToLive", + "traits": { + "smithy.api#documentation": "

                The default Time to Live setting in seconds for the table.

                \n

                For more information, see Setting the default TTL value for a table in the Amazon Keyspaces Developer\n Guide.

                " + } + } + } + }, + "com.amazonaws.keyspaces#UpdateTableResponse": { + "type": "structure", + "members": { + "resourceArn": { + "target": "com.amazonaws.keyspaces#ARN", + "traits": { + "smithy.api#documentation": "

                The Amazon Resource Name (ARN) of the modified table.

                ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.keyspaces#ValidationException": { + "type": "structure", + "members": { + "message": { + "target": "smithy.api#String" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "ValidationException", + "httpResponseCode": 400 + }, + "smithy.api#documentation": "

                The operation failed due to an invalid or malformed request.

                ", + "smithy.api#error": "client" + } + }, + "com.amazonaws.keyspaces#kmsKeyARN": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 5096 + } + } + } + } +} diff --git a/codegen/sdk-codegen/aws-models/lex-models-v2.json b/codegen/sdk-codegen/aws-models/lex-models-v2.json index fa092db2324f..4446bcf4ec67 100644 --- a/codegen/sdk-codegen/aws-models/lex-models-v2.json +++ b/codegen/sdk-codegen/aws-models/lex-models-v2.json @@ -2902,8 +2902,7 @@ "slotTypeId": { "target": "com.amazonaws.lexmodelsv2#BuiltInOrCustomSlotTypeId", "traits": { - "smithy.api#documentation": "

                The unique identifier for the slot type associated with this slot.\n The slot type determines the values that can be entered into the\n slot.

                ", - "smithy.api#required": {} + "smithy.api#documentation": "

                The unique identifier for the slot type associated with this slot.\n The slot type determines the values that can be entered into the\n slot.

                " } }, "valueElicitationSetting": { @@ -12609,8 +12608,7 @@ "slotTypeId": { "target": "com.amazonaws.lexmodelsv2#BuiltInOrCustomSlotTypeId", "traits": { - "smithy.api#documentation": "

                The unique identifier of the new slot type to associate with this\n slot.

                ", - "smithy.api#required": {} + "smithy.api#documentation": "

                The unique identifier of the new slot type to associate with this\n slot.

                " } }, "valueElicitationSetting": { diff --git a/codegen/sdk-codegen/aws-models/macie.json b/codegen/sdk-codegen/aws-models/macie.json index ee5542a960ee..7d904f6c4206 100644 --- a/codegen/sdk-codegen/aws-models/macie.json +++ b/codegen/sdk-codegen/aws-models/macie.json @@ -32,7 +32,7 @@ "com.amazonaws.macie#AWSAccountId": { "type": "string", "traits": { - "smithy.api#pattern": "[0-9]{12}" + "smithy.api#pattern": "^[0-9]{12}$" } }, "com.amazonaws.macie#AccessDeniedException": { @@ -46,7 +46,7 @@ } }, "traits": { - "smithy.api#documentation": "

                You do not have required permissions to access the requested resource.

                ", + "smithy.api#documentation": "

                (Discontinued) You do not have required permissions to access the requested resource.

                ", "smithy.api#error": "client" } }, @@ -67,7 +67,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Associates a specified AWS account with Amazon Macie Classic as a member\n account.

                " + "smithy.api#documentation": "

                (Discontinued) Associates a specified Amazon Web Services account with Amazon Macie Classic as a member\n account.

                " } }, "com.amazonaws.macie#AssociateMemberAccountRequest": { @@ -76,7 +76,7 @@ "memberAccountId": { "target": "com.amazonaws.macie#AWSAccountId", "traits": { - "smithy.api#documentation": "

                The ID of the AWS account that you want to associate with Amazon Macie Classic as a\n member account.

                ", + "smithy.api#documentation": "

                (Discontinued) The ID of the Amazon Web Services account that you want to associate with Amazon Macie\n Classic as a member account.

                ", "smithy.api#required": {} } } @@ -105,7 +105,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Associates specified S3 resources with Amazon Macie Classic for monitoring and data\n classification. If memberAccountId isn't specified, the action associates specified S3\n resources with Macie Classic for the current Macie Classic administrator account. If memberAccountId is specified,\n the action associates specified S3 resources with Macie Classic for the specified member\n account.

                " + "smithy.api#documentation": "

                (Discontinued) Associates specified S3 resources with Amazon Macie Classic for\n monitoring and data classification. If memberAccountId isn't specified, the action associates\n specified S3 resources with Macie Classic for the current Macie Classic administrator account.\n If memberAccountId is specified, the action associates specified S3 resources with Macie\n Classic for the specified member account.

                " } }, "com.amazonaws.macie#AssociateS3ResourcesRequest": { @@ -114,13 +114,13 @@ "memberAccountId": { "target": "com.amazonaws.macie#AWSAccountId", "traits": { - "smithy.api#documentation": "

                The ID of the Amazon Macie Classic member account whose resources you want to associate\n with Macie Classic.

                " + "smithy.api#documentation": "

                (Discontinued) The ID of the Amazon Macie Classic member account whose resources you\n want to associate with Macie Classic.

                " } }, "s3Resources": { "target": "com.amazonaws.macie#S3ResourcesClassification", "traits": { - "smithy.api#documentation": "

                The S3 resources that you want to associate with Amazon Macie Classic for monitoring\n and data classification.

                ", + "smithy.api#documentation": "

                (Discontinued) The S3 resources that you want to associate with Amazon Macie Classic\n for monitoring and data classification.

                ", "smithy.api#required": {} } } @@ -132,7 +132,7 @@ "failedS3Resources": { "target": "com.amazonaws.macie#FailedS3Resources", "traits": { - "smithy.api#documentation": "

                S3 resources that couldn't be associated with Amazon Macie Classic. An error code and\n an error message are provided for each failed item.

                " + "smithy.api#documentation": "

                (Discontinued) S3 resources that couldn't be associated with Amazon Macie Classic. An\n error code and an error message are provided for each failed item.

                " } } } @@ -152,20 +152,20 @@ "oneTime": { "target": "com.amazonaws.macie#S3OneTimeClassificationType", "traits": { - "smithy.api#documentation": "

                A one-time classification of all of the existing objects in a specified S3 bucket.\n

                ", + "smithy.api#documentation": "

                (Discontinued) A one-time classification of all of the existing objects in a specified\n S3 bucket.

                ", "smithy.api#required": {} } }, "continuous": { "target": "com.amazonaws.macie#S3ContinuousClassificationType", "traits": { - "smithy.api#documentation": "

                A continuous classification of the objects that are added to a specified S3 bucket.\n Amazon Macie Classic begins performing continuous classification after a bucket is\n successfully associated with Macie Classic.

                ", + "smithy.api#documentation": "

                (Discontinued) A continuous classification of the objects that are added to a specified\n S3 bucket. Amazon Macie Classic begins performing continuous classification after a bucket is\n successfully associated with Macie Classic.

                ", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

                The classification type that Amazon Macie Classic applies to the associated S3\n resources.

                " + "smithy.api#documentation": "

                (Discontinued) The classification type that Amazon Macie Classic applies to the\n associated S3 resources.

                " } }, "com.amazonaws.macie#ClassificationTypeUpdate": { @@ -174,18 +174,18 @@ "oneTime": { "target": "com.amazonaws.macie#S3OneTimeClassificationType", "traits": { - "smithy.api#documentation": "

                A one-time classification of all of the existing objects in a specified S3 bucket.\n

                " + "smithy.api#documentation": "

                (Discontinued) A one-time classification of all of the existing objects in a specified\n S3 bucket.

                " } }, "continuous": { "target": "com.amazonaws.macie#S3ContinuousClassificationType", "traits": { - "smithy.api#documentation": "

                A continuous classification of the objects that are added to a specified S3 bucket.\n Amazon Macie Classic begins performing continuous classification after a bucket is\n successfully associated with Macie Classic.

                " + "smithy.api#documentation": "

                (Discontinued) A continuous classification of the objects that are added to a specified\n S3 bucket. Amazon Macie Classic begins performing continuous classification after a bucket is\n successfully associated with Macie Classic.

                " } } }, "traits": { - "smithy.api#documentation": "

                The classification type that Amazon Macie Classic applies to the associated S3\n resources. At least one of the classification types (oneTime or continuous) must be specified.\n

                " + "smithy.api#documentation": "

                (Discontinued) The classification type that Amazon Macie Classic applies to the\n associated S3 resources. At least one of the classification types (oneTime or continuous) must\n be specified.

                " } }, "com.amazonaws.macie#DisassociateMemberAccount": { @@ -202,7 +202,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Removes the specified member account from Amazon Macie Classic.

                " + "smithy.api#documentation": "

                (Discontinued) Removes the specified member account from Amazon Macie\n Classic.

                " } }, "com.amazonaws.macie#DisassociateMemberAccountRequest": { @@ -211,7 +211,7 @@ "memberAccountId": { "target": "com.amazonaws.macie#AWSAccountId", "traits": { - "smithy.api#documentation": "

                The ID of the member account that you want to remove from Amazon Macie\n Classic.

                ", + "smithy.api#documentation": "

                (Discontinued) The ID of the member account that you want to remove from Amazon Macie\n Classic.

                ", "smithy.api#required": {} } } @@ -237,7 +237,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Removes specified S3 resources from being monitored by Amazon Macie Classic. If\n memberAccountId isn't specified, the action removes specified S3 resources from Macie Classic\n for the current Macie Classic administrator account. If memberAccountId is specified, the action removes specified\n S3 resources from Macie Classic for the specified member account.

                " + "smithy.api#documentation": "

                (Discontinued) Removes specified S3 resources from being monitored by Amazon Macie\n Classic. If memberAccountId isn't specified, the action removes specified S3 resources from\n Macie Classic for the current Macie Classic administrator account. If memberAccountId is\n specified, the action removes specified S3 resources from Macie Classic for the specified\n member account.

                " } }, "com.amazonaws.macie#DisassociateS3ResourcesRequest": { @@ -246,13 +246,13 @@ "memberAccountId": { "target": "com.amazonaws.macie#AWSAccountId", "traits": { - "smithy.api#documentation": "

                The ID of the Amazon Macie Classic member account whose resources you want to remove\n from being monitored by Macie Classic.

                " + "smithy.api#documentation": "

                (Discontinued) The ID of the Amazon Macie Classic member account whose resources you\n want to remove from being monitored by Macie Classic.

                " } }, "associatedS3Resources": { "target": "com.amazonaws.macie#S3Resources", "traits": { - "smithy.api#documentation": "

                The S3 resources (buckets or prefixes) that you want to remove from being monitored and\n classified by Amazon Macie Classic.

                ", + "smithy.api#documentation": "

                (Discontinued) The S3 resources (buckets or prefixes) that you want to remove from\n being monitored and classified by Amazon Macie Classic.

                ", "smithy.api#required": {} } } @@ -264,7 +264,7 @@ "failedS3Resources": { "target": "com.amazonaws.macie#FailedS3Resources", "traits": { - "smithy.api#documentation": "

                S3 resources that couldn't be removed from being monitored and classified by Amazon\n Macie Classic. An error code and an error message are provided for each failed item.\n

                " + "smithy.api#documentation": "

                (Discontinued) S3 resources that couldn't be removed from being monitored and\n classified by Amazon Macie Classic. An error code and an error message are provided for each\n failed item.

                " } } } @@ -294,24 +294,24 @@ "failedItem": { "target": "com.amazonaws.macie#S3Resource", "traits": { - "smithy.api#documentation": "

                The failed S3 resources.

                " + "smithy.api#documentation": "

                (Discontinued) The failed S3 resources.

                " } }, "errorCode": { "target": "com.amazonaws.macie#ErrorCode", "traits": { - "smithy.api#documentation": "

                The status code of a failed item.

                " + "smithy.api#documentation": "

                (Discontinued) The status code of a failed item.

                " } }, "errorMessage": { "target": "com.amazonaws.macie#ExceptionMessage", "traits": { - "smithy.api#documentation": "

                The error message of a failed item.

                " + "smithy.api#documentation": "

                (Discontinued) The error message of a failed item.

                " } } }, "traits": { - "smithy.api#documentation": "

                Includes details about the failed S3 resources.

                " + "smithy.api#documentation": "

                (Discontinued) Includes details about the failed S3 resources.

                " } }, "com.amazonaws.macie#FailedS3Resources": { @@ -341,7 +341,7 @@ } }, "traits": { - "smithy.api#documentation": "

                Internal server error.

                ", + "smithy.api#documentation": "

                (Discontinued) Internal server error.

                ", "smithy.api#error": "server" } }, @@ -359,7 +359,7 @@ } }, "traits": { - "smithy.api#documentation": "

                The request was rejected because an invalid or out-of-range value was supplied for an\n input parameter.

                ", + "smithy.api#documentation": "

                (Discontinued) The request was rejected because an invalid or out-of-range value was supplied for an\n input parameter.

                ", "smithy.api#error": "client" } }, @@ -377,7 +377,7 @@ } }, "traits": { - "smithy.api#documentation": "

                The request was rejected because it attempted to create resources beyond the current\n AWS account limits. The error code describes the limit exceeded.

                ", + "smithy.api#documentation": "

                (Discontinued) The request was rejected because it attempted to create resources beyond the current\n Amazon Web Services account quotas. The error code describes the quota exceeded.

                ", "smithy.api#error": "client" } }, @@ -398,7 +398,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Lists all Amazon Macie Classic member accounts for the current Macie Classic administrator account.

                ", + "smithy.api#documentation": "

                (Discontinued) Lists all Amazon Macie Classic member accounts for the current Macie\n Classic administrator account.

                ", "smithy.api#paginated": { "inputToken": "nextToken", "outputToken": "nextToken", @@ -412,13 +412,13 @@ "nextToken": { "target": "com.amazonaws.macie#NextToken", "traits": { - "smithy.api#documentation": "

                Use this parameter when paginating results. Set the value of this parameter to null on\n your first call to the ListMemberAccounts action. Subsequent calls to the action fill\n nextToken in the request with the value of nextToken from the previous response to continue\n listing data.

                " + "smithy.api#documentation": "

                (Discontinued) Use this parameter when paginating results. Set the value of this\n parameter to null on your first call to the ListMemberAccounts action. Subsequent calls to the\n action fill nextToken in the request with the value of nextToken from the previous response to\n continue listing data.

                " } }, "maxResults": { "target": "com.amazonaws.macie#MaxResults", "traits": { - "smithy.api#documentation": "

                Use this parameter to indicate the maximum number of items that you want in the\n response. The default value is 250.

                " + "smithy.api#documentation": "

                (Discontinued) Use this parameter to indicate the maximum number of items that you want\n in the response. The default value is 250.

                " } } } @@ -429,13 +429,13 @@ "memberAccounts": { "target": "com.amazonaws.macie#MemberAccounts", "traits": { - "smithy.api#documentation": "

                A list of the Amazon Macie Classic member accounts returned by the action. The current\n Macie Classic administrator account is also included in this list.

                " + "smithy.api#documentation": "

                (Discontinued) A list of the Amazon Macie Classic member accounts returned by the\n action. The current Macie Classic administrator account is also included in this\n list.

                " } }, "nextToken": { "target": "com.amazonaws.macie#NextToken", "traits": { - "smithy.api#documentation": "

                When a response is generated, if there is more data to be listed, this parameter is\n present in the response and contains the value to use for the nextToken parameter in a\n subsequent pagination request. If there is no more data to be listed, this parameter is set to\n null.

                " + "smithy.api#documentation": "

                (Discontinued) When a response is generated, if there is more data to be listed, this\n parameter is present in the response and contains the value to use for the nextToken parameter\n in a subsequent pagination request. If there is no more data to be listed, this parameter is\n set to null.

                " } } } @@ -460,7 +460,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Lists all the S3 resources associated with Amazon Macie Classic. If memberAccountId\n isn't specified, the action lists the S3 resources associated with Macie Classic for\n the current Macie Classic administrator account. If memberAccountId is specified, the action lists the S3 resources\n associated with Macie Classic for the specified member account.

                ", + "smithy.api#documentation": "

                (Discontinued) Lists all the S3 resources associated with Amazon Macie Classic. If\n memberAccountId isn't specified, the action lists the S3 resources associated with Macie\n Classic for the current Macie Classic administrator account. If memberAccountId is specified,\n the action lists the S3 resources associated with Macie Classic for the specified member\n account.

                ", "smithy.api#paginated": { "inputToken": "nextToken", "outputToken": "nextToken", @@ -474,19 +474,19 @@ "memberAccountId": { "target": "com.amazonaws.macie#AWSAccountId", "traits": { - "smithy.api#documentation": "

                The Amazon Macie Classic member account ID whose associated S3 resources you want to\n list.

                " + "smithy.api#documentation": "

                (Discontinued) The Amazon Macie Classic member account ID whose associated S3 resources\n you want to list.

                " } }, "nextToken": { "target": "com.amazonaws.macie#NextToken", "traits": { - "smithy.api#documentation": "

                Use this parameter when paginating results. Set its value to null on your first call to\n the ListS3Resources action. Subsequent calls to the action fill nextToken in the request with\n the value of nextToken from the previous response to continue listing data.

                " + "smithy.api#documentation": "

                (Discontinued) Use this parameter when paginating results. Set its value to null on\n your first call to the ListS3Resources action. Subsequent calls to the action fill nextToken\n in the request with the value of nextToken from the previous response to continue listing\n data.

                " } }, "maxResults": { "target": "com.amazonaws.macie#MaxResults", "traits": { - "smithy.api#documentation": "

                Use this parameter to indicate the maximum number of items that you want in the\n response. The default value is 250.

                " + "smithy.api#documentation": "

                (Discontinued) Use this parameter to indicate the maximum number of items that you want\n in the response. The default value is 250.

                " } } } @@ -497,19 +497,34 @@ "s3Resources": { "target": "com.amazonaws.macie#S3ResourcesClassification", "traits": { - "smithy.api#documentation": "

                A list of the associated S3 resources returned by the action.

                " + "smithy.api#documentation": "

                (Discontinued) A list of the associated S3 resources returned by the action.

                " } }, "nextToken": { "target": "com.amazonaws.macie#NextToken", "traits": { - "smithy.api#documentation": "

                When a response is generated, if there is more data to be listed, this parameter is\n present in the response and contains the value to use for the nextToken parameter in a\n subsequent pagination request. If there is no more data to be listed, this parameter is set to\n null.

                " + "smithy.api#documentation": "

                (Discontinued) When a response is generated, if there is more data to be listed, this\n parameter is present in the response and contains the value to use for the nextToken parameter\n in a subsequent pagination request. If there is no more data to be listed, this parameter is\n set to null.

                " } } } }, "com.amazonaws.macie#MacieService": { "type": "service", + "traits": { + "aws.api#service": { + "sdkId": "Macie", + "arnNamespace": "macie", + "cloudFormationName": "Macie", + "cloudTrailEventSource": "macie.amazonaws.com", + "endpointPrefix": "macie" + }, + "aws.auth#sigv4": { + "name": "macie" + }, + "aws.protocols#awsJson1_1": {}, + "smithy.api#documentation": "Amazon Macie Classic\n

                Amazon Macie Classic has been discontinued and is no longer available.

                \n \n

                A new Amazon Macie is now available with significant design improvements and additional\n features, at a lower price and in most Amazon Web Services Regions. We encourage you to take advantage of the\n new and improved features, and benefit from the reduced cost. To learn about features and pricing for the new Macie, see Amazon Macie. To learn how to use the new Macie, see the Amazon Macie User\n Guide.

                ", + "smithy.api#title": "Amazon Macie" + }, "version": "2017-12-19", "operations": [ { @@ -533,22 +548,7 @@ { "target": "com.amazonaws.macie#UpdateS3Resources" } - ], - "traits": { - "aws.api#service": { - "sdkId": "Macie", - "arnNamespace": "macie", - "cloudFormationName": "Macie", - "cloudTrailEventSource": "macie.amazonaws.com", - "endpointPrefix": "macie" - }, - "aws.auth#sigv4": { - "name": "macie" - }, - "aws.protocols#awsJson1_1": {}, - "smithy.api#documentation": "Amazon Macie Classic\n

                Amazon Macie Classic is a security service that uses machine learning to automatically\n discover, classify, and protect sensitive data in AWS. Macie Classic recognizes sensitive data\n such as personally identifiable information (PII) or intellectual property, and provides you\n with dashboards and alerts that give visibility into how this data is being accessed or moved.\n For more information, see the Amazon Macie\n Classic User Guide.

                ", - "smithy.api#title": "Amazon Macie" - } + ] }, "com.amazonaws.macie#MaxResults": { "type": "integer", @@ -565,12 +565,12 @@ "accountId": { "target": "com.amazonaws.macie#AWSAccountId", "traits": { - "smithy.api#documentation": "

                The AWS account ID of the Amazon Macie Classic member account.

                " + "smithy.api#documentation": "

                (Discontinued) The Amazon Web Services account ID of the Amazon Macie Classic member account.

                " } } }, "traits": { - "smithy.api#documentation": "

                Contains information about the Amazon Macie Classic member account.

                " + "smithy.api#documentation": "

                (Discontinued) Contains information about the Amazon Macie Classic member\n account.

                " } }, "com.amazonaws.macie#MemberAccounts": { @@ -639,19 +639,19 @@ "bucketName": { "target": "com.amazonaws.macie#BucketName", "traits": { - "smithy.api#documentation": "

                The name of the S3 bucket.

                ", + "smithy.api#documentation": "

                (Discontinued) The name of the S3 bucket.

                ", "smithy.api#required": {} } }, "prefix": { "target": "com.amazonaws.macie#Prefix", "traits": { - "smithy.api#documentation": "

                The prefix of the S3 bucket.

                " + "smithy.api#documentation": "

                (Discontinued) The prefix of the S3 bucket.

                " } } }, "traits": { - "smithy.api#documentation": "

                Contains information about the S3 resource. This data type is used as a request\n parameter in the DisassociateS3Resources action and can be used as a response parameter in the\n AssociateS3Resources and UpdateS3Resources actions.

                " + "smithy.api#documentation": "

                (Discontinued) Contains information about the S3 resource. This data type is used as a\n request parameter in the DisassociateS3Resources action and can be used as a response\n parameter in the AssociateS3Resources and UpdateS3Resources actions.

                " } }, "com.amazonaws.macie#S3ResourceClassification": { @@ -660,26 +660,26 @@ "bucketName": { "target": "com.amazonaws.macie#BucketName", "traits": { - "smithy.api#documentation": "

                The name of the S3 bucket that you want to associate with Amazon Macie\n Classic.

                ", + "smithy.api#documentation": "

                (Discontinued) The name of the S3 bucket that you want to associate with Amazon Macie\n Classic.

                ", "smithy.api#required": {} } }, "prefix": { "target": "com.amazonaws.macie#Prefix", "traits": { - "smithy.api#documentation": "

                The prefix of the S3 bucket that you want to associate with Amazon Macie\n Classic.

                " + "smithy.api#documentation": "

                (Discontinued) The prefix of the S3 bucket that you want to associate with Amazon Macie\n Classic.

                " } }, "classificationType": { "target": "com.amazonaws.macie#ClassificationType", "traits": { - "smithy.api#documentation": "

                The classification type that you want to specify for the resource associated with\n Amazon Macie Classic.

                ", + "smithy.api#documentation": "

                (Discontinued) The classification type that you want to specify for the resource\n associated with Amazon Macie Classic.

                ", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

                The S3 resources that you want to associate with Amazon Macie Classic for monitoring\n and data classification. This data type is used as a request parameter in the\n AssociateS3Resources action and a response parameter in the ListS3Resources action.

                " + "smithy.api#documentation": "

                (Discontinued) The S3 resources that you want to associate with Amazon Macie Classic\n for monitoring and data classification. This data type is used as a request parameter in the\n AssociateS3Resources action and a response parameter in the ListS3Resources action.

                " } }, "com.amazonaws.macie#S3ResourceClassificationUpdate": { @@ -688,26 +688,26 @@ "bucketName": { "target": "com.amazonaws.macie#BucketName", "traits": { - "smithy.api#documentation": "

                The name of the S3 bucket whose classification types you want to update.

                ", + "smithy.api#documentation": "

                (Discontinued) The name of the S3 bucket whose classification types you want to\n update.

                ", "smithy.api#required": {} } }, "prefix": { "target": "com.amazonaws.macie#Prefix", "traits": { - "smithy.api#documentation": "

                The prefix of the S3 bucket whose classification types you want to update.

                " + "smithy.api#documentation": "

                (Discontinued) The prefix of the S3 bucket whose classification types you want to\n update.

                " } }, "classificationTypeUpdate": { "target": "com.amazonaws.macie#ClassificationTypeUpdate", "traits": { - "smithy.api#documentation": "

                The classification type that you want to update for the resource associated with Amazon\n Macie Classic.

                ", + "smithy.api#documentation": "

                (Discontinued) The classification type that you want to update for the resource\n associated with Amazon Macie Classic.

                ", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

                The S3 resources whose classification types you want to update. This data type is used\n as a request parameter in the UpdateS3Resources action.

                " + "smithy.api#documentation": "

                (Discontinued) The S3 resources whose classification types you want to update. This\n data type is used as a request parameter in the UpdateS3Resources action.

                " } }, "com.amazonaws.macie#S3Resources": { @@ -748,7 +748,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Updates the classification types for the specified S3 resources. If memberAccountId\n isn't specified, the action updates the classification types of the S3 resources associated\n with Amazon Macie Classic for the current Macie Classic administrator account. If memberAccountId is specified, the\n action updates the classification types of the S3 resources associated with Macie\n Classic for the specified member account.

                " + "smithy.api#documentation": "

                (Discontinued) Updates the classification types for the specified S3 resources. If\n memberAccountId isn't specified, the action updates the classification types of the S3\n resources associated with Amazon Macie Classic for the current Macie Classic administrator\n account. If memberAccountId is specified, the action updates the classification types of the\n S3 resources associated with Macie Classic for the specified member account.

                " } }, "com.amazonaws.macie#UpdateS3ResourcesRequest": { @@ -757,13 +757,13 @@ "memberAccountId": { "target": "com.amazonaws.macie#AWSAccountId", "traits": { - "smithy.api#documentation": "

                The AWS ID of the Amazon Macie Classic member account whose S3 resources'\n classification types you want to update.

                " + "smithy.api#documentation": "

                (Discontinued) The Amazon Web Services account ID of the Amazon Macie Classic member account whose S3\n resources' classification types you want to update.

                " } }, "s3ResourcesUpdate": { "target": "com.amazonaws.macie#S3ResourcesClassificationUpdate", "traits": { - "smithy.api#documentation": "

                The S3 resources whose classification types you want to update.

                ", + "smithy.api#documentation": "

                (Discontinued) The S3 resources whose classification types you want to\n update.

                ", "smithy.api#required": {} } } @@ -775,7 +775,7 @@ "failedS3Resources": { "target": "com.amazonaws.macie#FailedS3Resources", "traits": { - "smithy.api#documentation": "

                The S3 resources whose classification types can't be updated. An error code and an\n error message are provided for each failed item.

                " + "smithy.api#documentation": "

                (Discontinued) The S3 resources whose classification types can't be updated. An error\n code and an error message are provided for each failed item.

                " } } } diff --git a/codegen/sdk-codegen/aws-models/mediaconvert.json b/codegen/sdk-codegen/aws-models/mediaconvert.json index ea27244e6a7d..10446812d435 100644 --- a/codegen/sdk-codegen/aws-models/mediaconvert.json +++ b/codegen/sdk-codegen/aws-models/mediaconvert.json @@ -3518,7 +3518,7 @@ "TimedMetadata": { "target": "com.amazonaws.mediaconvert#CmfcTimedMetadata", "traits": { - "smithy.api#documentation": "Applies to CMAF outputs. Use this setting to specify whether the service inserts the ID3 timed metadata from the input in this output.", + "smithy.api#documentation": "To include ID3 metadata in this output: Set ID3 metadata (timedMetadata) to Passthrough (PASSTHROUGH). Specify this ID3 metadata in Custom ID3 metadata inserter (timedMetadataInsertion). MediaConvert writes each instance of ID3 metadata in a separate Event Message (eMSG) box. To exclude this ID3 metadata: Set ID3 metadata to None (NONE) or leave blank.", "smithy.api#jsonName": "timedMetadata" } } @@ -3530,7 +3530,7 @@ "com.amazonaws.mediaconvert#CmfcTimedMetadata": { "type": "string", "traits": { - "smithy.api#documentation": "Applies to CMAF outputs. Use this setting to specify whether the service inserts the ID3 timed metadata from the input in this output.", + "smithy.api#documentation": "To include ID3 metadata in this output: Set ID3 metadata (timedMetadata) to Passthrough (PASSTHROUGH). Specify this ID3 metadata in Custom ID3 metadata inserter (timedMetadataInsertion). MediaConvert writes each instance of ID3 metadata in a separate Event Message (eMSG) box. 
To exclude this ID3 metadata: Set ID3 metadata to None (NONE) or leave blank.", "smithy.api#enum": [ { "value": "PASSTHROUGH", @@ -6736,6 +6736,22 @@ ] } }, + "com.amazonaws.mediaconvert#EmbeddedTimecodeOverride": { + "type": "string", + "traits": { + "smithy.api#documentation": "Set Embedded timecode override (embeddedTimecodeOverride) to Use MDPM (USE_MDPM) when your AVCHD input contains timecode tag data in the Modified Digital Video Pack Metadata (MDPM). When you do, we recommend you also set Timecode source (inputTimecodeSource) to Embedded (EMBEDDED). Leave Embedded timecode override blank, or set to None (NONE), when your input does not contain MDPM timecode.", + "smithy.api#enum": [ + { + "value": "NONE", + "name": "NONE" + }, + { + "value": "USE_MDPM", + "name": "USE_MDPM" + } + ] + } + }, "com.amazonaws.mediaconvert#Endpoint": { "type": "structure", "members": { @@ -9282,6 +9298,22 @@ ] } }, + "com.amazonaws.mediaconvert#HlsCaptionSegmentLengthControl": { + "type": "string", + "traits": { + "smithy.api#documentation": "Set Caption segment length control (CaptionSegmentLengthControl) to Match video (MATCH_VIDEO) to create caption segments that align with the video segments from the first video output in this output group. For example, if the video segments are 2 seconds long, your WebVTT segments will also be 2 seconds long. 
Keep the default setting, Large segments (LARGE_SEGMENTS) to create caption segments that are 300 seconds long.", + "smithy.api#enum": [ + { + "value": "LARGE_SEGMENTS", + "name": "LARGE_SEGMENTS" + }, + { + "value": "MATCH_VIDEO", + "name": "MATCH_VIDEO" + } + ] + } + }, "com.amazonaws.mediaconvert#HlsClientCache": { "type": "string", "traits": { @@ -9464,6 +9496,13 @@ "smithy.api#jsonName": "captionLanguageSetting" } }, + "CaptionSegmentLengthControl": { + "target": "com.amazonaws.mediaconvert#HlsCaptionSegmentLengthControl", + "traits": { + "smithy.api#documentation": "Set Caption segment length control (CaptionSegmentLengthControl) to Match video (MATCH_VIDEO) to create caption segments that align with the video segments from the first video output in this output group. For example, if the video segments are 2 seconds long, your WebVTT segments will also be 2 seconds long. Keep the default setting, Large segments (LARGE_SEGMENTS) to create caption segments that are 300 seconds long.", + "smithy.api#jsonName": "captionSegmentLengthControl" + } + }, "ClientCache": { "target": "com.amazonaws.mediaconvert#HlsClientCache", "traits": { @@ -9614,14 +9653,14 @@ "TimedMetadataId3Frame": { "target": "com.amazonaws.mediaconvert#HlsTimedMetadataId3Frame", "traits": { - "smithy.api#documentation": "Indicates ID3 frame that has the timecode.", + "smithy.api#documentation": "Specify the type of the ID3 frame (timedMetadataId3Frame) to use for ID3 timestamps (timedMetadataId3Period) in your output. To include ID3 timestamps: Specify PRIV (PRIV) or TDRL (TDRL) and set ID3 metadata (timedMetadata) to Passthrough (PASSTHROUGH). 
To exclude ID3 timestamps: Set ID3 timestamp frame type to None (NONE).", "smithy.api#jsonName": "timedMetadataId3Frame" } }, "TimedMetadataId3Period": { "target": "com.amazonaws.mediaconvert#__integerMinNegative2147483648Max2147483647", "traits": { - "smithy.api#documentation": "Timed Metadata interval in seconds.", + "smithy.api#documentation": "Specify the interval in seconds to write ID3 timestamps in your output. The first timestamp starts at the output timecode and date, and increases incrementally with each ID3 timestamp. To use the default interval of 10 seconds: Leave blank. To include this metadata in your output: Set ID3 timestamp frame type (timedMetadataId3Frame) to PRIV (PRIV) or TDRL (TDRL), and set ID3 metadata (timedMetadata) to Passthrough (PASSTHROUGH).", "smithy.api#jsonName": "timedMetadataId3Period" } }, @@ -10008,7 +10047,7 @@ "com.amazonaws.mediaconvert#HlsTimedMetadataId3Frame": { "type": "string", "traits": { - "smithy.api#documentation": "Indicates ID3 frame that has the timecode.", + "smithy.api#documentation": "Specify the type of the ID3 frame (timedMetadataId3Frame) to use for ID3 timestamps (timedMetadataId3Period) in your output. To include ID3 timestamps: Specify PRIV (PRIV) or TDRL (TDRL) and set ID3 metadata (timedMetadata) to Passthrough (PASSTHROUGH). 
To exclude ID3 timestamps: Set ID3 timestamp frame type to None (NONE).", "smithy.api#enum": [ { "value": "NONE", @@ -10060,7 +10099,7 @@ "Id3": { "target": "com.amazonaws.mediaconvert#__stringPatternAZaZ0902", "traits": { - "smithy.api#documentation": "Use ID3 tag (Id3) to provide a tag value in base64-encode format.", + "smithy.api#documentation": "Use ID3 tag (Id3) to provide a fully formed ID3 tag in base64-encode format.", "smithy.api#jsonName": "id3" } }, @@ -10094,7 +10133,7 @@ "com.amazonaws.mediaconvert#ImscAccessibilitySubs": { "type": "string", "traits": { - "smithy.api#documentation": "Specify whether to flag this caption track as accessibility in your HLS/CMAF parent manifest. When you choose ENABLED, MediaConvert includes the parameters CHARACTERISTICS=\"public.accessibility.describes-spoken-dialog,public.accessibility.describes-music-and-sound\" and AUTOSELECT=\"YES\" in the EXT-X-MEDIA entry for this track. When you keep the default choice, DISABLED, MediaConvert leaves this parameter out.", + "smithy.api#documentation": "Set Accessibility subtitles (Accessibility) to Enabled (ENABLED) if the ISMC or WebVTT captions track is intended to provide accessibility for people who are deaf or hard of hearing. When you enable this feature, MediaConvert adds the following attributes under EXT-X-MEDIA in the HLS or CMAF manifest for this track: CHARACTERISTICS=\"public.accessibility.describes-spoken-dialog,public.accessibility.describes-music-and-sound\" and AUTOSELECT=\"YES\". Keep the default value, Disabled (DISABLED), if the captions track is not intended to provide such accessibility. MediaConvert will not add the above attributes.", "smithy.api#enum": [ { "value": "DISABLED", @@ -10113,7 +10152,7 @@ "Accessibility": { "target": "com.amazonaws.mediaconvert#ImscAccessibilitySubs", "traits": { - "smithy.api#documentation": "Specify whether to flag this caption track as accessibility in your HLS/CMAF parent manifest. 
When you choose ENABLED, MediaConvert includes the parameters CHARACTERISTICS=\"public.accessibility.describes-spoken-dialog,public.accessibility.describes-music-and-sound\" and AUTOSELECT=\"YES\" in the EXT-X-MEDIA entry for this track. When you keep the default choice, DISABLED, MediaConvert leaves this parameter out.", + "smithy.api#documentation": "Set Accessibility subtitles (Accessibility) to Enabled (ENABLED) if the ISMC or WebVTT captions track is intended to provide accessibility for people who are deaf or hard of hearing. When you enable this feature, MediaConvert adds the following attributes under EXT-X-MEDIA in the HLS or CMAF manifest for this track: CHARACTERISTICS=\"public.accessibility.describes-spoken-dialog,public.accessibility.describes-music-and-sound\" and AUTOSELECT=\"YES\". Keep the default value, Disabled (DISABLED), if the captions track is not intended to provide such accessibility. MediaConvert will not add the above attributes.", "smithy.api#jsonName": "accessibility" } }, @@ -11071,7 +11110,7 @@ "TimedMetadataInsertion": { "target": "com.amazonaws.mediaconvert#TimedMetadataInsertion", "traits": { - "smithy.api#documentation": "Enable Timed metadata insertion (TimedMetadataInsertion) to include ID3 tags in any HLS outputs. To include timed metadata, you must enable it here, enable it in each output container, and specify tags and timecodes in ID3 insertion (Id3Insertion) objects.", + "smithy.api#documentation": "Insert user-defined custom ID3 metadata (id3) at timecodes (timecode) that you specify. In each output that you want to include this metadata, you must set ID3 metadata (timedMetadata) to Passthrough (PASSTHROUGH).", "smithy.api#jsonName": "timedMetadataInsertion" } } @@ -11312,7 +11351,7 @@ "TimedMetadataInsertion": { "target": "com.amazonaws.mediaconvert#TimedMetadataInsertion", "traits": { - "smithy.api#documentation": "Enable Timed metadata insertion (TimedMetadataInsertion) to include ID3 tags in any HLS outputs. 
To include timed metadata, you must enable it here, enable it in each output container, and specify tags and timecodes in ID3 insertion (Id3Insertion) objects.", + "smithy.api#documentation": "Insert user-defined custom ID3 metadata (id3) at timecodes (timecode) that you specify. In each output that you want to include this metadata, you must set ID3 metadata (timedMetadata) to Passthrough (PASSTHROUGH).", "smithy.api#jsonName": "timedMetadataInsertion" } } @@ -13379,14 +13418,14 @@ "TimedMetadata": { "target": "com.amazonaws.mediaconvert#TimedMetadata", "traits": { - "smithy.api#documentation": "Applies to HLS outputs. Use this setting to specify whether the service inserts the ID3 timed metadata from the input in this output.", + "smithy.api#documentation": "Set ID3 metadata (timedMetadata) to Passthrough (PASSTHROUGH) to include ID3 metadata in this output. This includes ID3 metadata from the following features: ID3 timestamp period (timedMetadataId3Period), and Custom ID3 metadata inserter (timedMetadataInsertion). To exclude this ID3 metadata in this output: set ID3 metadata to None (NONE) or leave blank.", "smithy.api#jsonName": "timedMetadata" } }, "TimedMetadataPid": { "target": "com.amazonaws.mediaconvert#__integerMin32Max8182", "traits": { - "smithy.api#documentation": "Packet Identifier (PID) of the timed metadata stream in the transport stream.", + "smithy.api#documentation": "Packet Identifier (PID) of the ID3 metadata stream in the transport stream.", "smithy.api#jsonName": "timedMetadataPid" } }, @@ -14070,7 +14109,7 @@ "TimedMetadata": { "target": "com.amazonaws.mediaconvert#MpdTimedMetadata", "traits": { - "smithy.api#documentation": "Applies to DASH outputs. Use this setting to specify whether the service inserts the ID3 timed metadata from the input in this output.", + "smithy.api#documentation": "To include ID3 metadata in this output: Set ID3 metadata (timedMetadata) to Passthrough (PASSTHROUGH). 
Specify this ID3 metadata in Custom ID3 metadata inserter (timedMetadataInsertion). MediaConvert writes each instance of ID3 metadata in a separate Event Message (eMSG) box. To exclude this ID3 metadata: Set ID3 metadata to None (NONE) or leave blank.", "smithy.api#jsonName": "timedMetadata" } } @@ -14082,7 +14121,7 @@ "com.amazonaws.mediaconvert#MpdTimedMetadata": { "type": "string", "traits": { - "smithy.api#documentation": "Applies to DASH outputs. Use this setting to specify whether the service inserts the ID3 timed metadata from the input in this output.", + "smithy.api#documentation": "To include ID3 metadata in this output: Set ID3 metadata (timedMetadata) to Passthrough (PASSTHROUGH). Specify this ID3 metadata in Custom ID3 metadata inserter (timedMetadataInsertion). MediaConvert writes each instance of ID3 metadata in a separate Event Message (eMSG) box. To exclude this ID3 metadata: Set ID3 metadata to None (NONE) or leave blank.", "smithy.api#enum": [ { "value": "PASSTHROUGH", @@ -15137,7 +15176,7 @@ "com.amazonaws.mediaconvert#NoiseFilterPostTemporalSharpening": { "type": "string", "traits": { - "smithy.api#documentation": "When you set Noise reducer (noiseReducer) to Temporal (TEMPORAL), the sharpness of your output is reduced. You can optionally use Post temporal sharpening (PostTemporalSharpening) to apply sharpening to the edges of your output. The default behavior, Auto (AUTO), allows the transcoder to determine whether to apply sharpening, depending on your input type and quality. When you set Post temporal sharpening to Enabled (ENABLED), specify how much sharpening is applied using Post temporal sharpening strength (PostTemporalSharpeningStrength). Set Post temporal sharpening to Disabled (DISABLED) to not apply sharpening.", + "smithy.api#documentation": "When you set Noise reducer (noiseReducer) to Temporal (TEMPORAL), the bandwidth and sharpness of your output is reduced. 
You can optionally use Post temporal sharpening (postTemporalSharpening) to apply sharpening to the edges of your output. Note that Post temporal sharpening will also make the bandwidth reduction from the Noise reducer smaller. The default behavior, Auto (AUTO), allows the transcoder to determine whether to apply sharpening, depending on your input type and quality. When you set Post temporal sharpening to Enabled (ENABLED), specify how much sharpening is applied using Post temporal sharpening strength (postTemporalSharpeningStrength). Set Post temporal sharpening to Disabled (DISABLED) to not apply sharpening.", "smithy.api#enum": [ { "value": "DISABLED", @@ -15157,7 +15196,7 @@ "com.amazonaws.mediaconvert#NoiseFilterPostTemporalSharpeningStrength": { "type": "string", "traits": { - "smithy.api#documentation": "Use Post temporal sharpening strength (PostTemporalSharpeningStrength) to define the amount of sharpening the transcoder applies to your output. Set Post temporal sharpening strength to Low (LOW), or leave blank, to apply a low amount of sharpening. Set Post temporal sharpening strength to Medium (MEDIUM) to apply medium amount of sharpening. Set Post temporal sharpening strength to High (HIGH) to apply a high amount of sharpening.", + "smithy.api#documentation": "Use Post temporal sharpening strength (postTemporalSharpeningStrength) to define the amount of sharpening the transcoder applies to your output. Set Post temporal sharpening strength to Low (LOW), Medium (MEDIUM), or High (HIGH) to indicate the amount of sharpening.", "smithy.api#enum": [ { "value": "LOW", @@ -15307,14 +15346,14 @@ "PostTemporalSharpening": { "target": "com.amazonaws.mediaconvert#NoiseFilterPostTemporalSharpening", "traits": { - "smithy.api#documentation": "When you set Noise reducer (noiseReducer) to Temporal (TEMPORAL), the sharpness of your output is reduced. 
You can optionally use Post temporal sharpening (PostTemporalSharpening) to apply sharpening to the edges of your output. The default behavior, Auto (AUTO), allows the transcoder to determine whether to apply sharpening, depending on your input type and quality. When you set Post temporal sharpening to Enabled (ENABLED), specify how much sharpening is applied using Post temporal sharpening strength (PostTemporalSharpeningStrength). Set Post temporal sharpening to Disabled (DISABLED) to not apply sharpening.", + "smithy.api#documentation": "When you set Noise reducer (noiseReducer) to Temporal (TEMPORAL), the bandwidth and sharpness of your output is reduced. You can optionally use Post temporal sharpening (postTemporalSharpening) to apply sharpening to the edges of your output. Note that Post temporal sharpening will also make the bandwidth reduction from the Noise reducer smaller. The default behavior, Auto (AUTO), allows the transcoder to determine whether to apply sharpening, depending on your input type and quality. When you set Post temporal sharpening to Enabled (ENABLED), specify how much sharpening is applied using Post temporal sharpening strength (postTemporalSharpeningStrength). Set Post temporal sharpening to Disabled (DISABLED) to not apply sharpening.", "smithy.api#jsonName": "postTemporalSharpening" } }, "PostTemporalSharpeningStrength": { "target": "com.amazonaws.mediaconvert#NoiseFilterPostTemporalSharpeningStrength", "traits": { - "smithy.api#documentation": "Use Post temporal sharpening strength (PostTemporalSharpeningStrength) to define the amount of sharpening the transcoder applies to your output. Set Post temporal sharpening strength to Low (LOW), or leave blank, to apply a low amount of sharpening. Set Post temporal sharpening strength to Medium (MEDIUM) to apply medium amount of sharpening. 
Set Post temporal sharpening strength to High (HIGH) to apply a high amount of sharpening.", + "smithy.api#documentation": "Use Post temporal sharpening strength (postTemporalSharpeningStrength) to define the amount of sharpening the transcoder applies to your output. Set Post temporal sharpening strength to Low (LOW), Medium (MEDIUM), or High (HIGH) to indicate the amount of sharpening.", "smithy.api#jsonName": "postTemporalSharpeningStrength" } }, @@ -17238,7 +17277,7 @@ "com.amazonaws.mediaconvert#TimedMetadata": { "type": "string", "traits": { - "smithy.api#documentation": "Applies to HLS outputs. Use this setting to specify whether the service inserts the ID3 timed metadata from the input in this output.", + "smithy.api#documentation": "Set ID3 metadata (timedMetadata) to Passthrough (PASSTHROUGH) to include ID3 metadata in this output. This includes ID3 metadata from the following features: ID3 timestamp period (timedMetadataId3Period), and Custom ID3 metadata inserter (timedMetadataInsertion). To exclude this ID3 metadata in this output: set ID3 metadata to None (NONE) or leave blank.", "smithy.api#enum": [ { "value": "PASSTHROUGH", @@ -17263,7 +17302,7 @@ } }, "traits": { - "smithy.api#documentation": "Enable Timed metadata insertion (TimedMetadataInsertion) to include ID3 tags in any HLS outputs. To include timed metadata, you must enable it here, enable it in each output container, and specify tags and timecodes in ID3 insertion (Id3Insertion) objects." + "smithy.api#documentation": "Insert user-defined custom ID3 metadata (id3) at timecodes (timecode) that you specify. In each output that you want to include this metadata, you must set ID3 metadata (timedMetadata) to Passthrough (PASSTHROUGH)." 
} }, "com.amazonaws.mediaconvert#Timing": { @@ -18291,6 +18330,13 @@ "smithy.api#jsonName": "colorSpaceUsage" } }, + "EmbeddedTimecodeOverride": { + "target": "com.amazonaws.mediaconvert#EmbeddedTimecodeOverride", + "traits": { + "smithy.api#documentation": "Set Embedded timecode override (embeddedTimecodeOverride) to Use MDPM (USE_MDPM) when your AVCHD input contains timecode tag data in the Modified Digital Video Pack Metadata (MDPM). When you do, we recommend you also set Timecode source (inputTimecodeSource) to Embedded (EMBEDDED). Leave Embedded timecode override blank, or set to None (NONE), when your input does not contain MDPM timecode.", + "smithy.api#jsonName": "embeddedTimecodeOverride" + } + }, "Hdr10Metadata": { "target": "com.amazonaws.mediaconvert#Hdr10Metadata", "traits": { @@ -18817,7 +18863,7 @@ "com.amazonaws.mediaconvert#WebvttAccessibilitySubs": { "type": "string", "traits": { - "smithy.api#documentation": "Specify whether to flag this caption track as accessibility in your HLS/CMAF parent manifest. When you choose ENABLED, MediaConvert includes the parameters CHARACTERISTICS=\"public.accessibility.describes-spoken-dialog,public.accessibility.describes-music-and-sound\" and AUTOSELECT=\"YES\" in the EXT-X-MEDIA entry for this track. When you keep the default choice, DISABLED, MediaConvert leaves this parameter out.", + "smithy.api#documentation": "Set Accessibility subtitles (Accessibility) to Enabled (ENABLED) if the ISMC or WebVTT captions track is intended to provide accessibility for people who are deaf or hard of hearing. When you enable this feature, MediaConvert adds the following attributes under EXT-X-MEDIA in the HLS or CMAF manifest for this track: CHARACTERISTICS=\"public.accessibility.describes-spoken-dialog,public.accessibility.describes-music-and-sound\" and AUTOSELECT=\"YES\". Keep the default value, Disabled (DISABLED), if the captions track is not intended to provide such accessibility. 
MediaConvert will not add the above attributes.", "smithy.api#enum": [ { "value": "DISABLED", @@ -18836,7 +18882,7 @@ "Accessibility": { "target": "com.amazonaws.mediaconvert#WebvttAccessibilitySubs", "traits": { - "smithy.api#documentation": "Specify whether to flag this caption track as accessibility in your HLS/CMAF parent manifest. When you choose ENABLED, MediaConvert includes the parameters CHARACTERISTICS=\"public.accessibility.describes-spoken-dialog,public.accessibility.describes-music-and-sound\" and AUTOSELECT=\"YES\" in the EXT-X-MEDIA entry for this track. When you keep the default choice, DISABLED, MediaConvert leaves this parameter out.", + "smithy.api#documentation": "Set Accessibility subtitles (Accessibility) to Enabled (ENABLED) if the ISMC or WebVTT captions track is intended to provide accessibility for people who are deaf or hard of hearing. When you enable this feature, MediaConvert adds the following attributes under EXT-X-MEDIA in the HLS or CMAF manifest for this track: CHARACTERISTICS=\"public.accessibility.describes-spoken-dialog,public.accessibility.describes-music-and-sound\" and AUTOSELECT=\"YES\". Keep the default value, Disabled (DISABLED), if the captions track is not intended to provide such accessibility. 
MediaConvert will not add the above attributes.", "smithy.api#jsonName": "accessibility" } }, diff --git a/codegen/sdk-codegen/aws-models/mediapackage.json b/codegen/sdk-codegen/aws-models/mediapackage.json index 564800d079d7..23f993014f28 100644 --- a/codegen/sdk-codegen/aws-models/mediapackage.json +++ b/codegen/sdk-codegen/aws-models/mediapackage.json @@ -2339,6 +2339,21 @@ }, "com.amazonaws.mediapackage#MediaPackage": { "type": "service", + "traits": { + "aws.api#service": { + "sdkId": "MediaPackage", + "arnNamespace": "mediapackage", + "cloudFormationName": "MediaPackage", + "cloudTrailEventSource": "mediapackage.amazonaws.com", + "endpointPrefix": "mediapackage" + }, + "aws.auth#sigv4": { + "name": "mediapackage" + }, + "aws.protocols#restJson1": {}, + "smithy.api#documentation": "AWS Elemental MediaPackage", + "smithy.api#title": "AWS Elemental MediaPackage" + }, "version": "2017-10-12", "operations": [ { @@ -2398,22 +2413,7 @@ { "target": "com.amazonaws.mediapackage#UpdateOriginEndpoint" } - ], - "traits": { - "aws.api#service": { - "sdkId": "MediaPackage", - "arnNamespace": "mediapackage", - "cloudFormationName": "MediaPackage", - "cloudTrailEventSource": "mediapackage.amazonaws.com", - "endpointPrefix": "mediapackage" - }, - "aws.auth#sigv4": { - "name": "mediapackage" - }, - "aws.protocols#restJson1": {}, - "smithy.api#documentation": "AWS Elemental MediaPackage", - "smithy.api#title": "AWS Elemental MediaPackage" - } + ] }, "com.amazonaws.mediapackage#MssEncryption": { "type": "structure", @@ -2661,6 +2661,10 @@ { "value": "HBBTV_1_5", "name": "HBBTV_1_5" + }, + { + "value": "HYBRIDCAST", + "name": "HYBRIDCAST" } ] } diff --git a/codegen/sdk-codegen/aws-models/mgn.json b/codegen/sdk-codegen/aws-models/mgn.json index 47d57f8c890f..0f0a30fcedeb 100644 --- a/codegen/sdk-codegen/aws-models/mgn.json +++ b/codegen/sdk-codegen/aws-models/mgn.json @@ -98,6 +98,21 @@ } ] }, + "com.amazonaws.mgn#BootMode": { + "type": "string", + "traits": { + "smithy.api#enum": 
[ + { + "value": "LEGACY_BIOS", + "name": "LEGACY_BIOS" + }, + { + "value": "UEFI", + "name": "UEFI" + } + ] + } + }, "com.amazonaws.mgn#BoundedString": { "type": "string", "traits": { @@ -223,13 +238,13 @@ "resourceId": { "target": "com.amazonaws.mgn#LargeBoundedString", "traits": { - "smithy.api#documentation": "

                A conflict occured when prompting for the Resource ID.

                " + "smithy.api#documentation": "

                A conflict occurred when prompting for the Resource ID.

                " } }, "resourceType": { "target": "com.amazonaws.mgn#LargeBoundedString", "traits": { - "smithy.api#documentation": "

                A conflict occured when prompting for resource type.

                " + "smithy.api#documentation": "

                A conflict occurred when prompting for resource type.

                " } } }, @@ -299,7 +314,7 @@ "replicationServersSecurityGroupsIDs": { "target": "com.amazonaws.mgn#ReplicationServersSecurityGroupsIDs", "traits": { - "smithy.api#documentation": "

                Request to configure the Replication Server Secuirity group ID during Replication Settings template creation.

                ", + "smithy.api#documentation": "

                Request to configure the Replication Server Security group ID during Replication Settings template creation.

                ", "smithy.api#required": {} } }, @@ -320,21 +335,21 @@ "defaultLargeStagingDiskType": { "target": "com.amazonaws.mgn#ReplicationConfigurationDefaultLargeStagingDiskType", "traits": { - "smithy.api#documentation": "

                Request to configure the Staging Disk EBS volume type to \"gp2\" during Replication Settings template creation.

                ", + "smithy.api#documentation": "

                Request to configure the default large staging disk EBS volume type during Replication Settings template creation.

                ", "smithy.api#required": {} } }, "ebsEncryption": { "target": "com.amazonaws.mgn#ReplicationConfigurationEbsEncryption", "traits": { - "smithy.api#documentation": "

                Request to configure EBS enryption during Replication Settings template creation.

                ", + "smithy.api#documentation": "

                Request to configure EBS encryption during Replication Settings template creation.

                ", "smithy.api#required": {} } }, "ebsEncryptionKeyArn": { "target": "com.amazonaws.mgn#ARN", "traits": { - "smithy.api#documentation": "

                Request to configure an EBS enryption key during Replication Settings template creation.

                " + "smithy.api#documentation": "

                Request to configure an EBS encryption key during Replication Settings template creation.

                " } }, "bandwidthThrottling": { @@ -361,7 +376,7 @@ "stagingAreaTags": { "target": "com.amazonaws.mgn#TagsMap", "traits": { - "smithy.api#documentation": "

                Request to configure Staiging Area tags during Replication Settings template creation.

                ", + "smithy.api#documentation": "

                Request to configure Staging Area tags during Replication Settings template creation.

                ", "smithy.api#required": {} } }, @@ -470,7 +485,7 @@ "lagDuration": { "target": "com.amazonaws.mgn#ISO8601DatetimeString", "traits": { - "smithy.api#documentation": "

                Request to query data replication lag durating.

                " + "smithy.api#documentation": "

                Request to query data replication lag duration.

                " } }, "etaDateTime": { @@ -607,7 +622,7 @@ } }, "traits": { - "smithy.api#documentation": "

                Data replication intiation step.

                " + "smithy.api#documentation": "

                Data replication initiation step.

                " } }, "com.amazonaws.mgn#DataReplicationInitiationStepName": { @@ -889,6 +904,9 @@ "input": { "target": "com.amazonaws.mgn#DeleteVcenterClientRequest" }, + "output": { + "target": "smithy.api#Unit" + }, "errors": [ { "target": "com.amazonaws.mgn#ResourceNotFoundException" @@ -901,7 +919,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Deletes a single vCenter client by ID.

                ", + "smithy.api#documentation": "

                Deletes a given vCenter client by ID.

                ", "smithy.api#http": { "method": "POST", "uri": "/DeleteVcenterClient", @@ -939,7 +957,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Retrieves detailed Job log with paging.

                ", + "smithy.api#documentation": "

                Retrieves detailed job log items with paging.

                ", "smithy.api#http": { "method": "POST", "uri": "/DescribeJobLogItems", @@ -1012,7 +1030,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Returns a list of Jobs. Use the JobsID and fromDate and toData filters to limit which jobs are returned. The response is sorted by creationDataTime - latest date first. Jobs are normaly created by the StartTest, StartCutover, and TerminateTargetInstances APIs. Jobs are also created by DiagnosticLaunch and TerminateDiagnosticInstances, which are APIs available only to *Support* and only used in response to relevant support tickets.

                ", + "smithy.api#documentation": "

                 Returns a list of Jobs. Use the JobsID and fromDate and toDate filters to limit which jobs are returned. The response is sorted by creationDateTime - latest date first. Jobs are normally created by the StartTest, StartCutover, and TerminateTargetInstances APIs. Jobs are also created by DiagnosticLaunch and TerminateDiagnosticInstances, which are APIs available only to *Support* and only used in response to relevant support tickets.

                ", "smithy.api#http": { "method": "POST", "uri": "/DescribeJobs", @@ -1040,13 +1058,13 @@ "maxResults": { "target": "com.amazonaws.mgn#StrictlyPositiveInteger", "traits": { - "smithy.api#documentation": "

                Request to describe Job log by max results.

                " + "smithy.api#documentation": "

                Request to describe job log items by max results.

                " } }, "nextToken": { "target": "com.amazonaws.mgn#PaginationToken", "traits": { - "smithy.api#documentation": "

                Request to describe Job logby next token.

                " + "smithy.api#documentation": "

                Request to describe job log items by next token.

                " } } } @@ -1069,7 +1087,7 @@ "toDate": { "target": "com.amazonaws.mgn#ISO8601DatetimeString", "traits": { - "smithy.api#documentation": "

                Request to describe Job log by last date.

                " + "smithy.api#documentation": "

                Request to describe job log items by last date.

                " } } }, @@ -1319,7 +1337,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Lists all vCenter clients.

                ", + "smithy.api#documentation": "

                Returns a list of the installed vCenter clients.

                ", "smithy.api#http": { "method": "GET", "uri": "/DescribeVcenterClients", @@ -1390,7 +1408,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Disconnects specific Source Servers from Application Migration Service. Data replication is stopped immediately. All AWS resources created by Application Migration Service for enabling the replication of these source servers will be terminated / deleted within 90 minutes. Launched Test or Cutover instances will NOT be terminated. If the agent on the source server has not been prevented from communciating with the Application Migration Service service, then it will receive a command to uninstall itself (within approximately 10 minutes). The following properties of the SourceServer will be changed immediately: dataReplicationInfo.dataReplicationState will be set to DISCONNECTED; The totalStorageBytes property for each of dataReplicationInfo.replicatedDisks will be set to zero; dataReplicationInfo.lagDuration and dataReplicationInfo.lagDurationwill be nullified.

                ", + "smithy.api#documentation": "

                Disconnects specific Source Servers from Application Migration Service. Data replication is stopped immediately. All AWS resources created by Application Migration Service for enabling the replication of these source servers will be terminated / deleted within 90 minutes. Launched Test or Cutover instances will NOT be terminated. If the agent on the source server has not been prevented from communicating with the Application Migration Service service, then it will receive a command to uninstall itself (within approximately 10 minutes). The following properties of the SourceServer will be changed immediately: dataReplicationInfo.dataReplicationState will be set to DISCONNECTED; The totalStorageBytes property for each of dataReplicationInfo.replicatedDisks will be set to zero; dataReplicationInfo.lagDuration and dataReplicationInfo.lagDuration will be nullified.

                ", "smithy.api#http": { "method": "POST", "uri": "/DisconnectFromService", @@ -1484,7 +1502,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Finalizes the cutover immediately for specific Source Servers. All AWS resources created by Application Migration Service for enabling the replication of these source servers will be terminated / deleted within 90 minutes. Launched Test or Cutover instances will NOT be terminated. The AWS Replication Agent will receive a command to uninstall itself (within 10 minutes). The following properties of the SourceServer will be changed immediately: dataReplicationInfo.dataReplicationState will be to DISCONNECTED; The SourceServer.lifeCycle.state will be changed to CUTOVER; The totalStorageBytes property fo each of dataReplicationInfo.replicatedDisks will be set to zero; dataReplicationInfo.lagDuration and dataReplicationInfo.lagDurationwill be nullified.

                ", + "smithy.api#documentation": "

                 Finalizes the cutover immediately for specific Source Servers. All AWS resources created by Application Migration Service for enabling the replication of these source servers will be terminated / deleted within 90 minutes. Launched Test or Cutover instances will NOT be terminated. The AWS Replication Agent will receive a command to uninstall itself (within 10 minutes). The following properties of the SourceServer will be changed immediately: dataReplicationInfo.dataReplicationState will be changed to DISCONNECTED; The SourceServer.lifeCycle.state will be changed to CUTOVER; The totalStorageBytes property for each of dataReplicationInfo.replicatedDisks will be set to zero; dataReplicationInfo.lagDuration and dataReplicationInfo.lagDuration will be nullified.

                ", "smithy.api#http": { "method": "POST", "uri": "/FinalizeCutover", @@ -1498,7 +1516,7 @@ "sourceServerID": { "target": "com.amazonaws.mgn#SourceServerID", "traits": { - "smithy.api#documentation": "

                Request to finalize Cutover by Soure Server ID.

                ", + "smithy.api#documentation": "

                Request to finalize Cutover by Source Server ID.

                ", "smithy.api#required": {} } } @@ -1597,7 +1615,7 @@ "sourceServerID": { "target": "com.amazonaws.mgn#SourceServerID", "traits": { - "smithy.api#documentation": "

                Request to get Replication Configuaration by Source Server ID.

                ", + "smithy.api#documentation": "

                Request to get Replication Configuration by Source Server ID.

                ", "smithy.api#required": {} } } @@ -1791,7 +1809,7 @@ "tags": { "target": "com.amazonaws.mgn#TagsMap", "traits": { - "smithy.api#documentation": "

                Tags associated with spcific Job.

                " + "smithy.api#documentation": "

                Tags associated with specific Job.

                " } } }, @@ -2026,31 +2044,31 @@ "sourceServerID": { "target": "com.amazonaws.mgn#SourceServerID", "traits": { - "smithy.api#documentation": "

                Configure launch configuration Source Server ID.

                " + "smithy.api#documentation": "

                Launch configuration Source Server ID.

                " } }, "name": { "target": "com.amazonaws.mgn#SmallBoundedString", "traits": { - "smithy.api#documentation": "

                Configure launch configuration name.

                " + "smithy.api#documentation": "

                Launch configuration name.

                " } }, "ec2LaunchTemplateID": { "target": "com.amazonaws.mgn#BoundedString", "traits": { - "smithy.api#documentation": "

                Configure EC2 lauch configuration template ID.

                " + "smithy.api#documentation": "

                Launch configuration EC2 Launch template ID.

                " } }, "launchDisposition": { "target": "com.amazonaws.mgn#LaunchDisposition", "traits": { - "smithy.api#documentation": "

                Configure launch dispostion for launch configuration.

                " + "smithy.api#documentation": "

                Launch disposition for launch configuration.

                " } }, "targetInstanceTypeRightSizingMethod": { "target": "com.amazonaws.mgn#TargetInstanceTypeRightSizingMethod", "traits": { - "smithy.api#documentation": "

                Configure launch configuration Target instance type right sizing method.

                " + "smithy.api#documentation": "

                Launch configuration Target instance type right sizing method.

                " } }, "copyPrivateIp": { @@ -2068,7 +2086,13 @@ "licensing": { "target": "com.amazonaws.mgn#Licensing", "traits": { - "smithy.api#documentation": "

                Configure launch configuration OS licensing.

                " + "smithy.api#documentation": "

                Launch configuration OS licensing.

                " + } + }, + "bootMode": { + "target": "com.amazonaws.mgn#BootMode", + "traits": { + "smithy.api#documentation": "

                Launch configuration boot mode.

                " } } } @@ -2121,24 +2145,24 @@ "ec2InstanceID": { "target": "com.amazonaws.mgn#EC2InstanceID", "traits": { - "smithy.api#documentation": "

                Configure launced instance EC2 ID.

                " + "smithy.api#documentation": "

                Launched instance EC2 ID.

                " } }, "jobID": { "target": "com.amazonaws.mgn#JobID", "traits": { - "smithy.api#documentation": "

                Configure launced instance Job ID.

                " + "smithy.api#documentation": "

                Launched instance Job ID.

                " } }, "firstBoot": { "target": "com.amazonaws.mgn#FirstBoot", "traits": { - "smithy.api#documentation": "

                Configure launced instance first boot.

                " + "smithy.api#documentation": "

                Launched instance first boot.

                " } } }, "traits": { - "smithy.api#documentation": "

                Configure launced instance.

                " + "smithy.api#documentation": "

                Launched instance.

                " } }, "com.amazonaws.mgn#Licensing": { @@ -2297,7 +2321,7 @@ "finalized": { "target": "com.amazonaws.mgn#LifeCycleLastTestFinalized", "traits": { - "smithy.api#documentation": "

                Lifecycle last Test finlized.

                " + "smithy.api#documentation": "

                Lifecycle last Test finalized.

                " } } }, @@ -2316,7 +2340,7 @@ } }, "traits": { - "smithy.api#documentation": "

                Lifecycle last Test finlized.

                " + "smithy.api#documentation": "

                Lifecycle last Test finalized.

                " } }, "com.amazonaws.mgn#LifeCycleLastTestInitiated": { @@ -2487,7 +2511,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Archives specific Source Servers by setting the SourceServer.isArchived property to true for specified SourceServers by ID. This command only works for SourceServers with a lifecycle.state which equals DISCONNECTED or CUTOVER.

                ", + "smithy.api#documentation": "

                 Archives specific Source Servers by setting the SourceServer.isArchived property to true for specified SourceServers by ID. This command only works for SourceServers with a lifecycle.state which equals DISCONNECTED or CUTOVER.

                ", "smithy.api#http": { "method": "POST", "uri": "/MarkAsArchived", @@ -2723,6 +2747,10 @@ { "value": "ST1", "name": "ST1" + }, + { + "value": "GP3", + "name": "GP3" } ] } @@ -2768,6 +2796,12 @@ "traits": { "smithy.api#documentation": "

                Replication Configuration replicated disk IOPs.

                " } + }, + "throughput": { + "target": "com.amazonaws.mgn#PositiveInteger", + "traits": { + "smithy.api#documentation": "

                Replication Configuration replicated disk throughput.

                " + } } }, "traits": { @@ -2801,6 +2835,14 @@ { "value": "STANDARD", "name": "STANDARD" + }, + { + "value": "GP3", + "name": "GP3" + }, + { + "value": "IO2", + "name": "IO2" } ] } @@ -2823,7 +2865,7 @@ "replicationConfigurationTemplateID": { "target": "com.amazonaws.mgn#ReplicationConfigurationTemplateID", "traits": { - "smithy.api#documentation": "

                Replication Configuration template template ID.

                ", + "smithy.api#documentation": "

                Replication Configuration template ID.

                ", "smithy.api#required": {} } }, @@ -2866,7 +2908,7 @@ "defaultLargeStagingDiskType": { "target": "com.amazonaws.mgn#ReplicationConfigurationDefaultLargeStagingDiskType", "traits": { - "smithy.api#documentation": "

                Replication Configuration template use dedault large Staging Disk type.

                " + "smithy.api#documentation": "

                Replication Configuration template use default large Staging Disk type.

                " } }, "ebsEncryption": { @@ -2884,7 +2926,7 @@ "bandwidthThrottling": { "target": "com.amazonaws.mgn#PositiveInteger", "traits": { - "smithy.api#documentation": "

                Replication Configuration template bandwidth throtting.

                " + "smithy.api#documentation": "

                Replication Configuration template bandwidth throttling.

                " } }, "dataPlaneRouting": { @@ -3098,7 +3140,7 @@ "resourceId": { "target": "com.amazonaws.mgn#LargeBoundedString", "traits": { - "smithy.api#documentation": "

                Exceeded the service quota resource Id.

                " + "smithy.api#documentation": "

                Exceeded the service quota resource ID.

                " } }, "resourceType": { @@ -3442,7 +3484,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Starts replication on source server by ID.

                ", + "smithy.api#documentation": "

                Starts replication for SNAPSHOT_SHIPPING agents.

                ", "smithy.api#http": { "method": "POST", "uri": "/StartReplication", @@ -3482,7 +3524,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Lauches a Test Instance for specific Source Servers. This command starts a LAUNCH job whose initiatedBy property is StartTest and changes the SourceServer.lifeCycle.state property to TESTING.

                ", + "smithy.api#documentation": "

                Launches a Test Instance for specific Source Servers. This command starts a LAUNCH job whose initiatedBy property is StartTest and changes the SourceServer.lifeCycle.state property to TESTING.

                ", "smithy.api#http": { "method": "POST", "uri": "/StartTest", @@ -3572,6 +3614,9 @@ "input": { "target": "com.amazonaws.mgn#TagResourceRequest" }, + "output": { + "target": "smithy.api#Unit" + }, "errors": [ { "target": "com.amazonaws.mgn#AccessDeniedException" @@ -3770,7 +3815,7 @@ } }, "traits": { - "smithy.api#documentation": "

                Unitialized account exception.

                ", + "smithy.api#documentation": "

                Uninitialized account exception.

                ", "smithy.api#error": "client", "smithy.api#httpError": 400 } @@ -3780,6 +3825,9 @@ "input": { "target": "com.amazonaws.mgn#UntagResourceRequest" }, + "output": { + "target": "smithy.api#Unit" + }, "errors": [ { "target": "com.amazonaws.mgn#AccessDeniedException" @@ -3905,6 +3953,12 @@ "traits": { "smithy.api#documentation": "

                Update Launch configuration licensing request.

                " } + }, + "bootMode": { + "target": "com.amazonaws.mgn#BootMode", + "traits": { + "smithy.api#documentation": "

                Update Launch configuration boot mode request.

                " + } } } }, @@ -4183,7 +4237,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Updates source server Replication Type by ID.

                ", + "smithy.api#documentation": "

                Allows you to change between the AGENT_BASED replication type and the SNAPSHOT_SHIPPING replication type.

                ", "smithy.api#http": { "method": "POST", "uri": "/UpdateSourceServerReplicationType", diff --git a/codegen/sdk-codegen/aws-models/migration-hub-refactor-spaces.json b/codegen/sdk-codegen/aws-models/migration-hub-refactor-spaces.json index 4284bbefb815..3aca17420268 100644 --- a/codegen/sdk-codegen/aws-models/migration-hub-refactor-spaces.json +++ b/codegen/sdk-codegen/aws-models/migration-hub-refactor-spaces.json @@ -109,7 +109,7 @@ "NlbName": { "target": "com.amazonaws.migrationhubrefactorspaces#NlbName", "traits": { - "smithy.api#documentation": "

                The name of the Network Load Balancer that is configured by the API Gateway proxy.

                " + "smithy.api#documentation": "

                The name of the Network Load Balancer that is configured by the API Gateway proxy.\n

                " } }, "EndpointType": { @@ -179,7 +179,7 @@ "NlbName": { "target": "com.amazonaws.migrationhubrefactorspaces#NlbName", "traits": { - "smithy.api#documentation": "

                The name of the Network Load Balancer that is configured by the API Gateway proxy.

                " + "smithy.api#documentation": "

                The name of the Network Load Balancer that is configured by the API Gateway proxy.\n

                " } }, "EndpointType": { @@ -264,13 +264,13 @@ "Arn": { "target": "com.amazonaws.migrationhubrefactorspaces#ResourceArn", "traits": { - "smithy.api#documentation": "

                he Amazon Resource Name (ARN) of the application.

                " + "smithy.api#documentation": "

                The Amazon Resource Name (ARN) of the application.

                " } }, "OwnerAccountId": { "target": "com.amazonaws.migrationhubrefactorspaces#AccountId", "traits": { - "smithy.api#documentation": "

                The Amazon Web Services account ID of the application owner.

                " + "smithy.api#documentation": "

                The Amazon Web Services account ID of the application owner (which is always the same as\n the environment owner account ID).

                " } }, "CreatedByAccountId": { @@ -436,7 +436,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Creates an Amazon Web Services Migration Hub Refactor Spaces application. The account that owns the environment also owns the\n applications created inside the environment, regardless of the account that creates the\n application. Refactor Spaces provisions the Amazon API Gateway and Network Load Balancer for\n the application proxy inside your account.

                ", + "smithy.api#documentation": "

                Creates an Amazon Web Services Migration Hub Refactor Spaces application. The account that owns the environment also owns the\n applications created inside the environment, regardless of the account that creates the\n application. Refactor Spaces provisions an Amazon API Gateway, API Gateway VPC link, and\n Network Load Balancer for the application proxy inside your account.

                ", "smithy.api#http": { "method": "POST", "uri": "/environments/{EnvironmentIdentifier}/applications", @@ -515,7 +515,7 @@ "OwnerAccountId": { "target": "com.amazonaws.migrationhubrefactorspaces#AccountId", "traits": { - "smithy.api#documentation": "

                The Amazon Web Services account ID of the application owner.

                " + "smithy.api#documentation": "

                The Amazon Web Services account ID of the application owner (which is always the same as\n the environment owner account ID).

                " } }, "CreatedByAccountId": { @@ -612,7 +612,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Creates an Amazon Web Services Migration Hub Refactor Spaces environment. The caller owns the environment resource, and they\n are referred to as the environment owner. The environment owner has\n cross-account visibility and control of Refactor Spaces resources that are added to the environment\n by other accounts that the environment is shared with. When creating an environment, Refactor Spaces\n provisions a transit gateway in your account.

                ", + "smithy.api#documentation": "

                Creates an Amazon Web Services Migration Hub Refactor Spaces environment. The caller owns the environment resource, and all\n Refactor Spaces applications, services, and routes created within the environment. They are referred\n to as the environment owner. The environment owner has cross-account\n visibility and control of Refactor Spaces resources that are added to the environment by other\n accounts that the environment is shared with. When creating an environment, Refactor Spaces\n provisions a transit gateway in your account.

                ", "smithy.api#http": { "method": "POST", "uri": "/environments", @@ -755,7 +755,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Creates an Amazon Web Services Migration Hub Refactor Spaces route. The account owner of the service resource is always the\n environment owner, regardless of which account creates the route. Routes target a service in\n the application. If an application does not have any routes, then the first route must be\n created as a DEFAULT\n RouteType.

                \n

                When you create a route, Refactor Spaces configures the Amazon API Gateway to send traffic\n to the target service as follows:

                \n
                  \n
                • \n

                  If the service has a URL endpoint, and the endpoint resolves to a private IP address,\n Refactor Spaces routes traffic using the API Gateway VPC link.

                  \n
                • \n
                • \n

                  If the service has a URL endpoint, and the endpoint resolves to a public IP address,\n Refactor Spaces routes traffic over the public internet.

                  \n
                • \n
                • \n

                  If the service has an Lambda function endpoint, then Refactor Spaces uses\n the API Gateway\n Lambda integration.

                  \n
                • \n
                \n

                A health check is performed on the service when the route is created. If the health check\n fails, the route transitions to FAILED, and no traffic is sent to the service.

                \n

                For Lambda functions, the Lambda function state is checked. If\n the function is not active, the function configuration is updated so that Lambda\n resources are provisioned. If the Lambda state is Failed, then the\n route creation fails. For more information, see the GetFunctionConfiguration's State response parameter in the Lambda Developer Guide.

                \n

                For public URLs, a connection is opened to the public endpoint. If the URL is not reachable,\n the health check fails. For private URLs, a target group is created and the target group\n health check is run.

                \n

                The HealthCheckProtocol, HealthCheckPort, and\n HealthCheckPath are the same protocol, port, and path specified in the URL or\n health URL, if used. All other settings use the default values, as described in Health checks\n for your target groups. The health check is considered successful if at least one\n target within the target group transitions to a healthy state.

                ", + "smithy.api#documentation": "

                Creates an Amazon Web Services Migration Hub Refactor Spaces route. The account owner of the service resource is always the\n environment owner, regardless of which account creates the route. Routes target a service in\n the application. If an application does not have any routes, then the first route must be\n created as a DEFAULT\n RouteType.

                \n

                When you create a route, Refactor Spaces configures the Amazon API Gateway to send traffic\n to the target service as follows:

                \n
                  \n
                • \n

                  If the service has a URL endpoint, and the endpoint resolves to a private IP address,\n Refactor Spaces routes traffic using the API Gateway VPC link.

                  \n
                • \n
                • \n

                  If the service has a URL endpoint, and the endpoint resolves to a public IP address,\n Refactor Spaces routes traffic over the public internet.

                  \n
                • \n
                • \n

                  If the service has an Lambda function endpoint, then Refactor Spaces\n configures the Lambda function's resource policy to allow the application's\n API Gateway to invoke the function.

                  \n
                • \n
                \n

                A one-time health check is performed on the service when the route is created. If the\n health check fails, the route transitions to FAILED, and no traffic is sent to\n the service.

                \n

                For Lambda functions, the Lambda function state is checked. If the\n function is not active, the function configuration is updated so that Lambda\n resources are provisioned. If the Lambda state is Failed, then the\n route creation fails. For more information, see the GetFunctionConfiguration's State response parameter in the Lambda Developer Guide.

                \n

                For public URLs, a connection is opened to the public endpoint. If the URL is not\n reachable, the health check fails. For private URLs, a target group is created and the target\n group health check is run.

                \n

                The HealthCheckProtocol, HealthCheckPort, and\n HealthCheckPath are the same protocol, port, and path specified in the URL or\n health URL, if used. All other settings use the default values, as described in Health checks\n for your target groups. The health check is considered successful if at least one\n target within the target group transitions to a healthy state.

                \n

                Services can have HTTP or HTTPS URL endpoints. For HTTPS URLs, publicly-signed\n certificates are supported. Private Certificate Authorities (CAs) are permitted only if the\n CA's domain is publicly resolvable.

                ", "smithy.api#http": { "method": "POST", "uri": "/environments/{EnvironmentIdentifier}/applications/{ApplicationIdentifier}/routes", @@ -853,7 +853,7 @@ "ServiceId": { "target": "com.amazonaws.migrationhubrefactorspaces#ServiceId", "traits": { - "smithy.api#documentation": "

                The ID of service in which the rute iscreated. Traffic that matches this route is\n forwarded to this service.

                " + "smithy.api#documentation": "

                The ID of service in which the route is created. Traffic that matches this route is\n forwarded to this service.

                " } }, "ApplicationId": { @@ -871,7 +871,7 @@ "State": { "target": "com.amazonaws.migrationhubrefactorspaces#RouteState", "traits": { - "smithy.api#documentation": "

                he current state of the route.

                " + "smithy.api#documentation": "

                The current state of the route.

                " } }, "Tags": { @@ -926,7 +926,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Creates an Amazon Web Services Migration Hub Refactor Spaces service. The account owner of the service is always the\n environment owner, regardless of which account in the environment creates the service.\n Services have either a URL endpoint in a virtual private cloud (VPC), or a Lambda\n function endpoint.

                \n \n

                If an Amazon Web Services resourceis launched in a service VPC, and you want it to be\n accessible to all of an environment’s services with VPCs and routes, apply the\n RefactorSpacesSecurityGroup to the resource. Alternatively, to add more\n cross-account constraints, apply your own security group.

                \n
                ", + "smithy.api#documentation": "

                Creates an Amazon Web Services Migration Hub Refactor Spaces service. The account owner of the service is always the\n environment owner, regardless of which account in the environment creates the service.\n Services have either a URL endpoint in a virtual private cloud (VPC), or a Lambda\n function endpoint.

                \n \n

                If an Amazon Web Services resource is launched in a service VPC, and you want it to be\n accessible to all of an environment’s services with VPCs and routes, apply the\n RefactorSpacesSecurityGroup to the resource. Alternatively, to add more\n cross-account constraints, apply your own security group.

                \n
                ", "smithy.api#http": { "method": "POST", "uri": "/environments/{EnvironmentIdentifier}/applications/{ApplicationIdentifier}/services", @@ -1436,7 +1436,7 @@ "ApplicationId": { "target": "com.amazonaws.migrationhubrefactorspaces#ApplicationId", "traits": { - "smithy.api#documentation": "

                he ID of the application that the route belongs to.

                " + "smithy.api#documentation": "

                The ID of the application that the route belongs to.

                " } }, "State": { @@ -1713,7 +1713,7 @@ } }, "traits": { - "smithy.api#documentation": "

                The summary information for environments as a response to ListEnvironments.

                " + "smithy.api#documentation": "

                The summary information for environments as a response to ListEnvironments.\n

                " } }, "com.amazonaws.migrationhubrefactorspaces#EnvironmentVpc": { @@ -2038,7 +2038,7 @@ "OwnerAccountId": { "target": "com.amazonaws.migrationhubrefactorspaces#AccountId", "traits": { - "smithy.api#documentation": "

                The Amazon Web Services account ID of the application owner.

                " + "smithy.api#documentation": "

                The Amazon Web Services account ID of the application owner (which is always the same as\n the environment owner account ID).

                " } }, "CreatedByAccountId": { @@ -2802,8 +2802,8 @@ "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", - "items": "ApplicationSummaryList", - "pageSize": "MaxResults" + "pageSize": "MaxResults", + "items": "ApplicationSummaryList" }, "smithy.api#readonly": {} } @@ -2878,7 +2878,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Lists all the virtual private clouds (VPCs) that are part of an Amazon Web Services Migration Hub Refactor Spaces environment.

                ", + "smithy.api#documentation": "

                Lists all Amazon Web Services Migration Hub Refactor Spaces service virtual private clouds (VPCs) that are part of the\n environment.

                ", "smithy.api#http": { "method": "GET", "uri": "/environments/{EnvironmentIdentifier}/vpcs", @@ -2887,8 +2887,8 @@ "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", - "items": "EnvironmentVpcList", - "pageSize": "MaxResults" + "pageSize": "MaxResults", + "items": "EnvironmentVpcList" }, "smithy.api#readonly": {} } @@ -2972,8 +2972,8 @@ "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", - "items": "EnvironmentSummaryList", - "pageSize": "MaxResults" + "pageSize": "MaxResults", + "items": "EnvironmentSummaryList" }, "smithy.api#readonly": {} } @@ -3055,8 +3055,8 @@ "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", - "items": "RouteSummaryList", - "pageSize": "MaxResults" + "pageSize": "MaxResults", + "items": "RouteSummaryList" }, "smithy.api#readonly": {} } @@ -3154,8 +3154,8 @@ "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", - "items": "ServiceSummaryList", - "pageSize": "MaxResults" + "pageSize": "MaxResults", + "items": "ServiceSummaryList" }, "smithy.api#readonly": {} } @@ -3235,8 +3235,7 @@ "smithy.api#documentation": "

                Lists the tags of a resource. The caller account must be the same as the resource’s\n OwnerAccountId. Listing tags in other accounts is not supported.

                ", "smithy.api#http": { "method": "GET", - "uri": "/tags/{ResourceArn}", - "code": 200 + "uri": "/tags/{ResourceArn}" }, "smithy.api#readonly": {} } @@ -3394,7 +3393,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Attaches a resource-based permission policy to the Amazon Web Services Migration Hub Refactor Spaces environment. The policy\n must contain the same actions and condition statements as the\n arn:aws:ram::aws:permission/AWSRAMDefaultPermissionRefactorSpacesEnvironment\n permission in Resource Access Manager. The policy must not contain new lines or blank lines.

                ", + "smithy.api#documentation": "

                Attaches a resource-based permission policy to the Amazon Web Services Migration Hub Refactor Spaces environment. The policy\n must contain the same actions and condition statements as the\n arn:aws:ram::aws:permission/AWSRAMDefaultPermissionRefactorSpacesEnvironment\n permission in Resource Access Manager. The policy must not contain new lines or blank lines.\n

                ", "smithy.api#http": { "method": "PUT", "uri": "/resourcepolicy", @@ -3431,27 +3430,25 @@ "traits": { "aws.api#service": { "sdkId": "Migration Hub Refactor Spaces", - "arnNamespace": "refactor-spaces", - "cloudFormationName": "RefactorSpaces", - "cloudTrailEventSource": "refactor-spaces.amazonaws.com", - "endpointPrefix": "refactor-spaces" + "arnNamespace": "refactor-spaces" }, "aws.auth#sigv4": { "name": "refactor-spaces" }, "aws.protocols#restJson1": {}, "smithy.api#cors": { - "additionalAllowedHeaders": ["content-type"], "additionalExposedHeaders": [ - "date", - "x-amz-apigw-id", - "x-amzn-trace-id", "x-amzn-errortype", "x-amzn-requestid", - "x-amzn-errormessage" - ] + "x-amzn-errormessage", + "x-amzn-trace-id", + "x-amz-apigw-id", + "date" + ], + "additionalAllowedHeaders": ["content-type"], + "origin": "*" }, - "smithy.api#documentation": "Amazon Web Services Migration Hub Refactor Spaces\n\n

                This API reference provides descriptions, syntax, and other details about each of the\n actions and data types for Amazon Web Services Migration Hub Refactor Spaces (Refactor Spaces). The topic for each action shows the API\n request parameters and the response. Alternatively, you can use one of the Amazon Web Services SDKs to\n access an API that is tailored to the programming language or platform that you're using. For\n more information, see Amazon Web Services SDKs.

                ", + "smithy.api#documentation": "Amazon Web Services Migration Hub Refactor Spaces\n \n

                This API reference provides descriptions, syntax, and other details about each of the\n actions and data types for Amazon Web Services Migration Hub Refactor Spaces (Refactor Spaces). The topic for each action shows the API\n request parameters and the response. Alternatively, you can use one of the Amazon Web Services SDKs to\n access an API that is tailored to the programming language or platform that you're using. For\n more information, see Amazon Web Services SDKs.

                \n \n

                To share Refactor Spaces environments with other Amazon Web Services accounts or with Organizations\n and their OUs, use Resource Access Manager's CreateResourceShare API. See CreateResourceShare in the Amazon Web Services RAM API Reference.

                ", "smithy.api#title": "AWS Migration Hub Refactor Spaces" }, "version": "2021-10-26", @@ -4063,8 +4060,7 @@ "smithy.api#documentation": "

                Removes the tags of a given resource. Tags are metadata which can be used to manage a\n resource. To tag a resource, the caller account must be the same as the resource’s\n OwnerAccountId. Tagging resources in other accounts is not supported.

                \n \n

                Amazon Web Services Migration Hub Refactor Spaces does not propagate tags to orchestrated resources, such as an\n environment’s transit gateway.

                \n
                ", "smithy.api#http": { "method": "POST", - "uri": "/tags/{ResourceArn}", - "code": 200 + "uri": "/tags/{ResourceArn}" } } }, @@ -4163,8 +4159,7 @@ "smithy.api#documentation": "

                Adds to or modifies the tags of the given resource. Tags are metadata which can be used to\n manage a resource. To untag a resource, the caller account must be the same as the resource’s\n OwnerAccountId. Untagging resources across accounts is not supported.

                ", "smithy.api#http": { "method": "DELETE", - "uri": "/tags/{ResourceArn}", - "code": 200 + "uri": "/tags/{ResourceArn}" }, "smithy.api#idempotent": {} } @@ -4227,7 +4222,7 @@ "ActivationState": { "target": "com.amazonaws.migrationhubrefactorspaces#RouteActivationState", "traits": { - "smithy.api#documentation": "

                Indicates whether traffic is forwarded to this route’s service after the route is created.

                ", + "smithy.api#documentation": "

                Indicates whether traffic is forwarded to this route’s service after the route is created.\n

                ", "smithy.api#required": {} } }, @@ -4320,7 +4315,7 @@ } }, "traits": { - "smithy.api#documentation": "

                The input does not satisfy the constraints specified by an Amazon Web Service.

                ", + "smithy.api#documentation": "

                The input does not satisfy the constraints specified by an Amazon Web Service.\n

                ", "smithy.api#error": "client", "smithy.api#httpError": 400 } diff --git a/codegen/sdk-codegen/aws-models/panorama.json b/codegen/sdk-codegen/aws-models/panorama.json index 3f470b6ef0e6..1050680445f0 100644 --- a/codegen/sdk-codegen/aws-models/panorama.json +++ b/codegen/sdk-codegen/aws-models/panorama.json @@ -1,33 +1,5 @@ { "smithy": "1.0", - "metadata": { - "suppressions": [ - { - "id": "HttpMethodSemantics", - "namespace": "*" - }, - { - "id": "HttpResponseCodeSemantics", - "namespace": "*" - }, - { - "id": "PaginatedTrait", - "namespace": "*" - }, - { - "id": "HttpHeaderTrait", - "namespace": "*" - }, - { - "id": "HttpUriConflict", - "namespace": "*" - }, - { - "id": "Service", - "namespace": "*" - } - ] - }, "shapes": { "com.amazonaws.panorama#AccessDeniedException": { "type": "structure", @@ -45,6 +17,26 @@ "smithy.api#httpError": 403 } }, + "com.amazonaws.panorama#AlternateSoftwareMetadata": { + "type": "structure", + "members": { + "Version": { + "target": "com.amazonaws.panorama#Version", + "traits": { + "smithy.api#documentation": "

                The appliance software version.

                " + } + } + }, + "traits": { + "smithy.api#documentation": "

                Details about a beta appliance software update.

                " + } + }, + "com.amazonaws.panorama#AlternateSoftwares": { + "type": "list", + "member": { + "target": "com.amazonaws.panorama#AlternateSoftwareMetadata" + } + }, "com.amazonaws.panorama#ApplicationInstance": { "type": "structure", "members": { @@ -230,10 +222,7 @@ } }, "com.amazonaws.panorama#Boolean": { - "type": "boolean", - "traits": { - "smithy.api#box": {} - } + "type": "boolean" }, "com.amazonaws.panorama#Bucket": { "type": "string" @@ -882,7 +871,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Deletes a package.

                ", + "smithy.api#documentation": "

                Deletes a package.

                \n \n

                To delete a package, you need permission to call s3:DeleteObject\n in addition to permissions for the AWS Panorama API.

                \n
                ", "smithy.api#http": { "method": "DELETE", "uri": "/packages/{PackageId}", @@ -1467,6 +1456,18 @@ "traits": { "smithy.api#documentation": "

                The device's lease expiration time.

                " } + }, + "AlternateSoftwares": { + "target": "com.amazonaws.panorama#AlternateSoftwares", + "traits": { + "smithy.api#documentation": "

                Beta software releases available for the device.

                " + } + }, + "LatestAlternateSoftware": { + "target": "com.amazonaws.panorama#LatestAlternateSoftware", + "traits": { + "smithy.api#documentation": "

                The most recent beta software release.

                " + } } } }, @@ -2488,6 +2489,16 @@ "smithy.api#pattern": "^((25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)\\.(25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)\\.(25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)\\.(25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d))(:(6553[0-5]|655[0-2]\\d|65[0-4]\\d{2}|6[0-4]\\d{3}|[1-5]\\d{4}|[1-9]\\d{0,3}))?$" } }, + "com.amazonaws.panorama#IpAddressOrServerName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 255 + }, + "smithy.api#pattern": "(^([a-z0-9]+(-[a-z0-9]+)*\\.)+[a-z]{2,}$)|(^((25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)\\.(25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)\\.(25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)\\.(25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d))(:(6553[0-5]|655[0-2]\\d|65[0-4]\\d{2}|6[0-4]\\d{3}|[1-5]\\d{4}|[1-9]\\d{0,3}))?$)" + } + }, "com.amazonaws.panorama#Job": { "type": "structure", "members": { @@ -2577,6 +2588,15 @@ "com.amazonaws.panorama#LastUpdatedTime": { "type": "timestamp" }, + "com.amazonaws.panorama#LatestAlternateSoftware": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 255 + } + } + }, "com.amazonaws.panorama#LatestSoftware": { "type": "string", "traits": { @@ -3398,10 +3418,7 @@ } }, "com.amazonaws.panorama#MarkLatestPatch": { - "type": "boolean", - "traits": { - "smithy.api#box": {} - } + "type": "boolean" }, "com.amazonaws.panorama#Mask": { "type": "string", @@ -3436,6 +3453,10 @@ { "value": "NOT_CONNECTED", "name": "NOT_CONNECTED" + }, + { + "value": "CONNECTING", + "name": "CONNECTING" } ] } @@ -3454,6 +3475,12 @@ "traits": { "smithy.api#documentation": "

                Settings for Ethernet port 1.

                " } + }, + "Ntp": { + "target": "com.amazonaws.panorama#NtpPayload", + "traits": { + "smithy.api#documentation": "

                Network time protocol (NTP) server settings.

                " + } } }, "traits": { @@ -3474,6 +3501,18 @@ "traits": { "smithy.api#documentation": "

                The status of Ethernet port 1.

                " } + }, + "NtpStatus": { + "target": "com.amazonaws.panorama#NtpStatus", + "traits": { + "smithy.api#documentation": "

                Details about a network time protocol (NTP) server connection.

                " + } + }, + "LastUpdatedTime": { + "target": "com.amazonaws.panorama#LastUpdatedTime", + "traits": { + "smithy.api#documentation": "

                When the network status changed.

                " + } } }, "traits": { @@ -3925,6 +3964,67 @@ "target": "com.amazonaws.panorama#Node" } }, + "com.amazonaws.panorama#NtpPayload": { + "type": "structure", + "members": { + "NtpServers": { + "target": "com.amazonaws.panorama#NtpServerList", + "traits": { + "smithy.api#documentation": "

                NTP servers to use, in order of preference.

                ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                Network time protocol (NTP) server settings. Use this option to connect to local NTP\n servers instead of pool.ntp.org.

                " + } + }, + "com.amazonaws.panorama#NtpServerList": { + "type": "list", + "member": { + "target": "com.amazonaws.panorama#IpAddressOrServerName" + }, + "traits": { + "smithy.api#length": { + "max": 5 + } + } + }, + "com.amazonaws.panorama#NtpServerName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 255 + } + } + }, + "com.amazonaws.panorama#NtpStatus": { + "type": "structure", + "members": { + "ConnectionStatus": { + "target": "com.amazonaws.panorama#NetworkConnectionStatus", + "traits": { + "smithy.api#documentation": "

                The connection's status.

                " + } + }, + "IpAddress": { + "target": "com.amazonaws.panorama#IpAddress", + "traits": { + "smithy.api#documentation": "

                The IP address of the server.

                " + } + }, + "NtpServerName": { + "target": "com.amazonaws.panorama#NtpServerName", + "traits": { + "smithy.api#documentation": "

                The domain name of the server.

                " + } + } + }, + "traits": { + "smithy.api#documentation": "

                Details about an NTP server connection.

                " + } + }, "com.amazonaws.panorama#OTAJobConfig": { "type": "structure", "members": { @@ -3955,6 +4055,22 @@ }, "com.amazonaws.panorama#OmniCloudServiceLambda": { "type": "service", + "traits": { + "aws.api#service": { + "sdkId": "Panorama", + "arnNamespace": "panorama", + "cloudFormationName": "OmniCloudServiceLambda", + "cloudTrailEventSource": "panorama.amazonaws.com", + "endpointPrefix": "panorama" + }, + "aws.auth#sigv4": { + "name": "panorama" + }, + "aws.protocols#restJson1": {}, + "smithy.api#cors": {}, + "smithy.api#documentation": "AWS Panorama\n

                \n Overview\n

                \n

                This is the AWS Panorama API Reference. For an introduction to the service, see \n What is AWS Panorama? \n in the AWS Panorama Developer Guide.

                ", + "smithy.api#title": "AWS Panorama" + }, "version": "2019-07-24", "operations": [ { @@ -4056,22 +4172,7 @@ { "target": "com.amazonaws.panorama#UpdateDeviceMetadata" } - ], - "traits": { - "aws.api#service": { - "sdkId": "Panorama", - "arnNamespace": "panorama", - "cloudFormationName": "Panorama", - "cloudTrailEventSource": "panorama.amazonaws.com", - "endpointPrefix": "panorama" - }, - "aws.auth#sigv4": { - "name": "panorama" - }, - "aws.protocols#restJson1": {}, - "smithy.api#documentation": "AWS Panorama\n

                \n Overview\n

                \n

                This is the AWS Panorama API Reference. For an introduction to the service, see \n What is AWS Panorama? \n in the AWS Panorama Developer Guide.

                ", - "smithy.api#title": "AWS Panorama" - } + ] }, "com.amazonaws.panorama#OutPutS3Location": { "type": "structure", @@ -4244,6 +4345,10 @@ { "value": "NODE_PACKAGE_VERSION", "name": "NODE_PACKAGE_VERSION" + }, + { + "value": "MARKETPLACE_NODE_PACKAGE_VERSION", + "name": "MARKETPLACE_NODE_PACKAGE_VERSION" } ] } @@ -5374,6 +5479,15 @@ } ] } + }, + "com.amazonaws.panorama#Version": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 255 + } + } } } } diff --git a/codegen/sdk-codegen/aws-models/rds.json b/codegen/sdk-codegen/aws-models/rds.json index 34bf8cdeb231..74cfba8e7377 100644 --- a/codegen/sdk-codegen/aws-models/rds.json +++ b/codegen/sdk-codegen/aws-models/rds.json @@ -1541,7 +1541,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Copies a snapshot of a DB cluster.

                \n

                To copy a DB cluster snapshot from a shared manual DB cluster snapshot, SourceDBClusterSnapshotIdentifier\n must be the Amazon Resource Name (ARN) of the shared DB cluster snapshot.

                \n\n

                You can copy an encrypted DB cluster snapshot from another Amazon Web Services Region. In that case, the Amazon Web Services Region where you call the CopyDBClusterSnapshot action \n is the destination Amazon Web Services Region for the encrypted DB cluster snapshot to be copied to. To copy an encrypted DB cluster snapshot from another Amazon Web Services Region, \n you must provide the following values:

                \n \n
                  \n
                • \n

                  \n KmsKeyId - The Amazon Web Services Key Management System (Amazon Web Services KMS) key identifier for the key to use to \n encrypt the copy of the DB cluster snapshot in the destination Amazon Web Services Region.

                  \n
                • \n
                • \n

                  \n PreSignedUrl - A URL that contains a Signature Version 4 signed request for the \n CopyDBClusterSnapshot action to be called in the source Amazon Web Services Region where the DB cluster snapshot is copied from. \n The pre-signed URL must be a valid request for the CopyDBClusterSnapshot API action that can be executed in the \n source Amazon Web Services Region that contains the encrypted DB cluster snapshot to be copied.

                  \n

                  The pre-signed URL request must contain the following parameter values:

                  \n
                    \n
                  • \n

                    \n KmsKeyId - The Amazon Web Services KMS key identifier for the KMS key to use to encrypt the copy of the DB \n cluster snapshot in the destination Amazon Web Services Region. This is the same identifier for both the CopyDBClusterSnapshot \n action that is called in the destination Amazon Web Services Region, and the action contained in the pre-signed URL.

                    \n
                  • \n
                  • \n

                    \n DestinationRegion - The name of the Amazon Web Services Region that the DB cluster snapshot is to be created in.

                    \n
                  • \n
                  • \n

                    \n SourceDBClusterSnapshotIdentifier - The DB cluster snapshot identifier for the encrypted DB cluster \n snapshot to be copied. This identifier must be in the Amazon Resource Name (ARN) format for the source Amazon Web Services Region. For example, \n if you are copying an encrypted DB cluster snapshot from the us-west-2 Amazon Web Services Region, then your SourceDBClusterSnapshotIdentifier\n looks like the following example: arn:aws:rds:us-west-2:123456789012:cluster-snapshot:aurora-cluster1-snapshot-20161115.

                    \n
                  • \n
                  \n

                  To learn how to generate a Signature Version 4 signed request, see \n \n Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and\n \n Signature Version 4 Signing Process.

                  \n \n

                  If you are using an Amazon Web Services SDK tool or the CLI, you can specify SourceRegion (or --source-region for the CLI) \n instead of specifying PreSignedUrl manually. Specifying SourceRegion autogenerates a pre-signed URL that is a valid \n request for the operation that can be executed in the source Amazon Web Services Region.

                  \n
                  \n
                • \n
                • \n

                  \n TargetDBClusterSnapshotIdentifier - The identifier for the new copy of the DB cluster snapshot in the destination Amazon Web Services Region.

                  \n
                • \n
                • \n

                  \n SourceDBClusterSnapshotIdentifier - The DB cluster snapshot identifier for the encrypted DB cluster snapshot to be copied. \n This identifier must be in the ARN format for the source Amazon Web Services Region and is the same value as the SourceDBClusterSnapshotIdentifier in the pre-signed URL.

                  \n
                • \n
                \n

                To cancel the copy operation once it is in progress, delete the target DB cluster snapshot identified\n by TargetDBClusterSnapshotIdentifier while that DB cluster snapshot is in \"copying\" status.

                \n

                For more information on copying encrypted Amazon Aurora DB cluster snapshots from one Amazon Web Services Region to another, see \n \n Copying a Snapshot in the Amazon Aurora User Guide.\n

                \n

                For more information on Amazon Aurora DB clusters, see \n \n What is Amazon Aurora? in the Amazon Aurora User Guide.\n

                \n

                For more information on Multi-AZ DB clusters, see \n \n Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.\n

                \n \n

                The Multi-AZ DB clusters feature is in preview and is subject to change.

                \n
                " + "smithy.api#documentation": "

                Copies a snapshot of a DB cluster.

                \n

                To copy a DB cluster snapshot from a shared manual DB cluster snapshot, SourceDBClusterSnapshotIdentifier\n must be the Amazon Resource Name (ARN) of the shared DB cluster snapshot.

                \n\n

                You can copy an encrypted DB cluster snapshot from another Amazon Web Services Region. In that case, the Amazon Web Services Region where you call the CopyDBClusterSnapshot action \n is the destination Amazon Web Services Region for the encrypted DB cluster snapshot to be copied to. To copy an encrypted DB cluster snapshot from another Amazon Web Services Region, \n you must provide the following values:

                \n \n
                  \n
                • \n

                  \n KmsKeyId - The Amazon Web Services Key Management System (Amazon Web Services KMS) key identifier for the key to use to \n encrypt the copy of the DB cluster snapshot in the destination Amazon Web Services Region.

                  \n
                • \n
                • \n

                  \n PreSignedUrl - A URL that contains a Signature Version 4 signed request for the \n CopyDBClusterSnapshot action to be called in the source Amazon Web Services Region where the DB cluster snapshot is copied from. \n The pre-signed URL must be a valid request for the CopyDBClusterSnapshot API action that can be executed in the \n source Amazon Web Services Region that contains the encrypted DB cluster snapshot to be copied.

                  \n

                  The pre-signed URL request must contain the following parameter values:

                  \n
                    \n
                  • \n

                    \n KmsKeyId - The Amazon Web Services KMS key identifier for the KMS key to use to encrypt the copy of the DB \n cluster snapshot in the destination Amazon Web Services Region. This is the same identifier for both the CopyDBClusterSnapshot \n action that is called in the destination Amazon Web Services Region, and the action contained in the pre-signed URL.

                    \n
                  • \n
                  • \n

                    \n DestinationRegion - The name of the Amazon Web Services Region that the DB cluster snapshot is to be created in.

                    \n
                  • \n
                  • \n

                    \n SourceDBClusterSnapshotIdentifier - The DB cluster snapshot identifier for the encrypted DB cluster \n snapshot to be copied. This identifier must be in the Amazon Resource Name (ARN) format for the source Amazon Web Services Region. For example, \n if you are copying an encrypted DB cluster snapshot from the us-west-2 Amazon Web Services Region, then your SourceDBClusterSnapshotIdentifier\n looks like the following example: arn:aws:rds:us-west-2:123456789012:cluster-snapshot:aurora-cluster1-snapshot-20161115.

                    \n
                  • \n
                  \n

                  To learn how to generate a Signature Version 4 signed request, see \n \n Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and\n \n Signature Version 4 Signing Process.

                  \n \n

                  If you are using an Amazon Web Services SDK tool or the CLI, you can specify SourceRegion (or --source-region for the CLI) \n instead of specifying PreSignedUrl manually. Specifying SourceRegion autogenerates a pre-signed URL that is a valid \n request for the operation that can be executed in the source Amazon Web Services Region.

                  \n
                  \n
                • \n
                • \n

                  \n TargetDBClusterSnapshotIdentifier - The identifier for the new copy of the DB cluster snapshot in the destination Amazon Web Services Region.

                  \n
                • \n
                • \n

                  \n SourceDBClusterSnapshotIdentifier - The DB cluster snapshot identifier for the encrypted DB cluster snapshot to be copied. \n This identifier must be in the ARN format for the source Amazon Web Services Region and is the same value as the SourceDBClusterSnapshotIdentifier in the pre-signed URL.

                  \n
                • \n
                \n

                To cancel the copy operation once it is in progress, delete the target DB cluster snapshot identified\n by TargetDBClusterSnapshotIdentifier while that DB cluster snapshot is in \"copying\" status.

                \n

                For more information on copying encrypted Amazon Aurora DB cluster snapshots from one Amazon Web Services Region to another, see \n \n Copying a Snapshot in the Amazon Aurora User Guide.\n

                \n

                For more information on Amazon Aurora DB clusters, see \n \n What is Amazon Aurora? in the Amazon Aurora User Guide.\n

                \n

                For more information on Multi-AZ DB clusters, see \n \n Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.\n

                " } }, "com.amazonaws.rds#CopyDBClusterSnapshotMessage": { @@ -2023,7 +2023,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Creates a new Amazon Aurora DB cluster or Multi-AZ DB cluster.

                \n

                You can use the ReplicationSourceIdentifier parameter to create an Amazon\n Aurora DB cluster as a read replica of another DB cluster or Amazon RDS MySQL or\n PostgreSQL DB instance. For cross-Region replication where the DB cluster identified by\n ReplicationSourceIdentifier is encrypted, also specify the\n PreSignedUrl parameter.

                \n

                For more information on Amazon Aurora, see \n \n What is Amazon Aurora? in the Amazon Aurora User Guide.\n

                \n

                For more information on Multi-AZ DB clusters, see \n \n Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.\n

                \n \n

                The Multi-AZ DB clusters feature is in preview and is subject to change.

                \n
                " + "smithy.api#documentation": "

                Creates a new Amazon Aurora DB cluster or Multi-AZ DB cluster.

                \n

                You can use the ReplicationSourceIdentifier parameter to create an Amazon\n Aurora DB cluster as a read replica of another DB cluster or Amazon RDS MySQL or\n PostgreSQL DB instance. For cross-Region replication where the DB cluster identified by\n ReplicationSourceIdentifier is encrypted, also specify the\n PreSignedUrl parameter.

                \n

                For more information on Amazon Aurora, see \n \n What is Amazon Aurora? in the Amazon Aurora User Guide.\n

                \n

                For more information on Multi-AZ DB clusters, see \n \n Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.\n

                " } }, "com.amazonaws.rds#CreateDBClusterEndpoint": { @@ -2248,7 +2248,7 @@ "EnableCloudwatchLogsExports": { "target": "com.amazonaws.rds#LogTypeList", "traits": { - "smithy.api#documentation": "

                The list of log types that need to be enabled for exporting to CloudWatch Logs. The values\n in the list depend on the DB engine being used. For more information, see \n Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.

                \n

                \n Aurora MySQL\n

                \n

                Possible values are audit, error, general, and slowquery.

                \n

                \n Aurora PostgreSQL\n

                \n

                Possible value is postgresql.

                \n

                Valid for: Aurora DB clusters only

                " + "smithy.api#documentation": "

                The list of log types that need to be enabled for exporting to CloudWatch Logs. The values\n in the list depend on the DB engine being used.

                \n

                \n RDS for MySQL\n

                \n

                Possible values are error, general, and slowquery.

                \n

                \n RDS for PostgreSQL\n

                \n

                Possible values are postgresql and upgrade.

                \n

                \n Aurora MySQL\n

                \n

                Possible values are audit, error, general, and slowquery.

                \n

                \n Aurora PostgreSQL\n

                \n

                Possible value is postgresql.

                \n

                For more information about exporting CloudWatch Logs for Amazon RDS, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Relational Database\n Service User Guide.

                \n

                For more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.

                \n

                Valid for: Aurora DB clusters and Multi-AZ DB clusters

                " } }, "EngineMode": { @@ -2284,7 +2284,7 @@ "CopyTagsToSnapshot": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

                A value that indicates whether to copy all tags from the DB cluster to snapshots of the DB cluster. \n The default is not to copy them.

                \n

                Valid for: Aurora DB clusters only

                " + "smithy.api#documentation": "

                A value that indicates whether to copy all tags from the DB cluster to snapshots of the DB cluster. \n The default is not to copy them.

                \n

                Valid for: Aurora DB clusters and Multi-AZ DB clusters

                " } }, "Domain": { @@ -2393,7 +2393,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Creates a new DB cluster parameter group.

                \n

                Parameters in a DB cluster parameter group apply to all of the instances in a DB cluster.

                \n

                \n A DB cluster parameter group is initially created with the default parameters for the\n database engine used by instances in the DB cluster. To provide custom values for any of the\n parameters, you must modify the group after creating it using\n ModifyDBClusterParameterGroup. Once you've created a DB cluster parameter group, you need to\n associate it with your DB cluster using ModifyDBCluster.

                \n

                When you associate a new DB cluster parameter group with a running Aurora DB cluster, reboot the DB\n instances in the DB cluster without failover for the new DB cluster parameter group and \n associated settings to take effect. \n

                \n

                When you associate a new DB cluster parameter group with a running Multi-AZ DB cluster, reboot the DB\n cluster without failover for the new DB cluster parameter group and associated settings to take effect. \n

                \n \n

                After you create a DB cluster parameter group, you should wait at least 5 minutes\n before creating your first DB cluster\n that uses that DB cluster parameter group as the default parameter \n group. This allows Amazon RDS to fully complete the create action before the DB cluster parameter \n group is used as the default for a new DB cluster. This is especially important for parameters \n that are critical when creating the default database for a DB cluster, such as the character set \n for the default database defined by the character_set_database parameter. You can use the \n Parameter Groups option of the Amazon RDS console or the \n DescribeDBClusterParameters action to verify \n that your DB cluster parameter group has been created or modified.

                \n
                \n

                For more information on Amazon Aurora, see \n \n What is Amazon Aurora? in the Amazon Aurora User Guide.\n

                \n

                For more information on Multi-AZ DB clusters, see \n \n Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.\n

                \n \n

                The Multi-AZ DB clusters feature is in preview and is subject to change.

                \n
                " + "smithy.api#documentation": "

                Creates a new DB cluster parameter group.

                \n

                Parameters in a DB cluster parameter group apply to all of the instances in a DB cluster.

                \n

                \n A DB cluster parameter group is initially created with the default parameters for the\n database engine used by instances in the DB cluster. To provide custom values for any of the\n parameters, you must modify the group after creating it using\n ModifyDBClusterParameterGroup. Once you've created a DB cluster parameter group, you need to\n associate it with your DB cluster using ModifyDBCluster.

                \n

                When you associate a new DB cluster parameter group with a running Aurora DB cluster, reboot the DB\n instances in the DB cluster without failover for the new DB cluster parameter group and \n associated settings to take effect. \n

                \n

                When you associate a new DB cluster parameter group with a running Multi-AZ DB cluster, reboot the DB\n cluster without failover for the new DB cluster parameter group and associated settings to take effect. \n

                \n \n

                After you create a DB cluster parameter group, you should wait at least 5 minutes\n before creating your first DB cluster\n that uses that DB cluster parameter group as the default parameter \n group. This allows Amazon RDS to fully complete the create action before the DB cluster parameter \n group is used as the default for a new DB cluster. This is especially important for parameters \n that are critical when creating the default database for a DB cluster, such as the character set \n for the default database defined by the character_set_database parameter. You can use the \n Parameter Groups option of the Amazon RDS console or the \n DescribeDBClusterParameters action to verify \n that your DB cluster parameter group has been created or modified.

                \n
                \n

                For more information on Amazon Aurora, see \n \n What is Amazon Aurora? in the Amazon Aurora User Guide.\n

                \n

                For more information on Multi-AZ DB clusters, see \n \n Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.\n

                " } }, "com.amazonaws.rds#CreateDBClusterParameterGroupMessage": { @@ -2473,7 +2473,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Creates a snapshot of a DB cluster.

                \n

                For more information on Amazon Aurora, see What is Amazon\n Aurora? in the Amazon Aurora User Guide.\n

                \n

                For more information on Multi-AZ DB clusters, see \n \n Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.\n

                \n \n

                The Multi-AZ DB clusters feature is in preview and is subject to change.

                \n
                " + "smithy.api#documentation": "

                Creates a snapshot of a DB cluster.

                \n

                For more information on Amazon Aurora, see What is Amazon\n Aurora? in the Amazon Aurora User Guide.\n

                \n

                For more information on Multi-AZ DB clusters, see \n \n Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.\n

                " } }, "com.amazonaws.rds#CreateDBClusterSnapshotMessage": { @@ -4474,7 +4474,7 @@ } }, "traits": { - "smithy.api#documentation": "

                Contains the details of an Amazon Aurora DB cluster or Multi-AZ DB cluster.\n

                \n

                For an Amazon Aurora DB cluster, this data type is used as a response element in the operations \n CreateDBCluster, DeleteDBCluster, DescribeDBClusters, \n FailoverDBCluster, ModifyDBCluster, PromoteReadReplicaDBCluster, \n RestoreDBClusterFromS3, RestoreDBClusterFromSnapshot, \n RestoreDBClusterToPointInTime, StartDBCluster, and StopDBCluster.

                \n

                For a Multi-AZ DB cluster, this data type is used as a response element in the operations \n CreateDBCluster, DeleteDBCluster, DescribeDBClusters, \n FailoverDBCluster, ModifyDBCluster, RebootDBCluster, \n RestoreDBClusterFromSnapshot, and RestoreDBClusterToPointInTime.

                \n

                For more information on Amazon Aurora DB clusters, see \n \n What is Amazon Aurora? in the Amazon Aurora User Guide.\n

                \n

                For more information on Multi-AZ DB clusters, see \n \n Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.\n

                \n \n

                The Multi-AZ DB clusters feature is in preview and is subject to change.

                \n
                " + "smithy.api#documentation": "

                Contains the details of an Amazon Aurora DB cluster or Multi-AZ DB cluster.\n

                \n

                For an Amazon Aurora DB cluster, this data type is used as a response element in the operations \n CreateDBCluster, DeleteDBCluster, DescribeDBClusters, \n FailoverDBCluster, ModifyDBCluster, PromoteReadReplicaDBCluster, \n RestoreDBClusterFromS3, RestoreDBClusterFromSnapshot, \n RestoreDBClusterToPointInTime, StartDBCluster, and StopDBCluster.

                \n

                For a Multi-AZ DB cluster, this data type is used as a response element in the operations \n CreateDBCluster, DeleteDBCluster, DescribeDBClusters, \n FailoverDBCluster, ModifyDBCluster, RebootDBCluster, \n RestoreDBClusterFromSnapshot, and RestoreDBClusterToPointInTime.

                \n

                For more information on Amazon Aurora DB clusters, see \n \n What is Amazon Aurora? in the Amazon Aurora User Guide.\n

                \n

                For more information on Multi-AZ DB clusters, see \n \n Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.\n

                " } }, "com.amazonaws.rds#DBClusterAlreadyExistsFault": { @@ -7974,7 +7974,7 @@ } ], "traits": { - "smithy.api#documentation": "

                The DeleteDBCluster action deletes a previously provisioned DB cluster. \n When you delete a DB cluster, all automated backups for that DB cluster are deleted and can't be recovered. \n Manual DB cluster snapshots of the specified DB cluster are not deleted.

                \n

                For more information on Amazon Aurora, see \n \n What is Amazon Aurora? in the Amazon Aurora User Guide.\n

                \n

                For more information on Multi-AZ DB clusters, see \n \n Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.\n

                \n \n

                The Multi-AZ DB clusters feature is in preview and is subject to change.

                \n
                " + "smithy.api#documentation": "

                The DeleteDBCluster action deletes a previously provisioned DB cluster. \n When you delete a DB cluster, all automated backups for that DB cluster are deleted and can't be recovered. \n Manual DB cluster snapshots of the specified DB cluster are not deleted.

                \n

                For more information on Amazon Aurora, see \n \n What is Amazon Aurora? in the Amazon Aurora User Guide.\n

                \n

                For more information on Multi-AZ DB clusters, see \n \n Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.\n

                " } }, "com.amazonaws.rds#DeleteDBClusterEndpoint": { @@ -8053,7 +8053,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Deletes a specified DB cluster parameter group. The DB cluster parameter group to be deleted can't be associated with any DB clusters.

                \n

                For more information on Amazon Aurora, see \n \n What is Amazon Aurora? in the Amazon Aurora User Guide.\n

                \n

                For more information on Multi-AZ DB clusters, see \n \n Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.\n

                \n \n

                The Multi-AZ DB clusters feature is in preview and is subject to change.

                \n
                " + "smithy.api#documentation": "

                Deletes a specified DB cluster parameter group. The DB cluster parameter group to be deleted can't be associated with any DB clusters.

                \n

                For more information on Amazon Aurora, see \n \n What is Amazon Aurora? in the Amazon Aurora User Guide.\n

                \n

                For more information on Multi-AZ DB clusters, see \n \n Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.\n

                " } }, "com.amazonaws.rds#DeleteDBClusterParameterGroupMessage": { @@ -8096,7 +8096,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Deletes a DB cluster snapshot. If the snapshot is being copied, the copy operation is terminated.

                \n \n

                The DB cluster snapshot must be in the available state to be\n deleted.

                \n
                \n

                For more information on Amazon Aurora, see \n \n What is Amazon Aurora? in the Amazon Aurora User Guide.\n

                \n

                For more information on Multi-AZ DB clusters, see \n \n Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.\n

                \n \n

                The Multi-AZ DB clusters feature is in preview and is subject to change.

                \n
                " + "smithy.api#documentation": "

                Deletes a DB cluster snapshot. If the snapshot is being copied, the copy operation is terminated.

                \n \n

                The DB cluster snapshot must be in the available state to be\n deleted.

                \n
                \n

                For more information on Amazon Aurora, see \n \n What is Amazon Aurora? in the Amazon Aurora User Guide.\n

                \n

                For more information on Multi-AZ DB clusters, see \n \n Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.\n

                " } }, "com.amazonaws.rds#DeleteDBClusterSnapshotMessage": { @@ -8938,7 +8938,7 @@ } ], "traits": { - "smithy.api#documentation": "

                \n Returns a list of DBClusterParameterGroup descriptions. If a \n DBClusterParameterGroupName parameter is specified,\n the list will contain only the description of the specified DB cluster parameter group.\n

                \n

                For more information on Amazon Aurora, see \n \n What is Amazon Aurora? in the Amazon Aurora User Guide.\n

                \n

                For more information on Multi-AZ DB clusters, see \n \n Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.\n

                \n \n

                The Multi-AZ DB clusters feature is in preview and is subject to change.

                \n
                ", + "smithy.api#documentation": "

                \n Returns a list of DBClusterParameterGroup descriptions. If a \n DBClusterParameterGroupName parameter is specified,\n the list will contain only the description of the specified DB cluster parameter group.\n

                \n

                For more information on Amazon Aurora, see \n \n What is Amazon Aurora? in the Amazon Aurora User Guide.\n

                \n

                For more information on Multi-AZ DB clusters, see \n \n Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.\n

                ", "smithy.api#paginated": { "inputToken": "Marker", "outputToken": "Marker", @@ -8993,7 +8993,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Returns the detailed parameter list for a particular DB cluster parameter group.

                \n

                For more information on Amazon Aurora, see \n \n What is Amazon Aurora? in the Amazon Aurora User Guide.\n

                \n

                For more information on Multi-AZ DB clusters, see \n \n Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.\n

                \n \n

                The Multi-AZ DB clusters feature is in preview and is subject to change.

                \n
                ", + "smithy.api#documentation": "

                Returns the detailed parameter list for a particular DB cluster parameter group.

                \n

                For more information on Amazon Aurora, see \n \n What is Amazon Aurora? in the Amazon Aurora User Guide.\n

                \n

                For more information on Multi-AZ DB clusters, see \n \n Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.\n

                ", "smithy.api#paginated": { "inputToken": "Marker", "outputToken": "Marker", @@ -9095,7 +9095,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Returns information about DB cluster snapshots. This API action supports pagination.

                \n

                For more information on Amazon Aurora DB clusters, see \n \n What is Amazon Aurora? in the Amazon Aurora User Guide.\n

                \n

                For more information on Multi-AZ DB clusters, see \n \n Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.\n

                \n \n

                The Multi-AZ DB clusters feature is in preview and is subject to change.

                \n
                ", + "smithy.api#documentation": "

                Returns information about DB cluster snapshots. This API action supports pagination.

                \n

                For more information on Amazon Aurora DB clusters, see \n \n What is Amazon Aurora? in the Amazon Aurora User Guide.\n

                \n

                For more information on Multi-AZ DB clusters, see \n \n Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.\n

                ", "smithy.api#paginated": { "inputToken": "Marker", "outputToken": "Marker", @@ -9302,7 +9302,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Returns information about Amazon Aurora DB clusters and Multi-AZ DB clusters. This API supports pagination.

                \n

                For more information on Amazon Aurora DB clusters, see \n \n What is Amazon Aurora? in the Amazon Aurora User Guide.\n

                \n

                For more information on Multi-AZ DB clusters, see \n \n Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.\n

                \n \n

                The Multi-AZ DB clusters feature is in preview and is subject to change.

                \n
                \n

                This operation can also return information for Amazon Neptune DB instances and Amazon DocumentDB instances.

                ", + "smithy.api#documentation": "

                Returns information about Amazon Aurora DB clusters and Multi-AZ DB clusters. This API supports pagination.

                \n

                For more information on Amazon Aurora DB clusters, see \n \n What is Amazon Aurora? in the Amazon Aurora User Guide.\n

                \n

                For more information on Multi-AZ DB clusters, see \n \n Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.\n

                \n

                This operation can also return information for Amazon Neptune DB instances and Amazon DocumentDB instances.

                ", "smithy.api#paginated": { "inputToken": "Marker", "outputToken": "Marker", @@ -12308,7 +12308,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Forces a failover for a DB cluster.

                \n

                For an Aurora DB cluster, failover for a DB cluster promotes one of the Aurora Replicas (read-only instances)\n in the DB cluster to be the primary DB instance (the cluster writer).

                \n

                For a Multi-AZ DB cluster, failover for a DB cluster promotes one of the readable standby DB instances (read-only instances)\n in the DB cluster to be the primary DB instance (the cluster writer).

                \n

                An Amazon Aurora DB cluster automatically fails over to an Aurora Replica, if one exists,\n when the primary DB instance fails. A Multi-AZ DB cluster automatically fails over to a readbable standby \n DB instance when the primary DB instance fails.

                \n

                To simulate a failure of a primary instance for testing, you can force a failover. \n Because each instance in a DB cluster has its own endpoint address, make sure to clean up and re-establish any existing \n connections that use those endpoint addresses when the failover is complete.

                \n

                For more information on Amazon Aurora DB clusters, see \n \n What is Amazon Aurora? in the Amazon Aurora User Guide.\n

                \n

                For more information on Multi-AZ DB clusters, see \n \n Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.\n

                \n \n

                The Multi-AZ DB clusters feature is in preview and is subject to change.

                \n
                " + "smithy.api#documentation": "

                Forces a failover for a DB cluster.

                \n

                For an Aurora DB cluster, failover for a DB cluster promotes one of the Aurora Replicas (read-only instances)\n in the DB cluster to be the primary DB instance (the cluster writer).

                \n

                For a Multi-AZ DB cluster, failover for a DB cluster promotes one of the readable standby DB instances (read-only instances)\n in the DB cluster to be the primary DB instance (the cluster writer).

                \n

                An Amazon Aurora DB cluster automatically fails over to an Aurora Replica, if one exists,\n when the primary DB instance fails. A Multi-AZ DB cluster automatically fails over to a readable standby \n DB instance when the primary DB instance fails.

                \n

                To simulate a failure of a primary instance for testing, you can force a failover. \n Because each instance in a DB cluster has its own endpoint address, make sure to clean up and re-establish any existing \n connections that use those endpoint addresses when the failover is complete.

                \n

                For more information on Amazon Aurora DB clusters, see \n \n What is Amazon Aurora? in the Amazon Aurora User Guide.\n

                \n

                For more information on Multi-AZ DB clusters, see \n \n Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.\n

                " } }, "com.amazonaws.rds#FailoverDBClusterMessage": { @@ -13803,7 +13803,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Modify the settings for an Amazon Aurora DB cluster or a Multi-AZ DB cluster. \n You can change one or more settings by specifying these parameters and the new values in the\n request.

                \n

                For more information on Amazon Aurora DB clusters, see \n \n What is Amazon Aurora? in the Amazon Aurora User Guide.\n

                \n

                For more information on Multi-AZ DB clusters, see \n \n Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.\n

                \n \n

                The Multi-AZ DB clusters feature is in preview and is subject to change.

                \n
                " + "smithy.api#documentation": "

                Modify the settings for an Amazon Aurora DB cluster or a Multi-AZ DB cluster. \n You can change one or more settings by specifying these parameters and the new values in the\n request.

                \n

                For more information on Amazon Aurora DB clusters, see \n \n What is Amazon Aurora? in the Amazon Aurora User Guide.\n

                \n

                For more information on Multi-AZ DB clusters, see \n \n Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.\n

                " } }, "com.amazonaws.rds#ModifyDBClusterEndpoint": { @@ -13950,7 +13950,7 @@ "CloudwatchLogsExportConfiguration": { "target": "com.amazonaws.rds#CloudwatchLogsExportConfiguration", "traits": { - "smithy.api#documentation": "

                The configuration setting for the log types to be enabled for export to CloudWatch Logs for a specific DB cluster.

                \n

                Valid for: Aurora DB clusters only

                " + "smithy.api#documentation": "

                The configuration setting for the log types to be enabled for export to CloudWatch Logs for a specific DB cluster. The values\n in the list depend on the DB engine being used.

                \n

                \n RDS for MySQL\n

                \n

                Possible values are error, general, and slowquery.

                \n

                \n RDS for PostgreSQL\n

                \n

                Possible values are postgresql and upgrade.

                \n

                \n Aurora MySQL\n

                \n

                Possible values are audit, error, general, and slowquery.

                \n

                \n Aurora PostgreSQL\n

                \n

                Possible value is postgresql.

                \n

                For more information about exporting CloudWatch Logs for Amazon RDS, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Relational Database\n Service User Guide.

                \n

                For more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.

                \n

                Valid for: Aurora DB clusters and Multi-AZ DB clusters

                " } }, "EngineVersion": { @@ -14004,7 +14004,7 @@ "CopyTagsToSnapshot": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

                A value that indicates whether to copy all tags from the DB cluster to snapshots of the DB cluster. \n The default is not to copy them.

                \n

                Valid for: Aurora DB clusters only

                " + "smithy.api#documentation": "

                A value that indicates whether to copy all tags from the DB cluster to snapshots of the DB cluster. \n The default is not to copy them.

                \n

                Valid for: Aurora DB clusters and Multi-AZ DB clusters

                " } }, "EnableGlobalWriteForwarding": { @@ -14095,7 +14095,7 @@ } ], "traits": { - "smithy.api#documentation": "

                \n Modifies the parameters of a DB cluster parameter group. To modify more than one parameter,\n submit a list of the following: ParameterName, ParameterValue, \n and ApplyMethod. A maximum of 20\n parameters can be modified in a single request.\n

                \n \n

                After you create a DB cluster parameter group, you should wait at least 5 minutes\n before creating your first DB cluster that uses that DB cluster parameter group as the default parameter \n group. This allows Amazon RDS to fully complete the create action before the parameter \n group is used as the default for a new DB cluster. This is especially important for parameters \n that are critical when creating the default database for a DB cluster, such as the character set \n for the default database defined by the character_set_database parameter. You can use the \n Parameter Groups option of the Amazon RDS console or the \n DescribeDBClusterParameters action to verify \n that your DB cluster parameter group has been created or modified.

                \n

                If the modified DB cluster parameter group is used by an Aurora Serverless cluster, Aurora\n applies the update immediately. The cluster restart might interrupt your workload. In that case,\n your application must reopen any connections and retry any transactions that were active\n when the parameter changes took effect.

                \n
                \n

                For more information on Amazon Aurora DB clusters, see \n \n What is Amazon Aurora? in the Amazon Aurora User Guide.\n

                \n

                For more information on Multi-AZ DB clusters, see \n \n Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.\n

                \n \n

                The Multi-AZ DB clusters feature is in preview and is subject to change.

                \n
                " + "smithy.api#documentation": "

                \n Modifies the parameters of a DB cluster parameter group. To modify more than one parameter,\n submit a list of the following: ParameterName, ParameterValue, \n and ApplyMethod. A maximum of 20\n parameters can be modified in a single request.\n

                \n \n

                After you create a DB cluster parameter group, you should wait at least 5 minutes\n before creating your first DB cluster that uses that DB cluster parameter group as the default parameter \n group. This allows Amazon RDS to fully complete the create action before the parameter \n group is used as the default for a new DB cluster. This is especially important for parameters \n that are critical when creating the default database for a DB cluster, such as the character set \n for the default database defined by the character_set_database parameter. You can use the \n Parameter Groups option of the Amazon RDS console or the \n DescribeDBClusterParameters action to verify \n that your DB cluster parameter group has been created or modified.

                \n

                If the modified DB cluster parameter group is used by an Aurora Serverless cluster, Aurora\n applies the update immediately. The cluster restart might interrupt your workload. In that case,\n your application must reopen any connections and retry any transactions that were active\n when the parameter changes took effect.

                \n
                \n

                For more information on Amazon Aurora DB clusters, see \n \n What is Amazon Aurora? in the Amazon Aurora User Guide.\n

                \n

                For more information on Multi-AZ DB clusters, see \n \n Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.\n

                " } }, "com.amazonaws.rds#ModifyDBClusterParameterGroupMessage": { @@ -16002,7 +16002,7 @@ "SupportsClusters": { "target": "com.amazonaws.rds#Boolean", "traits": { - "smithy.api#documentation": "

                Whether DB instances can be configured as a Multi-AZ DB cluster.

                \n

                The Multi-AZ DB clusters feature is in preview and is subject to change.

                \n

                For more information on Multi-AZ DB clusters, see \n \n Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.\n

                " + "smithy.api#documentation": "

                Whether DB instances can be configured as a Multi-AZ DB cluster.

                \n

                For more information on Multi-AZ DB clusters, see \n \n Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.\n

                " } } }, @@ -16661,7 +16661,7 @@ } ], "traits": { - "smithy.api#documentation": "

                You might need to reboot your DB cluster, usually for maintenance reasons. \n For example, if you make certain modifications, \n or if you change the DB cluster parameter group associated with the DB cluster, \n reboot the DB cluster for the changes to take effect.\n

                \n \n

                Rebooting a DB cluster restarts the database engine service. Rebooting a DB \n cluster results in a momentary outage, during which the DB cluster status is set to rebooting.\n

                \n \n

                Use this operation only for a non-Aurora Multi-AZ DB cluster. \n The Multi-AZ DB clusters feature is in preview and is subject to change.

                \n \n

                For more information on Multi-AZ DB clusters, see \n \n Multi-AZ deployments with two readable standby DB instances in the \n Amazon RDS User Guide.\n

                " + "smithy.api#documentation": "

                You might need to reboot your DB cluster, usually for maintenance reasons. \n For example, if you make certain modifications, \n or if you change the DB cluster parameter group associated with the DB cluster, \n reboot the DB cluster for the changes to take effect.\n

                \n \n

                Rebooting a DB cluster restarts the database engine service. Rebooting a DB \n cluster results in a momentary outage, during which the DB cluster status is set to rebooting.\n

                \n \n

                Use this operation only for a non-Aurora Multi-AZ DB cluster.

                \n \n

                For more information on Multi-AZ DB clusters, see \n \n Multi-AZ deployments with two readable standby DB instances in the \n Amazon RDS User Guide.\n

                " } }, "com.amazonaws.rds#RebootDBClusterMessage": { @@ -16909,7 +16909,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Removes the asssociation of an Amazon Web Services Identity and Access Management (IAM) role from a\n DB cluster.

                \n

                For more information on Amazon Aurora DB clusters, see \n \n What is Amazon Aurora? in the Amazon Aurora User Guide.\n

                \n

                For more information on Multi-AZ DB clusters, see \n \n Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.\n

                \n \n

                The Multi-AZ DB clusters feature is in preview and is subject to change.

                \n
                " + "smithy.api#documentation": "

                 Removes the association of an Amazon Web Services Identity and Access Management (IAM) role from a\n DB cluster.

                \n

                For more information on Amazon Aurora DB clusters, see \n \n What is Amazon Aurora? in the Amazon Aurora User Guide.\n

                \n

                For more information on Multi-AZ DB clusters, see \n \n Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.\n

                " } }, "com.amazonaws.rds#RemoveRoleFromDBClusterMessage": { @@ -17411,7 +17411,7 @@ } ], "traits": { - "smithy.api#documentation": "

                \n Modifies the parameters of a DB cluster parameter group to the default value. To\n reset specific parameters submit a list of the following: ParameterName \n and ApplyMethod. To reset the\n entire DB cluster parameter group, specify the DBClusterParameterGroupName \n and ResetAllParameters parameters.\n

                \n

                \n When resetting the entire group, dynamic parameters are updated immediately and static parameters\n are set to pending-reboot to take effect on the next DB instance restart \n or RebootDBInstance request. You must call RebootDBInstance for every\n DB instance in your DB cluster that you want the updated static parameter to apply to.

                \n

                For more information on Amazon Aurora DB clusters, see \n \n What is Amazon Aurora? in the Amazon Aurora User Guide.\n

                \n

                For more information on Multi-AZ DB clusters, see \n \n Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.\n

                \n \n

                The Multi-AZ DB clusters feature is in preview and is subject to change.

                \n
                " + "smithy.api#documentation": "

                \n Modifies the parameters of a DB cluster parameter group to the default value. To\n reset specific parameters submit a list of the following: ParameterName \n and ApplyMethod. To reset the\n entire DB cluster parameter group, specify the DBClusterParameterGroupName \n and ResetAllParameters parameters.\n

                \n

                \n When resetting the entire group, dynamic parameters are updated immediately and static parameters\n are set to pending-reboot to take effect on the next DB instance restart \n or RebootDBInstance request. You must call RebootDBInstance for every\n DB instance in your DB cluster that you want the updated static parameter to apply to.

                \n

                For more information on Amazon Aurora DB clusters, see \n \n What is Amazon Aurora? in the Amazon Aurora User Guide.\n

                \n

                For more information on Multi-AZ DB clusters, see \n \n Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.\n

                " } }, "com.amazonaws.rds#ResetDBClusterParameterGroupMessage": { @@ -17748,7 +17748,7 @@ "EnableCloudwatchLogsExports": { "target": "com.amazonaws.rds#LogTypeList", "traits": { - "smithy.api#documentation": "

                The list of logs that the restored DB cluster is to export to CloudWatch Logs. The values\n in the list depend on the DB engine being used. For more information, see \n Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.

                " + "smithy.api#documentation": "

                The list of logs that the restored DB cluster is to export to CloudWatch Logs. The values\n in the list depend on the DB engine being used.

                \n

                \n Aurora MySQL\n

                \n

                Possible values are audit, error, general, and slowquery.

                \n

                \n Aurora PostgreSQL\n

                \n

                Possible value is postgresql.

                \n

                For more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.

                " } }, "DeletionProtection": { @@ -17847,7 +17847,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Creates a new DB cluster from a DB snapshot or DB cluster snapshot.

                \n

                The target DB cluster is created from the source snapshot with a default\n configuration. If you don't specify a security group, the new DB cluster is\n associated with the default security group.

                \n \n

                This action only restores the DB cluster, not the DB instances for that DB\n cluster. You must invoke the CreateDBInstance action to create DB\n instances for the restored DB cluster, specifying the identifier of the restored DB\n cluster in DBClusterIdentifier. You can create DB instances only after\n the RestoreDBClusterFromSnapshot action has completed and the DB\n cluster is available.

                \n
                \n

                For more information on Amazon Aurora DB clusters, see \n \n What is Amazon Aurora? in the Amazon Aurora User Guide.\n

                \n

                For more information on Multi-AZ DB clusters, see \n \n Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.\n

                \n \n

                The Multi-AZ DB clusters feature is in preview and is subject to change.

                \n
                " + "smithy.api#documentation": "

                Creates a new DB cluster from a DB snapshot or DB cluster snapshot.

                \n

                The target DB cluster is created from the source snapshot with a default\n configuration. If you don't specify a security group, the new DB cluster is\n associated with the default security group.

                \n \n

                This action only restores the DB cluster, not the DB instances for that DB\n cluster. You must invoke the CreateDBInstance action to create DB\n instances for the restored DB cluster, specifying the identifier of the restored DB\n cluster in DBClusterIdentifier. You can create DB instances only after\n the RestoreDBClusterFromSnapshot action has completed and the DB\n cluster is available.

                \n
                \n

                For more information on Amazon Aurora DB clusters, see \n \n What is Amazon Aurora? in the Amazon Aurora User Guide.\n

                \n

                For more information on Multi-AZ DB clusters, see \n \n Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.\n

                " } }, "com.amazonaws.rds#RestoreDBClusterFromSnapshotMessage": { @@ -17943,7 +17943,7 @@ "EnableCloudwatchLogsExports": { "target": "com.amazonaws.rds#LogTypeList", "traits": { - "smithy.api#documentation": "

                The list of logs that the restored DB cluster is to export to Amazon CloudWatch Logs.\n The values in the list depend on the DB engine being used.

                \n

                For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon\n Aurora User Guide.

                \n

                Valid for: Aurora DB clusters only

                " + "smithy.api#documentation": "

                The list of logs that the restored DB cluster is to export to Amazon CloudWatch Logs.\n The values in the list depend on the DB engine being used.

                \n

                \n RDS for MySQL\n

                \n

                Possible values are error, general, and slowquery.

                \n

                \n RDS for PostgreSQL\n

                \n

                Possible values are postgresql and upgrade.

                \n

                \n Aurora MySQL\n

                \n

                Possible values are audit, error, general, and slowquery.

                \n

                \n Aurora PostgreSQL\n

                \n

                Possible value is postgresql.

                \n

                For more information about exporting CloudWatch Logs for Amazon RDS, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Relational Database\n Service User Guide.

                \n

                For more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.

                \n

                Valid for: Aurora DB clusters and Multi-AZ DB clusters

                " } }, "EngineMode": { @@ -17973,7 +17973,7 @@ "CopyTagsToSnapshot": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

                A value that indicates whether to copy all tags from the restored DB cluster to snapshots of the restored DB cluster. The default is not to copy them.

                \n

                Valid for: Aurora DB clusters only

                " + "smithy.api#documentation": "

                A value that indicates whether to copy all tags from the restored DB cluster to snapshots of the restored DB cluster. The default is not to copy them.

                \n

                Valid for: Aurora DB clusters and Multi-AZ DB clusters

                " } }, "Domain": { @@ -18090,7 +18090,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Restores a DB cluster to an arbitrary point in time. Users can restore to any point\n in time before LatestRestorableTime for up to\n BackupRetentionPeriod days. The target DB cluster is created from the\n source DB cluster with the same configuration as the original DB cluster, except that\n the new DB cluster is created with the default DB security group.

                \n \n

                For Aurora, this action only restores the DB cluster, not the DB instances for that DB\n cluster. You must invoke the CreateDBInstance action to create DB\n instances for the restored DB cluster, specifying the identifier of the restored DB\n cluster in DBClusterIdentifier. You can create DB instances only after\n the RestoreDBClusterToPointInTime action has completed and the DB\n cluster is available.

                \n
                \n

                For more information on Amazon Aurora DB clusters, see \n \n What is Amazon Aurora? in the Amazon Aurora User Guide.\n

                \n

                For more information on Multi-AZ DB clusters, see \n \n Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.\n

                \n \n

                The Multi-AZ DB clusters feature is in preview and is subject to change.

                \n
                " + "smithy.api#documentation": "

                Restores a DB cluster to an arbitrary point in time. Users can restore to any point\n in time before LatestRestorableTime for up to\n BackupRetentionPeriod days. The target DB cluster is created from the\n source DB cluster with the same configuration as the original DB cluster, except that\n the new DB cluster is created with the default DB security group.

                \n \n

                For Aurora, this action only restores the DB cluster, not the DB instances for that DB\n cluster. You must invoke the CreateDBInstance action to create DB\n instances for the restored DB cluster, specifying the identifier of the restored DB\n cluster in DBClusterIdentifier. You can create DB instances only after\n the RestoreDBClusterToPointInTime action has completed and the DB\n cluster is available.

                \n
                \n

                For more information on Amazon Aurora DB clusters, see \n \n What is Amazon Aurora? in the Amazon Aurora User Guide.\n

                \n

                For more information on Multi-AZ DB clusters, see \n \n Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.\n

                " } }, "com.amazonaws.rds#RestoreDBClusterToPointInTimeMessage": { @@ -18176,7 +18176,7 @@ "EnableCloudwatchLogsExports": { "target": "com.amazonaws.rds#LogTypeList", "traits": { - "smithy.api#documentation": "

                The list of logs that the restored DB cluster is to export to CloudWatch Logs. The values\n in the list depend on the DB engine being used.

                \n

                For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.

                \n

                Valid for: Aurora DB clusters only

                " + "smithy.api#documentation": "

                The list of logs that the restored DB cluster is to export to CloudWatch Logs. The values\n in the list depend on the DB engine being used.

                \n

                \n RDS for MySQL\n

                \n

                Possible values are error, general, and slowquery.

                \n

                \n RDS for PostgreSQL\n

                \n

                Possible values are postgresql and upgrade.

                \n

                \n Aurora MySQL\n

                \n

                Possible values are audit, error, general, and slowquery.

                \n

                \n Aurora PostgreSQL\n

                \n

                Possible value is postgresql.

                \n

                For more information about exporting CloudWatch Logs for Amazon RDS, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Relational Database\n Service User Guide.

                \n

                For more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.

                \n

                Valid for: Aurora DB clusters and Multi-AZ DB clusters

                " } }, "DBClusterParameterGroupName": { @@ -18194,7 +18194,7 @@ "CopyTagsToSnapshot": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

                A value that indicates whether to copy all tags from the restored DB cluster to snapshots of the restored DB cluster. The default is not to copy them.

                \n

                Valid for: Aurora DB clusters only

                " + "smithy.api#documentation": "

                A value that indicates whether to copy all tags from the restored DB cluster to snapshots of the restored DB cluster. The default is not to copy them.

                \n

                Valid for: Aurora DB clusters and Multi-AZ DB clusters

                " } }, "Domain": { diff --git a/codegen/sdk-codegen/aws-models/route53-recovery-cluster.json b/codegen/sdk-codegen/aws-models/route53-recovery-cluster.json index f0fac55e48ba..cefbb5d0f4cb 100644 --- a/codegen/sdk-codegen/aws-models/route53-recovery-cluster.json +++ b/codegen/sdk-codegen/aws-models/route53-recovery-cluster.json @@ -52,7 +52,13 @@ "min": 1, "max": 255 }, - "smithy.api#pattern": "^[A-Za-z0-9:\\/_-]*$" + "smithy.api#pattern": "^[A-Za-z0-9:.\\/_-]*$" + } + }, + "com.amazonaws.route53recoverycluster#Arns": { + "type": "list", + "member": { + "target": "com.amazonaws.route53recoverycluster#Arn" } }, "com.amazonaws.route53recoverycluster#ConflictException": { @@ -131,7 +137,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Get the state for a routing control. A routing control is a simple on/off switch\n\t\t\t\tthat you can use to route traffic to cells. When the state is On, traffic flows to a cell. When it's off, traffic does not flow.

                \n\t\t\t

                Before you can create a routing control, you first must create a cluster to host the control.\n\t\t\t\tFor more information, see \n\t\t\t\tCreateCluster.\n\t\t\t\tAccess one of the endpoints for the cluster to get or update the routing control state to\n\t\t\t\tredirect traffic.

                \n\t\t\t

                For more information about working with routing controls, see \n\t\t\t\tRouting control\n\t\t\t\tin the Route 53 Application Recovery Controller Developer Guide.

                " + "smithy.api#documentation": "

                Get the state for a routing control. A routing control is a simple on/off switch that you\n\t\t\tcan use to route traffic to cells. When the state is On, traffic flows to a cell. When\n\t\t\tit's Off, traffic does not flow.

                \n\t\t\t

                Before you can create a routing control, you must first create a cluster to host the control\n\t\t\t\tin a control panel. For more information, see \n\t\t\t\t\tCreate routing control structures in the Amazon Route 53 Application Recovery Controller Developer Guide. \n\t\t\t\tThen you access one of the endpoints for the cluster to get or update the routing control state to\n\t\t\t\tredirect traffic.

                \n\t\t\t

                \n You must specify Regional endpoints when you work with API cluster operations \n\t\t\t\tto get or update routing control states in Application Recovery Controller.\n

                \n\t\t\t

                To see a code example for getting a routing control state, including accessing Regional cluster endpoints\n\t\t\t\tin sequence, see API examples\n\t\t\t\tin the Amazon Route 53 Application Recovery Controller Developer Guide.

                \n\t\t\t

                Learn more about working with routing controls in the following topics in the \n\t\t\t\tAmazon Route 53 Application Recovery Controller Developer Guide:

                \n\t\t\t " } }, "com.amazonaws.route53recoverycluster#GetRoutingControlStateRequest": { @@ -265,18 +271,6 @@ }, "com.amazonaws.route53recoverycluster#ToggleCustomerAPI": { "type": "service", - "version": "2019-12-02", - "operations": [ - { - "target": "com.amazonaws.route53recoverycluster#GetRoutingControlState" - }, - { - "target": "com.amazonaws.route53recoverycluster#UpdateRoutingControlState" - }, - { - "target": "com.amazonaws.route53recoverycluster#UpdateRoutingControlStates" - } - ], "traits": { "aws.api#service": { "sdkId": "Route53 Recovery Cluster", @@ -289,9 +283,21 @@ "name": "route53-recovery-cluster" }, "aws.protocols#awsJson1_0": {}, - "smithy.api#documentation": "

                Welcome to the Amazon Route 53 Application Recovery Controller API Reference Guide for Recovery Control Data Plane .

                \n\t\t\t

                Recovery control in Route 53 Application Recovery Controller includes extremely reliable routing controls that enable you to recover applications \n\t\t\t\tby rerouting traffic, for example, across Availability Zones or AWS Regions. Routing controls are simple on/off switches \n\t\t\t\thosted on a cluster. A cluster is a set of five redundant regional endpoints against which you can execute API calls to update or \n\t\t\t\tget the state of routing controls. You use routing controls to failover traffic to recover your application \n\t\t\t\tacross Availability Zones or Regions.

                \n\t\t\t

                This API guide includes information about how to get and update routing control states in Route 53 Application Recovery Controller.

                \n\t\t\t

                For more information about Route 53 Application Recovery Controller, see the following:

                \n\t\t\t ", + "smithy.api#documentation": "

                Welcome to the Routing Control (Recovery Cluster) API Reference Guide for Amazon Route 53 Application Recovery Controller.

                \n\t\t\t

                With Amazon Route 53 Application Recovery Controller, you can use routing control with extreme reliability to\n\t\t\trecover applications by rerouting traffic across\n\t\t\tAvailability Zones or AWS Regions. Routing controls are simple on/off switches hosted\n\t\t\ton a highly available cluster in Application Recovery Controller. A cluster provides a set of five redundant Regional endpoints against which you\n\t\t\tcan run API calls to get or update the state of routing controls. To implement failover, you set \n\t\t\tone routing control on and another one off, to reroute traffic from one Availability Zone or Amazon Web Services Region \n\t\t\tto another.

                \n\t\t\t

                \n Be aware that you must specify the Regional endpoints for a cluster when you work with API cluster operations \n\t\t\t\tto get or update routing control states in Application Recovery Controller. In addition, you must specify the US West (Oregon) Region \n\t\t\t\tfor Application Recovery Controller API calls. For example, use the parameter region us-west-2 with AWS CLI commands.\n\t\t\t\tFor more information, see\n\t\t\t\t\n\t\t\t\t\tGet and update routing control states using the API in the Amazon Route 53 Application Recovery Controller Developer Guide.

                \n\t\t

                This API guide includes information about the API operations for how to get and update routing control states\n\t\t\tin Application Recovery Controller. You also must set up the structures to support routing controls: clusters and control panels.

                \n\t\t\t

                For more information about working with routing control in Application Recovery Controller, see the following:

                \n\t\t ", "smithy.api#title": "Route53 Recovery Cluster" - } + }, + "version": "2019-12-02", + "operations": [ + { + "target": "com.amazonaws.route53recoverycluster#GetRoutingControlState" + }, + { + "target": "com.amazonaws.route53recoverycluster#UpdateRoutingControlState" + }, + { + "target": "com.amazonaws.route53recoverycluster#UpdateRoutingControlStates" + } + ] }, "com.amazonaws.route53recoverycluster#UpdateRoutingControlState": { "type": "operation", @@ -325,7 +331,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Set the state of the routing control to reroute traffic. You can set the value to be On or Off.\n\t\t\t\tWhen the state is On, traffic flows to a cell. When it's off, traffic does not flow.

                \n\t\t\t

                For more information about working with routing controls, see \n\t\t\t\tRouting control\n\t\t\t\tin the Route 53 Application Recovery Controller Developer Guide.

                " + "smithy.api#documentation": "

                Set the state of the routing control to reroute traffic. You can set the value to be On or\n\t\t\tOff. When the state is On, traffic flows to a cell. When it's Off, traffic does not\n\t\t\tflow.

                \n\t\t\t

                With Application Recovery Controller, you can add safety rules for routing controls, which are safeguards for routing \n\t\t\t\tcontrol state updates that help prevent unexpected outcomes, like fail open traffic routing. However, \n\t\t\t\tthere are scenarios when you might want to bypass the routing control safeguards that are enforced with \n\t\t\t\tsafety rules that you've configured. For example, you might want to fail over quickly for disaster recovery, \n\t\t\t\tand one or more safety rules might be unexpectedly preventing you from updating a routing control state to \n\t\t\t\treroute traffic. In a \"break glass\" scenario like this, you can override one or more safety rules to change \n\t\t\t\ta routing control state and fail over your application.

                \n\t\t\t

                 The SafetyRulesToOverride property enables you to override one or more safety rules and \n\t\t\t\tupdate routing control states. For more information, see \n\t\t\t\t\n\t\t\t\t\tOverride safety rules to reroute traffic in the Amazon Route 53 Application Recovery Controller Developer Guide.

                \n\t\t\t

                \n You must specify Regional endpoints when you work with API cluster operations \n\t\t\t\tto get or update routing control states in Application Recovery Controller.\n

                \n\t\t\t

                To see a code example for getting a routing control state, including accessing Regional cluster endpoints\n\t\t\t\tin sequence, see API examples\n\t\t\t\tin the Amazon Route 53 Application Recovery Controller Developer Guide.

                \n\t\t\t " } }, "com.amazonaws.route53recoverycluster#UpdateRoutingControlStateEntries": { @@ -340,7 +346,7 @@ "RoutingControlArn": { "target": "com.amazonaws.route53recoverycluster#Arn", "traits": { - "smithy.api#documentation": "

                The Amazon Resource Number (ARN) for the routing control state entry.

                ", + "smithy.api#documentation": "

                 The Amazon Resource Name (ARN) for a routing control state entry.

                ", "smithy.api#required": {} } }, @@ -353,7 +359,7 @@ } }, "traits": { - "smithy.api#documentation": "

                A routing control state.

                " + "smithy.api#documentation": "

                A routing control state entry.

                " } }, "com.amazonaws.route53recoverycluster#UpdateRoutingControlStateRequest": { @@ -372,6 +378,12 @@ "smithy.api#documentation": "

                The state of the routing control. You can set the value to be On or Off.

                ", "smithy.api#required": {} } + }, + "SafetyRulesToOverride": { + "target": "com.amazonaws.route53recoverycluster#Arns", + "traits": { + "smithy.api#documentation": "

                 The Amazon Resource Names (ARNs) for the safety rules that you want to override when you're updating the state of\n\t\t\ta routing control. You can override one safety rule or multiple safety rules by including one or more ARNs, separated \n\t\t\tby commas.

                \n\t\t

                For more information, see \n\t\t\tOverride safety rules to reroute traffic in the Amazon Route 53 Application Recovery Controller Developer Guide.

                " + } } } }, @@ -411,7 +423,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Set multiple routing control states. You can set the value for each state to be On or Off.\n\t\t\tWhen the state is On, traffic flows to a cell. When it's off, traffic does not flow.

                \n\t\t\t

                For more information about working with routing controls, see \n\t\t\t\tRouting control\n\t\t\t\tin the Route 53 Application Recovery Controller Developer Guide.

                " + "smithy.api#documentation": "

                Set multiple routing control states. You can set the value for each state to be On or Off.\n\t\t\tWhen the state is On, traffic flows to a cell. When it's Off, traffic does not\n\t\t\tflow.

                \n\t\t\t

                With Application Recovery Controller, you can add safety rules for routing controls, which are safeguards for routing \n\t\t\t\tcontrol state updates that help prevent unexpected outcomes, like fail open traffic routing. However, \n\t\t\t\tthere are scenarios when you might want to bypass the routing control safeguards that are enforced with \n\t\t\t\tsafety rules that you've configured. For example, you might want to fail over quickly for disaster recovery, \n\t\t\t\tand one or more safety rules might be unexpectedly preventing you from updating a routing control state to \n\t\t\t\treroute traffic. In a \"break glass\" scenario like this, you can override one or more safety rules to change \n\t\t\t\ta routing control state and fail over your application.

                \n\t\t\t

                 The SafetyRulesToOverride property enables you to override one or more safety rules and \n\t\t\t\tupdate routing control states. For more information, see \n\t\t\t\t\n\t\t\t\t\tOverride safety rules to reroute traffic in the Amazon Route 53 Application Recovery Controller Developer Guide.

                \t\t\t\n

                \n You must specify Regional endpoints when you work with API cluster operations \n\t\t\t\tto get or update routing control states in Application Recovery Controller.\n

                \n\t\t\t

                To see a code example for getting a routing control state, including accessing Regional cluster endpoints\n\t\t\t\tin sequence, see API examples\n\t\t\t\tin the Amazon Route 53 Application Recovery Controller Developer Guide.

                \n\t\t\t " } }, "com.amazonaws.route53recoverycluster#UpdateRoutingControlStatesRequest": { @@ -423,6 +435,12 @@ "smithy.api#documentation": "

                A set of routing control entries that you want to update.

                ", "smithy.api#required": {} } + }, + "SafetyRulesToOverride": { + "target": "com.amazonaws.route53recoverycluster#Arns", + "traits": { + "smithy.api#documentation": "

                 The Amazon Resource Names (ARNs) for the safety rules that you want to override when you're updating routing\n\t\t\tcontrol states. You can override one safety rule or multiple safety rules by including one or more ARNs, separated \n\t\t\tby commas.

                \n\t\t

                For more information, see \n\t\t\tOverride safety rules to reroute traffic in the Amazon Route 53 Application Recovery Controller Developer Guide.

                " + } } } }, diff --git a/codegen/sdk-codegen/aws-models/service-catalog-appregistry.json b/codegen/sdk-codegen/aws-models/service-catalog-appregistry.json index 1fa191e6e748..6b4acb7be698 100644 --- a/codegen/sdk-codegen/aws-models/service-catalog-appregistry.json +++ b/codegen/sdk-codegen/aws-models/service-catalog-appregistry.json @@ -31,6 +31,21 @@ "shapes": { "com.amazonaws.servicecatalogappregistry#AWS242AppRegistry": { "type": "service", + "traits": { + "aws.api#service": { + "sdkId": "Service Catalog AppRegistry", + "arnNamespace": "servicecatalog", + "cloudFormationName": "ServiceCatalogAppRegistry", + "cloudTrailEventSource": "servicecatalogappregistry.amazonaws.com", + "endpointPrefix": "servicecatalog-appregistry" + }, + "aws.auth#sigv4": { + "name": "servicecatalog" + }, + "aws.protocols#restJson1": {}, + "smithy.api#documentation": "

                Amazon Web Services Service Catalog AppRegistry enables organizations to understand the application context of their Amazon Web Services resources. AppRegistry provides a repository of your applications, their resources, and the application metadata that you use within your enterprise.

                ", + "smithy.api#title": "AWS Service Catalog App Registry" + }, "version": "2020-06-24", "operations": [ { @@ -96,22 +111,7 @@ { "target": "com.amazonaws.servicecatalogappregistry#UpdateAttributeGroup" } - ], - "traits": { - "aws.api#service": { - "sdkId": "Service Catalog AppRegistry", - "arnNamespace": "servicecatalog", - "cloudFormationName": "ServiceCatalogAppRegistry", - "cloudTrailEventSource": "servicecatalogappregistry.amazonaws.com", - "endpointPrefix": "servicecatalog-appregistry" - }, - "aws.auth#sigv4": { - "name": "servicecatalog" - }, - "aws.protocols#restJson1": {}, - "smithy.api#documentation": "

                Amazon Web Services Service Catalog AppRegistry enables organizations to understand the application context of their Amazon Web Services resources. AppRegistry provides a repository of your applications, their resources, and the application metadata that you use within your enterprise.

                ", - "smithy.api#title": "AWS Service Catalog App Registry" - } + ] }, "com.amazonaws.servicecatalogappregistry#Application": { "type": "structure", @@ -2107,6 +2107,9 @@ "name": { "target": "com.amazonaws.servicecatalogappregistry#Name", "traits": { + "smithy.api#deprecated": { + "message": "Name update for application is deprecated." + }, "smithy.api#documentation": "

                The new name of the application. The name must be unique in the region in which you are updating the application.

                " } }, @@ -2174,6 +2177,9 @@ "name": { "target": "com.amazonaws.servicecatalogappregistry#Name", "traits": { + "smithy.api#deprecated": { + "message": "Name update for attribute group is deprecated." + }, "smithy.api#documentation": "

                The new name of the attribute group. The name must be unique in the region in which you are\n updating the attribute group.

                " } }, diff --git a/codegen/sdk-codegen/aws-models/sts.json b/codegen/sdk-codegen/aws-models/sts.json index 151ed5fb6171..79dcf9cb14a2 100644 --- a/codegen/sdk-codegen/aws-models/sts.json +++ b/codegen/sdk-codegen/aws-models/sts.json @@ -100,7 +100,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Returns a set of temporary security credentials that you can use to access Amazon Web Services\n resources that you might not normally have access to. These temporary credentials consist\n of an access key ID, a secret access key, and a security token. Typically, you use\n AssumeRole within your account or for cross-account access. For a\n comparison of AssumeRole with other API operations that produce temporary\n credentials, see Requesting Temporary Security\n Credentials and Comparing the\n Amazon Web Services STS API operations in the IAM User Guide.

                \n

                \n Permissions\n

                \n

                The temporary security credentials created by AssumeRole can be used to\n make API calls to any Amazon Web Services service with the following exception: You cannot call the\n Amazon Web Services STS GetFederationToken or GetSessionToken API\n operations.

                \n

                (Optional) You can pass inline or managed session policies to\n this operation. You can pass a single JSON policy document to use as an inline session\n policy. You can also specify up to 10 managed policies to use as managed session policies.\n The plaintext that you use for both inline and managed session policies can't exceed 2,048\n characters. Passing policies to this operation returns new \n temporary credentials. The resulting session's permissions are the intersection of the \n role's identity-based policy and the session policies. You can use the role's temporary \n credentials in subsequent Amazon Web Services API calls to access resources in the account that owns \n the role. You cannot use session policies to grant more permissions than those allowed \n by the identity-based policy of the role that is being assumed. For more information, see\n Session\n Policies in the IAM User Guide.

                \n

                When you create a role, you create two policies: A role trust policy that specifies\n who can assume the role and a permissions policy that specifies\n what can be done with the role. You specify the trusted principal\n who is allowed to assume the role in the role trust policy.

                \n

                To assume a role from a different account, your Amazon Web Services account must be trusted by the\n role. The trust relationship is defined in the role's trust policy when the role is\n created. That trust policy states which accounts are allowed to delegate that access to\n users in the account.

                \n

                A user who wants to access a role in a different account must also have permissions that\n are delegated from the user account administrator. The administrator must attach a policy\n that allows the user to call AssumeRole for the ARN of the role in the other\n account.

                \n

                To allow a user to assume a role in the same account, you can do either of the\n following:

                \n
                  \n
                • \n

                  Attach a policy to the user that allows the user to call\n AssumeRole (as long as the role's trust policy trusts the account).

                  \n
                • \n
                • \n

                  Add the user as a principal directly in the role's trust policy.

                  \n
                • \n
                \n

                You can do either because the role’s trust policy acts as an IAM resource-based\n policy. When a resource-based policy grants access to a principal in the same account, no\n additional identity-based policy is required. For more information about trust policies and\n resource-based policies, see IAM Policies in the\n IAM User Guide.

                \n\n

                \n Tags\n

                \n

                (Optional) You can pass tag key-value pairs to your session. These tags are called\n session tags. For more information about session tags, see Passing Session Tags in STS in the\n IAM User Guide.

                \n

                An administrator must grant you the permissions necessary to pass session tags. The\n administrator can also create granular permissions to allow you to pass only specific\n session tags. For more information, see Tutorial: Using Tags\n for Attribute-Based Access Control in the\n IAM User Guide.

                \n

                You can set the session tags as transitive. Transitive tags persist during role\n chaining. For more information, see Chaining Roles\n with Session Tags in the IAM User Guide.

                \n

                \n Using MFA with AssumeRole\n

                \n

                (Optional) You can include multi-factor authentication (MFA) information when you call\n AssumeRole. This is useful for cross-account scenarios to ensure that the\n user that assumes the role has been authenticated with an Amazon Web Services MFA device. In that\n scenario, the trust policy of the role being assumed includes a condition that tests for\n MFA authentication. If the caller does not include valid MFA information, the request to\n assume the role is denied. The condition in a trust policy that tests for MFA\n authentication might look like the following example.

                \n

                \n \"Condition\": {\"Bool\": {\"aws:MultiFactorAuthPresent\": true}}\n

                \n

                For more information, see Configuring MFA-Protected API Access\n in the IAM User Guide guide.

                \n

                To use MFA with AssumeRole, you pass values for the\n SerialNumber and TokenCode parameters. The\n SerialNumber value identifies the user's hardware or virtual MFA device.\n The TokenCode is the time-based one-time password (TOTP) that the MFA device\n produces.

                " + "smithy.api#documentation": "

                Returns a set of temporary security credentials that you can use to access Amazon Web Services\n resources that you might not normally have access to. These temporary credentials consist\n of an access key ID, a secret access key, and a security token. Typically, you use\n AssumeRole within your account or for cross-account access. For a\n comparison of AssumeRole with other API operations that produce temporary\n credentials, see Requesting Temporary Security\n Credentials and Comparing the\n Amazon Web Services STS API operations in the IAM User Guide.

                \n

                \n Permissions\n

                \n

                The temporary security credentials created by AssumeRole can be used to\n make API calls to any Amazon Web Services service with the following exception: You cannot call the\n Amazon Web Services STS GetFederationToken or GetSessionToken API\n operations.

                \n

                (Optional) You can pass inline or managed session policies to\n this operation. You can pass a single JSON policy document to use as an inline session\n policy. You can also specify up to 10 managed policies to use as managed session policies.\n The plaintext that you use for both inline and managed session policies can't exceed 2,048\n characters. Passing policies to this operation returns new \n temporary credentials. The resulting session's permissions are the intersection of the \n role's identity-based policy and the session policies. You can use the role's temporary \n credentials in subsequent Amazon Web Services API calls to access resources in the account that owns \n the role. You cannot use session policies to grant more permissions than those allowed \n by the identity-based policy of the role that is being assumed. For more information, see\n Session\n Policies in the IAM User Guide.

                \n

                When you create a role, you create two policies: A role trust policy that specifies\n who can assume the role and a permissions policy that specifies\n what can be done with the role. You specify the trusted principal\n who is allowed to assume the role in the role trust policy.

                \n

                To assume a role from a different account, your Amazon Web Services account must be trusted by the\n role. The trust relationship is defined in the role's trust policy when the role is\n created. That trust policy states which accounts are allowed to delegate that access to\n users in the account.

                \n

                A user who wants to access a role in a different account must also have permissions that\n are delegated from the user account administrator. The administrator must attach a policy\n that allows the user to call AssumeRole for the ARN of the role in the other\n account.

                \n

                To allow a user to assume a role in the same account, you can do either of the\n following:

                \n
                  \n
                • \n

                  Attach a policy to the user that allows the user to call AssumeRole\n (as long as the role's trust policy trusts the account).

                  \n
                • \n
                • \n

                  Add the user as a principal directly in the role's trust policy.

                  \n
                • \n
                \n

                You can do either because the role’s trust policy acts as an IAM resource-based\n policy. When a resource-based policy grants access to a principal in the same account, no\n additional identity-based policy is required. For more information about trust policies and\n resource-based policies, see IAM Policies in the\n IAM User Guide.

                \n\n

                \n Tags\n

                \n

                (Optional) You can pass tag key-value pairs to your session. These tags are called\n session tags. For more information about session tags, see Passing Session Tags in STS in the\n IAM User Guide.

                \n

                An administrator must grant you the permissions necessary to pass session tags. The\n administrator can also create granular permissions to allow you to pass only specific\n session tags. For more information, see Tutorial: Using Tags\n for Attribute-Based Access Control in the\n IAM User Guide.

                \n

                You can set the session tags as transitive. Transitive tags persist during role\n chaining. For more information, see Chaining Roles\n with Session Tags in the IAM User Guide.

                \n

                \n Using MFA with AssumeRole\n

                \n

                (Optional) You can include multi-factor authentication (MFA) information when you call\n AssumeRole. This is useful for cross-account scenarios to ensure that the\n user that assumes the role has been authenticated with an Amazon Web Services MFA device. In that\n scenario, the trust policy of the role being assumed includes a condition that tests for\n MFA authentication. If the caller does not include valid MFA information, the request to\n assume the role is denied. The condition in a trust policy that tests for MFA\n authentication might look like the following example.

                \n

                \n \"Condition\": {\"Bool\": {\"aws:MultiFactorAuthPresent\": true}}\n

                \n

                For more information, see Configuring MFA-Protected API Access\n in the IAM User Guide.

                \n

                To use MFA with AssumeRole, you pass values for the\n SerialNumber and TokenCode parameters. The\n SerialNumber value identifies the user's hardware or virtual MFA device.\n The TokenCode is the time-based one-time password (TOTP) that the MFA device\n produces.

                " } }, "com.amazonaws.sts#AssumeRoleRequest": { @@ -141,7 +141,7 @@ "Tags": { "target": "com.amazonaws.sts#tagListType", "traits": { - "smithy.api#documentation": "

                A list of session tags that you want to pass. Each session tag consists of a key name\n and an associated value. For more information about session tags, see Tagging Amazon Web Services STS\n Sessions in the IAM User Guide.

                \n

                This parameter is optional. You can pass up to 50 session tags. The plaintext session\n tag keys can’t exceed 128 characters, and the values can’t exceed 256 characters. For these\n and additional limits, see IAM\n and STS Character Limits in the IAM User Guide.

                \n \n \n

                An Amazon Web Services conversion compresses the passed session policies and session tags into a\n packed binary format that has a separate limit. Your request can fail for this limit\n even if your plaintext meets the other requirements. The PackedPolicySize\n response element indicates by percentage how close the policies and tags for your\n request are to the upper size limit.\n

                \n
                \n \n

                You can pass a session tag with the same key as a tag that is already attached to the\n role. When you do, session tags override a role tag with the same key.

                \n

                Tag key–value pairs are not case sensitive, but case is preserved. This means that you\n cannot have separate Department and department tag keys. Assume\n that the role has the Department=Marketing tag and you pass the\n department=engineering session tag. Department\n and department are not saved as separate tags, and the session tag passed in\n the request takes precedence over the role tag.

                \n

                Additionally, if you used temporary credentials to perform this operation, the new\n session inherits any transitive session tags from the calling session. If you pass a\n session tag with the same key as an inherited tag, the operation fails. To view the\n inherited tags for a session, see the CloudTrail logs. For more information, see Viewing Session Tags in CloudTrail in the\n IAM User Guide.

                " + "smithy.api#documentation": "

                A list of session tags that you want to pass. Each session tag consists of a key name\n and an associated value. For more information about session tags, see Tagging Amazon Web Services STS\n Sessions in the IAM User Guide.

                \n

                This parameter is optional. You can pass up to 50 session tags. The plaintext session\n tag keys can’t exceed 128 characters, and the values can’t exceed 256 characters. For these\n and additional limits, see IAM\n and STS Character Limits in the IAM User Guide.

                \n \n \n

                An Amazon Web Services conversion compresses the passed session policies and session tags into a\n packed binary format that has a separate limit. Your request can fail for this limit\n even if your plaintext meets the other requirements. The PackedPolicySize\n response element indicates by percentage how close the policies and tags for your\n request are to the upper size limit.\n

                \n
                \n \n

                You can pass a session tag with the same key as a tag that is already attached to the\n role. When you do, session tags override a role tag with the same key.

                \n

                Tag key–value pairs are not case sensitive, but case is preserved. This means that you\n cannot have separate Department and department tag keys. Assume\n that the role has the Department=Marketing tag and you pass the\n department=engineering session tag. Department\n and department are not saved as separate tags, and the session tag passed in\n the request takes precedence over the role tag.

                \n

                Additionally, if you used temporary credentials to perform this operation, the new\n session inherits any transitive session tags from the calling session. If you pass a\n session tag with the same key as an inherited tag, the operation fails. To view the\n inherited tags for a session, see the CloudTrail logs. For more information, see Viewing Session Tags in CloudTrail in the\n IAM User Guide.

                " } }, "TransitiveTagKeys": { @@ -378,7 +378,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Returns a set of temporary security credentials for users who have been authenticated in\n a mobile or web application with a web identity provider. Example providers include Amazon Cognito,\n Login with Amazon, Facebook, Google, or any OpenID Connect-compatible identity\n provider.

                \n \n

                For mobile applications, we recommend that you use Amazon Cognito. You can use Amazon Cognito with the\n Amazon Web Services SDK for iOS Developer Guide and the Amazon Web Services SDK for Android Developer Guide to uniquely\n identify a user. You can also supply the user with a consistent identity throughout the\n lifetime of an application.

                \n

                To learn more about Amazon Cognito, see Amazon Cognito Overview in\n Amazon Web Services SDK for Android Developer Guide and Amazon Cognito Overview in the\n Amazon Web Services SDK for iOS Developer Guide.

                \n
                \n

                Calling AssumeRoleWithWebIdentity does not require the use of Amazon Web Services\n security credentials. Therefore, you can distribute an application (for example, on mobile\n devices) that requests temporary security credentials without including long-term Amazon Web Services\n credentials in the application. You also don't need to deploy server-based proxy services\n that use long-term Amazon Web Services credentials. Instead, the identity of the caller is validated by\n using a token from the web identity provider. For a comparison of\n AssumeRoleWithWebIdentity with the other API operations that produce\n temporary credentials, see Requesting Temporary Security\n Credentials and Comparing the\n Amazon Web Services STS API operations in the IAM User Guide.

                \n

                The temporary security credentials returned by this API consist of an access key ID, a\n secret access key, and a security token. Applications can use these temporary security\n credentials to sign calls to Amazon Web Services service API operations.

                \n

                \n Session Duration\n

                \n

                By default, the temporary security credentials created by\n AssumeRoleWithWebIdentity last for one hour. However, you can use the\n optional DurationSeconds parameter to specify the duration of your session.\n You can provide a value from 900 seconds (15 minutes) up to the maximum session duration\n setting for the role. This setting can have a value from 1 hour to 12 hours. To learn how\n to view the maximum value for your role, see View the\n Maximum Session Duration Setting for a Role in the\n IAM User Guide. The maximum session duration limit applies when\n you use the AssumeRole* API operations or the assume-role* CLI\n commands. However the limit does not apply when you use those operations to create a\n console URL. For more information, see Using IAM Roles in the\n IAM User Guide.

                \n

                \n Permissions\n

                \n

                The temporary security credentials created by AssumeRoleWithWebIdentity can\n be used to make API calls to any Amazon Web Services service with the following exception: you cannot\n call the STS GetFederationToken or GetSessionToken API\n operations.

                \n

                (Optional) You can pass inline or managed session policies to\n this operation. You can pass a single JSON policy document to use as an inline session\n policy. You can also specify up to 10 managed policies to use as managed session policies.\n The plaintext that you use for both inline and managed session policies can't exceed 2,048\n characters. Passing policies to this operation returns new \n temporary credentials. The resulting session's permissions are the intersection of the \n role's identity-based policy and the session policies. You can use the role's temporary \n credentials in subsequent Amazon Web Services API calls to access resources in the account that owns \n the role. You cannot use session policies to grant more permissions than those allowed \n by the identity-based policy of the role that is being assumed. For more information, see\n Session\n Policies in the IAM User Guide.

                \n

                \n Tags\n

                \n

                (Optional) You can configure your IdP to pass attributes into your web identity token as\n session tags. Each session tag consists of a key name and an associated value. For more\n information about session tags, see Passing Session Tags in STS in the\n IAM User Guide.

                \n

                You can pass up to 50 session tags. The plaintext session tag keys can’t exceed 128\n characters and the values can’t exceed 256 characters. For these and additional limits, see\n IAM\n and STS Character Limits in the IAM User Guide.

                \n \n \n

                An Amazon Web Services conversion compresses the passed session policies and session tags into a\n packed binary format that has a separate limit. Your request can fail for this limit\n even if your plaintext meets the other requirements. The PackedPolicySize\n response element indicates by percentage how close the policies and tags for your\n request are to the upper size limit.\n

                \n
                \n \n

                You can pass a session tag with the same key as a tag that is attached to the role. When\n you do, the session tag overrides the role tag with the same key.

                \n

                An administrator must grant you the permissions necessary to pass session tags. The\n administrator can also create granular permissions to allow you to pass only specific\n session tags. For more information, see Tutorial: Using Tags\n for Attribute-Based Access Control in the\n IAM User Guide.

                \n

                You can set the session tags as transitive. Transitive tags persist during role\n chaining. For more information, see Chaining Roles\n with Session Tags in the IAM User Guide.

                \n

                \n Identities\n

                \n

                Before your application can call AssumeRoleWithWebIdentity, you must have\n an identity token from a supported identity provider and create a role that the application\n can assume. The role that your application assumes must trust the identity provider that is\n associated with the identity token. In other words, the identity provider must be specified\n in the role's trust policy.

                \n \n

                Calling AssumeRoleWithWebIdentity can result in an entry in your\n CloudTrail logs. The entry includes the Subject of\n the provided web identity token. We recommend that you avoid using any personally\n identifiable information (PII) in this field. For example, you could instead use a GUID\n or a pairwise identifier, as suggested\n in the OIDC specification.

                \n
                \n

                For more information about how to use web identity federation and the\n AssumeRoleWithWebIdentity API, see the following resources:

                \n " + "smithy.api#documentation": "

                Returns a set of temporary security credentials for users who have been authenticated in\n a mobile or web application with a web identity provider. Example providers include the\n OAuth 2.0 providers Login with Amazon and Facebook, or any OpenID Connect-compatible\n identity provider such as Google or Amazon Cognito federated identities.

                \n \n

                For mobile applications, we recommend that you use Amazon Cognito. You can use Amazon Cognito with the\n Amazon Web Services SDK for iOS Developer Guide and the Amazon Web Services SDK for Android Developer Guide to uniquely\n identify a user. You can also supply the user with a consistent identity throughout the\n lifetime of an application.

                \n

                To learn more about Amazon Cognito, see Amazon Cognito Overview in\n Amazon Web Services SDK for Android Developer Guide and Amazon Cognito Overview in the\n Amazon Web Services SDK for iOS Developer Guide.

                \n
                \n

                Calling AssumeRoleWithWebIdentity does not require the use of Amazon Web Services\n security credentials. Therefore, you can distribute an application (for example, on mobile\n devices) that requests temporary security credentials without including long-term Amazon Web Services\n credentials in the application. You also don't need to deploy server-based proxy services\n that use long-term Amazon Web Services credentials. Instead, the identity of the caller is validated by\n using a token from the web identity provider. For a comparison of\n AssumeRoleWithWebIdentity with the other API operations that produce\n temporary credentials, see Requesting Temporary Security\n Credentials and Comparing the\n Amazon Web Services STS API operations in the IAM User Guide.

                \n

                The temporary security credentials returned by this API consist of an access key ID, a\n secret access key, and a security token. Applications can use these temporary security\n credentials to sign calls to Amazon Web Services service API operations.

                \n

                \n Session Duration\n

                \n

                By default, the temporary security credentials created by\n AssumeRoleWithWebIdentity last for one hour. However, you can use the\n optional DurationSeconds parameter to specify the duration of your session.\n You can provide a value from 900 seconds (15 minutes) up to the maximum session duration\n setting for the role. This setting can have a value from 1 hour to 12 hours. To learn how\n to view the maximum value for your role, see View the\n Maximum Session Duration Setting for a Role in the\n IAM User Guide. The maximum session duration limit applies when\n you use the AssumeRole* API operations or the assume-role* CLI\n commands. However the limit does not apply when you use those operations to create a\n console URL. For more information, see Using IAM Roles in the\n IAM User Guide.

                \n

                \n Permissions\n

                \n

                The temporary security credentials created by AssumeRoleWithWebIdentity can\n be used to make API calls to any Amazon Web Services service with the following exception: you cannot\n call the STS GetFederationToken or GetSessionToken API\n operations.

                \n

                (Optional) You can pass inline or managed session policies to\n this operation. You can pass a single JSON policy document to use as an inline session\n policy. You can also specify up to 10 managed policies to use as managed session policies.\n The plaintext that you use for both inline and managed session policies can't exceed 2,048\n characters. Passing policies to this operation returns new \n temporary credentials. The resulting session's permissions are the intersection of the \n role's identity-based policy and the session policies. You can use the role's temporary \n credentials in subsequent Amazon Web Services API calls to access resources in the account that owns \n the role. You cannot use session policies to grant more permissions than those allowed \n by the identity-based policy of the role that is being assumed. For more information, see\n Session\n Policies in the IAM User Guide.

                \n

                \n Tags\n

                \n

                (Optional) You can configure your IdP to pass attributes into your web identity token as\n session tags. Each session tag consists of a key name and an associated value. For more\n information about session tags, see Passing Session Tags in STS in the\n IAM User Guide.

                \n

                You can pass up to 50 session tags. The plaintext session tag keys can’t exceed 128\n characters and the values can’t exceed 256 characters. For these and additional limits, see\n IAM\n and STS Character Limits in the IAM User Guide.

                \n \n \n

                An Amazon Web Services conversion compresses the passed session policies and session tags into a\n packed binary format that has a separate limit. Your request can fail for this limit\n even if your plaintext meets the other requirements. The PackedPolicySize\n response element indicates by percentage how close the policies and tags for your\n request are to the upper size limit.\n

                \n
                \n \n

                You can pass a session tag with the same key as a tag that is attached to the role. When\n you do, the session tag overrides the role tag with the same key.

                \n

                An administrator must grant you the permissions necessary to pass session tags. The\n administrator can also create granular permissions to allow you to pass only specific\n session tags. For more information, see Tutorial: Using Tags\n for Attribute-Based Access Control in the\n IAM User Guide.

                \n

                You can set the session tags as transitive. Transitive tags persist during role\n chaining. For more information, see Chaining Roles\n with Session Tags in the IAM User Guide.

                \n

                \n Identities\n

                \n

                Before your application can call AssumeRoleWithWebIdentity, you must have\n an identity token from a supported identity provider and create a role that the application\n can assume. The role that your application assumes must trust the identity provider that is\n associated with the identity token. In other words, the identity provider must be specified\n in the role's trust policy.

                \n \n

                Calling AssumeRoleWithWebIdentity can result in an entry in your\n CloudTrail logs. The entry includes the Subject of\n the provided web identity token. We recommend that you avoid using any personally\n identifiable information (PII) in this field. For example, you could instead use a GUID\n or a pairwise identifier, as suggested\n in the OIDC specification.

                \n
                \n

                For more information about how to use web identity federation and the\n AssumeRoleWithWebIdentity API, see the following resources:

                \n " } }, "com.amazonaws.sts#AssumeRoleWithWebIdentityRequest": { @@ -408,7 +408,7 @@ "ProviderId": { "target": "com.amazonaws.sts#urlType", "traits": { - "smithy.api#documentation": "

                The fully qualified host component of the domain name of the identity provider.

                \n

                Specify this value only for OAuth 2.0 access tokens. Currently\n www.amazon.com and graph.facebook.com are the only supported\n identity providers for OAuth 2.0 access tokens. Do not include URL schemes and port\n numbers.

                \n

                Do not specify this value for OpenID Connect ID tokens.

                " + "smithy.api#documentation": "

                The fully qualified host component of the domain name of the OAuth 2.0 identity\n provider. Do not specify this value for an OpenID Connect identity provider.

                \n

                Currently www.amazon.com and graph.facebook.com are the only\n supported identity providers for OAuth 2.0 access tokens. Do not include URL schemes and\n port numbers.

                \n

                Do not specify this value for OpenID Connect ID tokens.

                " } }, "PolicyArns": { diff --git a/codegen/sdk-codegen/aws-models/synthetics.json b/codegen/sdk-codegen/aws-models/synthetics.json index c488630fbe69..227111092f5a 100644 --- a/codegen/sdk-codegen/aws-models/synthetics.json +++ b/codegen/sdk-codegen/aws-models/synthetics.json @@ -262,9 +262,9 @@ } }, "Handler": { - "target": "com.amazonaws.synthetics#String", + "target": "com.amazonaws.synthetics#CodeHandler", "traits": { - "smithy.api#documentation": "

                The entry point to use for the source code when running the canary. This value must end\n with the string .handler. The string is limited to 29 characters or fewer.

                ", + "smithy.api#documentation": "

                The entry point to use for the source code when running the canary. For canaries that use the \n syn-python-selenium-1.0 runtime\n or a syn-nodejs.puppeteer runtime earlier than syn-nodejs.puppeteer-3.4, \n the handler must be specified as \n fileName.handler. For \n syn-python-selenium-1.1, syn-nodejs.puppeteer-3.4, and later runtimes, the handler can be specified as \n \n fileName.functionName\n , or\n you can specify a folder where canary scripts reside as \n\n folder/fileName.functionName\n .

                ", "smithy.api#required": {} } } @@ -658,6 +658,16 @@ "smithy.api#documentation": "

                This structure contains information about when the canary was created and modified.

                " } }, + "com.amazonaws.synthetics#CodeHandler": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + }, + "smithy.api#pattern": "^([0-9a-zA-Z_-]+\\/)*[0-9A-Za-z_\\\\-]+\\.[A-Za-z_][A-Za-z0-9_]*$" + } + }, "com.amazonaws.synthetics#ConflictException": { "type": "structure", "members": { diff --git a/codegen/sdk-codegen/aws-models/timestream-query.json b/codegen/sdk-codegen/aws-models/timestream-query.json index 89cbad167f23..101e8f9ee8f7 100644 --- a/codegen/sdk-codegen/aws-models/timestream-query.json +++ b/codegen/sdk-codegen/aws-models/timestream-query.json @@ -84,7 +84,7 @@ "aws.api#clientDiscoveredEndpoint": { "required": true }, - "smithy.api#documentation": "

                Cancels a query that has been issued. Cancellation is provided only if the query has\n not completed running before the cancellation request was issued. Because cancellation\n is an idempotent operation, subsequent cancellation requests will return a\n CancellationMessage, indicating that the query has already been\n canceled. See code\n sample for details.

                ", + "smithy.api#documentation": "

                Cancels a query that has been issued. Cancellation is provided only if the query has\n not completed running before the cancellation request was issued. Because cancellation\n is an idempotent operation, subsequent cancellation requests will return a\n CancellationMessage, indicating that the query has already been\n canceled. See code\n sample for details.

                ", "smithy.api#idempotent": {} } }, @@ -403,7 +403,7 @@ } ], "traits": { - "smithy.api#documentation": "

                DescribeEndpoints returns a list of available endpoints to make Timestream\n API calls against. This API is available through both Write and Query.

                \n

                Because the Timestream SDKs are designed to transparently work with the\n service’s architecture, including the management and mapping of the service endpoints,\n it is not recommended that you use this API unless:

                \n \n

                For detailed information on how and when to use and implement DescribeEndpoints, see\n The Endpoint Discovery Pattern.

                " + "smithy.api#documentation": "

                DescribeEndpoints returns a list of available endpoints to make Timestream\n API calls against. This API is available through both Write and Query.

                \n

                Because the Timestream SDKs are designed to transparently work with the\n service’s architecture, including the management and mapping of the service endpoints,\n it is not recommended that you use this API unless:

                \n \n

                For detailed information on how and when to use and implement DescribeEndpoints, see\n The Endpoint Discovery Pattern.

                " } }, "com.amazonaws.timestreamquery#DescribeEndpointsRequest": { @@ -1199,7 +1199,7 @@ "aws.api#clientDiscoveredEndpoint": { "required": true }, - "smithy.api#documentation": "

                \n Query is a synchronous operation that enables you to run a query against\n your Amazon Timestream data. Query will time out after 60 seconds.\n You must update the default timeout in the SDK to support a timeout of 60 seconds. See\n the code\n sample for details.

                \n

                Your query request will fail in the following cases:

                \n
                  \n
                • \n

                  If you submit a Query request with the same client token outside\n of the 5-minute idempotency window.

                  \n
                • \n
                • \n

                  If you submit a Query request with the same client token, but\n change other parameters, within the 5-minute idempotency window.

                  \n
                • \n
                • \n

                  If the size of the row (including the query metadata) exceeds 1 MB, then the\n query will fail with the following error message:

                  \n

                  \n Query aborted as max page response size has been exceeded by the output\n result row\n

                  \n
                • \n
                • \n

                  If the IAM principal of the query initiator and the result reader are not the\n same and/or the query initiator and the result reader do not have the same query\n string in the query requests, the query will fail with an Invalid\n pagination token error.

                  \n
                • \n
                ", + "smithy.api#documentation": "

                \n Query is a synchronous operation that enables you to run a query against\n your Amazon Timestream data. Query will time out after 60 seconds.\n You must update the default timeout in the SDK to support a timeout of 60 seconds. See\n the code\n sample for details.

                \n

                Your query request will fail in the following cases:

                \n
                  \n
                • \n

                  If you submit a Query request with the same client token outside\n of the 5-minute idempotency window.

                  \n
                • \n
                • \n

                  If you submit a Query request with the same client token, but\n change other parameters, within the 5-minute idempotency window.

                  \n
                • \n
                • \n

                  If the size of the row (including the query metadata) exceeds 1 MB, then the\n query will fail with the following error message:

                  \n

                  \n Query aborted as max page response size has been exceeded by the output\n result row\n

                  \n
                • \n
                • \n

                  If the IAM principal of the query initiator and the result reader are not the\n same and/or the query initiator and the result reader do not have the same query\n string in the query requests, the query will fail with an Invalid\n pagination token error.

                  \n
                • \n
                ", "smithy.api#idempotent": {}, "smithy.api#paginated": { "inputToken": "NextToken", diff --git a/codegen/sdk-codegen/aws-models/transcribe.json b/codegen/sdk-codegen/aws-models/transcribe.json index bc6c3b50f42d..855058aa6e3e 100644 --- a/codegen/sdk-codegen/aws-models/transcribe.json +++ b/codegen/sdk-codegen/aws-models/transcribe.json @@ -503,7 +503,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Creates an analytics category. Amazon Transcribe applies the conditions specified by your\n analytics categories to your call analytics jobs. For each analytics category, you specify one \n or more rules. For example, you can specify a rule that the customer sentiment was neutral \n or negative within that category. If you start a call analytics job, Amazon Transcribe applies the\n category to the analytics job that you've specified.

                ", + "smithy.api#documentation": "

                Creates a call analytics category. Amazon Transcribe applies the conditions specified by your\n call analytics categories to your call analytics jobs. For each analytics category, you \n must create between 1 and 20 rules. For example, you can create a 'greeting' \n category with a rule that flags calls in which your agent does not use a specified \n phrase (for example: \"Please note this call may be recorded.\") in the first 15 seconds\n of the call. When you start a call analytics job, Amazon Transcribe applies all your existing call\n analytics categories to that job.

                ", "smithy.api#http": { "method": "PUT", "uri": "/callanalyticscategories/{CategoryName}", @@ -517,7 +517,7 @@ "CategoryName": { "target": "com.amazonaws.transcribe#CategoryName", "traits": { - "smithy.api#documentation": "

                The name that you choose for your category when you create it.

                ", + "smithy.api#documentation": "

                A unique name, chosen by you, for your call analytics category. For example, \n sentiment-positive-last30seconds.

                ", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -525,7 +525,7 @@ "Rules": { "target": "com.amazonaws.transcribe#RuleList", "traits": { - "smithy.api#documentation": "

                To create a category, you must specify between 1 and 20 rules. For each rule, you \n specify a filter to be applied to the attributes of the call. For example, you can specify a \n sentiment filter to detect if the customer's sentiment was negative or neutral.

                ", + "smithy.api#documentation": "

                Rules make up a call analytics category. When creating a call analytics category,\n you must create between 1 and 20 rules for your category. For each rule, you \n specify a filter you want applied to the attributes of a call. For example, you can choose\n a sentiment filter that detects if a customer's sentiment was positive during the last\n 30 seconds of the call.

                ", "smithy.api#required": {} } } @@ -537,7 +537,7 @@ "CategoryProperties": { "target": "com.amazonaws.transcribe#CategoryProperties", "traits": { - "smithy.api#documentation": "

                The rules and associated metadata used to create a category.

                " + "smithy.api#documentation": "

                If your audio matches one of your categories, this field contains data on that \n category and its associated rules. This parameter shows which category is flagged\n (CategoryName) along with metadata for the rules that match your \n audio. Metadata includes the rule filter (such as InterruptionFilter,\n NonTalkTimeFilter, SentimentFilter, and \n TranscriptFilter) and where in your audio (StartTime\n and EndTime) the rule has a match.

                " } } } @@ -565,7 +565,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Creates a new custom language model. Use Amazon S3 prefixes to provide the location of \n your input files. The time it takes to create your model depends on the size of your training\n data.

                ", + "smithy.api#documentation": "

                Creates a new custom language model. When creating a new language model, \n you must specify if you want a Wideband (audio sample rates over 16,000 Hz) or\n Narrowband (audio sample rates under 16,000 Hz) base model. You then include the \n S3 URI location of your training and tuning files, the language for the model, a \n unique name, and any tags you want associated with your model.

                ", "smithy.api#http": { "method": "PUT", "uri": "/languagemodels/{ModelName}", @@ -579,21 +579,21 @@ "LanguageCode": { "target": "com.amazonaws.transcribe#CLMLanguageCode", "traits": { - "smithy.api#documentation": "

                The language of the input text you're using to train your custom language\n model.

                ", + "smithy.api#documentation": "

                The language of your custom language model; note that the language code you\n select must match the language of your training and tuning data.

                ", "smithy.api#required": {} } }, "BaseModelName": { "target": "com.amazonaws.transcribe#BaseModelName", "traits": { - "smithy.api#documentation": "

                The Amazon Transcribe standard language model, or base model used to create your custom\n language model.

                \n

                If you want to use your custom language model to transcribe audio with a sample rate \n of 16,000 Hz or greater, choose Wideband.

                \n

                If you want to use your custom language model to transcribe audio with a sample rate \n that is less than 16,000 Hz, choose Narrowband.

                ", + "smithy.api#documentation": "

                The Amazon Transcribe standard language model, or base model, used to create your \n custom language model. Amazon Transcribe offers two options for base models: Wideband and\n Narrowband.

                \n

                If the audio you want to transcribe has a sample rate of 16,000 Hz or greater, \n choose WideBand. To transcribe audio with a sample rate less than\n 16,000 Hz, choose NarrowBand.

                ", "smithy.api#required": {} } }, "ModelName": { "target": "com.amazonaws.transcribe#ModelName", "traits": { - "smithy.api#documentation": "

                The name you choose for your custom language model when you create it.

                ", + "smithy.api#documentation": "

                The name of your new custom language model.

                \n

                This name is case sensitive, cannot contain spaces, and must be unique within an \n Amazon Web Services account. If you try to create a language model with the same name as a\n previous language model, you get a ConflictException error.

                ", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -601,14 +601,14 @@ "InputDataConfig": { "target": "com.amazonaws.transcribe#InputDataConfig", "traits": { - "smithy.api#documentation": "

                Contains the data access role and the Amazon S3 prefixes to read the required input files to\n create a custom language model.

                ", + "smithy.api#documentation": "

                Contains your data access role ARN (Amazon Resource Name) and the Amazon S3 \n locations of your training (S3Uri) and tuning \n (TuningDataS3Uri) data.

                ", "smithy.api#required": {} } }, "Tags": { "target": "com.amazonaws.transcribe#TagList", "traits": { - "smithy.api#documentation": "

                Adds one or more tags, each in the form of a key:value pair, to a new language model \n at the time you create this new model.

                " + "smithy.api#documentation": "

                Optionally add tags, each in the form of a key:value pair, to your new language\n model. See also: .

                " } } } @@ -619,31 +619,31 @@ "LanguageCode": { "target": "com.amazonaws.transcribe#CLMLanguageCode", "traits": { - "smithy.api#documentation": "

                The language code of the text you've used to create a custom language model.

                " + "smithy.api#documentation": "

                The language code you selected for your custom language model.

                " } }, "BaseModelName": { "target": "com.amazonaws.transcribe#BaseModelName", "traits": { - "smithy.api#documentation": "

                The Amazon Transcribe standard language model, or base model you've used to create a custom\n language model.

                " + "smithy.api#documentation": "

                The Amazon Transcribe standard language model, or base model, you used when creating your\n custom language model.

                \n

                If your audio has a sample rate of 16,000 Hz or greater, this value should be\n WideBand. If your audio has a sample rate of less than\n 16,000 Hz, this value should be NarrowBand.

                " } }, "ModelName": { "target": "com.amazonaws.transcribe#ModelName", "traits": { - "smithy.api#documentation": "

                The name you've chosen for your custom language model.

                " + "smithy.api#documentation": "

                The unique name you chose for your custom language model.

                " } }, "InputDataConfig": { "target": "com.amazonaws.transcribe#InputDataConfig", "traits": { - "smithy.api#documentation": "

                The data access role and Amazon S3 prefixes you've chosen to create your custom language model.

                " + "smithy.api#documentation": "

                Lists your data access role ARN (Amazon Resource Name) and the Amazon S3 \n locations your provided for your training (S3Uri) and tuning \n (TuningDataS3Uri) data.

                " } }, "ModelStatus": { "target": "com.amazonaws.transcribe#ModelStatus", "traits": { - "smithy.api#documentation": "

                The status of the custom language model. When the status is \n COMPLETED the model is ready to use.

                " + "smithy.api#documentation": "

                The status of your custom language model. When the status shows as \n COMPLETED, your model is ready to use.

                " } } } @@ -671,7 +671,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Creates a new custom vocabulary that you can use to modify how Amazon Transcribe Medical transcribes\n your audio file.

                ", + "smithy.api#documentation": "

                Creates a new custom medical vocabulary.

                \n

                When creating a new medical vocabulary, you must upload a text file that contains\n your new entries, phrases, and terms into an S3 bucket. Note that this differs from \n , where you can include a list of terms within\n your request using the Phrases flag, as\n CreateMedicalVocabulary does not support the Phrases\n flag.

                \n

                For more information on creating a custom vocabulary text file, see \n Creating a custom \n vocabulary.

                ", "smithy.api#http": { "method": "PUT", "uri": "/medicalvocabularies/{VocabularyName}", @@ -685,7 +685,7 @@ "VocabularyName": { "target": "com.amazonaws.transcribe#VocabularyName", "traits": { - "smithy.api#documentation": "

                The name of the custom vocabulary. This case-sensitive name must be unique within\n an Amazon Web Services account. If you try to create a vocabulary with the same name \n as a previous vocabulary, you get a ConflictException error.

                ", + "smithy.api#documentation": "

                The name of your new vocabulary.

                \n

                This name is case sensitive, cannot contain spaces, and must be unique within an \n Amazon Web Services account. If you try to create a vocabulary with the same name as a\n previous vocabulary, you get a ConflictException error.

                ", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -693,21 +693,21 @@ "LanguageCode": { "target": "com.amazonaws.transcribe#LanguageCode", "traits": { - "smithy.api#documentation": "

                The language code for the language used for the entries in your custom vocabulary.\n The language code of your custom vocabulary must match the language code of your \n transcription job. US English (en-US) is the only language code available for Amazon Transcribe Medical.

                ", + "smithy.api#documentation": "

                The language code that represents the language of the entries in your custom\n vocabulary. Note that U.S. English (en-US) is the only language \n supported with Amazon Transcribe Medical.

                ", "smithy.api#required": {} } }, "VocabularyFileUri": { "target": "com.amazonaws.transcribe#Uri", "traits": { - "smithy.api#documentation": "

                The location in Amazon S3 of the text file you use to define your custom vocabulary. The URI \n must be in the same Amazon Web Services Region as the resource that you're calling. Enter\n information about your VocabularyFileUri in the following format:

                \n

                \n https://s3..amazonaws.com///\n

                \n

                The following is an example URI for a vocabulary file that is stored in Amazon S3:

                \n

                \n https://s3.us-east-1.amazonaws.com/AWSDOC-EXAMPLE-BUCKET/vocab.txt\n

                \n

                For more information about Amazon S3 object names, see Object Keys in \n the Amazon S3 Developer Guide.

                \n

                For more information about custom vocabularies, see Medical Custom\n Vocabularies.

                ", + "smithy.api#documentation": "

                The Amazon S3 location (URI) of the text file that contains your custom vocabulary. \n The URI must be in the same Amazon Web Services Region as the resource that you're \n calling.

                \n

                Here's an example URI path:

                \n

                \n https://s3.us-east-1.amazonaws.com/my-s3-bucket/my-vocab-file.txt\n

                ", "smithy.api#required": {} } }, "Tags": { "target": "com.amazonaws.transcribe#TagList", "traits": { - "smithy.api#documentation": "

                Adds one or more tags, each in the form of a key:value pair, to a new medical \n vocabulary at the time you create this new vocabulary.

                " + "smithy.api#documentation": "

                Adds one or more tags, each in the form of a key:value pair, to a new medical \n vocabulary at the time you create the new vocabulary.

                \n

                To learn more about using tags with Amazon Transcribe, refer to Tagging resources.

                " } } } @@ -718,31 +718,31 @@ "VocabularyName": { "target": "com.amazonaws.transcribe#VocabularyName", "traits": { - "smithy.api#documentation": "

                The name of the vocabulary. The name must be unique within an Amazon Web Services \n account and is case sensitive.

                " + "smithy.api#documentation": "

                The name you chose for your vocabulary.

                " } }, "LanguageCode": { "target": "com.amazonaws.transcribe#LanguageCode", "traits": { - "smithy.api#documentation": "

                The language code for the entries in your custom vocabulary. US English (en-US) is the \n only valid language code for Amazon Transcribe Medical.

                " + "smithy.api#documentation": "

                The language code you selected for your medical vocabulary. Note that U.S. English\n (en-US) is the only language supported with Amazon Transcribe Medical.

                " } }, "VocabularyState": { "target": "com.amazonaws.transcribe#VocabularyState", "traits": { - "smithy.api#documentation": "

                The processing state of your custom vocabulary in Amazon Transcribe Medical. If the state is \n READY, you can use the vocabulary in a \n StartMedicalTranscriptionJob request.

                " + "smithy.api#documentation": "

                The processing state of your custom medical vocabulary. If the state is \n READY, you can use the vocabulary in a \n StartMedicalTranscriptionJob request.

                " } }, "LastModifiedTime": { "target": "com.amazonaws.transcribe#DateTime", "traits": { - "smithy.api#documentation": "

                The date and time that you created the vocabulary.

                " + "smithy.api#documentation": "

                The date and time you created your custom medical vocabulary.

                " } }, "FailureReason": { "target": "com.amazonaws.transcribe#FailureReason", "traits": { - "smithy.api#documentation": "

                If the VocabularyState field is FAILED, this field contains\n information about why the job failed.

                " + "smithy.api#documentation": "

                If the VocabularyState field is FAILED,\n FailureReason contains information about why the job failed.

                " } } } @@ -770,7 +770,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Creates a new custom vocabulary that you can use to change the way Amazon Transcribe handles transcription of an\n audio file.

                ", + "smithy.api#documentation": "

                Creates a new custom vocabulary.

                \n

                When creating a new medical vocabulary, you can either upload a text file that\n contains your new entries, phrases, and terms into an S3 bucket or include a list of \n terms directly in your request using the Phrases flag.

                \n

                For more information on creating a custom vocabulary, see \n Creating a custom \n vocabulary.

                ", "smithy.api#http": { "method": "PUT", "uri": "/vocabularies/{VocabularyName}", @@ -801,7 +801,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Creates a new vocabulary filter that you can use to filter words, such as profane words, \n from the output of a transcription job.

                ", + "smithy.api#documentation": "

                Creates a new vocabulary filter that you can use to filter words from your transcription\n output. For example, you can use this operation to remove profanity from your\n transcript.

                ", "smithy.api#http": { "method": "POST", "uri": "/vocabularyFilters/{VocabularyFilterName}", @@ -815,7 +815,7 @@ "VocabularyFilterName": { "target": "com.amazonaws.transcribe#VocabularyFilterName", "traits": { - "smithy.api#documentation": "

                The vocabulary filter name. The name must be unique within the account that contains \n it. If you try to create a vocabulary filter with the same name as another vocabulary filter, you\n get a ConflictException error.

                ", + "smithy.api#documentation": "

                The name of your new vocabulary filter.

                \n

                This name is case sensitive, cannot contain spaces, and must be unique within an \n Amazon Web Services account. If you try to create a vocabulary filter with the same name \n as a previous vocabulary filter, you get a ConflictException error.

                ", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -830,19 +830,19 @@ "Words": { "target": "com.amazonaws.transcribe#Words", "traits": { - "smithy.api#documentation": "

                The words to use in the vocabulary filter. Only use characters from the character set \n defined for custom vocabularies. For a list of character sets, see Character Sets for Custom\n Vocabularies.

                \n

                If you provide a list of words in the Words parameter, you can't use the\n VocabularyFilterFileUri parameter.

                " + "smithy.api#documentation": "

                The words you want in your vocabulary filter. Only use characters specified in the\n Character\n sets for the language you're transcribing.

                \n

                Note that if you include Words in your request, you cannot use\n VocabularyFilterFileUri; you must choose one or the other.

                " } }, "VocabularyFilterFileUri": { "target": "com.amazonaws.transcribe#Uri", "traits": { - "smithy.api#documentation": "

                The Amazon S3 location of a text file used as input to create the vocabulary filter. Only \n use characters from the character set defined for custom vocabularies. For a list of character \n sets, see Character Sets for Custom\n Vocabularies.

                \n

                The specified file must be less than 50 KB of UTF-8 characters.

                \n

                If you provide the location of a list of words in the VocabularyFilterFileUri\n parameter, you can't use the Words parameter.

                " + "smithy.api#documentation": "

                The Amazon S3 location of a text file used as input to create the vocabulary filter. Only \n use characters from the character set defined for custom vocabularies. For a list of \n character sets, see Character Sets for Custom\n Vocabularies.

                \n

                Your vocabulary filter file must be less than 50 KB in size.

                \n

                Note that if you include VocabularyFilterFileUri in your request, you \n cannot use Words; you must choose one or the other.

                " } }, "Tags": { "target": "com.amazonaws.transcribe#TagList", "traits": { - "smithy.api#documentation": "

                Adds one or more tags, each in the form of a key:value pair, to a new Amazon Transcribe vocabulary\n filter at the time you create this new vocabulary filter.

                " + "smithy.api#documentation": "

                Adds one or more tags, each in the form of a key:value pair, to a new vocabulary\n filter at the time you create this new vocabulary filter.

                " } } } @@ -859,13 +859,13 @@ "LanguageCode": { "target": "com.amazonaws.transcribe#LanguageCode", "traits": { - "smithy.api#documentation": "

                The language code of the words in the collection.

                " + "smithy.api#documentation": "

                The language code associated with your vocabulary filter.

                " } }, "LastModifiedTime": { "target": "com.amazonaws.transcribe#DateTime", "traits": { - "smithy.api#documentation": "

                The date and time that the vocabulary filter was modified.

                " + "smithy.api#documentation": "

                The date and time the vocabulary filter was modified.

                " } } } @@ -876,7 +876,7 @@ "VocabularyName": { "target": "com.amazonaws.transcribe#VocabularyName", "traits": { - "smithy.api#documentation": "

                The name of the vocabulary. The name must be unique within an \n Amazon Web Services account. The name is case sensitive. If you try to create a vocabulary \n with the same name as a previous vocabulary you will receive a \n ConflictException error.

                ", + "smithy.api#documentation": "

                The name of your new vocabulary.

                \n

                This name is case sensitive, cannot contain spaces, and must be unique within an \n Amazon Web Services account. If you try to create a vocabulary with the same name as a\n previous vocabulary, you get a ConflictException error.

                ", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -884,26 +884,26 @@ "LanguageCode": { "target": "com.amazonaws.transcribe#LanguageCode", "traits": { - "smithy.api#documentation": "

                The language code of the vocabulary entries. For a list of languages and their \n corresponding language codes, see table-language-matrix.

                ", + "smithy.api#documentation": "

                The language code that represents the language of the entries in your custom\n vocabulary. Each vocabulary must contain terms in only one language. For a list of \n languages and their corresponding language codes, see Supported \n languages.

                ", "smithy.api#required": {} } }, "Phrases": { "target": "com.amazonaws.transcribe#Phrases", "traits": { - "smithy.api#documentation": "

                An array of strings that contains the vocabulary entries.

                " + "smithy.api#documentation": "

                Use this flag to include a list of terms within your request.

                \n

                Note that if you include Phrases in your request, you cannot\n use VocabularyFileUri; you must choose one or the other.

                " } }, "VocabularyFileUri": { "target": "com.amazonaws.transcribe#Uri", "traits": { - "smithy.api#documentation": "

                The S3 location of the text file that contains the definition of the custom vocabulary. The\n URI must be in the same region as the API endpoint that you are calling. The general form \n is:

                \n

                \n https://s3..amazonaws.com/// \n

                \n

                For example:

                \n

                \n https://s3.us-east-1.amazonaws.com/AWSDOC-EXAMPLE-BUCKET/vocab.txt\n

                \n

                For more information about S3 object names, see Object Keys in the \n Amazon S3 Developer Guide.

                \n

                For more information about custom vocabularies, see Custom vocabularies.

                " + "smithy.api#documentation": "

                The S3 location of the text file that contains your custom vocabulary. The\n URI must be located in the same region as the API endpoint you're calling.

                \n

                Here's an example URI path:

                \n

                \n https://s3.us-east-1.amazonaws.com/my-s3-bucket/my-vocab-file.txt\n

                \n

                Note that if you include VocabularyFileUri in your request, you \n cannot use the Phrases flag; you must choose one or the other.

                " } }, "Tags": { "target": "com.amazonaws.transcribe#TagList", "traits": { - "smithy.api#documentation": "

                Adds one or more tags, each in the form of a key:value pair, to a new Amazon Transcribe vocabulary at\n the time you create this new vocabulary.

                " + "smithy.api#documentation": "

                Adds one or more tags, each in the form of a key:value pair, to a new \n custom vocabulary at the time you create this new vocabulary.

                " } } } @@ -914,31 +914,31 @@ "VocabularyName": { "target": "com.amazonaws.transcribe#VocabularyName", "traits": { - "smithy.api#documentation": "

                The name of the vocabulary.

                " + "smithy.api#documentation": "

                The name you chose for your vocabulary.

                " } }, "LanguageCode": { "target": "com.amazonaws.transcribe#LanguageCode", "traits": { - "smithy.api#documentation": "

                The language code of the vocabulary entries.

                " + "smithy.api#documentation": "

                The language code you selected for your vocabulary.

                " } }, "VocabularyState": { "target": "com.amazonaws.transcribe#VocabularyState", "traits": { - "smithy.api#documentation": "

                The processing state of the vocabulary. When the VocabularyState field \n contains READY the vocabulary is ready to be used in a StartTranscriptionJob \n request.

                " + "smithy.api#documentation": "

                The processing state of your vocabulary. If the state is READY, you can\n use the vocabulary in a StartTranscriptionJob request.

                " } }, "LastModifiedTime": { "target": "com.amazonaws.transcribe#DateTime", "traits": { - "smithy.api#documentation": "

                The date and time that the vocabulary was created.

                " + "smithy.api#documentation": "

                The date and time you created your custom vocabulary.

                " } }, "FailureReason": { "target": "com.amazonaws.transcribe#FailureReason", "traits": { - "smithy.api#documentation": "

                If the VocabularyState field is FAILED, this field contains\n information about why the job failed.

                " + "smithy.api#documentation": "

                If the VocabularyState field is FAILED,\n FailureReason contains information about why the job failed.

                " } } } @@ -979,7 +979,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Deletes a call analytics category using its name.

                ", + "smithy.api#documentation": "

                Deletes a call analytics category. To use this operation, specify the name of the \n category you want to delete using CategoryName.

                ", "smithy.api#http": { "method": "DELETE", "uri": "/callanalyticscategories/{CategoryName}", @@ -993,7 +993,7 @@ "CategoryName": { "target": "com.amazonaws.transcribe#CategoryName", "traits": { - "smithy.api#documentation": "

                The name of the call analytics category that you're choosing to delete. The value is case\n sensitive.

                ", + "smithy.api#documentation": "

                The name of the call analytics category you want to delete. Category names are\n case-sensitive.

                ", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -1024,7 +1024,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Deletes a call analytics job using its name.

                ", + "smithy.api#documentation": "

                Deletes a call analytics job. To use this operation, specify the name of the \n job you want to delete using CallAnalyticsJobName.

                ", "smithy.api#http": { "method": "DELETE", "uri": "/callanalyticsjobs/{CallAnalyticsJobName}", @@ -1038,7 +1038,7 @@ "CallAnalyticsJobName": { "target": "com.amazonaws.transcribe#CallAnalyticsJobName", "traits": { - "smithy.api#documentation": "

                The name of the call analytics job you want to delete.

                ", + "smithy.api#documentation": "

                The name of the call analytics job you want to delete. Job names are\n case-sensitive.

                ", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -1066,7 +1066,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Deletes a custom language model using its name.

                ", + "smithy.api#documentation": "

                Deletes a custom language model. To use this operation, specify the name of the \n language model you want to delete using ModelName.

                ", "smithy.api#http": { "method": "DELETE", "uri": "/languagemodels/{ModelName}", @@ -1080,7 +1080,7 @@ "ModelName": { "target": "com.amazonaws.transcribe#ModelName", "traits": { - "smithy.api#documentation": "

                The name of the model you're choosing to delete.

                ", + "smithy.api#documentation": "

                The name of the model you want to delete. Model names are case-sensitive.

                ", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -1104,7 +1104,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Deletes a transcription job generated by Amazon Transcribe Medical and any related information.

                ", + "smithy.api#documentation": "

                Deletes a medical transcription job, along with any related information. To use this \n operation, specify the name of the job you want to delete using\n MedicalTranscriptionJobName.

                ", "smithy.api#http": { "method": "DELETE", "uri": "/medicaltranscriptionjobs/{MedicalTranscriptionJobName}", @@ -1118,7 +1118,7 @@ "MedicalTranscriptionJobName": { "target": "com.amazonaws.transcribe#TranscriptionJobName", "traits": { - "smithy.api#documentation": "

                The name you provide to the DeleteMedicalTranscriptionJob object to\n delete a transcription job.

                ", + "smithy.api#documentation": "

                The name of the medical transcription job you want to delete. Job names are\n case-sensitive.

                ", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -1145,7 +1145,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Deletes a vocabulary from Amazon Transcribe Medical.

                ", + "smithy.api#documentation": "

                Deletes a custom medical vocabulary. To use this operation, specify the name of the\n vocabulary you want to delete using VocabularyName.

                ", "smithy.api#http": { "method": "DELETE", "uri": "/medicalvocabularies/{VocabularyName}", @@ -1159,7 +1159,7 @@ "VocabularyName": { "target": "com.amazonaws.transcribe#VocabularyName", "traits": { - "smithy.api#documentation": "

                The name of the vocabulary that you want to delete.

                ", + "smithy.api#documentation": "

                The name of the vocabulary that you want to delete. Vocabulary names are\n case-sensitive.

                ", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -1183,7 +1183,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Deletes a previously submitted transcription job along with any other generated results such as the \n transcription, models, and so on.

                ", + "smithy.api#documentation": "

                Deletes a transcription job, along with any related information. To use this operation,\n specify the name of the job you want to delete using\n TranscriptionJobName.

                ", "smithy.api#http": { "method": "DELETE", "uri": "/transcriptionjobs/{TranscriptionJobName}", @@ -1197,7 +1197,7 @@ "TranscriptionJobName": { "target": "com.amazonaws.transcribe#TranscriptionJobName", "traits": { - "smithy.api#documentation": "

                The name of the transcription job to be deleted.

                ", + "smithy.api#documentation": "

                The name of the transcription job you want to delete. Job names are\n case-sensitive.

                ", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -1224,7 +1224,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Deletes a vocabulary from Amazon Transcribe.

                ", + "smithy.api#documentation": "

                Deletes a custom vocabulary. To use this operation, specify the name of the vocabulary \n you want to delete using VocabularyName.

                ", "smithy.api#http": { "method": "DELETE", "uri": "/vocabularies/{VocabularyName}", @@ -1252,7 +1252,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Removes a vocabulary filter.

                ", + "smithy.api#documentation": "

                Deletes a vocabulary filter. To use this operation, specify the name of the vocabulary \n filter you want to delete using VocabularyFilterName.

                ", "smithy.api#http": { "method": "DELETE", "uri": "/vocabularyFilters/{VocabularyFilterName}", @@ -1266,7 +1266,7 @@ "VocabularyFilterName": { "target": "com.amazonaws.transcribe#VocabularyFilterName", "traits": { - "smithy.api#documentation": "

                The name of the vocabulary filter to remove.

                ", + "smithy.api#documentation": "

                The name of the vocabulary filter you want to delete. Vocabulary filter names are\n case-sensitive.

                ", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -1279,7 +1279,7 @@ "VocabularyName": { "target": "com.amazonaws.transcribe#VocabularyName", "traits": { - "smithy.api#documentation": "

                The name of the vocabulary to delete.

                ", + "smithy.api#documentation": "

                The name of the vocabulary you want to delete. Vocabulary names are\n case-sensitive.

                ", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -1309,7 +1309,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Gets information about a single custom language model. Use this information to see details about the \n language model in your Amazon Web Services account. You can also see whether the base language model used \n to create your custom language model has been updated. If Amazon Transcribe has updated the base model, you can create a\n new custom language model using the updated base model. If the language model wasn't created, you can use this\n operation to understand why Amazon Transcribe couldn't create it.

                ", + "smithy.api#documentation": "

                Provides information about a specific custom language model in your \n Amazon Web Services account.

                \n

                This operation also shows if the base language model you used to create your custom\n language model has been updated. If Amazon Transcribe has updated the base model, you can create a\n new custom language model using the updated base model.

                \n

                If you tried to create a new custom language model and the request wasn't successful,\n you can use this operation to help identify the reason.

                ", "smithy.api#http": { "method": "GET", "uri": "/languagemodels/{ModelName}", @@ -1323,7 +1323,7 @@ "ModelName": { "target": "com.amazonaws.transcribe#ModelName", "traits": { - "smithy.api#documentation": "

                The name of the custom language model you submit to get more information.

                ", + "smithy.api#documentation": "

                The name of the custom language model you want described. Model names are\n case-sensitive.

                ", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -1381,7 +1381,7 @@ "CategoryName": { "target": "com.amazonaws.transcribe#CategoryName", "traits": { - "smithy.api#documentation": "

                The name of the category you want information about. This value is case sensitive.

                ", + "smithy.api#documentation": "

                The name of the category you want information about. Category names are case\n sensitive.

                ", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -1394,7 +1394,7 @@ "CategoryProperties": { "target": "com.amazonaws.transcribe#CategoryProperties", "traits": { - "smithy.api#documentation": "

                The rules you've defined for a category.

                " + "smithy.api#documentation": "

                Provides you with the rules associated with the category you specified in your\n GetCallAnalyticsCategory request.

                " } } } @@ -1422,7 +1422,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Returns information about a call analytics job. To see the status of the job, check the\n CallAnalyticsJobStatus field. If the status is COMPLETED, the job\n is finished and you can find the results at the location specified in the TranscriptFileUri \n field. If you enable personally identifiable information (PII) redaction, the redacted transcript appears\n in the RedactedTranscriptFileUri field.

                ", + "smithy.api#documentation": "

                Retrieves information about a call analytics job.

                \n

                To view the job's status, refer to the CallAnalyticsJobStatus field. If the \n status is COMPLETED, the job is finished. You can then find your transcript at \n the URI specified in the TranscriptFileUri field. If you enabled personally \n identifiable information (PII) redaction, the redacted transcript appears in the\n RedactedTranscriptFileUri field.

                ", "smithy.api#http": { "method": "GET", "uri": "/callanalyticsjobs/{CallAnalyticsJobName}", @@ -1436,7 +1436,7 @@ "CallAnalyticsJobName": { "target": "com.amazonaws.transcribe#CallAnalyticsJobName", "traits": { - "smithy.api#documentation": "

                The name of the analytics job you want information about. This value is case\n sensitive.

                ", + "smithy.api#documentation": "

                The name of the analytics job you want information about. This value is case\n sensitive.

                ", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -1449,7 +1449,7 @@ "CallAnalyticsJob": { "target": "com.amazonaws.transcribe#CallAnalyticsJob", "traits": { - "smithy.api#documentation": "

                An object that contains the results of your call analytics job.

                " + "smithy.api#documentation": "

                An object that contains detailed information about your call analytics job. Returned fields\n include: CallAnalyticsJobName, CallAnalyticsJobStatus,\n ChannelDefinitions, CompletionTime,\n CreationTime, DataAccessRoleArn,\n FailureReason, IdentifiedLanguageScore, \n LanguageCode, Media, MediaFormat,\n MediaSampleRateHertz, Settings, StartTime,\n and Transcript.

                " } } } @@ -1477,7 +1477,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Returns information about a transcription job from Amazon Transcribe Medical. To see the status of the job, check the\n TranscriptionJobStatus field. If the status is COMPLETED, the job is finished. You \n find the results of the completed job in the TranscriptFileUri field.

                ", + "smithy.api#documentation": "

                Retrieves information about a medical transcription job.

                \n

                To view the job's status, refer to the TranscriptionJobStatus field. If the \n status is COMPLETED, the job is finished. You can then find your transcript at \n the URI specified in the TranscriptFileUri field.

                ", "smithy.api#http": { "method": "GET", "uri": "/medicaltranscriptionjobs/{MedicalTranscriptionJobName}", @@ -1491,7 +1491,7 @@ "MedicalTranscriptionJobName": { "target": "com.amazonaws.transcribe#TranscriptionJobName", "traits": { - "smithy.api#documentation": "

                The name of the medical transcription job.

                ", + "smithy.api#documentation": "

                The name of the medical transcription job you want information about. This value is case\n sensitive.

                ", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -1504,7 +1504,7 @@ "MedicalTranscriptionJob": { "target": "com.amazonaws.transcribe#MedicalTranscriptionJob", "traits": { - "smithy.api#documentation": "

                An object that contains the results of the medical transcription job.

                " + "smithy.api#documentation": "

                An object that contains detailed information about your medical transcription job.\n Returned fields include: CompletionTime,\n ContentIdentificationType, CreationTime,\n FailureReason, LanguageCode, Media,\n MediaFormat, MediaSampleRateHertz, \n MedicalTranscriptionJobName, Settings,\n Specialty, StartTime, Tags,\n Transcript, TranscriptionJobStatus, and\n Type.

                " } } } @@ -1546,7 +1546,7 @@ "VocabularyName": { "target": "com.amazonaws.transcribe#VocabularyName", "traits": { - "smithy.api#documentation": "

                The name of the vocabulary that you want information about. The value is case sensitive.

                ", + "smithy.api#documentation": "

                The name of the medical vocabulary you want information about. This value is case \n sensitive.

                ", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -1583,13 +1583,13 @@ "FailureReason": { "target": "com.amazonaws.transcribe#FailureReason", "traits": { - "smithy.api#documentation": "

                If the VocabularyState is FAILED, this field contains information about why\n the job failed.

                " + "smithy.api#documentation": "

                If your request returns a VocabularyState that is FAILED,\n the FailureReason field contains information about why the request \n failed.

                \n

                For more information, refer to the Common Errors\n section.

                " } }, "DownloadUri": { "target": "com.amazonaws.transcribe#Uri", "traits": { - "smithy.api#documentation": "

                The location in Amazon S3 where the vocabulary is stored. Use this URI to get the contents of the vocabulary. You \n can download your vocabulary from the URI for a limited time.

                " + "smithy.api#documentation": "

                The S3 location where the vocabulary is stored; use this URI to view or download the\n vocabulary.

                " } } } @@ -1617,7 +1617,7 @@ } ], "traits": { - "smithy.api#documentation": "

                Returns information about a transcription job. To see the status of the job, check the \n TranscriptionJobStatus field. If the status is COMPLETED, the job is finished and\n you can find the results at the location specified in the TranscriptFileUri field. If you enable content \n redaction, the redacted transcript appears in RedactedTranscriptFileUri.

                ", + "smithy.api#documentation": "

                Returns information about a transcription job. To see the status of the job, check the \n TranscriptionJobStatus field. If the status is COMPLETED, \n the job is finished and you can find the results at the location specified in the\n TranscriptFileUri field. If you enable content redaction, the redacted\n transcript appears in RedactedTranscriptFileUri.

                ", "smithy.api#http": { "method": "GET", "uri": "/transcriptionjobs/{TranscriptionJobName}", @@ -2187,7 +2187,7 @@ "UpgradeAvailability": { "target": "com.amazonaws.transcribe#Boolean", "traits": { - "smithy.api#documentation": "

                Whether the base model used for the custom language model is up to date. If this field \n is true then you are running the most up-to-date version of the base model \n in your custom language model.

                " + "smithy.api#documentation": "

                Whether the base model used for the custom language model is up to date. If this field \n is false then you are running the most up-to-date version of the base model \n in your custom language model.

                " } }, "FailureReason": { @@ -2600,14 +2600,14 @@ "NextToken": { "target": "com.amazonaws.transcribe#NextToken", "traits": { - "smithy.api#documentation": "

                If the result of your previous request to ListMedicalVocabularies was\n truncated, include the NextToken to fetch the next set of vocabularies.

                ", + "smithy.api#documentation": "

                If the result of your previous request to ListMedicalVocabularies was\n truncated, include the NextToken to fetch the next set of\n vocabularies.

                ", "smithy.api#httpQuery": "NextToken" } }, "MaxResults": { "target": "com.amazonaws.transcribe#MaxResults", "traits": { - "smithy.api#documentation": "

                The maximum number of vocabularies to return in each page of results. If there are fewer \n results than the value you specify, only the actual results are returned. If you do not specify \n a value, the default of 5 is used.

                ", + "smithy.api#documentation": "

                The maximum number of vocabularies to return in each page of results. If there are\n fewer results than the value you specify, only the actual results are returned. If you do not\n specify a value, the default of 5 is used.

                ", "smithy.api#httpQuery": "MaxResults" } }, @@ -2639,7 +2639,7 @@ "NextToken": { "target": "com.amazonaws.transcribe#NextToken", "traits": { - "smithy.api#documentation": "

                The ListMedicalVocabularies operation returns a page of vocabularies at a\n time. You set the maximum number of vocabularies to return on a page with the\n MaxResults parameter. If there are more jobs in the list will fit on a page, \n Amazon Transcribe Medical returns the NextPage token. To return the next page of vocabularies, \n include the token in the next request to the ListMedicalVocabularies \n operation .

                " + "smithy.api#documentation": "

                The ListMedicalVocabularies operation returns a page of vocabularies at \n a time. You set the maximum number of vocabularies to return on a page with the\n MaxResults parameter. If there are more jobs in the list than will fit on a page, \n Amazon Transcribe Medical returns the NextPage token. To return the next page of vocabularies,\n include the token in the next request to the ListMedicalVocabularies \n operation.

                " } }, "Vocabularies": { @@ -3001,7 +3001,7 @@ "MediaFileUri": { "target": "com.amazonaws.transcribe#Uri", "traits": { - "smithy.api#documentation": "

                The S3 object location of the input media file. The URI must be in the same region as \n the API endpoint that you are calling. The general form is:

                \n

                \n s3:////\n

                \n

                For example:

                \n

                \n s3://AWSDOC-EXAMPLE-BUCKET/example.mp4\n

                \n

                \n s3://AWSDOC-EXAMPLE-BUCKET/mediadocs/example.mp4\n

                \n

                For more information about S3 object names, see Object Keys in the\n Amazon S3 Developer Guide.

                " + "smithy.api#documentation": "

                The S3 object location of the input media file. The URI must be in the same region as \n the API endpoint that you are calling. The general form is:

                \n

                \n s3://DOC-EXAMPLE-BUCKET/keyprefix/objectkey\n

                \n

                For example:

                \n

                \n s3://DOC-EXAMPLE-BUCKET/example.flac\n

                \n

                \n s3://DOC-EXAMPLE-BUCKET/mediafiles/example.flac\n

                \n

                For more information about S3 object names, see Object Keys in the\n Amazon S3 Developer Guide.

                " } }, "RedactedMediaFileUri": { @@ -3071,6 +3071,16 @@ ] } }, + "com.amazonaws.transcribe#MedicalMediaSampleRateHertz": { + "type": "integer", + "traits": { + "smithy.api#box": {}, + "smithy.api#range": { + "min": 16000, + "max": 48000 + } + } + }, "com.amazonaws.transcribe#MedicalTranscript": { "type": "structure", "members": { @@ -3107,7 +3117,7 @@ } }, "MediaSampleRateHertz": { - "target": "com.amazonaws.transcribe#MediaSampleRateHertz", + "target": "com.amazonaws.transcribe#MedicalMediaSampleRateHertz", "traits": { "smithy.api#documentation": "

                The sample rate, in Hertz, of the source audio containing medical information.

                \n

                If you don't specify the sample rate, Amazon Transcribe Medical determines it for you. If you choose to \n specify the sample rate, it must match the rate detected by Amazon Transcribe Medical.

                " } @@ -3951,7 +3961,7 @@ } }, "MediaSampleRateHertz": { - "target": "com.amazonaws.transcribe#MediaSampleRateHertz", + "target": "com.amazonaws.transcribe#MedicalMediaSampleRateHertz", "traits": { "smithy.api#documentation": "

                The sample rate, in Hertz, of the audio track in the input media file.

                \n

                If you do not specify the media sample rate, Amazon Transcribe Medical determines the sample rate. If you \n specify the sample rate, it must match the rate detected by Amazon Transcribe Medical. In most cases, you\n should leave the MediaSampleRateHertz field blank and let Amazon Transcribe Medical determine\n the sample rate.

                " } @@ -4022,7 +4032,7 @@ "Tags": { "target": "com.amazonaws.transcribe#TagList", "traits": { - "smithy.api#documentation": "

                Add tags to an Amazon Transcribe medical transcription job.

                " + "smithy.api#documentation": "

                Add tags to an Amazon Transcribe Medical transcription job.

                " } } } @@ -4263,12 +4273,12 @@ "SubtitleFileUris": { "target": "com.amazonaws.transcribe#SubtitleFileUris", "traits": { - "smithy.api#documentation": "

                Choose the output location for your subtitle file. This location must be an S3 \n bucket.

                " + "smithy.api#documentation": "

                Contains the output location for your subtitle file. This location must be an S3 \n bucket.

                " } } }, "traits": { - "smithy.api#documentation": "

                Specify the output format for your subtitle file.

                " + "smithy.api#documentation": "

                Choose the output format for your subtitle file and the S3 location where you want\n your file saved.

                " } }, "com.amazonaws.transcribe#Tag": { @@ -5071,7 +5081,7 @@ "VocabularyFileUri": { "target": "com.amazonaws.transcribe#Uri", "traits": { - "smithy.api#documentation": "

                The location in Amazon S3 of the text file that contains your custom vocabulary. The URI must\n be in the same Amazon Web Services Region as the resource that you are calling. The following\n is the format for a URI:

                \n

                \n https://s3..amazonaws.com/// \n

                \n

                For example:

                \n

                \n https://s3.us-east-1.amazonaws.com/AWSDOC-EXAMPLE-BUCKET/vocab.txt\n

                \n

                For more information about Amazon S3 object names, see Object Keys in the\n Amazon S3 Developer Guide.

                \n

                For more information about custom vocabularies in Amazon Transcribe Medical, see Medical Custom\n Vocabularies.

                " + "smithy.api#documentation": "

                The location in Amazon S3 of the text file that contains your custom vocabulary. The URI must\n be in the same Amazon Web Services Region as the resource that you are calling. The following\n is the format for a URI:

                \n

                \n https://s3.aws-region.amazonaws.com/bucket-name/keyprefix/objectkey\n

                \n

                For example:

                \n

                \n https://s3.us-east-1.amazonaws.com/DOC-EXAMPLE-BUCKET/vocab.txt\n

                \n

                For more information about Amazon S3 object names, see Object Keys in the\n Amazon S3 Developer Guide.

                \n

                For more information about custom vocabularies in Amazon Transcribe Medical, see Medical Custom\n Vocabularies.

                " } } } @@ -5245,7 +5255,7 @@ "VocabularyFileUri": { "target": "com.amazonaws.transcribe#Uri", "traits": { - "smithy.api#documentation": "

                The S3 location of the text file that contains the definition of the custom vocabulary. \n The URI must be in the same region as the API endpoint that you are calling. The general form\n is:

                \n

                \n https://s3..amazonaws.com///\n

                \n

                For example:

                \n

                \n https://s3.us-east-1.amazonaws.com/AWSDOC-EXAMPLE-BUCKET/vocab.txt\n

                \n

                For more information about S3 object names, see Object Keys in the\n Amazon S3 Developer Guide.

                \n

                For more information about custom vocabularies, see Custom \n Vocabularies.

                " + "smithy.api#documentation": "

                The S3 location of the text file that contains the definition of the custom vocabulary. \n The URI must be in the same region as the API endpoint that you are calling. The general form\n is:

                \n

                \n https://s3.aws-region.amazonaws.com/bucket-name/keyprefix/objectkey\n

                \n

                For example:

                \n

                \n https://s3.us-east-1.amazonaws.com/DOC-EXAMPLE-BUCKET/vocab.txt\n

                \n

                For more information about S3 object names, see Object Keys in the\n Amazon S3 Developer Guide.

                \n

                For more information about custom vocabularies, see Custom \n Vocabularies.

                " } } } diff --git a/codegen/sdk-codegen/aws-models/transfer.json b/codegen/sdk-codegen/aws-models/transfer.json index 08295710f6a7..1e2b224e6473 100644 --- a/codegen/sdk-codegen/aws-models/transfer.json +++ b/codegen/sdk-codegen/aws-models/transfer.json @@ -320,10 +320,16 @@ } }, "PostAuthenticationLoginBanner": { - "target": "com.amazonaws.transfer#PostAuthenticationLoginBanner" + "target": "com.amazonaws.transfer#PostAuthenticationLoginBanner", + "traits": { + "smithy.api#documentation": "

                Specify a string to display when users connect to a server. This string is displayed after the user authenticates.

                \n \n

                The SFTP protocol does not support post-authentication display banners.

                \n
                " + } }, "PreAuthenticationLoginBanner": { - "target": "com.amazonaws.transfer#PreAuthenticationLoginBanner" + "target": "com.amazonaws.transfer#PreAuthenticationLoginBanner", + "traits": { + "smithy.api#documentation": "

                Specify a string to display when users connect to a server. This string is displayed before the user authenticates.\n For example, the following banner displays details about using the system.

                \n

                \n This system is for the use of authorized users only. Individuals using this computer system without authority,\n or in excess of their authority, are subject to having all of their activities on this system monitored and recorded by\n system personnel.\n

                " + } }, "Protocols": { "target": "com.amazonaws.transfer#Protocols", @@ -1062,7 +1068,59 @@ } ], "traits": { - "smithy.api#documentation": "

                Describes a file transfer protocol-enabled server that you specify by passing the\n ServerId parameter.

                \n\n

                The response contains a description of a server's properties. When you set\n EndpointType to VPC, the response will contain the\n EndpointDetails.

                " + "smithy.api#documentation": "

                Describes a file transfer protocol-enabled server that you specify by passing the\n ServerId parameter.

                \n\n

                The response contains a description of a server's properties. When you set\n EndpointType to VPC, the response will contain the\n EndpointDetails.

                ", + "smithy.waiters#waitable": { + "ServerOffline": { + "acceptors": [ + { + "state": "success", + "matcher": { + "output": { + "path": "Server.State", + "expected": "OFFLINE", + "comparator": "stringEquals" + } + } + }, + { + "state": "failure", + "matcher": { + "output": { + "path": "Server.State", + "expected": "STOP_FAILED", + "comparator": "stringEquals" + } + } + } + ], + "minDelay": 30 + }, + "ServerOnline": { + "acceptors": [ + { + "state": "success", + "matcher": { + "output": { + "path": "Server.State", + "expected": "ONLINE", + "comparator": "stringEquals" + } + } + }, + { + "state": "failure", + "matcher": { + "output": { + "path": "Server.State", + "expected": "START_FAILED", + "comparator": "stringEquals" + } + } + } + ], + "minDelay": 30 + } + } } }, "com.amazonaws.transfer#DescribeServerRequest": { @@ -1413,10 +1471,16 @@ } }, "PostAuthenticationLoginBanner": { - "target": "com.amazonaws.transfer#PostAuthenticationLoginBanner" + "target": "com.amazonaws.transfer#PostAuthenticationLoginBanner", + "traits": { + "smithy.api#documentation": "

                Specify a string to display when users connect to a server. This string is displayed after the user authenticates.

                \n \n

                The SFTP protocol does not support post-authentication display banners.

                \n
                " + } }, "PreAuthenticationLoginBanner": { - "target": "com.amazonaws.transfer#PreAuthenticationLoginBanner" + "target": "com.amazonaws.transfer#PreAuthenticationLoginBanner", + "traits": { + "smithy.api#documentation": "

                Specify a string to display when users connect to a server. This string is displayed before the user authenticates.\n For example, the following banner displays details about using the system.

                \n

                \n This system is for the use of authorized users only. Individuals using this computer system without authority,\n or in excess of their authority, are subject to having all of their activities on this system monitored and recorded by\n system personnel.\n

                " + } }, "Protocols": { "target": "com.amazonaws.transfer#Protocols", @@ -1707,7 +1771,7 @@ "Type": { "target": "com.amazonaws.transfer#ExecutionErrorType", "traits": { - "smithy.api#documentation": "

                Specifies the error type: currently, the only valid value is PERMISSION_DENIED, which occurs\n if your policy does not contain the correct permissions to complete one or more of the steps in the workflow.

                ", + "smithy.api#documentation": "

                Specifies the error type.

                \n
                  \n
                • \n

                  \n ALREADY_EXISTS: occurs for a copy step, if the overwrite option is not selected and a file with the same name already exists in the target location.

                  \n
                • \n
                • \n

                  \n BAD_REQUEST: a general bad request: for example, a step that attempts to\n tag an EFS file returns BAD_REQUEST, as only S3 files can be tagged.

                  \n
                • \n
                • \n

                  \n CUSTOM_STEP_FAILED: occurs when the custom step provided a callback that indicates failure.

                  \n
                • \n
                • \n

                  \n INTERNAL_SERVER_ERROR: a catch-all error that can occur for a variety of\n reasons.

                  \n
                • \n
                • \n

                  \n NOT_FOUND: occurs when a requested entity, for example a source file for\n a copy step, does not exist.

                  \n
                • \n
                • \n

                  \n PERMISSION_DENIED: occurs if your policy does not contain the correct\n permissions to complete one or more of the steps in the workflow.

                  \n
                • \n
                • \n

                  \n TIMEOUT: occurs when the execution times out.

                  \n \n

                  You can set the TimeoutSeconds for a custom step, anywhere from 1 second to 1800 seconds (30 minutes).

                  \n
                  \n
                • \n
                • \n

                  \n THROTTLED: occurs if you exceed the new execution refill rate of one\n workflow per second.

                  \n
                • \n
                ", "smithy.api#required": {} } }, @@ -1733,6 +1797,34 @@ { "value": "PERMISSION_DENIED", "name": "PERMISSION_DENIED" + }, + { + "value": "CUSTOM_STEP_FAILED", + "name": "CUSTOM_STEP_FAILED" + }, + { + "value": "THROTTLED", + "name": "THROTTLED" + }, + { + "value": "ALREADY_EXISTS", + "name": "ALREADY_EXISTS" + }, + { + "value": "NOT_FOUND", + "name": "NOT_FOUND" + }, + { + "value": "BAD_REQUEST", + "name": "BAD_REQUEST" + }, + { + "value": "TIMEOUT", + "name": "TIMEOUT" + }, + { + "value": "INTERNAL_SERVER_ERROR", + "name": "INTERNAL_SERVER_ERROR" } ] } @@ -4374,10 +4466,16 @@ } }, "PostAuthenticationLoginBanner": { - "target": "com.amazonaws.transfer#PostAuthenticationLoginBanner" + "target": "com.amazonaws.transfer#PostAuthenticationLoginBanner", + "traits": { + "smithy.api#documentation": "

                Specify a string to display when users connect to a server. This string is displayed after the user authenticates.

                \n \n

                The SFTP protocol does not support post-authentication display banners.

                \n
                " + } }, "PreAuthenticationLoginBanner": { - "target": "com.amazonaws.transfer#PreAuthenticationLoginBanner" + "target": "com.amazonaws.transfer#PreAuthenticationLoginBanner", + "traits": { + "smithy.api#documentation": "

                Specify a string to display when users connect to a server. This string is displayed before the user authenticates.\n For example, the following banner displays details about using the system.

                \n

                \n This system is for the use of authorized users only. Individuals using this computer system without authority,\n or in excess of their authority, are subject to having all of their activities on this system monitored and recorded by\n system personnel.\n

                " + } }, "Protocols": { "target": "com.amazonaws.transfer#Protocols", diff --git a/codegen/smithy-aws-typescript-codegen/src/main/resources/software/amazon/smithy/aws/typescript/codegen/endpoints.json b/codegen/smithy-aws-typescript-codegen/src/main/resources/software/amazon/smithy/aws/typescript/codegen/endpoints.json index 51bd7bd97f8f..7e53a562ed26 100644 --- a/codegen/smithy-aws-typescript-codegen/src/main/resources/software/amazon/smithy/aws/typescript/codegen/endpoints.json +++ b/codegen/smithy-aws-typescript-codegen/src/main/resources/software/amazon/smithy/aws/typescript/codegen/endpoints.json @@ -461,6 +461,27 @@ "us-west-2": {} } }, + "amplifyuibuilder": { + "endpoints": { + "ap-northeast-1": {}, + "ap-northeast-2": {}, + "ap-south-1": {}, + "ap-southeast-1": {}, + "ap-southeast-2": {}, + "ca-central-1": {}, + "eu-central-1": {}, + "eu-north-1": {}, + "eu-west-1": {}, + "eu-west-2": {}, + "eu-west-3": {}, + "me-south-1": {}, + "sa-east-1": {}, + "us-east-1": {}, + "us-east-2": {}, + "us-west-1": {}, + "us-west-2": {} + } + }, "api.detective": { "defaults": { "protocols": ["https"] @@ -1320,9 +1341,51 @@ "endpoints": { "ap-northeast-1": {}, "eu-west-1": {}, - "us-east-1": {}, - "us-east-2": {}, - "us-west-2": {} + "fips-us-east-1": { + "credentialScope": { + "region": "us-east-1" + }, + "deprecated": true, + "hostname": "apprunner-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2": { + "credentialScope": { + "region": "us-east-2" + }, + "deprecated": true, + "hostname": "apprunner-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-2": { + "credentialScope": { + "region": "us-west-2" + }, + "deprecated": true, + "hostname": "apprunner-fips.us-west-2.amazonaws.com" + }, + "us-east-1": { + "variants": [ + { + "hostname": "apprunner-fips.us-east-1.amazonaws.com", + "tags": ["fips"] + } + ] + }, + "us-east-2": { + "variants": [ + { + "hostname": "apprunner-fips.us-east-2.amazonaws.com", + "tags": ["fips"] + } + ] + }, + "us-west-2": { + 
"variants": [ + { + "hostname": "apprunner-fips.us-west-2.amazonaws.com", + "tags": ["fips"] + } + ] + } } }, "appstream2": { @@ -1585,6 +1648,7 @@ "ap-south-1": {}, "ap-southeast-1": {}, "ap-southeast-2": {}, + "ap-southeast-3": {}, "ca-central-1": {}, "eu-central-1": {}, "eu-north-1": {}, @@ -1691,6 +1755,7 @@ }, "braket": { "endpoints": { + "eu-west-2": {}, "us-east-1": {}, "us-west-1": {}, "us-west-2": {} @@ -7002,6 +7067,10 @@ }, "ivs": { "endpoints": { + "ap-northeast-1": {}, + "ap-northeast-2": {}, + "ap-south-1": {}, + "eu-central-1": {}, "eu-west-1": {}, "us-east-1": {}, "us-west-2": {} @@ -8738,6 +8807,7 @@ "ap-south-1": {}, "ap-southeast-1": {}, "ap-southeast-2": {}, + "ap-southeast-3": {}, "ca-central-1": {}, "eu-central-1": {}, "eu-north-1": {}, @@ -15637,6 +15707,14 @@ } }, "autoscaling": { + "defaults": { + "variants": [ + { + "hostname": "autoscaling.{region}.{dnsSuffix}", + "tags": ["fips"] + } + ] + }, "endpoints": { "us-gov-east-1": { "protocols": ["http", "https"] @@ -15780,17 +15858,35 @@ }, "cloudtrail": { "endpoints": { - "us-gov-east-1": { + "fips-us-gov-east-1": { "credentialScope": { "region": "us-gov-east-1" }, + "deprecated": true, "hostname": "cloudtrail.us-gov-east-1.amazonaws.com" }, - "us-gov-west-1": { + "fips-us-gov-west-1": { "credentialScope": { "region": "us-gov-west-1" }, + "deprecated": true, "hostname": "cloudtrail.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1": { + "variants": [ + { + "hostname": "cloudtrail.us-gov-east-1.amazonaws.com", + "tags": ["fips"] + } + ] + }, + "us-gov-west-1": { + "variants": [ + { + "hostname": "cloudtrail.us-gov-west-1.amazonaws.com", + "tags": ["fips"] + } + ] } } }, @@ -16664,17 +16760,35 @@ }, "events": { "endpoints": { - "us-gov-east-1": { + "fips-us-gov-east-1": { "credentialScope": { "region": "us-gov-east-1" }, + "deprecated": true, "hostname": "events.us-gov-east-1.amazonaws.com" }, - "us-gov-west-1": { + "fips-us-gov-west-1": { "credentialScope": { "region": "us-gov-west-1" }, + 
"deprecated": true, "hostname": "events.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1": { + "variants": [ + { + "hostname": "events.us-gov-east-1.amazonaws.com", + "tags": ["fips"] + } + ] + }, + "us-gov-west-1": { + "variants": [ + { + "hostname": "events.us-gov-west-1.amazonaws.com", + "tags": ["fips"] + } + ] } } }, @@ -17382,17 +17496,35 @@ }, "logs": { "endpoints": { - "us-gov-east-1": { + "fips-us-gov-east-1": { "credentialScope": { "region": "us-gov-east-1" }, + "deprecated": true, "hostname": "logs.us-gov-east-1.amazonaws.com" }, - "us-gov-west-1": { + "fips-us-gov-west-1": { "credentialScope": { "region": "us-gov-west-1" }, + "deprecated": true, "hostname": "logs.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1": { + "variants": [ + { + "hostname": "logs.us-gov-east-1.amazonaws.com", + "tags": ["fips"] + } + ] + }, + "us-gov-west-1": { + "variants": [ + { + "hostname": "logs.us-gov-west-1.amazonaws.com", + "tags": ["fips"] + } + ] } } }, @@ -18461,22 +18593,48 @@ }, "sns": { "endpoints": { - "us-gov-east-1": { + "fips-us-gov-east-1": { "credentialScope": { "region": "us-gov-east-1" }, + "deprecated": true, "hostname": "sns.us-gov-east-1.amazonaws.com" }, - "us-gov-west-1": { + "fips-us-gov-west-1": { "credentialScope": { "region": "us-gov-west-1" }, - "hostname": "sns.us-gov-west-1.amazonaws.com", - "protocols": ["http", "https"] + "deprecated": true, + "hostname": "sns.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1": { + "variants": [ + { + "hostname": "sns.us-gov-east-1.amazonaws.com", + "tags": ["fips"] + } + ] + }, + "us-gov-west-1": { + "protocols": ["http", "https"], + "variants": [ + { + "hostname": "sns.us-gov-west-1.amazonaws.com", + "tags": ["fips"] + } + ] } } }, "sqs": { + "defaults": { + "variants": [ + { + "hostname": "sqs.{region}.{dnsSuffix}", + "tags": ["fips"] + } + ] + }, "endpoints": { "us-gov-east-1": { "credentialScope": { diff --git a/tests/functional/endpoints/test_cases_supported.json 
b/tests/functional/endpoints/test_cases_supported.json index d2d5df03c931..9c84b5dfa5b1 100644 --- a/tests/functional/endpoints/test_cases_supported.json +++ b/tests/functional/endpoints/test_cases_supported.json @@ -10069,7 +10069,7 @@ "region": "us-gov-east-1", "useFipsEndpoint": true, "useDualstackEndpoint": false, - "hostname": "autoscaling-fips.us-gov-east-1.amazonaws.com" + "hostname": "autoscaling.us-gov-east-1.amazonaws.com" }, { "endpointPrefix": "autoscaling",