From d91b82e76c73dde99b14cfc59560b5084cc01fa9 Mon Sep 17 00:00:00 2001
From: aws-sdk-cpp-automation The timestamp when the certificate that was used by edge-optimized endpoint
- * for this domain name was uploaded.
The timestamp when the certificate that was used by edge-optimized endpoint - * for this domain name was uploaded.
+ * for this domain name was uploaded. API Gateway doesn't change this value if you + * update the certificate. */ inline const Aws::Utils::DateTime& GetCertificateUploadDate() const{ return m_certificateUploadDate; } inline bool CertificateUploadDateHasBeenSet() const { return m_certificateUploadDateHasBeenSet; } diff --git a/generated/src/aws-cpp-sdk-apigateway/include/aws/apigateway/model/GetDomainNameResult.h b/generated/src/aws-cpp-sdk-apigateway/include/aws/apigateway/model/GetDomainNameResult.h index 5e3fdd4d371..49e153b15ba 100644 --- a/generated/src/aws-cpp-sdk-apigateway/include/aws/apigateway/model/GetDomainNameResult.h +++ b/generated/src/aws-cpp-sdk-apigateway/include/aws/apigateway/model/GetDomainNameResult.h @@ -90,7 +90,8 @@ namespace Model ///@{ /** *The timestamp when the certificate that was used by edge-optimized endpoint - * for this domain name was uploaded.
+ * for this domain name was uploaded. API Gateway doesn't change this value if you + * update the certificate. */ inline const Aws::Utils::DateTime& GetCertificateUploadDate() const{ return m_certificateUploadDate; } inline void SetCertificateUploadDate(const Aws::Utils::DateTime& value) { m_certificateUploadDate = value; } diff --git a/generated/src/aws-cpp-sdk-apigateway/include/aws/apigateway/model/UpdateDomainNameResult.h b/generated/src/aws-cpp-sdk-apigateway/include/aws/apigateway/model/UpdateDomainNameResult.h index 42046948dde..85877f48353 100644 --- a/generated/src/aws-cpp-sdk-apigateway/include/aws/apigateway/model/UpdateDomainNameResult.h +++ b/generated/src/aws-cpp-sdk-apigateway/include/aws/apigateway/model/UpdateDomainNameResult.h @@ -90,7 +90,8 @@ namespace Model ///@{ /** *The timestamp when the certificate that was used by edge-optimized endpoint - * for this domain name was uploaded.
+ * for this domain name was uploaded. API Gateway doesn't change this value if you + * update the certificate. */ inline const Aws::Utils::DateTime& GetCertificateUploadDate() const{ return m_certificateUploadDate; } inline void SetCertificateUploadDate(const Aws::Utils::DateTime& value) { m_certificateUploadDate = value; } diff --git a/generated/src/aws-cpp-sdk-athena/include/aws/athena/model/ConnectionType.h b/generated/src/aws-cpp-sdk-athena/include/aws/athena/model/ConnectionType.h new file mode 100644 index 00000000000..2baba5297f1 --- /dev/null +++ b/generated/src/aws-cpp-sdk-athena/include/aws/athena/model/ConnectionType.h @@ -0,0 +1,59 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#pragma once +#includeThe type of data catalog to create: LAMBDA
for a federated
- * catalog, HIVE
for an external hive metastore, or GLUE
- * for an Glue Data Catalog.
GLUE
for an Glue Data Catalog, and HIVE
for
+ * an external Apache Hive metastore. FEDERATED
is a federated catalog
+ * for which Athena creates the connection and the Lambda function for you based on
+ * the parameters that you pass.
*/
inline const DataCatalogType& GetType() const{ return m_type; }
inline bool TypeHasBeenSet() const { return m_typeHasBeenSet; }
@@ -106,7 +108,20 @@ namespace Model
* catalog-id=catalog_id
The
* GLUE
data catalog type also applies to the default
* AwsDataCatalog
that already exists in your account, of which you
- * can have only one and cannot modify.
The
+ * FEDERATED
data catalog type uses one of the following parameters,
+ * but not both. Use connection-arn
for an existing Glue connection.
+ * Use connection-type
and connection-properties
to
+ * specify the configuration setting for a new connection.
+ * connection-arn:<glue_connection_arn_to_reuse>
lambda-role-arn
(optional): The execution role to
+ * use for the Lambda function. If not provided, one is created.
+ * connection-type:MYSQL|REDSHIFT|....,
+ * connection-properties:"<json_string>"
For
+ * <json_string>
, use escaped JSON text, as in the
+ * following example.
+ * "{\"spill_bucket\":\"my_spill\",\"spill_prefix\":\"athena-spill\",\"host\":\"abc12345.snowflakecomputing.com\",\"port\":\"1234\",\"warehouse\":\"DEV_WH\",\"database\":\"TEST\",\"schema\":\"PUBLIC\",\"SecretArn\":\"arn:aws:secretsmanager:ap-south-1:111122223333:secret:snowflake-XHb67j\"}"
+ *
The type of data catalog to create: LAMBDA
for a federated
- * catalog, HIVE
for an external hive metastore, or GLUE
- * for an Glue Data Catalog.
GLUE
for an Glue Data Catalog, and HIVE
for
+ * an external Apache Hive metastore. FEDERATED
is a federated catalog
+ * for which Athena creates the connection and the Lambda function for you based on
+ * the parameters that you pass.
*/
inline const DataCatalogType& GetType() const{ return m_type; }
inline bool TypeHasBeenSet() const { return m_typeHasBeenSet; }
@@ -110,7 +114,18 @@ namespace Model
* catalog-id=catalog_id
The
* GLUE
data catalog type also applies to the default
* AwsDataCatalog
that already exists in your account, of which you
- * can have only one and cannot modify.
The
+ * FEDERATED
data catalog type uses one of the following parameters,
+ * but not both. Use connection-arn
for an existing Glue connection.
+ * Use connection-type
and connection-properties
to
+ * specify the configuration setting for a new connection.
+ * connection-arn:<glue_connection_arn_to_reuse>
connection-type:MYSQL|REDSHIFT|....,
+ * connection-properties:"<json_string>"
For
+ * <json_string>
, use escaped JSON text, as in the
+ * following example.
+ * "{\"spill_bucket\":\"my_spill\",\"spill_prefix\":\"athena-spill\",\"host\":\"abc12345.snowflakecomputing.com\",\"port\":\"1234\",\"warehouse\":\"DEV_WH\",\"database\":\"TEST\",\"schema\":\"PUBLIC\",\"SecretArn\":\"arn:aws:secretsmanager:ap-south-1:111122223333:secret:snowflake-XHb67j\"}"
+ *
The status of the creation or deletion of the data catalog.
The LAMBDA
, GLUE
, and HIVE
data
+ * catalog types are created synchronously. Their status is either
+ * CREATE_COMPLETE
or CREATE_FAILED
.
The FEDERATED
data catalog type is created asynchronously.
Data catalog creation status:
+ * CREATE_IN_PROGRESS
: Federated data catalog creation in
+ * progress.
CREATE_COMPLETE
: Data catalog creation
+ * complete.
CREATE_FAILED
: Data catalog could not
+ * be created.
CREATE_FAILED_CLEANUP_IN_PROGRESS
:
+ * Federated data catalog creation failed and is being removed.
+ * CREATE_FAILED_CLEANUP_COMPLETE
: Federated data catalog creation
+ * failed and was removed.
+ * CREATE_FAILED_CLEANUP_FAILED
: Federated data catalog creation
+ * failed but could not be removed.
Data catalog deletion + * status:
DELETE_IN_PROGRESS
: Federated data
+ * catalog deletion in progress.
DELETE_COMPLETE
:
+ * Federated data catalog deleted.
DELETE_FAILED
:
+ * Federated data catalog could not be deleted.
The type of connection for a FEDERATED
data catalog (for
+ * example, REDSHIFT
, MYSQL
, or SQLSERVER
).
+ * For information about individual connectors, see Available
+ * data source connectors.
Text of the error that occurred during data catalog creation or deletion.
+ */ + inline const Aws::String& GetError() const{ return m_error; } + inline bool ErrorHasBeenSet() const { return m_errorHasBeenSet; } + inline void SetError(const Aws::String& value) { m_errorHasBeenSet = true; m_error = value; } + inline void SetError(Aws::String&& value) { m_errorHasBeenSet = true; m_error = std::move(value); } + inline void SetError(const char* value) { m_errorHasBeenSet = true; m_error.assign(value); } + inline DataCatalog& WithError(const Aws::String& value) { SetError(value); return *this;} + inline DataCatalog& WithError(Aws::String&& value) { SetError(std::move(value)); return *this;} + inline DataCatalog& WithError(const char* value) { SetError(value); return *this;} + ///@} private: Aws::String m_name; @@ -139,6 +214,15 @@ namespace Model Aws::MapThe status of the creation or deletion of the data catalog.
The LAMBDA
, GLUE
, and HIVE
data
+ * catalog types are created synchronously. Their status is either
+ * CREATE_COMPLETE
or CREATE_FAILED
.
The FEDERATED
data catalog type is created asynchronously.
Data catalog creation status:
+ * CREATE_IN_PROGRESS
: Federated data catalog creation in
+ * progress.
CREATE_COMPLETE
: Data catalog creation
+ * complete.
CREATE_FAILED
: Data catalog could not
+ * be created.
CREATE_FAILED_CLEANUP_IN_PROGRESS
:
+ * Federated data catalog creation failed and is being removed.
+ * CREATE_FAILED_CLEANUP_COMPLETE
: Federated data catalog creation
+ * failed and was removed.
+ * CREATE_FAILED_CLEANUP_FAILED
: Federated data catalog creation
+ * failed but could not be removed.
Data catalog deletion + * status:
DELETE_IN_PROGRESS
: Federated data
+ * catalog deletion in progress.
DELETE_COMPLETE
:
+ * Federated data catalog deleted.
DELETE_FAILED
:
+ * Federated data catalog could not be deleted.
The type of connection for a FEDERATED
data catalog (for
+ * example, REDSHIFT
, MYSQL
, or SQLSERVER
).
+ * For information about individual connectors, see Available
+ * data source connectors.
Text of the error that occurred during data catalog creation or deletion.
+ */ + inline const Aws::String& GetError() const{ return m_error; } + inline bool ErrorHasBeenSet() const { return m_errorHasBeenSet; } + inline void SetError(const Aws::String& value) { m_errorHasBeenSet = true; m_error = value; } + inline void SetError(Aws::String&& value) { m_errorHasBeenSet = true; m_error = std::move(value); } + inline void SetError(const char* value) { m_errorHasBeenSet = true; m_error.assign(value); } + inline DataCatalogSummary& WithError(const Aws::String& value) { SetError(value); return *this;} + inline DataCatalogSummary& WithError(Aws::String&& value) { SetError(std::move(value)); return *this;} + inline DataCatalogSummary& WithError(const char* value) { SetError(value); return *this;} + ///@} private: Aws::String m_catalogName; @@ -74,6 +136,15 @@ namespace Model DataCatalogType m_type; bool m_typeHasBeenSet = false; + + DataCatalogStatus m_status; + bool m_statusHasBeenSet = false; + + ConnectionType m_connectionType; + bool m_connectionTypeHasBeenSet = false; + + Aws::String m_error; + bool m_errorHasBeenSet = false; }; } // namespace Model diff --git a/generated/src/aws-cpp-sdk-athena/include/aws/athena/model/DataCatalogType.h b/generated/src/aws-cpp-sdk-athena/include/aws/athena/model/DataCatalogType.h index 874a85fcef3..32cc4f80b99 100644 --- a/generated/src/aws-cpp-sdk-athena/include/aws/athena/model/DataCatalogType.h +++ b/generated/src/aws-cpp-sdk-athena/include/aws/athena/model/DataCatalogType.h @@ -18,7 +18,8 @@ namespace Model NOT_SET, LAMBDA, GLUE, - HIVE + HIVE, + FEDERATED }; namespace DataCatalogTypeMapper diff --git a/generated/src/aws-cpp-sdk-athena/include/aws/athena/model/DeleteDataCatalogResult.h b/generated/src/aws-cpp-sdk-athena/include/aws/athena/model/DeleteDataCatalogResult.h index 38f89f2c06b..a60f73e4ff5 100644 --- a/generated/src/aws-cpp-sdk-athena/include/aws/athena/model/DeleteDataCatalogResult.h +++ b/generated/src/aws-cpp-sdk-athena/include/aws/athena/model/DeleteDataCatalogResult.h @@ -5,6 +5,7 @@ 
#pragma once #includeThe foundation model to be used for orchestration by the agent you - * create.
+ *The Amazon Resource Name (ARN) of the foundation model to be used for + * orchestration by the agent you create.
*/ inline const Aws::String& GetFoundationModel() const{ return m_foundationModel; } inline bool FoundationModelHasBeenSet() const { return m_foundationModelHasBeenSet; } diff --git a/generated/src/aws-cpp-sdk-bedrock-agent/include/aws/bedrock-agent/model/KnowledgeBaseFlowNodeConfiguration.h b/generated/src/aws-cpp-sdk-bedrock-agent/include/aws/bedrock-agent/model/KnowledgeBaseFlowNodeConfiguration.h index 12f21def7fb..322a3ca255d 100644 --- a/generated/src/aws-cpp-sdk-bedrock-agent/include/aws/bedrock-agent/model/KnowledgeBaseFlowNodeConfiguration.h +++ b/generated/src/aws-cpp-sdk-bedrock-agent/include/aws/bedrock-agent/model/KnowledgeBaseFlowNodeConfiguration.h @@ -59,9 +59,10 @@ namespace Model ///@{ /** - *The unique identifier of the model to use to generate a response from the - * query results. Omit this field if you want to return the retrieved results as an - * array.
+ *The unique identifier of the model or inference + * profile to use to generate a response from the query results. Omit this + * field if you want to return the retrieved results as an array.
*/ inline const Aws::String& GetModelId() const{ return m_modelId; } inline bool ModelIdHasBeenSet() const { return m_modelIdHasBeenSet; } diff --git a/generated/src/aws-cpp-sdk-bedrock-agent/include/aws/bedrock-agent/model/PromptFlowNodeInlineConfiguration.h b/generated/src/aws-cpp-sdk-bedrock-agent/include/aws/bedrock-agent/model/PromptFlowNodeInlineConfiguration.h index d8d19318f9e..8082e7f432d 100644 --- a/generated/src/aws-cpp-sdk-bedrock-agent/include/aws/bedrock-agent/model/PromptFlowNodeInlineConfiguration.h +++ b/generated/src/aws-cpp-sdk-bedrock-agent/include/aws/bedrock-agent/model/PromptFlowNodeInlineConfiguration.h @@ -55,7 +55,9 @@ namespace Model ///@{ /** - *The unique identifier of the model to run inference with.
+ *The unique identifier of the model or inference + * profile to run inference with.
*/ inline const Aws::String& GetModelId() const{ return m_modelId; } inline bool ModelIdHasBeenSet() const { return m_modelIdHasBeenSet; } diff --git a/generated/src/aws-cpp-sdk-bedrock-agent/include/aws/bedrock-agent/model/PromptOverrideConfiguration.h b/generated/src/aws-cpp-sdk-bedrock-agent/include/aws/bedrock-agent/model/PromptOverrideConfiguration.h index 6276f61e52d..055dbff7625 100644 --- a/generated/src/aws-cpp-sdk-bedrock-agent/include/aws/bedrock-agent/model/PromptOverrideConfiguration.h +++ b/generated/src/aws-cpp-sdk-bedrock-agent/include/aws/bedrock-agent/model/PromptOverrideConfiguration.h @@ -49,7 +49,7 @@ namespace Model * of thepromptConfigurations
must contain a parserMode
* value that is set to OVERRIDDEN
. For more information, see Parser
- * Lambda function in Agents for Amazon Bedrock.
+ * Lambda function in Amazon Bedrock Agents.
*/
inline const Aws::String& GetOverrideLambda() const{ return m_overrideLambda; }
inline bool OverrideLambdaHasBeenSet() const { return m_overrideLambdaHasBeenSet; }
diff --git a/generated/src/aws-cpp-sdk-bedrock-agent/include/aws/bedrock-agent/model/PromptVariant.h b/generated/src/aws-cpp-sdk-bedrock-agent/include/aws/bedrock-agent/model/PromptVariant.h
index c8c989e69a4..5fb2e80674f 100644
--- a/generated/src/aws-cpp-sdk-bedrock-agent/include/aws/bedrock-agent/model/PromptVariant.h
+++ b/generated/src/aws-cpp-sdk-bedrock-agent/include/aws/bedrock-agent/model/PromptVariant.h
@@ -73,8 +73,9 @@ namespace Model
///@{
/**
- * The unique identifier of the model with which to run inference on the - * prompt.
+ *The unique identifier of the model or inference + * profile with which to run inference on the prompt.
*/ inline const Aws::String& GetModelId() const{ return m_modelId; } inline bool ModelIdHasBeenSet() const { return m_modelIdHasBeenSet; } diff --git a/generated/src/aws-cpp-sdk-ec2/include/aws/ec2/EC2Client.h b/generated/src/aws-cpp-sdk-ec2/include/aws/ec2/EC2Client.h index e0d6f7f13b0..fe0fcd0124b 100644 --- a/generated/src/aws-cpp-sdk-ec2/include/aws/ec2/EC2Client.h +++ b/generated/src/aws-cpp-sdk-ec2/include/aws/ec2/EC2Client.h @@ -1496,9 +1496,7 @@ namespace EC2 * or volume. The action removes all artifacts of the conversion, including a * partially uploaded volume or instance. If the conversion is complete or is in * the process of transferring the final disk image, the command fails and returns - * an exception.For more information, see Importing - * a Virtual Machine Using the Amazon EC2 CLI.
We recommend that you use the
- * ImportImage
API. For more information, see ImportImage API instead. For more information, see Importing
* a VM as an image using VM Import/Export in the VM Import/Export User
* Guide.
Creates an import instance task using metadata from - * the specified disk image.
This API action is not supported by the Command - * Line Interface (CLI). For information about using the Amazon EC2 CLI, which is - * deprecated, see Importing - * a VM to Amazon EC2 in the Amazon EC2 CLI Reference PDF file.
- *This API action supports only single-volume VMs. To import multi-volume VMs, - * use ImportImage instead.
For information about the import manifest
- * referenced by this API action, see This API action supports only single-volume
+ * VMs. To import multi-volume VMs, use ImportImage instead. For
+ * information about the import manifest referenced by this API action, see VM
- * Import Manifest.See Also:
.
This API action is not supported by the Command Line + * Interface (CLI).
Creates an import volume task using metadata from the specified disk - * image.
This API action supports only single-volume VMs. To import + *
This API action supports only single-volume VMs. To import * multi-volume VMs, use ImportImage instead. To import a disk to a - * snapshot, use ImportSnapshot instead.
This API action is not - * supported by the Command Line Interface (CLI). For information about using the - * Amazon EC2 CLI, which is deprecated, see Importing - * Disks to Amazon EBS in the Amazon EC2 CLI Reference PDF file.
- *For information about the import manifest referenced by this API action, see - * ImportSnapshot instead.
Creates an import + * volume task using metadata from the specified disk image.
For information + * about the import manifest referenced by this API action, see VM - * Import Manifest.
This API action is not supported by the Command Line + * Interface (CLI).
The description of the snapshot.
+ *The description of the disk image being imported.
*/ inline const Aws::String& GetDescription() const{ return m_description; } inline bool DescriptionHasBeenSet() const { return m_descriptionHasBeenSet; } diff --git a/generated/src/aws-cpp-sdk-ec2/source/model/FleetCapacityReservationUsageStrategy.cpp b/generated/src/aws-cpp-sdk-ec2/source/model/FleetCapacityReservationUsageStrategy.cpp index 9b4093c1565..67420e81701 100644 --- a/generated/src/aws-cpp-sdk-ec2/source/model/FleetCapacityReservationUsageStrategy.cpp +++ b/generated/src/aws-cpp-sdk-ec2/source/model/FleetCapacityReservationUsageStrategy.cpp @@ -21,6 +21,8 @@ namespace Aws { static const int use_capacity_reservations_first_HASH = HashingUtils::HashString("use-capacity-reservations-first"); + static const int use_capacity_reservations_only_HASH = HashingUtils::HashString("use-capacity-reservations-only"); + static const int none_HASH = HashingUtils::HashString("none"); FleetCapacityReservationUsageStrategy GetFleetCapacityReservationUsageStrategyForName(const Aws::String& name) @@ -30,6 +32,14 @@ namespace Aws { return FleetCapacityReservationUsageStrategy::use_capacity_reservations_first; } + else if (hashCode == use_capacity_reservations_only_HASH) + { + return FleetCapacityReservationUsageStrategy::use_capacity_reservations_only; + } + else if (hashCode == none_HASH) + { + return FleetCapacityReservationUsageStrategy::none; + } EnumParseOverflowContainer* overflowContainer = Aws::GetEnumOverflowContainer(); if(overflowContainer) { @@ -48,6 +58,10 @@ namespace Aws return {}; case FleetCapacityReservationUsageStrategy::use_capacity_reservations_first: return "use-capacity-reservations-first"; + case FleetCapacityReservationUsageStrategy::use_capacity_reservations_only: + return "use-capacity-reservations-only"; + case FleetCapacityReservationUsageStrategy::none: + return "none"; default: EnumParseOverflowContainer* overflowContainer = Aws::GetEnumOverflowContainer(); if(overflowContainer) diff --git 
a/generated/src/aws-cpp-sdk-ec2/source/model/InstanceType.cpp b/generated/src/aws-cpp-sdk-ec2/source/model/InstanceType.cpp index 69d4e1f66d2..8310ae027d0 100644 --- a/generated/src/aws-cpp-sdk-ec2/source/model/InstanceType.cpp +++ b/generated/src/aws-cpp-sdk-ec2/source/model/InstanceType.cpp @@ -840,6 +840,14 @@ namespace Aws static const int r8g_metal_24xl_HASH = HashingUtils::HashString("r8g.metal-24xl"); static const int r8g_metal_48xl_HASH = HashingUtils::HashString("r8g.metal-48xl"); static const int mac2_m1ultra_metal_HASH = HashingUtils::HashString("mac2-m1ultra.metal"); + static const int g6e_xlarge_HASH = HashingUtils::HashString("g6e.xlarge"); + static const int g6e_2xlarge_HASH = HashingUtils::HashString("g6e.2xlarge"); + static const int g6e_4xlarge_HASH = HashingUtils::HashString("g6e.4xlarge"); + static const int g6e_8xlarge_HASH = HashingUtils::HashString("g6e.8xlarge"); + static const int g6e_12xlarge_HASH = HashingUtils::HashString("g6e.12xlarge"); + static const int g6e_16xlarge_HASH = HashingUtils::HashString("g6e.16xlarge"); + static const int g6e_24xlarge_HASH = HashingUtils::HashString("g6e.24xlarge"); + static const int g6e_48xlarge_HASH = HashingUtils::HashString("g6e.48xlarge"); /* The if-else chains in this file are converted into a jump table by the compiler, @@ -4972,6 +4980,46 @@ namespace Aws enumValue = InstanceType::mac2_m1ultra_metal; return true; } + else if (hashCode == g6e_xlarge_HASH) + { + enumValue = InstanceType::g6e_xlarge; + return true; + } + else if (hashCode == g6e_2xlarge_HASH) + { + enumValue = InstanceType::g6e_2xlarge; + return true; + } + else if (hashCode == g6e_4xlarge_HASH) + { + enumValue = InstanceType::g6e_4xlarge; + return true; + } + else if (hashCode == g6e_8xlarge_HASH) + { + enumValue = InstanceType::g6e_8xlarge; + return true; + } + else if (hashCode == g6e_12xlarge_HASH) + { + enumValue = InstanceType::g6e_12xlarge; + return true; + } + else if (hashCode == g6e_16xlarge_HASH) + { + enumValue = 
InstanceType::g6e_16xlarge; + return true; + } + else if (hashCode == g6e_24xlarge_HASH) + { + enumValue = InstanceType::g6e_24xlarge; + return true; + } + else if (hashCode == g6e_48xlarge_HASH) + { + enumValue = InstanceType::g6e_48xlarge; + return true; + } return false; } @@ -7487,6 +7535,30 @@ namespace Aws case InstanceType::mac2_m1ultra_metal: value = "mac2-m1ultra.metal"; return true; + case InstanceType::g6e_xlarge: + value = "g6e.xlarge"; + return true; + case InstanceType::g6e_2xlarge: + value = "g6e.2xlarge"; + return true; + case InstanceType::g6e_4xlarge: + value = "g6e.4xlarge"; + return true; + case InstanceType::g6e_8xlarge: + value = "g6e.8xlarge"; + return true; + case InstanceType::g6e_12xlarge: + value = "g6e.12xlarge"; + return true; + case InstanceType::g6e_16xlarge: + value = "g6e.16xlarge"; + return true; + case InstanceType::g6e_24xlarge: + value = "g6e.24xlarge"; + return true; + case InstanceType::g6e_48xlarge: + value = "g6e.48xlarge"; + return true; default: return false; } diff --git a/generated/src/aws-cpp-sdk-emr-serverless/include/aws/emr-serverless/model/Application.h b/generated/src/aws-cpp-sdk-emr-serverless/include/aws/emr-serverless/model/Application.h index f32415af6e0..2621346aedb 100644 --- a/generated/src/aws-cpp-sdk-emr-serverless/include/aws/emr-serverless/model/Application.h +++ b/generated/src/aws-cpp-sdk-emr-serverless/include/aws/emr-serverless/model/Application.h @@ -18,6 +18,7 @@ #includeThe scheduler configuration for batch and streaming jobs running on this + * application. Supported with release labels emr-7.0.0 and above.
+ */ + inline const SchedulerConfiguration& GetSchedulerConfiguration() const{ return m_schedulerConfiguration; } + inline bool SchedulerConfigurationHasBeenSet() const { return m_schedulerConfigurationHasBeenSet; } + inline void SetSchedulerConfiguration(const SchedulerConfiguration& value) { m_schedulerConfigurationHasBeenSet = true; m_schedulerConfiguration = value; } + inline void SetSchedulerConfiguration(SchedulerConfiguration&& value) { m_schedulerConfigurationHasBeenSet = true; m_schedulerConfiguration = std::move(value); } + inline Application& WithSchedulerConfiguration(const SchedulerConfiguration& value) { SetSchedulerConfiguration(value); return *this;} + inline Application& WithSchedulerConfiguration(SchedulerConfiguration&& value) { SetSchedulerConfiguration(std::move(value)); return *this;} + ///@} private: Aws::String m_applicationId; @@ -413,6 +427,9 @@ namespace Model InteractiveConfiguration m_interactiveConfiguration; bool m_interactiveConfigurationHasBeenSet = false; + + SchedulerConfiguration m_schedulerConfiguration; + bool m_schedulerConfigurationHasBeenSet = false; }; } // namespace Model diff --git a/generated/src/aws-cpp-sdk-emr-serverless/include/aws/emr-serverless/model/CreateApplicationRequest.h b/generated/src/aws-cpp-sdk-emr-serverless/include/aws/emr-serverless/model/CreateApplicationRequest.h index 360d2eb1410..81d9e12f3cb 100644 --- a/generated/src/aws-cpp-sdk-emr-serverless/include/aws/emr-serverless/model/CreateApplicationRequest.h +++ b/generated/src/aws-cpp-sdk-emr-serverless/include/aws/emr-serverless/model/CreateApplicationRequest.h @@ -17,6 +17,7 @@ #includeThe scheduler configuration for batch and streaming jobs running on this + * application. Supported with release labels emr-7.0.0 and above.
+ */ + inline const SchedulerConfiguration& GetSchedulerConfiguration() const{ return m_schedulerConfiguration; } + inline bool SchedulerConfigurationHasBeenSet() const { return m_schedulerConfigurationHasBeenSet; } + inline void SetSchedulerConfiguration(const SchedulerConfiguration& value) { m_schedulerConfigurationHasBeenSet = true; m_schedulerConfiguration = value; } + inline void SetSchedulerConfiguration(SchedulerConfiguration&& value) { m_schedulerConfigurationHasBeenSet = true; m_schedulerConfiguration = std::move(value); } + inline CreateApplicationRequest& WithSchedulerConfiguration(const SchedulerConfiguration& value) { SetSchedulerConfiguration(value); return *this;} + inline CreateApplicationRequest& WithSchedulerConfiguration(SchedulerConfiguration&& value) { SetSchedulerConfiguration(std::move(value)); return *this;} + ///@} private: Aws::String m_name; @@ -334,6 +348,9 @@ namespace Model InteractiveConfiguration m_interactiveConfiguration; bool m_interactiveConfigurationHasBeenSet = false; + + SchedulerConfiguration m_schedulerConfiguration; + bool m_schedulerConfigurationHasBeenSet = false; }; } // namespace Model diff --git a/generated/src/aws-cpp-sdk-emr-serverless/include/aws/emr-serverless/model/JobRun.h b/generated/src/aws-cpp-sdk-emr-serverless/include/aws/emr-serverless/model/JobRun.h index 3787bbb94a4..233f7841e89 100644 --- a/generated/src/aws-cpp-sdk-emr-serverless/include/aws/emr-serverless/model/JobRun.h +++ b/generated/src/aws-cpp-sdk-emr-serverless/include/aws/emr-serverless/model/JobRun.h @@ -362,6 +362,40 @@ namespace Model inline JobRun& WithAttemptUpdatedAt(const Aws::Utils::DateTime& value) { SetAttemptUpdatedAt(value); return *this;} inline JobRun& WithAttemptUpdatedAt(Aws::Utils::DateTime&& value) { SetAttemptUpdatedAt(std::move(value)); return *this;} ///@} + + ///@{ + /** + *The date and time when the job moved to the RUNNING state.
+ */ + inline const Aws::Utils::DateTime& GetStartedAt() const{ return m_startedAt; } + inline bool StartedAtHasBeenSet() const { return m_startedAtHasBeenSet; } + inline void SetStartedAt(const Aws::Utils::DateTime& value) { m_startedAtHasBeenSet = true; m_startedAt = value; } + inline void SetStartedAt(Aws::Utils::DateTime&& value) { m_startedAtHasBeenSet = true; m_startedAt = std::move(value); } + inline JobRun& WithStartedAt(const Aws::Utils::DateTime& value) { SetStartedAt(value); return *this;} + inline JobRun& WithStartedAt(Aws::Utils::DateTime&& value) { SetStartedAt(std::move(value)); return *this;} + ///@} + + ///@{ + /** + *The date and time when the job was terminated.
+ */ + inline const Aws::Utils::DateTime& GetEndedAt() const{ return m_endedAt; } + inline bool EndedAtHasBeenSet() const { return m_endedAtHasBeenSet; } + inline void SetEndedAt(const Aws::Utils::DateTime& value) { m_endedAtHasBeenSet = true; m_endedAt = value; } + inline void SetEndedAt(Aws::Utils::DateTime&& value) { m_endedAtHasBeenSet = true; m_endedAt = std::move(value); } + inline JobRun& WithEndedAt(const Aws::Utils::DateTime& value) { SetEndedAt(value); return *this;} + inline JobRun& WithEndedAt(Aws::Utils::DateTime&& value) { SetEndedAt(std::move(value)); return *this;} + ///@} + + ///@{ + /** + *The total time for a job in the QUEUED state in milliseconds.
+ */ + inline long long GetQueuedDurationMilliseconds() const{ return m_queuedDurationMilliseconds; } + inline bool QueuedDurationMillisecondsHasBeenSet() const { return m_queuedDurationMillisecondsHasBeenSet; } + inline void SetQueuedDurationMilliseconds(long long value) { m_queuedDurationMillisecondsHasBeenSet = true; m_queuedDurationMilliseconds = value; } + inline JobRun& WithQueuedDurationMilliseconds(long long value) { SetQueuedDurationMilliseconds(value); return *this;} + ///@} private: Aws::String m_applicationId; @@ -435,6 +469,15 @@ namespace Model Aws::Utils::DateTime m_attemptUpdatedAt; bool m_attemptUpdatedAtHasBeenSet = false; + + Aws::Utils::DateTime m_startedAt; + bool m_startedAtHasBeenSet = false; + + Aws::Utils::DateTime m_endedAt; + bool m_endedAtHasBeenSet = false; + + long long m_queuedDurationMilliseconds; + bool m_queuedDurationMillisecondsHasBeenSet = false; }; } // namespace Model diff --git a/generated/src/aws-cpp-sdk-emr-serverless/include/aws/emr-serverless/model/JobRunState.h b/generated/src/aws-cpp-sdk-emr-serverless/include/aws/emr-serverless/model/JobRunState.h index ab67c5d0276..21bcff1770e 100644 --- a/generated/src/aws-cpp-sdk-emr-serverless/include/aws/emr-serverless/model/JobRunState.h +++ b/generated/src/aws-cpp-sdk-emr-serverless/include/aws/emr-serverless/model/JobRunState.h @@ -23,7 +23,8 @@ namespace Model SUCCESS, FAILED, CANCELLING, - CANCELLED + CANCELLED, + QUEUED }; namespace JobRunStateMapper diff --git a/generated/src/aws-cpp-sdk-emr-serverless/include/aws/emr-serverless/model/SchedulerConfiguration.h b/generated/src/aws-cpp-sdk-emr-serverless/include/aws/emr-serverless/model/SchedulerConfiguration.h new file mode 100644 index 00000000000..abd778836f4 --- /dev/null +++ b/generated/src/aws-cpp-sdk-emr-serverless/include/aws/emr-serverless/model/SchedulerConfiguration.h @@ -0,0 +1,74 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. 
+ */ + +#pragma once +#includeThe scheduler configuration for batch and streaming jobs running on this + * application. Supported with release labels emr-7.0.0 and above.
The maximum duration in minutes for the job in QUEUED state. If scheduler + * configuration is enabled on your application, the default value is 360 minutes + * (6 hours). The valid range is from 15 to 720.
+ */ + inline int GetQueueTimeoutMinutes() const{ return m_queueTimeoutMinutes; } + inline bool QueueTimeoutMinutesHasBeenSet() const { return m_queueTimeoutMinutesHasBeenSet; } + inline void SetQueueTimeoutMinutes(int value) { m_queueTimeoutMinutesHasBeenSet = true; m_queueTimeoutMinutes = value; } + inline SchedulerConfiguration& WithQueueTimeoutMinutes(int value) { SetQueueTimeoutMinutes(value); return *this;} + ///@} + + ///@{ + /** + *The maximum concurrent job runs on this application. If scheduler + * configuration is enabled on your application, the default value is 15. The valid + * range is 1 to 1000.
+ */ + inline int GetMaxConcurrentRuns() const{ return m_maxConcurrentRuns; } + inline bool MaxConcurrentRunsHasBeenSet() const { return m_maxConcurrentRunsHasBeenSet; } + inline void SetMaxConcurrentRuns(int value) { m_maxConcurrentRunsHasBeenSet = true; m_maxConcurrentRuns = value; } + inline SchedulerConfiguration& WithMaxConcurrentRuns(int value) { SetMaxConcurrentRuns(value); return *this;} + ///@} + private: + + int m_queueTimeoutMinutes; + bool m_queueTimeoutMinutesHasBeenSet = false; + + int m_maxConcurrentRuns; + bool m_maxConcurrentRunsHasBeenSet = false; + }; + +} // namespace Model +} // namespace EMRServerless +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-emr-serverless/include/aws/emr-serverless/model/UpdateApplicationRequest.h b/generated/src/aws-cpp-sdk-emr-serverless/include/aws/emr-serverless/model/UpdateApplicationRequest.h index 01ab631ec33..3b4f55bb760 100644 --- a/generated/src/aws-cpp-sdk-emr-serverless/include/aws/emr-serverless/model/UpdateApplicationRequest.h +++ b/generated/src/aws-cpp-sdk-emr-serverless/include/aws/emr-serverless/model/UpdateApplicationRequest.h @@ -17,6 +17,7 @@ #includeThe scheduler configuration for batch and streaming jobs running on this + * application. Supported with release labels emr-7.0.0 and above.
+ */ + inline const SchedulerConfiguration& GetSchedulerConfiguration() const{ return m_schedulerConfiguration; } + inline bool SchedulerConfigurationHasBeenSet() const { return m_schedulerConfigurationHasBeenSet; } + inline void SetSchedulerConfiguration(const SchedulerConfiguration& value) { m_schedulerConfigurationHasBeenSet = true; m_schedulerConfiguration = value; } + inline void SetSchedulerConfiguration(SchedulerConfiguration&& value) { m_schedulerConfigurationHasBeenSet = true; m_schedulerConfiguration = std::move(value); } + inline UpdateApplicationRequest& WithSchedulerConfiguration(const SchedulerConfiguration& value) { SetSchedulerConfiguration(value); return *this;} + inline UpdateApplicationRequest& WithSchedulerConfiguration(SchedulerConfiguration&& value) { SetSchedulerConfiguration(std::move(value)); return *this;} + ///@} private: Aws::String m_applicationId; @@ -294,6 +308,9 @@ namespace Model MonitoringConfiguration m_monitoringConfiguration; bool m_monitoringConfigurationHasBeenSet = false; + + SchedulerConfiguration m_schedulerConfiguration; + bool m_schedulerConfigurationHasBeenSet = false; }; } // namespace Model diff --git a/generated/src/aws-cpp-sdk-emr-serverless/source/model/Application.cpp b/generated/src/aws-cpp-sdk-emr-serverless/source/model/Application.cpp index fd98d4990d8..2e9bea02cff 100644 --- a/generated/src/aws-cpp-sdk-emr-serverless/source/model/Application.cpp +++ b/generated/src/aws-cpp-sdk-emr-serverless/source/model/Application.cpp @@ -41,7 +41,8 @@ Application::Application() : m_workerTypeSpecificationsHasBeenSet(false), m_runtimeConfigurationHasBeenSet(false), m_monitoringConfigurationHasBeenSet(false), - m_interactiveConfigurationHasBeenSet(false) + m_interactiveConfigurationHasBeenSet(false), + m_schedulerConfigurationHasBeenSet(false) { } @@ -212,6 +213,13 @@ Application& Application::operator =(JsonView jsonValue) m_interactiveConfigurationHasBeenSet = true; } + if(jsonValue.ValueExists("schedulerConfiguration")) + 
{ + m_schedulerConfiguration = jsonValue.GetObject("schedulerConfiguration"); + + m_schedulerConfigurationHasBeenSet = true; + } + return *this; } @@ -361,6 +369,12 @@ JsonValue Application::Jsonize() const } + if(m_schedulerConfigurationHasBeenSet) + { + payload.WithObject("schedulerConfiguration", m_schedulerConfiguration.Jsonize()); + + } + return payload; } diff --git a/generated/src/aws-cpp-sdk-emr-serverless/source/model/CreateApplicationRequest.cpp b/generated/src/aws-cpp-sdk-emr-serverless/source/model/CreateApplicationRequest.cpp index c64355440ff..1de51b2191e 100644 --- a/generated/src/aws-cpp-sdk-emr-serverless/source/model/CreateApplicationRequest.cpp +++ b/generated/src/aws-cpp-sdk-emr-serverless/source/model/CreateApplicationRequest.cpp @@ -30,7 +30,8 @@ CreateApplicationRequest::CreateApplicationRequest() : m_workerTypeSpecificationsHasBeenSet(false), m_runtimeConfigurationHasBeenSet(false), m_monitoringConfigurationHasBeenSet(false), - m_interactiveConfigurationHasBeenSet(false) + m_interactiveConfigurationHasBeenSet(false), + m_schedulerConfigurationHasBeenSet(false) { } @@ -153,6 +154,12 @@ Aws::String CreateApplicationRequest::SerializePayload() const } + if(m_schedulerConfigurationHasBeenSet) + { + payload.WithObject("schedulerConfiguration", m_schedulerConfiguration.Jsonize()); + + } + return payload.View().WriteReadable(); } diff --git a/generated/src/aws-cpp-sdk-emr-serverless/source/model/JobRun.cpp b/generated/src/aws-cpp-sdk-emr-serverless/source/model/JobRun.cpp index f3b53cd9652..de79bee1e95 100644 --- a/generated/src/aws-cpp-sdk-emr-serverless/source/model/JobRun.cpp +++ b/generated/src/aws-cpp-sdk-emr-serverless/source/model/JobRun.cpp @@ -47,7 +47,11 @@ JobRun::JobRun() : m_attempt(0), m_attemptHasBeenSet(false), m_attemptCreatedAtHasBeenSet(false), - m_attemptUpdatedAtHasBeenSet(false) + m_attemptUpdatedAtHasBeenSet(false), + m_startedAtHasBeenSet(false), + m_endedAtHasBeenSet(false), + m_queuedDurationMilliseconds(0), + 
m_queuedDurationMillisecondsHasBeenSet(false) { } @@ -230,6 +234,27 @@ JobRun& JobRun::operator =(JsonView jsonValue) m_attemptUpdatedAtHasBeenSet = true; } + if(jsonValue.ValueExists("startedAt")) + { + m_startedAt = jsonValue.GetDouble("startedAt"); + + m_startedAtHasBeenSet = true; + } + + if(jsonValue.ValueExists("endedAt")) + { + m_endedAt = jsonValue.GetDouble("endedAt"); + + m_endedAtHasBeenSet = true; + } + + if(jsonValue.ValueExists("queuedDurationMilliseconds")) + { + m_queuedDurationMilliseconds = jsonValue.GetInt64("queuedDurationMilliseconds"); + + m_queuedDurationMillisecondsHasBeenSet = true; + } + return *this; } @@ -380,6 +405,22 @@ JsonValue JobRun::Jsonize() const payload.WithDouble("attemptUpdatedAt", m_attemptUpdatedAt.SecondsWithMSPrecision()); } + if(m_startedAtHasBeenSet) + { + payload.WithDouble("startedAt", m_startedAt.SecondsWithMSPrecision()); + } + + if(m_endedAtHasBeenSet) + { + payload.WithDouble("endedAt", m_endedAt.SecondsWithMSPrecision()); + } + + if(m_queuedDurationMillisecondsHasBeenSet) + { + payload.WithInt64("queuedDurationMilliseconds", m_queuedDurationMilliseconds); + + } + return payload; } diff --git a/generated/src/aws-cpp-sdk-emr-serverless/source/model/JobRunState.cpp b/generated/src/aws-cpp-sdk-emr-serverless/source/model/JobRunState.cpp index 62d4e8bb561..e63cd03c1ca 100644 --- a/generated/src/aws-cpp-sdk-emr-serverless/source/model/JobRunState.cpp +++ b/generated/src/aws-cpp-sdk-emr-serverless/source/model/JobRunState.cpp @@ -28,6 +28,7 @@ namespace Aws static const int FAILED_HASH = HashingUtils::HashString("FAILED"); static const int CANCELLING_HASH = HashingUtils::HashString("CANCELLING"); static const int CANCELLED_HASH = HashingUtils::HashString("CANCELLED"); + static const int QUEUED_HASH = HashingUtils::HashString("QUEUED"); JobRunState GetJobRunStateForName(const Aws::String& name) @@ -65,6 +66,10 @@ namespace Aws { return JobRunState::CANCELLED; } + else if (hashCode == QUEUED_HASH) + { + return 
JobRunState::QUEUED; + } EnumParseOverflowContainer* overflowContainer = Aws::GetEnumOverflowContainer(); if(overflowContainer) { @@ -97,6 +102,8 @@ namespace Aws return "CANCELLING"; case JobRunState::CANCELLED: return "CANCELLED"; + case JobRunState::QUEUED: + return "QUEUED"; default: EnumParseOverflowContainer* overflowContainer = Aws::GetEnumOverflowContainer(); if(overflowContainer) diff --git a/generated/src/aws-cpp-sdk-emr-serverless/source/model/SchedulerConfiguration.cpp b/generated/src/aws-cpp-sdk-emr-serverless/source/model/SchedulerConfiguration.cpp new file mode 100644 index 00000000000..2a8ff03d00c --- /dev/null +++ b/generated/src/aws-cpp-sdk-emr-serverless/source/model/SchedulerConfiguration.cpp @@ -0,0 +1,75 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#includeThe properties for OAuth2 authentication in the CreateConnection request.
+ */ + inline const OAuth2PropertiesInput& GetOAuth2Properties() const{ return m_oAuth2Properties; } + inline bool OAuth2PropertiesHasBeenSet() const { return m_oAuth2PropertiesHasBeenSet; } + inline void SetOAuth2Properties(const OAuth2PropertiesInput& value) { m_oAuth2PropertiesHasBeenSet = true; m_oAuth2Properties = value; } + inline void SetOAuth2Properties(OAuth2PropertiesInput&& value) { m_oAuth2PropertiesHasBeenSet = true; m_oAuth2Properties = std::move(value); } + inline AuthenticationConfigurationInput& WithOAuth2Properties(const OAuth2PropertiesInput& value) { SetOAuth2Properties(value); return *this;} + inline AuthenticationConfigurationInput& WithOAuth2Properties(OAuth2PropertiesInput&& value) { SetOAuth2Properties(std::move(value)); return *this;} + ///@} + ///@{ /** *The secret manager ARN to store credentials in the CreateConnection @@ -67,28 +79,16 @@ namespace Model inline AuthenticationConfigurationInput& WithSecretArn(Aws::String&& value) { SetSecretArn(std::move(value)); return *this;} inline AuthenticationConfigurationInput& WithSecretArn(const char* value) { SetSecretArn(value); return *this;} ///@} - - ///@{ - /** - *
The properties for OAuth2 authentication in the CreateConnection request.
- */ - inline const OAuth2PropertiesInput& GetOAuth2Properties() const{ return m_oAuth2Properties; } - inline bool OAuth2PropertiesHasBeenSet() const { return m_oAuth2PropertiesHasBeenSet; } - inline void SetOAuth2Properties(const OAuth2PropertiesInput& value) { m_oAuth2PropertiesHasBeenSet = true; m_oAuth2Properties = value; } - inline void SetOAuth2Properties(OAuth2PropertiesInput&& value) { m_oAuth2PropertiesHasBeenSet = true; m_oAuth2Properties = std::move(value); } - inline AuthenticationConfigurationInput& WithOAuth2Properties(const OAuth2PropertiesInput& value) { SetOAuth2Properties(value); return *this;} - inline AuthenticationConfigurationInput& WithOAuth2Properties(OAuth2PropertiesInput&& value) { SetOAuth2Properties(std::move(value)); return *this;} - ///@} private: AuthenticationType m_authenticationType; bool m_authenticationTypeHasBeenSet = false; - Aws::String m_secretArn; - bool m_secretArnHasBeenSet = false; - OAuth2PropertiesInput m_oAuth2Properties; bool m_oAuth2PropertiesHasBeenSet = false; + + Aws::String m_secretArn; + bool m_secretArnHasBeenSet = false; }; } // namespace Model diff --git a/generated/src/aws-cpp-sdk-glue/include/aws/glue/model/Connection.h b/generated/src/aws-cpp-sdk-glue/include/aws/glue/model/Connection.h index 3c7178e955d..4c086c77995 100644 --- a/generated/src/aws-cpp-sdk-glue/include/aws/glue/model/Connection.h +++ b/generated/src/aws-cpp-sdk-glue/include/aws/glue/model/Connection.h @@ -230,6 +230,25 @@ namespace Model inline Connection& AddConnectionProperties(const ConnectionPropertyKey& key, const char* value) { m_connectionPropertiesHasBeenSet = true; m_connectionProperties.emplace(key, value); return *this; } ///@} + ///@{ + /** + *This field is not currently used.
+ */ + inline const Aws::MapThe physical connection requirements, such as virtual private cloud (VPC) and
@@ -349,6 +368,9 @@ namespace Model
Aws::Map This field is not currently used. The physical connection requirements, such as virtual private cloud (VPC) and
@@ -237,6 +256,9 @@ namespace Model
Aws::Map This command doesn't
* apply to Aurora MySQL and Aurora PostgreSQL. For Aurora, use
* RestoreDBClusterFromSnapshot
.See Also:
The name of the DB parameter group to associate with this DB instance.
If you don't specify a value for DBParameterGroupName
, then
- * Amazon RDS uses the DBParameterGroup
of the source DB instance for
- * a same Region read replica, or the default DBParameterGroup
for the
- * specified DB engine for a cross-Region read replica.
Specifying a - * parameter group for this operation is only supported for MySQL DB instances for - * cross-Region read replicas and for Oracle DB instances. It isn't supported for - * MySQL DB instances for same Region read replicas or for RDS Custom.
- *Constraints:
Must be 1 to 255 letters, numbers, or - * hyphens.
First character must be a letter.
Can't end with a hyphen or contain two consecutive hyphens.
The name of the DB parameter group to associate with this read replica DB + * instance.
For Single-AZ or Multi-AZ DB instance read replica instances,
+ * if you don't specify a value for DBParameterGroupName
, then Amazon
+ * RDS uses the DBParameterGroup
of the source DB instance for a same
+ * Region read replica, or the default DBParameterGroup
for the
+ * specified DB engine for a cross-Region read replica.
For Multi-AZ DB
+ * cluster same Region read replica instances, if you don't specify a value for
+ * DBParameterGroupName
, then Amazon RDS uses the default
+ * DBParameterGroup
.
Specifying a parameter group for this + * operation is only supported for MySQL DB instances for cross-Region read + * replicas, for Multi-AZ DB cluster read replica instances, and for Oracle DB + * instances. It isn't supported for MySQL DB instances for same Region read + * replicas or for RDS Custom.
Constraints:
Must be 1 to + * 255 letters, numbers, or hyphens.
First character must be a + * letter.
Can't end with a hyphen or contain two consecutive + * hyphens.
Specifies whether to create standby instances for the DB shard group. Valid - * values are the following:
0 - Creates a single, primary DB - * instance for each physical shard. This is the default value, and the only one - * supported for the preview.
1 - Creates a primary DB instance - * and a standby instance in a different Availability Zone (AZ) for each physical - * shard.
2 - Creates a primary DB instance and two standby - * instances in different AZs for each physical shard.
Specifies whether to create standby DB shard groups for the DB shard group. + * Valid values are the following:
0 - Creates a DB shard group + * without a standby DB shard group. This is the default value.
1 + * - Creates a DB shard group with a standby DB shard group in a different + * Availability Zone (AZ).
2 - Creates a DB shard group with two + * standby DB shard groups in two different AZs.
Specifies whether to create standby instances for the DB shard group. Valid - * values are the following:
0 - Creates a single, primary DB - * instance for each physical shard. This is the default value, and the only one - * supported for the preview.
1 - Creates a primary DB instance - * and a standby instance in a different Availability Zone (AZ) for each physical - * shard.
2 - Creates a primary DB instance and two standby - * instances in different AZs for each physical shard.
Specifies whether to create standby DB shard groups for the DB shard group. + * Valid values are the following:
0 - Creates a DB shard group + * without a standby DB shard group. This is the default value.
1 + * - Creates a DB shard group with a standby DB shard group in a different + * Availability Zone (AZ).
2 - Creates a DB shard group with two + * standby DB shard groups in two different AZs.
The Amazon Resource Name (ARN) for the DB shard group.
+ */ + inline const Aws::String& GetDBShardGroupArn() const{ return m_dBShardGroupArn; } + inline void SetDBShardGroupArn(const Aws::String& value) { m_dBShardGroupArn = value; } + inline void SetDBShardGroupArn(Aws::String&& value) { m_dBShardGroupArn = std::move(value); } + inline void SetDBShardGroupArn(const char* value) { m_dBShardGroupArn.assign(value); } + inline CreateDBShardGroupResult& WithDBShardGroupArn(const Aws::String& value) { SetDBShardGroupArn(value); return *this;} + inline CreateDBShardGroupResult& WithDBShardGroupArn(Aws::String&& value) { SetDBShardGroupArn(std::move(value)); return *this;} + inline CreateDBShardGroupResult& WithDBShardGroupArn(const char* value) { SetDBShardGroupArn(value); return *this;} + ///@} + ///@{ inline const ResponseMetadata& GetResponseMetadata() const{ return m_responseMetadata; } @@ -181,6 +193,8 @@ namespace Model Aws::String m_endpoint; + Aws::String m_dBShardGroupArn; + ResponseMetadata m_responseMetadata; }; diff --git a/generated/src/aws-cpp-sdk-rds/include/aws/rds/model/DBShardGroup.h b/generated/src/aws-cpp-sdk-rds/include/aws/rds/model/DBShardGroup.h index 8e4868dc8b4..c21ba01dd75 100644 --- a/generated/src/aws-cpp-sdk-rds/include/aws/rds/model/DBShardGroup.h +++ b/generated/src/aws-cpp-sdk-rds/include/aws/rds/model/DBShardGroup.h @@ -102,13 +102,12 @@ namespace Model ///@{ /** - *Specifies whether to create standby instances for the DB shard group. Valid - * values are the following:
0 - Creates a single, primary DB - * instance for each physical shard. This is the default value, and the only one - * supported for the preview.
1 - Creates a primary DB instance - * and a standby instance in a different Availability Zone (AZ) for each physical - * shard.
2 - Creates a primary DB instance and two standby - * instances in different AZs for each physical shard.
Specifies whether to create standby DB shard groups for the DB shard group. + * Valid values are the following:
0 - Creates a DB shard group + * without a standby DB shard group. This is the default value.
1 + * - Creates a DB shard group with a standby DB shard group in a different + * Availability Zone (AZ).
2 - Creates a DB shard group with two + * standby DB shard groups in two different AZs.
The Amazon Resource Name (ARN) for the DB shard group.
+ */ + inline const Aws::String& GetDBShardGroupArn() const{ return m_dBShardGroupArn; } + inline bool DBShardGroupArnHasBeenSet() const { return m_dBShardGroupArnHasBeenSet; } + inline void SetDBShardGroupArn(const Aws::String& value) { m_dBShardGroupArnHasBeenSet = true; m_dBShardGroupArn = value; } + inline void SetDBShardGroupArn(Aws::String&& value) { m_dBShardGroupArnHasBeenSet = true; m_dBShardGroupArn = std::move(value); } + inline void SetDBShardGroupArn(const char* value) { m_dBShardGroupArnHasBeenSet = true; m_dBShardGroupArn.assign(value); } + inline DBShardGroup& WithDBShardGroupArn(const Aws::String& value) { SetDBShardGroupArn(value); return *this;} + inline DBShardGroup& WithDBShardGroupArn(Aws::String&& value) { SetDBShardGroupArn(std::move(value)); return *this;} + inline DBShardGroup& WithDBShardGroupArn(const char* value) { SetDBShardGroupArn(value); return *this;} + ///@} + ///@{ inline const ResponseMetadata& GetResponseMetadata() const{ return m_responseMetadata; } @@ -201,6 +214,9 @@ namespace Model Aws::String m_endpoint; bool m_endpointHasBeenSet = false; + Aws::String m_dBShardGroupArn; + bool m_dBShardGroupArnHasBeenSet = false; + ResponseMetadata m_responseMetadata; }; diff --git a/generated/src/aws-cpp-sdk-rds/include/aws/rds/model/DeleteDBShardGroupResult.h b/generated/src/aws-cpp-sdk-rds/include/aws/rds/model/DeleteDBShardGroupResult.h index db50fe3009c..fca84b2ae59 100644 --- a/generated/src/aws-cpp-sdk-rds/include/aws/rds/model/DeleteDBShardGroupResult.h +++ b/generated/src/aws-cpp-sdk-rds/include/aws/rds/model/DeleteDBShardGroupResult.h @@ -95,13 +95,12 @@ namespace Model ///@{ /** - *Specifies whether to create standby instances for the DB shard group. Valid - * values are the following:
0 - Creates a single, primary DB - * instance for each physical shard. This is the default value, and the only one - * supported for the preview.
1 - Creates a primary DB instance - * and a standby instance in a different Availability Zone (AZ) for each physical - * shard.
2 - Creates a primary DB instance and two standby - * instances in different AZs for each physical shard.
Specifies whether to create standby DB shard groups for the DB shard group. + * Valid values are the following:
0 - Creates a DB shard group + * without a standby DB shard group. This is the default value.
1 + * - Creates a DB shard group with a standby DB shard group in a different + * Availability Zone (AZ).
2 - Creates a DB shard group with two + * standby DB shard groups in two different AZs.
The Amazon Resource Name (ARN) for the DB shard group.
+ */ + inline const Aws::String& GetDBShardGroupArn() const{ return m_dBShardGroupArn; } + inline void SetDBShardGroupArn(const Aws::String& value) { m_dBShardGroupArn = value; } + inline void SetDBShardGroupArn(Aws::String&& value) { m_dBShardGroupArn = std::move(value); } + inline void SetDBShardGroupArn(const char* value) { m_dBShardGroupArn.assign(value); } + inline DeleteDBShardGroupResult& WithDBShardGroupArn(const Aws::String& value) { SetDBShardGroupArn(value); return *this;} + inline DeleteDBShardGroupResult& WithDBShardGroupArn(Aws::String&& value) { SetDBShardGroupArn(std::move(value)); return *this;} + inline DeleteDBShardGroupResult& WithDBShardGroupArn(const char* value) { SetDBShardGroupArn(value); return *this;} + ///@} + ///@{ inline const ResponseMetadata& GetResponseMetadata() const{ return m_responseMetadata; } @@ -181,6 +193,8 @@ namespace Model Aws::String m_endpoint; + Aws::String m_dBShardGroupArn; + ResponseMetadata m_responseMetadata; }; diff --git a/generated/src/aws-cpp-sdk-rds/include/aws/rds/model/ModifyDBShardGroupRequest.h b/generated/src/aws-cpp-sdk-rds/include/aws/rds/model/ModifyDBShardGroupRequest.h index 3a875b366ba..90b607d20a2 100644 --- a/generated/src/aws-cpp-sdk-rds/include/aws/rds/model/ModifyDBShardGroupRequest.h +++ b/generated/src/aws-cpp-sdk-rds/include/aws/rds/model/ModifyDBShardGroupRequest.h @@ -71,6 +71,21 @@ namespace Model inline void SetMinACU(double value) { m_minACUHasBeenSet = true; m_minACU = value; } inline ModifyDBShardGroupRequest& WithMinACU(double value) { SetMinACU(value); return *this;} ///@} + + ///@{ + /** + *Specifies whether to create standby DB shard groups for the DB shard group. + * Valid values are the following:
0 - Creates a DB shard group + * without a standby DB shard group. This is the default value.
1 + * - Creates a DB shard group with a standby DB shard group in a different + * Availability Zone (AZ).
2 - Creates a DB shard group with two + * standby DB shard groups in two different AZs.
Specifies whether to create standby instances for the DB shard group. Valid - * values are the following:
0 - Creates a single, primary DB - * instance for each physical shard. This is the default value, and the only one - * supported for the preview.
1 - Creates a primary DB instance - * and a standby instance in a different Availability Zone (AZ) for each physical - * shard.
2 - Creates a primary DB instance and two standby - * instances in different AZs for each physical shard.
Specifies whether to create standby DB shard groups for the DB shard group. + * Valid values are the following:
0 - Creates a DB shard group + * without a standby DB shard group. This is the default value.
1 + * - Creates a DB shard group with a standby DB shard group in a different + * Availability Zone (AZ).
2 - Creates a DB shard group with two + * standby DB shard groups in two different AZs.
The Amazon Resource Name (ARN) for the DB shard group.
+ */ + inline const Aws::String& GetDBShardGroupArn() const{ return m_dBShardGroupArn; } + inline void SetDBShardGroupArn(const Aws::String& value) { m_dBShardGroupArn = value; } + inline void SetDBShardGroupArn(Aws::String&& value) { m_dBShardGroupArn = std::move(value); } + inline void SetDBShardGroupArn(const char* value) { m_dBShardGroupArn.assign(value); } + inline ModifyDBShardGroupResult& WithDBShardGroupArn(const Aws::String& value) { SetDBShardGroupArn(value); return *this;} + inline ModifyDBShardGroupResult& WithDBShardGroupArn(Aws::String&& value) { SetDBShardGroupArn(std::move(value)); return *this;} + inline ModifyDBShardGroupResult& WithDBShardGroupArn(const char* value) { SetDBShardGroupArn(value); return *this;} + ///@} + ///@{ inline const ResponseMetadata& GetResponseMetadata() const{ return m_responseMetadata; } @@ -181,6 +193,8 @@ namespace Model Aws::String m_endpoint; + Aws::String m_dBShardGroupArn; + ResponseMetadata m_responseMetadata; }; diff --git a/generated/src/aws-cpp-sdk-rds/include/aws/rds/model/RebootDBShardGroupResult.h b/generated/src/aws-cpp-sdk-rds/include/aws/rds/model/RebootDBShardGroupResult.h index ff411d683fc..00d81892708 100644 --- a/generated/src/aws-cpp-sdk-rds/include/aws/rds/model/RebootDBShardGroupResult.h +++ b/generated/src/aws-cpp-sdk-rds/include/aws/rds/model/RebootDBShardGroupResult.h @@ -95,13 +95,12 @@ namespace Model ///@{ /** - *Specifies whether to create standby instances for the DB shard group. Valid - * values are the following:
0 - Creates a single, primary DB - * instance for each physical shard. This is the default value, and the only one - * supported for the preview.
1 - Creates a primary DB instance - * and a standby instance in a different Availability Zone (AZ) for each physical - * shard.
2 - Creates a primary DB instance and two standby - * instances in different AZs for each physical shard.
Specifies whether to create standby DB shard groups for the DB shard group. + * Valid values are the following:
0 - Creates a DB shard group + * without a standby DB shard group. This is the default value.
1 + * - Creates a DB shard group with a standby DB shard group in a different + * Availability Zone (AZ).
2 - Creates a DB shard group with two + * standby DB shard groups in two different AZs.
The Amazon Resource Name (ARN) for the DB shard group.
+ */ + inline const Aws::String& GetDBShardGroupArn() const{ return m_dBShardGroupArn; } + inline void SetDBShardGroupArn(const Aws::String& value) { m_dBShardGroupArn = value; } + inline void SetDBShardGroupArn(Aws::String&& value) { m_dBShardGroupArn = std::move(value); } + inline void SetDBShardGroupArn(const char* value) { m_dBShardGroupArn.assign(value); } + inline RebootDBShardGroupResult& WithDBShardGroupArn(const Aws::String& value) { SetDBShardGroupArn(value); return *this;} + inline RebootDBShardGroupResult& WithDBShardGroupArn(Aws::String&& value) { SetDBShardGroupArn(std::move(value)); return *this;} + inline RebootDBShardGroupResult& WithDBShardGroupArn(const char* value) { SetDBShardGroupArn(value); return *this;} + ///@} + ///@{ inline const ResponseMetadata& GetResponseMetadata() const{ return m_responseMetadata; } @@ -181,6 +193,8 @@ namespace Model Aws::String m_endpoint; + Aws::String m_dBShardGroupArn; + ResponseMetadata m_responseMetadata; }; diff --git a/generated/src/aws-cpp-sdk-rds/source/RDSErrors.cpp b/generated/src/aws-cpp-sdk-rds/source/RDSErrors.cpp index fb0b59724cd..e12ce33a98d 100644 --- a/generated/src/aws-cpp-sdk-rds/source/RDSErrors.cpp +++ b/generated/src/aws-cpp-sdk-rds/source/RDSErrors.cpp @@ -94,7 +94,6 @@ static const int D_B_INSTANCE_AUTOMATED_BACKUP_QUOTA_EXCEEDED_FAULT_HASH = Hashi static const int D_B_CLUSTER_NOT_FOUND_FAULT_HASH = HashingUtils::HashString("DBClusterNotFoundFault"); static const int SUBSCRIPTION_NOT_FOUND_FAULT_HASH = HashingUtils::HashString("SubscriptionNotFound"); static const int BACKUP_POLICY_NOT_FOUND_FAULT_HASH = HashingUtils::HashString("BackupPolicyNotFoundFault"); -static const int INVALID_MAX_ACU_FAULT_HASH = HashingUtils::HashString("InvalidMaxAcu"); static const int INVALID_D_B_PROXY_ENDPOINT_STATE_FAULT_HASH = HashingUtils::HashString("InvalidDBProxyEndpointStateFault"); static const int D_B_SUBNET_GROUP_DOES_NOT_COVER_ENOUGH_A_ZS_HASH = 
HashingUtils::HashString("DBSubnetGroupDoesNotCoverEnoughAZs"); static const int D_B_UPGRADE_DEPENDENCY_FAILURE_FAULT_HASH = HashingUtils::HashString("DBUpgradeDependencyFailure"); @@ -553,11 +552,6 @@ static bool GetErrorForNameHelper0(int hashCode, AWSErrorRetrieves the status of your account's Amazon Web Services service access, * and validates the service linked role required to access the multi-account - * search feature. Only the management account or a delegated administrator with - * service access enabled can invoke this API call.
Returns a list of resources and their details that match the specified + * criteria. This query must use a view. If you don’t explicitly specify a view, + * then Resource Explorer uses the default view for the Amazon Web Services Region + * in which you call this operation.
Retrieves a list of all resource types currently supported by Amazon Web * Services Resource Explorer.
The maximum number of results that you want included on each page of the
+ * response. If you do not include this parameter, it defaults to a value
+ * appropriate to the operation. If additional items exist beyond those included in
+ * the current response, the NextToken
response element is present and
+ * has a value (is not null). Include that value as the NextToken
+ * request parameter in the next call to the operation to get the next part of the
+ * results.
An API operation can return fewer results than the
+ * maximum even when there are more results available. You should check
+ * NextToken
after every operation to ensure that you receive all of
+ * the results.
The parameter for receiving additional results if you receive a
+ * NextToken
response in a previous request. A NextToken
+ * response indicates that more output is available. Set this parameter to the
+ * value of the previous call's NextToken
response to indicate where
+ * the output should continue from. The pagination tokens expire after 24
+ * hours.
Specifies the Amazon resource name (ARN) of the view to use for the query. If + * you don't specify a value for this parameter, then the operation automatically + * uses the default view for the Amazon Web Services Region in which you called + * this operation. If the Region either doesn't have a default view or if you don't + * have permission to use the default view, then the operation fails with a 401 + * Unauthorized exception.
+ */ + inline const Aws::String& GetViewArn() const{ return m_viewArn; } + inline bool ViewArnHasBeenSet() const { return m_viewArnHasBeenSet; } + inline void SetViewArn(const Aws::String& value) { m_viewArnHasBeenSet = true; m_viewArn = value; } + inline void SetViewArn(Aws::String&& value) { m_viewArnHasBeenSet = true; m_viewArn = std::move(value); } + inline void SetViewArn(const char* value) { m_viewArnHasBeenSet = true; m_viewArn.assign(value); } + inline ListResourcesRequest& WithViewArn(const Aws::String& value) { SetViewArn(value); return *this;} + inline ListResourcesRequest& WithViewArn(Aws::String&& value) { SetViewArn(std::move(value)); return *this;} + inline ListResourcesRequest& WithViewArn(const char* value) { SetViewArn(value); return *this;} + ///@} + private: + + SearchFilter m_filters; + bool m_filtersHasBeenSet = false; + + int m_maxResults; + bool m_maxResultsHasBeenSet = false; + + Aws::String m_nextToken; + bool m_nextTokenHasBeenSet = false; + + Aws::String m_viewArn; + bool m_viewArnHasBeenSet = false; + }; + +} // namespace Model +} // namespace ResourceExplorer2 +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-resource-explorer-2/include/aws/resource-explorer-2/model/ListResourcesResult.h b/generated/src/aws-cpp-sdk-resource-explorer-2/include/aws/resource-explorer-2/model/ListResourcesResult.h new file mode 100644 index 00000000000..9be241a56ac --- /dev/null +++ b/generated/src/aws-cpp-sdk-resource-explorer-2/include/aws/resource-explorer-2/model/ListResourcesResult.h @@ -0,0 +1,104 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#pragma once +#includeIf present, indicates that more output is available than is included in the
+ * current response. Use this value in the NextToken
request parameter
+ * in a subsequent call to the operation to get the next part of the output. You
+ * should repeat this until the NextToken
response element comes back
+ * as null
. The pagination tokens expire after 24 hours.
The list of structures that describe the resources that match the query.
+ */ + inline const Aws::VectorThe Amazon resource name (ARN) of the view that this operation used to + * perform the search.
+ */ + inline const Aws::String& GetViewArn() const{ return m_viewArn; } + inline void SetViewArn(const Aws::String& value) { m_viewArn = value; } + inline void SetViewArn(Aws::String&& value) { m_viewArn = std::move(value); } + inline void SetViewArn(const char* value) { m_viewArn.assign(value); } + inline ListResourcesResult& WithViewArn(const Aws::String& value) { SetViewArn(value); return *this;} + inline ListResourcesResult& WithViewArn(Aws::String&& value) { SetViewArn(std::move(value)); return *this;} + inline ListResourcesResult& WithViewArn(const char* value) { SetViewArn(value); return *this;} + ///@} + + ///@{ + + inline const Aws::String& GetRequestId() const{ return m_requestId; } + inline void SetRequestId(const Aws::String& value) { m_requestId = value; } + inline void SetRequestId(Aws::String&& value) { m_requestId = std::move(value); } + inline void SetRequestId(const char* value) { m_requestId.assign(value); } + inline ListResourcesResult& WithRequestId(const Aws::String& value) { SetRequestId(value); return *this;} + inline ListResourcesResult& WithRequestId(Aws::String&& value) { SetRequestId(std::move(value)); return *this;} + inline ListResourcesResult& WithRequestId(const char* value) { SetRequestId(value); return *this;} + ///@} + private: + + Aws::String m_nextToken; + + Aws::VectorThe Amazon Web Service that owns the resource and is responsible for creating - * and updating it.
+ *The Amazon Web Servicesservice that owns the resource and is responsible for + * creating and updating it.
*/ inline const Aws::String& GetService() const{ return m_service; } inline bool ServiceHasBeenSet() const { return m_serviceHasBeenSet; } diff --git a/generated/src/aws-cpp-sdk-resource-explorer-2/include/aws/resource-explorer-2/model/SupportedResourceType.h b/generated/src/aws-cpp-sdk-resource-explorer-2/include/aws/resource-explorer-2/model/SupportedResourceType.h index 1440b1507d4..fad52216072 100644 --- a/generated/src/aws-cpp-sdk-resource-explorer-2/include/aws/resource-explorer-2/model/SupportedResourceType.h +++ b/generated/src/aws-cpp-sdk-resource-explorer-2/include/aws/resource-explorer-2/model/SupportedResourceType.h @@ -54,9 +54,9 @@ namespace Model ///@{ /** - *The Amazon Web Service that is associated with the resource type. This is the - * primary service that lets you create and interact with resources of this - * type.
+ *The Amazon Web Servicesservice that is associated with the resource type. + * This is the primary service that lets you create and interact with resources of + * this type.
*/ inline const Aws::String& GetService() const{ return m_service; } inline bool ServiceHasBeenSet() const { return m_serviceHasBeenSet; } diff --git a/generated/src/aws-cpp-sdk-resource-explorer-2/source/ResourceExplorer2Client.cpp b/generated/src/aws-cpp-sdk-resource-explorer-2/source/ResourceExplorer2Client.cpp index 036f0a90d07..c1ae9a3c321 100644 --- a/generated/src/aws-cpp-sdk-resource-explorer-2/source/ResourceExplorer2Client.cpp +++ b/generated/src/aws-cpp-sdk-resource-explorer-2/source/ResourceExplorer2Client.cpp @@ -34,6 +34,7 @@ #includeThe timestamp when the certificate that was used by edge-optimized endpoint for this domain name was uploaded.
" + "documentation":"The timestamp when the certificate that was used by edge-optimized endpoint for this domain name was uploaded. API Gateway doesn't change this value if you update the certificate.
" }, "regionalDomainName":{ "shape":"String", diff --git a/tools/code-generation/api-descriptions/athena-2017-05-18.normal.json b/tools/code-generation/api-descriptions/athena-2017-05-18.normal.json index 66375fe17b1..383a3814f52 100644 --- a/tools/code-generation/api-descriptions/athena-2017-05-18.normal.json +++ b/tools/code-generation/api-descriptions/athena-2017-05-18.normal.json @@ -1538,6 +1538,41 @@ "min":0, "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*" }, + "ConnectionType":{ + "type":"string", + "enum":[ + "DYNAMODB", + "MYSQL", + "POSTGRESQL", + "REDSHIFT", + "ORACLE", + "SYNAPSE", + "SQLSERVER", + "DB2", + "OPENSEARCH", + "BIGQUERY", + "GOOGLECLOUDSTORAGE", + "HBASE", + "DOCUMENTDB", + "MSK", + "NEPTUNE", + "CMDB", + "TPCDS", + "REDIS", + "CLOUDWATCH", + "TIMESTREAM", + "SAPHANA", + "SNOWFLAKE", + "TERADATA", + "VERTICA", + "CLOUDERAIMPALA", + "CLOUDERAHIVE", + "HORTONWORKSHIVE", + "DATALAKEGEN2", + "DB2AS400", + "CLOUDWATCHMETRICS" + ] + }, "CoordinatorDpuSize":{ "type":"integer", "box":true, @@ -1583,7 +1618,7 @@ }, "Type":{ "shape":"DataCatalogType", - "documentation":"The type of data catalog to create: LAMBDA
for a federated catalog, HIVE
for an external hive metastore, or GLUE
for an Glue Data Catalog.
The type of data catalog to create: LAMBDA
for a federated catalog, GLUE
for an Glue Data Catalog, and HIVE
for an external Apache Hive metastore. FEDERATED
is a federated catalog for which Athena creates the connection and the Lambda function for you based on the parameters that you pass.
Specifies the Lambda function or functions to use for creating the data catalog. This is a mapping whose values depend on the catalog type.
For the HIVE
data catalog type, use the following syntax. The metadata-function
parameter is required. The sdk-version
parameter is optional and defaults to the currently supported version.
metadata-function=lambda_arn, sdk-version=version_number
For the LAMBDA
data catalog type, use one of the following sets of required parameters, but not both.
If you have one Lambda function that processes metadata and another for reading the actual data, use the following syntax. Both parameters are required.
metadata-function=lambda_arn, record-function=lambda_arn
If you have a composite Lambda function that processes both metadata and data, use the following syntax to specify your Lambda function.
function=lambda_arn
The GLUE
type takes a catalog ID parameter and is required. The catalog_id
is the account ID of the Amazon Web Services account to which the Glue Data Catalog belongs.
catalog-id=catalog_id
The GLUE
data catalog type also applies to the default AwsDataCatalog
that already exists in your account, of which you can have only one and cannot modify.
Specifies the Lambda function or functions to use for creating the data catalog. This is a mapping whose values depend on the catalog type.
For the HIVE
data catalog type, use the following syntax. The metadata-function
parameter is required. The sdk-version
parameter is optional and defaults to the currently supported version.
metadata-function=lambda_arn, sdk-version=version_number
For the LAMBDA
data catalog type, use one of the following sets of required parameters, but not both.
If you have one Lambda function that processes metadata and another for reading the actual data, use the following syntax. Both parameters are required.
metadata-function=lambda_arn, record-function=lambda_arn
If you have a composite Lambda function that processes both metadata and data, use the following syntax to specify your Lambda function.
function=lambda_arn
The GLUE
type takes a catalog ID parameter and is required. The catalog_id
is the account ID of the Amazon Web Services account to which the Glue Data Catalog belongs.
catalog-id=catalog_id
The GLUE
data catalog type also applies to the default AwsDataCatalog
that already exists in your account, of which you can have only one and cannot modify.
The FEDERATED
data catalog type uses one of the following parameters, but not both. Use connection-arn
for an existing Glue connection. Use connection-type
and connection-properties
to specify the configuration setting for a new connection.
connection-arn:<glue_connection_arn_to_reuse>
lambda-role-arn
(optional): The execution role to use for the Lambda function. If not provided, one is created.
connection-type:MYSQL|REDSHIFT|...., connection-properties:\"<json_string>\"
For <json_string>
, use escaped JSON text, as in the following example.
\"{\\\"spill_bucket\\\":\\\"my_spill\\\",\\\"spill_prefix\\\":\\\"athena-spill\\\",\\\"host\\\":\\\"abc12345.snowflakecomputing.com\\\",\\\"port\\\":\\\"1234\\\",\\\"warehouse\\\":\\\"DEV_WH\\\",\\\"database\\\":\\\"TEST\\\",\\\"schema\\\":\\\"PUBLIC\\\",\\\"SecretArn\\\":\\\"arn:aws:secretsmanager:ap-south-1:111122223333:secret:snowflake-XHb67j\\\"}\"
The type of data catalog to create: LAMBDA
for a federated catalog, HIVE
for an external hive metastore, or GLUE
for an Glue Data Catalog.
The type of data catalog to create: LAMBDA
for a federated catalog, GLUE
for an Glue Data Catalog, and HIVE
for an external Apache Hive metastore. FEDERATED
is a federated catalog for which Athena creates the connection and the Lambda function for you based on the parameters that you pass.
Specifies the Lambda function or functions to use for the data catalog. This is a mapping whose values depend on the catalog type.
For the HIVE
data catalog type, use the following syntax. The metadata-function
parameter is required. The sdk-version
parameter is optional and defaults to the currently supported version.
metadata-function=lambda_arn, sdk-version=version_number
For the LAMBDA
data catalog type, use one of the following sets of required parameters, but not both.
If you have one Lambda function that processes metadata and another for reading the actual data, use the following syntax. Both parameters are required.
metadata-function=lambda_arn, record-function=lambda_arn
If you have a composite Lambda function that processes both metadata and data, use the following syntax to specify your Lambda function.
function=lambda_arn
The GLUE
type takes a catalog ID parameter and is required. The catalog_id
is the account ID of the Amazon Web Services account to which the Glue catalog belongs.
catalog-id=catalog_id
The GLUE
data catalog type also applies to the default AwsDataCatalog
that already exists in your account, of which you can have only one and cannot modify.
Specifies the Lambda function or functions to use for the data catalog. This is a mapping whose values depend on the catalog type.
For the HIVE
data catalog type, use the following syntax. The metadata-function
parameter is required. The sdk-version
parameter is optional and defaults to the currently supported version.
metadata-function=lambda_arn, sdk-version=version_number
For the LAMBDA
data catalog type, use one of the following sets of required parameters, but not both.
If you have one Lambda function that processes metadata and another for reading the actual data, use the following syntax. Both parameters are required.
metadata-function=lambda_arn, record-function=lambda_arn
If you have a composite Lambda function that processes both metadata and data, use the following syntax to specify your Lambda function.
function=lambda_arn
The GLUE
type takes a catalog ID parameter and is required. The catalog_id
is the account ID of the Amazon Web Services account to which the Glue catalog belongs.
catalog-id=catalog_id
The GLUE
data catalog type also applies to the default AwsDataCatalog
that already exists in your account, of which you can have only one and cannot modify.
The FEDERATED
data catalog type uses one of the following parameters, but not both. Use connection-arn
for an existing Glue connection. Use connection-type
and connection-properties
to specify the configuration setting for a new connection.
connection-arn:<glue_connection_arn_to_reuse>
connection-type:MYSQL|REDSHIFT|...., connection-properties:\"<json_string>\"
For <json_string>
, use escaped JSON text, as in the following example.
\"{\\\"spill_bucket\\\":\\\"my_spill\\\",\\\"spill_prefix\\\":\\\"athena-spill\\\",\\\"host\\\":\\\"abc12345.snowflakecomputing.com\\\",\\\"port\\\":\\\"1234\\\",\\\"warehouse\\\":\\\"DEV_WH\\\",\\\"database\\\":\\\"TEST\\\",\\\"schema\\\":\\\"PUBLIC\\\",\\\"SecretArn\\\":\\\"arn:aws:secretsmanager:ap-south-1:111122223333:secret:snowflake-XHb67j\\\"}\"
The status of the creation or deletion of the data catalog.
The LAMBDA
, GLUE
, and HIVE
data catalog types are created synchronously. Their status is either CREATE_COMPLETE
or CREATE_FAILED
.
The FEDERATED
data catalog type is created asynchronously.
Data catalog creation status:
CREATE_IN_PROGRESS
: Federated data catalog creation in progress.
CREATE_COMPLETE
: Data catalog creation complete.
CREATE_FAILED
: Data catalog could not be created.
CREATE_FAILED_CLEANUP_IN_PROGRESS
: Federated data catalog creation failed and is being removed.
CREATE_FAILED_CLEANUP_COMPLETE
: Federated data catalog creation failed and was removed.
CREATE_FAILED_CLEANUP_FAILED
: Federated data catalog creation failed but could not be removed.
Data catalog deletion status:
DELETE_IN_PROGRESS
: Federated data catalog deletion in progress.
DELETE_COMPLETE
: Federated data catalog deleted.
DELETE_FAILED
: Federated data catalog could not be deleted.
The type of connection for a FEDERATED
data catalog (for example, REDSHIFT
, MYSQL
, or SQLSERVER
). For information about individual connectors, see Available data source connectors.
Text of the error that occurred during data catalog creation or deletion.
" } }, "documentation":"Contains information about a data catalog in an Amazon Web Services account.
In the Athena console, data catalogs are listed as \"data sources\" on the Data sources page under the Data source name column.
The data catalog type.
" + }, + "Status":{ + "shape":"DataCatalogStatus", + "documentation":"The status of the creation or deletion of the data catalog.
The LAMBDA
, GLUE
, and HIVE
data catalog types are created synchronously. Their status is either CREATE_COMPLETE
or CREATE_FAILED
.
The FEDERATED
data catalog type is created asynchronously.
Data catalog creation status:
CREATE_IN_PROGRESS
: Federated data catalog creation in progress.
CREATE_COMPLETE
: Data catalog creation complete.
CREATE_FAILED
: Data catalog could not be created.
CREATE_FAILED_CLEANUP_IN_PROGRESS
: Federated data catalog creation failed and is being removed.
CREATE_FAILED_CLEANUP_COMPLETE
: Federated data catalog creation failed and was removed.
CREATE_FAILED_CLEANUP_FAILED
: Federated data catalog creation failed but could not be removed.
Data catalog deletion status:
DELETE_IN_PROGRESS
: Federated data catalog deletion in progress.
DELETE_COMPLETE
: Federated data catalog deleted.
DELETE_FAILED
: Federated data catalog could not be deleted.
The type of connection for a FEDERATED
data catalog (for example, REDSHIFT
, MYSQL
, or SQLSERVER
). For information about individual connectors, see Available data source connectors.
Text of the error that occurred during data catalog creation or deletion.
" } }, "documentation":"The summary information for the data catalog, which includes its name and type.
" @@ -1828,7 +1902,8 @@ "enum":[ "LAMBDA", "GLUE", - "HIVE" + "HIVE", + "FEDERATED" ] }, "Database":{ @@ -1904,6 +1979,7 @@ "DeleteDataCatalogOutput":{ "type":"structure", "members":{ + "DataCatalog":{"shape":"DataCatalog"} } }, "DeleteNamedQueryInput":{ diff --git a/tools/code-generation/api-descriptions/bedrock-agent-2023-06-05.normal.json b/tools/code-generation/api-descriptions/bedrock-agent-2023-06-05.normal.json index 253826604ad..4a519427e8d 100644 --- a/tools/code-generation/api-descriptions/bedrock-agent-2023-06-05.normal.json +++ b/tools/code-generation/api-descriptions/bedrock-agent-2023-06-05.normal.json @@ -2313,7 +2313,7 @@ }, "foundationModel":{ "shape":"ModelIdentifier", - "documentation":"The foundation model to be used for orchestration by the agent you create.
" + "documentation":"The Amazon Resource Name (ARN) of the foundation model to be used for orchestration by the agent you create.
" }, "guardrailConfiguration":{ "shape":"GuardrailConfiguration", @@ -5190,8 +5190,8 @@ "documentation":"The unique identifier of the knowledge base to query.
" }, "modelId":{ - "shape":"ModelIdentifier", - "documentation":"The unique identifier of the model to use to generate a response from the query results. Omit this field if you want to return the retrieved results as an array.
" + "shape":"KnowledgeBaseModelIdentifier", + "documentation":"The unique identifier of the model or inference profile to use to generate a response from the query results. Omit this field if you want to return the retrieved results as an array.
" } }, "documentation":"Contains configurations for a knowledge base node in a flow. This node takes a query as the input and returns, as the output, the retrieved responses directly (as an array) or a response generated based on the retrieved responses. For more information, see Node types in Amazon Bedrock works in the Amazon Bedrock User Guide.
" @@ -5202,6 +5202,12 @@ "min":0, "pattern":"^[0-9a-zA-Z]+$" }, + "KnowledgeBaseModelIdentifier":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"^(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}/[a-z0-9]{12})|(:foundation-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|([0-9]{12}:provisioned-model/[a-z0-9]{12})))|([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|(([0-9a-zA-Z][_-]?)+)|(arn:aws(|-us-gov|-cn|-iso|-iso-b):bedrock:(|[0-9a-z-]{1,20}):(|[0-9]{12}):(model-gateway|inference-profile)/[a-zA-Z0-9-:.]+)|([a-zA-Z0-9-:.]+)$" + }, "KnowledgeBaseRoleArn":{ "type":"string", "max":2048, @@ -6301,7 +6307,7 @@ }, "modelId":{ "shape":"PromptModelIdentifier", - "documentation":"The unique identifier of the model to run inference with.
" + "documentation":"The unique identifier of the model or inference profile to run inference with.
" }, "templateConfiguration":{ "shape":"PromptTemplateConfiguration", @@ -6424,7 +6430,7 @@ "type":"string", "max":2048, "min":1, - "pattern":"^(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}/[a-z0-9]{12})|(:foundation-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|([0-9]{12}:provisioned-model/[a-z0-9]{12})))|([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|(([0-9a-zA-Z][_-]?)+)$" + "pattern":"^(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}/[a-z0-9]{12})|(:foundation-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|([0-9]{12}:provisioned-model/[a-z0-9]{12})))|([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|(([0-9a-zA-Z][_-]?)+)|(arn:aws(|-us-gov|-cn|-iso|-iso-b):bedrock:(|[0-9a-z-]{1,20}):(|[0-9]{12}):(model-gateway|inference-profile)/[a-zA-Z0-9-:.]+)|([a-zA-Z0-9-:.]+)$" }, "PromptModelInferenceConfiguration":{ "type":"structure", @@ -6462,7 +6468,7 @@ "members":{ "overrideLambda":{ "shape":"LambdaArn", - "documentation":"The ARN of the Lambda function to use when parsing the raw foundation model output in parts of the agent sequence. If you specify this field, at least one of the promptConfigurations
must contain a parserMode
value that is set to OVERRIDDEN
. For more information, see Parser Lambda function in Agents for Amazon Bedrock.
The ARN of the Lambda function to use when parsing the raw foundation model output in parts of the agent sequence. If you specify this field, at least one of the promptConfigurations
must contain a parserMode
value that is set to OVERRIDDEN
. For more information, see Parser Lambda function in Amazon Bedrock Agents.
The unique identifier of the model with which to run inference on the prompt.
" + "documentation":"The unique identifier of the model or inference profile with which to run inference on the prompt.
" }, "name":{ "shape":"PromptVariantName", diff --git a/tools/code-generation/api-descriptions/ec2-2016-11-15.normal.json b/tools/code-generation/api-descriptions/ec2-2016-11-15.normal.json index 32910a6be85..1f84c148d38 100644 --- a/tools/code-generation/api-descriptions/ec2-2016-11-15.normal.json +++ b/tools/code-generation/api-descriptions/ec2-2016-11-15.normal.json @@ -459,7 +459,7 @@ "requestUri":"/" }, "input":{"shape":"CancelConversionRequest"}, - "documentation":"Cancels an active conversion task. The task can be the import of an instance or volume. The action removes all artifacts of the conversion, including a partially uploaded volume or instance. If the conversion is complete or is in the process of transferring the final disk image, the command fails and returns an exception.
For more information, see Importing a Virtual Machine Using the Amazon EC2 CLI.
" + "documentation":"Cancels an active conversion task. The task can be the import of an instance or volume. The action removes all artifacts of the conversion, including a partially uploaded volume or instance. If the conversion is complete or is in the process of transferring the final disk image, the command fails and returns an exception.
" }, "CancelExportTask":{ "name":"CancelExportTask", @@ -4827,7 +4827,7 @@ }, "input":{"shape":"ImportInstanceRequest"}, "output":{"shape":"ImportInstanceResult"}, - "documentation":"We recommend that you use the ImportImage
API. For more information, see Importing a VM as an image using VM Import/Export in the VM Import/Export User Guide.
Creates an import instance task using metadata from the specified disk image.
This API action is not supported by the Command Line Interface (CLI). For information about using the Amazon EC2 CLI, which is deprecated, see Importing a VM to Amazon EC2 in the Amazon EC2 CLI Reference PDF file.
This API action supports only single-volume VMs. To import multi-volume VMs, use ImportImage instead.
For information about the import manifest referenced by this API action, see VM Import Manifest.
" + "documentation":"We recommend that you use the ImportImage
API instead. For more information, see Importing a VM as an image using VM Import/Export in the VM Import/Export User Guide.
Creates an import instance task using metadata from the specified disk image.
This API action supports only single-volume VMs. To import multi-volume VMs, use ImportImage instead.
For information about the import manifest referenced by this API action, see VM Import Manifest.
This API action is not supported by the Command Line Interface (CLI).
" }, "ImportKeyPair":{ "name":"ImportKeyPair", @@ -4857,7 +4857,7 @@ }, "input":{"shape":"ImportVolumeRequest"}, "output":{"shape":"ImportVolumeResult"}, - "documentation":"Creates an import volume task using metadata from the specified disk image.
This API action supports only single-volume VMs. To import multi-volume VMs, use ImportImage instead. To import a disk to a snapshot, use ImportSnapshot instead.
This API action is not supported by the Command Line Interface (CLI). For information about using the Amazon EC2 CLI, which is deprecated, see Importing Disks to Amazon EBS in the Amazon EC2 CLI Reference PDF file.
For information about the import manifest referenced by this API action, see VM Import Manifest.
" + "documentation":"This API action supports only single-volume VMs. To import multi-volume VMs, use ImportImage instead. To import a disk to a snapshot, use ImportSnapshot instead.
Creates an import volume task using metadata from the specified disk image.
For information about the import manifest referenced by this API action, see VM Import Manifest.
This API action is not supported by the Command Line Interface (CLI).
" }, "ListImagesInRecycleBin":{ "name":"ListImagesInRecycleBin", @@ -17321,7 +17321,7 @@ "type":"structure", "members":{ "KeyName":{ - "shape":"KeyPairName", + "shape":"KeyPairNameWithResolver", "documentation":"The name of the key pair.
" }, "KeyPairId":{ @@ -19306,11 +19306,7 @@ }, "DescribeCapacityBlockOfferingsRequest":{ "type":"structure", - "required":[ - "InstanceType", - "InstanceCount", - "CapacityDurationHours" - ], + "required":["CapacityDurationHours"], "members":{ "DryRun":{ "shape":"Boolean", @@ -29887,7 +29883,11 @@ }, "FleetCapacityReservationUsageStrategy":{ "type":"string", - "enum":["use-capacity-reservations-first"] + "enum":[ + "use-capacity-reservations-first", + "use-capacity-reservations-only", + "none" + ] }, "FleetData":{ "type":"structure", @@ -37483,7 +37483,15 @@ "r8g.48xlarge", "r8g.metal-24xl", "r8g.metal-48xl", - "mac2-m1ultra.metal" + "mac2-m1ultra.metal", + "g6e.xlarge", + "g6e.2xlarge", + "g6e.4xlarge", + "g6e.8xlarge", + "g6e.12xlarge", + "g6e.16xlarge", + "g6e.24xlarge", + "g6e.48xlarge" ] }, "InstanceTypeHypervisor":{ @@ -39755,6 +39763,7 @@ } }, "KeyPairName":{"type":"string"}, + "KeyPairNameWithResolver":{"type":"string"}, "KeyType":{ "type":"string", "enum":[ @@ -54262,7 +54271,7 @@ "members":{ "Description":{ "shape":"String", - "documentation":"The description of the snapshot.
", + "documentation":"The description of the disk image being imported.
", "locationName":"description" }, "DiskImageSize":{ diff --git a/tools/code-generation/api-descriptions/emr-serverless-2021-07-13.normal.json b/tools/code-generation/api-descriptions/emr-serverless-2021-07-13.normal.json index d5e5c737441..e55f2a9295a 100644 --- a/tools/code-generation/api-descriptions/emr-serverless-2021-07-13.normal.json +++ b/tools/code-generation/api-descriptions/emr-serverless-2021-07-13.normal.json @@ -2,9 +2,10 @@ "version":"2.0", "metadata":{ "apiVersion":"2021-07-13", + "auth":["aws.auth#sigv4"], "endpointPrefix":"emr-serverless", - "jsonVersion":"1.1", "protocol":"rest-json", + "protocols":["rest-json"], "serviceFullName":"EMR Serverless", "serviceId":"EMR Serverless", "signatureVersion":"v4", @@ -370,6 +371,10 @@ "interactiveConfiguration":{ "shape":"InteractiveConfiguration", "documentation":"The interactive configuration object that enables the interactive use cases for an application.
" + }, + "schedulerConfiguration":{ + "shape":"SchedulerConfiguration", + "documentation":"The scheduler configuration for batch and streaming jobs running on this application. Supported with release labels emr-7.0.0 and above.
" } }, "documentation":"Information about an application. Amazon EMR Serverless uses applications to run jobs.
" @@ -728,6 +733,10 @@ "interactiveConfiguration":{ "shape":"InteractiveConfiguration", "documentation":"The interactive configuration object that enables the interactive use cases to use when running an application.
" + }, + "schedulerConfiguration":{ + "shape":"SchedulerConfiguration", + "documentation":"The scheduler configuration for batch and streaming jobs running on this application. Supported with release labels emr-7.0.0 and above.
" } } }, @@ -793,7 +802,7 @@ "type":"string", "max":2048, "min":20, - "pattern":"arn:(aws[a-zA-Z0-9-]*):kms:[a-zA-Z0-9\\-]*:(\\d{12})?:key\\/[a-zA-Z0-9-]+" + "pattern":"arn:(aws[a-zA-Z0-9-]*):kms:[a-zA-Z0-9\\-]*:([0-9]{12}):key\\/[a-zA-Z0-9-]+" }, "EngineType":{ "type":"string", @@ -943,7 +952,7 @@ "type":"string", "max":2048, "min":20, - "pattern":"arn:(aws[a-zA-Z0-9-]*):iam::(\\d{12})?:(role((\\u002F)|(\\u002F[\\u0021-\\u007F]+\\u002F))[\\w+=,.@-]+)" + "pattern":"arn:(aws[a-zA-Z0-9-]*):iam::([0-9]{12}):(role((\\u002F)|(\\u002F[\\u0021-\\u007F]+\\u002F))[\\w+=,.@-]+)" }, "ImageConfiguration":{ "type":"structure", @@ -1168,6 +1177,18 @@ "attemptUpdatedAt":{ "shape":"Date", "documentation":"The date and time of when the job run attempt was last updated.
" + }, + "startedAt":{ + "shape":"Date", + "documentation":"The date and time when the job moved to the RUNNING state.
" + }, + "endedAt":{ + "shape":"Date", + "documentation":"The date and time when the job was terminated.
" + }, + "queuedDurationMilliseconds":{ + "shape":"Long", + "documentation":"The total time for a job in the QUEUED state in milliseconds.
" } }, "documentation":"Information about a job run. A job run is a unit of work, such as a Spark JAR, Hive query, or SparkSQL query, that you submit to an Amazon EMR Serverless application.
" @@ -1278,7 +1299,8 @@ "SUCCESS", "FAILED", "CANCELLING", - "CANCELLED" + "CANCELLED", + "QUEUED" ] }, "JobRunStateSet":{ @@ -1591,6 +1613,10 @@ "min":1, "pattern":"[a-zA-Z]+[-_]*[a-zA-Z]+" }, + "Long":{ + "type":"long", + "box":true + }, "ManagedPersistenceMonitoringConfiguration":{ "type":"structure", "members":{ @@ -1780,6 +1806,20 @@ }, "documentation":"The Amazon S3 configuration for monitoring log publishing. You can configure your jobs to send log information to Amazon S3.
" }, + "SchedulerConfiguration":{ + "type":"structure", + "members":{ + "queueTimeoutMinutes":{ + "shape":"Integer", + "documentation":"The maximum duration in minutes for the job in QUEUED state. If scheduler configuration is enabled on your application, the default value is 360 minutes (6 hours). The valid range is from 15 to 720.
" + }, + "maxConcurrentRuns":{ + "shape":"Integer", + "documentation":"The maximum concurrent job runs on this application. If scheduler configuration is enabled on your application, the default value is 15. The valid range is 1 to 1000.
" + } + }, + "documentation":"The scheduler configuration for batch and streaming jobs running on this application. Supported with release labels emr-7.0.0 and above.
" + }, "SecurityGroupIds":{ "type":"list", "member":{"shape":"SecurityGroupString"}, @@ -2128,6 +2168,10 @@ "monitoringConfiguration":{ "shape":"MonitoringConfiguration", "documentation":"The configuration setting for monitoring.
" + }, + "schedulerConfiguration":{ + "shape":"SchedulerConfiguration", + "documentation":"The scheduler configuration for batch and streaming jobs running on this application. Supported with release labels emr-7.0.0 and above.
" } } }, diff --git a/tools/code-generation/api-descriptions/glue-2017-03-31.normal.json b/tools/code-generation/api-descriptions/glue-2017-03-31.normal.json index 4031dda5621..6e4716c11d0 100644 --- a/tools/code-generation/api-descriptions/glue-2017-03-31.normal.json +++ b/tools/code-generation/api-descriptions/glue-2017-03-31.normal.json @@ -4230,13 +4230,13 @@ "shape":"AuthenticationType", "documentation":"A structure containing the authentication configuration in the CreateConnection request.
" }, - "SecretArn":{ - "shape":"SecretArn", - "documentation":"The secret manager ARN to store credentials in the CreateConnection request.
" - }, "OAuth2Properties":{ "shape":"OAuth2PropertiesInput", "documentation":"The properties for OAuth2 authentication in the CreateConnection request.
" + }, + "SecretArn":{ + "shape":"SecretArn", + "documentation":"The secret manager ARN to store credentials in the CreateConnection request.
" } }, "documentation":"A structure containing the authentication configuration in the CreateConnection request.
" @@ -4253,7 +4253,8 @@ "type":"string", "max":4096, "min":1, - "pattern":"\\S+" + "pattern":"\\S+", + "sensitive":true }, "AuthorizationCodeProperties":{ "type":"structure", @@ -6684,6 +6685,10 @@ "shape":"ConnectionProperties", "documentation":"These key-value pairs define parameters for the connection:
HOST
- The host URI: either the fully qualified domain name (FQDN) or the IPv4 address of the database host.
PORT
- The port number, between 1024 and 65535, of the port on which the database host is listening for database connections.
USER_NAME
- The name under which to log in to the database. The value string for USER_NAME
is \"USERNAME
\".
PASSWORD
- A password, if one is used, for the user name.
ENCRYPTED_PASSWORD
- When you enable connection password protection by setting ConnectionPasswordEncryption
in the Data Catalog encryption settings, this field stores the encrypted password.
JDBC_DRIVER_JAR_URI
- The Amazon Simple Storage Service (Amazon S3) path of the JAR file that contains the JDBC driver to use.
JDBC_DRIVER_CLASS_NAME
- The class name of the JDBC driver to use.
JDBC_ENGINE
- The name of the JDBC engine to use.
JDBC_ENGINE_VERSION
- The version of the JDBC engine to use.
CONFIG_FILES
- (Reserved for future use.)
INSTANCE_ID
- The instance ID to use.
JDBC_CONNECTION_URL
- The URL for connecting to a JDBC data source.
JDBC_ENFORCE_SSL
- A Boolean string (true, false) specifying whether Secure Sockets Layer (SSL) with hostname matching is enforced for the JDBC connection on the client. The default is false.
CUSTOM_JDBC_CERT
- An Amazon S3 location specifying the customer's root certificate. Glue uses this root certificate to validate the customer’s certificate when connecting to the customer database. Glue only handles X.509 certificates. The certificate provided must be DER-encoded and supplied in Base64 encoding PEM format.
SKIP_CUSTOM_JDBC_CERT_VALIDATION
- By default, this is false
. Glue validates the Signature algorithm and Subject Public Key Algorithm for the customer certificate. The only permitted algorithms for the Signature algorithm are SHA256withRSA, SHA384withRSA or SHA512withRSA. For the Subject Public Key Algorithm, the key length must be at least 2048. You can set the value of this property to true
to skip Glue’s validation of the customer certificate.
CUSTOM_JDBC_CERT_STRING
- A custom JDBC certificate string which is used for domain match or distinguished name match to prevent a man-in-the-middle attack. In Oracle database, this is used as the SSL_SERVER_CERT_DN
; in Microsoft SQL Server, this is used as the hostNameInCertificate
.
CONNECTION_URL
- The URL for connecting to a general (non-JDBC) data source.
SECRET_ID
- The secret ID used for the secret manager of credentials.
CONNECTOR_URL
- The connector URL for a MARKETPLACE or CUSTOM connection.
CONNECTOR_TYPE
- The connector type for a MARKETPLACE or CUSTOM connection.
CONNECTOR_CLASS_NAME
- The connector class name for a MARKETPLACE or CUSTOM connection.
KAFKA_BOOTSTRAP_SERVERS
- A comma-separated list of host and port pairs that are the addresses of the Apache Kafka brokers in a Kafka cluster to which a Kafka client will connect and bootstrap itself.
KAFKA_SSL_ENABLED
- Whether to enable or disable SSL on an Apache Kafka connection. Default value is \"true\".
KAFKA_CUSTOM_CERT
- The Amazon S3 URL for the private CA cert file (.pem format). The default is an empty string.
KAFKA_SKIP_CUSTOM_CERT_VALIDATION
- Whether to skip the validation of the CA cert file or not. Glue validates for three algorithms: SHA256withRSA, SHA384withRSA and SHA512withRSA. Default value is \"false\".
KAFKA_CLIENT_KEYSTORE
- The Amazon S3 location of the client keystore file for Kafka client side authentication (Optional).
KAFKA_CLIENT_KEYSTORE_PASSWORD
- The password to access the provided keystore (Optional).
KAFKA_CLIENT_KEY_PASSWORD
- A keystore can consist of multiple keys, so this is the password to access the client key to be used with the Kafka server side key (Optional).
ENCRYPTED_KAFKA_CLIENT_KEYSTORE_PASSWORD
- The encrypted version of the Kafka client keystore password (if the user has the Glue encrypt passwords setting selected).
ENCRYPTED_KAFKA_CLIENT_KEY_PASSWORD
- The encrypted version of the Kafka client key password (if the user has the Glue encrypt passwords setting selected).
KAFKA_SASL_MECHANISM
- \"SCRAM-SHA-512\"
, \"GSSAPI\"
, \"AWS_MSK_IAM\"
, or \"PLAIN\"
. These are the supported SASL Mechanisms.
KAFKA_SASL_PLAIN_USERNAME
- A plaintext username used to authenticate with the \"PLAIN\" mechanism.
KAFKA_SASL_PLAIN_PASSWORD
- A plaintext password used to authenticate with the \"PLAIN\" mechanism.
ENCRYPTED_KAFKA_SASL_PLAIN_PASSWORD
- The encrypted version of the Kafka SASL PLAIN password (if the user has the Glue encrypt passwords setting selected).
KAFKA_SASL_SCRAM_USERNAME
- A plaintext username used to authenticate with the \"SCRAM-SHA-512\" mechanism.
KAFKA_SASL_SCRAM_PASSWORD
- A plaintext password used to authenticate with the \"SCRAM-SHA-512\" mechanism.
ENCRYPTED_KAFKA_SASL_SCRAM_PASSWORD
- The encrypted version of the Kafka SASL SCRAM password (if the user has the Glue encrypt passwords setting selected).
KAFKA_SASL_SCRAM_SECRETS_ARN
- The Amazon Resource Name of a secret in Amazon Web Services Secrets Manager.
KAFKA_SASL_GSSAPI_KEYTAB
- The S3 location of a Kerberos keytab
file. A keytab stores long-term keys for one or more principals. For more information, see MIT Kerberos Documentation: Keytab.
KAFKA_SASL_GSSAPI_KRB5_CONF
- The S3 location of a Kerberos krb5.conf
file. A krb5.conf stores Kerberos configuration information, such as the location of the KDC server. For more information, see MIT Kerberos Documentation: krb5.conf.
KAFKA_SASL_GSSAPI_SERVICE
- The Kerberos service name, as set with sasl.kerberos.service.name
in your Kafka Configuration.
KAFKA_SASL_GSSAPI_PRINCIPAL
- The name of the Kerberos principal used by Glue. For more information, see Kafka Documentation: Configuring Kafka Brokers.
ROLE_ARN
- The role to be used for running queries.
REGION
- The Amazon Web Services Region where queries will be run.
WORKGROUP_NAME
- The name of an Amazon Redshift serverless workgroup or Amazon Athena workgroup in which queries will run.
CLUSTER_IDENTIFIER
- The cluster identifier of an Amazon Redshift cluster in which queries will run.
DATABASE
- The Amazon Redshift database that you are connecting to.
This field is not currently used.
" + }, "PhysicalConnectionRequirements":{ "shape":"PhysicalConnectionRequirements", "documentation":"The physical connection requirements, such as virtual private cloud (VPC) and SecurityGroup
, that are needed to make this connection successfully.
These key-value pairs define parameters for the connection.
" }, + "AthenaProperties":{ + "shape":"PropertyMap", + "documentation":"This field is not currently used.
" + }, "PhysicalConnectionRequirements":{ "shape":"PhysicalConnectionRequirements", "documentation":"The physical connection requirements, such as virtual private cloud (VPC) and SecurityGroup
, that are needed to successfully make this connection.
Specifies the job and session values that an admin configures in an Glue usage profile.
" }, + "PropertyKey":{ + "type":"string", + "max":128, + "min":1 + }, + "PropertyMap":{ + "type":"map", + "key":{"shape":"PropertyKey"}, + "value":{"shape":"PropertyValue"} + }, "PropertyPredicate":{ "type":"structure", "members":{ @@ -17978,6 +17997,11 @@ }, "documentation":"Defines a property predicate.
" }, + "PropertyValue":{ + "type":"string", + "max":2048, + "min":1 + }, "PublicKeysList":{ "type":"list", "member":{"shape":"GenericString"}, diff --git a/tools/code-generation/api-descriptions/rds-2014-10-31.normal.json b/tools/code-generation/api-descriptions/rds-2014-10-31.normal.json index 98b62383429..5c727bc9d4b 100644 --- a/tools/code-generation/api-descriptions/rds-2014-10-31.normal.json +++ b/tools/code-generation/api-descriptions/rds-2014-10-31.normal.json @@ -553,7 +553,6 @@ {"shape":"DBClusterNotFoundFault"}, {"shape":"MaxDBShardGroupLimitReached"}, {"shape":"InvalidDBClusterStateFault"}, - {"shape":"InvalidMaxAcuFault"}, {"shape":"UnsupportedDBEngineVersionFault"}, {"shape":"InvalidVPCNetworkStateFault"} ], @@ -2166,8 +2165,7 @@ "errors":[ {"shape":"InvalidDBClusterStateFault"}, {"shape":"DBShardGroupAlreadyExistsFault"}, - {"shape":"DBShardGroupNotFoundFault"}, - {"shape":"InvalidMaxAcuFault"} + {"shape":"DBShardGroupNotFoundFault"} ], "documentation":"Modifies the settings of an Aurora Limitless Database DB shard group. You can change one or more settings by specifying these parameters and the new values in the request.
" }, @@ -2703,7 +2701,7 @@ {"shape":"CertificateNotFoundFault"}, {"shape":"TenantDatabaseQuotaExceededFault"} ], - "documentation":"Creates a new DB instance from a DB snapshot. The target database is created from the source database restore point with most of the source's original configuration, including the default security group and DB parameter group. By default, the new DB instance is created as a Single-AZ deployment, except when the instance is a SQL Server instance that has an option group associated with mirroring. In this case, the instance becomes a Multi-AZ deployment, not a Single-AZ deployment.
If you want to replace your original DB instance with the new, restored DB instance, then rename your original DB instance before you call the RestoreDBInstanceFromDBSnapshot
operation. RDS doesn't allow two DB instances with the same name. After you have renamed your original DB instance with a different identifier, then you can pass the original name of the DB instance as the DBInstanceIdentifier
in the call to the RestoreDBInstanceFromDBSnapshot
operation. The result is that you replace the original DB instance with the DB instance created from the snapshot.
If you are restoring from a shared manual DB snapshot, the DBSnapshotIdentifier
must be the ARN of the shared DB snapshot.
To restore from a DB snapshot with an unsupported engine version, you must first upgrade the engine version of the snapshot. For more information about upgrading a RDS for MySQL DB snapshot engine version, see Upgrading a MySQL DB snapshot engine version. For more information about upgrading a RDS for PostgreSQL DB snapshot engine version, see Upgrading a PostgreSQL DB snapshot engine version.
This command doesn't apply to Aurora MySQL and Aurora PostgreSQL. For Aurora, use RestoreDBClusterFromSnapshot
.
Creates a new DB instance from a DB snapshot. The target database is created from the source database restore point with most of the source's original configuration, including the default security group and DB parameter group. By default, the new DB instance is created as a Single-AZ deployment, except when the instance is a SQL Server instance that has an option group associated with mirroring. In this case, the instance becomes a Multi-AZ deployment, not a Single-AZ deployment.
If you want to replace your original DB instance with the new, restored DB instance, then rename your original DB instance before you call the RestoreDBInstanceFromDBSnapshot
operation. RDS doesn't allow two DB instances with the same name. After you have renamed your original DB instance with a different identifier, then you can pass the original name of the DB instance as the DBInstanceIdentifier
in the call to the RestoreDBInstanceFromDBSnapshot
operation. The result is that you replace the original DB instance with the DB instance created from the snapshot.
If you are restoring from a shared manual DB snapshot, the DBSnapshotIdentifier
must be the ARN of the shared DB snapshot.
To restore from a DB snapshot with an unsupported engine version, you must first upgrade the engine version of the snapshot. For more information about upgrading a RDS for MySQL DB snapshot engine version, see Upgrading a MySQL DB snapshot engine version. For more information about upgrading a RDS for PostgreSQL DB snapshot engine version, see Upgrading a PostgreSQL DB snapshot engine version.
This command doesn't apply to Aurora MySQL and Aurora PostgreSQL. For Aurora, use RestoreDBClusterFromSnapshot
.
The name of the DB parameter group to associate with this DB instance.
If you don't specify a value for DBParameterGroupName
, then Amazon RDS uses the DBParameterGroup
of the source DB instance for a same Region read replica, or the default DBParameterGroup
for the specified DB engine for a cross-Region read replica.
Specifying a parameter group for this operation is only supported for MySQL DB instances for cross-Region read replicas and for Oracle DB instances. It isn't supported for MySQL DB instances for same Region read replicas or for RDS Custom.
Constraints:
Must be 1 to 255 letters, numbers, or hyphens.
First character must be a letter.
Can't end with a hyphen or contain two consecutive hyphens.
The name of the DB parameter group to associate with this read replica DB instance.
For Single-AZ or Multi-AZ DB instance read replica instances, if you don't specify a value for DBParameterGroupName
, then Amazon RDS uses the DBParameterGroup
of the source DB instance for a same Region read replica, or the default DBParameterGroup
for the specified DB engine for a cross-Region read replica.
For Multi-AZ DB cluster same Region read replica instances, if you don't specify a value for DBParameterGroupName
, then Amazon RDS uses the default DBParameterGroup
.
Specifying a parameter group for this operation is only supported for MySQL DB instances for cross-Region read replicas, for Multi-AZ DB cluster read replica instances, and for Oracle DB instances. It isn't supported for MySQL DB instances for same Region read replicas or for RDS Custom.
Constraints:
Must be 1 to 255 letters, numbers, or hyphens.
First character must be a letter.
Can't end with a hyphen or contain two consecutive hyphens.
Specifies whether to create standby instances for the DB shard group. Valid values are the following:
0 - Creates a single, primary DB instance for each physical shard. This is the default value, and the only one supported for the preview.
1 - Creates a primary DB instance and a standby instance in a different Availability Zone (AZ) for each physical shard.
2 - Creates a primary DB instance and two standby instances in different AZs for each physical shard.
Specifies whether to create standby DB shard groups for the DB shard group. Valid values are the following:
0 - Creates a DB shard group without a standby DB shard group. This is the default value.
1 - Creates a DB shard group with a standby DB shard group in a different Availability Zone (AZ).
2 - Creates a DB shard group with two standby DB shard groups in two different AZs.
Specifies whether to create standby instances for the DB shard group. Valid values are the following:
0 - Creates a single, primary DB instance for each physical shard. This is the default value, and the only one supported for the preview.
1 - Creates a primary DB instance and a standby instance in a different Availability Zone (AZ) for each physical shard.
2 - Creates a primary DB instance and two standby instances in different AZs for each physical shard.
Specifies whether to create standby DB shard groups for the DB shard group. Valid values are the following:
0 - Creates a DB shard group without a standby DB shard group. This is the default value.
1 - Creates a DB shard group with a standby DB shard group in a different Availability Zone (AZ).
2 - Creates a DB shard group with two standby DB shard groups in two different AZs.
The connection endpoint for the DB shard group.
" + }, + "DBShardGroupArn":{ + "shape":"String", + "documentation":"The Amazon Resource Name (ARN) for the DB shard group.
" } } }, @@ -11783,18 +11785,6 @@ }, "exception":true }, - "InvalidMaxAcuFault":{ - "type":"structure", - "members":{ - }, - "documentation":"The maximum capacity of the DB shard group must be 48-7168 Aurora capacity units (ACUs).
", - "error":{ - "code":"InvalidMaxAcu", - "httpStatusCode":400, - "senderFault":true - }, - "exception":true - }, "InvalidOptionGroupStateFault":{ "type":"structure", "members":{ @@ -12858,6 +12848,10 @@ "MinACU":{ "shape":"DoubleOptional", "documentation":"The minimum capacity of the DB shard group in Aurora capacity units (ACUs).
" + }, + "ComputeRedundancy":{ + "shape":"IntegerOptional", + "documentation":"Specifies whether to create standby DB shard groups for the DB shard group. Valid values are the following:
0 - Creates a DB shard group without a standby DB shard group. This is the default value.
1 - Creates a DB shard group with a standby DB shard group in a different Availability Zone (AZ).
2 - Creates a DB shard group with two standby DB shard groups in two different AZs.
Retrieves the status of your account's Amazon Web Services service access, and validates the service linked role required to access the multi-account search feature. Only the management account or a delegated administrator with service access enabled can invoke this API call.
" + "documentation":"Retrieves the status of your account's Amazon Web Services service access, and validates the service linked role required to access the multi-account search feature. Only the management account can invoke this API call.
" }, "GetDefaultView":{ "name":"GetDefaultView", @@ -247,6 +249,25 @@ ], "documentation":"Retrieves a list of a member's indexes in all Amazon Web Services Regions that are currently collecting resource information for Amazon Web Services Resource Explorer. Only the management account or a delegated administrator with service access enabled can invoke this API call.
" }, + "ListResources":{ + "name":"ListResources", + "http":{ + "method":"POST", + "requestUri":"/ListResources", + "responseCode":200 + }, + "input":{"shape":"ListResourcesInput"}, + "output":{"shape":"ListResourcesOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"UnauthorizedException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"Returns a list of resources and their details that match the specified criteria. This query must use a view. If you don’t explicitly specify a view, then Resource Explorer uses the default view for the Amazon Web Services Region in which you call this operation.
" + }, "ListSupportedResourceTypes":{ "name":"ListSupportedResourceTypes", "http":{ @@ -903,6 +924,67 @@ } } }, + "ListResourcesInput":{ + "type":"structure", + "members":{ + "Filters":{"shape":"SearchFilter"}, + "MaxResults":{ + "shape":"ListResourcesInputMaxResultsInteger", + "documentation":"The maximum number of results that you want included on each page of the response. If you do not include this parameter, it defaults to a value appropriate to the operation. If additional items exist beyond those included in the current response, the NextToken
response element is present and has a value (is not null). Include that value as the NextToken
request parameter in the next call to the operation to get the next part of the results.
An API operation can return fewer results than the maximum even when there are more results available. You should check NextToken
after every operation to ensure that you receive all of the results.
The parameter for receiving additional results if you receive a NextToken
response in a previous request. A NextToken
response indicates that more output is available. Set this parameter to the value of the previous call's NextToken
response to indicate where the output should continue from. The pagination tokens expire after 24 hours.
Specifies the Amazon resource name (ARN) of the view to use for the query. If you don't specify a value for this parameter, then the operation automatically uses the default view for the Amazon Web Services Region in which you called this operation. If the Region either doesn't have a default view or if you don't have permission to use the default view, then the operation fails with a 401 Unauthorized exception.
" + } + } + }, + "ListResourcesInputMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":1000, + "min":1 + }, + "ListResourcesInputNextTokenString":{ + "type":"string", + "max":2048, + "min":1 + }, + "ListResourcesInputViewArnString":{ + "type":"string", + "max":1000, + "min":0 + }, + "ListResourcesOutput":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"ListResourcesOutputNextTokenString", + "documentation":"If present, indicates that more output is available than is included in the current response. Use this value in the NextToken
request parameter in a subsequent call to the operation to get the next part of the output. You should repeat this until the NextToken
response element comes back as null
. The pagination tokens expire after 24 hours.
The list of structures that describe the resources that match the query.
" + }, + "ViewArn":{ + "shape":"ListResourcesOutputViewArnString", + "documentation":"The Amazon resource name (ARN) of the view that this operation used to perform the search.
" + } + } + }, + "ListResourcesOutputNextTokenString":{ + "type":"string", + "max":2048, + "min":1 + }, + "ListResourcesOutputViewArnString":{ + "type":"string", + "max":1011, + "min":1 + }, "ListSupportedResourceTypesInput":{ "type":"structure", "members":{ @@ -1035,7 +1117,7 @@ }, "QueryString":{ "type":"string", - "max":1011, + "max":1280, "min":0, "sensitive":true }, @@ -1072,7 +1154,7 @@ }, "Service":{ "shape":"String", - "documentation":"The Amazon Web Service that owns the resource and is responsible for creating and updating it.
" + "documentation":"The Amazon Web Servicesservice that owns the resource and is responsible for creating and updating it.
" } }, "documentation":"A resource in Amazon Web Services that Amazon Web Services Resource Explorer has discovered, and for which it has stored information in the index of the Amazon Web Services Region that contains the resource.
" @@ -1259,7 +1341,7 @@ }, "Service":{ "shape":"String", - "documentation":"The Amazon Web Service that is associated with the resource type. This is the primary service that lets you create and interact with resources of this type.
" + "documentation":"The Amazon Web Servicesservice that is associated with the resource type. This is the primary service that lets you create and interact with resources of this type.
" } }, "documentation":"A structure that describes a resource type supported by Amazon Web Services Resource Explorer.
"