From d91b82e76c73dde99b14cfc59560b5084cc01fa9 Mon Sep 17 00:00:00 2001 From: aws-sdk-cpp-automation Date: Mon, 23 Sep 2024 18:16:26 +0000 Subject: [PATCH] This release adds support for job concurrency and queuing configuration at Application level. Support ComputeRedundancy parameter in ModifyDBShardGroup API. Add DBShardGroupArn in DBShardGroup API response. Remove InvalidMaxAcuFault from CreateDBShardGroup and ModifyDBShardGroup API. Both API will throw InvalidParameterValueException for invalid ACU configuration. Amazon Bedrock Prompt Flows and Prompt Management now supports using inference profiles to increase throughput and improve resilience. Added AthenaProperties parameter to Glue Connections, allowing Athena to store service specific properties on Glue Connections. Documentation updates for Amazon API Gateway Amazon EC2 G6e instances powered by NVIDIA L40S Tensor Core GPUs are the most cost-efficient GPU instances for deploying generative AI models and the highest performance GPU instances for spatial computing workloads. AWS Resource Explorer released ListResources feature which allows customers to list all indexed AWS resources within a view. List/Get/Update/Delete/CreateDataCatalog now integrate with AWS Glue connections. Users can create a Glue connection through Athena or use a Glue connection to define their Athena federated parameters. --- VERSION | 2 +- .../apigateway/model/CreateDomainNameResult.h | 3 +- .../include/aws/apigateway/model/DomainName.h | 3 +- .../apigateway/model/GetDomainNameResult.h | 3 +- .../apigateway/model/UpdateDomainNameResult.h | 3 +- .../include/aws/athena/model/ConnectionType.h | 59 ++++ .../athena/model/CreateDataCatalogRequest.h | 21 +- .../athena/model/CreateDataCatalogResult.h | 12 + .../include/aws/athena/model/DataCatalog.h | 90 +++++- .../aws/athena/model/DataCatalogStatus.h | 38 +++ .../aws/athena/model/DataCatalogSummary.h | 71 +++++ .../aws/athena/model/DataCatalogType.h | 3 +- .../athena/model/DeleteDataCatalogResult.h | 12 + .../source/model/ConnectionType.cpp | 268 ++++++++++++++++++ .../source/model/CreateDataCatalogResult.cpp | 8 +- .../source/model/DataCatalog.cpp | 44 ++- .../source/model/DataCatalogStatus.cpp | 121 ++++++++ .../source/model/DataCatalogSummary.cpp | 44 ++- .../source/model/DataCatalogType.cpp | 7 + .../source/model/DeleteDataCatalogResult.cpp | 8 +- .../bedrock-agent/model/CreateAgentRequest.h | 4 +- .../KnowledgeBaseFlowNodeConfiguration.h | 7 +- .../model/PromptFlowNodeInlineConfiguration.h | 4 +- .../model/PromptOverrideConfiguration.h | 2 +- .../aws/bedrock-agent/model/PromptVariant.h | 5 +- .../include/aws/ec2/EC2Client.h | 36 +-- .../FleetCapacityReservationUsageStrategy.h | 4 +- .../include/aws/ec2/model/InstanceType.h | 10 +- .../aws/ec2/model/SnapshotTaskDetail.h | 2 +- .../FleetCapacityReservationUsageStrategy.cpp | 14 + .../source/model/InstanceType.cpp | 72 +++++ .../aws/emr-serverless/model/Application.h | 17 ++ .../model/CreateApplicationRequest.h | 17 ++ .../include/aws/emr-serverless/model/JobRun.h | 43 +++ .../aws/emr-serverless/model/JobRunState.h | 3 +- .../model/SchedulerConfiguration.h | 74 +++++ .../model/UpdateApplicationRequest.h | 17 ++ .../source/model/Application.cpp | 16 +- .../source/model/CreateApplicationRequest.cpp | 9 +- .../source/model/JobRun.cpp | 43 ++- .../source/model/JobRunState.cpp | 7 + .../source/model/SchedulerConfiguration.cpp | 75 +++++ .../source/model/UpdateApplicationRequest.cpp | 9 +- .../model/AuthenticationConfigurationInput.h | 32 +-- 
.../include/aws/glue/model/Connection.h | 22 ++ .../include/aws/glue/model/ConnectionInput.h | 22 ++ .../AuthenticationConfigurationInput.cpp | 24 +- .../source/model/Connection.cpp | 22 ++ .../source/model/ConnectionInput.cpp | 22 ++ .../include/aws/rds/RDSClient.h | 2 +- .../include/aws/rds/RDSErrors.h | 1 - .../CreateDBInstanceReadReplicaRequest.h | 27 +- .../aws/rds/model/CreateDBShardGroupRequest.h | 13 +- .../aws/rds/model/CreateDBShardGroupResult.h | 28 +- .../include/aws/rds/model/DBShardGroup.h | 30 +- .../aws/rds/model/DeleteDBShardGroupResult.h | 28 +- .../aws/rds/model/ModifyDBShardGroupRequest.h | 18 ++ .../aws/rds/model/ModifyDBShardGroupResult.h | 28 +- .../aws/rds/model/RebootDBShardGroupResult.h | 28 +- .../src/aws-cpp-sdk-rds/source/RDSErrors.cpp | 18 +- .../source/model/CreateDBShardGroupResult.cpp | 5 + .../source/model/DBShardGroup.cpp | 18 +- .../source/model/DeleteDBShardGroupResult.cpp | 5 + .../model/ModifyDBShardGroupRequest.cpp | 9 +- .../source/model/ModifyDBShardGroupResult.cpp | 5 + .../source/model/RebootDBShardGroupResult.cpp | 5 + .../ResourceExplorer2Client.h | 32 ++- .../ResourceExplorer2ServiceClientModel.h | 6 + .../model/ListResourcesRequest.h | 119 ++++++++ .../model/ListResourcesResult.h | 104 +++++++ .../aws/resource-explorer-2/model/Resource.h | 4 +- .../model/SupportedResourceType.h | 6 +- .../source/ResourceExplorer2Client.cpp | 28 ++ .../source/model/ListResourcesRequest.cpp | 57 ++++ .../source/model/ListResourcesResult.cpp | 63 ++++ .../include/aws/core/VersionConfig.h | 4 +- .../apigateway-2015-07-09.normal.json | 2 +- .../athena-2017-05-18.normal.json | 86 +++++- .../bedrock-agent-2023-06-05.normal.json | 20 +- .../ec2-2016-11-15.normal.json | 33 ++- .../emr-serverless-2021-07-13.normal.json | 52 +++- .../glue-2017-03-31.normal.json | 34 ++- .../rds-2014-10-31.normal.json | 32 +-- ...resource-explorer-2-2022-07-28.normal.json | 92 +++++- 84 files changed, 2276 insertions(+), 220 deletions(-) create mode 100644 generated/src/aws-cpp-sdk-athena/include/aws/athena/model/ConnectionType.h create mode 100644 generated/src/aws-cpp-sdk-athena/include/aws/athena/model/DataCatalogStatus.h create mode 100644 generated/src/aws-cpp-sdk-athena/source/model/ConnectionType.cpp create mode 100644 generated/src/aws-cpp-sdk-athena/source/model/DataCatalogStatus.cpp create mode 100644 generated/src/aws-cpp-sdk-emr-serverless/include/aws/emr-serverless/model/SchedulerConfiguration.h create mode 100644 generated/src/aws-cpp-sdk-emr-serverless/source/model/SchedulerConfiguration.cpp create mode 100644 generated/src/aws-cpp-sdk-resource-explorer-2/include/aws/resource-explorer-2/model/ListResourcesRequest.h create mode 100644 generated/src/aws-cpp-sdk-resource-explorer-2/include/aws/resource-explorer-2/model/ListResourcesResult.h create mode 100644 generated/src/aws-cpp-sdk-resource-explorer-2/source/model/ListResourcesRequest.cpp create mode 100644 generated/src/aws-cpp-sdk-resource-explorer-2/source/model/ListResourcesResult.cpp diff --git a/VERSION b/VERSION index 65371e010b3..d936751dce9 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -1.11.410 \ No newline at end of file +1.11.411 \ No newline at end of file diff --git a/generated/src/aws-cpp-sdk-apigateway/include/aws/apigateway/model/CreateDomainNameResult.h b/generated/src/aws-cpp-sdk-apigateway/include/aws/apigateway/model/CreateDomainNameResult.h index fd4f84bdcc9..8d6b44f36fa 100644 --- a/generated/src/aws-cpp-sdk-apigateway/include/aws/apigateway/model/CreateDomainNameResult.h +++ 
b/generated/src/aws-cpp-sdk-apigateway/include/aws/apigateway/model/CreateDomainNameResult.h @@ -90,7 +90,8 @@ namespace Model ///@{ /** *

The timestamp when the certificate that was used by edge-optimized endpoint - * for this domain name was uploaded.

+ * for this domain name was uploaded. API Gateway doesn't change this value if you + * update the certificate.

*/ inline const Aws::Utils::DateTime& GetCertificateUploadDate() const{ return m_certificateUploadDate; } inline void SetCertificateUploadDate(const Aws::Utils::DateTime& value) { m_certificateUploadDate = value; } diff --git a/generated/src/aws-cpp-sdk-apigateway/include/aws/apigateway/model/DomainName.h b/generated/src/aws-cpp-sdk-apigateway/include/aws/apigateway/model/DomainName.h index 75b2d7a3931..dc496f0c570 100644 --- a/generated/src/aws-cpp-sdk-apigateway/include/aws/apigateway/model/DomainName.h +++ b/generated/src/aws-cpp-sdk-apigateway/include/aws/apigateway/model/DomainName.h @@ -93,7 +93,8 @@ namespace Model ///@{ /** *

The timestamp when the certificate that was used by edge-optimized endpoint - * for this domain name was uploaded.

+ * for this domain name was uploaded. API Gateway doesn't change this value if you + * update the certificate.

*/ inline const Aws::Utils::DateTime& GetCertificateUploadDate() const{ return m_certificateUploadDate; } inline bool CertificateUploadDateHasBeenSet() const { return m_certificateUploadDateHasBeenSet; } diff --git a/generated/src/aws-cpp-sdk-apigateway/include/aws/apigateway/model/GetDomainNameResult.h b/generated/src/aws-cpp-sdk-apigateway/include/aws/apigateway/model/GetDomainNameResult.h index 5e3fdd4d371..49e153b15ba 100644 --- a/generated/src/aws-cpp-sdk-apigateway/include/aws/apigateway/model/GetDomainNameResult.h +++ b/generated/src/aws-cpp-sdk-apigateway/include/aws/apigateway/model/GetDomainNameResult.h @@ -90,7 +90,8 @@ namespace Model ///@{ /** *

The timestamp when the certificate that was used by edge-optimized endpoint - * for this domain name was uploaded.

+ * for this domain name was uploaded. API Gateway doesn't change this value if you + * update the certificate.

*/ inline const Aws::Utils::DateTime& GetCertificateUploadDate() const{ return m_certificateUploadDate; } inline void SetCertificateUploadDate(const Aws::Utils::DateTime& value) { m_certificateUploadDate = value; } diff --git a/generated/src/aws-cpp-sdk-apigateway/include/aws/apigateway/model/UpdateDomainNameResult.h b/generated/src/aws-cpp-sdk-apigateway/include/aws/apigateway/model/UpdateDomainNameResult.h index 42046948dde..85877f48353 100644 --- a/generated/src/aws-cpp-sdk-apigateway/include/aws/apigateway/model/UpdateDomainNameResult.h +++ b/generated/src/aws-cpp-sdk-apigateway/include/aws/apigateway/model/UpdateDomainNameResult.h @@ -90,7 +90,8 @@ namespace Model ///@{ /** *

The timestamp when the certificate that was used by edge-optimized endpoint - * for this domain name was uploaded.

+ * for this domain name was uploaded. API Gateway doesn't change this value if you + * update the certificate.

*/ inline const Aws::Utils::DateTime& GetCertificateUploadDate() const{ return m_certificateUploadDate; } inline void SetCertificateUploadDate(const Aws::Utils::DateTime& value) { m_certificateUploadDate = value; } diff --git a/generated/src/aws-cpp-sdk-athena/include/aws/athena/model/ConnectionType.h b/generated/src/aws-cpp-sdk-athena/include/aws/athena/model/ConnectionType.h new file mode 100644 index 00000000000..2baba5297f1 --- /dev/null +++ b/generated/src/aws-cpp-sdk-athena/include/aws/athena/model/ConnectionType.h @@ -0,0 +1,59 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#pragma once +#include +#include + +namespace Aws +{ +namespace Athena +{ +namespace Model +{ + enum class ConnectionType + { + NOT_SET, + DYNAMODB, + MYSQL, + POSTGRESQL, + REDSHIFT, + ORACLE, + SYNAPSE, + SQLSERVER, + DB2, + OPENSEARCH, + BIGQUERY, + GOOGLECLOUDSTORAGE, + HBASE, + DOCUMENTDB, + MSK, + NEPTUNE, + CMDB, + TPCDS, + REDIS, + CLOUDWATCH, + TIMESTREAM, + SAPHANA, + SNOWFLAKE, + TERADATA, + VERTICA, + CLOUDERAIMPALA, + CLOUDERAHIVE, + HORTONWORKSHIVE, + DATALAKEGEN2, + DB2AS400, + CLOUDWATCHMETRICS + }; + +namespace ConnectionTypeMapper +{ +AWS_ATHENA_API ConnectionType GetConnectionTypeForName(const Aws::String& name); + +AWS_ATHENA_API Aws::String GetNameForConnectionType(ConnectionType value); +} // namespace ConnectionTypeMapper +} // namespace Model +} // namespace Athena +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-athena/include/aws/athena/model/CreateDataCatalogRequest.h b/generated/src/aws-cpp-sdk-athena/include/aws/athena/model/CreateDataCatalogRequest.h index 403868720eb..b1e4bcef001 100644 --- a/generated/src/aws-cpp-sdk-athena/include/aws/athena/model/CreateDataCatalogRequest.h +++ b/generated/src/aws-cpp-sdk-athena/include/aws/athena/model/CreateDataCatalogRequest.h @@ -58,8 +58,10 @@ namespace Model ///@{ /** *

The type of data catalog to create: LAMBDA for a federated - * catalog, HIVE for an external hive metastore, or GLUE - * for an Glue Data Catalog.

+ * catalog, GLUE for a Glue Data Catalog, and HIVE for + * an external Apache Hive metastore. FEDERATED is a federated catalog + * for which Athena creates the connection and the Lambda function for you based on + * the parameters that you pass.

*/ inline const DataCatalogType& GetType() const{ return m_type; } inline bool TypeHasBeenSet() const { return m_typeHasBeenSet; } @@ -106,7 +108,20 @@ namespace Model * catalog-id=catalog_id

  • The * GLUE data catalog type also applies to the default * AwsDataCatalog that already exists in your account, of which you - * can have only one and cannot modify.

+ * can have only one and cannot modify.

  • The + * FEDERATED data catalog type uses one of the following parameters, + * but not both. Use connection-arn for an existing Glue connection. + * Use connection-type and connection-properties to + * specify the configuration setting for a new connection.

    • + * connection-arn:<glue_connection_arn_to_reuse>

      + *
    • lambda-role-arn (optional): The execution role to + * use for the Lambda function. If not provided, one is created.

    • + * connection-type:MYSQL|REDSHIFT|...., + * connection-properties:"<json_string>"

      For + * <json_string> , use escaped JSON text, as in the + * following example.

      + * "{\"spill_bucket\":\"my_spill\",\"spill_prefix\":\"athena-spill\",\"host\":\"abc12345.snowflakecomputing.com\",\"port\":\"1234\",\"warehouse\":\"DEV_WH\",\"database\":\"TEST\",\"schema\":\"PUBLIC\",\"SecretArn\":\"arn:aws:secretsmanager:ap-south-1:111122223333:secret:snowflake-XHb67j\"}" + *
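To make the new FEDERATED flow concrete, here is a minimal C++ sketch of creating a federated catalog through the SDK, assuming the connection-type/connection-properties path described above. The catalog name, MySQL host, database, and Secrets Manager ARN are placeholders; only DataCatalogType::FEDERATED, the parameters map, and the DataCatalog carried on the result come from this patch.

```cpp
#include <aws/core/Aws.h>
#include <aws/athena/AthenaClient.h>
#include <aws/athena/model/CreateDataCatalogRequest.h>
#include <aws/athena/model/DataCatalogType.h>
#include <aws/athena/model/DataCatalogStatus.h>
#include <iostream>

int main()
{
    Aws::SDKOptions options;
    Aws::InitAPI(options);
    {
        Aws::Athena::AthenaClient athena;

        Aws::Athena::Model::CreateDataCatalogRequest request;
        request.SetName("my_federated_catalog");                          // placeholder name
        request.SetType(Aws::Athena::Model::DataCatalogType::FEDERATED);  // new enum value
        // Either reuse an existing Glue connection with connection-arn, or let Athena
        // create one from connection-type and connection-properties (escaped JSON).
        request.AddParameters("connection-type", "MYSQL");
        request.AddParameters("connection-properties",
            "{\"host\":\"mydb.example.com\",\"port\":\"3306\",\"database\":\"sales\","
            "\"SecretArn\":\"arn:aws:secretsmanager:us-east-1:111122223333:secret:mysql-creds\"}");

        auto outcome = athena.CreateDataCatalog(request);
        if (outcome.IsSuccess())
        {
            // New in this release: the result carries the DataCatalog, including its status.
            const auto& catalog = outcome.GetResult().GetDataCatalog();
            std::cout << catalog.GetName() << " status: "
                      << Aws::Athena::Model::DataCatalogStatusMapper::GetNameForDataCatalogStatus(
                             catalog.GetStatus())
                      << std::endl;
        }
        else
        {
            std::cerr << outcome.GetError().GetMessage() << std::endl;
        }
    }
    Aws::ShutdownAPI(options);
    return 0;
}
```

The same parameters map takes connection-arn instead when an existing Glue connection is being reused.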

  • */ inline const Aws::Map& GetParameters() const{ return m_parameters; } inline bool ParametersHasBeenSet() const { return m_parametersHasBeenSet; } diff --git a/generated/src/aws-cpp-sdk-athena/include/aws/athena/model/CreateDataCatalogResult.h b/generated/src/aws-cpp-sdk-athena/include/aws/athena/model/CreateDataCatalogResult.h index 875f7e6e17e..499060e4621 100644 --- a/generated/src/aws-cpp-sdk-athena/include/aws/athena/model/CreateDataCatalogResult.h +++ b/generated/src/aws-cpp-sdk-athena/include/aws/athena/model/CreateDataCatalogResult.h @@ -5,6 +5,7 @@ #pragma once #include +#include #include #include @@ -32,6 +33,15 @@ namespace Model AWS_ATHENA_API CreateDataCatalogResult& operator=(const Aws::AmazonWebServiceResult& result); + ///@{ + + inline const DataCatalog& GetDataCatalog() const{ return m_dataCatalog; } + inline void SetDataCatalog(const DataCatalog& value) { m_dataCatalog = value; } + inline void SetDataCatalog(DataCatalog&& value) { m_dataCatalog = std::move(value); } + inline CreateDataCatalogResult& WithDataCatalog(const DataCatalog& value) { SetDataCatalog(value); return *this;} + inline CreateDataCatalogResult& WithDataCatalog(DataCatalog&& value) { SetDataCatalog(std::move(value)); return *this;} + ///@} + ///@{ inline const Aws::String& GetRequestId() const{ return m_requestId; } @@ -44,6 +54,8 @@ namespace Model ///@} private: + DataCatalog m_dataCatalog; + Aws::String m_requestId; }; diff --git a/generated/src/aws-cpp-sdk-athena/include/aws/athena/model/DataCatalog.h b/generated/src/aws-cpp-sdk-athena/include/aws/athena/model/DataCatalog.h index 4b1e8efe7ab..ec7e9c4a6c0 100644 --- a/generated/src/aws-cpp-sdk-athena/include/aws/athena/model/DataCatalog.h +++ b/generated/src/aws-cpp-sdk-athena/include/aws/athena/model/DataCatalog.h @@ -8,6 +8,8 @@ #include #include #include +#include +#include #include namespace Aws @@ -76,8 +78,10 @@ namespace Model ///@{ /** *

    The type of data catalog to create: LAMBDA for a federated - * catalog, HIVE for an external hive metastore, or GLUE - * for an Glue Data Catalog.

    + * catalog, GLUE for a Glue Data Catalog, and HIVE for + * an external Apache Hive metastore. FEDERATED is a federated catalog + * for which Athena creates the connection and the Lambda function for you based on + * the parameters that you pass.

    */ inline const DataCatalogType& GetType() const{ return m_type; } inline bool TypeHasBeenSet() const { return m_typeHasBeenSet; } @@ -110,7 +114,18 @@ namespace Model * catalog-id=catalog_id

    • The * GLUE data catalog type also applies to the default * AwsDataCatalog that already exists in your account, of which you - * can have only one and cannot modify.

    + * can have only one and cannot modify.

  • The + * FEDERATED data catalog type uses one of the following parameters, + * but not both. Use connection-arn for an existing Glue connection. + * Use connection-type and connection-properties to + * specify the configuration setting for a new connection.

    • + * connection-arn:<glue_connection_arn_to_reuse>

      + *
    • connection-type:MYSQL|REDSHIFT|...., + * connection-properties:"<json_string>"

      For + * <json_string> , use escaped JSON text, as in the + * following example.

      + * "{\"spill_bucket\":\"my_spill\",\"spill_prefix\":\"athena-spill\",\"host\":\"abc12345.snowflakecomputing.com\",\"port\":\"1234\",\"warehouse\":\"DEV_WH\",\"database\":\"TEST\",\"schema\":\"PUBLIC\",\"SecretArn\":\"arn:aws:secretsmanager:ap-south-1:111122223333:secret:snowflake-XHb67j\"}" + *

  • */ inline const Aws::Map& GetParameters() const{ return m_parameters; } inline bool ParametersHasBeenSet() const { return m_parametersHasBeenSet; } @@ -126,6 +141,66 @@ namespace Model inline DataCatalog& AddParameters(Aws::String&& key, const char* value) { m_parametersHasBeenSet = true; m_parameters.emplace(std::move(key), value); return *this; } inline DataCatalog& AddParameters(const char* key, const char* value) { m_parametersHasBeenSet = true; m_parameters.emplace(key, value); return *this; } ///@} + + ///@{ + /** + *

    The status of the creation or deletion of the data catalog.

    • + *

      The LAMBDA, GLUE, and HIVE data + * catalog types are created synchronously. Their status is either + * CREATE_COMPLETE or CREATE_FAILED.

    • + *

      The FEDERATED data catalog type is created asynchronously.

      + *

    Data catalog creation status:

    • + * CREATE_IN_PROGRESS: Federated data catalog creation in + * progress.

    • CREATE_COMPLETE: Data catalog creation + * complete.

    • CREATE_FAILED: Data catalog could not + * be created.

    • CREATE_FAILED_CLEANUP_IN_PROGRESS: + * Federated data catalog creation failed and is being removed.

    • + * CREATE_FAILED_CLEANUP_COMPLETE: Federated data catalog creation + * failed and was removed.

    • + * CREATE_FAILED_CLEANUP_FAILED: Federated data catalog creation + * failed but could not be removed.

    Data catalog deletion + * status:

    • DELETE_IN_PROGRESS: Federated data + * catalog deletion in progress.

    • DELETE_COMPLETE: + * Federated data catalog deleted.

    • DELETE_FAILED: + * Federated data catalog could not be deleted.
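Because FEDERATED catalogs are created and deleted asynchronously, callers typically poll until one of the terminal states listed above is reached. A hedged sketch using the existing GetDataCatalog API together with the new DataCatalogStatus enum; the poll interval and error handling are placeholders.

```cpp
#include <aws/athena/AthenaClient.h>
#include <aws/athena/model/GetDataCatalogRequest.h>
#include <aws/athena/model/DataCatalogStatus.h>
#include <chrono>
#include <thread>

using Aws::Athena::Model::DataCatalogStatus;

// Polls GetDataCatalog until the catalog leaves an *_IN_PROGRESS state and returns
// the terminal status (or NOT_SET if the call itself fails).
DataCatalogStatus WaitForDataCatalog(const Aws::Athena::AthenaClient& athena,
                                     const Aws::String& catalogName)
{
    for (;;)
    {
        Aws::Athena::Model::GetDataCatalogRequest request;
        request.SetName(catalogName);

        auto outcome = athena.GetDataCatalog(request);
        if (!outcome.IsSuccess())
        {
            return DataCatalogStatus::NOT_SET;
        }

        const DataCatalogStatus status = outcome.GetResult().GetDataCatalog().GetStatus();
        if (status == DataCatalogStatus::CREATE_IN_PROGRESS ||
            status == DataCatalogStatus::CREATE_FAILED_CLEANUP_IN_PROGRESS ||
            status == DataCatalogStatus::DELETE_IN_PROGRESS)
        {
            std::this_thread::sleep_for(std::chrono::seconds(5));  // placeholder interval
            continue;
        }
        return status;  // CREATE_COMPLETE, CREATE_FAILED, DELETE_COMPLETE, DELETE_FAILED, ...
    }
}
```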

    + */ + inline const DataCatalogStatus& GetStatus() const{ return m_status; } + inline bool StatusHasBeenSet() const { return m_statusHasBeenSet; } + inline void SetStatus(const DataCatalogStatus& value) { m_statusHasBeenSet = true; m_status = value; } + inline void SetStatus(DataCatalogStatus&& value) { m_statusHasBeenSet = true; m_status = std::move(value); } + inline DataCatalog& WithStatus(const DataCatalogStatus& value) { SetStatus(value); return *this;} + inline DataCatalog& WithStatus(DataCatalogStatus&& value) { SetStatus(std::move(value)); return *this;} + ///@} + + ///@{ + /** + *

    The type of connection for a FEDERATED data catalog (for + * example, REDSHIFT, MYSQL, or SQLSERVER). + * For information about individual connectors, see Available + * data source connectors.

    + */ + inline const ConnectionType& GetConnectionType() const{ return m_connectionType; } + inline bool ConnectionTypeHasBeenSet() const { return m_connectionTypeHasBeenSet; } + inline void SetConnectionType(const ConnectionType& value) { m_connectionTypeHasBeenSet = true; m_connectionType = value; } + inline void SetConnectionType(ConnectionType&& value) { m_connectionTypeHasBeenSet = true; m_connectionType = std::move(value); } + inline DataCatalog& WithConnectionType(const ConnectionType& value) { SetConnectionType(value); return *this;} + inline DataCatalog& WithConnectionType(ConnectionType&& value) { SetConnectionType(std::move(value)); return *this;} + ///@} + + ///@{ + /** + *

    Text of the error that occurred during data catalog creation or deletion.

    + */ + inline const Aws::String& GetError() const{ return m_error; } + inline bool ErrorHasBeenSet() const { return m_errorHasBeenSet; } + inline void SetError(const Aws::String& value) { m_errorHasBeenSet = true; m_error = value; } + inline void SetError(Aws::String&& value) { m_errorHasBeenSet = true; m_error = std::move(value); } + inline void SetError(const char* value) { m_errorHasBeenSet = true; m_error.assign(value); } + inline DataCatalog& WithError(const Aws::String& value) { SetError(value); return *this;} + inline DataCatalog& WithError(Aws::String&& value) { SetError(std::move(value)); return *this;} + inline DataCatalog& WithError(const char* value) { SetError(value); return *this;} + ///@} private: Aws::String m_name; @@ -139,6 +214,15 @@ namespace Model Aws::Map m_parameters; bool m_parametersHasBeenSet = false; + + DataCatalogStatus m_status; + bool m_statusHasBeenSet = false; + + ConnectionType m_connectionType; + bool m_connectionTypeHasBeenSet = false; + + Aws::String m_error; + bool m_errorHasBeenSet = false; }; } // namespace Model diff --git a/generated/src/aws-cpp-sdk-athena/include/aws/athena/model/DataCatalogStatus.h b/generated/src/aws-cpp-sdk-athena/include/aws/athena/model/DataCatalogStatus.h new file mode 100644 index 00000000000..dc20a57c9ff --- /dev/null +++ b/generated/src/aws-cpp-sdk-athena/include/aws/athena/model/DataCatalogStatus.h @@ -0,0 +1,38 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#pragma once +#include +#include + +namespace Aws +{ +namespace Athena +{ +namespace Model +{ + enum class DataCatalogStatus + { + NOT_SET, + CREATE_IN_PROGRESS, + CREATE_COMPLETE, + CREATE_FAILED, + CREATE_FAILED_CLEANUP_IN_PROGRESS, + CREATE_FAILED_CLEANUP_COMPLETE, + CREATE_FAILED_CLEANUP_FAILED, + DELETE_IN_PROGRESS, + DELETE_COMPLETE, + DELETE_FAILED + }; + +namespace DataCatalogStatusMapper +{ +AWS_ATHENA_API DataCatalogStatus GetDataCatalogStatusForName(const Aws::String& name); + +AWS_ATHENA_API Aws::String GetNameForDataCatalogStatus(DataCatalogStatus value); +} // namespace DataCatalogStatusMapper +} // namespace Model +} // namespace Athena +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-athena/include/aws/athena/model/DataCatalogSummary.h b/generated/src/aws-cpp-sdk-athena/include/aws/athena/model/DataCatalogSummary.h index a2f95353411..48a43add004 100644 --- a/generated/src/aws-cpp-sdk-athena/include/aws/athena/model/DataCatalogSummary.h +++ b/generated/src/aws-cpp-sdk-athena/include/aws/athena/model/DataCatalogSummary.h @@ -7,6 +7,8 @@ #include #include #include +#include +#include #include namespace Aws @@ -67,6 +69,66 @@ namespace Model inline DataCatalogSummary& WithType(const DataCatalogType& value) { SetType(value); return *this;} inline DataCatalogSummary& WithType(DataCatalogType&& value) { SetType(std::move(value)); return *this;} ///@} + + ///@{ + /** + *

    The status of the creation or deletion of the data catalog.

    • + *

      The LAMBDA, GLUE, and HIVE data + * catalog types are created synchronously. Their status is either + * CREATE_COMPLETE or CREATE_FAILED.

    • + *

      The FEDERATED data catalog type is created asynchronously.

      + *

    Data catalog creation status:

    • + * CREATE_IN_PROGRESS: Federated data catalog creation in + * progress.

    • CREATE_COMPLETE: Data catalog creation + * complete.

    • CREATE_FAILED: Data catalog could not + * be created.

    • CREATE_FAILED_CLEANUP_IN_PROGRESS: + * Federated data catalog creation failed and is being removed.

    • + * CREATE_FAILED_CLEANUP_COMPLETE: Federated data catalog creation + * failed and was removed.

    • + * CREATE_FAILED_CLEANUP_FAILED: Federated data catalog creation + * failed but could not be removed.

    Data catalog deletion + * status:

    • DELETE_IN_PROGRESS: Federated data + * catalog deletion in progress.

    • DELETE_COMPLETE: + * Federated data catalog deleted.

    • DELETE_FAILED: + * Federated data catalog could not be deleted.

    + */ + inline const DataCatalogStatus& GetStatus() const{ return m_status; } + inline bool StatusHasBeenSet() const { return m_statusHasBeenSet; } + inline void SetStatus(const DataCatalogStatus& value) { m_statusHasBeenSet = true; m_status = value; } + inline void SetStatus(DataCatalogStatus&& value) { m_statusHasBeenSet = true; m_status = std::move(value); } + inline DataCatalogSummary& WithStatus(const DataCatalogStatus& value) { SetStatus(value); return *this;} + inline DataCatalogSummary& WithStatus(DataCatalogStatus&& value) { SetStatus(std::move(value)); return *this;} + ///@} + + ///@{ + /** + *

    The type of connection for a FEDERATED data catalog (for + * example, REDSHIFT, MYSQL, or SQLSERVER). + * For information about individual connectors, see Available + * data source connectors.

    + */ + inline const ConnectionType& GetConnectionType() const{ return m_connectionType; } + inline bool ConnectionTypeHasBeenSet() const { return m_connectionTypeHasBeenSet; } + inline void SetConnectionType(const ConnectionType& value) { m_connectionTypeHasBeenSet = true; m_connectionType = value; } + inline void SetConnectionType(ConnectionType&& value) { m_connectionTypeHasBeenSet = true; m_connectionType = std::move(value); } + inline DataCatalogSummary& WithConnectionType(const ConnectionType& value) { SetConnectionType(value); return *this;} + inline DataCatalogSummary& WithConnectionType(ConnectionType&& value) { SetConnectionType(std::move(value)); return *this;} + ///@} + + ///@{ + /** + *

    Text of the error that occurred during data catalog creation or deletion.

    + */ + inline const Aws::String& GetError() const{ return m_error; } + inline bool ErrorHasBeenSet() const { return m_errorHasBeenSet; } + inline void SetError(const Aws::String& value) { m_errorHasBeenSet = true; m_error = value; } + inline void SetError(Aws::String&& value) { m_errorHasBeenSet = true; m_error = std::move(value); } + inline void SetError(const char* value) { m_errorHasBeenSet = true; m_error.assign(value); } + inline DataCatalogSummary& WithError(const Aws::String& value) { SetError(value); return *this;} + inline DataCatalogSummary& WithError(Aws::String&& value) { SetError(std::move(value)); return *this;} + inline DataCatalogSummary& WithError(const char* value) { SetError(value); return *this;} + ///@} private: Aws::String m_catalogName; @@ -74,6 +136,15 @@ namespace Model DataCatalogType m_type; bool m_typeHasBeenSet = false; + + DataCatalogStatus m_status; + bool m_statusHasBeenSet = false; + + ConnectionType m_connectionType; + bool m_connectionTypeHasBeenSet = false; + + Aws::String m_error; + bool m_errorHasBeenSet = false; }; } // namespace Model diff --git a/generated/src/aws-cpp-sdk-athena/include/aws/athena/model/DataCatalogType.h b/generated/src/aws-cpp-sdk-athena/include/aws/athena/model/DataCatalogType.h index 874a85fcef3..32cc4f80b99 100644 --- a/generated/src/aws-cpp-sdk-athena/include/aws/athena/model/DataCatalogType.h +++ b/generated/src/aws-cpp-sdk-athena/include/aws/athena/model/DataCatalogType.h @@ -18,7 +18,8 @@ namespace Model NOT_SET, LAMBDA, GLUE, - HIVE + HIVE, + FEDERATED }; namespace DataCatalogTypeMapper diff --git a/generated/src/aws-cpp-sdk-athena/include/aws/athena/model/DeleteDataCatalogResult.h b/generated/src/aws-cpp-sdk-athena/include/aws/athena/model/DeleteDataCatalogResult.h index 38f89f2c06b..a60f73e4ff5 100644 --- a/generated/src/aws-cpp-sdk-athena/include/aws/athena/model/DeleteDataCatalogResult.h +++ b/generated/src/aws-cpp-sdk-athena/include/aws/athena/model/DeleteDataCatalogResult.h @@ -5,6 +5,7 @@ #pragma once #include +#include #include #include @@ -32,6 +33,15 @@ namespace Model AWS_ATHENA_API DeleteDataCatalogResult& operator=(const Aws::AmazonWebServiceResult& result); + ///@{ + + inline const DataCatalog& GetDataCatalog() const{ return m_dataCatalog; } + inline void SetDataCatalog(const DataCatalog& value) { m_dataCatalog = value; } + inline void SetDataCatalog(DataCatalog&& value) { m_dataCatalog = std::move(value); } + inline DeleteDataCatalogResult& WithDataCatalog(const DataCatalog& value) { SetDataCatalog(value); return *this;} + inline DeleteDataCatalogResult& WithDataCatalog(DataCatalog&& value) { SetDataCatalog(std::move(value)); return *this;} + ///@} + ///@{ inline const Aws::String& GetRequestId() const{ return m_requestId; } @@ -44,6 +54,8 @@ namespace Model ///@} private: + DataCatalog m_dataCatalog; + Aws::String m_requestId; }; diff --git a/generated/src/aws-cpp-sdk-athena/source/model/ConnectionType.cpp b/generated/src/aws-cpp-sdk-athena/source/model/ConnectionType.cpp new file mode 100644 index 00000000000..8e3d950cbeb --- /dev/null +++ b/generated/src/aws-cpp-sdk-athena/source/model/ConnectionType.cpp @@ -0,0 +1,268 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. 
+ */ + +#include +#include +#include +#include + +using namespace Aws::Utils; + + +namespace Aws +{ + namespace Athena + { + namespace Model + { + namespace ConnectionTypeMapper + { + + static const int DYNAMODB_HASH = HashingUtils::HashString("DYNAMODB"); + static const int MYSQL_HASH = HashingUtils::HashString("MYSQL"); + static const int POSTGRESQL_HASH = HashingUtils::HashString("POSTGRESQL"); + static const int REDSHIFT_HASH = HashingUtils::HashString("REDSHIFT"); + static const int ORACLE_HASH = HashingUtils::HashString("ORACLE"); + static const int SYNAPSE_HASH = HashingUtils::HashString("SYNAPSE"); + static const int SQLSERVER_HASH = HashingUtils::HashString("SQLSERVER"); + static const int DB2_HASH = HashingUtils::HashString("DB2"); + static const int OPENSEARCH_HASH = HashingUtils::HashString("OPENSEARCH"); + static const int BIGQUERY_HASH = HashingUtils::HashString("BIGQUERY"); + static const int GOOGLECLOUDSTORAGE_HASH = HashingUtils::HashString("GOOGLECLOUDSTORAGE"); + static const int HBASE_HASH = HashingUtils::HashString("HBASE"); + static const int DOCUMENTDB_HASH = HashingUtils::HashString("DOCUMENTDB"); + static const int MSK_HASH = HashingUtils::HashString("MSK"); + static const int NEPTUNE_HASH = HashingUtils::HashString("NEPTUNE"); + static const int CMDB_HASH = HashingUtils::HashString("CMDB"); + static const int TPCDS_HASH = HashingUtils::HashString("TPCDS"); + static const int REDIS_HASH = HashingUtils::HashString("REDIS"); + static const int CLOUDWATCH_HASH = HashingUtils::HashString("CLOUDWATCH"); + static const int TIMESTREAM_HASH = HashingUtils::HashString("TIMESTREAM"); + static const int SAPHANA_HASH = HashingUtils::HashString("SAPHANA"); + static const int SNOWFLAKE_HASH = HashingUtils::HashString("SNOWFLAKE"); + static const int TERADATA_HASH = HashingUtils::HashString("TERADATA"); + static const int VERTICA_HASH = HashingUtils::HashString("VERTICA"); + static const int CLOUDERAIMPALA_HASH = HashingUtils::HashString("CLOUDERAIMPALA"); + static const int CLOUDERAHIVE_HASH = HashingUtils::HashString("CLOUDERAHIVE"); + static const int HORTONWORKSHIVE_HASH = HashingUtils::HashString("HORTONWORKSHIVE"); + static const int DATALAKEGEN2_HASH = HashingUtils::HashString("DATALAKEGEN2"); + static const int DB2AS400_HASH = HashingUtils::HashString("DB2AS400"); + static const int CLOUDWATCHMETRICS_HASH = HashingUtils::HashString("CLOUDWATCHMETRICS"); + + + ConnectionType GetConnectionTypeForName(const Aws::String& name) + { + int hashCode = HashingUtils::HashString(name.c_str()); + if (hashCode == DYNAMODB_HASH) + { + return ConnectionType::DYNAMODB; + } + else if (hashCode == MYSQL_HASH) + { + return ConnectionType::MYSQL; + } + else if (hashCode == POSTGRESQL_HASH) + { + return ConnectionType::POSTGRESQL; + } + else if (hashCode == REDSHIFT_HASH) + { + return ConnectionType::REDSHIFT; + } + else if (hashCode == ORACLE_HASH) + { + return ConnectionType::ORACLE; + } + else if (hashCode == SYNAPSE_HASH) + { + return ConnectionType::SYNAPSE; + } + else if (hashCode == SQLSERVER_HASH) + { + return ConnectionType::SQLSERVER; + } + else if (hashCode == DB2_HASH) + { + return ConnectionType::DB2; + } + else if (hashCode == OPENSEARCH_HASH) + { + return ConnectionType::OPENSEARCH; + } + else if (hashCode == BIGQUERY_HASH) + { + return ConnectionType::BIGQUERY; + } + else if (hashCode == GOOGLECLOUDSTORAGE_HASH) + { + return ConnectionType::GOOGLECLOUDSTORAGE; + } + else if (hashCode == HBASE_HASH) + { + return ConnectionType::HBASE; + } + else if (hashCode == DOCUMENTDB_HASH) 
+ { + return ConnectionType::DOCUMENTDB; + } + else if (hashCode == MSK_HASH) + { + return ConnectionType::MSK; + } + else if (hashCode == NEPTUNE_HASH) + { + return ConnectionType::NEPTUNE; + } + else if (hashCode == CMDB_HASH) + { + return ConnectionType::CMDB; + } + else if (hashCode == TPCDS_HASH) + { + return ConnectionType::TPCDS; + } + else if (hashCode == REDIS_HASH) + { + return ConnectionType::REDIS; + } + else if (hashCode == CLOUDWATCH_HASH) + { + return ConnectionType::CLOUDWATCH; + } + else if (hashCode == TIMESTREAM_HASH) + { + return ConnectionType::TIMESTREAM; + } + else if (hashCode == SAPHANA_HASH) + { + return ConnectionType::SAPHANA; + } + else if (hashCode == SNOWFLAKE_HASH) + { + return ConnectionType::SNOWFLAKE; + } + else if (hashCode == TERADATA_HASH) + { + return ConnectionType::TERADATA; + } + else if (hashCode == VERTICA_HASH) + { + return ConnectionType::VERTICA; + } + else if (hashCode == CLOUDERAIMPALA_HASH) + { + return ConnectionType::CLOUDERAIMPALA; + } + else if (hashCode == CLOUDERAHIVE_HASH) + { + return ConnectionType::CLOUDERAHIVE; + } + else if (hashCode == HORTONWORKSHIVE_HASH) + { + return ConnectionType::HORTONWORKSHIVE; + } + else if (hashCode == DATALAKEGEN2_HASH) + { + return ConnectionType::DATALAKEGEN2; + } + else if (hashCode == DB2AS400_HASH) + { + return ConnectionType::DB2AS400; + } + else if (hashCode == CLOUDWATCHMETRICS_HASH) + { + return ConnectionType::CLOUDWATCHMETRICS; + } + EnumParseOverflowContainer* overflowContainer = Aws::GetEnumOverflowContainer(); + if(overflowContainer) + { + overflowContainer->StoreOverflow(hashCode, name); + return static_cast(hashCode); + } + + return ConnectionType::NOT_SET; + } + + Aws::String GetNameForConnectionType(ConnectionType enumValue) + { + switch(enumValue) + { + case ConnectionType::NOT_SET: + return {}; + case ConnectionType::DYNAMODB: + return "DYNAMODB"; + case ConnectionType::MYSQL: + return "MYSQL"; + case ConnectionType::POSTGRESQL: + return "POSTGRESQL"; + case ConnectionType::REDSHIFT: + return "REDSHIFT"; + case ConnectionType::ORACLE: + return "ORACLE"; + case ConnectionType::SYNAPSE: + return "SYNAPSE"; + case ConnectionType::SQLSERVER: + return "SQLSERVER"; + case ConnectionType::DB2: + return "DB2"; + case ConnectionType::OPENSEARCH: + return "OPENSEARCH"; + case ConnectionType::BIGQUERY: + return "BIGQUERY"; + case ConnectionType::GOOGLECLOUDSTORAGE: + return "GOOGLECLOUDSTORAGE"; + case ConnectionType::HBASE: + return "HBASE"; + case ConnectionType::DOCUMENTDB: + return "DOCUMENTDB"; + case ConnectionType::MSK: + return "MSK"; + case ConnectionType::NEPTUNE: + return "NEPTUNE"; + case ConnectionType::CMDB: + return "CMDB"; + case ConnectionType::TPCDS: + return "TPCDS"; + case ConnectionType::REDIS: + return "REDIS"; + case ConnectionType::CLOUDWATCH: + return "CLOUDWATCH"; + case ConnectionType::TIMESTREAM: + return "TIMESTREAM"; + case ConnectionType::SAPHANA: + return "SAPHANA"; + case ConnectionType::SNOWFLAKE: + return "SNOWFLAKE"; + case ConnectionType::TERADATA: + return "TERADATA"; + case ConnectionType::VERTICA: + return "VERTICA"; + case ConnectionType::CLOUDERAIMPALA: + return "CLOUDERAIMPALA"; + case ConnectionType::CLOUDERAHIVE: + return "CLOUDERAHIVE"; + case ConnectionType::HORTONWORKSHIVE: + return "HORTONWORKSHIVE"; + case ConnectionType::DATALAKEGEN2: + return "DATALAKEGEN2"; + case ConnectionType::DB2AS400: + return "DB2AS400"; + case ConnectionType::CLOUDWATCHMETRICS: + return "CLOUDWATCHMETRICS"; + default: + EnumParseOverflowContainer* overflowContainer 
= Aws::GetEnumOverflowContainer(); + if(overflowContainer) + { + return overflowContainer->RetrieveOverflow(static_cast(enumValue)); + } + + return {}; + } + } + + } // namespace ConnectionTypeMapper + } // namespace Model + } // namespace Athena +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-athena/source/model/CreateDataCatalogResult.cpp b/generated/src/aws-cpp-sdk-athena/source/model/CreateDataCatalogResult.cpp index 4b831c61b3c..baaefbc6248 100644 --- a/generated/src/aws-cpp-sdk-athena/source/model/CreateDataCatalogResult.cpp +++ b/generated/src/aws-cpp-sdk-athena/source/model/CreateDataCatalogResult.cpp @@ -28,7 +28,13 @@ CreateDataCatalogResult::CreateDataCatalogResult(const Aws::AmazonWebServiceResu CreateDataCatalogResult& CreateDataCatalogResult::operator =(const Aws::AmazonWebServiceResult& result) { - AWS_UNREFERENCED_PARAM(result); + JsonView jsonValue = result.GetPayload().View(); + if(jsonValue.ValueExists("DataCatalog")) + { + m_dataCatalog = jsonValue.GetObject("DataCatalog"); + + } + const auto& headers = result.GetHeaderValueCollection(); const auto& requestIdIter = headers.find("x-amzn-requestid"); diff --git a/generated/src/aws-cpp-sdk-athena/source/model/DataCatalog.cpp b/generated/src/aws-cpp-sdk-athena/source/model/DataCatalog.cpp index 88aaa6ebacf..0c3c4a8d698 100644 --- a/generated/src/aws-cpp-sdk-athena/source/model/DataCatalog.cpp +++ b/generated/src/aws-cpp-sdk-athena/source/model/DataCatalog.cpp @@ -23,7 +23,12 @@ DataCatalog::DataCatalog() : m_descriptionHasBeenSet(false), m_type(DataCatalogType::NOT_SET), m_typeHasBeenSet(false), - m_parametersHasBeenSet(false) + m_parametersHasBeenSet(false), + m_status(DataCatalogStatus::NOT_SET), + m_statusHasBeenSet(false), + m_connectionType(ConnectionType::NOT_SET), + m_connectionTypeHasBeenSet(false), + m_errorHasBeenSet(false) { } @@ -66,6 +71,27 @@ DataCatalog& DataCatalog::operator =(JsonView jsonValue) m_parametersHasBeenSet = true; } + if(jsonValue.ValueExists("Status")) + { + m_status = DataCatalogStatusMapper::GetDataCatalogStatusForName(jsonValue.GetString("Status")); + + m_statusHasBeenSet = true; + } + + if(jsonValue.ValueExists("ConnectionType")) + { + m_connectionType = ConnectionTypeMapper::GetConnectionTypeForName(jsonValue.GetString("ConnectionType")); + + m_connectionTypeHasBeenSet = true; + } + + if(jsonValue.ValueExists("Error")) + { + m_error = jsonValue.GetString("Error"); + + m_errorHasBeenSet = true; + } + return *this; } @@ -101,6 +127,22 @@ JsonValue DataCatalog::Jsonize() const } + if(m_statusHasBeenSet) + { + payload.WithString("Status", DataCatalogStatusMapper::GetNameForDataCatalogStatus(m_status)); + } + + if(m_connectionTypeHasBeenSet) + { + payload.WithString("ConnectionType", ConnectionTypeMapper::GetNameForConnectionType(m_connectionType)); + } + + if(m_errorHasBeenSet) + { + payload.WithString("Error", m_error); + + } + return payload; } diff --git a/generated/src/aws-cpp-sdk-athena/source/model/DataCatalogStatus.cpp b/generated/src/aws-cpp-sdk-athena/source/model/DataCatalogStatus.cpp new file mode 100644 index 00000000000..ca8cc68ee3b --- /dev/null +++ b/generated/src/aws-cpp-sdk-athena/source/model/DataCatalogStatus.cpp @@ -0,0 +1,121 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. 
+ */ + +#include +#include +#include +#include + +using namespace Aws::Utils; + + +namespace Aws +{ + namespace Athena + { + namespace Model + { + namespace DataCatalogStatusMapper + { + + static const int CREATE_IN_PROGRESS_HASH = HashingUtils::HashString("CREATE_IN_PROGRESS"); + static const int CREATE_COMPLETE_HASH = HashingUtils::HashString("CREATE_COMPLETE"); + static const int CREATE_FAILED_HASH = HashingUtils::HashString("CREATE_FAILED"); + static const int CREATE_FAILED_CLEANUP_IN_PROGRESS_HASH = HashingUtils::HashString("CREATE_FAILED_CLEANUP_IN_PROGRESS"); + static const int CREATE_FAILED_CLEANUP_COMPLETE_HASH = HashingUtils::HashString("CREATE_FAILED_CLEANUP_COMPLETE"); + static const int CREATE_FAILED_CLEANUP_FAILED_HASH = HashingUtils::HashString("CREATE_FAILED_CLEANUP_FAILED"); + static const int DELETE_IN_PROGRESS_HASH = HashingUtils::HashString("DELETE_IN_PROGRESS"); + static const int DELETE_COMPLETE_HASH = HashingUtils::HashString("DELETE_COMPLETE"); + static const int DELETE_FAILED_HASH = HashingUtils::HashString("DELETE_FAILED"); + + + DataCatalogStatus GetDataCatalogStatusForName(const Aws::String& name) + { + int hashCode = HashingUtils::HashString(name.c_str()); + if (hashCode == CREATE_IN_PROGRESS_HASH) + { + return DataCatalogStatus::CREATE_IN_PROGRESS; + } + else if (hashCode == CREATE_COMPLETE_HASH) + { + return DataCatalogStatus::CREATE_COMPLETE; + } + else if (hashCode == CREATE_FAILED_HASH) + { + return DataCatalogStatus::CREATE_FAILED; + } + else if (hashCode == CREATE_FAILED_CLEANUP_IN_PROGRESS_HASH) + { + return DataCatalogStatus::CREATE_FAILED_CLEANUP_IN_PROGRESS; + } + else if (hashCode == CREATE_FAILED_CLEANUP_COMPLETE_HASH) + { + return DataCatalogStatus::CREATE_FAILED_CLEANUP_COMPLETE; + } + else if (hashCode == CREATE_FAILED_CLEANUP_FAILED_HASH) + { + return DataCatalogStatus::CREATE_FAILED_CLEANUP_FAILED; + } + else if (hashCode == DELETE_IN_PROGRESS_HASH) + { + return DataCatalogStatus::DELETE_IN_PROGRESS; + } + else if (hashCode == DELETE_COMPLETE_HASH) + { + return DataCatalogStatus::DELETE_COMPLETE; + } + else if (hashCode == DELETE_FAILED_HASH) + { + return DataCatalogStatus::DELETE_FAILED; + } + EnumParseOverflowContainer* overflowContainer = Aws::GetEnumOverflowContainer(); + if(overflowContainer) + { + overflowContainer->StoreOverflow(hashCode, name); + return static_cast(hashCode); + } + + return DataCatalogStatus::NOT_SET; + } + + Aws::String GetNameForDataCatalogStatus(DataCatalogStatus enumValue) + { + switch(enumValue) + { + case DataCatalogStatus::NOT_SET: + return {}; + case DataCatalogStatus::CREATE_IN_PROGRESS: + return "CREATE_IN_PROGRESS"; + case DataCatalogStatus::CREATE_COMPLETE: + return "CREATE_COMPLETE"; + case DataCatalogStatus::CREATE_FAILED: + return "CREATE_FAILED"; + case DataCatalogStatus::CREATE_FAILED_CLEANUP_IN_PROGRESS: + return "CREATE_FAILED_CLEANUP_IN_PROGRESS"; + case DataCatalogStatus::CREATE_FAILED_CLEANUP_COMPLETE: + return "CREATE_FAILED_CLEANUP_COMPLETE"; + case DataCatalogStatus::CREATE_FAILED_CLEANUP_FAILED: + return "CREATE_FAILED_CLEANUP_FAILED"; + case DataCatalogStatus::DELETE_IN_PROGRESS: + return "DELETE_IN_PROGRESS"; + case DataCatalogStatus::DELETE_COMPLETE: + return "DELETE_COMPLETE"; + case DataCatalogStatus::DELETE_FAILED: + return "DELETE_FAILED"; + default: + EnumParseOverflowContainer* overflowContainer = Aws::GetEnumOverflowContainer(); + if(overflowContainer) + { + return overflowContainer->RetrieveOverflow(static_cast(enumValue)); + } + + return {}; + } + } + + } // namespace 
DataCatalogStatusMapper + } // namespace Model + } // namespace Athena +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-athena/source/model/DataCatalogSummary.cpp b/generated/src/aws-cpp-sdk-athena/source/model/DataCatalogSummary.cpp index c1aa1248b42..de7a0a51e4a 100644 --- a/generated/src/aws-cpp-sdk-athena/source/model/DataCatalogSummary.cpp +++ b/generated/src/aws-cpp-sdk-athena/source/model/DataCatalogSummary.cpp @@ -21,7 +21,12 @@ namespace Model DataCatalogSummary::DataCatalogSummary() : m_catalogNameHasBeenSet(false), m_type(DataCatalogType::NOT_SET), - m_typeHasBeenSet(false) + m_typeHasBeenSet(false), + m_status(DataCatalogStatus::NOT_SET), + m_statusHasBeenSet(false), + m_connectionType(ConnectionType::NOT_SET), + m_connectionTypeHasBeenSet(false), + m_errorHasBeenSet(false) { } @@ -47,6 +52,27 @@ DataCatalogSummary& DataCatalogSummary::operator =(JsonView jsonValue) m_typeHasBeenSet = true; } + if(jsonValue.ValueExists("Status")) + { + m_status = DataCatalogStatusMapper::GetDataCatalogStatusForName(jsonValue.GetString("Status")); + + m_statusHasBeenSet = true; + } + + if(jsonValue.ValueExists("ConnectionType")) + { + m_connectionType = ConnectionTypeMapper::GetConnectionTypeForName(jsonValue.GetString("ConnectionType")); + + m_connectionTypeHasBeenSet = true; + } + + if(jsonValue.ValueExists("Error")) + { + m_error = jsonValue.GetString("Error"); + + m_errorHasBeenSet = true; + } + return *this; } @@ -65,6 +91,22 @@ JsonValue DataCatalogSummary::Jsonize() const payload.WithString("Type", DataCatalogTypeMapper::GetNameForDataCatalogType(m_type)); } + if(m_statusHasBeenSet) + { + payload.WithString("Status", DataCatalogStatusMapper::GetNameForDataCatalogStatus(m_status)); + } + + if(m_connectionTypeHasBeenSet) + { + payload.WithString("ConnectionType", ConnectionTypeMapper::GetNameForConnectionType(m_connectionType)); + } + + if(m_errorHasBeenSet) + { + payload.WithString("Error", m_error); + + } + return payload; } diff --git a/generated/src/aws-cpp-sdk-athena/source/model/DataCatalogType.cpp b/generated/src/aws-cpp-sdk-athena/source/model/DataCatalogType.cpp index 73916e9ab25..f8e59244c3c 100644 --- a/generated/src/aws-cpp-sdk-athena/source/model/DataCatalogType.cpp +++ b/generated/src/aws-cpp-sdk-athena/source/model/DataCatalogType.cpp @@ -23,6 +23,7 @@ namespace Aws static const int LAMBDA_HASH = HashingUtils::HashString("LAMBDA"); static const int GLUE_HASH = HashingUtils::HashString("GLUE"); static const int HIVE_HASH = HashingUtils::HashString("HIVE"); + static const int FEDERATED_HASH = HashingUtils::HashString("FEDERATED"); DataCatalogType GetDataCatalogTypeForName(const Aws::String& name) @@ -40,6 +41,10 @@ namespace Aws { return DataCatalogType::HIVE; } + else if (hashCode == FEDERATED_HASH) + { + return DataCatalogType::FEDERATED; + } EnumParseOverflowContainer* overflowContainer = Aws::GetEnumOverflowContainer(); if(overflowContainer) { @@ -62,6 +67,8 @@ namespace Aws return "GLUE"; case DataCatalogType::HIVE: return "HIVE"; + case DataCatalogType::FEDERATED: + return "FEDERATED"; default: EnumParseOverflowContainer* overflowContainer = Aws::GetEnumOverflowContainer(); if(overflowContainer) diff --git a/generated/src/aws-cpp-sdk-athena/source/model/DeleteDataCatalogResult.cpp b/generated/src/aws-cpp-sdk-athena/source/model/DeleteDataCatalogResult.cpp index 18cdcd51cee..a752a5dfe98 100644 --- a/generated/src/aws-cpp-sdk-athena/source/model/DeleteDataCatalogResult.cpp +++ b/generated/src/aws-cpp-sdk-athena/source/model/DeleteDataCatalogResult.cpp @@ -28,7 
+28,13 @@ DeleteDataCatalogResult::DeleteDataCatalogResult(const Aws::AmazonWebServiceResu DeleteDataCatalogResult& DeleteDataCatalogResult::operator =(const Aws::AmazonWebServiceResult& result) { - AWS_UNREFERENCED_PARAM(result); + JsonView jsonValue = result.GetPayload().View(); + if(jsonValue.ValueExists("DataCatalog")) + { + m_dataCatalog = jsonValue.GetObject("DataCatalog"); + + } + const auto& headers = result.GetHeaderValueCollection(); const auto& requestIdIter = headers.find("x-amzn-requestid"); diff --git a/generated/src/aws-cpp-sdk-bedrock-agent/include/aws/bedrock-agent/model/CreateAgentRequest.h b/generated/src/aws-cpp-sdk-bedrock-agent/include/aws/bedrock-agent/model/CreateAgentRequest.h index 31c85b4306b..e740bc868b9 100644 --- a/generated/src/aws-cpp-sdk-bedrock-agent/include/aws/bedrock-agent/model/CreateAgentRequest.h +++ b/generated/src/aws-cpp-sdk-bedrock-agent/include/aws/bedrock-agent/model/CreateAgentRequest.h @@ -115,8 +115,8 @@ namespace Model ///@{ /** - *

    The foundation model to be used for orchestration by the agent you - * create.

    + *

    The Amazon Resource Name (ARN) of the foundation model to be used for + * orchestration by the agent you create.

    */ inline const Aws::String& GetFoundationModel() const{ return m_foundationModel; } inline bool FoundationModelHasBeenSet() const { return m_foundationModelHasBeenSet; } diff --git a/generated/src/aws-cpp-sdk-bedrock-agent/include/aws/bedrock-agent/model/KnowledgeBaseFlowNodeConfiguration.h b/generated/src/aws-cpp-sdk-bedrock-agent/include/aws/bedrock-agent/model/KnowledgeBaseFlowNodeConfiguration.h index 12f21def7fb..322a3ca255d 100644 --- a/generated/src/aws-cpp-sdk-bedrock-agent/include/aws/bedrock-agent/model/KnowledgeBaseFlowNodeConfiguration.h +++ b/generated/src/aws-cpp-sdk-bedrock-agent/include/aws/bedrock-agent/model/KnowledgeBaseFlowNodeConfiguration.h @@ -59,9 +59,10 @@ namespace Model ///@{ /** - *

    The unique identifier of the model to use to generate a response from the - * query results. Omit this field if you want to return the retrieved results as an - * array.

    + *

    The unique identifier of the model or inference + * profile to use to generate a response from the query results. Omit this + * field if you want to return the retrieved results as an array.

    */ inline const Aws::String& GetModelId() const{ return m_modelId; } inline bool ModelIdHasBeenSet() const { return m_modelIdHasBeenSet; } diff --git a/generated/src/aws-cpp-sdk-bedrock-agent/include/aws/bedrock-agent/model/PromptFlowNodeInlineConfiguration.h b/generated/src/aws-cpp-sdk-bedrock-agent/include/aws/bedrock-agent/model/PromptFlowNodeInlineConfiguration.h index d8d19318f9e..8082e7f432d 100644 --- a/generated/src/aws-cpp-sdk-bedrock-agent/include/aws/bedrock-agent/model/PromptFlowNodeInlineConfiguration.h +++ b/generated/src/aws-cpp-sdk-bedrock-agent/include/aws/bedrock-agent/model/PromptFlowNodeInlineConfiguration.h @@ -55,7 +55,9 @@ namespace Model ///@{ /** - *

    The unique identifier of the model to run inference with.

    + *

    The unique identifier of the model or inference + * profile to run inference with.

    */ inline const Aws::String& GetModelId() const{ return m_modelId; } inline bool ModelIdHasBeenSet() const { return m_modelIdHasBeenSet; } diff --git a/generated/src/aws-cpp-sdk-bedrock-agent/include/aws/bedrock-agent/model/PromptOverrideConfiguration.h b/generated/src/aws-cpp-sdk-bedrock-agent/include/aws/bedrock-agent/model/PromptOverrideConfiguration.h index 6276f61e52d..055dbff7625 100644 --- a/generated/src/aws-cpp-sdk-bedrock-agent/include/aws/bedrock-agent/model/PromptOverrideConfiguration.h +++ b/generated/src/aws-cpp-sdk-bedrock-agent/include/aws/bedrock-agent/model/PromptOverrideConfiguration.h @@ -49,7 +49,7 @@ namespace Model * of the promptConfigurations must contain a parserMode * value that is set to OVERRIDDEN. For more information, see Parser - * Lambda function in Agents for Amazon Bedrock.

    + * Lambda function in Amazon Bedrock Agents.

    */ inline const Aws::String& GetOverrideLambda() const{ return m_overrideLambda; } inline bool OverrideLambdaHasBeenSet() const { return m_overrideLambdaHasBeenSet; } diff --git a/generated/src/aws-cpp-sdk-bedrock-agent/include/aws/bedrock-agent/model/PromptVariant.h b/generated/src/aws-cpp-sdk-bedrock-agent/include/aws/bedrock-agent/model/PromptVariant.h index c8c989e69a4..5fb2e80674f 100644 --- a/generated/src/aws-cpp-sdk-bedrock-agent/include/aws/bedrock-agent/model/PromptVariant.h +++ b/generated/src/aws-cpp-sdk-bedrock-agent/include/aws/bedrock-agent/model/PromptVariant.h @@ -73,8 +73,9 @@ namespace Model ///@{ /** - *

    The unique identifier of the model with which to run inference on the - * prompt.

    + *

    The unique identifier of the model or inference + * profile with which to run inference on the prompt.
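A minimal sketch of what this change enables: a prompt variant whose ModelId points at a cross-region inference profile instead of a single foundation model. The profile identifier is illustrative and not taken from this patch, and the template and inference configuration are omitted for brevity.

```cpp
#include <aws/bedrock-agent/model/PromptVariant.h>

// Builds a prompt variant that targets an inference profile rather than one model.
Aws::BedrockAgent::Model::PromptVariant MakeHighThroughputVariant()
{
    Aws::BedrockAgent::Model::PromptVariant variant;
    variant.SetName("high-throughput");
    // Example cross-region inference profile identifier (placeholder value).
    variant.SetModelId("us.anthropic.claude-3-5-sonnet-20240620-v1:0");
    // Template type/configuration and inference configuration omitted for brevity.
    return variant;
}
```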

    */ inline const Aws::String& GetModelId() const{ return m_modelId; } inline bool ModelIdHasBeenSet() const { return m_modelIdHasBeenSet; } diff --git a/generated/src/aws-cpp-sdk-ec2/include/aws/ec2/EC2Client.h b/generated/src/aws-cpp-sdk-ec2/include/aws/ec2/EC2Client.h index e0d6f7f13b0..fe0fcd0124b 100644 --- a/generated/src/aws-cpp-sdk-ec2/include/aws/ec2/EC2Client.h +++ b/generated/src/aws-cpp-sdk-ec2/include/aws/ec2/EC2Client.h @@ -1496,9 +1496,7 @@ namespace EC2 * or volume. The action removes all artifacts of the conversion, including a * partially uploaded volume or instance. If the conversion is complete or is in * the process of transferring the final disk image, the command fails and returns - * an exception.

    For more information, see Importing - * a Virtual Machine Using the Amazon EC2 CLI.

    See Also:

    See Also:

    AWS * API Reference

    */ @@ -14619,20 +14617,16 @@ namespace EC2 /** *

    We recommend that you use the - * ImportImage API. For more information, see ImportImage API instead. For more information, see Importing * a VM as an image using VM Import/Export in the VM Import/Export User * Guide.

    Creates an import instance task using metadata from - * the specified disk image.

    This API action is not supported by the Command - * Line Interface (CLI). For information about using the Amazon EC2 CLI, which is - * deprecated, see Importing - * a VM to Amazon EC2 in the Amazon EC2 CLI Reference PDF file.

    - *

    This API action supports only single-volume VMs. To import multi-volume VMs, - * use ImportImage instead.

    For information about the import manifest - * referenced by this API action, see

    This API action supports only single-volume + * VMs. To import multi-volume VMs, use ImportImage instead.

    For + * information about the import manifest referenced by this API action, see VM - * Import Manifest.

    See Also:

    .

    This API action is not supported by the Command Line + * Interface (CLI).

    See Also:

    AWS * API Reference

    */ @@ -14719,18 +14713,14 @@ namespace EC2 } /** - *

    Creates an import volume task using metadata from the specified disk - * image.

    This API action supports only single-volume VMs. To import + *

    This API action supports only single-volume VMs. To import * multi-volume VMs, use ImportImage instead. To import a disk to a - * snapshot, use ImportSnapshot instead.

    This API action is not - * supported by the Command Line Interface (CLI). For information about using the - * Amazon EC2 CLI, which is deprecated, see Importing - * Disks to Amazon EBS in the Amazon EC2 CLI Reference PDF file.

    - *

    For information about the import manifest referenced by this API action, see - * ImportSnapshot instead.

    Creates an import + * volume task using metadata from the specified disk image.

    For information + * about the import manifest referenced by this API action, see VM - * Import Manifest.

    See Also:

    .

    This API action is not supported by the Command Line + * Interface (CLI).

    See Also:

    AWS * API Reference

    */ diff --git a/generated/src/aws-cpp-sdk-ec2/include/aws/ec2/model/FleetCapacityReservationUsageStrategy.h b/generated/src/aws-cpp-sdk-ec2/include/aws/ec2/model/FleetCapacityReservationUsageStrategy.h index 436895e2e6f..2aec4f5e997 100644 --- a/generated/src/aws-cpp-sdk-ec2/include/aws/ec2/model/FleetCapacityReservationUsageStrategy.h +++ b/generated/src/aws-cpp-sdk-ec2/include/aws/ec2/model/FleetCapacityReservationUsageStrategy.h @@ -16,7 +16,9 @@ namespace Model enum class FleetCapacityReservationUsageStrategy { NOT_SET, - use_capacity_reservations_first + use_capacity_reservations_first, + use_capacity_reservations_only, + none }; namespace FleetCapacityReservationUsageStrategyMapper diff --git a/generated/src/aws-cpp-sdk-ec2/include/aws/ec2/model/InstanceType.h b/generated/src/aws-cpp-sdk-ec2/include/aws/ec2/model/InstanceType.h index c91e6927be6..b5fb76ef6aa 100644 --- a/generated/src/aws-cpp-sdk-ec2/include/aws/ec2/model/InstanceType.h +++ b/generated/src/aws-cpp-sdk-ec2/include/aws/ec2/model/InstanceType.h @@ -835,7 +835,15 @@ namespace Model r8g_48xlarge, r8g_metal_24xl, r8g_metal_48xl, - mac2_m1ultra_metal + mac2_m1ultra_metal, + g6e_xlarge, + g6e_2xlarge, + g6e_4xlarge, + g6e_8xlarge, + g6e_12xlarge, + g6e_16xlarge, + g6e_24xlarge, + g6e_48xlarge }; namespace InstanceTypeMapper diff --git a/generated/src/aws-cpp-sdk-ec2/include/aws/ec2/model/SnapshotTaskDetail.h b/generated/src/aws-cpp-sdk-ec2/include/aws/ec2/model/SnapshotTaskDetail.h index afc6860bdae..cbcb9db58a2 100644 --- a/generated/src/aws-cpp-sdk-ec2/include/aws/ec2/model/SnapshotTaskDetail.h +++ b/generated/src/aws-cpp-sdk-ec2/include/aws/ec2/model/SnapshotTaskDetail.h @@ -42,7 +42,7 @@ namespace Model ///@{ /** - *

    The description of the snapshot.

    + *

    The description of the disk image being imported.

    */ inline const Aws::String& GetDescription() const{ return m_description; } inline bool DescriptionHasBeenSet() const { return m_descriptionHasBeenSet; } diff --git a/generated/src/aws-cpp-sdk-ec2/source/model/FleetCapacityReservationUsageStrategy.cpp b/generated/src/aws-cpp-sdk-ec2/source/model/FleetCapacityReservationUsageStrategy.cpp index 9b4093c1565..67420e81701 100644 --- a/generated/src/aws-cpp-sdk-ec2/source/model/FleetCapacityReservationUsageStrategy.cpp +++ b/generated/src/aws-cpp-sdk-ec2/source/model/FleetCapacityReservationUsageStrategy.cpp @@ -21,6 +21,8 @@ namespace Aws { static const int use_capacity_reservations_first_HASH = HashingUtils::HashString("use-capacity-reservations-first"); + static const int use_capacity_reservations_only_HASH = HashingUtils::HashString("use-capacity-reservations-only"); + static const int none_HASH = HashingUtils::HashString("none"); FleetCapacityReservationUsageStrategy GetFleetCapacityReservationUsageStrategyForName(const Aws::String& name) @@ -30,6 +32,14 @@ namespace Aws { return FleetCapacityReservationUsageStrategy::use_capacity_reservations_first; } + else if (hashCode == use_capacity_reservations_only_HASH) + { + return FleetCapacityReservationUsageStrategy::use_capacity_reservations_only; + } + else if (hashCode == none_HASH) + { + return FleetCapacityReservationUsageStrategy::none; + } EnumParseOverflowContainer* overflowContainer = Aws::GetEnumOverflowContainer(); if(overflowContainer) { @@ -48,6 +58,10 @@ namespace Aws return {}; case FleetCapacityReservationUsageStrategy::use_capacity_reservations_first: return "use-capacity-reservations-first"; + case FleetCapacityReservationUsageStrategy::use_capacity_reservations_only: + return "use-capacity-reservations-only"; + case FleetCapacityReservationUsageStrategy::none: + return "none"; default: EnumParseOverflowContainer* overflowContainer = Aws::GetEnumOverflowContainer(); if(overflowContainer) diff --git a/generated/src/aws-cpp-sdk-ec2/source/model/InstanceType.cpp b/generated/src/aws-cpp-sdk-ec2/source/model/InstanceType.cpp index 69d4e1f66d2..8310ae027d0 100644 --- a/generated/src/aws-cpp-sdk-ec2/source/model/InstanceType.cpp +++ b/generated/src/aws-cpp-sdk-ec2/source/model/InstanceType.cpp @@ -840,6 +840,14 @@ namespace Aws static const int r8g_metal_24xl_HASH = HashingUtils::HashString("r8g.metal-24xl"); static const int r8g_metal_48xl_HASH = HashingUtils::HashString("r8g.metal-48xl"); static const int mac2_m1ultra_metal_HASH = HashingUtils::HashString("mac2-m1ultra.metal"); + static const int g6e_xlarge_HASH = HashingUtils::HashString("g6e.xlarge"); + static const int g6e_2xlarge_HASH = HashingUtils::HashString("g6e.2xlarge"); + static const int g6e_4xlarge_HASH = HashingUtils::HashString("g6e.4xlarge"); + static const int g6e_8xlarge_HASH = HashingUtils::HashString("g6e.8xlarge"); + static const int g6e_12xlarge_HASH = HashingUtils::HashString("g6e.12xlarge"); + static const int g6e_16xlarge_HASH = HashingUtils::HashString("g6e.16xlarge"); + static const int g6e_24xlarge_HASH = HashingUtils::HashString("g6e.24xlarge"); + static const int g6e_48xlarge_HASH = HashingUtils::HashString("g6e.48xlarge"); /* The if-else chains in this file are converted into a jump table by the compiler, @@ -4972,6 +4980,46 @@ namespace Aws enumValue = InstanceType::mac2_m1ultra_metal; return true; } + else if (hashCode == g6e_xlarge_HASH) + { + enumValue = InstanceType::g6e_xlarge; + return true; + } + else if (hashCode == g6e_2xlarge_HASH) + { + enumValue = InstanceType::g6e_2xlarge; + return 
true; + } + else if (hashCode == g6e_4xlarge_HASH) + { + enumValue = InstanceType::g6e_4xlarge; + return true; + } + else if (hashCode == g6e_8xlarge_HASH) + { + enumValue = InstanceType::g6e_8xlarge; + return true; + } + else if (hashCode == g6e_12xlarge_HASH) + { + enumValue = InstanceType::g6e_12xlarge; + return true; + } + else if (hashCode == g6e_16xlarge_HASH) + { + enumValue = InstanceType::g6e_16xlarge; + return true; + } + else if (hashCode == g6e_24xlarge_HASH) + { + enumValue = InstanceType::g6e_24xlarge; + return true; + } + else if (hashCode == g6e_48xlarge_HASH) + { + enumValue = InstanceType::g6e_48xlarge; + return true; + } return false; } @@ -7487,6 +7535,30 @@ namespace Aws case InstanceType::mac2_m1ultra_metal: value = "mac2-m1ultra.metal"; return true; + case InstanceType::g6e_xlarge: + value = "g6e.xlarge"; + return true; + case InstanceType::g6e_2xlarge: + value = "g6e.2xlarge"; + return true; + case InstanceType::g6e_4xlarge: + value = "g6e.4xlarge"; + return true; + case InstanceType::g6e_8xlarge: + value = "g6e.8xlarge"; + return true; + case InstanceType::g6e_12xlarge: + value = "g6e.12xlarge"; + return true; + case InstanceType::g6e_16xlarge: + value = "g6e.16xlarge"; + return true; + case InstanceType::g6e_24xlarge: + value = "g6e.24xlarge"; + return true; + case InstanceType::g6e_48xlarge: + value = "g6e.48xlarge"; + return true; default: return false; } diff --git a/generated/src/aws-cpp-sdk-emr-serverless/include/aws/emr-serverless/model/Application.h b/generated/src/aws-cpp-sdk-emr-serverless/include/aws/emr-serverless/model/Application.h index f32415af6e0..2621346aedb 100644 --- a/generated/src/aws-cpp-sdk-emr-serverless/include/aws/emr-serverless/model/Application.h +++ b/generated/src/aws-cpp-sdk-emr-serverless/include/aws/emr-serverless/model/Application.h @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -349,6 +350,19 @@ namespace Model inline Application& WithInteractiveConfiguration(const InteractiveConfiguration& value) { SetInteractiveConfiguration(value); return *this;} inline Application& WithInteractiveConfiguration(InteractiveConfiguration&& value) { SetInteractiveConfiguration(std::move(value)); return *this;} ///@} + + ///@{ + /** + *

    The scheduler configuration for batch and streaming jobs running on this + * application. Supported with release labels emr-7.0.0 and above.

    + */ + inline const SchedulerConfiguration& GetSchedulerConfiguration() const{ return m_schedulerConfiguration; } + inline bool SchedulerConfigurationHasBeenSet() const { return m_schedulerConfigurationHasBeenSet; } + inline void SetSchedulerConfiguration(const SchedulerConfiguration& value) { m_schedulerConfigurationHasBeenSet = true; m_schedulerConfiguration = value; } + inline void SetSchedulerConfiguration(SchedulerConfiguration&& value) { m_schedulerConfigurationHasBeenSet = true; m_schedulerConfiguration = std::move(value); } + inline Application& WithSchedulerConfiguration(const SchedulerConfiguration& value) { SetSchedulerConfiguration(value); return *this;} + inline Application& WithSchedulerConfiguration(SchedulerConfiguration&& value) { SetSchedulerConfiguration(std::move(value)); return *this;} + ///@} private: Aws::String m_applicationId; @@ -413,6 +427,9 @@ namespace Model InteractiveConfiguration m_interactiveConfiguration; bool m_interactiveConfigurationHasBeenSet = false; + + SchedulerConfiguration m_schedulerConfiguration; + bool m_schedulerConfigurationHasBeenSet = false; }; } // namespace Model diff --git a/generated/src/aws-cpp-sdk-emr-serverless/include/aws/emr-serverless/model/CreateApplicationRequest.h b/generated/src/aws-cpp-sdk-emr-serverless/include/aws/emr-serverless/model/CreateApplicationRequest.h index 360d2eb1410..81d9e12f3cb 100644 --- a/generated/src/aws-cpp-sdk-emr-serverless/include/aws/emr-serverless/model/CreateApplicationRequest.h +++ b/generated/src/aws-cpp-sdk-emr-serverless/include/aws/emr-serverless/model/CreateApplicationRequest.h @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -285,6 +286,19 @@ namespace Model inline CreateApplicationRequest& WithInteractiveConfiguration(const InteractiveConfiguration& value) { SetInteractiveConfiguration(value); return *this;} inline CreateApplicationRequest& WithInteractiveConfiguration(InteractiveConfiguration&& value) { SetInteractiveConfiguration(std::move(value)); return *this;} ///@} + + ///@{ + /** + *

    The scheduler configuration for batch and streaming jobs running on this + * application. Supported with release labels emr-7.0.0 and above.

    + */ + inline const SchedulerConfiguration& GetSchedulerConfiguration() const{ return m_schedulerConfiguration; } + inline bool SchedulerConfigurationHasBeenSet() const { return m_schedulerConfigurationHasBeenSet; } + inline void SetSchedulerConfiguration(const SchedulerConfiguration& value) { m_schedulerConfigurationHasBeenSet = true; m_schedulerConfiguration = value; } + inline void SetSchedulerConfiguration(SchedulerConfiguration&& value) { m_schedulerConfigurationHasBeenSet = true; m_schedulerConfiguration = std::move(value); } + inline CreateApplicationRequest& WithSchedulerConfiguration(const SchedulerConfiguration& value) { SetSchedulerConfiguration(value); return *this;} + inline CreateApplicationRequest& WithSchedulerConfiguration(SchedulerConfiguration&& value) { SetSchedulerConfiguration(std::move(value)); return *this;} + ///@} private: Aws::String m_name; @@ -334,6 +348,9 @@ namespace Model InteractiveConfiguration m_interactiveConfiguration; bool m_interactiveConfigurationHasBeenSet = false; + + SchedulerConfiguration m_schedulerConfiguration; + bool m_schedulerConfigurationHasBeenSet = false; }; } // namespace Model diff --git a/generated/src/aws-cpp-sdk-emr-serverless/include/aws/emr-serverless/model/JobRun.h b/generated/src/aws-cpp-sdk-emr-serverless/include/aws/emr-serverless/model/JobRun.h index 3787bbb94a4..233f7841e89 100644 --- a/generated/src/aws-cpp-sdk-emr-serverless/include/aws/emr-serverless/model/JobRun.h +++ b/generated/src/aws-cpp-sdk-emr-serverless/include/aws/emr-serverless/model/JobRun.h @@ -362,6 +362,40 @@ namespace Model inline JobRun& WithAttemptUpdatedAt(const Aws::Utils::DateTime& value) { SetAttemptUpdatedAt(value); return *this;} inline JobRun& WithAttemptUpdatedAt(Aws::Utils::DateTime&& value) { SetAttemptUpdatedAt(std::move(value)); return *this;} ///@} + + ///@{ + /** + *

    The date and time when the job moved to the RUNNING state.

    + */ + inline const Aws::Utils::DateTime& GetStartedAt() const{ return m_startedAt; } + inline bool StartedAtHasBeenSet() const { return m_startedAtHasBeenSet; } + inline void SetStartedAt(const Aws::Utils::DateTime& value) { m_startedAtHasBeenSet = true; m_startedAt = value; } + inline void SetStartedAt(Aws::Utils::DateTime&& value) { m_startedAtHasBeenSet = true; m_startedAt = std::move(value); } + inline JobRun& WithStartedAt(const Aws::Utils::DateTime& value) { SetStartedAt(value); return *this;} + inline JobRun& WithStartedAt(Aws::Utils::DateTime&& value) { SetStartedAt(std::move(value)); return *this;} + ///@} + + ///@{ + /** + *

    The date and time when the job was terminated.

    + */ + inline const Aws::Utils::DateTime& GetEndedAt() const{ return m_endedAt; } + inline bool EndedAtHasBeenSet() const { return m_endedAtHasBeenSet; } + inline void SetEndedAt(const Aws::Utils::DateTime& value) { m_endedAtHasBeenSet = true; m_endedAt = value; } + inline void SetEndedAt(Aws::Utils::DateTime&& value) { m_endedAtHasBeenSet = true; m_endedAt = std::move(value); } + inline JobRun& WithEndedAt(const Aws::Utils::DateTime& value) { SetEndedAt(value); return *this;} + inline JobRun& WithEndedAt(Aws::Utils::DateTime&& value) { SetEndedAt(std::move(value)); return *this;} + ///@} + + ///@{ + /** + *

    The total time for a job in the QUEUED state in milliseconds.

    + */ + inline long long GetQueuedDurationMilliseconds() const{ return m_queuedDurationMilliseconds; } + inline bool QueuedDurationMillisecondsHasBeenSet() const { return m_queuedDurationMillisecondsHasBeenSet; } + inline void SetQueuedDurationMilliseconds(long long value) { m_queuedDurationMillisecondsHasBeenSet = true; m_queuedDurationMilliseconds = value; } + inline JobRun& WithQueuedDurationMilliseconds(long long value) { SetQueuedDurationMilliseconds(value); return *this;} + ///@} private: Aws::String m_applicationId; @@ -435,6 +469,15 @@ namespace Model Aws::Utils::DateTime m_attemptUpdatedAt; bool m_attemptUpdatedAtHasBeenSet = false; + + Aws::Utils::DateTime m_startedAt; + bool m_startedAtHasBeenSet = false; + + Aws::Utils::DateTime m_endedAt; + bool m_endedAtHasBeenSet = false; + + long long m_queuedDurationMilliseconds; + bool m_queuedDurationMillisecondsHasBeenSet = false; }; } // namespace Model diff --git a/generated/src/aws-cpp-sdk-emr-serverless/include/aws/emr-serverless/model/JobRunState.h b/generated/src/aws-cpp-sdk-emr-serverless/include/aws/emr-serverless/model/JobRunState.h index ab67c5d0276..21bcff1770e 100644 --- a/generated/src/aws-cpp-sdk-emr-serverless/include/aws/emr-serverless/model/JobRunState.h +++ b/generated/src/aws-cpp-sdk-emr-serverless/include/aws/emr-serverless/model/JobRunState.h @@ -23,7 +23,8 @@ namespace Model SUCCESS, FAILED, CANCELLING, - CANCELLED + CANCELLED, + QUEUED }; namespace JobRunStateMapper diff --git a/generated/src/aws-cpp-sdk-emr-serverless/include/aws/emr-serverless/model/SchedulerConfiguration.h b/generated/src/aws-cpp-sdk-emr-serverless/include/aws/emr-serverless/model/SchedulerConfiguration.h new file mode 100644 index 00000000000..abd778836f4 --- /dev/null +++ b/generated/src/aws-cpp-sdk-emr-serverless/include/aws/emr-serverless/model/SchedulerConfiguration.h @@ -0,0 +1,74 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#pragma once +#include + +namespace Aws +{ +namespace Utils +{ +namespace Json +{ + class JsonValue; + class JsonView; +} // namespace Json +} // namespace Utils +namespace EMRServerless +{ +namespace Model +{ + + /** + *

    The scheduler configuration for batch and streaming jobs running on this + * application. Supported with release labels emr-7.0.0 and above.

    See + * Also:

    AWS + * API Reference

    + */ + class SchedulerConfiguration + { + public: + AWS_EMRSERVERLESS_API SchedulerConfiguration(); + AWS_EMRSERVERLESS_API SchedulerConfiguration(Aws::Utils::Json::JsonView jsonValue); + AWS_EMRSERVERLESS_API SchedulerConfiguration& operator=(Aws::Utils::Json::JsonView jsonValue); + AWS_EMRSERVERLESS_API Aws::Utils::Json::JsonValue Jsonize() const; + + + ///@{ + /** + *

    The maximum duration in minutes for the job in the QUEUED state. If scheduler + * configuration is enabled on your application, the default value is 360 minutes + * (6 hours). The valid range is from 15 to 720.

    + */ + inline int GetQueueTimeoutMinutes() const{ return m_queueTimeoutMinutes; } + inline bool QueueTimeoutMinutesHasBeenSet() const { return m_queueTimeoutMinutesHasBeenSet; } + inline void SetQueueTimeoutMinutes(int value) { m_queueTimeoutMinutesHasBeenSet = true; m_queueTimeoutMinutes = value; } + inline SchedulerConfiguration& WithQueueTimeoutMinutes(int value) { SetQueueTimeoutMinutes(value); return *this;} + ///@} + + ///@{ + /** + *

    The maximum number of concurrent job runs on this application. If scheduler + * configuration is enabled on your application, the default value is 15. The valid + * range is from 1 to 1000.

    + */ + inline int GetMaxConcurrentRuns() const{ return m_maxConcurrentRuns; } + inline bool MaxConcurrentRunsHasBeenSet() const { return m_maxConcurrentRunsHasBeenSet; } + inline void SetMaxConcurrentRuns(int value) { m_maxConcurrentRunsHasBeenSet = true; m_maxConcurrentRuns = value; } + inline SchedulerConfiguration& WithMaxConcurrentRuns(int value) { SetMaxConcurrentRuns(value); return *this;} + ///@} + private: + + int m_queueTimeoutMinutes; + bool m_queueTimeoutMinutesHasBeenSet = false; + + int m_maxConcurrentRuns; + bool m_maxConcurrentRunsHasBeenSet = false; + }; + +} // namespace Model +} // namespace EMRServerless +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-emr-serverless/include/aws/emr-serverless/model/UpdateApplicationRequest.h b/generated/src/aws-cpp-sdk-emr-serverless/include/aws/emr-serverless/model/UpdateApplicationRequest.h index 01ab631ec33..3b4f55bb760 100644 --- a/generated/src/aws-cpp-sdk-emr-serverless/include/aws/emr-serverless/model/UpdateApplicationRequest.h +++ b/generated/src/aws-cpp-sdk-emr-serverless/include/aws/emr-serverless/model/UpdateApplicationRequest.h @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -251,6 +252,19 @@ namespace Model inline UpdateApplicationRequest& WithMonitoringConfiguration(const MonitoringConfiguration& value) { SetMonitoringConfiguration(value); return *this;} inline UpdateApplicationRequest& WithMonitoringConfiguration(MonitoringConfiguration&& value) { SetMonitoringConfiguration(std::move(value)); return *this;} ///@} + + ///@{ + /** + *

    The scheduler configuration for batch and streaming jobs running on this + * application. Supported with release labels emr-7.0.0 and above.

    + */ + inline const SchedulerConfiguration& GetSchedulerConfiguration() const{ return m_schedulerConfiguration; } + inline bool SchedulerConfigurationHasBeenSet() const { return m_schedulerConfigurationHasBeenSet; } + inline void SetSchedulerConfiguration(const SchedulerConfiguration& value) { m_schedulerConfigurationHasBeenSet = true; m_schedulerConfiguration = value; } + inline void SetSchedulerConfiguration(SchedulerConfiguration&& value) { m_schedulerConfigurationHasBeenSet = true; m_schedulerConfiguration = std::move(value); } + inline UpdateApplicationRequest& WithSchedulerConfiguration(const SchedulerConfiguration& value) { SetSchedulerConfiguration(value); return *this;} + inline UpdateApplicationRequest& WithSchedulerConfiguration(SchedulerConfiguration&& value) { SetSchedulerConfiguration(std::move(value)); return *this;} + ///@} private: Aws::String m_applicationId; @@ -294,6 +308,9 @@ namespace Model MonitoringConfiguration m_monitoringConfiguration; bool m_monitoringConfigurationHasBeenSet = false; + + SchedulerConfiguration m_schedulerConfiguration; + bool m_schedulerConfigurationHasBeenSet = false; }; } // namespace Model diff --git a/generated/src/aws-cpp-sdk-emr-serverless/source/model/Application.cpp b/generated/src/aws-cpp-sdk-emr-serverless/source/model/Application.cpp index fd98d4990d8..2e9bea02cff 100644 --- a/generated/src/aws-cpp-sdk-emr-serverless/source/model/Application.cpp +++ b/generated/src/aws-cpp-sdk-emr-serverless/source/model/Application.cpp @@ -41,7 +41,8 @@ Application::Application() : m_workerTypeSpecificationsHasBeenSet(false), m_runtimeConfigurationHasBeenSet(false), m_monitoringConfigurationHasBeenSet(false), - m_interactiveConfigurationHasBeenSet(false) + m_interactiveConfigurationHasBeenSet(false), + m_schedulerConfigurationHasBeenSet(false) { } @@ -212,6 +213,13 @@ Application& Application::operator =(JsonView jsonValue) m_interactiveConfigurationHasBeenSet = true; } + if(jsonValue.ValueExists("schedulerConfiguration")) + { + m_schedulerConfiguration = jsonValue.GetObject("schedulerConfiguration"); + + m_schedulerConfigurationHasBeenSet = true; + } + return *this; } @@ -361,6 +369,12 @@ JsonValue Application::Jsonize() const } + if(m_schedulerConfigurationHasBeenSet) + { + payload.WithObject("schedulerConfiguration", m_schedulerConfiguration.Jsonize()); + + } + return payload; } diff --git a/generated/src/aws-cpp-sdk-emr-serverless/source/model/CreateApplicationRequest.cpp b/generated/src/aws-cpp-sdk-emr-serverless/source/model/CreateApplicationRequest.cpp index c64355440ff..1de51b2191e 100644 --- a/generated/src/aws-cpp-sdk-emr-serverless/source/model/CreateApplicationRequest.cpp +++ b/generated/src/aws-cpp-sdk-emr-serverless/source/model/CreateApplicationRequest.cpp @@ -30,7 +30,8 @@ CreateApplicationRequest::CreateApplicationRequest() : m_workerTypeSpecificationsHasBeenSet(false), m_runtimeConfigurationHasBeenSet(false), m_monitoringConfigurationHasBeenSet(false), - m_interactiveConfigurationHasBeenSet(false) + m_interactiveConfigurationHasBeenSet(false), + m_schedulerConfigurationHasBeenSet(false) { } @@ -153,6 +154,12 @@ Aws::String CreateApplicationRequest::SerializePayload() const } + if(m_schedulerConfigurationHasBeenSet) + { + payload.WithObject("schedulerConfiguration", m_schedulerConfiguration.Jsonize()); + + } + return payload.View().WriteReadable(); } diff --git a/generated/src/aws-cpp-sdk-emr-serverless/source/model/JobRun.cpp b/generated/src/aws-cpp-sdk-emr-serverless/source/model/JobRun.cpp index f3b53cd9652..de79bee1e95 100644 
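For illustration, a minimal sketch of how the new EMR Serverless job concurrency and queuing surface might be used from application code: it attaches a SchedulerConfiguration (queueTimeoutMinutes, maxConcurrentRuns) to a CreateApplicationRequest and reads the new JobRun timing fields (startedAt, endedAt, queuedDurationMilliseconds). The EMRServerlessClient header path, the CreateApplication operation, and the SetName/SetReleaseLabel/SetType setters are assumed from the existing SDK surface and do not appear in this diff.

#include <aws/emr-serverless/EMRServerlessClient.h>            // assumed client header
#include <aws/emr-serverless/model/CreateApplicationRequest.h>
#include <aws/emr-serverless/model/SchedulerConfiguration.h>
#include <aws/emr-serverless/model/JobRun.h>
#include <iostream>

using namespace Aws::EMRServerless;
using namespace Aws::EMRServerless::Model;

void CreateQueuedApplication(const EMRServerlessClient& client)
{
  // Queue up to 100 concurrent runs and expire queued jobs after two hours.
  SchedulerConfiguration scheduler;
  scheduler.SetQueueTimeoutMinutes(120);   // valid range 15-720, default 360
  scheduler.SetMaxConcurrentRuns(100);     // valid range 1-1000, default 15

  CreateApplicationRequest request;
  request.SetName("queued-spark-app");     // assumed setter
  request.SetReleaseLabel("emr-7.0.0");    // scheduler configuration needs emr-7.0.0 or later
  request.SetType("SPARK");                // assumed setter
  request.SetSchedulerConfiguration(scheduler);

  auto outcome = client.CreateApplication(request);   // assumed operation
  if (!outcome.IsSuccess())
  {
    std::cerr << outcome.GetError().GetMessage() << std::endl;
  }
}

void PrintJobTimings(const JobRun& jobRun)
{
  // New fields in this release: queued duration plus start/end timestamps.
  if (jobRun.QueuedDurationMillisecondsHasBeenSet())
  {
    std::cout << "queued for " << jobRun.GetQueuedDurationMilliseconds() << " ms" << std::endl;
  }
  if (jobRun.StartedAtHasBeenSet() && jobRun.EndedAtHasBeenSet())
  {
    std::cout << "ran from " << jobRun.GetStartedAt().SecondsWithMSPrecision()
              << " to " << jobRun.GetEndedAt().SecondsWithMSPrecision() << " (epoch seconds)" << std::endl;
  }
}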
--- a/generated/src/aws-cpp-sdk-emr-serverless/source/model/JobRun.cpp +++ b/generated/src/aws-cpp-sdk-emr-serverless/source/model/JobRun.cpp @@ -47,7 +47,11 @@ JobRun::JobRun() : m_attempt(0), m_attemptHasBeenSet(false), m_attemptCreatedAtHasBeenSet(false), - m_attemptUpdatedAtHasBeenSet(false) + m_attemptUpdatedAtHasBeenSet(false), + m_startedAtHasBeenSet(false), + m_endedAtHasBeenSet(false), + m_queuedDurationMilliseconds(0), + m_queuedDurationMillisecondsHasBeenSet(false) { } @@ -230,6 +234,27 @@ JobRun& JobRun::operator =(JsonView jsonValue) m_attemptUpdatedAtHasBeenSet = true; } + if(jsonValue.ValueExists("startedAt")) + { + m_startedAt = jsonValue.GetDouble("startedAt"); + + m_startedAtHasBeenSet = true; + } + + if(jsonValue.ValueExists("endedAt")) + { + m_endedAt = jsonValue.GetDouble("endedAt"); + + m_endedAtHasBeenSet = true; + } + + if(jsonValue.ValueExists("queuedDurationMilliseconds")) + { + m_queuedDurationMilliseconds = jsonValue.GetInt64("queuedDurationMilliseconds"); + + m_queuedDurationMillisecondsHasBeenSet = true; + } + return *this; } @@ -380,6 +405,22 @@ JsonValue JobRun::Jsonize() const payload.WithDouble("attemptUpdatedAt", m_attemptUpdatedAt.SecondsWithMSPrecision()); } + if(m_startedAtHasBeenSet) + { + payload.WithDouble("startedAt", m_startedAt.SecondsWithMSPrecision()); + } + + if(m_endedAtHasBeenSet) + { + payload.WithDouble("endedAt", m_endedAt.SecondsWithMSPrecision()); + } + + if(m_queuedDurationMillisecondsHasBeenSet) + { + payload.WithInt64("queuedDurationMilliseconds", m_queuedDurationMilliseconds); + + } + return payload; } diff --git a/generated/src/aws-cpp-sdk-emr-serverless/source/model/JobRunState.cpp b/generated/src/aws-cpp-sdk-emr-serverless/source/model/JobRunState.cpp index 62d4e8bb561..e63cd03c1ca 100644 --- a/generated/src/aws-cpp-sdk-emr-serverless/source/model/JobRunState.cpp +++ b/generated/src/aws-cpp-sdk-emr-serverless/source/model/JobRunState.cpp @@ -28,6 +28,7 @@ namespace Aws static const int FAILED_HASH = HashingUtils::HashString("FAILED"); static const int CANCELLING_HASH = HashingUtils::HashString("CANCELLING"); static const int CANCELLED_HASH = HashingUtils::HashString("CANCELLED"); + static const int QUEUED_HASH = HashingUtils::HashString("QUEUED"); JobRunState GetJobRunStateForName(const Aws::String& name) @@ -65,6 +66,10 @@ namespace Aws { return JobRunState::CANCELLED; } + else if (hashCode == QUEUED_HASH) + { + return JobRunState::QUEUED; + } EnumParseOverflowContainer* overflowContainer = Aws::GetEnumOverflowContainer(); if(overflowContainer) { @@ -97,6 +102,8 @@ namespace Aws return "CANCELLING"; case JobRunState::CANCELLED: return "CANCELLED"; + case JobRunState::QUEUED: + return "QUEUED"; default: EnumParseOverflowContainer* overflowContainer = Aws::GetEnumOverflowContainer(); if(overflowContainer) diff --git a/generated/src/aws-cpp-sdk-emr-serverless/source/model/SchedulerConfiguration.cpp b/generated/src/aws-cpp-sdk-emr-serverless/source/model/SchedulerConfiguration.cpp new file mode 100644 index 00000000000..2a8ff03d00c --- /dev/null +++ b/generated/src/aws-cpp-sdk-emr-serverless/source/model/SchedulerConfiguration.cpp @@ -0,0 +1,75 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. 
+ */ + +#include +#include + +#include + +using namespace Aws::Utils::Json; +using namespace Aws::Utils; + +namespace Aws +{ +namespace EMRServerless +{ +namespace Model +{ + +SchedulerConfiguration::SchedulerConfiguration() : + m_queueTimeoutMinutes(0), + m_queueTimeoutMinutesHasBeenSet(false), + m_maxConcurrentRuns(0), + m_maxConcurrentRunsHasBeenSet(false) +{ +} + +SchedulerConfiguration::SchedulerConfiguration(JsonView jsonValue) + : SchedulerConfiguration() +{ + *this = jsonValue; +} + +SchedulerConfiguration& SchedulerConfiguration::operator =(JsonView jsonValue) +{ + if(jsonValue.ValueExists("queueTimeoutMinutes")) + { + m_queueTimeoutMinutes = jsonValue.GetInteger("queueTimeoutMinutes"); + + m_queueTimeoutMinutesHasBeenSet = true; + } + + if(jsonValue.ValueExists("maxConcurrentRuns")) + { + m_maxConcurrentRuns = jsonValue.GetInteger("maxConcurrentRuns"); + + m_maxConcurrentRunsHasBeenSet = true; + } + + return *this; +} + +JsonValue SchedulerConfiguration::Jsonize() const +{ + JsonValue payload; + + if(m_queueTimeoutMinutesHasBeenSet) + { + payload.WithInteger("queueTimeoutMinutes", m_queueTimeoutMinutes); + + } + + if(m_maxConcurrentRunsHasBeenSet) + { + payload.WithInteger("maxConcurrentRuns", m_maxConcurrentRuns); + + } + + return payload; +} + +} // namespace Model +} // namespace EMRServerless +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-emr-serverless/source/model/UpdateApplicationRequest.cpp b/generated/src/aws-cpp-sdk-emr-serverless/source/model/UpdateApplicationRequest.cpp index 7616bc1e927..de2a86870ce 100644 --- a/generated/src/aws-cpp-sdk-emr-serverless/source/model/UpdateApplicationRequest.cpp +++ b/generated/src/aws-cpp-sdk-emr-serverless/source/model/UpdateApplicationRequest.cpp @@ -28,7 +28,8 @@ UpdateApplicationRequest::UpdateApplicationRequest() : m_interactiveConfigurationHasBeenSet(false), m_releaseLabelHasBeenSet(false), m_runtimeConfigurationHasBeenSet(false), - m_monitoringConfigurationHasBeenSet(false) + m_monitoringConfigurationHasBeenSet(false), + m_schedulerConfigurationHasBeenSet(false) { } @@ -128,6 +129,12 @@ Aws::String UpdateApplicationRequest::SerializePayload() const } + if(m_schedulerConfigurationHasBeenSet) + { + payload.WithObject("schedulerConfiguration", m_schedulerConfiguration.Jsonize()); + + } + return payload.View().WriteReadable(); } diff --git a/generated/src/aws-cpp-sdk-glue/include/aws/glue/model/AuthenticationConfigurationInput.h b/generated/src/aws-cpp-sdk-glue/include/aws/glue/model/AuthenticationConfigurationInput.h index c6c5cb199a0..b1ca03064fe 100644 --- a/generated/src/aws-cpp-sdk-glue/include/aws/glue/model/AuthenticationConfigurationInput.h +++ b/generated/src/aws-cpp-sdk-glue/include/aws/glue/model/AuthenticationConfigurationInput.h @@ -6,8 +6,8 @@ #pragma once #include #include -#include #include +#include #include namespace Aws @@ -53,6 +53,18 @@ namespace Model inline AuthenticationConfigurationInput& WithAuthenticationType(AuthenticationType&& value) { SetAuthenticationType(std::move(value)); return *this;} ///@} + ///@{ + /** + *

    The properties for OAuth2 authentication in the CreateConnection request.

    + */ + inline const OAuth2PropertiesInput& GetOAuth2Properties() const{ return m_oAuth2Properties; } + inline bool OAuth2PropertiesHasBeenSet() const { return m_oAuth2PropertiesHasBeenSet; } + inline void SetOAuth2Properties(const OAuth2PropertiesInput& value) { m_oAuth2PropertiesHasBeenSet = true; m_oAuth2Properties = value; } + inline void SetOAuth2Properties(OAuth2PropertiesInput&& value) { m_oAuth2PropertiesHasBeenSet = true; m_oAuth2Properties = std::move(value); } + inline AuthenticationConfigurationInput& WithOAuth2Properties(const OAuth2PropertiesInput& value) { SetOAuth2Properties(value); return *this;} + inline AuthenticationConfigurationInput& WithOAuth2Properties(OAuth2PropertiesInput&& value) { SetOAuth2Properties(std::move(value)); return *this;} + ///@} + ///@{ /** *

    The secret manager ARN to store credentials in the CreateConnection @@ -67,28 +79,16 @@ namespace Model inline AuthenticationConfigurationInput& WithSecretArn(Aws::String&& value) { SetSecretArn(std::move(value)); return *this;} inline AuthenticationConfigurationInput& WithSecretArn(const char* value) { SetSecretArn(value); return *this;} ///@} - - ///@{ - /** - *

    The properties for OAuth2 authentication in the CreateConnection request.

    - */ - inline const OAuth2PropertiesInput& GetOAuth2Properties() const{ return m_oAuth2Properties; } - inline bool OAuth2PropertiesHasBeenSet() const { return m_oAuth2PropertiesHasBeenSet; } - inline void SetOAuth2Properties(const OAuth2PropertiesInput& value) { m_oAuth2PropertiesHasBeenSet = true; m_oAuth2Properties = value; } - inline void SetOAuth2Properties(OAuth2PropertiesInput&& value) { m_oAuth2PropertiesHasBeenSet = true; m_oAuth2Properties = std::move(value); } - inline AuthenticationConfigurationInput& WithOAuth2Properties(const OAuth2PropertiesInput& value) { SetOAuth2Properties(value); return *this;} - inline AuthenticationConfigurationInput& WithOAuth2Properties(OAuth2PropertiesInput&& value) { SetOAuth2Properties(std::move(value)); return *this;} - ///@} private: AuthenticationType m_authenticationType; bool m_authenticationTypeHasBeenSet = false; - Aws::String m_secretArn; - bool m_secretArnHasBeenSet = false; - OAuth2PropertiesInput m_oAuth2Properties; bool m_oAuth2PropertiesHasBeenSet = false; + + Aws::String m_secretArn; + bool m_secretArnHasBeenSet = false; }; } // namespace Model diff --git a/generated/src/aws-cpp-sdk-glue/include/aws/glue/model/Connection.h b/generated/src/aws-cpp-sdk-glue/include/aws/glue/model/Connection.h index 3c7178e955d..4c086c77995 100644 --- a/generated/src/aws-cpp-sdk-glue/include/aws/glue/model/Connection.h +++ b/generated/src/aws-cpp-sdk-glue/include/aws/glue/model/Connection.h @@ -230,6 +230,25 @@ namespace Model inline Connection& AddConnectionProperties(const ConnectionPropertyKey& key, const char* value) { m_connectionPropertiesHasBeenSet = true; m_connectionProperties.emplace(key, value); return *this; } ///@} + ///@{ + /** + *

    This field is not currently used.

    + */ + inline const Aws::Map& GetAthenaProperties() const{ return m_athenaProperties; } + inline bool AthenaPropertiesHasBeenSet() const { return m_athenaPropertiesHasBeenSet; } + inline void SetAthenaProperties(const Aws::Map& value) { m_athenaPropertiesHasBeenSet = true; m_athenaProperties = value; } + inline void SetAthenaProperties(Aws::Map&& value) { m_athenaPropertiesHasBeenSet = true; m_athenaProperties = std::move(value); } + inline Connection& WithAthenaProperties(const Aws::Map& value) { SetAthenaProperties(value); return *this;} + inline Connection& WithAthenaProperties(Aws::Map&& value) { SetAthenaProperties(std::move(value)); return *this;} + inline Connection& AddAthenaProperties(const Aws::String& key, const Aws::String& value) { m_athenaPropertiesHasBeenSet = true; m_athenaProperties.emplace(key, value); return *this; } + inline Connection& AddAthenaProperties(Aws::String&& key, const Aws::String& value) { m_athenaPropertiesHasBeenSet = true; m_athenaProperties.emplace(std::move(key), value); return *this; } + inline Connection& AddAthenaProperties(const Aws::String& key, Aws::String&& value) { m_athenaPropertiesHasBeenSet = true; m_athenaProperties.emplace(key, std::move(value)); return *this; } + inline Connection& AddAthenaProperties(Aws::String&& key, Aws::String&& value) { m_athenaPropertiesHasBeenSet = true; m_athenaProperties.emplace(std::move(key), std::move(value)); return *this; } + inline Connection& AddAthenaProperties(const char* key, Aws::String&& value) { m_athenaPropertiesHasBeenSet = true; m_athenaProperties.emplace(key, std::move(value)); return *this; } + inline Connection& AddAthenaProperties(Aws::String&& key, const char* value) { m_athenaPropertiesHasBeenSet = true; m_athenaProperties.emplace(std::move(key), value); return *this; } + inline Connection& AddAthenaProperties(const char* key, const char* value) { m_athenaPropertiesHasBeenSet = true; m_athenaProperties.emplace(key, value); return *this; } + ///@} + ///@{ /** *

    The physical connection requirements, such as virtual private cloud (VPC) and @@ -349,6 +368,9 @@ namespace Model Aws::Map m_connectionProperties; bool m_connectionPropertiesHasBeenSet = false; + Aws::Map m_athenaProperties; + bool m_athenaPropertiesHasBeenSet = false; + PhysicalConnectionRequirements m_physicalConnectionRequirements; bool m_physicalConnectionRequirementsHasBeenSet = false; diff --git a/generated/src/aws-cpp-sdk-glue/include/aws/glue/model/ConnectionInput.h b/generated/src/aws-cpp-sdk-glue/include/aws/glue/model/ConnectionInput.h index c4c789b0ee2..5d1a52a1364 100644 --- a/generated/src/aws-cpp-sdk-glue/include/aws/glue/model/ConnectionInput.h +++ b/generated/src/aws-cpp-sdk-glue/include/aws/glue/model/ConnectionInput.h @@ -183,6 +183,25 @@ namespace Model inline ConnectionInput& AddConnectionProperties(const ConnectionPropertyKey& key, const char* value) { m_connectionPropertiesHasBeenSet = true; m_connectionProperties.emplace(key, value); return *this; } ///@} + ///@{ + /** + *

    This field is not currently used.

    + */ + inline const Aws::Map& GetAthenaProperties() const{ return m_athenaProperties; } + inline bool AthenaPropertiesHasBeenSet() const { return m_athenaPropertiesHasBeenSet; } + inline void SetAthenaProperties(const Aws::Map& value) { m_athenaPropertiesHasBeenSet = true; m_athenaProperties = value; } + inline void SetAthenaProperties(Aws::Map&& value) { m_athenaPropertiesHasBeenSet = true; m_athenaProperties = std::move(value); } + inline ConnectionInput& WithAthenaProperties(const Aws::Map& value) { SetAthenaProperties(value); return *this;} + inline ConnectionInput& WithAthenaProperties(Aws::Map&& value) { SetAthenaProperties(std::move(value)); return *this;} + inline ConnectionInput& AddAthenaProperties(const Aws::String& key, const Aws::String& value) { m_athenaPropertiesHasBeenSet = true; m_athenaProperties.emplace(key, value); return *this; } + inline ConnectionInput& AddAthenaProperties(Aws::String&& key, const Aws::String& value) { m_athenaPropertiesHasBeenSet = true; m_athenaProperties.emplace(std::move(key), value); return *this; } + inline ConnectionInput& AddAthenaProperties(const Aws::String& key, Aws::String&& value) { m_athenaPropertiesHasBeenSet = true; m_athenaProperties.emplace(key, std::move(value)); return *this; } + inline ConnectionInput& AddAthenaProperties(Aws::String&& key, Aws::String&& value) { m_athenaPropertiesHasBeenSet = true; m_athenaProperties.emplace(std::move(key), std::move(value)); return *this; } + inline ConnectionInput& AddAthenaProperties(const char* key, Aws::String&& value) { m_athenaPropertiesHasBeenSet = true; m_athenaProperties.emplace(key, std::move(value)); return *this; } + inline ConnectionInput& AddAthenaProperties(Aws::String&& key, const char* value) { m_athenaPropertiesHasBeenSet = true; m_athenaProperties.emplace(std::move(key), value); return *this; } + inline ConnectionInput& AddAthenaProperties(const char* key, const char* value) { m_athenaPropertiesHasBeenSet = true; m_athenaProperties.emplace(key, value); return *this; } + ///@} + ///@{ /** *

    The physical connection requirements, such as virtual private cloud (VPC) and @@ -237,6 +256,9 @@ namespace Model Aws::Map m_connectionProperties; bool m_connectionPropertiesHasBeenSet = false; + Aws::Map m_athenaProperties; + bool m_athenaPropertiesHasBeenSet = false; + PhysicalConnectionRequirements m_physicalConnectionRequirements; bool m_physicalConnectionRequirementsHasBeenSet = false; diff --git a/generated/src/aws-cpp-sdk-glue/source/model/AuthenticationConfigurationInput.cpp b/generated/src/aws-cpp-sdk-glue/source/model/AuthenticationConfigurationInput.cpp index ff63254c1d4..667928afc55 100644 --- a/generated/src/aws-cpp-sdk-glue/source/model/AuthenticationConfigurationInput.cpp +++ b/generated/src/aws-cpp-sdk-glue/source/model/AuthenticationConfigurationInput.cpp @@ -21,8 +21,8 @@ namespace Model AuthenticationConfigurationInput::AuthenticationConfigurationInput() : m_authenticationType(AuthenticationType::NOT_SET), m_authenticationTypeHasBeenSet(false), - m_secretArnHasBeenSet(false), - m_oAuth2PropertiesHasBeenSet(false) + m_oAuth2PropertiesHasBeenSet(false), + m_secretArnHasBeenSet(false) { } @@ -41,18 +41,18 @@ AuthenticationConfigurationInput& AuthenticationConfigurationInput::operator =(J m_authenticationTypeHasBeenSet = true; } - if(jsonValue.ValueExists("SecretArn")) + if(jsonValue.ValueExists("OAuth2Properties")) { - m_secretArn = jsonValue.GetString("SecretArn"); + m_oAuth2Properties = jsonValue.GetObject("OAuth2Properties"); - m_secretArnHasBeenSet = true; + m_oAuth2PropertiesHasBeenSet = true; } - if(jsonValue.ValueExists("OAuth2Properties")) + if(jsonValue.ValueExists("SecretArn")) { - m_oAuth2Properties = jsonValue.GetObject("OAuth2Properties"); + m_secretArn = jsonValue.GetString("SecretArn"); - m_oAuth2PropertiesHasBeenSet = true; + m_secretArnHasBeenSet = true; } return *this; @@ -67,15 +67,15 @@ JsonValue AuthenticationConfigurationInput::Jsonize() const payload.WithString("AuthenticationType", AuthenticationTypeMapper::GetNameForAuthenticationType(m_authenticationType)); } - if(m_secretArnHasBeenSet) + if(m_oAuth2PropertiesHasBeenSet) { - payload.WithString("SecretArn", m_secretArn); + payload.WithObject("OAuth2Properties", m_oAuth2Properties.Jsonize()); } - if(m_oAuth2PropertiesHasBeenSet) + if(m_secretArnHasBeenSet) { - payload.WithObject("OAuth2Properties", m_oAuth2Properties.Jsonize()); + payload.WithString("SecretArn", m_secretArn); } diff --git a/generated/src/aws-cpp-sdk-glue/source/model/Connection.cpp b/generated/src/aws-cpp-sdk-glue/source/model/Connection.cpp index 74276b6cf8b..1b4b579d5e2 100644 --- a/generated/src/aws-cpp-sdk-glue/source/model/Connection.cpp +++ b/generated/src/aws-cpp-sdk-glue/source/model/Connection.cpp @@ -25,6 +25,7 @@ Connection::Connection() : m_connectionTypeHasBeenSet(false), m_matchCriteriaHasBeenSet(false), m_connectionPropertiesHasBeenSet(false), + m_athenaPropertiesHasBeenSet(false), m_physicalConnectionRequirementsHasBeenSet(false), m_creationTimeHasBeenSet(false), m_lastUpdatedTimeHasBeenSet(false), @@ -86,6 +87,16 @@ Connection& Connection::operator =(JsonView jsonValue) m_connectionPropertiesHasBeenSet = true; } + if(jsonValue.ValueExists("AthenaProperties")) + { + Aws::Map athenaPropertiesJsonMap = jsonValue.GetObject("AthenaProperties").GetAllObjects(); + for(auto& athenaPropertiesItem : athenaPropertiesJsonMap) + { + m_athenaProperties[athenaPropertiesItem.first] = athenaPropertiesItem.second.AsString(); + } + m_athenaPropertiesHasBeenSet = true; + } + 
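// A minimal usage sketch for the new AthenaProperties map, assuming the existing
// GlueClient::CreateConnection operation, the CreateConnectionRequest type and its
// SetConnectionInput setter, and ConnectionInput::SetName, none of which appear in
// this diff. The generated documentation still marks AthenaProperties as unused, the
// key/value below is a placeholder rather than a documented Athena property, and
// required fields such as ConnectionType and ConnectionProperties are omitted.
#include <aws/glue/GlueClient.h>
#include <aws/glue/model/ConnectionInput.h>
#include <aws/glue/model/CreateConnectionRequest.h>
#include <iostream>

void CreateConnectionWithAthenaProperties(const Aws::Glue::GlueClient& glueClient)
{
  Aws::Glue::Model::ConnectionInput input;
  input.SetName("athena-federated-connection");                   // assumed setter
  input.AddAthenaProperties("example_property", "example_value"); // placeholder key/value

  Aws::Glue::Model::CreateConnectionRequest request;              // assumed request type
  request.SetConnectionInput(input);                              // assumed setter

  auto outcome = glueClient.CreateConnection(request);            // assumed operation
  if (!outcome.IsSuccess())
  {
    std::cerr << outcome.GetError().GetMessage() << std::endl;
  }
}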
if(jsonValue.ValueExists("PhysicalConnectionRequirements")) { m_physicalConnectionRequirements = jsonValue.GetObject("PhysicalConnectionRequirements"); @@ -188,6 +199,17 @@ JsonValue Connection::Jsonize() const } + if(m_athenaPropertiesHasBeenSet) + { + JsonValue athenaPropertiesJsonMap; + for(auto& athenaPropertiesItem : m_athenaProperties) + { + athenaPropertiesJsonMap.WithString(athenaPropertiesItem.first, athenaPropertiesItem.second); + } + payload.WithObject("AthenaProperties", std::move(athenaPropertiesJsonMap)); + + } + if(m_physicalConnectionRequirementsHasBeenSet) { payload.WithObject("PhysicalConnectionRequirements", m_physicalConnectionRequirements.Jsonize()); diff --git a/generated/src/aws-cpp-sdk-glue/source/model/ConnectionInput.cpp b/generated/src/aws-cpp-sdk-glue/source/model/ConnectionInput.cpp index 808a279984f..bc0e646c86b 100644 --- a/generated/src/aws-cpp-sdk-glue/source/model/ConnectionInput.cpp +++ b/generated/src/aws-cpp-sdk-glue/source/model/ConnectionInput.cpp @@ -25,6 +25,7 @@ ConnectionInput::ConnectionInput() : m_connectionTypeHasBeenSet(false), m_matchCriteriaHasBeenSet(false), m_connectionPropertiesHasBeenSet(false), + m_athenaPropertiesHasBeenSet(false), m_physicalConnectionRequirementsHasBeenSet(false), m_authenticationConfigurationHasBeenSet(false), m_validateCredentials(false), @@ -81,6 +82,16 @@ ConnectionInput& ConnectionInput::operator =(JsonView jsonValue) m_connectionPropertiesHasBeenSet = true; } + if(jsonValue.ValueExists("AthenaProperties")) + { + Aws::Map athenaPropertiesJsonMap = jsonValue.GetObject("AthenaProperties").GetAllObjects(); + for(auto& athenaPropertiesItem : athenaPropertiesJsonMap) + { + m_athenaProperties[athenaPropertiesItem.first] = athenaPropertiesItem.second.AsString(); + } + m_athenaPropertiesHasBeenSet = true; + } + if(jsonValue.ValueExists("PhysicalConnectionRequirements")) { m_physicalConnectionRequirements = jsonValue.GetObject("PhysicalConnectionRequirements"); @@ -148,6 +159,17 @@ JsonValue ConnectionInput::Jsonize() const } + if(m_athenaPropertiesHasBeenSet) + { + JsonValue athenaPropertiesJsonMap; + for(auto& athenaPropertiesItem : m_athenaProperties) + { + athenaPropertiesJsonMap.WithString(athenaPropertiesItem.first, athenaPropertiesItem.second); + } + payload.WithObject("AthenaProperties", std::move(athenaPropertiesJsonMap)); + + } + if(m_physicalConnectionRequirementsHasBeenSet) { payload.WithObject("PhysicalConnectionRequirements", m_physicalConnectionRequirements.Jsonize()); diff --git a/generated/src/aws-cpp-sdk-rds/include/aws/rds/RDSClient.h b/generated/src/aws-cpp-sdk-rds/include/aws/rds/RDSClient.h index ef1c9d53e77..c613c2c5b4b 100644 --- a/generated/src/aws-cpp-sdk-rds/include/aws/rds/RDSClient.h +++ b/generated/src/aws-cpp-sdk-rds/include/aws/rds/RDSClient.h @@ -4731,7 +4731,7 @@ namespace Aws * href="https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/mysql-upgrade-snapshot.html">Upgrading * a MySQL DB snapshot engine version. For more information about upgrading a * RDS for PostgreSQL DB snapshot engine version, Upgrading + * href="https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_UpgradeDBSnapshot.PostgreSQL.html">Upgrading * a PostgreSQL DB snapshot engine version.

    This command doesn't * apply to Aurora MySQL and Aurora PostgreSQL. For Aurora, use * RestoreDBClusterFromSnapshot.

    See Also:

    The name of the DB parameter group to associate with this DB instance.

    - *

    If you don't specify a value for DBParameterGroupName, then - * Amazon RDS uses the DBParameterGroup of the source DB instance for - * a same Region read replica, or the default DBParameterGroup for the - * specified DB engine for a cross-Region read replica.

    Specifying a - * parameter group for this operation is only supported for MySQL DB instances for - * cross-Region read replicas and for Oracle DB instances. It isn't supported for - * MySQL DB instances for same Region read replicas or for RDS Custom.

    - *

    Constraints:

    • Must be 1 to 255 letters, numbers, or - * hyphens.

    • First character must be a letter.

    • - *

      Can't end with a hyphen or contain two consecutive hyphens.

    + *

    The name of the DB parameter group to associate with this read replica DB + * instance.

    For Single-AZ or Multi-AZ DB instance read replica instances, + * if you don't specify a value for DBParameterGroupName, then Amazon + * RDS uses the DBParameterGroup of the source DB instance for a same + * Region read replica, or the default DBParameterGroup for the + * specified DB engine for a cross-Region read replica.

    For Multi-AZ DB + * cluster same Region read replica instances, if you don't specify a value for + * DBParameterGroupName, then Amazon RDS uses the default + * DBParameterGroup.

    Specifying a parameter group for this + * operation is only supported for MySQL DB instances for cross-Region read + * replicas, for Multi-AZ DB cluster read replica instances, and for Oracle DB + * instances. It isn't supported for MySQL DB instances for same Region read + * replicas or for RDS Custom.

    Constraints:

    • Must be 1 to + * 255 letters, numbers, or hyphens.

    • First character must be a + * letter.

    • Can't end with a hyphen or contain two consecutive + * hyphens.
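For illustration, a minimal sketch of the parameter-group behaviour described above, assuming the existing RDSClient::CreateDBInstanceReadReplica operation and the request's standard Set* mutators (only the DBParameterGroupName accessors appear in this diff). Omitting SetDBParameterGroupName falls back to the source instance's parameter group for a same-Region instance replica, or the engine default for cross-Region and Multi-AZ DB cluster replicas.

#include <aws/rds/RDSClient.h>
#include <aws/rds/model/CreateDBInstanceReadReplicaRequest.h>
#include <iostream>

void CreateReplicaWithParameterGroup(const Aws::RDS::RDSClient& rdsClient)
{
  Aws::RDS::Model::CreateDBInstanceReadReplicaRequest request;
  request.SetDBInstanceIdentifier("my-read-replica");            // assumed setter
  request.SetSourceDBInstanceIdentifier("my-source-instance");   // assumed setter
  // 1-255 letters, numbers, or hyphens; must start with a letter;
  // no trailing hyphen and no consecutive hyphens.
  request.SetDBParameterGroupName("my-replica-params");

  auto outcome = rdsClient.CreateDBInstanceReadReplica(request); // assumed operation
  if (!outcome.IsSuccess())
  {
    std::cerr << outcome.GetError().GetMessage() << std::endl;
  }
}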

    */ inline const Aws::String& GetDBParameterGroupName() const{ return m_dBParameterGroupName; } inline bool DBParameterGroupNameHasBeenSet() const { return m_dBParameterGroupNameHasBeenSet; } diff --git a/generated/src/aws-cpp-sdk-rds/include/aws/rds/model/CreateDBShardGroupRequest.h b/generated/src/aws-cpp-sdk-rds/include/aws/rds/model/CreateDBShardGroupRequest.h index 935bf64e4a4..1daa5654ef5 100644 --- a/generated/src/aws-cpp-sdk-rds/include/aws/rds/model/CreateDBShardGroupRequest.h +++ b/generated/src/aws-cpp-sdk-rds/include/aws/rds/model/CreateDBShardGroupRequest.h @@ -66,13 +66,12 @@ namespace Model ///@{ /** - *

    Specifies whether to create standby instances for the DB shard group. Valid - * values are the following:

    • 0 - Creates a single, primary DB - * instance for each physical shard. This is the default value, and the only one - * supported for the preview.

    • 1 - Creates a primary DB instance - * and a standby instance in a different Availability Zone (AZ) for each physical - * shard.

    • 2 - Creates a primary DB instance and two standby - * instances in different AZs for each physical shard.

    + *

    Specifies whether to create standby DB shard groups for the DB shard group. + * Valid values are the following:

    • 0 - Creates a DB shard group + * without a standby DB shard group. This is the default value.

    • 1 + * - Creates a DB shard group with a standby DB shard group in a different + * Availability Zone (AZ).

    • 2 - Creates a DB shard group with two + * standby DB shard groups in two different AZs.

    */ inline int GetComputeRedundancy() const{ return m_computeRedundancy; } inline bool ComputeRedundancyHasBeenSet() const { return m_computeRedundancyHasBeenSet; } diff --git a/generated/src/aws-cpp-sdk-rds/include/aws/rds/model/CreateDBShardGroupResult.h b/generated/src/aws-cpp-sdk-rds/include/aws/rds/model/CreateDBShardGroupResult.h index a4bd8059424..d2cb8ca7ee0 100644 --- a/generated/src/aws-cpp-sdk-rds/include/aws/rds/model/CreateDBShardGroupResult.h +++ b/generated/src/aws-cpp-sdk-rds/include/aws/rds/model/CreateDBShardGroupResult.h @@ -95,13 +95,12 @@ namespace Model ///@{ /** - *

    Specifies whether to create standby instances for the DB shard group. Valid - * values are the following:

    • 0 - Creates a single, primary DB - * instance for each physical shard. This is the default value, and the only one - * supported for the preview.

    • 1 - Creates a primary DB instance - * and a standby instance in a different Availability Zone (AZ) for each physical - * shard.

    • 2 - Creates a primary DB instance and two standby - * instances in different AZs for each physical shard.

    + *

    Specifies whether to create standby DB shard groups for the DB shard group. + * Valid values are the following:

    • 0 - Creates a DB shard group + * without a standby DB shard group. This is the default value.

    • 1 + * - Creates a DB shard group with a standby DB shard group in a different + * Availability Zone (AZ).

    • 2 - Creates a DB shard group with two + * standby DB shard groups in two different AZs.

    */ inline int GetComputeRedundancy() const{ return m_computeRedundancy; } inline void SetComputeRedundancy(int value) { m_computeRedundancy = value; } @@ -153,6 +152,19 @@ namespace Model inline CreateDBShardGroupResult& WithEndpoint(const char* value) { SetEndpoint(value); return *this;} ///@} + ///@{ + /** + *

    The Amazon Resource Name (ARN) for the DB shard group.

    + */ + inline const Aws::String& GetDBShardGroupArn() const{ return m_dBShardGroupArn; } + inline void SetDBShardGroupArn(const Aws::String& value) { m_dBShardGroupArn = value; } + inline void SetDBShardGroupArn(Aws::String&& value) { m_dBShardGroupArn = std::move(value); } + inline void SetDBShardGroupArn(const char* value) { m_dBShardGroupArn.assign(value); } + inline CreateDBShardGroupResult& WithDBShardGroupArn(const Aws::String& value) { SetDBShardGroupArn(value); return *this;} + inline CreateDBShardGroupResult& WithDBShardGroupArn(Aws::String&& value) { SetDBShardGroupArn(std::move(value)); return *this;} + inline CreateDBShardGroupResult& WithDBShardGroupArn(const char* value) { SetDBShardGroupArn(value); return *this;} + ///@} + ///@{ inline const ResponseMetadata& GetResponseMetadata() const{ return m_responseMetadata; } @@ -181,6 +193,8 @@ namespace Model Aws::String m_endpoint; + Aws::String m_dBShardGroupArn; + ResponseMetadata m_responseMetadata; }; diff --git a/generated/src/aws-cpp-sdk-rds/include/aws/rds/model/DBShardGroup.h b/generated/src/aws-cpp-sdk-rds/include/aws/rds/model/DBShardGroup.h index 8e4868dc8b4..c21ba01dd75 100644 --- a/generated/src/aws-cpp-sdk-rds/include/aws/rds/model/DBShardGroup.h +++ b/generated/src/aws-cpp-sdk-rds/include/aws/rds/model/DBShardGroup.h @@ -102,13 +102,12 @@ namespace Model ///@{ /** - *

    Specifies whether to create standby instances for the DB shard group. Valid - * values are the following:

    • 0 - Creates a single, primary DB - * instance for each physical shard. This is the default value, and the only one - * supported for the preview.

    • 1 - Creates a primary DB instance - * and a standby instance in a different Availability Zone (AZ) for each physical - * shard.

    • 2 - Creates a primary DB instance and two standby - * instances in different AZs for each physical shard.

    + *

    Specifies whether to create standby DB shard groups for the DB shard group. + * Valid values are the following:

    • 0 - Creates a DB shard group + * without a standby DB shard group. This is the default value.

    • 1 + * - Creates a DB shard group with a standby DB shard group in a different + * Availability Zone (AZ).

    • 2 - Creates a DB shard group with two + * standby DB shard groups in two different AZs.

    */ inline int GetComputeRedundancy() const{ return m_computeRedundancy; } inline bool ComputeRedundancyHasBeenSet() const { return m_computeRedundancyHasBeenSet; } @@ -164,6 +163,20 @@ namespace Model inline DBShardGroup& WithEndpoint(const char* value) { SetEndpoint(value); return *this;} ///@} + ///@{ + /** + *

    The Amazon Resource Name (ARN) for the DB shard group.

    + */ + inline const Aws::String& GetDBShardGroupArn() const{ return m_dBShardGroupArn; } + inline bool DBShardGroupArnHasBeenSet() const { return m_dBShardGroupArnHasBeenSet; } + inline void SetDBShardGroupArn(const Aws::String& value) { m_dBShardGroupArnHasBeenSet = true; m_dBShardGroupArn = value; } + inline void SetDBShardGroupArn(Aws::String&& value) { m_dBShardGroupArnHasBeenSet = true; m_dBShardGroupArn = std::move(value); } + inline void SetDBShardGroupArn(const char* value) { m_dBShardGroupArnHasBeenSet = true; m_dBShardGroupArn.assign(value); } + inline DBShardGroup& WithDBShardGroupArn(const Aws::String& value) { SetDBShardGroupArn(value); return *this;} + inline DBShardGroup& WithDBShardGroupArn(Aws::String&& value) { SetDBShardGroupArn(std::move(value)); return *this;} + inline DBShardGroup& WithDBShardGroupArn(const char* value) { SetDBShardGroupArn(value); return *this;} + ///@} + ///@{ inline const ResponseMetadata& GetResponseMetadata() const{ return m_responseMetadata; } @@ -201,6 +214,9 @@ namespace Model Aws::String m_endpoint; bool m_endpointHasBeenSet = false; + Aws::String m_dBShardGroupArn; + bool m_dBShardGroupArnHasBeenSet = false; + ResponseMetadata m_responseMetadata; }; diff --git a/generated/src/aws-cpp-sdk-rds/include/aws/rds/model/DeleteDBShardGroupResult.h b/generated/src/aws-cpp-sdk-rds/include/aws/rds/model/DeleteDBShardGroupResult.h index db50fe3009c..fca84b2ae59 100644 --- a/generated/src/aws-cpp-sdk-rds/include/aws/rds/model/DeleteDBShardGroupResult.h +++ b/generated/src/aws-cpp-sdk-rds/include/aws/rds/model/DeleteDBShardGroupResult.h @@ -95,13 +95,12 @@ namespace Model ///@{ /** - *

    Specifies whether to create standby instances for the DB shard group. Valid - * values are the following:

    • 0 - Creates a single, primary DB - * instance for each physical shard. This is the default value, and the only one - * supported for the preview.

    • 1 - Creates a primary DB instance - * and a standby instance in a different Availability Zone (AZ) for each physical - * shard.

    • 2 - Creates a primary DB instance and two standby - * instances in different AZs for each physical shard.

    + *

    Specifies whether to create standby DB shard groups for the DB shard group. + * Valid values are the following:

    • 0 - Creates a DB shard group + * without a standby DB shard group. This is the default value.

    • 1 + * - Creates a DB shard group with a standby DB shard group in a different + * Availability Zone (AZ).

    • 2 - Creates a DB shard group with two + * standby DB shard groups in two different AZs.

    */ inline int GetComputeRedundancy() const{ return m_computeRedundancy; } inline void SetComputeRedundancy(int value) { m_computeRedundancy = value; } @@ -153,6 +152,19 @@ namespace Model inline DeleteDBShardGroupResult& WithEndpoint(const char* value) { SetEndpoint(value); return *this;} ///@} + ///@{ + /** + *

    The Amazon Resource Name (ARN) for the DB shard group.

    + */ + inline const Aws::String& GetDBShardGroupArn() const{ return m_dBShardGroupArn; } + inline void SetDBShardGroupArn(const Aws::String& value) { m_dBShardGroupArn = value; } + inline void SetDBShardGroupArn(Aws::String&& value) { m_dBShardGroupArn = std::move(value); } + inline void SetDBShardGroupArn(const char* value) { m_dBShardGroupArn.assign(value); } + inline DeleteDBShardGroupResult& WithDBShardGroupArn(const Aws::String& value) { SetDBShardGroupArn(value); return *this;} + inline DeleteDBShardGroupResult& WithDBShardGroupArn(Aws::String&& value) { SetDBShardGroupArn(std::move(value)); return *this;} + inline DeleteDBShardGroupResult& WithDBShardGroupArn(const char* value) { SetDBShardGroupArn(value); return *this;} + ///@} + ///@{ inline const ResponseMetadata& GetResponseMetadata() const{ return m_responseMetadata; } @@ -181,6 +193,8 @@ namespace Model Aws::String m_endpoint; + Aws::String m_dBShardGroupArn; + ResponseMetadata m_responseMetadata; }; diff --git a/generated/src/aws-cpp-sdk-rds/include/aws/rds/model/ModifyDBShardGroupRequest.h b/generated/src/aws-cpp-sdk-rds/include/aws/rds/model/ModifyDBShardGroupRequest.h index 3a875b366ba..90b607d20a2 100644 --- a/generated/src/aws-cpp-sdk-rds/include/aws/rds/model/ModifyDBShardGroupRequest.h +++ b/generated/src/aws-cpp-sdk-rds/include/aws/rds/model/ModifyDBShardGroupRequest.h @@ -71,6 +71,21 @@ namespace Model inline void SetMinACU(double value) { m_minACUHasBeenSet = true; m_minACU = value; } inline ModifyDBShardGroupRequest& WithMinACU(double value) { SetMinACU(value); return *this;} ///@} + + ///@{ + /** + *

    Specifies whether to create standby DB shard groups for the DB shard group. + * Valid values are the following:

    • 0 - Creates a DB shard group + * without a standby DB shard group. This is the default value.

    • 1 + * - Creates a DB shard group with a standby DB shard group in a different + * Availability Zone (AZ).

    • 2 - Creates a DB shard group with two + * standby DB shard groups in two different AZs.
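For illustration, a minimal sketch of the new ComputeRedundancy parameter on ModifyDBShardGroup, assuming the existing RDSClient::ModifyDBShardGroup operation and the request's SetDBShardGroupIdentifier setter (only the ComputeRedundancy accessors appear in this diff). It also reads the DBShardGroupArn field newly returned on the result.

#include <aws/rds/RDSClient.h>
#include <aws/rds/model/ModifyDBShardGroupRequest.h>
#include <iostream>

void AddStandbysToShardGroup(const Aws::RDS::RDSClient& rdsClient)
{
  Aws::RDS::Model::ModifyDBShardGroupRequest request;
  request.SetDBShardGroupIdentifier("my-shard-group");  // assumed setter
  request.SetComputeRedundancy(2);  // 0 = no standby, 1 = one standby AZ, 2 = two standby AZs

  auto outcome = rdsClient.ModifyDBShardGroup(request); // assumed operation
  if (outcome.IsSuccess())
  {
    std::cout << outcome.GetResult().GetDBShardGroupArn() << std::endl;  // ARN added in this release
  }
  else
  {
    std::cerr << outcome.GetError().GetMessage() << std::endl;
  }
}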

    + */ + inline int GetComputeRedundancy() const{ return m_computeRedundancy; } + inline bool ComputeRedundancyHasBeenSet() const { return m_computeRedundancyHasBeenSet; } + inline void SetComputeRedundancy(int value) { m_computeRedundancyHasBeenSet = true; m_computeRedundancy = value; } + inline ModifyDBShardGroupRequest& WithComputeRedundancy(int value) { SetComputeRedundancy(value); return *this;} + ///@} private: Aws::String m_dBShardGroupIdentifier; @@ -81,6 +96,9 @@ namespace Model double m_minACU; bool m_minACUHasBeenSet = false; + + int m_computeRedundancy; + bool m_computeRedundancyHasBeenSet = false; }; } // namespace Model diff --git a/generated/src/aws-cpp-sdk-rds/include/aws/rds/model/ModifyDBShardGroupResult.h b/generated/src/aws-cpp-sdk-rds/include/aws/rds/model/ModifyDBShardGroupResult.h index 2909c633381..eae6f7abe4f 100644 --- a/generated/src/aws-cpp-sdk-rds/include/aws/rds/model/ModifyDBShardGroupResult.h +++ b/generated/src/aws-cpp-sdk-rds/include/aws/rds/model/ModifyDBShardGroupResult.h @@ -95,13 +95,12 @@ namespace Model ///@{ /** - *

    Specifies whether to create standby instances for the DB shard group. Valid - * values are the following:

    • 0 - Creates a single, primary DB - * instance for each physical shard. This is the default value, and the only one - * supported for the preview.

    • 1 - Creates a primary DB instance - * and a standby instance in a different Availability Zone (AZ) for each physical - * shard.

    • 2 - Creates a primary DB instance and two standby - * instances in different AZs for each physical shard.

    + *

    Specifies whether to create standby DB shard groups for the DB shard group. + * Valid values are the following:

    • 0 - Creates a DB shard group + * without a standby DB shard group. This is the default value.

    • 1 + * - Creates a DB shard group with a standby DB shard group in a different + * Availability Zone (AZ).

    • 2 - Creates a DB shard group with two + * standby DB shard groups in two different AZs.

    */ inline int GetComputeRedundancy() const{ return m_computeRedundancy; } inline void SetComputeRedundancy(int value) { m_computeRedundancy = value; } @@ -153,6 +152,19 @@ namespace Model inline ModifyDBShardGroupResult& WithEndpoint(const char* value) { SetEndpoint(value); return *this;} ///@} + ///@{ + /** + *

    The Amazon Resource Name (ARN) for the DB shard group.

    + */ + inline const Aws::String& GetDBShardGroupArn() const{ return m_dBShardGroupArn; } + inline void SetDBShardGroupArn(const Aws::String& value) { m_dBShardGroupArn = value; } + inline void SetDBShardGroupArn(Aws::String&& value) { m_dBShardGroupArn = std::move(value); } + inline void SetDBShardGroupArn(const char* value) { m_dBShardGroupArn.assign(value); } + inline ModifyDBShardGroupResult& WithDBShardGroupArn(const Aws::String& value) { SetDBShardGroupArn(value); return *this;} + inline ModifyDBShardGroupResult& WithDBShardGroupArn(Aws::String&& value) { SetDBShardGroupArn(std::move(value)); return *this;} + inline ModifyDBShardGroupResult& WithDBShardGroupArn(const char* value) { SetDBShardGroupArn(value); return *this;} + ///@} + ///@{ inline const ResponseMetadata& GetResponseMetadata() const{ return m_responseMetadata; } @@ -181,6 +193,8 @@ namespace Model Aws::String m_endpoint; + Aws::String m_dBShardGroupArn; + ResponseMetadata m_responseMetadata; }; diff --git a/generated/src/aws-cpp-sdk-rds/include/aws/rds/model/RebootDBShardGroupResult.h b/generated/src/aws-cpp-sdk-rds/include/aws/rds/model/RebootDBShardGroupResult.h index ff411d683fc..00d81892708 100644 --- a/generated/src/aws-cpp-sdk-rds/include/aws/rds/model/RebootDBShardGroupResult.h +++ b/generated/src/aws-cpp-sdk-rds/include/aws/rds/model/RebootDBShardGroupResult.h @@ -95,13 +95,12 @@ namespace Model ///@{ /** - *

    Specifies whether to create standby instances for the DB shard group. Valid - * values are the following:

    • 0 - Creates a single, primary DB - * instance for each physical shard. This is the default value, and the only one - * supported for the preview.

    • 1 - Creates a primary DB instance - * and a standby instance in a different Availability Zone (AZ) for each physical - * shard.

    • 2 - Creates a primary DB instance and two standby - * instances in different AZs for each physical shard.

    + *

    Specifies whether to create standby DB shard groups for the DB shard group. + * Valid values are the following:

    • 0 - Creates a DB shard group + * without a standby DB shard group. This is the default value.

    • 1 + * - Creates a DB shard group with a standby DB shard group in a different + * Availability Zone (AZ).

    • 2 - Creates a DB shard group with two + * standby DB shard groups in two different AZs.

    */ inline int GetComputeRedundancy() const{ return m_computeRedundancy; } inline void SetComputeRedundancy(int value) { m_computeRedundancy = value; } @@ -153,6 +152,19 @@ namespace Model inline RebootDBShardGroupResult& WithEndpoint(const char* value) { SetEndpoint(value); return *this;} ///@} + ///@{ + /** + *

    The Amazon Resource Name (ARN) for the DB shard group.

    + */ + inline const Aws::String& GetDBShardGroupArn() const{ return m_dBShardGroupArn; } + inline void SetDBShardGroupArn(const Aws::String& value) { m_dBShardGroupArn = value; } + inline void SetDBShardGroupArn(Aws::String&& value) { m_dBShardGroupArn = std::move(value); } + inline void SetDBShardGroupArn(const char* value) { m_dBShardGroupArn.assign(value); } + inline RebootDBShardGroupResult& WithDBShardGroupArn(const Aws::String& value) { SetDBShardGroupArn(value); return *this;} + inline RebootDBShardGroupResult& WithDBShardGroupArn(Aws::String&& value) { SetDBShardGroupArn(std::move(value)); return *this;} + inline RebootDBShardGroupResult& WithDBShardGroupArn(const char* value) { SetDBShardGroupArn(value); return *this;} + ///@} + ///@{ inline const ResponseMetadata& GetResponseMetadata() const{ return m_responseMetadata; } @@ -181,6 +193,8 @@ namespace Model Aws::String m_endpoint; + Aws::String m_dBShardGroupArn; + ResponseMetadata m_responseMetadata; }; diff --git a/generated/src/aws-cpp-sdk-rds/source/RDSErrors.cpp b/generated/src/aws-cpp-sdk-rds/source/RDSErrors.cpp index fb0b59724cd..e12ce33a98d 100644 --- a/generated/src/aws-cpp-sdk-rds/source/RDSErrors.cpp +++ b/generated/src/aws-cpp-sdk-rds/source/RDSErrors.cpp @@ -94,7 +94,6 @@ static const int D_B_INSTANCE_AUTOMATED_BACKUP_QUOTA_EXCEEDED_FAULT_HASH = Hashi static const int D_B_CLUSTER_NOT_FOUND_FAULT_HASH = HashingUtils::HashString("DBClusterNotFoundFault"); static const int SUBSCRIPTION_NOT_FOUND_FAULT_HASH = HashingUtils::HashString("SubscriptionNotFound"); static const int BACKUP_POLICY_NOT_FOUND_FAULT_HASH = HashingUtils::HashString("BackupPolicyNotFoundFault"); -static const int INVALID_MAX_ACU_FAULT_HASH = HashingUtils::HashString("InvalidMaxAcu"); static const int INVALID_D_B_PROXY_ENDPOINT_STATE_FAULT_HASH = HashingUtils::HashString("InvalidDBProxyEndpointStateFault"); static const int D_B_SUBNET_GROUP_DOES_NOT_COVER_ENOUGH_A_ZS_HASH = HashingUtils::HashString("DBSubnetGroupDoesNotCoverEnoughAZs"); static const int D_B_UPGRADE_DEPENDENCY_FAILURE_FAULT_HASH = HashingUtils::HashString("DBUpgradeDependencyFailure"); @@ -553,11 +552,6 @@ static bool GetErrorForNameHelper0(int hashCode, AWSError& error) error = AWSError(static_cast(RDSErrors::BACKUP_POLICY_NOT_FOUND_FAULT), RetryableType::NOT_RETRYABLE); return true; } - else if (hashCode == INVALID_MAX_ACU_FAULT_HASH) - { - error = AWSError(static_cast(RDSErrors::INVALID_MAX_ACU_FAULT), RetryableType::NOT_RETRYABLE); - return true; - } else if (hashCode == INVALID_D_B_PROXY_ENDPOINT_STATE_FAULT_HASH) { error = AWSError(static_cast(RDSErrors::INVALID_D_B_PROXY_ENDPOINT_STATE_FAULT), RetryableType::NOT_RETRYABLE); @@ -783,17 +777,17 @@ static bool GetErrorForNameHelper0(int hashCode, AWSError& error) error = AWSError(static_cast(RDSErrors::D_B_PROXY_ENDPOINT_NOT_FOUND_FAULT), RetryableType::NOT_RETRYABLE); return true; } + else if (hashCode == INSTANCE_QUOTA_EXCEEDED_FAULT_HASH) + { + error = AWSError(static_cast(RDSErrors::INSTANCE_QUOTA_EXCEEDED_FAULT), RetryableType::NOT_RETRYABLE); + return true; + } return false; } static bool GetErrorForNameHelper1(int hashCode, AWSError& error) { - if (hashCode == INSTANCE_QUOTA_EXCEEDED_FAULT_HASH) - { - error = AWSError(static_cast(RDSErrors::INSTANCE_QUOTA_EXCEEDED_FAULT), RetryableType::NOT_RETRYABLE); - return true; - } - else if (hashCode == GLOBAL_CLUSTER_ALREADY_EXISTS_FAULT_HASH) + if (hashCode == GLOBAL_CLUSTER_ALREADY_EXISTS_FAULT_HASH) { error = AWSError(static_cast(RDSErrors::GLOBAL_CLUSTER_ALREADY_EXISTS_FAULT), 
RetryableType::NOT_RETRYABLE); return true; diff --git a/generated/src/aws-cpp-sdk-rds/source/model/CreateDBShardGroupResult.cpp b/generated/src/aws-cpp-sdk-rds/source/model/CreateDBShardGroupResult.cpp index fdf87a36ead..73e13b966c2 100644 --- a/generated/src/aws-cpp-sdk-rds/source/model/CreateDBShardGroupResult.cpp +++ b/generated/src/aws-cpp-sdk-rds/source/model/CreateDBShardGroupResult.cpp @@ -88,6 +88,11 @@ CreateDBShardGroupResult& CreateDBShardGroupResult::operator =(const Aws::Amazon { m_endpoint = Aws::Utils::Xml::DecodeEscapedXmlText(endpointNode.GetText()); } + XmlNode dBShardGroupArnNode = resultNode.FirstChild("DBShardGroupArn"); + if(!dBShardGroupArnNode.IsNull()) + { + m_dBShardGroupArn = Aws::Utils::Xml::DecodeEscapedXmlText(dBShardGroupArnNode.GetText()); + } } if (!rootNode.IsNull()) { diff --git a/generated/src/aws-cpp-sdk-rds/source/model/DBShardGroup.cpp b/generated/src/aws-cpp-sdk-rds/source/model/DBShardGroup.cpp index e9cafe45b3a..5661d01e3d0 100644 --- a/generated/src/aws-cpp-sdk-rds/source/model/DBShardGroup.cpp +++ b/generated/src/aws-cpp-sdk-rds/source/model/DBShardGroup.cpp @@ -33,7 +33,8 @@ DBShardGroup::DBShardGroup() : m_statusHasBeenSet(false), m_publiclyAccessible(false), m_publiclyAccessibleHasBeenSet(false), - m_endpointHasBeenSet(false) + m_endpointHasBeenSet(false), + m_dBShardGroupArnHasBeenSet(false) { } @@ -103,6 +104,12 @@ DBShardGroup& DBShardGroup::operator =(const XmlNode& xmlNode) m_endpoint = Aws::Utils::Xml::DecodeEscapedXmlText(endpointNode.GetText()); m_endpointHasBeenSet = true; } + XmlNode dBShardGroupArnNode = resultNode.FirstChild("DBShardGroupArn"); + if(!dBShardGroupArnNode.IsNull()) + { + m_dBShardGroupArn = Aws::Utils::Xml::DecodeEscapedXmlText(dBShardGroupArnNode.GetText()); + m_dBShardGroupArnHasBeenSet = true; + } } return *this; @@ -155,6 +162,11 @@ void DBShardGroup::OutputToStream(Aws::OStream& oStream, const char* location, u oStream << location << index << locationValue << ".Endpoint=" << StringUtils::URLEncode(m_endpoint.c_str()) << "&"; } + if(m_dBShardGroupArnHasBeenSet) + { + oStream << location << index << locationValue << ".DBShardGroupArn=" << StringUtils::URLEncode(m_dBShardGroupArn.c_str()) << "&"; + } + Aws::StringStream responseMetadataLocationAndMemberSs; responseMetadataLocationAndMemberSs << location << index << locationValue << ".ResponseMetadata"; m_responseMetadata.OutputToStream(oStream, responseMetadataLocationAndMemberSs.str().c_str()); @@ -198,6 +210,10 @@ void DBShardGroup::OutputToStream(Aws::OStream& oStream, const char* location) c { oStream << location << ".Endpoint=" << StringUtils::URLEncode(m_endpoint.c_str()) << "&"; } + if(m_dBShardGroupArnHasBeenSet) + { + oStream << location << ".DBShardGroupArn=" << StringUtils::URLEncode(m_dBShardGroupArn.c_str()) << "&"; + } Aws::String responseMetadataLocationAndMember(location); responseMetadataLocationAndMember += ".ResponseMetadata"; m_responseMetadata.OutputToStream(oStream, responseMetadataLocationAndMember.c_str()); diff --git a/generated/src/aws-cpp-sdk-rds/source/model/DeleteDBShardGroupResult.cpp b/generated/src/aws-cpp-sdk-rds/source/model/DeleteDBShardGroupResult.cpp index 236095e84a2..465b61e8e13 100644 --- a/generated/src/aws-cpp-sdk-rds/source/model/DeleteDBShardGroupResult.cpp +++ b/generated/src/aws-cpp-sdk-rds/source/model/DeleteDBShardGroupResult.cpp @@ -88,6 +88,11 @@ DeleteDBShardGroupResult& DeleteDBShardGroupResult::operator =(const Aws::Amazon { m_endpoint = Aws::Utils::Xml::DecodeEscapedXmlText(endpointNode.GetText()); } + XmlNode 
dBShardGroupArnNode = resultNode.FirstChild("DBShardGroupArn"); + if(!dBShardGroupArnNode.IsNull()) + { + m_dBShardGroupArn = Aws::Utils::Xml::DecodeEscapedXmlText(dBShardGroupArnNode.GetText()); + } } if (!rootNode.IsNull()) { diff --git a/generated/src/aws-cpp-sdk-rds/source/model/ModifyDBShardGroupRequest.cpp b/generated/src/aws-cpp-sdk-rds/source/model/ModifyDBShardGroupRequest.cpp index dbb493f5df3..7c4efbd8e7a 100644 --- a/generated/src/aws-cpp-sdk-rds/source/model/ModifyDBShardGroupRequest.cpp +++ b/generated/src/aws-cpp-sdk-rds/source/model/ModifyDBShardGroupRequest.cpp @@ -15,7 +15,9 @@ ModifyDBShardGroupRequest::ModifyDBShardGroupRequest() : m_maxACU(0.0), m_maxACUHasBeenSet(false), m_minACU(0.0), - m_minACUHasBeenSet(false) + m_minACUHasBeenSet(false), + m_computeRedundancy(0), + m_computeRedundancyHasBeenSet(false) { } @@ -38,6 +40,11 @@ Aws::String ModifyDBShardGroupRequest::SerializePayload() const ss << "MinACU=" << StringUtils::URLEncode(m_minACU) << "&"; } + if(m_computeRedundancyHasBeenSet) + { + ss << "ComputeRedundancy=" << m_computeRedundancy << "&"; + } + ss << "Version=2014-10-31"; return ss.str(); } diff --git a/generated/src/aws-cpp-sdk-rds/source/model/ModifyDBShardGroupResult.cpp b/generated/src/aws-cpp-sdk-rds/source/model/ModifyDBShardGroupResult.cpp index eee0cea95c7..01c6abb6beb 100644 --- a/generated/src/aws-cpp-sdk-rds/source/model/ModifyDBShardGroupResult.cpp +++ b/generated/src/aws-cpp-sdk-rds/source/model/ModifyDBShardGroupResult.cpp @@ -88,6 +88,11 @@ ModifyDBShardGroupResult& ModifyDBShardGroupResult::operator =(const Aws::Amazon { m_endpoint = Aws::Utils::Xml::DecodeEscapedXmlText(endpointNode.GetText()); } + XmlNode dBShardGroupArnNode = resultNode.FirstChild("DBShardGroupArn"); + if(!dBShardGroupArnNode.IsNull()) + { + m_dBShardGroupArn = Aws::Utils::Xml::DecodeEscapedXmlText(dBShardGroupArnNode.GetText()); + } } if (!rootNode.IsNull()) { diff --git a/generated/src/aws-cpp-sdk-rds/source/model/RebootDBShardGroupResult.cpp b/generated/src/aws-cpp-sdk-rds/source/model/RebootDBShardGroupResult.cpp index 717fb71afd4..600c4acd69b 100644 --- a/generated/src/aws-cpp-sdk-rds/source/model/RebootDBShardGroupResult.cpp +++ b/generated/src/aws-cpp-sdk-rds/source/model/RebootDBShardGroupResult.cpp @@ -88,6 +88,11 @@ RebootDBShardGroupResult& RebootDBShardGroupResult::operator =(const Aws::Amazon { m_endpoint = Aws::Utils::Xml::DecodeEscapedXmlText(endpointNode.GetText()); } + XmlNode dBShardGroupArnNode = resultNode.FirstChild("DBShardGroupArn"); + if(!dBShardGroupArnNode.IsNull()) + { + m_dBShardGroupArn = Aws::Utils::Xml::DecodeEscapedXmlText(dBShardGroupArnNode.GetText()); + } } if (!rootNode.IsNull()) { diff --git a/generated/src/aws-cpp-sdk-resource-explorer-2/include/aws/resource-explorer-2/ResourceExplorer2Client.h b/generated/src/aws-cpp-sdk-resource-explorer-2/include/aws/resource-explorer-2/ResourceExplorer2Client.h index 93654050c11..75608450ffd 100644 --- a/generated/src/aws-cpp-sdk-resource-explorer-2/include/aws/resource-explorer-2/ResourceExplorer2Client.h +++ b/generated/src/aws-cpp-sdk-resource-explorer-2/include/aws/resource-explorer-2/ResourceExplorer2Client.h @@ -360,8 +360,8 @@ namespace ResourceExplorer2 /** *

    Retrieves the status of your account's Amazon Web Services service access, * and validates the service linked role required to access the multi-account - * search feature. Only the management account or a delegated administrator with - * service access enabled can invoke this API call.

    See Also:

    See Also:

    AWS * API Reference

    */ @@ -520,6 +520,34 @@ namespace ResourceExplorer2 return SubmitAsync(&ResourceExplorer2Client::ListIndexesForMembers, request, handler, context); } + /** + *

    Returns a list of resources and their details that match the specified + * criteria. This query must use a view. If you don’t explicitly specify a view, + * then Resource Explorer uses the default view for the Amazon Web Services Region + * in which you call this operation.
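    A minimal sketch of paging through ListResources with this client, assuming credentials and Region come from the default configuration chain; the page size of 50, the printed field, and the error handling are illustrative choices rather than required usage.

#include <aws/core/Aws.h>
#include <aws/resource-explorer-2/ResourceExplorer2Client.h>
#include <aws/resource-explorer-2/model/ListResourcesRequest.h>
#include <iostream>

int main()
{
    Aws::SDKOptions options;
    Aws::InitAPI(options);
    {
        // Region and credentials are resolved from the default configuration chain.
        Aws::ResourceExplorer2::ResourceExplorer2Client client;

        Aws::ResourceExplorer2::Model::ListResourcesRequest request;
        request.SetMaxResults(50);  // illustrative page size

        Aws::String nextToken;
        do
        {
            if (!nextToken.empty())
            {
                request.SetNextToken(nextToken);
            }
            auto outcome = client.ListResources(request);
            if (!outcome.IsSuccess())
            {
                std::cerr << "ListResources failed: " << outcome.GetError().GetMessage() << std::endl;
                break;
            }
            for (const auto& resource : outcome.GetResult().GetResources())
            {
                std::cout << resource.GetArn() << std::endl;  // print each indexed resource ARN
            }
            nextToken = outcome.GetResult().GetNextToken();
        } while (!nextToken.empty());
    }
    Aws::ShutdownAPI(options);
    return 0;
}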

    See Also:

    AWS + * API Reference

    + */ + virtual Model::ListResourcesOutcome ListResources(const Model::ListResourcesRequest& request = {}) const; + + /** + * A Callable wrapper for ListResources that returns a future to the operation so that it can be executed in parallel to other requests. + */ + template + Model::ListResourcesOutcomeCallable ListResourcesCallable(const ListResourcesRequestT& request = {}) const + { + return SubmitCallable(&ResourceExplorer2Client::ListResources, request); + } + + /** + * An Async wrapper for ListResources that queues the request into a thread executor and triggers associated callback when operation has finished. + */ + template + void ListResourcesAsync(const ListResourcesResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr, const ListResourcesRequestT& request = {}) const + { + return SubmitAsync(&ResourceExplorer2Client::ListResources, request, handler, context); + } + /** *

    Retrieves a list of all resource types currently supported by Amazon Web * Services Resource Explorer.

    See Also:

    #include #include +#include #include #include #include @@ -40,6 +41,7 @@ #include #include #include +#include #include #include #include @@ -101,6 +103,7 @@ namespace Aws class GetViewRequest; class ListIndexesRequest; class ListIndexesForMembersRequest; + class ListResourcesRequest; class ListSupportedResourceTypesRequest; class ListTagsForResourceRequest; class ListViewsRequest; @@ -125,6 +128,7 @@ namespace Aws typedef Aws::Utils::Outcome GetViewOutcome; typedef Aws::Utils::Outcome ListIndexesOutcome; typedef Aws::Utils::Outcome ListIndexesForMembersOutcome; + typedef Aws::Utils::Outcome ListResourcesOutcome; typedef Aws::Utils::Outcome ListSupportedResourceTypesOutcome; typedef Aws::Utils::Outcome ListTagsForResourceOutcome; typedef Aws::Utils::Outcome ListViewsOutcome; @@ -149,6 +153,7 @@ namespace Aws typedef std::future GetViewOutcomeCallable; typedef std::future ListIndexesOutcomeCallable; typedef std::future ListIndexesForMembersOutcomeCallable; + typedef std::future ListResourcesOutcomeCallable; typedef std::future ListSupportedResourceTypesOutcomeCallable; typedef std::future ListTagsForResourceOutcomeCallable; typedef std::future ListViewsOutcomeCallable; @@ -176,6 +181,7 @@ namespace Aws typedef std::function&) > GetViewResponseReceivedHandler; typedef std::function&) > ListIndexesResponseReceivedHandler; typedef std::function&) > ListIndexesForMembersResponseReceivedHandler; + typedef std::function&) > ListResourcesResponseReceivedHandler; typedef std::function&) > ListSupportedResourceTypesResponseReceivedHandler; typedef std::function&) > ListTagsForResourceResponseReceivedHandler; typedef std::function&) > ListViewsResponseReceivedHandler; diff --git a/generated/src/aws-cpp-sdk-resource-explorer-2/include/aws/resource-explorer-2/model/ListResourcesRequest.h b/generated/src/aws-cpp-sdk-resource-explorer-2/include/aws/resource-explorer-2/model/ListResourcesRequest.h new file mode 100644 index 00000000000..18bf3b85620 --- /dev/null +++ b/generated/src/aws-cpp-sdk-resource-explorer-2/include/aws/resource-explorer-2/model/ListResourcesRequest.h @@ -0,0 +1,119 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#pragma once +#include +#include +#include +#include +#include + +namespace Aws +{ +namespace ResourceExplorer2 +{ +namespace Model +{ + + /** + */ + class ListResourcesRequest : public ResourceExplorer2Request + { + public: + AWS_RESOURCEEXPLORER2_API ListResourcesRequest(); + + // Service request name is the Operation name which will send this request out, + // each operation should has unique request name, so that we can get operation's name from this request. + // Note: this is not true for response, multiple operations may have the same response name, + // so we can not get operation's name from response. 
+ inline virtual const char* GetServiceRequestName() const override { return "ListResources"; } + + AWS_RESOURCEEXPLORER2_API Aws::String SerializePayload() const override; + + + ///@{ + + inline const SearchFilter& GetFilters() const{ return m_filters; } + inline bool FiltersHasBeenSet() const { return m_filtersHasBeenSet; } + inline void SetFilters(const SearchFilter& value) { m_filtersHasBeenSet = true; m_filters = value; } + inline void SetFilters(SearchFilter&& value) { m_filtersHasBeenSet = true; m_filters = std::move(value); } + inline ListResourcesRequest& WithFilters(const SearchFilter& value) { SetFilters(value); return *this;} + inline ListResourcesRequest& WithFilters(SearchFilter&& value) { SetFilters(std::move(value)); return *this;} + ///@} + + ///@{ + /** + *

    The maximum number of results that you want included on each page of the + * response. If you do not include this parameter, it defaults to a value + * appropriate to the operation. If additional items exist beyond those included in + * the current response, the NextToken response element is present and + * has a value (is not null). Include that value as the NextToken + * request parameter in the next call to the operation to get the next part of the + * results.

    An API operation can return fewer results than the + * maximum even when there are more results available. You should check + * NextToken after every operation to ensure that you receive all of + * the results.

    + */ + inline int GetMaxResults() const{ return m_maxResults; } + inline bool MaxResultsHasBeenSet() const { return m_maxResultsHasBeenSet; } + inline void SetMaxResults(int value) { m_maxResultsHasBeenSet = true; m_maxResults = value; } + inline ListResourcesRequest& WithMaxResults(int value) { SetMaxResults(value); return *this;} + ///@} + + ///@{ + /** + *

    The parameter for receiving additional results if you receive a + * NextToken response in a previous request. A NextToken + * response indicates that more output is available. Set this parameter to the + * value of the previous call's NextToken response to indicate where + * the output should continue from. The pagination tokens expire after 24 + * hours.

    + */ + inline const Aws::String& GetNextToken() const{ return m_nextToken; } + inline bool NextTokenHasBeenSet() const { return m_nextTokenHasBeenSet; } + inline void SetNextToken(const Aws::String& value) { m_nextTokenHasBeenSet = true; m_nextToken = value; } + inline void SetNextToken(Aws::String&& value) { m_nextTokenHasBeenSet = true; m_nextToken = std::move(value); } + inline void SetNextToken(const char* value) { m_nextTokenHasBeenSet = true; m_nextToken.assign(value); } + inline ListResourcesRequest& WithNextToken(const Aws::String& value) { SetNextToken(value); return *this;} + inline ListResourcesRequest& WithNextToken(Aws::String&& value) { SetNextToken(std::move(value)); return *this;} + inline ListResourcesRequest& WithNextToken(const char* value) { SetNextToken(value); return *this;} + ///@} + + ///@{ + /** + *

    Specifies the Amazon Resource Name (ARN) of the view to use for the query. If + * you don't specify a value for this parameter, then the operation automatically + * uses the default view for the Amazon Web Services Region in which you called + * this operation. If the Region either doesn't have a default view or if you don't + * have permission to use the default view, then the operation fails with a 401 + * Unauthorized exception.

    + */ + inline const Aws::String& GetViewArn() const{ return m_viewArn; } + inline bool ViewArnHasBeenSet() const { return m_viewArnHasBeenSet; } + inline void SetViewArn(const Aws::String& value) { m_viewArnHasBeenSet = true; m_viewArn = value; } + inline void SetViewArn(Aws::String&& value) { m_viewArnHasBeenSet = true; m_viewArn = std::move(value); } + inline void SetViewArn(const char* value) { m_viewArnHasBeenSet = true; m_viewArn.assign(value); } + inline ListResourcesRequest& WithViewArn(const Aws::String& value) { SetViewArn(value); return *this;} + inline ListResourcesRequest& WithViewArn(Aws::String&& value) { SetViewArn(std::move(value)); return *this;} + inline ListResourcesRequest& WithViewArn(const char* value) { SetViewArn(value); return *this;} + ///@} + private: + + SearchFilter m_filters; + bool m_filtersHasBeenSet = false; + + int m_maxResults; + bool m_maxResultsHasBeenSet = false; + + Aws::String m_nextToken; + bool m_nextTokenHasBeenSet = false; + + Aws::String m_viewArn; + bool m_viewArnHasBeenSet = false; + }; + +} // namespace Model +} // namespace ResourceExplorer2 +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-resource-explorer-2/include/aws/resource-explorer-2/model/ListResourcesResult.h b/generated/src/aws-cpp-sdk-resource-explorer-2/include/aws/resource-explorer-2/model/ListResourcesResult.h new file mode 100644 index 00000000000..9be241a56ac --- /dev/null +++ b/generated/src/aws-cpp-sdk-resource-explorer-2/include/aws/resource-explorer-2/model/ListResourcesResult.h @@ -0,0 +1,104 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#pragma once +#include +#include +#include +#include +#include + +namespace Aws +{ +template +class AmazonWebServiceResult; + +namespace Utils +{ +namespace Json +{ + class JsonValue; +} // namespace Json +} // namespace Utils +namespace ResourceExplorer2 +{ +namespace Model +{ + class ListResourcesResult + { + public: + AWS_RESOURCEEXPLORER2_API ListResourcesResult(); + AWS_RESOURCEEXPLORER2_API ListResourcesResult(const Aws::AmazonWebServiceResult& result); + AWS_RESOURCEEXPLORER2_API ListResourcesResult& operator=(const Aws::AmazonWebServiceResult& result); + + + ///@{ + /** + *

    If present, indicates that more output is available than is included in the + * current response. Use this value in the NextToken request parameter + * in a subsequent call to the operation to get the next part of the output. You + * should repeat this until the NextToken response element comes back + * as null. The pagination tokens expire after 24 hours.

    + */ + inline const Aws::String& GetNextToken() const{ return m_nextToken; } + inline void SetNextToken(const Aws::String& value) { m_nextToken = value; } + inline void SetNextToken(Aws::String&& value) { m_nextToken = std::move(value); } + inline void SetNextToken(const char* value) { m_nextToken.assign(value); } + inline ListResourcesResult& WithNextToken(const Aws::String& value) { SetNextToken(value); return *this;} + inline ListResourcesResult& WithNextToken(Aws::String&& value) { SetNextToken(std::move(value)); return *this;} + inline ListResourcesResult& WithNextToken(const char* value) { SetNextToken(value); return *this;} + ///@} + + ///@{ + /** + *

    The list of structures that describe the resources that match the query.

    + */ + inline const Aws::Vector& GetResources() const{ return m_resources; } + inline void SetResources(const Aws::Vector& value) { m_resources = value; } + inline void SetResources(Aws::Vector&& value) { m_resources = std::move(value); } + inline ListResourcesResult& WithResources(const Aws::Vector& value) { SetResources(value); return *this;} + inline ListResourcesResult& WithResources(Aws::Vector&& value) { SetResources(std::move(value)); return *this;} + inline ListResourcesResult& AddResources(const Resource& value) { m_resources.push_back(value); return *this; } + inline ListResourcesResult& AddResources(Resource&& value) { m_resources.push_back(std::move(value)); return *this; } + ///@} + + ///@{ + /** + *

    The Amazon Resource Name (ARN) of the view that this operation used to + * perform the search.

    + */ + inline const Aws::String& GetViewArn() const{ return m_viewArn; } + inline void SetViewArn(const Aws::String& value) { m_viewArn = value; } + inline void SetViewArn(Aws::String&& value) { m_viewArn = std::move(value); } + inline void SetViewArn(const char* value) { m_viewArn.assign(value); } + inline ListResourcesResult& WithViewArn(const Aws::String& value) { SetViewArn(value); return *this;} + inline ListResourcesResult& WithViewArn(Aws::String&& value) { SetViewArn(std::move(value)); return *this;} + inline ListResourcesResult& WithViewArn(const char* value) { SetViewArn(value); return *this;} + ///@} + + ///@{ + + inline const Aws::String& GetRequestId() const{ return m_requestId; } + inline void SetRequestId(const Aws::String& value) { m_requestId = value; } + inline void SetRequestId(Aws::String&& value) { m_requestId = std::move(value); } + inline void SetRequestId(const char* value) { m_requestId.assign(value); } + inline ListResourcesResult& WithRequestId(const Aws::String& value) { SetRequestId(value); return *this;} + inline ListResourcesResult& WithRequestId(Aws::String&& value) { SetRequestId(std::move(value)); return *this;} + inline ListResourcesResult& WithRequestId(const char* value) { SetRequestId(value); return *this;} + ///@} + private: + + Aws::String m_nextToken; + + Aws::Vector m_resources; + + Aws::String m_viewArn; + + Aws::String m_requestId; + }; + +} // namespace Model +} // namespace ResourceExplorer2 +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-resource-explorer-2/include/aws/resource-explorer-2/model/Resource.h b/generated/src/aws-cpp-sdk-resource-explorer-2/include/aws/resource-explorer-2/model/Resource.h index bc0caa0535b..9d05863dfd9 100644 --- a/generated/src/aws-cpp-sdk-resource-explorer-2/include/aws/resource-explorer-2/model/Resource.h +++ b/generated/src/aws-cpp-sdk-resource-explorer-2/include/aws/resource-explorer-2/model/Resource.h @@ -133,8 +133,8 @@ namespace Model ///@{ /** - *

    The Amazon Web Service that owns the resource and is responsible for creating - * and updating it.

    + *

    The Amazon Web Services service that owns the resource and is responsible for + * creating and updating it.

    */ inline const Aws::String& GetService() const{ return m_service; } inline bool ServiceHasBeenSet() const { return m_serviceHasBeenSet; } diff --git a/generated/src/aws-cpp-sdk-resource-explorer-2/include/aws/resource-explorer-2/model/SupportedResourceType.h b/generated/src/aws-cpp-sdk-resource-explorer-2/include/aws/resource-explorer-2/model/SupportedResourceType.h index 1440b1507d4..fad52216072 100644 --- a/generated/src/aws-cpp-sdk-resource-explorer-2/include/aws/resource-explorer-2/model/SupportedResourceType.h +++ b/generated/src/aws-cpp-sdk-resource-explorer-2/include/aws/resource-explorer-2/model/SupportedResourceType.h @@ -54,9 +54,9 @@ namespace Model ///@{ /** - *

    The Amazon Web Service that is associated with the resource type. This is the - * primary service that lets you create and interact with resources of this - * type.

    + *

    The Amazon Web Services service that is associated with the resource type. + * This is the primary service that lets you create and interact with resources of + * this type.

    */ inline const Aws::String& GetService() const{ return m_service; } inline bool ServiceHasBeenSet() const { return m_serviceHasBeenSet; } diff --git a/generated/src/aws-cpp-sdk-resource-explorer-2/source/ResourceExplorer2Client.cpp b/generated/src/aws-cpp-sdk-resource-explorer-2/source/ResourceExplorer2Client.cpp index 036f0a90d07..c1ae9a3c321 100644 --- a/generated/src/aws-cpp-sdk-resource-explorer-2/source/ResourceExplorer2Client.cpp +++ b/generated/src/aws-cpp-sdk-resource-explorer-2/source/ResourceExplorer2Client.cpp @@ -34,6 +34,7 @@ #include #include #include +#include #include #include #include @@ -536,6 +537,33 @@ ListIndexesForMembersOutcome ResourceExplorer2Client::ListIndexesForMembers(cons {{TracingUtils::SMITHY_METHOD_DIMENSION, request.GetServiceRequestName()}, {TracingUtils::SMITHY_SERVICE_DIMENSION, this->GetServiceClientName()}}); } +ListResourcesOutcome ResourceExplorer2Client::ListResources(const ListResourcesRequest& request) const +{ + AWS_OPERATION_GUARD(ListResources); + AWS_OPERATION_CHECK_PTR(m_endpointProvider, ListResources, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE); + AWS_OPERATION_CHECK_PTR(m_telemetryProvider, ListResources, CoreErrors, CoreErrors::NOT_INITIALIZED); + auto tracer = m_telemetryProvider->getTracer(this->GetServiceClientName(), {}); + auto meter = m_telemetryProvider->getMeter(this->GetServiceClientName(), {}); + AWS_OPERATION_CHECK_PTR(meter, ListResources, CoreErrors, CoreErrors::NOT_INITIALIZED); + auto span = tracer->CreateSpan(Aws::String(this->GetServiceClientName()) + ".ListResources", + {{ TracingUtils::SMITHY_METHOD_DIMENSION, request.GetServiceRequestName() }, { TracingUtils::SMITHY_SERVICE_DIMENSION, this->GetServiceClientName() }, { TracingUtils::SMITHY_SYSTEM_DIMENSION, TracingUtils::SMITHY_METHOD_AWS_VALUE }}, + smithy::components::tracing::SpanKind::CLIENT); + return TracingUtils::MakeCallWithTiming( + [&]()-> ListResourcesOutcome { + auto endpointResolutionOutcome = TracingUtils::MakeCallWithTiming( + [&]() -> ResolveEndpointOutcome { return m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams()); }, + TracingUtils::SMITHY_CLIENT_ENDPOINT_RESOLUTION_METRIC, + *meter, + {{TracingUtils::SMITHY_METHOD_DIMENSION, request.GetServiceRequestName()}, {TracingUtils::SMITHY_SERVICE_DIMENSION, this->GetServiceClientName()}}); + AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, ListResources, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage()); + endpointResolutionOutcome.GetResult().AddPathSegments("/ListResources"); + return ListResourcesOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_POST, Aws::Auth::SIGV4_SIGNER)); + }, + TracingUtils::SMITHY_CLIENT_DURATION_METRIC, + *meter, + {{TracingUtils::SMITHY_METHOD_DIMENSION, request.GetServiceRequestName()}, {TracingUtils::SMITHY_SERVICE_DIMENSION, this->GetServiceClientName()}}); +} + ListSupportedResourceTypesOutcome ResourceExplorer2Client::ListSupportedResourceTypes(const ListSupportedResourceTypesRequest& request) const { AWS_OPERATION_GUARD(ListSupportedResourceTypes); diff --git a/generated/src/aws-cpp-sdk-resource-explorer-2/source/model/ListResourcesRequest.cpp b/generated/src/aws-cpp-sdk-resource-explorer-2/source/model/ListResourcesRequest.cpp new file mode 100644 index 00000000000..f1f13f83738 --- /dev/null +++ b/generated/src/aws-cpp-sdk-resource-explorer-2/source/model/ListResourcesRequest.cpp @@ -0,0 +1,57 @@ +/** + * Copyright Amazon.com, Inc. 
or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#include +#include + +#include + +using namespace Aws::ResourceExplorer2::Model; +using namespace Aws::Utils::Json; +using namespace Aws::Utils; + +ListResourcesRequest::ListResourcesRequest() : + m_filtersHasBeenSet(false), + m_maxResults(0), + m_maxResultsHasBeenSet(false), + m_nextTokenHasBeenSet(false), + m_viewArnHasBeenSet(false) +{ +} + +Aws::String ListResourcesRequest::SerializePayload() const +{ + JsonValue payload; + + if(m_filtersHasBeenSet) + { + payload.WithObject("Filters", m_filters.Jsonize()); + + } + + if(m_maxResultsHasBeenSet) + { + payload.WithInteger("MaxResults", m_maxResults); + + } + + if(m_nextTokenHasBeenSet) + { + payload.WithString("NextToken", m_nextToken); + + } + + if(m_viewArnHasBeenSet) + { + payload.WithString("ViewArn", m_viewArn); + + } + + return payload.View().WriteReadable(); +} + + + + diff --git a/generated/src/aws-cpp-sdk-resource-explorer-2/source/model/ListResourcesResult.cpp b/generated/src/aws-cpp-sdk-resource-explorer-2/source/model/ListResourcesResult.cpp new file mode 100644 index 00000000000..c01bce2d334 --- /dev/null +++ b/generated/src/aws-cpp-sdk-resource-explorer-2/source/model/ListResourcesResult.cpp @@ -0,0 +1,63 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#include +#include +#include +#include +#include +#include + +#include + +using namespace Aws::ResourceExplorer2::Model; +using namespace Aws::Utils::Json; +using namespace Aws::Utils; +using namespace Aws; + +ListResourcesResult::ListResourcesResult() +{ +} + +ListResourcesResult::ListResourcesResult(const Aws::AmazonWebServiceResult& result) +{ + *this = result; +} + +ListResourcesResult& ListResourcesResult::operator =(const Aws::AmazonWebServiceResult& result) +{ + JsonView jsonValue = result.GetPayload().View(); + if(jsonValue.ValueExists("NextToken")) + { + m_nextToken = jsonValue.GetString("NextToken"); + + } + + if(jsonValue.ValueExists("Resources")) + { + Aws::Utils::Array resourcesJsonList = jsonValue.GetArray("Resources"); + for(unsigned resourcesIndex = 0; resourcesIndex < resourcesJsonList.GetLength(); ++resourcesIndex) + { + m_resources.push_back(resourcesJsonList[resourcesIndex].AsObject()); + } + } + + if(jsonValue.ValueExists("ViewArn")) + { + m_viewArn = jsonValue.GetString("ViewArn"); + + } + + + const auto& headers = result.GetHeaderValueCollection(); + const auto& requestIdIter = headers.find("x-amzn-requestid"); + if(requestIdIter != headers.end()) + { + m_requestId = requestIdIter->second; + } + + + return *this; +} diff --git a/src/aws-cpp-sdk-core/include/aws/core/VersionConfig.h b/src/aws-cpp-sdk-core/include/aws/core/VersionConfig.h index 8847508b72d..c7403d1191c 100644 --- a/src/aws-cpp-sdk-core/include/aws/core/VersionConfig.h +++ b/src/aws-cpp-sdk-core/include/aws/core/VersionConfig.h @@ -4,7 +4,7 @@ */ #pragma once -#define AWS_SDK_VERSION_STRING "1.11.410" +#define AWS_SDK_VERSION_STRING "1.11.411" #define AWS_SDK_VERSION_MAJOR 1 #define AWS_SDK_VERSION_MINOR 11 -#define AWS_SDK_VERSION_PATCH 410 +#define AWS_SDK_VERSION_PATCH 411 diff --git a/tools/code-generation/api-descriptions/apigateway-2015-07-09.normal.json b/tools/code-generation/api-descriptions/apigateway-2015-07-09.normal.json index a86e1c4745c..5d932aa169e 100644 --- a/tools/code-generation/api-descriptions/apigateway-2015-07-09.normal.json +++ b/tools/code-generation/api-descriptions/apigateway-2015-07-09.normal.json 
@@ -3637,7 +3637,7 @@ }, "certificateUploadDate":{ "shape":"Timestamp", - "documentation":"

    The timestamp when the certificate that was used by edge-optimized endpoint for this domain name was uploaded.

    " + "documentation":"

    The timestamp when the certificate that was used by edge-optimized endpoint for this domain name was uploaded. API Gateway doesn't change this value if you update the certificate.

    " }, "regionalDomainName":{ "shape":"String", diff --git a/tools/code-generation/api-descriptions/athena-2017-05-18.normal.json b/tools/code-generation/api-descriptions/athena-2017-05-18.normal.json index 66375fe17b1..383a3814f52 100644 --- a/tools/code-generation/api-descriptions/athena-2017-05-18.normal.json +++ b/tools/code-generation/api-descriptions/athena-2017-05-18.normal.json @@ -1538,6 +1538,41 @@ "min":0, "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*" }, + "ConnectionType":{ + "type":"string", + "enum":[ + "DYNAMODB", + "MYSQL", + "POSTGRESQL", + "REDSHIFT", + "ORACLE", + "SYNAPSE", + "SQLSERVER", + "DB2", + "OPENSEARCH", + "BIGQUERY", + "GOOGLECLOUDSTORAGE", + "HBASE", + "DOCUMENTDB", + "MSK", + "NEPTUNE", + "CMDB", + "TPCDS", + "REDIS", + "CLOUDWATCH", + "TIMESTREAM", + "SAPHANA", + "SNOWFLAKE", + "TERADATA", + "VERTICA", + "CLOUDERAIMPALA", + "CLOUDERAHIVE", + "HORTONWORKSHIVE", + "DATALAKEGEN2", + "DB2AS400", + "CLOUDWATCHMETRICS" + ] + }, "CoordinatorDpuSize":{ "type":"integer", "box":true, @@ -1583,7 +1618,7 @@ }, "Type":{ "shape":"DataCatalogType", - "documentation":"

    The type of data catalog to create: LAMBDA for a federated catalog, HIVE for an external hive metastore, or GLUE for an Glue Data Catalog.

    " + "documentation":"

    The type of data catalog to create: LAMBDA for a federated catalog, GLUE for a Glue Data Catalog, and HIVE for an external Apache Hive metastore. FEDERATED is a federated catalog for which Athena creates the connection and the Lambda function for you based on the parameters that you pass.

    " }, "Description":{ "shape":"DescriptionString", @@ -1591,7 +1626,7 @@ }, "Parameters":{ "shape":"ParametersMap", - "documentation":"

    Specifies the Lambda function or functions to use for creating the data catalog. This is a mapping whose values depend on the catalog type.

    • For the HIVE data catalog type, use the following syntax. The metadata-function parameter is required. The sdk-version parameter is optional and defaults to the currently supported version.

      metadata-function=lambda_arn, sdk-version=version_number

    • For the LAMBDA data catalog type, use one of the following sets of required parameters, but not both.

      • If you have one Lambda function that processes metadata and another for reading the actual data, use the following syntax. Both parameters are required.

        metadata-function=lambda_arn, record-function=lambda_arn

      • If you have a composite Lambda function that processes both metadata and data, use the following syntax to specify your Lambda function.

        function=lambda_arn

    • The GLUE type takes a catalog ID parameter and is required. The catalog_id is the account ID of the Amazon Web Services account to which the Glue Data Catalog belongs.

      catalog-id=catalog_id

      • The GLUE data catalog type also applies to the default AwsDataCatalog that already exists in your account, of which you can have only one and cannot modify.

    " + "documentation":"

    Specifies the Lambda function or functions to use for creating the data catalog. This is a mapping whose values depend on the catalog type.

    • For the HIVE data catalog type, use the following syntax. The metadata-function parameter is required. The sdk-version parameter is optional and defaults to the currently supported version.

      metadata-function=lambda_arn, sdk-version=version_number

    • For the LAMBDA data catalog type, use one of the following sets of required parameters, but not both.

      • If you have one Lambda function that processes metadata and another for reading the actual data, use the following syntax. Both parameters are required.

        metadata-function=lambda_arn, record-function=lambda_arn

      • If you have a composite Lambda function that processes both metadata and data, use the following syntax to specify your Lambda function.

        function=lambda_arn

    • The GLUE type takes a catalog ID parameter and is required. The catalog_id is the account ID of the Amazon Web Services account to which the Glue Data Catalog belongs.

      catalog-id=catalog_id

      • The GLUE data catalog type also applies to the default AwsDataCatalog that already exists in your account, of which you can have only one and cannot modify.

    • The FEDERATED data catalog type uses one of the following parameters, but not both. Use connection-arn for an existing Glue connection. Use connection-type and connection-properties to specify the configuration setting for a new connection.

      • connection-arn:<glue_connection_arn_to_reuse>

      • lambda-role-arn (optional): The execution role to use for the Lambda function. If not provided, one is created.

      • connection-type:MYSQL|REDSHIFT|...., connection-properties:\"<json_string>\"

        For <json_string> , use escaped JSON text, as in the following example.

        \"{\\\"spill_bucket\\\":\\\"my_spill\\\",\\\"spill_prefix\\\":\\\"athena-spill\\\",\\\"host\\\":\\\"abc12345.snowflakecomputing.com\\\",\\\"port\\\":\\\"1234\\\",\\\"warehouse\\\":\\\"DEV_WH\\\",\\\"database\\\":\\\"TEST\\\",\\\"schema\\\":\\\"PUBLIC\\\",\\\"SecretArn\\\":\\\"arn:aws:secretsmanager:ap-south-1:111122223333:secret:snowflake-XHb67j\\\"}\"

    " }, "Tags":{ "shape":"TagList", @@ -1602,6 +1637,7 @@ "CreateDataCatalogOutput":{ "type":"structure", "members":{ + "DataCatalog":{"shape":"DataCatalog"} } }, "CreateNamedQueryInput":{ @@ -1796,15 +1832,41 @@ }, "Type":{ "shape":"DataCatalogType", - "documentation":"

    The type of data catalog to create: LAMBDA for a federated catalog, HIVE for an external hive metastore, or GLUE for an Glue Data Catalog.

    " + "documentation":"

    The type of data catalog to create: LAMBDA for a federated catalog, GLUE for a Glue Data Catalog, and HIVE for an external Apache Hive metastore. FEDERATED is a federated catalog for which Athena creates the connection and the Lambda function for you based on the parameters that you pass.

    " }, "Parameters":{ "shape":"ParametersMap", - "documentation":"

    Specifies the Lambda function or functions to use for the data catalog. This is a mapping whose values depend on the catalog type.

    • For the HIVE data catalog type, use the following syntax. The metadata-function parameter is required. The sdk-version parameter is optional and defaults to the currently supported version.

      metadata-function=lambda_arn, sdk-version=version_number

    • For the LAMBDA data catalog type, use one of the following sets of required parameters, but not both.

      • If you have one Lambda function that processes metadata and another for reading the actual data, use the following syntax. Both parameters are required.

        metadata-function=lambda_arn, record-function=lambda_arn

      • If you have a composite Lambda function that processes both metadata and data, use the following syntax to specify your Lambda function.

        function=lambda_arn

    • The GLUE type takes a catalog ID parameter and is required. The catalog_id is the account ID of the Amazon Web Services account to which the Glue catalog belongs.

      catalog-id=catalog_id

      • The GLUE data catalog type also applies to the default AwsDataCatalog that already exists in your account, of which you can have only one and cannot modify.

    " + "documentation":"

    Specifies the Lambda function or functions to use for the data catalog. This is a mapping whose values depend on the catalog type.

    • For the HIVE data catalog type, use the following syntax. The metadata-function parameter is required. The sdk-version parameter is optional and defaults to the currently supported version.

      metadata-function=lambda_arn, sdk-version=version_number

    • For the LAMBDA data catalog type, use one of the following sets of required parameters, but not both.

      • If you have one Lambda function that processes metadata and another for reading the actual data, use the following syntax. Both parameters are required.

        metadata-function=lambda_arn, record-function=lambda_arn

      • If you have a composite Lambda function that processes both metadata and data, use the following syntax to specify your Lambda function.

        function=lambda_arn

    • The GLUE type takes a catalog ID parameter and is required. The catalog_id is the account ID of the Amazon Web Services account to which the Glue catalog belongs.

      catalog-id=catalog_id

      • The GLUE data catalog type also applies to the default AwsDataCatalog that already exists in your account, of which you can have only one and cannot modify.

    • The FEDERATED data catalog type uses one of the following parameters, but not both. Use connection-arn for an existing Glue connection. Use connection-type and connection-properties to specify the configuration setting for a new connection.

      • connection-arn:<glue_connection_arn_to_reuse>

      • connection-type:MYSQL|REDSHIFT|...., connection-properties:\"<json_string>\"

        For <json_string> , use escaped JSON text, as in the following example.

        \"{\\\"spill_bucket\\\":\\\"my_spill\\\",\\\"spill_prefix\\\":\\\"athena-spill\\\",\\\"host\\\":\\\"abc12345.snowflakecomputing.com\\\",\\\"port\\\":\\\"1234\\\",\\\"warehouse\\\":\\\"DEV_WH\\\",\\\"database\\\":\\\"TEST\\\",\\\"schema\\\":\\\"PUBLIC\\\",\\\"SecretArn\\\":\\\"arn:aws:secretsmanager:ap-south-1:111122223333:secret:snowflake-XHb67j\\\"}\"

    " + }, + "Status":{ + "shape":"DataCatalogStatus", + "documentation":"

    The status of the creation or deletion of the data catalog.

    • The LAMBDA, GLUE, and HIVE data catalog types are created synchronously. Their status is either CREATE_COMPLETE or CREATE_FAILED.

    • The FEDERATED data catalog type is created asynchronously.

    Data catalog creation status:

    • CREATE_IN_PROGRESS: Federated data catalog creation in progress.

    • CREATE_COMPLETE: Data catalog creation complete.

    • CREATE_FAILED: Data catalog could not be created.

    • CREATE_FAILED_CLEANUP_IN_PROGRESS: Federated data catalog creation failed and is being removed.

    • CREATE_FAILED_CLEANUP_COMPLETE: Federated data catalog creation failed and was removed.

    • CREATE_FAILED_CLEANUP_FAILED: Federated data catalog creation failed but could not be removed.

    Data catalog deletion status:

    • DELETE_IN_PROGRESS: Federated data catalog deletion in progress.

    • DELETE_COMPLETE: Federated data catalog deleted.

    • DELETE_FAILED: Federated data catalog could not be deleted.
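    Because FEDERATED catalogs are created asynchronously, a caller typically polls this status until it settles. A minimal sketch with GetDataCatalog, assuming the catalog name is a placeholder and that the generated GetStatus accessor exposes the Status field added here; the 10-second interval and attempt cap are arbitrary.

#include <aws/core/Aws.h>
#include <aws/athena/AthenaClient.h>
#include <aws/athena/model/GetDataCatalogRequest.h>
#include <aws/athena/model/DataCatalogStatus.h>
#include <chrono>
#include <iostream>
#include <thread>

int main()
{
    Aws::SDKOptions options;
    Aws::InitAPI(options);
    {
        using Aws::Athena::Model::DataCatalogStatus;
        Aws::Athena::AthenaClient client;

        Aws::Athena::Model::GetDataCatalogRequest request;
        request.SetName("my_federated_catalog");  // placeholder catalog name

        // Poll until creation leaves the in-progress states (at most ~10 minutes here).
        for (int attempt = 0; attempt < 60; ++attempt)
        {
            auto outcome = client.GetDataCatalog(request);
            if (!outcome.IsSuccess())
            {
                std::cerr << outcome.GetError().GetMessage() << std::endl;
                break;
            }
            const auto status = outcome.GetResult().GetDataCatalog().GetStatus();
            if (status != DataCatalogStatus::CREATE_IN_PROGRESS &&
                status != DataCatalogStatus::CREATE_FAILED_CLEANUP_IN_PROGRESS)
            {
                std::cout << "Catalog reached a settled state" << std::endl;
                break;
            }
            std::this_thread::sleep_for(std::chrono::seconds(10));
        }
    }
    Aws::ShutdownAPI(options);
    return 0;
}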

    " + }, + "ConnectionType":{ + "shape":"ConnectionType", + "documentation":"

    The type of connection for a FEDERATED data catalog (for example, REDSHIFT, MYSQL, or SQLSERVER). For information about individual connectors, see Available data source connectors.

    " + }, + "Error":{ + "shape":"ErrorMessage", + "documentation":"

    Text of the error that occurred during data catalog creation or deletion.

    " } }, "documentation":"

    Contains information about a data catalog in an Amazon Web Services account.

    In the Athena console, data catalogs are listed as \"data sources\" on the Data sources page under the Data source name column.

    " }, + "DataCatalogStatus":{ + "type":"string", + "enum":[ + "CREATE_IN_PROGRESS", + "CREATE_COMPLETE", + "CREATE_FAILED", + "CREATE_FAILED_CLEANUP_IN_PROGRESS", + "CREATE_FAILED_CLEANUP_COMPLETE", + "CREATE_FAILED_CLEANUP_FAILED", + "DELETE_IN_PROGRESS", + "DELETE_COMPLETE", + "DELETE_FAILED" + ] + }, "DataCatalogSummary":{ "type":"structure", "members":{ @@ -1815,6 +1877,18 @@ "Type":{ "shape":"DataCatalogType", "documentation":"

    The data catalog type.

    " + }, + "Status":{ + "shape":"DataCatalogStatus", + "documentation":"

    The status of the creation or deletion of the data catalog.

    • The LAMBDA, GLUE, and HIVE data catalog types are created synchronously. Their status is either CREATE_COMPLETE or CREATE_FAILED.

    • The FEDERATED data catalog type is created asynchronously.

    Data catalog creation status:

    • CREATE_IN_PROGRESS: Federated data catalog creation in progress.

    • CREATE_COMPLETE: Data catalog creation complete.

    • CREATE_FAILED: Data catalog could not be created.

    • CREATE_FAILED_CLEANUP_IN_PROGRESS: Federated data catalog creation failed and is being removed.

    • CREATE_FAILED_CLEANUP_COMPLETE: Federated data catalog creation failed and was removed.

    • CREATE_FAILED_CLEANUP_FAILED: Federated data catalog creation failed but could not be removed.

    Data catalog deletion status:

    • DELETE_IN_PROGRESS: Federated data catalog deletion in progress.

    • DELETE_COMPLETE: Federated data catalog deleted.

    • DELETE_FAILED: Federated data catalog could not be deleted.

    " + }, + "ConnectionType":{ + "shape":"ConnectionType", + "documentation":"

    The type of connection for a FEDERATED data catalog (for example, REDSHIFT, MYSQL, or SQLSERVER). For information about individual connectors, see Available data source connectors.

    " + }, + "Error":{ + "shape":"ErrorMessage", + "documentation":"

    Text of the error that occurred during data catalog creation or deletion.

    " } }, "documentation":"

    The summary information for the data catalog, which includes its name and type.

    " @@ -1828,7 +1902,8 @@ "enum":[ "LAMBDA", "GLUE", - "HIVE" + "HIVE", + "FEDERATED" ] }, "Database":{ @@ -1904,6 +1979,7 @@ "DeleteDataCatalogOutput":{ "type":"structure", "members":{ + "DataCatalog":{"shape":"DataCatalog"} } }, "DeleteNamedQueryInput":{ diff --git a/tools/code-generation/api-descriptions/bedrock-agent-2023-06-05.normal.json b/tools/code-generation/api-descriptions/bedrock-agent-2023-06-05.normal.json index 253826604ad..4a519427e8d 100644 --- a/tools/code-generation/api-descriptions/bedrock-agent-2023-06-05.normal.json +++ b/tools/code-generation/api-descriptions/bedrock-agent-2023-06-05.normal.json @@ -2313,7 +2313,7 @@ }, "foundationModel":{ "shape":"ModelIdentifier", - "documentation":"

    The foundation model to be used for orchestration by the agent you create.

    " + "documentation":"

    The Amazon Resource Name (ARN) of the foundation model to be used for orchestration by the agent you create.

    " }, "guardrailConfiguration":{ "shape":"GuardrailConfiguration", @@ -5190,8 +5190,8 @@ "documentation":"

    The unique identifier of the knowledge base to query.

    " }, "modelId":{ - "shape":"ModelIdentifier", - "documentation":"

    The unique identifier of the model to use to generate a response from the query results. Omit this field if you want to return the retrieved results as an array.

    " + "shape":"KnowledgeBaseModelIdentifier", + "documentation":"

    The unique identifier of the model or inference profile to use to generate a response from the query results. Omit this field if you want to return the retrieved results as an array.

    " } }, "documentation":"

    Contains configurations for a knowledge base node in a flow. This node takes a query as the input and returns, as the output, the retrieved responses directly (as an array) or a response generated based on the retrieved responses. For more information, see Node types in Amazon Bedrock works in the Amazon Bedrock User Guide.

    " @@ -5202,6 +5202,12 @@ "min":0, "pattern":"^[0-9a-zA-Z]+$" }, + "KnowledgeBaseModelIdentifier":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"^(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}/[a-z0-9]{12})|(:foundation-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|([0-9]{12}:provisioned-model/[a-z0-9]{12})))|([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|(([0-9a-zA-Z][_-]?)+)|(arn:aws(|-us-gov|-cn|-iso|-iso-b):bedrock:(|[0-9a-z-]{1,20}):(|[0-9]{12}):(model-gateway|inference-profile)/[a-zA-Z0-9-:.]+)|([a-zA-Z0-9-:.]+)$" + }, "KnowledgeBaseRoleArn":{ "type":"string", "max":2048, @@ -6301,7 +6307,7 @@ }, "modelId":{ "shape":"PromptModelIdentifier", - "documentation":"

    The unique identifier of the model to run inference with.

    " + "documentation":"

    The unique identifier of the model or inference profile to run inference with.

    " }, "templateConfiguration":{ "shape":"PromptTemplateConfiguration", @@ -6424,7 +6430,7 @@ "type":"string", "max":2048, "min":1, - "pattern":"^(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}/[a-z0-9]{12})|(:foundation-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|([0-9]{12}:provisioned-model/[a-z0-9]{12})))|([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|(([0-9a-zA-Z][_-]?)+)$" + "pattern":"^(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}/[a-z0-9]{12})|(:foundation-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|([0-9]{12}:provisioned-model/[a-z0-9]{12})))|([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|(([0-9a-zA-Z][_-]?)+)|(arn:aws(|-us-gov|-cn|-iso|-iso-b):bedrock:(|[0-9a-z-]{1,20}):(|[0-9]{12}):(model-gateway|inference-profile)/[a-zA-Z0-9-:.]+)|([a-zA-Z0-9-:.]+)$" }, "PromptModelInferenceConfiguration":{ "type":"structure", @@ -6462,7 +6468,7 @@ "members":{ "overrideLambda":{ "shape":"LambdaArn", - "documentation":"

    The ARN of the Lambda function to use when parsing the raw foundation model output in parts of the agent sequence. If you specify this field, at least one of the promptConfigurations must contain a parserMode value that is set to OVERRIDDEN. For more information, see Parser Lambda function in Agents for Amazon Bedrock.

    " + "documentation":"

    The ARN of the Lambda function to use when parsing the raw foundation model output in parts of the agent sequence. If you specify this field, at least one of the promptConfigurations must contain a parserMode value that is set to OVERRIDDEN. For more information, see Parser Lambda function in Amazon Bedrock Agents.

    " }, "promptConfigurations":{ "shape":"PromptConfigurations", @@ -6568,7 +6574,7 @@ }, "modelId":{ "shape":"PromptModelIdentifier", - "documentation":"

    The unique identifier of the model with which to run inference on the prompt.

    " + "documentation":"

    The unique identifier of the model or inference profile with which to run inference on the prompt.

    " }, "name":{ "shape":"PromptVariantName", diff --git a/tools/code-generation/api-descriptions/ec2-2016-11-15.normal.json b/tools/code-generation/api-descriptions/ec2-2016-11-15.normal.json index 32910a6be85..1f84c148d38 100644 --- a/tools/code-generation/api-descriptions/ec2-2016-11-15.normal.json +++ b/tools/code-generation/api-descriptions/ec2-2016-11-15.normal.json @@ -459,7 +459,7 @@ "requestUri":"/" }, "input":{"shape":"CancelConversionRequest"}, - "documentation":"

    Cancels an active conversion task. The task can be the import of an instance or volume. The action removes all artifacts of the conversion, including a partially uploaded volume or instance. If the conversion is complete or is in the process of transferring the final disk image, the command fails and returns an exception.

    For more information, see Importing a Virtual Machine Using the Amazon EC2 CLI.

    " + "documentation":"

    Cancels an active conversion task. The task can be the import of an instance or volume. The action removes all artifacts of the conversion, including a partially uploaded volume or instance. If the conversion is complete or is in the process of transferring the final disk image, the command fails and returns an exception.

    " }, "CancelExportTask":{ "name":"CancelExportTask", @@ -4827,7 +4827,7 @@ }, "input":{"shape":"ImportInstanceRequest"}, "output":{"shape":"ImportInstanceResult"}, - "documentation":"

    We recommend that you use the ImportImage API. For more information, see Importing a VM as an image using VM Import/Export in the VM Import/Export User Guide.

    Creates an import instance task using metadata from the specified disk image.

    This API action is not supported by the Command Line Interface (CLI). For information about using the Amazon EC2 CLI, which is deprecated, see Importing a VM to Amazon EC2 in the Amazon EC2 CLI Reference PDF file.

    This API action supports only single-volume VMs. To import multi-volume VMs, use ImportImage instead.

    For information about the import manifest referenced by this API action, see VM Import Manifest.

    " + "documentation":"

    We recommend that you use the ImportImage API instead. For more information, see Importing a VM as an image using VM Import/Export in the VM Import/Export User Guide.

    Creates an import instance task using metadata from the specified disk image.

    This API action supports only single-volume VMs. To import multi-volume VMs, use ImportImage instead.

    For information about the import manifest referenced by this API action, see VM Import Manifest.

    This API action is not supported by the Command Line Interface (CLI).

    " }, "ImportKeyPair":{ "name":"ImportKeyPair", @@ -4857,7 +4857,7 @@ }, "input":{"shape":"ImportVolumeRequest"}, "output":{"shape":"ImportVolumeResult"}, - "documentation":"

    Creates an import volume task using metadata from the specified disk image.

    This API action supports only single-volume VMs. To import multi-volume VMs, use ImportImage instead. To import a disk to a snapshot, use ImportSnapshot instead.

    This API action is not supported by the Command Line Interface (CLI). For information about using the Amazon EC2 CLI, which is deprecated, see Importing Disks to Amazon EBS in the Amazon EC2 CLI Reference PDF file.

    For information about the import manifest referenced by this API action, see VM Import Manifest.

    " + "documentation":"

    This API action supports only single-volume VMs. To import multi-volume VMs, use ImportImage instead. To import a disk to a snapshot, use ImportSnapshot instead.

    Creates an import volume task using metadata from the specified disk image.

    For information about the import manifest referenced by this API action, see VM Import Manifest.

    This API action is not supported by the Command Line Interface (CLI).

    " }, "ListImagesInRecycleBin":{ "name":"ListImagesInRecycleBin", @@ -17321,7 +17321,7 @@ "type":"structure", "members":{ "KeyName":{ - "shape":"KeyPairName", + "shape":"KeyPairNameWithResolver", "documentation":"

    The name of the key pair.

    " }, "KeyPairId":{ @@ -19306,11 +19306,7 @@ }, "DescribeCapacityBlockOfferingsRequest":{ "type":"structure", - "required":[ - "InstanceType", - "InstanceCount", - "CapacityDurationHours" - ], + "required":["CapacityDurationHours"], "members":{ "DryRun":{ "shape":"Boolean", @@ -29887,7 +29883,11 @@ }, "FleetCapacityReservationUsageStrategy":{ "type":"string", - "enum":["use-capacity-reservations-first"] + "enum":[ + "use-capacity-reservations-first", + "use-capacity-reservations-only", + "none" + ] }, "FleetData":{ "type":"structure", @@ -37483,7 +37483,15 @@ "r8g.48xlarge", "r8g.metal-24xl", "r8g.metal-48xl", - "mac2-m1ultra.metal" + "mac2-m1ultra.metal", + "g6e.xlarge", + "g6e.2xlarge", + "g6e.4xlarge", + "g6e.8xlarge", + "g6e.12xlarge", + "g6e.16xlarge", + "g6e.24xlarge", + "g6e.48xlarge" ] }, "InstanceTypeHypervisor":{ @@ -39755,6 +39763,7 @@ } }, "KeyPairName":{"type":"string"}, + "KeyPairNameWithResolver":{"type":"string"}, "KeyType":{ "type":"string", "enum":[ @@ -54262,7 +54271,7 @@ "members":{ "Description":{ "shape":"String", - "documentation":"

    The description of the snapshot.

    ", + "documentation":"

    The description of the disk image being imported.

    ", "locationName":"description" }, "DiskImageSize":{ diff --git a/tools/code-generation/api-descriptions/emr-serverless-2021-07-13.normal.json b/tools/code-generation/api-descriptions/emr-serverless-2021-07-13.normal.json index d5e5c737441..e55f2a9295a 100644 --- a/tools/code-generation/api-descriptions/emr-serverless-2021-07-13.normal.json +++ b/tools/code-generation/api-descriptions/emr-serverless-2021-07-13.normal.json @@ -2,9 +2,10 @@ "version":"2.0", "metadata":{ "apiVersion":"2021-07-13", + "auth":["aws.auth#sigv4"], "endpointPrefix":"emr-serverless", - "jsonVersion":"1.1", "protocol":"rest-json", + "protocols":["rest-json"], "serviceFullName":"EMR Serverless", "serviceId":"EMR Serverless", "signatureVersion":"v4", @@ -370,6 +371,10 @@ "interactiveConfiguration":{ "shape":"InteractiveConfiguration", "documentation":"

    The interactive configuration object that enables the interactive use cases for an application.

    " + }, + "schedulerConfiguration":{ + "shape":"SchedulerConfiguration", + "documentation":"

    The scheduler configuration for batch and streaming jobs running on this application. Supported with release labels emr-7.0.0 and above.

    " } }, "documentation":"

    Information about an application. Amazon EMR Serverless uses applications to run jobs.

    " @@ -728,6 +733,10 @@ "interactiveConfiguration":{ "shape":"InteractiveConfiguration", "documentation":"

    The interactive configuration object that enables the interactive use cases to use when running an application.

    " + }, + "schedulerConfiguration":{ + "shape":"SchedulerConfiguration", + "documentation":"

    The scheduler configuration for batch and streaming jobs running on this application. Supported with release labels emr-7.0.0 and above.

    " } } }, @@ -793,7 +802,7 @@ "type":"string", "max":2048, "min":20, - "pattern":"arn:(aws[a-zA-Z0-9-]*):kms:[a-zA-Z0-9\\-]*:(\\d{12})?:key\\/[a-zA-Z0-9-]+" + "pattern":"arn:(aws[a-zA-Z0-9-]*):kms:[a-zA-Z0-9\\-]*:([0-9]{12}):key\\/[a-zA-Z0-9-]+" }, "EngineType":{ "type":"string", @@ -943,7 +952,7 @@ "type":"string", "max":2048, "min":20, - "pattern":"arn:(aws[a-zA-Z0-9-]*):iam::(\\d{12})?:(role((\\u002F)|(\\u002F[\\u0021-\\u007F]+\\u002F))[\\w+=,.@-]+)" + "pattern":"arn:(aws[a-zA-Z0-9-]*):iam::([0-9]{12}):(role((\\u002F)|(\\u002F[\\u0021-\\u007F]+\\u002F))[\\w+=,.@-]+)" }, "ImageConfiguration":{ "type":"structure", @@ -1168,6 +1177,18 @@ "attemptUpdatedAt":{ "shape":"Date", "documentation":"

    The date and time of when the job run attempt was last updated.

    " + }, + "startedAt":{ + "shape":"Date", + "documentation":"

    The date and time when the job moved to the RUNNING state.

    " + }, + "endedAt":{ + "shape":"Date", + "documentation":"

    The date and time when the job was terminated.

    " + }, + "queuedDurationMilliseconds":{ + "shape":"Long", + "documentation":"

    The total time for a job in the QUEUED state in milliseconds.

    " } }, "documentation":"

    Information about a job run. A job run is a unit of work, such as a Spark JAR, Hive query, or SparkSQL query, that you submit to an Amazon EMR Serverless application.

    " @@ -1278,7 +1299,8 @@ "SUCCESS", "FAILED", "CANCELLING", - "CANCELLED" + "CANCELLED", + "QUEUED" ] }, "JobRunStateSet":{ @@ -1591,6 +1613,10 @@ "min":1, "pattern":"[a-zA-Z]+[-_]*[a-zA-Z]+" }, + "Long":{ + "type":"long", + "box":true + }, "ManagedPersistenceMonitoringConfiguration":{ "type":"structure", "members":{ @@ -1780,6 +1806,20 @@ }, "documentation":"

    The Amazon S3 configuration for monitoring log publishing. You can configure your jobs to send log information to Amazon S3.

    " }, + "SchedulerConfiguration":{ + "type":"structure", + "members":{ + "queueTimeoutMinutes":{ + "shape":"Integer", + "documentation":"

    The maximum duration in minutes for the job in the QUEUED state. If scheduler configuration is enabled on your application, the default value is 360 minutes (6 hours). The valid range is from 15 to 720.
    The maximum duration in minutes for the job in the QUEUED state. If scheduler configuration is enabled on your application, the default value is 360 minutes (6 hours). The valid range is from 15 to 720.

    " + }, + "maxConcurrentRuns":{ + "shape":"Integer", + "documentation":"

    The maximum concurrent job runs on this application. If scheduler configuration is enabled on your application, the default value is 15. The valid range is 1 to 1000.

    " + } + }, + "documentation":"

    The scheduler configuration for batch and streaming jobs running on this application. Supported with release labels emr-7.0.0 and above.

    " + }, "SecurityGroupIds":{ "type":"list", "member":{"shape":"SecurityGroupString"}, @@ -2128,6 +2168,10 @@ "monitoringConfiguration":{ "shape":"MonitoringConfiguration", "documentation":"

    The configuration setting for monitoring.

    " + }, + "schedulerConfiguration":{ + "shape":"SchedulerConfiguration", + "documentation":"

    The scheduler configuration for batch and streaming jobs running on this application. Supported with release labels emr-7.0.0 and above.

    " } } }, diff --git a/tools/code-generation/api-descriptions/glue-2017-03-31.normal.json b/tools/code-generation/api-descriptions/glue-2017-03-31.normal.json index 4031dda5621..6e4716c11d0 100644 --- a/tools/code-generation/api-descriptions/glue-2017-03-31.normal.json +++ b/tools/code-generation/api-descriptions/glue-2017-03-31.normal.json @@ -4230,13 +4230,13 @@ "shape":"AuthenticationType", "documentation":"

    A structure containing the authentication configuration in the CreateConnection request.

    " }, - "SecretArn":{ - "shape":"SecretArn", - "documentation":"

    The secret manager ARN to store credentials in the CreateConnection request.

    " - }, "OAuth2Properties":{ "shape":"OAuth2PropertiesInput", "documentation":"

    The properties for OAuth2 authentication in the CreateConnection request.

    " + }, + "SecretArn":{ + "shape":"SecretArn", + "documentation":"

    The Secrets Manager ARN to store credentials in the CreateConnection request.

    " } }, "documentation":"

    A structure containing the authentication configuration in the CreateConnection request.

    " @@ -4253,7 +4253,8 @@ "type":"string", "max":4096, "min":1, - "pattern":"\\S+" + "pattern":"\\S+", + "sensitive":true }, "AuthorizationCodeProperties":{ "type":"structure", @@ -6684,6 +6685,10 @@ "shape":"ConnectionProperties", "documentation":"

    These key-value pairs define parameters for the connection:

    • HOST - The host URI: either the fully qualified domain name (FQDN) or the IPv4 address of the database host.

    • PORT - The port number, between 1024 and 65535, of the port on which the database host is listening for database connections.

    • USER_NAME - The name under which to log in to the database. The value string for USER_NAME is \"USERNAME\".

    • PASSWORD - A password, if one is used, for the user name.

    • ENCRYPTED_PASSWORD - When you enable connection password protection by setting ConnectionPasswordEncryption in the Data Catalog encryption settings, this field stores the encrypted password.

    • JDBC_DRIVER_JAR_URI - The Amazon Simple Storage Service (Amazon S3) path of the JAR file that contains the JDBC driver to use.

    • JDBC_DRIVER_CLASS_NAME - The class name of the JDBC driver to use.

    • JDBC_ENGINE - The name of the JDBC engine to use.

    • JDBC_ENGINE_VERSION - The version of the JDBC engine to use.

    • CONFIG_FILES - (Reserved for future use.)

    • INSTANCE_ID - The instance ID to use.

    • JDBC_CONNECTION_URL - The URL for connecting to a JDBC data source.

    • JDBC_ENFORCE_SSL - A Boolean string (true, false) specifying whether Secure Sockets Layer (SSL) with hostname matching is enforced for the JDBC connection on the client. The default is false.

    • CUSTOM_JDBC_CERT - An Amazon S3 location specifying the customer's root certificate. Glue uses this root certificate to validate the customer's certificate when connecting to the customer database. Glue only handles X.509 certificates. The certificate provided must be DER-encoded and supplied in base64-encoded PEM format.

    • SKIP_CUSTOM_JDBC_CERT_VALIDATION - By default, this is false. Glue validates the Signature algorithm and Subject Public Key Algorithm for the customer certificate. The only permitted algorithms for the Signature algorithm are SHA256withRSA, SHA384withRSA or SHA512withRSA. For the Subject Public Key Algorithm, the key length must be at least 2048. You can set the value of this property to true to skip Glue’s validation of the customer certificate.

    • CUSTOM_JDBC_CERT_STRING - A custom JDBC certificate string which is used for domain match or distinguished name match to prevent a man-in-the-middle attack. In Oracle database, this is used as the SSL_SERVER_CERT_DN; in Microsoft SQL Server, this is used as the hostNameInCertificate.

    • CONNECTION_URL - The URL for connecting to a general (non-JDBC) data source.

    • SECRET_ID - The secret ID used for the secret manager of credentials.

    • CONNECTOR_URL - The connector URL for a MARKETPLACE or CUSTOM connection.

    • CONNECTOR_TYPE - The connector type for a MARKETPLACE or CUSTOM connection.

    • CONNECTOR_CLASS_NAME - The connector class name for a MARKETPLACE or CUSTOM connection.

    • KAFKA_BOOTSTRAP_SERVERS - A comma-separated list of host and port pairs that are the addresses of the Apache Kafka brokers in a Kafka cluster to which a Kafka client will connect and bootstrap itself.

    • KAFKA_SSL_ENABLED - Whether to enable or disable SSL on an Apache Kafka connection. Default value is \"true\".

    • KAFKA_CUSTOM_CERT - The Amazon S3 URL for the private CA cert file (.pem format). The default is an empty string.

    • KAFKA_SKIP_CUSTOM_CERT_VALIDATION - Whether to skip the validation of the CA cert file or not. Glue validates for three algorithms: SHA256withRSA, SHA384withRSA and SHA512withRSA. Default value is \"false\".

    • KAFKA_CLIENT_KEYSTORE - The Amazon S3 location of the client keystore file for Kafka client side authentication (Optional).

    • KAFKA_CLIENT_KEYSTORE_PASSWORD - The password to access the provided keystore (Optional).

    • KAFKA_CLIENT_KEY_PASSWORD - A keystore can consist of multiple keys, so this is the password to access the client key to be used with the Kafka server side key (Optional).

    • ENCRYPTED_KAFKA_CLIENT_KEYSTORE_PASSWORD - The encrypted version of the Kafka client keystore password (if the user has the Glue encrypt passwords setting selected).

    • ENCRYPTED_KAFKA_CLIENT_KEY_PASSWORD - The encrypted version of the Kafka client key password (if the user has the Glue encrypt passwords setting selected).

    • KAFKA_SASL_MECHANISM - \"SCRAM-SHA-512\", \"GSSAPI\", \"AWS_MSK_IAM\", or \"PLAIN\". These are the supported SASL Mechanisms.

    • KAFKA_SASL_PLAIN_USERNAME - A plaintext username used to authenticate with the \"PLAIN\" mechanism.

    • KAFKA_SASL_PLAIN_PASSWORD - A plaintext password used to authenticate with the \"PLAIN\" mechanism.

    • ENCRYPTED_KAFKA_SASL_PLAIN_PASSWORD - The encrypted version of the Kafka SASL PLAIN password (if the user has the Glue encrypt passwords setting selected).

    • KAFKA_SASL_SCRAM_USERNAME - A plaintext username used to authenticate with the \"SCRAM-SHA-512\" mechanism.

    • KAFKA_SASL_SCRAM_PASSWORD - A plaintext password used to authenticate with the \"SCRAM-SHA-512\" mechanism.

    • ENCRYPTED_KAFKA_SASL_SCRAM_PASSWORD - The encrypted version of the Kafka SASL SCRAM password (if the user has the Glue encrypt passwords setting selected).

    • KAFKA_SASL_SCRAM_SECRETS_ARN - The Amazon Resource Name of a secret in Amazon Web Services Secrets Manager.

    • KAFKA_SASL_GSSAPI_KEYTAB - The S3 location of a Kerberos keytab file. A keytab stores long-term keys for one or more principals. For more information, see MIT Kerberos Documentation: Keytab.

    • KAFKA_SASL_GSSAPI_KRB5_CONF - The S3 location of a Kerberos krb5.conf file. A krb5.conf stores Kerberos configuration information, such as the location of the KDC server. For more information, see MIT Kerberos Documentation: krb5.conf.

    • KAFKA_SASL_GSSAPI_SERVICE - The Kerberos service name, as set with sasl.kerberos.service.name in your Kafka Configuration.

    • KAFKA_SASL_GSSAPI_PRINCIPAL - The name of the Kerberos principal used by Glue. For more information, see Kafka Documentation: Configuring Kafka Brokers.

    • ROLE_ARN - The role to be used for running queries.

    • REGION - The Amazon Web Services Region where queries will be run.

    • WORKGROUP_NAME - The name of an Amazon Redshift serverless workgroup or Amazon Athena workgroup in which queries will run.

    • CLUSTER_IDENTIFIER - The cluster identifier of an Amazon Redshift cluster in which queries will run.

    • DATABASE - The Amazon Redshift database that you are connecting to.

    " }, + "AthenaProperties":{ + "shape":"PropertyMap", + "documentation":"

    This field is not currently used.

    " + }, "PhysicalConnectionRequirements":{ "shape":"PhysicalConnectionRequirements", "documentation":"

    The physical connection requirements, such as virtual private cloud (VPC) and SecurityGroup, that are needed to make this connection successfully.

    " @@ -6747,6 +6752,10 @@ "shape":"ConnectionProperties", "documentation":"

    These key-value pairs define parameters for the connection.

    " }, + "AthenaProperties":{ + "shape":"PropertyMap", + "documentation":"

    This field is not currently used.

    " + }, "PhysicalConnectionRequirements":{ "shape":"PhysicalConnectionRequirements", "documentation":"

    The physical connection requirements, such as virtual private cloud (VPC) and SecurityGroup, that are needed to successfully make this connection.

    " @@ -17960,6 +17969,16 @@ }, "documentation":"

    Specifies the job and session values that an admin configures in a Glue usage profile.

    " }, + "PropertyKey":{ + "type":"string", + "max":128, + "min":1 + }, + "PropertyMap":{ + "type":"map", + "key":{"shape":"PropertyKey"}, + "value":{"shape":"PropertyValue"} + }, "PropertyPredicate":{ "type":"structure", "members":{ @@ -17978,6 +17997,11 @@ }, "documentation":"

    Defines a property predicate.

    " }, + "PropertyValue":{ + "type":"string", + "max":2048, + "min":1 + }, "PublicKeysList":{ "type":"list", "member":{"shape":"GenericString"}, diff --git a/tools/code-generation/api-descriptions/rds-2014-10-31.normal.json b/tools/code-generation/api-descriptions/rds-2014-10-31.normal.json index 98b62383429..5c727bc9d4b 100644 --- a/tools/code-generation/api-descriptions/rds-2014-10-31.normal.json +++ b/tools/code-generation/api-descriptions/rds-2014-10-31.normal.json @@ -553,7 +553,6 @@ {"shape":"DBClusterNotFoundFault"}, {"shape":"MaxDBShardGroupLimitReached"}, {"shape":"InvalidDBClusterStateFault"}, - {"shape":"InvalidMaxAcuFault"}, {"shape":"UnsupportedDBEngineVersionFault"}, {"shape":"InvalidVPCNetworkStateFault"} ], @@ -2166,8 +2165,7 @@ "errors":[ {"shape":"InvalidDBClusterStateFault"}, {"shape":"DBShardGroupAlreadyExistsFault"}, - {"shape":"DBShardGroupNotFoundFault"}, - {"shape":"InvalidMaxAcuFault"} + {"shape":"DBShardGroupNotFoundFault"} ], "documentation":"

    Modifies the settings of an Aurora Limitless Database DB shard group. You can change one or more settings by specifying these parameters and the new values in the request.

    " }, @@ -2703,7 +2701,7 @@ {"shape":"CertificateNotFoundFault"}, {"shape":"TenantDatabaseQuotaExceededFault"} ], - "documentation":"

    Creates a new DB instance from a DB snapshot. The target database is created from the source database restore point with most of the source's original configuration, including the default security group and DB parameter group. By default, the new DB instance is created as a Single-AZ deployment, except when the instance is a SQL Server instance that has an option group associated with mirroring. In this case, the instance becomes a Multi-AZ deployment, not a Single-AZ deployment.

    If you want to replace your original DB instance with the new, restored DB instance, then rename your original DB instance before you call the RestoreDBInstanceFromDBSnapshot operation. RDS doesn't allow two DB instances with the same name. After you have renamed your original DB instance with a different identifier, then you can pass the original name of the DB instance as the DBInstanceIdentifier in the call to the RestoreDBInstanceFromDBSnapshot operation. The result is that you replace the original DB instance with the DB instance created from the snapshot.

    If you are restoring from a shared manual DB snapshot, the DBSnapshotIdentifier must be the ARN of the shared DB snapshot.

    To restore from a DB snapshot with an unsupported engine version, you must first upgrade the engine version of the snapshot. For more information about upgrading a RDS for MySQL DB snapshot engine version, see Upgrading a MySQL DB snapshot engine version. For more information about upgrading a RDS for PostgreSQL DB snapshot engine version, Upgrading a PostgreSQL DB snapshot engine version.

    This command doesn't apply to Aurora MySQL and Aurora PostgreSQL. For Aurora, use RestoreDBClusterFromSnapshot.

    " + "documentation":"

    Creates a new DB instance from a DB snapshot. The target database is created from the source database restore point with most of the source's original configuration, including the default security group and DB parameter group. By default, the new DB instance is created as a Single-AZ deployment, except when the instance is a SQL Server instance that has an option group associated with mirroring. In this case, the instance becomes a Multi-AZ deployment, not a Single-AZ deployment.

    If you want to replace your original DB instance with the new, restored DB instance, then rename your original DB instance before you call the RestoreDBInstanceFromDBSnapshot operation. RDS doesn't allow two DB instances with the same name. After you have renamed your original DB instance with a different identifier, then you can pass the original name of the DB instance as the DBInstanceIdentifier in the call to the RestoreDBInstanceFromDBSnapshot operation. The result is that you replace the original DB instance with the DB instance created from the snapshot.

    If you are restoring from a shared manual DB snapshot, the DBSnapshotIdentifier must be the ARN of the shared DB snapshot.

    To restore from a DB snapshot with an unsupported engine version, you must first upgrade the engine version of the snapshot. For more information about upgrading an RDS for MySQL DB snapshot engine version, see Upgrading a MySQL DB snapshot engine version. For more information about upgrading an RDS for PostgreSQL DB snapshot engine version, see Upgrading a PostgreSQL DB snapshot engine version.

    This command doesn't apply to Aurora MySQL and Aurora PostgreSQL. For Aurora, use RestoreDBClusterFromSnapshot.

    " }, "RestoreDBInstanceFromS3":{ "name":"RestoreDBInstanceFromS3", @@ -4702,7 +4700,7 @@ }, "DBParameterGroupName":{ "shape":"String", - "documentation":"

    The name of the DB parameter group to associate with this DB instance.

    If you don't specify a value for DBParameterGroupName, then Amazon RDS uses the DBParameterGroup of the source DB instance for a same Region read replica, or the default DBParameterGroup for the specified DB engine for a cross-Region read replica.

    Specifying a parameter group for this operation is only supported for MySQL DB instances for cross-Region read replicas and for Oracle DB instances. It isn't supported for MySQL DB instances for same Region read replicas or for RDS Custom.

    Constraints:

    • Must be 1 to 255 letters, numbers, or hyphens.

    • First character must be a letter.

    • Can't end with a hyphen or contain two consecutive hyphens.

    " + "documentation":"

    The name of the DB parameter group to associate with this read replica DB instance.

    For Single-AZ or Multi-AZ DB instance read replica instances, if you don't specify a value for DBParameterGroupName, then Amazon RDS uses the DBParameterGroup of the source DB instance for a same Region read replica, or the default DBParameterGroup for the specified DB engine for a cross-Region read replica.

    For Multi-AZ DB cluster same Region read replica instances, if you don't specify a value for DBParameterGroupName, then Amazon RDS uses the default DBParameterGroup.

    Specifying a parameter group for this operation is only supported for MySQL DB instances for cross-Region read replicas, for Multi-AZ DB cluster read replica instances, and for Oracle DB instances. It isn't supported for MySQL DB instances for same Region read replicas or for RDS Custom.

    Constraints:

    • Must be 1 to 255 letters, numbers, or hyphens.

    • First character must be a letter.

    • Can't end with a hyphen or contain two consecutive hyphens.

    " }, "PubliclyAccessible":{ "shape":"BooleanOptional", @@ -5035,7 +5033,7 @@ }, "ComputeRedundancy":{ "shape":"IntegerOptional", - "documentation":"

    Specifies whether to create standby instances for the DB shard group. Valid values are the following:

    • 0 - Creates a single, primary DB instance for each physical shard. This is the default value, and the only one supported for the preview.

    • 1 - Creates a primary DB instance and a standby instance in a different Availability Zone (AZ) for each physical shard.

    • 2 - Creates a primary DB instance and two standby instances in different AZs for each physical shard.

    " + "documentation":"

    Specifies whether to create standby DB shard groups for the DB shard group. Valid values are the following:

    • 0 - Creates a DB shard group without a standby DB shard group. This is the default value.

    • 1 - Creates a DB shard group with a standby DB shard group in a different Availability Zone (AZ).

    • 2 - Creates a DB shard group with two standby DB shard groups in two different AZs.

    " }, "MaxACU":{ "shape":"DoubleOptional", @@ -8121,7 +8119,7 @@ }, "ComputeRedundancy":{ "shape":"IntegerOptional", - "documentation":"

    Specifies whether to create standby instances for the DB shard group. Valid values are the following:

    • 0 - Creates a single, primary DB instance for each physical shard. This is the default value, and the only one supported for the preview.

    • 1 - Creates a primary DB instance and a standby instance in a different Availability Zone (AZ) for each physical shard.

    • 2 - Creates a primary DB instance and two standby instances in different AZs for each physical shard.

    " + "documentation":"

    Specifies whether to create standby DB shard groups for the DB shard group. Valid values are the following:

    • 0 - Creates a DB shard group without a standby DB shard group. This is the default value.

    • 1 - Creates a DB shard group with a standby DB shard group in a different Availability Zone (AZ).

    • 2 - Creates a DB shard group with two standby DB shard groups in two different AZs.

    " }, "Status":{ "shape":"String", @@ -8134,6 +8132,10 @@ "Endpoint":{ "shape":"String", "documentation":"

    The connection endpoint for the DB shard group.

    " + }, + "DBShardGroupArn":{ + "shape":"String", + "documentation":"

    The Amazon Resource Name (ARN) for the DB shard group.

    " } } }, @@ -11783,18 +11785,6 @@ }, "exception":true }, - "InvalidMaxAcuFault":{ - "type":"structure", - "members":{ - }, - "documentation":"

    The maximum capacity of the DB shard group must be 48-7168 Aurora capacity units (ACUs).

    ", - "error":{ - "code":"InvalidMaxAcu", - "httpStatusCode":400, - "senderFault":true - }, - "exception":true - }, "InvalidOptionGroupStateFault":{ "type":"structure", "members":{ @@ -12858,6 +12848,10 @@ "MinACU":{ "shape":"DoubleOptional", "documentation":"

    The minimum capacity of the DB shard group in Aurora capacity units (ACUs).

    " + }, + "ComputeRedundancy":{ + "shape":"IntegerOptional", + "documentation":"

    Specifies whether to create standby DB shard groups for the DB shard group. Valid values are the following:

    • 0 - Creates a DB shard group without a standby DB shard group. This is the default value.

    • 1 - Creates a DB shard group with a standby DB shard group in a different Availability Zone (AZ).

    • 2 - Creates a DB shard group with two standby DB shard groups in two different AZs.

    " } } }, diff --git a/tools/code-generation/api-descriptions/resource-explorer-2-2022-07-28.normal.json b/tools/code-generation/api-descriptions/resource-explorer-2-2022-07-28.normal.json index 9559bd53d86..f8625af30e1 100644 --- a/tools/code-generation/api-descriptions/resource-explorer-2-2022-07-28.normal.json +++ b/tools/code-generation/api-descriptions/resource-explorer-2-2022-07-28.normal.json @@ -5,11 +5,13 @@ "endpointPrefix":"resource-explorer-2", "jsonVersion":"1.1", "protocol":"rest-json", + "protocols":["rest-json"], "serviceFullName":"AWS Resource Explorer", "serviceId":"Resource Explorer 2", "signatureVersion":"v4", "signingName":"resource-explorer-2", - "uid":"resource-explorer-2-2022-07-28" + "uid":"resource-explorer-2-2022-07-28", + "auth":["aws.auth#sigv4"] }, "operations":{ "AssociateDefaultView":{ @@ -158,7 +160,7 @@ {"shape":"ThrottlingException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

    Retrieves the status of your account's Amazon Web Services service access, and validates the service linked role required to access the multi-account search feature. Only the management account or a delegated administrator with service access enabled can invoke this API call.

    " + "documentation":"

    Retrieves the status of your account's Amazon Web Services service access, and validates the service-linked role required to access the multi-account search feature. Only the management account can invoke this API call.

    " }, "GetDefaultView":{ "name":"GetDefaultView", @@ -247,6 +249,25 @@ ], "documentation":"

    Retrieves a list of a member's indexes in all Amazon Web Services Regions that are currently collecting resource information for Amazon Web Services Resource Explorer. Only the management account or a delegated administrator with service access enabled can invoke this API call.

    " }, + "ListResources":{ + "name":"ListResources", + "http":{ + "method":"POST", + "requestUri":"/ListResources", + "responseCode":200 + }, + "input":{"shape":"ListResourcesInput"}, + "output":{"shape":"ListResourcesOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"UnauthorizedException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Returns a list of resources and their details that match the specified criteria. This query must use a view. If you don’t explicitly specify a view, then Resource Explorer uses the default view for the Amazon Web Services Region in which you call this operation.

    " + }, "ListSupportedResourceTypes":{ "name":"ListSupportedResourceTypes", "http":{ @@ -903,6 +924,67 @@ } } }, + "ListResourcesInput":{ + "type":"structure", + "members":{ + "Filters":{"shape":"SearchFilter"}, + "MaxResults":{ + "shape":"ListResourcesInputMaxResultsInteger", + "documentation":"

    The maximum number of results that you want included on each page of the response. If you do not include this parameter, it defaults to a value appropriate to the operation. If additional items exist beyond those included in the current response, the NextToken response element is present and has a value (is not null). Include that value as the NextToken request parameter in the next call to the operation to get the next part of the results.

    An API operation can return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

    " + }, + "NextToken":{ + "shape":"ListResourcesInputNextTokenString", + "documentation":"

    The parameter for receiving additional results if you receive a NextToken response in a previous request. A NextToken response indicates that more output is available. Set this parameter to the value of the previous call's NextToken response to indicate where the output should continue from. The pagination tokens expire after 24 hours.

    " + }, + "ViewArn":{ + "shape":"ListResourcesInputViewArnString", + "documentation":"

    Specifies the Amazon resource name (ARN) of the view to use for the query. If you don't specify a value for this parameter, then the operation automatically uses the default view for the Amazon Web Services Region in which you called this operation. If the Region either doesn't have a default view or if you don't have permission to use the default view, then the operation fails with a 401 Unauthorized exception.

    " + } + } + }, + "ListResourcesInputMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":1000, + "min":1 + }, + "ListResourcesInputNextTokenString":{ + "type":"string", + "max":2048, + "min":1 + }, + "ListResourcesInputViewArnString":{ + "type":"string", + "max":1000, + "min":0 + }, + "ListResourcesOutput":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"ListResourcesOutputNextTokenString", + "documentation":"

    If present, indicates that more output is available than is included in the current response. Use this value in the NextToken request parameter in a subsequent call to the operation to get the next part of the output. You should repeat this until the NextToken response element comes back as null. The pagination tokens expire after 24 hours.

    " + }, + "Resources":{ + "shape":"ResourceList", + "documentation":"

    The list of structures that describe the resources that match the query.

    " + }, + "ViewArn":{ + "shape":"ListResourcesOutputViewArnString", + "documentation":"

    The Amazon resource name (ARN) of the view that this operation used to perform the search.

    " + } + } + }, + "ListResourcesOutputNextTokenString":{ + "type":"string", + "max":2048, + "min":1 + }, + "ListResourcesOutputViewArnString":{ + "type":"string", + "max":1011, + "min":1 + }, "ListSupportedResourceTypesInput":{ "type":"structure", "members":{ @@ -1035,7 +1117,7 @@ }, "QueryString":{ "type":"string", - "max":1011, + "max":1280, "min":0, "sensitive":true }, @@ -1072,7 +1154,7 @@ }, "Service":{ "shape":"String", - "documentation":"

    The Amazon Web Service that owns the resource and is responsible for creating and updating it.

    " + "documentation":"

    The Amazon Web Services service that owns the resource and is responsible for creating and updating it.

    " } }, "documentation":"

    A resource in Amazon Web Services that Amazon Web Services Resource Explorer has discovered, and for which it has stored information in the index of the Amazon Web Services Region that contains the resource.

    " @@ -1259,7 +1341,7 @@ }, "Service":{ "shape":"String", - "documentation":"

    The Amazon Web Service that is associated with the resource type. This is the primary service that lets you create and interact with resources of this type.

    " + "documentation":"

    The Amazon Web Services service that is associated with the resource type. This is the primary service that lets you create and interact with resources of this type.

    " } }, "documentation":"

    A structure that describes a resource type supported by Amazon Web Services Resource Explorer.

    "