diff --git a/include/aws/s3/private/s3_buffer_pool.h b/include/aws/s3/private/s3_buffer_pool.h
index c2471d214..431abea75 100644
--- a/include/aws/s3/private/s3_buffer_pool.h
+++ b/include/aws/s3/private/s3_buffer_pool.h
@@ -34,8 +34,9 @@ struct aws_s3_buffer_pool;
 struct aws_s3_buffer_pool_ticket;
 
 struct aws_s3_buffer_pool_usage_stats {
-    /* Max size limit. Same value as provided during creation. */
-    size_t max_size;
+    /* Effective max memory limit: the memory limit value provided during construction,
+     * minus the buffer reserved for the pool's own overhead. */
+    size_t mem_limit;
 
     /* How much mem is used in primary storage. includes memory used by blocks
      * that are waiting on all allocs to release before being put back in circulation. */
diff --git a/source/s3_buffer_pool.c b/source/s3_buffer_pool.c
index 75b9b1c0f..20c5dda82 100644
--- a/source/s3_buffer_pool.c
+++ b/source/s3_buffer_pool.c
@@ -138,13 +138,25 @@ struct aws_s3_buffer_pool *aws_s3_buffer_pool_new(
         return NULL;
     }
 
+    size_t adjusted_mem_lim = mem_limit - s_buffer_pool_reserved_mem;
+
+    if (chunk_size * s_chunks_per_block > adjusted_mem_lim) {
+        AWS_LOGF_ERROR(
+            AWS_LS_S3_CLIENT,
+            "Failed to initialize buffer pool. Chunk size is too large for the memory limit. "
+            "Consider adjusting memory limit or part size.");
+        aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+        return NULL;
+    }
+
     struct aws_s3_buffer_pool *buffer_pool = aws_mem_calloc(allocator, 1, sizeof(struct aws_s3_buffer_pool));
 
     AWS_FATAL_ASSERT(buffer_pool != NULL);
 
     buffer_pool->base_allocator = allocator;
     buffer_pool->chunk_size = chunk_size;
     buffer_pool->block_size = chunk_size * s_chunks_per_block;
+    buffer_pool->mem_limit = adjusted_mem_lim;
     /* Somewhat arbitrary number.
      * Tries to balance between how many allocations use buffer and buffer space
      * being wasted. */
@@ -207,11 +219,8 @@ struct aws_s3_buffer_pool_ticket *aws_s3_buffer_pool_reserve(struct aws_s3_buffe
         return NULL;
     }
 
-    if (size == 0) {
-        AWS_LOGF_ERROR(AWS_LS_S3_CLIENT, "Could not reserve from buffer pool. 0 is not a valid size.");
-        aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
-        return NULL;
-    }
+    AWS_FATAL_ASSERT(size != 0);
+    AWS_FATAL_ASSERT(size <= buffer_pool->mem_limit);
 
     struct aws_s3_buffer_pool_ticket *ticket = NULL;
     aws_mutex_lock(&buffer_pool->mutex);
@@ -396,7 +405,7 @@ struct aws_s3_buffer_pool_usage_stats aws_s3_buffer_pool_get_usage(struct aws_s3
     aws_mutex_lock(&buffer_pool->mutex);
 
     struct aws_s3_buffer_pool_usage_stats ret = (struct aws_s3_buffer_pool_usage_stats){
-        .max_size = buffer_pool->mem_limit,
+        .mem_limit = buffer_pool->mem_limit,
         .primary_allocated = buffer_pool->primary_allocated,
         .primary_used = buffer_pool->primary_used,
         .primary_reserved = buffer_pool->primary_reserved,
diff --git a/source/s3_client.c b/source/s3_client.c
index 492811a00..0d071c212 100644
--- a/source/s3_client.c
+++ b/source/s3_client.c
@@ -81,9 +81,6 @@ static size_t s_dns_host_address_ttl_seconds = 5 * 60;
  * 30 seconds mirrors the value currently used by the Java SDK. */
 static const uint32_t s_default_throughput_failure_interval_seconds = 30;
 
-/* Default multiplier between max part size and memory limit */
-static const size_t s_default_max_part_size_to_mem_lim_multiplier = 4;
-
 /* Amount of time spent idling before trimming buffer.
  */
 static const size_t s_buffer_pool_trim_time_offset_in_s = 5;
@@ -226,10 +223,6 @@ void aws_s3_client_unlock_synced_data(struct aws_s3_client *client) {
     aws_mutex_unlock(&client->synced_data.lock);
 }
 
-static size_t s_default_max_part_size_based_on_mem_limit(size_t mem_lim) {
-    return mem_lim / s_default_max_part_size_to_mem_lim_multiplier;
-}
-
 struct aws_s3_client *aws_s3_client_new(
     struct aws_allocator *allocator,
     const struct aws_s3_client_config *client_config) {
@@ -254,17 +247,6 @@ struct aws_s3_client *aws_s3_client_new(
         return NULL;
     }
 
-    if (client_config->max_part_size != 0 && client_config->memory_limit_in_bytes != 0 &&
-        client_config->max_part_size >
-            s_default_max_part_size_based_on_mem_limit(client_config->memory_limit_in_bytes)) {
-        AWS_LOGF_ERROR(
-            AWS_LS_S3_CLIENT,
-            "Cannot create client from client_config; memory limit should be at least 4 times higher than max part "
-            "size.");
-        aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
-        return NULL;
-    }
-
 #ifdef BYO_CRYPTO
     if (client_config->tls_mode == AWS_MR_TLS_ENABLED && client_config->tls_connection_options == NULL) {
         AWS_LOGF_ERROR(
@@ -310,12 +292,27 @@ struct aws_s3_client *aws_s3_client_new(
 
     client->buffer_pool = aws_s3_buffer_pool_new(allocator, part_size, mem_limit);
 
+    if (client->buffer_pool == NULL) {
+        goto on_early_fail;
+    }
+
+    struct aws_s3_buffer_pool_usage_stats pool_usage = aws_s3_buffer_pool_get_usage(client->buffer_pool);
+
+    if (client_config->max_part_size > pool_usage.mem_limit) {
+        AWS_LOGF_ERROR(
+            AWS_LS_S3_CLIENT,
+            "Cannot create client from client_config; configured max part size should not exceed "
+            "memory limit.");
+        aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+        goto on_early_fail;
+    }
+
     client->vtable = &s_s3_client_default_vtable;
 
     aws_ref_count_init(&client->ref_count, client, (aws_simple_completion_callback *)s_s3_client_start_destroy);
 
     if (aws_mutex_init(&client->synced_data.lock) != AWS_OP_SUCCESS) {
-        goto on_lock_init_fail;
+        goto on_early_fail;
     }
 
     aws_linked_list_init(&client->synced_data.pending_meta_request_work);
@@ -354,8 +351,8 @@ struct aws_s3_client *aws_s3_client_new(
         *((uint64_t *)&client->max_part_size) = s_default_max_part_size;
     }
 
-    if (client_config->max_part_size > s_default_max_part_size_based_on_mem_limit(mem_limit)) {
-        *((uint64_t *)&client->max_part_size) = s_default_max_part_size_based_on_mem_limit(mem_limit);
+    if (client_config->max_part_size > pool_usage.mem_limit) {
+        *((uint64_t *)&client->max_part_size) = pool_usage.mem_limit;
     }
 
     if (client->max_part_size > SIZE_MAX) {
@@ -542,7 +539,7 @@ struct aws_s3_client *aws_s3_client_new(
     aws_event_loop_group_release(client->client_bootstrap->event_loop_group);
     aws_client_bootstrap_release(client->client_bootstrap);
     aws_mutex_clean_up(&client->synced_data.lock);
-on_lock_init_fail:
+on_early_fail:
     aws_mem_release(client->allocator, client);
     return NULL;
 }
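
For reference, the sketch below restates the sizing logic this change introduces as a standalone C program: the effective limit is the configured memory limit minus the pool's reserved overhead, a full block (chunk size times chunks-per-block) must fit under that effective limit, and the client now clamps max_part_size to the effective limit reported by aws_s3_buffer_pool_get_usage() instead of requiring the memory limit to be at least 4x the max part size. This is not the library code: the helper name and the concrete values for the reserved overhead and chunks-per-block are illustrative assumptions, since the real constants live in s3_buffer_pool.c and are not shown in this diff.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define MB_TO_BYTES(mb) ((size_t)(mb) * 1024 * 1024)
#define GB_TO_BYTES(gb) ((size_t)(gb) * 1024 * 1024 * 1024)

/* Illustrative stand-ins for the pool's private constants (values assumed, not taken from this diff). */
static const size_t s_example_reserved_mem = MB_TO_BYTES(128);
static const size_t s_example_chunks_per_block = 16;

/* Mirrors the new validation in aws_s3_buffer_pool_new(): compute the effective limit
 * (configured limit minus reserved overhead) and reject configurations where a full
 * block of chunks cannot fit under it. */
static bool s_example_check_pool_config(size_t chunk_size, size_t mem_limit, size_t *out_effective_limit) {
    if (mem_limit <= s_example_reserved_mem) {
        return false;
    }
    size_t effective_limit = mem_limit - s_example_reserved_mem;
    if (chunk_size * s_example_chunks_per_block > effective_limit) {
        return false;
    }
    *out_effective_limit = effective_limit;
    return true;
}

int main(void) {
    size_t part_size = MB_TO_BYTES(8); /* chunk size handed to the pool */
    size_t mem_limit = GB_TO_BYTES(2); /* configured memory limit */
    size_t effective_limit = 0;

    if (!s_example_check_pool_config(part_size, mem_limit, &effective_limit)) {
        printf("chunk size is too large for the memory limit\n");
        return 1;
    }

    /* The client now clamps max_part_size to the effective limit reported by the pool,
     * rather than requiring the memory limit to be 4x the max part size. */
    size_t max_part_size = GB_TO_BYTES(4);
    if (max_part_size > effective_limit) {
        max_part_size = effective_limit;
    }

    printf("effective limit: %zu bytes, max part size: %zu bytes\n", effective_limit, max_part_size);
    return 0;
}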