Skip to content

Commit

Permalink
lint, fix docs
Browse files Browse the repository at this point in the history
  • Loading branch information
DmitriyMusatkin committed Nov 21, 2023
1 parent 1630751 commit 0028246
Show file tree
Hide file tree
Showing 2 changed files with 32 additions and 22 deletions.
29 changes: 19 additions & 10 deletions include/aws/s3/private/s3_buffer_pool.h
Original file line number Diff line number Diff line change
Expand Up @@ -10,9 +10,22 @@

/*
* S3 buffer pool.
* Buffer pool used for pooling part sized buffers for Put/Get.
* Provides additional functionally for limiting overall memory usage by setting
* upper bound beyond reservations will fail.
* Buffer pool used for pooling part sized buffers for Put/Get operations.
* Provides additional functionality for limiting overall memory used.
* High-level buffer pool usage flow:
* - Create buffer with overall memory limit and common buffer size, aka chunk
* size (typically part size configured on client)
* - For each request:
* -- call reserve to acquire a ticket for a future buffer acquisition. This
* marks the memory as reserved, but does not allocate it. If the reserve call
* hits the memory limit, it fails and a reservation hold is placed on the whole
* buffer pool. (aws_s3_buffer_pool_remove_reservation_hold can be used to
* remove the reservation hold.)
* -- once the request needs memory, it can exchange the ticket for a buffer
* using aws_s3_buffer_pool_acquire_buffer. This operation never fails, even if
* it ends up going over the memory limit.
* -- the buffer's lifetime is tied to the ticket, so once the request is done
* with the buffer, the ticket is released and the buffer returns to the pool.
*/

AWS_EXTERN_C_BEGIN
Expand Down Expand Up @@ -43,13 +56,8 @@ struct aws_s3_buffer_pool_usage_stats {

/*
* Create new buffer pool.
* Buffer pool controls overall amount of memory that can be used on buffers and
* it is split into primary and secondary storage.
* Primary storage allocates big blocks consisting of several chunks and reuses
* those blocks for successive buffer acquires.
* Secondary storage delegates buffer acquires directly to system allocators.
* chunk_size - specifies the size of memory that will most commonly be acquired
* from the pool (typically part size).
* from the pool (typically part size).
* mem_limit - limit on how much memory the buffer pool can use. Once the limit
* is hit, buffers can no longer be reserved (a reservation hold is placed on
* the pool).
* Returns buffer pool pointer on success and NULL on failure.
Expand All @@ -66,7 +74,8 @@ AWS_S3_API struct aws_s3_buffer_pool *aws_s3_buffer_pool_new(
AWS_S3_API void aws_s3_buffer_pool_destroy(struct aws_s3_buffer_pool *buffer_pool);

/*
* Best effort way to reserve some memory for later use.
* Reserves memory from the pool for later use.
* Best effort and can potentially reserve memory slightly over the limit.
* Reservation takes some memory out of the available pool, but does not
* allocate it right away.
* On success ticket will be returned.
Expand Down
25 changes: 13 additions & 12 deletions source/s3_buffer_pool.c
Original file line number Diff line number Diff line change
Expand Up @@ -17,16 +17,16 @@
* of big allocations, performance impact is not that bad, but something we need
* to look into on the next iteration.
*
* Basic approach is to divide acquires into primary and secondary.
* Basic approach is to divide acquires into primary and secondary.
* User provides chunk size during construction. Acquires below 4 * chunk_size
* are done from primary and the rest are from secondary.
*
*
* Primary storage consists of blocks that are each s_chunks_per_block *
* chunk_size in size. blocks are created on demand as needed.
* chunk_size in size. blocks are created on demand as needed.
* Acquire operation from primary basically works by determining how many chunks
* are needed and then finding available space in existing blocks or creating a
* new block. Acquire will always take over the whole chunk, so some space is
* likely wasted.
* likely wasted.
* Ex. say chunk_size is 8mb and s_chunks_per_block is 16, which makes block size 128mb.
* acquires up to 32mb will be done from primary. So 1 block can hold 4 buffers
* of 32mb (4 chunks) or 16 buffers of 8mb (1 chunk). If requested buffer size
Expand All @@ -41,7 +41,7 @@ struct aws_s3_buffer_pool_ticket {
};

/* Default size for blocks array. Note: this is just for meta info, blocks
* themselves are not preallocated s*/
* themselves are not preallocated. */
static size_t s_block_list_initial_capacity = 5;

/* Amount of mem reserved for use outside of buffer pool.
Expand Down Expand Up @@ -84,7 +84,7 @@ struct s3_buffer_pool_block {
/*
* Sets n bits at position starting with LSB.
* Note: n must be at most 8, but in practice will always be at most 4.
* position + n should at most be 16
* position + n should at most be 16
*/
static inline uint16_t s_set_bits(uint16_t num, size_t position, size_t n) {
AWS_PRECONDITION(n <= 8);
Expand All @@ -96,7 +96,7 @@ static inline uint16_t s_set_bits(uint16_t num, size_t position, size_t n) {
/*
* Clears n bits at position starting with LSB.
* Note: n must be at most 8, but in practice will always be at most 4.
* position + n should at most be 16
* position + n should at most be 16
*/
static inline uint16_t s_clear_bits(uint16_t num, size_t position, size_t n) {
AWS_PRECONDITION(n <= 8);
Expand All @@ -108,7 +108,7 @@ static inline uint16_t s_clear_bits(uint16_t num, size_t position, size_t n) {
/*
* Checks whether n bits are set at position starting with LSB.
* Note: n must be at most 8, but in practice will always be at most 4.
* position + n should at most be 16
* position + n should at most be 16
*/
static inline bool s_check_bits(uint16_t num, size_t position, size_t n) {
AWS_PRECONDITION(n <= 8);
Expand Down Expand Up @@ -248,8 +248,10 @@ struct aws_s3_buffer_pool_ticket *aws_s3_buffer_pool_reserve(struct aws_s3_buffe

if (ticket == NULL) {
AWS_LOGF_TRACE(
AWS_LS_S3_CLIENT, "Memory limit reached while trying to allocate buffer of size %zu. "
"Putting new buffer reservations on hold...", size);
AWS_LS_S3_CLIENT,
"Memory limit reached while trying to allocate buffer of size %zu. "
"Putting new buffer reservations on hold...",
size);
aws_raise_error(AWS_ERROR_S3_EXCEEDS_MEMORY_LIMIT);
}
return ticket;
Expand All @@ -266,8 +268,7 @@ void aws_s3_buffer_pool_remove_reservation_hold(struct aws_s3_buffer_pool *buffe
buffer_pool->has_reservation_hold = false;
}

static uint8_t *s_primary_acquire_synced(struct aws_s3_buffer_pool *buffer_pool,
size_t size, size_t *out_chunks_used) {
static uint8_t *s_primary_acquire_synced(struct aws_s3_buffer_pool *buffer_pool, size_t size, size_t *out_chunks_used) {
uint8_t *alloc_ptr = NULL;

size_t chunks_needed = size / buffer_pool->chunk_size;
Expand Down

0 comments on commit 0028246

Please sign in to comment.