Object stats #616

Open · wants to merge 22 commits into base: main
Changes from 15 commits
29 changes: 10 additions & 19 deletions src/snmalloc/backend_helpers/statsrange.h
@@ -17,8 +17,7 @@ namespace snmalloc
{
using ContainsParent<ParentRange>::parent;

- static inline std::atomic<size_t> current_usage{};
- static inline std::atomic<size_t> peak_usage{};
+ static inline Stat usage{};

public:
static constexpr bool Aligned = ParentRange::Aligned;
@@ -29,36 +28,28 @@ namespace snmalloc

constexpr Type() = default;

- CapPtr<void, ChunkBounds> alloc_range(size_t size)
+ capptr::Arena<void> alloc_range(size_t size)
{
-   auto result = parent.alloc_range(size);
-   if (result != nullptr)
-   {
-     auto prev = current_usage.fetch_add(size);
-     auto curr = peak_usage.load();
-     while (curr < prev + size)
-     {
-       if (peak_usage.compare_exchange_weak(curr, prev + size))
-         break;
-     }
-   }
-   return result;
+   auto r = parent.alloc_range(size);
+   if (r != nullptr)
+     usage += size;
+   return r;
}

- void dealloc_range(CapPtr<void, ChunkBounds> base, size_t size)
+ void dealloc_range(capptr::Arena<void> base, size_t size)
{
-   current_usage -= size;
+   usage -= size;
parent.dealloc_range(base, size);
}

size_t get_current_usage()
{
-   return current_usage.load();
+   return usage.get_curr();
}

size_t get_peak_usage()
{
-   return peak_usage.load();
+   return usage.get_peak();
}
};
};
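As a note on the simplification above: the removed fetch_add/compare_exchange_weak sequence is exactly what the new Stat type (added in src/snmalloc/ds_core/stats.h below) performs internally, so the range layer can now record usage with a single call. A minimal sketch, assuming snmalloc's src directory is on the include path; the helper function is illustrative and not part of the PR:

    #include <cstddef>

    #include <snmalloc/ds_core/ds_core.h> // assumed include path; pulls in the new stats.h

    // Illustrative helper only: recording an allocation of `size` bytes
    // through Stat bumps the current value and raises the peak, matching
    // the loop that was previously inlined in StatsRange.
    inline void record_range_alloc(snmalloc::Stat& usage, size_t size)
    {
      usage += size; // fetch_add on curr, then CAS-raise peak if needed
    }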
1 change: 1 addition & 0 deletions src/snmalloc/ds_core/ds_core.h
@@ -15,3 +15,4 @@
#include "ptrwrap.h"
#include "redblacktree.h"
#include "seqset.h"
#include "stats.h"
68 changes: 68 additions & 0 deletions src/snmalloc/ds_core/stats.h
@@ -0,0 +1,68 @@
#include "defines.h"

#include <atomic>
#include <cstddef>

namespace snmalloc
{
/**
* Very basic statistic that tracks current and peak values.
*/
class Stat
{
private:
std::atomic<size_t> curr{0};
std::atomic<size_t> peak{0};

public:
void increase(size_t amount)
{
size_t c = (curr += amount);
size_t p = peak.load(std::memory_order_relaxed);
while (c > p)
{
if (peak.compare_exchange_strong(p, c))
break;
}
}

void decrease(size_t amount)
{
size_t prev = curr.fetch_sub(amount);
// TODO Fix this to be true.
// SNMALLOC_ASSERT_MSG(prev >= amount, "prev = {}, amount = {}",
// prev, amount);
UNUSED(prev);
}

size_t get_curr()
{
return curr.load(std::memory_order_relaxed);
}

size_t get_peak()
{
return peak.load(std::memory_order_relaxed);
}

void operator+=(size_t amount)
{
increase(amount);
}

void operator-=(size_t amount)
{
decrease(amount);
}

void operator++()
{
increase(1);
}

void operator--()
{
decrease(1);
}
};
} // namespace snmalloc
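A quick usage sketch of the class above; the main harness and the <snmalloc/...> include path are assumptions, while the operators and getters are exactly those defined in this file:

    #include <cstdio>

    #include <snmalloc/ds_core/ds_core.h> // assumed include path; brings in Stat

    int main()
    {
      snmalloc::Stat usage;

      usage += 64; // curr = 64, peak = 64
      usage += 32; // curr = 96, peak = 96
      usage -= 80; // curr = 16, peak stays at 96
      ++usage;     // curr = 17

      // Prints: curr=17 peak=96
      printf("curr=%zu peak=%zu\n", usage.get_curr(), usage.get_peak());
      return 0;
    }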
65 changes: 65 additions & 0 deletions src/snmalloc/mem/allocstats.h
@@ -0,0 +1,65 @@
#include "../ds_core/ds_core.h"
#include "sizeclasstable.h"

#include <array>

namespace snmalloc
{
class MonotoneStat
{
size_t value{0};

public:
void operator++(int)
{
value++;
}

void operator+=(const MonotoneStat& other)
{
value += other.value;
}

size_t operator*()
{
return value;
}
};

struct AllocStat
{
MonotoneStat objects_allocated{};
MonotoneStat objects_deallocated{};
MonotoneStat slabs_allocated{};
MonotoneStat slabs_deallocated{};
};

class AllocStats
{
std::array<AllocStat, SIZECLASS_REP_SIZE> sizeclass{};

public:
AllocStat& operator[](sizeclass_t index)
{
auto i = index.raw();
return sizeclass[i];
}

AllocStat& operator[](smallsizeclass_t index)
{
return sizeclass[sizeclass_t::from_small_class(index).raw()];
}

void operator+=(const AllocStats& other)
{
for (size_t i = 0; i < SIZECLASS_REP_SIZE; i++)
{
sizeclass[i].objects_allocated += other.sizeclass[i].objects_allocated;
sizeclass[i].objects_deallocated +=
other.sizeclass[i].objects_deallocated;
sizeclass[i].slabs_allocated += other.sizeclass[i].slabs_allocated;
sizeclass[i].slabs_deallocated += other.sizeclass[i].slabs_deallocated;
}
}
};
} // namespace snmalloc
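To show how the per-sizeclass counters compose, here is a hedged sketch; the umbrella include and the concrete sizeclass index are assumptions, while operator[], operator+= and operator* are as defined above:

    #include <snmalloc/snmalloc.h> // assumption: umbrella header makes AllocStats visible

    void merge_example()
    {
      using namespace snmalloc;

      AllocStats local{};
      AllocStats global{};

      // Record one object and one slab for an arbitrary small sizeclass.
      auto sc = sizeclass_t::from_small_class(3);
      local[sc].objects_allocated++;
      local[sc].slabs_allocated++;

      // Fold thread-local counters into an aggregate, as an allocator
      // tear-down path might do.
      global += local;

      // MonotoneStat exposes its value through operator*.
      size_t objects = *global[sc].objects_allocated; // == 1
      (void)objects;
    }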
47 changes: 38 additions & 9 deletions src/snmalloc/mem/corealloc.h
@@ -111,6 +111,11 @@ namespace snmalloc
*/
Ticker<typename Config::Pal> ticker;

/**
* Tracks this allocator's memory usage.
*/
AllocStats stats;

/**
* The message queue needs to be accessible from other threads
*
@@ -364,6 +369,8 @@ namespace snmalloc
// don't touch the cache lines at this point in snmalloc_check_client.
auto start = clear_slab(meta, sizeclass);

stats[sizeclass].slabs_deallocated++;

Config::Backend::dealloc_chunk(
get_backend_local_state(),
*meta,
@@ -400,6 +407,8 @@ namespace snmalloc
// Remove from set of fully used slabs.
meta->node.remove();

stats[entry.get_sizeclass()].slabs_deallocated++;

Config::Backend::dealloc_chunk(
get_backend_local_state(), *meta, p, size);

@@ -478,14 +487,18 @@ namespace snmalloc
SNMALLOC_FAST_PATH_LAMBDA {
return capptr_domesticate<Config>(local_state, p);
};
- auto cb = [this,
-   &need_post](freelist::HeadPtr msg) SNMALLOC_FAST_PATH_LAMBDA {

+ size_t received_bytes = 0;

+ auto cb = [this, &need_post, &received_bytes](
+   freelist::HeadPtr msg) SNMALLOC_FAST_PATH_LAMBDA {
#ifdef SNMALLOC_TRACING
message<1024>("Handling remote");
#endif

auto& entry =
Config::Backend::template get_metaentry(snmalloc::address_cast(msg));
received_bytes += sizeclass_full_to_size(entry.get_sizeclass());

handle_dealloc_remote(entry, msg.as_void(), need_post);

@@ -514,6 +527,9 @@ namespace snmalloc
post();
}

// Push size to global statistics
RemoteDeallocCache::remote_inflight -= received_bytes;

return action(args...);
}

@@ -542,10 +558,7 @@ namespace snmalloc
}
else
{
- if (
-   !need_post &&
-   !attached_cache->remote_dealloc_cache.reserve_space(entry))
-   need_post = true;
+ need_post = attached_cache->remote_dealloc_cache.reserve_space(entry);
attached_cache->remote_dealloc_cache
.template dealloc<sizeof(CoreAllocator)>(
entry.get_remote()->trunc_id(), p.as_void());
@@ -668,13 +681,14 @@ namespace snmalloc
// pointers
auto& entry =
Config::Backend::template get_metaentry(snmalloc::address_cast(p));
- if (SNMALLOC_LIKELY(dealloc_local_object_fast(entry, p, entropy)))
+ if (SNMALLOC_LIKELY(dealloc_local_object_fast<false>(entry, p, entropy)))
return;

dealloc_local_object_slow(p, entry);
}

- SNMALLOC_FAST_PATH static bool dealloc_local_object_fast(
+ template<bool Statistics = true>
+ SNMALLOC_FAST_PATH bool dealloc_local_object_fast(
const PagemapEntry& entry,
CapPtr<void, capptr::bounds::Alloc> p,
LocalEntropy& entropy)
@@ -695,6 +709,10 @@ namespace snmalloc
// Update the head and the next pointer in the free list.
meta->free_queue.add(cp, key, entropy);

if constexpr (Statistics)
{
stats[entry.get_sizeclass()].objects_deallocated++;
}
return SNMALLOC_LIKELY(!meta->return_object());
}

@@ -741,6 +759,7 @@ namespace snmalloc
}

auto r = finish_alloc<zero_mem, Config>(p, sizeclass);
stats[sizeclass].objects_allocated++;
return ticker.check_tick(r);
}
return small_alloc_slow<zero_mem>(sizeclass, fast_free_list);
@@ -813,6 +832,9 @@ namespace snmalloc
}

auto r = finish_alloc<zero_mem, Config>(p, sizeclass);

stats[sizeclass].objects_allocated++;
stats[sizeclass].slabs_allocated++;
return ticker.check_tick(r);
}

@@ -834,17 +856,19 @@
{
auto p_wild = message_queue().destroy();
auto p_tame = domesticate(p_wild);

size_t received_bytes = 0;
while (p_tame != nullptr)
{
bool need_post = true; // Always going to post, so ignore.
auto n_tame =
p_tame->atomic_read_next(RemoteAllocator::key_global, domesticate);
const PagemapEntry& entry =
Config::Backend::get_metaentry(snmalloc::address_cast(p_tame));
received_bytes += sizeclass_full_to_size(entry.get_sizeclass());
handle_dealloc_remote(entry, p_tame.as_void(), need_post);
p_tame = n_tame;
}
RemoteDeallocCache::remote_inflight -= received_bytes;
}
else
{
@@ -986,6 +1010,11 @@ namespace snmalloc

return debug_is_empty_impl(result);
}

const AllocStats& get_stats()
{
return stats;
}
};

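Finally, a hedged sketch of how a caller might consume the new get_stats() accessor. NUM_SMALL_SIZECLASSES, sizeclass_to_size and the umbrella include come from snmalloc's sizeclass machinery and are assumptions as far as this diff is concerned; the counters and operator* are as defined in allocstats.h above. The snapshot is taken by value because the accessors shown above are non-const:

    #include <cstdio>

    #include <snmalloc/snmalloc.h> // assumption: makes AllocStats and sizeclass helpers visible

    // Illustrative only: summarise live small objects from a stats snapshot,
    // e.g. one copied out of CoreAllocator::get_stats().
    void print_live_objects(snmalloc::AllocStats stats)
    {
      using namespace snmalloc;

      for (smallsizeclass_t sc = 0; sc < NUM_SMALL_SIZECLASSES; sc++)
      {
        size_t allocated = *stats[sc].objects_allocated;
        size_t deallocated = *stats[sc].objects_deallocated;
        if (allocated == 0)
          continue;

        // Objects handed out from this sizeclass and not yet returned.
        printf(
          "size %zu: %zu live objects\n",
          sizeclass_to_size(sc),
          allocated - deallocated);
      }
    }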