LLVM: Fix MemoryManager1 Memory Outage
elad335 committed Jan 10, 2025
1 parent b468d67 commit 18d7e3b
Showing 1 changed file with 108 additions and 25 deletions.
133 changes: 108 additions & 25 deletions Utilities/JITLLVM.cpp
@@ -173,19 +173,27 @@ struct JITAnnouncer : llvm::JITEventListener
 // Simple memory manager
 struct MemoryManager1 : llvm::RTDyldMemoryManager
 {
-	// 256 MiB for code or data
-	static constexpr u64 c_max_size = 0x20000000 / 2;
+	// 4GiB for code or data
+	static constexpr u64 c_max_size = 0x1'0000'0000;
 
 	// Allocation unit (2M)
 	static constexpr u64 c_page_size = 2 * 1024 * 1024;
 
-	// Reserve 512 MiB
-	u8* const ptr = static_cast<u8*>(utils::memory_reserve(c_max_size * 2));
+	// Reserve 4GiB blocks
+	std::vector<void*> m_code_mems;
+	std::vector<void*> m_data_ro_mems;
+	std::vector<void*> m_data_rw_mems;
 
 	u64 code_ptr = 0;
-	u64 data_ptr = c_max_size;
+	u64 data_ro_ptr = 0;
+	u64 data_rw_ptr = 0;
 
-	MemoryManager1() = default;
+	MemoryManager1() noexcept
+	{
+		m_code_mems.emplace_back(memory_reserve_4GiB());
+		m_data_ro_mems.emplace_back(memory_reserve_4GiB());
+		m_data_rw_mems.emplace_back(memory_reserve_4GiB());
+	}
 
 	MemoryManager1(const MemoryManager1&) = delete;
 
@@ -194,7 +202,55 @@ struct MemoryManager1 : llvm::RTDyldMemoryManager
 	~MemoryManager1() override
 	{
 		// Hack: don't release to prevent reuse of address space, see jit_announce
-		utils::memory_decommit(ptr, c_max_size * 2);
+		for (auto ptr : m_code_mems)
+		{
+			if (m_code_mems.back() == ptr)
+			{
+				utils::memory_decommit(ptr, utils::align(code_ptr % c_max_size, c_page_size));
+				break;
+			}
+
+			utils::memory_decommit(ptr, c_max_size);
+		}
+
+		for (auto ptr : m_data_ro_mems)
+		{
+			if (m_data_ro_mems.back() == ptr)
+			{
+				utils::memory_decommit(ptr, utils::align(data_ro_ptr % c_max_size, c_page_size));
+				break;
+			}
+
+			utils::memory_decommit(ptr, c_max_size);
+		}
+
+		for (auto ptr : m_data_rw_mems)
+		{
+			if (m_data_rw_mems.back() == ptr)
+			{
+				utils::memory_decommit(ptr, utils::align(data_rw_ptr % c_max_size, c_page_size));
+				break;
+			}
+
+			utils::memory_decommit(ptr, c_max_size);
+		}
 	}
 
+	// Start with a random offset for faster reserving
+	void* m_memory_pointer_sample = reinterpret_cast<void*>((utils::get_unique_tsc() / 256 + 0x2) << 32);
+
+	u8* memory_reserve_4GiB()
+	{
+		for (u64 addr = reinterpret_cast<u64>(m_memory_pointer_sample) + c_max_size; addr < 0x8000'0000'0000; addr += c_max_size)
+		{
+			if (auto ptr = utils::memory_reserve(c_max_size, reinterpret_cast<void*>(addr)))
+			{
+				m_memory_pointer_sample = ptr;
+				return static_cast<u8*>(ptr);
+			}
+		}
+
+		fmt::throw_exception("Failed to reserve vm memory");
+	}
+
 	llvm::JITSymbol findSymbol(const std::string& name) override
llvm::JITSymbol findSymbol(const std::string& name) override
@@ -214,45 +270,72 @@ struct MemoryManager1 : llvm::RTDyldMemoryManager
 		return {addr, llvm::JITSymbolFlags::Exported};
 	}
 
-	u8* allocate(u64& oldp, uptr size, uint align, utils::protection prot)
+	u8* allocate(u64& alloc_pos, std::vector<void*>& blocks_holder, uptr size, u64 align, utils::protection prot)
 	{
-		if (align > c_page_size)
+		align = std::max<u64>(align, 0x10);
+
+		const u64 sizea = utils::align(size, align);
+
+		if (!size || align > c_page_size || sizea > c_max_size || sizea < size)
 		{
-			jit_log.fatal("Unsupported alignment (size=0x%x, align=0x%x)", size, align);
+			jit_log.fatal("Unsupported size/alignment (size=0x%x, align=0x%x)", size, align);
 			return nullptr;
 		}
 
-		const u64 olda = utils::align(oldp, align);
-		const u64 newp = utils::align(olda + size, align);
+		const u64 oldp = alloc_pos;
 
-		if ((newp - 1) / c_max_size != oldp / c_max_size)
+		u64 olda = utils::align(oldp, align);
+
+		ensure(olda >= oldp);
+		ensure(olda < ~sizea);
+
+		u64 newp = olda + sizea;
+
+		if ((newp - 1) / c_max_size != (oldp - 1) / c_max_size)
 		{
-			jit_log.fatal("Out of memory (size=0x%x, align=0x%x)", size, align);
-			return nullptr;
+			if (newp / c_max_size > blocks_holder.size())
+			{
+				// Does not work for relocations, needs more robust solution
+				fmt::throw_exception("Out of memory (size=0x%x, align=0x%x)", size, align);
+				blocks_holder.emplace_back(utils::memory_reserve(c_max_size));
+			}
+
+			olda = utils::align(oldp, c_max_size);
+
+			ensure(olda >= oldp);
+			ensure(olda < ~sizea);
+
+			newp = olda + sizea;
 		}
 
-		if ((oldp - 1) / c_page_size != (newp - 1) / c_page_size)
+		// Update allocation counter
+		alloc_pos = newp;
+
+		if ((newp - 1) / c_page_size != (oldp - 1) / c_page_size)
 		{
 			// Allocate pages on demand
-			const u64 pagea = utils::align(oldp, c_page_size);
+			const u64 pagea = utils::align(olda, c_page_size);
 			const u64 psize = utils::align(newp - pagea, c_page_size);
-			utils::memory_commit(this->ptr + pagea, psize, prot);
+			utils::memory_commit(reinterpret_cast<u8*>(::at32(blocks_holder, pagea / c_max_size)) + (pagea % c_max_size), psize, prot);
 		}
 
-		// Update allocation counter
-		oldp = newp;
-
-		return this->ptr + olda;
+		return reinterpret_cast<u8*>(::at32(blocks_holder, olda / c_max_size)) + (olda % c_max_size);
 	}
 
 	u8* allocateCodeSection(uptr size, uint align, uint /*sec_id*/, llvm::StringRef /*sec_name*/) override
 	{
-		return allocate(code_ptr, size, align, utils::protection::wx);
+		return allocate(code_ptr, m_code_mems, size, align, utils::protection::wx);
 	}
 
-	u8* allocateDataSection(uptr size, uint align, uint /*sec_id*/, llvm::StringRef /*sec_name*/, bool /*is_ro*/) override
+	u8* allocateDataSection(uptr size, uint align, uint /*sec_id*/, llvm::StringRef /*sec_name*/, bool is_ro) override
 	{
-		return allocate(data_ptr, size, align, utils::protection::rw);
+		if (is_ro)
+		{
+			// Let's use RW for now
+			return allocate(data_ro_ptr, m_data_ro_mems, size, align, utils::protection::rw);
+		}
+
+		return allocate(data_rw_ptr, m_data_rw_mems, size, align, utils::protection::rw);
 	}
 
 	bool finalizeMemory(std::string* = nullptr) override
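
The reworked allocate() is a bump allocator over 4 GiB reserved blocks: it aligns the global allocation cursor, jumps to the start of the next block when a request would straddle a block boundary, and commits 2 MiB pages only as they are first touched. Below is a minimal standalone sketch of that scheme, not part of the commit: bump_allocator, reserve_block() and commit_pages() are hypothetical stand-ins that only simulate addresses and log commits instead of calling utils::memory_reserve()/utils::memory_commit(), and the sketch reserves the next block on demand, a growth path the commit itself still blocks with an out-of-memory exception (see its "needs more robust solution" comment).

// Sketch: bump allocation out of fixed-size "reserved" blocks with on-demand
// page commit, mirroring the structure of MemoryManager1::allocate().
// Addresses are simulated; nothing is actually mapped. Size/alignment
// validation is omitted; align must be a power of two, as for LLVM sections.
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

using u64 = std::uint64_t;

constexpr u64 c_max_size  = 0x1'0000'0000;   // 4 GiB per reserved block
constexpr u64 c_page_size = 2 * 1024 * 1024; // 2 MiB commit granularity

constexpr u64 align_up(u64 value, u64 align) { return (value + align - 1) & ~(align - 1); }

struct bump_allocator
{
	std::vector<u64> blocks; // simulated base addresses of reserved 4 GiB blocks
	u64 alloc_pos = 0;       // bump cursor, measured across all blocks

	u64 reserve_block() { return 0x2'0000'0000 + blocks.size() * c_max_size; } // stand-in for utils::memory_reserve

	void commit_pages(u64 addr, u64 size) // stand-in for utils::memory_commit
	{
		std::printf("commit [0x%llx, +0x%llx)\n", (unsigned long long)addr, (unsigned long long)size);
	}

	u64 allocate(u64 size, u64 align)
	{
		align = std::max<u64>(align, 0x10);
		const u64 sizea = align_up(size, align);
		const u64 oldp = alloc_pos;

		u64 olda = align_up(oldp, align);
		u64 newp = olda + sizea;

		// Would the aligned range straddle a 4 GiB block boundary?
		// (oldp == 0 also lands here, which reserves the very first block.)
		if ((newp - 1) / c_max_size != (oldp - 1) / c_max_size)
		{
			// Skip ahead to the start of the next block and reserve it if needed
			// (the commit itself still throws here instead of growing)
			olda = align_up(oldp, c_max_size);
			newp = olda + sizea;

			while (blocks.size() <= olda / c_max_size)
			{
				blocks.push_back(reserve_block());
			}
		}

		alloc_pos = newp;

		// Commit any 2 MiB pages this allocation touches for the first time
		if ((newp - 1) / c_page_size != (oldp - 1) / c_page_size)
		{
			const u64 pagea = align_up(olda, c_page_size);
			const u64 psize = align_up(newp - pagea, c_page_size);
			commit_pages(blocks[pagea / c_max_size] + pagea % c_max_size, psize);
		}

		return blocks[olda / c_max_size] + olda % c_max_size;
	}
};

int main()
{
	bump_allocator code;
	const u64 p1 = code.allocate(0x1000, 16);       // commits the first 2 MiB page
	const u64 p2 = code.allocate(0x400000, 0x1000); // commits the next two 2 MiB pages
	std::printf("p1=0x%llx p2=0x%llx\n", (unsigned long long)p1, (unsigned long long)p2);
}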

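memory_reserve_4GiB() probes upward through the address space in 4 GiB steps, starting from a TSC-derived sample address, and keeps the first address at which the OS grants a reservation. Here is a Linux-only sketch of the same probing idea, not part of the commit and assuming kernel >= 4.17: mmap with PROT_NONE, MAP_NORESERVE and MAP_FIXED_NOREPLACE stands in for utils::memory_reserve(size, addr), failing instead of relocating when the requested range is already taken.

// Sketch: reserve (not commit) a 4 GiB region at the first free 4 GiB-stepped
// address at or above start_addr. Linux-only (MAP_FIXED_NOREPLACE, kernel >= 4.17).
#include <sys/mman.h>
#include <cstdint>
#include <cstdio>

constexpr std::uint64_t c_max_size = 0x1'0000'0000; // 4 GiB

void* reserve_4gib(std::uint64_t start_addr)
{
	for (std::uint64_t addr = start_addr; addr < 0x8000'0000'0000; addr += c_max_size)
	{
		// PROT_NONE + MAP_NORESERVE: address space only, no physical backing yet;
		// MAP_FIXED_NOREPLACE: fail (EEXIST) instead of mapping somewhere else
		void* ptr = mmap(reinterpret_cast<void*>(addr), c_max_size, PROT_NONE,
			MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED_NOREPLACE, -1, 0);

		if (ptr != MAP_FAILED)
		{
			return ptr; // reserved exactly at addr
		}
	}

	return nullptr;
}

int main()
{
	std::printf("reserved at %p\n", reserve_4gib(0x2'0000'0000));
}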