dqn: Add ASAN poisoning
parent faf229c17f
commit 2740987956
@@ -14,6 +14,7 @@
#define STBSP__ASAN __declspec(no_sanitize_address)
#endif

#define DQN_ASAN_POISON 1
#define DQN_NO_CHECK_BREAK
#define DQN_IMPLEMENTATION
#include "dqn.h"
@@ -40,28 +41,30 @@ static Dqn_UTest Dqn_Test_Arena()

DQN_UTEST_TEST("Reused memory is zeroed out") {

Dqn_usize size = DQN_KILOBYTES(128);
Dqn_Arena arena = {};
Dqn_Arena_Grow(&arena, size, /*commit*/ size, /*flags*/ 0);
uint8_t alignment = 1;
Dqn_usize alloc_size = DQN_KILOBYTES(128);
Dqn_MemBlockSizeRequiredResult size_required = Dqn_MemBlock_SizeRequired(nullptr, alloc_size, alignment, Dqn_MemBlockFlag_Nil);
Dqn_Arena arena = {};
Dqn_Arena_Grow(&arena, size_required.block_size, /*commit*/ size_required.block_size, /*flags*/ 0);

// NOTE: Allocate 128 kilobytes, fill it with garbage, then reset the arena
uintptr_t first_ptr_address = 0;
{
Dqn_ArenaTempMemory temp_mem = Dqn_Arena_BeginTempMemory(&arena);
void *ptr = Dqn_Arena_Alloc(&arena, size, 1, Dqn_ZeroMem_Yes);
void *ptr = Dqn_Arena_Alloc(&arena, alloc_size, alignment, Dqn_ZeroMem_Yes);
first_ptr_address = DQN_CAST(uintptr_t)ptr;
DQN_MEMSET(ptr, 'z', size);
DQN_MEMSET(ptr, 'z', alloc_size);
Dqn_Arena_EndTempMemory(temp_mem, false /*cancel*/);
}

// NOTE: Reallocate 128 kilobytes
char *ptr = DQN_CAST(char *)Dqn_Arena_Alloc(&arena, size, 1, Dqn_ZeroMem_Yes);
char *ptr = DQN_CAST(char *)Dqn_Arena_Alloc(&arena, alloc_size, alignment, Dqn_ZeroMem_Yes);

// NOTE: Double check we got the same pointer
DQN_UTEST_ASSERT(&test, first_ptr_address == DQN_CAST(uintptr_t)ptr);

// NOTE: Check that the bytes are set to 0
for (Dqn_usize i = 0; i < size; i++)
for (Dqn_usize i = 0; i < alloc_size; i++)
DQN_UTEST_ASSERT(&test, ptr[i] == 0);
Dqn_Arena_Free(&arena);
}
@@ -1647,6 +1650,15 @@ static Dqn_UTest Dqn_Test_VArray()
DQN_UTEST_ASSERT(&test, array.size == DQN_ARRAY_UCOUNT(array_literal));
DQN_UTEST_ASSERT(&test, DQN_MEMCMP(array.data, array_literal, DQN_ARRAY_UCOUNT(array_literal) * sizeof(array_literal[0])) == 0);
}

DQN_UTEST_TEST("Test adding an array of items after erase") {
uint32_t array_literal[] = {0, 1, 2, 3};
Dqn_VArray_Add<uint32_t>(&array, array_literal, DQN_ARRAY_UCOUNT(array_literal));

uint32_t expected_literal[] = {14, 6, 0, 1, 2, 3};
DQN_UTEST_ASSERT(&test, array.size == DQN_ARRAY_UCOUNT(expected_literal));
DQN_UTEST_ASSERT(&test, DQN_MEMCMP(array.data, expected_literal, DQN_ARRAY_UCOUNT(expected_literal) * sizeof(expected_literal[0])) == 0);
}
}

DQN_UTEST_TEST("Array of unaligned objects are contiguously laid out in memory") {
@@ -50,4 +50,6 @@ pushd Build
REM set zig_cmd=zig c++ %zig_compile_flags%
REM powershell -Command "$time = Measure-Command { !zig_cmd! | Out-Default }; Write-Host '[BUILD] zig:'$time.TotalSeconds's'; exit $LASTEXITCODE" || exit /b 1
REM )

exit /b 1
popd
dqn.h
@@ -256,6 +256,14 @@
// [$TCTX] Dqn_ThreadContext | | Per-thread data structure e.g. temp arenas

// NOTE: Additional Configuration
// - When compiling with ASAN, set this macro to `1` to enable poisoning of the
// memory allocated and freed by memory arenas in the library. By default this
// is set to `0`. By enabling this, all allocations will be guarded by a page,
// before and after the returned pointer. All allocations will be aligned to
// and padded to at least DQN_ASAN_POISON_ALIGNMENT (e.g. 8 bytes).
//
// DQN_ASAN_POISON 1
//
// - Define this macro to record allocation stats for arenas used in the
// thread context. The thread context arena stats can be printed by using
// Dqn_Library_DumpThreadContextArenaStat.
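For example, a program that wants poisoned arenas defines the macro ahead of the implementation include, exactly as the unit-test translation unit in this commit does, and builds with the compiler's usual ASAN switch (a minimal sketch; `main.cpp` is a placeholder file name, not part of the library):

    // clang++ -fsanitize=address main.cpp    (MSVC: cl /fsanitize=address main.cpp)
    #define DQN_ASAN_POISON 1   // defaults to 0; opts in to arena poisoning and guard pages
    #define DQN_IMPLEMENTATION
    #include "dqn.h"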
@@ -389,7 +389,7 @@ template <typename T> T * Dqn_List_Add (Dqn_List<T> *list, T const &v
DQN_API template <typename T> Dqn_VArray<T> Dqn_VArray_InitByteSize(Dqn_Arena *arena, Dqn_usize byte_size)
{
Dqn_VArray<T> result = {};
result.block = Dqn_Arena_Grow(arena, byte_size, 0 /*commit*/, Dqn_MemBlockFlag_ArenaPrivate);
result.block = Dqn_Arena_Grow(arena, byte_size, 0 /*commit*/, Dqn_MemBlockFlag_ArenaPrivate | Dqn_MemBlockFlag_AllocsAreContiguous);
result.data = DQN_CAST(T *)Dqn_MemBlock_Alloc(result.block, /*size*/ 0, alignof(T), Dqn_ZeroMem_No);
result.max = (result.block->size - result.block->used) / sizeof(T);
return result;
@@ -474,6 +474,7 @@ DQN_API template <typename T> void Dqn_VArray_EraseRange(Dqn_VArray<T> *array, D
DQN_MEMCPY(dest, src, (end - src) * sizeof(T));
}
array->size -= erase_count;
Dqn_MemBlock_Pop(array->block, erase_count * sizeof(T));
}
}

@@ -30,6 +30,15 @@
} while (0)
#endif

#if !defined(DQN_ASAN_POISON)
#define DQN_ASAN_POISON 0
#endif

#if !defined(DQN_ASAN_POISON_ALIGNMENT)
#define DQN_ASAN_POISON_ALIGNMENT 8
#endif

// NOTE: [$CALL] Dqn_CallSite ======================================================================
struct Dqn_CallSite
{
@@ -10,6 +10,8 @@
#include "b_stacktrace.h"

// NOTE: [$OS_H] OS Headers ========================================================================
#include <sanitizer/asan_interface.h>

#if defined(DQN_OS_WIN32)
#pragma comment(lib, "bcrypt")
#pragma comment(lib, "wininet")
dqn_memory.cpp
@@ -1,4 +1,6 @@
// NOTE: [$ALLO] Dqn_Allocator =====================================================================
#include <ios>
#include <sanitizer/asan_interface.h>
DQN_API void *Dqn_Allocator_Alloc(Dqn_Allocator allocator, size_t size, uint8_t align, Dqn_ZeroMem zero_mem)
{
void *result = NULL;
@@ -165,27 +167,59 @@ DQN_API int Dqn_VMem_Protect(void *ptr, Dqn_usize size, uint32_t page_flags)
}

// NOTE: [$MEMB] Dqn_MemBlock ======================================================================
DQN_API Dqn_usize Dqn_MemBlock_MetadataSize(uint8_t flags)
DQN_API Dqn_MemBlockSizeRequiredResult Dqn_MemBlock_SizeRequired(Dqn_MemBlock const *block, Dqn_usize size, uint8_t alignment, uint32_t flags)
{
Dqn_usize result = sizeof(Dqn_MemBlock);
if (flags & Dqn_MemBlockFlag_PageGuard)
result = g_dqn_library->os_page_size;
DQN_ASSERT(alignment > 0 && Dqn_IsPowerOfTwo(alignment));

Dqn_MemBlockSizeRequiredResult result = {};
result.alloc_size = size;
Dqn_MemBlockFlag block_flags = DQN_CAST(Dqn_MemBlockFlag)((block ? block->flags : 0) | flags);
uint8_t ptr_alignment = alignment;

if (DQN_ASAN_POISON) {
// NOTE: Guard a page after with poisoned memory. The first allocation
// is always guarded with poison-ed memory to prevent read/writes behind
// the block of memory.
if ((block_flags & Dqn_MemBlockFlag_AllocsAreContiguous) == 0) {
result.alloc_size = Dqn_AlignUpPowerOfTwo(size + g_dqn_library->os_page_size, DQN_ASAN_POISON_ALIGNMENT);
}
ptr_alignment = DQN_MAX(alignment, DQN_ASAN_POISON_ALIGNMENT);
}

if (block) {
uintptr_t address = DQN_CAST(uintptr_t)block->data + block->used;
uintptr_t next_address = Dqn_AlignUpPowerOfTwo(address, ptr_alignment);
result.data_offset = next_address - DQN_CAST(uintptr_t)block->data;
Dqn_usize new_used = result.data_offset + result.alloc_size;
result.block_size = new_used - block->used;
} else {
result.block_size = result.alloc_size + (ptr_alignment - 1);
}

return result;
}

DQN_API Dqn_MemBlock *Dqn_MemBlock_Init(Dqn_usize reserve, Dqn_usize commit, uint8_t flags)
Dqn_usize Dqn_MemBlock_MetadataSize()
{
DQN_ASSERTF(g_dqn_library->os_page_size, "Library needs to be initialised by calling Dqn_Library_Init()");
Dqn_usize init_poison_page = DQN_ASAN_POISON ? g_dqn_library->os_page_size : 0;
Dqn_usize poison_alignment = DQN_ASAN_POISON ? DQN_ASAN_POISON_ALIGNMENT : 0;
Dqn_usize result = Dqn_AlignUpPowerOfTwo(sizeof(Dqn_MemBlock), poison_alignment) + init_poison_page;
return result;
}

DQN_API Dqn_MemBlock *Dqn_MemBlock_Init(Dqn_usize reserve, Dqn_usize commit, uint32_t flags)
{
DQN_ASSERTF(g_dqn_library->os_page_size, "Library needs to be initialised by calling Dqn_Library_Init()");
DQN_ASSERTF(Dqn_IsPowerOfTwo(g_dqn_library->os_page_size), "Invalid page size");
DQN_ASSERTF((flags & ~Dqn_MemBlockFlag_All) == 0, "Invalid flag combination, must adhere to Dqn_MemBlockFlags");

if (reserve == 0)
return nullptr;

Dqn_usize metadata_size = Dqn_MemBlock_MetadataSize(flags);
Dqn_usize reserve_aligned = Dqn_AlignUpPowerOfTwo(reserve + metadata_size, g_dqn_library->os_page_size);
Dqn_usize commit_aligned = Dqn_AlignUpPowerOfTwo(commit + metadata_size, g_dqn_library->os_page_size);
commit_aligned = DQN_MIN(commit_aligned, reserve_aligned);
Dqn_usize metadata_size = Dqn_MemBlock_MetadataSize();
Dqn_usize reserve_aligned = Dqn_AlignUpPowerOfTwo(metadata_size + reserve, g_dqn_library->os_page_size);
Dqn_usize commit_aligned = Dqn_AlignUpPowerOfTwo(metadata_size + commit, g_dqn_library->os_page_size);
commit_aligned = DQN_MIN(commit_aligned, reserve_aligned);

// NOTE: Avoid 1 syscall by committing on reserve if amounts are equal
Dqn_VMemCommit commit_on_reserve = commit_aligned == reserve_aligned ? Dqn_VMemCommit_Yes : Dqn_VMemCommit_No;

@@ -198,55 +232,90 @@ DQN_API Dqn_MemBlock *Dqn_MemBlock_Init(Dqn_usize reserve, Dqn_usize commit, uin
result->data = DQN_CAST(uint8_t *)result + metadata_size;
result->size = reserve_aligned - metadata_size;
result->commit = commit_aligned - metadata_size;
result->flags = flags;
result->flags = DQN_CAST(uint8_t)flags;

// NOTE: Guard pages
if (flags & Dqn_MemBlockFlag_PageGuard)
Dqn_VMem_Protect(result->data, commit_aligned, Dqn_VMemPage_ReadWrite | Dqn_VMemPage_Guard);
if (DQN_ASAN_POISON) { // NOTE: Poison (guard page + entire block), we unpoison as we allocate
DQN_ASSERT(Dqn_IsPowerOfTwoAligned(result->data, DQN_ASAN_POISON_ALIGNMENT));
DQN_ASSERT(Dqn_IsPowerOfTwoAligned(result->size, DQN_ASAN_POISON_ALIGNMENT));
void *poison_ptr = DQN_CAST(void *)Dqn_AlignUpPowerOfTwo(DQN_CAST(char *)result + sizeof(Dqn_MemBlock), DQN_ASAN_POISON_ALIGNMENT);
ASAN_POISON_MEMORY_REGION(poison_ptr, g_dqn_library->os_page_size + result->size);
}
}
return result;
}

DQN_API void *Dqn_MemBlock_Alloc(Dqn_MemBlock *block, Dqn_usize size, uint8_t alignment, Dqn_ZeroMem zero_mem)
{
DQN_ASSERT(zero_mem == Dqn_ZeroMem_Yes || zero_mem == Dqn_ZeroMem_No);

void *result = nullptr;
if (!block)
return result;

DQN_ASSERT(Dqn_IsPowerOfTwo(alignment));
Dqn_usize aligned_used = Dqn_AlignUpPowerOfTwo(block->used, alignment);
Dqn_usize new_used = aligned_used + size;
Dqn_MemBlockSizeRequiredResult size_required = Dqn_MemBlock_SizeRequired(block, size, alignment, Dqn_MemBlockFlag_Nil);
Dqn_usize new_used = size_required.data_offset + size_required.alloc_size;
if (new_used > block->size)
return result;

result = DQN_CAST(char *)block->data + aligned_used;
result = DQN_CAST(char *)block->data + size_required.data_offset;
block->used = new_used;
DQN_ASSERT(Dqn_IsPowerOfTwoAligned(result, alignment));

if (DQN_ASAN_POISON) {
ASAN_UNPOISON_MEMORY_REGION(result, size);
}

if (zero_mem == Dqn_ZeroMem_Yes) {
Dqn_usize reused_bytes = DQN_MIN(block->commit - aligned_used, size);
Dqn_usize reused_bytes = DQN_MIN(block->commit - size_required.data_offset, size);
DQN_MEMSET(result, DQN_MEMSET_BYTE, reused_bytes);
}

if (block->commit < block->used) {
Dqn_usize commit_size = Dqn_AlignUpPowerOfTwo(block->used - block->commit, g_dqn_library->os_page_size);
void *commit_ptr = (void *)Dqn_AlignUpPowerOfTwo((char *)block->data + block->commit, g_dqn_library->os_page_size);
block->commit += commit_size - Dqn_MemBlock_MetadataSize(block->flags);
block->commit += commit_size;
Dqn_VMem_Commit(commit_ptr, commit_size, Dqn_VMemPage_ReadWrite);
DQN_ASSERT(block->commit <= block->size);
}

if (block->flags & Dqn_MemBlockFlag_PageGuard)
Dqn_VMem_Protect(result, size, Dqn_VMemPage_ReadWrite);

return result;
}

DQN_API void Dqn_MemBlock_Free(Dqn_MemBlock *block)
{
if (block) {
Dqn_usize release_size = block->size + Dqn_MemBlock_MetadataSize(block->flags);
Dqn_VMem_Release(block, release_size);
if (!block)
return;
Dqn_usize release_size = block->size + Dqn_MemBlock_MetadataSize();
if (DQN_ASAN_POISON) {
ASAN_UNPOISON_MEMORY_REGION(block, release_size);
}
Dqn_VMem_Release(block, release_size);
}

DQN_API void Dqn_MemBlock_Pop(Dqn_MemBlock *block, Dqn_usize size)
{
if (!block)
return;
Dqn_usize size_adjusted = DQN_MIN(size, block->used);
Dqn_usize to = block->used - size_adjusted;
Dqn_MemBlock_PopTo(block, to);
}

DQN_API void Dqn_MemBlock_PopTo(Dqn_MemBlock *block, Dqn_usize to)
{
if (!block || to >= block->used)
return;

if (DQN_ASAN_POISON) {
// TODO(doyle): The poison API takes addresses that are 8 byte aligned
// so there are gaps here if we are dealing with objects that aren't 8
// byte aligned unfortunately.
void *poison_ptr = DQN_CAST(void *)Dqn_AlignUpPowerOfTwo(DQN_CAST(char *)block->data + to, DQN_ASAN_POISON_ALIGNMENT);
void *end_ptr = DQN_CAST(char *)block->data + block->used;
uintptr_t bytes_to_poison = DQN_CAST(uintptr_t)end_ptr - DQN_CAST(uintptr_t)poison_ptr;
ASAN_POISON_MEMORY_REGION(poison_ptr, bytes_to_poison);
}
block->used = to;
}

// NOTE: [$AREN] Dqn_Arena =========================================================================
@@ -318,7 +387,9 @@ DQN_API void *Dqn_Arena_Alloc(Dqn_Arena *arena, Dqn_usize size, uint8_t align, D
arena->curr = arena->curr->next;

if (!arena->curr) {
if (!Dqn_Arena_Grow(arena, size, size /*commit*/, 0 /*flags*/))
Dqn_MemBlockSizeRequiredResult size_required = Dqn_MemBlock_SizeRequired(nullptr, size, align, Dqn_MemBlockFlag_Nil);
Dqn_usize block_size = size_required.block_size;
if (!Dqn_Arena_Grow(arena, block_size, block_size, Dqn_MemBlockFlag_Nil))
return result;
DQN_ASSERT(arena->curr);
}
@@ -390,13 +461,10 @@ DQN_API void Dqn_Arena_EndTempMemory(Dqn_ArenaTempMemory temp_memory, bool cance
return;

// NOTE: Revert the current block to the temp_memory's current block
arena->blocks = temp_memory.blocks;
arena->head = temp_memory.head;
arena->curr = temp_memory.curr;
if (arena->curr) {
Dqn_MemBlock *curr = arena->curr;
curr->used = temp_memory.curr_used;
}
arena->blocks = temp_memory.blocks;
arena->head = temp_memory.head;
arena->curr = temp_memory.curr;
Dqn_MemBlock_PopTo(arena->curr, temp_memory.curr_used);

// NOTE: Free the tail blocks until we reach the temp_memory's tail block
while (arena->tail != temp_memory.tail) {
@@ -411,7 +479,7 @@ DQN_API void Dqn_Arena_EndTempMemory(Dqn_ArenaTempMemory temp_memory, bool cance

// NOTE: Reset the usage of all the blocks between the tail and current block's
for (Dqn_MemBlock *block = arena->tail; block && (block != arena->curr); block = block->prev)
block->used = 0;
Dqn_MemBlock_PopTo(block, 0);
}

Dqn_ArenaTempMemoryScope::Dqn_ArenaTempMemoryScope(Dqn_Arena *arena)
dqn_memory.h
@@ -59,17 +59,64 @@ DQN_API void Dqn_VMem_Release (void *ptr, Dqn_usize size);
DQN_API int Dqn_VMem_Protect (void *ptr, Dqn_usize size, uint32_t page_flags);

// NOTE: [$MEMB] Dqn_MemBlock ======================================================================
// Encapsulates allocation of objects from a raw block of memory by bumping a
// pointer in the block. For example, our memory arenas are implemented as
// light wrappers over chained memory blocks, and our arrays backed by virtual
// memory take memory blocks.
//
// One pattern we take advantage of under this design is that our virtual arrays
// can ask an arena for a memory block and sub-allocate its contiguous items
// from it. Since the arena created the memory block, the array's lifetime is
// bound to the arena which could also be managing a bunch of other allocations
// with the same lifetime.
//
// This provides an advantage over creating a specific arena for that array that
// is configured not to grow or chain (in order to adhere to the contiguous
// layout requirement), thus limiting the arena to that 1 specific use case.
//
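A rough sketch of that pattern, mirroring what Dqn_VArray_InitByteSize does elsewhere in this commit (the variable names and sizes below are illustrative only):

    // The arena owns the block; the caller sub-allocates contiguous items from it.
    Dqn_MemBlock *block = Dqn_Arena_Grow(arena, DQN_KILOBYTES(64), 0 /*commit*/,
                                          Dqn_MemBlockFlag_ArenaPrivate | Dqn_MemBlockFlag_AllocsAreContiguous);
    int          *items = Dqn_MemBlock_NewArray(block, int, 128, Dqn_ZeroMem_Yes);
    // Releasing the arena releases the block and, with it, every item allocated from it.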
// NOTE: API =======================================================================================
// @proc Dqn_MemBlock_SizeRequired
// @desc Calculate various size metrics about how many bytes it'd take to
// allocate a pointer from the given block. The size of the allocation is
// treated as one object and the padding and page-guards are applied
// accordingly to that one object.
//
// If you are trying to determine how many bytes are required for `N` distinct
// objects then you must multiply the result of this function by `N` to
// account for the per-item page-guard padding.
//
// @param `block` Pass in the block you wish to allocate from to calculate
// size metrics for. You may pass in `null` to calculate how many bytes are
// needed to `Dqn_MemBlock_Init` a fresh block capable of allocating the size
// requested.
//
// @param `flags` The `Dqn_MemBlockFlag`s to apply in the calculation. Various
// features may influence the sizes required for allocating the requested
// amount of bytes. If `block` is passed in, the flags will be OR'ed together
// to determine the flags to account for.
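As a concrete example, the updated "Reused memory is zeroed out" unit test in this commit sizes a fresh arena block with this function (sketch; the surrounding test fixture is omitted):

    // Passing nullptr for `block` reports the size a brand-new block would need.
    Dqn_MemBlockSizeRequiredResult size_required =
        Dqn_MemBlock_SizeRequired(nullptr, DQN_KILOBYTES(128), 1 /*alignment*/, Dqn_MemBlockFlag_Nil);
    // Grow the arena by exactly that many bytes, poison padding and alignment included.
    Dqn_Arena_Grow(&arena, size_required.block_size, size_required.block_size /*commit*/, 0 /*flags*/);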

enum Dqn_MemBlockFlag
{
Dqn_MemBlockFlag_PageGuard = 1 << 0,
Dqn_MemBlockFlag_ArenaPrivate = 1 << 1,
Dqn_MemBlockFlag_Nil = 0,
Dqn_MemBlockFlag_ArenaPrivate = 1 << 0,

// Enforce that adjacent allocations from this block are contiguous in
// memory (as long as the alignment used between allocations is
// consistent).
//
// This flag is currently only used when ASAN memory poison-ing is enabled
// via `DQN_ASAN_POISON`. In this mode all allocations are sandwiched with a
// page's worth of poison-ed memory, breaking the contiguous requirement of
// arrays. Passing this flag will stop the block from padding pointers with
// poison.
Dqn_MemBlockFlag_AllocsAreContiguous = 1 << 1,

// If leak tracing is enabled, this flag will allow the allocation recorded
// from the reserve call to be leaked, e.g. not printed when leaks are
// dumped to the console.
Dqn_MemBlockFlag_AllocRecordLeakPermitted = 1 << 2,
Dqn_MemBlockFlag_All = Dqn_MemBlockFlag_PageGuard |
Dqn_MemBlockFlag_ArenaPrivate |
Dqn_MemBlockFlag_All = Dqn_MemBlockFlag_ArenaPrivate |
Dqn_MemBlockFlag_AllocsAreContiguous |
Dqn_MemBlockFlag_AllocRecordLeakPermitted,
};

@@ -84,10 +131,31 @@ struct Dqn_MemBlock
uint8_t flags;
};

DQN_API Dqn_usize Dqn_MemBlock_MetadataSize(uint8_t flags);
DQN_API Dqn_MemBlock *Dqn_MemBlock_Init (Dqn_usize reserve, Dqn_usize commit, uint8_t flags);
DQN_API void *Dqn_MemBlock_Alloc (Dqn_MemBlock *block, Dqn_usize size, uint8_t alignment, Dqn_ZeroMem zero_mem);
DQN_API void Dqn_MemBlock_Free (Dqn_MemBlock *block);
struct Dqn_MemBlockSizeRequiredResult
{
// Offset from the block's data pointer that the allocation will start at.
// If `block` was null then this is always set to 0.
Dqn_usize data_offset;

// How many bytes will be allocated for the amount requested by the user.
// This is usually the same as the number requested except when ASAN
// poison-ing is enabled. In that case, the pointer will be padded at the
// end with a page's worth of poison-ed memory.
Dqn_usize alloc_size;

// How many bytes of space is needed in a block for allocating the requested
// pointer. This may differ from the allocation size depending on additional
// alignment requirements *and* whether or not ASAN poison-ing is required.
Dqn_usize block_size;
};
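As a concrete illustration of the three fields under the implementation in this commit: for a fresh block (`block == nullptr`), a 64 byte request with 8 byte alignment and poisoning disabled comes back as `data_offset == 0`, `alloc_size == 64` and `block_size == 64 + (8 - 1) == 71`; with `DQN_ASAN_POISON` enabled, `alloc_size` additionally grows by roughly a page so the allocation can be trailed by poisoned guard memory.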

DQN_API Dqn_usize Dqn_MemBlock_MetadataSize(uint8_t flags);
DQN_API Dqn_MemBlockSizeRequiredResult Dqn_MemBlock_SizeRequired(Dqn_MemBlock const *block, Dqn_usize size, uint8_t alignment, uint32_t flags);
DQN_API Dqn_MemBlock * Dqn_MemBlock_Init (Dqn_usize reserve, Dqn_usize commit, uint32_t flags);
DQN_API void * Dqn_MemBlock_Alloc (Dqn_MemBlock *block, Dqn_usize size, uint8_t alignment, Dqn_ZeroMem zero_mem);
DQN_API void Dqn_MemBlock_Free (Dqn_MemBlock *block);
DQN_API void Dqn_MemBlock_Pop (Dqn_MemBlock *block, Dqn_usize size);
DQN_API void Dqn_MemBlock_PopTo (Dqn_MemBlock *block, Dqn_usize to);

#define Dqn_MemBlock_New(block, Type, zero_mem) (Type *)Dqn_MemBlock_Alloc(block, sizeof(Type), alignof(Type), zero_mem)
#define Dqn_MemBlock_NewArray(block, Type, count, zero_mem) (Type *)Dqn_MemBlock_Alloc(block, sizeof(Type) * count, alignof(Type), zero_mem)
@@ -126,7 +194,7 @@ DQN_API void Dqn_MemBlock_Free (Dqn_MemBlock *block);
// @proc Dqn_Arena_Alloc, Dqn_Arena_New, Dqn_Arena_NewArray,
// Dqn_Arena_NewArrayWithBlock,
// @desc Alloc bytes/objects
// `Alloc` allocates bytes
// `New` allocates an object
// `NewArray` allocates an array of objects
// `NewArrayWithBlock` allocates an array of objects from the given memory 'block'
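A short usage sketch of these helpers (`Vec3` is a placeholder type, not part of the library):

    Dqn_Arena arena = {};
    Vec3 *one  = Dqn_Arena_New(&arena, Vec3, Dqn_ZeroMem_Yes);         // one zero-initialised object
    Vec3 *many = Dqn_Arena_NewArray(&arena, Vec3, 16, Dqn_ZeroMem_No); // array of 16, left uninitialised
    Vec3 *copy = Dqn_Arena_NewCopy(&arena, Vec3, one);                 // byte-wise copy of `one`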
@@ -241,12 +309,12 @@ enum Dqn_ArenaCommit
};

// NOTE: Allocation ================================================================================
#define Dqn_Arena_New(arena, Type, zero_mem) (Type *)Dqn_Arena_Alloc(arena, sizeof(Type), alignof(Type), zero_mem)
#define Dqn_Arena_NewCopy(arena, Type, src) (Type *)Dqn_Arena_Copy(arena, src, sizeof(*src), alignof(Type))
#define Dqn_Arena_NewCopyZ(arena, Type, src) (Type *)Dqn_Arena_Copy(arena, src, sizeof(*src), alignof(Type))
#define Dqn_Arena_NewArray(arena, Type, count, zero_mem) (Type *)Dqn_Arena_Alloc(arena, sizeof(Type) * count, alignof(Type), zero_mem)
#define Dqn_Arena_NewArrayCopy(arena, Type, src, count) (Type *)Dqn_Arena_Copy(arena, src, sizeof(*src) * count, alignof(Type))
#define Dqn_Arena_NewArrayCopyZ(arena, Type, src, count) (Type *)Dqn_Arena_CopyZ(arena, src, sizeof(*src) * count, alignof(Type))
#define Dqn_Arena_New(arena, Type, zero_mem) (Type *)Dqn_Arena_Alloc(arena, sizeof(Type), alignof(Type), zero_mem)
#define Dqn_Arena_NewCopy(arena, Type, src) (Type *)Dqn_Arena_Copy(arena, (src), sizeof(*src), alignof(Type))
#define Dqn_Arena_NewCopyZ(arena, Type, src) (Type *)Dqn_Arena_Copy(arena, (src), sizeof(*src), alignof(Type))
#define Dqn_Arena_NewArray(arena, Type, count, zero_mem) (Type *)Dqn_Arena_Alloc(arena, sizeof(Type) * (count), alignof(Type), zero_mem)
#define Dqn_Arena_NewArrayCopy(arena, Type, src, count) (Type *)Dqn_Arena_Copy(arena, (src), sizeof(*src) * (count), alignof(Type))
#define Dqn_Arena_NewArrayCopyZ(arena, Type, src, count) (Type *)Dqn_Arena_CopyZ(arena, (src), sizeof(*src) * (count), alignof(Type))

DQN_API Dqn_Allocator Dqn_Arena_Allocator (Dqn_Arena *arena);
DQN_API Dqn_MemBlock * Dqn_Arena_Grow (Dqn_Arena *arena, Dqn_usize size, Dqn_usize commit, uint8_t flags);
@@ -3,10 +3,10 @@
#error "Filesystem APIs requires Windows API, DQN_NO_WIN must not be defined"
#endif
// NOTE: [$FSYS] Dqn_Fs ============================================================================
// NOTE: FS Manipulation =======================================================
// NOTE: FS Manipulation ===========================================================================
// TODO(dqn): We should have a Dqn_String8 interface and a CString interface
//
// NOTE: API ===================================================================
// NOTE: API =======================================================================================
// @proc Dqn_FsDelete
// @desc Delete the item specified at the path. This function *CAN* not delete directories unless
// the directory is empty.