// Generated by the DN single header generator 2025-06-29 00:14:47
|
|
|
|
#define DN_BASE_INC_CPP
|
|
|
|
// DN: Single header generator inlined this file => #include "Base/dn_base.cpp"
|
|
#define DN_BASE_CPP
|
|
|
|
// DN: Single header generator commented out this header => #include "../dn_clangd.h"
|
|
|
|
// NOTE: [$INTR] Intrinsics ////////////////////////////////////////////////////////////////////////
|
|
#if !defined(DN_PLATFORM_ARM64) && !defined(DN_PLATFORM_EMSCRIPTEN)
|
|
#if defined(DN_COMPILER_GCC) || defined(DN_COMPILER_CLANG)
|
|
#include <cpuid.h>
|
|
#endif
|
|
|
|
DN_CPUFeatureDecl g_dn_cpu_feature_decl[DN_CPUFeature_Count];
|
|
|
|
DN_API DN_CPUIDResult DN_CPU_ID(DN_CPUIDArgs args)
|
|
{
|
|
DN_CPUIDResult result = {};
|
|
__cpuidex(result.values, args.eax, args.ecx);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_USize DN_CPU_HasFeatureArray(DN_CPUReport const *report, DN_CPUFeatureQuery *features, DN_USize features_size)
|
|
{
|
|
DN_USize result = 0;
|
|
DN_USize const BITS = sizeof(report->features[0]) * 8;
|
|
for (DN_ForIndexU(feature_index, features_size)) {
|
|
DN_CPUFeatureQuery *query = features + feature_index;
|
|
DN_USize chunk_index = query->feature / BITS;
|
|
DN_USize chunk_bit = query->feature % BITS;
|
|
DN_U64 chunk = report->features[chunk_index];
|
|
query->available = chunk & (1ULL << chunk_bit);
|
|
result += DN_CAST(int) query->available;
|
|
}
|
|
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_CPU_HasFeature(DN_CPUReport const *report, DN_CPUFeature feature)
|
|
{
|
|
DN_CPUFeatureQuery query = {};
|
|
query.feature = feature;
|
|
bool result = DN_CPU_HasFeatureArray(report, &query, 1) == 1;
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_CPU_HasAllFeatures(DN_CPUReport const *report, DN_CPUFeature const *features, DN_USize features_size)
|
|
{
|
|
bool result = true;
|
|
for (DN_USize index = 0; result && index < features_size; index++)
|
|
result &= DN_CPU_HasFeature(report, features[index]);
|
|
return result;
|
|
}
|
|
|
|
DN_API void DN_CPU_SetFeature(DN_CPUReport *report, DN_CPUFeature feature)
|
|
{
|
|
DN_Assert(feature < DN_CPUFeature_Count);
|
|
DN_USize const BITS = sizeof(report->features[0]) * 8;
|
|
DN_USize chunk_index = feature / BITS;
|
|
DN_USize chunk_bit = feature % BITS;
|
|
report->features[chunk_index] |= (1ULL << chunk_bit);
|
|
}
|
|
|
|
DN_API DN_CPUReport DN_CPU_Report()
|
|
{
|
|
DN_CPUReport result = {};
|
|
DN_CPUIDResult fn_0000_[500] = {};
|
|
DN_CPUIDResult fn_8000_[500] = {};
|
|
int const EXTENDED_FUNC_BASE_EAX = 0x8000'0000;
|
|
int const REGISTER_SIZE = sizeof(fn_0000_[0].reg.eax);
|
|
|
|
// NOTE: Query standard/extended numbers ///////////////////////////////////////////////////////
|
|
{
|
|
DN_CPUIDArgs args = {};
|
|
|
|
// NOTE: Query standard function (e.g. eax = 0x0) for function count + cpu vendor
|
|
args = {};
|
|
fn_0000_[0] = DN_CPU_ID(args);
|
|
|
|
// NOTE: Query extended function (e.g. eax = 0x8000'0000) for function count + cpu vendor
|
|
args = {};
|
|
args.eax = DN_CAST(int) EXTENDED_FUNC_BASE_EAX;
|
|
fn_8000_[0] = DN_CPU_ID(args);
|
|
}
|
|
|
|
// NOTE: Extract function count ////////////////////////////////////////////////////////////////
|
|
int const STANDARD_FUNC_MAX_EAX = fn_0000_[0x0000].reg.eax;
|
|
int const EXTENDED_FUNC_MAX_EAX = fn_8000_[0x0000].reg.eax;
|
|
|
|
// NOTE: Enumerate all CPUID results for the known function counts /////////////////////////////
|
|
{
|
|
DN_AssertF((STANDARD_FUNC_MAX_EAX + 1) <= DN_ArrayCountI(fn_0000_),
|
|
"Max standard count is %d",
|
|
STANDARD_FUNC_MAX_EAX + 1);
|
|
DN_AssertF((DN_CAST(DN_ISize) EXTENDED_FUNC_MAX_EAX - EXTENDED_FUNC_BASE_EAX + 1) <= DN_ArrayCountI(fn_8000_),
|
|
"Max extended count is %zu",
|
|
DN_CAST(DN_ISize) EXTENDED_FUNC_MAX_EAX - EXTENDED_FUNC_BASE_EAX + 1);
|
|
|
|
for (int eax = 1; eax <= STANDARD_FUNC_MAX_EAX; eax++) {
|
|
DN_CPUIDArgs args = {};
|
|
args.eax = eax;
|
|
fn_0000_[eax] = DN_CPU_ID(args);
|
|
}
|
|
|
|
for (int eax = EXTENDED_FUNC_BASE_EAX + 1, index = 1; eax <= EXTENDED_FUNC_MAX_EAX; eax++, index++) {
|
|
DN_CPUIDArgs args = {};
|
|
args.eax = eax;
|
|
fn_8000_[index] = DN_CPU_ID(args);
|
|
}
|
|
}
|
|
|
|
// NOTE: Query CPU vendor //////////////////////////////////////////////////////////////////////
|
|
{
|
|
DN_Memcpy(result.vendor + 0, &fn_8000_[0x0000].reg.ebx, REGISTER_SIZE);
|
|
DN_Memcpy(result.vendor + 4, &fn_8000_[0x0000].reg.edx, REGISTER_SIZE);
|
|
DN_Memcpy(result.vendor + 8, &fn_8000_[0x0000].reg.ecx, REGISTER_SIZE);
|
|
}
|
|
|
|
// NOTE: Query CPU brand ///////////////////////////////////////////////////////////////////////
|
|
if (EXTENDED_FUNC_MAX_EAX >= (EXTENDED_FUNC_BASE_EAX + 4)) {
|
|
DN_Memcpy(result.brand + 0, &fn_8000_[0x0002].reg.eax, REGISTER_SIZE);
|
|
DN_Memcpy(result.brand + 4, &fn_8000_[0x0002].reg.ebx, REGISTER_SIZE);
|
|
DN_Memcpy(result.brand + 8, &fn_8000_[0x0002].reg.ecx, REGISTER_SIZE);
|
|
DN_Memcpy(result.brand + 12, &fn_8000_[0x0002].reg.edx, REGISTER_SIZE);
|
|
|
|
DN_Memcpy(result.brand + 16, &fn_8000_[0x0003].reg.eax, REGISTER_SIZE);
|
|
DN_Memcpy(result.brand + 20, &fn_8000_[0x0003].reg.ebx, REGISTER_SIZE);
|
|
DN_Memcpy(result.brand + 24, &fn_8000_[0x0003].reg.ecx, REGISTER_SIZE);
|
|
DN_Memcpy(result.brand + 28, &fn_8000_[0x0003].reg.edx, REGISTER_SIZE);
|
|
|
|
DN_Memcpy(result.brand + 32, &fn_8000_[0x0004].reg.eax, REGISTER_SIZE);
|
|
DN_Memcpy(result.brand + 36, &fn_8000_[0x0004].reg.ebx, REGISTER_SIZE);
|
|
DN_Memcpy(result.brand + 40, &fn_8000_[0x0004].reg.ecx, REGISTER_SIZE);
|
|
DN_Memcpy(result.brand + 44, &fn_8000_[0x0004].reg.edx, REGISTER_SIZE);
|
|
|
|
DN_Assert(result.brand[sizeof(result.brand) - 1] == 0);
|
|
}
|
|
|
|
// NOTE: Query CPU features //////////////////////////////////////////////////////////////////
|
|
for (DN_USize ext_index = 0; ext_index < DN_CPUFeature_Count; ext_index++) {
|
|
bool available = false;
|
|
|
|
// NOTE: Mask bits taken from various manuals
|
|
// - AMD64 Architecture Programmer's Manual, Volumes 1-5
|
|
// - https://en.wikipedia.org/wiki/CPUID#Calling_CPUID
|
|
switch (DN_CAST(DN_CPUFeature) ext_index) {
|
|
case DN_CPUFeature_3DNow: available = (fn_8000_[0x0001].reg.edx & (1 << 31)); break;
|
|
case DN_CPUFeature_3DNowExt: available = (fn_8000_[0x0001].reg.edx & (1 << 30)); break;
|
|
case DN_CPUFeature_ABM: available = (fn_8000_[0x0001].reg.ecx & (1 << 5)); break;
|
|
case DN_CPUFeature_AES: available = (fn_0000_[0x0001].reg.ecx & (1 << 25)); break;
|
|
case DN_CPUFeature_AVX: available = (fn_0000_[0x0001].reg.ecx & (1 << 28)); break;
|
|
case DN_CPUFeature_AVX2: available = (fn_0000_[0x0007].reg.ebx & (1 << 0)); break;
|
|
case DN_CPUFeature_AVX512F: available = (fn_0000_[0x0007].reg.ebx & (1 << 16)); break;
|
|
case DN_CPUFeature_AVX512DQ: available = (fn_0000_[0x0007].reg.ebx & (1 << 17)); break;
|
|
case DN_CPUFeature_AVX512IFMA: available = (fn_0000_[0x0007].reg.ebx & (1 << 21)); break;
|
|
case DN_CPUFeature_AVX512PF: available = (fn_0000_[0x0007].reg.ebx & (1 << 26)); break;
|
|
case DN_CPUFeature_AVX512ER: available = (fn_0000_[0x0007].reg.ebx & (1 << 27)); break;
|
|
case DN_CPUFeature_AVX512CD: available = (fn_0000_[0x0007].reg.ebx & (1 << 28)); break;
|
|
case DN_CPUFeature_AVX512BW: available = (fn_0000_[0x0007].reg.ebx & (1 << 30)); break;
|
|
case DN_CPUFeature_AVX512VL: available = (fn_0000_[0x0007].reg.ebx & (1 << 31)); break;
|
|
case DN_CPUFeature_AVX512VBMI: available = (fn_0000_[0x0007].reg.ecx & (1 << 1)); break;
|
|
case DN_CPUFeature_AVX512VBMI2: available = (fn_0000_[0x0007].reg.ecx & (1 << 6)); break;
|
|
case DN_CPUFeature_AVX512VNNI: available = (fn_0000_[0x0007].reg.ecx & (1 << 11)); break;
|
|
case DN_CPUFeature_AVX512BITALG: available = (fn_0000_[0x0007].reg.ecx & (1 << 12)); break;
|
|
case DN_CPUFeature_AVX512VPOPCNTDQ: available = (fn_0000_[0x0007].reg.ecx & (1 << 14)); break;
|
|
case DN_CPUFeature_AVX5124VNNIW: available = (fn_0000_[0x0007].reg.edx & (1 << 2)); break;
|
|
case DN_CPUFeature_AVX5124FMAPS: available = (fn_0000_[0x0007].reg.edx & (1 << 3)); break;
|
|
case DN_CPUFeature_AVX512VP2INTERSECT: available = (fn_0000_[0x0007].reg.edx & (1 << 8)); break;
|
|
case DN_CPUFeature_AVX512FP16: available = (fn_0000_[0x0007].reg.edx & (1 << 23)); break;
|
|
case DN_CPUFeature_CLZERO: available = (fn_8000_[0x0008].reg.ebx & (1 << 0)); break;
|
|
case DN_CPUFeature_CMPXCHG8B: available = (fn_0000_[0x0001].reg.edx & (1 << 8)); break;
|
|
case DN_CPUFeature_CMPXCHG16B: available = (fn_0000_[0x0001].reg.ecx & (1 << 13)); break;
|
|
case DN_CPUFeature_F16C: available = (fn_0000_[0x0001].reg.ecx & (1 << 29)); break;
|
|
case DN_CPUFeature_FMA: available = (fn_0000_[0x0001].reg.ecx & (1 << 12)); break;
|
|
case DN_CPUFeature_FMA4: available = (fn_8000_[0x0001].reg.ecx & (1 << 16)); break;
|
|
case DN_CPUFeature_FP128: available = (fn_8000_[0x001A].reg.eax & (1 << 0)); break;
|
|
case DN_CPUFeature_FP256: available = (fn_8000_[0x001A].reg.eax & (1 << 2)); break;
|
|
case DN_CPUFeature_FPU: available = (fn_0000_[0x0001].reg.edx & (1 << 0)); break;
|
|
case DN_CPUFeature_MMX: available = (fn_0000_[0x0001].reg.edx & (1 << 23)); break;
|
|
case DN_CPUFeature_MONITOR: available = (fn_0000_[0x0001].reg.ecx & (1 << 3)); break;
|
|
case DN_CPUFeature_MOVBE: available = (fn_0000_[0x0001].reg.ecx & (1 << 22)); break;
|
|
case DN_CPUFeature_MOVU: available = (fn_8000_[0x001A].reg.eax & (1 << 1)); break;
|
|
case DN_CPUFeature_MmxExt: available = (fn_8000_[0x0001].reg.edx & (1 << 22)); break;
|
|
case DN_CPUFeature_PCLMULQDQ: available = (fn_0000_[0x0001].reg.ecx & (1 << 1)); break;
|
|
case DN_CPUFeature_POPCNT: available = (fn_0000_[0x0001].reg.ecx & (1 << 23)); break;
|
|
case DN_CPUFeature_RDRAND: available = (fn_0000_[0x0001].reg.ecx & (1 << 30)); break;
|
|
case DN_CPUFeature_RDSEED: available = (fn_0000_[0x0007].reg.ebx & (1 << 18)); break;
|
|
case DN_CPUFeature_RDTSCP: available = (fn_8000_[0x0001].reg.edx & (1 << 27)); break;
|
|
case DN_CPUFeature_SHA: available = (fn_0000_[0x0007].reg.ebx & (1 << 29)); break;
|
|
case DN_CPUFeature_SSE: available = (fn_0000_[0x0001].reg.edx & (1 << 25)); break;
|
|
case DN_CPUFeature_SSE2: available = (fn_0000_[0x0001].reg.edx & (1 << 26)); break;
|
|
case DN_CPUFeature_SSE3: available = (fn_0000_[0x0001].reg.ecx & (1 << 0)); break;
|
|
case DN_CPUFeature_SSE41: available = (fn_0000_[0x0001].reg.ecx & (1 << 19)); break;
|
|
case DN_CPUFeature_SSE42: available = (fn_0000_[0x0001].reg.ecx & (1 << 20)); break;
|
|
case DN_CPUFeature_SSE4A: available = (fn_8000_[0x0001].reg.ecx & (1 << 6)); break;
|
|
case DN_CPUFeature_SSSE3: available = (fn_0000_[0x0001].reg.ecx & (1 << 9)); break;
|
|
case DN_CPUFeature_TSC: available = (fn_0000_[0x0001].reg.edx & (1 << 4)); break;
|
|
case DN_CPUFeature_TscInvariant: available = (fn_8000_[0x0007].reg.edx & (1 << 8)); break;
|
|
case DN_CPUFeature_VAES: available = (fn_0000_[0x0007].reg.ecx & (1 << 9)); break;
|
|
case DN_CPUFeature_VPCMULQDQ: available = (fn_0000_[0x0007].reg.ecx & (1 << 10)); break;
|
|
case DN_CPUFeature_Count: DN_InvalidCodePath; break;
|
|
}
|
|
|
|
if (available)
|
|
DN_CPU_SetFeature(&result, DN_CAST(DN_CPUFeature) ext_index);
|
|
}
|
|
|
|
return result;
|
|
}
|
|
#endif // !defined(DN_PLATFORM_ARM64) && !defined(DN_PLATFORM_EMSCRIPTEN)
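
// NOTE: Illustrative usage sketch (not part of the library). It assumes an
// x64 target where the DN_CPU_* API above is compiled in, and uses only the
// functions defined in this section:
//
//     DN_CPUReport report = DN_CPU_Report();
//     DN_CPUFeature const wanted[] = {DN_CPUFeature_SSE42, DN_CPUFeature_AVX2};
//     if (DN_CPU_HasAllFeatures(&report, wanted, DN_ArrayCountU(wanted))) {
//         // Safe to dispatch to the SIMD code path
//     }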
|
|
|
|
// NOTE: DN_TicketMutex ////////////////////////////////////////////////////////////////////////////
|
|
DN_API void DN_TicketMutex_Begin(DN_TicketMutex *mutex)
|
|
{
|
|
unsigned int ticket = DN_Atomic_AddU32(&mutex->ticket, 1);
|
|
DN_TicketMutex_BeginTicket(mutex, ticket);
|
|
}
|
|
|
|
DN_API void DN_TicketMutex_End(DN_TicketMutex *mutex)
|
|
{
|
|
DN_Atomic_AddU32(&mutex->serving, 1);
|
|
}
|
|
|
|
DN_API DN_UInt DN_TicketMutex_MakeTicket(DN_TicketMutex *mutex)
|
|
{
|
|
DN_UInt result = DN_Atomic_AddU32(&mutex->ticket, 1);
|
|
return result;
|
|
}
|
|
|
|
DN_API void DN_TicketMutex_BeginTicket(DN_TicketMutex const *mutex, DN_UInt ticket)
|
|
{
|
|
DN_AssertF(mutex->serving <= ticket,
|
|
"Mutex skipped ticket? Was ticket generated by the correct mutex via MakeTicket? ticket = %u, "
|
|
"mutex->serving = %u",
|
|
ticket,
|
|
mutex->serving);
|
|
while (ticket != mutex->serving) {
|
|
// NOTE: Use spinlock intrinsic
|
|
_mm_pause();
|
|
}
|
|
}
|
|
|
|
DN_API bool DN_TicketMutex_CanLock(DN_TicketMutex const *mutex, DN_UInt ticket)
|
|
{
|
|
bool result = (ticket == mutex->serving);
|
|
return result;
|
|
}
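
// NOTE: Illustrative usage sketch (not part of the library); assumes a
// zero-initialised DN_TicketMutex named 'mutex' shared between threads:
//
//     DN_TicketMutex_Begin(&mutex);    // Take a ticket and spin until it is served
//     // ... critical section ...
//     DN_TicketMutex_End(&mutex);
//
// Or take the ticket up-front and poll before committing to the spin:
//
//     DN_UInt ticket = DN_TicketMutex_MakeTicket(&mutex);
//     while (!DN_TicketMutex_CanLock(&mutex, ticket)) { /* do other work */ }
//     DN_TicketMutex_BeginTicket(&mutex, ticket);
//     DN_TicketMutex_End(&mutex);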
|
|
|
|
#if defined(DN_COMPILER_MSVC) || defined(DN_COMPILER_CLANG_CL)
|
|
#if !defined(DN_CRT_SECURE_NO_WARNINGS_PREVIOUSLY_DEFINED)
|
|
#undef _CRT_SECURE_NO_WARNINGS
|
|
#endif
|
|
#endif
|
|
|
|
// NOTE: DN_Bit ////////////////////////////////////////////////////////////////////////////////////
|
|
DN_API void DN_Bit_UnsetInplace(DN_USize *flags, DN_USize bitfield)
|
|
{
|
|
*flags = (*flags & ~bitfield);
|
|
}
|
|
|
|
DN_API void DN_Bit_SetInplace(DN_USize *flags, DN_USize bitfield)
|
|
{
|
|
*flags = (*flags | bitfield);
|
|
}
|
|
|
|
DN_API bool DN_Bit_IsSet(DN_USize bits, DN_USize bits_to_set)
|
|
{
|
|
auto result = DN_CAST(bool)((bits & bits_to_set) == bits_to_set);
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_Bit_IsNotSet(DN_USize bits, DN_USize bits_to_check)
|
|
{
|
|
auto result = !DN_Bit_IsSet(bits, bits_to_check);
|
|
return result;
|
|
}
|
|
|
|
// NOTE: DN_Safe ///////////////////////////////////////////////////////////////////////////////////
|
|
DN_API DN_I64 DN_Safe_AddI64(int64_t a, int64_t b)
|
|
{
|
|
DN_I64 result = DN_CheckF(a <= INT64_MAX - b, "a=%zd, b=%zd", a, b) ? (a + b) : INT64_MAX;
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_I64 DN_Safe_MulI64(int64_t a, int64_t b)
|
|
{
|
|
DN_I64 result = DN_CheckF(a <= INT64_MAX / b, "a=%zd, b=%zd", a, b) ? (a * b) : INT64_MAX;
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_U64 DN_Safe_AddU64(DN_U64 a, DN_U64 b)
|
|
{
|
|
DN_U64 result = DN_CheckF(a <= UINT64_MAX - b, "a=%zu, b=%zu", a, b) ? (a + b) : UINT64_MAX;
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_U64 DN_Safe_SubU64(DN_U64 a, DN_U64 b)
|
|
{
|
|
DN_U64 result = DN_CheckF(a >= b, "a=%zu, b=%zu", a, b) ? (a - b) : 0;
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_U64 DN_Safe_MulU64(DN_U64 a, DN_U64 b)
|
|
{
|
|
DN_U64 result = DN_CheckF(a <= UINT64_MAX / b, "a=%zu, b=%zu", a, b) ? (a * b) : UINT64_MAX;
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_U32 DN_Safe_SubU32(DN_U32 a, DN_U32 b)
|
|
{
|
|
DN_U32 result = DN_CheckF(a >= b, "a=%u, b=%u", a, b) ? (a - b) : 0;
|
|
return result;
|
|
}
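
// NOTE: Illustrative sketch of the saturating behaviour (not part of the
// library); on overflow/underflow the DN_CheckF fails and the result clamps
// to the type's limit instead of wrapping:
//
//     DN_U64 sum = DN_Safe_AddU64(UINT64_MAX - 1, 16); // Overflow  => clamps to UINT64_MAX
//     DN_U64 sub = DN_Safe_SubU64(1, 2);               // Underflow => clamps to 0
//     DN_U32 dif = DN_Safe_SubU32(10, 3);              // In range  => 7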
|
|
|
|
// NOTE: DN_SaturateCastUSizeToI* ////////////////////////////////////////////////////////////
|
|
// INT*_MAX literals will be promoted to the type of uintmax_t as uintmax_t is
|
|
// the highest possible rank (unsigned > signed).
|
|
DN_API int DN_SaturateCastUSizeToInt(DN_USize val)
|
|
{
|
|
int result = DN_Check(DN_CAST(uintmax_t) val <= INT_MAX) ? DN_CAST(int) val : INT_MAX;
|
|
return result;
|
|
}
|
|
|
|
DN_API int8_t DN_SaturateCastUSizeToI8(DN_USize val)
|
|
{
|
|
int8_t result = DN_Check(DN_CAST(uintmax_t) val <= INT8_MAX) ? DN_CAST(int8_t) val : INT8_MAX;
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_I16 DN_SaturateCastUSizeToI16(DN_USize val)
|
|
{
|
|
DN_I16 result = DN_Check(DN_CAST(uintmax_t) val <= INT16_MAX) ? DN_CAST(DN_I16) val : INT16_MAX;
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_I32 DN_SaturateCastUSizeToI32(DN_USize val)
|
|
{
|
|
DN_I32 result = DN_Check(DN_CAST(uintmax_t) val <= INT32_MAX) ? DN_CAST(DN_I32) val : INT32_MAX;
|
|
return result;
|
|
}
|
|
|
|
DN_API int64_t DN_SaturateCastUSizeToI64(DN_USize val)
|
|
{
|
|
int64_t result = DN_Check(DN_CAST(uintmax_t) val <= INT64_MAX) ? DN_CAST(int64_t) val : INT64_MAX;
|
|
return result;
|
|
}
|
|
|
|
// NOTE: DN_SaturateCastUSizeToU* ////////////////////////////////////////////////////////////
|
|
// Both operands are unsigned and the lowest rank operand will be promoted to
|
|
// match the highest rank operand.
|
|
DN_API DN_U8 DN_SaturateCastUSizeToU8(DN_USize val)
|
|
{
|
|
DN_U8 result = DN_Check(val <= UINT8_MAX) ? DN_CAST(DN_U8) val : UINT8_MAX;
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_U16 DN_SaturateCastUSizeToU16(DN_USize val)
|
|
{
|
|
DN_U16 result = DN_Check(val <= UINT16_MAX) ? DN_CAST(DN_U16) val : UINT16_MAX;
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_U32 DN_SaturateCastUSizeToU32(DN_USize val)
|
|
{
|
|
DN_U32 result = DN_Check(val <= UINT32_MAX) ? DN_CAST(DN_U32) val : UINT32_MAX;
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_U64 DN_SaturateCastUSizeToU64(DN_USize val)
|
|
{
|
|
DN_U64 result = DN_Check(DN_CAST(DN_U64) val <= UINT64_MAX) ? DN_CAST(DN_U64) val : UINT64_MAX;
|
|
return result;
|
|
}
|
|
|
|
// NOTE: DN_SaturateCastU64To* ///////////////////////////////////////////////////////////////
|
|
DN_API int DN_SaturateCastU64ToInt(DN_U64 val)
|
|
{
|
|
int result = DN_Check(val <= INT_MAX) ? DN_CAST(int) val : INT_MAX;
|
|
return result;
|
|
}
|
|
|
|
DN_API int8_t DN_SaturateCastU64ToI8(DN_U64 val)
|
|
{
|
|
int8_t result = DN_Check(val <= INT8_MAX) ? DN_CAST(int8_t) val : INT8_MAX;
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_I16 DN_SaturateCastU64ToI16(DN_U64 val)
|
|
{
|
|
DN_I16 result = DN_Check(val <= INT16_MAX) ? DN_CAST(DN_I16) val : INT16_MAX;
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_I32 DN_SaturateCastU64ToI32(DN_U64 val)
|
|
{
|
|
DN_I32 result = DN_Check(val <= INT32_MAX) ? DN_CAST(DN_I32) val : INT32_MAX;
|
|
return result;
|
|
}
|
|
|
|
DN_API int64_t DN_SaturateCastU64ToI64(DN_U64 val)
|
|
{
|
|
int64_t result = DN_Check(val <= INT64_MAX) ? DN_CAST(int64_t) val : INT64_MAX;
|
|
return result;
|
|
}
|
|
|
|
// Both operands are unsigned and the lowest rank operand will be promoted to
|
|
// match the highest rank operand.
|
|
DN_API unsigned int DN_SaturateCastU64ToUInt(DN_U64 val)
|
|
{
|
|
unsigned int result = DN_Check(val <= UINT_MAX) ? DN_CAST(unsigned int) val : UINT_MAX;
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_U8 DN_SaturateCastU64ToU8(DN_U64 val)
|
|
{
|
|
DN_U8 result = DN_Check(val <= UINT8_MAX) ? DN_CAST(DN_U8) val : UINT8_MAX;
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_U16 DN_SaturateCastU64ToU16(DN_U64 val)
|
|
{
|
|
DN_U16 result = DN_Check(val <= UINT16_MAX) ? DN_CAST(DN_U16) val : UINT16_MAX;
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_U32 DN_SaturateCastU64ToU32(DN_U64 val)
|
|
{
|
|
DN_U32 result = DN_Check(val <= UINT32_MAX) ? DN_CAST(DN_U32) val : UINT32_MAX;
|
|
return result;
|
|
}
|
|
|
|
// NOTE: DN_SaturateCastISizeToI* ////////////////////////////////////////////////////////////
|
|
// Both operands are signed so the lowest rank operand will be promoted to
|
|
// match the highest rank operand.
|
|
DN_API int DN_SaturateCastISizeToInt(DN_ISize val)
|
|
{
|
|
DN_Assert(val >= INT_MIN && val <= INT_MAX);
|
|
int result = DN_CAST(int) DN_Clamp(val, INT_MIN, INT_MAX);
|
|
return result;
|
|
}
|
|
|
|
DN_API int8_t DN_SaturateCastISizeToI8(DN_ISize val)
|
|
{
|
|
DN_Assert(val >= INT8_MIN && val <= INT8_MAX);
|
|
int8_t result = DN_CAST(int8_t) DN_Clamp(val, INT8_MIN, INT8_MAX);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_I16 DN_SaturateCastISizeToI16(DN_ISize val)
|
|
{
|
|
DN_Assert(val >= INT16_MIN && val <= INT16_MAX);
|
|
DN_I16 result = DN_CAST(DN_I16) DN_Clamp(val, INT16_MIN, INT16_MAX);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_I32 DN_SaturateCastISizeToI32(DN_ISize val)
|
|
{
|
|
DN_Assert(val >= INT32_MIN && val <= INT32_MAX);
|
|
DN_I32 result = DN_CAST(DN_I32) DN_Clamp(val, INT32_MIN, INT32_MAX);
|
|
return result;
|
|
}
|
|
|
|
DN_API int64_t DN_SaturateCastISizeToI64(DN_ISize val)
|
|
{
|
|
DN_Assert(DN_CAST(int64_t) val >= INT64_MIN && DN_CAST(int64_t) val <= INT64_MAX);
|
|
int64_t result = DN_CAST(int64_t) DN_Clamp(DN_CAST(int64_t) val, INT64_MIN, INT64_MAX);
|
|
return result;
|
|
}
|
|
|
|
// NOTE: DN_SaturateCastISizeToU* ////////////////////////////////////////////////////////////
|
|
// If the value is a negative integer, we clamp to 0. Otherwise, we know that
|
|
// the value is >=0, we can upcast safely to bounds check against the maximum
|
|
// allowed value.
|
|
DN_API unsigned int DN_SaturateCastISizeToUInt(DN_ISize val)
|
|
{
|
|
unsigned int result = 0;
|
|
if (DN_Check(val >= DN_CAST(DN_ISize) 0)) {
|
|
if (DN_Check(DN_CAST(uintmax_t) val <= UINT_MAX))
|
|
result = DN_CAST(unsigned int) val;
|
|
else
|
|
result = UINT_MAX;
|
|
}
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_U8 DN_SaturateCastISizeToU8(DN_ISize val)
|
|
{
|
|
DN_U8 result = 0;
|
|
if (DN_Check(val >= DN_CAST(DN_ISize) 0)) {
|
|
if (DN_Check(DN_CAST(uintmax_t) val <= UINT8_MAX))
|
|
result = DN_CAST(DN_U8) val;
|
|
else
|
|
result = UINT8_MAX;
|
|
}
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_U16 DN_SaturateCastISizeToU16(DN_ISize val)
|
|
{
|
|
DN_U16 result = 0;
|
|
if (DN_Check(val >= DN_CAST(DN_ISize) 0)) {
|
|
if (DN_Check(DN_CAST(uintmax_t) val <= UINT16_MAX))
|
|
result = DN_CAST(DN_U16) val;
|
|
else
|
|
result = UINT16_MAX;
|
|
}
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_U32 DN_SaturateCastISizeToU32(DN_ISize val)
|
|
{
|
|
DN_U32 result = 0;
|
|
if (DN_Check(val >= DN_CAST(DN_ISize) 0)) {
|
|
if (DN_Check(DN_CAST(uintmax_t) val <= UINT32_MAX))
|
|
result = DN_CAST(DN_U32) val;
|
|
else
|
|
result = UINT32_MAX;
|
|
}
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_U64 DN_SaturateCastISizeToU64(DN_ISize val)
|
|
{
|
|
DN_U64 result = 0;
|
|
if (DN_Check(val >= DN_CAST(DN_ISize) 0)) {
|
|
if (DN_Check(DN_CAST(uintmax_t) val <= UINT64_MAX))
|
|
result = DN_CAST(DN_U64) val;
|
|
else
|
|
result = UINT64_MAX;
|
|
}
|
|
return result;
|
|
}
|
|
|
|
// NOTE: DN_SaturateCastI64To* ///////////////////////////////////////////////////////////////
|
|
// Both operands are signed so the lowest rank operand will be promoted to
|
|
// match the highest rank operand.
|
|
DN_API DN_ISize DN_SaturateCastI64ToISize(int64_t val)
|
|
{
|
|
DN_Check(val >= DN_ISIZE_MIN && val <= DN_ISIZE_MAX);
|
|
DN_ISize result = DN_CAST(DN_ISize) DN_Clamp(val, DN_ISIZE_MIN, DN_ISIZE_MAX);
|
|
return result;
|
|
}
|
|
|
|
DN_API int8_t DN_SaturateCastI64ToI8(int64_t val)
|
|
{
|
|
DN_Check(val >= INT8_MIN && val <= INT8_MAX);
|
|
int8_t result = DN_CAST(int8_t) DN_Clamp(val, INT8_MIN, INT8_MAX);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_I16 DN_SaturateCastI64ToI16(int64_t val)
|
|
{
|
|
DN_Check(val >= INT16_MIN && val <= INT16_MAX);
|
|
DN_I16 result = DN_CAST(DN_I16) DN_Clamp(val, INT16_MIN, INT16_MAX);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_I32 DN_SaturateCastI64ToI32(int64_t val)
|
|
{
|
|
DN_Check(val >= INT32_MIN && val <= INT32_MAX);
|
|
DN_I32 result = DN_CAST(DN_I32) DN_Clamp(val, INT32_MIN, INT32_MAX);
|
|
return result;
|
|
}
|
|
|
|
DN_API unsigned int DN_SaturateCastI64ToUInt(int64_t val)
|
|
{
|
|
unsigned int result = 0;
|
|
if (DN_Check(val >= DN_CAST(int64_t) 0)) {
|
|
if (DN_Check(DN_CAST(uintmax_t) val <= UINT_MAX))
|
|
result = DN_CAST(unsigned int) val;
|
|
else
|
|
result = UINT_MAX;
|
|
}
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_ISize DN_SaturateCastI64ToUSize(int64_t val)
|
|
{
|
|
DN_USize result = 0;
|
|
if (DN_Check(val >= DN_CAST(int64_t) 0)) {
|
|
if (DN_Check(DN_CAST(uintmax_t) val <= DN_USIZE_MAX))
|
|
result = DN_CAST(DN_USize) val;
|
|
else
|
|
result = DN_USIZE_MAX;
|
|
}
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_U8 DN_SaturateCastI64ToU8(int64_t val)
|
|
{
|
|
DN_U8 result = 0;
|
|
if (DN_Check(val >= DN_CAST(int64_t) 0)) {
|
|
if (DN_Check(DN_CAST(uintmax_t) val <= UINT8_MAX))
|
|
result = DN_CAST(DN_U8) val;
|
|
else
|
|
result = UINT8_MAX;
|
|
}
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_U16 DN_SaturateCastI64ToU16(int64_t val)
|
|
{
|
|
DN_U16 result = 0;
|
|
if (DN_Check(val >= DN_CAST(int64_t) 0)) {
|
|
if (DN_Check(DN_CAST(uintmax_t) val <= UINT16_MAX))
|
|
result = DN_CAST(DN_U16) val;
|
|
else
|
|
result = UINT16_MAX;
|
|
}
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_U32 DN_SaturateCastI64ToU32(int64_t val)
|
|
{
|
|
DN_U32 result = 0;
|
|
if (DN_Check(val >= DN_CAST(int64_t) 0)) {
|
|
if (DN_Check(DN_CAST(uintmax_t) val <= UINT32_MAX))
|
|
result = DN_CAST(DN_U32) val;
|
|
else
|
|
result = UINT32_MAX;
|
|
}
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_U64 DN_SaturateCastI64ToU64(int64_t val)
|
|
{
|
|
DN_U64 result = 0;
|
|
if (DN_Check(val >= DN_CAST(int64_t) 0)) {
|
|
if (DN_Check(DN_CAST(uintmax_t) val <= UINT64_MAX))
|
|
result = DN_CAST(DN_U64) val;
|
|
else
|
|
result = UINT64_MAX;
|
|
}
|
|
return result;
|
|
}
|
|
|
|
DN_API int8_t DN_SaturateCastIntToI8(int val)
|
|
{
|
|
DN_Check(val >= INT8_MIN && val <= INT8_MAX);
|
|
int8_t result = DN_CAST(int8_t) DN_Clamp(val, INT8_MIN, INT8_MAX);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_I16 DN_SaturateCastIntToI16(int val)
|
|
{
|
|
DN_Check(val >= INT16_MIN && val <= INT16_MAX);
|
|
DN_I16 result = DN_CAST(DN_I16) DN_Clamp(val, INT16_MIN, INT16_MAX);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_U8 DN_SaturateCastIntToU8(int val)
|
|
{
|
|
DN_U8 result = 0;
|
|
if (DN_Check(val >= DN_CAST(DN_ISize) 0)) {
|
|
if (DN_Check(DN_CAST(uintmax_t) val <= UINT8_MAX))
|
|
result = DN_CAST(DN_U8) val;
|
|
else
|
|
result = UINT8_MAX;
|
|
}
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_U16 DN_SaturateCastIntToU16(int val)
|
|
{
|
|
DN_U16 result = 0;
|
|
if (DN_Check(val >= DN_CAST(DN_ISize) 0)) {
|
|
if (DN_Check(DN_CAST(uintmax_t) val <= UINT16_MAX))
|
|
result = DN_CAST(DN_U16) val;
|
|
else
|
|
result = UINT16_MAX;
|
|
}
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_U32 DN_SaturateCastIntToU32(int val)
|
|
{
|
|
static_assert(sizeof(val) <= sizeof(DN_U32), "Sanity check to allow simplifying of casting");
|
|
DN_U32 result = 0;
|
|
if (DN_Check(val >= 0))
|
|
result = DN_CAST(DN_U32) val;
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_U64 DN_SaturateCastIntToU64(int val)
|
|
{
|
|
static_assert(sizeof(val) <= sizeof(DN_U64), "Sanity check to allow simplifying of casting");
|
|
DN_U64 result = 0;
|
|
if (DN_Check(val >= 0))
|
|
result = DN_CAST(DN_U64) val;
|
|
return result;
|
|
}
|
|
|
|
// NOTE: DN_Asan ///////////////////////////////////////////////////////////////////////////////////
|
|
static_assert(DN_IsPowerOfTwoAligned(DN_ASAN_POISON_GUARD_SIZE, DN_ASAN_POISON_ALIGNMENT),
|
|
"ASAN poison guard size must be a power-of-two and aligned to ASAN's alignment"
|
|
"requirement (8 bytes)");
|
|
|
|
DN_API void DN_ASAN_PoisonMemoryRegion(void const volatile *ptr, DN_USize size)
|
|
{
|
|
if (!ptr || !size)
|
|
return;
|
|
|
|
#if DN_HAS_FEATURE(address_sanitizer) || defined(__SANITIZE_ADDRESS__)
|
|
DN_AssertF(DN_IsPowerOfTwoAligned(ptr, 8),
|
|
"Poisoning requires the pointer to be aligned on an 8 byte boundary");
|
|
|
|
__asan_poison_memory_region(ptr, size);
|
|
if (DN_ASAN_VET_POISON) {
|
|
DN_HardAssert(__asan_address_is_poisoned(ptr));
|
|
DN_HardAssert(__asan_address_is_poisoned((char *)ptr + (size - 1)));
|
|
}
|
|
#else
|
|
(void)ptr;
|
|
(void)size;
|
|
#endif
|
|
}
|
|
|
|
DN_API void DN_ASAN_UnpoisonMemoryRegion(void const volatile *ptr, DN_USize size)
|
|
{
|
|
if (!ptr || !size)
|
|
return;
|
|
|
|
#if DN_HAS_FEATURE(address_sanitizer) || defined(__SANITIZE_ADDRESS__)
|
|
__asan_unpoison_memory_region(ptr, size);
|
|
if (DN_ASAN_VET_POISON)
|
|
DN_HardAssert(__asan_region_is_poisoned((void *)ptr, size) == 0);
|
|
#else
|
|
(void)ptr;
|
|
(void)size;
|
|
#endif
|
|
}
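
// NOTE: Illustrative usage sketch (not part of the library); assumes an
// 8-byte aligned buffer. Both calls are no-ops when ASAN is not enabled:
//
//     alignas(8) char buffer[64];
//     DN_ASAN_PoisonMemoryRegion(buffer + 32, 32);   // Accessing the tail now traps under ASAN
//     // ... use buffer[0..31] ...
//     DN_ASAN_UnpoisonMemoryRegion(buffer + 32, 32); // Tail is usable again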
|
|
// DN: Single header generator inlined this file => #include "Base/dn_base_containers.cpp"
|
|
#define DN_CONTAINERS_CPP
|
|
|
|
// DN: Single header generator commented out this header => #include "../dn_base_inc.h"
|
|
|
|
DN_API void *DN_CArray2_MakeArray(void *data, DN_USize *size, DN_USize max, DN_USize data_size, DN_USize make_size, DN_ZeroMem zero_mem)
|
|
{
|
|
void *result = nullptr;
|
|
DN_USize new_size = *size + make_size;
|
|
if (new_size <= max) {
|
|
result = DN_CAST(char *) data + (data_size * size[0]);
|
|
*size = new_size;
|
|
if (zero_mem == DN_ZeroMem_Yes)
|
|
DN_Memset(result, 0, data_size * make_size);
|
|
}
|
|
|
|
return result;
|
|
}
|
|
|
|
DN_API void *DN_CArray2_AddArray(void *data, DN_USize *size, DN_USize max, DN_USize data_size, void *elems, DN_USize elems_count, DN_ArrayAdd add)
|
|
{
|
|
void *result = DN_CArray2_MakeArray(data, size, max, data_size, elems_count, DN_ZeroMem_No);
|
|
if (result) {
|
|
if (add == DN_ArrayAdd_Append) {
|
|
DN_Memcpy(result, elems, elems_count * data_size);
|
|
} else {
|
|
char *move_dest = DN_CAST(char *)data + (elems_count * data_size); // Shift elements forward
|
|
char *move_src = DN_CAST(char *)data;
|
|
DN_Memmove(move_dest, move_src, data_size * size[0]);
|
|
DN_Memcpy(data, elems, data_size * elems_count);
|
|
}
|
|
}
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_CArray2_GrowIfNeededFromPool(void **data, DN_USize size, DN_USize *max, DN_USize data_size, DN_Pool *pool)
|
|
{
|
|
bool result = true;
|
|
if (size >= *max) {
|
|
DN_USize new_max = DN_Max(*max * 2, 8);
|
|
DN_USize bytes_to_alloc = data_size * new_max;
|
|
void *buffer = DN_Pool_NewArray(pool, DN_U8, bytes_to_alloc);
|
|
if (buffer) {
|
|
DN_USize bytes_to_copy = data_size * size;
|
|
DN_Memcpy(buffer, *data, bytes_to_copy);
|
|
DN_Pool_Dealloc(pool, *data);
|
|
*data = buffer;
|
|
*max = new_max;
|
|
} else {
|
|
result = false;
|
|
}
|
|
}
|
|
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_ArrayEraseResult DN_CArray2_EraseRange(void *data, DN_USize *size, DN_USize elem_size, DN_USize begin_index, DN_ISize count, DN_ArrayErase erase)
|
|
{
|
|
DN_ArrayEraseResult result = {};
|
|
if (!data || !size || *size == 0 || count == 0)
|
|
return result;
|
|
|
|
// Compute the range to erase
|
|
DN_USize start = 0, end = 0;
|
|
if (count < 0) {
|
|
DN_USize abs_count = DN_Abs(count);
|
|
start = begin_index >= abs_count ? begin_index - abs_count + 1 : 0;
|
|
end = begin_index >= abs_count ? begin_index + 1 : 0;
|
|
} else {
|
|
start = begin_index;
|
|
end = begin_index + count;
|
|
}
|
|
|
|
// Clamp indices to valid bounds
|
|
start = DN_Min(start, *size);
|
|
end = DN_Min(end, *size);
|
|
|
|
// Erase the range [start, end)
|
|
DN_USize erase_count = end > start ? end - start : 0;
|
|
if (erase_count) {
|
|
char *dest = (char *)data + (elem_size * start);
|
|
char *array_end = (char *)data + (elem_size * *size);
|
|
char *src = dest + (elem_size * erase_count);
|
|
if (erase == DN_ArrayErase_Stable) {
|
|
DN_USize move_size = array_end - src;
|
|
DN_Memmove(dest, src, move_size);
|
|
} else {
|
|
char *unstable_src = array_end - (elem_size * erase_count);
|
|
DN_USize move_size = array_end - unstable_src;
|
|
DN_Memcpy(dest, unstable_src, move_size);
|
|
}
|
|
*size -= erase_count;
|
|
}
|
|
|
|
result.items_erased = erase_count;
|
|
result.it_index = start;
|
|
return result;
|
|
}
|
|
|
|
DN_API void *DN_CSLList_Detach(void **link, void **next)
|
|
{
|
|
void *result = *link;
|
|
if (*link) {
|
|
*link = *next;
|
|
*next = nullptr;
|
|
}
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_Ring_HasSpace(DN_Ring const *ring, DN_U64 size)
|
|
{
|
|
DN_U64 avail = ring->write_pos - ring->read_pos;
|
|
DN_U64 space = ring->size - avail;
|
|
bool result = space >= size;
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_Ring_HasData(DN_Ring const *ring, DN_U64 size)
|
|
{
|
|
DN_U64 data = ring->write_pos - ring->read_pos;
|
|
bool result = data >= size;
|
|
return result;
|
|
}
|
|
|
|
DN_API void DN_Ring_Write(DN_Ring *ring, void const *src, DN_U64 src_size)
|
|
{
|
|
DN_Assert(src_size <= ring->size);
|
|
DN_U64 offset = ring->write_pos % ring->size;
|
|
DN_U64 bytes_before_split = ring->size - offset;
|
|
DN_U64 pre_split_bytes = DN_Min(bytes_before_split, src_size);
|
|
DN_U64 post_split_bytes = src_size - pre_split_bytes;
|
|
void const *pre_split_data = src;
|
|
void const *post_split_data = ((char *)src + pre_split_bytes);
|
|
DN_Memcpy(ring->base + offset, pre_split_data, pre_split_bytes);
|
|
DN_Memcpy(ring->base, post_split_data, post_split_bytes);
|
|
ring->write_pos += src_size;
|
|
}
|
|
|
|
DN_API void DN_Ring_Read(DN_Ring *ring, void *dest, DN_U64 dest_size)
|
|
{
|
|
DN_Assert(dest_size <= ring->size);
|
|
DN_U64 offset = ring->read_pos % ring->size;
|
|
DN_U64 bytes_before_split = ring->size - offset;
|
|
DN_U64 pre_split_bytes = DN_Min(bytes_before_split, dest_size);
|
|
DN_U64 post_split_bytes = dest_size - pre_split_bytes;
|
|
DN_Memcpy(dest, ring->base + offset, pre_split_bytes);
|
|
DN_Memcpy((char *)dest + pre_split_bytes, ring->base, post_split_bytes);
|
|
ring->read_pos += dest_size;
|
|
}
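
// NOTE: Illustrative usage sketch (not part of the library); assumes a
// DN_Ring named 'ring' whose 'base' and 'size' were set up elsewhere. Writes
// and reads wrap transparently across the end of the backing buffer:
//
//     DN_U32 value = 0xCAFE;
//     if (DN_Ring_HasSpace(&ring, sizeof(value)))
//         DN_Ring_Write(&ring, &value, sizeof(value));
//
//     DN_U32 read_back = 0;
//     if (DN_Ring_HasData(&ring, sizeof(read_back)))
//         DN_Ring_Read(&ring, &read_back, sizeof(read_back));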
|
|
|
|
// NOTE: DN_CArray /////////////////////////////////////////////////////////////////////////////////
|
|
template <typename T>
|
|
DN_ArrayEraseResult DN_CArray_EraseRange(T *data, DN_USize *size, DN_USize begin_index, DN_ISize count, DN_ArrayErase erase)
|
|
{
|
|
DN_ArrayEraseResult result = {};
|
|
if (!data || !size || *size == 0 || count == 0)
|
|
return result;
|
|
|
|
DN_AssertF(count != -1, "There's a bug with negative element erases, see the DN_VArray section in dn_docs.cpp");
|
|
|
|
// NOTE: Calculate the end index of the erase range
|
|
DN_ISize abs_count = DN_Abs(count);
|
|
DN_USize end_index = 0;
|
|
if (count < 0) {
|
|
end_index = begin_index - (abs_count - 1);
|
|
if (end_index > begin_index)
|
|
end_index = 0;
|
|
} else {
|
|
end_index = begin_index + (abs_count - 1);
|
|
if (end_index < begin_index)
|
|
end_index = (*size) - 1;
|
|
}
|
|
|
|
// NOTE: Ensure begin_index < one_past_end_index
|
|
if (end_index < begin_index) {
|
|
DN_USize tmp = begin_index;
|
|
begin_index = end_index;
|
|
end_index = tmp;
|
|
}
|
|
|
|
// NOTE: Ensure indexes are within valid bounds
|
|
begin_index = DN_Min(begin_index, *size);
|
|
end_index = DN_Min(end_index, *size - 1);
|
|
|
|
// NOTE: Erase the items in the range [begin_index, one_past_end_index)
|
|
DN_USize one_past_end_index = end_index + 1;
|
|
DN_USize erase_count = one_past_end_index - begin_index;
|
|
if (erase_count) {
|
|
T *end = data + *size;
|
|
T *dest = data + begin_index;
|
|
if (erase == DN_ArrayErase_Stable) {
|
|
T *src = dest + erase_count;
|
|
DN_Memmove(dest, src, (end - src) * sizeof(T));
|
|
} else {
|
|
T *src = end - erase_count;
|
|
DN_Memcpy(dest, src, (end - src) * sizeof(T));
|
|
}
|
|
*size -= erase_count;
|
|
}
|
|
|
|
result.items_erased = erase_count;
|
|
result.it_index = begin_index;
|
|
return result;
|
|
}
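
// NOTE: Illustrative sketch of the stable vs unstable erase semantics (not
// part of the library):
//
//     int data[]    = {1, 2, 3, 4, 5};
//     DN_USize size = 5;
//     // Stable erase of 2 items at index 1 shifts the tail down => {1, 4, 5}
//     DN_CArray_EraseRange(data, &size, /*begin_index*/ 1, /*count*/ 2, DN_ArrayErase_Stable);
//     // An unstable erase instead copies items from the end of the array into
//     // the hole, which moves less memory but does not preserve ordering.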
|
|
|
|
template <typename T>
|
|
T *DN_CArray_MakeArray(T *data, DN_USize *size, DN_USize max, DN_USize count, DN_ZeroMem zero_mem)
|
|
{
|
|
if (!data || !size || count == 0)
|
|
return nullptr;
|
|
|
|
if (!DN_CheckF((*size + count) <= max, "Array is out of space (user requested +%zu items, array has %zu/%zu items)", count, *size, max))
|
|
return nullptr;
|
|
|
|
// TODO: Use placement new? Why doesn't this work?
|
|
T *result = data + *size;
|
|
*size += count;
|
|
if (zero_mem == DN_ZeroMem_Yes)
|
|
DN_Memset(result, 0, sizeof(*result) * count);
|
|
return result;
|
|
}
|
|
|
|
template <typename T>
|
|
T *DN_CArray_InsertArray(T *data, DN_USize *size, DN_USize max, DN_USize index, T const *items, DN_USize count)
|
|
{
|
|
T *result = nullptr;
|
|
if (!data || !size || !items || count <= 0 || ((*size + count) > max))
|
|
return result;
|
|
|
|
DN_USize clamped_index = DN_Min(index, *size);
|
|
if (clamped_index != *size) {
|
|
char const *src = DN_CAST(char *)(data + clamped_index);
|
|
char const *dest = DN_CAST(char *)(data + (clamped_index + count));
|
|
char const *end = DN_CAST(char *)(data + (*size));
|
|
DN_USize bytes_to_move = end - src;
|
|
DN_Memmove(DN_CAST(void *) dest, src, bytes_to_move);
|
|
}
|
|
|
|
result = data + clamped_index;
|
|
DN_Memcpy(result, items, sizeof(T) * count);
|
|
*size += count;
|
|
return result;
|
|
}
|
|
|
|
template <typename T>
|
|
T DN_CArray_PopFront(T *data, DN_USize *size, DN_USize count)
|
|
{
|
|
T result = {};
|
|
if (!data || !size || *size <= 0)
|
|
return result;
|
|
|
|
result = data[0];
|
|
DN_USize pop_count = DN_Min(count, *size);
|
|
DN_Memmove(data, data + pop_count, (*size - pop_count) * sizeof(T));
|
|
*size -= pop_count;
|
|
return result;
|
|
}
|
|
|
|
template <typename T>
|
|
T DN_CArray_PopBack(T *data, DN_USize *size, DN_USize count)
|
|
{
|
|
T result = {};
|
|
if (!data || !size || *size <= 0)
|
|
return result;
|
|
|
|
DN_USize pop_count = DN_Min(count, *size);
|
|
result = data[(*size - 1)];
|
|
*size -= pop_count;
|
|
return result;
|
|
}
|
|
|
|
template <typename T>
|
|
DN_ArrayFindResult<T> DN_CArray_Find(T *data, DN_USize size, T const &value)
|
|
{
|
|
DN_ArrayFindResult<T> result = {};
|
|
if (!data || size <= 0)
|
|
return result;
|
|
|
|
for (DN_USize index = 0; !result.data && index < size; index++) {
|
|
T *item = data + index;
|
|
if (*item == value) {
|
|
result.data = item;
|
|
result.index = index;
|
|
}
|
|
}
|
|
|
|
return result;
|
|
}
|
|
|
|
#if !defined(DN_NO_SARRAY)
|
|
// NOTE: DN_SArray /////////////////////////////////////////////////////////////////////////////////
|
|
template <typename T>
|
|
DN_SArray<T> DN_SArray_Init(DN_Arena *arena, DN_USize size, DN_ZeroMem zero_mem)
|
|
{
|
|
DN_SArray<T> result = {};
|
|
if (!arena || !size)
|
|
return result;
|
|
result.data = DN_Arena_NewArray(arena, T, size, zero_mem);
|
|
if (result.data)
|
|
result.max = size;
|
|
return result;
|
|
}
|
|
|
|
template <typename T>
|
|
DN_SArray<T> DN_SArray_InitSlice(DN_Arena *arena, DN_Slice<T> slice, DN_USize size, DN_ZeroMem zero_mem)
|
|
{
|
|
DN_USize max = DN_Max(slice.size, size);
|
|
DN_SArray<T> result = DN_SArray_Init<T>(arena, max, DN_ZeroMem_No);
|
|
if (DN_SArray_IsValid(&result)) {
|
|
DN_SArray_AddArray(&result, slice.data, slice.size);
|
|
if (zero_mem == DN_ZeroMem_Yes)
|
|
DN_Memset(result.data + result.size, 0, (result.max - result.size) * sizeof(T));
|
|
}
|
|
return result;
|
|
}
|
|
|
|
template <typename T, size_t N>
|
|
DN_SArray<T> DN_SArray_InitCArray(DN_Arena *arena, T const (&array)[N], DN_USize size, DN_ZeroMem zero_mem)
|
|
{
|
|
DN_SArray<T> result = DN_SArray_InitSlice(arena, DN_Slice_Init(DN_CAST(T *) array, N), size, zero_mem);
|
|
return result;
|
|
}
|
|
|
|
template <typename T>
|
|
DN_SArray<T> DN_SArray_InitBuffer(T *buffer, DN_USize size)
|
|
{
|
|
DN_SArray<T> result = {};
|
|
result.data = buffer;
|
|
result.max = size;
|
|
return result;
|
|
}
|
|
|
|
template <typename T>
|
|
bool DN_SArray_IsValid(DN_SArray<T> const *array)
|
|
{
|
|
bool result = array && array->data && array->size <= array->max;
|
|
return result;
|
|
}
|
|
|
|
template <typename T>
|
|
DN_Slice<T> DN_SArray_Slice(DN_SArray<T> const *array)
|
|
{
|
|
DN_Slice<T> result = {};
|
|
if (array)
|
|
result = DN_Slice_Init<T>(DN_CAST(T *) array->data, array->size);
|
|
return result;
|
|
}
|
|
|
|
template <typename T>
|
|
T *DN_SArray_MakeArray(DN_SArray<T> *array, DN_USize count, DN_ZeroMem zero_mem)
|
|
{
|
|
if (!DN_SArray_IsValid(array))
|
|
return nullptr;
|
|
T *result = DN_CArray_MakeArray(array->data, &array->size, array->max, count, zero_mem);
|
|
return result;
|
|
}
|
|
|
|
template <typename T>
|
|
T *DN_SArray_Make(DN_SArray<T> *array, DN_ZeroMem zero_mem)
|
|
{
|
|
T *result = DN_SArray_MakeArray(array, 1, zero_mem);
|
|
return result;
|
|
}
|
|
|
|
template <typename T>
|
|
T *DN_SArray_AddArray(DN_SArray<T> *array, T const *items, DN_USize count)
|
|
{
|
|
T *result = DN_SArray_MakeArray(array, count, DN_ZeroMem_No);
|
|
if (result)
|
|
DN_Memcpy(result, items, count * sizeof(T));
|
|
return result;
|
|
}
|
|
|
|
template <typename T, DN_USize N>
|
|
T *DN_SArray_AddCArray(DN_SArray<T> *array, T const (&items)[N])
|
|
{
|
|
T *result = DN_SArray_AddArray(array, items, N);
|
|
return result;
|
|
}
|
|
|
|
template <typename T>
|
|
T *DN_SArray_Add(DN_SArray<T> *array, T const &item)
|
|
{
|
|
T *result = DN_SArray_AddArray(array, &item, 1);
|
|
return result;
|
|
}
|
|
|
|
template <typename T>
|
|
T *DN_SArray_InsertArray(DN_SArray<T> *array, DN_USize index, T const *items, DN_USize count)
|
|
{
|
|
T *result = nullptr;
|
|
if (!DN_SArray_IsValid(array))
|
|
return result;
|
|
result = DN_CArray_InsertArray(array->data, &array->size, array->max, index, items, count);
|
|
return result;
|
|
}
|
|
|
|
template <typename T, DN_USize N>
|
|
T *DN_SArray_InsertCArray(DN_SArray<T> *array, DN_USize index, T const (&items)[N])
|
|
{
|
|
T *result = DN_SArray_InsertArray(array, index, items, N);
|
|
return result;
|
|
}
|
|
|
|
template <typename T>
|
|
T *DN_SArray_Insert(DN_SArray<T> *array, DN_USize index, T const &item)
|
|
{
|
|
T *result = DN_SArray_InsertArray(array, index, &item, 1);
|
|
return result;
|
|
}
|
|
|
|
template <typename T>
|
|
T DN_SArray_PopFront(DN_SArray<T> *array, DN_USize count)
|
|
{
|
|
T result = DN_CArray_PopFront(array->data, &array->size, count);
|
|
return result;
|
|
}
|
|
|
|
template <typename T>
|
|
T DN_SArray_PopBack(DN_SArray<T> *array, DN_USize count)
|
|
{
|
|
T result = DN_CArray_PopBack(array->data, &array->size, count);
|
|
return result;
|
|
}
|
|
|
|
template <typename T>
|
|
DN_ArrayEraseResult DN_SArray_EraseRange(DN_SArray<T> *array, DN_USize begin_index, DN_ISize count, DN_ArrayErase erase)
|
|
{
|
|
DN_ArrayEraseResult result = {};
|
|
if (!DN_SArray_IsValid(array) || array->size == 0 || count == 0)
|
|
return result;
|
|
result = DN_CArray_EraseRange(array->data, &array->size, begin_index, count, erase);
|
|
return result;
|
|
}
|
|
|
|
template <typename T>
|
|
void DN_SArray_Clear(DN_SArray<T> *array)
|
|
{
|
|
if (array)
|
|
array->size = 0;
|
|
}
|
|
#endif // !defined(DN_NO_SARRAY)
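
// NOTE: Illustrative usage sketch for DN_SArray (not part of the library);
// assumes an initialised DN_Arena named 'arena':
//
//     DN_SArray<int> array = DN_SArray_Init<int>(&arena, /*size*/ 8, DN_ZeroMem_Yes);
//     DN_SArray_Add(&array, 42);
//     int values[] = {1, 2, 3};
//     DN_SArray_AddCArray(&array, values);           // array is now {42, 1, 2, 3}
//     DN_Slice<int> slice = DN_SArray_Slice(&array); // Non-owning view of the items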
|
|
|
|
#if !defined(DN_NO_FARRAY)
|
|
// NOTE: DN_FArray /////////////////////////////////////////////////////////////////////////////////
|
|
template <typename T, DN_USize N>
|
|
DN_FArray<T, N> DN_FArray_Init(T const *array, DN_USize count)
|
|
{
|
|
DN_FArray<T, N> result = {};
|
|
bool added = DN_FArray_AddArray(&result, array, count);
|
|
DN_Assert(added);
|
|
return result;
|
|
}
|
|
|
|
template <typename T, DN_USize N>
|
|
DN_FArray<T, N> DN_FArray_InitSlice(DN_Slice<T> slice)
|
|
{
|
|
DN_FArray<T, N> result = DN_FArray_Init(slice.data, slice.size);
|
|
return result;
|
|
}
|
|
|
|
template <typename T, DN_USize N, DN_USize K>
|
|
DN_FArray<T, N> DN_FArray_InitCArray(T const (&items)[K])
|
|
{
|
|
DN_FArray<T, N> result = DN_FArray_Init<T, N>(items, K);
|
|
return result;
|
|
}
|
|
|
|
template <typename T, DN_USize N>
|
|
bool DN_FArray_IsValid(DN_FArray<T, N> const *array)
|
|
{
|
|
bool result = array && array->size <= DN_ArrayCountU(array->data);
|
|
return result;
|
|
}
|
|
|
|
template <typename T, DN_USize N>
|
|
DN_Slice<T> DN_FArray_Slice(DN_FArray<T, N> const *array)
|
|
{
|
|
DN_Slice<T> result = {};
|
|
if (array)
|
|
result = DN_Slice_Init<T>(DN_CAST(T *) array->data, array->size);
|
|
return result;
|
|
}
|
|
|
|
template <typename T, DN_USize N>
|
|
T *DN_FArray_AddArray(DN_FArray<T, N> *array, T const *items, DN_USize count)
|
|
{
|
|
T *result = DN_FArray_MakeArray(array, count, DN_ZeroMem_No);
|
|
if (result)
|
|
DN_Memcpy(result, items, count * sizeof(T));
|
|
return result;
|
|
}
|
|
|
|
template <typename T, DN_USize N, DN_USize K>
|
|
T *DN_FArray_AddCArray(DN_FArray<T, N> *array, T const (&items)[K])
|
|
{
|
|
T *result = DN_FArray_MakeArray(array, K, DN_ZeroMem_No);
|
|
if (result)
|
|
DN_Memcpy(result, items, K * sizeof(T));
|
|
return result;
|
|
}
|
|
|
|
template <typename T, DN_USize N>
|
|
T *DN_FArray_Add(DN_FArray<T, N> *array, T const &item)
|
|
{
|
|
T *result = DN_FArray_AddArray(array, &item, 1);
|
|
return result;
|
|
}
|
|
|
|
template <typename T, DN_USize N>
|
|
T *DN_FArray_MakeArray(DN_FArray<T, N> *array, DN_USize count, DN_ZeroMem zero_mem)
|
|
{
|
|
if (!DN_FArray_IsValid(array))
|
|
return nullptr;
|
|
T *result = DN_CArray_MakeArray(array->data, &array->size, N, count, zero_mem);
|
|
return result;
|
|
}
|
|
|
|
template <typename T, DN_USize N>
|
|
T *DN_FArray_Make(DN_FArray<T, N> *array, DN_ZeroMem zero_mem)
|
|
{
|
|
T *result = DN_FArray_MakeArray(array, 1, zero_mem);
|
|
return result;
|
|
}
|
|
|
|
template <typename T, DN_USize N>
|
|
T *DN_FArray_InsertArray(DN_FArray<T, N> *array, DN_USize index, T const *items, DN_USize count)
|
|
{
|
|
T *result = nullptr;
|
|
if (!DN_FArray_IsValid(array))
|
|
return result;
|
|
result = DN_CArray_InsertArray(array->data, &array->size, N, index, items, count);
|
|
return result;
|
|
}
|
|
|
|
template <typename T, DN_USize N, DN_USize K>
|
|
T *DN_FArray_InsertCArray(DN_FArray<T, N> *array, DN_USize index, T const (&items)[K])
|
|
{
|
|
T *result = DN_FArray_InsertArray(array, index, items, K);
|
|
return result;
|
|
}
|
|
|
|
template <typename T, DN_USize N>
|
|
T *DN_FArray_Insert(DN_FArray<T, N> *array, DN_USize index, T const &item)
|
|
{
|
|
T *result = DN_FArray_InsertArray(array, index, &item, 1);
|
|
return result;
|
|
}
|
|
|
|
template <typename T, DN_USize N>
|
|
T DN_FArray_PopFront(DN_FArray<T, N> *array, DN_USize count)
|
|
{
|
|
T result = DN_CArray_PopFront(array->data, &array->size, count);
|
|
return result;
|
|
}
|
|
|
|
template <typename T, DN_USize N>
|
|
T DN_FArray_PopBack(DN_FArray<T, N> *array, DN_USize count)
|
|
{
|
|
T result = DN_CArray_PopBack(array->data, &array->size, count);
|
|
return result;
|
|
}
|
|
|
|
template <typename T, DN_USize N>
|
|
DN_ArrayFindResult<T> DN_FArray_Find(DN_FArray<T, N> *array, T const &find)
|
|
{
|
|
DN_ArrayFindResult<T> result = DN_CArray_Find<T>(array->data, array->size, find);
|
|
return result;
|
|
}
|
|
|
|
template <typename T, DN_USize N>
|
|
DN_ArrayEraseResult DN_FArray_EraseRange(DN_FArray<T, N> *array, DN_USize begin_index, DN_ISize count, DN_ArrayErase erase)
|
|
{
|
|
DN_ArrayEraseResult result = {};
|
|
if (!DN_FArray_IsValid(array) || array->size == 0 || count == 0)
|
|
return result;
|
|
result = DN_CArray_EraseRange(array->data, &array->size, begin_index, count, erase);
|
|
return result;
|
|
}
|
|
|
|
template <typename T, DN_USize N>
|
|
void DN_FArray_Clear(DN_FArray<T, N> *array)
|
|
{
|
|
if (array)
|
|
array->size = 0;
|
|
}
|
|
#endif // !defined(DN_NO_FARRAY)
|
|
|
|
#if !defined(DN_NO_SLICE)
|
|
template <typename T>
|
|
DN_Slice<T> DN_Slice_Init(T *const data, DN_USize size)
|
|
{
|
|
DN_Slice<T> result = {};
|
|
if (data) {
|
|
result.data = data;
|
|
result.size = size;
|
|
}
|
|
return result;
|
|
}
|
|
|
|
template <typename T, DN_USize N>
|
|
DN_Slice<T> DN_Slice_InitCArrayCopy(DN_Arena *arena, T const (&array)[N])
|
|
{
|
|
DN_Slice<T> result = DN_Slice_Alloc<T>(arena, N, DN_ZeroMem_No);
|
|
if (result.data)
|
|
DN_Memcpy(result.data, array, sizeof(T) * N);
|
|
return result;
|
|
}
|
|
|
|
template <typename T>
|
|
DN_Slice<T> DN_Slice_CopyPtr(DN_Arena *arena, T *const data, DN_USize size)
|
|
{
|
|
T *copy = DN_Arena_NewArrayCopy(arena, T, data, size);
|
|
DN_Slice<T> result = DN_Slice_Init(copy, copy ? size : 0);
|
|
return result;
|
|
}
|
|
|
|
template <typename T>
|
|
DN_Slice<T> DN_Slice_Copy(DN_Arena *arena, DN_Slice<T> slice)
|
|
{
|
|
DN_Slice<T> result = DN_Slice_CopyPtr(arena, slice.data, slice.size);
|
|
return result;
|
|
}
|
|
|
|
template <typename T>
|
|
DN_Slice<T> DN_Slice_Alloc(DN_Arena *arena, DN_USize size, DN_ZeroMem zero_mem)
|
|
{
|
|
DN_Slice<T> result = {};
|
|
if (!arena || size == 0)
|
|
return result;
|
|
result.data = DN_Arena_NewArray(arena, T, size, zero_mem);
|
|
if (result.data)
|
|
result.size = size;
|
|
return result;
|
|
}
|
|
|
|
#endif // !defined(DN_NO_SLICE)
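
// NOTE: Illustrative usage sketch for DN_Slice (not part of the library);
// assumes an initialised DN_Arena named 'arena':
//
//     int values[]       = {1, 2, 3, 4};
//     DN_Slice<int> view = DN_Slice_Init(values, DN_ArrayCountU(values)); // Non-owning view
//     DN_Slice<int> copy = DN_Slice_InitCArrayCopy(&arena, values);       // Deep copy into the arena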
|
|
|
|
#if !defined(DN_NO_DSMAP)
|
|
// NOTE: DN_DSMap //////////////////////////////////////////////////////////////////////////////////
|
|
DN_U32 const DN_DS_MAP_DEFAULT_HASH_SEED = 0x8a1ced49;
|
|
DN_U32 const DN_DS_MAP_SENTINEL_SLOT = 0;
|
|
|
|
template <typename T>
|
|
DN_DSMap<T> DN_DSMap_Init(DN_Arena *arena, DN_U32 size, DN_DSMapFlags flags)
|
|
{
|
|
DN_DSMap<T> result = {};
|
|
if (!DN_CheckF(DN_IsPowerOfTwo(size), "Power-of-two size required, given size was '%u'", size))
|
|
return result;
|
|
if (size <= 0)
|
|
return result;
|
|
if (!DN_Check(arena))
|
|
return result;
|
|
result.arena = arena;
|
|
result.pool = DN_Pool_Init(arena, DN_POOL_DEFAULT_ALIGN);
|
|
result.hash_to_slot = DN_Arena_NewArray(result.arena, DN_U32, size, DN_ZeroMem_Yes);
|
|
result.slots = DN_Arena_NewArray(result.arena, DN_DSMapSlot<T>, size, DN_ZeroMem_Yes);
|
|
result.occupied = 1; // For sentinel
|
|
result.size = size;
|
|
result.initial_size = size;
|
|
result.flags = flags;
|
|
DN_AssertF(result.hash_to_slot && result.slots, "We pre-allocated a block of memory sufficient in size for the 2 arrays. Maybe the pointers needed extra space because of natural alignment?");
|
|
return result;
|
|
}
|
|
|
|
template <typename T>
|
|
void DN_DSMap_Deinit(DN_DSMap<T> *map, DN_ZeroMem zero_mem)
|
|
{
|
|
if (!map)
|
|
return;
|
|
// TODO(doyle): Use zero_mem
|
|
(void)zero_mem;
|
|
DN_Arena_Deinit(map->arena);
|
|
*map = {};
|
|
}
|
|
|
|
template <typename T>
|
|
bool DN_DSMap_IsValid(DN_DSMap<T> const *map)
|
|
{
|
|
bool result = map &&
|
|
map->arena &&
|
|
map->hash_to_slot && // Hash to slot mapping array must be allocated
|
|
map->slots && // Slots array must be allocated
|
|
(map->size & (map->size - 1)) == 0 && // Must be power of two size
|
|
map->occupied >= 1; // DN_DS_MAP_SENTINEL_SLOT takes up one slot
|
|
return result;
|
|
}
|
|
|
|
template <typename T>
|
|
DN_U32 DN_DSMap_Hash(DN_DSMap<T> const *map, DN_DSMapKey key)
|
|
{
|
|
DN_U32 result = 0;
|
|
if (!map)
|
|
return result;
|
|
|
|
if (key.type == DN_DSMapKeyType_U64NoHash) {
|
|
result = DN_CAST(DN_U32) key.u64;
|
|
return result;
|
|
}
|
|
|
|
if (key.type == DN_DSMapKeyType_BufferAsU64NoHash) {
|
|
result = key.hash;
|
|
return result;
|
|
}
|
|
|
|
DN_U32 seed = map->hash_seed ? map->hash_seed : DN_DS_MAP_DEFAULT_HASH_SEED;
|
|
if (map->hash_function) {
|
|
result = map->hash_function(key, seed);
|
|
} else {
|
|
// NOTE: Courtesy of Demetri Spanos (which this hash table was inspired
|
|
// from), the following is a hashing function snippet provided for
|
|
// reliable, quick and simple quality hashing functions for hash table
|
|
// use.
|
|
// Source: https://github.com/demetri/scribbles/blob/c475464756c104c91bab83ed4e14badefef12ab5/hashing/ub_aware_hash_functions.c
|
|
|
|
char const *key_ptr = nullptr;
|
|
DN_U32 len = 0;
|
|
DN_U32 h = seed;
|
|
switch (key.type) {
|
|
case DN_DSMapKeyType_BufferAsU64NoHash: /*FALLTHRU*/
|
|
case DN_DSMapKeyType_U64NoHash: DN_InvalidCodePath; /*FALLTHRU*/
|
|
case DN_DSMapKeyType_Invalid: break;
|
|
|
|
case DN_DSMapKeyType_Buffer:
|
|
key_ptr = DN_CAST(char const *) key.buffer_data;
|
|
len = key.buffer_size;
|
|
break;
|
|
|
|
case DN_DSMapKeyType_U64:
|
|
key_ptr = DN_CAST(char const *) & key.u64;
|
|
len = sizeof(key.u64);
|
|
break;
|
|
}
|
|
|
|
// Murmur3 32-bit without UB unaligned accesses
|
|
// DN_U32 mur3_32_no_UB(const void *key, int len, DN_U32 h)
|
|
|
|
// main body, work on 32-bit blocks at a time
|
|
for (DN_U32 i = 0; i < len / 4; i++) {
|
|
DN_U32 k;
|
|
memcpy(&k, &key_ptr[i * 4], sizeof(k));
|
|
|
|
k *= 0xcc9e2d51;
|
|
k = ((k << 15) | (k >> 17)) * 0x1b873593;
|
|
h = (((h ^ k) << 13) | ((h ^ k) >> 19)) * 5 + 0xe6546b64;
|
|
}
|
|
|
|
// load/mix up to 3 remaining tail bytes into a tail block
|
|
DN_U32 t = 0;
|
|
uint8_t *tail = ((uint8_t *)key_ptr) + 4 * (len / 4);
|
|
switch (len & 3) {
|
|
case 3: t ^= tail[2] << 16;
|
|
case 2: t ^= tail[1] << 8;
|
|
case 1: {
|
|
t ^= tail[0] << 0;
|
|
h ^= ((0xcc9e2d51 * t << 15) | (0xcc9e2d51 * t >> 17)) * 0x1b873593;
|
|
}
|
|
}
|
|
|
|
// finalization mix, including key length
|
|
h = ((h ^ len) ^ ((h ^ len) >> 16)) * 0x85ebca6b;
|
|
h = (h ^ (h >> 13)) * 0xc2b2ae35;
|
|
result = h ^ (h >> 16);
|
|
}
|
|
return result;
|
|
}
|
|
|
|
template <typename T>
|
|
DN_U32 DN_DSMap_HashToSlotIndex(DN_DSMap<T> const *map, DN_DSMapKey key)
|
|
{
|
|
DN_Assert(key.type != DN_DSMapKeyType_Invalid);
|
|
DN_U32 result = DN_DS_MAP_SENTINEL_SLOT;
|
|
if (!DN_DSMap_IsValid(map))
|
|
return result;
|
|
|
|
result = key.hash & (map->size - 1);
|
|
for (;;) {
|
|
if (result == DN_DS_MAP_SENTINEL_SLOT) // Sentinel is reserved
|
|
result++;
|
|
|
|
if (map->hash_to_slot[result] == DN_DS_MAP_SENTINEL_SLOT) // Slot is vacant, can use
|
|
return result;
|
|
|
|
DN_DSMapSlot<T> *slot = map->slots + map->hash_to_slot[result];
|
|
if (slot->key.type == DN_DSMapKeyType_Invalid || (slot->key.hash == key.hash && slot->key == key))
|
|
return result;
|
|
|
|
result = (result + 1) & (map->size - 1);
|
|
}
|
|
}
|
|
|
|
template <typename T>
|
|
DN_DSMapResult<T> DN_DSMap_Find(DN_DSMap<T> const *map, DN_DSMapKey key)
|
|
{
|
|
DN_DSMapResult<T> result = {};
|
|
if (DN_DSMap_IsValid(map)) {
|
|
DN_U32 index = DN_DSMap_HashToSlotIndex(map, key);
|
|
if (index != DN_DS_MAP_SENTINEL_SLOT && map->hash_to_slot[index] == DN_DS_MAP_SENTINEL_SLOT) {
|
|
result.slot = map->slots; // NOTE: Set to sentinel value
|
|
} else {
|
|
result.slot = map->slots + map->hash_to_slot[index];
|
|
result.found = true;
|
|
}
|
|
result.value = &result.slot->value;
|
|
}
|
|
return result;
|
|
}
|
|
|
|
template <typename T>
|
|
DN_DSMapResult<T> DN_DSMap_Make(DN_DSMap<T> *map, DN_DSMapKey key)
|
|
{
|
|
DN_DSMapResult<T> result = {};
|
|
if (!DN_DSMap_IsValid(map))
|
|
return result;
|
|
|
|
DN_U32 index = DN_DSMap_HashToSlotIndex(map, key);
|
|
if (map->hash_to_slot[index] == DN_DS_MAP_SENTINEL_SLOT) {
|
|
// NOTE: Create the slot
|
|
if (index != DN_DS_MAP_SENTINEL_SLOT)
|
|
map->hash_to_slot[index] = map->occupied++;
|
|
|
|
// NOTE: Check if resize is required
|
|
bool map_is_75pct_full = (map->occupied * 4) > (map->size * 3);
|
|
if (map_is_75pct_full) {
|
|
if (!DN_DSMap_Resize(map, map->size * 2))
|
|
return result;
|
|
result = DN_DSMap_Make(map, key);
|
|
} else {
|
|
result.slot = map->slots + map->hash_to_slot[index];
|
|
result.slot->key = key; // NOTE: Assign key to new slot
|
|
if ((key.type == DN_DSMapKeyType_Buffer ||
|
|
key.type == DN_DSMapKeyType_BufferAsU64NoHash) &&
|
|
!key.no_copy_buffer)
|
|
result.slot->key.buffer_data = DN_Pool_NewArrayCopy(&map->pool, char, key.buffer_data, key.buffer_size);
|
|
}
|
|
} else {
|
|
result.slot = map->slots + map->hash_to_slot[index];
|
|
result.found = true;
|
|
}
|
|
|
|
result.value = &result.slot->value;
|
|
DN_Assert(result.slot->key.type != DN_DSMapKeyType_Invalid);
|
|
return result;
|
|
}
|
|
|
|
template <typename T>
|
|
DN_DSMapResult<T> DN_DSMap_Set(DN_DSMap<T> *map, DN_DSMapKey key, T const &value)
|
|
{
|
|
DN_DSMapResult<T> result = {};
|
|
if (!DN_DSMap_IsValid(map))
|
|
return result;
|
|
|
|
result = DN_DSMap_Make(map, key);
|
|
result.slot->value = value;
|
|
return result;
|
|
}
|
|
|
|
template <typename T>
|
|
DN_DSMapResult<T> DN_DSMap_FindKeyU64(DN_DSMap<T> const *map, DN_U64 key)
|
|
{
|
|
DN_DSMapKey map_key = DN_DSMap_KeyU64(map, key);
|
|
DN_DSMapResult<T> result = DN_DSMap_Find(map, map_key);
|
|
return result;
|
|
}
|
|
|
|
template <typename T>
|
|
DN_DSMapResult<T> DN_DSMap_MakeKeyU64(DN_DSMap<T> *map, DN_U64 key)
|
|
{
|
|
DN_DSMapKey map_key = DN_DSMap_KeyU64(map, key);
|
|
DN_DSMapResult<T> result = DN_DSMap_Make(map, map_key);
|
|
return result;
|
|
}
|
|
|
|
template <typename T>
|
|
DN_DSMapResult<T> DN_DSMap_SetKeyU64(DN_DSMap<T> *map, DN_U64 key, T const &value)
|
|
{
|
|
DN_DSMapKey map_key = DN_DSMap_KeyU64(map, key);
|
|
DN_DSMapResult<T> result = DN_DSMap_Set(map, map_key, value);
|
|
return result;
|
|
}
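
// NOTE: Illustrative usage sketch for DN_DSMap (not part of the library);
// assumes an initialised DN_Arena named 'arena'. The size must be a power of
// two and the flags argument is left empty here:
//
//     DN_DSMap<int> map = DN_DSMap_Init<int>(&arena, /*size*/ 1024, DN_CAST(DN_DSMapFlags) 0);
//     DN_DSMap_SetKeyU64(&map, /*key*/ 0xF00D, /*value*/ 42);
//     DN_DSMapResult<int> found = DN_DSMap_FindKeyU64(&map, 0xF00D);
//     if (found.found) {
//         int value = *found.value;
//     }
//     DN_DSMap_Deinit(&map, DN_ZeroMem_No);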
|
|
|
|
template <typename T>
|
|
DN_DSMapResult<T> DN_DSMap_FindKeyStr8(DN_DSMap<T> const *map, DN_Str8 key)
|
|
{
|
|
DN_DSMapKey map_key = DN_DSMap_KeyStr8(map, key);
|
|
DN_DSMapResult<T> result = DN_DSMap_Find(map, map_key);
|
|
return result;
|
|
}
|
|
|
|
template <typename T>
|
|
DN_DSMapResult<T> DN_DSMap_MakeKeyStr8(DN_DSMap<T> *map, DN_Str8 key)
|
|
{
|
|
DN_DSMapKey map_key = DN_DSMap_KeyStr8(map, key);
|
|
DN_DSMapResult<T> result = DN_DSMap_Make(map, map_key);
|
|
return result;
|
|
}
|
|
|
|
template <typename T>
|
|
DN_DSMapResult<T> DN_DSMap_SetKeyStr8(DN_DSMap<T> *map, DN_Str8 key, T const &value)
|
|
{
|
|
DN_DSMapKey map_key = DN_DSMap_KeyStr8(map, key);
|
|
DN_DSMapResult<T> result = DN_DSMap_Set(map, map_key, value);
|
|
return result;
|
|
}
|
|
|
|
template <typename T>
|
|
bool DN_DSMap_Resize(DN_DSMap<T> *map, DN_U32 size)
|
|
{
|
|
if (!DN_DSMap_IsValid(map) || size < map->occupied || size < map->initial_size)
|
|
return false;
|
|
|
|
DN_Arena *prev_arena = map->arena;
|
|
DN_Arena new_arena = {};
|
|
new_arena.mem_funcs = prev_arena->mem_funcs;
|
|
new_arena.flags = prev_arena->flags;
|
|
new_arena.label = prev_arena->label;
|
|
new_arena.prev = prev_arena->prev;
|
|
new_arena.next = prev_arena->next;
|
|
|
|
DN_DSMap<T> new_map = DN_DSMap_Init<T>(&new_arena, size, map->flags);
|
|
if (!DN_DSMap_IsValid(&new_map))
|
|
return false;
|
|
|
|
new_map.initial_size = map->initial_size;
|
|
for (DN_U32 old_index = 1 /*Sentinel*/; old_index < map->occupied; old_index++) {
|
|
DN_DSMapSlot<T> *old_slot = map->slots + old_index;
|
|
DN_DSMapKey old_key = old_slot->key;
|
|
if (old_key.type == DN_DSMapKeyType_Invalid)
|
|
continue;
|
|
DN_DSMap_Set(&new_map, old_key, old_slot->value);
|
|
}
|
|
|
|
if ((map->flags & DN_DSMapFlags_DontFreeArenaOnResize) == 0)
|
|
DN_DSMap_Deinit(map, DN_ZeroMem_No);
|
|
*map = new_map;          // Update the map in place
map->arena = prev_arena; // Restore the previous arena pointer; it was de-initialised above
*map->arena = new_arena; // Re-initialise the old arena with the new data
|
|
map->pool.arena = map->arena;
|
|
return true;
|
|
}
|
|
|
|
template <typename T>
|
|
bool DN_DSMap_Erase(DN_DSMap<T> *map, DN_DSMapKey key)
|
|
{
|
|
if (!DN_DSMap_IsValid(map))
|
|
return false;
|
|
|
|
DN_U32 index = DN_DSMap_HashToSlotIndex(map, key);
|
|
if (index == 0)
|
|
return true;
|
|
|
|
DN_U32 slot_index = map->hash_to_slot[index];
|
|
if (slot_index == DN_DS_MAP_SENTINEL_SLOT)
|
|
return false;
|
|
|
|
// NOTE: Mark the slot as unoccupied
|
|
map->hash_to_slot[index] = DN_DS_MAP_SENTINEL_SLOT;
|
|
|
|
DN_DSMapSlot<T> *slot = map->slots + slot_index;
|
|
if (!slot->key.no_copy_buffer)
|
|
DN_Pool_Dealloc(&map->pool, DN_CAST(void *) slot->key.buffer_data);
|
|
*slot = {}; // TODO: Optional?
|
|
|
|
if (map->occupied > 1 /*Sentinel*/) {
|
|
// NOTE: Repair the hash chain, e.g. rehash all the items after the removed
|
|
// element and reposition them if necessary.
|
|
for (DN_U32 probe_index = index;;) {
|
|
probe_index = (probe_index + 1) & (map->size - 1);
|
|
if (map->hash_to_slot[probe_index] == DN_DS_MAP_SENTINEL_SLOT)
|
|
break;
|
|
|
|
DN_DSMapSlot<T> *probe = map->slots + map->hash_to_slot[probe_index];
|
|
DN_U32 new_index = probe->key.hash & (map->size - 1);
|
|
if (index <= probe_index) {
|
|
if (index < new_index && new_index <= probe_index)
|
|
continue;
|
|
} else {
|
|
if (index < new_index || new_index <= probe_index)
|
|
continue;
|
|
}
|
|
|
|
map->hash_to_slot[index] = map->hash_to_slot[probe_index];
|
|
map->hash_to_slot[probe_index] = DN_DS_MAP_SENTINEL_SLOT;
|
|
index = probe_index;
|
|
}
|
|
|
|
// NOTE: We have erased a slot from the hash table, which leaves a gap
// in our contiguous slot array. After repairing the chain, the hash
// mapping is correct. We now fill the vacant spot with the last
// element in the slot list (an unstable erase).
|
|
if (map->occupied >= 3 /*Ignoring sentinel, at least 2 other elements to unstable erase*/) {
|
|
DN_U32 last_index = map->occupied - 1;
|
|
if (last_index != slot_index) {
|
|
// NOTE: Copy in last slot to the erase slot
|
|
DN_DSMapSlot<T> *last_slot = map->slots + last_index;
|
|
map->slots[slot_index] = *last_slot;
|
|
|
|
// NOTE: Update the hash-to-slot mapping for the value that was copied in
|
|
DN_U32 hash_to_slot_index = DN_DSMap_HashToSlotIndex(map, last_slot->key);
|
|
map->hash_to_slot[hash_to_slot_index] = slot_index;
|
|
*last_slot = {}; // TODO: Optional?
|
|
}
|
|
}
|
|
}
|
|
|
|
map->occupied--;
|
|
bool map_is_below_25pct_full = (map->occupied * 4) < (map->size * 1);
|
|
if (map_is_below_25pct_full && (map->size / 2) >= map->initial_size)
|
|
DN_DSMap_Resize(map, map->size / 2);
|
|
|
|
return true;
|
|
}
|
|
|
|
template <typename T>
|
|
bool DN_DSMap_EraseKeyU64(DN_DSMap<T> *map, DN_U64 key)
|
|
{
|
|
DN_DSMapKey map_key = DN_DSMap_KeyU64(map, key);
|
|
bool result = DN_DSMap_Erase(map, map_key);
|
|
return result;
|
|
}
|
|
|
|
template <typename T>
|
|
bool DN_DSMap_EraseKeyStr8(DN_DSMap<T> *map, DN_Str8 key)
|
|
{
|
|
DN_DSMapKey map_key = DN_DSMap_KeyStr8(map, key);
|
|
bool result = DN_DSMap_Erase(map, map_key);
|
|
return result;
|
|
}
|
|
|
|
template <typename T>
|
|
DN_DSMapKey DN_DSMap_KeyBuffer(DN_DSMap<T> const *map, void const *data, DN_USize size)
|
|
{
|
|
DN_Assert(size > 0 && size <= UINT32_MAX);
|
|
DN_DSMapKey result = {};
|
|
result.type = DN_DSMapKeyType_Buffer;
|
|
result.buffer_data = data;
|
|
result.buffer_size = DN_CAST(DN_U32) size;
|
|
result.hash = DN_DSMap_Hash(map, result);
|
|
return result;
|
|
}
|
|
|
|
template <typename T>
|
|
DN_DSMapKey DN_DSMap_KeyBufferAsU64NoHash(DN_DSMap<T> const *map, void const *data, DN_U32 size)
|
|
{
|
|
DN_DSMapKey result = {};
|
|
result.type = DN_DSMapKeyType_BufferAsU64NoHash;
|
|
result.buffer_data = data;
|
|
result.buffer_size = DN_CAST(DN_U32) size;
|
|
DN_Assert(size >= sizeof(result.hash));
|
|
DN_Memcpy(&result.hash, data, sizeof(result.hash));
|
|
return result;
|
|
}
|
|
|
|
template <typename T>
|
|
DN_DSMapKey DN_DSMap_KeyU64(DN_DSMap<T> const *map, DN_U64 u64)
|
|
{
|
|
DN_DSMapKey result = {};
|
|
result.type = DN_DSMapKeyType_U64;
|
|
result.u64 = u64;
|
|
result.hash = DN_DSMap_Hash(map, result);
|
|
return result;
|
|
}
|
|
|
|
template <typename T>
|
|
DN_DSMapKey DN_DSMap_KeyStr8(DN_DSMap<T> const *map, DN_Str8 string)
|
|
{
|
|
DN_DSMapKey result = DN_DSMap_KeyBuffer(map, string.data, string.size);
|
|
return result;
|
|
}
|
|
#endif // !defined(DN_NO_DSMAP)
|
|
|
|
#if !defined(DN_NO_LIST)
|
|
// NOTE: DN_List ///////////////////////////////////////////////////////////////////////////////////
|
|
template <typename T>
|
|
DN_List<T> DN_List_Init(DN_USize chunk_size)
|
|
{
|
|
DN_List<T> result = {};
|
|
result.chunk_size = chunk_size;
|
|
return result;
|
|
}
|
|
|
|
template <typename T, size_t N>
|
|
DN_List<T> DN_List_InitCArray(DN_Arena *arena, DN_USize chunk_size, T const (&array)[N])
|
|
{
|
|
DN_List<T> result = DN_List_Init<T>(arena, chunk_size);
|
|
for (DN_ForIndexU(index, N))
|
|
DN_List_Add(&result, array[index]);
|
|
return result;
|
|
}
|
|
|
|
template <typename T>
|
|
DN_List<T> DN_List_InitSliceCopy(DN_Arena *arena, DN_USize chunk_size, DN_Slice<T> slice)
|
|
{
|
|
DN_List<T> result = DN_List_Init<T>(arena, chunk_size);
|
|
for (DN_ForIndexU(index, slice.size))
|
|
DN_List_Add(&result, slice.data[index]);
|
|
return result;
|
|
}
|
|
|
|
template <typename T>
|
|
DN_API bool DN_List_AttachTail_(DN_List<T> *list, DN_ListChunk<T> *tail)
|
|
{
|
|
if (!tail)
|
|
return false;
|
|
|
|
if (list->tail) {
|
|
list->tail->next = tail;
|
|
tail->prev = list->tail;
|
|
}
|
|
|
|
list->tail = tail;
|
|
|
|
if (!list->head)
|
|
list->head = list->tail;
|
|
return true;
|
|
}
|
|
|
|
template <typename T>
|
|
DN_API DN_ListChunk<T> *DN_List_AllocArena_(DN_List<T> *list, DN_Arena *arena, DN_USize count)
|
|
{
|
|
DN_ArenaTempMem tmem = DN_Arena_TempMemBegin(arena);
auto *result = DN_Arena_New(arena, DN_ListChunk<T>, DN_ZeroMem_Yes);
|
|
if (!result)
|
|
return nullptr;
|
|
|
|
DN_USize items = DN_Max(list->chunk_size, count);
|
|
result->data = DN_Arena_NewArray(arena, T, items, DN_ZeroMem_Yes);
|
|
result->size = items;
|
|
if (!result->data) {
|
|
DN_Arena_TempMemEnd(tmem);
|
|
result = nullptr;
|
|
}
|
|
|
|
DN_List_AttachTail_(list, result);
|
|
return result;
|
|
}
|
|
|
|
template <typename T>
|
|
DN_API DN_ListChunk<T> *DN_List_AllocPool_(DN_List<T> *list, DN_Pool *pool, DN_USize count)
|
|
{
|
|
auto *result = DN_Pool_New(pool, DN_ListChunk<T>);
|
|
if (!result)
|
|
return nullptr;
|
|
|
|
DN_USize items = DN_Max(list->chunk_size, count);
|
|
result->data = DN_Pool_NewArray(pool, T, items);
|
|
result->size = items;
|
|
if (!result->data) {
|
|
DN_Pool_Dealloc(result);
|
|
result = nullptr;
|
|
}
|
|
|
|
DN_List_AttachTail_(list, result);
|
|
return result;
|
|
}
|
|
|
|
template <typename T>
|
|
DN_API T *DN_List_MakeArena(DN_List<T> *list, DN_Arena *arena, DN_USize count)
|
|
{
|
|
if (list->chunk_size == 0)
|
|
list->chunk_size = 128;
|
|
|
|
if (!list->tail || (list->tail->count + count) > list->tail->size) {
|
|
if (!DN_List_AllocArena_(list, arena, count))
|
|
return nullptr;
|
|
}
|
|
|
|
T *result = list->tail->data + list->tail->count;
|
|
list->tail->count += count;
|
|
list->count += count;
|
|
return result;
|
|
}
|
|
|
|
template <typename T>
|
|
DN_API T *DN_List_MakePool(DN_List<T> *list, DN_Pool *pool, DN_USize count)
|
|
{
|
|
if (list->chunk_size == 0)
|
|
list->chunk_size = 128;
|
|
|
|
if (!list->tail || (list->tail->count + count) > list->tail->size) {
|
|
if (!DN_List_AllocPool_(list, pool, count))
|
|
return nullptr;
|
|
}
|
|
|
|
T *result = list->tail->data + list->tail->count;
|
|
list->tail->count += count;
|
|
list->count += count;
|
|
return result;
|
|
}
|
|
|
|
template <typename T>
|
|
DN_API T *DN_List_AddArena(DN_List<T> *list, DN_Arena *arena, T const &value)
|
|
{
|
|
T *result = DN_List_MakeArena(list, arena, 1);
|
|
if (result)
    *result = value;
|
|
return result;
|
|
}
|
|
|
|
template <typename T>
|
|
DN_API T *DN_List_AddPool(DN_List<T> *list, DN_Pool *pool, T const &value)
|
|
{
|
|
T *result = DN_List_MakePool(list, pool, 1);
|
|
if (result)
    *result = value;
|
|
return result;
|
|
}
|
|
|
|
template <typename T, size_t N>
|
|
DN_API bool DN_List_AddCArray(DN_List<T> *list, T const (&array)[N])
|
|
{
|
|
if (!list)
|
|
return false;
|
|
for (T const &item : array)
|
|
if (!DN_List_Add(list, item))
|
|
return false;
|
|
return true;
|
|
}
|
|
|
|
template <typename T>
|
|
DN_API void DN_List_AddListArena(DN_List<T> *list, DN_Arena *arena, DN_List<T> other)
|
|
{
|
|
if (!list)
|
|
return;
|
|
// TODO(doyle): Copy chunk by chunk
|
|
for (DN_ListIterator<T> it = {}; DN_List_Iterate(&other, &it, 0 /*start_index*/);)
|
|
DN_List_AddArena(list, arena, *it.data);
|
|
}
|
|
|
|
template <typename T>
|
|
DN_API void DN_List_AddListPool(DN_List<T> *list, DN_Pool *pool, DN_List<T> other)
|
|
{
|
|
if (!list)
|
|
return;
|
|
// TODO(doyle): Copy chunk by chunk
|
|
for (DN_ListIterator<T> it = {}; DN_List_Iterate(&other, &it, 0 /*start_index*/);)
|
|
DN_List_AddPool(list, pool, *it.data);
|
|
}
|
|
|
|
template <typename T>
|
|
void DN_List_Clear(DN_List<T> *list)
|
|
{
|
|
if (!list)
|
|
return;
|
|
list->head = list->tail = nullptr;
|
|
list->count = 0;
|
|
}
|
|
|
|
template <typename T>
|
|
DN_API bool DN_List_Iterate(DN_List<T> *list, DN_ListIterator<T> *it, DN_USize start_index)
|
|
{
|
|
bool result = false;
|
|
if (!list || !it || list->chunk_size <= 0)
|
|
return result;
|
|
|
|
if (it->init) {
|
|
it->index++;
|
|
} else {
|
|
*it = {};
|
|
if (start_index == 0) {
|
|
it->chunk = list->head;
|
|
} else {
|
|
DN_List_At(list, start_index, &it->chunk);
|
|
if (list->chunk_size > 0)
|
|
it->chunk_data_index = start_index % list->chunk_size;
|
|
}
|
|
|
|
it->init = true;
|
|
}
|
|
|
|
if (it->chunk) {
|
|
if (it->chunk_data_index >= it->chunk->count) {
|
|
it->chunk = it->chunk->next;
|
|
it->chunk_data_index = 0;
|
|
}
|
|
|
|
if (it->chunk) {
|
|
it->data = it->chunk->data + it->chunk_data_index++;
|
|
result = true;
|
|
}
|
|
}
|
|
|
|
if (!it->chunk)
|
|
DN_Assert(result == false);
|
|
return result;
|
|
}
|
|
|
|
template <typename T>
|
|
DN_API T *DN_List_At(DN_List<T> *list, DN_USize index, DN_ListChunk<T> **at_chunk)
|
|
{
|
|
if (!list || index >= list->count || !list->head)
|
|
return nullptr;
|
|
|
|
// NOTE: Scan forwards to the chunk we need. We don't have random access to chunks
|
|
DN_ListChunk<T> **chunk = &list->head;
|
|
DN_USize running_index = index;
|
|
for (; (*chunk) && running_index >= (*chunk)->size; chunk = &((*chunk)->next))
|
|
running_index -= (*chunk)->size;
|
|
|
|
T *result = nullptr;
|
|
if (*chunk)
|
|
result = (*chunk)->data + running_index;
|
|
|
|
if (result && at_chunk)
|
|
*at_chunk = *chunk;
|
|
|
|
return result;
|
|
}
|
|
|
|
template <typename T>
|
|
DN_Slice<T> DN_List_ToSliceCopy(DN_List<T> const *list, DN_Arena *arena)
|
|
{
|
|
// TODO(doyle): Chunk memcopies is much faster
|
|
DN_Slice<T> result = DN_Slice_Alloc<T>(arena, list->count, DN_ZeroMem_No);
|
|
if (result.size) {
|
|
DN_USize slice_index = 0;
|
|
DN_MSVC_WARNING_PUSH
|
|
DN_MSVC_WARNING_DISABLE(6011) // Dereferencing NULL pointer 'x'
|
|
for (DN_ListIterator<T> it = {}; DN_List_Iterate<T>(DN_CAST(DN_List<T> *) list, &it, 0);)
|
|
result.data[slice_index++] = *it.data;
|
|
DN_MSVC_WARNING_POP
|
|
DN_Assert(slice_index == result.size);
|
|
}
|
|
return result;
|
|
}
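// NOTE: Example (illustrative sketch, disabled; not part of the library) /////////////////////////
// Intended call pattern for the chunked list: init with a chunk size, append via an arena, then
// iterate or flatten. Assumes a `DN_Arena arena` already exists in the caller.
#if 0
    DN_List<int> list = DN_List_Init<int>(/*chunk_size*/ 16);
    for (int value = 0; value < 100; value++)
        DN_List_AddArena(&list, &arena, value);

    int sum = 0;
    for (DN_ListIterator<int> it = {}; DN_List_Iterate(&list, &it, 0 /*start_index*/);)
        sum += *it.data;

    DN_Slice<int> flat = DN_List_ToSliceCopy(&list, &arena); // Contiguous copy of the chunked list
#endif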
#endif // !defined(DN_NO_LIST)
|
|
|
|
// NOTE: DN_Slice //////////////////////////////////////////////////////////////////////////////////
|
|
DN_API DN_Str8 DN_Slice_Str8Render(DN_Arena *arena, DN_Slice<DN_Str8> array, DN_Str8 separator)
|
|
{
|
|
DN_Str8 result = {};
|
|
if (!arena)
|
|
return result;
|
|
|
|
DN_USize total_size = 0;
|
|
for (DN_USize index = 0; index < array.size; index++) {
|
|
if (index)
|
|
total_size += separator.size;
|
|
DN_Str8 item = array.data[index];
|
|
total_size += item.size;
|
|
}
|
|
|
|
result = DN_Str8_Alloc(arena, total_size, DN_ZeroMem_No);
|
|
if (result.data) {
|
|
DN_USize write_index = 0;
|
|
for (DN_USize index = 0; index < array.size; index++) {
|
|
if (index) {
|
|
DN_Memcpy(result.data + write_index, separator.data, separator.size);
|
|
write_index += separator.size;
|
|
}
|
|
DN_Str8 item = array.data[index];
|
|
DN_Memcpy(result.data + write_index, item.data, item.size);
|
|
write_index += item.size;
|
|
}
|
|
}
|
|
|
|
return result;
|
|
}
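// NOTE: Example (illustrative sketch, disabled; not part of the library) /////////////////////////
// Joins a slice of strings with a separator. Assumes a `DN_Arena arena` already exists; the slice
// fields are assigned individually rather than relying on aggregate member order.
#if 0
    DN_Str8 args[] = {DN_STR8("gcc"), DN_STR8("-O2"), DN_STR8("main.c")};
    DN_Slice<DN_Str8> slice = {};
    slice.data = args;
    slice.size = DN_ArrayCountU(args);
    DN_Str8 joined = DN_Slice_Str8Render(&arena, slice, DN_STR8(" ")); // => "gcc -O2 main.c"
#endif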
DN_API DN_Str8 DN_Slice_Str8RenderSpaceSeparated(DN_Arena *arena, DN_Slice<DN_Str8> array)
|
|
{
|
|
DN_Str8 result = DN_Slice_Str8Render(arena, array, DN_STR8(" "));
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str16 DN_Slice_Str16Render(DN_Arena *arena, DN_Slice<DN_Str16> array, DN_Str16 separator)
|
|
{
|
|
DN_Str16 result = {};
|
|
if (!arena)
|
|
return result;
|
|
|
|
DN_USize total_size = 0;
|
|
for (DN_USize index = 0; index < array.size; index++) {
|
|
if (index)
|
|
total_size += separator.size;
|
|
DN_Str16 item = array.data[index];
|
|
total_size += item.size;
|
|
}
|
|
|
|
result = {DN_Arena_NewArray(arena, wchar_t, total_size + 1, DN_ZeroMem_No), total_size};
|
|
if (result.data) {
|
|
DN_USize write_index = 0;
|
|
for (DN_USize index = 0; index < array.size; index++) {
|
|
if (index) {
|
|
DN_Memcpy(result.data + write_index, separator.data, separator.size * sizeof(result.data[0]));
|
|
write_index += separator.size;
|
|
}
|
|
DN_Str16 item = array.data[index];
|
|
DN_Memcpy(result.data + write_index, item.data, item.size * sizeof(result.data[0]));
|
|
write_index += item.size;
|
|
}
|
|
}
|
|
|
|
if (result.data)
    result.data[total_size] = 0;
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str16 DN_Slice_Str16RenderSpaceSeparated(DN_Arena *arena, DN_Slice<DN_Str16> array)
|
|
{
|
|
DN_Str16 result = DN_Slice_Str16Render(arena, array, DN_STR16(L" "));
|
|
return result;
|
|
}
|
|
|
|
#if !defined(DN_NO_DSMAP)
|
|
// NOTE: DN_DSMap //////////////////////////////////////////////////////////////////////////////////
|
|
DN_API DN_DSMapKey DN_DSMap_KeyU64NoHash(DN_U64 u64)
|
|
{
|
|
DN_DSMapKey result = {};
|
|
result.type = DN_DSMapKeyType_U64NoHash;
|
|
result.u64 = u64;
|
|
result.hash = DN_CAST(DN_U32) u64;
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_DSMap_KeyEquals(DN_DSMapKey lhs, DN_DSMapKey rhs)
|
|
{
|
|
bool result = false;
|
|
if (lhs.type == rhs.type && lhs.hash == rhs.hash) {
|
|
switch (lhs.type) {
|
|
case DN_DSMapKeyType_Invalid: result = true; break;
|
|
case DN_DSMapKeyType_U64NoHash: result = true; break;
|
|
case DN_DSMapKeyType_U64: result = lhs.u64 == rhs.u64; break;
|
|
|
|
case DN_DSMapKeyType_BufferAsU64NoHash: /*FALLTHRU*/
|
|
case DN_DSMapKeyType_Buffer: {
|
|
if (lhs.buffer_size == rhs.buffer_size)
|
|
result = DN_Memcmp(lhs.buffer_data, rhs.buffer_data, lhs.buffer_size) == 0;
|
|
} break;
|
|
}
|
|
}
|
|
return result;
|
|
}
|
|
|
|
DN_API bool operator==(DN_DSMapKey lhs, DN_DSMapKey rhs)
|
|
{
|
|
bool result = DN_DSMap_KeyEquals(lhs, rhs);
|
|
return result;
|
|
}
|
|
#endif // !defined(DN_NO_DSMAP)
|
|
// DN: Single header generator inlined this file => #include "Base/dn_base_convert.cpp"
|
|
#define DN_CONVERT_CPP
|
|
|
|
DN_API int DN_CVT_FmtBuffer3DotTruncate(char *buffer, int size, DN_FMT_ATTRIB char const *fmt, ...)
|
|
{
|
|
va_list args;
|
|
va_start(args, fmt);
|
|
int size_required = DN_VSNPrintF(buffer, size, fmt, args);
|
|
int result = DN_Max(DN_Min(size_required, size - 1), 0);
|
|
if (result == size - 1) {
|
|
buffer[size - 2] = '.';
|
|
buffer[size - 3] = '.';
|
|
}
|
|
va_end(args);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_CVTU64Str8 DN_CVT_U64ToStr8(uint64_t val, char separator)
|
|
{
|
|
DN_CVTU64Str8 result = {};
|
|
if (val == 0) {
|
|
result.data[result.size++] = '0';
|
|
} else {
|
|
// NOTE: The number is written in reverse because we form the string by
|
|
// dividing by 10, so we write it in, then reverse it out after all is
|
|
// done.
|
|
DN_CVTU64Str8 temp = {};
|
|
for (DN_USize digit_count = 0; val > 0; digit_count++) {
|
|
if (separator && (digit_count != 0) && (digit_count % 3 == 0))
|
|
temp.data[temp.size++] = separator;
|
|
|
|
auto digit = DN_CAST(char)(val % 10);
|
|
temp.data[temp.size++] = '0' + digit;
|
|
val /= 10;
|
|
}
|
|
|
|
// NOTE: Reverse the string
|
|
DN_MSVC_WARNING_PUSH
|
|
DN_MSVC_WARNING_DISABLE(6293) // Ill-defined for-loop
|
|
DN_MSVC_WARNING_DISABLE(6385) // Reading invalid data from 'temp.data' unsigned overflow is valid for loop termination
|
|
for (DN_USize temp_index = temp.size - 1; temp_index < temp.size; temp_index--) {
|
|
char ch = temp.data[temp_index];
|
|
result.data[result.size++] = ch;
|
|
}
|
|
DN_MSVC_WARNING_POP
|
|
}
|
|
|
|
return result;
|
|
}
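// NOTE: Example (illustrative sketch, disabled; not part of the library) /////////////////////////
// Digit-grouping a number into the fixed-size stack string returned by value; passing 0 as the
// separator disables grouping.
#if 0
    DN_CVTU64Str8 grouped = DN_CVT_U64ToStr8(1234567, ','); // grouped.data => "1,234,567"
    DN_CVTU64Str8 plain   = DN_CVT_U64ToStr8(1234567, 0);   // plain.data   => "1234567"
#endif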
DN_API DN_CVTU64ByteSize DN_CVT_U64ToByteSize(uint64_t bytes, DN_CVTU64ByteSizeType desired_type)
|
|
{
|
|
DN_CVTU64ByteSize result = {};
|
|
result.bytes = DN_CAST(DN_F64) bytes;
|
|
if (!DN_Check(desired_type != DN_CVTU64ByteSizeType_Count)) {
|
|
result.suffix = DN_CVT_U64ByteSizeTypeString(result.type);
|
|
return result;
|
|
}
|
|
|
|
if (desired_type == DN_CVTU64ByteSizeType_Auto)
|
|
for (; result.type < DN_CVTU64ByteSizeType_Count && result.bytes >= 1024.0; result.type = DN_CAST(DN_CVTU64ByteSizeType)(DN_CAST(DN_USize) result.type + 1))
|
|
result.bytes /= 1024.0;
|
|
else
|
|
for (; result.type < desired_type; result.type = DN_CAST(DN_CVTU64ByteSizeType)(DN_CAST(DN_USize) result.type + 1))
|
|
result.bytes /= 1024.0;
|
|
|
|
result.suffix = DN_CVT_U64ByteSizeTypeString(result.type);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_CVT_U64ToByteSizeStr8(DN_Arena *arena, uint64_t bytes, DN_CVTU64ByteSizeType desired_type)
|
|
{
|
|
DN_CVTU64ByteSize byte_size = DN_CVT_U64ToByteSize(bytes, desired_type);
|
|
DN_Str8 result = DN_Str8_InitF(arena, "%.2f%.*s", byte_size.bytes, DN_STR_FMT(byte_size.suffix));
|
|
return result;
|
|
}
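// NOTE: Example (illustrative sketch, disabled; not part of the library) /////////////////////////
// Formats a byte count into a human-readable size; 'Auto' walks up the units while the value is
// >= 1024. Assumes a `DN_Arena arena` already exists in the caller.
#if 0
    DN_Str8 text = DN_CVT_U64ToByteSizeStr8(&arena, 1572864 /*1.5 MiB*/, DN_CVTU64ByteSizeType_Auto); // => "1.50MiB"
#endif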
DN_API DN_Str8 DN_CVT_U64ByteSizeTypeString(DN_CVTU64ByteSizeType type)
|
|
{
|
|
DN_Str8 result = DN_STR8("");
|
|
switch (type) {
|
|
case DN_CVTU64ByteSizeType_B: result = DN_STR8("B"); break;
|
|
case DN_CVTU64ByteSizeType_KiB: result = DN_STR8("KiB"); break;
|
|
case DN_CVTU64ByteSizeType_MiB: result = DN_STR8("MiB"); break;
|
|
case DN_CVTU64ByteSizeType_GiB: result = DN_STR8("GiB"); break;
|
|
case DN_CVTU64ByteSizeType_TiB: result = DN_STR8("TiB"); break;
|
|
case DN_CVTU64ByteSizeType_Count: result = DN_STR8(""); break;
|
|
case DN_CVTU64ByteSizeType_Auto: result = DN_STR8(""); break;
|
|
}
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_CVT_U64ToAge(DN_Arena *arena, DN_U64 age_s, DN_CVTU64AgeUnit unit)
|
|
{
|
|
DN_Str8 result = {};
|
|
if (!arena)
|
|
return result;
|
|
|
|
char buffer[512];
|
|
DN_Arena stack_arena = DN_Arena_InitFromBuffer(buffer, sizeof(buffer), DN_ArenaFlags_NoPoison);
|
|
DN_Str8Builder builder = DN_Str8Builder_Init(&stack_arena);
|
|
DN_U64 remainder = age_s;
|
|
|
|
if (unit & DN_CVTU64AgeUnit_Year) {
|
|
DN_USize value = remainder / DN_YearsToSec(1);
|
|
remainder -= DN_YearsToSec(value);
|
|
if (value)
|
|
DN_Str8Builder_AppendF(&builder, "%s%zuyr", builder.string_size ? " " : "", value);
|
|
}
|
|
|
|
if (unit & DN_CVTU64AgeUnit_Week) {
|
|
DN_USize value = remainder / DN_WeeksToSec(1);
|
|
remainder -= DN_WeeksToSec(value);
|
|
if (value)
|
|
DN_Str8Builder_AppendF(&builder, "%s%zuw", builder.string_size ? " " : "", value);
|
|
}
|
|
|
|
if (unit & DN_CVTU64AgeUnit_Day) {
|
|
DN_USize value = remainder / DN_DaysToSec(1);
|
|
remainder -= DN_DaysToSec(value);
|
|
if (value)
|
|
DN_Str8Builder_AppendF(&builder, "%s%zud", builder.string_size ? " " : "", value);
|
|
}
|
|
|
|
if (unit & DN_CVTU64AgeUnit_Hr) {
|
|
DN_USize value = remainder / DN_HoursToSec(1);
|
|
remainder -= DN_HoursToSec(value);
|
|
if (value)
|
|
DN_Str8Builder_AppendF(&builder, "%s%zuh", builder.string_size ? " " : "", value);
|
|
}
|
|
|
|
if (unit & DN_CVTU64AgeUnit_Min) {
|
|
DN_USize value = remainder / DN_MinutesToSec(1);
|
|
remainder -= DN_MinutesToSec(value);
|
|
if (value)
|
|
DN_Str8Builder_AppendF(&builder, "%s%zum", builder.string_size ? " " : "", value);
|
|
}
|
|
|
|
if (unit & DN_CVTU64AgeUnit_Sec) {
|
|
DN_USize value = remainder;
|
|
if (value || builder.string_size == 0)
|
|
DN_Str8Builder_AppendF(&builder, "%s%zus", builder.string_size ? " " : "", value);
|
|
}
|
|
|
|
result = DN_Str8Builder_Build(&builder, arena);
|
|
return result;
|
|
}
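// NOTE: Example (illustrative sketch, disabled; not part of the library) /////////////////////////
// Renders a duration in seconds using only the unit flags passed in. Assumes a `DN_Arena arena`
// already exists and that the age-unit values combine as bit flags (as the implementation above
// tests them).
#if 0
    DN_Str8 age = DN_CVT_U64ToAge(&arena,
                                  90061 /*seconds*/,
                                  DN_CVTU64AgeUnit_Day | DN_CVTU64AgeUnit_Hr | DN_CVTU64AgeUnit_Min | DN_CVTU64AgeUnit_Sec); // => "1d 1h 1m 1s"
#endif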
DN_API DN_Str8 DN_CVT_F64ToAge(DN_Arena *arena, DN_F64 age_s, DN_CVTU64AgeUnit unit)
|
|
{
|
|
DN_Str8 result = {};
|
|
if (!arena)
|
|
return result;
|
|
|
|
char buffer[256];
|
|
DN_Arena stack_arena = DN_Arena_InitFromBuffer(buffer, sizeof(buffer), DN_ArenaFlags_NoPoison);
|
|
DN_Str8Builder builder = DN_Str8Builder_Init(&stack_arena);
|
|
DN_F64 remainder = age_s;
|
|
|
|
if (unit & DN_CVTU64AgeUnit_Year) {
|
|
DN_F64 value = remainder / DN_CAST(DN_F64) DN_YearsToSec(1);
|
|
if (value >= 1.0) {
|
|
remainder -= DN_YearsToSec(value);
|
|
DN_Str8Builder_AppendF(&builder, "%s%.1fyr", builder.string_size ? " " : "", value);
|
|
}
|
|
}
|
|
|
|
if (unit & DN_CVTU64AgeUnit_Week) {
|
|
DN_F64 value = remainder / DN_CAST(DN_F64) DN_WeeksToSec(1);
|
|
if (value >= 1.0) {
|
|
remainder -= DN_WeeksToSec(value);
|
|
DN_Str8Builder_AppendF(&builder, "%s%.1fw", builder.string_size ? " " : "", value);
|
|
}
|
|
}
|
|
|
|
if (unit & DN_CVTU64AgeUnit_Day) {
|
|
DN_F64 value = remainder / DN_CAST(DN_F64) DN_DaysToSec(1);
|
|
if (value >= 1.0) {
|
|
remainder -= DN_DaysToSec(value);
|
|
DN_Str8Builder_AppendF(&builder, "%s%.1fd", builder.string_size ? " " : "", value);
|
|
}
|
|
}
|
|
|
|
if (unit & DN_CVTU64AgeUnit_Hr) {
|
|
DN_F64 value = remainder / DN_CAST(DN_F64) DN_HoursToSec(1);
|
|
if (value >= 1.0) {
|
|
remainder -= DN_HoursToSec(value);
|
|
DN_Str8Builder_AppendF(&builder, "%s%.1fh", builder.string_size ? " " : "", value);
|
|
}
|
|
}
|
|
|
|
if (unit & DN_CVTU64AgeUnit_Min) {
|
|
DN_F64 value = remainder / DN_CAST(DN_F64) DN_MinutesToSec(1);
|
|
if (value >= 1.0) {
|
|
remainder -= DN_MinutesToSec(value);
|
|
DN_Str8Builder_AppendF(&builder, "%s%.1fm", builder.string_size ? " " : "", value);
|
|
}
|
|
}
|
|
|
|
if (unit & DN_CVTU64AgeUnit_Sec) {
|
|
DN_F64 value = remainder;
|
|
DN_Str8Builder_AppendF(&builder, "%s%.1fs", builder.string_size ? " " : "", value);
|
|
}
|
|
|
|
result = DN_Str8Builder_Build(&builder, arena);
|
|
return result;
|
|
}
|
|
|
|
DN_API uint64_t DN_CVT_HexToU64(DN_Str8 hex)
|
|
{
|
|
DN_Str8 real_hex = DN_Str8_TrimPrefix(DN_Str8_TrimPrefix(hex, DN_STR8("0x")), DN_STR8("0X"));
|
|
DN_USize max_hex_size = sizeof(uint64_t) * 2 /*hex chars per byte*/;
|
|
DN_Assert(real_hex.size <= max_hex_size);
|
|
|
|
DN_USize size = DN_Min(max_hex_size, real_hex.size);
|
|
uint64_t result = 0;
|
|
for (DN_USize index = 0; index < size; index++) {
|
|
char ch = real_hex.data[index];
|
|
DN_CharHexToU8 val = DN_Char_HexToU8(ch);
|
|
if (!val.success)
|
|
break;
|
|
result = (result << 4) | val.value;
|
|
}
|
|
return result;
|
|
}
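// NOTE: Example (illustrative sketch, disabled; not part of the library) /////////////////////////
// Parses a hex string, with or without the 0x/0X prefix, into a 64-bit value.
#if 0
    uint64_t a = DN_CVT_HexToU64(DN_STR8("0xDEADBEEF")); // => 0xDEADBEEF
    uint64_t b = DN_CVT_HexToU64(DN_STR8("ff"));         // => 255
#endif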
DN_API DN_Str8 DN_CVT_U64ToHex(DN_Arena *arena, uint64_t number, uint32_t flags)
|
|
{
|
|
DN_Str8 prefix = {};
|
|
if ((flags & DN_CVTHexU64Str8Flags_0xPrefix))
|
|
prefix = DN_STR8("0x");
|
|
|
|
char const *fmt = (flags & DN_CVTHexU64Str8Flags_UppercaseHex) ? "%I64X" : "%I64x";
|
|
DN_USize required_size = DN_CStr8_FSize(fmt, number) + prefix.size;
|
|
DN_Str8 result = DN_Str8_Alloc(arena, required_size, DN_ZeroMem_No);
|
|
|
|
if (DN_Str8_HasData(result)) {
|
|
DN_Memcpy(result.data, prefix.data, prefix.size);
|
|
int space = DN_CAST(int) DN_Max((result.size - prefix.size) + 1, 0); /*null-terminator*/
|
|
DN_SNPrintF(result.data + prefix.size, space, fmt, number);
|
|
}
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_CVTU64HexStr8 DN_CVT_U64ToHexStr8(uint64_t number, DN_CVTHexU64Str8Flags flags)
|
|
{
|
|
DN_Str8 prefix = {};
|
|
if (flags & DN_CVTHexU64Str8Flags_0xPrefix)
|
|
prefix = DN_STR8("0x");
|
|
|
|
DN_CVTU64HexStr8 result = {};
|
|
DN_Memcpy(result.data, prefix.data, prefix.size);
|
|
result.size += DN_CAST(int8_t) prefix.size;
|
|
|
|
char const *fmt = (flags & DN_CVTHexU64Str8Flags_UppercaseHex) ? "%I64X" : "%I64x";
|
|
int size = DN_SNPrintF(result.data + result.size, DN_ArrayCountU(result.data) - result.size, fmt, number);
|
|
result.size += DN_CAST(uint8_t) size;
|
|
DN_Assert(result.size < DN_ArrayCountU(result.data));
|
|
|
|
// NOTE: snprintf returns the size required by the format string
// irrespective of whether there's space or not, but it always null
// terminates, so the last byte is wasted.
|
|
result.size = DN_Min(result.size, DN_ArrayCountU(result.data) - 1);
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_CVT_BytesToHexPtr(void const *src, DN_USize src_size, char *dest, DN_USize dest_size)
|
|
{
|
|
if (!src || !dest)
|
|
return false;
|
|
|
|
if (!DN_Check(dest_size >= src_size * 2))
|
|
return false;
|
|
|
|
char const *HEX = "0123456789abcdef";
|
|
unsigned char const *src_u8 = DN_CAST(unsigned char const *) src;
|
|
for (DN_USize src_index = 0, dest_index = 0; src_index < src_size; src_index++) {
|
|
char byte = src_u8[src_index];
|
|
char hex01 = (byte >> 4) & 0b1111;
|
|
char hex02 = (byte >> 0) & 0b1111;
|
|
dest[dest_index++] = HEX[(int)hex01];
|
|
dest[dest_index++] = HEX[(int)hex02];
|
|
}
|
|
|
|
return true;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_CVT_BytesToHex(DN_Arena *arena, void const *src, DN_USize size)
|
|
{
|
|
DN_Str8 result = {};
|
|
if (!src || size <= 0)
|
|
return result;
|
|
|
|
result = DN_Str8_Alloc(arena, size * 2, DN_ZeroMem_No);
|
|
result.data[result.size] = 0;
|
|
bool converted = DN_CVT_BytesToHexPtr(src, size, result.data, result.size);
|
|
DN_Assert(converted);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_USize DN_CVT_HexToBytesPtrUnchecked(DN_Str8 hex, void *dest, DN_USize dest_size)
|
|
{
|
|
DN_USize result = 0;
|
|
unsigned char *dest_u8 = DN_CAST(unsigned char *) dest;
|
|
|
|
for (DN_USize hex_index = 0; hex_index < hex.size; hex_index += 2, result += 1) {
|
|
char hex01 = hex.data[hex_index];
|
|
char hex02 = (hex_index + 1 < hex.size) ? hex.data[hex_index + 1] : 0;
|
|
char bit4_01 = DN_Char_HexToU8(hex01).value;
|
|
char bit4_02 = DN_Char_HexToU8(hex02).value;
|
|
char byte = (bit4_01 << 4) | (bit4_02 << 0);
|
|
dest_u8[result] = byte;
|
|
}
|
|
|
|
DN_Assert(result <= dest_size);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_USize DN_CVT_HexToBytesPtr(DN_Str8 hex, void *dest, DN_USize dest_size)
|
|
{
|
|
hex = DN_Str8_TrimPrefix(hex, DN_STR8("0x"));
|
|
hex = DN_Str8_TrimPrefix(hex, DN_STR8("0X"));
|
|
|
|
DN_USize result = 0;
|
|
if (!DN_Str8_HasData(hex))
|
|
return result;
|
|
|
|
// NOTE: Trimmed hex can be "0xf" -> "f" or "0xAB" -> "AB"
|
|
// Either way, the size can be odd or even, hence we round up to the nearest
|
|
// multiple of two to ensure that we calculate the min buffer size correctly.
|
|
DN_USize hex_size_rounded_up = hex.size + (hex.size % 2);
|
|
DN_USize min_buffer_size = hex_size_rounded_up / 2;
|
|
if (hex.size <= 0 || !DN_Check(dest_size >= min_buffer_size))
|
|
return result;
|
|
|
|
result = DN_CVT_HexToBytesPtrUnchecked(hex, dest, dest_size);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_CVT_HexToBytesUnchecked(DN_Arena *arena, DN_Str8 hex)
|
|
{
|
|
DN_USize hex_size_rounded_up = hex.size + (hex.size % 2);
|
|
DN_Str8 result = DN_Str8_Alloc(arena, (hex_size_rounded_up / 2), DN_ZeroMem_No);
|
|
if (result.data) {
|
|
DN_USize bytes_written = DN_CVT_HexToBytesPtr(hex, result.data, result.size);
|
|
DN_Assert(bytes_written == result.size);
|
|
}
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_CVT_HexToBytes(DN_Arena *arena, DN_Str8 hex)
|
|
{
|
|
hex = DN_Str8_TrimPrefix(hex, DN_STR8("0x"));
|
|
hex = DN_Str8_TrimPrefix(hex, DN_STR8("0X"));
|
|
|
|
DN_Str8 result = {};
|
|
if (!DN_Str8_HasData(hex))
|
|
return result;
|
|
|
|
if (!DN_Check(DN_Str8_IsAll(hex, DN_Str8IsAll_Hex)))
|
|
return result;
|
|
|
|
result = DN_CVT_HexToBytesUnchecked(arena, hex);
|
|
return result;
|
|
}
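// NOTE: Example (illustrative sketch, disabled; not part of the library) /////////////////////////
// Round-trips a byte buffer through its lowercase hex representation. Assumes a `DN_Arena arena`
// already exists in the caller.
#if 0
    unsigned char payload[] = {0xDE, 0xAD, 0xBE, 0xEF};
    DN_Str8 hex   = DN_CVT_BytesToHex(&arena, payload, sizeof(payload)); // => "deadbeef"
    DN_Str8 bytes = DN_CVT_HexToBytes(&arena, hex);                      // => the original 4 bytes
#endif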
// DN: Single header generator inlined this file => #include "Base/dn_base_mem.cpp"
|
|
#define DN_BASE_MEM_CPP
|
|
|
|
static DN_ArenaBlock *DN_Arena_BlockInitFromMemFuncs_(DN_U64 reserve, DN_U64 commit, bool track_alloc, bool alloc_can_leak, DN_ArenaMemFuncs mem_funcs)
|
|
{
|
|
DN_ArenaBlock *result = nullptr;
|
|
switch (mem_funcs.type) {
|
|
case DN_ArenaMemFuncType_Nil:
|
|
break;
|
|
|
|
case DN_ArenaMemFuncType_Basic: {
|
|
DN_AssertF(reserve > DN_ARENA_HEADER_SIZE, "%I64u > %I64u", reserve, DN_ARENA_HEADER_SIZE);
|
|
result = DN_CAST(DN_ArenaBlock *) mem_funcs.basic_alloc(reserve);
|
|
if (!result)
|
|
return result;
|
|
|
|
result->used = DN_ARENA_HEADER_SIZE;
|
|
result->commit = reserve;
|
|
result->reserve = reserve;
|
|
} break;
|
|
|
|
case DN_ArenaMemFuncType_VMem: {
|
|
DN_AssertF(mem_funcs.vmem_page_size, "Page size must be set to a non-zero, power of two value");
|
|
DN_Assert(DN_IsPowerOfTwo(mem_funcs.vmem_page_size));
|
|
|
|
DN_USize const page_size = mem_funcs.vmem_page_size;
|
|
DN_U64 real_reserve = reserve ? reserve : DN_ARENA_RESERVE_SIZE;
|
|
DN_U64 real_commit = commit ? commit : DN_ARENA_COMMIT_SIZE;
|
|
real_reserve = DN_AlignUpPowerOfTwo(real_reserve, page_size);
|
|
real_commit = DN_Min(DN_AlignUpPowerOfTwo(real_commit, page_size), real_reserve);
|
|
DN_AssertF(DN_ARENA_HEADER_SIZE < real_commit && real_commit <= real_reserve, "%I64u < %I64u <= %I64u", DN_ARENA_HEADER_SIZE, real_commit, real_reserve);
|
|
|
|
DN_MemCommit mem_commit = real_reserve == real_commit ? DN_MemCommit_Yes : DN_MemCommit_No;
|
|
result = DN_CAST(DN_ArenaBlock *) mem_funcs.vmem_reserve(real_reserve, mem_commit, DN_MemPage_ReadWrite);
|
|
if (!result)
|
|
return result;
|
|
|
|
if (mem_commit == DN_MemCommit_No && !mem_funcs.vmem_commit(result, real_commit, DN_MemPage_ReadWrite)) {
|
|
mem_funcs.vmem_release(result, real_reserve);
|
|
return result;
|
|
}
|
|
|
|
result->used = DN_ARENA_HEADER_SIZE;
|
|
result->commit = real_commit;
|
|
result->reserve = real_reserve;
|
|
} break;
|
|
}
|
|
|
|
if (track_alloc && result)
|
|
DN_Debug_TrackAlloc(result, result->reserve, alloc_can_leak);
|
|
|
|
return result;
|
|
}
|
|
|
|
static DN_ArenaBlock *DN_Arena_BlockInitFlagsFromMemFuncs_(DN_U64 reserve, DN_U64 commit, DN_ArenaFlags flags, DN_ArenaMemFuncs mem_funcs)
|
|
{
|
|
bool track_alloc = (flags & DN_ArenaFlags_NoAllocTrack) == 0;
|
|
bool alloc_can_leak = flags & DN_ArenaFlags_AllocCanLeak;
|
|
DN_ArenaBlock *result = DN_Arena_BlockInitFromMemFuncs_(reserve, commit, track_alloc, alloc_can_leak, mem_funcs);
|
|
if (result && ((flags & DN_ArenaFlags_NoPoison) == 0))
|
|
DN_ASAN_PoisonMemoryRegion(DN_CAST(char *) result + DN_ARENA_HEADER_SIZE, result->commit - DN_ARENA_HEADER_SIZE);
|
|
return result;
|
|
}
|
|
|
|
static void DN_Arena_UpdateStatsOnNewBlock_(DN_Arena *arena, DN_ArenaBlock const *block)
|
|
{
|
|
DN_Assert(arena);
|
|
if (block) {
|
|
arena->stats.info.used += block->used;
|
|
arena->stats.info.commit += block->commit;
|
|
arena->stats.info.reserve += block->reserve;
|
|
arena->stats.info.blocks += 1;
|
|
|
|
arena->stats.hwm.used = DN_Max(arena->stats.hwm.used, arena->stats.info.used);
|
|
arena->stats.hwm.commit = DN_Max(arena->stats.hwm.commit, arena->stats.info.commit);
|
|
arena->stats.hwm.reserve = DN_Max(arena->stats.hwm.reserve, arena->stats.info.reserve);
|
|
arena->stats.hwm.blocks = DN_Max(arena->stats.hwm.blocks, arena->stats.info.blocks);
|
|
}
|
|
}
|
|
|
|
DN_API DN_Arena DN_Arena_InitFromBuffer(void *buffer, DN_USize size, DN_ArenaFlags flags)
|
|
{
|
|
DN_Assert(buffer);
|
|
DN_AssertF(DN_ARENA_HEADER_SIZE < size, "Buffer (%zu bytes) too small, need at least %zu bytes to store arena metadata", size, DN_ARENA_HEADER_SIZE);
|
|
DN_AssertF(DN_IsPowerOfTwo(size), "Buffer (%zu bytes) must be a power-of-two", size);
|
|
|
|
// NOTE: Init block
|
|
DN_ArenaBlock *block = DN_CAST(DN_ArenaBlock *) buffer;
|
|
block->commit = size;
|
|
block->reserve = size;
|
|
block->used = DN_ARENA_HEADER_SIZE;
|
|
if (block && ((flags & DN_ArenaFlags_NoPoison) == 0))
|
|
DN_ASAN_PoisonMemoryRegion(DN_CAST(char *) block + DN_ARENA_HEADER_SIZE, block->commit - DN_ARENA_HEADER_SIZE);
|
|
|
|
DN_Arena result = {};
|
|
result.flags = flags | DN_ArenaFlags_NoGrow | DN_ArenaFlags_NoAllocTrack | DN_ArenaFlags_AllocCanLeak | DN_ArenaFlags_UserBuffer;
|
|
result.curr = block;
|
|
DN_Arena_UpdateStatsOnNewBlock_(&result, result.curr);
|
|
return result;
|
|
}
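// NOTE: Example (illustrative sketch, disabled; not part of the library) /////////////////////////
// A fixed-size arena backed by stack memory: the buffer must be a power-of-two size larger than
// the arena header, and the arena cannot grow beyond it.
#if 0
    char buffer[4096];
    DN_Arena arena = DN_Arena_InitFromBuffer(buffer, sizeof(buffer), DN_ArenaFlags_NoPoison);
    int *values    = DN_Arena_NewArray(&arena, int, 64, DN_ZeroMem_Yes);
#endif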
DN_API DN_Arena DN_Arena_InitFromMemFuncs(DN_U64 reserve, DN_U64 commit, DN_ArenaFlags flags, DN_ArenaMemFuncs mem_funcs)
|
|
{
|
|
DN_Arena result = {};
|
|
result.flags = flags;
|
|
result.mem_funcs = mem_funcs;
|
|
result.flags |= DN_ArenaFlags_MemFuncs;
|
|
result.curr = DN_Arena_BlockInitFlagsFromMemFuncs_(reserve, commit, flags, mem_funcs);
|
|
DN_Arena_UpdateStatsOnNewBlock_(&result, result.curr);
|
|
return result;
|
|
}
|
|
|
|
static void DN_Arena_BlockDeinit_(DN_Arena const *arena, DN_ArenaBlock *block)
|
|
{
|
|
DN_USize release_size = block->reserve;
|
|
if (DN_Bit_IsNotSet(arena->flags, DN_ArenaFlags_NoAllocTrack))
|
|
DN_Debug_TrackDealloc(block);
|
|
DN_ASAN_UnpoisonMemoryRegion(block, block->commit);
|
|
if (arena->flags & DN_ArenaFlags_MemFuncs) {
|
|
if (arena->mem_funcs.type == DN_ArenaMemFuncType_Basic)
|
|
arena->mem_funcs.basic_dealloc(block);
|
|
else
|
|
arena->mem_funcs.vmem_release(block, release_size);
|
|
}
|
|
}
|
|
|
|
DN_API void DN_Arena_Deinit(DN_Arena *arena)
|
|
{
|
|
for (DN_ArenaBlock *block = arena ? arena->curr : nullptr; block;) {
|
|
DN_ArenaBlock *block_to_free = block;
|
|
block = block->prev;
|
|
DN_Arena_BlockDeinit_(arena, block_to_free);
|
|
}
|
|
if (arena)
|
|
*arena = {};
|
|
}
|
|
|
|
DN_API bool DN_Arena_CommitTo(DN_Arena *arena, DN_U64 pos)
|
|
{
|
|
if (!arena || !arena->curr)
|
|
return false;
|
|
|
|
DN_ArenaBlock *curr = arena->curr;
|
|
if (pos <= curr->commit)
|
|
return true;
|
|
|
|
DN_U64 real_pos = pos;
|
|
if (!DN_Check(pos <= curr->reserve))
|
|
real_pos = curr->reserve;
|
|
|
|
DN_Assert(arena->mem_funcs.vmem_page_size);
|
|
DN_USize end_commit = DN_AlignUpPowerOfTwo(real_pos, arena->mem_funcs.vmem_page_size);
|
|
DN_USize commit_size = end_commit - curr->commit;
|
|
char *commit_ptr = DN_CAST(char *) curr + curr->commit;
|
|
if (!arena->mem_funcs.vmem_commit(commit_ptr, commit_size, DN_MemPage_ReadWrite))
|
|
return false;
|
|
|
|
bool poison = DN_ASAN_POISON && ((arena->flags & DN_ArenaFlags_NoPoison) == 0);
|
|
if (poison)
|
|
DN_ASAN_PoisonMemoryRegion(commit_ptr, commit_size);
|
|
|
|
curr->commit = end_commit;
|
|
return true;
|
|
}
|
|
|
|
DN_API bool DN_Arena_Commit(DN_Arena *arena, DN_U64 size)
|
|
{
|
|
if (!arena || !arena->curr)
|
|
return false;
|
|
DN_U64 pos = DN_Min(arena->curr->reserve, arena->curr->commit + size);
|
|
bool result = DN_Arena_CommitTo(arena, pos);
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_Arena_Grow(DN_Arena *arena, DN_U64 reserve, DN_U64 commit)
|
|
{
|
|
if (arena->flags & (DN_ArenaFlags_NoGrow | DN_ArenaFlags_UserBuffer))
|
|
return false;
|
|
|
|
bool result = false;
|
|
DN_ArenaBlock *new_block = DN_Arena_BlockInitFlagsFromMemFuncs_(reserve, commit, arena->flags, arena->mem_funcs);
|
|
if (new_block) {
|
|
result = true;
|
|
new_block->prev = arena->curr;
|
|
arena->curr = new_block;
|
|
new_block->reserve_sum = new_block->prev->reserve_sum + new_block->prev->reserve;
|
|
DN_Arena_UpdateStatsOnNewBlock_(arena, arena->curr);
|
|
}
|
|
return result;
|
|
}
|
|
|
|
DN_API void *DN_Arena_Alloc(DN_Arena *arena, DN_U64 size, uint8_t align, DN_ZeroMem zero_mem)
|
|
{
|
|
if (!arena)
|
|
return nullptr;
|
|
|
|
if (!arena->curr) {
|
|
arena->curr = DN_Arena_BlockInitFlagsFromMemFuncs_(DN_ARENA_RESERVE_SIZE, DN_ARENA_COMMIT_SIZE, arena->flags, arena->mem_funcs);
|
|
DN_Arena_UpdateStatsOnNewBlock_(arena, arena->curr);
|
|
}
|
|
|
|
if (!arena->curr)
|
|
return nullptr;
|
|
|
|
try_alloc_again:
|
|
DN_ArenaBlock *curr = arena->curr;
|
|
bool poison = DN_ASAN_POISON && ((arena->flags & DN_ArenaFlags_NoPoison) == 0);
|
|
uint8_t real_align = poison ? DN_Max(align, DN_ASAN_POISON_ALIGNMENT) : align;
|
|
DN_U64 offset_pos = DN_AlignUpPowerOfTwo(curr->used, real_align) + (poison ? DN_ASAN_POISON_GUARD_SIZE : 0);
|
|
DN_U64 end_pos = offset_pos + size;
|
|
DN_U64 alloc_size = end_pos - curr->used;
|
|
|
|
if (end_pos > curr->reserve) {
|
|
if (arena->flags & (DN_ArenaFlags_NoGrow | DN_ArenaFlags_UserBuffer))
|
|
return nullptr;
|
|
DN_USize new_reserve = DN_Max(DN_ARENA_HEADER_SIZE + alloc_size, DN_ARENA_RESERVE_SIZE);
|
|
DN_USize new_commit = DN_Max(DN_ARENA_HEADER_SIZE + alloc_size, DN_ARENA_COMMIT_SIZE);
|
|
if (!DN_Arena_Grow(arena, new_reserve, new_commit))
|
|
return nullptr;
|
|
goto try_alloc_again;
|
|
}
|
|
|
|
DN_USize prev_arena_commit = curr->commit;
|
|
if (end_pos > curr->commit) {
|
|
DN_Assert(arena->mem_funcs.vmem_page_size);
|
|
DN_Assert(arena->mem_funcs.type == DN_ArenaMemFuncType_VMem);
|
|
DN_Assert((arena->flags & DN_ArenaFlags_UserBuffer) == 0);
|
|
DN_USize end_commit = DN_AlignUpPowerOfTwo(end_pos, arena->mem_funcs.vmem_page_size);
|
|
DN_USize commit_size = end_commit - curr->commit;
|
|
char *commit_ptr = DN_CAST(char *) curr + curr->commit;
|
|
if (!arena->mem_funcs.vmem_commit(commit_ptr, commit_size, DN_MemPage_ReadWrite))
|
|
return nullptr;
|
|
if (poison)
|
|
DN_ASAN_PoisonMemoryRegion(commit_ptr, commit_size);
|
|
curr->commit = end_commit;
|
|
arena->stats.info.commit += commit_size;
|
|
arena->stats.hwm.commit = DN_Max(arena->stats.hwm.commit, arena->stats.info.commit);
|
|
}
|
|
|
|
void *result = DN_CAST(char *) curr + offset_pos;
|
|
curr->used += alloc_size;
|
|
arena->stats.info.used += alloc_size;
|
|
arena->stats.hwm.used = DN_Max(arena->stats.hwm.used, arena->stats.info.used);
|
|
DN_ASAN_UnpoisonMemoryRegion(result, size);
|
|
|
|
if (zero_mem == DN_ZeroMem_Yes) {
|
|
DN_USize reused_bytes = DN_Min(prev_arena_commit - offset_pos, size);
|
|
DN_Memset(result, 0, reused_bytes);
|
|
}
|
|
|
|
DN_Assert(arena->stats.hwm.used >= arena->stats.info.used);
|
|
DN_Assert(arena->stats.hwm.commit >= arena->stats.info.commit);
|
|
DN_Assert(arena->stats.hwm.reserve >= arena->stats.info.reserve);
|
|
DN_Assert(arena->stats.hwm.blocks >= arena->stats.info.blocks);
|
|
return result;
|
|
}
|
|
|
|
DN_API void *DN_Arena_AllocContiguous(DN_Arena *arena, DN_U64 size, uint8_t align, DN_ZeroMem zero_mem)
|
|
{
|
|
DN_ArenaFlags prev_flags = arena->flags;
|
|
arena->flags |= (DN_ArenaFlags_NoGrow | DN_ArenaFlags_NoPoison);
|
|
void *memory = DN_Arena_Alloc(arena, size, align, zero_mem);
|
|
arena->flags = prev_flags;
|
|
return memory;
|
|
}
|
|
|
|
DN_API void *DN_Arena_Copy(DN_Arena *arena, void const *data, DN_U64 size, uint8_t align)
|
|
{
|
|
if (!arena || !data || size == 0)
|
|
return nullptr;
|
|
void *result = DN_Arena_Alloc(arena, size, align, DN_ZeroMem_No);
|
|
if (result)
|
|
DN_Memcpy(result, data, size);
|
|
return result;
|
|
}
|
|
|
|
DN_API void DN_Arena_PopTo(DN_Arena *arena, DN_U64 init_used)
|
|
{
|
|
if (!arena || !arena->curr)
|
|
return;
|
|
DN_U64 used = DN_Max(DN_ARENA_HEADER_SIZE, init_used);
|
|
DN_ArenaBlock *curr = arena->curr;
|
|
while (curr->reserve_sum >= used) {
|
|
DN_ArenaBlock *block_to_free = curr;
|
|
arena->stats.info.used -= block_to_free->used;
|
|
arena->stats.info.commit -= block_to_free->commit;
|
|
arena->stats.info.reserve -= block_to_free->reserve;
|
|
arena->stats.info.blocks -= 1;
|
|
if (arena->flags & DN_ArenaFlags_UserBuffer)
|
|
break;
|
|
curr = curr->prev;
|
|
DN_Arena_BlockDeinit_(arena, block_to_free);
|
|
}
|
|
|
|
arena->stats.info.used -= curr->used;
|
|
arena->curr = curr;
|
|
curr->used = used - curr->reserve_sum;
|
|
char *poison_ptr = (char *)curr + DN_AlignUpPowerOfTwo(curr->used, DN_ASAN_POISON_ALIGNMENT);
|
|
DN_USize poison_size = ((char *)curr + curr->commit) - poison_ptr;
|
|
DN_ASAN_PoisonMemoryRegion(poison_ptr, poison_size);
|
|
arena->stats.info.used += curr->used;
|
|
}
|
|
|
|
DN_API void DN_Arena_Pop(DN_Arena *arena, DN_U64 amount)
|
|
{
|
|
DN_ArenaBlock *curr = arena->curr;
|
|
DN_USize used_sum = curr->reserve_sum + curr->used;
|
|
if (!DN_Check(amount <= used_sum))
|
|
amount = used_sum;
|
|
DN_USize pop_to = used_sum - amount;
|
|
DN_Arena_PopTo(arena, pop_to);
|
|
}
|
|
|
|
DN_API DN_U64 DN_Arena_Pos(DN_Arena const *arena)
|
|
{
|
|
DN_U64 result = (arena && arena->curr) ? arena->curr->reserve_sum + arena->curr->used : 0;
|
|
return result;
|
|
}
|
|
|
|
DN_API void DN_Arena_Clear(DN_Arena *arena)
|
|
{
|
|
DN_Arena_PopTo(arena, 0);
|
|
}
|
|
|
|
DN_API bool DN_Arena_OwnsPtr(DN_Arena const *arena, void *ptr)
|
|
{
|
|
bool result = false;
|
|
uintptr_t uint_ptr = DN_CAST(uintptr_t) ptr;
|
|
for (DN_ArenaBlock const *block = arena ? arena->curr : nullptr; !result && block; block = block->prev) {
|
|
uintptr_t begin = DN_CAST(uintptr_t) block + DN_ARENA_HEADER_SIZE;
|
|
uintptr_t end = begin + block->reserve;
|
|
result = uint_ptr >= begin && uint_ptr <= end;
|
|
}
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_ArenaStats DN_Arena_SumStatsArray(DN_ArenaStats const *array, DN_USize size)
|
|
{
|
|
DN_ArenaStats result = {};
|
|
for (DN_ForItSize(it, DN_ArenaStats const, array, size)) {
|
|
DN_ArenaStats stats = *it.data;
|
|
result.info.used += stats.info.used;
|
|
result.info.commit += stats.info.commit;
|
|
result.info.reserve += stats.info.reserve;
|
|
result.info.blocks += stats.info.blocks;
|
|
|
|
result.hwm.used = DN_Max(result.hwm.used, result.info.used);
|
|
result.hwm.commit = DN_Max(result.hwm.commit, result.info.commit);
|
|
result.hwm.reserve = DN_Max(result.hwm.reserve, result.info.reserve);
|
|
result.hwm.blocks = DN_Max(result.hwm.blocks, result.info.blocks);
|
|
}
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_ArenaStats DN_Arena_SumStats(DN_ArenaStats lhs, DN_ArenaStats rhs)
|
|
{
|
|
DN_ArenaStats array[] = {lhs, rhs};
|
|
DN_ArenaStats result = DN_Arena_SumStatsArray(array, DN_ArrayCountU(array));
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_ArenaStats DN_Arena_SumArenaArrayToStats(DN_Arena const *array, DN_USize size)
|
|
{
|
|
DN_ArenaStats result = {};
|
|
for (DN_USize index = 0; index < size; index++) {
|
|
DN_Arena const *arena = array + index;
|
|
result = DN_Arena_SumStats(result, arena->stats);
|
|
}
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_ArenaTempMem DN_Arena_TempMemBegin(DN_Arena *arena)
|
|
{
|
|
DN_ArenaTempMem result = {};
|
|
if (arena) {
|
|
DN_ArenaBlock *curr = arena->curr;
|
|
result = {arena, curr ? curr->reserve_sum + curr->used : 0};
|
|
}
|
|
return result;
|
|
};
|
|
|
|
DN_API void DN_Arena_TempMemEnd(DN_ArenaTempMem mem)
|
|
{
|
|
DN_Arena_PopTo(mem.arena, mem.used_sum);
|
|
};
|
|
|
|
DN_ArenaTempMemScope::DN_ArenaTempMemScope(DN_Arena *arena)
|
|
{
|
|
mem = DN_Arena_TempMemBegin(arena);
|
|
}
|
|
|
|
DN_ArenaTempMemScope::~DN_ArenaTempMemScope()
|
|
{
|
|
DN_Arena_TempMemEnd(mem);
|
|
}
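// NOTE: Example (illustrative sketch, disabled; not part of the library) /////////////////////////
// Scratch allocations that are rolled back automatically: everything allocated from the arena
// inside the scope is popped when the scope object destructs. Assumes a `DN_Arena arena` already
// exists in the caller.
#if 0
    {
        DN_ArenaTempMemScope scope(&arena);
        int *scratch = DN_Arena_NewArray(&arena, int, 256, DN_ZeroMem_No);
        // ... use scratch ...
    } // Arena position restored here; 'scratch' must not be used past this point
#endif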
// NOTE: DN_Pool ///////////////////////////////////////////////////////////////////////////////////
|
|
DN_API DN_Pool DN_Pool_Init(DN_Arena *arena, uint8_t align)
|
|
{
|
|
DN_Pool result = {};
|
|
if (arena) {
|
|
result.arena = arena;
|
|
result.align = align ? align : DN_POOL_DEFAULT_ALIGN;
|
|
}
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_Pool_IsValid(DN_Pool const *pool)
|
|
{
|
|
bool result = pool && pool->arena && pool->align;
|
|
return result;
|
|
}
|
|
|
|
DN_API void *DN_Pool_Alloc(DN_Pool *pool, DN_USize size)
|
|
{
|
|
void *result = nullptr;
|
|
if (!DN_Pool_IsValid(pool))
|
|
return result;
|
|
|
|
DN_USize const required_size = sizeof(DN_PoolSlot) + pool->align + size;
|
|
DN_USize const size_to_slot_offset = 5; // __lzcnt64(32) e.g. DN_PoolSlotSize_32B
|
|
DN_USize slot_index = 0;
|
|
if (required_size > 32) {
|
|
// NOTE: Round up if not PoT as the low bits are set.
|
|
DN_USize dist_to_next_msb = DN_CountLeadingZerosU64(required_size) + 1;
|
|
dist_to_next_msb -= DN_CAST(DN_USize)(!DN_IsPowerOfTwo(required_size));
|
|
|
|
DN_USize const register_size = sizeof(DN_USize) * 8;
|
|
DN_Assert(register_size >= dist_to_next_msb + size_to_slot_offset);
|
|
slot_index = register_size - dist_to_next_msb - size_to_slot_offset;
|
|
}
|
|
|
|
if (!DN_CheckF(slot_index < DN_PoolSlotSize_Count, "Chunk pool does not support the requested allocation size"))
|
|
return result;
|
|
|
|
DN_USize slot_size_in_bytes = 1ULL << (slot_index + size_to_slot_offset);
|
|
DN_Assert(required_size <= (slot_size_in_bytes << 0));
|
|
DN_Assert(required_size >= (slot_size_in_bytes >> 1));
|
|
|
|
DN_PoolSlot *slot = nullptr;
|
|
if (pool->slots[slot_index]) {
|
|
slot = pool->slots[slot_index];
|
|
pool->slots[slot_index] = slot->next;
|
|
DN_Memset(slot->data, 0, size);
|
|
DN_Assert(DN_IsPowerOfTwoAligned(slot->data, pool->align));
|
|
} else {
|
|
void *bytes = DN_Arena_Alloc(pool->arena, slot_size_in_bytes, alignof(DN_PoolSlot), DN_ZeroMem_Yes);
|
|
slot = DN_CAST(DN_PoolSlot *) bytes;
|
|
|
|
// NOTE: The raw pointer is rounded up to the next 'pool->align'-ed
|
|
// address ensuring at least 1 byte of padding between the raw pointer
|
|
// and the pointer given to the user and that the user pointer is
|
|
// aligned to the pool's alignment.
|
|
//
|
|
// This allows us to smuggle 1 byte behind the user pointer that has
|
|
// the offset to the original pointer.
|
|
slot->data = DN_CAST(void *) DN_AlignDownPowerOfTwo(DN_CAST(uintptr_t) slot + sizeof(DN_PoolSlot) + pool->align, pool->align);
|
|
|
|
uintptr_t offset_to_original_ptr = DN_CAST(uintptr_t) slot->data - DN_CAST(uintptr_t) bytes;
|
|
DN_Assert(slot->data > bytes);
|
|
DN_Assert(offset_to_original_ptr <= sizeof(DN_PoolSlot) + pool->align);
|
|
|
|
// NOTE: Store the offset to the original pointer behind the user's
|
|
// pointer.
|
|
char *offset_to_original_storage = DN_CAST(char *) slot->data - 1;
|
|
DN_Memcpy(offset_to_original_storage, &offset_to_original_ptr, 1);
|
|
}
|
|
|
|
// NOTE: Smuggle the slot type in the next pointer so that we know, when the
|
|
// pointer gets returned which free list to return the pointer to.
|
|
result = slot->data;
|
|
slot->next = DN_CAST(DN_PoolSlot *) slot_index;
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_Pool_AllocStr8FV(DN_Pool *pool, DN_FMT_ATTRIB char const *fmt, va_list args)
|
|
{
|
|
DN_Str8 result = {};
|
|
if (!DN_Pool_IsValid(pool))
|
|
return result;
|
|
|
|
DN_USize size_required = DN_CStr8_FVSize(fmt, args);
|
|
result.data = DN_CAST(char *) DN_Pool_Alloc(pool, size_required + 1);
|
|
if (result.data) {
|
|
result.size = size_required;
|
|
DN_VSNPrintF(result.data, DN_CAST(int)(result.size + 1), fmt, args);
|
|
}
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_Pool_AllocStr8F(DN_Pool *pool, DN_FMT_ATTRIB char const *fmt, ...)
|
|
{
|
|
va_list args;
|
|
va_start(args, fmt);
|
|
DN_Str8 result = DN_Pool_AllocStr8FV(pool, fmt, args);
|
|
va_end(args);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_Pool_AllocStr8Copy(DN_Pool *pool, DN_Str8 string)
|
|
{
|
|
DN_Str8 result = {};
|
|
if (!DN_Pool_IsValid(pool))
|
|
return result;
|
|
|
|
if (!DN_Str8_HasData(string))
|
|
return result;
|
|
|
|
char *data = DN_CAST(char *) DN_Pool_Alloc(pool, string.size + 1);
|
|
if (!data)
|
|
return result;
|
|
|
|
DN_Memcpy(data, string.data, string.size);
|
|
data[string.size] = 0;
|
|
result = DN_Str8_Init(data, string.size);
|
|
return result;
|
|
}
|
|
|
|
DN_API void DN_Pool_Dealloc(DN_Pool *pool, void *ptr)
|
|
{
|
|
if (!DN_Pool_IsValid(pool) || !ptr)
|
|
return;
|
|
|
|
DN_Assert(DN_Arena_OwnsPtr(pool->arena, ptr));
|
|
|
|
char const *one_byte_behind_ptr = DN_CAST(char *) ptr - 1;
|
|
DN_USize offset_to_original_ptr = 0;
|
|
DN_Memcpy(&offset_to_original_ptr, one_byte_behind_ptr, 1);
|
|
DN_Assert(offset_to_original_ptr <= sizeof(DN_PoolSlot) + pool->align);
|
|
|
|
char *original_ptr = DN_CAST(char *) ptr - offset_to_original_ptr;
|
|
DN_PoolSlot *slot = DN_CAST(DN_PoolSlot *) original_ptr;
|
|
DN_PoolSlotSize slot_index = DN_CAST(DN_PoolSlotSize)(DN_CAST(uintptr_t) slot->next);
|
|
DN_Assert(slot_index < DN_PoolSlotSize_Count);
|
|
|
|
slot->next = pool->slots[slot_index];
|
|
pool->slots[slot_index] = slot;
|
|
}
|
|
|
|
DN_API void *DN_Pool_Copy(DN_Pool *pool, void const *data, DN_U64 size, uint8_t align)
|
|
{
|
|
if (!pool || !data || size == 0)
|
|
return nullptr;
|
|
|
|
// TODO: Hmm should align be part of the alloc interface in general? I'm not going to worry
|
|
// about this until we crash because of misalignment.
|
|
DN_Assert(pool->align >= align);
|
|
|
|
void *result = DN_Pool_Alloc(pool, size);
|
|
if (result)
|
|
DN_Memcpy(result, data, size);
|
|
return result;
|
|
}
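// NOTE: Example (illustrative sketch, disabled; not part of the library) /////////////////////////
// The pool services allocations from power-of-two free lists backed by an arena; freed pointers
// are recycled on the next allocation that maps to the same bucket. Assumes a `DN_Arena arena`
// already exists; 0 picks the default alignment.
#if 0
    DN_Pool pool = DN_Pool_Init(&arena, /*align*/ 0); // 0 => DN_POOL_DEFAULT_ALIGN
    void *a      = DN_Pool_Alloc(&pool, 24);
    void *b      = DN_Pool_Alloc(&pool, 100);
    DN_Pool_Dealloc(&pool, a);               // 'a' goes back onto its bucket's free list
    void *c      = DN_Pool_Alloc(&pool, 24); // Same bucket as 'a', so the freed slot is reused
#endif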
// DN: Single header generator inlined this file => #include "Base/dn_base_string.cpp"
|
|
#define DN_STRING_CPP
|
|
|
|
// NOTE: DN_CStr8 //////////////////////////////////////////////////////////////////////////////////
|
|
DN_API DN_USize DN_CStr8_FSize(DN_FMT_ATTRIB char const *fmt, ...)
|
|
{
|
|
va_list args;
|
|
va_start(args, fmt);
|
|
DN_USize result = DN_VSNPrintF(nullptr, 0, fmt, args);
|
|
va_end(args);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_USize DN_CStr8_FVSize(DN_FMT_ATTRIB char const *fmt, va_list args)
|
|
{
|
|
va_list args_copy;
|
|
va_copy(args_copy, args);
|
|
DN_USize result = DN_VSNPrintF(nullptr, 0, fmt, args_copy);
|
|
va_end(args_copy);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_USize DN_CStr8_Size(char const *src)
|
|
{
|
|
DN_USize result = 0;
|
|
while (src && src[0] != 0) {
|
|
src++;
|
|
result++;
|
|
}
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_USize DN_CStr16_Size(wchar_t const *src)
|
|
{
|
|
DN_USize result = 0;
|
|
while (src && src[0] != 0) {
|
|
src++;
|
|
result++;
|
|
}
|
|
|
|
return result;
|
|
}
|
|
|
|
// NOTE: DN_Str16 //////////////////////////////////////////////////////////////////////////////////
|
|
DN_API bool operator==(DN_Str16 const &lhs, DN_Str16 const &rhs)
|
|
{
|
|
bool result = false;
|
|
if (lhs.size == rhs.size)
|
|
result = DN_Memcmp(lhs.data, rhs.data, lhs.size * sizeof(*lhs.data)) == 0;
|
|
return result;
|
|
}
|
|
|
|
DN_API bool operator!=(DN_Str16 const &lhs, DN_Str16 const &rhs)
|
|
{
|
|
bool result = !(lhs == rhs);
|
|
return result;
|
|
}
|
|
|
|
// NOTE: DN_Str8 ///////////////////////////////////////////////////////////////////////////////////
|
|
DN_API DN_Str8 DN_Str8_InitCStr8(char const *src)
|
|
{
|
|
DN_USize size = DN_CStr8_Size(src);
|
|
DN_Str8 result = DN_Str8_Init(src, size);
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_Str8_IsAll(DN_Str8 string, DN_Str8IsAll is_all)
|
|
{
|
|
bool result = DN_Str8_HasData(string);
|
|
if (!result)
|
|
return result;
|
|
|
|
switch (is_all) {
|
|
case DN_Str8IsAll_Digits: {
|
|
for (DN_USize index = 0; result && index < string.size; index++)
|
|
result = string.data[index] >= '0' && string.data[index] <= '9';
|
|
} break;
|
|
|
|
case DN_Str8IsAll_Hex: {
|
|
DN_Str8 trimmed = DN_Str8_TrimPrefix(string, DN_STR8("0x"), DN_Str8EqCase_Insensitive);
|
|
for (DN_USize index = 0; result && index < trimmed.size; index++) {
|
|
char ch = trimmed.data[index];
|
|
result = (ch >= '0' && ch <= '9') || (ch >= 'a' && ch <= 'f') || (ch >= 'A' && ch <= 'F');
|
|
}
|
|
} break;
|
|
}
|
|
|
|
return result;
|
|
}
|
|
|
|
DN_API char *DN_Str8_End(DN_Str8 string)
|
|
{
|
|
char *result = string.data + string.size;
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_Str8_Slice(DN_Str8 string, DN_USize offset, DN_USize size)
|
|
{
|
|
DN_Str8 result = DN_Str8_Init(string.data, 0);
|
|
if (!DN_Str8_HasData(string))
|
|
return result;
|
|
|
|
DN_USize capped_offset = DN_Min(offset, string.size);
|
|
DN_USize max_size = string.size - capped_offset;
|
|
DN_USize capped_size = DN_Min(size, max_size);
|
|
result = DN_Str8_Init(string.data + capped_offset, capped_size);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_Str8_Advance(DN_Str8 string, DN_USize amount)
|
|
{
|
|
DN_Str8 result = DN_Str8_Slice(string, amount, DN_USIZE_MAX);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_Str8_NextLine(DN_Str8 string)
|
|
{
|
|
DN_Str8 result = DN_Str8_BinarySplit(string, DN_STR8("\n")).rhs;
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8BinarySplitResult DN_Str8_BinarySplitArray(DN_Str8 string, DN_Str8 const *find, DN_USize find_size)
|
|
{
|
|
DN_Str8BinarySplitResult result = {};
|
|
if (!DN_Str8_HasData(string) || !find || find_size == 0)
|
|
return result;
|
|
|
|
result.lhs = string;
|
|
for (size_t index = 0; !result.rhs.data && index < string.size; index++) {
|
|
for (DN_USize find_index = 0; find_index < find_size; find_index++) {
|
|
DN_Str8 find_item = find[find_index];
|
|
DN_Str8 string_slice = DN_Str8_Slice(string, index, find_item.size);
|
|
if (DN_Str8_Eq(string_slice, find_item)) {
|
|
result.lhs.size = index;
|
|
result.rhs.data = string_slice.data + find_item.size;
|
|
result.rhs.size = string.size - (index + find_item.size);
|
|
break;
|
|
}
|
|
}
|
|
}
|
|
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8BinarySplitResult DN_Str8_BinarySplit(DN_Str8 string, DN_Str8 find)
|
|
{
|
|
DN_Str8BinarySplitResult result = DN_Str8_BinarySplitArray(string, &find, 1);
|
|
return result;
|
|
}
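// NOTE: Example (illustrative sketch, disabled; not part of the library) /////////////////////////
// Splits a string around the first occurrence of the delimiter; rhs is empty if it is not found.
#if 0
    DN_Str8BinarySplitResult split = DN_Str8_BinarySplit(DN_STR8("key=value"), DN_STR8("="));
    // split.lhs => "key", split.rhs => "value"
#endif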
DN_API DN_Str8BinarySplitResult DN_Str8_BinarySplitLastArray(DN_Str8 string, DN_Str8 const *find, DN_USize find_size)
|
|
{
|
|
DN_Str8BinarySplitResult result = {};
|
|
if (!DN_Str8_HasData(string) || !find || find_size == 0)
|
|
return result;
|
|
|
|
result.lhs = string;
|
|
for (size_t index = string.size - 1; !result.rhs.data && index < string.size; index--) {
|
|
for (DN_USize find_index = 0; find_index < find_size; find_index++) {
|
|
DN_Str8 find_item = find[find_index];
|
|
DN_Str8 string_slice = DN_Str8_Slice(string, index, find_item.size);
|
|
if (DN_Str8_Eq(string_slice, find_item)) {
|
|
result.lhs.size = index;
|
|
result.rhs.data = string_slice.data + find_item.size;
|
|
result.rhs.size = string.size - (index + find_item.size);
|
|
break;
|
|
}
|
|
}
|
|
}
|
|
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8BinarySplitResult DN_Str8_BinarySplitLast(DN_Str8 string, DN_Str8 find)
|
|
{
|
|
DN_Str8BinarySplitResult result = DN_Str8_BinarySplitLastArray(string, &find, 1);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_USize DN_Str8_Split(DN_Str8 string, DN_Str8 delimiter, DN_Str8 *splits, DN_USize splits_count, DN_Str8SplitIncludeEmptyStrings mode)
|
|
{
|
|
DN_USize result = 0; // The number of splits in the actual string.
|
|
if (!DN_Str8_HasData(string) || !DN_Str8_HasData(delimiter) || delimiter.size <= 0)
|
|
return result;
|
|
|
|
DN_Str8BinarySplitResult split = {};
|
|
DN_Str8 first = string;
|
|
do {
|
|
split = DN_Str8_BinarySplit(first, delimiter);
|
|
if (split.lhs.size || mode == DN_Str8SplitIncludeEmptyStrings_Yes) {
|
|
if (splits && result < splits_count)
|
|
splits[result] = split.lhs;
|
|
result++;
|
|
}
|
|
first = split.rhs;
|
|
} while (first.size);
|
|
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Slice<DN_Str8> DN_Str8_SplitAlloc(DN_Arena *arena, DN_Str8 string, DN_Str8 delimiter, DN_Str8SplitIncludeEmptyStrings mode)
|
|
{
|
|
DN_Slice<DN_Str8> result = {};
|
|
DN_USize splits_required = DN_Str8_Split(string, delimiter, /*splits*/ nullptr, /*count*/ 0, mode);
|
|
result.data = DN_Arena_NewArray(arena, DN_Str8, splits_required, DN_ZeroMem_No);
|
|
if (result.data) {
|
|
result.size = DN_Str8_Split(string, delimiter, result.data, splits_required, mode);
|
|
DN_Assert(splits_required == result.size);
|
|
}
|
|
return result;
|
|
}
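// NOTE: Example (illustrative sketch, disabled; not part of the library) /////////////////////////
// Splits on a delimiter into an arena-allocated slice. Assumes a `DN_Arena arena` already exists
// and that the 'No' counterpart of the include-empty-strings mode exists alongside the 'Yes' one
// used above.
#if 0
    DN_Slice<DN_Str8> parts = DN_Str8_SplitAlloc(&arena, DN_STR8("a,b,,c"), DN_STR8(","), DN_Str8SplitIncludeEmptyStrings_No);
    // parts.size => 3 ("a", "b", "c"); pass the 'Yes' mode to also keep the empty entry
#endif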
DN_API DN_Str8FindResult DN_Str8_FindStr8Array(DN_Str8 string, DN_Str8 const *find, DN_USize find_size, DN_Str8EqCase eq_case)
|
|
{
|
|
DN_Str8FindResult result = {};
|
|
if (!DN_Str8_HasData(string) || !find || find_size == 0)
|
|
return result;
|
|
|
|
for (DN_USize index = 0; !result.found && index < string.size; index++) {
|
|
for (DN_USize find_index = 0; find_index < find_size; find_index++) {
|
|
DN_Str8 find_item = find[find_index];
|
|
DN_Str8 string_slice = DN_Str8_Slice(string, index, find_item.size);
|
|
if (DN_Str8_Eq(string_slice, find_item, eq_case)) {
|
|
result.found = true;
|
|
result.index = index;
|
|
result.start_to_before_match = DN_Str8_Init(string.data, index);
|
|
result.match = DN_Str8_Init(string.data + index, find_item.size);
|
|
result.match_to_end_of_buffer = DN_Str8_Init(result.match.data, string.size - index);
|
|
result.after_match_to_end_of_buffer = DN_Str8_Advance(result.match_to_end_of_buffer, find_item.size);
|
|
break;
|
|
}
|
|
}
|
|
}
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8FindResult DN_Str8_FindStr8(DN_Str8 string, DN_Str8 find, DN_Str8EqCase eq_case)
|
|
{
|
|
DN_Str8FindResult result = DN_Str8_FindStr8Array(string, &find, 1, eq_case);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8FindResult DN_Str8_Find(DN_Str8 string, uint32_t flags)
|
|
{
|
|
DN_Str8FindResult result = {};
|
|
for (size_t index = 0; !result.found && index < string.size; index++) {
|
|
result.found |= ((flags & DN_Str8FindFlag_Digit) && DN_Char_IsDigit(string.data[index]));
|
|
result.found |= ((flags & DN_Str8FindFlag_Alphabet) && DN_Char_IsAlphabet(string.data[index]));
|
|
result.found |= ((flags & DN_Str8FindFlag_Whitespace) && DN_Char_IsWhitespace(string.data[index]));
|
|
result.found |= ((flags & DN_Str8FindFlag_Plus) && string.data[index] == '+');
|
|
result.found |= ((flags & DN_Str8FindFlag_Minus) && string.data[index] == '-');
|
|
if (result.found) {
|
|
result.index = index;
|
|
result.match = DN_Str8_Init(string.data + index, 1);
|
|
result.match_to_end_of_buffer = DN_Str8_Init(result.match.data, string.size - index);
|
|
result.after_match_to_end_of_buffer = DN_Str8_Advance(result.match_to_end_of_buffer, 1);
|
|
}
|
|
}
|
|
return result;
|
|
}
|
|
|
|
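// NOTE: Segmenting inserts 'segment_char' every 'segment_size' characters.
// DN_Str8_Segment counts segments from the front of the string whereas
// DN_Str8_ReverseSegment counts from the back, which is the variant usually wanted for
// thousand separators. Illustrative results, assuming a 'DN_Arena scratch' is available:
//
//   DN_Str8_Segment       (&scratch, DN_STR8("1234567"), 3, ','); // => "123,456,7"
//   DN_Str8_ReverseSegment(&scratch, DN_STR8("1234567"), 3, ','); // => "1,234,567"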
DN_API DN_Str8 DN_Str8_Segment(DN_Arena *arena, DN_Str8 src, DN_USize segment_size, char segment_char)
|
|
{
|
|
if (!segment_size || !DN_Str8_HasData(src)) {
|
|
DN_Str8 result = DN_Str8_Copy(arena, src);
|
|
return result;
|
|
}
|
|
|
|
DN_USize segments = src.size / segment_size;
|
|
if (src.size % segment_size == 0)
|
|
segments--;
|
|
|
|
DN_USize segment_counter = 0;
|
|
DN_Str8 result = DN_Str8_Alloc(arena, src.size + segments, DN_ZeroMem_Yes);
|
|
DN_USize write_index = 0;
|
|
for (DN_ForIndexU(src_index, src.size)) {
|
|
result.data[write_index++] = src.data[src_index];
|
|
if ((src_index + 1) % segment_size == 0 && segment_counter < segments) {
|
|
result.data[write_index++] = segment_char;
|
|
segment_counter++;
|
|
}
|
|
DN_AssertF(write_index <= result.size, "result.size=%zu, write_index=%zu", result.size, write_index);
|
|
}
|
|
|
|
DN_AssertF(write_index == result.size, "result.size=%zu, write_index=%zu", result.size, write_index);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_Str8_ReverseSegment(DN_Arena *arena, DN_Str8 src, DN_USize segment_size, char segment_char)
|
|
{
|
|
if (!segment_size || !DN_Str8_HasData(src)) {
|
|
DN_Str8 result = DN_Str8_Copy(arena, src);
|
|
return result;
|
|
}
|
|
|
|
DN_USize segments = src.size / segment_size;
|
|
if (src.size % segment_size == 0)
|
|
segments--;
|
|
|
|
DN_USize write_counter = 0;
|
|
DN_USize segment_counter = 0;
|
|
DN_Str8 result = DN_Str8_Alloc(arena, src.size + segments, DN_ZeroMem_Yes);
|
|
DN_USize write_index = result.size - 1;
|
|
|
|
DN_MSVC_WARNING_PUSH
|
|
DN_MSVC_WARNING_DISABLE(6293) // NOTE: Ill-defined loop
|
|
for (size_t src_index = src.size - 1; src_index < src.size; src_index--) {
|
|
DN_MSVC_WARNING_POP
|
|
result.data[write_index--] = src.data[src_index];
|
|
if (++write_counter % segment_size == 0 && segment_counter < segments) {
|
|
result.data[write_index--] = segment_char;
|
|
segment_counter++;
|
|
}
|
|
}
|
|
|
|
DN_Assert(write_index == SIZE_MAX);
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_Str8_Eq(DN_Str8 lhs, DN_Str8 rhs, DN_Str8EqCase eq_case)
|
|
{
|
|
if (lhs.size != rhs.size)
|
|
return false;
|
|
|
|
if (lhs.size == 0)
|
|
return true;
|
|
|
|
if (!lhs.data || !rhs.data)
|
|
return false;
|
|
|
|
bool result = true;
|
|
switch (eq_case) {
|
|
case DN_Str8EqCase_Sensitive: {
|
|
result = (DN_Memcmp(lhs.data, rhs.data, lhs.size) == 0);
|
|
} break;
|
|
|
|
case DN_Str8EqCase_Insensitive: {
|
|
for (DN_USize index = 0; index < lhs.size && result; index++)
|
|
result = (DN_Char_ToLower(lhs.data[index]) == DN_Char_ToLower(rhs.data[index]));
|
|
} break;
|
|
}
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_Str8_EqInsensitive(DN_Str8 lhs, DN_Str8 rhs)
|
|
{
|
|
bool result = DN_Str8_Eq(lhs, rhs, DN_Str8EqCase_Insensitive);
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_Str8_StartsWith(DN_Str8 string, DN_Str8 prefix, DN_Str8EqCase eq_case)
|
|
{
|
|
DN_Str8 substring = {string.data, DN_Min(prefix.size, string.size)};
|
|
bool result = DN_Str8_Eq(substring, prefix, eq_case);
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_Str8_StartsWithInsensitive(DN_Str8 string, DN_Str8 prefix)
|
|
{
|
|
bool result = DN_Str8_StartsWith(string, prefix, DN_Str8EqCase_Insensitive);
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_Str8_EndsWith(DN_Str8 string, DN_Str8 suffix, DN_Str8EqCase eq_case)
|
|
{
|
|
DN_Str8 substring = {string.data + string.size - suffix.size, DN_Min(string.size, suffix.size)};
|
|
bool result = DN_Str8_Eq(substring, suffix, eq_case);
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_Str8_EndsWithInsensitive(DN_Str8 string, DN_Str8 suffix)
|
|
{
|
|
bool result = DN_Str8_EndsWith(string, suffix, DN_Str8EqCase_Insensitive);
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_Str8_HasChar(DN_Str8 string, char ch)
|
|
{
|
|
bool result = false;
|
|
for (DN_USize index = 0; !result && index < string.size; index++)
|
|
result = string.data[index] == ch;
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_Str8_TrimPrefix(DN_Str8 string, DN_Str8 prefix, DN_Str8EqCase eq_case)
|
|
{
|
|
DN_Str8 result = string;
|
|
if (DN_Str8_StartsWith(string, prefix, eq_case)) {
|
|
result.data += prefix.size;
|
|
result.size -= prefix.size;
|
|
}
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_Str8_TrimHexPrefix(DN_Str8 string)
|
|
{
|
|
DN_Str8 result = DN_Str8_TrimPrefix(string, DN_STR8("0x"), DN_Str8EqCase_Insensitive);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_Str8_TrimSuffix(DN_Str8 string, DN_Str8 suffix, DN_Str8EqCase eq_case)
|
|
{
|
|
DN_Str8 result = string;
|
|
if (DN_Str8_EndsWith(string, suffix, eq_case))
|
|
result.size -= suffix.size;
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_Str8_TrimAround(DN_Str8 string, DN_Str8 trim_string)
|
|
{
|
|
DN_Str8 result = DN_Str8_TrimPrefix(string, trim_string);
|
|
result = DN_Str8_TrimSuffix(result, trim_string);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_Str8_TrimHeadWhitespace(DN_Str8 string)
|
|
{
|
|
DN_Str8 result = string;
|
|
if (!DN_Str8_HasData(string))
|
|
return result;
|
|
|
|
char const *start = string.data;
|
|
char const *end = string.data + string.size;
|
|
while (start < end && DN_Char_IsWhitespace(start[0]))
|
|
start++;
|
|
|
|
result = DN_Str8_Init(start, end - start);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_Str8_TrimTailWhitespace(DN_Str8 string)
|
|
{
|
|
DN_Str8 result = string;
|
|
if (!DN_Str8_HasData(string))
|
|
return result;
|
|
|
|
char const *start = string.data;
|
|
char const *end = string.data + string.size;
|
|
while (end > start && DN_Char_IsWhitespace(end[-1]))
|
|
end--;
|
|
|
|
result = DN_Str8_Init(start, end - start);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_Str8_TrimWhitespaceAround(DN_Str8 string)
|
|
{
|
|
DN_Str8 result = DN_Str8_TrimHeadWhitespace(string);
|
|
result = DN_Str8_TrimTailWhitespace(result);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_Str8_TrimByteOrderMark(DN_Str8 string)
|
|
{
|
|
DN_Str8 result = string;
|
|
if (!DN_Str8_HasData(result))
|
|
return result;
|
|
|
|
    DN_Str8 UTF8_BOM     = DN_STR8("\xEF\xBB\xBF");
    DN_Str8 UTF16_BOM_BE = DN_STR8("\xFE\xFF");
    DN_Str8 UTF16_BOM_LE = DN_STR8("\xFF\xFE");
    DN_Str8 UTF32_BOM_BE = DN_STR8("\x00\x00\xFE\xFF");
    DN_Str8 UTF32_BOM_LE = DN_STR8("\xFF\xFE\x00\x00");

    // NOTE: Trim the UTF32 BOMs before the UTF16 ones, the UTF32 LE BOM starts with the
    // UTF16 LE BOM so trimming UTF16 first would leave 2 stray null bytes behind.
    result = DN_Str8_TrimPrefix(result, UTF8_BOM,     DN_Str8EqCase_Sensitive);
    result = DN_Str8_TrimPrefix(result, UTF32_BOM_BE, DN_Str8EqCase_Sensitive);
    result = DN_Str8_TrimPrefix(result, UTF32_BOM_LE, DN_Str8EqCase_Sensitive);
    result = DN_Str8_TrimPrefix(result, UTF16_BOM_BE, DN_Str8EqCase_Sensitive);
    result = DN_Str8_TrimPrefix(result, UTF16_BOM_LE, DN_Str8EqCase_Sensitive);
|
|
return result;
|
|
}
|
|
|
|
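// NOTE: The path helpers below split on the last '/' or '\\' (and the last '.' for the
// extension variants). Illustrative results for the path "C:/temp/file.txt":
//
//   DN_Str8_FileNameFromPath      => "file.txt"
//   DN_Str8_FileNameNoExtension   => "file"
//   DN_Str8_FilePathNoExtension   => "C:/temp/file"
//   DN_Str8_FileExtension         => "txt"
//   DN_Str8_FileDirectoryFromPath => "C:/temp"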
DN_API DN_Str8 DN_Str8_FileNameFromPath(DN_Str8 path)
|
|
{
|
|
DN_Str8 separators[] = {DN_STR8("/"), DN_STR8("\\")};
|
|
DN_Str8BinarySplitResult split = DN_Str8_BinarySplitLastArray(path, separators, DN_ArrayCountU(separators));
|
|
DN_Str8 result = DN_Str8_HasData(split.rhs) ? split.rhs : split.lhs;
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_Str8_FileNameNoExtension(DN_Str8 path)
|
|
{
|
|
DN_Str8 file_name = DN_Str8_FileNameFromPath(path);
|
|
DN_Str8 result = DN_Str8_FilePathNoExtension(file_name);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_Str8_FilePathNoExtension(DN_Str8 path)
|
|
{
|
|
DN_Str8BinarySplitResult split = DN_Str8_BinarySplitLast(path, DN_STR8("."));
|
|
DN_Str8 result = split.lhs;
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_Str8_FileExtension(DN_Str8 path)
|
|
{
|
|
DN_Str8BinarySplitResult split = DN_Str8_BinarySplitLast(path, DN_STR8("."));
|
|
DN_Str8 result = split.rhs;
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_Str8_FileDirectoryFromPath(DN_Str8 path)
|
|
{
|
|
DN_Str8 separators[] = {DN_STR8("/"), DN_STR8("\\")};
|
|
DN_Str8BinarySplitResult split = DN_Str8_BinarySplitLastArray(path, separators, DN_ArrayCountU(separators));
|
|
DN_Str8 result = split.lhs;
|
|
return result;
|
|
}
|
|
|
|
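// NOTE: The string to integer helpers trim surrounding whitespace, accept an optional
// leading '+' (and '-' for the I64 variant) and skip an optional digit separator after
// the first character. An empty or all-whitespace string reports success with value 0.
// Illustrative results:
//
//   DN_Str8_ToU64(DN_STR8("1,234"), ',').value // => 1234
//   DN_Str8_ToI64(DN_STR8("-56"),   0  ).value // => -56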
DN_API DN_Str8ToU64Result DN_Str8_ToU64(DN_Str8 string, char separator)
|
|
{
|
|
// NOTE: Argument check
|
|
DN_Str8ToU64Result result = {};
|
|
if (!DN_Str8_HasData(string)) {
|
|
result.success = true;
|
|
return result;
|
|
}
|
|
|
|
// NOTE: Sanitize input/output
|
|
DN_Str8 trim_string = DN_Str8_TrimWhitespaceAround(string);
|
|
if (trim_string.size == 0) {
|
|
result.success = true;
|
|
return result;
|
|
}
|
|
|
|
// NOTE: Handle prefix '+'
|
|
DN_USize start_index = 0;
|
|
if (!DN_Char_IsDigit(trim_string.data[0])) {
|
|
if (trim_string.data[0] != '+')
|
|
return result;
|
|
start_index++;
|
|
}
|
|
|
|
// NOTE: Convert the string number to the binary number
|
|
for (DN_USize index = start_index; index < trim_string.size; index++) {
|
|
char ch = trim_string.data[index];
|
|
if (index) {
|
|
if (separator != 0 && ch == separator)
|
|
continue;
|
|
}
|
|
|
|
if (!DN_Char_IsDigit(ch))
|
|
return result;
|
|
|
|
result.value = DN_Safe_MulU64(result.value, 10);
|
|
uint64_t digit = ch - '0';
|
|
result.value = DN_Safe_AddU64(result.value, digit);
|
|
}
|
|
|
|
result.success = true;
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8ToI64Result DN_Str8_ToI64(DN_Str8 string, char separator)
|
|
{
|
|
// NOTE: Argument check
|
|
DN_Str8ToI64Result result = {};
|
|
if (!DN_Str8_HasData(string)) {
|
|
result.success = true;
|
|
return result;
|
|
}
|
|
|
|
// NOTE: Sanitize input/output
|
|
DN_Str8 trim_string = DN_Str8_TrimWhitespaceAround(string);
|
|
if (trim_string.size == 0) {
|
|
result.success = true;
|
|
return result;
|
|
}
|
|
|
|
bool negative = false;
|
|
DN_USize start_index = 0;
|
|
if (!DN_Char_IsDigit(trim_string.data[0])) {
|
|
negative = (trim_string.data[start_index] == '-');
|
|
if (!negative && trim_string.data[0] != '+')
|
|
return result;
|
|
start_index++;
|
|
}
|
|
|
|
// NOTE: Convert the string number to the binary number
|
|
for (DN_USize index = start_index; index < trim_string.size; index++) {
|
|
char ch = trim_string.data[index];
|
|
if (index) {
|
|
if (separator != 0 && ch == separator)
|
|
continue;
|
|
}
|
|
|
|
if (!DN_Char_IsDigit(ch))
|
|
return result;
|
|
|
|
result.value = DN_Safe_MulU64(result.value, 10);
|
|
uint64_t digit = ch - '0';
|
|
result.value = DN_Safe_AddU64(result.value, digit);
|
|
}
|
|
|
|
if (negative)
|
|
result.value *= -1;
|
|
|
|
result.success = true;
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_Str8_AppendF(DN_Arena *arena, DN_Str8 string, char const *fmt, ...)
|
|
{
|
|
va_list args;
|
|
va_start(args, fmt);
|
|
DN_Str8 result = DN_Str8_AppendFV(arena, string, fmt, args);
|
|
va_end(args);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_Str8_AppendFV(DN_Arena *arena, DN_Str8 string, char const *fmt, va_list args)
|
|
{
|
|
// TODO: Calculate size and write into one buffer instead of 2 appends
|
|
DN_Str8 append = DN_Str8_InitFV(arena, fmt, args);
|
|
DN_Str8 result = DN_Str8_Alloc(arena, string.size + append.size, DN_ZeroMem_No);
|
|
DN_Memcpy(result.data, string.data, string.size);
|
|
DN_Memcpy(result.data + string.size, append.data, append.size);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_Str8_FillF(DN_Arena *arena, DN_USize count, char const *fmt, ...)
|
|
{
|
|
va_list args;
|
|
va_start(args, fmt);
|
|
DN_Str8 result = DN_Str8_FillFV(arena, count, fmt, args);
|
|
va_end(args);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_Str8_FillFV(DN_Arena *arena, DN_USize count, char const *fmt, va_list args)
|
|
{
|
|
DN_Str8 fill = DN_Str8_InitFV(arena, fmt, args);
|
|
DN_Str8 result = DN_Str8_Alloc(arena, count * fill.size, DN_ZeroMem_No);
|
|
for (DN_USize index = 0; index < count; index++) {
|
|
void *dest = result.data + (index * fill.size);
|
|
DN_Memcpy(dest, fill.data, fill.size);
|
|
}
|
|
return result;
|
|
}
|
|
|
|
DN_API void DN_Str8_Remove(DN_Str8 *string, DN_USize offset, DN_USize size)
|
|
{
|
|
if (!string || !DN_Str8_HasData(*string))
|
|
return;
|
|
|
|
char *end = string->data + string->size;
|
|
char *dest = DN_Min(string->data + offset, end);
|
|
char *src = DN_Min(string->data + offset + size, end);
|
|
DN_USize bytes_to_move = end - src;
|
|
DN_Memmove(dest, src, bytes_to_move);
|
|
string->size -= bytes_to_move;
|
|
}
|
|
|
|
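// NOTE: Dot truncation keeps 'side_size' characters from both ends of the string and
// joins them with 'truncator' when the string is longer than (2 * side_size), e.g.
// "the quick brown fox" with side_size 4 and truncator ".." becomes "the .. fox".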
DN_API DN_Str8DotTruncateResult DN_Str8_DotTruncateMiddle(DN_Arena *arena, DN_Str8 str8, uint32_t side_size, DN_Str8 truncator)
|
|
{
|
|
DN_Str8DotTruncateResult result = {};
|
|
if (str8.size <= (side_size * 2)) {
|
|
result.str8 = DN_Str8_Copy(arena, str8);
|
|
return result;
|
|
}
|
|
|
|
DN_Str8 head = DN_Str8_Slice(str8, 0, side_size);
|
|
DN_Str8 tail = DN_Str8_Slice(str8, str8.size - side_size, side_size);
|
|
DN_MSVC_WARNING_PUSH
|
|
DN_MSVC_WARNING_DISABLE(6284) // Object passed as _Param_(3) when a string is required in call to 'DN_Str8_InitF' Actual type: 'struct DN_Str8'
|
|
result.str8 = DN_Str8_InitF(arena, "%S%S%S", head, truncator, tail);
|
|
DN_MSVC_WARNING_POP
|
|
result.truncated = true;
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_Str8_Lower(DN_Arena *arena, DN_Str8 string)
|
|
{
|
|
DN_Str8 result = DN_Str8_Copy(arena, string);
|
|
for (DN_ForIndexU(index, result.size))
|
|
result.data[index] = DN_Char_ToLower(result.data[index]);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_Str8_Upper(DN_Arena *arena, DN_Str8 string)
|
|
{
|
|
DN_Str8 result = DN_Str8_Copy(arena, string);
|
|
for (DN_ForIndexU(index, result.size))
|
|
result.data[index] = DN_Char_ToUpper(result.data[index]);
|
|
return result;
|
|
}
|
|
|
|
#if defined(__cplusplus)
|
|
DN_API bool operator==(DN_Str8 const &lhs, DN_Str8 const &rhs)
|
|
{
|
|
bool result = DN_Str8_Eq(lhs, rhs, DN_Str8EqCase_Sensitive);
|
|
return result;
|
|
}
|
|
|
|
DN_API bool operator!=(DN_Str8 const &lhs, DN_Str8 const &rhs)
|
|
{
|
|
bool result = !(lhs == rhs);
|
|
return result;
|
|
}
|
|
#endif
|
|
|
|
DN_API DN_Str8 DN_Str8_InitF(DN_Arena *arena, DN_FMT_ATTRIB char const *fmt, ...)
|
|
{
|
|
va_list va;
|
|
va_start(va, fmt);
|
|
DN_Str8 result = DN_Str8_InitFV(arena, fmt, va);
|
|
va_end(va);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_Str8_InitFV(DN_Arena *arena, DN_FMT_ATTRIB char const *fmt, va_list args)
|
|
{
|
|
DN_Str8 result = {};
|
|
if (!fmt)
|
|
return result;
|
|
|
|
DN_USize size = DN_CStr8_FVSize(fmt, args);
|
|
if (size) {
|
|
result = DN_Str8_Alloc(arena, size, DN_ZeroMem_No);
|
|
if (DN_Str8_HasData(result))
|
|
DN_VSNPrintF(result.data, DN_SaturateCastISizeToInt(size + 1 /*null-terminator*/), fmt, args);
|
|
}
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_Str8_Alloc(DN_Arena *arena, DN_USize size, DN_ZeroMem zero_mem)
|
|
{
|
|
DN_Str8 result = {};
|
|
result.data = DN_Arena_NewArray(arena, char, size + 1, zero_mem);
|
|
    if (result.data) {
        result.size              = size;
        result.data[result.size] = 0;
    }
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_Str8_Copy(DN_Arena *arena, DN_Str8 string)
|
|
{
|
|
DN_Str8 result = DN_Str8_Alloc(arena, string.size, DN_ZeroMem_No);
|
|
if (DN_Str8_HasData(result)) {
|
|
DN_Memcpy(result.data, string.data, string.size);
|
|
result.data[string.size] = 0;
|
|
}
|
|
return result;
|
|
}
|
|
|
|
// NOTE: DN_Str8Builder ////////////////////////////////////////////////////////////////////////////
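// A string builder accumulates string references (or copies) as a linked list of chunks
// on its arena and joins them with a single allocation when built. A minimal sketch,
// assuming a 'DN_Arena scratch' is available:
//
//   DN_Str8Builder builder = DN_Str8Builder_Init(&scratch);
//   DN_Str8Builder_AppendRef(&builder, DN_STR8("Hello"));
//   DN_Str8Builder_AppendF  (&builder, "%s!", "world");
//   DN_Str8 joined = DN_Str8Builder_Build         (&builder, &scratch);               // => "Helloworld!"
//   DN_Str8 spaced = DN_Str8Builder_BuildDelimited(&builder, DN_STR8(" "), &scratch); // => "Hello world!"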
|
|
DN_API DN_Str8Builder DN_Str8Builder_Init(DN_Arena *arena)
|
|
{
|
|
DN_Str8Builder result = {};
|
|
result.arena = arena;
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8Builder DN_Str8Builder_InitArrayRef(DN_Arena *arena,
|
|
DN_Str8 const *strings,
|
|
DN_USize size)
|
|
{
|
|
DN_Str8Builder result = DN_Str8Builder_Init(arena);
|
|
DN_Str8Builder_AppendArrayRef(&result, strings, size);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8Builder DN_Str8Builder_InitArrayCopy(DN_Arena *arena,
|
|
DN_Str8 const *strings,
|
|
DN_USize size)
|
|
{
|
|
DN_Str8Builder result = DN_Str8Builder_Init(arena);
|
|
DN_Str8Builder_AppendArrayCopy(&result, strings, size);
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_Str8Builder_AddArrayRef(DN_Str8Builder *builder, DN_Str8 const *strings, DN_USize size, DN_Str8BuilderAdd add)
|
|
{
|
|
if (!builder)
|
|
return false;
|
|
|
|
if (!strings || size <= 0)
|
|
return true;
|
|
|
|
DN_Str8Link *links = DN_Arena_NewArray(builder->arena, DN_Str8Link, size, DN_ZeroMem_No);
|
|
if (!links)
|
|
return false;
|
|
|
|
if (add == DN_Str8BuilderAdd_Append) {
|
|
for (DN_ForIndexU(index, size)) {
|
|
DN_Str8 string = strings[index];
|
|
DN_Str8Link *link = links + index;
|
|
|
|
link->string = string;
|
|
link->next = NULL;
|
|
|
|
if (builder->head)
|
|
builder->tail->next = link;
|
|
else
|
|
builder->head = link;
|
|
|
|
builder->tail = link;
|
|
builder->count++;
|
|
builder->string_size += string.size;
|
|
}
|
|
} else {
|
|
DN_Assert(add == DN_Str8BuilderAdd_Prepend);
|
|
DN_MSVC_WARNING_PUSH
|
|
DN_MSVC_WARNING_DISABLE(6293) // NOTE: Ill-defined loop
|
|
for (DN_USize index = size - 1; index < size; index--) {
|
|
DN_MSVC_WARNING_POP
|
|
DN_Str8 string = strings[index];
|
|
DN_Str8Link *link = links + index;
|
|
link->string = string;
|
|
link->next = builder->head;
|
|
builder->head = link;
|
|
if (!builder->tail)
|
|
builder->tail = link;
|
|
builder->count++;
|
|
builder->string_size += string.size;
|
|
}
|
|
}
|
|
return true;
|
|
}
|
|
|
|
DN_API bool DN_Str8Builder_AddArrayCopy(DN_Str8Builder *builder, DN_Str8 const *strings, DN_USize size, DN_Str8BuilderAdd add)
|
|
{
|
|
if (!builder)
|
|
return false;
|
|
|
|
if (!strings || size <= 0)
|
|
return true;
|
|
|
|
DN_ArenaTempMem tmp_mem = DN_Arena_TempMemBegin(builder->arena);
|
|
bool result = true;
|
|
DN_Str8 *strings_copy = DN_Arena_NewArray(builder->arena, DN_Str8, size, DN_ZeroMem_No);
|
|
for (DN_ForIndexU(index, size)) {
|
|
strings_copy[index] = DN_Str8_Copy(builder->arena, strings[index]);
|
|
if (strings_copy[index].size != strings[index].size) {
|
|
result = false;
|
|
break;
|
|
}
|
|
}
|
|
|
|
if (result)
|
|
result = DN_Str8Builder_AddArrayRef(builder, strings_copy, size, add);
|
|
|
|
if (!result)
|
|
DN_Arena_TempMemEnd(tmp_mem);
|
|
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_Str8Builder_AddFV(DN_Str8Builder *builder, DN_Str8BuilderAdd add, DN_FMT_ATTRIB char const *fmt, va_list args)
|
|
{
|
|
DN_Str8 string = DN_Str8_InitFV(builder->arena, fmt, args);
|
|
DN_ArenaTempMem temp_mem = DN_Arena_TempMemBegin(builder->arena);
|
|
bool result = DN_Str8Builder_AddArrayRef(builder, &string, 1, add);
|
|
if (!result)
|
|
DN_Arena_TempMemEnd(temp_mem);
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_Str8Builder_AppendRef(DN_Str8Builder *builder, DN_Str8 string)
|
|
{
|
|
bool result = DN_Str8Builder_AddArrayRef(builder, &string, 1, DN_Str8BuilderAdd_Append);
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_Str8Builder_AppendCopy(DN_Str8Builder *builder, DN_Str8 string)
|
|
{
|
|
bool result = DN_Str8Builder_AddArrayCopy(builder, &string, 1, DN_Str8BuilderAdd_Append);
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_Str8Builder_AppendF(DN_Str8Builder *builder, DN_FMT_ATTRIB char const *fmt, ...)
|
|
{
|
|
va_list args;
|
|
va_start(args, fmt);
|
|
bool result = DN_Str8Builder_AppendFV(builder, fmt, args);
|
|
va_end(args);
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_Str8Builder_AppendBytesRef(DN_Str8Builder *builder, void const *ptr, DN_USize size)
|
|
{
|
|
DN_Str8 input = DN_Str8_Init(ptr, size);
|
|
bool result = DN_Str8Builder_AppendRef(builder, input);
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_Str8Builder_AppendBytesCopy(DN_Str8Builder *builder, void const *ptr, DN_USize size)
|
|
{
|
|
DN_Str8 input = DN_Str8_Init(ptr, size);
|
|
bool result = DN_Str8Builder_AppendCopy(builder, input);
|
|
return result;
|
|
}
|
|
|
|
static bool DN_Str8Builder_AppendBuilder_(DN_Str8Builder *dest, DN_Str8Builder const *src, bool copy)
|
|
{
|
|
if (!dest)
|
|
return false;
|
|
if (!src)
|
|
return true;
|
|
|
|
    DN_ArenaTempMem tmp_mem = DN_Arena_TempMemBegin(dest->arena);
    DN_Str8Link    *links   = DN_Arena_NewArray(dest->arena, DN_Str8Link, src->count, DN_ZeroMem_No);
    if (!links) {
        DN_Arena_TempMemEnd(tmp_mem);
        return false;
    }
|
|
|
|
DN_Str8Link *first = nullptr;
|
|
DN_Str8Link *last = nullptr;
|
|
DN_USize link_index = 0;
|
|
bool result = true;
|
|
for (DN_Str8Link const *it = src->head; it; it = it->next) {
|
|
DN_Str8Link *link = links + link_index++;
|
|
link->next = nullptr;
|
|
link->string = it->string;
|
|
|
|
if (copy) {
|
|
link->string = DN_Str8_Copy(dest->arena, it->string);
|
|
if (link->string.size != it->string.size) {
|
|
result = false;
|
|
break;
|
|
}
|
|
}
|
|
|
|
if (last)
|
|
last->next = link;
|
|
else
|
|
first = link;
|
|
last = link;
|
|
}
|
|
|
|
if (result) {
|
|
if (dest->head)
|
|
dest->tail->next = first;
|
|
else
|
|
dest->head = first;
|
|
dest->tail = last;
|
|
dest->count += src->count;
|
|
dest->string_size += src->string_size;
|
|
    } else {
        DN_Arena_TempMemEnd(tmp_mem);
    }
    return result;
|
|
}
|
|
|
|
DN_API bool DN_Str8Builder_AppendBuilderRef(DN_Str8Builder *dest, DN_Str8Builder const *src)
|
|
{
|
|
bool result = DN_Str8Builder_AppendBuilder_(dest, src, false);
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_Str8Builder_AppendBuilderCopy(DN_Str8Builder *dest, DN_Str8Builder const *src)
|
|
{
|
|
bool result = DN_Str8Builder_AppendBuilder_(dest, src, true);
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_Str8Builder_PrependRef(DN_Str8Builder *builder, DN_Str8 string)
|
|
{
|
|
bool result = DN_Str8Builder_AddArrayRef(builder, &string, 1, DN_Str8BuilderAdd_Prepend);
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_Str8Builder_PrependCopy(DN_Str8Builder *builder, DN_Str8 string)
|
|
{
|
|
bool result = DN_Str8Builder_AddArrayCopy(builder, &string, 1, DN_Str8BuilderAdd_Prepend);
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_Str8Builder_PrependF(DN_Str8Builder *builder, DN_FMT_ATTRIB char const *fmt, ...)
|
|
{
|
|
va_list args;
|
|
va_start(args, fmt);
|
|
bool result = DN_Str8Builder_PrependFV(builder, fmt, args);
|
|
va_end(args);
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_Str8Builder_Erase(DN_Str8Builder *builder, DN_Str8 string)
|
|
{
|
|
for (DN_Str8Link **it = &builder->head; *it; it = &((*it)->next)) {
|
|
if ((*it)->string == string) {
|
|
*it = (*it)->next;
|
|
builder->string_size -= string.size;
|
|
builder->count -= 1;
|
|
return true;
|
|
}
|
|
}
|
|
return false;
|
|
}
|
|
|
|
DN_API DN_Str8Builder DN_Str8Builder_Copy(DN_Arena *arena, DN_Str8Builder const *builder)
|
|
{
|
|
DN_Str8Builder result = DN_Str8Builder_Init(arena);
|
|
DN_Str8Builder_AppendBuilderCopy(&result, builder);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_Str8Builder_Build(DN_Str8Builder const *builder, DN_Arena *arena)
|
|
{
|
|
DN_Str8 result = DN_Str8Builder_BuildDelimited(builder, DN_STR8(""), arena);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_Str8Builder_BuildDelimited(DN_Str8Builder const *builder, DN_Str8 delimiter, DN_Arena *arena)
|
|
{
|
|
DN_Str8 result = DN_ZeroInit;
|
|
if (!builder || builder->string_size <= 0 || builder->count <= 0)
|
|
return result;
|
|
|
|
DN_USize size_for_delimiter = DN_Str8_HasData(delimiter) ? ((builder->count - 1) * delimiter.size) : 0;
|
|
result.data = DN_Arena_NewArray(arena,
|
|
char,
|
|
builder->string_size + size_for_delimiter + 1 /*null terminator*/,
|
|
DN_ZeroMem_No);
|
|
if (!result.data)
|
|
return result;
|
|
|
|
for (DN_Str8Link *link = builder->head; link; link = link->next) {
|
|
DN_Memcpy(result.data + result.size, link->string.data, link->string.size);
|
|
result.size += link->string.size;
|
|
if (link->next && DN_Str8_HasData(delimiter)) {
|
|
DN_Memcpy(result.data + result.size, delimiter.data, delimiter.size);
|
|
result.size += delimiter.size;
|
|
}
|
|
}
|
|
|
|
result.data[result.size] = 0;
|
|
DN_Assert(result.size == builder->string_size + size_for_delimiter);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Slice<DN_Str8> DN_Str8Builder_BuildSlice(DN_Str8Builder const *builder, DN_Arena *arena)
|
|
{
|
|
DN_Slice<DN_Str8> result = DN_ZeroInit;
|
|
if (!builder || builder->string_size <= 0 || builder->count <= 0)
|
|
return result;
|
|
|
|
result = DN_Slice_Alloc<DN_Str8>(arena, builder->count, DN_ZeroMem_No);
|
|
if (!result.data)
|
|
return result;
|
|
|
|
DN_USize slice_index = 0;
|
|
for (DN_Str8Link *link = builder->head; link; link = link->next)
|
|
result.data[slice_index++] = DN_Str8_Copy(arena, link->string);
|
|
|
|
DN_Assert(slice_index == builder->count);
|
|
return result;
|
|
}
|
|
|
|
// NOTE: DN_Char ///////////////////////////////////////////////////////////////////////////////////
|
|
DN_API bool DN_Char_IsAlphabet(char ch)
|
|
{
|
|
bool result = (ch >= 'A' && ch <= 'Z') || (ch >= 'a' && ch <= 'z');
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_Char_IsDigit(char ch)
|
|
{
|
|
bool result = (ch >= '0' && ch <= '9');
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_Char_IsAlphaNum(char ch)
|
|
{
|
|
bool result = DN_Char_IsAlphabet(ch) || DN_Char_IsDigit(ch);
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_Char_IsWhitespace(char ch)
|
|
{
|
|
bool result = (ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r');
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_Char_IsHex(char ch)
|
|
{
|
|
bool result = ((ch >= 'a' && ch <= 'f') || (ch >= 'A' && ch <= 'F') || (ch >= '0' && ch <= '9'));
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_CharHexToU8 DN_Char_HexToU8(char ch)
|
|
{
|
|
DN_CharHexToU8 result = {};
|
|
result.success = true;
|
|
if (ch >= 'a' && ch <= 'f')
|
|
result.value = ch - 'a' + 10;
|
|
else if (ch >= 'A' && ch <= 'F')
|
|
result.value = ch - 'A' + 10;
|
|
else if (ch >= '0' && ch <= '9')
|
|
result.value = ch - '0';
|
|
else
|
|
result.success = false;
|
|
return result;
|
|
}
|
|
|
|
static char constexpr DN_HEX_LUT[] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'};
|
|
|
|
DN_API char DN_Char_ToHex(char ch)
|
|
{
|
|
char result = DN_CAST(char) - 1;
|
|
if (ch < 16)
|
|
result = DN_HEX_LUT[DN_CAST(uint8_t) ch];
|
|
return result;
|
|
}
|
|
|
|
DN_API char DN_Char_ToHexUnchecked(char ch)
|
|
{
|
|
char result = DN_HEX_LUT[DN_CAST(uint8_t) ch];
|
|
return result;
|
|
}
|
|
|
|
DN_API char DN_Char_ToLower(char ch)
|
|
{
|
|
char result = ch;
|
|
if (result >= 'A' && result <= 'Z')
|
|
result += 'a' - 'A';
|
|
return result;
|
|
}
|
|
|
|
DN_API char DN_Char_ToUpper(char ch)
|
|
{
|
|
char result = ch;
|
|
if (result >= 'a' && result <= 'z')
|
|
result -= 'a' - 'A';
|
|
return result;
|
|
}
|
|
|
|
// NOTE: DN_UTF ////////////////////////////////////////////////////////////////////////////////////
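// Both encoders return the number of code units written (bytes for UTF-8, uint16_t's for
// UTF-16), or 0 if the codepoint cannot be encoded. Illustrative sketch:
//
//   uint8_t utf8[4];
//   int     size = DN_UTF8_EncodeCodepoint(utf8, 0x00E9); // U+00E9 (é) => 2, utf8 = {0xC3, 0xA9}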
|
|
DN_API int DN_UTF8_EncodeCodepoint(uint8_t utf8[4], uint32_t codepoint)
|
|
{
|
|
// NOTE: Table from https://www.reedbeta.com/blog/programmers-intro-to-unicode/
|
|
// ----------------------------------------+----------------------------+--------------------+
|
|
// UTF-8 (binary) | Code point (binary) | Range |
|
|
// ----------------------------------------+----------------------------+--------------------+
|
|
// 0xxx'xxxx | xxx'xxxx | U+0000 - U+007F |
|
|
// 110x'xxxx 10yy'yyyy | xxx'xxyy'yyyy | U+0080 - U+07FF |
|
|
// 1110'xxxx 10yy'yyyy 10zz'zzzz | xxxx'yyyy'yyzz'zzzz | U+0800 - U+FFFF |
|
|
// 1111'0xxx 10yy'yyyy 10zz'zzzz 10ww'wwww | x'xxyy'yyyy'zzzz'zzww'wwww | U+10000 - U+10FFFF |
|
|
// ----------------------------------------+----------------------------+--------------------+
|
|
|
|
if (codepoint <= 0b0111'1111) {
|
|
utf8[0] = DN_CAST(uint8_t) codepoint;
|
|
return 1;
|
|
}
|
|
|
|
if (codepoint <= 0b0111'1111'1111) {
|
|
utf8[0] = (0b1100'0000 | ((codepoint >> 6) & 0b01'1111)); // x
|
|
utf8[1] = (0b1000'0000 | ((codepoint >> 0) & 0b11'1111)); // y
|
|
return 2;
|
|
}
|
|
|
|
if (codepoint <= 0b1111'1111'1111'1111) {
|
|
utf8[0] = (0b1110'0000 | ((codepoint >> 12) & 0b00'1111)); // x
|
|
utf8[1] = (0b1000'0000 | ((codepoint >> 6) & 0b11'1111)); // y
|
|
utf8[2] = (0b1000'0000 | ((codepoint >> 0) & 0b11'1111)); // z
|
|
return 3;
|
|
}
|
|
|
|
if (codepoint <= 0b1'1111'1111'1111'1111'1111) {
|
|
utf8[0] = (0b1111'0000 | ((codepoint >> 18) & 0b00'0111)); // x
|
|
utf8[1] = (0b1000'0000 | ((codepoint >> 12) & 0b11'1111)); // y
|
|
utf8[2] = (0b1000'0000 | ((codepoint >> 6) & 0b11'1111)); // z
|
|
utf8[3] = (0b1000'0000 | ((codepoint >> 0) & 0b11'1111)); // w
|
|
return 4;
|
|
}
|
|
|
|
return 0;
|
|
}
|
|
|
|
DN_API int DN_UTF16_EncodeCodepoint(uint16_t utf16[2], uint32_t codepoint)
|
|
{
|
|
// NOTE: Table from https://www.reedbeta.com/blog/programmers-intro-to-unicode/
|
|
// ----------------------------------------+------------------------------------+------------------+
|
|
// UTF-16 (binary) | Code point (binary) | Range |
|
|
// ----------------------------------------+------------------------------------+------------------+
|
|
    // xxxx'xxxx'xxxx'xxxx                     | xxxx'xxxx'xxxx'xxxx                | U+0000 - U+FFFF    |
    // 1101'10xx'xxxx'xxxx 1101'11yy'yyyy'yyyy | xxxx'xxxx'xxyy'yyyy'yyyy + 0x10000 | U+10000 - U+10FFFF |
|
|
// ----------------------------------------+------------------------------------+------------------+
|
|
|
|
if (codepoint <= 0b1111'1111'1111'1111) {
|
|
utf16[0] = DN_CAST(uint16_t) codepoint;
|
|
return 1;
|
|
}
|
|
|
|
    if (codepoint <= 0b1'0000'1111'1111'1111'1111) { // NOTE: U+10000 - U+10FFFF encodes as a surrogate pair
        uint32_t surrogate_codepoint = codepoint - 0x10000;
        utf16[0] = 0b1101'1000'0000'0000 | ((surrogate_codepoint >> 10) & 0b11'1111'1111); // x
        utf16[1] = 0b1101'1100'0000'0000 | ((surrogate_codepoint >> 0) & 0b11'1111'1111);  // y
        return 2;
    }
|
|
|
|
return 0;
|
|
}
|
|
// DN: Single header generator inlined this file => #include "Base/dn_base_log.cpp"
|
|
#define DN_BASE_LOG_CPP
|
|
|
|
// DN: Single header generator commented out this header => #include "../dn_clangd.h"
|
|
|
|
static DN_LOGEmitFromTypeFVFunc *g_dn_base_log_emit_from_type_fv_func_;
|
|
static void *g_dn_base_log_emit_from_type_fv_user_context_;
|
|
|
|
DN_API DN_Str8 DN_LOG_ColourEscapeCodeStr8FromRGB(DN_LOGColourType colour, DN_U8 r, DN_U8 g, DN_U8 b)
|
|
{
|
|
DN_THREAD_LOCAL char buffer[32];
|
|
buffer[0] = 0;
|
|
DN_Str8 result = {};
|
|
result.size = DN_SNPrintF(buffer,
|
|
DN_ArrayCountU(buffer),
|
|
"\x1b[%d;2;%u;%u;%um",
|
|
colour == DN_LOGColourType_Fg ? 38 : 48,
|
|
r,
|
|
g,
|
|
b);
|
|
result.data = buffer;
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_LOG_ColourEscapeCodeStr8FromU32(DN_LOGColourType colour, DN_U32 value)
|
|
{
|
|
DN_U8 r = DN_CAST(DN_U8)(value >> 24);
|
|
DN_U8 g = DN_CAST(DN_U8)(value >> 16);
|
|
DN_U8 b = DN_CAST(DN_U8)(value >> 8);
|
|
DN_Str8 result = DN_LOG_ColourEscapeCodeStr8FromRGB(colour, r, g, b);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_LOGPrefixSize DN_LOG_MakePrefix(DN_LOGStyle style, DN_LOGTypeParam type, DN_CallSite call_site, DN_LOGDate date, char *dest, DN_USize dest_size)
|
|
{
|
|
DN_Str8 type_str8 = type.str8;
|
|
if (type.is_u32_enum) {
|
|
switch (type.u32) {
|
|
case DN_LOGType_Debug: type_str8 = DN_STR8("DEBUG"); break;
|
|
case DN_LOGType_Info: type_str8 = DN_STR8("INFO "); break;
|
|
            case DN_LOGType_Warning: type_str8 = DN_STR8("WARN "); break;
|
|
case DN_LOGType_Error: type_str8 = DN_STR8("ERROR"); break;
|
|
case DN_LOGType_Count: type_str8 = DN_STR8("BADXX"); break;
|
|
}
|
|
}
|
|
|
|
static DN_USize max_type_length = 0;
|
|
max_type_length = DN_Max(max_type_length, type_str8.size);
|
|
int type_padding = DN_CAST(int)(max_type_length - type_str8.size);
|
|
|
|
DN_Str8 colour_esc = {};
|
|
DN_Str8 bold_esc = {};
|
|
DN_Str8 reset_esc = {};
|
|
if (style.colour) {
|
|
bold_esc = DN_STR8(DN_LOG_BoldEscapeCode);
|
|
reset_esc = DN_STR8(DN_LOG_ResetEscapeCode);
|
|
colour_esc = DN_LOG_ColourEscapeCodeStr8FromRGB(DN_LOGColourType_Fg, style.r, style.g, style.b);
|
|
}
|
|
|
|
DN_Str8 file_name = DN_Str8_FileNameFromPath(call_site.file);
|
|
DN_GCC_WARNING_PUSH
|
|
DN_GCC_WARNING_DISABLE(-Wformat)
|
|
DN_GCC_WARNING_DISABLE(-Wformat-extra-args)
|
|
DN_MSVC_WARNING_PUSH
|
|
DN_MSVC_WARNING_DISABLE(4477)
|
|
int size = DN_SNPrintF(dest,
|
|
DN_CAST(int)dest_size,
|
|
"%04u-%02u-%02uT%02u:%02u:%02u" // date
|
|
"%S" // colour
|
|
"%S" // bold
|
|
" %S" // type
|
|
"%.*s" // type padding
|
|
"%S" // reset
|
|
" %S" // file name
|
|
":%05I32u " // line number
|
|
,
|
|
date.year,
|
|
date.month,
|
|
date.day,
|
|
date.hour,
|
|
date.minute,
|
|
date.second,
|
|
colour_esc, // colour
|
|
bold_esc, // bold
|
|
type_str8, // type
|
|
DN_CAST(int) type_padding,
|
|
"", // type padding
|
|
reset_esc, // reset
|
|
file_name, // file name
|
|
call_site.line); // line number
|
|
DN_MSVC_WARNING_POP // '%S' requires an argument of type 'wchar_t *', but variadic argument 7 has type 'DN_Str8'
|
|
DN_GCC_WARNING_POP
|
|
|
|
static DN_USize max_header_length = 0;
|
|
DN_USize size_no_ansi_codes = size - colour_esc.size - reset_esc.size - bold_esc.size;
|
|
max_header_length = DN_Max(max_header_length, size_no_ansi_codes);
|
|
DN_USize header_padding = max_header_length - size_no_ansi_codes;
|
|
|
|
DN_LOGPrefixSize result = {};
|
|
result.size = size;
|
|
result.padding = header_padding;
|
|
return result;
|
|
}
|
|
|
|
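// NOTE: The base logging layer does not print anything by itself, DN_LOG_EmitFromType
// forwards every message to the function registered here. DN_OS_Init installs an OS
// backed printer via DN_OS_EmitLogsWithOSPrintFunctions; a custom sink can be swapped in
// the same way at any point.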
DN_API void DN_LOG_SetEmitFromTypeFVFunc(DN_LOGEmitFromTypeFVFunc *print_func, void *user_data)
|
|
{
|
|
g_dn_base_log_emit_from_type_fv_func_ = print_func;
|
|
g_dn_base_log_emit_from_type_fv_user_context_ = user_data;
|
|
}
|
|
|
|
DN_API void DN_LOG_EmitFromType(DN_LOGTypeParam type, DN_CallSite call_site, DN_FMT_ATTRIB char const *fmt, ...)
|
|
{
|
|
DN_LOGEmitFromTypeFVFunc *func = g_dn_base_log_emit_from_type_fv_func_;
|
|
void *user_context = g_dn_base_log_emit_from_type_fv_user_context_;
|
|
if (func) {
|
|
va_list args;
|
|
va_start(args, fmt);
|
|
func(type, user_context, call_site, fmt, args);
|
|
va_end(args);
|
|
}
|
|
}
|
|
|
|
DN_API DN_LOGTypeParam DN_LOG_MakeU32LogTypeParam(DN_LOGType type)
|
|
{
|
|
DN_LOGTypeParam result = {};
|
|
result.is_u32_enum = true;
|
|
result.u32 = type;
|
|
return result;
|
|
}
|
|
#define DN_OS_INC_CPP
|
|
|
|
// DN: Single header generator inlined this file => #include "OS/dn_os_tls.cpp"
|
|
#define DN_OS_TLSCPP
|
|
|
|
// NOTE: DN_OSTLS ////////////////////////////////////////////////////////////////////////////////////
|
|
DN_OSTLSTMem::DN_OSTLSTMem(DN_OSTLS *tls, DN_U8 arena_index, DN_OSTLSPushTMem push_tmem)
|
|
{
|
|
DN_Assert(arena_index == DN_OSTLSArena_TMem0 || arena_index == DN_OSTLSArena_TMem1);
|
|
arena = tls->arenas + arena_index;
|
|
temp_mem = DN_Arena_TempMemBegin(arena);
|
|
destructed = false;
|
|
push_arena = push_tmem;
|
|
if (push_arena)
|
|
DN_OS_TLSPushArena(arena);
|
|
}
|
|
|
|
DN_OSTLSTMem::~DN_OSTLSTMem()
|
|
{
|
|
DN_Assert(destructed == false);
|
|
DN_Arena_TempMemEnd(temp_mem);
|
|
destructed = true;
|
|
if (push_arena)
|
|
DN_OS_TLSPopArena();
|
|
}
|
|
|
|
DN_API void DN_OS_TLSInit(DN_OSTLS *tls, DN_OSTLSInitArgs args)
|
|
{
|
|
DN_Check(tls);
|
|
if (tls->init)
|
|
return;
|
|
|
|
DN_U64 reserve = args.reserve ? args.reserve : DN_Kilobytes(64);
|
|
DN_U64 commit = args.commit ? args.commit : DN_Kilobytes(4);
|
|
DN_U64 err_sink_reserve = args.err_sink_reserve ? args.err_sink_reserve : DN_Kilobytes(64);
|
|
DN_U64 err_sink_commit = args.err_sink_commit ? args.err_sink_commit : DN_Kilobytes(4);
|
|
|
|
// TODO: We shouldn't have the no alloc track flag here but the initial TLS
|
|
// init on OS init happens before CORE init. CORE init is the one responsible
|
|
// for setting up the alloc tracking data structures.
|
|
for (DN_ForItCArray(it, DN_Arena, tls->arenas)) {
|
|
DN_Arena *arena = it.data;
|
|
switch (DN_CAST(DN_OSTLSArena) it.index) {
|
|
default: *arena = DN_Arena_InitFromOSVMem(reserve, commit, DN_ArenaFlags_AllocCanLeak | DN_ArenaFlags_NoAllocTrack); break;
|
|
case DN_OSTLSArena_ErrorSink: *arena = DN_Arena_InitFromOSVMem(err_sink_reserve, err_sink_commit, DN_ArenaFlags_AllocCanLeak | DN_ArenaFlags_NoAllocTrack); break;
|
|
case DN_OSTLSArena_Count: DN_InvalidCodePath; break;
|
|
}
|
|
}
|
|
|
|
tls->thread_id = DN_OS_ThreadID();
|
|
tls->err_sink.arena = tls->arenas + DN_OSTLSArena_ErrorSink;
|
|
tls->init = true;
|
|
}
|
|
|
|
DN_API void DN_OS_TLSDeinit(DN_OSTLS *tls)
|
|
{
|
|
tls->init = false;
|
|
tls->err_sink = {};
|
|
tls->arena_stack_index = {};
|
|
for (DN_ForItCArray(it, DN_Arena, tls->arenas))
|
|
DN_Arena_Deinit(it.data);
|
|
}
|
|
|
|
DN_THREAD_LOCAL DN_OSTLS *g_dn_curr_thread_tls;
|
|
DN_API void DN_OS_TLSSetCurrentThreadTLS(DN_OSTLS *tls)
|
|
{
|
|
g_dn_curr_thread_tls = tls;
|
|
}
|
|
|
|
DN_API DN_OSTLS *DN_OS_TLSGet()
|
|
{
|
|
DN_Assert(g_dn_curr_thread_tls &&
|
|
"DN must be initialised (via DN_Core_Init) before calling any functions depending on "
|
|
"TLS if this is the main thread, OR, the created thread has not called "
|
|
"SetCurrentThreadTLS yet so the TLS data structure hasn't been assigned yet");
|
|
return g_dn_curr_thread_tls;
|
|
}
|
|
|
|
DN_API DN_Arena *DN_OS_TLSArena()
|
|
{
|
|
DN_OSTLS *tls = DN_OS_TLSGet();
|
|
DN_Arena *result = tls->arenas + DN_OSTLSArena_Main;
|
|
return result;
|
|
}
|
|
|
|
// TODO: Is there a way to handle conflict arenas without the user needing to
|
|
// manually pass it in?
|
|
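// NOTE: Pass the arena your result will be allocated into as 'conflict_arena' so the
// scratch arena handed back is guaranteed to be a different one, otherwise ending the
// scratch scope would pop your result along with the temporary work. A minimal sketch,
// assuming the DN_OS_TLSTMem helper used elsewhere in this file:
//
//   DN_Str8 MyFunc(DN_Arena *arena)
//   {
//       DN_OSTLSTMem tmem         = DN_OS_TLSTMem(arena); // tmem.arena != arena
//       DN_Str8      intermediate = DN_Str8_InitF(tmem.arena, "%d", 123);
//       return DN_Str8_Copy(arena, intermediate);         // survives, scratch is popped on scope exit
//   }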
DN_API DN_OSTLSTMem DN_OS_TLSGetTMem(void const *conflict_arena, DN_OSTLSPushTMem push_tmem)
|
|
{
|
|
DN_OSTLS *tls = DN_OS_TLSGet();
|
|
DN_U8 tls_index = (DN_U8)-1;
|
|
for (DN_U8 index = DN_OSTLSArena_TMem0; index <= DN_OSTLSArena_TMem1; index++) {
|
|
DN_Arena *arena = tls->arenas + index;
|
|
if (!conflict_arena || arena != conflict_arena) {
|
|
tls_index = index;
|
|
break;
|
|
}
|
|
}
|
|
|
|
DN_Assert(tls_index != (DN_U8)-1);
|
|
return DN_OSTLSTMem(tls, tls_index, push_tmem);
|
|
}
|
|
|
|
DN_API void DN_OS_TLSPushArena(DN_Arena *arena)
|
|
{
|
|
DN_Assert(arena);
|
|
DN_OSTLS *tls = DN_OS_TLSGet();
|
|
DN_Assert(tls->arena_stack_index < DN_ArrayCountU(tls->arena_stack));
|
|
tls->arena_stack[tls->arena_stack_index++] = arena;
|
|
}
|
|
|
|
DN_API void DN_OS_TLSPopArena()
|
|
{
|
|
DN_OSTLS *tls = DN_OS_TLSGet();
|
|
DN_Assert(tls->arena_stack_index > 0);
|
|
tls->arena_stack_index--;
|
|
}
|
|
|
|
DN_API DN_Arena *DN_OS_TLSTopArena()
|
|
{
|
|
DN_OSTLS *tls = DN_OS_TLSGet();
|
|
DN_Arena *result = nullptr;
|
|
if (tls->arena_stack_index)
|
|
result = tls->arena_stack[tls->arena_stack_index - 1];
|
|
return result;
|
|
}
|
|
|
|
DN_API void DN_OS_TLSBeginFrame(DN_Arena *frame_arena)
|
|
{
|
|
DN_OSTLS *tls = DN_OS_TLSGet();
|
|
tls->frame_arena = frame_arena;
|
|
}
|
|
|
|
DN_API DN_Arena *DN_OS_TLSFrameArena()
|
|
{
|
|
DN_OSTLS *tls = DN_OS_TLSGet();
|
|
DN_Arena *result = tls->frame_arena;
|
|
DN_AssertF(result, "Frame arena must be set by calling DN_OS_TLSBeginFrame at the beginning of the frame");
|
|
return result;
|
|
}
|
|
|
|
// NOTE: DN_OSErrSink ////////////////////////////////////////////////////////////////////////////////
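// The error sink collects error messages appended between a Begin/End pair on a
// per-thread stack so that callees can report rich errors without the caller threading
// an error struct through every call. A minimal sketch, assuming 'DN_OS_ErrSinkBegin' is
// the macro wrapper over DN_OS_ErrSinkBegin_ declared in the header and 'scratch' is an
// arena that is not the sink's own arena:
//
//   DN_OSErrSink *err = DN_OS_ErrSinkBegin(DN_OSErrSinkMode_Nil);
//   SomeCallThatTakesAnErrSink(..., err);                 // hypothetical callee
//   if (DN_OS_ErrSinkHasError(err)) {
//       DN_Str8 msg = DN_OS_ErrSinkEndStr8(&scratch, err);
//       // ... report 'msg' ...
//   } else {
//       DN_OS_ErrSinkEndAndIgnore(err);
//   }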
|
|
static void DN_OS_ErrSinkCheck_(DN_OSErrSink const *err)
|
|
{
|
|
DN_AssertF(err->arena, "Arena should be assigned in TLS init");
|
|
if (err->stack_size == 0)
|
|
return;
|
|
|
|
DN_OSErrSinkNode const *node = err->stack + (err->stack_size - 1);
|
|
DN_Assert(node->mode >= DN_OSErrSinkMode_Nil && node->mode <= DN_OSErrSinkMode_ExitOnError);
|
|
DN_Assert(node->msg_sentinel);
|
|
|
|
// NOTE: Walk the list ensuring we eventually terminate at the sentinel (e.g. we have a
|
|
// well formed doubly-linked-list terminated by a sentinel, or otherwise we will hit the
|
|
// walk limit or dereference a null pointer and assert)
|
|
size_t WALK_LIMIT = 99'999;
|
|
size_t walk = 0;
|
|
for (DN_OSErrSinkMsg *it = node->msg_sentinel->next; it != node->msg_sentinel; it = it->next, walk++) {
|
|
DN_AssertF(it, "Encountered null pointer which should not happen in a sentinel DLL");
|
|
DN_Assert(walk < WALK_LIMIT);
|
|
}
|
|
}
|
|
|
|
DN_API DN_OSErrSink *DN_OS_ErrSinkBegin_(DN_OSErrSinkMode mode, DN_CallSite call_site)
|
|
{
|
|
DN_OSTLS *tls = DN_OS_TLSGet();
|
|
DN_OSErrSink *err = &tls->err_sink;
|
|
DN_OSErrSink *result = err;
|
|
DN_USize arena_pos = DN_Arena_Pos(result->arena);
|
|
|
|
if (tls->err_sink.stack_size == DN_ArrayCountU(err->stack)) {
|
|
DN_Str8Builder builder = DN_Str8Builder_InitFromTLS();
|
|
DN_USize counter = 0;
|
|
for (DN_ForItSize(it, DN_OSErrSinkNode, err->stack, err->stack_size)) {
|
|
DN_MSVC_WARNING_PUSH
|
|
DN_MSVC_WARNING_DISABLE(6284) // Object passed as _Param_(4) when a string is required in call to 'DN_Str8Builder_AppendF' Actual type: 'struct DN_Str8'.
|
|
DN_Str8Builder_AppendF(&builder, " [%04zu] %S:%u %S\n", counter++, it.data->call_site.file, it.data->call_site.line, it.data->call_site.function);
|
|
DN_MSVC_WARNING_POP
|
|
}
|
|
|
|
DN_MSVC_WARNING_PUSH
|
|
DN_MSVC_WARNING_DISABLE(6284) // Object passed as _Param_(6) when a string is required in call to 'DN_LOG_EmitFromType' Actual type: 'struct DN_Str8'.
|
|
DN_AssertF(tls->err_sink.stack_size < DN_ArrayCountU(err->stack),
|
|
"Error sink has run out of error scopes, potential leak. Scopes were\n%S", DN_Str8Builder_BuildFromTLS(&builder));
|
|
DN_MSVC_WARNING_POP
|
|
}
|
|
|
|
DN_OSErrSinkNode *node = tls->err_sink.stack + tls->err_sink.stack_size++;
|
|
node->arena_pos = arena_pos;
|
|
node->mode = mode;
|
|
node->call_site = call_site;
|
|
DN_DLList_InitArena(node->msg_sentinel, DN_OSErrSinkMsg, result->arena);
|
|
|
|
// NOTE: Handle allocation error
|
|
if (!DN_Check(node && node->msg_sentinel)) {
|
|
DN_Arena_PopTo(result->arena, arena_pos);
|
|
node->msg_sentinel = nullptr;
|
|
tls->err_sink.stack_size--;
|
|
}
|
|
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_OS_ErrSinkHasError(DN_OSErrSink *err)
|
|
{
|
|
bool result = false;
|
|
if (err && err->stack_size) {
|
|
DN_OSErrSinkNode *node = err->stack + (err->stack_size - 1);
|
|
result = DN_DLList_HasItems(node->msg_sentinel);
|
|
}
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_OSErrSinkMsg *DN_OS_ErrSinkEnd(DN_Arena *arena, DN_OSErrSink *err)
|
|
{
|
|
DN_OSErrSinkMsg *result = nullptr;
|
|
DN_OS_ErrSinkCheck_(err);
|
|
if (!err || err->stack_size == 0)
|
|
return result;
|
|
|
|
DN_AssertF(arena != err->arena,
|
|
"You are not allowed to reuse the arena for ending the error sink because the memory would get popped and lost");
|
|
// NOTE: Walk the list and allocate it onto the user's arena
|
|
DN_OSErrSinkNode *node = err->stack + (err->stack_size - 1);
|
|
DN_OSErrSinkMsg *prev = nullptr;
|
|
for (DN_OSErrSinkMsg *it = node->msg_sentinel->next; it != node->msg_sentinel; it = it->next) {
|
|
DN_OSErrSinkMsg *entry = DN_Arena_New(arena, DN_OSErrSinkMsg, DN_ZeroMem_Yes);
|
|
entry->msg = DN_Str8_Copy(arena, it->msg);
|
|
entry->call_site = it->call_site;
|
|
entry->error_code = it->error_code;
|
|
if (!result)
|
|
result = entry; // Assign first entry if we haven't yet
|
|
if (prev)
|
|
prev->next = entry; // Link the prev message to the current one
|
|
prev = entry; // Update prev to latest
|
|
}
|
|
|
|
// NOTE: Deallocate all the memory for this scope
|
|
err->stack_size--;
|
|
DN_Arena_PopTo(err->arena, node->arena_pos);
|
|
return result;
|
|
}
|
|
|
|
static void DN_OS_ErrSinkAddMsgToStr8Builder_(DN_Str8Builder *builder, DN_OSErrSinkMsg *msg, DN_OSErrSinkMsg *end)
|
|
{
|
|
if (msg == end) // NOTE: No error messages to add
|
|
return;
|
|
|
|
if (msg->next == end) {
|
|
DN_OSErrSinkMsg *it = msg;
|
|
DN_Str8 file_name = DN_Str8_FileNameFromPath(it->call_site.file);
|
|
DN_Str8Builder_AppendF(builder,
|
|
"%.*s:%05I32u:%.*s %.*s",
|
|
DN_STR_FMT(file_name),
|
|
it->call_site.line,
|
|
DN_STR_FMT(it->call_site.function),
|
|
DN_STR_FMT(it->msg));
|
|
} else {
|
|
// NOTE: More than one message
|
|
for (DN_OSErrSinkMsg *it = msg; it != end; it = it->next) {
|
|
DN_Str8 file_name = DN_Str8_FileNameFromPath(it->call_site.file);
|
|
DN_Str8Builder_AppendF(builder,
|
|
"%s - %.*s:%05I32u:%.*s%s%.*s",
|
|
it == msg ? "" : "\n",
|
|
DN_STR_FMT(file_name),
|
|
it->call_site.line,
|
|
DN_STR_FMT(it->call_site.function),
|
|
DN_Str8_HasData(it->msg) ? " " : "",
|
|
DN_STR_FMT(it->msg));
|
|
}
|
|
}
|
|
}
|
|
|
|
DN_API DN_Str8 DN_OS_ErrSinkEndStr8(DN_Arena *arena, DN_OSErrSink *err)
|
|
{
|
|
DN_Str8 result = {};
|
|
DN_OS_ErrSinkCheck_(err);
|
|
if (!err || err->stack_size == 0)
|
|
return result;
|
|
|
|
DN_AssertF(arena != err->arena,
|
|
"You are not allowed to reuse the arena for ending the error sink because the memory would get popped and lost");
|
|
|
|
// NOTE: Walk the list and allocate it onto the user's arena
|
|
DN_OSTLSTMem tmem = DN_OS_TLSPushTMem(arena);
|
|
DN_Str8Builder builder = DN_Str8Builder_InitFromTLS();
|
|
DN_OSErrSinkNode *node = err->stack + (err->stack_size - 1);
|
|
DN_OS_ErrSinkAddMsgToStr8Builder_(&builder, node->msg_sentinel->next, node->msg_sentinel);
|
|
|
|
// NOTE: Deallocate all the memory for this scope
|
|
err->stack_size--;
|
|
DN_U64 arena_pos = node->arena_pos;
|
|
DN_Arena_PopTo(err->arena, arena_pos);
|
|
|
|
result = DN_Str8Builder_Build(&builder, arena);
|
|
return result;
|
|
}
|
|
|
|
DN_API void DN_OS_ErrSinkEndAndIgnore(DN_OSErrSink *err)
|
|
{
|
|
DN_OS_ErrSinkEnd(nullptr, err);
|
|
}
|
|
|
|
DN_API bool DN_OS_ErrSinkEndAndLogError_(DN_OSErrSink *err, DN_CallSite call_site, DN_Str8 err_msg)
|
|
{
|
|
DN_AssertF(err->stack_size, "Begin must be called before calling end");
|
|
DN_OSErrSinkNode *node = err->stack + (err->stack_size - 1);
|
|
DN_AssertF(node->msg_sentinel, "Begin must be called before calling end");
|
|
|
|
DN_OSTLSTMem tmem = DN_OS_TLSPushTMem(nullptr);
|
|
DN_OSErrSinkMode mode = node->mode;
|
|
DN_OSErrSinkMsg *msg = DN_OS_ErrSinkEnd(tmem.arena, err);
|
|
if (!msg)
|
|
return false;
|
|
|
|
DN_Str8Builder builder = DN_Str8Builder_InitFromTLS();
|
|
if (DN_Str8_HasData(err_msg)) {
|
|
DN_Str8Builder_AppendRef(&builder, err_msg);
|
|
DN_Str8Builder_AppendRef(&builder, DN_STR8(":"));
|
|
} else {
|
|
DN_Str8Builder_AppendRef(&builder, DN_STR8("Error(s) encountered:"));
|
|
}
|
|
|
|
if (msg->next) // NOTE: More than 1 message
|
|
DN_Str8Builder_AppendRef(&builder, DN_STR8("\n"));
|
|
DN_OS_ErrSinkAddMsgToStr8Builder_(&builder, msg, nullptr);
|
|
|
|
DN_Str8 log = DN_Str8Builder_BuildFromTLS(&builder);
|
|
DN_LOG_EmitFromType(DN_LOG_MakeU32LogTypeParam(DN_LOGType_Error), call_site, "%.*s", DN_STR_FMT(log));
|
|
|
|
if (mode == DN_OSErrSinkMode_DebugBreakOnEndAndLog)
|
|
DN_DebugBreak;
|
|
return true;
|
|
}
|
|
|
|
DN_API bool DN_OS_ErrSinkEndAndLogErrorFV_(DN_OSErrSink *err, DN_CallSite call_site, DN_FMT_ATTRIB char const *fmt, va_list args)
|
|
{
|
|
DN_OSTLSTMem tmem = DN_OS_TLSTMem(nullptr);
|
|
DN_Str8 log = DN_Str8_InitFV(tmem.arena, fmt, args);
|
|
bool result = DN_OS_ErrSinkEndAndLogError_(err, call_site, log);
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_OS_ErrSinkEndAndLogErrorF_(DN_OSErrSink *err, DN_CallSite call_site, DN_FMT_ATTRIB char const *fmt, ...)
|
|
{
|
|
va_list args;
|
|
va_start(args, fmt);
|
|
DN_OSTLSTMem tmem = DN_OS_TLSTMem(nullptr);
|
|
DN_Str8 log = DN_Str8_InitFV(tmem.arena, fmt, args);
|
|
bool result = DN_OS_ErrSinkEndAndLogError_(err, call_site, log);
|
|
va_end(args);
|
|
return result;
|
|
}
|
|
|
|
DN_API void DN_OS_ErrSinkEndAndExitIfErrorFV_(DN_OSErrSink *err, DN_CallSite call_site, DN_U32 exit_val, DN_FMT_ATTRIB char const *fmt, va_list args)
|
|
{
|
|
if (DN_OS_ErrSinkEndAndLogErrorFV_(err, call_site, fmt, args)) {
|
|
DN_DebugBreak;
|
|
DN_OS_Exit(exit_val);
|
|
}
|
|
}
|
|
|
|
DN_API void DN_OS_ErrSinkEndAndExitIfErrorF_(DN_OSErrSink *err, DN_CallSite call_site, DN_U32 exit_val, DN_FMT_ATTRIB char const *fmt, ...)
|
|
{
|
|
va_list args;
|
|
va_start(args, fmt);
|
|
DN_OS_ErrSinkEndAndExitIfErrorFV_(err, call_site, exit_val, fmt, args);
|
|
va_end(args);
|
|
}
|
|
|
|
DN_API void DN_OS_ErrSinkAppendFV_(DN_OSErrSink *err, DN_U32 error_code, DN_FMT_ATTRIB char const *fmt, va_list args)
|
|
{
|
|
if (!err)
|
|
return;
|
|
DN_Assert(err && err->stack_size);
|
|
DN_OSErrSinkNode *node = err->stack + (err->stack_size - 1);
|
|
DN_AssertF(node, "Error sink must be begun by calling 'Begin' before using this function.");
|
|
|
|
DN_OSErrSinkMsg *msg = DN_Arena_New(err->arena, DN_OSErrSinkMsg, DN_ZeroMem_Yes);
|
|
if (DN_Check(msg)) {
|
|
msg->msg = DN_Str8_InitFV(err->arena, fmt, args);
|
|
msg->error_code = error_code;
|
|
msg->call_site = DN_OS_TLSGet()->call_site;
|
|
DN_DLList_Prepend(node->msg_sentinel, msg);
|
|
|
|
if (node->mode == DN_OSErrSinkMode_ExitOnError)
|
|
DN_OS_ErrSinkEndAndExitIfErrorF_(err, msg->call_site, error_code, "Fatal error %u", error_code);
|
|
}
|
|
}
|
|
|
|
DN_API void DN_OS_ErrSinkAppendF_(DN_OSErrSink *err, DN_U32 error_code, DN_FMT_ATTRIB char const *fmt, ...)
|
|
{
|
|
if (!err)
|
|
return;
|
|
va_list args;
|
|
va_start(args, fmt);
|
|
DN_OS_ErrSinkAppendFV_(err, error_code, fmt, args);
|
|
va_end(args);
|
|
}
|
|
|
|
// DN: Single header generator inlined this file => #include "OS/dn_os.cpp"
|
|
#define DN_OS_CPP
|
|
|
|
#if defined(DN_PLATFORM_POSIX)
|
|
#include <sys/sysinfo.h> // get_nprocs
|
|
#include <unistd.h> // getpagesize
|
|
#endif
|
|
|
|
static DN_OSCore *g_dn_os_core_;
|
|
|
|
static void DN_OS_LOGEmitFromTypeTypeFV_(DN_LOGTypeParam type, void *user_data, DN_CallSite call_site, DN_FMT_ATTRIB char const *fmt, va_list args)
|
|
{
|
|
DN_Assert(user_data);
|
|
DN_OSCore *core = DN_CAST(DN_OSCore *)user_data;
|
|
|
|
// NOTE: Open log file for appending if requested ////////////////////////////////////////////////
|
|
DN_TicketMutex_Begin(&core->log_file_mutex);
|
|
if (core->log_to_file && !core->log_file.handle && !core->log_file.error) {
|
|
DN_OSTLSTMem tmem = DN_OS_TLSTMem(nullptr);
|
|
DN_MSVC_WARNING_PUSH
|
|
DN_MSVC_WARNING_DISABLE(6284) // Object passed as _Param_(3) when a string is required in call to 'DN_OS_PathF' Actual type: 'struct DN_Str8'.
|
|
DN_Str8 log_path = DN_OS_PathF(tmem.arena, "%S/dn.log", DN_OS_EXEDir(tmem.arena));
|
|
DN_MSVC_WARNING_POP
|
|
core->log_file = DN_OS_FileOpen(log_path, DN_OSFileOpen_CreateAlways, DN_OSFileAccess_AppendOnly, nullptr);
|
|
}
|
|
DN_TicketMutex_End(&core->log_file_mutex);
|
|
|
|
DN_LOGStyle style = {};
|
|
if (!core->log_no_colour) {
|
|
style.colour = true;
|
|
style.bold = DN_LOGBold_Yes;
|
|
if (type.is_u32_enum) {
|
|
switch (type.u32) {
|
|
case DN_LOGType_Debug: {
|
|
style.colour = false;
|
|
style.bold = DN_LOGBold_No;
|
|
} break;
|
|
|
|
case DN_LOGType_Info: {
|
|
style.g = 0x87;
|
|
style.b = 0xff;
|
|
} break;
|
|
|
|
case DN_LOGType_Warning: {
|
|
style.r = 0xff;
|
|
style.g = 0xff;
|
|
} break;
|
|
|
|
case DN_LOGType_Error: {
|
|
style.r = 0xff;
|
|
} break;
|
|
}
|
|
}
|
|
}
|
|
|
|
DN_OSDateTime os_date = DN_OS_DateLocalTimeNow();
|
|
DN_LOGDate log_date = {};
|
|
log_date.year = os_date.year;
|
|
log_date.month = os_date.month;
|
|
log_date.day = os_date.day;
|
|
log_date.hour = os_date.hour;
|
|
log_date.minute = os_date.minutes;
|
|
log_date.second = os_date.seconds;
|
|
|
|
char prefix_buffer[128] = {};
|
|
DN_LOGPrefixSize prefix_size = DN_LOG_MakePrefix(style, type, call_site, log_date, prefix_buffer, sizeof(prefix_buffer));
|
|
|
|
va_list args_copy;
|
|
va_copy(args_copy, args);
|
|
DN_TicketMutex_Begin(&core->log_file_mutex);
|
|
{
|
|
DN_OS_FileWrite(&core->log_file, DN_Str8_Init(prefix_buffer, prefix_size.size), nullptr);
|
|
DN_OS_FileWriteF(&core->log_file, nullptr, "%*s ", DN_CAST(int)prefix_size.padding, "");
|
|
DN_OS_FileWriteFV(&core->log_file, nullptr, fmt, args_copy);
|
|
DN_OS_FileWrite(&core->log_file, DN_STR8("\n"), nullptr);
|
|
}
|
|
DN_TicketMutex_End(&core->log_file_mutex);
|
|
va_end(args_copy);
|
|
|
|
DN_OSPrintDest dest = (type.is_u32_enum && type.u32 == DN_LOGType_Error) ? DN_OSPrintDest_Err : DN_OSPrintDest_Out;
|
|
DN_OS_Print(dest, DN_Str8_Init(prefix_buffer, prefix_size.size));
|
|
DN_OS_PrintF(dest, "%*s ", DN_CAST(int)prefix_size.padding, "");
|
|
DN_OS_PrintLnFV(dest, fmt, args);
|
|
}
|
|
|
|
DN_API void DN_OS_Init(DN_OSCore *os, DN_OSInitArgs *args)
|
|
{
|
|
g_dn_os_core_ = os;
|
|
|
|
// NOTE: OS
|
|
{
|
|
#if defined(DN_PLATFORM_WIN32)
|
|
SYSTEM_INFO system_info = {};
|
|
GetSystemInfo(&system_info);
|
|
|
|
os->logical_processor_count = system_info.dwNumberOfProcessors;
|
|
os->page_size = system_info.dwPageSize;
|
|
os->alloc_granularity = system_info.dwAllocationGranularity;
|
|
#else
|
|
os->logical_processor_count = get_nprocs();
|
|
os->page_size = getpagesize();
|
|
os->alloc_granularity = os->page_size;
|
|
#endif
|
|
}
|
|
|
|
// NOTE: Setup logging
|
|
DN_OS_EmitLogsWithOSPrintFunctions(os);
|
|
|
|
{
|
|
os->arena = DN_Arena_InitFromOSVMem(DN_Megabytes(1), DN_Kilobytes(4), DN_ArenaFlags_NoAllocTrack);
|
|
#if defined(DN_PLATFORM_WIN32)
|
|
os->platform_context = DN_Arena_New(&os->arena, DN_W32Core, DN_ZeroMem_Yes);
|
|
#elif defined(DN_PLATFORM_POSIX)
|
|
os->platform_context = DN_Arena_New(&os->arena, DN_POSIXCore, DN_ZeroMem_Yes);
|
|
#endif
|
|
|
|
#if defined(DN_PLATFORM_WIN32)
|
|
DN_W32Core *w32 = DN_CAST(DN_W32Core *) os->platform_context;
|
|
InitializeCriticalSection(&w32->sync_primitive_free_list_mutex);
|
|
|
|
QueryPerformanceFrequency(&w32->qpc_frequency);
|
|
HMODULE module = LoadLibraryA("kernel32.dll");
|
|
w32->set_thread_description = DN_CAST(DN_W32SetThreadDescriptionFunc *) GetProcAddress(module, "SetThreadDescription");
|
|
FreeLibrary(module);
|
|
|
|
// NOTE: win32 bcrypt
|
|
wchar_t const BCRYPT_ALGORITHM[] = L"RNG";
|
|
long /*NTSTATUS*/ init_status = BCryptOpenAlgorithmProvider(&w32->bcrypt_rng_handle, BCRYPT_ALGORITHM, nullptr /*implementation*/, 0 /*flags*/);
|
|
if (w32->bcrypt_rng_handle && init_status == 0)
|
|
w32->bcrypt_init_success = true;
|
|
else
|
|
DN_LOG_ErrorF("Failed to initialise Windows secure random number generator, error: %d", init_status);
|
|
#else
|
|
DN_POSIXCore *posix = DN_CAST(DN_POSIXCore *) os->platform_context;
|
|
int mutex_init = pthread_mutex_init(&posix->sync_primitive_free_list_mutex, nullptr);
|
|
DN_Assert(mutex_init == 0);
|
|
#endif
|
|
}
|
|
|
|
// NOTE: Initialise tmem arenas which allocate memory and will be
|
|
// recorded to the now initialised allocation table. The initialisation
|
|
// of tmem memory may request tmem memory itself in leak tracing mode.
|
|
// This is supported as the tmem arenas defer allocation tracking until
|
|
// initialisation is done.
|
|
DN_OSTLSInitArgs tls_init_args = {};
|
|
if (args) {
|
|
tls_init_args.commit = args->tls_commit;
|
|
tls_init_args.reserve = args->tls_reserve;
|
|
tls_init_args.err_sink_reserve = args->tls_err_sink_reserve;
|
|
tls_init_args.err_sink_commit = args->tls_err_sink_commit;
|
|
}
|
|
|
|
DN_OS_TLSInit(&os->tls, tls_init_args);
|
|
DN_OS_TLSSetCurrentThreadTLS(&os->tls);
|
|
os->cpu_report = DN_CPU_Report();
|
|
|
|
#define DN_CPU_FEAT_XENTRY(label) g_dn_cpu_feature_decl[DN_CPUFeature_##label] = {DN_CPUFeature_##label, DN_STR8(#label)};
|
|
DN_CPU_FEAT_XMACRO
|
|
#undef DN_CPU_FEAT_XENTRY
|
|
DN_Assert(g_dn_os_core_);
|
|
}
|
|
|
|
DN_API void DN_OS_EmitLogsWithOSPrintFunctions(DN_OSCore *os)
|
|
{
|
|
DN_Assert(os);
|
|
DN_LOG_SetEmitFromTypeFVFunc(DN_OS_LOGEmitFromTypeTypeFV_, os);
|
|
}
|
|
|
|
DN_API void DN_OS_DumpThreadContextArenaStat(DN_Str8 file_path)
|
|
{
|
|
#if defined(DN_DEBUG_THREAD_CONTEXT)
|
|
// NOTE: Open a file to write the arena stats to
|
|
FILE *file = nullptr;
|
|
fopen_s(&file, file_path.data, "a+b");
|
|
    if (!file) {
|
|
DN_LOG_ErrorF("Failed to dump thread context arenas [file=%.*s]", DN_STR_FMT(file_path));
|
|
return;
|
|
}
|
|
|
|
// NOTE: Copy the stats from library book-keeping
|
|
// NOTE: Extremely short critical section, copy the stats then do our
|
|
// work on it.
|
|
DN_ArenaStat stats[DN_CArray_CountI(g_dn_core->thread_context_arena_stats)];
|
|
int stats_size = 0;
|
|
|
|
DN_TicketMutex_Begin(&g_dn_core->thread_context_mutex);
|
|
stats_size = g_dn_core->thread_context_arena_stats_count;
|
|
DN_Memcpy(stats, g_dn_core->thread_context_arena_stats, sizeof(stats[0]) * stats_size);
|
|
DN_TicketMutex_End(&g_dn_core->thread_context_mutex);
|
|
|
|
// NOTE: Print the cumulative stat
|
|
DN_DateHMSTimeStr now = DN_Date_HMSLocalTimeStrNow();
|
|
fprintf(file,
|
|
"Time=%.*s %.*s | Thread Context Arenas | Count=%d\n",
|
|
now.date_size,
|
|
now.date,
|
|
now.hms_size,
|
|
now.hms,
|
|
g_dn_core->thread_context_arena_stats_count);
|
|
|
|
// NOTE: Write the cumulative thread arena data
|
|
{
|
|
DN_ArenaStat stat = {};
|
|
for (DN_USize index = 0; index < stats_size; index++) {
|
|
DN_ArenaStat const *current = stats + index;
|
|
stat.capacity += current->capacity;
|
|
stat.used += current->used;
|
|
stat.wasted += current->wasted;
|
|
stat.blocks += current->blocks;
|
|
|
|
stat.capacity_hwm = DN_Max(stat.capacity_hwm, current->capacity_hwm);
|
|
stat.used_hwm = DN_Max(stat.used_hwm, current->used_hwm);
|
|
stat.wasted_hwm = DN_Max(stat.wasted_hwm, current->wasted_hwm);
|
|
stat.blocks_hwm = DN_Max(stat.blocks_hwm, current->blocks_hwm);
|
|
}
|
|
|
|
DN_ArenaStatStr stats_string = DN_Arena_StatStr(&stat);
|
|
fprintf(file, " [ALL] CURR %.*s\n", stats_string.size, stats_string.data);
|
|
}
|
|
|
|
// NOTE: Print individual thread arena data
|
|
for (DN_USize index = 0; index < stats_size; index++) {
|
|
DN_ArenaStat const *current = stats + index;
|
|
DN_ArenaStatStr current_string = DN_Arena_StatStr(current);
|
|
fprintf(file, " [%03d] CURR %.*s\n", DN_CAST(int) index, current_string.size, current_string.data);
|
|
}
|
|
|
|
fclose(file);
|
|
DN_LOG_InfoF("Dumped thread context arenas [file=%.*s]", DN_STR_FMT(file_path));
|
|
#else
|
|
(void)file_path;
|
|
#endif // #if defined(DN_DEBUG_THREAD_CONTEXT)
|
|
}
|
|
|
|
// NOTE: Date //////////////////////////////////////////////////////////////////////////////////////
|
|
DN_API DN_OSDateTimeStr8 DN_OS_DateLocalTimeStr8(DN_OSDateTime time, char date_separator, char hms_separator)
|
|
{
|
|
DN_OSDateTimeStr8 result = {};
|
|
result.hms_size = DN_CAST(uint8_t) DN_SNPrintF(result.hms,
|
|
DN_ArrayCountI(result.hms),
|
|
"%02hhu%c%02hhu%c%02hhu",
|
|
time.hour,
|
|
hms_separator,
|
|
time.minutes,
|
|
hms_separator,
|
|
time.seconds);
|
|
|
|
result.date_size = DN_CAST(uint8_t) DN_SNPrintF(result.date,
|
|
DN_ArrayCountI(result.date),
|
|
"%hu%c%02hhu%c%02hhu",
|
|
time.year,
|
|
date_separator,
|
|
time.month,
|
|
date_separator,
|
|
time.day);
|
|
|
|
DN_Assert(result.hms_size < DN_ArrayCountU(result.hms));
|
|
DN_Assert(result.date_size < DN_ArrayCountU(result.date));
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_OSDateTimeStr8 DN_OS_DateLocalTimeStr8Now(char date_separator, char hms_separator)
|
|
{
|
|
DN_OSDateTime time = DN_OS_DateLocalTimeNow();
|
|
DN_OSDateTimeStr8 result = DN_OS_DateLocalTimeStr8(time, date_separator, hms_separator);
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_OS_DateIsValid(DN_OSDateTime date)
|
|
{
|
|
if (date.year < 1970)
|
|
return false;
|
|
if (date.month <= 0 || date.month >= 13)
|
|
return false;
|
|
if (date.day <= 0 || date.day >= 32)
|
|
return false;
|
|
if (date.hour >= 24)
|
|
return false;
|
|
if (date.minutes >= 60)
|
|
return false;
|
|
if (date.seconds >= 60)
|
|
return false;
|
|
return true;
|
|
}
|
|
|
|
// NOTE: Other /////////////////////////////////////////////////////////////////////////////////////
|
|
DN_API DN_Str8 DN_OS_EXEDir(DN_Arena *arena)
|
|
{
|
|
DN_Str8 result = {};
|
|
if (!arena)
|
|
return result;
|
|
DN_OSTLSTMem tmem = DN_OS_TLSTMem(arena);
|
|
DN_Str8 exe_path = DN_OS_EXEPath(tmem.arena);
|
|
DN_Str8 separators[] = {DN_STR8("/"), DN_STR8("\\")};
|
|
DN_Str8BinarySplitResult split = DN_Str8_BinarySplitLastArray(exe_path, separators, DN_ArrayCountU(separators));
|
|
result = DN_Str8_Copy(arena, split.lhs);
|
|
return result;
|
|
}
|
|
|
|
// NOTE: Counters //////////////////////////////////////////////////////////////////////////////////
|
|
DN_API DN_F64 DN_OS_PerfCounterS(uint64_t begin, uint64_t end)
|
|
{
|
|
uint64_t frequency = DN_OS_PerfCounterFrequency();
|
|
uint64_t ticks = end - begin;
|
|
DN_F64 result = ticks / DN_CAST(DN_F64) frequency;
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_F64 DN_OS_PerfCounterMs(uint64_t begin, uint64_t end)
|
|
{
|
|
uint64_t frequency = DN_OS_PerfCounterFrequency();
|
|
uint64_t ticks = end - begin;
|
|
DN_F64 result = (ticks * 1'000) / DN_CAST(DN_F64) frequency;
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_F64 DN_OS_PerfCounterUs(uint64_t begin, uint64_t end)
|
|
{
|
|
uint64_t frequency = DN_OS_PerfCounterFrequency();
|
|
uint64_t ticks = end - begin;
|
|
DN_F64 result = (ticks * 1'000'000) / DN_CAST(DN_F64) frequency;
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_F64 DN_OS_PerfCounterNs(uint64_t begin, uint64_t end)
|
|
{
|
|
uint64_t frequency = DN_OS_PerfCounterFrequency();
|
|
uint64_t ticks = end - begin;
|
|
DN_F64 result = (ticks * 1'000'000'000) / DN_CAST(DN_F64) frequency;
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_OSTimer DN_OS_TimerBegin()
|
|
{
|
|
DN_OSTimer result = {};
|
|
result.start = DN_OS_PerfCounterNow();
|
|
return result;
|
|
}
|
|
|
|
DN_API void DN_OS_TimerEnd(DN_OSTimer *timer)
|
|
{
|
|
timer->end = DN_OS_PerfCounterNow();
|
|
}
|
|
|
|
DN_API DN_F64 DN_OS_TimerS(DN_OSTimer timer)
|
|
{
|
|
DN_F64 result = DN_OS_PerfCounterS(timer.start, timer.end);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_F64 DN_OS_TimerMs(DN_OSTimer timer)
|
|
{
|
|
DN_F64 result = DN_OS_PerfCounterMs(timer.start, timer.end);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_F64 DN_OS_TimerUs(DN_OSTimer timer)
|
|
{
|
|
DN_F64 result = DN_OS_PerfCounterUs(timer.start, timer.end);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_F64 DN_OS_TimerNs(DN_OSTimer timer)
|
|
{
|
|
DN_F64 result = DN_OS_PerfCounterNs(timer.start, timer.end);
|
|
return result;
|
|
}
|
|
|
|
DN_API uint64_t DN_OS_EstimateTSCPerSecond(uint64_t duration_ms_to_gauge_tsc_frequency)
|
|
{
|
|
uint64_t os_frequency = DN_OS_PerfCounterFrequency();
|
|
uint64_t os_target_elapsed = duration_ms_to_gauge_tsc_frequency * os_frequency / 1000ULL;
|
|
uint64_t tsc_begin = DN_CPU_TSC();
|
|
uint64_t result = 0;
|
|
if (tsc_begin) {
|
|
uint64_t os_elapsed = 0;
|
|
for (uint64_t os_begin = DN_OS_PerfCounterNow(); os_elapsed < os_target_elapsed;)
|
|
os_elapsed = DN_OS_PerfCounterNow() - os_begin;
|
|
uint64_t tsc_end = DN_CPU_TSC();
|
|
uint64_t tsc_elapsed = tsc_end - tsc_begin;
|
|
// NOTE: Multiply before dividing so the integer division doesn't truncate
// the TSC-per-OS-tick ratio (significant when the OS counter is
// nanosecond-granular); assumes the gauge duration is short enough that
// tsc_elapsed * os_frequency fits in 64 bits.
result = tsc_elapsed * os_frequency / os_elapsed;
|
|
}
|
|
return result;
|
|
}
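// NOTE: Illustrative sketch (disabled, not part of the library; the helper
// name and the 100ms gauge duration are made up for the example): estimate
// the TSC frequency once, then convert raw DN_CPU_TSC() deltas to seconds.
#if 0
static void DN_Example_TimeWithTSC_()
{
    uint64_t tsc_per_second = DN_OS_EstimateTSCPerSecond(100 /*ms to gauge over*/);
    uint64_t tsc_begin      = DN_CPU_TSC();
    // ... work to be measured ...
    uint64_t tsc_end        = DN_CPU_TSC();
    // (a production caller should check tsc_per_second != 0 before dividing)
    DN_F64 seconds          = (tsc_end - tsc_begin) / DN_CAST(DN_F64) tsc_per_second;
    (void)seconds;
}
#endif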
|
|
|
|
#if !defined(DN_NO_OS_FILE_API)
|
|
// NOTE: DN_OSPathInfo/File ////////////////////////////////////////////////////////////////////////
|
|
DN_API bool DN_OS_FileIsOlderThan(DN_Str8 file, DN_Str8 check_against)
|
|
{
|
|
DN_OSPathInfo file_info = DN_OS_PathInfo(file);
|
|
DN_OSPathInfo check_against_info = DN_OS_PathInfo(check_against);
|
|
bool result = !file_info.exists || file_info.last_write_time_in_s < check_against_info.last_write_time_in_s;
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_OS_FileWrite(DN_OSFile *file, DN_Str8 buffer, DN_OSErrSink *error)
|
|
{
|
|
bool result = DN_OS_FileWritePtr(file, buffer.data, buffer.size, error);
|
|
return result;
|
|
}
|
|
|
|
struct DN_OSFileWriteChunker_
|
|
{
|
|
DN_OSErrSink *err;
|
|
DN_OSFile *file;
|
|
bool success;
|
|
};
|
|
|
|
static char *DN_OS_FileWriteChunker_(const char *buf, void *user, int len)
|
|
{
|
|
DN_OSFileWriteChunker_ *chunker = DN_CAST(DN_OSFileWriteChunker_ *)user;
|
|
chunker->success = DN_OS_FileWritePtr(chunker->file, buf, len, chunker->err);
|
|
char *result = chunker->success ? DN_CAST(char *) buf : nullptr;
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_OS_FileWriteFV(DN_OSFile *file, DN_OSErrSink *error, DN_FMT_ATTRIB char const *fmt, va_list args)
|
|
{
|
|
bool result = false;
|
|
if (!file || !fmt)
|
|
return result;
|
|
|
|
DN_OSFileWriteChunker_ chunker = {};
|
|
chunker.err = error;
|
|
chunker.file = file;
|
|
char buffer[STB_SPRINTF_MIN];
|
|
STB_SPRINTF_DECORATE(vsprintfcb)(DN_OS_FileWriteChunker_, &chunker, buffer, fmt, args);
|
|
|
|
result = chunker.success;
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_OS_FileWriteF(DN_OSFile *file, DN_OSErrSink *error, DN_FMT_ATTRIB char const *fmt, ...)
|
|
{
|
|
va_list args;
|
|
va_start(args, fmt);
|
|
bool result = DN_OS_FileWriteFV(file, error, fmt, args);
|
|
va_end(args);
|
|
return result;
|
|
}
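// NOTE: The chunker above lets stb_sprintf stream formatted output to the
// file in STB_SPRINTF_MIN-sized pieces, so no buffer proportional to the
// formatted length is required. Illustrative usage sketch (disabled; the
// helper and file name are made up for the example):
#if 0
static void DN_Example_FileWriteF_()
{
    DN_OSErrSink *error = DN_OS_ErrSinkBegin(DN_OSErrSinkMode_Nil);
    DN_OSFile     file  = DN_OS_FileOpen(DN_STR8("log.txt"), DN_OSFileOpen_CreateAlways, DN_OSFileAccess_Write, error);
    DN_OS_FileWriteF(&file, error, "pi is roughly %f\n", 3.14159);
    DN_OS_FileClose(&file);
    DN_OS_ErrSinkEndAndIgnore(error);
}
#endif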
|
|
|
|
// NOTE: R/W Entire File ///////////////////////////////////////////////////////////////////////////
|
|
DN_API DN_Str8 DN_OS_ReadAll(DN_Arena *arena, DN_Str8 path, DN_OSErrSink *error)
|
|
{
|
|
DN_Str8 result = {};
|
|
if (!arena)
|
|
return result;
|
|
|
|
// NOTE: Query file size + allocate buffer /////////////////////////////////////////////////////
|
|
DN_OSPathInfo path_info = DN_OS_PathInfo(path);
|
|
if (!path_info.exists) {
|
|
DN_OS_ErrSinkAppendF(error, 1, "File does not exist/could not be queried for reading '%.*s'", DN_STR_FMT(path));
|
|
return result;
|
|
}
|
|
|
|
DN_ArenaTempMem temp_mem = DN_Arena_TempMemBegin(arena);
|
|
result = DN_Str8_Alloc(arena, path_info.size, DN_ZeroMem_No);
|
|
if (!DN_Str8_HasData(result)) {
|
|
DN_OSTLSTMem tmem = DN_OS_TLSTMem(nullptr);
|
|
DN_Str8 buffer_size_str8 = DN_CVT_U64ToByteSizeStr8(tmem.arena, path_info.size, DN_CVTU64ByteSizeType_Auto);
|
|
DN_OS_ErrSinkAppendF(error, 1 /*error_code*/, "Failed to allocate %.*s for reading file '%.*s'", DN_STR_FMT(buffer_size_str8), DN_STR_FMT(path));
|
|
DN_Arena_TempMemEnd(temp_mem);
|
|
result = {};
|
|
return result;
|
|
}
|
|
|
|
// NOTE: Read the file from disk ///////////////////////////////////////////////////////////////
|
|
DN_OSFile file = DN_OS_FileOpen(path, DN_OSFileOpen_OpenIfExist, DN_OSFileAccess_Read, error);
|
|
DN_OSFileRead read = DN_OS_FileRead(&file, result.data, result.size, error);
|
|
if (file.error || !read.success) {
|
|
DN_Arena_TempMemEnd(temp_mem);
|
|
result = {};
|
|
}
|
|
DN_OS_FileClose(&file);
|
|
|
|
return result;
|
|
}
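// NOTE: Illustrative sketch (disabled; the helper and file name are made up):
// read a whole file into thread-local temporary memory. Passing an explicit
// arena instead keeps the contents alive beyond the temporary scope.
#if 0
static void DN_Example_ReadAll_()
{
    DN_OSTLSTMem  tmem  = DN_OS_TLSTMem(nullptr);
    DN_OSErrSink *error = DN_OS_ErrSinkBegin(DN_OSErrSinkMode_Nil);
    DN_Str8       data  = DN_OS_ReadAll(tmem.arena, DN_STR8("config.ini"), error);
    DN_OS_ErrSinkEndAndIgnore(error);
    (void)data;
}
#endif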
|
|
|
|
DN_API bool DN_OS_WriteAll(DN_Str8 path, DN_Str8 buffer, DN_OSErrSink *error)
|
|
{
|
|
DN_OSFile file = DN_OS_FileOpen(path, DN_OSFileOpen_CreateAlways, DN_OSFileAccess_Write, error);
|
|
bool result = DN_OS_FileWrite(&file, buffer, error);
|
|
DN_OS_FileClose(&file);
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_OS_WriteAllFV(DN_Str8 file_path, DN_OSErrSink *error, DN_FMT_ATTRIB char const *fmt, va_list args)
|
|
{
|
|
DN_OSTLSTMem tmem = DN_OS_TLSTMem(nullptr);
|
|
DN_Str8 buffer = DN_Str8_InitFV(tmem.arena, fmt, args);
|
|
bool result = DN_OS_WriteAll(file_path, buffer, error);
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_OS_WriteAllF(DN_Str8 file_path, DN_OSErrSink *error, DN_FMT_ATTRIB char const *fmt, ...)
|
|
{
|
|
va_list args;
|
|
va_start(args, fmt);
|
|
bool result = DN_OS_WriteAllFV(file_path, error, fmt, args);
|
|
va_end(args);
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_OS_WriteAllSafe(DN_Str8 path, DN_Str8 buffer, DN_OSErrSink *error)
|
|
{
|
|
DN_OSTLSTMem tmem = DN_OS_TLSTMem(nullptr);
|
|
DN_Str8 tmp_path = DN_Str8_InitF(tmem.arena, "%.*s.tmp", DN_STR_FMT(path));
|
|
if (!DN_OS_WriteAll(tmp_path, buffer, error))
|
|
return false;
|
|
if (!DN_OS_CopyFile(tmp_path, path, true /*overwrite*/, error))
|
|
return false;
|
|
if (!DN_OS_PathDelete(tmp_path))
|
|
return false;
|
|
return true;
|
|
}
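// NOTE: WriteAllSafe stages the contents in "<path>.tmp" and only then copies
// it over the destination, so a crash mid-write leaves the previous file
// intact. Illustrative sketch (disabled; helper and file name are made up):
#if 0
static void DN_Example_WriteAllSafe_()
{
    DN_OSErrSink *error = DN_OS_ErrSinkBegin(DN_OSErrSinkMode_Nil);
    DN_OS_WriteAllSafeF(DN_STR8("settings.ini"), error, "volume=%d\n", 11);
    DN_OS_ErrSinkEndAndIgnore(error);
}
#endif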
|
|
|
|
DN_API bool DN_OS_WriteAllSafeFV(DN_Str8 path, DN_OSErrSink *error, DN_FMT_ATTRIB char const *fmt, va_list args)
|
|
{
|
|
DN_OSTLSTMem tmem = DN_OS_TLSTMem(nullptr);
|
|
DN_Str8 buffer = DN_Str8_InitFV(tmem.arena, fmt, args);
|
|
bool result = DN_OS_WriteAllSafe(path, buffer, error);
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_OS_WriteAllSafeF(DN_Str8 path, DN_OSErrSink *error, DN_FMT_ATTRIB char const *fmt, ...)
|
|
{
|
|
va_list args;
|
|
va_start(args, fmt);
|
|
bool result = DN_OS_WriteAllSafeFV(path, error, fmt, args);
|
|
return result;
|
|
}
|
|
#endif // !defined(DN_NO_OS_FILE_API)
|
|
|
|
// NOTE: DN_OSPath /////////////////////////////////////////////////////////////////////////////////
|
|
DN_API bool DN_OS_PathAddRef(DN_Arena *arena, DN_OSPath *fs_path, DN_Str8 path)
|
|
{
|
|
if (!arena || !fs_path || !DN_Str8_HasData(path))
|
|
return false;
|
|
|
|
if (path.size <= 0)
|
|
return true;
|
|
|
|
DN_Str8 const delimiter_array[] = {
|
|
DN_STR8("\\"),
|
|
DN_STR8("/")};
|
|
|
|
if (fs_path->links_size == 0)
|
|
fs_path->has_prefix_path_separator = (path.data[0] == '/');
|
|
|
|
for (;;) {
|
|
DN_Str8BinarySplitResult delimiter = DN_Str8_BinarySplitArray(path, delimiter_array, DN_ArrayCountU(delimiter_array));
|
|
for (; delimiter.lhs.data; delimiter = DN_Str8_BinarySplitArray(delimiter.rhs, delimiter_array, DN_ArrayCountU(delimiter_array))) {
|
|
if (delimiter.lhs.size <= 0)
|
|
continue;
|
|
|
|
DN_OSPathLink *link = DN_Arena_New(arena, DN_OSPathLink, DN_ZeroMem_Yes);
|
|
if (!link)
|
|
return false;
|
|
|
|
link->string = delimiter.lhs;
|
|
link->prev = fs_path->tail;
|
|
if (fs_path->tail)
|
|
fs_path->tail->next = link;
|
|
else
|
|
fs_path->head = link;
|
|
fs_path->tail = link;
|
|
fs_path->links_size += 1;
|
|
fs_path->string_size += delimiter.lhs.size;
|
|
}
|
|
|
|
if (!delimiter.lhs.data)
|
|
break;
|
|
}
|
|
|
|
return true;
|
|
}
|
|
|
|
DN_API bool DN_OS_PathAdd(DN_Arena *arena, DN_OSPath *fs_path, DN_Str8 path)
|
|
{
|
|
DN_Str8 copy = DN_Str8_Copy(arena, path);
|
|
bool result = DN_Str8_HasData(copy) ? DN_OS_PathAddRef(arena, fs_path, copy) : false;
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_OS_PathAddF(DN_Arena *arena, DN_OSPath *fs_path, DN_FMT_ATTRIB char const *fmt, ...)
|
|
{
|
|
va_list args;
|
|
va_start(args, fmt);
|
|
DN_Str8 path = DN_Str8_InitFV(arena, fmt, args);
|
|
va_end(args);
|
|
bool result = DN_OS_PathAddRef(arena, fs_path, path);
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_OS_PathPop(DN_OSPath *fs_path)
|
|
{
|
|
if (!fs_path)
|
|
return false;
|
|
|
|
if (fs_path->tail) {
|
|
DN_Assert(fs_path->head);
|
|
fs_path->links_size -= 1;
|
|
fs_path->string_size -= fs_path->tail->string.size;
|
|
fs_path->tail = fs_path->tail->prev;
|
|
if (fs_path->tail)
|
|
fs_path->tail->next = nullptr;
|
|
else
|
|
fs_path->head = nullptr;
|
|
} else {
|
|
DN_Assert(!fs_path->head);
|
|
}
|
|
|
|
return true;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_OS_PathTo(DN_Arena *arena, DN_Str8 path, DN_Str8 path_separator)
|
|
{
|
|
DN_OSPath fs_path = {};
|
|
DN_OS_PathAddRef(arena, &fs_path, path);
|
|
DN_Str8 result = DN_OS_PathBuildWithSeparator(arena, &fs_path, path_separator);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_OS_PathToF(DN_Arena *arena, DN_Str8 path_separator, DN_FMT_ATTRIB char const *fmt, ...)
|
|
{
|
|
DN_OSTLSTMem tmem = DN_OS_TLSTMem(arena);
|
|
va_list args;
|
|
va_start(args, fmt);
|
|
DN_Str8 path = DN_Str8_InitFV(tmem.arena, fmt, args);
|
|
va_end(args);
|
|
DN_Str8 result = DN_OS_PathTo(arena, path, path_separator);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_OS_Path(DN_Arena *arena, DN_Str8 path)
|
|
{
|
|
DN_Str8 result = DN_OS_PathTo(arena, path, DN_OSPathSeperatorString);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_OS_PathF(DN_Arena *arena, DN_FMT_ATTRIB char const *fmt, ...)
|
|
{
|
|
DN_OSTLSTMem tmem = DN_OS_TLSTMem(arena);
|
|
va_list args;
|
|
va_start(args, fmt);
|
|
DN_Str8 path = DN_Str8_InitFV(tmem.arena, fmt, args);
|
|
va_end(args);
|
|
DN_Str8 result = DN_OS_Path(arena, path);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_OS_PathBuildWithSeparator(DN_Arena *arena, DN_OSPath const *fs_path, DN_Str8 path_separator)
|
|
{
|
|
DN_Str8 result = {};
|
|
if (!fs_path || fs_path->links_size <= 0)
|
|
return result;
|
|
|
|
// NOTE: Each link except the last one needs the path separator appended to it, '/' or '\\'
|
|
DN_USize string_size = (fs_path->has_prefix_path_separator ? path_separator.size : 0) + fs_path->string_size + ((fs_path->links_size - 1) * path_separator.size);
|
|
result = DN_Str8_Alloc(arena, string_size, DN_ZeroMem_No);
|
|
if (result.data) {
|
|
char *dest = result.data;
|
|
if (fs_path->has_prefix_path_separator) {
|
|
DN_Memcpy(dest, path_separator.data, path_separator.size);
|
|
dest += path_separator.size;
|
|
}
|
|
|
|
for (DN_OSPathLink *link = fs_path->head; link; link = link->next) {
|
|
DN_Str8 string = link->string;
|
|
DN_Memcpy(dest, string.data, string.size);
|
|
dest += string.size;
|
|
|
|
if (link != fs_path->tail) {
|
|
DN_Memcpy(dest, path_separator.data, path_separator.size);
|
|
dest += path_separator.size;
|
|
}
|
|
}
|
|
result.data[string_size] = 0;
}
|
|
return result;
|
|
}
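// NOTE: DN_OS_PathAddRef splits the incoming string on '/' and '\\' into a
// linked list of components and the build step re-joins them with the
// requested separator. Illustrative sketch (disabled; helper name made up):
#if 0
static void DN_Example_PathBuild_()
{
    DN_OSTLSTMem tmem = DN_OS_TLSTMem(nullptr);
    DN_OSPath    path = {};
    DN_OS_PathAddRef(tmem.arena, &path, DN_STR8("/usr/local"));
    DN_OS_PathAddRef(tmem.arena, &path, DN_STR8("bin"));
    DN_Str8 joined    = DN_OS_PathBuildWithSeparator(tmem.arena, &path, DN_STR8("/"));
    (void)joined; // joined == "/usr/local/bin"
}
#endif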
|
|
|
|
// NOTE: DN_OSExec /////////////////////////////////////////////////////////////////////////////////
|
|
DN_API DN_OSExecResult DN_OS_Exec(DN_Slice<DN_Str8> cmd_line,
|
|
DN_OSExecArgs *args,
|
|
DN_Arena *arena,
|
|
DN_OSErrSink *error)
|
|
{
|
|
DN_OSExecAsyncHandle async_handle = DN_OS_ExecAsync(cmd_line, args, error);
|
|
DN_OSExecResult result = DN_OS_ExecWait(async_handle, arena, error);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_OSExecResult DN_OS_ExecOrAbort(DN_Slice<DN_Str8> cmd_line, DN_OSExecArgs *args, DN_Arena *arena)
|
|
{
|
|
DN_OSErrSink *error = DN_OS_ErrSinkBegin(DN_OSErrSinkMode_Nil);
|
|
DN_OSExecResult result = DN_OS_Exec(cmd_line, args, arena, error);
|
|
if (result.os_error_code)
|
|
DN_OS_ErrSinkEndAndExitIfErrorF(error, result.os_error_code, "OS failed to execute the requested command returning the error code %u", result.os_error_code);
|
|
|
|
if (result.exit_code)
|
|
DN_OS_ErrSinkEndAndExitIfErrorF(error, result.exit_code, "OS executed command and returned non-zero exit code %u", result.exit_code);
|
|
DN_OS_ErrSinkEndAndIgnore(error);
|
|
return result;
|
|
}
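// NOTE: Illustrative sketch (disabled; the helper name is made up and a
// zero-initialised DN_OSExecArgs is assumed to select default behaviour):
// run a command, aborting the process via the error sink if the OS launch
// or the child's exit code fails.
#if 0
static void DN_Example_Exec_()
{
    DN_OSTLSTMem    tmem      = DN_OS_TLSTMem(nullptr);
    DN_Str8         cmd[]     = {DN_STR8("git"), DN_STR8("--version")};
    DN_OSExecArgs   exec_args = {};
    DN_OSExecResult run       = DN_OS_ExecOrAbort(DN_Slice_Init(cmd, DN_ArrayCountU(cmd)), &exec_args, tmem.arena);
    (void)run;
}
#endif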
|
|
|
|
// NOTE: DN_OSThread ///////////////////////////////////////////////////////////////////////////////
|
|
static void DN_OS_ThreadExecute_(void *user_context)
|
|
{
|
|
DN_OSThread *thread = DN_CAST(DN_OSThread *) user_context;
|
|
DN_OS_TLSInit(&thread->tls, thread->tls_init_args);
|
|
DN_OS_TLSSetCurrentThreadTLS(&thread->tls);
|
|
DN_OS_SemaphoreWait(&thread->init_semaphore, DN_OS_SEMAPHORE_INFINITE_TIMEOUT);
|
|
thread->func(thread);
|
|
}
|
|
|
|
DN_API void DN_OS_ThreadSetName(DN_Str8 name)
|
|
{
|
|
DN_OSTLS *tls = DN_OS_TLSGet();
|
|
tls->name_size = DN_CAST(uint8_t) DN_Min(name.size, sizeof(tls->name) - 1);
|
|
DN_Memcpy(tls->name, name.data, tls->name_size);
|
|
tls->name[tls->name_size] = 0;
|
|
|
|
#if defined(DN_PLATFORM_WIN32)
|
|
DN_W32_ThreadSetName(name);
|
|
#else
|
|
DN_Posix_ThreadSetName(name);
|
|
#endif
|
|
}
|
|
|
|
// NOTE: DN_OSHttp /////////////////////////////////////////////////////////////////////////////////
|
|
DN_API void DN_OS_HttpRequestWait(DN_OSHttpResponse *response)
|
|
{
|
|
if (response && response->on_complete_semaphore.handle != 0)
|
|
DN_OS_SemaphoreWait(&response->on_complete_semaphore, DN_OS_SEMAPHORE_INFINITE_TIMEOUT);
|
|
}
|
|
|
|
DN_API DN_OSHttpResponse DN_OS_HttpRequest(DN_Arena *arena, DN_Str8 host, DN_Str8 path, DN_OSHttpRequestSecure secure, DN_Str8 method, DN_Str8 body, DN_Str8 headers)
|
|
{
|
|
// TODO(doyle): Revise the memory allocation and its lifetime
|
|
DN_OSHttpResponse result = {};
|
|
DN_OSTLSTMem tmem = DN_OS_TLSTMem(arena);
|
|
result.tmem_arena = tmem.arena;
|
|
|
|
DN_OS_HttpRequestAsync(&result, arena, host, path, secure, method, body, headers);
|
|
DN_OS_HttpRequestWait(&result);
|
|
return result;
|
|
}
|
|
// DN: Single header generator inlined this file => #include "OS/dn_os_allocator.cpp"
|
|
#define DN_OS_ALLOCATOR_CPP
|
|
|
|
static void *DN_Arena_BasicAllocFromOSHeap(DN_USize size)
|
|
{
|
|
void *result = DN_OS_MemAlloc(size, DN_ZeroMem_Yes);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Arena DN_Arena_InitFromOSHeap(DN_U64 size, DN_ArenaFlags flags)
|
|
{
|
|
DN_ArenaMemFuncs mem_funcs = {};
|
|
mem_funcs.type = DN_ArenaMemFuncType_Basic;
|
|
mem_funcs.basic_alloc = DN_Arena_BasicAllocFromOSHeap;
|
|
mem_funcs.basic_dealloc = DN_OS_MemDealloc;
|
|
DN_Arena result = DN_Arena_InitFromMemFuncs(size, size, flags, mem_funcs);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Arena DN_Arena_InitFromOSVMem(DN_U64 reserve, DN_U64 commit, DN_ArenaFlags flags)
|
|
{
|
|
DN_ArenaMemFuncs mem_funcs = {};
|
|
mem_funcs.type = DN_ArenaMemFuncType_VMem;
|
|
mem_funcs.vmem_page_size = g_dn_os_core_->page_size;
|
|
mem_funcs.vmem_reserve = DN_OS_MemReserve;
|
|
mem_funcs.vmem_commit = DN_OS_MemCommit;
|
|
mem_funcs.vmem_release = DN_OS_MemRelease;
|
|
DN_Arena result = DN_Arena_InitFromMemFuncs(reserve, commit, flags, mem_funcs);
|
|
return result;
|
|
}
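// NOTE: These two constructors plug either the OS heap (malloc/free style) or
// OS virtual memory (reserve/commit/release) into the arena's memory function
// table. Illustrative sketch (disabled; the helper name is made up and a flag
// value of 0 is assumed to mean default arena behaviour):
#if 0
static void DN_Example_VMemArena_()
{
    // Reserve 64 MiB of address space up front, commit 1 MiB immediately;
    // the arena commits further pages through DN_OS_MemCommit as it grows.
    DN_Arena arena = DN_Arena_InitFromOSVMem(64ULL * 1024 * 1024, 1ULL * 1024 * 1024, DN_CAST(DN_ArenaFlags) 0);
    char    *bytes = DN_Arena_NewArray(&arena, char, 4096, DN_ZeroMem_Yes);
    (void)bytes;
}
#endif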
|
|
|
|
// DN: Single header generator inlined this file => #include "OS/dn_os_containers.cpp"
|
|
#define DN_OS_CONTAINERS_CPP
|
|
|
|
/*
|
|
////////////////////////////////////////////////////////////////////////////////////////////////////
|
|
//
|
|
// $$$$$$\ $$$$$$\ $$\ $$\ $$$$$$$$\ $$$$$$\ $$$$$$\ $$\ $$\ $$$$$$$$\ $$$$$$$\ $$$$$$\
|
|
// $$ __$$\ $$ __$$\ $$$\ $$ |\__$$ __|$$ __$$\ \_$$ _|$$$\ $$ |$$ _____|$$ __$$\ $$ __$$\
|
|
// $$ / \__|$$ / $$ |$$$$\ $$ | $$ | $$ / $$ | $$ | $$$$\ $$ |$$ | $$ | $$ |$$ / \__|
|
|
// $$ | $$ | $$ |$$ $$\$$ | $$ | $$$$$$$$ | $$ | $$ $$\$$ |$$$$$\ $$$$$$$ |\$$$$$$\
|
|
// $$ | $$ | $$ |$$ \$$$$ | $$ | $$ __$$ | $$ | $$ \$$$$ |$$ __| $$ __$$< \____$$\
|
|
// $$ | $$\ $$ | $$ |$$ |\$$$ | $$ | $$ | $$ | $$ | $$ |\$$$ |$$ | $$ | $$ |$$\ $$ |
|
|
// \$$$$$$ | $$$$$$ |$$ | \$$ | $$ | $$ | $$ |$$$$$$\ $$ | \$$ |$$$$$$$$\ $$ | $$ |\$$$$$$ |
|
|
// \______/ \______/ \__| \__| \__| \__| \__|\______|\__| \__|\________|\__| \__| \______/
|
|
//
|
|
// dn_containers.cpp
|
|
//
|
|
////////////////////////////////////////////////////////////////////////////////////////////////////
|
|
*/
|
|
|
|
// NOTE: DN_VArray /////////////////////////////////////////////////////////////////////////////////
|
|
template <typename T>
|
|
DN_VArray<T> DN_VArray_InitByteSize(DN_USize byte_size)
|
|
{
|
|
DN_VArray<T> result = {};
|
|
result.data = DN_CAST(T *) DN_OS_MemReserve(byte_size, DN_MemCommit_No, DN_MemPage_ReadWrite);
|
|
if (result.data)
|
|
result.max = byte_size / sizeof(T);
|
|
return result;
|
|
}
|
|
|
|
template <typename T>
|
|
DN_VArray<T> DN_VArray_Init(DN_USize max)
|
|
{
|
|
DN_VArray<T> result = DN_VArray_InitByteSize<T>(max * sizeof(T));
|
|
DN_Assert(result.max >= max);
|
|
return result;
|
|
}
|
|
|
|
template <typename T>
|
|
DN_VArray<T> DN_VArray_InitSlice(DN_Slice<T> slice, DN_USize max)
|
|
{
|
|
DN_USize real_max = DN_Max(slice.size, max);
|
|
DN_VArray<T> result = DN_VArray_Init<T>(real_max);
|
|
if (DN_VArray_IsValid(&result))
|
|
DN_VArray_AddArray(&result, slice.data, slice.size);
|
|
return result;
|
|
}
|
|
|
|
template <typename T, DN_USize N>
|
|
DN_VArray<T> DN_VArray_InitCArray(T const (&items)[N], DN_USize max)
|
|
{
|
|
DN_USize real_max = DN_Max(N, max);
|
|
DN_VArray<T> result = DN_VArray_InitSlice(DN_Slice_Init(items, N), real_max);
|
|
return result;
|
|
}
|
|
|
|
template <typename T>
|
|
void DN_VArray_Deinit(DN_VArray<T> *array)
|
|
{
|
|
DN_OS_MemRelease(array->data, array->max * sizeof(T));
|
|
*array = {};
|
|
}
|
|
|
|
template <typename T>
|
|
bool DN_VArray_IsValid(DN_VArray<T> const *array)
|
|
{
|
|
bool result = array->data && array->size <= array->max;
|
|
return result;
|
|
}
|
|
|
|
template <typename T>
|
|
DN_Slice<T> DN_VArray_Slice(DN_VArray<T> const *array)
|
|
{
|
|
DN_Slice<T> result = {};
|
|
if (array)
|
|
result = DN_Slice_Init<T>(array->data, array->size);
|
|
return result;
|
|
}
|
|
|
|
template <typename T>
|
|
T *DN_VArray_AddArray(DN_VArray<T> *array, T const *items, DN_USize count)
|
|
{
|
|
T *result = DN_VArray_MakeArray(array, count, DN_ZeroMem_No);
|
|
if (result)
|
|
DN_Memcpy(result, items, count * sizeof(T));
|
|
return result;
|
|
}
|
|
|
|
template <typename T, DN_USize N>
|
|
T *DN_VArray_AddCArray(DN_VArray<T> *array, T const (&items)[N])
|
|
{
|
|
T *result = DN_VArray_AddArray(array, items, N);
|
|
return result;
|
|
}
|
|
|
|
template <typename T>
|
|
T *DN_VArray_Add(DN_VArray<T> *array, T const &item)
|
|
{
|
|
T *result = DN_VArray_AddArray(array, &item, 1);
|
|
return result;
|
|
}
|
|
|
|
template <typename T>
|
|
T *DN_VArray_MakeArray(DN_VArray<T> *array, DN_USize count, DN_ZeroMem zero_mem)
|
|
{
|
|
if (!DN_VArray_IsValid(array))
|
|
return nullptr;
|
|
|
|
if (!DN_CheckF((array->size + count) <= array->max, "Array is out of space (user requested +%zu items, array has %zu/%zu items)", count, array->size, array->max))
|
|
return nullptr;
|
|
|
|
if (!DN_VArray_Reserve(array, count))
|
|
return nullptr;
|
|
|
|
// TODO: Use placement new
|
|
T *result = array->data + array->size;
|
|
array->size += count;
|
|
if (zero_mem == DN_ZeroMem_Yes)
|
|
DN_Memset(result, 0, count * sizeof(T));
|
|
return result;
|
|
}
|
|
|
|
template <typename T>
|
|
T *DN_VArray_Make(DN_VArray<T> *array, DN_ZeroMem zero_mem)
|
|
{
|
|
T *result = DN_VArray_MakeArray(array, 1, zero_mem);
|
|
return result;
|
|
}
|
|
|
|
template <typename T>
|
|
T *DN_VArray_InsertArray(DN_VArray<T> *array, DN_USize index, T const *items, DN_USize count)
|
|
{
|
|
T *result = nullptr;
|
|
if (!DN_VArray_IsValid(array))
|
|
return result;
|
|
if (DN_VArray_Reserve(array, array->size + count))
|
|
result = DN_CArray_InsertArray(array->data, &array->size, array->max, index, items, count);
|
|
return result;
|
|
}
|
|
|
|
template <typename T, DN_USize N>
|
|
T *DN_VArray_InsertCArray(DN_VArray<T> *array, DN_USize index, T const (&items)[N])
|
|
{
|
|
T *result = DN_VArray_InsertArray(array, index, items, N);
|
|
return result;
|
|
}
|
|
|
|
template <typename T>
|
|
T *DN_VArray_Insert(DN_VArray<T> *array, DN_USize index, T const &item)
|
|
{
|
|
T *result = DN_VArray_InsertArray(array, index, &item, 1);
|
|
return result;
|
|
}
|
|
|
|
template <typename T>
|
|
T *DN_VArray_PopFront(DN_VArray<T> *array, DN_USize count)
|
|
{
|
|
T *result = DN_CArray_PopFront(array->data, &array->size, count);
|
|
return result;
|
|
}
|
|
|
|
template <typename T>
|
|
T *DN_VArray_PopBack(DN_VArray<T> *array, DN_USize count)
|
|
{
|
|
T *result = DN_CArray_PopBack(array->data, &array->size, count);
|
|
return result;
|
|
}
|
|
|
|
template <typename T>
|
|
DN_ArrayEraseResult DN_VArray_EraseRange(DN_VArray<T> *array, DN_USize begin_index, DN_ISize count, DN_ArrayErase erase)
|
|
{
|
|
DN_ArrayEraseResult result = {};
|
|
if (!DN_VArray_IsValid(array))
|
|
return result;
|
|
result = DN_CArray_EraseRange<T>(array->data, &array->size, begin_index, count, erase);
|
|
return result;
|
|
}
|
|
|
|
template <typename T>
|
|
void DN_VArray_Clear(DN_VArray<T> *array, DN_ZeroMem zero_mem)
|
|
{
|
|
if (array) {
|
|
if (zero_mem == DN_ZeroMem_Yes)
|
|
DN_Memset(array->data, 0, array->size * sizeof(T));
|
|
array->size = 0;
|
|
}
|
|
}
|
|
|
|
template <typename T>
|
|
bool DN_VArray_Reserve(DN_VArray<T> *array, DN_USize count)
|
|
{
|
|
if (!DN_VArray_IsValid(array) || count == 0)
|
|
return false;
|
|
|
|
DN_USize real_commit = (array->size + count) * sizeof(T);
|
|
DN_USize aligned_commit = DN_AlignUpPowerOfTwo(real_commit, g_dn_os_core_->page_size);
|
|
if (array->commit >= aligned_commit)
|
|
return true;
|
|
bool result = DN_OS_MemCommit(array->data, aligned_commit, DN_MemPage_ReadWrite);
|
|
array->commit = aligned_commit;
|
|
return result;
|
|
}
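// NOTE: DN_VArray reserves its maximum capacity as virtual address space up
// front and commits pages on demand in DN_VArray_Reserve, so the backing
// pointer never moves as the array grows. Illustrative sketch (disabled;
// helper name made up):
#if 0
static void DN_Example_VArray_()
{
    DN_VArray<int> array = DN_VArray_Init<int>(1024 * 1024); // room for ~1M ints, committed lazily
    DN_VArray_Add(&array, 42);
    int values[] = {1, 2, 3};
    DN_VArray_AddCArray(&array, values);
    DN_VArray_Deinit(&array);
}
#endif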
|
|
// DN: Single header generator inlined this file => #include "OS/dn_os_print.cpp"
|
|
#define DN_OS_PRINT_CPP
|
|
|
|
DN_API DN_LOGStyle DN_OS_PrintStyleColour(uint8_t r, uint8_t g, uint8_t b, DN_LOGBold bold)
|
|
{
|
|
DN_LOGStyle result = {};
|
|
result.bold = bold;
|
|
result.colour = true;
|
|
result.r = r;
|
|
result.g = g;
|
|
result.b = b;
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_LOGStyle DN_OS_PrintStyleColourU32(uint32_t rgb, DN_LOGBold bold)
|
|
{
|
|
uint8_t r = (rgb >> 24) & 0xFF;
|
|
uint8_t g = (rgb >> 16) & 0xFF;
|
|
uint8_t b = (rgb >> 8) & 0xFF;
|
|
DN_LOGStyle result = DN_OS_PrintStyleColour(r, g, b, bold);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_LOGStyle DN_OS_PrintStyleBold()
|
|
{
|
|
DN_LOGStyle result = {};
|
|
result.bold = DN_LOGBold_Yes;
|
|
return result;
|
|
}
|
|
|
|
DN_API void DN_OS_Print(DN_OSPrintDest dest, DN_Str8 string)
|
|
{
|
|
DN_Assert(dest == DN_OSPrintDest_Out || dest == DN_OSPrintDest_Err);
|
|
|
|
#if defined(DN_PLATFORM_WIN32)
|
|
// NOTE: Get the output handles from kernel ////////////////////////////////////////////////////
|
|
DN_THREAD_LOCAL void *std_out_print_handle = nullptr;
|
|
DN_THREAD_LOCAL void *std_err_print_handle = nullptr;
|
|
DN_THREAD_LOCAL bool std_out_print_to_console = false;
|
|
DN_THREAD_LOCAL bool std_err_print_to_console = false;
|
|
|
|
if (!std_out_print_handle) {
|
|
unsigned long mode = 0;
|
|
(void)mode;
|
|
std_out_print_handle = GetStdHandle(STD_OUTPUT_HANDLE);
|
|
std_out_print_to_console = GetConsoleMode(std_out_print_handle, &mode) != 0;
|
|
|
|
std_err_print_handle = GetStdHandle(STD_ERROR_HANDLE);
|
|
std_err_print_to_console = GetConsoleMode(std_err_print_handle, &mode) != 0;
|
|
}
|
|
|
|
// NOTE: Select the output handle //////////////////////////////////////////////////////////////
|
|
void *print_handle = std_out_print_handle;
|
|
bool print_to_console = std_out_print_to_console;
|
|
if (dest == DN_OSPrintDest_Err) {
|
|
print_handle = std_err_print_handle;
|
|
print_to_console = std_err_print_to_console;
|
|
}
|
|
|
|
// NOTE: Write the string //////////////////////////////////////////////////////////////////////
|
|
DN_Assert(string.size < DN_CAST(unsigned long) - 1);
|
|
unsigned long bytes_written = 0;
|
|
(void)bytes_written;
|
|
if (print_to_console)
|
|
WriteConsoleA(print_handle, string.data, DN_CAST(unsigned long) string.size, &bytes_written, nullptr);
|
|
else
|
|
WriteFile(print_handle, string.data, DN_CAST(unsigned long) string.size, &bytes_written, nullptr);
|
|
#else
|
|
fprintf(dest == DN_OSPrintDest_Out ? stdout : stderr, "%.*s", DN_STR_FMT(string));
|
|
#endif
|
|
}
|
|
|
|
DN_API void DN_OS_PrintF(DN_OSPrintDest dest, DN_FMT_ATTRIB char const *fmt, ...)
|
|
{
|
|
va_list args;
|
|
va_start(args, fmt);
|
|
DN_OS_PrintFV(dest, fmt, args);
|
|
va_end(args);
|
|
}
|
|
|
|
DN_API void DN_OS_PrintFStyle(DN_OSPrintDest dest, DN_LOGStyle style, DN_FMT_ATTRIB char const *fmt, ...)
|
|
{
|
|
va_list args;
|
|
va_start(args, fmt);
|
|
DN_OS_PrintFVStyle(dest, style, fmt, args);
|
|
va_end(args);
|
|
}
|
|
|
|
DN_API void DN_OS_PrintStyle(DN_OSPrintDest dest, DN_LOGStyle style, DN_Str8 string)
|
|
{
|
|
if (string.data && string.size) {
|
|
if (style.colour)
|
|
DN_OS_Print(dest, DN_LOG_ColourEscapeCodeStr8FromRGB(DN_LOGColourType_Fg, style.r, style.g, style.b));
|
|
if (style.bold == DN_LOGBold_Yes)
|
|
DN_OS_Print(dest, DN_STR8(DN_LOG_BoldEscapeCode));
|
|
DN_OS_Print(dest, string);
|
|
if (style.colour || style.bold == DN_LOGBold_Yes)
|
|
DN_OS_Print(dest, DN_STR8(DN_LOG_ResetEscapeCode));
|
|
}
|
|
}
|
|
|
|
static char *DN_OS_PrintVSPrintfChunker_(const char *buf, void *user, int len)
|
|
{
|
|
DN_Str8 string = {};
|
|
string.data = DN_CAST(char *) buf;
|
|
string.size = len;
|
|
|
|
DN_OSPrintDest dest = DN_CAST(DN_OSPrintDest) DN_CAST(uintptr_t) user;
|
|
DN_OS_Print(dest, string);
|
|
return (char *)buf;
|
|
}
|
|
|
|
DN_API void DN_OS_PrintFV(DN_OSPrintDest dest, DN_FMT_ATTRIB char const *fmt, va_list args)
|
|
{
|
|
char buffer[STB_SPRINTF_MIN];
|
|
STB_SPRINTF_DECORATE(vsprintfcb)(DN_OS_PrintVSPrintfChunker_, DN_CAST(void *) DN_CAST(uintptr_t) dest, buffer, fmt, args);
|
|
}
|
|
|
|
DN_API void DN_OS_PrintFVStyle(DN_OSPrintDest dest, DN_LOGStyle style, DN_FMT_ATTRIB char const *fmt, va_list args)
|
|
{
|
|
if (fmt) {
|
|
if (style.colour)
|
|
DN_OS_Print(dest, DN_LOG_ColourEscapeCodeStr8FromRGB(DN_LOGColourType_Fg, style.r, style.g, style.b));
|
|
if (style.bold == DN_LOGBold_Yes)
|
|
DN_OS_Print(dest, DN_STR8(DN_LOG_BoldEscapeCode));
|
|
DN_OS_PrintFV(dest, fmt, args);
|
|
if (style.colour || style.bold == DN_LOGBold_Yes)
|
|
DN_OS_Print(dest, DN_STR8(DN_LOG_ResetEscapeCode));
|
|
}
|
|
}
|
|
|
|
DN_API void DN_OS_PrintLn(DN_OSPrintDest dest, DN_Str8 string)
|
|
{
|
|
DN_OS_Print(dest, string);
|
|
DN_OS_Print(dest, DN_STR8("\n"));
|
|
}
|
|
|
|
DN_API void DN_OS_PrintLnF(DN_OSPrintDest dest, DN_FMT_ATTRIB char const *fmt, ...)
|
|
{
|
|
va_list args;
|
|
va_start(args, fmt);
|
|
DN_OS_PrintLnFV(dest, fmt, args);
|
|
va_end(args);
|
|
}
|
|
|
|
DN_API void DN_OS_PrintLnFV(DN_OSPrintDest dest, DN_FMT_ATTRIB char const *fmt, va_list args)
|
|
{
|
|
DN_OS_PrintFV(dest, fmt, args);
|
|
DN_OS_Print(dest, DN_STR8("\n"));
|
|
}
|
|
|
|
DN_API void DN_OS_PrintLnStyle(DN_OSPrintDest dest, DN_LOGStyle style, DN_Str8 string)
|
|
{
|
|
DN_OS_PrintStyle(dest, style, string);
|
|
DN_OS_Print(dest, DN_STR8("\n"));
|
|
}
|
|
|
|
DN_API void DN_OS_PrintLnFStyle(DN_OSPrintDest dest, DN_LOGStyle style, DN_FMT_ATTRIB char const *fmt, ...)
|
|
{
|
|
va_list args;
|
|
va_start(args, fmt);
|
|
DN_OS_PrintLnFVStyle(dest, style, fmt, args);
|
|
va_end(args);
|
|
}
|
|
|
|
DN_API void DN_OS_PrintLnFVStyle(DN_OSPrintDest dest, DN_LOGStyle style, DN_FMT_ATTRIB char const *fmt, va_list args)
|
|
{
|
|
DN_OS_PrintFVStyle(dest, style, fmt, args);
|
|
DN_OS_Print(dest, DN_STR8("\n"));
|
|
}
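// NOTE: Styled printing wraps the payload in ANSI colour/bold escape codes and
// resets afterwards; on Win32 the raw print path uses WriteConsoleA when
// attached to a console and WriteFile when output is redirected. Illustrative
// sketch (disabled; helper name made up):
#if 0
static void DN_Example_StyledPrint_()
{
    DN_LOGStyle style = DN_OS_PrintStyleColour(255, 0, 0, DN_LOGBold_Yes);
    DN_OS_PrintLnStyle(DN_OSPrintDest_Out, style, DN_STR8("error: something went wrong"));
    DN_OS_PrintLnF(DN_OSPrintDest_Out, "%d items processed", 3);
}
#endif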
|
|
// DN: Single header generator inlined this file => #include "OS/dn_os_string.cpp"
|
|
#define DN_OS_STRING_CPP
|
|
|
|
// NOTE: DN_Str8 ///////////////////////////////////////////////////////////////////////////////////
|
|
|
|
DN_API DN_Str8 DN_Str8_InitFFromFrame(DN_FMT_ATTRIB char const *fmt, ...)
|
|
{
|
|
va_list args;
|
|
va_start(args, fmt);
|
|
DN_Str8 result = DN_Str8_InitFV(DN_OS_TLSGet()->frame_arena, fmt, args);
|
|
va_end(args);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_Str8_InitFFromOSHeap(DN_FMT_ATTRIB char const *fmt, ...)
|
|
{
|
|
va_list args;
|
|
va_start(args, fmt);
|
|
|
|
DN_Str8 result = {};
|
|
DN_USize size = DN_CStr8_FVSize(fmt, args);
|
|
if (size) {
|
|
result = DN_Str8_AllocFromOSHeap(size, DN_ZeroMem_No);
|
|
if (DN_Str8_HasData(result))
|
|
DN_VSNPrintF(result.data, DN_SaturateCastISizeToInt(size + 1 /*null-terminator*/), fmt, args);
|
|
}
|
|
|
|
va_end(args);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_Str8_InitFFromTLS(DN_FMT_ATTRIB char const *fmt, ...)
|
|
{
|
|
va_list args;
|
|
va_start(args, fmt);
|
|
DN_Str8 result = DN_Str8_InitFV(DN_OS_TLSTopArena(), fmt, args);
|
|
va_end(args);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_Str8_InitFVFromFrame(DN_FMT_ATTRIB char const *fmt, va_list args)
|
|
{
|
|
DN_Str8 result = DN_Str8_InitFV(DN_OS_TLSGet()->frame_arena, fmt, args);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_Str8_InitFVFromTLS(DN_FMT_ATTRIB char const *fmt, va_list args)
|
|
{
|
|
DN_Str8 result = DN_Str8_InitFV(DN_OS_TLSTopArena(), fmt, args);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_Str8_AllocFromFrame(DN_USize size, DN_ZeroMem zero_mem)
|
|
{
|
|
DN_Str8 result = DN_Str8_Alloc(DN_OS_TLSGet()->frame_arena, size, zero_mem);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_Str8_AllocFromOSHeap(DN_USize size, DN_ZeroMem zero_mem)
|
|
{
|
|
DN_Str8 result = {};
|
|
result.data = DN_CAST(char *)DN_OS_MemAlloc(size + 1, zero_mem);
|
|
if (result.data) {
result.size = size;
result.data[result.size] = 0;
}
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_Str8_AllocFromTLS(DN_USize size, DN_ZeroMem zero_mem)
|
|
{
|
|
DN_Str8 result = DN_Str8_Alloc(DN_OS_TLSTopArena(), size, zero_mem);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_Str8_CopyFromFrame(DN_Str8 string)
|
|
{
|
|
DN_Str8 result = DN_Str8_Copy(DN_OS_TLSGet()->frame_arena, string);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_Str8_CopyFromTLS(DN_Str8 string)
|
|
{
|
|
DN_Str8 result = DN_Str8_Copy(DN_OS_TLSTopArena(), string);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Slice<DN_Str8> DN_Str8_SplitAllocFromFrame(DN_Str8 string, DN_Str8 delimiter, DN_Str8SplitIncludeEmptyStrings mode)
|
|
{
|
|
DN_Slice<DN_Str8> result = DN_Str8_SplitAlloc(DN_OS_TLSGet()->frame_arena, string, delimiter, mode);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Slice<DN_Str8> DN_Str8_SplitAllocFromTLS(DN_Str8 string, DN_Str8 delimiter, DN_Str8SplitIncludeEmptyStrings mode)
|
|
{
|
|
DN_Slice<DN_Str8> result = DN_Str8_SplitAlloc(DN_OS_TLSTopArena(), string, delimiter, mode);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_Str8_SegmentFromFrame(DN_Str8 src, DN_USize segment_size, char segment_char)
|
|
{
|
|
DN_Str8 result = DN_Str8_Segment(DN_OS_TLSGet()->frame_arena, src, segment_size, segment_char);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_Str8_SegmentFromTLS(DN_Str8 src, DN_USize segment_size, char segment_char)
|
|
{
|
|
DN_Str8 result = DN_Str8_Segment(DN_OS_TLSTopArena(), src, segment_size, segment_char);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_Str8_ReverseSegmentFromFrame(DN_Str8 src, DN_USize segment_size, char segment_char)
|
|
{
|
|
DN_Str8 result = DN_Str8_ReverseSegment(DN_OS_TLSGet()->frame_arena, src, segment_size, segment_char);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_Str8_ReverseSegmentFromTLS(DN_Str8 src, DN_USize segment_size, char segment_char)
|
|
{
|
|
DN_Str8 result = DN_Str8_ReverseSegment(DN_OS_TLSTopArena(), src, segment_size, segment_char);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_Str8_AppendFFromFrame(DN_Str8 string, char const *fmt, ...)
|
|
{
|
|
va_list args;
|
|
va_start(args, fmt);
|
|
DN_Str8 result = DN_Str8_AppendFV(DN_OS_TLSGet()->frame_arena, string, fmt, args);
|
|
va_end(args);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_Str8_AppendFFromTLS(DN_Str8 string, char const *fmt, ...)
|
|
{
|
|
va_list args;
|
|
va_start(args, fmt);
|
|
DN_Str8 result = DN_Str8_AppendFV(DN_OS_TLSTopArena(), string, fmt, args);
|
|
va_end(args);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_Str8_FillFFromFrame(DN_USize count, char const *fmt, ...)
|
|
{
|
|
va_list args;
|
|
va_start(args, fmt);
|
|
DN_Str8 result = DN_Str8_FillFV(DN_OS_TLSGet()->frame_arena, count, fmt, args);
|
|
va_end(args);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_Str8_FillFFromTLS(DN_USize count, char const *fmt, ...)
|
|
{
|
|
va_list args;
|
|
va_start(args, fmt);
|
|
DN_Str8 result = DN_Str8_FillFV(DN_OS_TLSTopArena(), count, fmt, args);
|
|
va_end(args);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8DotTruncateResult DN_Str8_DotTruncateMiddleFromFrame(DN_Str8 str8, uint32_t side_size, DN_Str8 truncator)
|
|
{
|
|
DN_Str8DotTruncateResult result = DN_Str8_DotTruncateMiddle(DN_OS_TLSGet()->frame_arena, str8, side_size, truncator);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8DotTruncateResult DN_Str8_DotTruncateMiddleFromTLS(DN_Str8 str8, uint32_t side_size, DN_Str8 truncator)
|
|
{
|
|
DN_Str8DotTruncateResult result = DN_Str8_DotTruncateMiddle(DN_OS_TLSTopArena(), str8, side_size, truncator);
|
|
return result;
|
|
}
|
|
|
|
|
|
DN_API DN_Str8 DN_Str8_PadNewLines(DN_Arena *arena, DN_Str8 src, DN_Str8 pad)
|
|
{
|
|
// TODO: Implement this without requiring TLS so it can go into base strings
|
|
DN_OSTLSTMem tmem = DN_OS_TLSPushTMem(arena);
|
|
DN_Str8Builder builder = DN_Str8Builder_InitFromTLS();
|
|
|
|
DN_Str8BinarySplitResult split = DN_Str8_BinarySplit(src, DN_STR8("\n"));
|
|
while (split.lhs.size) {
|
|
DN_Str8Builder_AppendRef(&builder, pad);
|
|
DN_Str8Builder_AppendRef(&builder, split.lhs);
|
|
split = DN_Str8_BinarySplit(split.rhs, DN_STR8("\n"));
|
|
if (split.lhs.size)
|
|
DN_Str8Builder_AppendRef(&builder, DN_STR8("\n"));
|
|
}
|
|
|
|
DN_Str8 result = DN_Str8Builder_Build(&builder, arena);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_Str8_PadNewLinesFromFrame(DN_Str8 src, DN_Str8 pad)
|
|
{
|
|
DN_Str8 result = DN_Str8_PadNewLines(DN_OS_TLSGet()->frame_arena, src, pad);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_Str8_PadNewLinesFromTLS(DN_Str8 src, DN_Str8 pad)
|
|
{
|
|
DN_Str8 result = DN_Str8_PadNewLines(DN_OS_TLSTopArena(), src, pad);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_Str8_UpperFromFrame(DN_Str8 string)
|
|
{
|
|
DN_Str8 result = DN_Str8_Upper(DN_OS_TLSGet()->frame_arena, string);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_Str8_UpperFromTLS(DN_Str8 string)
|
|
{
|
|
DN_Str8 result = DN_Str8_Upper(DN_OS_TLSTopArena(), string);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_Str8_LowerFromFrame(DN_Str8 string)
|
|
{
|
|
DN_Str8 result = DN_Str8_Lower(DN_OS_TLSGet()->frame_arena, string);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_Str8_LowerFromTLS(DN_Str8 string)
|
|
{
|
|
DN_Str8 result = DN_Str8_Lower(DN_OS_TLSTopArena(), string);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_Str8_Replace(DN_Str8 string,
|
|
DN_Str8 find,
|
|
DN_Str8 replace,
|
|
DN_USize start_index,
|
|
DN_Arena *arena,
|
|
DN_Str8EqCase eq_case)
|
|
{
|
|
// TODO: Implement this without requiring TLS so it can go into base strings
|
|
DN_Str8 result = {};
|
|
if (!DN_Str8_HasData(string) || !DN_Str8_HasData(find) || find.size > string.size || find.size == 0 || string.size == 0) {
|
|
result = DN_Str8_Copy(arena, string);
|
|
return result;
|
|
}
|
|
|
|
DN_OSTLSTMem tmem = DN_OS_TLSTMem(arena);
|
|
DN_Str8Builder string_builder = DN_Str8Builder_Init(tmem.arena);
|
|
DN_USize max = string.size - find.size;
|
|
DN_USize head = start_index;
|
|
|
|
for (DN_USize tail = head; tail <= max; tail++) {
|
|
DN_Str8 check = DN_Str8_Slice(string, tail, find.size);
|
|
if (!DN_Str8_Eq(check, find, eq_case))
|
|
continue;
|
|
|
|
if (start_index > 0 && string_builder.string_size == 0) {
|
|
// The user provided a hint to start searching from, so we need to add
// the prefix of the string up to that hint. We only do this when there
// is a replacement to make; otherwise the no-replacement special case
// below copies the entire string.
|
|
DN_Str8 slice = DN_Str8_Init(string.data, head);
|
|
DN_Str8Builder_AppendRef(&string_builder, slice);
|
|
}
|
|
|
|
DN_Str8 range = DN_Str8_Slice(string, head, (tail - head));
|
|
DN_Str8Builder_AppendRef(&string_builder, range);
|
|
DN_Str8Builder_AppendRef(&string_builder, replace);
|
|
head = tail + find.size;
|
|
tail += find.size - 1; // NOTE: -1 since the for loop will post increment us past the end of the find string
|
|
}
|
|
|
|
if (string_builder.string_size == 0) {
|
|
// NOTE: No replacement possible, so we just do a full-copy
|
|
result = DN_Str8_Copy(arena, string);
|
|
} else {
|
|
DN_Str8 remainder = DN_Str8_Init(string.data + head, string.size - head);
|
|
DN_Str8Builder_AppendRef(&string_builder, remainder);
|
|
result = DN_Str8Builder_Build(&string_builder, arena);
|
|
}
|
|
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_Str8_ReplaceInsensitive(DN_Str8 string, DN_Str8 find, DN_Str8 replace, DN_USize start_index, DN_Arena *arena)
|
|
{
|
|
DN_Str8 result = DN_Str8_Replace(string, find, replace, start_index, arena, DN_Str8EqCase_Insensitive);
|
|
return result;
|
|
}
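// NOTE: Replace scans from start_index, copies the unchanged ranges and the
// replacement into a string builder, and falls back to a plain copy when
// nothing matches. Illustrative sketch (disabled; helper name made up):
#if 0
static void DN_Example_Str8Replace_()
{
    DN_OSTLSTMem tmem     = DN_OS_TLSTMem(nullptr);
    DN_Str8      replaced = DN_Str8_ReplaceInsensitive(DN_STR8("a,b,c"), DN_STR8(","), DN_STR8(" | "), 0 /*start_index*/, tmem.arena);
    (void)replaced; // replaced == "a | b | c"
}
#endif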
|
|
|
|
// NOTE: DN_Str8Builder ////////////////////////////////////////////////////////////////////////////
|
|
|
|
DN_API DN_Str8 DN_Str8Builder_BuildFromOSHeap(DN_Str8Builder const *builder)
|
|
{
|
|
DN_Str8 result = DN_ZeroInit;
|
|
if (!builder || builder->string_size <= 0 || builder->count <= 0)
|
|
return result;
|
|
|
|
result.data = DN_CAST(char *) DN_OS_MemAlloc(builder->string_size + 1, DN_ZeroMem_No);
|
|
if (!result.data)
|
|
return result;
|
|
|
|
for (DN_Str8Link *link = builder->head; link; link = link->next) {
|
|
DN_Memcpy(result.data + result.size, link->string.data, link->string.size);
|
|
result.size += link->string.size;
|
|
}
|
|
|
|
result.data[result.size] = 0;
|
|
DN_Assert(result.size == builder->string_size);
|
|
return result;
|
|
}
|
|
|
|
#if defined(DN_PLATFORM_POSIX)
|
|
// DN: Single header generator inlined this file => #include "OS/dn_os_posix.cpp"
|
|
#define DN_OS_POSIX_CPP
|
|
|
|
#include <dirent.h> // readdir, opendir, closedir
|
|
#include <sys/statvfs.h>
|
|
|
|
// NOTE: DN_OSMem //////////////////////////////////////////////////////////////////////////////////
|
|
static DN_U32 DN_OS_MemConvertPageToOSFlags_(DN_U32 protect)
|
|
{
|
|
DN_Assert((protect & ~DN_MemPage_All) == 0);
|
|
DN_Assert(protect != 0);
|
|
DN_U32 result = 0;
|
|
|
|
if (protect & (DN_MemPage_NoAccess | DN_MemPage_Guard)) {
|
|
result = PROT_NONE;
|
|
} else {
|
|
if (protect & DN_MemPage_Read)
result |= PROT_READ;
if (protect & DN_MemPage_Write)
result |= PROT_WRITE;
|
|
}
|
|
return result;
|
|
}
|
|
|
|
DN_API void *DN_OS_MemReserve(DN_USize size, DN_MemCommit commit, DN_U32 page_flags)
|
|
{
|
|
unsigned long os_page_flags = DN_OS_MemConvertPageToOSFlags_(page_flags);
|
|
|
|
if (commit == DN_MemCommit_Yes)
|
|
os_page_flags |= (PROT_READ | PROT_WRITE);
|
|
|
|
void *result = mmap(nullptr, size, os_page_flags, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
|
|
DN_Atomic_AddU64(&g_dn_os_core_->mem_allocs_total, 1);
|
|
DN_Atomic_AddU64(&g_dn_os_core_->mem_allocs_frame, 1);
|
|
if (result == MAP_FAILED)
|
|
result = nullptr;
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_OS_MemCommit(void *ptr, DN_USize size, DN_U32 page_flags)
|
|
{
|
|
bool result = false;
|
|
if (!ptr || size == 0)
|
|
return false;
|
|
|
|
unsigned long os_page_flags = DN_OS_MemConvertPageToOSFlags_(page_flags);
|
|
result = mprotect(ptr, size, os_page_flags) == 0;
|
|
DN_Atomic_AddU64(&g_dn_os_core_->mem_allocs_total, 1);
|
|
DN_Atomic_AddU64(&g_dn_os_core_->mem_allocs_frame, 1);
|
|
return result;
|
|
}
|
|
|
|
DN_API void DN_OS_MemDecommit(void *ptr, DN_USize size)
|
|
{
|
|
mprotect(ptr, size, PROT_NONE);
|
|
madvise(ptr, size, MADV_FREE);
|
|
}
|
|
|
|
DN_API void DN_OS_MemRelease(void *ptr, DN_USize size)
|
|
{
|
|
munmap(ptr, size);
|
|
}
|
|
|
|
DN_API int DN_OS_MemProtect(void *ptr, DN_USize size, DN_U32 page_flags)
|
|
{
|
|
if (!ptr || size == 0)
|
|
return 0;
|
|
|
|
static DN_Str8 const ALIGNMENT_ERROR_MSG = DN_STR8(
|
|
"Page protection requires pointers to be page aligned because we "
|
|
"can only guard memory at a multiple of the page boundary.");
|
|
DN_AssertF(DN_IsPowerOfTwoAligned(DN_CAST(uintptr_t) ptr, g_dn_os_core_->page_size),
|
|
"%s",
|
|
ALIGNMENT_ERROR_MSG.data);
|
|
DN_AssertF(
|
|
DN_IsPowerOfTwoAligned(size, g_dn_os_core_->page_size), "%s", ALIGNMENT_ERROR_MSG.data);
|
|
|
|
unsigned long os_page_flags = DN_OS_MemConvertPageToOSFlags_(page_flags);
|
|
int result = mprotect(ptr, size, os_page_flags);
|
|
DN_AssertF(result == 0, "mprotect failed (%d)", errno);
|
|
return result;
|
|
}
|
|
|
|
DN_API void *DN_OS_MemAlloc(DN_USize size, DN_ZeroMem zero_mem)
|
|
{
|
|
void *result = zero_mem == DN_ZeroMem_Yes ? calloc(1, size) : malloc(size);
|
|
return result;
|
|
}
|
|
|
|
DN_API void DN_OS_MemDealloc(void *ptr)
|
|
{
|
|
free(ptr);
|
|
}
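// NOTE: On POSIX the reserve/commit/decommit/release quartet maps onto
// mmap/mprotect/madvise/munmap. Illustrative sketch (disabled; helper name
// made up): reserve a region, commit the first page before touching it,
// then release the whole mapping.
#if 0
static void DN_Example_VMem_()
{
    DN_USize size = g_dn_os_core_->page_size * 16;
    void    *base = DN_OS_MemReserve(size, DN_MemCommit_No, DN_MemPage_ReadWrite);
    if (base) {
        DN_OS_MemCommit(base, g_dn_os_core_->page_size, DN_MemPage_ReadWrite); // make the first page usable
        DN_Memset(base, 0xAB, g_dn_os_core_->page_size);
        DN_OS_MemRelease(base, size);
    }
}
#endif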
|
|
|
|
// NOTE: Date //////////////////////////////////////////////////////////////////////////////////////
|
|
DN_API DN_OSDateTime DN_OS_DateLocalTimeNow()
|
|
{
|
|
DN_OSDateTime result = {};
|
|
struct timespec ts;
|
|
clock_gettime(CLOCK_REALTIME, &ts);
|
|
|
|
// NOTE: localtime_r is used because it is thread safe
|
|
// See: https://linux.die.net/man/3/localtime
|
|
// According to POSIX.1-2004, localtime() is required to behave as though
|
|
// tzset(3) was called, while localtime_r() does not have this requirement.
|
|
// For portable code tzset(3) should be called before localtime_r().
|
|
for (static bool once = true; once; once = false)
|
|
tzset();
|
|
|
|
struct tm time = {};
|
|
localtime_r(&ts.tv_sec, &time);
|
|
|
|
result.hour = time.tm_hour;
|
|
result.minutes = time.tm_min;
|
|
result.seconds = time.tm_sec;
|
|
|
|
result.day = DN_CAST(uint8_t) time.tm_mday;
|
|
result.month = DN_CAST(uint8_t) time.tm_mon + 1;
|
|
result.year = 1900 + DN_CAST(int16_t) time.tm_year;
|
|
return result;
|
|
}
|
|
|
|
DN_API uint64_t DN_OS_DateUnixTimeNs()
|
|
{
|
|
struct timespec ts = {};
|
|
clock_gettime(CLOCK_REALTIME, &ts);
|
|
uint64_t result = (ts.tv_sec * 1000 /*ms*/ * 1000 /*us*/ * 1000 /*ns*/) + ts.tv_nsec;
|
|
return result;
|
|
}
|
|
|
|
DN_API uint64_t DN_OS_DateLocalToUnixTimeS(DN_OSDateTime)
|
|
{
|
|
DN_AssertOnce(!"Unimplemented");
|
|
uint64_t result = 0;
|
|
return result;
|
|
}
|
|
|
|
DN_API uint64_t DN_OS_DateToUnixTimeS(DN_OSDateTime date)
|
|
{
|
|
DN_Assert(DN_OS_DateIsValid(date));
|
|
struct tm timeinfo = {};
|
|
timeinfo.tm_year = date.year - 1900;
|
|
timeinfo.tm_mon = date.month - 1;
|
|
timeinfo.tm_mday = date.day;
|
|
timeinfo.tm_hour = date.hour;
|
|
timeinfo.tm_min = date.minutes;
|
|
timeinfo.tm_sec = date.seconds;
|
|
uint64_t result = mktime(&timeinfo);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_OSDateTime DN_OS_DateUnixTimeSToDate(uint64_t time)
|
|
{
|
|
time_t posix_time = DN_CAST(time_t) time;
|
|
struct tm posix_date = {};
gmtime_r(&posix_time, &posix_date); // NOTE: gmtime_r for thread safety, mirroring the localtime_r usage above
|
|
DN_OSDateTime result = {};
|
|
result.year = posix_date.tm_year + 1900;
|
|
result.month = posix_date.tm_mon + 1;
|
|
result.day = posix_date.tm_mday;
|
|
result.hour = posix_date.tm_hour;
|
|
result.minutes = posix_date.tm_min;
|
|
result.seconds = posix_date.tm_sec;
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_OS_SecureRNGBytes(void *buffer, DN_U32 size)
|
|
{
|
|
#if defined(DN_PLATFORM_EMSCRIPTEN)
|
|
(void)buffer;
|
|
(void)size;
|
|
return false;
|
|
#else
|
|
if (!buffer)
|
|
return false;
|
|
|
|
if (size == 0)
|
|
return true;
|
|
|
|
DN_AssertF(size <= 32,
|
|
"We can increase this by chunking the buffer and filling 32 bytes at a time. *Nix "
|
|
"guarantees 32 "
|
|
"bytes can always be fulfilled by this system at a time");
|
|
// TODO(doyle):
|
|
// https://github.com/jedisct1/libsodium/blob/master/src/libsodium/randombytes/sysrandom/randombytes_sysrandom.c
|
|
// TODO(doyle): https://man7.org/linux/man-pages/man2/getrandom.2.html
|
|
DN_U32 read_bytes = 0;
|
|
do {
|
|
read_bytes =
|
|
getrandom(buffer, size, 0); // NOTE: EINTR can not be triggered if size <= 32 bytes
|
|
} while (read_bytes != size && errno == EAGAIN); // NOTE: '&&' so a stale EAGAIN from an unrelated call can't spin forever after a successful read
|
|
return true;
|
|
#endif
|
|
}
|
|
|
|
DN_API bool DN_OS_SetEnvVar(DN_Str8 name, DN_Str8 value)
|
|
{
|
|
DN_AssertFOnce(false, "Unimplemented");
|
|
(void)name;
|
|
(void)value;
|
|
bool result = false;
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_OSDiskSpace DN_OS_DiskSpace(DN_Str8 path)
|
|
{
|
|
DN_OSTLSTMem tmem = DN_OS_TLSPushTMem(nullptr);
|
|
DN_OSDiskSpace result = {};
|
|
DN_Str8 path_z_terminated = DN_Str8_Copy(tmem.arena, path);
|
|
|
|
struct statvfs info = {};
|
|
if (statvfs(path_z_terminated.data, &info) != 0)
|
|
return result;
|
|
|
|
result.success = true;
|
|
result.avail = info.f_bavail * info.f_frsize;
|
|
result.size = info.f_blocks * info.f_frsize;
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_OS_EXEPath(DN_Arena *arena)
|
|
{
|
|
DN_Str8 result = {};
|
|
if (!arena)
|
|
return result;
|
|
|
|
int required_size_wo_null_terminator = 0;
|
|
for (int try_size = 128;; try_size *= 2) {
|
|
auto scoped_arena = DN_ArenaTempMemScope(arena);
|
|
char *try_buf = DN_Arena_NewArray(arena, char, try_size, DN_ZeroMem_No);
|
|
int bytes_written = readlink("/proc/self/exe", try_buf, try_size);
|
|
if (bytes_written == -1) {
|
|
// Failed, we're unable to determine the executable directory
|
|
break;
|
|
} else if (bytes_written == try_size) {
|
|
// Try again; if the returned size equals the buffer size we may have
// prematurely truncated the path according to the man pages
|
|
continue;
|
|
} else {
|
|
// readlink will give us the path to the executable. Once we
|
|
// determine the correct buffer size required to get the full file
|
|
// path, we do some post-processing on said string and extract just
|
|
// the directory.
|
|
|
|
// TODO(dn): It'd be nice if there's some way of keeping this
|
|
// try_buf around, memcopy the bytes and trash the try_buf from the
|
|
// arena. Instead we just get the size and redo the call one last
|
|
// time after this "calculate" step.
|
|
DN_AssertF(bytes_written < try_size,
|
|
"bytes_written can never be greater than the try size, function writes at "
|
|
"most try_size");
|
|
required_size_wo_null_terminator = bytes_written;
|
|
break;
|
|
}
|
|
}
|
|
|
|
if (required_size_wo_null_terminator) {
|
|
DN_ArenaTempMem temp_mem = DN_Arena_TempMemBegin(arena);
|
|
char *exe_path =
|
|
DN_Arena_NewArray(arena, char, required_size_wo_null_terminator + 1, DN_ZeroMem_No);
|
|
exe_path[required_size_wo_null_terminator] = 0;
|
|
|
|
int bytes_written = readlink("/proc/self/exe", exe_path, required_size_wo_null_terminator);
|
|
if (bytes_written == -1) {
|
|
// Note that if readlink fails again it can be because of a race
// condition: the exe or its directory could have been deleted since
// the last call, so we need to be careful.
|
|
DN_Arena_TempMemEnd(temp_mem);
|
|
} else {
|
|
result = DN_Str8_Init(exe_path, required_size_wo_null_terminator);
|
|
}
|
|
}
|
|
return result;
|
|
}
|
|
|
|
DN_API void DN_OS_SleepMs(DN_UInt milliseconds)
|
|
{
|
|
struct timespec ts;
|
|
ts.tv_sec = milliseconds / 1000;
|
|
ts.tv_nsec = (milliseconds % 1000) * 1'000'000; // Convert remaining milliseconds to nanoseconds
|
|
// nanosleep can fail if interrupted by a signal, so we loop until the full sleep time has passed
|
|
while (nanosleep(&ts, &ts) == -1 && errno == EINTR)
|
|
;
|
|
}
|
|
|
|
DN_API uint64_t DN_OS_PerfCounterFrequency()
|
|
{
|
|
// NOTE: On Linux we use clock_gettime(CLOCK_MONOTONIC_RAW) which
|
|
// increments at nanosecond granularity.
|
|
uint64_t result = 1'000'000'000;
|
|
return result;
|
|
}
|
|
|
|
DN_API uint64_t DN_OS_PerfCounterNow()
|
|
{
|
|
struct timespec ts;
|
|
clock_gettime(CLOCK_MONOTONIC_RAW, &ts);
|
|
uint64_t result = DN_CAST(uint64_t) ts.tv_sec * 1'000'000'000 + DN_CAST(uint64_t) ts.tv_nsec;
|
|
return result;
|
|
}
|
|
|
|
#if !defined(DN_NO_OS_FILE_API)
|
|
DN_API DN_OSPathInfo DN_OS_PathInfo(DN_Str8 path)
|
|
{
|
|
DN_OSPathInfo result = {};
|
|
if (!DN_Str8_HasData(path))
|
|
return result;
|
|
|
|
struct stat file_stat;
|
|
if (lstat(path.data, &file_stat) != -1) {
|
|
result.exists = true;
|
|
result.size = file_stat.st_size;
|
|
result.last_access_time_in_s = file_stat.st_atime;
|
|
result.last_write_time_in_s = file_stat.st_mtime;
|
|
// TODO(dn): Seems linux does not support creation time via stat. We
|
|
// shoddily deal with this.
|
|
result.create_time_in_s = DN_Min(result.last_access_time_in_s, result.last_write_time_in_s);
|
|
|
|
if (S_ISDIR(file_stat.st_mode))
|
|
result.type = DN_OSPathInfoType_Directory;
|
|
else if (S_ISREG(file_stat.st_mode))
|
|
result.type = DN_OSPathInfoType_File;
|
|
}
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_OS_PathDelete(DN_Str8 path)
|
|
{
|
|
bool result = false;
|
|
if (DN_Str8_HasData(path))
|
|
result = remove(path.data) == 0;
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_OS_FileExists(DN_Str8 path)
|
|
{
|
|
bool result = false;
|
|
if (!DN_Str8_HasData(path))
|
|
return result;
|
|
|
|
struct stat stat_result;
|
|
if (lstat(path.data, &stat_result) != -1)
|
|
result = S_ISREG(stat_result.st_mode) || S_ISLNK(stat_result.st_mode);
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_OS_CopyFile(DN_Str8 src, DN_Str8 dest, bool overwrite, DN_OSErrSink *error)
|
|
{
|
|
bool result = false;
|
|
#if defined(DN_PLATFORM_EMSCRIPTEN)
|
|
DN_OS_ErrSinkAppendF(error, 1, "Unsupported on Emscripten because of their VFS model");
|
|
#else
|
|
int src_fd = open(src.data, O_RDONLY);
|
|
if (src_fd == -1) {
|
|
int error_code = errno;
|
|
DN_OS_ErrSinkAppendF(error,
|
|
error_code,
|
|
"Failed to open file '%.*s' for copying: (%d) %s",
|
|
DN_STR_FMT(src),
|
|
error_code,
|
|
strerror(error_code));
|
|
return result;
|
|
}
|
|
DN_DEFER
|
|
{
|
|
close(src_fd);
|
|
};
|
|
|
|
// NOTE: File permissions are set to read/write for the owner, read-only for group and others (0644)
|
|
int dest_fd = open(dest.data, O_WRONLY | O_CREAT | (overwrite ? O_TRUNC : 0), 0644);
|
|
if (dest_fd == -1) {
|
|
int error_code = errno;
|
|
DN_OS_ErrSinkAppendF(error,
|
|
error_code,
|
|
"Failed to open file destination '%.*s' for copying to: (%d) %s",
|
|
DN_STR_FMT(dest),
|
|
error_code,
|
|
strerror(error_code));
|
|
return result;
|
|
}
|
|
DN_DEFER
|
|
{
|
|
close(dest_fd);
|
|
};
|
|
|
|
struct stat stat_existing;
|
|
int fstat_result = fstat(src_fd, &stat_existing);
|
|
if (fstat_result == -1) {
|
|
int error_code = errno;
|
|
DN_OS_ErrSinkAppendF(error,
|
|
error_code,
|
|
"Failed to query file size of '%.*s' for copying: (%d) %s",
|
|
DN_STR_FMT(src),
|
|
error_code,
|
|
strerror(error_code));
|
|
return result;
|
|
}
|
|
|
|
ssize_t bytes_written = sendfile64(dest_fd, src_fd, 0, stat_existing.st_size);
|
|
result = (bytes_written == stat_existing.st_size);
|
|
if (!result) {
|
|
int error_code = errno;
|
|
DN_OSTLSTMem tmem = DN_OS_TLSTMem(nullptr);
|
|
DN_Str8 file_size_str8 =
|
|
DN_CVT_U64ToByteSizeStr8(tmem.arena, stat_existing.st_size, DN_CVTU64ByteSizeType_Auto);
|
|
DN_Str8 bytes_written_str8 =
|
|
DN_CVT_U64ToByteSizeStr8(tmem.arena, bytes_written, DN_CVTU64ByteSizeType_Auto);
|
|
DN_OS_ErrSinkAppendF(error,
|
|
error_code,
|
|
"Failed to copy file '%.*s' to '%.*s', we copied %.*s but the file "
|
|
"size is %.*s: (%d) %s",
|
|
DN_STR_FMT(src),
|
|
DN_STR_FMT(dest),
|
|
DN_STR_FMT(bytes_written_str8),
|
|
DN_STR_FMT(file_size_str8),
|
|
error_code,
|
|
strerror(error_code));
|
|
}
|
|
|
|
#endif
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_OS_MoveFile(DN_Str8 src, DN_Str8 dest, bool overwrite, DN_OSErrSink *error)
|
|
{
|
|
// See: https://github.com/gingerBill/gb/blob/master/gb.h
|
|
bool result = false;
|
|
bool file_moved = true;
|
|
if (link(src.data, dest.data) == -1) {
|
|
// NOTE: Link can fail if we're trying to link across different volumes
// so we fall back to copying the file instead.
file_moved = DN_OS_CopyFile(src, dest, overwrite, error);
|
|
}
|
|
|
|
if (file_moved) {
|
|
result = true;
|
|
int unlink_result = unlink(src.data);
|
|
if (unlink_result == -1) {
|
|
int error_code = errno;
|
|
DN_OS_ErrSinkAppendF(
|
|
error,
|
|
error_code,
|
|
"File '%.*s' was moved but failed to be unlinked from old location: (%d) %s",
|
|
DN_STR_FMT(src),
|
|
error_code,
|
|
strerror(error_code));
|
|
}
|
|
}
|
|
return result;
|
|
}
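
// NOTE: Illustrative sketch (not part of the generated library): the move above
// is the classic POSIX link-then-unlink trick, with a copy fallback for
// cross-volume moves where link() fails (e.g. EXDEV). A minimal standalone
// version of that pattern, assuming <stdio.h> and <unistd.h>; CopyAcrossVolumes
// is a hypothetical helper standing in for whatever byte-copy routine is used.
#if 0
static bool ExampleMoveFile(char const *src, char const *dest)
{
    bool moved = link(src, dest) == 0;
    if (!moved)
        moved = CopyAcrossVolumes(src, dest); // Hypothetical fallback copy
    if (moved && unlink(src) == -1)
        perror("unlink"); // New file exists, but the old path was not removed
    return moved;
}
#endif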
|
|
|
|
DN_API bool DN_OS_MakeDir(DN_Str8 path)
|
|
{
|
|
DN_OSTLSTMem tmem = DN_OS_TLSTMem(nullptr);
|
|
bool result = true;
|
|
|
|
// TODO(doyle): Implement this without using the path indexes, it's not
|
|
// necessary. See Windows implementation.
|
|
DN_USize path_indexes_size = 0;
|
|
uint16_t path_indexes[64] = {};
|
|
|
|
DN_Str8 copy = DN_Str8_Copy(tmem.arena, path);
|
|
for (DN_USize index = copy.size - 1; index < copy.size; index--) {
|
|
bool first_char = index == (copy.size - 1);
|
|
char ch = copy.data[index];
|
|
if (ch == '/' || first_char) {
|
|
char temp = copy.data[index];
|
|
|
|
if (!first_char)
|
|
copy.data[index] = 0; // Temporarily null terminate it
|
|
|
|
bool is_file = DN_OS_FileExists(copy);
|
|
|
|
if (!first_char)
|
|
copy.data[index] = temp; // Undo null termination
|
|
|
|
if (is_file) {
|
|
// NOTE: There's something that exists in at this path, but
|
|
// it's not a directory. This request to make a directory is
|
|
// invalid.
|
|
return false;
|
|
} else if (DN_OS_DirExists(copy)) {
|
|
// NOTE: We found a directory, we can stop here and start
|
|
// building up all the directories that didn't exist up to
|
|
// this point.
|
|
break;
|
|
} else {
|
|
// NOTE: There's nothing that exists at this path, we can
|
|
// create a directory here
|
|
path_indexes[path_indexes_size++] = DN_CAST(uint16_t) index;
|
|
}
|
|
}
|
|
}
|
|
|
|
for (DN_USize index = path_indexes_size - 1; result && index < path_indexes_size; index--) {
|
|
uint16_t path_index = path_indexes[index];
|
|
char temp = copy.data[path_index];
|
|
|
|
if (index != 0)
|
|
copy.data[path_index] = 0;
|
|
        result &= mkdir(copy.data, 0774) == 0;
|
|
if (index != 0)
|
|
copy.data[path_index] = temp;
|
|
}
|
|
return result;
|
|
}
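
// NOTE: Illustrative sketch (not part of the generated library): the routine
// above records the offset of every missing parent and then mkdir()s them from
// the shallowest to the deepest. The same "mkdir -p" behaviour can be sketched
// by walking the path forwards and creating each prefix, assuming <errno.h>,
// <sys/stat.h> and a writable, null-terminated path buffer.
#if 0
static bool ExampleMakeDirRecursive(char *path) // Buffer is modified temporarily in place
{
    bool result = true;
    for (char *ch = path + 1; result && *ch; ch++) {
        if (*ch != '/')
            continue;
        *ch    = 0;                                            // Truncate to the current prefix
        result = mkdir(path, 0774) == 0 || errno == EEXIST;    // Existing directories are fine
        *ch    = '/';                                          // Restore the separator
    }
    if (result)
        result = mkdir(path, 0774) == 0 || errno == EEXIST;    // Finally create the full path
    return result;
}
#endif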
|
|
|
|
DN_API bool DN_OS_DirExists(DN_Str8 path)
|
|
{
|
|
bool result = false;
|
|
if (!DN_Str8_HasData(path))
|
|
return result;
|
|
|
|
struct stat stat_result;
|
|
if (lstat(path.data, &stat_result) != -1)
|
|
result = S_ISDIR(stat_result.st_mode);
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_OS_DirIterate(DN_Str8 path, DN_OSDirIterator *it)
|
|
{
|
|
if (!it->handle) {
|
|
it->handle = opendir(path.data);
|
|
if (!it->handle)
|
|
return false;
|
|
}
|
|
|
|
struct dirent *entry;
|
|
for (;;) {
|
|
entry = readdir(DN_CAST(DIR *) it->handle);
|
|
if (entry == NULL)
|
|
break;
|
|
|
|
if (strcmp(entry->d_name, ".") == 0 || strcmp(entry->d_name, "..") == 0)
|
|
continue;
|
|
|
|
DN_USize name_size = DN_CStr8_Size(entry->d_name);
|
|
DN_USize clamped_size = DN_Min(sizeof(it->buffer) - 1, name_size);
|
|
DN_AssertF(name_size == clamped_size, "name: %s, name_size: %zu, clamped_size: %zu", entry->d_name, name_size, clamped_size);
|
|
DN_Memcpy(it->buffer, entry->d_name, clamped_size);
|
|
it->buffer[clamped_size] = 0;
|
|
it->file_name = DN_Str8_Init(it->buffer, clamped_size);
|
|
return true;
|
|
}
|
|
|
|
closedir(DN_CAST(DIR *) it->handle);
|
|
it->handle = NULL;
|
|
it->file_name = {};
|
|
it->buffer[0] = 0;
|
|
return false;
|
|
}
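
// NOTE: Usage sketch (not part of the generated library): DN_OS_DirIterate is a
// pull-style iterator, so a zero-initialised DN_OSDirIterator is passed in
// repeatedly until the function returns false, at which point the iterator has
// already closed its underlying handle. The printf call is just for illustration.
#if 0
static void ExampleListDirectory()
{
    DN_OSDirIterator it = {};
    while (DN_OS_DirIterate(DN_STR8("."), &it))
        printf("%.*s\n", DN_STR_FMT(it.file_name));
}
#endif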
|
|
|
|
// NOTE: R/W Stream API ////////////////////////////////////////////////////////////////////////////
|
|
DN_API DN_OSFile DN_OS_FileOpen(DN_Str8 path,
|
|
DN_OSFileOpen open_mode,
|
|
DN_OSFileAccess access,
|
|
DN_OSErrSink *error)
|
|
{
|
|
DN_OSFile result = {};
|
|
if (!DN_Str8_HasData(path) || path.size <= 0)
|
|
return result;
|
|
|
|
if ((access & ~(DN_OSFileAccess_All) || ((access & DN_OSFileAccess_All) == 0))) {
|
|
DN_InvalidCodePath;
|
|
return result;
|
|
}
|
|
|
|
if (access & DN_OSFileAccess_Execute) {
|
|
result.error = true;
|
|
DN_OS_ErrSinkAppendF(
|
|
error,
|
|
1,
|
|
"Failed to open file '%.*s': File access flag 'execute' is not supported",
|
|
DN_STR_FMT(path));
|
|
DN_InvalidCodePath; // TODO: Not supported via fopen
|
|
return result;
|
|
}
|
|
|
|
    // NOTE: The fopen interface is not as expressive as the Win32 API. We
    // fopen the file beforehand to set up the state/check for validity
    // before closing and reopening it with the requested access
    // permissions.
|
|
{
|
|
FILE *handle = nullptr;
|
|
switch (open_mode) {
|
|
case DN_OSFileOpen_CreateAlways: handle = fopen(path.data, "w"); break;
|
|
case DN_OSFileOpen_OpenIfExist: handle = fopen(path.data, "r"); break;
|
|
case DN_OSFileOpen_OpenAlways: handle = fopen(path.data, "a"); break;
|
|
default: DN_InvalidCodePath; break;
|
|
}
|
|
|
|
if (!handle) { // TODO(doyle): FileOpen flag to string
|
|
result.error = true;
|
|
DN_OS_ErrSinkAppendF(error,
|
|
1,
|
|
"Failed to open file '%.*s': File could not be opened in requested "
|
|
"mode 'DN_OSFileOpen' flag %d",
|
|
DN_STR_FMT(path),
|
|
open_mode);
|
|
return result;
|
|
}
|
|
fclose(handle);
|
|
}
|
|
|
|
char const *fopen_mode = nullptr;
|
|
if (access & DN_OSFileAccess_AppendOnly)
|
|
fopen_mode = "a+";
|
|
else if (access & DN_OSFileAccess_Write)
|
|
fopen_mode = "w+";
|
|
else if (access & DN_OSFileAccess_Read)
|
|
fopen_mode = "r";
|
|
|
|
FILE *handle = fopen(path.data, fopen_mode);
|
|
if (!handle) {
|
|
result.error = true;
|
|
DN_OS_ErrSinkAppendF(error,
|
|
1,
|
|
"Failed to open file '%S': File could not be opened with requested "
|
|
"access mode 'DN_OSFileAccess' %d",
|
|
path,
|
|
fopen_mode);
|
|
return result;
|
|
}
|
|
result.handle = handle;
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_OSFileRead DN_OS_FileRead(DN_OSFile *file, void *buffer, DN_USize size, DN_OSErrSink *err)
|
|
{
|
|
DN_OSFileRead result = {};
|
|
if (!file || !file->handle || file->error || !buffer || size <= 0)
|
|
return result;
|
|
|
|
result.bytes_read = fread(buffer, 1, size, DN_CAST(FILE *) file->handle);
|
|
if (feof(DN_CAST(FILE*)file->handle)) {
|
|
DN_OSTLSTMem tmem = DN_OS_TLSTMem(nullptr);
|
|
DN_Str8 buffer_size_str8 = DN_CVT_U64ToByteSizeStr8(tmem.arena, size, DN_CVTU64ByteSizeType_Auto);
|
|
DN_OS_ErrSinkAppendF(err, 1, "Failed to read %S from file", buffer_size_str8);
|
|
return result;
|
|
}
|
|
|
|
result.success = true;
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_OS_FileWritePtr(DN_OSFile *file, void const *buffer, DN_USize size, DN_OSErrSink *err)
|
|
{
|
|
if (!file || !file->handle || file->error || !buffer || size <= 0)
|
|
return false;
|
|
bool result =
|
|
fwrite(buffer, DN_CAST(DN_USize) size, 1 /*count*/, DN_CAST(FILE *) file->handle) ==
|
|
1 /*count*/;
|
|
if (!result) {
|
|
DN_OSTLSTMem tmem = DN_OS_TLSTMem(nullptr);
|
|
DN_Str8 buffer_size_str8 =
|
|
DN_CVT_U64ToByteSizeStr8(tmem.arena, size, DN_CVTU64ByteSizeType_Auto);
|
|
DN_OS_ErrSinkAppendF(
|
|
err, 1, "Failed to write buffer (%s) to file handle", DN_STR_FMT(buffer_size_str8));
|
|
}
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_OS_FileFlush(DN_OSFile *file, DN_OSErrSink *err)
|
|
{
|
|
// TODO: errno is not thread safe
|
|
int fd = fileno(DN_CAST(FILE *) file->handle);
|
|
if (fd == -1) {
|
|
DN_OS_ErrSinkAppendF(err, errno, "Failed to flush file buffer to disk, file handle could not be converted to descriptor (%d): %s", fd, strerror(errno));
|
|
return false;
|
|
}
|
|
|
|
int fsync_result = fsync(fd);
|
|
if (fsync_result == -1) {
|
|
DN_OS_ErrSinkAppendF(err, errno, "Failed to flush file buffer to disk (%d): %s", fsync_result, strerror(errno));
|
|
return false;
|
|
}
|
|
return true;
|
|
}
|
|
|
|
DN_API void DN_OS_FileClose(DN_OSFile *file)
|
|
{
|
|
if (!file || !file->handle || file->error)
|
|
return;
|
|
fclose(DN_CAST(FILE *) file->handle);
|
|
*file = {};
|
|
}
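
// NOTE: Usage sketch (not part of the generated library): the R/W stream API
// above composes as open -> write -> flush -> close, with failures recorded in
// the error sink. Passing nullptr for the sink mirrors how this file already
// calls DN_OS_FileOpen for /proc/self/status further below.
#if 0
static bool ExampleWriteTextFile()
{
    DN_OSFile file = DN_OS_FileOpen(DN_STR8("example.txt"),
                                    DN_OSFileOpen_CreateAlways,
                                    DN_OSFileAccess_Write,
                                    nullptr /*error sink*/);
    if (file.error)
        return false;

    char const payload[] = "hello";
    bool result  = DN_OS_FileWritePtr(&file, payload, sizeof(payload) - 1, nullptr);
    result      &= DN_OS_FileFlush(&file, nullptr);
    DN_OS_FileClose(&file);
    return result;
}
#endif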
|
|
#endif // !defined(DN_NO_OS_FILE_API)
|
|
|
|
// NOTE: DN_OSExec /////////////////////////////////////////////////////////////////////////////////
|
|
DN_API void DN_OS_Exit(int32_t exit_code)
|
|
{
|
|
exit(DN_CAST(int) exit_code);
|
|
}
|
|
|
|
enum DN_OSPipeType_
|
|
{
|
|
DN_OSPipeType__Read,
|
|
DN_OSPipeType__Write,
|
|
DN_OSPipeType__Count,
|
|
};
|
|
|
|
DN_API DN_OSExecResult DN_OS_ExecWait(DN_OSExecAsyncHandle handle,
|
|
DN_Arena *arena,
|
|
DN_OSErrSink *error)
|
|
{
|
|
DN_OSExecResult result = {};
|
|
if (!handle.process || handle.os_error_code || handle.exit_code) {
|
|
if (handle.os_error_code)
|
|
result.os_error_code = handle.os_error_code;
|
|
else
|
|
result.exit_code = handle.exit_code;
|
|
|
|
DN_Assert(!handle.stdout_read);
|
|
DN_Assert(!handle.stdout_write);
|
|
DN_Assert(!handle.stderr_read);
|
|
DN_Assert(!handle.stderr_write);
|
|
return result;
|
|
}
|
|
|
|
#if defined(DN_PLATFORM_EMSCRIPTEN)
|
|
DN_InvalidCodePathF("Unsupported operation");
|
|
#endif
|
|
|
|
static_assert(sizeof(pid_t) <= sizeof(handle.process),
|
|
"We store the PID opaquely in a register sized pointer");
|
|
pid_t process = {};
|
|
DN_Memcpy(&process, &handle.process, sizeof(process));
|
|
for (;;) {
|
|
int status = 0;
|
|
if (waitpid(process, &status, 0) < 0) {
|
|
result.os_error_code = errno;
|
|
break;
|
|
}
|
|
|
|
if (WIFEXITED(status)) {
|
|
result.exit_code = WEXITSTATUS(status);
|
|
break;
|
|
}
|
|
|
|
if (WIFSIGNALED(status)) {
|
|
result.os_error_code = WTERMSIG(status);
|
|
break;
|
|
}
|
|
}
|
|
|
|
int stdout_pipe[DN_OSPipeType__Count] = {};
|
|
int stderr_pipe[DN_OSPipeType__Count] = {};
|
|
DN_Memcpy(&stdout_pipe[DN_OSPipeType__Read],
|
|
&handle.stdout_read,
|
|
sizeof(stdout_pipe[DN_OSPipeType__Read]));
|
|
DN_Memcpy(&stdout_pipe[DN_OSPipeType__Write],
|
|
&handle.stdout_write,
|
|
sizeof(stdout_pipe[DN_OSPipeType__Write]));
|
|
DN_Memcpy(&stderr_pipe[DN_OSPipeType__Read],
|
|
&handle.stderr_read,
|
|
sizeof(stderr_pipe[DN_OSPipeType__Read]));
|
|
DN_Memcpy(&stderr_pipe[DN_OSPipeType__Write],
|
|
&handle.stderr_write,
|
|
sizeof(stderr_pipe[DN_OSPipeType__Write]));
|
|
|
|
    // NOTE: Process has finished, close the write end of the pipes. Only close
    // the ends we actually created, otherwise we would be closing fd 0 (stdin).
    if (stdout_pipe[DN_OSPipeType__Write])
        close(stdout_pipe[DN_OSPipeType__Write]);
    if (stderr_pipe[DN_OSPipeType__Write])
        close(stderr_pipe[DN_OSPipeType__Write]);
|
|
|
|
// NOTE: Read the data from the read end of the pipe
|
|
if (result.os_error_code == 0) {
|
|
DN_OSTLSTMem tmem = DN_OS_TLSTMem(arena);
|
|
if (arena && handle.stdout_read) {
|
|
char buffer[4096];
|
|
DN_Str8Builder builder = DN_Str8Builder_Init(tmem.arena);
|
|
for (;;) {
|
|
ssize_t bytes_read =
|
|
read(stdout_pipe[DN_OSPipeType__Read], buffer, sizeof(buffer));
|
|
if (bytes_read <= 0)
|
|
break;
|
|
DN_Str8Builder_AppendF(&builder, "%.*s", bytes_read, buffer);
|
|
}
|
|
|
|
result.stdout_text = DN_Str8Builder_Build(&builder, arena);
|
|
}
|
|
|
|
if (arena && handle.stderr_read) {
|
|
char buffer[4096];
|
|
DN_Str8Builder builder = DN_Str8Builder_Init(tmem.arena);
|
|
for (;;) {
|
|
ssize_t bytes_read =
|
|
read(stderr_pipe[DN_OSPipeType__Read], buffer, sizeof(buffer));
|
|
if (bytes_read <= 0)
|
|
break;
|
|
DN_Str8Builder_AppendF(&builder, "%.*s", bytes_read, buffer);
|
|
}
|
|
|
|
result.stderr_text = DN_Str8Builder_Build(&builder, arena);
|
|
}
|
|
}
|
|
|
|
    if (stdout_pipe[DN_OSPipeType__Read])
        close(stdout_pipe[DN_OSPipeType__Read]);
    if (stderr_pipe[DN_OSPipeType__Read])
        close(stderr_pipe[DN_OSPipeType__Read]);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_OSExecAsyncHandle DN_OS_ExecAsync(DN_Slice<DN_Str8> cmd_line,
|
|
DN_OSExecArgs *args,
|
|
DN_OSErrSink *error)
|
|
{
|
|
#if defined(DN_PLATFORM_EMSCRIPTEN)
|
|
DN_InvalidCodePathF("Unsupported operation");
|
|
#endif
|
|
DN_AssertFOnce(args->environment.size == 0, "Unimplemented in POSIX");
|
|
|
|
DN_OSExecAsyncHandle result = {};
|
|
if (cmd_line.size == 0)
|
|
return result;
|
|
|
|
DN_OSTLSTMem tmem = DN_OS_TLSTMem(nullptr);
|
|
DN_Str8 cmd_rendered = DN_Slice_Str8Render(tmem.arena, cmd_line, DN_STR8(" "));
|
|
int stdout_pipe[DN_OSPipeType__Count] = {};
|
|
int stderr_pipe[DN_OSPipeType__Count] = {};
|
|
|
|
// NOTE: Open stdout pipe //////////////////////////////////////////////////////////////////////
|
|
if (DN_Bit_IsSet(args->flags, DN_OSExecFlags_SaveStdout)) {
|
|
if (pipe(stdout_pipe) == -1) {
|
|
result.os_error_code = errno;
|
|
DN_OS_ErrSinkAppendF(
|
|
error,
|
|
result.os_error_code,
|
|
"Failed to create stdout pipe to redirect the output of the command '%.*s': %s",
|
|
DN_STR_FMT(cmd_rendered),
|
|
strerror(result.os_error_code));
|
|
return result;
|
|
}
|
|
DN_Assert(stdout_pipe[DN_OSPipeType__Read] != 0);
|
|
DN_Assert(stdout_pipe[DN_OSPipeType__Write] != 0);
|
|
}
|
|
|
|
DN_DEFER
|
|
{
|
|
if (result.os_error_code == 0 && result.exit_code == 0)
|
|
return;
|
|
close(stdout_pipe[DN_OSPipeType__Read]);
|
|
close(stdout_pipe[DN_OSPipeType__Write]);
|
|
};
|
|
|
|
// NOTE: Open stderr pipe //////////////////////////////////////////////////////////////////////
|
|
if (DN_Bit_IsSet(args->flags, DN_OSExecFlags_SaveStderr)) {
|
|
if (DN_Bit_IsSet(args->flags, DN_OSExecFlags_MergeStderrToStdout)) {
|
|
stderr_pipe[DN_OSPipeType__Read] = stdout_pipe[DN_OSPipeType__Read];
|
|
stderr_pipe[DN_OSPipeType__Write] = stdout_pipe[DN_OSPipeType__Write];
|
|
} else if (pipe(stderr_pipe) == -1) {
|
|
result.os_error_code = errno;
|
|
DN_OS_ErrSinkAppendF(
|
|
error,
|
|
result.os_error_code,
|
|
"Failed to create stderr pipe to redirect the output of the command '%.*s': %s",
|
|
DN_STR_FMT(cmd_rendered),
|
|
strerror(result.os_error_code));
|
|
return result;
|
|
}
|
|
DN_Assert(stderr_pipe[DN_OSPipeType__Read] != 0);
|
|
DN_Assert(stderr_pipe[DN_OSPipeType__Write] != 0);
|
|
}
|
|
|
|
DN_DEFER
|
|
{
|
|
if (result.os_error_code == 0 && result.exit_code == 0)
|
|
return;
|
|
close(stderr_pipe[DN_OSPipeType__Read]);
|
|
close(stderr_pipe[DN_OSPipeType__Write]);
|
|
};
|
|
|
|
pid_t child_pid = fork();
|
|
if (child_pid < 0) {
|
|
result.os_error_code = errno;
|
|
DN_OS_ErrSinkAppendF(
|
|
error,
|
|
result.os_error_code,
|
|
"Failed to fork process to execute the command '%.*s': %s",
|
|
DN_STR_FMT(cmd_rendered),
|
|
strerror(result.os_error_code));
|
|
return result;
|
|
}
|
|
|
|
if (child_pid == 0) { // Child process
|
|
if (DN_Bit_IsSet(args->flags, DN_OSExecFlags_SaveStdout) &&
|
|
(dup2(stdout_pipe[DN_OSPipeType__Write], STDOUT_FILENO) == -1)) {
|
|
result.os_error_code = errno;
|
|
DN_OS_ErrSinkAppendF(
|
|
error,
|
|
result.os_error_code,
|
|
"Failed to redirect stdout 'write' pipe for output of command '%.*s': %s",
|
|
DN_STR_FMT(cmd_rendered),
|
|
strerror(result.os_error_code));
|
|
return result;
|
|
}
|
|
|
|
if (DN_Bit_IsSet(args->flags, DN_OSExecFlags_SaveStderr) &&
|
|
(dup2(stderr_pipe[DN_OSPipeType__Write], STDERR_FILENO) == -1)) {
|
|
result.os_error_code = errno;
|
|
DN_OS_ErrSinkAppendF(
|
|
error,
|
|
result.os_error_code,
|
|
"Failed to redirect stderr 'read' pipe for output of command '%.*s': %s",
|
|
DN_STR_FMT(cmd_rendered),
|
|
strerror(result.os_error_code));
|
|
return result;
|
|
}
|
|
|
|
// NOTE: Convert the command into something suitable for execvp
|
|
char **argv =
|
|
DN_Arena_NewArray(tmem.arena, char *, cmd_line.size + 1 /*null*/, DN_ZeroMem_Yes);
|
|
if (!argv) {
|
|
result.exit_code = -1;
|
|
DN_OS_ErrSinkAppendF(
|
|
error,
|
|
result.os_error_code,
|
|
"Failed to create argument values from command line '%.*s': Out of memory",
|
|
DN_STR_FMT(cmd_rendered));
|
|
return result;
|
|
}
|
|
|
|
for (DN_ForIndexU(arg_index, cmd_line.size)) {
|
|
DN_Str8 arg = cmd_line.data[arg_index];
|
|
argv[arg_index] = DN_Str8_Copy(tmem.arena, arg).data; // NOTE: Copy string to guarantee it is null-terminated
|
|
}
|
|
|
|
// NOTE: Change the working directory if there is one
|
|
char *prev_working_dir = nullptr;
|
|
DN_DEFER
|
|
{
|
|
if (!prev_working_dir)
|
|
return;
|
|
if (result.os_error_code == 0) {
|
|
int chdir_result = chdir(prev_working_dir);
|
|
(void)chdir_result;
|
|
}
|
|
free(prev_working_dir);
|
|
};
|
|
|
|
if (args->working_dir.size) {
|
|
prev_working_dir = get_current_dir_name();
|
|
DN_Str8 working_dir = DN_Str8_Copy(tmem.arena, args->working_dir);
|
|
if (chdir(working_dir.data) == -1) {
|
|
result.os_error_code = errno;
|
|
DN_OS_ErrSinkAppendF(
|
|
error,
|
|
result.os_error_code,
|
|
"Failed to create argument values from command line '%.*s': %s",
|
|
DN_STR_FMT(cmd_rendered),
|
|
strerror(result.os_error_code));
|
|
return result;
|
|
}
|
|
}
|
|
|
|
// NOTE: Execute the command. We reuse argv because the first arg, the
|
|
// binary to execute is guaranteed to be null-terminated.
|
|
if (execvp(argv[0], argv) < 0) {
|
|
result.os_error_code = errno;
|
|
DN_OS_ErrSinkAppendF(
|
|
error,
|
|
result.os_error_code,
|
|
"Failed to execute command'%.*s': %s",
|
|
DN_STR_FMT(cmd_rendered),
|
|
strerror(result.os_error_code));
|
|
return result;
|
|
}
|
|
}
|
|
|
|
DN_Assert(result.os_error_code == 0);
|
|
DN_Memcpy(&result.stdout_read,
|
|
&stdout_pipe[DN_OSPipeType__Read],
|
|
sizeof(stdout_pipe[DN_OSPipeType__Read]));
|
|
DN_Memcpy(&result.stdout_write,
|
|
&stdout_pipe[DN_OSPipeType__Write],
|
|
sizeof(stdout_pipe[DN_OSPipeType__Write]));
|
|
|
|
if (DN_Bit_IsSet(args->flags, DN_OSExecFlags_SaveStderr) && DN_Bit_IsNotSet(args->flags, DN_OSExecFlags_MergeStderrToStdout)) {
|
|
DN_Memcpy(&result.stderr_read,
|
|
&stderr_pipe[DN_OSPipeType__Read],
|
|
sizeof(stderr_pipe[DN_OSPipeType__Read]));
|
|
DN_Memcpy(&result.stderr_write,
|
|
&stderr_pipe[DN_OSPipeType__Write],
|
|
sizeof(stderr_pipe[DN_OSPipeType__Write]));
|
|
}
|
|
result.exec_flags = args->flags;
|
|
DN_Memcpy(&result.process, &child_pid, sizeof(child_pid));
|
|
return result;
|
|
}
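
// NOTE: Illustrative sketch (not part of the generated library): ExecAsync +
// ExecWait above implement the usual POSIX capture pattern, i.e. pipe() to
// create a channel, fork(), dup2() the write end onto STDOUT_FILENO in the
// child before execvp(), then read() and waitpid() in the parent. A minimal
// standalone version of that pattern, assuming <sys/wait.h> and <unistd.h>
// and an out buffer of at least one byte, looks like this.
#if 0
static int ExampleRunAndCaptureStdout(char *const argv[], char *out, size_t out_size)
{
    int fds[2]; // [0] = read end, [1] = write end
    if (pipe(fds) == -1)
        return -1;

    pid_t pid = fork();
    if (pid < 0) {                     // Fork failed, nothing was started
        close(fds[0]);
        close(fds[1]);
        return -1;
    }

    if (pid == 0) {                    // Child: route stdout into the pipe and exec
        dup2(fds[1], STDOUT_FILENO);
        close(fds[0]);
        close(fds[1]);
        execvp(argv[0], argv);
        _exit(127);                    // Only reached if execvp failed
    }

    close(fds[1]);                     // Parent: keep only the read end
    size_t used = 0;
    for (;;) {
        ssize_t bytes_read = read(fds[0], out + used, out_size - 1 - used);
        if (bytes_read <= 0)
            break;
        used += (size_t)bytes_read;
    }
    out[used] = 0;
    close(fds[0]);

    int status = 0;
    waitpid(pid, &status, 0);
    return WIFEXITED(status) ? WEXITSTATUS(status) : -1;
}
#endif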
|
|
|
|
DN_API DN_OSExecResult DN_OS_ExecPump(DN_OSExecAsyncHandle handle,
|
|
char *stdout_buffer,
|
|
size_t *stdout_size,
|
|
char *stderr_buffer,
|
|
size_t *stderr_size,
|
|
DN_U32 timeout_ms,
|
|
DN_OSErrSink *err)
|
|
{
|
|
DN_InvalidCodePath;
|
|
DN_OSExecResult result = {};
|
|
return result;
|
|
}
|
|
|
|
static DN_POSIXCore *DN_OS_GetPOSIXCore_()
|
|
{
|
|
DN_Assert(g_dn_os_core_ && g_dn_os_core_->platform_context);
|
|
DN_POSIXCore *result = DN_CAST(DN_POSIXCore *)g_dn_os_core_->platform_context;
|
|
return result;
|
|
}
|
|
|
|
static DN_POSIXSyncPrimitive *DN_OS_U64ToPOSIXSyncPrimitive_(DN_U64 u64)
|
|
{
|
|
DN_POSIXSyncPrimitive *result = nullptr;
|
|
DN_Memcpy(&result, &u64, sizeof(u64));
|
|
return result;
|
|
}
|
|
|
|
static DN_U64 DN_POSIX_SyncPrimitiveToU64(DN_POSIXSyncPrimitive *primitive)
|
|
{
|
|
DN_U64 result = 0;
|
|
static_assert(sizeof(result) == sizeof(primitive), "Pointer size mis-match");
|
|
DN_Memcpy(&result, &primitive, sizeof(result));
|
|
return result;
|
|
}
|
|
|
|
static DN_POSIXSyncPrimitive *DN_POSIX_AllocSyncPrimitive_()
|
|
{
|
|
DN_POSIXCore *posix = DN_OS_GetPOSIXCore_();
|
|
DN_POSIXSyncPrimitive *result = nullptr;
|
|
pthread_mutex_lock(&posix->sync_primitive_free_list_mutex);
|
|
{
|
|
if (posix->sync_primitive_free_list) {
|
|
result = posix->sync_primitive_free_list;
|
|
posix->sync_primitive_free_list = posix->sync_primitive_free_list->next;
|
|
result->next = nullptr;
|
|
} else {
|
|
DN_OSCore *os = g_dn_os_core_;
|
|
result = DN_Arena_New(&os->arena, DN_POSIXSyncPrimitive, DN_ZeroMem_Yes);
|
|
}
|
|
}
|
|
pthread_mutex_unlock(&posix->sync_primitive_free_list_mutex);
|
|
return result;
|
|
}
|
|
|
|
static void DN_POSIX_DeallocSyncPrimitive_(DN_POSIXSyncPrimitive *primitive)
|
|
{
|
|
if (primitive) {
|
|
DN_POSIXCore *posix = DN_OS_GetPOSIXCore_();
|
|
pthread_mutex_lock(&posix->sync_primitive_free_list_mutex);
|
|
primitive->next = posix->sync_primitive_free_list;
|
|
posix->sync_primitive_free_list = primitive;
|
|
pthread_mutex_unlock(&posix->sync_primitive_free_list_mutex);
|
|
}
|
|
}
|
|
|
|
// NOTE: DN_OSSemaphore ////////////////////////////////////////////////////////////////////////////
|
|
DN_API DN_OSSemaphore DN_OS_SemaphoreInit(DN_U32 initial_count)
|
|
{
|
|
DN_OSSemaphore result = {};
|
|
DN_POSIXSyncPrimitive *primitive = DN_POSIX_AllocSyncPrimitive_();
|
|
if (primitive) {
|
|
int pshared = 0; // Share the semaphore across all threads in the process
|
|
if (sem_init(&primitive->sem, pshared, initial_count) == 0)
|
|
result.handle = DN_POSIX_SyncPrimitiveToU64(primitive);
|
|
else
|
|
DN_POSIX_DeallocSyncPrimitive_(primitive);
|
|
}
|
|
return result;
|
|
}
|
|
|
|
DN_API void DN_OS_SemaphoreDeinit(DN_OSSemaphore *semaphore)
|
|
{
|
|
if (semaphore && semaphore->handle != 0) {
|
|
DN_POSIXSyncPrimitive *primitive = DN_OS_U64ToPOSIXSyncPrimitive_(semaphore->handle);
|
|
sem_destroy(&primitive->sem);
|
|
DN_POSIX_DeallocSyncPrimitive_(primitive);
|
|
*semaphore = {};
|
|
}
|
|
}
|
|
|
|
DN_API void DN_OS_SemaphoreIncrement(DN_OSSemaphore *semaphore, DN_U32 amount)
|
|
{
|
|
if (semaphore && semaphore->handle != 0) {
|
|
DN_POSIXSyncPrimitive *primitive = DN_OS_U64ToPOSIXSyncPrimitive_(semaphore->handle);
|
|
#if defined(DN_OS_WIN32)
|
|
sem_post_multiple(&primitive->sem, amount); // mingw extension
|
|
#else
|
|
for (DN_ForIndexU(index, amount))
|
|
sem_post(&primitive->sem);
|
|
#endif // !defined(DN_OS_WIN32)
|
|
}
|
|
}
|
|
|
|
DN_API DN_OSSemaphoreWaitResult DN_OS_SemaphoreWait(DN_OSSemaphore *semaphore,
|
|
DN_U32 timeout_ms)
|
|
{
|
|
DN_OSSemaphoreWaitResult result = {};
|
|
if (!semaphore || semaphore->handle == 0)
|
|
return result;
|
|
|
|
DN_POSIXSyncPrimitive *primitive = DN_OS_U64ToPOSIXSyncPrimitive_(semaphore->handle);
|
|
if (timeout_ms == DN_OS_SEMAPHORE_INFINITE_TIMEOUT) {
|
|
int wait_result = 0;
|
|
do {
|
|
wait_result = sem_wait(&primitive->sem);
|
|
} while (wait_result == -1 && errno == EINTR);
|
|
|
|
if (wait_result == 0)
|
|
result = DN_OSSemaphoreWaitResult_Success;
|
|
} else {
|
|
DN_U64 now_ms = DN_OS_DateUnixTimeMs();
|
|
DN_U64 end_ts_ms = now_ms + timeout_ms;
|
|
|
|
struct timespec abs_timeout = {};
|
|
abs_timeout.tv_sec = end_ts_ms / 1'000;
|
|
abs_timeout.tv_nsec = 1'000'000 * (end_ts_ms - (end_ts_ms / 1'000) * 1'000);
|
|
if (sem_timedwait(&primitive->sem, &abs_timeout) == 0)
|
|
result = DN_OSSemaphoreWaitResult_Success;
|
|
else if (errno == ETIMEDOUT)
|
|
result = DN_OSSemaphoreWaitResult_Timeout;
|
|
}
|
|
return result;
|
|
}
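
// NOTE: Usage sketch (not part of the generated library): the semaphore above
// is a counting semaphore, so a producer posts with DN_OS_SemaphoreIncrement
// and a consumer blocks on DN_OS_SemaphoreWait; the wait-result enum values are
// the ones returned by the implementation above.
#if 0
static void ExampleSemaphoreHandshake()
{
    DN_OSSemaphore sem = DN_OS_SemaphoreInit(0 /*initial_count*/);

    // Producer side (typically another thread) signals that one item is ready
    DN_OS_SemaphoreIncrement(&sem, 1);

    // Consumer side blocks until the count is non-zero, or gives up after 100ms
    DN_OSSemaphoreWaitResult wait = DN_OS_SemaphoreWait(&sem, 100 /*timeout_ms*/);
    if (wait == DN_OSSemaphoreWaitResult_Success) {
        // ... consume the item ...
    }

    DN_OS_SemaphoreDeinit(&sem);
}
#endif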
|
|
|
|
// NOTE: DN_OSMutex ////////////////////////////////////////////////////////////////////////////////
|
|
DN_API DN_OSMutex DN_OS_MutexInit()
|
|
{
|
|
DN_POSIXSyncPrimitive *primitive = DN_POSIX_AllocSyncPrimitive_();
|
|
DN_OSMutex result = {};
|
|
if (primitive) {
|
|
if (pthread_mutex_init(&primitive->mutex, nullptr) == 0)
|
|
result.handle = DN_POSIX_SyncPrimitiveToU64(primitive);
|
|
else
|
|
DN_POSIX_DeallocSyncPrimitive_(primitive);
|
|
}
|
|
return result;
|
|
}
|
|
|
|
DN_API void DN_OS_MutexDeinit(DN_OSMutex *mutex)
|
|
{
|
|
if (mutex && mutex->handle != 0) {
|
|
DN_POSIXSyncPrimitive *primitive = DN_OS_U64ToPOSIXSyncPrimitive_(mutex->handle);
|
|
pthread_mutex_destroy(&primitive->mutex);
|
|
DN_POSIX_DeallocSyncPrimitive_(primitive);
|
|
*mutex = {};
|
|
}
|
|
}
|
|
|
|
DN_API void DN_OS_MutexLock(DN_OSMutex *mutex)
|
|
{
|
|
if (mutex && mutex->handle != 0) {
|
|
DN_POSIXSyncPrimitive *primitive = DN_OS_U64ToPOSIXSyncPrimitive_(mutex->handle);
|
|
pthread_mutex_lock(&primitive->mutex);
|
|
}
|
|
}
|
|
|
|
DN_API void DN_OS_MutexUnlock(DN_OSMutex *mutex)
|
|
{
|
|
if (mutex && mutex->handle != 0) {
|
|
DN_POSIXSyncPrimitive *primitive = DN_OS_U64ToPOSIXSyncPrimitive_(mutex->handle);
|
|
pthread_mutex_unlock(&primitive->mutex);
|
|
}
|
|
}
|
|
|
|
DN_API DN_OSConditionVariable DN_OS_ConditionVariableInit()
|
|
{
|
|
DN_POSIXSyncPrimitive *primitive = DN_POSIX_AllocSyncPrimitive_();
|
|
DN_OSConditionVariable result = {};
|
|
if (primitive) {
|
|
if (pthread_cond_init(&primitive->cv, nullptr) == 0)
|
|
result.handle = DN_POSIX_SyncPrimitiveToU64(primitive);
|
|
else
|
|
DN_POSIX_DeallocSyncPrimitive_(primitive);
|
|
}
|
|
return result;
|
|
}
|
|
|
|
DN_API void DN_OS_ConditionVariableDeinit(DN_OSConditionVariable *cv)
|
|
{
|
|
if (cv && cv->handle != 0) {
|
|
DN_POSIXSyncPrimitive *primitive = DN_OS_U64ToPOSIXSyncPrimitive_(cv->handle);
|
|
pthread_cond_destroy(&primitive->cv);
|
|
DN_POSIX_DeallocSyncPrimitive_(primitive);
|
|
*cv = {};
|
|
}
|
|
}
|
|
|
|
DN_API bool DN_OS_ConditionVariableWaitUntil(DN_OSConditionVariable *cv, DN_OSMutex *mutex, DN_U64 end_ts_ms)
|
|
{
|
|
bool result = false;
|
|
if (cv && mutex && mutex->handle != 0 && cv->handle != 0) {
|
|
DN_POSIXSyncPrimitive *cv_primitive = DN_OS_U64ToPOSIXSyncPrimitive_(cv->handle);
|
|
DN_POSIXSyncPrimitive *mutex_primitive = DN_OS_U64ToPOSIXSyncPrimitive_(mutex->handle);
|
|
|
|
struct timespec time = {};
|
|
time.tv_sec = end_ts_ms / 1'000;
|
|
time.tv_nsec = 1'000'000 * (end_ts_ms - (end_ts_ms / 1'000) * 1'000);
|
|
int wait_result = pthread_cond_timedwait(&cv_primitive->cv, &mutex_primitive->mutex, &time);
|
|
result = (wait_result != ETIMEDOUT);
|
|
}
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_OS_ConditionVariableWait(DN_OSConditionVariable *cv, DN_OSMutex *mutex, DN_U64 sleep_ms)
|
|
{
|
|
DN_U64 end_ts_ms = DN_OS_DateUnixTimeMs() + sleep_ms;
|
|
bool result = DN_OS_ConditionVariableWaitUntil(cv, mutex, end_ts_ms);
|
|
return result;
|
|
}
|
|
|
|
DN_API void DN_OS_ConditionVariableSignal(DN_OSConditionVariable *cv)
|
|
{
|
|
if (cv && cv->handle != 0) {
|
|
DN_POSIXSyncPrimitive *primitive = DN_OS_U64ToPOSIXSyncPrimitive_(cv->handle);
|
|
pthread_cond_signal(&primitive->cv);
|
|
}
|
|
}
|
|
|
|
DN_API void DN_OS_ConditionVariableBroadcast(DN_OSConditionVariable *cv)
|
|
{
|
|
if (cv && cv->handle != 0) {
|
|
DN_POSIXSyncPrimitive *primitive = DN_OS_U64ToPOSIXSyncPrimitive_(cv->handle);
|
|
pthread_cond_broadcast(&primitive->cv);
|
|
}
|
|
}
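
// NOTE: Usage sketch (not part of the generated library): the condition
// variable wrappers above follow the standard pthread discipline of waiting on
// a predicate in a loop while holding the paired mutex, because both timeouts
// and spurious wakeups can return before the predicate is true. `g_work_ready`,
// `g_work_mutex` and `g_work_cv` are hypothetical shared state, assumed to be
// initialised via DN_OS_MutexInit()/DN_OS_ConditionVariableInit() beforehand.
#if 0
static bool                   g_work_ready;
static DN_OSMutex             g_work_mutex;
static DN_OSConditionVariable g_work_cv;

static void ExampleWaitForWork()
{
    DN_OS_MutexLock(&g_work_mutex);
    while (!g_work_ready)
        DN_OS_ConditionVariableWait(&g_work_cv, &g_work_mutex, 100 /*sleep_ms*/);
    g_work_ready = false; // Consume the work item
    DN_OS_MutexUnlock(&g_work_mutex);
}

static void ExamplePublishWork()
{
    DN_OS_MutexLock(&g_work_mutex);
    g_work_ready = true;
    DN_OS_MutexUnlock(&g_work_mutex);
    DN_OS_ConditionVariableSignal(&g_work_cv);
}
#endif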
|
|
|
|
// NOTE: DN_OSThread ///////////////////////////////////////////////////////////////////////////////
|
|
static void *DN_OS_ThreadFunc_(void *user_context)
|
|
{
|
|
DN_OS_ThreadExecute_(user_context);
|
|
return nullptr;
|
|
}
|
|
|
|
DN_API bool DN_OS_ThreadInit(DN_OSThread *thread, DN_OSThreadFunc *func, void *user_context)
|
|
{
|
|
bool result = false;
|
|
if (!thread)
|
|
return result;
|
|
|
|
thread->func = func;
|
|
thread->user_context = user_context;
|
|
thread->init_semaphore = DN_OS_SemaphoreInit(0 /*initial_count*/);
|
|
|
|
// TODO(doyle): Check if semaphore is valid
|
|
// NOTE: pthread_t is essentially the thread ID. In Windows, the handle and
|
|
    // the ID are different things. For pthreads we just duplicate the
    // thread ID into both variables.
|
|
pthread_t p_thread = {};
|
|
static_assert(sizeof(p_thread) <= sizeof(thread->handle),
|
|
"We store the thread handle opaquely in our abstraction, "
|
|
"there must be enough bytes to store pthread's structure");
|
|
static_assert(sizeof(p_thread) <= sizeof(thread->thread_id),
|
|
"We store the thread handle opaquely in our abstraction, "
|
|
"there must be enough bytes to store pthread's structure");
|
|
|
|
pthread_attr_t attribs = {};
|
|
pthread_attr_init(&attribs);
|
|
result = pthread_create(&p_thread, &attribs, DN_OS_ThreadFunc_, thread) == 0;
|
|
pthread_attr_destroy(&attribs);
|
|
|
|
if (result) {
|
|
DN_Memcpy(&thread->handle, &p_thread, sizeof(p_thread));
|
|
DN_Memcpy(&thread->thread_id, &p_thread, sizeof(p_thread));
|
|
}
|
|
|
|
if (result) {
|
|
DN_OS_SemaphoreIncrement(&thread->init_semaphore, 1);
|
|
} else {
|
|
DN_OS_SemaphoreDeinit(&thread->init_semaphore);
|
|
*thread = {};
|
|
}
|
|
|
|
return result;
|
|
}
|
|
|
|
DN_API void DN_OS_ThreadDeinit(DN_OSThread *thread)
|
|
{
|
|
if (!thread || !thread->handle)
|
|
return;
|
|
|
|
pthread_t thread_id = {};
|
|
DN_Memcpy(&thread_id, &thread->thread_id, sizeof(thread_id));
|
|
|
|
void *return_val = nullptr;
|
|
pthread_join(thread_id, &return_val);
|
|
thread->handle = {};
|
|
thread->thread_id = {};
|
|
}
|
|
|
|
DN_API DN_U32 DN_OS_ThreadID()
|
|
{
|
|
pid_t result = gettid();
|
|
    DN_Assert(result >= 0);
|
|
return DN_CAST(DN_U32) result;
|
|
}
|
|
|
|
DN_API void DN_Posix_ThreadSetName(DN_Str8 name)
|
|
{
|
|
DN_OSTLSTMem tmem = DN_OS_TLSPushTMem(nullptr);
|
|
DN_Str8 copy = DN_Str8_Copy(tmem.arena, name);
|
|
pthread_t thread = pthread_self();
|
|
pthread_setname_np(thread, (char *)copy.data);
|
|
}
|
|
|
|
DN_API DN_POSIXProcSelfStatus DN_Posix_ProcSelfStatus()
|
|
{
|
|
DN_POSIXProcSelfStatus result = {};
|
|
|
|
// NOTE: Example
|
|
//
|
|
// ...
|
|
// VmPeak: 3352 kB
|
|
// VmSize: 3352 kB
|
|
// VmLck: 0 kB
|
|
// ...
|
|
//
|
|
// VmSize is the total virtual memory used
|
|
DN_OSFile file = DN_OS_FileOpen(DN_STR8("/proc/self/status"), DN_OSFileOpen_OpenIfExist, DN_OSFileAccess_Read, nullptr);
|
|
DN_OSTLSTMem tmem = DN_OS_TLSPushTMem(nullptr);
|
|
|
|
if (!file.error) {
|
|
char buf[256];
|
|
DN_Str8Builder builder = DN_Str8Builder_InitFromTLS();
|
|
for (;;) {
|
|
DN_OSFileRead read = DN_OS_FileRead(&file, buf, sizeof(buf), nullptr);
|
|
if (!read.success || read.bytes_read == 0)
|
|
break;
|
|
DN_Str8Builder_AppendF(&builder, "%.*s", DN_CAST(int)read.bytes_read, buf);
|
|
}
|
|
|
|
DN_Str8 const NAME = DN_STR8("Name:");
|
|
DN_Str8 const PID = DN_STR8("Pid:");
|
|
DN_Str8 const VM_PEAK = DN_STR8("VmPeak:");
|
|
DN_Str8 const VM_SIZE = DN_STR8("VmSize:");
|
|
DN_Str8 status_buf = DN_Str8Builder_BuildFromTLS(&builder);
|
|
DN_Slice<DN_Str8> lines = DN_Str8_SplitAllocFromTLS(status_buf, DN_STR8("\n"), DN_Str8SplitIncludeEmptyStrings_No);
|
|
|
|
for (DN_ForIt(line_it, DN_Str8, &lines)) {
|
|
DN_Str8 line = DN_Str8_TrimWhitespaceAround(*line_it.data);
|
|
if (DN_Str8_StartsWith(line, NAME, DN_Str8EqCase_Insensitive)) {
|
|
DN_Str8 str8 = DN_Str8_TrimWhitespaceAround(DN_Str8_Slice(line, NAME.size, line.size));
|
|
result.name_size = DN_Min(str8.size, sizeof(result.name));
|
|
DN_Memcpy(result.name, str8.data, result.name_size);
|
|
} else if (DN_Str8_StartsWith(line, PID, DN_Str8EqCase_Insensitive)) {
|
|
DN_Str8 str8 = DN_Str8_TrimWhitespaceAround(DN_Str8_Slice(line, PID.size, line.size));
|
|
DN_Str8ToU64Result to_u64 = DN_Str8_ToU64(str8, 0);
|
|
result.pid = to_u64.value;
|
|
DN_Assert(to_u64.success);
|
|
} else if (DN_Str8_StartsWith(line, VM_SIZE, DN_Str8EqCase_Insensitive)) {
|
|
DN_Str8 size_with_kb = DN_Str8_TrimWhitespaceAround(DN_Str8_Slice(line, VM_SIZE.size, line.size));
|
|
DN_Assert(DN_Str8_EndsWith(size_with_kb, DN_STR8("kB")));
|
|
DN_Str8 vm_size = DN_Str8_BinarySplit(size_with_kb, DN_STR8(" ")).lhs;
|
|
DN_Str8ToU64Result to_u64 = DN_Str8_ToU64(vm_size, 0);
|
|
result.vm_size = DN_Kilobytes(to_u64.value);
|
|
DN_Assert(to_u64.success);
|
|
} else if (DN_Str8_StartsWith(line, VM_PEAK, DN_Str8EqCase_Insensitive)) {
|
|
DN_Str8 size_with_kb = DN_Str8_TrimWhitespaceAround(DN_Str8_Slice(line, VM_PEAK.size, line.size));
|
|
DN_Assert(DN_Str8_EndsWith(size_with_kb, DN_STR8("kB")));
|
|
DN_Str8 vm_size = DN_Str8_BinarySplit(size_with_kb, DN_STR8(" ")).lhs;
|
|
DN_Str8ToU64Result to_u64 = DN_Str8_ToU64(vm_size, 0);
|
|
result.vm_peak = DN_Kilobytes(to_u64.value);
|
|
DN_Assert(to_u64.success);
|
|
}
|
|
}
|
|
}
|
|
DN_OS_FileClose(&file);
|
|
return result;
|
|
}
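
// NOTE: Illustrative sketch (not part of the generated library): the routine
// above slurps /proc/self/status and scans it line-by-line for the "VmPeak:"
// and "VmSize:" fields, which the kernel reports in kB. The same idea in plain
// libc, assuming <stdio.h>, looks like this.
#if 0
static unsigned long ExampleVmSizeBytes()
{
    unsigned long result = 0;
    FILE *file = fopen("/proc/self/status", "r");
    if (!file)
        return result;

    char line[256];
    while (fgets(line, sizeof(line), file)) {
        unsigned long kb = 0;
        if (sscanf(line, "VmSize: %lu kB", &kb) == 1) { // e.g. "VmSize:     3352 kB"
            result = kb * 1024;
            break;
        }
    }
    fclose(file);
    return result;
}
#endif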
|
|
|
|
// NOTE: DN_OSHttp /////////////////////////////////////////////////////////////////////////////////
|
|
#if 0 // TODO(doyle): Implement websockets for Windows and Emscripten
|
|
static EM_BOOL EMWebSocketOnOpenCallback(int type, const EmscriptenWebSocketOpenEvent *event, void *user_context)
|
|
{
|
|
(void)user_context;
|
|
(void)type;
|
|
(void)event;
|
|
// EMSCRIPTEN_RESULT result = emscripten_websocket_send_utf8_text(event->socket, R"({"jsonrpc":"2.0","id":1,"method": "eth_subscribe","params":["newHeads"]})");
|
|
// if (result)
|
|
// DN_LOG_InfoF("Failed to emscripten_websocket_send_utf8_text(): %d\n", result);
|
|
return EM_TRUE;
|
|
}
|
|
|
|
static EM_BOOL EMWebSocketOnMsgCallback(int type, const EmscriptenWebSocketMessageEvent *event __attribute__((nonnull)), void *user_context)
|
|
{
|
|
(void)type;
|
|
(void)user_context;
|
|
(void)event;
|
|
if (event->isText) {
|
|
DN_LOG_InfoF("Received: %.*s", event->numBytes, event->data);
|
|
} else {
|
|
DN_LOG_InfoF("Received: %d bytes", event->numBytes);
|
|
}
|
|
return EM_TRUE;
|
|
}
|
|
|
|
static EM_BOOL EMWebSocketOnErrorCallback(int type, const EmscriptenWebSocketErrorEvent *event, void *user_context)
|
|
{
|
|
(void)user_context;
|
|
(void)type;
|
|
(void)event;
|
|
return EM_TRUE;
|
|
}
|
|
|
|
static EM_BOOL EMWebSocketOnCloseCallback(int type, const EmscriptenWebSocketCloseEvent *event, void *user_context)
|
|
{
|
|
(void)user_context;
|
|
(void)type;
|
|
(void)event;
|
|
return EM_TRUE;
|
|
}
|
|
#endif
|
|
|
|
#if defined(DN_PLATFORM_EMSCRIPTEN)
|
|
static void DN_OS_HttpRequestEMFetchOnSuccessCallback(emscripten_fetch_t *fetch)
|
|
{
|
|
DN_OSHttpResponse *response = DN_CAST(DN_OSHttpResponse *) fetch->userData;
|
|
if (!DN_Check(response))
|
|
return;
|
|
|
|
response->http_status = DN_CAST(DN_U32) fetch->status;
|
|
response->body = DN_Str8_Alloc(response->arena, fetch->numBytes, DN_ZeroMem_No);
|
|
if (response->body.data)
|
|
DN_Memcpy(response->body.data, fetch->data, fetch->numBytes);
|
|
|
|
DN_OS_SemaphoreIncrement(&response->on_complete_semaphore, 1);
|
|
DN_Atomic_AddU32(&response->done, 1);
|
|
}
|
|
|
|
static void DN_OS_HttpRequestEMFetchOnErrorCallback(emscripten_fetch_t *fetch)
|
|
{
|
|
DN_OSHttpResponse *response = DN_CAST(DN_OSHttpResponse *) fetch->userData;
|
|
if (!DN_Check(response))
|
|
return;
|
|
|
|
response->http_status = DN_CAST(DN_U32) fetch->status;
|
|
response->body = DN_Str8_Alloc(response->arena, fetch->numBytes, DN_ZeroMem_No);
|
|
if (response->body.size)
|
|
DN_Memcpy(response->body.data, fetch->data, fetch->numBytes);
|
|
|
|
DN_OS_SemaphoreIncrement(&response->on_complete_semaphore, 1);
|
|
DN_Atomic_AddU32(&response->done, 1);
|
|
}
|
|
#endif
|
|
|
|
DN_API void DN_OS_HttpRequestAsync(DN_OSHttpResponse *response,
|
|
DN_Arena *arena,
|
|
DN_Str8 host,
|
|
DN_Str8 path,
|
|
DN_OSHttpRequestSecure secure,
|
|
DN_Str8 method,
|
|
DN_Str8 body,
|
|
DN_Str8 headers)
|
|
{
|
|
if (!response || !arena)
|
|
return;
|
|
|
|
response->arena = arena;
|
|
response->builder.arena =
|
|
response->tmem_arena ? response->tmem_arena : &response->tmp_arena;
|
|
|
|
DN_Arena *tmem = response->tmem_arena;
|
|
DN_OSTLSTMem tmem_ = DN_OS_TLSTMem(arena);
|
|
if (!tmem)
|
|
tmem = tmem_.arena;
|
|
|
|
#if defined(DN_PLATFORM_EMSCRIPTEN)
|
|
emscripten_fetch_attr_t fetch_attribs = {};
|
|
emscripten_fetch_attr_init(&fetch_attribs);
|
|
|
|
if (method.size >= sizeof(fetch_attribs.requestMethod)) {
|
|
response->error_msg =
|
|
DN_Str8_InitF(arena,
|
|
"Request method in EM has a size limit of 31 characters, method was "
|
|
"'%.*s' which is %zu characters long",
|
|
DN_STR_FMT(method),
|
|
method.size);
|
|
DN_CheckF(method.size < sizeof(fetch_attribs.requestMethod),
|
|
"%.*s",
|
|
DN_STR_FMT(response->error_msg));
|
|
response->error_code = DN_CAST(DN_U32) - 1;
|
|
DN_Atomic_AddU32(&response->done, 1);
|
|
return;
|
|
}
|
|
|
|
DN_Memcpy(fetch_attribs.requestMethod, method.data, method.size);
|
|
|
|
fetch_attribs.requestData = body.data;
|
|
fetch_attribs.requestDataSize = body.size;
|
|
fetch_attribs.attributes = EMSCRIPTEN_FETCH_LOAD_TO_MEMORY;
|
|
fetch_attribs.onsuccess = DN_OS_HttpRequestEMFetchOnSuccessCallback;
|
|
fetch_attribs.onerror = DN_OS_HttpRequestEMFetchOnErrorCallback;
|
|
fetch_attribs.userData = response;
|
|
|
|
    DN_Str8 url = DN_Str8_InitF(tmem, "%.*s%.*s", DN_STR_FMT(host), DN_STR_FMT(path));
|
|
DN_LOG_InfoF("Initiating HTTP '%s' request to '%.*s' with payload '%.*s'",
|
|
fetch_attribs.requestMethod,
|
|
DN_STR_FMT(url),
|
|
DN_STR_FMT(body));
|
|
response->on_complete_semaphore = DN_OS_SemaphoreInit(0);
|
|
response->em_handle = emscripten_fetch(&fetch_attribs, url.data);
|
|
#else // #elif defined(DN_OS_WIN32)
|
|
DN_InvalidCodePathF("Unimplemented function");
|
|
#endif
|
|
}
|
|
|
|
DN_API void DN_OS_HttpRequestFree(DN_OSHttpResponse *response)
|
|
{
|
|
// NOTE: Cleanup
|
|
#if defined(DN_PLATFORM_EMSCRIPTEN)
|
|
if (response->em_handle) {
|
|
emscripten_fetch_close(response->em_handle);
|
|
response->em_handle = nullptr;
|
|
}
|
|
#endif // #elif defined(DN_OS_WIN32)
|
|
|
|
DN_Arena_Deinit(&response->tmp_arena);
|
|
DN_OS_SemaphoreDeinit(&response->on_complete_semaphore);
|
|
*response = {};
|
|
}
|
|
#elif defined(DN_PLATFORM_WIN32)
|
|
// DN: Single header generator inlined this file => #include "OS/dn_os_w32.cpp"
|
|
#define DN_OS_WIN32_CPP
|
|
|
|
/*
|
|
////////////////////////////////////////////////////////////////////////////////////////////////////
|
|
//
|
|
// $$$$$$\ $$$$$$\ $$\ $$\ $$$$$$\ $$\ $$\ $$$$$$\ $$$$$$\
|
|
// $$ __$$\ $$ __$$\ $$ | $\ $$ |\_$$ _|$$$\ $$ |$$ ___$$\ $$ __$$\
|
|
// $$ / $$ |$$ / \__| $$ |$$$\ $$ | $$ | $$$$\ $$ |\_/ $$ |\__/ $$ |
|
|
// $$ | $$ |\$$$$$$\ $$ $$ $$\$$ | $$ | $$ $$\$$ | $$$$$ / $$$$$$ |
|
|
// $$ | $$ | \____$$\ $$$$ _$$$$ | $$ | $$ \$$$$ | \___$$\ $$ ____/
|
|
// $$ | $$ |$$\ $$ | $$$ / \$$$ | $$ | $$ |\$$$ |$$\ $$ |$$ |
|
|
// $$$$$$ |\$$$$$$ | $$ / \$$ |$$$$$$\ $$ | \$$ |\$$$$$$ |$$$$$$$$\
|
|
// \______/ \______/ \__/ \__|\______|\__| \__| \______/ \________|
|
|
//
|
|
// dn_os_w32.cpp
|
|
//
|
|
////////////////////////////////////////////////////////////////////////////////////////////////////
|
|
*/
|
|
|
|
// NOTE: DN_Mem ///////////////////////////////////////////////////////////////////////////
|
|
static DN_U32 DN_OS_MemConvertPageToOSFlags_(DN_U32 protect)
|
|
{
|
|
DN_Assert((protect & ~DN_MemPage_All) == 0);
|
|
DN_Assert(protect != 0);
|
|
DN_U32 result = 0;
|
|
|
|
if (protect & DN_MemPage_NoAccess) {
|
|
result = PAGE_NOACCESS;
|
|
} else if (protect & DN_MemPage_ReadWrite) {
|
|
result = PAGE_READWRITE;
|
|
} else if (protect & DN_MemPage_Read) {
|
|
result = PAGE_READONLY;
|
|
} else if (protect & DN_MemPage_Write) {
|
|
DN_LOG_WarningF("Windows does not support write-only pages, granting read+write access");
|
|
result = PAGE_READWRITE;
|
|
}
|
|
|
|
if (protect & DN_MemPage_Guard)
|
|
result |= PAGE_GUARD;
|
|
|
|
DN_AssertF(result != PAGE_GUARD, "Page guard is a modifier, you must also specify a page permission like read or/and write");
|
|
return result;
|
|
}
|
|
|
|
DN_API void *DN_OS_MemReserve(DN_USize size, DN_MemCommit commit, DN_U32 page_flags)
|
|
{
|
|
unsigned long os_page_flags = DN_OS_MemConvertPageToOSFlags_(page_flags);
|
|
unsigned long flags = MEM_RESERVE;
|
|
if (commit == DN_MemCommit_Yes)
|
|
flags |= MEM_COMMIT;
|
|
|
|
void *result = VirtualAlloc(nullptr, size, flags, os_page_flags);
|
|
if (flags & MEM_COMMIT) {
|
|
DN_Assert(g_dn_os_core_);
|
|
DN_Atomic_AddU64(&g_dn_os_core_->vmem_allocs_total, 1);
|
|
DN_Atomic_AddU64(&g_dn_os_core_->vmem_allocs_frame, 1);
|
|
}
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_OS_MemCommit(void *ptr, DN_USize size, DN_U32 page_flags)
|
|
{
|
|
bool result = false;
|
|
if (!ptr || size == 0)
|
|
return false;
|
|
unsigned long os_page_flags = DN_OS_MemConvertPageToOSFlags_(page_flags);
|
|
result = VirtualAlloc(ptr, size, MEM_COMMIT, os_page_flags) != nullptr;
|
|
DN_Assert(g_dn_os_core_);
|
|
DN_Atomic_AddU64(&g_dn_os_core_->vmem_allocs_total, 1);
|
|
DN_Atomic_AddU64(&g_dn_os_core_->vmem_allocs_frame, 1);
|
|
return result;
|
|
}
|
|
|
|
DN_API void DN_OS_MemDecommit(void *ptr, DN_USize size)
|
|
{
|
|
// NOTE: This is a decommit call, which is explicitly saying to free the
|
|
// pages but not the address space, you would use OS_MemRelease to release
|
|
// everything.
|
|
DN_MSVC_WARNING_PUSH
|
|
DN_MSVC_WARNING_DISABLE(6250) // Calling 'VirtualFree' without the MEM_RELEASE flag might free memory but not address descriptors (VADs). This causes address space leaks.
|
|
VirtualFree(ptr, size, MEM_DECOMMIT);
|
|
DN_MSVC_WARNING_POP
|
|
}
|
|
|
|
DN_API void DN_OS_MemRelease(void *ptr, DN_USize size)
|
|
{
|
|
(void)size;
|
|
VirtualFree(ptr, 0, MEM_RELEASE);
|
|
}
|
|
|
|
DN_API int DN_OS_MemProtect(void *ptr, DN_USize size, DN_U32 page_flags)
|
|
{
|
|
if (!ptr || size == 0)
|
|
return 0;
|
|
|
|
static DN_Str8 const ALIGNMENT_ERROR_MSG =
|
|
DN_STR8("Page protection requires pointers to be page aligned because we can only guard memory at a multiple of the page boundary.");
|
|
DN_AssertF(DN_IsPowerOfTwoAligned(DN_CAST(uintptr_t) ptr, g_dn_os_core_->page_size), "%s", ALIGNMENT_ERROR_MSG.data);
|
|
DN_AssertF(DN_IsPowerOfTwoAligned(size, g_dn_os_core_->page_size), "%s", ALIGNMENT_ERROR_MSG.data);
|
|
|
|
unsigned long os_page_flags = DN_OS_MemConvertPageToOSFlags_(page_flags);
|
|
unsigned long prev_flags = 0;
|
|
int result = VirtualProtect(ptr, size, os_page_flags, &prev_flags);
|
|
|
|
(void)prev_flags;
|
|
if (result == 0)
|
|
DN_AssertF(result, "VirtualProtect failed");
|
|
return result;
|
|
}
|
|
|
|
DN_API void *DN_OS_MemAlloc(DN_USize size, DN_ZeroMem zero_mem)
|
|
{
|
|
DN_U32 flags = zero_mem == DN_ZeroMem_Yes ? HEAP_ZERO_MEMORY : 0;
|
|
DN_Assert(size <= DN_CAST(DWORD)(-1));
|
|
void *result = HeapAlloc(GetProcessHeap(), flags, DN_CAST(DWORD) size);
|
|
DN_Assert(g_dn_os_core_);
|
|
DN_Atomic_AddU64(&g_dn_os_core_->mem_allocs_total, 1);
|
|
DN_Atomic_AddU64(&g_dn_os_core_->mem_allocs_frame, 1);
|
|
return result;
|
|
}
|
|
|
|
DN_API void DN_OS_MemDealloc(void *ptr)
|
|
{
|
|
HeapFree(GetProcessHeap(), 0, ptr);
|
|
}
|
|
|
|
// NOTE: Date //////////////////////////////////////////////////////////////////////////////////////
|
|
DN_API DN_OSDateTime DN_OS_DateLocalTimeNow()
|
|
{
|
|
SYSTEMTIME sys_time;
|
|
GetLocalTime(&sys_time);
|
|
|
|
DN_OSDateTime result = {};
|
|
result.hour = DN_CAST(uint8_t) sys_time.wHour;
|
|
result.minutes = DN_CAST(uint8_t) sys_time.wMinute;
|
|
result.seconds = DN_CAST(uint8_t) sys_time.wSecond;
|
|
result.day = DN_CAST(uint8_t) sys_time.wDay;
|
|
result.month = DN_CAST(uint8_t) sys_time.wMonth;
|
|
result.year = DN_CAST(int16_t) sys_time.wYear;
|
|
return result;
|
|
}
|
|
|
|
const DN_U64 DN_OS_WIN32_UNIX_TIME_START = 0x019DB1DED53E8000; // January 1, 1970 (start of Unix epoch) in "ticks"
|
|
const DN_U64 DN_OS_WIN32_FILE_TIME_TICKS_PER_SECOND = 10'000'000; // Filetime returned is in intervals of 100 nanoseconds
|
|
|
|
DN_API DN_U64 DN_OS_DateUnixTimeNs()
|
|
{
|
|
FILETIME file_time;
|
|
GetSystemTimeAsFileTime(&file_time);
|
|
|
|
    // NOTE: The FILETIME returned is in intervals of 100 nanoseconds so we
|
|
// multiply by 100 to get nanoseconds.
|
|
LARGE_INTEGER date_time;
|
|
date_time.u.LowPart = file_time.dwLowDateTime;
|
|
date_time.u.HighPart = file_time.dwHighDateTime;
|
|
DN_U64 result = (date_time.QuadPart - DN_OS_WIN32_UNIX_TIME_START) * 100;
|
|
return result;
|
|
}
|
|
|
|
static SYSTEMTIME DN_OS_DateToSystemTime_(DN_OSDateTime date)
|
|
{
|
|
SYSTEMTIME result = {};
|
|
result.wYear = date.year;
|
|
result.wMonth = date.month;
|
|
result.wDay = date.day;
|
|
result.wHour = date.hour;
|
|
result.wMinute = date.minutes;
|
|
result.wSecond = date.seconds;
|
|
return result;
|
|
}
|
|
|
|
static DN_U64 DN_OS_SystemTimeToUnixTimeS_(SYSTEMTIME *sys_time)
|
|
{
|
|
FILETIME file_time = {};
|
|
SystemTimeToFileTime(sys_time, &file_time);
|
|
|
|
LARGE_INTEGER date_time;
|
|
date_time.u.LowPart = file_time.dwLowDateTime;
|
|
date_time.u.HighPart = file_time.dwHighDateTime;
|
|
DN_U64 result = (date_time.QuadPart - DN_OS_WIN32_UNIX_TIME_START) / DN_OS_WIN32_FILE_TIME_TICKS_PER_SECOND;
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_U64 DN_OS_DateLocalToUnixTimeS(DN_OSDateTime date)
|
|
{
|
|
SYSTEMTIME local_time = DN_OS_DateToSystemTime_(date);
|
|
SYSTEMTIME sys_time = {};
|
|
TzSpecificLocalTimeToSystemTime(nullptr, &local_time, &sys_time);
|
|
DN_U64 result = DN_OS_SystemTimeToUnixTimeS_(&sys_time);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_U64 DN_OS_DateToUnixTimeS(DN_OSDateTime date)
|
|
{
|
|
DN_Assert(DN_OS_DateIsValid(date));
|
|
|
|
SYSTEMTIME sys_time = DN_OS_DateToSystemTime_(date);
|
|
DN_U64 result = DN_OS_SystemTimeToUnixTimeS_(&sys_time);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_OSDateTime DN_OS_DateUnixTimeSToDate(DN_U64 time)
|
|
{
|
|
// NOTE: Windows epoch time starts from Jan 1, 1601 and counts in
|
|
// 100-nanoseconds intervals.
|
|
//
|
|
// See: https://devblogs.microsoft.com/oldnewthing/20090306-00/?p=18913
|
|
|
|
DN_U64 w32_time = 116'444'736'000'000'000 + (time * 10'000'000);
|
|
SYSTEMTIME sys_time = {};
|
|
FILETIME file_time = {};
|
|
file_time.dwLowDateTime = (DWORD)w32_time;
|
|
file_time.dwHighDateTime = w32_time >> 32;
|
|
FileTimeToSystemTime(&file_time, &sys_time);
|
|
|
|
DN_OSDateTime result = {};
|
|
result.year = DN_CAST(uint16_t) sys_time.wYear;
|
|
result.month = DN_CAST(uint8_t) sys_time.wMonth;
|
|
result.day = DN_CAST(uint8_t) sys_time.wDay;
|
|
result.hour = DN_CAST(uint8_t) sys_time.wHour;
|
|
result.minutes = DN_CAST(uint8_t) sys_time.wMinute;
|
|
result.seconds = DN_CAST(uint8_t) sys_time.wSecond;
|
|
return result;
|
|
}
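
// NOTE: Worked example (not part of the generated library): FILETIME counts
// 100ns ticks since January 1, 1601, so the conversions above hinge on two
// constants, the tick offset of the Unix epoch (116'444'736'000'000'000, i.e.
// DN_OS_WIN32_UNIX_TIME_START) and the 10'000'000 ticks per second. A quick
// round-trip check of that arithmetic:
#if 0
static void ExampleFileTimeRoundTrip()
{
    DN_U64 const TICKS_PER_S         = 10'000'000;
    DN_U64 const UNIX_EPOCH_IN_TICKS = 116'444'736'000'000'000;

    DN_U64 unix_s = 1'000'000'000;                               // 2001-09-09T01:46:40Z
    DN_U64 ticks  = UNIX_EPOCH_IN_TICKS + unix_s * TICKS_PER_S;  // FILETIME equivalent
    DN_U64 back   = (ticks - UNIX_EPOCH_IN_TICKS) / TICKS_PER_S; // Recovers 1'000'000'000
    DN_Assert(back == unix_s);
}
#endif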
|
|
|
|
DN_API bool DN_OS_SecureRNGBytes(void *buffer, DN_U32 size)
|
|
{
|
|
DN_Assert(g_dn_os_core_);
|
|
DN_W32Core *w32 = DN_CAST(DN_W32Core *) g_dn_os_core_->platform_context;
|
|
|
|
if (!buffer || size < 0 || !w32->bcrypt_init_success)
|
|
return false;
|
|
|
|
if (size == 0)
|
|
return true;
|
|
|
|
long gen_status = BCryptGenRandom(w32->bcrypt_rng_handle, DN_CAST(unsigned char *) buffer, size, 0 /*flags*/);
|
|
if (gen_status != 0) {
|
|
DN_LOG_ErrorF("Failed to generate random bytes: %d", gen_status);
|
|
return false;
|
|
}
|
|
|
|
return true;
|
|
}
|
|
|
|
DN_API DN_OSDiskSpace DN_OS_DiskSpace(DN_Str8 path)
|
|
{
|
|
DN_OSTLSTMem tmem = DN_OS_TLSPushTMem(nullptr);
|
|
DN_OSDiskSpace result = {};
|
|
DN_Str16 path16 = DN_W32_Str8ToStr16(tmem.arena, path);
|
|
|
|
ULARGE_INTEGER free_bytes_avail_to_caller;
|
|
ULARGE_INTEGER total_number_of_bytes;
|
|
ULARGE_INTEGER total_number_of_free_bytes;
|
|
if (!GetDiskFreeSpaceExW(path16.data,
|
|
&free_bytes_avail_to_caller,
|
|
&total_number_of_bytes,
|
|
&total_number_of_free_bytes))
|
|
return result;
|
|
|
|
result.success = true;
|
|
result.avail = free_bytes_avail_to_caller.QuadPart;
|
|
result.size = total_number_of_bytes.QuadPart;
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_OS_SetEnvVar(DN_Str8 name, DN_Str8 value)
|
|
{
|
|
DN_OSTLSTMem tmem = DN_OS_TLSPushTMem(nullptr);
|
|
DN_Str16 name16 = DN_W32_Str8ToStr16(tmem.arena, name);
|
|
DN_Str16 value16 = DN_W32_Str8ToStr16(tmem.arena, value);
|
|
bool result = SetEnvironmentVariableW(name16.data, value16.data) != 0;
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_OS_EXEPath(DN_Arena *arena)
|
|
{
|
|
DN_Str8 result = {};
|
|
if (!arena)
|
|
return result;
|
|
DN_OSTLSTMem tmem = DN_OS_TLSTMem(arena);
|
|
DN_Str16 exe_dir16 = DN_W32_EXEPathW(tmem.arena);
|
|
result = DN_W32_Str16ToStr8(arena, exe_dir16);
|
|
return result;
|
|
}
|
|
|
|
DN_API void DN_OS_SleepMs(DN_UInt milliseconds)
|
|
{
|
|
Sleep(milliseconds);
|
|
}
|
|
|
|
DN_API DN_U64 DN_OS_PerfCounterFrequency()
|
|
{
|
|
DN_Assert(g_dn_os_core_);
|
|
DN_W32Core *w32 = DN_CAST(DN_W32Core *) g_dn_os_core_->platform_context;
|
|
DN_Assert(w32->qpc_frequency.QuadPart);
|
|
DN_U64 result = w32->qpc_frequency.QuadPart;
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_U64 DN_OS_PerfCounterNow()
|
|
{
|
|
LARGE_INTEGER integer = {};
|
|
QueryPerformanceCounter(&integer);
|
|
DN_U64 result = integer.QuadPart;
|
|
return result;
|
|
}
|
|
|
|
#if !defined(DN_NO_OS_FILE_API)
|
|
static DN_U64 DN_W32_FileTimeToSeconds_(FILETIME const *time)
|
|
{
|
|
ULARGE_INTEGER time_large_int = {};
|
|
time_large_int.u.LowPart = time->dwLowDateTime;
|
|
time_large_int.u.HighPart = time->dwHighDateTime;
|
|
DN_U64 result = (time_large_int.QuadPart / 10000000ULL) - 11644473600ULL;
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_OSPathInfo DN_OS_PathInfo(DN_Str8 path)
|
|
{
|
|
DN_OSPathInfo result = {};
|
|
if (!DN_Str8_HasData(path))
|
|
return result;
|
|
|
|
DN_OSTLSTMem tmem = DN_OS_TLSTMem(nullptr);
|
|
DN_Str16 path16 = DN_W32_Str8ToStr16(tmem.arena, path);
|
|
|
|
WIN32_FILE_ATTRIBUTE_DATA attrib_data = {};
|
|
if (!GetFileAttributesExW(path16.data, GetFileExInfoStandard, &attrib_data))
|
|
return result;
|
|
|
|
result.exists = true;
|
|
result.create_time_in_s = DN_W32_FileTimeToSeconds_(&attrib_data.ftCreationTime);
|
|
result.last_access_time_in_s = DN_W32_FileTimeToSeconds_(&attrib_data.ftLastAccessTime);
|
|
result.last_write_time_in_s = DN_W32_FileTimeToSeconds_(&attrib_data.ftLastWriteTime);
|
|
|
|
LARGE_INTEGER large_int = {};
|
|
large_int.u.HighPart = DN_CAST(int32_t) attrib_data.nFileSizeHigh;
|
|
large_int.u.LowPart = attrib_data.nFileSizeLow;
|
|
result.size = (DN_U64)large_int.QuadPart;
|
|
|
|
if (attrib_data.dwFileAttributes != INVALID_FILE_ATTRIBUTES) {
|
|
if (attrib_data.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY)
|
|
result.type = DN_OSPathInfoType_Directory;
|
|
else
|
|
result.type = DN_OSPathInfoType_File;
|
|
}
|
|
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_OS_PathDelete(DN_Str8 path)
|
|
{
|
|
bool result = false;
|
|
if (!DN_Str8_HasData(path))
|
|
return result;
|
|
|
|
DN_OSTLSTMem tmem = DN_OS_TLSTMem(nullptr);
|
|
DN_Str16 path16 = DN_W32_Str8ToStr16(tmem.arena, path);
|
|
if (path16.size) {
|
|
result = DeleteFileW(path16.data);
|
|
if (!result)
|
|
result = RemoveDirectoryW(path16.data);
|
|
}
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_OS_FileExists(DN_Str8 path)
|
|
{
|
|
bool result = false;
|
|
if (!DN_Str8_HasData(path))
|
|
return result;
|
|
|
|
DN_OSTLSTMem tmem = DN_OS_TLSTMem(nullptr);
|
|
DN_Str16 path16 = DN_W32_Str8ToStr16(tmem.arena, path);
|
|
if (path16.size) {
|
|
WIN32_FILE_ATTRIBUTE_DATA attrib_data = {};
|
|
if (GetFileAttributesExW(path16.data, GetFileExInfoStandard, &attrib_data))
|
|
result = (attrib_data.dwFileAttributes != INVALID_FILE_ATTRIBUTES) &&
|
|
!(attrib_data.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY);
|
|
}
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_OS_CopyFile(DN_Str8 src, DN_Str8 dest, bool overwrite, DN_OSErrSink *err)
|
|
{
|
|
bool result = false;
|
|
DN_OSTLSTMem tmem = DN_OS_TLSTMem(nullptr);
|
|
DN_Str16 src16 = DN_W32_Str8ToStr16(tmem.arena, src);
|
|
DN_Str16 dest16 = DN_W32_Str8ToStr16(tmem.arena, dest);
|
|
|
|
int fail_if_exists = overwrite == false;
|
|
result = CopyFileW(src16.data, dest16.data, fail_if_exists) != 0;
|
|
|
|
if (!result) {
|
|
DN_W32Error win_error = DN_W32_LastError(tmem.arena);
|
|
DN_OS_ErrSinkAppendF(err,
|
|
win_error.code,
|
|
"Failed to copy file '%.*s' to '%.*s': (%u) %.*s",
|
|
DN_STR_FMT(src),
|
|
DN_STR_FMT(dest),
|
|
win_error.code,
|
|
DN_STR_FMT(win_error.msg));
|
|
}
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_OS_MoveFile(DN_Str8 src, DN_Str8 dest, bool overwrite, DN_OSErrSink *err)
|
|
{
|
|
bool result = false;
|
|
DN_OSTLSTMem tmem = DN_OS_TLSTMem(nullptr);
|
|
DN_Str16 src16 = DN_W32_Str8ToStr16(tmem.arena, src);
|
|
DN_Str16 dest16 = DN_W32_Str8ToStr16(tmem.arena, dest);
|
|
|
|
unsigned long flags = MOVEFILE_COPY_ALLOWED;
|
|
if (overwrite)
|
|
flags |= MOVEFILE_REPLACE_EXISTING;
|
|
|
|
result = MoveFileExW(src16.data, dest16.data, flags) != 0;
|
|
if (!result) {
|
|
DN_W32Error win_error = DN_W32_LastError(tmem.arena);
|
|
DN_OS_ErrSinkAppendF(err,
|
|
win_error.code,
|
|
"Failed to move file '%.*s' to '%.*s': (%u) %.*s",
|
|
DN_STR_FMT(src),
|
|
DN_STR_FMT(dest),
|
|
win_error.code,
|
|
DN_STR_FMT(win_error.msg));
|
|
}
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_OS_MakeDir(DN_Str8 path)
|
|
{
|
|
bool result = true;
|
|
DN_OSTLSTMem tmem = DN_OS_TLSTMem(nullptr);
|
|
DN_Str16 path16 = DN_W32_Str8ToStr16(tmem.arena, path);
|
|
|
|
    // NOTE: Walk forwards through the path and, at every directory separator,
    // temporarily null-terminate the string so the prefix can be queried. If
    // the prefix is an existing directory we keep going, if a file exists
    // there the request is invalid and we fail out, and if nothing exists we
    // create the directory. This is necessary because the Win32 API cannot
    // create intermediate directories that don't exist in a path in one call.
|
|
for (DN_USize index = 0; index < path16.size; index++) {
|
|
bool first_char = index == (path16.size - 1);
|
|
wchar_t ch = path16.data[index];
|
|
if (ch == '/' || ch == '\\' || first_char) {
|
|
wchar_t temp = path16.data[index];
|
|
if (!first_char)
|
|
path16.data[index] = 0; // Temporarily null terminate it
|
|
|
|
WIN32_FILE_ATTRIBUTE_DATA attrib_data = {};
|
|
bool successful = GetFileAttributesExW(path16.data, GetFileExInfoStandard, &attrib_data); // Check
|
|
|
|
if (successful) {
|
|
if (attrib_data.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) {
|
|
// NOTE: The directory exists, continue iterating the path
|
|
} else {
|
|
// NOTE: There's some kind of file that exists at the path
|
|
// but it's not a directory. This request to make a
|
|
// directory is invalid.
|
|
return false;
|
|
}
|
|
} else {
|
|
// NOTE: There's nothing that exists at this path, we can create
|
|
// a directory here
|
|
                result &= (CreateDirectoryW(path16.data, nullptr) != 0);
|
|
}
|
|
|
|
if (!first_char)
|
|
path16.data[index] = temp; // Undo null termination
|
|
}
|
|
}
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_OS_DirExists(DN_Str8 path)
|
|
{
|
|
bool result = false;
|
|
if (!DN_Str8_HasData(path))
|
|
return result;
|
|
|
|
DN_OSTLSTMem tmem = DN_OS_TLSTMem(nullptr);
|
|
DN_Str16 path16 = DN_W32_Str8ToStr16(tmem.arena, path);
|
|
if (path16.size) {
|
|
WIN32_FILE_ATTRIBUTE_DATA attrib_data = {};
|
|
if (GetFileAttributesExW(path16.data, GetFileExInfoStandard, &attrib_data))
|
|
result = (attrib_data.dwFileAttributes != INVALID_FILE_ATTRIBUTES) &&
|
|
(attrib_data.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY);
|
|
}
|
|
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_OS_DirIterate(DN_Str8 path, DN_OSDirIterator *it)
|
|
{
|
|
if (!DN_Str8_HasData(path) || !it || path.size <= 0)
|
|
return false;
|
|
|
|
DN_OSTLSTMem tmem = DN_OS_TLSTMem(nullptr);
|
|
DN_W32FolderIteratorW wide_it = {};
|
|
DN_Str16 path16 = {};
|
|
if (it->handle) {
|
|
wide_it.handle = it->handle;
|
|
} else {
|
|
bool needs_asterisks = DN_Str8_EndsWith(path, DN_STR8("\\")) ||
|
|
DN_Str8_EndsWith(path, DN_STR8("/"));
|
|
bool has_glob = DN_Str8_EndsWith(path, DN_STR8("\\*")) ||
|
|
DN_Str8_EndsWith(path, DN_STR8("/*"));
|
|
|
|
DN_Str8 adjusted_path = path;
|
|
if (!has_glob) {
|
|
// NOTE: We are missing the glob for enumerating the files, we will
|
|
// add those characters in this branch, so overwrite the null
|
|
// character, add the glob and re-null terminate the buffer.
|
|
if (needs_asterisks)
|
|
adjusted_path = DN_OS_PathF(tmem.arena, "%.*s*", DN_STR_FMT(path));
|
|
else
|
|
adjusted_path = DN_OS_PathF(tmem.arena, "%.*s/*", DN_STR_FMT(path));
|
|
}
|
|
|
|
path16 = DN_W32_Str8ToStr16(tmem.arena, adjusted_path);
|
|
if (path16.size <= 0) // Conversion error
|
|
return false;
|
|
}
|
|
|
|
bool result = DN_W32_DirWIterate(path16, &wide_it);
|
|
it->handle = wide_it.handle;
|
|
if (result) {
|
|
int size = DN_W32_Str16ToStr8Buffer(wide_it.file_name, it->buffer, DN_ArrayCountU(it->buffer));
|
|
it->file_name = DN_Str8_Init(it->buffer, size);
|
|
}
|
|
|
|
return result;
|
|
}
|
|
|
|
// NOTE: R/W Stream API ////////////////////////////////////////////////////////////////////////////
|
|
DN_API DN_OSFile DN_OS_FileOpen(DN_Str8 path, DN_OSFileOpen open_mode, DN_U32 access, DN_OSErrSink *err)
|
|
{
|
|
DN_OSFile result = {};
|
|
if (!DN_Str8_HasData(path) || path.size <= 0)
|
|
return result;
|
|
|
|
if ((access & ~DN_OSFileAccess_All) || ((access & DN_OSFileAccess_All) == 0)) {
|
|
DN_InvalidCodePath;
|
|
return result;
|
|
}
|
|
|
|
unsigned long create_flag = 0;
|
|
switch (open_mode) {
|
|
case DN_OSFileOpen_CreateAlways: create_flag = CREATE_ALWAYS; break;
|
|
case DN_OSFileOpen_OpenIfExist: create_flag = OPEN_EXISTING; break;
|
|
case DN_OSFileOpen_OpenAlways: create_flag = OPEN_ALWAYS; break;
|
|
default: DN_InvalidCodePath; return result;
|
|
}
|
|
|
|
unsigned long access_mode = 0;
|
|
if (access & DN_OSFileAccess_AppendOnly) {
|
|
DN_AssertF((access & ~DN_OSFileAccess_AppendOnly) == 0,
|
|
"Append can only be applied exclusively to the file, other access modes not permitted");
|
|
access_mode = FILE_APPEND_DATA;
|
|
} else {
|
|
if (access & DN_OSFileAccess_Read)
|
|
access_mode |= GENERIC_READ;
|
|
if (access & DN_OSFileAccess_Write)
|
|
access_mode |= GENERIC_WRITE;
|
|
if (access & DN_OSFileAccess_Execute)
|
|
access_mode |= GENERIC_EXECUTE;
|
|
}
|
|
|
|
DN_OSTLSTMem tmem = DN_OS_TLSTMem(nullptr);
|
|
DN_Str16 path16 = DN_W32_Str8ToStr16(tmem.arena, path);
|
|
void *handle = CreateFileW(/*LPCWSTR lpFileName*/ path16.data,
|
|
/*DWORD dwDesiredAccess*/ access_mode,
|
|
/*DWORD dwShareMode*/ FILE_SHARE_READ | FILE_SHARE_WRITE,
|
|
/*LPSECURITY_ATTRIBUTES lpSecurityAttributes*/ nullptr,
|
|
/*DWORD dwCreationDisposition*/ create_flag,
|
|
/*DWORD dwFlagsAndAttributes*/ FILE_ATTRIBUTE_NORMAL,
|
|
/*HANDLE hTemplateFile*/ nullptr);
|
|
|
|
if (handle == INVALID_HANDLE_VALUE) {
|
|
DN_W32Error win_error = DN_W32_LastError(tmem.arena);
|
|
result.error = true;
|
|
DN_OS_ErrSinkAppendF(err, win_error.code, "Failed to open file at '%.*s': '%.*s'", DN_STR_FMT(path), DN_STR_FMT(win_error.msg));
|
|
return result;
|
|
}
|
|
|
|
result.handle = handle;
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_OSFileRead DN_OS_FileRead(DN_OSFile *file, void *buffer, DN_USize size, DN_OSErrSink *err)
|
|
{
|
|
DN_OSFileRead result = {};
|
|
if (!file || !file->handle || file->error || !buffer || size <= 0)
|
|
return result;
|
|
|
|
DN_OSTLSTMem tmem = DN_OS_TLSTMem(nullptr);
|
|
if (!DN_Check(size <= (unsigned long)-1)) {
|
|
DN_Str8 buffer_size_str8 = DN_CVT_U64ToByteSizeStr8(tmem.arena, size, DN_CVTU64ByteSizeType_Auto);
|
|
DN_OS_ErrSinkAppendF(
|
|
err,
|
|
1 /*error_code*/,
|
|
"Current implementation doesn't support reading >4GiB file (requested %.*s), implement Win32 overlapped IO",
|
|
DN_STR_FMT(buffer_size_str8));
|
|
return result;
|
|
}
|
|
|
|
unsigned long bytes_read = 0;
|
|
unsigned long read_result = ReadFile(/*HANDLE hFile*/ file->handle,
|
|
/*LPVOID lpBuffer*/ buffer,
|
|
/*DWORD nNumberOfBytesToRead*/ DN_CAST(unsigned long) size,
|
|
                                         /*LPDWORD lpNumberOfBytesRead*/ &bytes_read,
|
|
/*LPOVERLAPPED lpOverlapped*/ nullptr);
|
|
if (read_result == 0) {
|
|
DN_W32Error win_error = DN_W32_LastError(tmem.arena);
|
|
DN_OS_ErrSinkAppendF(err, win_error.code, "Failed to read data from file: (%u) %.*s", win_error.code, DN_STR_FMT(win_error.msg));
|
|
return result;
|
|
}
|
|
|
|
if (bytes_read != size) {
|
|
DN_W32Error win_error = DN_W32_LastError(tmem.arena);
|
|
DN_OS_ErrSinkAppendF(
|
|
err,
|
|
win_error.code,
|
|
"Failed to read the desired number of bytes from file, we read %uB but we expected %uB: (%u) %.*s",
|
|
bytes_read,
|
|
DN_CAST(unsigned long) size,
|
|
win_error.code,
|
|
DN_STR_FMT(win_error.msg));
|
|
return result;
|
|
}
|
|
|
|
result.bytes_read = bytes_read;
|
|
result.success = true;
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_OS_FileWritePtr(DN_OSFile *file, void const *buffer, DN_USize size, DN_OSErrSink *err)
|
|
{
|
|
if (!file || !file->handle || file->error || !buffer || size <= 0)
|
|
return false;
|
|
|
|
bool result = true;
|
|
    char const *end = DN_CAST(char const *) buffer + size;
|
|
for (char const *ptr = DN_CAST(char const *) buffer; result && ptr != end;) {
|
|
unsigned long write_size = DN_CAST(unsigned long) DN_Min((unsigned long)-1, end - ptr);
|
|
unsigned long bytes_written = 0;
|
|
result = WriteFile(file->handle, ptr, write_size, &bytes_written, nullptr /*lpOverlapped*/) != 0;
|
|
ptr += bytes_written;
|
|
}
|
|
|
|
if (!result) {
|
|
DN_OSTLSTMem tmem = DN_OS_TLSTMem(nullptr);
|
|
DN_W32Error win_error = DN_W32_LastError(tmem.arena);
|
|
DN_Str8 buffer_size_str8 = DN_CVT_U64ToByteSizeStr8(tmem.arena, size, DN_CVTU64ByteSizeType_Auto);
|
|
DN_OS_ErrSinkAppendF(err, win_error.code, "Failed to write buffer (%.*s) to file handle: %.*s", DN_STR_FMT(buffer_size_str8), DN_STR_FMT(win_error.msg));
|
|
}
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_OS_FileFlush(DN_OSFile *file, DN_OSErrSink *err)
|
|
{
|
|
if (!file || !file->handle || file->error)
|
|
return false;
|
|
|
|
BOOL result = FlushFileBuffers(DN_CAST(HANDLE) file->handle);
|
|
if (!result) {
|
|
DN_OSTLSTMem tmem = DN_OS_TLSTMem(nullptr);
|
|
DN_W32Error win_error = DN_W32_LastError(tmem.arena);
|
|
DN_OS_ErrSinkAppendF(err, win_error.code, "Failed to flush file buffer to disk: %.*s", DN_STR_FMT(win_error.msg));
|
|
}
|
|
|
|
return DN_CAST(bool) result;
|
|
}
|
|
|
|
DN_API void DN_OS_FileClose(DN_OSFile *file)
|
|
{
|
|
if (!file || !file->handle || file->error)
|
|
return;
|
|
CloseHandle(file->handle);
|
|
*file = {};
|
|
}
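
// NOTE: Example: file write ///////////////////////////////////////////////////////////////////////
// A sketch of the open/write/flush/close flow above. Passing nullptr for the error sink is an
// assumption made here for brevity; a real caller would typically thread a DN_OSErrSink through
// (its construction is defined elsewhere in the library).
//
//   DN_OSFile file = DN_OS_FileOpen(DN_STR8("out.txt"), DN_OSFileOpen_CreateAlways, DN_OSFileAccess_Write, nullptr /*err*/);
//   if (!file.error) {
//       DN_Str8 payload = DN_STR8("hello");
//       DN_OS_FileWritePtr(&file, payload.data, payload.size, nullptr /*err*/);
//       DN_OS_FileFlush(&file, nullptr /*err*/);
//   }
//   DN_OS_FileClose(&file);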
|
|
#endif // !defined(DN_NO_OS_FILE_API)
|
|
|
|
// NOTE: DN_OSExec /////////////////////////////////////////////////////////////////////////////////
|
|
DN_API void DN_OS_Exit(int32_t exit_code)
|
|
{
|
|
ExitProcess(DN_CAST(UINT) exit_code);
|
|
}
|
|
|
|
DN_API DN_OSExecResult DN_OS_ExecPump(DN_OSExecAsyncHandle handle,
|
|
char *stdout_buffer,
|
|
size_t *stdout_size,
|
|
char *stderr_buffer,
|
|
size_t *stderr_size,
|
|
DN_U32 timeout_ms,
|
|
DN_OSErrSink *err)
|
|
{
|
|
DN_OSExecResult result = {};
|
|
size_t stdout_buffer_size = 0;
|
|
size_t stderr_buffer_size = 0;
|
|
if (stdout_size) {
|
|
stdout_buffer_size = *stdout_size;
|
|
*stdout_size = 0;
|
|
}
|
|
|
|
if (stderr_size) {
|
|
stderr_buffer_size = *stderr_size;
|
|
*stderr_size = 0;
|
|
}
|
|
|
|
if (!handle.process || handle.os_error_code || handle.exit_code) {
|
|
if (handle.os_error_code)
|
|
result.os_error_code = handle.os_error_code;
|
|
else
|
|
result.exit_code = handle.exit_code;
|
|
|
|
DN_Assert(!handle.stdout_read);
|
|
DN_Assert(!handle.stdout_write);
|
|
DN_Assert(!handle.stderr_read);
|
|
DN_Assert(!handle.stderr_write);
|
|
DN_Assert(!handle.process);
|
|
return result;
|
|
}
|
|
|
|
DN_OSTLSTMem tmem = DN_OS_TLSTMem(nullptr);
|
|
DWORD stdout_bytes_available = 0;
|
|
DWORD stderr_bytes_available = 0;
|
|
PeekNamedPipe(handle.stdout_read, nullptr, 0, nullptr, &stdout_bytes_available, nullptr);
|
|
PeekNamedPipe(handle.stderr_read, nullptr, 0, nullptr, &stderr_bytes_available, nullptr);
|
|
|
|
DWORD exec_result = WAIT_TIMEOUT;
|
|
if (stdout_bytes_available == 0 && stderr_bytes_available == 0)
|
|
exec_result = WaitForSingleObject(handle.process, timeout_ms);
|
|
|
|
if (exec_result == WAIT_FAILED) {
|
|
DN_W32Error win_error = DN_W32_LastError(tmem.arena);
|
|
result.os_error_code = win_error.code;
|
|
DN_OS_ErrSinkAppendF(err, result.os_error_code, "Executed command failed to terminate: %.*s", DN_STR_FMT(win_error.msg));
|
|
} else if (DN_Check(exec_result == WAIT_TIMEOUT || exec_result == WAIT_OBJECT_0)) {
|
|
// NOTE: Read stdout from process //////////////////////////////////////////////////////
|
|
// If the pipes are full, the process will block. We periodically
|
|
// flush the pipes to make sure this doesn't happen
|
|
char sink[DN_Kilobytes(8)];
|
|
stdout_bytes_available = 0;
|
|
if (PeekNamedPipe(handle.stdout_read, nullptr, 0, nullptr, &stdout_bytes_available, nullptr)) {
|
|
if (stdout_bytes_available) {
|
|
DWORD bytes_read = 0;
|
|
char *dest_buffer = handle.stdout_write && stdout_buffer ? stdout_buffer : sink;
|
|
size_t dest_size = handle.stdout_write && stdout_buffer ? stdout_buffer_size : DN_ArrayCountU(sink);
|
|
BOOL success = ReadFile(handle.stdout_read, dest_buffer, DN_CAST(DWORD) dest_size, &bytes_read, NULL);
|
|
(void)success; // TODO:
|
|
if (stdout_size)
|
|
*stdout_size = bytes_read;
|
|
}
|
|
}
|
|
|
|
// NOTE: Read stderr from process //////////////////////////////////////////////////////
|
|
stderr_bytes_available = 0;
|
|
if (PeekNamedPipe(handle.stderr_read, nullptr, 0, nullptr, &stderr_bytes_available, nullptr)) {
|
|
if (stderr_bytes_available) {
|
|
char *dest_buffer = handle.stderr_write && stderr_buffer ? stderr_buffer : sink;
|
|
size_t dest_size = handle.stderr_write && stderr_buffer ? stderr_buffer_size : DN_ArrayCountU(sink);
|
|
DWORD bytes_read = 0;
|
|
BOOL success = ReadFile(handle.stderr_read, dest_buffer, DN_CAST(DWORD) dest_size, &bytes_read, NULL);
|
|
(void)success; // TODO:
|
|
if (stderr_size)
|
|
*stderr_size = bytes_read;
|
|
}
|
|
}
|
|
}
|
|
|
|
result.finished = exec_result == WAIT_OBJECT_0 || exec_result == WAIT_FAILED;
|
|
if (exec_result == WAIT_OBJECT_0) {
|
|
DWORD exit_status;
|
|
if (GetExitCodeProcess(handle.process, &exit_status)) {
|
|
result.exit_code = exit_status;
|
|
} else {
|
|
DN_W32Error win_error = DN_W32_LastError(tmem.arena);
|
|
result.os_error_code = win_error.code;
|
|
DN_OS_ErrSinkAppendF(err,
|
|
result.os_error_code,
|
|
"Failed to retrieve command exit code: %.*s",
|
|
DN_STR_FMT(win_error.msg));
|
|
}
|
|
|
|
// NOTE: Cleanup ///////////////////////////////////////////////////////////////////////////////
|
|
CloseHandle(handle.stdout_write);
|
|
CloseHandle(handle.stderr_write);
|
|
CloseHandle(handle.stdout_read);
|
|
CloseHandle(handle.stderr_read);
|
|
CloseHandle(handle.process);
|
|
}
|
|
|
|
result.stdout_text = DN_Str8_Init(stdout_buffer, stdout_size ? *stdout_size : 0);
|
|
result.stderr_text = DN_Str8_Init(stderr_buffer, stderr_size ? *stderr_size : 0);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_OSExecResult DN_OS_ExecWait(DN_OSExecAsyncHandle handle, DN_Arena *arena, DN_OSErrSink *err)
|
|
{
|
|
DN_OSExecResult result = {};
|
|
if (!handle.process || handle.os_error_code || handle.exit_code) {
|
|
result.finished = true;
|
|
if (handle.os_error_code)
|
|
result.os_error_code = handle.os_error_code;
|
|
else
|
|
result.exit_code = handle.exit_code;
|
|
|
|
DN_Assert(!handle.stdout_read);
|
|
DN_Assert(!handle.stdout_write);
|
|
DN_Assert(!handle.stderr_read);
|
|
DN_Assert(!handle.stderr_write);
|
|
DN_Assert(!handle.process);
|
|
return result;
|
|
}
|
|
|
|
DN_OSTLSTMem tmem = DN_OS_TLSTMem(arena);
|
|
DN_Str8Builder stdout_builder = {};
|
|
DN_Str8Builder stderr_builder = {};
|
|
if (arena) {
|
|
stdout_builder.arena = tmem.arena;
|
|
stderr_builder.arena = tmem.arena;
|
|
}
|
|
|
|
DN_U32 const SLOW_WAIT_TIME_MS = 100;
|
|
DN_U32 const FAST_WAIT_TIME_MS = 20;
|
|
DN_U32 wait_ms = FAST_WAIT_TIME_MS;
|
|
while (!result.finished) {
|
|
size_t stdout_size = DN_Kilobytes(8);
|
|
size_t stderr_size = DN_Kilobytes(8);
|
|
char *stdout_buffer = DN_Arena_NewArray(tmem.arena, char, stdout_size, DN_ZeroMem_No);
|
|
char *stderr_buffer = DN_Arena_NewArray(tmem.arena, char, stderr_size, DN_ZeroMem_No);
|
|
result = DN_OS_ExecPump(handle, stdout_buffer, &stdout_size, stderr_buffer, &stderr_size, wait_ms, err);
|
|
DN_Str8Builder_AppendCopy(&stdout_builder, result.stdout_text);
|
|
DN_Str8Builder_AppendCopy(&stderr_builder, result.stderr_text);
|
|
wait_ms = (DN_Str8_HasData(result.stdout_text) || DN_Str8_HasData(result.stderr_text)) ? FAST_WAIT_TIME_MS : SLOW_WAIT_TIME_MS;
|
|
}
|
|
|
|
// NOTE: Get stdout/stderr. If no arena is passed this is a no-op //////////////////////////////
|
|
result.stdout_text = DN_Str8Builder_Build(&stdout_builder, arena);
|
|
result.stderr_text = DN_Str8Builder_Build(&stderr_builder, arena);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_OSExecAsyncHandle DN_OS_ExecAsync(DN_Slice<DN_Str8> cmd_line, DN_OSExecArgs *args, DN_OSErrSink *err)
|
|
{
|
|
// NOTE: Pre-amble /////////////////////////////////////////////////////////////////////////////
|
|
DN_OSExecAsyncHandle result = {};
|
|
if (cmd_line.size == 0)
|
|
return result;
|
|
|
|
DN_OSTLSTMem tmem = DN_OS_TLSTMem(nullptr);
|
|
DN_Str8 cmd_rendered = DN_Slice_Str8Render(tmem.arena, cmd_line, DN_STR8(" "));
|
|
DN_Str16 cmd16 = DN_W32_Str8ToStr16(tmem.arena, cmd_rendered);
|
|
DN_Str16 working_dir16 = DN_W32_Str8ToStr16(tmem.arena, args->working_dir);
|
|
|
|
DN_Str8Builder env_builder = DN_Str8Builder_InitFromTLS();
|
|
DN_Str8Builder_AppendArrayRef(&env_builder, args->environment.data, args->environment.size);
|
|
if (env_builder.string_size)
|
|
DN_Str8Builder_AppendRef(&env_builder, DN_STR8("\0"));
|
|
|
|
DN_Str8 env_block8 = DN_Str8Builder_BuildDelimitedFromTLS(&env_builder, DN_STR8("\0"));
|
|
DN_Str16 env_block16 = {};
|
|
if (env_block8.size)
|
|
env_block16 = DN_W32_Str8ToStr16(tmem.arena, env_block8);
|
|
|
|
// NOTE: Stdout/err security attributes ////////////////////////////////////////////////////////
|
|
SECURITY_ATTRIBUTES save_std_security_attribs = {};
|
|
save_std_security_attribs.nLength = sizeof(save_std_security_attribs);
|
|
save_std_security_attribs.bInheritHandle = true;
|
|
|
|
// NOTE: Redirect stdout ///////////////////////////////////////////////////////////////////////
|
|
HANDLE stdout_read = {};
|
|
HANDLE stdout_write = {};
|
|
DN_DEFER
|
|
{
|
|
if (result.os_error_code || result.exit_code) {
|
|
CloseHandle(stdout_read);
|
|
CloseHandle(stdout_write);
|
|
}
|
|
};
|
|
|
|
if (DN_Bit_IsSet(args->flags, DN_OSExecFlags_SaveStdout)) {
|
|
if (!CreatePipe(&stdout_read, &stdout_write, &save_std_security_attribs, /*nSize*/ 0)) {
|
|
DN_W32Error win_error = DN_W32_LastError(tmem.arena);
|
|
result.os_error_code = win_error.code;
|
|
DN_OS_ErrSinkAppendF(
|
|
err,
|
|
result.os_error_code,
|
|
"Failed to create stdout pipe to redirect the output of the command '%.*s': %.*s",
|
|
DN_STR_FMT(cmd_rendered),
|
|
DN_STR_FMT(win_error.msg));
|
|
return result;
|
|
}
|
|
|
|
if (!SetHandleInformation(stdout_read, HANDLE_FLAG_INHERIT, 0)) {
|
|
DN_W32Error win_error = DN_W32_LastError(tmem.arena);
|
|
result.os_error_code = win_error.code;
|
|
DN_OS_ErrSinkAppendF(err,
|
|
result.os_error_code,
|
|
"Failed to make stdout 'read' pipe non-inheritable when trying to "
|
|
"execute command '%.*s': %.*s",
|
|
DN_STR_FMT(cmd_rendered),
|
|
DN_STR_FMT(win_error.msg));
|
|
return result;
|
|
}
|
|
}
|
|
|
|
// NOTE: Redirect stderr ///////////////////////////////////////////////////////////////////////
|
|
HANDLE stderr_read = {};
|
|
HANDLE stderr_write = {};
|
|
DN_DEFER
|
|
{
|
|
if (result.os_error_code || result.exit_code) {
|
|
CloseHandle(stderr_read);
|
|
CloseHandle(stderr_write);
|
|
}
|
|
};
|
|
|
|
if (DN_Bit_IsSet(args->flags, DN_OSExecFlags_SaveStderr)) {
|
|
if (DN_Bit_IsSet(args->flags, DN_OSExecFlags_MergeStderrToStdout)) {
|
|
stderr_read = stdout_read;
|
|
stderr_write = stdout_write;
|
|
} else {
|
|
if (!CreatePipe(&stderr_read, &stderr_write, &save_std_security_attribs, /*nSize*/ 0)) {
|
|
DN_W32Error win_error = DN_W32_LastError(tmem.arena);
|
|
result.os_error_code = win_error.code;
|
|
DN_OS_ErrSinkAppendF(
|
|
err,
|
|
result.os_error_code,
|
|
"Failed to create stderr pipe to redirect the output of the command '%.*s': %.*s",
|
|
DN_STR_FMT(cmd_rendered),
|
|
DN_STR_FMT(win_error.msg));
|
|
return result;
|
|
}
|
|
|
|
if (!SetHandleInformation(stderr_read, HANDLE_FLAG_INHERIT, 0)) {
|
|
DN_W32Error win_error = DN_W32_LastError(tmem.arena);
|
|
result.os_error_code = win_error.code;
|
|
DN_OS_ErrSinkAppendF(err,
|
|
result.os_error_code,
|
|
"Failed to make stderr 'read' pipe non-inheritable when trying to "
|
|
"execute command '%.*s': %.*s",
|
|
DN_STR_FMT(cmd_rendered),
|
|
DN_STR_FMT(win_error.msg));
|
|
return result;
|
|
}
|
|
}
|
|
}
|
|
|
|
// NOTE: Execute command ///////////////////////////////////////////////////////////////////////
|
|
PROCESS_INFORMATION proc_info = {};
|
|
STARTUPINFOW startup_info = {};
|
|
startup_info.cb = sizeof(STARTUPINFOW);
|
|
startup_info.hStdError = stderr_write ? stderr_write : GetStdHandle(STD_ERROR_HANDLE);
|
|
startup_info.hStdOutput = stdout_write ? stdout_write : GetStdHandle(STD_OUTPUT_HANDLE);
|
|
startup_info.hStdInput = GetStdHandle(STD_INPUT_HANDLE);
|
|
startup_info.dwFlags |= STARTF_USESTDHANDLES;
|
|
BOOL create_result = CreateProcessW(nullptr,
|
|
cmd16.data,
|
|
nullptr,
|
|
nullptr,
|
|
true,
|
|
CREATE_NO_WINDOW | CREATE_UNICODE_ENVIRONMENT,
|
|
env_block16.data,
|
|
working_dir16.data,
|
|
&startup_info,
|
|
&proc_info);
|
|
if (!create_result) {
|
|
DN_W32Error win_error = DN_W32_LastError(tmem.arena);
|
|
result.os_error_code = win_error.code;
|
|
DN_OS_ErrSinkAppendF(err,
|
|
result.os_error_code,
|
|
"Failed to execute command '%.*s': %.*s",
|
|
DN_STR_FMT(cmd_rendered),
|
|
DN_STR_FMT(win_error.msg));
|
|
return result;
|
|
}
|
|
|
|
// NOTE: Post-amble ////////////////////////////////////////////////////////////////////////////
|
|
CloseHandle(proc_info.hThread);
|
|
result.process = proc_info.hProcess;
|
|
result.stdout_read = stdout_read;
|
|
result.stdout_write = stdout_write;
|
|
if (DN_Bit_IsSet(args->flags, DN_OSExecFlags_SaveStderr) && DN_Bit_IsNotSet(args->flags, DN_OSExecFlags_MergeStderrToStdout)) {
|
|
result.stderr_read = stderr_read;
|
|
result.stderr_write = stderr_write;
|
|
}
|
|
result.exec_flags = args->flags;
|
|
return result;
|
|
}
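
// NOTE: Example: running a process ////////////////////////////////////////////////////////////////
// A sketch of the async-execute-then-wait flow, assuming a slice can be wrapped over the command
// parts (the slice helper lives elsewhere in the library), that 'arena' is any DN_Arena owned by
// the caller, and that a nullptr error sink is acceptable to the caller's error policy.
//
//   DN_Str8 cmd_parts[] = {DN_STR8("cmd"), DN_STR8("/C"), DN_STR8("echo hi")};
//   DN_Slice<DN_Str8> cmd = /* wrap cmd_parts in a slice */;
//   DN_OSExecArgs args = {};
//   args.flags |= DN_OSExecFlags_SaveStdout;
//   DN_OSExecAsyncHandle handle = DN_OS_ExecAsync(cmd, &args, nullptr /*err*/);
//   DN_OSExecResult result = DN_OS_ExecWait(handle, arena, nullptr /*err*/);
//   // result.exit_code and result.stdout_text are valid once result.finished is set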
|
|
|
|
static DN_W32Core *DN_OS_GetW32Core_()
|
|
{
|
|
DN_Assert(g_dn_os_core_ && g_dn_os_core_->platform_context);
|
|
DN_W32Core *result = DN_CAST(DN_W32Core *)g_dn_os_core_->platform_context;
|
|
return result;
|
|
}
|
|
|
|
static DN_W32SyncPrimitive *DN_OS_U64ToW32SyncPrimitive_(DN_U64 u64)
|
|
{
|
|
DN_W32SyncPrimitive *result = nullptr;
|
|
DN_Memcpy(&result, &u64, sizeof(u64));
|
|
return result;
|
|
}
|
|
|
|
static DN_U64 DN_W32_SyncPrimitiveToU64(DN_W32SyncPrimitive *primitive)
|
|
{
|
|
DN_U64 result = 0;
|
|
static_assert(sizeof(result) == sizeof(primitive), "Pointer size mis-match");
|
|
DN_Memcpy(&result, &primitive, sizeof(result));
|
|
return result;
|
|
}
|
|
|
|
static DN_W32SyncPrimitive *DN_W32_AllocSyncPrimitive_()
|
|
{
|
|
DN_W32Core *w32 = DN_OS_GetW32Core_();
|
|
DN_W32SyncPrimitive *result = nullptr;
|
|
EnterCriticalSection(&w32->sync_primitive_free_list_mutex);
|
|
{
|
|
if (w32->sync_primitive_free_list) {
|
|
result = w32->sync_primitive_free_list;
|
|
w32->sync_primitive_free_list = w32->sync_primitive_free_list->next;
|
|
result->next = nullptr;
|
|
} else {
|
|
DN_OSCore *os = g_dn_os_core_;
|
|
result = DN_Arena_New(&os->arena, DN_W32SyncPrimitive, DN_ZeroMem_Yes);
|
|
}
|
|
}
|
|
LeaveCriticalSection(&w32->sync_primitive_free_list_mutex);
|
|
return result;
|
|
}
|
|
|
|
static void DN_W32_DeallocSyncPrimitive_(DN_W32SyncPrimitive *primitive)
|
|
{
|
|
if (primitive) {
|
|
DN_W32Core *w32 = DN_OS_GetW32Core_();
|
|
EnterCriticalSection(&w32->sync_primitive_free_list_mutex);
|
|
primitive->next = w32->sync_primitive_free_list;
|
|
w32->sync_primitive_free_list = primitive;
|
|
LeaveCriticalSection(&w32->sync_primitive_free_list_mutex);
|
|
}
|
|
}
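
// NOTE: Sync primitive handles ////////////////////////////////////////////////////////////////////
// The OS-facing handles (DN_OSSemaphore/DN_OSMutex/DN_OSConditionVariable) store a DN_U64 that is
// simply the pointer to a pooled DN_W32SyncPrimitive, round-tripped through DN_Memcpy to avoid
// aliasing casts. Freed primitives are pushed onto the mutex-guarded free list so they can be
// reused without growing the OS arena. Roughly, for illustration only:
//
//   DN_W32SyncPrimitive *p      = DN_W32_AllocSyncPrimitive_();
//   DN_U64               handle = DN_W32_SyncPrimitiveToU64(p); // Stored in DN_OSSemaphore et al.
//   DN_Assert(DN_OS_U64ToW32SyncPrimitive_(handle) == p);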
|
|
|
|
// NOTE: DN_OSSemaphore ////////////////////////////////////////////////////////////////////////////
|
|
DN_API DN_OSSemaphore DN_OS_SemaphoreInit(DN_U32 initial_count)
|
|
{
|
|
DN_OSSemaphore result = {};
|
|
DN_W32SyncPrimitive *primitive = DN_W32_AllocSyncPrimitive_();
|
|
if (primitive) {
|
|
SECURITY_ATTRIBUTES security_attribs = {};
|
|
primitive->sem = CreateSemaphoreA(&security_attribs, initial_count, INT32_MAX, nullptr /*name*/);
|
|
if (primitive->sem)
|
|
result.handle = DN_W32_SyncPrimitiveToU64(primitive);
|
|
if (!primitive->sem)
|
|
DN_W32_DeallocSyncPrimitive_(primitive);
|
|
}
|
|
return result;
|
|
}
|
|
|
|
DN_API void DN_OS_SemaphoreDeinit(DN_OSSemaphore *semaphore)
|
|
{
|
|
if (semaphore && semaphore->handle != 0) {
|
|
DN_W32SyncPrimitive *primitive = DN_OS_U64ToW32SyncPrimitive_(semaphore->handle);
|
|
CloseHandle(primitive->sem);
|
|
DN_W32_DeallocSyncPrimitive_(primitive);
|
|
*semaphore = {};
|
|
}
|
|
}
|
|
|
|
DN_API void DN_OS_SemaphoreIncrement(DN_OSSemaphore *semaphore, DN_U32 amount)
|
|
{
|
|
if (semaphore && semaphore->handle != 0) {
|
|
DN_W32SyncPrimitive *primitive = DN_OS_U64ToW32SyncPrimitive_(semaphore->handle);
|
|
LONG prev_count = 0;
|
|
ReleaseSemaphore(primitive->sem, amount, &prev_count);
|
|
}
|
|
}
|
|
|
|
DN_API DN_OSSemaphoreWaitResult DN_OS_SemaphoreWait(DN_OSSemaphore *semaphore, DN_U32 timeout_ms)
|
|
{
|
|
DN_OSSemaphoreWaitResult result = {};
|
|
if (semaphore && semaphore->handle != 0) {
|
|
DN_W32SyncPrimitive *primitive = DN_OS_U64ToW32SyncPrimitive_(semaphore->handle);
|
|
DWORD wait_result = WaitForSingleObject(primitive->sem, timeout_ms == DN_OS_SEMAPHORE_INFINITE_TIMEOUT ? INFINITE : timeout_ms);
|
|
if (wait_result == WAIT_TIMEOUT)
|
|
result = DN_OSSemaphoreWaitResult_Timeout;
|
|
else if (wait_result == WAIT_OBJECT_0)
|
|
result = DN_OSSemaphoreWaitResult_Success;
|
|
}
|
|
return result;
|
|
}
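
// NOTE: Example: semaphore ////////////////////////////////////////////////////////////////////////
// A sketch of signalling work between threads with the semaphore API above; the 1000ms timeout is
// arbitrary and DN_OS_SEMAPHORE_INFINITE_TIMEOUT can be used to block indefinitely.
//
//   DN_OSSemaphore sem = DN_OS_SemaphoreInit(0 /*initial_count*/);
//   DN_OS_SemaphoreIncrement(&sem, 1); // Producer: one unit of work is ready
//   if (DN_OS_SemaphoreWait(&sem, 1000 /*timeout_ms*/) == DN_OSSemaphoreWaitResult_Success) {
//       // Consumer: handle the unit of work
//   }
//   DN_OS_SemaphoreDeinit(&sem);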
|
|
|
|
// NOTE: DN_OSMutex ////////////////////////////////////////////////////////////////////////////////
|
|
DN_API DN_OSMutex DN_OS_MutexInit()
|
|
{
|
|
DN_W32SyncPrimitive *primitive = DN_W32_AllocSyncPrimitive_();
|
|
if (primitive)
|
|
InitializeCriticalSection(&primitive->mutex);
|
|
DN_OSMutex result = {};
|
|
result.handle = DN_W32_SyncPrimitiveToU64(primitive);
|
|
return result;
|
|
}
|
|
|
|
DN_API void DN_OS_MutexDeinit(DN_OSMutex *mutex)
|
|
{
|
|
if (mutex && mutex->handle != 0) {
|
|
DN_W32SyncPrimitive *primitive = DN_OS_U64ToW32SyncPrimitive_(mutex->handle);
|
|
DeleteCriticalSection(&primitive->mutex);
|
|
DN_W32_DeallocSyncPrimitive_(primitive);
|
|
*mutex = {};
|
|
}
|
|
}
|
|
|
|
DN_API void DN_OS_MutexLock(DN_OSMutex *mutex)
|
|
{
|
|
if (mutex && mutex->handle != 0) {
|
|
DN_W32SyncPrimitive *primitive = DN_OS_U64ToW32SyncPrimitive_(mutex->handle);
|
|
EnterCriticalSection(&primitive->mutex);
|
|
}
|
|
}
|
|
|
|
DN_API void DN_OS_MutexUnlock(DN_OSMutex *mutex)
|
|
{
|
|
if (mutex && mutex->handle != 0) {
|
|
DN_W32SyncPrimitive *primitive = DN_OS_U64ToW32SyncPrimitive_(mutex->handle);
|
|
LeaveCriticalSection(&primitive->mutex);
|
|
}
|
|
}
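
// NOTE: Example: mutex ////////////////////////////////////////////////////////////////////////////
// A sketch of guarding shared state with the mutex API above (a plain lock/unlock pair; the
// library may offer a scoped helper elsewhere).
//
//   DN_OSMutex mutex = DN_OS_MutexInit();
//   DN_OS_MutexLock(&mutex);
//   // ... read/write the shared state ...
//   DN_OS_MutexUnlock(&mutex);
//   DN_OS_MutexDeinit(&mutex);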
|
|
|
|
// NOTE: DN_OSConditionVariable ////////////////////////////////////////////////////////////////////
|
|
DN_API DN_OSConditionVariable DN_OS_ConditionVariableInit()
|
|
{
|
|
DN_W32SyncPrimitive *primitive = DN_W32_AllocSyncPrimitive_();
|
|
if (primitive)
|
|
InitializeConditionVariable(&primitive->cv);
|
|
DN_OSConditionVariable result = {};
|
|
result.handle = DN_W32_SyncPrimitiveToU64(primitive);
|
|
return result;
|
|
}
|
|
|
|
DN_API void DN_OS_ConditionVariableDeinit(DN_OSConditionVariable *cv)
|
|
{
|
|
if (cv && cv->handle != 0) {
|
|
DN_W32SyncPrimitive *primitive = DN_OS_U64ToW32SyncPrimitive_(cv->handle);
|
|
DN_W32_DeallocSyncPrimitive_(primitive);
|
|
*cv = {};
|
|
}
|
|
}
|
|
|
|
DN_API bool DN_OS_ConditionVariableWaitUntil(DN_OSConditionVariable *cv, DN_OSMutex *mutex, DN_U64 end_ts_ms)
|
|
{
|
|
bool result = false;
|
|
DN_U64 now_ms = DN_OS_DateUnixTimeNs() / (1000 * 1000);
|
|
if (now_ms < end_ts_ms) {
|
|
DN_U64 sleep_ms = end_ts_ms - now_ms;
|
|
result = DN_OS_ConditionVariableWait(cv, mutex, sleep_ms);
|
|
}
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_OS_ConditionVariableWait(DN_OSConditionVariable *cv, DN_OSMutex *mutex, DN_U64 sleep_ms)
|
|
{
|
|
bool result = false;
|
|
if (mutex && cv && mutex->handle != 0 && cv->handle != 0 && sleep_ms > 0) {
|
|
DN_W32SyncPrimitive *mutex_primitive = DN_OS_U64ToW32SyncPrimitive_(mutex->handle);
|
|
DN_W32SyncPrimitive *cv_primitive = DN_OS_U64ToW32SyncPrimitive_(cv->handle);
|
|
result = SleepConditionVariableCS(&cv_primitive->cv, &mutex_primitive->mutex, DN_CAST(DWORD) sleep_ms);
|
|
}
|
|
return result;
|
|
}
|
|
|
|
DN_API void DN_OS_ConditionVariableSignal(DN_OSConditionVariable *cv)
|
|
{
|
|
if (cv && cv->handle != 0) {
|
|
DN_W32SyncPrimitive *primitive = DN_OS_U64ToW32SyncPrimitive_(cv->handle);
|
|
WakeConditionVariable(&primitive->cv);
|
|
}
|
|
}
|
|
|
|
DN_API void DN_OS_ConditionVariableBroadcast(DN_OSConditionVariable *cv)
|
|
{
|
|
if (cv && cv->handle != 0) {
|
|
DN_W32SyncPrimitive *primitive = DN_OS_U64ToW32SyncPrimitive_(cv->handle);
|
|
WakeAllConditionVariable(&primitive->cv);
|
|
}
|
|
}
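
// NOTE: Example: condition variable ///////////////////////////////////////////////////////////////
// A sketch of the usual predicate loop, given a DN_OSMutex 'mutex' and DN_OSConditionVariable 'cv'
// initialised as above. The waiter re-checks its condition after every wake because
// SleepConditionVariableCS permits spurious wake-ups; 'work_ready' is a placeholder variable, not
// part of the library.
//
//   DN_OS_MutexLock(&mutex);
//   while (!work_ready)
//       DN_OS_ConditionVariableWait(&cv, &mutex, 1000 /*sleep_ms*/);
//   DN_OS_MutexUnlock(&mutex);
//
//   // Producer side
//   DN_OS_MutexLock(&mutex); work_ready = true; DN_OS_MutexUnlock(&mutex);
//   DN_OS_ConditionVariableSignal(&cv);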
|
|
|
|
// NOTE: DN_OSThread ///////////////////////////////////////////////////////////////////////////////
|
|
static DWORD __stdcall DN_OS_ThreadFunc_(void *user_context)
|
|
{
|
|
DN_OS_ThreadExecute_(user_context);
|
|
return 0;
|
|
}
|
|
|
|
DN_API bool DN_OS_ThreadInit(DN_OSThread *thread, DN_OSThreadFunc *func, void *user_context)
|
|
{
|
|
bool result = false;
|
|
if (!thread)
|
|
return result;
|
|
|
|
thread->func = func;
|
|
thread->user_context = user_context;
|
|
thread->init_semaphore = DN_OS_SemaphoreInit(0 /*initial_count*/);
|
|
|
|
// TODO(doyle): Check if semaphore is valid
|
|
DWORD thread_id = 0;
|
|
SECURITY_ATTRIBUTES security_attribs = {};
|
|
thread->handle = CreateThread(&security_attribs,
|
|
0 /*stack_size*/,
|
|
DN_OS_ThreadFunc_,
|
|
thread,
|
|
0 /*creation_flags*/,
|
|
&thread_id);
|
|
|
|
    result = thread->handle != nullptr; // NOTE: CreateThread returns null on failure, not INVALID_HANDLE_VALUE
|
|
if (result)
|
|
thread->thread_id = thread_id;
|
|
|
|
// NOTE: Ensure that thread_id is set before 'thread->func' is called.
|
|
if (result) {
|
|
DN_OS_SemaphoreIncrement(&thread->init_semaphore, 1);
|
|
} else {
|
|
DN_OS_SemaphoreDeinit(&thread->init_semaphore);
|
|
*thread = {};
|
|
}
|
|
|
|
return result;
|
|
}
|
|
|
|
DN_API void DN_OS_ThreadDeinit(DN_OSThread *thread)
|
|
{
|
|
if (!thread || !thread->handle)
|
|
return;
|
|
|
|
WaitForSingleObject(thread->handle, INFINITE);
|
|
CloseHandle(thread->handle);
|
|
thread->handle = INVALID_HANDLE_VALUE;
|
|
thread->thread_id = {};
|
|
DN_OS_TLSDeinit(&thread->tls);
|
|
}
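
// NOTE: Example: thread ///////////////////////////////////////////////////////////////////////////
// A sketch of creating and joining a thread; the exact DN_OSThreadFunc signature is assumed from
// how 'thread->func' is used elsewhere in the library and may differ.
//
//   static void MyThreadEntry(DN_OSThread *thread) { (void)thread; /* do work */ }
//
//   DN_OSThread thread = {};
//   if (DN_OS_ThreadInit(&thread, MyThreadEntry, nullptr /*user_context*/))
//       DN_OS_ThreadDeinit(&thread); // Waits for the thread to finish, then releases the handle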
|
|
|
|
DN_API DN_U32 DN_OS_ThreadID()
|
|
{
|
|
unsigned long result = GetCurrentThreadId();
|
|
return result;
|
|
}
|
|
|
|
DN_API void DN_W32_ThreadSetName(DN_Str8 name)
|
|
{
|
|
DN_OSTLS *tls = DN_OS_TLSGet();
|
|
DN_ArenaTempMem tmem = DN_Arena_TempMemBegin(tls->arenas + DN_OSTLSArena_TMem0);
|
|
|
|
    // NOTE: SetThreadDescription is only available from Windows 10 version 1607,
    // Windows 10 LTSB 2016 and Windows Server 2016 onwards
|
|
//
|
|
// See: https://learn.microsoft.com/en-us/windows/w32/api/processthreadsapi/nf-processthreadsapi-setthreaddescription
|
|
DN_W32Core *w32 = DN_OS_GetW32Core_();
|
|
if (w32->set_thread_description) {
|
|
DN_Str16 name16 = DN_W32_Str8ToStr16(tmem.arena, name);
|
|
w32->set_thread_description(GetCurrentThread(), (WCHAR *)name16.data);
|
|
DN_Arena_TempMemEnd(tmem);
|
|
return;
|
|
}
|
|
|
|
// NOTE: Fallback to throw-exception method to set thread name
|
|
#pragma pack(push, 8)
|
|
|
|
struct DN_W32ThreadNameInfo
|
|
{
|
|
DN_U32 dwType;
|
|
char *szName;
|
|
DN_U32 dwThreadID;
|
|
DN_U32 dwFlags;
|
|
};
|
|
|
|
#pragma pack(pop)
|
|
|
|
DN_Str8 copy = DN_Str8_Copy(tmem.arena, name);
|
|
DN_W32ThreadNameInfo info = {};
|
|
info.dwType = 0x1000;
|
|
info.szName = (char *)copy.data;
|
|
info.dwThreadID = DN_OS_ThreadID();
|
|
|
|
// TODO: Review warning 6320
|
|
DN_MSVC_WARNING_PUSH
|
|
DN_MSVC_WARNING_DISABLE(6320) // Exception-filter expression is the constant EXCEPTION_EXECUTE_HANDLER. This might mask exceptions that were not intended to be handled
|
|
DN_MSVC_WARNING_DISABLE(6322) // Empty _except block
|
|
__try {
|
|
RaiseException(0x406D1388, 0, sizeof(info) / sizeof(void *), (const ULONG_PTR *)&info);
|
|
} __except (EXCEPTION_EXECUTE_HANDLER) {
|
|
}
|
|
DN_MSVC_WARNING_POP
|
|
|
|
DN_Arena_TempMemEnd(tmem);
|
|
}
|
|
|
|
// NOTE: DN_OSHttp /////////////////////////////////////////////////////////////////////////////////
|
|
void DN_OS_HttpRequestWin32Callback(HINTERNET session, DWORD *dwContext, DWORD dwInternetStatus, VOID *lpvStatusInformation, DWORD dwStatusInformationLength)
|
|
{
|
|
(void)session;
|
|
(void)dwStatusInformationLength;
|
|
|
|
DN_OSHttpResponse *response = DN_CAST(DN_OSHttpResponse *) dwContext;
|
|
HINTERNET request = DN_CAST(HINTERNET) response->w32_request_handle;
|
|
DN_W32Error error = {};
|
|
DWORD const READ_BUFFER_SIZE = DN_Megabytes(1);
|
|
|
|
if (dwInternetStatus == WINHTTP_CALLBACK_STATUS_RESOLVING_NAME) {
|
|
} else if (dwInternetStatus == WINHTTP_CALLBACK_STATUS_NAME_RESOLVED) {
|
|
} else if (dwInternetStatus == WINHTTP_CALLBACK_STATUS_CONNECTING_TO_SERVER) {
|
|
} else if (dwInternetStatus == WINHTTP_CALLBACK_STATUS_CONNECTED_TO_SERVER) {
|
|
} else if (dwInternetStatus == WINHTTP_CALLBACK_STATUS_SENDING_REQUEST) {
|
|
} else if (dwInternetStatus == WINHTTP_CALLBACK_STATUS_REQUEST_SENT) {
|
|
} else if (dwInternetStatus == WINHTTP_CALLBACK_STATUS_RECEIVING_RESPONSE) {
|
|
} else if (dwInternetStatus == WINHTTP_CALLBACK_STATUS_RESPONSE_RECEIVED) {
|
|
} else if (dwInternetStatus == WINHTTP_CALLBACK_STATUS_CLOSING_CONNECTION) {
|
|
} else if (dwInternetStatus == WINHTTP_CALLBACK_STATUS_CONNECTION_CLOSED) {
|
|
|
|
} else if (dwInternetStatus == WINHTTP_CALLBACK_STATUS_HANDLE_CREATED) {
|
|
} else if (dwInternetStatus == WINHTTP_CALLBACK_STATUS_HANDLE_CLOSING) {
|
|
} else if (dwInternetStatus == WINHTTP_CALLBACK_STATUS_DETECTING_PROXY) {
|
|
} else if (dwInternetStatus == WINHTTP_CALLBACK_STATUS_REDIRECT) {
|
|
} else if (dwInternetStatus == WINHTTP_CALLBACK_STATUS_INTERMEDIATE_RESPONSE) {
|
|
} else if (dwInternetStatus == WINHTTP_CALLBACK_STATUS_SECURE_FAILURE) {
|
|
} else if (dwInternetStatus == WINHTTP_CALLBACK_STATUS_HEADERS_AVAILABLE) {
|
|
DWORD status = 0;
|
|
        DWORD status_size = sizeof(status);
|
|
if (WinHttpQueryHeaders(request,
|
|
WINHTTP_QUERY_STATUS_CODE | WINHTTP_QUERY_FLAG_NUMBER,
|
|
WINHTTP_HEADER_NAME_BY_INDEX,
|
|
&status,
|
|
&status_size,
|
|
WINHTTP_NO_HEADER_INDEX)) {
|
|
response->http_status = DN_CAST(uint16_t) status;
|
|
|
|
            // NOTE: Normally you would call WinHttpQueryDataAvailable, which has the
            // kernel buffer the response and report back the full size of the payload.
            //
            // Alternatively, WinHttpReadData can be called directly so the data is
            // written straight into our buffer. This avoids a copy from the kernel's
            // buffer into ours, and if the application knows the typical payload size
            // it can size the buffer accordingly and avoid unnecessary allocations.
|
|
void *buffer = DN_Arena_Alloc(response->builder.arena, READ_BUFFER_SIZE, 1 /*align*/, DN_ZeroMem_No);
|
|
if (!WinHttpReadData(request, buffer, READ_BUFFER_SIZE, nullptr))
|
|
error = DN_W32_LastError(&response->tmp_arena);
|
|
} else {
|
|
error = DN_W32_LastError(&response->tmp_arena);
|
|
}
|
|
} else if (dwInternetStatus == WINHTTP_CALLBACK_STATUS_DATA_AVAILABLE) {
|
|
} else if (dwInternetStatus == WINHTTP_CALLBACK_STATUS_READ_COMPLETE) {
|
|
DWORD bytes_read = dwStatusInformationLength;
|
|
if (bytes_read) {
|
|
DN_Str8 prev_buffer = DN_Str8_Init(DN_CAST(char *) lpvStatusInformation, bytes_read);
|
|
DN_Str8Builder_AppendRef(&response->builder, prev_buffer);
|
|
|
|
void *buffer = DN_Arena_Alloc(response->builder.arena, READ_BUFFER_SIZE, 1 /*align*/, DN_ZeroMem_No);
|
|
if (!WinHttpReadData(request, buffer, READ_BUFFER_SIZE, nullptr))
|
|
error = DN_W32_LastError(&response->tmp_arena);
|
|
}
|
|
} else if (dwInternetStatus == WINHTTP_CALLBACK_STATUS_WRITE_COMPLETE) {
|
|
} else if (dwInternetStatus == WINHTTP_CALLBACK_STATUS_REQUEST_ERROR) {
|
|
WINHTTP_ASYNC_RESULT *async_result = DN_CAST(WINHTTP_ASYNC_RESULT *) lpvStatusInformation;
|
|
error = DN_W32_ErrorCodeToMsg(&response->tmp_arena, DN_CAST(DN_U32) async_result->dwError);
|
|
} else if (dwInternetStatus == WINHTTP_CALLBACK_STATUS_SENDREQUEST_COMPLETE) {
|
|
if (!WinHttpReceiveResponse(request, 0))
|
|
error = DN_W32_LastError(&response->tmp_arena);
|
|
}
|
|
|
|
    // NOTE: If the request handle is missing then the response has been freed.
|
|
// MSDN says that this callback can still be called after closing the handle
|
|
// and trigger the WINHTTP_CALLBACK_STATUS_REQUEST_ERROR.
|
|
if (request) {
|
|
bool read_complete = dwInternetStatus == WINHTTP_CALLBACK_STATUS_READ_COMPLETE && dwStatusInformationLength == 0;
|
|
if (read_complete)
|
|
response->body = DN_Str8Builder_Build(&response->builder, response->arena);
|
|
|
|
if (read_complete || dwInternetStatus == WINHTTP_CALLBACK_STATUS_REQUEST_ERROR || error.code) {
|
|
DN_OS_SemaphoreIncrement(&response->on_complete_semaphore, 1);
|
|
DN_Atomic_AddU32(&response->done, 1);
|
|
}
|
|
|
|
if (error.code) {
|
|
response->error_code = error.code;
|
|
response->error_msg = error.msg;
|
|
}
|
|
}
|
|
}
|
|
|
|
DN_API void DN_OS_HttpRequestAsync(DN_OSHttpResponse *response,
|
|
DN_Arena *arena,
|
|
DN_Str8 host,
|
|
DN_Str8 path,
|
|
DN_OSHttpRequestSecure secure,
|
|
DN_Str8 method,
|
|
DN_Str8 body,
|
|
DN_Str8 headers)
|
|
{
|
|
if (!response || !arena)
|
|
return;
|
|
|
|
response->arena = arena;
|
|
response->builder.arena = response->tmem_arena ? response->tmem_arena : &response->tmp_arena;
|
|
|
|
DN_Arena *tmem_arena = response->tmem_arena;
|
|
DN_OSTLSTMem tmem_ = DN_OS_TLSTMem(arena);
|
|
if (!tmem_arena)
|
|
tmem_arena = tmem_.arena;
|
|
|
|
DN_W32Error error = {};
|
|
DN_DEFER
|
|
{
|
|
response->error_msg = error.msg;
|
|
response->error_code = error.code;
|
|
if (error.code) {
|
|
// NOTE: 'Wait' handles failures gracefully, skipping the wait and
|
|
// cleans up the request
|
|
DN_OS_HttpRequestWait(response);
|
|
DN_Atomic_AddU32(&response->done, 1);
|
|
}
|
|
};
|
|
|
|
response->w32_request_session = WinHttpOpen(nullptr /*user agent*/, WINHTTP_ACCESS_TYPE_AUTOMATIC_PROXY, WINHTTP_NO_PROXY_NAME, WINHTTP_NO_PROXY_BYPASS, WINHTTP_FLAG_ASYNC);
|
|
if (!response->w32_request_session) {
|
|
error = DN_W32_LastError(&response->tmp_arena);
|
|
return;
|
|
}
|
|
|
|
DWORD callback_flags = WINHTTP_CALLBACK_STATUS_HEADERS_AVAILABLE |
|
|
WINHTTP_CALLBACK_STATUS_READ_COMPLETE |
|
|
WINHTTP_CALLBACK_STATUS_REQUEST_ERROR |
|
|
WINHTTP_CALLBACK_STATUS_SENDREQUEST_COMPLETE;
|
|
if (WinHttpSetStatusCallback(response->w32_request_session,
|
|
DN_CAST(WINHTTP_STATUS_CALLBACK) DN_OS_HttpRequestWin32Callback,
|
|
callback_flags,
|
|
DN_CAST(DWORD_PTR) nullptr /*dwReserved*/) == WINHTTP_INVALID_STATUS_CALLBACK) {
|
|
error = DN_W32_LastError(&response->tmp_arena);
|
|
return;
|
|
}
|
|
|
|
DN_Str16 host16 = DN_W32_Str8ToStr16(tmem_arena, host);
|
|
response->w32_request_connection = WinHttpConnect(response->w32_request_session, host16.data, secure ? INTERNET_DEFAULT_HTTPS_PORT : INTERNET_DEFAULT_HTTP_PORT, 0 /*reserved*/);
|
|
if (!response->w32_request_connection) {
|
|
error = DN_W32_LastError(&response->tmp_arena);
|
|
return;
|
|
}
|
|
|
|
DN_Str16 method16 = DN_W32_Str8ToStr16(tmem_arena, method);
|
|
DN_Str16 path16 = DN_W32_Str8ToStr16(tmem_arena, path);
|
|
response->w32_request_handle = WinHttpOpenRequest(response->w32_request_connection,
|
|
method16.data,
|
|
path16.data,
|
|
nullptr /*version*/,
|
|
nullptr /*referrer*/,
|
|
nullptr /*accept types*/,
|
|
secure ? WINHTTP_FLAG_SECURE : 0);
|
|
if (!response->w32_request_handle) {
|
|
error = DN_W32_LastError(&response->tmp_arena);
|
|
return;
|
|
}
|
|
|
|
DN_Str16 headers16 = DN_W32_Str8ToStr16(tmem_arena, headers);
|
|
response->on_complete_semaphore = DN_OS_SemaphoreInit(0);
|
|
if (!WinHttpSendRequest(response->w32_request_handle,
|
|
headers16.data,
|
|
DN_CAST(DWORD) headers16.size,
|
|
body.data /*optional data*/,
|
|
DN_CAST(DWORD) body.size /*optional length*/,
|
|
DN_CAST(DWORD) body.size /*total content length*/,
|
|
DN_CAST(DWORD_PTR) response)) {
|
|
error = DN_W32_LastError(&response->tmp_arena);
|
|
return;
|
|
}
|
|
}
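
// NOTE: Example: HTTP request /////////////////////////////////////////////////////////////////////
// A sketch of issuing a request with the async API above and blocking until the WinHTTP callback
// signals completion. The 'DN_OSHttpRequestSecure_Yes' enumerant name is assumed here, and
// DN_OS_HttpRequestWait is defined elsewhere in the library; 'arena' is any caller-owned DN_Arena.
//
//   DN_OSHttpResponse response = {};
//   DN_OS_HttpRequestAsync(&response, arena, DN_STR8("example.com"), DN_STR8("/index.html"),
//                          DN_OSHttpRequestSecure_Yes, DN_STR8("GET"), DN_STR8("") /*body*/, DN_STR8("") /*headers*/);
//   DN_OS_HttpRequestWait(&response); // Blocks on 'on_complete_semaphore'
//   // response.http_status, response.body and response.error_code are filled in by the callback
//   DN_OS_HttpRequestFree(&response);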
|
|
|
|
DN_API void DN_OS_HttpRequestFree(DN_OSHttpResponse *response)
|
|
{
|
|
// NOTE: Cleanup
|
|
// NOTE: These calls are synchronous even when the HTTP request is async.
|
|
WinHttpCloseHandle(response->w32_request_handle);
|
|
WinHttpCloseHandle(response->w32_request_connection);
|
|
WinHttpCloseHandle(response->w32_request_session);
|
|
|
|
response->w32_request_session = nullptr;
|
|
response->w32_request_connection = nullptr;
|
|
response->w32_request_handle = nullptr;
|
|
DN_Arena_Deinit(&response->tmp_arena);
|
|
DN_OS_SemaphoreDeinit(&response->on_complete_semaphore);
|
|
|
|
*response = {};
|
|
}
|
|
|
|
// NOTE: DN_W32 ////////////////////////////////////////////////////////////////////////////////////
|
|
DN_API DN_Str16 DN_W32_ErrorCodeToMsg16Alloc(DN_U32 error_code)
|
|
{
|
|
DWORD flags = FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS;
|
|
void *module_to_get_errors_from = nullptr;
|
|
if (error_code >= 12000 && error_code <= 12175) {
|
|
flags |= FORMAT_MESSAGE_FROM_HMODULE;
|
|
module_to_get_errors_from = GetModuleHandleA("winhttp.dll");
|
|
}
|
|
|
|
wchar_t *result16 = nullptr;
|
|
DWORD size = FormatMessageW(/*DWORD dwFlags */ flags | FORMAT_MESSAGE_ALLOCATE_BUFFER,
|
|
/*LPCVOID lpSource */ module_to_get_errors_from,
|
|
/*DWORD dwMessageId */ error_code,
|
|
/*DWORD dwLanguageId*/ 0,
|
|
/*LPWSTR lpBuffer */ (LPWSTR)&result16,
|
|
/*DWORD nSize */ 0,
|
|
/*va_list *Arguments */ nullptr);
|
|
|
|
DN_Str16 result = {result16, size};
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_W32Error DN_W32_ErrorCodeToMsgAlloc(DN_U32 error_code)
|
|
{
|
|
DN_W32Error result = {};
|
|
result.code = error_code;
|
|
DN_Str16 error16 = DN_W32_ErrorCodeToMsg16Alloc(error_code);
|
|
if (error16.size)
|
|
result.msg = DN_W32_Str16ToStr8FromHeap(error16);
|
|
if (error16.data)
|
|
LocalFree(error16.data);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_W32Error DN_W32_ErrorCodeToMsg(DN_Arena *arena, DN_U32 error_code)
|
|
{
|
|
DN_W32Error result = {};
|
|
result.code = error_code;
|
|
if (arena) {
|
|
DN_Str16 error16 = DN_W32_ErrorCodeToMsg16Alloc(error_code);
|
|
if (error16.size)
|
|
result.msg = DN_W32_Str16ToStr8(arena, error16);
|
|
if (error16.data)
|
|
LocalFree(error16.data);
|
|
}
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_W32Error DN_W32_LastError(DN_Arena *arena)
|
|
{
|
|
DN_W32Error result = DN_W32_ErrorCodeToMsg(arena, GetLastError());
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_W32Error DN_W32_LastErrorAlloc()
|
|
{
|
|
DN_W32Error result = DN_W32_ErrorCodeToMsgAlloc(GetLastError());
|
|
return result;
|
|
}
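
// NOTE: Example: error formatting /////////////////////////////////////////////////////////////////
// A sketch of turning the calling thread's last Win32 error into a log line straight after a
// failed API call; CreateDirectoryW stands in for any Win32 function that sets the last error and
// 'tmem' is a thread-local scratch arena as used throughout this file.
//
//   if (!CreateDirectoryW(path16.data, nullptr /*lpSecurityAttributes*/)) {
//       DN_W32Error error = DN_W32_LastError(tmem.arena);
//       DN_LOG_ErrorF("CreateDirectoryW failed (%u): %.*s", error.code, DN_STR_FMT(error.msg));
//   }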
|
|
|
|
DN_API void DN_W32_MakeProcessDPIAware()
|
|
{
|
|
typedef bool SetProcessDpiAwareProc(void);
|
|
typedef bool SetProcessDpiAwarenessProc(DPI_AWARENESS);
|
|
typedef bool SetProcessDpiAwarenessContextProc(void * /*DPI_AWARENESS_CONTEXT*/);
|
|
|
|
// NOTE(doyle): Taken from cmuratori/refterm snippet on DPI awareness. It
|
|
// appears we can make this robust by just loading user32.dll and using
|
|
// GetProcAddress on the DPI function. If it's not there, we're on an old
|
|
// version of windows, so we can call an older version of the API.
|
|
void *lib_handle = LoadLibraryA("user32.dll");
|
|
if (!lib_handle)
|
|
return;
|
|
|
|
if (auto *set_process_dpi_awareness_context = DN_CAST(SetProcessDpiAwarenessContextProc *) GetProcAddress(DN_CAST(HMODULE) lib_handle, "SetProcessDpiAwarenessContext"))
|
|
set_process_dpi_awareness_context(DPI_AWARENESS_CONTEXT_PER_MONITOR_AWARE_V2);
|
|
else if (auto *set_process_dpi_awareness = DN_CAST(SetProcessDpiAwarenessProc *) GetProcAddress(DN_CAST(HMODULE) lib_handle, "SetProcessDpiAwareness"))
|
|
set_process_dpi_awareness(DPI_AWARENESS_PER_MONITOR_AWARE);
|
|
else if (auto *set_process_dpi_aware = DN_CAST(SetProcessDpiAwareProc *) GetProcAddress(DN_CAST(HMODULE) lib_handle, "SetProcessDpiAware"))
|
|
set_process_dpi_aware();
|
|
}
|
|
|
|
// NOTE: Windows UTF8 to Str16 //////////////////////////////////////////////
|
|
DN_API DN_Str16 DN_W32_Str8ToStr16(DN_Arena *arena, DN_Str8 src)
|
|
{
|
|
DN_Str16 result = {};
|
|
if (!arena || !DN_Str8_HasData(src))
|
|
return result;
|
|
|
|
int required_size = MultiByteToWideChar(CP_UTF8, 0 /*dwFlags*/, src.data, DN_CAST(int) src.size, nullptr /*dest*/, 0 /*dest size*/);
|
|
if (required_size <= 0)
|
|
return result;
|
|
|
|
wchar_t *buffer = DN_Arena_NewArray(arena, wchar_t, required_size + 1, DN_ZeroMem_No);
|
|
if (!buffer)
|
|
return result;
|
|
|
|
int chars_written = MultiByteToWideChar(CP_UTF8, 0 /*dwFlags*/, src.data, DN_CAST(int) src.size, buffer, required_size);
|
|
if (DN_Check(chars_written == required_size)) {
|
|
result.data = buffer;
|
|
result.size = chars_written;
|
|
result.data[result.size] = 0;
|
|
}
|
|
return result;
|
|
}
|
|
|
|
DN_API int DN_W32_Str8ToStr16Buffer(DN_Str8 src, wchar_t *dest, int dest_size)
|
|
{
|
|
int result = 0;
|
|
if (!DN_Str8_HasData(src))
|
|
return result;
|
|
|
|
result = MultiByteToWideChar(CP_UTF8, 0 /*dwFlags*/, src.data, DN_CAST(int) src.size, nullptr /*dest*/, 0 /*dest size*/);
|
|
if (result <= 0 || result > dest_size || !dest)
|
|
return result;
|
|
|
|
result = MultiByteToWideChar(CP_UTF8, 0 /*dwFlags*/, src.data, DN_CAST(int) src.size, dest, DN_CAST(int) dest_size);
|
|
dest[DN_Min(result, dest_size - 1)] = 0;
|
|
return result;
|
|
}
|
|
|
|
// NOTE: Windows Str16 To UTF8 //////////////////////////////////////////////////////////////////
|
|
DN_API int DN_W32_Str16ToStr8Buffer(DN_Str16 src, char *dest, int dest_size)
|
|
{
|
|
int result = 0;
|
|
if (!DN_Str16_HasData(src))
|
|
return result;
|
|
|
|
int src_size = DN_SaturateCastISizeToInt(src.size);
|
|
if (src_size <= 0)
|
|
return result;
|
|
|
|
result = WideCharToMultiByte(CP_UTF8, 0 /*dwFlags*/, src.data, src_size, nullptr /*dest*/, 0 /*dest size*/, nullptr, nullptr);
|
|
if (result <= 0 || result > dest_size || !dest)
|
|
return result;
|
|
|
|
result = WideCharToMultiByte(CP_UTF8, 0 /*dwFlags*/, src.data, src_size, dest, DN_CAST(int) dest_size, nullptr, nullptr);
|
|
dest[DN_Min(result, dest_size - 1)] = 0;
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_W32_Str16ToStr8(DN_Arena *arena, DN_Str16 src)
|
|
{
|
|
DN_Str8 result = {};
|
|
if (!arena || !DN_Str16_HasData(src))
|
|
return result;
|
|
|
|
int src_size = DN_SaturateCastISizeToInt(src.size);
|
|
if (src_size <= 0)
|
|
return result;
|
|
|
|
int required_size = WideCharToMultiByte(CP_UTF8, 0 /*dwFlags*/, src.data, src_size, nullptr /*dest*/, 0 /*dest size*/, nullptr, nullptr);
|
|
if (required_size <= 0)
|
|
return result;
|
|
|
|
// NOTE: Str8 allocate ensures there's one extra byte for
|
|
    // null-termination already so there's no need to +1 the required size
|
|
DN_ArenaTempMemScope temp_mem = DN_ArenaTempMemScope(arena);
|
|
DN_Str8 buffer = DN_Str8_Alloc(arena, required_size, DN_ZeroMem_No);
|
|
if (!DN_Str8_HasData(buffer))
|
|
return result;
|
|
|
|
int chars_written = WideCharToMultiByte(CP_UTF8, 0 /*dwFlags*/, src.data, src_size, buffer.data, DN_CAST(int) buffer.size, nullptr, nullptr);
|
|
if (DN_Check(chars_written == required_size)) {
|
|
result = buffer;
|
|
result.data[result.size] = 0;
|
|
temp_mem.mem = {};
|
|
}
|
|
|
|
return result;
|
|
}
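
// NOTE: Example: UTF-8 <-> UTF-16 /////////////////////////////////////////////////////////////////
// A sketch of round-tripping a string through the converters above; both results are
// null-terminated and their sizes exclude the terminator. 'tmem' is a thread-local scratch arena
// as used throughout this file.
//
//   DN_Str16 wide = DN_W32_Str8ToStr16(tmem.arena, DN_STR8("héllo")); // UTF-8 -> UTF-16
//   DN_Str8  utf8 = DN_W32_Str16ToStr8(tmem.arena, wide);             // UTF-16 -> UTF-8, matches the input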
|
|
|
|
DN_API DN_Str8 DN_W32_Str16ToStr8FromHeap(DN_Str16 src)
|
|
{
|
|
DN_Str8 result = {};
|
|
if (!DN_Str16_HasData(src))
|
|
return result;
|
|
|
|
int src_size = DN_SaturateCastISizeToInt(src.size);
|
|
if (src_size <= 0)
|
|
return result;
|
|
|
|
int required_size = WideCharToMultiByte(CP_UTF8, 0 /*dwFlags*/, src.data, src_size, nullptr /*dest*/, 0 /*dest size*/, nullptr, nullptr);
|
|
if (required_size <= 0)
|
|
return result;
|
|
|
|
// NOTE: Str8 allocate ensures there's one extra byte for
|
|
    // null-termination already so there's no need to +1 the required size
|
|
DN_Str8 buffer = DN_Str8_AllocFromOSHeap(required_size, DN_ZeroMem_No);
|
|
if (!DN_Str8_HasData(buffer))
|
|
return result;
|
|
|
|
int chars_written = WideCharToMultiByte(CP_UTF8, 0 /*dwFlags*/, src.data, src_size, buffer.data, DN_CAST(int) buffer.size, nullptr, nullptr);
|
|
if (DN_Check(chars_written == required_size)) {
|
|
result = buffer;
|
|
result.data[result.size] = 0;
|
|
} else {
|
|
DN_OS_MemDealloc(buffer.data);
|
|
buffer = {};
|
|
}
|
|
|
|
return result;
|
|
}
|
|
|
|
// NOTE: Windows Executable Directory //////////////////////////////////////////
|
|
DN_API DN_Str16 DN_W32_EXEPathW(DN_Arena *arena)
|
|
{
|
|
DN_OSTLSTMem tmem = DN_OS_TLSTMem(arena);
|
|
DN_Str16 result = {};
|
|
DN_USize module_size = 0;
|
|
wchar_t *module_path = nullptr;
|
|
do {
|
|
module_size += 256;
|
|
module_path = DN_Arena_NewArray(tmem.arena, wchar_t, module_size, DN_ZeroMem_No);
|
|
if (!module_path)
|
|
return result;
|
|
module_size = DN_CAST(DN_USize) GetModuleFileNameW(nullptr /*module*/, module_path, DN_CAST(int) module_size);
|
|
} while (GetLastError() == ERROR_INSUFFICIENT_BUFFER);
|
|
|
|
DN_USize index_of_last_slash = 0;
|
|
for (DN_USize index = module_size - 1; !index_of_last_slash && index < module_size; index--)
|
|
index_of_last_slash = module_path[index] == '\\' ? index : 0;
|
|
|
|
result.data = DN_Arena_NewArray(arena, wchar_t, module_size + 1, DN_ZeroMem_No);
|
|
result.size = module_size;
|
|
DN_Memcpy(result.data, module_path, sizeof(wchar_t) * result.size);
|
|
result.data[result.size] = 0;
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str16 DN_W32_EXEDirW(DN_Arena *arena)
|
|
{
|
|
// TODO(doyle): Implement a DN_Str16_BinarySearchReverse
|
|
DN_OSTLSTMem tmem = DN_OS_TLSTMem(arena);
|
|
DN_Str16 result = {};
|
|
DN_USize module_size = 0;
|
|
wchar_t *module_path = nullptr;
|
|
do {
|
|
module_size += 256;
|
|
module_path = DN_Arena_NewArray(tmem.arena, wchar_t, module_size, DN_ZeroMem_No);
|
|
if (!module_path)
|
|
return result;
|
|
module_size = DN_CAST(DN_USize) GetModuleFileNameW(nullptr /*module*/, module_path, DN_CAST(int) module_size);
|
|
} while (GetLastError() == ERROR_INSUFFICIENT_BUFFER);
|
|
|
|
DN_USize index_of_last_slash = 0;
|
|
for (DN_USize index = module_size - 1; !index_of_last_slash && index < module_size; index--)
|
|
index_of_last_slash = module_path[index] == '\\' ? index : 0;
|
|
|
|
result.data = DN_Arena_NewArray(arena, wchar_t, index_of_last_slash + 1, DN_ZeroMem_No);
|
|
result.size = index_of_last_slash;
|
|
DN_Memcpy(result.data, module_path, sizeof(wchar_t) * result.size);
|
|
result.data[result.size] = 0;
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_W32_WorkingDir(DN_Arena *arena, DN_Str8 suffix)
|
|
{
|
|
DN_OSTLSTMem tmem = DN_OS_TLSTMem(arena);
|
|
DN_Str16 suffix16 = DN_W32_Str8ToStr16(tmem.arena, suffix);
|
|
DN_Str16 dir16 = DN_W32_WorkingDirW(tmem.arena, suffix16);
|
|
DN_Str8 result = DN_W32_Str16ToStr8(arena, dir16);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str16 DN_W32_WorkingDirW(DN_Arena *arena, DN_Str16 suffix)
|
|
{
|
|
DN_Assert(suffix.size >= 0);
|
|
DN_Str16 result = {};
|
|
|
|
// NOTE: required_size is the size required *including* the null-terminator
|
|
DN_OSTLSTMem tmem = DN_OS_TLSTMem(arena);
|
|
unsigned long required_size = GetCurrentDirectoryW(0, nullptr);
|
|
unsigned long desired_size = required_size + DN_CAST(unsigned long) suffix.size;
|
|
|
|
wchar_t *tmem_w_path = DN_Arena_NewArray(tmem.arena, wchar_t, desired_size, DN_ZeroMem_No);
|
|
if (!tmem_w_path)
|
|
return result;
|
|
|
|
unsigned long bytes_written_wo_null_terminator = GetCurrentDirectoryW(desired_size, tmem_w_path);
|
|
if ((bytes_written_wo_null_terminator + 1) != required_size) {
|
|
// TODO(dn): Error
|
|
return result;
|
|
}
|
|
|
|
wchar_t *w_path = DN_Arena_NewArray(arena, wchar_t, desired_size, DN_ZeroMem_No);
|
|
if (!w_path)
|
|
return result;
|
|
|
|
    DN_Memcpy(w_path, tmem_w_path, sizeof(*tmem_w_path) * bytes_written_wo_null_terminator);
    if (suffix.size)
        DN_Memcpy(w_path + bytes_written_wo_null_terminator, suffix.data, sizeof(suffix.data[0]) * suffix.size);
    w_path[desired_size - 1] = 0; // NOTE: desired_size already accounts for the null-terminator slot
|
|
|
|
result = DN_Str16{w_path, DN_CAST(DN_USize)(desired_size - 1)};
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_W32_DirWIterate(DN_Str16 path, DN_W32FolderIteratorW *it)
|
|
{
|
|
WIN32_FIND_DATAW find_data = {};
|
|
if (it->handle) {
|
|
if (FindNextFileW(it->handle, &find_data) == 0) {
|
|
FindClose(it->handle);
|
|
return false;
|
|
}
|
|
} else {
|
|
it->handle = FindFirstFileExW(path.data, /*LPCWSTR lpFileName,*/
|
|
FindExInfoStandard, /*FINDEX_INFO_LEVELS fInfoLevelId,*/
|
|
&find_data, /*LPVOID lpFindFileData,*/
|
|
FindExSearchNameMatch, /*FINDEX_SEARCH_OPS fSearchOp,*/
|
|
nullptr, /*LPVOID lpSearchFilter,*/
|
|
FIND_FIRST_EX_LARGE_FETCH /*unsigned long dwAdditionalFlags)*/);
|
|
|
|
if (it->handle == INVALID_HANDLE_VALUE)
|
|
return false;
|
|
}
|
|
|
|
it->file_name_buf[0] = 0;
|
|
it->file_name = DN_Str16{it->file_name_buf, 0};
|
|
|
|
do {
|
|
        // NOTE: Skip the "." and ".." entries
        if (find_data.cFileName[0] == '.' && (find_data.cFileName[1] == 0 || (find_data.cFileName[1] == '.' && find_data.cFileName[2] == 0)))
|
|
continue;
|
|
|
|
it->file_name.size = DN_CStr16_Size(find_data.cFileName);
|
|
DN_Assert(it->file_name.size < (DN_ArrayCountU(it->file_name_buf) - 1));
|
|
DN_Memcpy(it->file_name.data, find_data.cFileName, it->file_name.size * sizeof(wchar_t));
|
|
it->file_name_buf[it->file_name.size] = 0;
|
|
break;
|
|
} while (FindNextFileW(it->handle, &find_data) != 0);
|
|
|
|
bool result = it->file_name.size > 0;
|
|
if (!result)
|
|
FindClose(it->handle);
|
|
return result;
|
|
}
|
|
#else
|
|
#error Please define a platform e.g. 'DN_PLATFORM_WIN32' to enable the correct implementation for platform APIs
|
|
#endif
|
|
#define DN_CORE_INC_CPP
|
|
|
|
// DN: Single header generator inlined this file => #include "Core/dn_core.cpp"
|
|
static DN_Core *g_dn_core;
|
|
|
|
DN_API void DN_Core_Init(DN_Core *core, DN_CoreOnInit on_init)
|
|
{
|
|
DN_Assert(g_dn_os_core_);
|
|
g_dn_core = core;
|
|
|
|
// NOTE Initialise fields //////////////////////////////////////////////////////////////////////
|
|
#if !defined(DN_NO_PROFILER)
|
|
core->profiler = &core->profiler_default_instance;
|
|
#endif
|
|
|
|
#if defined(DN_LEAK_TRACKING)
|
|
// NOTE: Setup the allocation table with allocation tracking turned off on
|
|
// the arena we're using to initialise the table.
|
|
core->alloc_table_arena = DN_Arena_InitFromOSVMem(DN_Megabytes(1), DN_Kilobytes(512), DN_ArenaFlags_NoAllocTrack | DN_ArenaFlags_AllocCanLeak);
|
|
core->alloc_table = DN_DSMap_Init<DN_DebugAlloc>(&core->alloc_table_arena, 4096, DN_DSMapFlags_Nil);
|
|
#endif
|
|
|
|
// NOTE: Print out init features ///////////////////////////////////////////////////////////////
|
|
DN_OSTLSTMem tmem = DN_OS_TLSPushTMem(nullptr);
|
|
DN_Str8Builder builder = DN_Str8Builder_Init(tmem.arena);
|
|
if (on_init & DN_CoreOnInit_LogLibFeatures) {
|
|
DN_Str8Builder_AppendRef(&builder, DN_STR8("DN initialised:\n"));
|
|
|
|
DN_F64 page_size_kib = g_dn_os_core_->page_size / 1024.0;
|
|
DN_F64 alloc_granularity_kib = g_dn_os_core_->alloc_granularity / 1024.0;
|
|
DN_Str8Builder_AppendF(
|
|
&builder, " OS Page Size/Alloc Granularity: %.1f/%.1fKiB\n", page_size_kib, alloc_granularity_kib);
|
|
|
|
#if DN_HAS_FEATURE(address_sanitizer) || defined(__SANITIZE_ADDRESS__)
|
|
if (DN_ASAN_POISON) {
|
|
DN_Str8Builder_AppendF(
|
|
&builder, " ASAN manual poisoning%s\n", DN_ASAN_VET_POISON ? " (+vet sanity checks)" : "");
|
|
DN_Str8Builder_AppendF(&builder, " ASAN poison guard size: %u\n", DN_ASAN_POISON_GUARD_SIZE);
|
|
}
|
|
#endif
|
|
|
|
#if defined(DN_LEAK_TRACKING)
|
|
DN_Str8Builder_AppendRef(&builder, DN_STR8(" Allocation leak tracing\n"));
|
|
#endif
|
|
|
|
#if !defined(DN_NO_PROFILER)
|
|
DN_Str8Builder_AppendRef(&builder, DN_STR8(" TSC profiler available\n"));
|
|
#endif
|
|
// TODO(doyle): Add stacktrace feature log
|
|
}
|
|
|
|
if (on_init & DN_CoreOnInit_LogCPUFeatures) {
|
|
DN_CPUReport const *report = &g_dn_os_core_->cpu_report;
|
|
DN_Str8 brand = DN_Str8_TrimWhitespaceAround(DN_Str8_Init(report->brand, sizeof(report->brand) - 1));
|
|
DN_MSVC_WARNING_PUSH
|
|
DN_MSVC_WARNING_DISABLE(6284) // Object passed as _Param_(3) when a string is required in call to 'DN_Str8Builder_AppendF' Actual type: 'struct DN_Str8'.
|
|
DN_Str8Builder_AppendF(&builder, " CPU '%S' from '%s' detected:\n", brand, report->vendor);
|
|
DN_MSVC_WARNING_POP
|
|
|
|
DN_USize longest_feature_name = 0;
|
|
for (DN_ForIndexU(feature_index, DN_CPUFeature_Count)) {
|
|
DN_CPUFeatureDecl feature_decl = g_dn_cpu_feature_decl[feature_index];
|
|
longest_feature_name = DN_Max(longest_feature_name, feature_decl.label.size);
|
|
}
|
|
|
|
for (DN_ForIndexU(feature_index, DN_CPUFeature_Count)) {
|
|
DN_CPUFeatureDecl feature_decl = g_dn_cpu_feature_decl[feature_index];
|
|
bool has_feature = DN_CPU_HasFeature(report, feature_decl.value);
|
|
DN_Str8Builder_AppendF(&builder,
|
|
" %.*s:%*s%s\n",
|
|
DN_STR_FMT(feature_decl.label),
|
|
DN_CAST(int)(longest_feature_name - feature_decl.label.size),
|
|
"",
|
|
has_feature ? "available" : "not available");
|
|
}
|
|
}
|
|
|
|
DN_Str8 info_log = DN_Str8Builder_Build(&builder, tmem.arena);
|
|
if (DN_Str8_HasData(info_log))
|
|
DN_LOG_DebugF("%.*s", DN_STR_FMT(info_log));
|
|
}
|
|
|
|
DN_API void DN_Core_BeginFrame()
|
|
{
|
|
DN_Atomic_SetValue64(&g_dn_os_core_->mem_allocs_frame, 0);
|
|
}
|
|
|
|
#if !defined(DN_NO_PROFILER)
|
|
DN_API void DN_Core_SetProfiler(DN_Profiler *profiler)
|
|
{
|
|
if (profiler)
|
|
g_dn_core->profiler = profiler;
|
|
}
|
|
#endif
|
|
// DN: Single header generator inlined this file => #include "Core/dn_core_debug.cpp"
|
|
#define DN_CORE_DEBUG_CPP
|
|
|
|
DN_API DN_StackTraceWalkResult DN_StackTrace_Walk(DN_Arena *arena, uint16_t limit)
|
|
{
|
|
DN_StackTraceWalkResult result = {};
|
|
#if defined(DN_OS_WIN32)
|
|
if (!arena)
|
|
return result;
|
|
|
|
static DN_TicketMutex mutex = {};
|
|
DN_TicketMutex_Begin(&mutex);
|
|
|
|
HANDLE thread = GetCurrentThread();
|
|
result.process = GetCurrentProcess();
|
|
|
|
DN_W32Core *w32 = DN_OS_GetW32Core_();
|
|
if (!w32->sym_initialised) {
|
|
w32->sym_initialised = true;
|
|
SymSetOptions(SYMOPT_LOAD_LINES);
|
|
if (!SymInitialize(result.process, nullptr /*UserSearchPath*/, true /*fInvadeProcess*/)) {
|
|
DN_OSTLSTMem tmem = DN_OS_TLSTMem(arena);
|
|
DN_W32Error error = DN_W32_LastError(tmem.arena);
|
|
DN_LOG_ErrorF("SymInitialize failed, stack trace can not be generated (%lu): %.*s\n", error.code, DN_STR_FMT(error.msg));
|
|
}
|
|
}
|
|
|
|
CONTEXT context;
|
|
RtlCaptureContext(&context);
|
|
|
|
STACKFRAME64 frame = {};
|
|
frame.AddrPC.Offset = context.Rip;
|
|
frame.AddrPC.Mode = AddrModeFlat;
|
|
frame.AddrFrame.Offset = context.Rbp;
|
|
frame.AddrFrame.Mode = AddrModeFlat;
|
|
frame.AddrStack.Offset = context.Rsp;
|
|
frame.AddrStack.Mode = AddrModeFlat;
|
|
|
|
DN_FArray<uint64_t, 256> raw_frames = {};
|
|
while (raw_frames.size < limit) {
|
|
if (!StackWalk64(IMAGE_FILE_MACHINE_AMD64,
|
|
result.process,
|
|
thread,
|
|
&frame,
|
|
&context,
|
|
nullptr /*ReadMemoryRoutine*/,
|
|
SymFunctionTableAccess64,
|
|
SymGetModuleBase64,
|
|
nullptr /*TranslateAddress*/))
|
|
break;
|
|
|
|
// NOTE: It might be useful one day to use frame.AddrReturn.Offset.
|
|
// If AddrPC.Offset == AddrReturn.Offset then we can detect recursion.
|
|
DN_FArray_Add(&raw_frames, frame.AddrPC.Offset);
|
|
}
|
|
DN_TicketMutex_End(&mutex);
|
|
|
|
result.base_addr = DN_Arena_NewArray(arena, uint64_t, raw_frames.size, DN_ZeroMem_No);
|
|
result.size = DN_CAST(uint16_t) raw_frames.size;
|
|
DN_Memcpy(result.base_addr, raw_frames.data, raw_frames.size * sizeof(raw_frames.data[0]));
|
|
#else
|
|
(void)limit;
|
|
(void)arena;
|
|
#endif
|
|
return result;
|
|
}
|
|
|
|
static void DN_StackTrace_AddWalkToStr8Builder_(DN_StackTraceWalkResult const *walk, DN_Str8Builder *builder, DN_USize skip)
|
|
{
|
|
DN_StackTraceRawFrame raw_frame = {};
|
|
raw_frame.process = walk->process;
|
|
for (DN_USize index = skip; index < walk->size; index++) {
|
|
raw_frame.base_addr = walk->base_addr[index];
|
|
DN_StackTraceFrame frame = DN_StackTrace_RawFrameToFrame(builder->arena, raw_frame);
|
|
DN_Str8Builder_AppendF(builder, "%.*s(%zu): %.*s%s", DN_STR_FMT(frame.file_name), frame.line_number, DN_STR_FMT(frame.function_name), (DN_CAST(int) index == walk->size - 1) ? "" : "\n");
|
|
}
|
|
}
|
|
|
|
DN_API bool DN_StackTrace_WalkResultIterate(DN_StackTraceWalkResultIterator *it, DN_StackTraceWalkResult const *walk)
|
|
{
|
|
bool result = false;
|
|
if (!it || !walk || !walk->base_addr || !walk->process)
|
|
return result;
|
|
|
|
if (it->index >= walk->size)
|
|
return false;
|
|
|
|
result = true;
|
|
it->raw_frame.process = walk->process;
|
|
it->raw_frame.base_addr = walk->base_addr[it->index++];
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_StackTrace_WalkResultToStr8(DN_Arena *arena, DN_StackTraceWalkResult const *walk, uint16_t skip)
|
|
{
|
|
DN_Str8 result{};
|
|
if (!walk || !arena)
|
|
return result;
|
|
|
|
DN_OSTLSTMem tmem = DN_OS_TLSTMem(arena);
|
|
DN_Str8Builder builder = DN_Str8Builder_Init(tmem.arena);
|
|
DN_StackTrace_AddWalkToStr8Builder_(walk, &builder, skip);
|
|
result = DN_Str8Builder_Build(&builder, arena);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_StackTrace_WalkStr8(DN_Arena *arena, uint16_t limit, uint16_t skip)
|
|
{
|
|
DN_OSTLSTMem tmem = DN_OS_TLSPushTMem(arena);
|
|
DN_StackTraceWalkResult walk = DN_StackTrace_Walk(tmem.arena, limit);
|
|
DN_Str8 result = DN_StackTrace_WalkResultToStr8(arena, &walk, skip);
|
|
return result;
|
|
}
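
// NOTE: Example: stack trace //////////////////////////////////////////////////////////////////////
// A sketch of capturing and logging the current call stack; the limit and skip counts are
// arbitrary (skip trims the trace's own frames from the output).
//
//   DN_OSTLSTMem tmem  = DN_OS_TLSTMem(nullptr);
//   DN_Str8      trace = DN_StackTrace_WalkStr8(tmem.arena, 64 /*limit*/, 2 /*skip*/);
//   DN_LOG_DebugF("%.*s", DN_STR_FMT(trace));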
|
|
|
|
DN_API DN_Str8 DN_StackTrace_WalkStr8FromHeap(uint16_t limit, uint16_t skip)
|
|
{
|
|
// NOTE: We don't use WalkResultToStr8 because that uses the TLS arenas which
|
|
// does not use the OS heap.
|
|
DN_Arena arena = DN_Arena_InitFromOSHeap(DN_Kilobytes(64), DN_ArenaFlags_NoAllocTrack);
|
|
DN_Str8Builder builder = DN_Str8Builder_Init(&arena);
|
|
DN_StackTraceWalkResult walk = DN_StackTrace_Walk(&arena, limit);
|
|
DN_StackTrace_AddWalkToStr8Builder_(&walk, &builder, skip);
|
|
DN_Str8 result = DN_Str8Builder_BuildFromOSHeap(&builder);
|
|
DN_Arena_Deinit(&arena);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Slice<DN_StackTraceFrame> DN_StackTrace_GetFrames(DN_Arena *arena, uint16_t limit)
|
|
{
|
|
DN_Slice<DN_StackTraceFrame> result = {};
|
|
if (!arena)
|
|
return result;
|
|
|
|
DN_OSTLSTMem tmem = DN_OS_TLSTMem(arena);
|
|
DN_StackTraceWalkResult walk = DN_StackTrace_Walk(tmem.arena, limit);
|
|
if (!walk.size)
|
|
return result;
|
|
|
|
DN_USize slice_index = 0;
|
|
result = DN_Slice_Alloc<DN_StackTraceFrame>(arena, walk.size, DN_ZeroMem_No);
|
|
for (DN_StackTraceWalkResultIterator it = {}; DN_StackTrace_WalkResultIterate(&it, &walk); ) {
|
|
result.data[slice_index++] = DN_StackTrace_RawFrameToFrame(arena, it.raw_frame);
|
|
}
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_StackTraceFrame DN_StackTrace_RawFrameToFrame(DN_Arena *arena, DN_StackTraceRawFrame raw_frame)
|
|
{
|
|
#if defined(DN_OS_WIN32)
|
|
// NOTE: Get line+filename /////////////////////////////////////////////////////////////////////
|
|
|
|
// TODO: Why does zero-initialising this with `line = {};` cause
// SymGetLineFromAddrW64 to fail once we reach the __scrt_common_main_seh and
// BaseThreadInitThunk frames? The line and file number in the result are
// still valid and are what we use, so we silently ignore this error.
|
|
IMAGEHLP_LINEW64 line;
|
|
line.SizeOfStruct = sizeof(line);
|
|
DWORD line_displacement = 0;
|
|
if (!SymGetLineFromAddrW64(raw_frame.process, raw_frame.base_addr, &line_displacement, &line)) {
|
|
line = {};
|
|
}
|
|
|
|
// NOTE: Get function name /////////////////////////////////////////////////////////////////////
|
|
|
|
alignas(SYMBOL_INFOW) char buffer[sizeof(SYMBOL_INFOW) + (MAX_SYM_NAME * sizeof(wchar_t))] = {};
|
|
SYMBOL_INFOW *symbol = DN_CAST(SYMBOL_INFOW *)buffer;
|
|
symbol->SizeOfStruct = sizeof(*symbol);
|
|
symbol->MaxNameLen = MAX_SYM_NAME; // NOTE: Specified in characters (not bytes) for SYMBOL_INFOW; the buffer reserves MAX_SYM_NAME wide characters after the struct
|
|
|
|
uint64_t symbol_displacement = 0; // Offset from the beginning of the symbol to the address
|
|
SymFromAddrW(raw_frame.process, raw_frame.base_addr, &symbol_displacement, symbol);
|
|
|
|
// NOTE: Construct result //////////////////////////////////////////////////////////////////////
|
|
|
|
DN_Str16 file_name16 = DN_Str16{line.FileName, DN_CStr16_Size(line.FileName)};
|
|
DN_Str16 function_name16 = DN_Str16{symbol->Name, symbol->NameLen};
|
|
|
|
DN_StackTraceFrame result = {};
|
|
result.address = raw_frame.base_addr;
|
|
result.line_number = line.LineNumber;
|
|
result.file_name = DN_W32_Str16ToStr8(arena, file_name16);
|
|
result.function_name = DN_W32_Str16ToStr8(arena, function_name16);
|
|
|
|
if (!DN_Str8_HasData(result.function_name))
|
|
result.function_name = DN_STR8("<unknown function>");
|
|
if (!DN_Str8_HasData(result.file_name))
|
|
result.file_name = DN_STR8("<unknown file>");
|
|
#else
|
|
DN_StackTraceFrame result = {};
|
|
#endif
|
|
return result;
|
|
}
|
|
|
|
DN_API void DN_StackTrace_Print(uint16_t limit)
|
|
{
|
|
DN_OSTLSTMem tmem = DN_OS_TLSTMem(nullptr);
|
|
DN_Slice<DN_StackTraceFrame> stack_trace = DN_StackTrace_GetFrames(tmem.arena, limit);
|
|
for (DN_StackTraceFrame &frame : stack_trace)
|
|
DN_OS_PrintErrLnF("%.*s(%I64u): %.*s", DN_STR_FMT(frame.file_name), frame.line_number, DN_STR_FMT(frame.function_name));
|
|
}
|
|
|
|
DN_API void DN_StackTrace_ReloadSymbols()
|
|
{
|
|
#if defined(DN_OS_WIN32)
|
|
HANDLE process = GetCurrentProcess();
|
|
SymRefreshModuleList(process);
|
|
#endif
|
|
}
|
|
|
|
// NOTE: DN_Debug //////////////////////////////////////////////////////////////////////////////////
|
|
#if defined(DN_LEAK_TRACKING)
|
|
DN_API void DN_Debug_TrackAlloc(void *ptr, DN_USize size, bool leak_permitted)
|
|
{
|
|
if (!ptr)
|
|
return;
|
|
|
|
DN_TicketMutex_Begin(&g_dn_core->alloc_table_mutex);
|
|
DN_DEFER
|
|
{
|
|
DN_TicketMutex_End(&g_dn_core->alloc_table_mutex);
|
|
};
|
|
|
|
// NOTE: If the entry was not added, we are reusing a pointer that has been freed.
|
|
// TODO: Add API for always making the item but exposing a var to indicate if the item was newly created or it
|
|
// already existed.
|
|
DN_Str8 stack_trace = DN_StackTrace_WalkStr8FromHeap(128, 3 /*skip*/);
|
|
DN_DSMap<DN_DebugAlloc> *alloc_table = &g_dn_core->alloc_table;
|
|
DN_DSMapResult<DN_DebugAlloc> alloc_entry = DN_DSMap_MakeKeyU64(alloc_table, DN_CAST(uint64_t) ptr);
|
|
DN_DebugAlloc *alloc = alloc_entry.value;
|
|
if (alloc_entry.found) {
|
|
if ((alloc->flags & DN_DebugAllocFlag_Freed) == 0) {
|
|
DN_Str8 alloc_size = DN_CVT_U64ToByteSizeStr8(alloc_table->arena, alloc->size, DN_CVTU64ByteSizeType_Auto);
|
|
DN_Str8 new_alloc_size = DN_CVT_U64ToByteSizeStr8(alloc_table->arena, size, DN_CVTU64ByteSizeType_Auto);
|
|
DN_HardAssertF(
|
|
alloc->flags & DN_DebugAllocFlag_Freed,
|
|
"This pointer is already in the leak tracker, however it has not been freed yet. This "
|
|
"same pointer is being ask to be tracked twice in the allocation table, e.g. one if its "
|
|
"previous free calls has not being marked freed with an equivalent call to "
|
|
"DN_Debug_TrackDealloc()\n"
|
|
"\n"
|
|
"The pointer (0x%p) originally allocated %.*s at:\n"
|
|
"\n"
|
|
"%.*s\n"
|
|
"\n"
|
|
"The pointer is allocating %.*s again at:\n"
|
|
"\n"
|
|
"%.*s\n",
|
|
ptr,
|
|
DN_STR_FMT(alloc_size),
|
|
DN_STR_FMT(alloc->stack_trace),
|
|
DN_STR_FMT(new_alloc_size),
|
|
DN_STR_FMT(stack_trace));
|
|
}
|
|
|
|
// NOTE: Pointer was reused, clean up the prior entry
|
|
g_dn_core->alloc_table_bytes_allocated_for_stack_traces -= alloc->stack_trace.size;
|
|
g_dn_core->alloc_table_bytes_allocated_for_stack_traces -= alloc->freed_stack_trace.size;
|
|
|
|
DN_OS_MemDealloc(alloc->stack_trace.data);
|
|
DN_OS_MemDealloc(alloc->freed_stack_trace.data);
|
|
*alloc = {};
|
|
}
|
|
|
|
alloc->ptr = ptr;
|
|
alloc->size = size;
|
|
alloc->stack_trace = stack_trace;
|
|
alloc->flags |= leak_permitted ? DN_DebugAllocFlag_LeakPermitted : 0;
|
|
g_dn_core->alloc_table_bytes_allocated_for_stack_traces += alloc->stack_trace.size;
|
|
}
|
|
|
|
DN_API void DN_Debug_TrackDealloc(void *ptr)
|
|
{
|
|
if (!ptr)
|
|
return;
|
|
|
|
DN_TicketMutex_Begin(&g_dn_core->alloc_table_mutex);
|
|
DN_DEFER { DN_TicketMutex_End(&g_dn_core->alloc_table_mutex); };
|
|
|
|
DN_Str8 stack_trace = DN_StackTrace_WalkStr8FromHeap(128, 3 /*skip*/);
|
|
DN_DSMap<DN_DebugAlloc> *alloc_table = &g_dn_core->alloc_table;
|
|
DN_DSMapResult<DN_DebugAlloc> alloc_entry = DN_DSMap_FindKeyU64(alloc_table, DN_CAST(uintptr_t) ptr);
|
|
DN_HardAssertF(alloc_entry.found,
|
|
"Allocated pointer can not be removed as it does not exist in the "
|
|
"allocation table. When this memory was allocated, the pointer was "
|
|
"not added to the allocation table [ptr=%p]",
|
|
ptr);
|
|
|
|
DN_DebugAlloc *alloc = alloc_entry.value;
|
|
if (alloc->flags & DN_DebugAllocFlag_Freed) {
|
|
DN_Str8 freed_size = DN_CVT_U64ToByteSizeStr8(alloc_table->arena, alloc->freed_size, DN_CVTU64ByteSizeType_Auto);
|
|
DN_HardAssertF((alloc->flags & DN_DebugAllocFlag_Freed) == 0,
|
|
"Double free detected, pointer to free was already marked "
|
|
"as freed. Either the pointer was reallocated but not "
|
|
"traced, or, the pointer was freed twice.\n"
|
|
"\n"
|
|
"The pointer (0x%p) originally allocated %.*s at:\n"
|
|
"\n"
|
|
"%.*s\n"
|
|
"\n"
|
|
"The pointer was freed at:\n"
|
|
"\n"
|
|
"%.*s\n"
|
|
"\n"
|
|
"The pointer is being freed again at:\n"
|
|
"\n"
|
|
"%.*s\n"
|
|
,
|
|
ptr, DN_STR_FMT(freed_size),
|
|
DN_STR_FMT(alloc->stack_trace),
|
|
DN_STR_FMT(alloc->freed_stack_trace),
|
|
DN_STR_FMT(stack_trace));
|
|
}
|
|
|
|
DN_Assert(!DN_Str8_HasData(alloc->freed_stack_trace));
|
|
alloc->flags |= DN_DebugAllocFlag_Freed;
|
|
alloc->freed_stack_trace = stack_trace;
|
|
g_dn_core->alloc_table_bytes_allocated_for_stack_traces += alloc->freed_stack_trace.size;
|
|
}
|
|
|
|
DN_API void DN_Debug_DumpLeaks()
|
|
{
|
|
uint64_t leak_count = 0;
|
|
uint64_t leaked_bytes = 0;
|
|
for (DN_USize index = 1; index < g_dn_core->alloc_table.occupied; index++) {
|
|
DN_DSMapSlot<DN_DebugAlloc> *slot = g_dn_core->alloc_table.slots + index;
|
|
DN_DebugAlloc *alloc = &slot->value;
|
|
bool alloc_leaked = (alloc->flags & DN_DebugAllocFlag_Freed) == 0;
|
|
bool leak_permitted = (alloc->flags & DN_DebugAllocFlag_LeakPermitted);
|
|
if (alloc_leaked && !leak_permitted) {
|
|
leaked_bytes += alloc->size;
|
|
leak_count++;
|
|
DN_Str8 alloc_size = DN_CVT_U64ToByteSizeStr8(g_dn_core->alloc_table.arena, alloc->size, DN_CVTU64ByteSizeType_Auto);
|
|
DN_LOG_WarningF("Pointer (0x%p) leaked %.*s at:\n"
|
|
"%.*s",
|
|
alloc->ptr, DN_STR_FMT(alloc_size),
|
|
DN_STR_FMT(alloc->stack_trace));
|
|
}
|
|
}
|
|
|
|
if (leak_count) {
|
|
char buffer[512];
|
|
DN_Arena arena = DN_Arena_InitFromBuffer(buffer, sizeof(buffer), DN_ArenaFlags_Nil);
|
|
DN_Str8 leak_size = DN_CVT_U64ToByteSizeStr8(&arena, leaked_bytes, DN_CVTU64ByteSizeType_Auto);
|
|
DN_LOG_WarningF("There were %I64u leaked allocations totalling %.*s", leak_count, DN_STR_FMT(leak_size));
|
|
}
|
|
}
|
|
#endif // DN_LEAK_TRACKING
|
|
|
|
#if !defined(DN_NO_PROFILER)
|
|
// NOTE: DN_Profiler ///////////////////////////////////////////////////////////////////////////////
|
|
DN_API DN_ProfilerZoneScope::DN_ProfilerZoneScope(DN_Str8 name, uint16_t anchor_index)
|
|
{
|
|
zone = DN_Profiler_BeginZoneAtIndex(name, anchor_index);
|
|
}
|
|
|
|
DN_API DN_ProfilerZoneScope::~DN_ProfilerZoneScope()
|
|
{
|
|
DN_Profiler_EndZone(zone);
|
|
}
|
|
|
|
DN_API DN_ProfilerAnchor *DN_Profiler_ReadBuffer()
|
|
{
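// NOTE: 'anchors' holds a small set of anchor buffers whose count must be a
// power of two (the mask below relies on this), so the running
// 'active_anchor_buffer' counter indexes it like a ring. The read buffer is
// the buffer filled before the most recent swap (i.e. the last completed
// frame) whilst the write buffer is the one currently being recorded into.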
|
|
uint8_t mask = DN_ArrayCountU(g_dn_core->profiler->anchors) - 1;
|
|
DN_ProfilerAnchor *result = g_dn_core->profiler->anchors[(g_dn_core->profiler->active_anchor_buffer - 1) & mask];
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_ProfilerAnchor *DN_Profiler_WriteBuffer()
|
|
{
|
|
uint8_t mask = DN_ArrayCountU(g_dn_core->profiler->anchors) - 1;
|
|
DN_ProfilerAnchor *result = g_dn_core->profiler->anchors[(g_dn_core->profiler->active_anchor_buffer + 0) & mask];
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_ProfilerZone DN_Profiler_BeginZoneAtIndex(DN_Str8 name, uint16_t anchor_index)
|
|
{
|
|
DN_ProfilerAnchor *anchor = DN_Profiler_WriteBuffer() + anchor_index;
|
|
// TODO: We need per-thread-local-storage profiler so that we can use these apis
|
|
// across threads. For now, we let them overwrite each other but this is not tenable.
|
|
#if 0
|
|
if (DN_Str8_HasData(anchor->name) && anchor->name != name)
|
|
DN_AssertF(name == anchor->name, "Potentially overwriting a zone by accident? Anchor is '%.*s', name is '%.*s'", DN_STR_FMT(anchor->name), DN_STR_FMT(name));
|
|
#endif
|
|
anchor->name = name;
|
|
DN_ProfilerZone result = {};
|
|
result.begin_tsc = DN_CPU_TSC();
|
|
result.anchor_index = anchor_index;
|
|
result.parent_zone = g_dn_core->profiler->parent_zone;
|
|
result.elapsed_tsc_at_zone_start = anchor->tsc_inclusive;
|
|
g_dn_core->profiler->parent_zone = anchor_index;
|
|
return result;
|
|
}
|
|
|
|
DN_API void DN_Profiler_EndZone(DN_ProfilerZone zone)
|
|
{
|
|
uint64_t elapsed_tsc = DN_CPU_TSC() - zone.begin_tsc;
|
|
DN_ProfilerAnchor *anchor_buffer = DN_Profiler_WriteBuffer();
|
|
DN_ProfilerAnchor *anchor = anchor_buffer + zone.anchor_index;
|
|
|
|
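// NOTE: 'tsc_inclusive' is assigned (not accumulated) from the value captured
// at zone begin plus this zone's elapsed time so that recursive/nested calls
// into the same anchor do not double count inclusive time. 'tsc_exclusive'
// accumulates the elapsed time here and the parent anchor is debited below so
// that the parent only keeps time spent outside of its child zones.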
anchor->hit_count++;
|
|
anchor->tsc_inclusive = zone.elapsed_tsc_at_zone_start + elapsed_tsc;
|
|
anchor->tsc_exclusive += elapsed_tsc;
|
|
|
|
DN_ProfilerAnchor *parent_anchor = anchor_buffer + zone.parent_zone;
|
|
parent_anchor->tsc_exclusive -= elapsed_tsc;
|
|
g_dn_core->profiler->parent_zone = zone.parent_zone;
|
|
}
|
|
|
|
DN_API void DN_Profiler_SwapAnchorBuffer()
|
|
{
|
|
g_dn_core->profiler->active_anchor_buffer++;
|
|
g_dn_core->profiler->parent_zone = 0;
|
|
DN_ProfilerAnchor *anchors = DN_Profiler_WriteBuffer();
|
|
DN_Memset(anchors,
|
|
0,
|
|
DN_ArrayCountU(g_dn_core->profiler->anchors[0]) * sizeof(g_dn_core->profiler->anchors[0][0]));
|
|
}
|
|
|
|
DN_API void DN_Profiler_Dump(uint64_t tsc_per_second)
|
|
{
|
|
DN_ProfilerAnchor *anchors = DN_Profiler_ReadBuffer();
|
|
for (size_t anchor_index = 1; anchor_index < DN_PROFILER_ANCHOR_BUFFER_SIZE; anchor_index++) {
|
|
DN_ProfilerAnchor const *anchor = anchors + anchor_index;
|
|
if (!anchor->hit_count)
|
|
continue;
|
|
|
|
uint64_t tsc_exclusive = anchor->tsc_exclusive;
|
|
uint64_t tsc_inclusive = anchor->tsc_inclusive;
|
|
DN_F64 tsc_exclusive_milliseconds = tsc_exclusive * 1000 / DN_CAST(DN_F64) tsc_per_second;
|
|
if (tsc_exclusive == tsc_inclusive) {
|
|
DN_OS_PrintOutLnF("%.*s[%u]: %.1fms", DN_STR_FMT(anchor->name), anchor->hit_count, tsc_exclusive_milliseconds);
|
|
} else {
|
|
DN_F64 tsc_inclusive_milliseconds = tsc_inclusive * 1000 / DN_CAST(DN_F64) tsc_per_second;
|
|
DN_OS_PrintOutLnF("%.*s[%u]: %.1f/%.1fms",
|
|
DN_STR_FMT(anchor->name),
|
|
anchor->hit_count,
|
|
tsc_exclusive_milliseconds,
|
|
tsc_inclusive_milliseconds);
|
|
}
|
|
}
|
|
}
|
|
#endif // !defined(DN_NO_PROFILER)
|
|
|
|
// DN: Single header generator inlined this file => #include "Core/dn_core_demo.cpp"
|
|
/*
|
|
////////////////////////////////////////////////////////////////////////////////////////////////////
|
|
//
|
|
// $$$$$$$\ $$$$$$\ $$$$$$\ $$$$$$\
|
|
// $$ __$$\ $$ __$$\ $$ __$$\ $$ __$$\
|
|
// $$ | $$ |$$ / $$ |$$ / \__|$$ / \__|
|
|
// $$ | $$ |$$ | $$ |$$ | \$$$$$$\
|
|
// $$ | $$ |$$ | $$ |$$ | \____$$\
|
|
// $$ | $$ |$$ | $$ |$$ | $$\ $$\ $$ |
|
|
// $$$$$$$ | $$$$$$ |\$$$$$$ |\$$$$$$ |
|
|
// \_______/ \______/ \______/ \______/
|
|
//
|
|
// dn_docs.cpp -- Library documentation via real code examples
|
|
//
|
|
////////////////////////////////////////////////////////////////////////////////////////////////////
|
|
//
|
|
// Use this file for documentation and examples of the various APIs in this
|
|
// library. Normally docs are written as inline comments in header files,
|
|
// however, these quickly go out of date as APIs change. Instead, I provide
|
|
// some example code that compiles here that serves to also document the API.
|
|
//
|
|
// The library header files then become a very minimal reference of exactly the
|
|
// function prototypes and definitions instead of massive reams of inline
|
|
// comments that visually space out the functions and hinders discoverability
|
|
// and/or conciseness of being able to learn the breadth of the APIs.
|
|
//
|
|
////////////////////////////////////////////////////////////////////////////////////////////////////
|
|
*/
|
|
|
|
DN_MSVC_WARNING_PUSH
|
|
DN_MSVC_WARNING_DISABLE(4702) // unreachable code
|
|
|
|
void DN_Docs_Demo()
|
|
{
|
|
// NOTE: Before using anything in the library, DN_Core_Init() must be
|
|
// called, for example:
|
|
#if 0
|
|
DN_Core core = {};
|
|
DN_Core_Init(&core, DN_CoreOnInit_Nil);
|
|
#endif
|
|
|
|
// NOTE: DN_Atomic_SetValue64 /////////////////////////////////////////////////////////////////
|
|
// NOTE: DN_Atomic_SetValue32 /////////////////////////////////////////////////////////////////
|
|
// Atomically set the value into the target using an atomic compare and swap
|
|
// idiom. The return value of the function is the value that was last stored
|
|
// in the target.
|
|
{
|
|
uint64_t target = 8;
|
|
uint64_t value_to_set = 0xCAFE;
|
|
if (DN_Atomic_SetValue64(&target, value_to_set) == 8) {
|
|
// Atomic swap was successful, e.g. the last value that this thread
|
|
// observed was '8' which is the value we initialised with e.g. no
|
|
// other thread has modified the value.
|
|
}
|
|
}
|
|
|
|
// NOTE: DN_CVT_BytesToHex ////////////////////////////////////////////////////////////////////////
|
|
{
|
|
DN_OSTLSTMem tmem = DN_OS_TLSTMem(nullptr);
|
|
unsigned char bytes[2] = {0xFA, 0xCE};
|
|
DN_Str8 hex = DN_CVT_BytesToHex(tmem.arena, bytes, sizeof(bytes));
|
|
DN_Assert(hex == DN_STR8("face")); // NOTE: Guaranteed to be null-terminated
|
|
}
|
|
|
|
// NOTE: DN_Check /////////////////////////////////////////////////////////////////////////////
|
|
//
|
|
// Check the expression, trapping in debug builds. In release builds the trap
// is removed and the expression is evaluated as if it were a normal 'if' branch.
|
|
//
|
|
// This allows handling of the condition gracefully when compiled out but
|
|
// traps to notify the developer in builds when it's compiled in.
|
|
{
|
|
bool flag = true;
|
|
if (DN_CheckF(flag, "Flag was false!")) {
|
|
// This branch will execute!
|
|
}
|
|
}
|
|
|
|
// NOTE: DN_CPUID /////////////////////////////////////////////////////////////////////////////
|
|
// Execute the 'CPUID' instruction which lets you query the capabilities of
|
|
// the current CPU.
|
|
|
|
// NOTE: DN_DEFER
|
|
//
|
|
// A macro that expands to a C++ lambda that executes arbitrary code on
|
|
// scope exit.
|
|
{
|
|
int x = 0;
|
|
DN_DEFER
|
|
{
|
|
x = 3;
|
|
};
|
|
x = 1;
|
|
// On scope exit, DN_DEFER object executes and assigns x = 3
|
|
}
|
|
|
|
// NOTE: DN_DSMap /////////////////////////////////////////////////////////////////////////////
|
|
//
|
|
// A hash table configured using the presets recommended by Demitri Spanos
|
|
// from the Handmade Network (HMN),
|
|
//
|
|
// - power of two capacity
|
|
// - grow by 2x on load >= 75%
|
|
// - open-addressing with linear probing
|
|
// - separate large values (esp. variable length values) into a separate table
|
|
// - use a well-known hash function: MurmurHash3 (or xxhash, city, spooky ...)
|
|
// - chain-repair on delete (rehash items in the probe chain after delete)
|
|
// - shrink by 1/2 on load < 25% (suggested by Martins Mmozeiko of HMN)
|
|
//
|
|
// Source: discord.com/channels/239737791225790464/600063880533770251/941835678424129597
|
|
//
|
|
// This hash-table stores slots (values) separate from the hash mapping.
|
|
// Hashes are mapped to slots using the hash-to-slot array which is an array
|
|
// of slot indexes. This array intentionally only stores indexes to maximise
|
|
// usage of the cache line. Linear probing on collision will only cost a
|
|
// couple of cycles to fetch from L1 cache the next slot index to attempt.
|
|
//
|
|
// The slots array stores values contiguously, non-sorted allowing iteration
|
|
// of the map. On element erase, the last element is swapped into the
|
|
// deleted element causing the non-sorted property of this table.
|
|
//
|
|
// The 0th slot (DN_DS_MAP_SENTINEL_SLOT) in the slots array is reserved
|
|
// for a sentinel value, e.g. all zeros value. After map initialisation the
|
|
// 'occupied' value of the array will be set to 1 to exclude the sentinel
|
|
// from the capacity of the table. Skip the first value if you are iterating
|
|
// the hash table!
|
|
//
|
|
// This hash-table accepts either a U64 or a buffer (ptr + len) as the key.
// In practice this covers the majority of use cases (string, buffer and
// number keys). It also allows us to minimise our C++ templates to only
// require 1 template parameter, the Value type of the hash-table, which
// simplifies the interface and reduces the cruft brought by C++.
|
|
//
|
|
// Keys are value-copied into the hash-table. If the key uses a pointer to a
|
|
// buffer, this buffer must be valid throughout the lifetime of the hash
|
|
// table!
|
|
{
|
|
// NOTE: DN_DSMap_Init //////////////////////////////////////////////////////////////////
|
|
// NOTE: DN_DSMap_Deinit //////////////////////////////////////////////////////////////////
|
|
//
|
|
// Initialise a hash table where the table size *must* be a
|
|
// power-of-two, otherwise an assert will be triggered. If
|
|
// initialisation fails (e.g. memory allocation failure) the table is
|
|
// returned zero-initialised where a call to 'IsValid' will return
|
|
// false.
|
|
//
|
|
// The map takes ownership of the arena. This means in practice that if the
|
|
// map needs to resize (e.g. because the load threshold of the table is
|
|
// exceeded), the arena associated with it will be released and the memory
|
|
// will be reallocated with the larger capacity and reassigned to the arena.
|
|
//
|
|
// In simple terms, when the map resizes it invalidates all memory that was
|
|
// previously allocated with the given arena!
|
|
//
|
|
// A 'Deinit' of the map will similarly deallocate the passed in arena (as
|
|
// the map takes ownership of the arena).
|
|
DN_Arena arena = DN_Arena_InitFromOSVMem(0, 0, DN_ArenaFlags_Nil);
|
|
DN_DSMap<int> map = DN_DSMap_Init<int>(&arena, /*size*/ 1024, DN_DSMapFlags_Nil); // Size must be PoT!
|
|
DN_Assert(DN_DSMap_IsValid(&map)); // Valid if no initialisation failure (e.g. mem alloc failure)
|
|
|
|
// NOTE: DN_DSMap_KeyCStringLit ///////////////////////////////////////////////////////////
|
|
// NOTE: DN_DSMap_KeyU64 ///////////////////////////////////////////////////////////
|
|
// NOTE: DN_DSMap_KeyU64NoHash ///////////////////////////////////////////////////////////
|
|
// NOTE: DN_DSMap_KeyBuffer ///////////////////////////////////////////////////////////
|
|
// NOTE: DN_DSMap_KeyStr8 ///////////////////////////////////////////////////////////
|
|
// NOTE: DN_DSMap_KeyStr8Copy ///////////////////////////////////////////////////////////
|
|
// Create a hash-table key where:
|
|
//
|
|
// KeyCStringLit: Uses a Hash(cstring literal)
|
|
// KeyU64: Uses a Hash(U64)
|
|
// KeyU64NoHash: Uses a U64 (where it's truncated to 4 bytes)
|
|
// KeyBuffer: Uses a Hash(ptr+len) slice of bytes
|
|
// KeyStr8: Uses a Hash(string)
|
|
// KeyStr8Copy: Uses a Hash(string) that is copied first using the arena
|
|
//
|
|
// The memory backing buffer-based keys must persist throughout the lifetime
// of the map. Keys are value-copied into the map (the buffer they point to
// is not); alternatively, copy the key/buffer before constructing the key.
|
|
//
|
|
// You *can't* use the map's arena to allocate keys because on resize it
|
|
// will deallocate then reallocate the entire arena.
|
|
//
|
|
// KeyU64NoHash may be useful if you have a source of data that is
|
|
// already sufficiently uniformly distributed already (e.g. using 8
|
|
// bytes taken from a SHA256 hash as the key) and the first 4 bytes
|
|
// will be used verbatim.
|
|
DN_DSMapKey key = DN_DSMap_KeyStr8(&map, DN_STR8("Sample Key"));
|
|
|
|
// NOTE: DN_DSMap_Find ////////////////////////////////////////////////////////////////////
|
|
// NOTE: DN_DSMap_Make ////////////////////////////////////////////////////////////////////
|
|
// NOTE: DN_DSMap_Set ////////////////////////////////////////////////////////////////////
|
|
//
|
|
// Query or commit key-value pair to the table, where:
|
|
//
|
|
// Find: does a key-lookup on the table and returns the hash table slot's value
|
|
// Make: assigns the key to the table and returns the hash table slot's value
|
|
// Set: assigns the key-value to the table and returns the hash table slot's value
|
|
//
|
|
// A find query will set 'found' to false if it does not exist.
|
|
//
|
|
// For 'Make' and 'Set', 'found' can be set to 'true' if the item already
|
|
// existed in the map prior to the call. If it's the first time the
|
|
// key-value pair is being inserted 'found' will be set to 'false'.
|
|
//
|
|
// If by adding the key-value pair to the table puts the table over 75% load,
|
|
// the table will be grown to 2x the current size before insertion
|
|
// completes.
|
|
{
|
|
DN_DSMapResult<int> set_result = DN_DSMap_Set(&map, key, 0xCAFE);
|
|
DN_Assert(!set_result.found); // First time we are setting the key-value pair, it wasn't previously in the table
|
|
DN_Assert(map.occupied == 2); // Sentinel + new element == 2
|
|
}
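// Querying the same key back out with 'Find'. NOTE: This snippet assumes
// 'Find' mirrors the 'Set' call above (map + key) and returns the same
// 'DN_DSMapResult' whose 'value' member points at the slot's value.
{
DN_DSMapResult<int> find_result = DN_DSMap_Find(&map, key);
DN_Assert(find_result.found);
DN_Assert(*find_result.value == 0xCAFE);
}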
|
|
|
|
// Iterating elements in the array, note that index '0' is the sentinel
|
|
// slot! You typically don't care about it!
|
|
for (DN_USize index = 1; index < map.occupied; index++) {
|
|
DN_DSMapSlot<int> *it = map.slots + index;
|
|
DN_DSMapKey it_key = it->key;
|
|
int *it_value = &it->value;
|
|
DN_Assert(*it_value == 0xCAFE);
|
|
|
|
DN_Assert(DN_Str8_Init(it_key.buffer_data, it_key.buffer_size) == DN_STR8("Sample Key"));
|
|
}
|
|
|
|
// NOTE: DN_DSMap_Erase ///////////////////////////////////////////////////////////////////
|
|
//
|
|
// Remove the key-value pair from the table. If by erasing the key-value
|
|
// pair from the table puts the table under 25% load, the table will be
|
|
// shrunk by 1/2 the current size after erasing. The table will not shrink
|
|
// below the initial size that the table was initialised as.
|
|
{
|
|
bool erased = DN_DSMap_Erase(&map, key);
|
|
DN_Assert(erased);
|
|
DN_Assert(map.occupied == 1); // Sentinel element
|
|
}
|
|
|
|
DN_DSMap_Deinit(&map, DN_ZeroMem_Yes); // Deallocates the 'arena' for us!
|
|
}
|
|
|
|
// NOTE: DN_DSMap_Hash ////////////////////////////////////////////////////////////////////////
|
|
//
|
|
// Hash the input key using the custom hash function if it's set on the map,
|
|
// otherwise uses the default hashing function (32bit Murmur3).
|
|
|
|
// NOTE: DN_DSMap_HashToSlotIndex /////////////////////////////////////////////////////////////
|
|
//
|
|
// Calculate the index into the map's 'slots' array from the given hash.
|
|
|
|
// NOTE: DN_DSMap_Resize //////////////////////////////////////////////////////////////////////
|
|
//
|
|
// Resize the table and move all elements to the new map, note that the new
|
|
// size must be a power of two. This function will fail on memory allocation
// failure, or if the requested size is smaller than the current number of
// elements in the map.
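// A minimal sketch of how these low-level helpers relate. NOTE: The exact
// 'Hash', 'HashToSlotIndex' and 'Resize' signatures are assumptions made for
// illustration and are not demonstrated elsewhere in this file; consult the
// header before use.
#if 0
{
DN_Arena arena = DN_Arena_InitFromOSVMem(0, 0, DN_ArenaFlags_Nil);
DN_DSMap<int> map = DN_DSMap_Init<int>(&arena, /*size*/ 1024, DN_DSMapFlags_Nil);
DN_DSMapKey key = DN_DSMap_KeyStr8(&map, DN_STR8("Key"));
uint32_t hash = DN_DSMap_Hash(&map, key); // Hypothetical signature
uint32_t slot_index = DN_DSMap_HashToSlotIndex(&map, hash); // Hypothetical signature
(void)slot_index;
DN_DSMap_Resize(&map, /*size*/ 2048); // Hypothetical signature; size must be a power of two
DN_DSMap_Deinit(&map, DN_ZeroMem_Yes);
}
#endif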
|
|
|
|
// NOTE: DN_OSErrSink /////////////////////////////////////////////////////////////////////////
|
|
//
|
|
// Error sinks are a way of accumulating errors from API calls related or
|
|
// unrelated into 1 unified error handling pattern. The implementation of a
|
|
// sink requires 2 fundamental design constraints on the APIs supporting
|
|
// this pattern.
|
|
//
|
|
// 1. Pipelining of errors
|
|
// Errors emitted over the course of several API calls are accumulated
|
|
// into a sink which saves the error code and message of the first error
|
|
// encountered and can be checked later.
|
|
//
|
|
// 2. Error proof APIs
|
|
// Functions that produce errors must return objects/handles that are
|
|
// marked to trigger no-ops used in subsequent functions dependent on it.
|
|
//
|
|
// Consider the following example demonstrating a conventional error
|
|
// handling approach (error values by return/sentinel values) and error
|
|
// handling using error-proof and pipelining.
|
|
|
|
// (A) Conventional error checking patterns using return/sentinel values
|
|
#if 0
|
|
DN_OSFile *file = DN_OS_FileOpen("/path/to/file", ...);
|
|
if (file) {
|
|
if (!DN_OS_FileWrite(file, "abc")) {
|
|
// Error handling!
|
|
}
|
|
DN_OS_FileClose(file);
|
|
} else {
|
|
// Error handling!
|
|
}
|
|
#endif
|
|
|
|
// (B) Error handling using pipelining and error-proof APIs. APIs that
|
|
// produce errors take in the error sink as a parameter.
|
|
if (0) {
|
|
DN_OSErrSink *error = DN_OS_ErrSinkBegin(DN_OSErrSinkMode_Nil);
|
|
DN_OSFile file = DN_OS_FileOpen(DN_STR8("/path/to/file"), DN_OSFileOpen_OpenIfExist, DN_OSFileAccess_ReadWrite, error);
|
|
DN_OS_FileWrite(&file, DN_STR8("abc"), error);
|
|
DN_OS_FileClose(&file);
|
|
if (DN_OS_ErrSinkEndAndLogErrorF(error, "Failed to write to file")) {
|
|
// Do error handling!
|
|
}
|
|
}
|
|
|
|
// Pipelining and error-proof APIs let you write a sequence of instructions and
// defer error checking until it is convenient or necessary. Functions are
// *guaranteed* to return an object that is usable. There are no hidden
// exceptions to be thrown. Functions may still opt to report errors by way of
// return values, thereby *not* precluding the ability to check every API call
// either.
|
|
//
|
|
// Ultimately, this error handling approach gives more flexibility on the
|
|
// manner in how errors are handled with less code.
|
|
//
|
|
// Error sinks can nest begin and end statements. This will open a new scope
|
|
// whereby the currently captured error is pushed onto a stack and the sink will
|
|
// be populated by the first error encountered in that scope.
|
|
|
|
if (0) {
|
|
DN_OSErrSink *error = DN_OS_ErrSinkBegin(DN_OSErrSinkMode_Nil);
|
|
DN_OSFile file = DN_OS_FileOpen(DN_STR8("/path/to/file"), DN_OSFileOpen_OpenIfExist, DN_OSFileAccess_ReadWrite, error);
|
|
DN_OS_FileWrite(&file, DN_STR8("abc"), error);
|
|
DN_OS_FileClose(&file);
|
|
|
|
{
|
|
// NOTE: My error sinks are thread-local, so the returned 'error' is
|
|
// the same as the 'error' value above.
|
|
DN_OS_ErrSinkBegin(DN_OSErrSinkMode_Nil);
|
|
DN_OS_WriteAll(DN_STR8("/path/to/another/file"), DN_STR8("123"), error);
|
|
DN_OS_ErrSinkEndAndLogErrorF(error, "Failed to write to another file");
|
|
}
|
|
|
|
if (DN_OS_ErrSinkEndAndLogErrorF(error, "Failed to write to file")) {
|
|
// Do error handling!
|
|
}
|
|
}
|
|
|
|
// NOTE: DN_FStr8_Max /////////////////////////////////////////////////////////////////////////
|
|
//
|
|
// Return the maximum capacity of the string, e.g. the 'N' template
|
|
// parameter of FStr8<N>
|
|
|
|
// NOTE: DN_FStr8_ToStr8 //////////////////////////////////////////////////////////////////////
|
|
//
|
|
// Create a slice of the string into a pointer and length string (DN_Str8).
|
|
// The lifetime of the slice is bound to the lifetime of the FStr8 and is
|
|
// invalidated when the FStr8 is.
|
|
|
|
// NOTE: DN_CVT_HexToBytes ////////////////////////////////////////////////////////////////////////
|
|
{
|
|
unsigned char bytes[2];
|
|
DN_USize bytes_written = DN_CVT_HexToBytesPtr(DN_STR8("0xFACE"), bytes, sizeof(bytes));
|
|
DN_Assert(bytes_written == 2);
|
|
DN_Assert(bytes[0] == 0xFA);
|
|
DN_Assert(bytes[1] == 0xCE);
|
|
}
|
|
|
|
// NOTE: DN_JSONBuilder_Build /////////////////////////////////////////////////////////////////
|
|
//
|
|
// Convert the internal JSON buffer in the builder into a string.
|
|
|
|
// NOTE: DN_JSONBuilder_KeyValue, DN_JSONBuilder_KeyValueF
|
|
//
|
|
// Add a JSON key value pair untyped. The value is emitted directly without
|
|
// checking the contents of value.
|
|
//
|
|
// All other functions internally call into this function which is the main
|
|
// workhorse of the builder.
|
|
|
|
// NOTE: DN_JSON_Builder_ObjectEnd
|
|
//
|
|
// End a JSON object in the builder, generates internally a '}' string
|
|
|
|
// NOTE: DN_JSON_Builder_ArrayEnd
|
|
//
|
|
// End a JSON array in the builder, generates internally a ']' string
|
|
|
|
// NOTE: DN_JSONBuilder_LiteralNamed
|
|
//
|
|
// Add a named JSON key-value pair whose value is written verbatim, producing
// '"<key>": <value>' (e.g. useful for emitting the 'null' value)
|
|
|
|
// NOTE: DN_JSONBuilder_U64 /////////////////////////////////////////////////////////////
|
|
// NOTE: DN_JSONBuilder_U64Named /////////////////////////////////////////////////////////////
|
|
// NOTE: DN_JSONBuilder_I64 /////////////////////////////////////////////////////////////
|
|
// NOTE: DN_JSONBuilder_I64Named /////////////////////////////////////////////////////////////
|
|
// NOTE: DN_JSONBuilder_F64 /////////////////////////////////////////////////////////////
|
|
// NOTE: DN_JSONBuilder_F64Named /////////////////////////////////////////////////////////////
|
|
// NOTE: DN_JSONBuilder_Bool /////////////////////////////////////////////////////////////
|
|
// NOTE: DN_JSONBuilder_BoolNamed /////////////////////////////////////////////////////////////
|
|
//
|
|
// Add the named JSON data type as a key-value object. The named variants
|
|
// generates internally the key-value pair, e.g.
|
|
//
|
|
// "<name>": <value>
|
|
//
|
|
// And the non-named version emits just the 'value' portion
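// A minimal sketch of the builder flow described above. NOTE: Only the
// 'KeyValue', 'ObjectEnd', 'ArrayEnd', 'LiteralNamed', '*Named' and 'Build'
// names are documented here; the 'Init' and 'ObjectBegin' calls and all of
// the signatures below are assumptions made for illustration, so consult the
// header for the exact API before use.
#if 0
{
DN_OSTLSTMem tmem = DN_OS_TLSTMem(nullptr);
DN_JSONBuilder builder = DN_JSONBuilder_Init(tmem.arena); // Hypothetical
DN_JSONBuilder_ObjectBegin(&builder); // Hypothetical
DN_JSONBuilder_U64Named(&builder, DN_STR8("id"), 123); // Emits '"id": 123'
DN_JSONBuilder_LiteralNamed(&builder, DN_STR8("value"), DN_STR8("null")); // Emits '"value": null'
DN_JSONBuilder_ObjectEnd(&builder);
DN_Str8 json = DN_JSONBuilder_Build(&builder, tmem.arena); // e.g. '{"id": 123, "value": null}'
(void)json;
}
#endif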
|
|
|
|
// NOTE: DN_List_Iterate //////////////////////////////////////////////////////////////////////
|
|
{
|
|
DN_OSTLSTMem tmem = DN_OS_TLSTMem(nullptr);
|
|
DN_List<int> list = DN_List_Init<int>(/*chunk_size*/ 128);
|
|
for (DN_ListIterator<int> it = {}; DN_List_Iterate(&list, &it, 0);) {
|
|
int *item = it.data;
|
|
(void)item;
|
|
}
|
|
}
|
|
|
|
// NOTE: DN_LOGProc ///////////////////////////////////////////////////////////////////////////
|
|
//
|
|
// Function prototype of the logging interface exposed by this library. Logs
|
|
// emitted using the DN_LOG_* family of functions are routed through this
|
|
// routine.
|
|
|
|
// NOTE: DN_FNV1A /////////////////////////////////////////////////////////////////////////////
|
|
#if 0
|
|
{
|
|
// Using the default hash as defined by DN_FNV1A32_SEED and
|
|
// DN_FNV1A64_SEED for 32/64bit hashes respectively
|
|
uint32_t buffer1 = 0xCAFE0000;
|
|
uint32_t buffer2 = 0xDEAD0000;
|
|
{
|
|
uint64_t hash = DN_FNV1A64_Hash(&buffer1, sizeof(buffer1));
|
|
hash = DN_FNV1A64_Iterate(&buffer2, sizeof(buffer2), hash); // Chained hashing
|
|
(void)hash;
|
|
}
|
|
|
|
// You can use a custom seed by skipping the 'Hash' call and instead
|
|
// calling 'Iterate' immediately.
|
|
{
|
|
uint64_t custom_seed = 0xABCDEF12;
|
|
uint64_t hash = DN_FNV1A64_Iterate(&buffer1, sizeof(buffer1), custom_seed);
|
|
hash = DN_FNV1A64_Iterate(&buffer2, sizeof(buffer2), hash);
|
|
(void)hash;
|
|
}
|
|
}
|
|
#endif
|
|
|
|
// NOTE: DN_FmtBuffer3DotTruncate //////////////////////////////////////////////////////////////
|
|
{
|
|
char buffer[8] = {};
|
|
int buffer_chars_written = DN_CVT_FmtBuffer3DotTruncate(buffer, sizeof(buffer), "This string is longer than %d characters", DN_CAST(int)(sizeof(buffer) - 1));
|
|
if (0) // Prints "This ..." which is exactly 8 characters long
|
|
printf("%.*s", buffer_chars_written, buffer);
|
|
}
|
|
|
|
// NOTE: DN_MurmurHash3 ///////////////////////////////////////////////////////////////////////
|
|
// MurmurHash3 was written by Austin Appleby, and is placed in the public
|
|
// domain. The author (Austin Appleby) hereby disclaims copyright to this source
|
|
// code.
|
|
//
|
|
// Note - The x86 and x64 versions do _not_ produce the same results, as the
|
|
// algorithms are optimized for their respective platforms. You can still
|
|
// compile and run any of them on any platform, but your performance with the
|
|
// non-native version will be less than optimal.
|
|
|
|
// NOTE: DN_OS_DateUnixTime
|
|
//
|
|
// Produce the time elapsed since the unix epoch
|
|
{
|
|
uint64_t now = DN_OS_DateUnixTimeS();
|
|
(void)now;
|
|
}
|
|
|
|
// NOTE: DN_OS_DirIterate /////////////////////////////////////////////////////////////////////
|
|
//
|
|
// Iterate the files within the passed in folder
|
|
for (DN_OSDirIterator it = {}; DN_OS_DirIterate(DN_STR8("."), &it);) {
|
|
// printf("%.*s\n", DN_STR_FMT(it.file_name));
|
|
}
|
|
|
|
// NOTE: DN_OS_FileDelete
|
|
//
|
|
// This function can delete files, and it can *only* delete directories if
// they are empty, otherwise this function fails.
|
|
|
|
// NOTE: DN_OS_WriteAllSafe
|
|
// Writes the file at the path first by appending '.tmp' to the 'path' to
|
|
// write to. If the temporary file is written successfully then the file is
|
|
// copied into 'path', for example:
|
|
//
|
|
// path: C:/Home/my.txt
|
|
// tmp_path: C:/Home/my.txt.tmp
|
|
//
|
|
// If 'tmp_path' is written to successfully, the file will be copied over into
|
|
// 'path'.
|
|
if (0) {
|
|
DN_OSTLSTMem tmem = DN_OS_TLSTMem(nullptr);
|
|
DN_OSErrSink *error = DN_OS_ErrSinkBegin(DN_OSErrSinkMode_Nil);
|
|
DN_OS_WriteAllSafe(/*path*/ DN_STR8("C:/Home/my.txt"), /*buffer*/ DN_STR8("Hello world"), error);
|
|
DN_OS_ErrSinkEndAndLogErrorF(error, "");
|
|
}
|
|
|
|
// NOTE: DN_OS_EstimateTSCPerSecond ///////////////////////////////////////////////////////////
|
|
//
|
|
// Estimate how many timestamp count's (TSC) there are per second. TSC
|
|
// is evaluated by calling __rdtsc() or the equivalent on the platform. This
|
|
// value can be used to convert TSC durations into seconds.
|
|
//
|
|
// The 'duration_ms_to_gauge_tsc_frequency' parameter specifies how many
|
|
// milliseconds to spend measuring the TSC rate of the current machine.
|
|
// 100ms is sufficient to produce a fairly accurate result with minimal
|
|
// blocking in applications if calculated on startup.
|
|
//
|
|
// This may return 0 if querying the CPU timestamp counter is not supported
|
|
// on the platform (e.g. __rdtsc() or __builtin_readcyclecounter() returns 0).
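// A minimal usage sketch, assuming the function takes the gauge duration in
// milliseconds and returns the estimated TSC frequency as a 64 bit integer
// (the exact signature is not shown in this demo; consult the header).
#if 0
{
uint64_t tsc_per_second = DN_OS_EstimateTSCPerSecond(/*duration_ms_to_gauge_tsc_frequency*/ 100);
if (tsc_per_second) // 0 means the TSC could not be queried on this platform
DN_Profiler_Dump(tsc_per_second); // Converts the profiler's TSC durations into milliseconds
}
#endif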
|
|
|
|
// NOTE: DN_OS_EXEDir /////////////////////////////////////////////////////////////////////////
|
|
//
|
|
// Retrieve the executable directory without the trailing '/' or ('\' for
|
|
// windows). If this fails an empty string is returned.
|
|
|
|
// NOTE: DN_OS_PerfCounterFrequency ///////////////////////////////////////////////////////////
|
|
//
|
|
// Get the number of ticks in the performance counter per second for the
|
|
// operating system you're running on. This value can be used to calculate
|
|
// duration from OS performance counter ticks.
|
|
|
|
// NOTE: DN_OS_Path* //////////////////////////////////////////////////////////////////////////
|
|
// Construct paths ensuring the native OS path separators are used in the
|
|
// string. In 99% of cases you can use 'PathConvertF' which converts the
|
|
// given path in one shot ensuring native path separators in the string.
|
|
//
|
|
// path: C:\Home/My/Folder
|
|
// converted: C:/Home/My/Folder (On Unix)
|
|
// C:\Home\My\Folder (On Windows)
|
|
//
|
|
// If you need to construct a path dynamically you can use the builder-esque
// interface to build a path step-by-step using the 'OSPath' data structure.
// With this API you append path pieces one at a time and build the final
// path after all the pieces are appended.
|
|
//
|
|
// You may append a singular or nested path to the builder. In the builder,
|
|
// the string is scanned and separated into path separated chunks and stored
|
|
// in the builder, e.g. these are all valid to pass into 'PathAdd',
|
|
// 'PathAddRef' ... e.t.c
|
|
//
|
|
// "path/to/your/desired/folder" is valid
|
|
// "path" is valid
|
|
// "path/to\your/desired\folder" is valid
|
|
//
|
|
// 'PathPop' removes the last appended path from the current path stored in
|
|
// the 'OSPath':
|
|
//
|
|
// path: path/to/your/desired/folder
|
|
// popped_path: path/to/your/desired
|
|
|
|
// NOTE: DN_OS_SecureRNGBytes /////////////////////////////////////////////////////////////////
|
|
//
|
|
// Generate cryptographically secure bytes
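// A minimal sketch, assuming the function fills a caller provided buffer
// (pointer + size); the exact signature and return value are not shown in
// this demo so consult the header before use.
#if 0
{
unsigned char entropy[16] = {};
DN_OS_SecureRNGBytes(entropy, sizeof(entropy)); // Hypothetical signature
}
#endif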
|
|
|
|
#if 0
|
|
// NOTE: DN_PCG32 /////////////////////////////////////////////////////////////////////////////
|
|
//
|
|
// Random number generator of the PCG family. Implementation taken from
|
|
// Martins Mmozeiko from Handmade Network.
|
|
// https://gist.github.com/mmozeiko/1561361cd4105749f80bb0b9223e9db8
|
|
{
|
|
DN_PCG32 rng = DN_PCG32_Init(0xb917'a66c'1d9b'3bd8);
|
|
|
|
// NOTE: DN_PCG32_Range ///////////////////////////////////////////////////////////////////
|
|
//
|
|
// Generate a value in the [low, high) interval
|
|
uint32_t u32_value = DN_PCG32_Range(&rng, 32, 64);
|
|
DN_Assert(u32_value >= 32 && u32_value < 64);
|
|
|
|
// NOTE: DN_PCG32_NextF32 /////////////////////////////////////////////////////////////////
|
|
// NOTE: DN_PCG32_NextF64 /////////////////////////////////////////////////////////////////
|
|
//
|
|
// Generate a float/double in the [0, 1) interval
|
|
DN_F64 f64_value = DN_PCG32_NextF64(&rng);
|
|
DN_Assert(f64_value >= 0.f && f64_value < 1.f);
|
|
|
|
// NOTE: DN_PCG32_Advance /////////////////////////////////////////////////////////////////
|
|
//
|
|
// Step the random number generator by 'delta' steps
|
|
DN_PCG32_Advance(&rng, /*delta*/ 5);
|
|
}
|
|
#endif
|
|
|
|
#if 0
|
|
#if !defined(DN_NO_PROFILER)
|
|
// NOTE: DN_Profiler /////////////////////////////////////////////////////////////////////////////
|
|
//
|
|
// A profiler based off Casey Muratori's Computer Enhance course, Performance
|
|
// Aware Programming. This profiler measures function elapsed time using the
|
|
// CPU's time stamp counter (e.g. rdtsc) providing a rough cycle count
|
|
// that can be converted into a duration.
|
|
//
|
|
// This profiler uses a double buffer scheme for storing profiling markers.
|
|
// After an application's typical update/frame cycle you can swap the
|
|
// profiler's buffer whereby the front buffer contains the previous frames
|
|
// profiling metrics and the back buffer will be populated with the new
|
|
// frame's profiling metrics.
|
|
{
|
|
enum Zone
|
|
{
|
|
Zone_MainLoop,
|
|
Zone_Count
|
|
};
|
|
|
|
DN_ProfilerZone profiler_zone_main_update = DN_Profiler_BeginZone(Zone_MainLoop);
|
|
|
|
// NOTE: DN_Profiler_AnchorBuffer /////////////////////////////////////////////////////
|
|
//
|
|
// Retrieve the requested buffer from the profiler for
|
|
// writing/reading profiling metrics. Pass in the enum to specify
|
|
// which buffer to grab from the profiler.
|
|
//
|
|
// The front buffer contains the previous frame's profiling metrics
|
|
// and the back buffer is where the profiler is currently writing
|
|
// to.
|
|
//
|
|
// For end-user intents and purposes, you likely only need to read the
// front buffer which contains the most recently recorded profiling
// metrics that you can visualise.
|
|
|
|
// NOTE: DN_Profiler_ReadBuffer ///////////////////////////////////////////////////////////
|
|
//
|
|
// Retrieve the buffer of anchors (of which there are
// `DN_PROFILER_ANCHOR_BUFFER_SIZE`) from the most recent run of the
// profiler, i.e. the front buffer after you have called
// `SwapAnchorBuffer` to flip the double buffer.
|
|
DN_ProfilerAnchor *read_anchors = DN_Profiler_ReadBuffer();
|
|
for (DN_USize index = 0; index < DN_PROFILER_ANCHOR_BUFFER_SIZE; index++) {
|
|
DN_ProfilerAnchor *anchor = read_anchors + index;
|
|
if (DN_Str8_HasData(anchor->name)) {
|
|
// ...
|
|
}
|
|
}
|
|
|
|
// NOTE: DN_Profiler_WriteBuffer //////////////////////////////////////////////////////////
|
|
//
|
|
// Same as `ReadBuffer` however we return the buffer that the profiler
|
|
// is currently writing anchors into.
|
|
DN_ProfilerAnchor *write_anchors = DN_Profiler_WriteBuffer();
|
|
for (DN_USize index = 0; index < DN_PROFILER_ANCHOR_BUFFER_SIZE; index++) {
|
|
DN_ProfilerAnchor *anchor = write_anchors + index;
|
|
if (DN_Str8_HasData(anchor->name)) {
|
|
// ...
|
|
}
|
|
}
|
|
|
|
DN_Profiler_EndZone(profiler_zone_main_update);
|
|
DN_Profiler_SwapAnchorBuffer(); // Should occur after all profiling zones are ended!
|
|
DN_Memset(g_dn_core->profiler, 0, sizeof(*g_dn_core->profiler));
|
|
}
|
|
#endif // !defined(DN_NO_PROFILER)
|
|
#endif
|
|
|
|
// NOTE: DN_Raycast_LineIntersectV2 ///////////////////////////////////////////////////////////
|
|
// Calculate the intersection point of 2 rays returning a `t` value
|
|
// which is how much along the direction of the 'ray' did the intersection
|
|
// occur.
|
|
//
|
|
// The arguments passed in do not need to be normalised for the function to
|
|
// work.
|
|
|
|
// NOTE: DN_Safe_* ////////////////////////////////////////////////////////////////////////////
|
|
//
|
|
// Performs the arithmetic operation and uses DN_Check on the operation to
|
|
// check if it overflows. If it overflows the MAX value of the integer is
|
|
// returned in add and multiply operations, and, the minimum is returned in
|
|
// subtraction and division.
|
|
|
|
// NOTE: DN_SaturateCast* ////////////////////////////////////////////////////////////////
|
|
//
|
|
// Truncate the passed in value to the return type clamping the resulting
|
|
// value to the max value of the desired data type. It DN_Check's the
|
|
// truncation.
|
|
//
|
|
// The following sentinel values are returned when saturated,
|
|
// USize -> Int: INT_MAX
|
|
// USize -> I8: INT8_MAX
|
|
// USize -> I16: INT16_MAX
|
|
// USize -> I32: INT32_MAX
|
|
// USize -> I64: INT64_MAX
|
|
//
|
|
// U64 -> UInt: UINT_MAX
|
|
// U64 -> U8: UINT8_MAX
|
|
// U64 -> U16: UINT16_MAX
|
|
// U64 -> U32: UINT32_MAX
|
|
//
|
|
// USize -> U8: UINT8_MAX
|
|
// USize -> U16: UINT16_MAX
|
|
// USize -> U32: UINT32_MAX
|
|
// USize -> U64: UINT64_MAX
|
|
//
|
|
// ISize -> Int: INT_MIN or INT_MAX
|
|
// ISize -> I8: INT8_MIN or INT8_MAX
|
|
// ISize -> I16: INT16_MIN or INT16_MAX
|
|
// ISize -> I32: INT32_MIN or INT32_MAX
|
|
// ISize -> I64: INT64_MIN or INT64_MAX
|
|
//
|
|
// ISize -> UInt: 0 or UINT_MAX
|
|
// ISize -> U8: 0 or UINT8_MAX
|
|
// ISize -> U16: 0 or UINT16_MAX
|
|
// ISize -> U32: 0 or UINT32_MAX
|
|
// ISize -> U64: 0 or UINT64_MAX
|
|
//
|
|
// I64 -> ISize: DN_ISIZE_MIN or DN_ISIZE_MAX
|
|
// I64 -> I8: INT8_MIN or INT8_MAX
|
|
// I64 -> I16: INT16_MIN or INT16_MAX
|
|
// I64 -> I32: INT32_MIN or INT32_MAX
|
|
//
|
|
// Int -> I8: INT8_MIN or INT8_MAX
|
|
// Int -> I16: INT16_MIN or INT16_MAX
|
|
// Int -> U8: 0 or UINT8_MAX
|
|
// Int -> U16: 0 or UINT16_MAX
|
|
// Int -> U32: 0 or UINT32_MAX
|
|
// Int -> U64: 0 or UINT64_MAX
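// A minimal sketch of the clamping behaviour tabulated above. NOTE: The
// function names below are assumptions made for illustration (only the
// DN_SaturateCast* family name is given here); consult the header for the
// real names before use.
#if 0
{
int32_t i32 = DN_SaturateCastI64ToI32(INT64_MAX); // Hypothetical name; saturates to INT32_MAX
uint8_t u8 = DN_SaturateCastUSizeToU8(1024); // Hypothetical name; saturates to UINT8_MAX
DN_Assert(i32 == INT32_MAX && u8 == UINT8_MAX);
}
#endif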
|
|
|
|
// NOTE: DN_StackTrace ////////////////////////////////////////////////////////////////////////
|
|
// Emit stack traces at the calling site that these functions are invoked
|
|
// from.
|
|
//
|
|
// For some applications, it may be viable to generate raw stack traces and
|
|
// store just the base addresses of the call stack from the 'Walk'
|
|
// functions. This reduces the memory overhead required to hold onto stack
// traces; the addresses can then be resolved on-demand when required.
|
|
//
|
|
// However if your application is loading and/or unloading shared libraries,
|
|
// on Windows it may be impossible for the application to resolve raw base
|
|
// addresses if they become invalid over time. In these applications you
|
|
// must convert the raw stack traces before the unloading occurs, and when
|
|
// loading new shared libraries, 'ReloadSymbols' must be called to ensure
|
|
// the debug APIs are aware of how to resolve the new addresses imported
|
|
// into the address space.
|
|
{
|
|
DN_OSTLSTMem tmem = DN_OS_TLSTMem(nullptr);
|
|
|
|
// NOTE: DN_StackTrace_Walk ///////////////////////////////////////////////////////////////
|
|
//
|
|
// Generate a stack trace as a series of addresses to the base of the
|
|
// functions on the call-stack at the current instruction pointer. The
|
|
// addresses are stored in order from the current executing function
|
|
// first to the most ancestor function last in the walk.
|
|
DN_StackTraceWalkResult walk = DN_StackTrace_Walk(tmem.arena, /*depth limit*/ 128);
|
|
|
|
// Loop over the addresses produced in the stack trace
|
|
for (DN_StackTraceWalkResultIterator it = {}; DN_StackTrace_WalkResultIterate(&it, &walk);) {
|
|
// NOTE: DN_StackTrace_RawFrameToFrame ////////////////////////////////////////////////
|
|
//
|
|
// Converts the base address into a human readable stack trace
|
|
// entry (e.g. address, line number, file and function name).
|
|
DN_StackTraceFrame frame = DN_StackTrace_RawFrameToFrame(tmem.arena, it.raw_frame);
|
|
|
|
// You may then print out the frame like so
|
|
if (0)
|
|
printf("%.*s(%" PRIu64 "): %.*s\n", DN_STR_FMT(frame.file_name), frame.line_number, DN_STR_FMT(frame.function_name));
|
|
}
|
|
|
|
// If you load new shared libraries into the address space it may be
|
|
// necessary to call into 'ReloadSymbols' to ensure that the OS is able
|
|
// to resolve the new addresses.
|
|
DN_StackTrace_ReloadSymbols();
|
|
|
|
// NOTE: DN_StackTrace_GetFrames //////////////////////////////////////////////////////////
|
|
//
|
|
// Helper function to create a stack trace and automatically convert the
|
|
// raw frames into human readable frames. This function effectively
|
|
// calls 'Walk' followed by 'RawFrameToFrame'.
|
|
DN_Slice<DN_StackTraceFrame> frames = DN_StackTrace_GetFrames(tmem.arena, /*depth limit*/ 128);
|
|
(void)frames;
|
|
}
|
|
|
|
// NOTE: DN_Str8_Alloc ////////////////////////////////////////////////////////////////////////
|
|
//
|
|
// Allocates a string with the requested 'size'. An additional byte is
|
|
// always requested from the allocator to null-terminate the buffer. This
|
|
// allows the string to be used with C-style string APIs.
|
|
//
|
|
// The returned string's 'size' member variable does *not* include this
|
|
// additional null-terminating byte.
|
|
{
|
|
DN_OSTLSTMem tmem = DN_OS_TLSTMem(nullptr);
|
|
DN_Str8 string = DN_Str8_Alloc(tmem.arena, /*size*/ 1, DN_ZeroMem_Yes);
|
|
DN_Assert(string.size == 1);
|
|
DN_Assert(string.data[string.size] == 0); // It is null-terminated!
|
|
}
|
|
|
|
// NOTE: DN_Str8_BinarySplit //////////////////////////////////////////////////////////////////
|
|
//
|
|
// Splits a string into 2 substrings occurring before and after the first
// occurrence of the delimiter. Neither string includes the matched
// delimiter. If no delimiter is found, the 'rhs' of the split will be
|
|
// empty.
|
|
{
|
|
DN_Str8BinarySplitResult dot_split = DN_Str8_BinarySplit(/*string*/ DN_STR8("abc.def.ghi"), /*delimiter*/ DN_STR8("."));
|
|
DN_Str8BinarySplitResult slash_split = DN_Str8_BinarySplit(/*string*/ DN_STR8("abc.def.ghi"), /*delimiter*/ DN_STR8("/"));
|
|
DN_Assert(dot_split.lhs == DN_STR8("abc") && dot_split.rhs == DN_STR8("def.ghi"));
|
|
DN_Assert(slash_split.lhs == DN_STR8("abc.def.ghi") && slash_split.rhs == DN_STR8(""));
|
|
|
|
// Loop that walks the string and produces ("abc", "def", "ghi")
|
|
for (DN_Str8 it = DN_STR8("abc.def.ghi"); it.size;) {
|
|
DN_Str8BinarySplitResult split = DN_Str8_BinarySplit(it, DN_STR8("."));
|
|
DN_Str8 chunk = split.lhs; // "abc", "def", ...
|
|
it = split.rhs;
|
|
(void)chunk;
|
|
}
|
|
}
|
|
|
|
// NOTE: DN_Str8_FileNameFromPath /////////////////////////////////////////////////////////////
|
|
//
|
|
// Takes a slice to the file name from a file path. The file name is
|
|
// evaluated by searching from the end of the string backwards to the first
|
|
// occurring path separator '/' or '\'. If no path separator is found, the
|
|
// original string is returned. This function preserves the file extension
|
|
// if there were any.
|
|
{
|
|
{
|
|
DN_Str8 string = DN_Str8_FileNameFromPath(DN_STR8("C:/Folder/item.txt"));
|
|
DN_Assert(string == DN_STR8("item.txt"));
|
|
}
|
|
{
|
|
// TODO(doyle): Intuitively this seems incorrect. Empty string instead?
|
|
DN_Str8 string = DN_Str8_FileNameFromPath(DN_STR8("C:/Folder/"));
|
|
DN_Assert(string == DN_STR8("C:/Folder"));
|
|
}
|
|
{
|
|
DN_Str8 string = DN_Str8_FileNameFromPath(DN_STR8("C:/Folder"));
|
|
DN_Assert(string == DN_STR8("Folder"));
|
|
}
|
|
}
|
|
|
|
// NOTE: DN_Str8_FilePathNoExtension //////////////////////////////////////////////////////////
|
|
//
|
|
// This function preserves the original string if no extension was found.
|
|
// An extension is defined as the substring after the last '.' encountered
|
|
// in the string.
|
|
{
|
|
DN_Str8 string = DN_Str8_FilePathNoExtension(DN_STR8("C:/Folder/item.txt.bak"));
|
|
DN_Assert(string == DN_STR8("C:/Folder/item.txt"));
|
|
}
|
|
|
|
// NOTE: DN_Str8_FileNameNoExtension //////////////////////////////////////////////////////////
|
|
//
|
|
// This function is the same as calling 'FileNameFromPath' followed by
|
|
// 'FilePathNoExtension'
|
|
{
|
|
DN_Str8 string = DN_Str8_FileNameNoExtension(DN_STR8("C:/Folder/item.txt.bak"));
|
|
DN_Assert(string == DN_STR8("item.txt"));
|
|
}
|
|
|
|
// NOTE: DN_Str8_Replace ///////////////////////////////////////////////////////////
|
|
// NOTE: DN_Str8_ReplaceInsensitive ///////////////////////////////////////////////////////////
|
|
//
|
|
// Replace any matching substring 'find' with 'replace' in the passed in
|
|
// 'string'. The 'start_index' may be specified to offset which index the
|
|
// string will start doing replacements from.
|
|
//
|
|
// String replacements are not done inline and the returned string will
|
|
// always be a newly allocated copy, irrespective of if any replacements
|
|
// were done or not.
|
|
{
|
|
DN_OSTLSTMem tmem = DN_OS_TLSTMem(nullptr);
|
|
DN_Str8 string = DN_Str8_Replace(/*string*/ DN_STR8("Foo Foo Bar"),
|
|
/*find*/ DN_STR8("Foo"),
|
|
/*replace*/ DN_STR8("Moo"),
|
|
/*start_index*/ 1,
|
|
/*arena*/ tmem.arena,
|
|
/*eq_case*/ DN_Str8EqCase_Sensitive);
|
|
DN_Assert(string == DN_STR8("Foo Moo Bar"));
|
|
}
|
|
|
|
// NOTE: DN_Str8_Segment //////////////////////////////////////////////////////////////////////
|
|
//
|
|
// Add a delimiting 'segment_char' every 'segment_size' number of characters
|
|
// in the string.
|
|
//
|
|
// Reverse segment delimits the string counting 'segment_size' from the back
|
|
// of the string.
|
|
{
|
|
DN_OSTLSTMem tmem = DN_OS_TLSTMem(nullptr);
|
|
DN_Str8 string = DN_Str8_Segment(tmem.arena, /*string*/ DN_STR8("123456789"), /*segment_size*/ 3, /*segment_char*/ ',');
|
|
DN_Assert(string == DN_STR8("123,456,789"));
|
|
}
|
|
|
|
// NOTE: DN_Str8_Split ////////////////////////////////////////////////////////////////////////
|
|
{
|
|
// Splits the string at each delimiter into the substrings occurring
// between consecutive delimiters.
|
|
DN_OSTLSTMem tmem = DN_OS_TLSTMem(nullptr);
|
|
{
|
|
DN_Slice<DN_Str8> splits = DN_Str8_SplitAlloc(/*arena*/ tmem.arena,
|
|
/*string*/ DN_STR8("192.168.8.1"),
|
|
/*delimiter*/ DN_STR8("."),
|
|
/*mode*/ DN_Str8SplitIncludeEmptyStrings_No);
|
|
DN_Assert(splits.size == 4);
|
|
DN_Assert(splits.data[0] == DN_STR8("192") && splits.data[1] == DN_STR8("168") && splits.data[2] == DN_STR8("8") && splits.data[3] == DN_STR8("1"));
|
|
}
|
|
|
|
// You can include empty strings that occur when splitting by setting
|
|
// the split mode to include empty strings.
|
|
{
|
|
DN_Slice<DN_Str8> splits = DN_Str8_SplitAlloc(/*arena*/ tmem.arena,
|
|
/*string*/ DN_STR8("a--b"),
|
|
/*delimiter*/ DN_STR8("-"),
|
|
/*mode*/ DN_Str8SplitIncludeEmptyStrings_Yes);
|
|
DN_Assert(splits.size == 3);
|
|
DN_Assert(splits.data[0] == DN_STR8("a") && splits.data[1] == DN_STR8("") && splits.data[2] == DN_STR8("b"));
|
|
}
|
|
}
|
|
|
|
// NOTE: DN_Str8_ToI64 ////////////////////////////////////////////////////////////////////////
|
|
// NOTE: DN_Str8_ToU64 ////////////////////////////////////////////////////////////////////////
|
|
//
|
|
// Convert a number represented as a string to a signed 64 bit number.
|
|
//
|
|
// The 'separator' is an optional digit separator, for example, if
// 'separator' is set to ',' then '1,234' will successfully be parsed to
// '1234'. If no separator is desired, you may pass in '0' in which case
// '1,234' will *not* be successfully parsed.
|
|
//
|
|
// Real numbers are truncated. Both '-' and '+' prefixed strings are permitted,
|
|
// i.e. "+1234" -> 1234 and "-1234" -> -1234. Strings must consist entirely of
|
|
// digits, the separator or the permitted prefixes as previously mentioned,
|
|
// otherwise this function will return false, i.e. "1234 dog" will cause the
|
|
// function to return false, however, the output is greedily converted and
|
|
// will be evaluated to "1234".
|
|
//
|
|
// 'ToU64' only '+' prefix is permitted
|
|
// 'ToI64' either '+' or '-' prefix is permitted
|
|
{
|
|
{
|
|
DN_Str8ToI64Result result = DN_Str8_ToI64(DN_STR8("-1,234"), /*separator*/ ',');
|
|
DN_Assert(result.success && result.value == -1234);
|
|
}
|
|
{
|
|
DN_Str8ToI64Result result = DN_Str8_ToI64(DN_STR8("-1,234"), /*separator*/ 0);
|
|
DN_Assert(!result.success && result.value == 1); // 1 because it's a greedy conversion
|
|
}
|
|
}
|
|
|
|
// NOTE: DN_Str8_TrimByteOrderMark ////////////////////////////////////////////////////////////
|
|
//
|
|
// Removes a leading UTF8, UTF16 BE/LE, UTF32 BE/LE byte order mark from the
|
|
// string if it's present.
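// A minimal sketch, assuming the function takes and returns a DN_Str8 (the
// exact signature is not shown in this demo; consult the header before use).
// "\xEF\xBB\xBF" below is the UTF8 byte order mark.
#if 0
{
DN_Str8 trimmed = DN_Str8_TrimByteOrderMark(DN_STR8("\xEF\xBB\xBFHello"));
DN_Assert(trimmed == DN_STR8("Hello"));
}
#endif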
|
|
|
|
// NOTE: DN_STR_FMT ///////////////////////////////////////////////////////////////////////////
|
|
//
|
|
// Unpacks a string struct that has the fields {.data, .size} for printing a
|
|
// pointer and length style string using the printf format specifier "%.*s"
|
|
//
|
|
// printf("%.*s\n", DN_STR_FMT(DN_STR8("Hello world")));
|
|
|
|
// NOTE: DN_Str8Builder_AppendF ////////////////////////////////////////////////////////////
|
|
// NOTE: DN_Str8Builder_AppendFV ////////////////////////////////////////////////////////////
|
|
// NOTE: DN_Str8Builder_AppendRef ////////////////////////////////////////////////////////////
|
|
// NOTE: DN_Str8Builder_AppendCopy ////////////////////////////////////////////////////////////
|
|
//
|
|
// - Appends a string to the string builder as follows
|
|
//
|
|
// AppendRef: Stores the string slice by value
|
|
// AppendCopy: Stores the string slice by copy (with builder's arena)
|
|
// AppendF/V: Constructs a format string and calls 'AppendRef'
|
|
|
|
// NOTE: DN_Str8Builder_Build ///////////////////////////////////////////////////////////
|
|
// NOTE: DN_Str8Builder_BuildCRT ///////////////////////////////////////////////////////////
|
|
//
|
|
// Constructs the final string by merging all the appended strings into
|
|
// one merged string.
|
|
//
|
|
// The CRT variant calls into 'malloc' and the string *must* be released
|
|
// using 'free'.
|
|
|
|
// NOTE: DN_Str8Builder_BuildSlice ///////////////////////////////////////////////////////////
|
|
//
|
|
// Constructs the final string into an array of strings (e.g. a slice)
|
|
|
|
// NOTE: DN_TicketMutex ///////////////////////////////////////////////////////////////////////
|
|
//
|
|
// A mutex implemented using an atomic compare and swap on tickets handed
|
|
// out for each critical section.
|
|
//
|
|
// This mutex serves tickets in order and will block all other threads until
// the tickets are returned in order. The thread holding the oldest ticket
// that has not been returned has right of way to execute; all other threads
// will be blocked in an atomic compare-and-swap loop.
|
|
//
|
|
// When a thread is blocked by this mutex, a spinlock intrinsic '_mm_pause' is
|
|
// used to yield the CPU and reduce contention whilst spinning. This mutex is not
|
|
// ideal for long blocking operations. This mutex does not issue any syscalls
|
|
// and relies entirely on atomic instructions.
|
|
{
    DN_TicketMutex mutex = {};
    DN_TicketMutex_Begin(&mutex); // Simple procedural mutual exclusion lock
    DN_TicketMutex_End(&mutex);

    // NOTE: DN_TicketMutex_MakeTicket ////////////////////////////////////////////////////////
    //
    // Request the next available ticket for locking from the mutex.
    DN_UInt ticket = DN_TicketMutex_MakeTicket(&mutex);

    if (DN_TicketMutex_CanLock(&mutex, ticket)) {
        // NOTE: DN_TicketMutex_BeginTicket ///////////////////////////////////////////////////
        //
        // Locks the mutex using the given ticket if possible. If it's not
        // the next ticket to be locked the executing thread will block
        // until the mutex can lock the ticket, i.e. all prior tickets are
        // returned, in sequence, to the mutex.
        DN_TicketMutex_BeginTicket(&mutex, ticket);
        DN_TicketMutex_End(&mutex);
    }
}

// NOTE: DN_ThreadContext /////////////////////////////////////////////////////////////////////
//
// Each thread is assigned, in its thread-local storage (TLS), tmem and
// permanent arena allocators. These can be used for allocations with a
// lifetime scoped to the lexical scope or for storing data permanently
// using the arena paradigm.
//
// TLS in this implementation is implemented using the `thread_local` C/C++
// keyword.
//
// 99% of the time you will want DN_OS_TLSTMem(...) which returns you a
// temporary arena for function lifetime allocations. On scope exit, the
// arena is cleared out.
//
// This library's paradigm revolves heavily around arenas, including passing
// tmem arenas into child functions for temporary calculations. If an arena is
// passed into a function, this poses a problem sometimes known as
// 'arena aliasing'.
//
// If an arena aliases another arena, e.g. the arena passed in is the same
// as the tmem arena requested in the function, we risk the tmem arena
// deallocating memory belonging to the caller on scope exit.
//
// To avoid this, the 'DN_OS_TLSTMem(...)' API takes in a list of arenas
// to ensure that we provide a tmem arena that *won't* alias with the
// caller's arena. If arena aliasing does occur and ASAN is enabled, the
// library will generally trap and report a use-after-poison violation.
{
    DN_OSTLSTMem tmem_a = DN_OS_TLSTMem(nullptr);

    // Now imagine we call a function where we pass tmem_a.arena down into
    // it. If we request tmem again, we need to pass in that arena to
    // prevent aliasing.
    DN_OSTLSTMem tmem_b = DN_OS_TLSTMem(tmem_a.arena);
    DN_Assert(tmem_a.arena != tmem_b.arena);
}

// @proc DN_Thread_GetTMem
// @desc Retrieve the per-thread temporary arena allocator that is reset on scope
// exit.

// The tmem arena must be deconflicted with any existing arenas in the
// function to avoid trampling over each other's memory. Consider the situation
// where the tmem arena is passed into the function: if the function requests
// the same arena again and both arenas allocate, then resetting the inner
// arena will undo the caller's allocations made through the passed-in arena.

// @param[in] conflict_arena A pointer to the arena currently being used in the
// function

// NOTE: DN_CVT_U64ToStr8 /////////////////////////////////////////////////////////////////////////
{
    DN_CVTU64Str8 string = DN_CVT_U64ToStr8(123123, ',');
    if (0) // Prints "123,123"
        printf("%.*s", DN_STR_FMT(string));
}

// NOTE: DN_CVT_U64ToAge //////////////////////////////////////////////////////////////////////////
{
    DN_OSTLSTMem tmem = DN_OS_TLSTMem(nullptr);
    DN_Str8 string = DN_CVT_U64ToAge(tmem.arena, DN_HoursToSec(2) + DN_MinutesToSec(30), DN_CVTU64AgeUnit_All);
    DN_Assert(DN_Str8_Eq(string, DN_STR8("2h 30m")));
}

// NOTE: DN_VArray ////////////////////////////////////////////////////////////////////////////
//
// An array that is backed by virtual memory by reserving addressing space
// and committing pages as items are allocated in the array. This array never
// reallocs; instead you should reserve the upper bound of the memory you
// will possibly ever need (e.g. 16GB) and let the array commit physical
// pages on demand.
//
// On 64 bit operating systems you are given 48 bits of addressable space
// giving you 256 TB of reservable memory. This gives you practically
// an unlimited array capacity that avoids reallocs and only consumes memory
// that is actually occupied by the array.
//
// Each page that is committed into the array will be at page/allocation
// granularity, which is always cache aligned. This array essentially retains
// all the benefits of normal arrays,
//
// - contiguous memory
// - O(1) random access
// - O(N) iterate
//
// in addition to no reallocs on expansion or shrinking.
//
{
    // NOTE: DN_VArray_Init ///////////////////////////////////////////////////////////
    // NOTE: DN_VArray_InitByteSize ///////////////////////////////////////////////////////////
    //
    // Initialise an array with the requested byte size or item capacity
    // respectively. The returned array may have a higher capacity than the
    // requested amount since requested memory from the OS may have a certain
    // alignment requirement (e.g. on Windows reserve/commit are 64k/4k
    // aligned).
    DN_VArray<int> array = DN_VArray_Init<int>(1024);
    DN_Assert(array.size == 0 && array.max >= 1024);

    // NOTE: DN_VArray_Make //////////////////////////////////////////////////////////////
    // NOTE: DN_VArray_Add //////////////////////////////////////////////////////////////
    // NOTE: DN_VArray_MakeArray //////////////////////////////////////////////////////////////
    // NOTE: DN_VArray_AddArray //////////////////////////////////////////////////////////////
    //
    // Allocate items from the array where:
    //
    // Make: creates a zero-init item from the array
    // Add: creates a zero-init item and memcpys the passed-in data into the item
    //
    // If the array has run out of capacity or was never initialised, a null
    // pointer is returned.
    int *item = DN_VArray_Add(&array, 0xCAFE);
    DN_Assert(*item == 0xCAFE && array.size == 1);

    // NOTE: DN_VArray_AddCArray /////////////////////////////////////////////////////////////
    DN_VArray_AddCArray(&array, {1, 2, 3});
    DN_Assert(array.size == 4);

    // TODO(doyle): There's a bug here with the negative erase!
    // Loop over the array items and erase 1 item.
#if 0
    for (DN_USize index = 0; index < array.size; index++) {
        if (index != 1)
            continue;

        // NOTE: DN_VArray_EraseRange /////////////////////////////////////////////////////////
        //
        // Erase the next 'count' items at 'begin_index' in the array.
        // 'count' can be positive or negative which dictates whether we
        // erase forward from the 'begin_index' or in reverse.
        //
        // This operation will invalidate all pointers to the array!
        //
        // A stable erase will shift all elements after the erased range
        // into the range preserving the order of prior elements. Unstable
        // erase will move the tail elements into the range being erased.
        //
        // Erase range returns a result that contains the next iterator
        // index that can be used to update your for loop index if you
        // are trying to iterate over the array.

        // TODO(doyle): There's a bug here! This doesn't work.
        // Erase index 0 with the negative count!
        DN_ArrayEraseResult erase_result = DN_VArray_EraseRange(&array,
                                                                /*begin_index*/ index,
                                                                /*count*/ -1,
                                                                /*erase*/ DN_ArrayErase_Stable);
        DN_Assert(erase_result.items_erased == 1);

        // Use the index returned to continue linearly iterating the array
        index = erase_result.it_index;
        DN_Assert(array.data[index + 1] == 2); // Next loop iteration will process item '2'
    }

    DN_Assert(array.size == 3 &&
              array.data[0] == 1 &&
              array.data[1] == 2 &&
              array.data[2] == 3);
#endif

    // NOTE: DN_VArray_Reserve ////////////////////////////////////////////////////////////////////
    //
    // Ensure that the requested number of items are backed by physical pages
    // from the OS. Calling this pre-emptively will minimise syscalls into the
    // kernel to request memory. The requested item count will be rounded up,
    // in bytes, to the allocation granularity of the OS allocation APIs, hence
    // the reserved space may be greater than the requested amount (e.g. this
    // is 4k on Windows).
    DN_VArray_Reserve(&array, /*count*/ 8);

    DN_VArray_Deinit(&array);
}

// NOTE: DN_W32_LastError /////////////////////////////////////////////////////////////
// NOTE: DN_W32_ErrorCodeToMsg /////////////////////////////////////////////////////////////
#if defined(DN_PLATFORM_WIN32)
if (0) {
    // Generate the error string for the last Win32 API call that returned
    // an error value.
    DN_OSTLSTMem tmem = DN_OS_TLSTMem(nullptr);
    DN_W32Error get_last_error = DN_W32_LastError(tmem.arena);
    printf("Error (%lu): %.*s", get_last_error.code, DN_STR_FMT(get_last_error.msg));

    // Alternatively, pass in the error code directly
    DN_W32Error error_msg_for_code = DN_W32_ErrorCodeToMsg(tmem.arena, /*error_code*/ 0);
    printf("Error (%lu): %.*s", error_msg_for_code.code, DN_STR_FMT(error_msg_for_code.msg));
}

// NOTE: DN_W32_MakeProcessDPIAware ///////////////////////////////////////////////////////////
//
// Call once at application start-up to ensure that the application is DPI
// aware on Windows and that the application UI is scaled up appropriately
// for the monitor.

// NOTE: DN_W32_Str8ToStr16 /////////////////////////////////////////////////////////////
// NOTE: DN_W32_Str8ToStr16Buffer /////////////////////////////////////////////////////////////
// NOTE: DN_W32_Str16ToStr8 /////////////////////////////////////////////////////////////
// NOTE: DN_W32_Str16ToStr8Buffer /////////////////////////////////////////////////////////////
//
// Convert a UTF8 <-> UTF16 string.
//
// The exact buffer size required for this function can be determined by
// calling this function with 'dest' set to null and 'dest_size' set to 0;
// the returned size is the size required for the conversion, not including
// space for the null-terminator. This function *always* null-terminates the
// destination buffer.
//
// Returns the number of u8's (for UTF16->8) OR u16's (for UTF8->16)
// written/required for the conversion, or 0 if there was a conversion error
// which can be queried using 'DN_W32_LastError'
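//
// A sketch of the sizing pattern described above (the exact parameter order
// is an assumption used for illustration, not confirmed by this header):
//
//   int required = DN_W32_Str8ToStr16Buffer(DN_STR8("Hello"), /*dest*/ nullptr, /*dest_size*/ 0);
//   // ... allocate 'required + 1' u16's, then call again with the real buffer ...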
#endif
}
|
|
|
|
DN_MSVC_WARNING_POP
|
|
#define DN_MATH_CPP
|
|
|
|
#if !defined(DN_NO_V2)
|
|
// NOTE: DN_V2 /////////////////////////////////////////////////////////////////////////////////////
|
|
// NOTE: DN_V2I32
|
|
DN_API bool operator==(DN_V2I32 lhs, DN_V2I32 rhs)
|
|
{
|
|
bool result = (lhs.x == rhs.x) && (lhs.y == rhs.y);
|
|
return result;
|
|
}
|
|
|
|
DN_API bool operator!=(DN_V2I32 lhs, DN_V2I32 rhs)
|
|
{
|
|
bool result = !(lhs == rhs);
|
|
return result;
|
|
}
|
|
|
|
DN_API bool operator>=(DN_V2I32 lhs, DN_V2I32 rhs)
|
|
{
|
|
bool result = (lhs.x >= rhs.x) && (lhs.y >= rhs.y);
|
|
return result;
|
|
}
|
|
|
|
DN_API bool operator<=(DN_V2I32 lhs, DN_V2I32 rhs)
|
|
{
|
|
bool result = (lhs.x <= rhs.x) && (lhs.y <= rhs.y);
|
|
return result;
|
|
}
|
|
|
|
DN_API bool operator<(DN_V2I32 lhs, DN_V2I32 rhs)
|
|
{
|
|
bool result = (lhs.x < rhs.x) && (lhs.y < rhs.y);
|
|
return result;
|
|
}
|
|
|
|
DN_API bool operator>(DN_V2I32 lhs, DN_V2I32 rhs)
|
|
{
|
|
bool result = (lhs.x > rhs.x) && (lhs.y > rhs.y);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_V2I32 operator-(DN_V2I32 lhs, DN_V2I32 rhs)
|
|
{
|
|
DN_V2I32 result = DN_V2I32_Init2N(lhs.x - rhs.x, lhs.y - rhs.y);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_V2I32 operator-(DN_V2I32 lhs)
|
|
{
|
|
DN_V2I32 result = DN_V2I32_Init2N(-lhs.x, -lhs.y);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_V2I32 operator+(DN_V2I32 lhs, DN_V2I32 rhs)
|
|
{
|
|
DN_V2I32 result = DN_V2I32_Init2N(lhs.x + rhs.x, lhs.y + rhs.y);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_V2I32 operator*(DN_V2I32 lhs, DN_V2I32 rhs)
|
|
{
|
|
DN_V2I32 result = DN_V2I32_Init2N(lhs.x * rhs.x, lhs.y * rhs.y);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_V2I32 operator*(DN_V2I32 lhs, DN_F32 rhs)
|
|
{
|
|
DN_V2I32 result = DN_V2I32_Init2N(lhs.x * rhs, lhs.y * rhs);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_V2I32 operator*(DN_V2I32 lhs, int32_t rhs)
|
|
{
|
|
DN_V2I32 result = DN_V2I32_Init2N(lhs.x * rhs, lhs.y * rhs);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_V2I32 operator/(DN_V2I32 lhs, DN_V2I32 rhs)
|
|
{
|
|
DN_V2I32 result = DN_V2I32_Init2N(lhs.x / rhs.x, lhs.y / rhs.y);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_V2I32 operator/(DN_V2I32 lhs, DN_F32 rhs)
|
|
{
|
|
DN_V2I32 result = DN_V2I32_Init2N(lhs.x / rhs, lhs.y / rhs);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_V2I32 operator/(DN_V2I32 lhs, int32_t rhs)
|
|
{
|
|
DN_V2I32 result = DN_V2I32_Init2N(lhs.x / rhs, lhs.y / rhs);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_V2I32 &operator*=(DN_V2I32 &lhs, DN_V2I32 rhs)
|
|
{
|
|
lhs = lhs * rhs;
|
|
return lhs;
|
|
}
|
|
|
|
DN_API DN_V2I32 &operator*=(DN_V2I32 &lhs, DN_F32 rhs)
|
|
{
|
|
lhs = lhs * rhs;
|
|
return lhs;
|
|
}
|
|
|
|
DN_API DN_V2I32 &operator*=(DN_V2I32 &lhs, int32_t rhs)
|
|
{
|
|
lhs = lhs * rhs;
|
|
return lhs;
|
|
}
|
|
|
|
DN_API DN_V2I32 &operator/=(DN_V2I32 &lhs, DN_V2I32 rhs)
|
|
{
|
|
lhs = lhs / rhs;
|
|
return lhs;
|
|
}
|
|
|
|
DN_API DN_V2I32 &operator/=(DN_V2I32 &lhs, DN_F32 rhs)
|
|
{
|
|
lhs = lhs / rhs;
|
|
return lhs;
|
|
}
|
|
|
|
DN_API DN_V2I32 &operator/=(DN_V2I32 &lhs, int32_t rhs)
|
|
{
|
|
lhs = lhs / rhs;
|
|
return lhs;
|
|
}
|
|
|
|
DN_API DN_V2I32 &operator-=(DN_V2I32 &lhs, DN_V2I32 rhs)
|
|
{
|
|
lhs = lhs - rhs;
|
|
return lhs;
|
|
}
|
|
|
|
DN_API DN_V2I32 &operator+=(DN_V2I32 &lhs, DN_V2I32 rhs)
|
|
{
|
|
lhs = lhs + rhs;
|
|
return lhs;
|
|
}
|
|
|
|
DN_API DN_V2I32 DN_V2I32_Min(DN_V2I32 a, DN_V2I32 b)
|
|
{
|
|
DN_V2I32 result = DN_V2I32_Init2N(DN_Min(a.x, b.x), DN_Min(a.y, b.y));
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_V2I32 DN_V2I32_Max(DN_V2I32 a, DN_V2I32 b)
|
|
{
|
|
DN_V2I32 result = DN_V2I32_Init2N(DN_Max(a.x, b.x), DN_Max(a.y, b.y));
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_V2I32 DN_V2I32_Abs(DN_V2I32 a)
|
|
{
|
|
DN_V2I32 result = DN_V2I32_Init2N(DN_Abs(a.x), DN_Abs(a.y));
|
|
return result;
|
|
}
|
|
|
|
// NOTE: DN_V2U16
|
|
DN_API bool operator!=(DN_V2U16 lhs, DN_V2U16 rhs)
|
|
{
|
|
bool result = !(lhs == rhs);
|
|
return result;
|
|
}
|
|
|
|
DN_API bool operator==(DN_V2U16 lhs, DN_V2U16 rhs)
|
|
{
|
|
bool result = (lhs.x == rhs.x) && (lhs.y == rhs.y);
|
|
return result;
|
|
}
|
|
|
|
DN_API bool operator>=(DN_V2U16 lhs, DN_V2U16 rhs)
|
|
{
|
|
bool result = (lhs.x >= rhs.x) && (lhs.y >= rhs.y);
|
|
return result;
|
|
}
|
|
|
|
DN_API bool operator<=(DN_V2U16 lhs, DN_V2U16 rhs)
|
|
{
|
|
bool result = (lhs.x <= rhs.x) && (lhs.y <= rhs.y);
|
|
return result;
|
|
}
|
|
|
|
DN_API bool operator<(DN_V2U16 lhs, DN_V2U16 rhs)
|
|
{
|
|
bool result = (lhs.x < rhs.x) && (lhs.y < rhs.y);
|
|
return result;
|
|
}
|
|
|
|
DN_API bool operator>(DN_V2U16 lhs, DN_V2U16 rhs)
|
|
{
|
|
bool result = (lhs.x > rhs.x) && (lhs.y > rhs.y);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_V2U16 operator-(DN_V2U16 lhs, DN_V2U16 rhs)
|
|
{
|
|
DN_V2U16 result = DN_V2U16_Init2N(lhs.x - rhs.x, lhs.y - rhs.y);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_V2U16 operator+(DN_V2U16 lhs, DN_V2U16 rhs)
|
|
{
|
|
DN_V2U16 result = DN_V2U16_Init2N(lhs.x + rhs.x, lhs.y + rhs.y);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_V2U16 operator*(DN_V2U16 lhs, DN_V2U16 rhs)
|
|
{
|
|
DN_V2U16 result = DN_V2U16_Init2N(lhs.x * rhs.x, lhs.y * rhs.y);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_V2U16 operator*(DN_V2U16 lhs, DN_F32 rhs)
|
|
{
|
|
DN_V2U16 result = DN_V2U16_Init2N(lhs.x * rhs, lhs.y * rhs);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_V2U16 operator*(DN_V2U16 lhs, int32_t rhs)
|
|
{
|
|
DN_V2U16 result = DN_V2U16_Init2N(lhs.x * rhs, lhs.y * rhs);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_V2U16 operator/(DN_V2U16 lhs, DN_V2U16 rhs)
|
|
{
|
|
DN_V2U16 result = DN_V2U16_Init2N(lhs.x / rhs.x, lhs.y / rhs.y);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_V2U16 operator/(DN_V2U16 lhs, DN_F32 rhs)
|
|
{
|
|
DN_V2U16 result = DN_V2U16_Init2N(lhs.x / rhs, lhs.y / rhs);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_V2U16 operator/(DN_V2U16 lhs, int32_t rhs)
|
|
{
|
|
DN_V2U16 result = DN_V2U16_Init2N(lhs.x / rhs, lhs.y / rhs);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_V2U16 &operator*=(DN_V2U16 &lhs, DN_V2U16 rhs)
|
|
{
|
|
lhs = lhs * rhs;
|
|
return lhs;
|
|
}
|
|
|
|
DN_API DN_V2U16 &operator*=(DN_V2U16 &lhs, DN_F32 rhs)
|
|
{
|
|
lhs = lhs * rhs;
|
|
return lhs;
|
|
}
|
|
|
|
DN_API DN_V2U16 &operator*=(DN_V2U16 &lhs, int32_t rhs)
|
|
{
|
|
lhs = lhs * rhs;
|
|
return lhs;
|
|
}
|
|
|
|
DN_API DN_V2U16 &operator/=(DN_V2U16 &lhs, DN_V2U16 rhs)
|
|
{
|
|
lhs = lhs / rhs;
|
|
return lhs;
|
|
}
|
|
|
|
DN_API DN_V2U16 &operator/=(DN_V2U16 &lhs, DN_F32 rhs)
|
|
{
|
|
lhs = lhs / rhs;
|
|
return lhs;
|
|
}
|
|
|
|
DN_API DN_V2U16 &operator/=(DN_V2U16 &lhs, int32_t rhs)
|
|
{
|
|
lhs = lhs / rhs;
|
|
return lhs;
|
|
}
|
|
|
|
DN_API DN_V2U16 &operator-=(DN_V2U16 &lhs, DN_V2U16 rhs)
|
|
{
|
|
lhs = lhs - rhs;
|
|
return lhs;
|
|
}
|
|
|
|
DN_API DN_V2U16 &operator+=(DN_V2U16 &lhs, DN_V2U16 rhs)
|
|
{
|
|
lhs = lhs + rhs;
|
|
return lhs;
|
|
}
|
|
|
|
// NOTE: DN_V2
|
|
DN_API bool operator!=(DN_V2F32 lhs, DN_V2F32 rhs)
|
|
{
|
|
bool result = !(lhs == rhs);
|
|
return result;
|
|
}
|
|
|
|
DN_API bool operator==(DN_V2F32 lhs, DN_V2F32 rhs)
|
|
{
|
|
bool result = (lhs.x == rhs.x) && (lhs.y == rhs.y);
|
|
return result;
|
|
}
|
|
|
|
DN_API bool operator>=(DN_V2F32 lhs, DN_V2F32 rhs)
|
|
{
|
|
bool result = (lhs.x >= rhs.x) && (lhs.y >= rhs.y);
|
|
return result;
|
|
}
|
|
|
|
DN_API bool operator<=(DN_V2F32 lhs, DN_V2F32 rhs)
|
|
{
|
|
bool result = (lhs.x <= rhs.x) && (lhs.y <= rhs.y);
|
|
return result;
|
|
}
|
|
|
|
DN_API bool operator<(DN_V2F32 lhs, DN_V2F32 rhs)
|
|
{
|
|
bool result = (lhs.x < rhs.x) && (lhs.y < rhs.y);
|
|
return result;
|
|
}
|
|
|
|
DN_API bool operator>(DN_V2F32 lhs, DN_V2F32 rhs)
|
|
{
|
|
bool result = (lhs.x > rhs.x) && (lhs.y > rhs.y);
|
|
return result;
|
|
}
|
|
|
|
// NOTE: DN_V2F32 operator- //////////////////////////////////////////////////////////////////////////
|
|
DN_API DN_V2F32 operator-(DN_V2F32 lhs)
|
|
{
|
|
DN_V2F32 result = DN_V2F32_Init2N(-lhs.x, -lhs.y);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_V2F32 operator-(DN_V2F32 lhs, DN_V2F32 rhs)
|
|
{
|
|
DN_V2F32 result = DN_V2F32_Init2N(lhs.x - rhs.x, lhs.y - rhs.y);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_V2F32 operator-(DN_V2F32 lhs, DN_V2I32 rhs)
|
|
{
|
|
DN_V2F32 result = DN_V2F32_Init2N(lhs.x - rhs.x, lhs.y - rhs.y);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_V2F32 operator-(DN_V2F32 lhs, DN_F32 rhs)
|
|
{
|
|
DN_V2F32 result = DN_V2F32_Init2N(lhs.x - rhs, lhs.y - rhs);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_V2F32 operator-(DN_V2F32 lhs, int32_t rhs)
|
|
{
|
|
DN_V2F32 result = DN_V2F32_Init2N(lhs.x - rhs, lhs.y - rhs);
|
|
return result;
|
|
}
|
|
|
|
// NOTE: DN_V2F32 operator+ //////////////////////////////////////////////////////////////////////////
|
|
DN_API DN_V2F32 operator+(DN_V2F32 lhs, DN_V2F32 rhs)
|
|
{
|
|
DN_V2F32 result = DN_V2F32_Init2N(lhs.x + rhs.x, lhs.y + rhs.y);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_V2F32 operator+(DN_V2F32 lhs, DN_V2I32 rhs)
|
|
{
|
|
DN_V2F32 result = DN_V2F32_Init2N(lhs.x + rhs.x, lhs.y + rhs.y);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_V2F32 operator+(DN_V2F32 lhs, DN_F32 rhs)
|
|
{
|
|
DN_V2F32 result = DN_V2F32_Init2N(lhs.x + rhs, lhs.y + rhs);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_V2F32 operator+(DN_V2F32 lhs, int32_t rhs)
|
|
{
|
|
DN_V2F32 result = DN_V2F32_Init2N(lhs.x + rhs, lhs.y + rhs);
|
|
return result;
|
|
}
|
|
|
|
// NOTE: DN_V2F32 operator* //////////////////////////////////////////////////////////////////////////
|
|
DN_API DN_V2F32 operator*(DN_V2F32 lhs, DN_V2F32 rhs)
|
|
{
|
|
DN_V2F32 result = DN_V2F32_Init2N(lhs.x * rhs.x, lhs.y * rhs.y);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_V2F32 operator*(DN_V2F32 lhs, DN_V2I32 rhs)
|
|
{
|
|
DN_V2F32 result = DN_V2F32_Init2N(lhs.x * rhs.x, lhs.y * rhs.y);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_V2F32 operator*(DN_V2F32 lhs, DN_F32 rhs)
|
|
{
|
|
DN_V2F32 result = DN_V2F32_Init2N(lhs.x * rhs, lhs.y * rhs);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_V2F32 operator*(DN_V2F32 lhs, int32_t rhs)
|
|
{
|
|
DN_V2F32 result = DN_V2F32_Init2N(lhs.x * rhs, lhs.y * rhs);
|
|
return result;
|
|
}
|
|
|
|
// NOTE: DN_V2F32 operator/ //////////////////////////////////////////////////////////////////////////
|
|
DN_API DN_V2F32 operator/(DN_V2F32 lhs, DN_V2F32 rhs)
|
|
{
|
|
DN_V2F32 result = DN_V2F32_Init2N(lhs.x / rhs.x, lhs.y / rhs.y);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_V2F32 operator/(DN_V2F32 lhs, DN_V2I32 rhs)
|
|
{
|
|
DN_V2F32 result = DN_V2F32_Init2N(lhs.x / rhs.x, lhs.y / rhs.y);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_V2F32 operator/(DN_V2F32 lhs, DN_F32 rhs)
|
|
{
|
|
DN_V2F32 result = DN_V2F32_Init2N(lhs.x / rhs, lhs.y / rhs);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_V2F32 operator/(DN_V2F32 lhs, int32_t rhs)
|
|
{
|
|
DN_V2F32 result = DN_V2F32_Init2N(lhs.x / rhs, lhs.y / rhs);
|
|
return result;
|
|
}
|
|
|
|
// NOTE: DN_V2F32 operator*/ /////////////////////////////////////////////////////////////////////////
|
|
DN_API DN_V2F32 &operator*=(DN_V2F32 &lhs, DN_V2F32 rhs)
|
|
{
|
|
lhs = lhs * rhs;
|
|
return lhs;
|
|
}
|
|
|
|
DN_API DN_V2F32 &operator*=(DN_V2F32 &lhs, DN_V2I32 rhs)
|
|
{
|
|
lhs = lhs * rhs;
|
|
return lhs;
|
|
}
|
|
|
|
DN_API DN_V2F32 &operator*=(DN_V2F32 &lhs, DN_F32 rhs)
|
|
{
|
|
lhs = lhs * rhs;
|
|
return lhs;
|
|
}
|
|
|
|
DN_API DN_V2F32 &operator*=(DN_V2F32 &lhs, int32_t rhs)
|
|
{
|
|
lhs = lhs * rhs;
|
|
return lhs;
|
|
}
|
|
|
|
// NOTE: DN_V2F32 operator// /////////////////////////////////////////////////////////////////////////
|
|
DN_API DN_V2F32 &operator/=(DN_V2F32 &lhs, DN_V2F32 rhs)
|
|
{
|
|
lhs = lhs / rhs;
|
|
return lhs;
|
|
}
|
|
|
|
DN_API DN_V2F32 &operator/=(DN_V2F32 &lhs, DN_V2I32 rhs)
|
|
{
|
|
lhs = lhs / rhs;
|
|
return lhs;
|
|
}
|
|
|
|
DN_API DN_V2F32 &operator/=(DN_V2F32 &lhs, DN_F32 rhs)
|
|
{
|
|
lhs = lhs / rhs;
|
|
return lhs;
|
|
}
|
|
|
|
DN_API DN_V2F32 &operator/=(DN_V2F32 &lhs, int32_t rhs)
|
|
{
|
|
lhs = lhs / rhs;
|
|
return lhs;
|
|
}
|
|
|
|
// NOTE: DN_V2F32 operator-/ /////////////////////////////////////////////////////////////////////////
|
|
DN_API DN_V2F32 &operator-=(DN_V2F32 &lhs, DN_V2F32 rhs)
|
|
{
|
|
lhs = lhs - rhs;
|
|
return lhs;
|
|
}
|
|
|
|
DN_API DN_V2F32 &operator-=(DN_V2F32 &lhs, DN_V2I32 rhs)
|
|
{
|
|
lhs = lhs - rhs;
|
|
return lhs;
|
|
}
|
|
|
|
DN_API DN_V2F32 &operator-=(DN_V2F32 &lhs, DN_F32 rhs)
|
|
{
|
|
lhs = lhs - rhs;
|
|
return lhs;
|
|
}
|
|
|
|
DN_API DN_V2F32 &operator-=(DN_V2F32 &lhs, int32_t rhs)
|
|
{
|
|
lhs = lhs - rhs;
|
|
return lhs;
|
|
}
|
|
|
|
// NOTE: DN_V2F32 operator+/ /////////////////////////////////////////////////////////////////////////
|
|
DN_API DN_V2F32 &operator+=(DN_V2F32 &lhs, DN_V2F32 rhs)
|
|
{
|
|
lhs = lhs + rhs;
|
|
return lhs;
|
|
}
|
|
|
|
DN_API DN_V2F32 &operator+=(DN_V2F32 &lhs, DN_V2I32 rhs)
|
|
{
|
|
lhs = lhs + rhs;
|
|
return lhs;
|
|
}
|
|
|
|
DN_API DN_V2F32 &operator+=(DN_V2F32 &lhs, DN_F32 rhs)
|
|
{
|
|
lhs = lhs + rhs;
|
|
return lhs;
|
|
}
|
|
|
|
DN_API DN_V2F32 &operator+=(DN_V2F32 &lhs, int32_t rhs)
|
|
{
|
|
lhs = lhs + rhs;
|
|
return lhs;
|
|
}
|
|
|
|
DN_API DN_V2F32 DN_V2F32_Min(DN_V2F32 a, DN_V2F32 b)
|
|
{
|
|
DN_V2F32 result = DN_V2F32_Init2N(DN_Min(a.x, b.x), DN_Min(a.y, b.y));
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_V2F32 DN_V2F32_Max(DN_V2F32 a, DN_V2F32 b)
|
|
{
|
|
DN_V2F32 result = DN_V2F32_Init2N(DN_Max(a.x, b.x), DN_Max(a.y, b.y));
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_V2F32 DN_V2F32_Abs(DN_V2F32 a)
|
|
{
|
|
DN_V2F32 result = DN_V2F32_Init2N(DN_Abs(a.x), DN_Abs(a.y));
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_F32 DN_V2F32_Dot(DN_V2F32 a, DN_V2F32 b)
|
|
{
|
|
// NOTE: Scalar projection of B onto A /////////////////////////////////////////////////////////
|
|
//
|
|
    // Scalar projection calculates the signed distance between `b` and `a`.
    // When `a` is a unit vector, the dot product calculates the projection of
    // `b` onto the infinite line that the direction of `a` represents; this
    // projection is the signed distance.
|
|
//
|
|
// signed_distance = dot_product(a, b) = (a.x * b.x) + (a.y * b.y)
|
|
//
|
|
// Y
|
|
// ^ b
|
|
// | /|
|
|
// | / |
|
|
// | / |
|
|
// | / | Projection
|
|
// | / |
|
|
// |/ V
|
|
// +--->--------> X
|
|
// . a .
|
|
// . .
|
|
// |------| <- Calculated signed distance
|
|
//
|
|
// The signed-ness of the result indicates the relationship:
|
|
//
|
|
// Distance <0 means `b` is behind `a`
|
|
// Distance >0 means `b` is in-front of `a`
|
|
// Distance ==0 means `b` is perpendicular to `a`
|
|
//
|
|
    // If `a` is not normalised then the signed-ness of the result still holds,
    // however the result no longer represents the actual distance between the
    // 2 objects. One of the vectors must be normalised (e.g. turned into a unit
    // vector).
|
|
//
|
|
    // NOTE: Vector projection /////////////////////////////////////////////////////////////////
    //
    // Vector projection calculates the exact X,Y coordinates of where `b` meets
    // `a` when it was projected. This is calculated by multiplying the
    // 'scalar projection' result by the unit vector of `a`
    //
    // vector_projection = a * signed_distance = a * dot_product(a, b)
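    //
    // For example, with `a` as the unit X axis the dot product reads off the X
    // component of `b` directly:
    //
    //   DN_V2F32 a = DN_V2F32_Init2N(1.f, 0.f); // Unit vector along X
    //   DN_V2F32 b = DN_V2F32_Init2N(3.f, 4.f);
    //   DN_F32   d = DN_V2F32_Dot(a, b);        // d == 3.f => `b` is in-front of `a`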
|
|
|
|
DN_F32 result = (a.x * b.x) + (a.y * b.y);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_F32 DN_V2F32_LengthSq_V2x2(DN_V2F32 lhs, DN_V2F32 rhs)
|
|
{
|
|
// NOTE: Pythagoras's theorem (a^2 + b^2 = c^2) without the square root
|
|
DN_F32 a = rhs.x - lhs.x;
|
|
DN_F32 b = rhs.y - lhs.y;
|
|
DN_F32 c_squared = DN_Squared(a) + DN_Squared(b);
|
|
DN_F32 result = c_squared;
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_F32 DN_V2F32_Length_V2x2(DN_V2F32 lhs, DN_V2F32 rhs)
|
|
{
|
|
DN_F32 result_squared = DN_V2F32_LengthSq_V2x2(lhs, rhs);
|
|
DN_F32 result = DN_SqrtF32(result_squared);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_F32 DN_V2F32_LengthSq(DN_V2F32 lhs)
|
|
{
|
|
// NOTE: Pythagoras's theorem without the square root
|
|
DN_F32 c_squared = DN_Squared(lhs.x) + DN_Squared(lhs.y);
|
|
DN_F32 result = c_squared;
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_F32 DN_V2F32_Length(DN_V2F32 lhs)
|
|
{
|
|
DN_F32 c_squared = DN_V2F32_LengthSq(lhs);
|
|
DN_F32 result = DN_SqrtF32(c_squared);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_V2F32 DN_V2F32_Normalise(DN_V2F32 a)
|
|
{
|
|
DN_F32 length = DN_V2F32_Length(a);
|
|
DN_V2F32 result = a / length;
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_V2F32 DN_V2F32_Perpendicular(DN_V2F32 a)
|
|
{
|
|
    // NOTE: The matrix form of a 2D rotation can be defined as
    //
    // x' = x cos(t) - y sin(t)
    // y' = x sin(t) + y cos(t)
    //
    // Calculating a vector perpendicular to another vector means rotating the
    // vector by 90 degrees
    //
    // x' = x cos(90) - y sin(90)
    // y' = x sin(90) + y cos(90)
    //
    // Where `cos(90) = 0` and `sin(90) = 1` then,
    //
    // x' = -y
    // y' = +x
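    //
    // For example, the perpendicular of the X axis (1, 0) is the Y axis (0, 1):
    //
    //   DN_V2F32 perp = DN_V2F32_Perpendicular(DN_V2F32_Init2N(1.f, 0.f)); // => (0, 1)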
|
|
|
|
DN_V2F32 result = DN_V2F32_Init2N(-a.y, a.x);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_V2F32 DN_V2F32_Reflect(DN_V2F32 in, DN_V2F32 surface)
{
    // NOTE: Reflect `in` about the surface by subtracting twice its projection
    // onto the surface's normal, i.e. r = in - 2*dot(in, n)*n
    DN_V2F32 normal      = DN_V2F32_Perpendicular(surface);
    DN_V2F32 normal_norm = DN_V2F32_Normalise(normal);
    DN_F32   signed_dist = DN_V2F32_Dot(in, normal_norm);
    DN_V2F32 result      = in - (normal_norm * (2.f * signed_dist));
    return result;
}
|
|
|
|
DN_API DN_F32 DN_V2F32_Area(DN_V2F32 a)
|
|
{
|
|
DN_F32 result = a.w * a.h;
|
|
return result;
|
|
}
|
|
#endif // !defined(DN_NO_V2)
|
|
|
|
#if !defined(DN_NO_V3)
|
|
// NOTE: DN_V3 /////////////////////////////////////////////////////////////////////////////////////
|
|
DN_API bool operator!=(DN_V3F32 lhs, DN_V3F32 rhs)
|
|
{
|
|
bool result = !(lhs == rhs);
|
|
return result;
|
|
}
|
|
|
|
DN_API bool operator==(DN_V3F32 lhs, DN_V3F32 rhs)
|
|
{
|
|
bool result = (lhs.x == rhs.x) && (lhs.y == rhs.y) && (lhs.z == rhs.z);
|
|
return result;
|
|
}
|
|
|
|
DN_API bool operator>=(DN_V3F32 lhs, DN_V3F32 rhs)
|
|
{
|
|
bool result = (lhs.x >= rhs.x) && (lhs.y >= rhs.y) && (lhs.z >= rhs.z);
|
|
return result;
|
|
}
|
|
|
|
DN_API bool operator<=(DN_V3F32 lhs, DN_V3F32 rhs)
|
|
{
|
|
bool result = (lhs.x <= rhs.x) && (lhs.y <= rhs.y) && (lhs.z <= rhs.z);
|
|
return result;
|
|
}
|
|
|
|
DN_API bool operator<(DN_V3F32 lhs, DN_V3F32 rhs)
|
|
{
|
|
bool result = (lhs.x < rhs.x) && (lhs.y < rhs.y) && (lhs.z < rhs.z);
|
|
return result;
|
|
}
|
|
|
|
DN_API bool operator>(DN_V3F32 lhs, DN_V3F32 rhs)
|
|
{
|
|
bool result = (lhs.x > rhs.x) && (lhs.y > rhs.y) && (lhs.z > rhs.z);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_V3F32 operator-(DN_V3F32 lhs, DN_V3F32 rhs)
|
|
{
|
|
DN_V3F32 result = DN_V3F32_Init3F32(lhs.x - rhs.x, lhs.y - rhs.y, lhs.z - rhs.z);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_V3F32 operator-(DN_V3F32 lhs)
|
|
{
|
|
DN_V3F32 result = DN_V3F32_Init3F32(-lhs.x, -lhs.y, -lhs.z);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_V3F32 operator+(DN_V3F32 lhs, DN_V3F32 rhs)
|
|
{
|
|
DN_V3F32 result = DN_V3F32_Init3F32(lhs.x + rhs.x, lhs.y + rhs.y, lhs.z + rhs.z);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_V3F32 operator*(DN_V3F32 lhs, DN_V3F32 rhs)
|
|
{
|
|
DN_V3F32 result = DN_V3F32_Init3F32(lhs.x * rhs.x, lhs.y * rhs.y, lhs.z * rhs.z);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_V3F32 operator*(DN_V3F32 lhs, DN_F32 rhs)
|
|
{
|
|
DN_V3F32 result = DN_V3F32_Init3F32(lhs.x * rhs, lhs.y * rhs, lhs.z * rhs);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_V3F32 operator*(DN_V3F32 lhs, int32_t rhs)
|
|
{
|
|
DN_V3F32 result = DN_V3F32_Init3F32(lhs.x * rhs, lhs.y * rhs, lhs.z * rhs);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_V3F32 operator/(DN_V3F32 lhs, DN_V3F32 rhs)
|
|
{
|
|
DN_V3F32 result = DN_V3F32_Init3F32(lhs.x / rhs.x, lhs.y / rhs.y, lhs.z / rhs.z);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_V3F32 operator/(DN_V3F32 lhs, DN_F32 rhs)
|
|
{
|
|
DN_V3F32 result = DN_V3F32_Init3F32(lhs.x / rhs, lhs.y / rhs, lhs.z / rhs);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_V3F32 operator/(DN_V3F32 lhs, int32_t rhs)
|
|
{
|
|
DN_V3F32 result = DN_V3F32_Init3F32(lhs.x / rhs, lhs.y / rhs, lhs.z / rhs);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_V3F32 &operator*=(DN_V3F32 &lhs, DN_V3F32 rhs)
|
|
{
|
|
lhs = lhs * rhs;
|
|
return lhs;
|
|
}
|
|
|
|
DN_API DN_V3F32 &operator*=(DN_V3F32 &lhs, DN_F32 rhs)
|
|
{
|
|
lhs = lhs * rhs;
|
|
return lhs;
|
|
}
|
|
|
|
DN_API DN_V3F32 &operator*=(DN_V3F32 &lhs, int32_t rhs)
|
|
{
|
|
lhs = lhs * rhs;
|
|
return lhs;
|
|
}
|
|
|
|
DN_API DN_V3F32 &operator/=(DN_V3F32 &lhs, DN_V3F32 rhs)
|
|
{
|
|
lhs = lhs / rhs;
|
|
return lhs;
|
|
}
|
|
|
|
DN_API DN_V3F32 &operator/=(DN_V3F32 &lhs, DN_F32 rhs)
|
|
{
|
|
lhs = lhs / rhs;
|
|
return lhs;
|
|
}
|
|
|
|
DN_API DN_V3F32 &operator/=(DN_V3F32 &lhs, int32_t rhs)
|
|
{
|
|
lhs = lhs / rhs;
|
|
return lhs;
|
|
}
|
|
|
|
DN_API DN_V3F32 &operator-=(DN_V3F32 &lhs, DN_V3F32 rhs)
|
|
{
|
|
lhs = lhs - rhs;
|
|
return lhs;
|
|
}
|
|
|
|
DN_API DN_V3F32 &operator+=(DN_V3F32 &lhs, DN_V3F32 rhs)
|
|
{
|
|
lhs = lhs + rhs;
|
|
return lhs;
|
|
}
|
|
|
|
DN_API DN_F32 DN_V3_LengthSq(DN_V3F32 a)
|
|
{
|
|
DN_F32 result = DN_Squared(a.x) + DN_Squared(a.y) + DN_Squared(a.z);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_F32 DN_V3_Length(DN_V3F32 a)
|
|
{
|
|
DN_F32 length_sq = DN_Squared(a.x) + DN_Squared(a.y) + DN_Squared(a.z);
|
|
DN_F32 result = DN_SqrtF32(length_sq);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_V3F32 DN_V3_Normalise(DN_V3F32 a)
|
|
{
|
|
DN_F32 length = DN_V3_Length(a);
|
|
DN_V3F32 result = a / length;
|
|
return result;
|
|
}
|
|
#endif // !defined(DN_NO_V3)
|
|
|
|
#if !defined(DN_NO_V4)
|
|
// NOTE: DN_V4 /////////////////////////////////////////////////////////////////////////////////////
|
|
DN_API bool operator==(DN_V4F32 lhs, DN_V4F32 rhs)
|
|
{
|
|
bool result = (lhs.x == rhs.x) && (lhs.y == rhs.y) && (lhs.z == rhs.z) && (lhs.w == rhs.w);
|
|
return result;
|
|
}
|
|
|
|
DN_API bool operator!=(DN_V4F32 lhs, DN_V4F32 rhs)
|
|
{
|
|
bool result = !(lhs == rhs);
|
|
return result;
|
|
}
|
|
|
|
DN_API bool operator>=(DN_V4F32 lhs, DN_V4F32 rhs)
|
|
{
|
|
bool result = (lhs.x >= rhs.x) && (lhs.y >= rhs.y) && (lhs.z >= rhs.z) && (lhs.w >= rhs.w);
|
|
return result;
|
|
}
|
|
|
|
DN_API bool operator<=(DN_V4F32 lhs, DN_V4F32 rhs)
|
|
{
|
|
bool result = (lhs.x <= rhs.x) && (lhs.y <= rhs.y) && (lhs.z <= rhs.z) && (lhs.w <= rhs.w);
|
|
return result;
|
|
}
|
|
|
|
DN_API bool operator<(DN_V4F32 lhs, DN_V4F32 rhs)
|
|
{
|
|
bool result = (lhs.x < rhs.x) && (lhs.y < rhs.y) && (lhs.z < rhs.z) && (lhs.w < rhs.w);
|
|
return result;
|
|
}
|
|
|
|
DN_API bool operator>(DN_V4F32 lhs, DN_V4F32 rhs)
|
|
{
|
|
bool result = (lhs.x > rhs.x) && (lhs.y > rhs.y) && (lhs.z > rhs.z) && (lhs.w > rhs.w);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_V4F32 operator-(DN_V4F32 lhs, DN_V4F32 rhs)
|
|
{
|
|
DN_V4F32 result = DN_V4F32_Init4N(lhs.x - rhs.x, lhs.y - rhs.y, lhs.z - rhs.z, lhs.w - rhs.w);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_V4F32 operator-(DN_V4F32 lhs)
|
|
{
|
|
DN_V4F32 result = DN_V4F32_Init4N(-lhs.x, -lhs.y, -lhs.z, -lhs.w);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_V4F32 operator+(DN_V4F32 lhs, DN_V4F32 rhs)
|
|
{
|
|
DN_V4F32 result = DN_V4F32_Init4N(lhs.x + rhs.x, lhs.y + rhs.y, lhs.z + rhs.z, lhs.w + rhs.w);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_V4F32 operator*(DN_V4F32 lhs, DN_V4F32 rhs)
|
|
{
|
|
DN_V4F32 result = DN_V4F32_Init4N(lhs.x * rhs.x, lhs.y * rhs.y, lhs.z * rhs.z, lhs.w * rhs.w);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_V4F32 operator*(DN_V4F32 lhs, DN_F32 rhs)
|
|
{
|
|
DN_V4F32 result = DN_V4F32_Init4N(lhs.x * rhs, lhs.y * rhs, lhs.z * rhs, lhs.w * rhs);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_V4F32 operator*(DN_V4F32 lhs, int32_t rhs)
|
|
{
|
|
DN_V4F32 result = DN_V4F32_Init4N(lhs.x * rhs, lhs.y * rhs, lhs.z * rhs, lhs.w * rhs);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_V4F32 operator/(DN_V4F32 lhs, DN_F32 rhs)
|
|
{
|
|
DN_V4F32 result = DN_V4F32_Init4N(lhs.x / rhs, lhs.y / rhs, lhs.z / rhs, lhs.w / rhs);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_V4F32 &operator*=(DN_V4F32 &lhs, DN_V4F32 rhs)
|
|
{
|
|
lhs = lhs * rhs;
|
|
return lhs;
|
|
}
|
|
|
|
DN_API DN_V4F32 &operator*=(DN_V4F32 &lhs, DN_F32 rhs)
|
|
{
|
|
lhs = lhs * rhs;
|
|
return lhs;
|
|
}
|
|
|
|
DN_API DN_V4F32 &operator*=(DN_V4F32 &lhs, int32_t rhs)
|
|
{
|
|
lhs = lhs * rhs;
|
|
return lhs;
|
|
}
|
|
|
|
DN_API DN_V4F32 &operator-=(DN_V4F32 &lhs, DN_V4F32 rhs)
|
|
{
|
|
lhs = lhs - rhs;
|
|
return lhs;
|
|
}
|
|
|
|
DN_API DN_V4F32 &operator+=(DN_V4F32 &lhs, DN_V4F32 rhs)
|
|
{
|
|
lhs = lhs + rhs;
|
|
return lhs;
|
|
}
|
|
|
|
DN_API DN_F32 DN_V4F32Dot(DN_V4F32 a, DN_V4F32 b)
|
|
{
|
|
DN_F32 result = (a.x * b.x) + (a.y * b.y) + (a.z * b.z) + (a.w * b.w);
|
|
return result;
|
|
}
|
|
#endif // !defined(DN_NO_V4)
|
|
|
|
#if !defined(DN_NO_M4)
|
|
// NOTE: DN_M4 /////////////////////////////////////////////////////////////////////////////////////
|
|
DN_API DN_M4 DN_M4_Identity()
|
|
{
|
|
DN_M4 result =
|
|
{
|
|
{
|
|
{1, 0, 0, 0},
|
|
{0, 1, 0, 0},
|
|
{0, 0, 1, 0},
|
|
{0, 0, 0, 1},
|
|
}
|
|
};
|
|
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_M4 DN_M4_ScaleF(DN_F32 x, DN_F32 y, DN_F32 z)
|
|
{
|
|
DN_M4 result =
|
|
{
|
|
{
|
|
{x, 0, 0, 0},
|
|
{0, y, 0, 0},
|
|
{0, 0, z, 0},
|
|
{0, 0, 0, 1},
|
|
}
|
|
};
|
|
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_M4 DN_M4_Scale(DN_V3F32 xyz)
|
|
{
|
|
DN_M4 result =
|
|
{
|
|
{
|
|
{xyz.x, 0, 0, 0},
|
|
{0, xyz.y, 0, 0},
|
|
{0, 0, xyz.z, 0},
|
|
{0, 0, 0, 1},
|
|
}
|
|
};
|
|
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_M4 DN_M4_TranslateF(DN_F32 x, DN_F32 y, DN_F32 z)
|
|
{
|
|
DN_M4 result =
|
|
{
|
|
{
|
|
{1, 0, 0, 0},
|
|
{0, 1, 0, 0},
|
|
{0, 0, 1, 0},
|
|
{x, y, z, 1},
|
|
}
|
|
};
|
|
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_M4 DN_M4_Translate(DN_V3F32 xyz)
|
|
{
|
|
DN_M4 result =
|
|
{
|
|
{
|
|
{1, 0, 0, 0},
|
|
{0, 1, 0, 0},
|
|
{0, 0, 1, 0},
|
|
{xyz.x, xyz.y, xyz.z, 1},
|
|
}
|
|
};
|
|
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_M4 DN_M4_Transpose(DN_M4 mat)
|
|
{
|
|
DN_M4 result = {};
|
|
for (int col = 0; col < 4; col++)
|
|
for (int row = 0; row < 4; row++)
|
|
result.columns[col][row] = mat.columns[row][col];
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_M4 DN_M4_Rotate(DN_V3F32 axis01, DN_F32 radians)
|
|
{
|
|
DN_AssertF(DN_Abs(DN_V3_Length(axis01) - 1.f) <= 0.01f,
|
|
"Rotation axis must be normalised, length = %f",
|
|
DN_V3_Length(axis01));
|
|
|
|
DN_F32 sin = DN_SinF32(radians);
|
|
DN_F32 cos = DN_CosF32(radians);
|
|
DN_F32 one_minus_cos = 1.f - cos;
|
|
|
|
DN_F32 x = axis01.x;
|
|
DN_F32 y = axis01.y;
|
|
DN_F32 z = axis01.z;
|
|
DN_F32 x2 = DN_Squared(x);
|
|
DN_F32 y2 = DN_Squared(y);
|
|
DN_F32 z2 = DN_Squared(z);
|
|
|
|
DN_M4 result =
|
|
{
|
|
{
|
|
{cos + x2 * one_minus_cos, y * x * one_minus_cos + z * sin, z * x * one_minus_cos - y * sin, 0}, // Col 1
|
|
{x * y * one_minus_cos - z * sin, cos + y2 * one_minus_cos, z * y * one_minus_cos + x * sin, 0}, // Col 2
|
|
{x * z * one_minus_cos + y * sin, y * z * one_minus_cos - x * sin, cos + z2 * one_minus_cos, 0}, // Col 3
|
|
{0, 0, 0, 1}, // Col 4
|
|
}
|
|
};
|
|
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_M4 DN_M4_Orthographic(DN_F32 left, DN_F32 right, DN_F32 bottom, DN_F32 top, DN_F32 z_near, DN_F32 z_far)
|
|
{
|
|
    // NOTE: Here is the matrix in standard math notation for readability. Below
    // it's transposed due to how you have to declare column major matrices in
    // C/C++.
    //
    // m = [2/(r-l),  0,        0,        -1*(r+l)/(r-l)]
    //     [0,        2/(t-b),  0,        -1*(t+b)/(t-b)]
    //     [0,        0,        -2/(f-n), -1*(f+n)/(f-n)]
    //     [0,        0,        0,         1            ]
|
|
|
|
DN_M4 result =
|
|
{
|
|
{
|
|
{2.f / (right - left), 0.f, 0.f, 0.f},
|
|
{0.f, 2.f / (top - bottom), 0.f, 0.f},
|
|
{0.f, 0.f, -2.f / (z_far - z_near), 0.f},
|
|
{(-1.f * (right + left)) / (right - left), (-1.f * (top + bottom)) / (top - bottom), (-1.f * (z_far + z_near)) / (z_far - z_near), 1.f},
|
|
}
|
|
};
|
|
|
|
return result;
|
|
}
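
// For example, a typical 2D screen-space projection maps the top-left of an
// 800x600 viewport to NDC (-1, +1) and the bottom-right to (+1, -1):
//
//   DN_M4 proj = DN_M4_Orthographic(/*left*/ 0.f, /*right*/ 800.f, /*bottom*/ 600.f, /*top*/ 0.f, /*z_near*/ -1.f, /*z_far*/ 1.f);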
|
|
|
|
DN_API DN_M4 DN_M4_Perspective(DN_F32 fov /*radians*/, DN_F32 aspect, DN_F32 z_near, DN_F32 z_far)
|
|
{
|
|
DN_F32 tan_fov = DN_TanF32(fov / 2.f);
|
|
DN_M4 result =
|
|
{
|
|
{
|
|
{1.f / (aspect * tan_fov), 0.f, 0.f, 0.f},
|
|
{0, 1.f / tan_fov, 0.f, 0.f},
|
|
{0.f, 0.f, (z_near + z_far) / (z_near - z_far), -1.f},
|
|
{0.f, 0.f, (2.f * z_near * z_far) / (z_near - z_far), 0.f},
|
|
}
|
|
};
|
|
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_M4 DN_M4_Add(DN_M4 lhs, DN_M4 rhs)
|
|
{
|
|
DN_M4 result;
|
|
for (int col = 0; col < 4; col++)
|
|
for (int it = 0; it < 4; it++)
|
|
result.columns[col][it] = lhs.columns[col][it] + rhs.columns[col][it];
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_M4 DN_M4_Sub(DN_M4 lhs, DN_M4 rhs)
|
|
{
|
|
DN_M4 result;
|
|
for (int col = 0; col < 4; col++)
|
|
for (int it = 0; it < 4; it++)
|
|
result.columns[col][it] = lhs.columns[col][it] - rhs.columns[col][it];
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_M4 DN_M4_Mul(DN_M4 lhs, DN_M4 rhs)
|
|
{
|
|
DN_M4 result;
|
|
for (int col = 0; col < 4; col++) {
|
|
for (int row = 0; row < 4; row++) {
|
|
DN_F32 sum = 0;
|
|
for (int f32_it = 0; f32_it < 4; f32_it++)
|
|
sum += lhs.columns[f32_it][row] * rhs.columns[col][f32_it];
|
|
|
|
result.columns[col][row] = sum;
|
|
}
|
|
}
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_M4 DN_M4_Div(DN_M4 lhs, DN_M4 rhs)
|
|
{
|
|
DN_M4 result;
|
|
for (int col = 0; col < 4; col++)
|
|
for (int it = 0; it < 4; it++)
|
|
result.columns[col][it] = lhs.columns[col][it] / rhs.columns[col][it];
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_M4 DN_M4_AddF(DN_M4 lhs, DN_F32 rhs)
|
|
{
|
|
DN_M4 result;
|
|
for (int col = 0; col < 4; col++)
|
|
for (int it = 0; it < 4; it++)
|
|
result.columns[col][it] = lhs.columns[col][it] + rhs;
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_M4 DN_M4_SubF(DN_M4 lhs, DN_F32 rhs)
|
|
{
|
|
DN_M4 result;
|
|
for (int col = 0; col < 4; col++)
|
|
for (int it = 0; it < 4; it++)
|
|
result.columns[col][it] = lhs.columns[col][it] - rhs;
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_M4 DN_M4_MulF(DN_M4 lhs, DN_F32 rhs)
|
|
{
|
|
DN_M4 result;
|
|
for (int col = 0; col < 4; col++)
|
|
for (int it = 0; it < 4; it++)
|
|
result.columns[col][it] = lhs.columns[col][it] * rhs;
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_M4 DN_M4_DivF(DN_M4 lhs, DN_F32 rhs)
|
|
{
|
|
DN_M4 result;
|
|
for (int col = 0; col < 4; col++)
|
|
for (int it = 0; it < 4; it++)
|
|
result.columns[col][it] = lhs.columns[col][it] / rhs;
|
|
return result;
|
|
}
|
|
|
|
#if !defined(DN_NO_FSTR8)
|
|
DN_API DN_FStr8<256> DN_M4_ColumnMajorString(DN_M4 mat)
|
|
{
|
|
DN_FStr8<256> result = {};
|
|
for (int row = 0; row < 4; row++) {
|
|
for (int it = 0; it < 4; it++) {
|
|
if (it == 0)
|
|
DN_FStr8_Add(&result, DN_STR8("|"));
|
|
DN_FStr8_AddF(&result, "%.5f", mat.columns[it][row]);
|
|
if (it != 3)
|
|
DN_FStr8_Add(&result, DN_STR8(", "));
|
|
else
|
|
DN_FStr8_Add(&result, DN_STR8("|\n"));
|
|
}
|
|
}
|
|
|
|
return result;
|
|
}
|
|
#endif
|
|
#endif // !defined(DN_M4)
|
|
|
|
// NOTE: DN_M2x3 ///////////////////////////////////////////////////////////////////////////////////
|
|
DN_API bool operator==(DN_M2x3 const &lhs, DN_M2x3 const &rhs)
|
|
{
|
|
bool result = DN_Memcmp(lhs.e, rhs.e, sizeof(lhs.e[0]) * DN_ArrayCountU(lhs.e)) == 0;
|
|
return result;
|
|
}
|
|
|
|
DN_API bool operator!=(DN_M2x3 const &lhs, DN_M2x3 const &rhs)
|
|
{
|
|
bool result = !(lhs == rhs);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_M2x3 DN_M2x3_Identity()
|
|
{
|
|
DN_M2x3 result = {
|
|
{
|
|
1,
|
|
0,
|
|
0,
|
|
0,
|
|
1,
|
|
0,
|
|
}
|
|
};
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_M2x3 DN_M2x3_Translate(DN_V2F32 offset)
|
|
{
|
|
DN_M2x3 result = {
|
|
{
|
|
1,
|
|
0,
|
|
offset.x,
|
|
0,
|
|
1,
|
|
offset.y,
|
|
}
|
|
};
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_M2x3 DN_M2x3_Scale(DN_V2F32 scale)
|
|
{
|
|
DN_M2x3 result = {
|
|
{
|
|
scale.x,
|
|
0,
|
|
0,
|
|
0,
|
|
scale.y,
|
|
0,
|
|
}
|
|
};
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_M2x3 DN_M2x3_Rotate(DN_F32 radians)
|
|
{
|
|
DN_M2x3 result = {
|
|
{
|
|
DN_CosF32(radians),
|
|
DN_SinF32(radians),
|
|
0,
|
|
-DN_SinF32(radians),
|
|
DN_CosF32(radians),
|
|
0,
|
|
}
|
|
};
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_M2x3 DN_M2x3_Mul(DN_M2x3 m1, DN_M2x3 m2)
|
|
{
|
|
// NOTE: Ordinarily you can't multiply M2x3 with M2x3 because column count
|
|
// (3) != row count (2). We pretend we have two 3x3 matrices with the last
|
|
// row set to [0 0 1] and perform a 3x3 matrix multiply.
|
|
//
|
|
// | (0)a (1)b (2)c | | (0)g (1)h (2)i |
|
|
// | (3)d (4)e (5)f | x | (3)j (4)k (5)l |
|
|
// | (6)0 (7)0 (8)1 | | (6)0 (7)0 (8)1 |
|
|
|
|
DN_M2x3 result = {
|
|
{
|
|
m1.e[0] * m2.e[0] + m1.e[1] * m2.e[3], // a*g + b*j + c*0[omitted],
|
|
m1.e[0] * m2.e[1] + m1.e[1] * m2.e[4], // a*h + b*k + c*0[omitted],
|
|
m1.e[0] * m2.e[2] + m1.e[1] * m2.e[5] + m1.e[2], // a*i + b*l + c*1,
|
|
|
|
m1.e[3] * m2.e[0] + m1.e[4] * m2.e[3], // d*g + e*j + f*0[omitted],
|
|
m1.e[3] * m2.e[1] + m1.e[4] * m2.e[4], // d*h + e*k + f*0[omitted],
|
|
m1.e[3] * m2.e[2] + m1.e[4] * m2.e[5] + m1.e[5], // d*i + e*l + f*1,
|
|
}
|
|
};
|
|
|
|
return result;
|
|
}
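
// For example, composing a translation with a scale yields a matrix that
// scales first and then translates when applied to a point:
//
//   DN_M2x3  scale     = DN_M2x3_Scale(DN_V2F32_Init2N(2.f, 3.f));
//   DN_M2x3  translate = DN_M2x3_Translate(DN_V2F32_Init2N(10.f, 20.f));
//   DN_M2x3  combined  = DN_M2x3_Mul(translate, scale);
//   DN_V2F32 point     = DN_M2x3_MulV2(combined, DN_V2F32_Init2N(1.f, 1.f)); // => (12, 23)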
|
|
|
|
DN_API DN_V2F32 DN_M2x3_Mul2F32(DN_M2x3 m1, DN_F32 x, DN_F32 y)
|
|
{
|
|
// NOTE: Ordinarily you can't multiply M2x3 with V2 because column count (3)
|
|
// != row count (2). We pretend we have a V3 with `z` set to `1`.
|
|
//
|
|
// | (0)a (1)b (2)c | | x |
|
|
// | (3)d (4)e (5)f | x | y |
|
|
// | 1 |
|
|
|
|
DN_V2F32 result = {
|
|
{
|
|
m1.e[0] * x + m1.e[1] * y + m1.e[2], // a*x + b*y + c*1
|
|
m1.e[3] * x + m1.e[4] * y + m1.e[5], // d*x + e*y + f*1
|
|
}
|
|
};
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_V2F32 DN_M2x3_MulV2(DN_M2x3 m1, DN_V2F32 v2)
|
|
{
|
|
DN_V2F32 result = DN_M2x3_Mul2F32(m1, v2.x, v2.y);
|
|
return result;
|
|
}
|
|
|
|
#if !defined(DN_NO_RECT)
|
|
// NOTE: DN_Rect ///////////////////////////////////////////////////////////////////////////////////
|
|
DN_API bool operator==(const DN_Rect &lhs, const DN_Rect &rhs)
|
|
{
|
|
bool result = (lhs.pos == rhs.pos) && (lhs.size == rhs.size);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_V2F32 DN_Rect_Center(DN_Rect rect)
|
|
{
|
|
DN_V2F32 result = rect.pos + (rect.size * .5f);
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_Rect_ContainsPoint(DN_Rect rect, DN_V2F32 p)
|
|
{
|
|
DN_V2F32 min = rect.pos;
|
|
DN_V2F32 max = rect.pos + rect.size;
|
|
bool result = (p.x >= min.x && p.x <= max.x && p.y >= min.y && p.y <= max.y);
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_Rect_ContainsRect(DN_Rect a, DN_Rect b)
|
|
{
|
|
DN_V2F32 a_min = a.pos;
|
|
DN_V2F32 a_max = a.pos + a.size;
|
|
DN_V2F32 b_min = b.pos;
|
|
DN_V2F32 b_max = b.pos + b.size;
|
|
bool result = (b_min >= a_min && b_max <= a_max);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Rect DN_Rect_Expand(DN_Rect a, DN_F32 amount)
|
|
{
|
|
DN_Rect result = a;
|
|
result.pos -= amount;
|
|
result.size += (amount * 2.f);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Rect DN_Rect_ExpandV2(DN_Rect a, DN_V2F32 amount)
|
|
{
|
|
DN_Rect result = a;
|
|
result.pos -= amount;
|
|
result.size += (amount * 2.f);
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_Rect_Intersects(DN_Rect a, DN_Rect b)
|
|
{
|
|
DN_V2F32 a_min = a.pos;
|
|
DN_V2F32 a_max = a.pos + a.size;
|
|
DN_V2F32 b_min = b.pos;
|
|
DN_V2F32 b_max = b.pos + b.size;
|
|
bool result = (a_min.x <= b_max.x && a_max.x >= b_min.x) &&
|
|
(a_min.y <= b_max.y && a_max.y >= b_min.y);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Rect DN_Rect_Intersection(DN_Rect a, DN_Rect b)
|
|
{
|
|
DN_Rect result = DN_Rect_Init2V2(a.pos, DN_V2F32_Init1N(0));
|
|
if (DN_Rect_Intersects(a, b)) {
|
|
DN_V2F32 a_min = a.pos;
|
|
DN_V2F32 a_max = a.pos + a.size;
|
|
DN_V2F32 b_min = b.pos;
|
|
DN_V2F32 b_max = b.pos + b.size;
|
|
|
|
DN_V2F32 min = {};
|
|
DN_V2F32 max = {};
|
|
min.x = DN_Max(a_min.x, b_min.x);
|
|
min.y = DN_Max(a_min.y, b_min.y);
|
|
max.x = DN_Min(a_max.x, b_max.x);
|
|
max.y = DN_Min(a_max.y, b_max.y);
|
|
result = DN_Rect_Init2V2(min, max - min);
|
|
}
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Rect DN_Rect_Union(DN_Rect a, DN_Rect b)
|
|
{
|
|
DN_V2F32 a_min = a.pos;
|
|
DN_V2F32 a_max = a.pos + a.size;
|
|
DN_V2F32 b_min = b.pos;
|
|
DN_V2F32 b_max = b.pos + b.size;
|
|
|
|
DN_V2F32 min, max;
|
|
min.x = DN_Min(a_min.x, b_min.x);
|
|
min.y = DN_Min(a_min.y, b_min.y);
|
|
max.x = DN_Max(a_max.x, b_max.x);
|
|
max.y = DN_Max(a_max.y, b_max.y);
|
|
DN_Rect result = DN_Rect_Init2V2(min, max - min);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_RectMinMax DN_Rect_MinMax(DN_Rect a)
|
|
{
|
|
DN_RectMinMax result = {};
|
|
result.min = a.pos;
|
|
result.max = a.pos + a.size;
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_F32 DN_Rect_Area(DN_Rect a)
|
|
{
|
|
DN_F32 result = a.size.w * a.size.h;
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Rect DN_Rect_CutLeftClip(DN_Rect *rect, DN_F32 amount, DN_RectCutClip clip)
|
|
{
|
|
DN_F32 min_x = rect->pos.x;
|
|
DN_F32 max_x = rect->pos.x + rect->size.w;
|
|
DN_F32 result_max_x = min_x + amount;
|
|
if (clip)
|
|
result_max_x = DN_Min(result_max_x, max_x);
|
|
DN_Rect result = DN_Rect_Init4N(min_x, rect->pos.y, result_max_x - min_x, rect->size.h);
|
|
rect->pos.x = result_max_x;
|
|
rect->size.w = max_x - result_max_x;
|
|
return result;
|
|
}
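
// For example, cutting 30 units off the left of a 100x50 rect returns the cut
// piece and shrinks the remaining rect in-place (shown here with clipping
// enabled via some DN_RectCutClip value 'clip'):
//
//   DN_Rect rect = DN_Rect_Init4N(0.f, 0.f, 100.f, 50.f);
//   DN_Rect left = DN_Rect_CutLeftClip(&rect, 30.f, clip); // left == {0, 0, 30, 50}
//                                                          // rect == {30, 0, 70, 50}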
|
|
|
|
DN_API DN_Rect DN_Rect_CutRightClip(DN_Rect *rect, DN_F32 amount, DN_RectCutClip clip)
|
|
{
|
|
DN_F32 min_x = rect->pos.x;
|
|
DN_F32 max_x = rect->pos.x + rect->size.w;
|
|
DN_F32 result_min_x = max_x - amount;
|
|
if (clip)
|
|
        result_min_x = DN_Max(result_min_x, min_x);
|
|
DN_Rect result = DN_Rect_Init4N(result_min_x, rect->pos.y, max_x - result_min_x, rect->size.h);
|
|
rect->size.w = result_min_x - min_x;
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Rect DN_Rect_CutTopClip(DN_Rect *rect, DN_F32 amount, DN_RectCutClip clip)
|
|
{
|
|
DN_F32 min_y = rect->pos.y;
|
|
DN_F32 max_y = rect->pos.y + rect->size.h;
|
|
DN_F32 result_max_y = min_y + amount;
|
|
if (clip)
|
|
result_max_y = DN_Min(result_max_y, max_y);
|
|
DN_Rect result = DN_Rect_Init4N(rect->pos.x, min_y, rect->size.w, result_max_y - min_y);
|
|
rect->pos.y = result_max_y;
|
|
rect->size.h = max_y - result_max_y;
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Rect DN_Rect_CutBottomClip(DN_Rect *rect, DN_F32 amount, DN_RectCutClip clip)
|
|
{
|
|
DN_F32 min_y = rect->pos.y;
|
|
DN_F32 max_y = rect->pos.y + rect->size.h;
|
|
DN_F32 result_min_y = max_y - amount;
|
|
if (clip)
|
|
        result_min_y = DN_Max(result_min_y, min_y);
|
|
DN_Rect result = DN_Rect_Init4N(rect->pos.x, result_min_y, rect->size.w, max_y - result_min_y);
|
|
rect->size.h = result_min_y - min_y;
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Rect DN_RectCut_Cut(DN_RectCut rect_cut, DN_V2F32 size, DN_RectCutClip clip)
|
|
{
|
|
DN_Rect result = {};
|
|
if (rect_cut.rect) {
|
|
switch (rect_cut.side) {
|
|
case DN_RectCutSide_Left: result = DN_Rect_CutLeftClip(rect_cut.rect, size.w, clip); break;
|
|
case DN_RectCutSide_Right: result = DN_Rect_CutRightClip(rect_cut.rect, size.w, clip); break;
|
|
case DN_RectCutSide_Top: result = DN_Rect_CutTopClip(rect_cut.rect, size.h, clip); break;
|
|
case DN_RectCutSide_Bottom: result = DN_Rect_CutBottomClip(rect_cut.rect, size.h, clip); break;
|
|
}
|
|
}
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_V2F32 DN_Rect_InterpolatedPoint(DN_Rect rect, DN_V2F32 t01)
|
|
{
|
|
DN_V2F32 result = DN_V2F32_Init2N(rect.pos.w + (rect.size.w * t01.x),
|
|
rect.pos.h + (rect.size.h * t01.y));
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_V2F32 DN_Rect_TopLeft(DN_Rect rect)
|
|
{
|
|
DN_V2F32 result = DN_Rect_InterpolatedPoint(rect, DN_V2F32_Init2N(0, 0));
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_V2F32 DN_Rect_TopRight(DN_Rect rect)
|
|
{
|
|
DN_V2F32 result = DN_Rect_InterpolatedPoint(rect, DN_V2F32_Init2N(1, 0));
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_V2F32 DN_Rect_BottomLeft(DN_Rect rect)
|
|
{
|
|
DN_V2F32 result = DN_Rect_InterpolatedPoint(rect, DN_V2F32_Init2N(0, 1));
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_V2F32 DN_Rect_BottomRight(DN_Rect rect)
|
|
{
|
|
DN_V2F32 result = DN_Rect_InterpolatedPoint(rect, DN_V2F32_Init2N(1, 1));
|
|
return result;
|
|
}
|
|
#endif // !defined(DN_NO_RECT)
|
|
|
|
// NOTE: Raycast ///////////////////////////////////////////////////////////////////////////////////
|
|
|
|
DN_API DN_RaycastLineIntersectV2Result DN_Raycast_LineIntersectV2(DN_V2F32 origin_a, DN_V2F32 dir_a, DN_V2F32 origin_b, DN_V2F32 dir_b)
|
|
{
|
|
    // NOTE: Parametric equation of a line
    //
    // p = o + (t*d)
    //
    // - o is the starting 2d point
    // - d is the direction of the line
    // - t is a scalar that scales along the direction of the line
    //
    // To determine where a ray intersects another ray, we want to solve
    //
    // (o_a + (t_a * d_a)) = (o_b + (t_b * d_b))
    //
    // Where '_a' and '_b' represent the 1st and 2nd ray's origin, direction
    // and 't' components respectively. This is 2 equations with 2 unknowns
    // (`t_a` and `t_b`) which we can solve for by expressing the equation in
    // terms of `t_a` and `t_b`.
    //
    // Working that math out produces the formulas below for the 't' values.
|
|
|
|
DN_RaycastLineIntersectV2Result result = {};
|
|
DN_F32 denominator = ((dir_b.y * dir_a.x) - (dir_b.x * dir_a.y));
|
|
if (denominator != 0.0f) {
|
|
result.t_a = (((origin_a.y - origin_b.y) * dir_b.x) + ((origin_b.x - origin_a.x) * dir_b.y)) / denominator;
|
|
result.t_b = (((origin_a.y - origin_b.y) * dir_a.x) + ((origin_b.x - origin_a.x) * dir_a.y)) / denominator;
|
|
result.hit = true;
|
|
}
|
|
return result;
|
|
}
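
// For example, a ray along +X from the origin and a ray along +Y from (5, -5)
// intersect at (5, 0), i.e. t_a == 5 along the first ray and t_b == 5 along
// the second:
//
//   DN_RaycastLineIntersectV2Result hit = DN_Raycast_LineIntersectV2(DN_V2F32_Init2N(0.f, 0.f), DN_V2F32_Init2N(1.f, 0.f),
//                                                                    DN_V2F32_Init2N(5.f, -5.f), DN_V2F32_Init2N(0.f, 1.f));
//   DN_Assert(hit.hit && hit.t_a == 5.f && hit.t_b == 5.f);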
|
|
|
|
// NOTE: Other /////////////////////////////////////////////////////////////////////////////////////
|
|
DN_API DN_V2F32 DN_Lerp_V2F32(DN_V2F32 a, DN_F32 t, DN_V2F32 b)
|
|
{
|
|
DN_V2F32 result = {};
|
|
result.x = a.x + ((b.x - a.x) * t);
|
|
result.y = a.y + ((b.y - a.y) * t);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_F32 DN_Lerp_F32(DN_F32 a, DN_F32 t, DN_F32 b)
|
|
{
|
|
DN_F32 result = a + ((b - a) * t);
|
|
return result;
|
|
}
|
|
#define DN_ASYNC_CPP
|
|
|
|
// DN: Single header generator commented out this header => #include "../dn_base_inc.h"
|
|
// DN: Single header generator commented out this header => #include "../dn_os_inc.h"
|
|
// DN: Single header generator commented out this header => #include "dn_async.h"
|
|
#if !defined(DN_ASYNC_H)
|
|
#define DN_ASYNC_H
|
|
|
|
// DN: Single header generator commented out this header => #include "../dn_base_inc.h"
|
|
// DN: Single header generator commented out this header => #include "../dn_os_inc.h"
|
|
|
|
enum DN_ASYNCPriority
|
|
{
|
|
DN_ASYNCPriority_Low,
|
|
DN_ASYNCPriority_High,
|
|
DN_ASYNCPriority_Count,
|
|
};
|
|
|
|
struct DN_ASYNCCore
|
|
{
|
|
DN_OSMutex ring_mutex;
|
|
DN_OSConditionVariable ring_write_cv;
|
|
DN_OSSemaphore worker_sem;
|
|
DN_Ring ring;
|
|
DN_OSThread *threads;
|
|
DN_U32 thread_count;
|
|
DN_U32 busy_threads;
|
|
DN_U32 join_threads;
|
|
};
|
|
|
|
typedef void(DN_ASYNCWorkFunc)(void *input);
|
|
|
|
struct DN_ASYNCWork
|
|
{
|
|
DN_ASYNCWorkFunc *func;
|
|
void *input;
|
|
void *output;
|
|
};
|
|
|
|
struct DN_ASYNCJob
|
|
{
|
|
DN_ASYNCWork work;
|
|
DN_OSSemaphore completion_sem;
|
|
};
|
|
|
|
struct DN_ASYNCTask
|
|
{
|
|
DN_ASYNCWork work;
|
|
};
|
|
|
|
DN_API void DN_ASYNC_Init (DN_ASYNCCore *async, char *base, DN_USize base_size, DN_OSThread *threads, DN_U32 threads_size);
|
|
DN_API void DN_ASYNC_Deinit (DN_ASYNCCore *async);
|
|
DN_API bool DN_ASYNC_QueueWork(DN_ASYNCCore *async, DN_ASYNCWorkFunc *func, void *input, DN_U64 wait_time_ms);
|
|
DN_API DN_OSSemaphore DN_ASYNC_QueueTask(DN_ASYNCCore *async, DN_ASYNCWorkFunc *func, void *input, DN_U64 wait_time_ms);
|
|
DN_API void DN_ASYNC_WaitTask (DN_OSSemaphore *sem, DN_U32 timeout_ms);
|
|
|
|
#endif // DN_ASYNC_H
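
// NOTE: A sketch of queueing work onto an initialised DN_ASYNCCore ('async',
// 'MyWorkFunc' and 'work_input' are hypothetical names used for illustration):
//
//   static void MyWorkFunc(void *input) { /* ... */ }
//
//   DN_OSSemaphore sem = DN_ASYNC_QueueTask(&async, MyWorkFunc, &work_input, /*wait_time_ms*/ 100);
//   DN_ASYNC_WaitTask(&sem, /*timeout_ms*/ UINT32_MAX);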
|
|
|
|
static DN_I32 DN_ASYNC_ThreadEntryPoint_(DN_OSThread *thread)
|
|
{
|
|
DN_OS_ThreadSetName(DN_FStr8_ToStr8(&thread->name));
|
|
DN_ASYNCCore *async = DN_CAST(DN_ASYNCCore *) thread->user_context;
|
|
DN_Ring *ring = &async->ring;
|
|
for (;;) {
|
|
DN_OS_SemaphoreWait(&async->worker_sem, UINT32_MAX);
|
|
if (async->join_threads)
|
|
break;
|
|
|
|
DN_ASYNCJob job = {};
|
|
for (DN_OS_MutexScope(&async->ring_mutex)) {
|
|
if (DN_Ring_HasData(ring, sizeof(job))) {
|
|
DN_Ring_Read(ring, &job, sizeof(job));
|
|
break;
|
|
}
|
|
}
|
|
|
|
if (job.work.func) {
|
|
DN_OS_ConditionVariableBroadcast(&async->ring_write_cv); // Resume any blocked ring write(s)
|
|
|
|
DN_Atomic_AddU32(&async->busy_threads, 1);
|
|
job.work.func(job.work.input);
|
|
DN_Atomic_SubU32(&async->busy_threads, 1);
|
|
|
|
if (job.completion_sem.handle != 0)
|
|
DN_OS_SemaphoreIncrement(&job.completion_sem, 1);
|
|
}
|
|
}
|
|
|
|
return 0;
|
|
}
|
|
|
|
DN_API void DN_ASYNC_Init(DN_ASYNCCore *async, char *base, DN_USize base_size, DN_OSThread *threads, DN_U32 threads_size)
|
|
{
|
|
DN_Assert(async);
|
|
async->ring.size = base_size;
|
|
async->ring.base = base;
|
|
async->ring_mutex = DN_OS_MutexInit();
|
|
async->ring_write_cv = DN_OS_ConditionVariableInit();
|
|
async->worker_sem = DN_OS_SemaphoreInit(0);
|
|
async->thread_count = threads_size;
|
|
async->threads = threads;
|
|
for (DN_ForIndexU(index, async->thread_count)) {
|
|
DN_OSThread *thread = async->threads + index;
|
|
thread->name = DN_FStr8_InitF<64>("ASYNC W%zu", index);
|
|
DN_OS_ThreadInit(thread, DN_ASYNC_ThreadEntryPoint_, async);
|
|
}
|
|
}
|
|
|
|
DN_API void DN_ASYNC_Deinit(DN_ASYNCCore *async)
|
|
{
|
|
DN_Assert(async);
|
|
DN_Atomic_SetValue32(&async->join_threads, true);
|
|
DN_OS_SemaphoreIncrement(&async->worker_sem, async->thread_count);
|
|
for (DN_ForItSize(it, DN_OSThread, async->threads, async->thread_count))
|
|
DN_OS_ThreadDeinit(it.data);
|
|
}
|
|
|
|
|
|
static bool DN_ASYNC_QueueJob_(DN_ASYNCCore *async, DN_ASYNCJob const *job, DN_U64 wait_time_ms) {
|
|
DN_U64 end_time_ms = DN_OS_DateUnixTimeMs() + wait_time_ms;
|
|
bool result = false;
|
|
for (DN_OS_MutexScope(&async->ring_mutex)) {
|
|
for (;;) {
|
|
if (DN_Ring_HasSpace(&async->ring, sizeof(*job))) {
|
|
DN_Ring_WriteStruct(&async->ring, job);
|
|
result = true;
|
|
break;
|
|
}
|
|
DN_OS_ConditionVariableWaitUntil(&async->ring_write_cv, &async->ring_mutex, end_time_ms);
|
|
if (DN_OS_DateUnixTimeMs() >= end_time_ms)
|
|
break;
|
|
}
|
|
}
|
|
|
|
if (result)
|
|
DN_OS_SemaphoreIncrement(&async->worker_sem, 1); // Flag that a job is available
|
|
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_ASYNC_QueueWork(DN_ASYNCCore *async, DN_ASYNCWorkFunc *func, void *input, DN_U64 wait_time_ms)
|
|
{
|
|
DN_ASYNCJob job = {};
|
|
job.work.func = func;
|
|
job.work.input = input;
|
|
bool result = DN_ASYNC_QueueJob_(async, &job, wait_time_ms);
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_OSSemaphore DN_ASYNC_QueueTask(DN_ASYNCCore *async, DN_ASYNCWorkFunc *func, void *input, DN_U64 wait_time_ms)
|
|
{
|
|
DN_OSSemaphore result = DN_OS_SemaphoreInit(0);
|
|
DN_ASYNCJob job = {};
|
|
job.work.func = func;
|
|
job.work.input = input;
|
|
job.completion_sem = result;
|
|
DN_ASYNC_QueueJob_(async, &job, wait_time_ms);
|
|
return result;
|
|
}
|
|
|
|
DN_API void DN_ASYNC_WaitTask(DN_OSSemaphore *sem, DN_U32 timeout_ms)
|
|
{
|
|
DN_OS_SemaphoreWait(sem, timeout_ms);
|
|
DN_OS_SemaphoreDeinit(sem);
|
|
}
|
|
|
|
DN_API void DN_BinPack_U64(DN_BinPack *pack, DN_BinPackMode mode, DN_U64 *item)
|
|
{
|
|
DN_U64 const VALUE_MASK = 0b0111'1111;
|
|
DN_U8 const CONTINUE_BIT = 0b1000'0000;
|
|
|
|
if (mode == DN_BinPackMode_Serialise) {
|
|
DN_U64 it = *item;
|
|
do {
|
|
DN_U8 write_value = DN_CAST(DN_U8)(it & VALUE_MASK);
|
|
it >>= 7;
|
|
if (it)
|
|
write_value |= CONTINUE_BIT;
|
|
DN_Str8Builder_AppendBytesCopy(&pack->writer, &write_value, sizeof(write_value));
|
|
} while (it);
|
|
} else {
|
|
*item = 0;
|
|
DN_USize bits_read = 0;
|
|
for (DN_U8 src = CONTINUE_BIT; (src & CONTINUE_BIT) && bits_read < 64; bits_read += 7) {
|
|
src = pack->read.data[pack->read_index++];
|
|
DN_U8 masked_src = src & VALUE_MASK;
|
|
*item |= (DN_CAST(DN_U64) masked_src << bits_read);
|
|
}
|
|
}
|
|
}
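// NOTE: Worked example of the encoding above (editor's note) ////////////////////////////////////////
// Each output byte carries 7 payload bits, least-significant group first, with the top bit set
// while more bytes follow. Serialising 300 (binary 1'0010'1100):
//   byte 0: low 7 bits 010'1100 (0x2C) | continue bit  => 0xAC
//   byte 1: remaining bits   10 (0x02), continue clear => 0x02
// Deserialising reverses it: (0x02 << 7) | 0x2C == 300. Small values cost one byte; a full DN_U64
// costs at most 10 bytes, which is also the most the decode loop above will consume.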
|
|
|
|
DN_API void DN_BinPack_VarInt_(DN_BinPack *pack, DN_BinPackMode mode, void *item, DN_USize size)
|
|
{
|
|
DN_U64 value = 0;
|
|
DN_AssertF(size <= sizeof(value),
|
|
"An item larger than 64 bits (%zu) is trying to be packed as a variable integer which is not supported",
|
|
size * 8);
|
|
|
|
if (mode == DN_BinPackMode_Serialise) // Read `item` into U64 `value`
|
|
DN_Memcpy(&value, item, size);
|
|
|
|
DN_BinPack_U64(pack, mode, &value);
|
|
|
|
if (mode == DN_BinPackMode_Deserialise) // Write U64 `value` into `item`
|
|
DN_Memcpy(item, &value, size);
|
|
}
|
|
|
|
DN_API void DN_BinPack_U32(DN_BinPack *pack, DN_BinPackMode mode, DN_U32 *item)
|
|
{
|
|
DN_BinPack_VarInt_(pack, mode, item, sizeof(*item));
|
|
}
|
|
|
|
DN_API void DN_BinPack_U16(DN_BinPack *pack, DN_BinPackMode mode, DN_U16 *item)
|
|
{
|
|
DN_BinPack_VarInt_(pack, mode, item, sizeof(*item));
|
|
}
|
|
|
|
DN_API void DN_BinPack_U8(DN_BinPack *pack, DN_BinPackMode mode, DN_U8 *item)
|
|
{
|
|
DN_BinPack_VarInt_(pack, mode, item, sizeof(*item));
|
|
}
|
|
|
|
DN_API void DN_BinPack_I64(DN_BinPack *pack, DN_BinPackMode mode, DN_I64 *item)
|
|
{
|
|
DN_BinPack_VarInt_(pack, mode, item, sizeof(*item));
|
|
}
|
|
|
|
DN_API void DN_BinPack_I32(DN_BinPack *pack, DN_BinPackMode mode, DN_I32 *item)
|
|
{
|
|
DN_BinPack_VarInt_(pack, mode, item, sizeof(*item));
|
|
}
|
|
|
|
DN_API void DN_BinPack_I16(DN_BinPack *pack, DN_BinPackMode mode, DN_I16 *item)
|
|
{
|
|
DN_BinPack_VarInt_(pack, mode, item, sizeof(*item));
|
|
}
|
|
|
|
DN_API void DN_BinPack_I8(DN_BinPack *pack, DN_BinPackMode mode, DN_I8 *item)
|
|
{
|
|
DN_BinPack_VarInt_(pack, mode, item, sizeof(*item));
|
|
}
|
|
|
|
DN_API void DN_BinPack_F64(DN_BinPack *pack, DN_BinPackMode mode, DN_F64 *item)
|
|
{
|
|
DN_BinPack_VarInt_(pack, mode, item, sizeof(*item));
|
|
}
|
|
|
|
DN_API void DN_BinPack_F32(DN_BinPack *pack, DN_BinPackMode mode, DN_F32 *item)
|
|
{
|
|
DN_BinPack_VarInt_(pack, mode, item, sizeof(*item));
|
|
}
|
|
|
|
#if defined(DN_MATH_H)
|
|
DN_API void DN_BinPack_V2(DN_BinPack *pack, DN_BinPackMode mode, DN_V2F32 *item)
|
|
{
|
|
DN_BinPack_F32(pack, mode, &item->x);
|
|
DN_BinPack_F32(pack, mode, &item->y);
|
|
}
|
|
|
|
DN_API void DN_BinPack_V4(DN_BinPack *pack, DN_BinPackMode mode, DN_V4F32 *item)
|
|
{
|
|
DN_BinPack_F32(pack, mode, &item->x);
|
|
DN_BinPack_F32(pack, mode, &item->y);
|
|
DN_BinPack_F32(pack, mode, &item->z);
|
|
DN_BinPack_F32(pack, mode, &item->w);
|
|
}
|
|
#endif
|
|
|
|
DN_API void DN_BinPack_Bool(DN_BinPack *pack, DN_BinPackMode mode, bool *item)
|
|
{
|
|
DN_BinPack_VarInt_(pack, mode, item, sizeof(*item));
|
|
}
|
|
|
|
DN_API void DN_BinPack_Str8(DN_BinPack *pack, DN_Arena *arena, DN_BinPackMode mode, DN_Str8 *string)
|
|
{
|
|
DN_BinPack_VarInt_(pack, mode, &string->size, sizeof(string->size));
|
|
if (mode == DN_BinPackMode_Serialise) {
|
|
DN_Str8Builder_AppendBytesCopy(&pack->writer, string->data, string->size);
|
|
} else {
|
|
DN_Str8 src = DN_Str8_Slice(pack->read, pack->read_index, string->size);
|
|
*string = DN_Str8_Copy(arena, src);
|
|
pack->read_index += src.size;
|
|
}
|
|
}
|
|
|
|
DN_API void DN_BinPack_Str8Pool(DN_BinPack *pack, DN_Pool *pool, DN_BinPackMode mode, DN_Str8 *string)
|
|
{
|
|
DN_BinPack_VarInt_(pack, mode, &string->size, sizeof(string->size));
|
|
if (mode == DN_BinPackMode_Serialise) {
|
|
DN_Str8Builder_AppendBytesCopy(&pack->writer, string->data, string->size);
|
|
} else {
|
|
DN_Str8 src = DN_Str8_Slice(pack->read, pack->read_index, string->size);
|
|
*string = DN_Pool_AllocStr8Copy(pool, src);
|
|
pack->read_index += src.size;
|
|
}
|
|
}
|
|
|
|
template <DN_USize N>
|
|
DN_API void DN_BinPack_FStr8(DN_BinPack *pack, DN_BinPackMode mode, DN_FStr8<N> *string)
|
|
{
|
|
DN_BinPack_VarInt_(pack, mode, &string->size, sizeof(string->size));
|
|
if (mode == DN_BinPackMode_Serialise) {
|
|
DN_Str8Builder_AppendBytesCopy(&pack->writer, string->data, string->size);
|
|
} else {
|
|
DN_Str8 src = DN_Str8_Slice(pack->read, pack->read_index, string->size);
|
|
*string = DN_FStr8_InitF<N>("%.*s", DN_STR_FMT(src));
|
|
pack->read_index += src.size;
|
|
}
|
|
}
|
|
|
|
DN_API void DN_BinPack_Bytes(DN_BinPack *pack, DN_Arena *arena, DN_BinPackMode mode, void **ptr, DN_USize *size)
|
|
{
|
|
DN_Str8 string = DN_Str8_Init(*ptr, *size);
|
|
DN_BinPack_Str8(pack, arena, mode, &string);
|
|
*ptr = string.data;
|
|
*size = string.size;
|
|
}
|
|
|
|
DN_API void DN_BinPack_CArray(DN_BinPack *pack, DN_BinPackMode mode, void *ptr, DN_USize size)
|
|
{
|
|
DN_BinPack_VarInt_(pack, mode, &size, sizeof(size));
|
|
if (mode == DN_BinPackMode_Serialise) {
|
|
DN_Str8Builder_AppendBytesCopy(&pack->writer, ptr, size);
|
|
} else {
|
|
DN_Str8 src = DN_Str8_Slice(pack->read, pack->read_index, size);
|
|
DN_Assert(src.size == size);
|
|
DN_Memcpy(ptr, src.data, DN_Min(src.size, size));
|
|
pack->read_index += src.size;
|
|
}
|
|
}
|
|
|
|
DN_API DN_Str8 DN_BinPack_Build(DN_BinPack const *pack, DN_Arena *arena)
|
|
{
|
|
DN_Str8 result = DN_Str8Builder_Build(&pack->writer, arena);
|
|
return result;
|
|
}
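// NOTE: Round-trip sketch (editor's illustrative example, not part of the library) //////////////////
// The same DN_BinPack_* calls serialise or deserialise depending on the mode flag, so one routine
// describing the data can do both directions. Constructing a DN_BinPack by zero-initialising it and
// filling `writer.arena` / `read` directly is inferred from the field usage above, and `arena` is
// assumed to be a live DN_Arena; treat the setup details as a sketch, not the canonical constructor.
#if 0
static void DN_BinPack_UsageSketch_(DN_Arena *arena)
{
    DN_U32  version = 3;
    DN_Str8 name    = DN_STR8("player");

    DN_BinPack writer   = {};
    writer.writer.arena = arena;
    DN_BinPack_U32 (&writer, DN_BinPackMode_Serialise, &version);
    DN_BinPack_Str8(&writer, arena, DN_BinPackMode_Serialise, &name);
    DN_Str8 bytes = DN_BinPack_Build(&writer, arena);

    DN_BinPack reader = {};
    reader.read       = bytes; // read_index starts at 0 from the zero-init
    DN_U32  loaded_version = 0;
    DN_Str8 loaded_name    = {};
    DN_BinPack_U32 (&reader, DN_BinPackMode_Deserialise, &loaded_version);
    DN_BinPack_Str8(&reader, arena, DN_BinPackMode_Deserialise, &loaded_name);
}
#endif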
|
|
// DN: Single header generator commented out this header => #include "dn_csv.h"
|
|
#if !defined(DN_CSV_H)
|
|
#define DN_CSV_H
|
|
|
|
enum DN_CSVSerialise
|
|
{
|
|
DN_CSVSerialise_Read,
|
|
DN_CSVSerialise_Write,
|
|
};
|
|
|
|
struct DN_CSVTokeniser
|
|
{
|
|
bool bad;
|
|
DN_Str8 string;
|
|
char delimiter;
|
|
char const *it;
|
|
bool end_of_line;
|
|
};
|
|
|
|
struct DN_CSVPack
|
|
{
|
|
DN_Str8Builder write_builder;
|
|
DN_USize write_column;
|
|
DN_CSVTokeniser read_tokeniser;
|
|
};
|
|
|
|
#endif // !defined(DN_CSV_H)
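// NOTE: Usage sketch (editor's illustrative example, not part of the library) //////////////////////
// Walk a small in-memory CSV row by row. The inner loop stops when the tokeniser reports the end of
// the line (or an empty field, since it keys off DN_Str8_HasData), and the caller advances to the
// next row explicitly. The sample data and function name are illustrative.
#if 0
static void DN_CSV_TokeniserUsageSketch_()
{
    DN_Str8         csv       = DN_STR8("id,name\n1,alice\n2,bob\n");
    DN_CSVTokeniser tokeniser = DN_CSV_TokeniserInit(csv, ',' /*delimiter*/);
    do {
        for (DN_Str8 column = DN_CSV_TokeniserNextColumn(&tokeniser);
             DN_Str8_HasData(column);
             column = DN_CSV_TokeniserNextColumn(&tokeniser)) {
            // `column` is a slice into `csv`: "id", "name", then "1", "alice", ...
        }
    } while (DN_CSV_TokeniserNextRow(&tokeniser));
}
#endif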
|
|
|
|
static DN_CSVTokeniser DN_CSV_TokeniserInit(DN_Str8 string, char delimiter)
|
|
{
|
|
DN_CSVTokeniser result = {};
|
|
result.string = string;
|
|
result.delimiter = delimiter;
|
|
return result;
|
|
}
|
|
|
|
static bool DN_CSV_TokeniserValid(DN_CSVTokeniser *tokeniser)
|
|
{
|
|
bool result = tokeniser && !tokeniser->bad;
|
|
return result;
|
|
}
|
|
|
|
static bool DN_CSV_TokeniserNextRow(DN_CSVTokeniser *tokeniser)
|
|
{
|
|
bool result = false;
|
|
if (DN_CSV_TokeniserValid(tokeniser) && DN_Str8_HasData(tokeniser->string)) {
|
|
        // NOTE: On the first query the iterator is nil; let the field tokeniser advance first
|
|
if (tokeniser->it) {
|
|
// NOTE: Only advance the tokeniser if we're at the end of the line and
|
|
// there's more to tokenise.
|
|
char const *end = tokeniser->string.data + tokeniser->string.size;
|
|
if (tokeniser->it != end && tokeniser->end_of_line) {
|
|
tokeniser->end_of_line = false;
|
|
result = true;
|
|
}
|
|
}
|
|
}
|
|
|
|
return result;
|
|
}
|
|
|
|
static DN_Str8 DN_CSV_TokeniserNextField(DN_CSVTokeniser *tokeniser)
|
|
{
|
|
DN_Str8 result = {};
|
|
if (!DN_CSV_TokeniserValid(tokeniser))
|
|
return result;
|
|
|
|
if (!DN_Str8_HasData(tokeniser->string)) {
|
|
tokeniser->bad = true;
|
|
return result;
|
|
}
|
|
|
|
// NOTE: First time tokeniser is invoked with a string, set up initial state.
|
|
char const *string_end = tokeniser->string.data + tokeniser->string.size;
|
|
if (!tokeniser->it) {
|
|
tokeniser->it = tokeniser->string.data;
|
|
// NOTE: Skip any leading new lines
|
|
while (tokeniser->it[0] == '\n' || tokeniser->it[0] == '\r')
|
|
if (++tokeniser->it == string_end)
|
|
break;
|
|
}
|
|
|
|
// NOTE: Tokeniser pointing at end, no more valid data to parse.
|
|
if (tokeniser->it == string_end)
|
|
return result;
|
|
|
|
// NOTE: Scan forward until the next control character.
|
|
// 1. '"' Double quoted field, extract everything between the quotes.
|
|
// 2. tokeniser->delimiter End of the field, extract everything leading up to the delimiter.
|
|
    // 3. '\n'                  Last field in the record, extract everything leading up to the new line.
|
|
char const *begin = tokeniser->it;
|
|
while (tokeniser->it != string_end && (tokeniser->it[0] != '"' &&
|
|
tokeniser->it[0] != tokeniser->delimiter &&
|
|
tokeniser->it[0] != '\n'))
|
|
tokeniser->it++;
|
|
|
|
bool quoted_field = (tokeniser->it != string_end) && tokeniser->it[0] == '"';
|
|
if (quoted_field) {
|
|
begin = ++tokeniser->it; // Begin after the quote
|
|
|
|
// NOTE: Scan forward until the next '"' which marks the end
|
|
// of the field unless it is escaped by another '"'.
|
|
find_next_quote:
|
|
while (tokeniser->it != string_end && tokeniser->it[0] != '"')
|
|
tokeniser->it++;
|
|
|
|
// NOTE: If we encounter a '"' right after, the quotes were escaped
|
|
// and we need to skip to the next instance of a '"'.
|
|
if (tokeniser->it != string_end && tokeniser->it + 1 != string_end && tokeniser->it[1] == '"') {
|
|
tokeniser->it += 2;
|
|
goto find_next_quote;
|
|
}
|
|
}
|
|
|
|
// NOTE: Mark the end of the field
|
|
char const *end = tokeniser->it;
|
|
tokeniser->end_of_line = tokeniser->it == string_end || end[0] == '\n';
|
|
|
|
// NOTE: In files with \r\n style new lines ensure that we don't include
|
|
// the \r byte in the CSV field we produce.
|
|
if (end != string_end && end[0] == '\n') {
|
|
DN_Assert((uintptr_t)(end - 1) > (uintptr_t)tokeniser->string.data &&
|
|
"Internal error: The string iterator is pointing behind the start of the string we're reading");
|
|
if (end[-1] == '\r')
|
|
end = end - 1;
|
|
}
|
|
|
|
    // NOTE: Quoted fields may have whitespace after the closing quote; skip
|
|
// until we reach the field terminator.
|
|
if (quoted_field)
|
|
while (tokeniser->it != string_end && (tokeniser->it[0] != tokeniser->delimiter &&
|
|
tokeniser->it[0] != '\n'))
|
|
tokeniser->it++;
|
|
|
|
// NOTE: Advance the tokeniser past the field terminator.
|
|
if (tokeniser->it != string_end)
|
|
tokeniser->it++;
|
|
|
|
// NOTE: Generate the record
|
|
result.data = DN_CAST(char *) begin;
|
|
    result.size = DN_CAST(DN_USize)(end - begin);
|
|
return result;
|
|
}
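// NOTE: Worked example of the quoted-field handling above (editor's note) ///////////////////////////
// Tokenising the record `"say ""hi"", bob",42\n` with a ',' delimiter yields:
//   field 0: say ""hi"", bob   (the slice between the outer quotes; doubled quotes are skipped over
//                               but NOT collapsed, so callers must unescape "" themselves)
//   field 1: 42                (end_of_line becomes true once the '\n' terminator is reached)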
|
|
|
|
static DN_Str8 DN_CSV_TokeniserNextColumn(DN_CSVTokeniser *tokeniser)
|
|
{
|
|
DN_Str8 result = {};
|
|
if (!DN_CSV_TokeniserValid(tokeniser))
|
|
return result;
|
|
|
|
// NOTE: End of line, the user must explicitly advance to the next row
|
|
if (tokeniser->end_of_line)
|
|
return result;
|
|
|
|
// NOTE: Advance tokeniser to the next field in the row
|
|
result = DN_CSV_TokeniserNextField(tokeniser);
|
|
return result;
|
|
}
|
|
|
|
static void DN_CSV_TokeniserSkipLine(DN_CSVTokeniser *tokeniser)
|
|
{
|
|
while (DN_CSV_TokeniserValid(tokeniser) && !tokeniser->end_of_line)
|
|
DN_CSV_TokeniserNextColumn(tokeniser);
|
|
DN_CSV_TokeniserNextRow(tokeniser);
|
|
}
|
|
|
|
static int DN_CSV_TokeniserNextN(DN_CSVTokeniser *tokeniser, DN_Str8 *fields, int fields_size, bool column_iterator)
|
|
{
|
|
if (!DN_CSV_TokeniserValid(tokeniser) || !fields || fields_size <= 0)
|
|
return 0;
|
|
|
|
int result = 0;
|
|
for (; result < fields_size; result++) {
|
|
fields[result] = column_iterator ? DN_CSV_TokeniserNextColumn(tokeniser) : DN_CSV_TokeniserNextField(tokeniser);
|
|
if (!DN_CSV_TokeniserValid(tokeniser) || !DN_Str8_HasData(fields[result]))
|
|
break;
|
|
}
|
|
|
|
return result;
|
|
}
|
|
|
|
DN_MSVC_WARNING_PUSH
|
|
DN_MSVC_WARNING_DISABLE(4505) // 'x': unreferenced function with internal linkage has been removed
|
|
static int DN_CSV_TokeniserNextColumnN(DN_CSVTokeniser *tokeniser, DN_Str8 *fields, int fields_size)
|
|
{
|
|
int result = DN_CSV_TokeniserNextN(tokeniser, fields, fields_size, true /*column_iterator*/);
|
|
return result;
|
|
}
|
|
|
|
static int DN_CSV_TokeniserNextFieldN(DN_CSVTokeniser *tokeniser, DN_Str8 *fields, int fields_size)
|
|
{
|
|
int result = DN_CSV_TokeniserNextN(tokeniser, fields, fields_size, false /*column_iterator*/);
|
|
return result;
|
|
}
|
|
|
|
static void DN_CSV_TokeniserSkipLineN(DN_CSVTokeniser *tokeniser, int count)
|
|
{
|
|
for (int i = 0; i < count && DN_CSV_TokeniserValid(tokeniser); i++)
|
|
DN_CSV_TokeniserSkipLine(tokeniser);
|
|
}
|
|
|
|
static void DN_CSV_PackU64(DN_CSVPack *pack, DN_CSVSerialise serialise, DN_U64 *value)
|
|
{
|
|
if (serialise == DN_CSVSerialise_Read) {
|
|
DN_Str8 csv_value = DN_CSV_TokeniserNextColumn(&pack->read_tokeniser);
|
|
DN_Str8ToU64Result to_u64 = DN_Str8_ToU64(csv_value, 0);
|
|
DN_Assert(to_u64.success);
|
|
*value = to_u64.value;
|
|
} else {
|
|
DN_Str8Builder_AppendF(&pack->write_builder, "%s%" PRIu64, pack->write_column++ ? "," : "", *value);
|
|
}
|
|
}
|
|
|
|
static void DN_CSV_PackI64(DN_CSVPack *pack, DN_CSVSerialise serialise, DN_I64 *value)
|
|
{
|
|
if (serialise == DN_CSVSerialise_Read) {
|
|
DN_Str8 csv_value = DN_CSV_TokeniserNextColumn(&pack->read_tokeniser);
|
|
DN_Str8ToI64Result to_i64 = DN_Str8_ToI64(csv_value, 0);
|
|
DN_Assert(to_i64.success);
|
|
*value = to_i64.value;
|
|
} else {
|
|
        DN_Str8Builder_AppendF(&pack->write_builder, "%s%" PRId64, pack->write_column++ ? "," : "", *value);
|
|
}
|
|
}
|
|
|
|
static void DN_CSV_PackI32(DN_CSVPack *pack, DN_CSVSerialise serialise, DN_I32 *value)
|
|
{
|
|
DN_I64 u64 = *value;
|
|
DN_CSV_PackI64(pack, serialise, &u64);
|
|
if (serialise == DN_CSVSerialise_Read)
|
|
*value = DN_SaturateCastI64ToI32(u64);
|
|
}
|
|
|
|
static void DN_CSV_PackI16(DN_CSVPack *pack, DN_CSVSerialise serialise, DN_I16 *value)
|
|
{
|
|
DN_I64 u64 = *value;
|
|
DN_CSV_PackI64(pack, serialise, &u64);
|
|
if (serialise == DN_CSVSerialise_Read)
|
|
*value = DN_SaturateCastI64ToI16(u64);
|
|
}
|
|
|
|
static void DN_CSV_PackI8(DN_CSVPack *pack, DN_CSVSerialise serialise, DN_I8 *value)
|
|
{
|
|
DN_I64 u64 = *value;
|
|
DN_CSV_PackI64(pack, serialise, &u64);
|
|
if (serialise == DN_CSVSerialise_Read)
|
|
*value = DN_SaturateCastI64ToI8(u64);
|
|
}
|
|
|
|
|
|
static void DN_CSV_PackU32(DN_CSVPack *pack, DN_CSVSerialise serialise, DN_U32 *value)
|
|
{
|
|
DN_U64 u64 = *value;
|
|
DN_CSV_PackU64(pack, serialise, &u64);
|
|
if (serialise == DN_CSVSerialise_Read)
|
|
*value = DN_SaturateCastU64ToU32(u64);
|
|
}
|
|
|
|
static void DN_CSV_PackU16(DN_CSVPack *pack, DN_CSVSerialise serialise, DN_U16 *value)
|
|
{
|
|
DN_U64 u64 = *value;
|
|
DN_CSV_PackU64(pack, serialise, &u64);
|
|
if (serialise == DN_CSVSerialise_Read)
|
|
*value = DN_SaturateCastU64ToU16(u64);
|
|
}
|
|
|
|
static void DN_CSV_PackBoolAsU64(DN_CSVPack *pack, DN_CSVSerialise serialise, bool *value)
|
|
{
|
|
DN_U64 u64 = *value;
|
|
DN_CSV_PackU64(pack, serialise, &u64);
|
|
if (serialise == DN_CSVSerialise_Read)
|
|
*value = u64 ? 1 : 0;
|
|
}
|
|
|
|
static void DN_CSV_PackStr8(DN_CSVPack *pack, DN_CSVSerialise serialise, DN_Str8 *str8, DN_Arena *arena)
|
|
{
|
|
if (serialise == DN_CSVSerialise_Read) {
|
|
DN_Str8 csv_value = DN_CSV_TokeniserNextColumn(&pack->read_tokeniser);
|
|
*str8 = DN_Str8_Copy(arena, csv_value);
|
|
} else {
|
|
DN_Str8Builder_AppendF(&pack->write_builder, "%s%.*s", pack->write_column++ ? "," : "", DN_STR_FMT(*str8));
|
|
}
|
|
}
|
|
|
|
static void DN_CSV_PackBuffer(DN_CSVPack *pack, DN_CSVSerialise serialise, void *dest, size_t *size)
|
|
{
|
|
if (serialise == DN_CSVSerialise_Read) {
|
|
DN_Str8 csv_value = DN_CSV_TokeniserNextColumn(&pack->read_tokeniser);
|
|
*size = DN_Min(*size, csv_value.size);
|
|
DN_Memcpy(dest, csv_value.data, *size);
|
|
} else {
|
|
DN_Str8Builder_AppendF(&pack->write_builder, "%s%.*s", pack->write_column++ ? "," : "", DN_CAST(int)(*size), dest);
|
|
}
|
|
}
|
|
|
|
static void DN_CSV_PackBufferWithMax(DN_CSVPack *pack, DN_CSVSerialise serialise, void *dest, size_t *size, size_t max)
|
|
{
|
|
if (serialise == DN_CSVSerialise_Read)
|
|
*size = max;
|
|
DN_CSV_PackBuffer(pack, serialise, dest, size);
|
|
}
|
|
|
|
static bool DN_CSV_PackNewLine(DN_CSVPack *pack, DN_CSVSerialise serialise)
|
|
{
|
|
bool result = true;
|
|
if (serialise == DN_CSVSerialise_Read) {
|
|
result = DN_CSV_TokeniserNextRow(&pack->read_tokeniser);
|
|
} else {
|
|
pack->write_column = 0;
|
|
result = DN_Str8Builder_AppendRef(&pack->write_builder, DN_STR8("\n"));
|
|
}
|
|
return result;
|
|
}
|
|
DN_MSVC_WARNING_POP
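// NOTE: Round-trip sketch (editor's illustrative example, not part of the library) //////////////////
// As with DN_BinPack, the same DN_CSV_Pack* calls read or write depending on the serialise flag, so
// one row-description routine handles both directions. Zero-initialising DN_CSVPack and filling
// `write_builder.arena` / `read_tokeniser` directly is inferred from the field usage above; `arena`,
// the row layout and the function names are assumptions of the sketch.
#if 0
static void DN_CSV_PackRowSketch_(DN_CSVPack *pack, DN_CSVSerialise serialise, DN_U64 *id, DN_Str8 *name, DN_Arena *arena)
{
    DN_CSV_PackU64    (pack, serialise, id);
    DN_CSV_PackStr8   (pack, serialise, name, arena);
    DN_CSV_PackNewLine(pack, serialise);
}

static void DN_CSV_PackUsageSketch_(DN_Arena *arena)
{
    DN_U64  id   = 42;
    DN_Str8 name = DN_STR8("alice");

    DN_CSVPack writer          = {};
    writer.write_builder.arena = arena;
    DN_CSV_PackRowSketch_(&writer, DN_CSVSerialise_Write, &id, &name, arena);
    DN_Str8 csv = DN_Str8Builder_Build(&writer.write_builder, arena); // "42,alice\n"

    DN_CSVPack reader     = {};
    reader.read_tokeniser = DN_CSV_TokeniserInit(csv, ',' /*delimiter*/);
    DN_U64  loaded_id     = 0;
    DN_Str8 loaded_name   = {};
    DN_CSV_PackRowSketch_(&reader, DN_CSVSerialise_Read, &loaded_id, &loaded_name, arena);
}
#endif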
|
|
#define DN_HASH_CPP
|
|
|
|
/*
|
|
////////////////////////////////////////////////////////////////////////////////////////////////////
|
|
//
|
|
// $$\ $$\ $$$$$$\ $$$$$$\ $$\ $$\
|
|
// $$ | $$ |$$ __$$\ $$ __$$\ $$ | $$ |
|
|
// $$ | $$ |$$ / $$ |$$ / \__|$$ | $$ |
|
|
// $$$$$$$$ |$$$$$$$$ |\$$$$$$\ $$$$$$$$ |
|
|
// $$ __$$ |$$ __$$ | \____$$\ $$ __$$ |
|
|
// $$ | $$ |$$ | $$ |$$\ $$ |$$ | $$ |
|
|
// $$ | $$ |$$ | $$ |\$$$$$$ |$$ | $$ |
|
|
// \__| \__|\__| \__| \______/ \__| \__|
|
|
//
|
|
// dn_hash.cpp
|
|
//
|
|
////////////////////////////////////////////////////////////////////////////////////////////////////
|
|
*/
|
|
|
|
// NOTE: DN_FNV1A //////////////////////////////////////////////////////////////////////////////////
|
|
// Default values recommended by: http://isthe.com/chongo/tech/comp/fnv/
|
|
DN_API uint32_t DN_FNV1A32_Iterate(void const *bytes, DN_USize size, uint32_t hash)
|
|
{
|
|
auto buffer = DN_CAST(uint8_t const *)bytes;
|
|
for (DN_USize i = 0; i < size; i++)
|
|
hash = (buffer[i] ^ hash) * 16777619 /*FNV Prime*/;
|
|
return hash;
|
|
}
|
|
|
|
DN_API uint32_t DN_FNV1A32_Hash(void const *bytes, DN_USize size)
|
|
{
|
|
uint32_t result = DN_FNV1A32_Iterate(bytes, size, DN_FNV1A32_SEED);
|
|
return result;
|
|
}
|
|
|
|
DN_API uint64_t DN_FNV1A64_Iterate(void const *bytes, DN_USize size, uint64_t hash)
|
|
{
|
|
auto buffer = DN_CAST(uint8_t const *)bytes;
|
|
for (DN_USize i = 0; i < size; i++)
|
|
hash = (buffer[i] ^ hash) * 1099511628211 /*FNV Prime*/;
|
|
return hash;
|
|
}
|
|
|
|
DN_API uint64_t DN_FNV1A64_Hash(void const *bytes, DN_USize size)
|
|
{
|
|
uint64_t result = DN_FNV1A64_Iterate(bytes, size, DN_FNV1A64_SEED);
|
|
return result;
|
|
}
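// NOTE: Usage sketch (editor's illustrative example, not part of the library) //////////////////////
// FNV-1a folds the input one byte at a time, so the one-shot hash and a chained hash built by
// threading the running value through _Iterate produce the same result. The key and the 5-byte
// split point below are arbitrary illustrative choices.
#if 0
static void DN_FNV1A_UsageSketch_()
{
    DN_Str8  key  = DN_STR8("hello world");
    uint64_t hash = DN_FNV1A64_Hash(key.data, key.size);

    uint64_t chained = DN_FNV1A64_Iterate(key.data,     5,            DN_FNV1A64_SEED); // "hello"
    chained          = DN_FNV1A64_Iterate(key.data + 5, key.size - 5, chained);         // " world"
    DN_Assert(chained == hash);
}
#endif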
|
|
|
|
// NOTE: DN_MurmurHash3 ////////////////////////////////////////////////////////////////////////////
|
|
#if defined(DN_COMPILER_MSVC) || defined(DN_COMPILER_CLANG_CL)
|
|
#define DN_MMH3_ROTL32(x, y) _rotl(x, y)
|
|
#define DN_MMH3_ROTL64(x, y) _rotl64(x, y)
|
|
#else
|
|
#define DN_MMH3_ROTL32(x, y) ((x) << (y)) | ((x) >> (32 - (y)))
|
|
#define DN_MMH3_ROTL64(x, y) ((x) << (y)) | ((x) >> (64 - (y)))
|
|
#endif
|
|
|
|
//-----------------------------------------------------------------------------
|
|
// Block read - if your platform needs to do endian-swapping or can only
|
|
// handle aligned reads, do the conversion here
|
|
|
|
DN_FORCE_INLINE uint32_t DN_MurmurHash3_GetBlock32(uint32_t const *p, int i)
|
|
{
|
|
return p[i];
|
|
}
|
|
|
|
DN_FORCE_INLINE uint64_t DN_MurmurHash3_GetBlock64(uint64_t const *p, int i)
|
|
{
|
|
return p[i];
|
|
}
|
|
|
|
//-----------------------------------------------------------------------------
|
|
// Finalization mix - force all bits of a hash block to avalanche
|
|
|
|
DN_FORCE_INLINE uint32_t DN_MurmurHash3_FMix32(uint32_t h)
|
|
{
|
|
h ^= h >> 16;
|
|
h *= 0x85ebca6b;
|
|
h ^= h >> 13;
|
|
h *= 0xc2b2ae35;
|
|
h ^= h >> 16;
|
|
return h;
|
|
}
|
|
|
|
DN_FORCE_INLINE uint64_t DN_MurmurHash3_FMix64(uint64_t k)
|
|
{
|
|
k ^= k >> 33;
|
|
k *= 0xff51afd7ed558ccd;
|
|
k ^= k >> 33;
|
|
k *= 0xc4ceb9fe1a85ec53;
|
|
k ^= k >> 33;
|
|
return k;
|
|
}
|
|
|
|
DN_API uint32_t DN_MurmurHash3_x86U32(void const *key, int len, uint32_t seed)
|
|
{
|
|
const uint8_t *data = (const uint8_t *)key;
|
|
const int nblocks = len / 4;
|
|
|
|
uint32_t h1 = seed;
|
|
|
|
const uint32_t c1 = 0xcc9e2d51;
|
|
const uint32_t c2 = 0x1b873593;
|
|
|
|
//----------
|
|
// body
|
|
|
|
const uint32_t *blocks = (const uint32_t *)(data + nblocks * 4);
|
|
|
|
for (int i = -nblocks; i; i++)
|
|
{
|
|
uint32_t k1 = DN_MurmurHash3_GetBlock32(blocks, i);
|
|
|
|
k1 *= c1;
|
|
k1 = DN_MMH3_ROTL32(k1, 15);
|
|
k1 *= c2;
|
|
|
|
h1 ^= k1;
|
|
h1 = DN_MMH3_ROTL32(h1, 13);
|
|
h1 = h1 * 5 + 0xe6546b64;
|
|
}
|
|
|
|
//----------
|
|
// tail
|
|
|
|
const uint8_t *tail = (const uint8_t *)(data + nblocks * 4);
|
|
|
|
uint32_t k1 = 0;
|
|
|
|
switch (len & 3)
|
|
{
|
|
case 3:
|
|
k1 ^= tail[2] << 16;
|
|
case 2:
|
|
k1 ^= tail[1] << 8;
|
|
case 1:
|
|
k1 ^= tail[0];
|
|
k1 *= c1;
|
|
k1 = DN_MMH3_ROTL32(k1, 15);
|
|
k1 *= c2;
|
|
h1 ^= k1;
|
|
};
|
|
|
|
//----------
|
|
// finalization
|
|
|
|
h1 ^= len;
|
|
|
|
h1 = DN_MurmurHash3_FMix32(h1);
|
|
|
|
return h1;
|
|
}
|
|
|
|
DN_API DN_MurmurHash3 DN_MurmurHash3_x64U128(void const *key, int len, uint32_t seed)
|
|
{
|
|
const uint8_t *data = (const uint8_t *)key;
|
|
const int nblocks = len / 16;
|
|
|
|
uint64_t h1 = seed;
|
|
uint64_t h2 = seed;
|
|
|
|
const uint64_t c1 = 0x87c37b91114253d5;
|
|
const uint64_t c2 = 0x4cf5ad432745937f;
|
|
|
|
//----------
|
|
// body
|
|
|
|
const uint64_t *blocks = (const uint64_t *)(data);
|
|
|
|
for (int i = 0; i < nblocks; i++)
|
|
{
|
|
uint64_t k1 = DN_MurmurHash3_GetBlock64(blocks, i * 2 + 0);
|
|
uint64_t k2 = DN_MurmurHash3_GetBlock64(blocks, i * 2 + 1);
|
|
|
|
k1 *= c1;
|
|
k1 = DN_MMH3_ROTL64(k1, 31);
|
|
k1 *= c2;
|
|
h1 ^= k1;
|
|
|
|
h1 = DN_MMH3_ROTL64(h1, 27);
|
|
h1 += h2;
|
|
h1 = h1 * 5 + 0x52dce729;
|
|
|
|
k2 *= c2;
|
|
k2 = DN_MMH3_ROTL64(k2, 33);
|
|
k2 *= c1;
|
|
h2 ^= k2;
|
|
|
|
h2 = DN_MMH3_ROTL64(h2, 31);
|
|
h2 += h1;
|
|
h2 = h2 * 5 + 0x38495ab5;
|
|
}
|
|
|
|
//----------
|
|
// tail
|
|
|
|
const uint8_t *tail = (const uint8_t *)(data + nblocks * 16);
|
|
|
|
uint64_t k1 = 0;
|
|
uint64_t k2 = 0;
|
|
|
|
switch (len & 15)
|
|
{
|
|
case 15:
|
|
k2 ^= ((uint64_t)tail[14]) << 48;
|
|
case 14:
|
|
k2 ^= ((uint64_t)tail[13]) << 40;
|
|
case 13:
|
|
k2 ^= ((uint64_t)tail[12]) << 32;
|
|
case 12:
|
|
k2 ^= ((uint64_t)tail[11]) << 24;
|
|
case 11:
|
|
k2 ^= ((uint64_t)tail[10]) << 16;
|
|
case 10:
|
|
k2 ^= ((uint64_t)tail[9]) << 8;
|
|
case 9:
|
|
k2 ^= ((uint64_t)tail[8]) << 0;
|
|
k2 *= c2;
|
|
k2 = DN_MMH3_ROTL64(k2, 33);
|
|
k2 *= c1;
|
|
h2 ^= k2;
|
|
|
|
case 8:
|
|
k1 ^= ((uint64_t)tail[7]) << 56;
|
|
case 7:
|
|
k1 ^= ((uint64_t)tail[6]) << 48;
|
|
case 6:
|
|
k1 ^= ((uint64_t)tail[5]) << 40;
|
|
case 5:
|
|
k1 ^= ((uint64_t)tail[4]) << 32;
|
|
case 4:
|
|
k1 ^= ((uint64_t)tail[3]) << 24;
|
|
case 3:
|
|
k1 ^= ((uint64_t)tail[2]) << 16;
|
|
case 2:
|
|
k1 ^= ((uint64_t)tail[1]) << 8;
|
|
case 1:
|
|
k1 ^= ((uint64_t)tail[0]) << 0;
|
|
k1 *= c1;
|
|
k1 = DN_MMH3_ROTL64(k1, 31);
|
|
k1 *= c2;
|
|
h1 ^= k1;
|
|
};
|
|
|
|
//----------
|
|
// finalization
|
|
|
|
h1 ^= len;
|
|
h2 ^= len;
|
|
|
|
h1 += h2;
|
|
h2 += h1;
|
|
|
|
h1 = DN_MurmurHash3_FMix64(h1);
|
|
h2 = DN_MurmurHash3_FMix64(h2);
|
|
|
|
h1 += h2;
|
|
h2 += h1;
|
|
|
|
DN_MurmurHash3 result = {};
|
|
result.e[0] = h1;
|
|
result.e[1] = h2;
|
|
return result;
|
|
}
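// NOTE: Usage sketch (editor's illustrative example, not part of the library) //////////////////////
// The 32-bit variant suits hash tables over short keys; the x64 variant returns 128 bits split
// across two 64-bit lanes. The key and seed below are arbitrary; only consistency of the seed
// between runs that must agree matters.
#if 0
static void DN_MurmurHash3_UsageSketch_()
{
    DN_Str8        key  = DN_STR8("example");
    uint32_t       h32  = DN_MurmurHash3_x86U32 (key.data, DN_CAST(int) key.size, 0xdeadbeef /*seed*/);
    DN_MurmurHash3 h128 = DN_MurmurHash3_x64U128(key.data, DN_CAST(int) key.size, 0xdeadbeef /*seed*/);
    uint64_t       low  = h128.e[0], high = h128.e[1];
    (void)h32; (void)low; (void)high;
}
#endif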
|
|
#define DN_HELPERS_CPP
|
|
|
|
/*
|
|
////////////////////////////////////////////////////////////////////////////////////////////////////
|
|
//
|
|
// $$\ $$\ $$$$$$$$\ $$\ $$$$$$$\ $$$$$$$$\ $$$$$$$\ $$$$$$\
|
|
// $$ | $$ |$$ _____|$$ | $$ __$$\ $$ _____|$$ __$$\ $$ __$$\
|
|
// $$ | $$ |$$ | $$ | $$ | $$ |$$ | $$ | $$ |$$ / \__|
|
|
// $$$$$$$$ |$$$$$\ $$ | $$$$$$$ |$$$$$\ $$$$$$$ |\$$$$$$\
|
|
// $$ __$$ |$$ __| $$ | $$ ____/ $$ __| $$ __$$< \____$$\
|
|
// $$ | $$ |$$ | $$ | $$ | $$ | $$ | $$ |$$\ $$ |
|
|
// $$ | $$ |$$$$$$$$\ $$$$$$$$\ $$ | $$$$$$$$\ $$ | $$ |\$$$$$$ |
|
|
// \__| \__|\________|\________|\__| \________|\__| \__| \______/
|
|
//
|
|
// dn_helpers.cpp
|
|
//
|
|
////////////////////////////////////////////////////////////////////////////////////////////////////
|
|
*/
|
|
|
|
// NOTE: DN_PCG32 //////////////////////////////////////////////////////////////////////////////////
|
|
#define DN_PCG_DEFAULT_MULTIPLIER_64 6364136223846793005ULL
|
|
#define DN_PCG_DEFAULT_INCREMENT_64 1442695040888963407ULL
|
|
|
|
DN_API DN_PCG32 DN_PCG32_Init(uint64_t seed)
|
|
{
|
|
DN_PCG32 result = {};
|
|
DN_PCG32_Next(&result);
|
|
result.state += seed;
|
|
DN_PCG32_Next(&result);
|
|
return result;
|
|
}
|
|
|
|
DN_API uint32_t DN_PCG32_Next(DN_PCG32 *rng)
|
|
{
|
|
uint64_t state = rng->state;
|
|
rng->state = state * DN_PCG_DEFAULT_MULTIPLIER_64 + DN_PCG_DEFAULT_INCREMENT_64;
|
|
|
|
// XSH-RR
|
|
uint32_t value = (uint32_t)((state ^ (state >> 18)) >> 27);
|
|
int rot = state >> 59;
|
|
return rot ? (value >> rot) | (value << (32 - rot)) : value;
|
|
}
|
|
|
|
DN_API uint64_t DN_PCG32_Next64(DN_PCG32 *rng)
|
|
{
|
|
uint64_t value = DN_PCG32_Next(rng);
|
|
value <<= 32;
|
|
value |= DN_PCG32_Next(rng);
|
|
return value;
|
|
}
|
|
|
|
DN_API uint32_t DN_PCG32_Range(DN_PCG32 *rng, uint32_t low, uint32_t high)
|
|
{
|
|
uint32_t bound = high - low;
|
|
    uint32_t threshold = (0u - bound) % bound; // Rejection threshold so values in [low, high) stay unbiased
|
|
|
|
for (;;) {
|
|
uint32_t r = DN_PCG32_Next(rng);
|
|
if (r >= threshold)
|
|
return low + (r % bound);
|
|
}
|
|
}
|
|
|
|
DN_API float DN_PCG32_NextF32(DN_PCG32 *rng)
|
|
{
|
|
uint32_t x = DN_PCG32_Next(rng);
|
|
return (float)(int32_t)(x >> 8) * 0x1.0p-24f;
|
|
}
|
|
|
|
DN_API double DN_PCG32_NextF64(DN_PCG32 *rng)
|
|
{
|
|
uint64_t x = DN_PCG32_Next64(rng);
|
|
return (double)(int64_t)(x >> 11) * 0x1.0p-53;
|
|
}
|
|
|
|
DN_API void DN_PCG32_Advance(DN_PCG32 *rng, uint64_t delta)
|
|
{
|
|
uint64_t cur_mult = DN_PCG_DEFAULT_MULTIPLIER_64;
|
|
uint64_t cur_plus = DN_PCG_DEFAULT_INCREMENT_64;
|
|
|
|
uint64_t acc_mult = 1;
|
|
uint64_t acc_plus = 0;
|
|
|
|
while (delta != 0) {
|
|
if (delta & 1) {
|
|
acc_mult *= cur_mult;
|
|
acc_plus = acc_plus * cur_mult + cur_plus;
|
|
}
|
|
cur_plus = (cur_mult + 1) * cur_plus;
|
|
cur_mult *= cur_mult;
|
|
delta >>= 1;
|
|
}
|
|
|
|
rng->state = acc_mult * rng->state + acc_plus;
|
|
}
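// NOTE: Usage sketch (editor's illustrative example, not part of the library) //////////////////////
// The generator is deterministic for a given seed. Range() rejection-samples so values in
// [low, high) are unbiased, and Advance() jumps the stream as if Next() had been called `delta`
// times without generating the intermediate values. The seed and constants below are arbitrary.
#if 0
static void DN_PCG32_UsageSketch_()
{
    DN_PCG32 rng  = DN_PCG32_Init(0x5EED /*seed*/);
    uint32_t dice = DN_PCG32_Range(&rng, 1, 7); // 1..6; `high` is exclusive
    float    unit = DN_PCG32_NextF32(&rng);     // [0, 1)
    DN_PCG32_Advance(&rng, 1000);               // Skip ahead 1000 draws in O(log delta) work
    (void)dice; (void)unit;
}
#endif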
|
|
|
|
#if !defined(DN_NO_JSON_BUILDER)
|
|
// NOTE: DN_JSONBuilder ////////////////////////////////////////////////////////////////////////////
|
|
DN_API DN_JSONBuilder DN_JSONBuilder_Init(DN_Arena *arena, int spaces_per_indent)
|
|
{
|
|
DN_JSONBuilder result = {};
|
|
result.spaces_per_indent = spaces_per_indent;
|
|
result.string_builder.arena = arena;
|
|
return result;
|
|
}
|
|
|
|
DN_API DN_Str8 DN_JSONBuilder_Build(DN_JSONBuilder const *builder, DN_Arena *arena)
|
|
{
|
|
DN_Str8 result = DN_Str8Builder_Build(&builder->string_builder, arena);
|
|
return result;
|
|
}
|
|
|
|
DN_API void DN_JSONBuilder_KeyValue(DN_JSONBuilder *builder, DN_Str8 key, DN_Str8 value)
|
|
{
|
|
if (key.size == 0 && value.size == 0)
|
|
return;
|
|
|
|
DN_JSONBuilderItem item = DN_JSONBuilderItem_KeyValue;
|
|
if (value.size >= 1) {
|
|
if (value.data[0] == '{' || value.data[0] == '[')
|
|
item = DN_JSONBuilderItem_OpenContainer;
|
|
else if (value.data[0] == '}' || value.data[0] == ']')
|
|
item = DN_JSONBuilderItem_CloseContainer;
|
|
}
|
|
|
|
bool adding_to_container_with_items =
|
|
item != DN_JSONBuilderItem_CloseContainer && (builder->last_item == DN_JSONBuilderItem_KeyValue ||
|
|
builder->last_item == DN_JSONBuilderItem_CloseContainer);
|
|
|
|
uint8_t prefix_size = 0;
|
|
char prefix[2] = {0};
|
|
if (adding_to_container_with_items)
|
|
prefix[prefix_size++] = ',';
|
|
|
|
if (builder->last_item != DN_JSONBuilderItem_Empty)
|
|
prefix[prefix_size++] = '\n';
|
|
|
|
if (item == DN_JSONBuilderItem_CloseContainer)
|
|
builder->indent_level--;
|
|
|
|
int spaces_per_indent = builder->spaces_per_indent ? builder->spaces_per_indent : 2;
|
|
int spaces = builder->indent_level * spaces_per_indent;
|
|
|
|
if (key.size)
|
|
DN_Str8Builder_AppendF(&builder->string_builder,
|
|
"%.*s%*c\"%.*s\": %.*s",
|
|
prefix_size,
|
|
prefix,
|
|
spaces,
|
|
' ',
|
|
DN_STR_FMT(key),
|
|
DN_STR_FMT(value));
|
|
else if (spaces == 0)
|
|
DN_Str8Builder_AppendF(&builder->string_builder, "%.*s%.*s", prefix_size, prefix, DN_STR_FMT(value));
|
|
else
|
|
DN_Str8Builder_AppendF(&builder->string_builder, "%.*s%*c%.*s", prefix_size, prefix, spaces, ' ', DN_STR_FMT(value));
|
|
|
|
if (item == DN_JSONBuilderItem_OpenContainer)
|
|
builder->indent_level++;
|
|
|
|
builder->last_item = item;
|
|
}
|
|
|
|
DN_API void DN_JSONBuilder_KeyValueFV(DN_JSONBuilder *builder, DN_Str8 key, char const *value_fmt, va_list args)
|
|
{
|
|
DN_OSTLSTMem tmem = DN_OS_TLSTMem(builder->string_builder.arena);
|
|
DN_Str8 value = DN_Str8_InitFV(tmem.arena, value_fmt, args);
|
|
DN_JSONBuilder_KeyValue(builder, key, value);
|
|
}
|
|
|
|
DN_API void DN_JSONBuilder_KeyValueF(DN_JSONBuilder *builder, DN_Str8 key, char const *value_fmt, ...)
|
|
{
|
|
va_list args;
|
|
va_start(args, value_fmt);
|
|
DN_JSONBuilder_KeyValueFV(builder, key, value_fmt, args);
|
|
va_end(args);
|
|
}
|
|
|
|
DN_API void DN_JSONBuilder_ObjectBeginNamed(DN_JSONBuilder *builder, DN_Str8 name)
|
|
{
|
|
DN_JSONBuilder_KeyValue(builder, name, DN_STR8("{"));
|
|
}
|
|
|
|
DN_API void DN_JSONBuilder_ObjectEnd(DN_JSONBuilder *builder)
|
|
{
|
|
DN_JSONBuilder_KeyValue(builder, DN_STR8(""), DN_STR8("}"));
|
|
}
|
|
|
|
DN_API void DN_JSONBuilder_ArrayBeginNamed(DN_JSONBuilder *builder, DN_Str8 name)
|
|
{
|
|
DN_JSONBuilder_KeyValue(builder, name, DN_STR8("["));
|
|
}
|
|
|
|
DN_API void DN_JSONBuilder_ArrayEnd(DN_JSONBuilder *builder)
|
|
{
|
|
DN_JSONBuilder_KeyValue(builder, DN_STR8(""), DN_STR8("]"));
|
|
}
|
|
|
|
DN_API void DN_JSONBuilder_Str8Named(DN_JSONBuilder *builder, DN_Str8 key, DN_Str8 value)
|
|
{
|
|
    DN_JSONBuilder_KeyValueF(builder, key, "\"%.*s\"", DN_STR_FMT(value));
|
|
}
|
|
|
|
DN_API void DN_JSONBuilder_LiteralNamed(DN_JSONBuilder *builder, DN_Str8 key, DN_Str8 value)
|
|
{
|
|
    DN_JSONBuilder_KeyValueF(builder, key, "%.*s", DN_STR_FMT(value));
|
|
}
|
|
|
|
DN_API void DN_JSONBuilder_U64Named(DN_JSONBuilder *builder, DN_Str8 key, uint64_t value)
|
|
{
|
|
    DN_JSONBuilder_KeyValueF(builder, key, "%" PRIu64, value);
|
|
}
|
|
|
|
DN_API void DN_JSONBuilder_I64Named(DN_JSONBuilder *builder, DN_Str8 key, int64_t value)
|
|
{
|
|
    DN_JSONBuilder_KeyValueF(builder, key, "%" PRId64, value);
|
|
}
|
|
|
|
DN_API void DN_JSONBuilder_F64Named(DN_JSONBuilder *builder, DN_Str8 key, double value, int decimal_places)
|
|
{
|
|
if (!builder)
|
|
return;
|
|
|
|
if (decimal_places >= 16)
|
|
decimal_places = 16;
|
|
|
|
// NOTE: Generate the format string for the float, depending on how many
|
|
// decimals places it wants.
|
|
char float_fmt[16];
|
|
if (decimal_places > 0) {
|
|
// NOTE: Emit the format string "%.<decimal_places>f" i.e. %.1f
|
|
DN_SNPrintF(float_fmt, sizeof(float_fmt), "%%.%df", decimal_places);
|
|
} else {
|
|
// NOTE: Emit the format string "%f"
|
|
DN_SNPrintF(float_fmt, sizeof(float_fmt), "%%f");
|
|
}
|
|
DN_JSONBuilder_KeyValueF(builder, key, float_fmt, value);
|
|
}
|
|
|
|
DN_API void DN_JSONBuilder_BoolNamed(DN_JSONBuilder *builder, DN_Str8 key, bool value)
|
|
{
|
|
DN_Str8 value_string = value ? DN_STR8("true") : DN_STR8("false");
|
|
    DN_JSONBuilder_KeyValueF(builder, key, "%.*s", DN_STR_FMT(value_string));
|
|
}
|
|
#endif // !defined(DN_NO_JSON_BUILDER)
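// NOTE: Usage sketch (editor's illustrative example, not part of the library) //////////////////////
// Builds a small object into `arena` (assumed to be a live DN_Arena). Passing an empty name opens
// an anonymous root container; the keys, values and function name below are illustrative.
#if 0
static void DN_JSONBuilder_UsageSketch_(DN_Arena *arena)
{
    DN_JSONBuilder builder = DN_JSONBuilder_Init(arena, 2 /*spaces_per_indent*/);
    DN_JSONBuilder_ObjectBeginNamed(&builder, DN_STR8(""));
    DN_JSONBuilder_Str8Named       (&builder, DN_STR8("name"),  DN_STR8("alice"));
    DN_JSONBuilder_U64Named        (&builder, DN_STR8("id"),    42);
    DN_JSONBuilder_BoolNamed       (&builder, DN_STR8("admin"), false);
    DN_JSONBuilder_ObjectEnd       (&builder);
    DN_Str8 json = DN_JSONBuilder_Build(&builder, arena); // Pretty-printed "name"/"id"/"admin" object
    (void)json;
}
#endif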
|
|
|
|
// NOTE: DN_JobQueue ///////////////////////////////////////////////////////////////////////////////
|
|
DN_API DN_JobQueueSPMC DN_OS_JobQueueSPMCInit()
|
|
{
|
|
DN_JobQueueSPMC result = {};
|
|
result.thread_wait_for_job_semaphore = DN_OS_SemaphoreInit(0 /*initial_count*/);
|
|
result.wait_for_completion_semaphore = DN_OS_SemaphoreInit(0 /*initial_count*/);
|
|
result.complete_queue_write_semaphore = DN_OS_SemaphoreInit(DN_ArrayCountU(result.complete_queue));
|
|
result.mutex = DN_OS_MutexInit();
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_OS_JobQueueSPMCCanAdd(DN_JobQueueSPMC const *queue, uint32_t count)
|
|
{
|
|
uint32_t read_index = queue->read_index;
|
|
uint32_t write_index = queue->write_index;
|
|
uint32_t size = write_index - read_index;
|
|
bool result = (size + count) <= DN_ArrayCountU(queue->jobs);
|
|
return result;
|
|
}
|
|
|
|
DN_API bool DN_OS_JobQueueSPMCAddArray(DN_JobQueueSPMC *queue, DN_Job *jobs, uint32_t count)
|
|
{
|
|
if (!queue)
|
|
return false;
|
|
|
|
uint32_t const pot_mask = DN_ArrayCountU(queue->jobs) - 1;
|
|
uint32_t read_index = queue->read_index;
|
|
uint32_t write_index = queue->write_index;
|
|
uint32_t size = write_index - read_index;
|
|
|
|
if ((size + count) > DN_ArrayCountU(queue->jobs))
|
|
return false;
|
|
|
|
for (size_t offset = 0; offset < count; offset++) {
|
|
uint32_t wrapped_write_index = (write_index + offset) & pot_mask;
|
|
queue->jobs[wrapped_write_index] = jobs[offset];
|
|
}
|
|
|
|
DN_OS_MutexLock(&queue->mutex);
|
|
queue->write_index += count;
|
|
DN_OS_SemaphoreIncrement(&queue->thread_wait_for_job_semaphore, count);
|
|
DN_OS_MutexUnlock(&queue->mutex);
|
|
return true;
|
|
}
|
|
|
|
DN_API bool DN_OS_JobQueueSPMCAdd(DN_JobQueueSPMC *queue, DN_Job job)
|
|
{
|
|
bool result = DN_OS_JobQueueSPMCAddArray(queue, &job, 1);
|
|
return result;
|
|
}
|
|
|
|
DN_API int32_t DN_OS_JobQueueSPMCThread(DN_OSThread *thread)
|
|
{
|
|
DN_JobQueueSPMC *queue = DN_CAST(DN_JobQueueSPMC *) thread->user_context;
|
|
uint32_t const pot_mask = DN_ArrayCountU(queue->jobs) - 1;
|
|
static_assert(DN_ArrayCountU(queue->jobs) == DN_ArrayCountU(queue->complete_queue), "PoT mask is used to mask access to both arrays");
|
|
|
|
for (;;) {
|
|
DN_OS_SemaphoreWait(&queue->thread_wait_for_job_semaphore, DN_OS_SEMAPHORE_INFINITE_TIMEOUT);
|
|
if (queue->quit)
|
|
break;
|
|
|
|
DN_Assert(queue->read_index != queue->write_index);
|
|
|
|
DN_OS_MutexLock(&queue->mutex);
|
|
uint32_t wrapped_read_index = queue->read_index & pot_mask;
|
|
DN_Job job = queue->jobs[wrapped_read_index];
|
|
queue->read_index += 1;
|
|
DN_OS_MutexUnlock(&queue->mutex);
|
|
|
|
job.elapsed_tsc -= DN_CPU_TSC();
|
|
job.func(thread, job.user_context);
|
|
job.elapsed_tsc += DN_CPU_TSC();
|
|
|
|
if (job.add_to_completion_queue) {
|
|
DN_OS_SemaphoreWait(&queue->complete_queue_write_semaphore, DN_OS_SEMAPHORE_INFINITE_TIMEOUT);
|
|
DN_OS_MutexLock(&queue->mutex);
|
|
queue->complete_queue[(queue->complete_write_index++ & pot_mask)] = job;
|
|
DN_OS_MutexUnlock(&queue->mutex);
|
|
DN_OS_SemaphoreIncrement(&queue->complete_queue_write_semaphore, 1);
|
|
}
|
|
|
|
// NOTE: Update finish counter
|
|
DN_OS_MutexLock(&queue->mutex);
|
|
queue->finish_index += 1;
|
|
|
|
        // NOTE: If all jobs are finished and other threads are
|
|
        // blocked in `WaitForCompletion` on this job queue, release the
|
|
        // semaphore to wake them all up.
|
|
bool all_jobs_finished = queue->finish_index == queue->write_index;
|
|
if (all_jobs_finished && queue->threads_waiting_for_completion) {
|
|
DN_OS_SemaphoreIncrement(&queue->wait_for_completion_semaphore,
|
|
queue->threads_waiting_for_completion);
|
|
queue->threads_waiting_for_completion = 0;
|
|
}
|
|
DN_OS_MutexUnlock(&queue->mutex);
|
|
}
|
|
|
|
return queue->quit_exit_code;
|
|
}
|
|
|
|
DN_API void DN_OS_JobQueueSPMCWaitForCompletion(DN_JobQueueSPMC *queue)
|
|
{
|
|
DN_OS_MutexLock(&queue->mutex);
|
|
if (queue->finish_index == queue->write_index) {
|
|
DN_OS_MutexUnlock(&queue->mutex);
|
|
return;
|
|
}
|
|
queue->threads_waiting_for_completion++;
|
|
DN_OS_MutexUnlock(&queue->mutex);
|
|
|
|
DN_OS_SemaphoreWait(&queue->wait_for_completion_semaphore, DN_OS_SEMAPHORE_INFINITE_TIMEOUT);
|
|
}
|
|
|
|
DN_API DN_USize DN_OS_JobQueueSPMCGetFinishedJobs(DN_JobQueueSPMC *queue, DN_Job *jobs, DN_USize jobs_size)
|
|
{
|
|
DN_USize result = 0;
|
|
if (!queue || !jobs || jobs_size <= 0)
|
|
return result;
|
|
|
|
uint32_t const pot_mask = DN_ArrayCountU(queue->jobs) - 1;
|
|
DN_OS_MutexLock(&queue->mutex);
|
|
while (queue->complete_read_index < queue->complete_write_index && result < jobs_size)
|
|
jobs[result++] = queue->complete_queue[(queue->complete_read_index++ & pot_mask)];
|
|
DN_OS_MutexUnlock(&queue->mutex);
|
|
|
|
return result;
|
|
} |
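// NOTE: Usage sketch (editor's illustrative example, not part of the library) //////////////////////
// Single-producer/multiple-consumer flow: workers run DN_OS_JobQueueSPMCThread, the producer adds
// jobs, optionally blocks until the queue drains, then polls completed jobs. The callback signature,
// the thread setup via DN_OS_ThreadInit and the shutdown comment are inferred from the code above
// and from DN_ASYNC_Init, not from documented API guarantees; names are hypothetical.
#if 0
static void DN_JobQueueSPMC_JobSketch_(DN_OSThread *thread, void *user_context)
{
    (void)thread; (void)user_context; // Do the actual work here
}

static void DN_JobQueueSPMC_UsageSketch_()
{
    DN_JobQueueSPMC queue      = DN_OS_JobQueueSPMCInit();
    DN_OSThread     workers[4] = {};
    for (DN_OSThread &worker : workers)
        DN_OS_ThreadInit(&worker, DN_OS_JobQueueSPMCThread, &queue);

    DN_Job job                  = {};
    job.func                    = DN_JobQueueSPMC_JobSketch_;
    job.user_context            = nullptr;
    job.add_to_completion_queue = true; // Ask the worker to stash the finished job for later polling
    DN_OS_JobQueueSPMCAdd(&queue, job);

    DN_OS_JobQueueSPMCWaitForCompletion(&queue);
    DN_Job   finished[8];
    DN_USize finished_count = DN_OS_JobQueueSPMCGetFinishedJobs(&queue, finished, DN_ArrayCountU(finished));
    (void)finished_count;

    // Shutdown (inferred, not shown in this file): set `queue.quit`, then increment
    // `thread_wait_for_job_semaphore` once per worker so each thread wakes and exits.
}
#endif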