First commit of Engine V2

Changelog:
- Integrate custom memory arena with o1heap
- Integrate sc and cc for data structures
    - integrate with o1heap for dynamic memory
- Add some tests
master
En Yi 2024-12-28 15:07:05 +08:00
commit ea4c1216e0
34 changed files with 8359 additions and 0 deletions

2
.gitignore vendored 100644
View File

@ -0,0 +1,2 @@
.cache/
build/

19
CMakeLists.txt 100644
View File

@ -0,0 +1,19 @@
# Must be the FIRST command in the file: it establishes the policy baseline
# that every later command relies on.
cmake_minimum_required(VERSION 3.22.1)

set(PROJECT_NAME EngineV2)

# NOTE(review): hardcoding the compiler inside the lists file is fragile;
# prefer -DCMAKE_C_COMPILER=clang or a toolchain file. Kept here (it must
# precede project()) to preserve the existing build behavior.
set(CMAKE_C_COMPILER clang)

project(${PROJECT_NAME} C CXX)

set(CMAKE_C_STANDARD 17)
set(CMAKE_C_STANDARD_REQUIRED ON)
set(CMAKE_EXPORT_COMPILE_COMMANDS ON)

# Warning flags for C translation units only (previously appended to the
# global CMAKE_C_FLAGS string before cmake_minimum_required/project ran,
# which is unreliable).
add_compile_options("$<$<COMPILE_LANGUAGE:C>:-Wall;-Wextra>")

if (EMSCRIPTEN)
    # Was "-DPLATFORM_WEB" smuggled through CMAKE_C_FLAGS; the two no-op
    # self-assignments of CMAKE_C_FLAGS/CMAKE_CXX_FLAGS are dropped.
    add_compile_definitions(PLATFORM_WEB)
    # Emscripten link options: GLFW3 shim, WebGL2-only context, 16 MiB heap,
    # 1 MiB stack, ASYNCIFY for the browser main loop, bundled ./res assets.
    set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -s USE_GLFW=3 -s ASSERTIONS=1 -s WASM=1 -s ASYNCIFY -s MIN_WEBGL_VERSION=2 -s MAX_WEBGL_VERSION=2 -s TOTAL_MEMORY=16777216 -s TOTAL_STACK=1048576 --preload-file ./res ")
    set(CMAKE_EXECUTABLE_SUFFIX ".html")
endif ()

enable_testing()
add_subdirectory(engine)

View File

@ -0,0 +1,19 @@
# 'base' provides cc, mem_arena and friends that 'engine' links against.
add_subdirectory(base)

add_library(engine
    memory.c
)
target_include_directories(engine
    PUBLIC
        ${CMAKE_CURRENT_SOURCE_DIR}
)
target_link_libraries(engine
    PUBLIC
        cc
)

# Build tests unless the user explicitly disabled them. BUILD_TESTING is only
# defined when the top level includes CTest; default to ON when it is unset so
# the previous (unconditionally enabled) behavior is preserved. This replaces
# the commented-out guard that left find_package(cmocka REQUIRED) running on
# every configure, even for consumers that never build the tests.
if (NOT DEFINED BUILD_TESTING OR BUILD_TESTING)
    # cmocka is needed only by the tests, so only require it here.
    find_package(cmocka 1.1.0 REQUIRED)
    add_subdirectory(tests)
endif ()

View File

@ -0,0 +1,36 @@
# Vendored third-party dependencies first: they define the targets that the
# libraries below link against.
add_subdirectory(o1heap)
add_subdirectory(sc_extra)
add_subdirectory(sc_ds)
add_subdirectory(cc)
# mem_arena: engine-wide singleton allocator built on o1heap (see
# memory_arena.c). Its dependencies are PRIVATE on the assumption that the
# headers under public/internal do not expose o1heap/sc/cc types -- verify
# if that ever changes.
add_library(mem_arena
STATIC
memory_arena.c
)
target_include_directories(mem_arena
PUBLIC
${CMAKE_CURRENT_LIST_DIR}/public/internal
)
target_link_libraries(mem_arena
PRIVATE
o1heap
sc_mutex
cc
)
# 'base' is the umbrella target consumed by the rest of the engine;
# base_dummy.c exists only so this can be a compiled (non-INTERFACE) library.
add_library(base
base_dummy.c
)
target_include_directories(base
PUBLIC
${CMAKE_CURRENT_LIST_DIR}/public
)
# PUBLIC so that anything linking 'base' transitively gets cc and mem_arena.
target_link_libraries(base
PUBLIC
cc
mem_arena
)

View File

@ -0,0 +1,79 @@
sc
---
BSD-3-Clause
Copyright 2021 Ozan Tezcan
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
Convenient Containers
---
MIT License
Copyright (c) 2022-2024 Jackson L. Allan
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
o1heap
---
MIT License
Copyright (c) 2020 Pavel Kirienko
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@ -0,0 +1 @@
/* Dummy translation unit: gives the 'base' library one compilable source so
 * it can be a non-INTERFACE target; including the public header doubles as a
 * compile check of that header. */
#include "memory_arena.h"

View File

@ -0,0 +1,2 @@
# cc ("Convenient Containers") is header-only, so it is modeled as an
# INTERFACE library: consumers inherit only the include path.
add_library(cc INTERFACE)
target_include_directories(cc INTERFACE ${CMAKE_CURRENT_LIST_DIR})

6417
engine/base/cc/cc.h 100644

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,148 @@
#include "memory_arena.h"
#include "o1heap.h"
#define CC_NO_SHORT_NAMES
#include "cc.h"
#include "sc_mutex.h"
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
/**
 * This memory arena is engine-wide; thus it takes on a singleton design.
 *
 * o1heap does not store the actual buffer size of an allocation.
 * To reimplement realloc, we need to keep track of the size of the memory
 * allocation behind a pointer so that the old contents can be copied over.
 * An unordered map is used for that.
 *
 * So, there are two allocations from the system heap:
 * 1. The memory arena buffer
 * 2. The memory size map
 */
// Backing buffer obtained from the system allocator; non-NULL iff initialized.
static void* mem_buffer = NULL;
// o1heap instance living at the (aligned) start of mem_buffer.
static O1HeapInstance* heap_handle = NULL;
// Guards heap_handle, mmap and total_mallocd across threads.
static struct sc_mutex lock = {0};
// pointer -> requested allocation size, needed to implement realloc.
static cc_map(uintptr_t, size_t) mmap;
// Sum of requested sizes currently outstanding (excludes o1heap overhead).
static size_t total_mallocd = 0;
/**
 * Initialize the engine-wide arena with size_mb megabytes of usable capacity.
 * Idempotent: returns true immediately if already initialized.
 * Returns false if the system allocation, mutex init, or o1heap init fails.
 */
bool mem_arena_init(uint16_t size_mb)
{
    if (mem_buffer != NULL) return true;
    // Cast before multiplying: uint16_t promotes to int, and
    // 65535 * 1024 * 1024 overflows a 32-bit int.
    const size_t sz = (size_t) size_mb * 1024U * 1024U;
    // Over-allocate so the heap base can be rounded UP to O1HEAP_ALIGNMENT
    // while still leaving sz usable bytes.
    const size_t raw_sz = sz + O1HEAP_ALIGNMENT - 1U;
    mem_buffer = calloc(1, raw_sz);
    if (mem_buffer == NULL) return false;
    int res = sc_mutex_init(&lock);
    if (res != 0) {
        free(mem_buffer);
        mem_buffer = NULL;
        return false;
    }
    cc_init(&mmap);
    // Round the base address UP to the next multiple of O1HEAP_ALIGNMENT
    // (a power of two). The previous code added `addr % ALIGNMENT`, which
    // does not produce an aligned address; o1heapInit rejects misaligned
    // bases by returning NULL.
    const uintptr_t base = (uintptr_t) mem_buffer;
    const uintptr_t aligned =
        (base + O1HEAP_ALIGNMENT - 1U) & ~((uintptr_t) (O1HEAP_ALIGNMENT - 1U));
    heap_handle = o1heapInit((void*) aligned, sz);
    if (heap_handle == NULL) {
        cc_cleanup(&mmap);
        sc_mutex_term(&lock);
        free(mem_buffer);
        mem_buffer = NULL;
        return false;
    }
    total_mallocd = 0;
    return true;
}
/**
 * Tear down the arena and release every system-heap resource it owns.
 * Safe to call when not initialized (no-op). Always returns true.
 */
bool mem_arena_deinit(void)
{
    if (mem_buffer == NULL) {
        return true;
    }
    cc_cleanup(&mmap);
    sc_mutex_term(&lock);
    free(mem_buffer);
    // Reset all singleton state so a later mem_arena_init() starts fresh.
    mem_buffer = NULL;
    heap_handle = NULL;
    total_mallocd = 0;
    return true;
}
/**
 * Print the arena's o1heap diagnostics to stdout.
 * Prints a notice and returns if the arena is not initialized (previously
 * this dereferenced a NULL handle inside o1heapGetDiagnostics).
 */
void mem_arena_print(void)
{
    if (heap_handle == NULL) {
        printf("O1heap Memory Arena not initialized\n");
        return;
    }
    O1HeapDiagnostics diag = o1heapGetDiagnostics(heap_handle);
    printf("O1heap Memory Arena Info\n");
    printf("--------------------\n");
    printf("Capacity: %.3f MB\n", diag.capacity * 1.0 / 1024 / 1024);
    // %zu is the portable specifier for size_t; %lu breaks on LLP64 targets.
    printf("Allocated: %zu B\n", diag.allocated);
    printf("Peak allocated: %zu B\n", diag.peak_allocated);
    printf("Peak request: %zu\n", diag.peak_request_size);
    // oom_count is uint64_t, which is not 'unsigned long' everywhere.
    printf("OOM count: %llu\n", (unsigned long long) diag.oom_count);
}
/**
 * Total bytes currently handed out by the arena (sum of *requested* sizes;
 * o1heap's per-fragment overhead is not included).
 *
 * NOTE(review): reads total_mallocd without taking the lock, so the value
 * may be momentarily stale under concurrent allocation -- confirm callers
 * only use this for diagnostics.
 */
size_t mem_arena_get_allocated(void)
{
return total_mallocd;
}
/**
 * Allocate `size` bytes from the arena. Returns NULL on failure.
 * Thread-safe: the arena lock is held for the whole operation.
 */
void* mem_arena_malloc(size_t size)
{
    sc_mutex_lock(&lock);
    void* buf = o1heapAllocate(heap_handle, size);
    if (buf != NULL) {
        // The insert must NOT live inside assert(): under NDEBUG the whole
        // expression would be compiled out and the size map silently broken.
        void* inserted = cc_insert(&mmap, (uintptr_t) buf, size);
        assert(inserted != NULL);
        if (inserted == NULL) {
            // Map insert failed (system OOM): return the fragment rather than
            // handing out memory that free/realloc could no longer account for.
            o1heapFree(heap_handle, buf);
            buf = NULL;
        } else {
            total_mallocd += size;
        }
    }
    sc_mutex_unlock(&lock);
    return buf;
}
/**
 * Return a pointer previously obtained from this arena. NULL is a no-op
 * (matching free() semantics; previously NULL fell through to a map lookup
 * of key 0).
 */
void mem_arena_free(void* ptr)
{
    if (ptr == NULL) return;
    sc_mutex_lock(&lock);
    size_t* sz = cc_get(&mmap, (uintptr_t) ptr);
    assert(sz != NULL);  // freeing a pointer the arena never handed out
    if (sz == NULL) {
        // BUG FIX: the early return previously left the mutex locked,
        // deadlocking every later arena call in release builds.
        sc_mutex_unlock(&lock);
        return;
    }
    total_mallocd -= *sz;
    o1heapFree(heap_handle, ptr);
    // Side-effecting calls are kept out of assert() so NDEBUG builds still
    // erase the map entry and run the invariant check decision identically.
    bool erased = cc_erase(&mmap, (uintptr_t) ptr);
    assert(erased);
    (void) erased;
    assert(o1heapDoInvariantsHold(heap_handle));
    sc_mutex_unlock(&lock);
}
/**
 * Allocate zero-initialized storage for nmemb elements of sz bytes each.
 * Returns NULL on overflow or allocation failure (like calloc()).
 */
void* mem_arena_calloc(size_t nmemb, size_t sz)
{
    const size_t total_sz = nmemb * sz;
    // Reject nmemb*sz overflow instead of silently allocating a short buffer.
    if (sz != 0 && total_sz / sz != nmemb) return NULL;
    void* buf = mem_arena_malloc(total_sz);
    if (buf == NULL) return NULL;
    memset(buf, 0, total_sz);
    return buf;
}
void* mem_arena_realloc(void* buf, size_t new_size)
{
/**
* Simple implementation: Malloc a new memory and copy over.
* This is not friendly towards the arena, but it works.
*
*/
if (buf == NULL) {
return mem_arena_malloc(new_size);
}
size_t* old_sz = cc_get(&mmap, (uintptr_t)buf);
if (old_sz == NULL) return NULL;
void* new_buf = mem_arena_malloc(new_size);
if (new_buf == NULL) return NULL;
size_t cpy_sz = (*old_sz > new_size) ? new_size : *old_sz;
memcpy(new_buf, buf, cpy_sz);
mem_arena_free(buf);
return new_buf;
}

View File

@ -0,0 +1,9 @@
# Vendored o1heap (MIT, Pavel Kirienko): constant-time deterministic heap
# used as the arena's backing allocator. Built as a static library; its
# directory is PUBLIC so dependents can include o1heap.h.
add_library(o1heap
STATIC
o1heap.c
)
target_include_directories(o1heap
PUBLIC
${CMAKE_CURRENT_LIST_DIR}
)

View File

@ -0,0 +1,497 @@
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
// documentation files (the "Software"), to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
// and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all copies or substantial portions
// of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
// WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
// OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//
// Copyright (c) 2020 Pavel Kirienko
// Authors: Pavel Kirienko <pavel.kirienko@zubax.com>
#include "o1heap.h"
#include <assert.h>
#include <limits.h>
// ---------------------------------------- BUILD CONFIGURATION OPTIONS ----------------------------------------
/// Define this macro to include build configuration header. This is an alternative to the -D compiler flag.
/// Usage example with CMake: "-DO1HEAP_CONFIG_HEADER=\"${CMAKE_CURRENT_SOURCE_DIR}/my_o1heap_config.h\""
#ifdef O1HEAP_CONFIG_HEADER
# include O1HEAP_CONFIG_HEADER
#endif
/// The assertion macro defaults to the standard assert().
/// It can be overridden to manually suppress assertion checks or use a different error handling policy.
#ifndef O1HEAP_ASSERT
// Intentional violation of MISRA: the assertion check macro cannot be replaced with a function definition.
# define O1HEAP_ASSERT(x) assert(x) // NOSONAR
#endif
/// Allow usage of compiler intrinsics for branch annotation and CLZ.
#ifndef O1HEAP_USE_INTRINSICS
# define O1HEAP_USE_INTRINSICS 1
#endif
/// Branch probability annotations are used to improve the worst case execution time (WCET). They are entirely optional.
#if O1HEAP_USE_INTRINSICS && !defined(O1HEAP_LIKELY)
# if defined(__GNUC__) || defined(__clang__) || defined(__CC_ARM)
// Intentional violation of MISRA: branch hinting macro cannot be replaced with a function definition.
# define O1HEAP_LIKELY(x) __builtin_expect((x), 1) // NOSONAR
# endif
#endif
#ifndef O1HEAP_LIKELY
# define O1HEAP_LIKELY(x) x
#endif
/// This option is used for testing only. Do not use in production.
#ifndef O1HEAP_PRIVATE
# define O1HEAP_PRIVATE static inline
#endif
/// Count leading zeros (CLZ) is used for fast computation of binary logarithm (which needs to be done very often).
/// Most of the modern processors (including the embedded ones) implement dedicated hardware support for fast CLZ
/// computation, which is available via compiler intrinsics. The default implementation will automatically use
/// the intrinsics for some of the compilers; for others it will default to the slow software emulation,
/// which can be overridden by the user via O1HEAP_CONFIG_HEADER. The library guarantees that the argument is positive.
#if O1HEAP_USE_INTRINSICS && !defined(O1HEAP_CLZ)
# if defined(__GNUC__) || defined(__clang__) || defined(__CC_ARM)
# define O1HEAP_CLZ __builtin_clzl
# endif
#endif
#ifndef O1HEAP_CLZ
// Portable software CLZ fallback, used only when no compiler intrinsic is
// available. O(bits) loop; the argument is guaranteed positive by contract.
O1HEAP_PRIVATE uint_fast8_t O1HEAP_CLZ(const size_t x)
{
    O1HEAP_ASSERT(x > 0);
    size_t t = ((size_t) 1U) << ((sizeof(size_t) * CHAR_BIT) - 1U);
    uint_fast8_t r = 0;
    while ((x & t) == 0)
    {
        t >>= 1U;
        r++;
    }
    return r;
}
#endif
// ---------------------------------------- INTERNAL DEFINITIONS ----------------------------------------
#if !defined(__STDC_VERSION__) || (__STDC_VERSION__ < 199901L)
# error "Unsupported language: ISO C99 or a newer version is required."
#endif
#if __STDC_VERSION__ < 201112L
// Intentional violation of MISRA: static assertion macro cannot be replaced with a function definition.
# define static_assert(x, ...) typedef char _static_assert_gl(_static_assertion_, __LINE__)[(x) ? 1 : -1] // NOSONAR
# define _static_assert_gl(a, b) _static_assert_gl_impl(a, b) // NOSONAR
// Intentional violation of MISRA: the paste operator ## cannot be avoided in this context.
# define _static_assert_gl_impl(a, b) a##b // NOSONAR
#endif
/// The overhead is at most O1HEAP_ALIGNMENT bytes large,
/// then follows the user data which shall keep the next fragment aligned.
#define FRAGMENT_SIZE_MIN (O1HEAP_ALIGNMENT * 2U)
/// This is risky, handle with care: if the allocation amount plus per-fragment overhead exceeds 2**(b-1),
/// where b is the pointer bit width, then ceil(log2(amount)) yields b; then 2**b causes an integer overflow.
/// To avoid this, we put a hard limit on fragment size (which is amount + per-fragment overhead): 2**(b-1)
#define FRAGMENT_SIZE_MAX ((SIZE_MAX >> 1U) + 1U)
/// Normally we should subtract log2(FRAGMENT_SIZE_MIN) but log2 is bulky to compute using the preprocessor only.
/// We will certainly end up with unused bins this way, but it is cheap to ignore.
#define NUM_BINS_MAX (sizeof(size_t) * CHAR_BIT)
static_assert((O1HEAP_ALIGNMENT & (O1HEAP_ALIGNMENT - 1U)) == 0U, "Not a power of 2");
static_assert((FRAGMENT_SIZE_MIN & (FRAGMENT_SIZE_MIN - 1U)) == 0U, "Not a power of 2");
static_assert((FRAGMENT_SIZE_MAX & (FRAGMENT_SIZE_MAX - 1U)) == 0U, "Not a power of 2");
typedef struct Fragment Fragment;
// Per-fragment bookkeeping that must survive while the fragment is allocated:
// address-ordered neighbor links plus size and the used flag.
typedef struct FragmentHeader
{
    Fragment* next;
    Fragment* prev;
    size_t size;
    bool used;
} FragmentHeader;
static_assert(sizeof(FragmentHeader) <= O1HEAP_ALIGNMENT, "Memory layout error");
struct Fragment
{
    FragmentHeader header;
    // Everything past the header may spill over into the allocatable space. The header survives across alloc/free.
    Fragment* next_free; // Next free fragment in the bin; NULL in the last one.
    Fragment* prev_free; // Same but points back; NULL in the first one.
};
static_assert(sizeof(Fragment) <= FRAGMENT_SIZE_MIN, "Memory layout error");
// The heap instance itself lives at the start of the arena (see o1heapInit).
struct O1HeapInstance
{
    Fragment* bins[NUM_BINS_MAX]; ///< Smallest fragments are in the bin at index 0.
    size_t nonempty_bin_mask; ///< Bit 1 represents a non-empty bin; bin at index 0 is for the smallest fragments.
    O1HeapDiagnostics diagnostics;
};
/// The amount of space allocated for the heap instance.
/// Its size is padded up to O1HEAP_ALIGNMENT to ensure correct alignment of the allocation arena that follows.
#define INSTANCE_SIZE_PADDED ((sizeof(O1HeapInstance) + O1HEAP_ALIGNMENT - 1U) & ~(O1HEAP_ALIGNMENT - 1U))
static_assert(INSTANCE_SIZE_PADDED >= sizeof(O1HeapInstance), "Invalid instance footprint computation");
static_assert((INSTANCE_SIZE_PADDED % O1HEAP_ALIGNMENT) == 0U, "Invalid instance footprint computation");
/// Undefined for zero argument.
// floor(log2(x)) via CLZ. Undefined for zero argument (asserted).
O1HEAP_PRIVATE uint_fast8_t log2Floor(const size_t x)
{
    O1HEAP_ASSERT(x > 0);
    // NOLINTNEXTLINE redundant cast to the same type.
    return (uint_fast8_t) (((sizeof(x) * CHAR_BIT) - 1U) - ((uint_fast8_t) O1HEAP_CLZ(x)));
}
/// Special case: if the argument is zero, returns zero.
// ceil(log2(x)); special case: returns zero for x <= 1.
O1HEAP_PRIVATE uint_fast8_t log2Ceil(const size_t x)
{
    // NOLINTNEXTLINE redundant cast to the same type.
    return (x <= 1U) ? 0U : (uint_fast8_t) ((sizeof(x) * CHAR_BIT) - ((uint_fast8_t) O1HEAP_CLZ(x - 1U)));
}
/// Raise 2 into the specified power.
/// You might be tempted to do something like (1U << power). WRONG! We humans are prone to forgetting things.
/// If you forget to cast your 1U to size_t or ULL, you may end up with undefined behavior.
// 2**power as size_t; the explicit (size_t)1U cast avoids UB from shifting
// a plain int on platforms where size_t is wider.
O1HEAP_PRIVATE size_t pow2(const uint_fast8_t power)
{
    return ((size_t) 1U) << power;
}
/// This is equivalent to pow2(log2Ceil(x)). Undefined for x<2.
// Smallest power of two >= x, i.e. pow2(log2Ceil(x)). Undefined for x < 2.
O1HEAP_PRIVATE size_t roundUpToPowerOf2(const size_t x)
{
    O1HEAP_ASSERT(x >= 2U);
    // NOLINTNEXTLINE redundant cast to the same type.
    return ((size_t) 1U) << ((sizeof(x) * CHAR_BIT) - ((uint_fast8_t) O1HEAP_CLZ(x - 1U)));
}
/// Links two fragments so that their next/prev pointers point to each other; left goes before right.
/// Links two fragments so that their next/prev pointers point to each other; left goes before right.
/// Either argument may be NULL, in which case only the other side is updated.
O1HEAP_PRIVATE void interlink(Fragment* const left, Fragment* const right)
{
    if (O1HEAP_LIKELY(left != NULL))
    {
        left->header.next = right;
    }
    if (O1HEAP_LIKELY(right != NULL))
    {
        right->header.prev = left;
    }
}
/// Adds a new fragment into the appropriate bin and updates the lookup mask.
/// Adds a new fragment into the appropriate bin and updates the lookup mask.
/// O(1): the bin index is derived purely from the fragment size.
O1HEAP_PRIVATE void rebin(O1HeapInstance* const handle, Fragment* const fragment)
{
    O1HEAP_ASSERT(handle != NULL);
    O1HEAP_ASSERT(fragment != NULL);
    O1HEAP_ASSERT(fragment->header.size >= FRAGMENT_SIZE_MIN);
    O1HEAP_ASSERT((fragment->header.size % FRAGMENT_SIZE_MIN) == 0U);
    const uint_fast8_t idx = log2Floor(fragment->header.size / FRAGMENT_SIZE_MIN); // Round DOWN when inserting.
    O1HEAP_ASSERT(idx < NUM_BINS_MAX);
    // Add the new fragment to the beginning of the bin list.
    // I.e., each allocation will be returning the most-recently-used fragment -- good for caching.
    fragment->next_free = handle->bins[idx];
    fragment->prev_free = NULL;
    if (O1HEAP_LIKELY(handle->bins[idx] != NULL))
    {
        handle->bins[idx]->prev_free = fragment;
    }
    handle->bins[idx] = fragment;
    handle->nonempty_bin_mask |= pow2(idx);
}
/// Removes the specified fragment from its bin.
/// Removes the specified fragment from its bin.
/// Clears the bin's mask bit if the bin becomes empty.
O1HEAP_PRIVATE void unbin(O1HeapInstance* const handle, const Fragment* const fragment)
{
    O1HEAP_ASSERT(handle != NULL);
    O1HEAP_ASSERT(fragment != NULL);
    O1HEAP_ASSERT(fragment->header.size >= FRAGMENT_SIZE_MIN);
    O1HEAP_ASSERT((fragment->header.size % FRAGMENT_SIZE_MIN) == 0U);
    const uint_fast8_t idx = log2Floor(fragment->header.size / FRAGMENT_SIZE_MIN); // Round DOWN when removing.
    O1HEAP_ASSERT(idx < NUM_BINS_MAX);
    // Remove the bin from the free fragment list.
    if (O1HEAP_LIKELY(fragment->next_free != NULL))
    {
        fragment->next_free->prev_free = fragment->prev_free;
    }
    if (O1HEAP_LIKELY(fragment->prev_free != NULL))
    {
        fragment->prev_free->next_free = fragment->next_free;
    }
    // Update the bin header.
    if (O1HEAP_LIKELY(handle->bins[idx] == fragment))
    {
        O1HEAP_ASSERT(fragment->prev_free == NULL);
        handle->bins[idx] = fragment->next_free;
        if (O1HEAP_LIKELY(handle->bins[idx] == NULL))
        {
            handle->nonempty_bin_mask &= ~pow2(idx);
        }
    }
}
// ---------------------------------------- PUBLIC API IMPLEMENTATION ----------------------------------------
/// Places the O1HeapInstance at the start of `base` and turns the remainder
/// into one free root fragment. Returns NULL when `base` is NULL/misaligned
/// or `size` is too small; full contract is documented in o1heap.h.
O1HeapInstance* o1heapInit(void* const base, const size_t size)
{
    O1HeapInstance* out = NULL;
    if ((base != NULL) && ((((size_t) base) % O1HEAP_ALIGNMENT) == 0U) &&
        (size >= (INSTANCE_SIZE_PADDED + FRAGMENT_SIZE_MIN)))
    {
        // Allocate the core heap metadata structure in the beginning of the arena.
        O1HEAP_ASSERT(((size_t) base) % sizeof(O1HeapInstance*) == 0U);
        out = (O1HeapInstance*) base;
        out->nonempty_bin_mask = 0U;
        for (size_t i = 0; i < NUM_BINS_MAX; i++)
        {
            out->bins[i] = NULL;
        }
        // Limit and align the capacity.
        size_t capacity = size - INSTANCE_SIZE_PADDED;
        if (capacity > FRAGMENT_SIZE_MAX)
        {
            capacity = FRAGMENT_SIZE_MAX;
        }
        while ((capacity % FRAGMENT_SIZE_MIN) != 0)
        {
            O1HEAP_ASSERT(capacity > 0U);
            capacity--;
        }
        O1HEAP_ASSERT((capacity % FRAGMENT_SIZE_MIN) == 0);
        O1HEAP_ASSERT((capacity >= FRAGMENT_SIZE_MIN) && (capacity <= FRAGMENT_SIZE_MAX));
        // Initialize the root fragment.
        Fragment* const frag = (Fragment*) (void*) (((char*) base) + INSTANCE_SIZE_PADDED);
        O1HEAP_ASSERT((((size_t) frag) % O1HEAP_ALIGNMENT) == 0U);
        frag->header.next = NULL;
        frag->header.prev = NULL;
        frag->header.size = capacity;
        frag->header.used = false;
        frag->next_free = NULL;
        frag->prev_free = NULL;
        rebin(out, frag);
        O1HEAP_ASSERT(out->nonempty_bin_mask != 0U);
        // Initialize the diagnostics.
        out->diagnostics.capacity = capacity;
        out->diagnostics.allocated = 0U;
        out->diagnostics.peak_allocated = 0U;
        out->diagnostics.peak_request_size = 0U;
        out->diagnostics.oom_count = 0U;
    }
    return out;
}
/// Constant-time allocation: round the request up to a power-of-two fragment,
/// take the head of the smallest suitable non-empty bin, split off any
/// leftover. Returns NULL on OOM; full contract is documented in o1heap.h.
void* o1heapAllocate(O1HeapInstance* const handle, const size_t amount)
{
    O1HEAP_ASSERT(handle != NULL);
    O1HEAP_ASSERT(handle->diagnostics.capacity <= FRAGMENT_SIZE_MAX);
    void* out = NULL;
    // If the amount approaches approx. SIZE_MAX/2, an undetected integer overflow may occur.
    // To avoid that, we do not attempt allocation if the amount exceeds the hard limit.
    // We perform multiple redundant checks to account for a possible unaccounted overflow.
    if (O1HEAP_LIKELY((amount > 0U) && (amount <= (handle->diagnostics.capacity - O1HEAP_ALIGNMENT))))
    {
        // Add the header size and align the allocation size to the power of 2.
        // See "Timing-Predictable Memory Allocation In Hard Real-Time Systems", Herter, page 27.
        const size_t fragment_size = roundUpToPowerOf2(amount + O1HEAP_ALIGNMENT);
        O1HEAP_ASSERT(fragment_size <= FRAGMENT_SIZE_MAX);
        O1HEAP_ASSERT(fragment_size >= FRAGMENT_SIZE_MIN);
        O1HEAP_ASSERT(fragment_size >= amount + O1HEAP_ALIGNMENT);
        O1HEAP_ASSERT((fragment_size & (fragment_size - 1U)) == 0U); // Is power of 2.
        const uint_fast8_t optimal_bin_index = log2Ceil(fragment_size / FRAGMENT_SIZE_MIN); // Use CEIL when fetching.
        O1HEAP_ASSERT(optimal_bin_index < NUM_BINS_MAX);
        const size_t candidate_bin_mask = ~(pow2(optimal_bin_index) - 1U);
        // Find the smallest non-empty bin we can use.
        const size_t suitable_bins = handle->nonempty_bin_mask & candidate_bin_mask;
        const size_t smallest_bin_mask = suitable_bins & ~(suitable_bins - 1U); // Clear all bits but the lowest.
        if (O1HEAP_LIKELY(smallest_bin_mask != 0))
        {
            O1HEAP_ASSERT((smallest_bin_mask & (smallest_bin_mask - 1U)) == 0U); // Is power of 2.
            const uint_fast8_t bin_index = log2Floor(smallest_bin_mask);
            O1HEAP_ASSERT(bin_index >= optimal_bin_index);
            O1HEAP_ASSERT(bin_index < NUM_BINS_MAX);
            // The bin we found shall not be empty, otherwise it's a state divergence (memory corruption?).
            Fragment* const frag = handle->bins[bin_index];
            O1HEAP_ASSERT(frag != NULL);
            O1HEAP_ASSERT(frag->header.size >= fragment_size);
            O1HEAP_ASSERT((frag->header.size % FRAGMENT_SIZE_MIN) == 0U);
            O1HEAP_ASSERT(!frag->header.used);
            unbin(handle, frag);
            // Split the fragment if it is too large.
            const size_t leftover = frag->header.size - fragment_size;
            frag->header.size = fragment_size;
            O1HEAP_ASSERT(leftover < handle->diagnostics.capacity); // Overflow check.
            O1HEAP_ASSERT(leftover % FRAGMENT_SIZE_MIN == 0U); // Alignment check.
            if (O1HEAP_LIKELY(leftover >= FRAGMENT_SIZE_MIN))
            {
                Fragment* const new_frag = (Fragment*) (void*) (((char*) frag) + fragment_size);
                O1HEAP_ASSERT(((size_t) new_frag) % O1HEAP_ALIGNMENT == 0U);
                new_frag->header.size = leftover;
                new_frag->header.used = false;
                interlink(new_frag, frag->header.next);
                interlink(frag, new_frag);
                rebin(handle, new_frag);
            }
            // Update the diagnostics.
            O1HEAP_ASSERT((handle->diagnostics.allocated % FRAGMENT_SIZE_MIN) == 0U);
            handle->diagnostics.allocated += fragment_size;
            O1HEAP_ASSERT(handle->diagnostics.allocated <= handle->diagnostics.capacity);
            if (O1HEAP_LIKELY(handle->diagnostics.peak_allocated < handle->diagnostics.allocated))
            {
                handle->diagnostics.peak_allocated = handle->diagnostics.allocated;
            }
            // Finalize the fragment we just allocated.
            O1HEAP_ASSERT(frag->header.size >= amount + O1HEAP_ALIGNMENT);
            frag->header.used = true;
            // User data starts one alignment unit past the fragment header.
            out = ((char*) frag) + O1HEAP_ALIGNMENT;
        }
    }
    // Update the diagnostics.
    if (O1HEAP_LIKELY(handle->diagnostics.peak_request_size < amount))
    {
        handle->diagnostics.peak_request_size = amount;
    }
    if (O1HEAP_LIKELY((out == NULL) && (amount > 0U)))
    {
        handle->diagnostics.oom_count++;
    }
    return out;
}
/// Returns a fragment to the heap, eagerly coalescing with free neighbors
/// (constant time: at most two merges). NULL pointer is a no-op; the full
/// contract is documented in o1heap.h.
void o1heapFree(O1HeapInstance* const handle, void* const pointer)
{
    O1HEAP_ASSERT(handle != NULL);
    O1HEAP_ASSERT(handle->diagnostics.capacity <= FRAGMENT_SIZE_MAX);
    if (O1HEAP_LIKELY(pointer != NULL)) // NULL pointer is a no-op.
    {
        Fragment* const frag = (Fragment*) (void*) (((char*) pointer) - O1HEAP_ALIGNMENT);
        // Check for heap corruption in debug builds.
        O1HEAP_ASSERT(((size_t) frag) % sizeof(Fragment*) == 0U);
        O1HEAP_ASSERT(((size_t) frag) >= (((size_t) handle) + INSTANCE_SIZE_PADDED));
        O1HEAP_ASSERT(((size_t) frag) <=
                      (((size_t) handle) + INSTANCE_SIZE_PADDED + handle->diagnostics.capacity - FRAGMENT_SIZE_MIN));
        O1HEAP_ASSERT(frag->header.used); // Catch double-free
        O1HEAP_ASSERT(((size_t) frag->header.next) % sizeof(Fragment*) == 0U);
        O1HEAP_ASSERT(((size_t) frag->header.prev) % sizeof(Fragment*) == 0U);
        O1HEAP_ASSERT(frag->header.size >= FRAGMENT_SIZE_MIN);
        O1HEAP_ASSERT(frag->header.size <= handle->diagnostics.capacity);
        O1HEAP_ASSERT((frag->header.size % FRAGMENT_SIZE_MIN) == 0U);
        // Even if we're going to drop the fragment later, mark it free anyway to prevent double-free.
        frag->header.used = false;
        // Update the diagnostics. It must be done before merging because it invalidates the fragment size information.
        O1HEAP_ASSERT(handle->diagnostics.allocated >= frag->header.size); // Heap corruption check.
        handle->diagnostics.allocated -= frag->header.size;
        // Merge with siblings and insert the returned fragment into the appropriate bin and update metadata.
        Fragment* const prev = frag->header.prev;
        Fragment* const next = frag->header.next;
        const bool join_left = (prev != NULL) && (!prev->header.used);
        const bool join_right = (next != NULL) && (!next->header.used);
        if (join_left && join_right) // [ prev ][ this ][ next ] => [ ------- prev ------- ]
        {
            unbin(handle, prev);
            unbin(handle, next);
            prev->header.size += frag->header.size + next->header.size;
            frag->header.size = 0; // Invalidate the dropped fragment headers to prevent double-free.
            next->header.size = 0;
            O1HEAP_ASSERT((prev->header.size % FRAGMENT_SIZE_MIN) == 0U);
            interlink(prev, next->header.next);
            rebin(handle, prev);
        }
        else if (join_left) // [ prev ][ this ][ next ] => [ --- prev --- ][ next ]
        {
            unbin(handle, prev);
            prev->header.size += frag->header.size;
            frag->header.size = 0;
            O1HEAP_ASSERT((prev->header.size % FRAGMENT_SIZE_MIN) == 0U);
            interlink(prev, next);
            rebin(handle, prev);
        }
        else if (join_right) // [ prev ][ this ][ next ] => [ prev ][ --- this --- ]
        {
            unbin(handle, next);
            frag->header.size += next->header.size;
            next->header.size = 0;
            O1HEAP_ASSERT((frag->header.size % FRAGMENT_SIZE_MIN) == 0U);
            interlink(frag, next->header.next);
            rebin(handle, frag);
        }
        else
        {
            rebin(handle, frag);
        }
    }
}
/// Non-destructive self-check: validates the bin mask against the bin lists
/// and cross-checks the diagnostics counters. Returns false on any
/// inconsistency (indicating heap corruption or a library bug).
bool o1heapDoInvariantsHold(const O1HeapInstance* const handle)
{
    O1HEAP_ASSERT(handle != NULL);
    bool valid = true;
    // Check the bin mask consistency.
    for (size_t i = 0; i < NUM_BINS_MAX; i++) // Dear compiler, feel free to unroll this loop.
    {
        const bool mask_bit_set = (handle->nonempty_bin_mask & pow2((uint_fast8_t) i)) != 0U;
        const bool bin_nonempty = handle->bins[i] != NULL;
        valid = valid && (mask_bit_set == bin_nonempty);
    }
    // Create a local copy of the diagnostics struct.
    const O1HeapDiagnostics diag = handle->diagnostics;
    // Capacity check.
    valid = valid && (diag.capacity <= FRAGMENT_SIZE_MAX) && (diag.capacity >= FRAGMENT_SIZE_MIN) &&
            ((diag.capacity % FRAGMENT_SIZE_MIN) == 0U);
    // Allocation info check.
    valid = valid && (diag.allocated <= diag.capacity) && ((diag.allocated % FRAGMENT_SIZE_MIN) == 0U) &&
            (diag.peak_allocated <= diag.capacity) && (diag.peak_allocated >= diag.allocated) &&
            ((diag.peak_allocated % FRAGMENT_SIZE_MIN) == 0U);
    // Peak request check
    valid = valid && ((diag.peak_request_size < diag.capacity) || (diag.oom_count > 0U));
    if (diag.peak_request_size == 0U)
    {
        valid = valid && (diag.peak_allocated == 0U) && (diag.allocated == 0U) && (diag.oom_count == 0U);
    }
    else
    {
        valid = valid && // Overflow on summation is possible but safe to ignore.
                (((diag.peak_request_size + O1HEAP_ALIGNMENT) <= diag.peak_allocated) || (diag.oom_count > 0U));
    }
    return valid;
}
/// Returns the diagnostics by value so the caller gets a consistent snapshot
/// that cannot be mutated by later heap operations.
O1HeapDiagnostics o1heapGetDiagnostics(const O1HeapInstance* const handle)
{
    O1HEAP_ASSERT(handle != NULL);
    const O1HeapDiagnostics out = handle->diagnostics;
    return out;
}

View File

@ -0,0 +1,122 @@
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
// documentation files (the "Software"), to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
// and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all copies or substantial portions
// of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
// WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
// OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//
// Copyright (c) 2020 Pavel Kirienko
// Authors: Pavel Kirienko <pavel.kirienko@zubax.com>
//
// READ THE DOCUMENTATION IN README.md.
#ifndef O1HEAP_H_INCLUDED
#define O1HEAP_H_INCLUDED
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
/// The semantic version number of this distribution.
#define O1HEAP_VERSION_MAJOR 2
/// The guaranteed alignment depends on the platform pointer width.
#define O1HEAP_ALIGNMENT (sizeof(void*) * 4U)
/// The definition is private, so the user code can only operate on pointers. This is done to enforce encapsulation.
typedef struct O1HeapInstance O1HeapInstance;
/// Runtime diagnostic information. This information can be used to facilitate runtime self-testing,
/// as required by certain safety-critical development guidelines.
/// If assertion checks are not disabled, the library will perform automatic runtime self-diagnostics that trigger
/// an assertion failure if a heap corruption is detected.
/// Health checks and validation can be done with o1heapDoInvariantsHold().
// Snapshot structure returned by o1heapGetDiagnostics(); all counters are
// maintained by o1heapAllocate()/o1heapFree().
typedef struct
{
    /// The total amount of memory available for serving allocation requests (heap size).
    /// The maximum allocation size is (capacity - O1HEAP_ALIGNMENT).
    /// This parameter does not include the overhead used up by O1HeapInstance and arena alignment.
    /// This parameter is constant.
    size_t capacity;
    /// The amount of memory that is currently allocated, including the per-fragment overhead and size alignment.
    /// For example, if the application requested a fragment of size 1 byte, the value reported here may be 32 bytes.
    size_t allocated;
    /// The maximum value of 'allocated' seen since initialization. This parameter is never decreased.
    size_t peak_allocated;
    /// The largest amount of memory that the allocator has attempted to allocate (perhaps unsuccessfully)
    /// since initialization (not including the rounding and the allocator's own per-fragment overhead,
    /// so the total is larger). This parameter is never decreased. The initial value is zero.
    size_t peak_request_size;
    /// The number of times an allocation request could not be completed due to the lack of memory or
    /// excessive fragmentation. OOM stands for "out of memory". This parameter is never decreased.
    uint64_t oom_count;
} O1HeapDiagnostics;
/// The arena base pointer shall be aligned at O1HEAP_ALIGNMENT, otherwise NULL is returned.
///
/// The total heap capacity cannot exceed approx. (SIZE_MAX/2). If the arena size allows for a larger heap,
/// the excess will be silently truncated away (no error). This is not a realistic use case because a typical
/// application is unlikely to be able to dedicate that much of the address space for the heap.
///
/// The function initializes a new heap instance allocated in the provided arena, taking some of its space for its
/// own needs (normally about 40..600 bytes depending on the architecture, but this parameter is not characterized).
/// A pointer to the newly initialized instance is returned.
///
/// If the provided space is insufficient, NULL is returned.
///
/// An initialized instance does not hold any resources. Therefore, if the instance is no longer needed,
/// it can be discarded without any de-initialization procedures.
///
/// The heap is not thread-safe; external synchronization may be required.
O1HeapInstance* o1heapInit(void* const base, const size_t size);
/// The semantics follows malloc() with additional guarantees the full list of which is provided below.
///
/// If the allocation request is served successfully, a pointer to the newly allocated memory fragment is returned.
/// The returned pointer is guaranteed to be aligned at O1HEAP_ALIGNMENT.
///
/// If the allocation request cannot be served due to the lack of memory or its excessive fragmentation,
/// a NULL pointer is returned.
///
/// The function is executed in constant time.
/// The allocated memory is NOT zero-filled (because zero-filling is a variable-complexity operation).
void* o1heapAllocate(O1HeapInstance* const handle, const size_t amount);
/// The semantics follows free() with additional guarantees the full list of which is provided below.
///
/// If the pointer does not point to a previously allocated block and is not NULL, the behavior is undefined.
/// Builds where assertion checks are enabled may trigger an assertion failure for some invalid inputs.
///
/// The function is executed in constant time.
void o1heapFree(O1HeapInstance* const handle, void* const pointer);
/// Performs a basic sanity check on the heap.
/// This function can be used as a weak but fast method of heap corruption detection.
/// If the handle pointer is NULL, the behavior is undefined.
/// The time complexity is constant.
/// The return value is truth if the heap looks valid, falsity otherwise.
bool o1heapDoInvariantsHold(const O1HeapInstance* const handle);
/// Samples and returns a copy of the diagnostic information, see O1HeapDiagnostics.
/// This function merely copies the structure from an internal storage, so it is fast to return.
/// If the handle pointer is NULL, the behavior is undefined.
O1HeapDiagnostics o1heapGetDiagnostics(const O1HeapInstance* const handle);
#ifdef __cplusplus
}
#endif
#endif // O1HEAP_H_INCLUDED

View File

@ -0,0 +1,5 @@
/* Engine-wide wrapper around the `cc` container library.
 * Routes all cc allocations through the engine memory arena and
 * disables cc's short (un-prefixed) names to avoid collisions.
 * Guard added: this header may be included from several engine headers. */
#ifndef ENGINE_CC_WRAPPER_H
#define ENGINE_CC_WRAPPER_H

#include "internal/memory_arena.h"

/* cc.h picks these hooks up when they are defined before inclusion
 * (same pattern as sc_config.h for sc_queue). */
#define CC_REALLOC mem_arena_realloc
#define CC_FREE mem_arena_free
#define CC_NO_SHORT_NAMES
#include "cc.h"

#endif /* ENGINE_CC_WRAPPER_H */

View File

@ -0,0 +1,17 @@
#ifndef MEMORY_ARENA_H
#define MEMORY_ARENA_H

#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>

/* Global engine memory arena: a fixed-size heap created once at startup.
 * The API is handle-less; all functions operate on one process-wide arena. */

/* Create the arena with a capacity of `size_mb` megabytes.
 * Returns true on success, false on failure. */
bool mem_arena_init(uint16_t size_mb);

/* Destroy the arena created by mem_arena_init().
 * Returns true on success, false on failure. */
bool mem_arena_deinit(void);

/* Print arena diagnostics (presumably to stdout) for debugging.
 * NOTE(review): `()` meant "unspecified parameters" in C; declared
 * as `(void)` to match the other prototypes. */
void mem_arena_print(void);

/* Number of bytes currently allocated from the arena. */
size_t mem_arena_get_allocated(void);

/* malloc/calloc/realloc/free analogues backed by the arena. */
void* mem_arena_malloc(size_t size);
void* mem_arena_calloc(size_t nmemb, size_t sz);
void* mem_arena_realloc(void* buf, size_t new_size);
void mem_arena_free(void* buf);

#endif // MEMORY_ARENA_H

View File

@ -0,0 +1,44 @@
# Build configuration for the sc data-structure collection.
# Only the modules the engine actually uses are enabled; upstream sc ships
# many more (array, buffer, heap, map, ...) which can be re-added as
# subdirectories when needed.
cmake_minimum_required(VERSION 3.5.1)
project(sc_lib C)

include(CTest)
include(CheckCCompilerFlag)

# Default to an optimized build when the user did not pick one.
if (NOT CMAKE_BUILD_TYPE)
    set(CMAKE_BUILD_TYPE "Release")
endif ()
message(STATUS "Build type ${CMAKE_BUILD_TYPE}")

# Location of the engine-provided sc_config.h override headers.
set(SC_CONF_DIR
    ${CMAKE_CURRENT_LIST_DIR}/conf
)

# Compile definition that makes sc headers include sc_config.h.
set(SC_CONF_FLAG
    SC_HAVE_CONFIG_H
)

# Allocator library every enabled sc module links against.
set(MEM_ARENA_LIB
    mem_arena
)

add_subdirectory(queue)
View File

@ -0,0 +1,3 @@
/* Engine override configuration for sc_queue (pulled in by sc_queue.h
 * when SC_HAVE_CONFIG_H is defined): route queue allocations through
 * the engine memory arena instead of the C runtime heap. */
#ifndef SC_CONFIG_H
#define SC_CONFIG_H

#include "memory_arena.h"

#define sc_queue_calloc mem_arena_calloc
#define sc_queue_free mem_arena_free

#endif /* SC_CONFIG_H */

View File

@ -0,0 +1,28 @@
# sc_queue is header-only (macros in sc_queue.h), so model it as an
# INTERFACE target rather than a STATIC library with a forced linker
# language. Consumers inherit the include paths, the SC_HAVE_CONFIG_H
# definition, and the arena allocator dependency as usage requirements.
cmake_minimum_required(VERSION 3.5.1)
project(sc_queue C)

set(CMAKE_C_STANDARD 99)
set(CMAKE_C_STANDARD_REQUIRED ON)
set(CMAKE_C_EXTENSIONS OFF)

add_library(sc_queue INTERFACE)

target_compile_definitions(sc_queue
    INTERFACE
    ${SC_CONF_FLAG}
)
target_include_directories(sc_queue
    INTERFACE
    ${CMAKE_CURRENT_LIST_DIR}
    ${CMAKE_CURRENT_LIST_DIR}/../conf
)
target_link_libraries(sc_queue
    INTERFACE
    ${MEM_ARENA_LIB}
)

View File

@ -0,0 +1,45 @@
### Generic queue
### Overview
- Queue implementation which grows when you add elements.
- Add/remove from head/tail is possible, so it can be used as a list, stack,
  queue, deque, etc.
- It comes with predefined types; check them out at the end of sc_queue.h.
  You can add more there with sc_queue_def if you need them.
### Usage
```c
#include "sc_queue.h"
#include <stdio.h>
int main(int argc, char *argv[])
{
int elem;
struct sc_queue_int queue;
sc_queue_init(&queue);
sc_queue_add_last(&queue, 2);
sc_queue_add_last(&queue, 3);
sc_queue_add_last(&queue, 4);
sc_queue_add_first(&queue, 1);
sc_queue_foreach (&queue, elem) {
printf("elem = [%d] \n", elem);
}
elem = sc_queue_del_last(&queue);
printf("Last element was : [%d] \n", elem);
elem = sc_queue_del_first(&queue);
printf("First element was : [%d] \n", elem);
sc_queue_term(&queue);
return 0;
}
```

View File

@ -0,0 +1,287 @@
/*
* BSD-3-Clause
*
* Copyright 2021 Ozan Tezcan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef SC_QUEUE_H
#define SC_QUEUE_H
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#define SC_QUEUE_VERSION "2.0.0"
/* Allocator hooks: when SC_HAVE_CONFIG_H is defined the build supplies its
 * own sc_config.h (the engine routes these to its memory arena); otherwise
 * the C runtime heap is used. */
#ifdef SC_HAVE_CONFIG_H
#include "sc_config.h"
#else
#define sc_queue_calloc calloc
#define sc_queue_free free
#endif
/* Upper bound for capacity growth; checked in sc_queue_expand(). */
#ifndef SC_QUEUE_MAX
#define SC_QUEUE_MAX (SIZE_MAX)
#endif
#define sc_queue_def(T, name) \
struct sc_queue_##name { \
bool oom; \
size_t cap; \
size_t first; \
size_t last; \
/* NOLINTNEXTLINE */ \
T *elems; \
}
#define sc_queue_expand(q) \
do { \
size_t _cap, _len, _off; \
size_t _pos = ((q)->last + 1) & ((q)->cap - 1); \
void *_dst, *_src; \
\
if (_pos == (q)->first) { \
if ((q)->cap > SC_QUEUE_MAX / 2ul) { \
(q)->oom = true; \
break; \
} \
_cap = (q)->cap * 2; \
_dst = sc_queue_calloc(_cap, sizeof(*((q)->elems))); \
if (_dst == NULL) { \
(q)->oom = true; \
break; \
} \
_len = ((q)->cap - (q)->first) * sizeof(*(q)->elems); \
_off = ((q)->first * sizeof(*((q)->elems))); \
_src = ((char *) (q)->elems) + _off; \
\
memcpy(_dst, _src, _len); \
memcpy(((char *) _dst) + _len, (q)->elems, _off); \
(q)->oom = false; \
(q)->last = (q)->cap - 1; \
(q)->first = 0; \
(q)->cap = _cap; \
sc_queue_free((q)->elems); \
(q)->elems = _dst; \
} \
} while (0)
/**
* Init queue. Call sc_queue_oom(q) to see if memory allocation succeeded.
* @param q queue
*/
#define sc_queue_init(q) \
do { \
(q)->oom = false; \
(q)->cap = 8; \
(q)->first = 0; \
(q)->last = 0; \
(q)->elems = sc_queue_calloc(1, sizeof(*(q)->elems) * 8); \
if ((q)->elems == NULL) { \
(q)->oom = true; \
} \
} while (0)
/**
* Term queue
* @param q queue
*/
#define sc_queue_term(q) \
do { \
sc_queue_free((q)->elems); \
(q)->elems = NULL; \
(q)->cap = 0; \
(q)->first = 0; \
(q)->last = 0; \
(q)->oom = false; \
} while (0)
/**
* @param q queue
* @return true if last add operation failed, false otherwise.
*/
#define sc_queue_oom(q) ((q)->oom)
/**
* @param q queue
* @return element count
*/
#define sc_queue_size(q) (((q)->last - (q)->first) & ((q)->cap - 1))
/**
* Clear the queue without deallocating underlying memory.
* @param q queue
*/
#define sc_queue_clear(q) \
do { \
(q)->first = 0; \
(q)->last = 0; \
(q)->oom = false; \
} while (0)
/**
* @param q queue
* @return true if queue is empty
*/
#define sc_queue_empty(q) (((q)->last == (q)->first))
/**
* @param q queue
* @return index of the first element. If queue is empty, result is undefined.
*/
#define sc_queue_first(q) ((q)->first)
/**
* @param q queue
* @return index of the last element. If queue is empty, result is undefined.
*/
#define sc_queue_last(q) ((q)->last)
/**
* @param q queue
* @param i index
* @return index of the next element after i, if i is the last element,
* result is undefined.
*/
#define sc_queue_next(q, i) (((i) + 1) & ((q)->cap - 1))
/**
* Returns element at index 'i', so regular loops are possible :
*
* for (size_t i = 0; i < sc_queue_size(q); i++) {
* printf("%d" \n, sc_queue_at(q, i));
* }
*
* @param q queue
* @return element at index i
*/
#define sc_queue_at(q, i) (q)->elems[(((q)->first) + (i)) & ((q)->cap - 1)]
/**
* @param q queue
* @return peek first element, if queue is empty, result is undefined
*/
#define sc_queue_peek_first(q) ((q)->elems[(q)->first])
/**
* @param q queue
* @return peek last element, if queue is empty, result is undefined
*/
#define sc_queue_peek_last(q) (q)->elems[((q)->last - 1) & ((q)->cap - 1)]
/**
* Call sc_queue_oom(q) after this function to check out of memory condition.
*
* @param q queue
* @param elem elem to be added at the end of the list
*/
#define sc_queue_add_last(q, elem) \
do { \
sc_queue_expand(q); \
if ((q)->oom) { \
break; \
} \
(q)->oom = false; \
(q)->elems[(q)->last] = elem; \
(q)->last = ((q)->last + 1) & ((q)->cap - 1); \
} while (0)
/**
* @param q queue
* @return delete the last element from the queue and return its value.
* If queue is empty, result is undefined.
*/
#define sc_queue_del_last(q) \
((q)->elems[((q)->last = ((q)->last - 1) & ((q)->cap - 1))])
/**
* Call sc_queue_oom(q) after this function to check out of memory condition.
*
* @param q queue.
* @param elem elem to be added at the head of the list.
*/
#define sc_queue_add_first(q, elem) \
do { \
sc_queue_expand(q); \
if ((q)->oom) { \
break; \
} \
(q)->oom = false; \
(q)->first = ((q)->first - 1) & ((q)->cap - 1); \
(q)->elems[(q)->first] = elem; \
} while (0)
/* Post-increment the queue's first index, wrapping at `cap`
 * (cap must be a power of two). Returns the index as it was
 * before the increment. */
static inline size_t sc_queue_inc_first(size_t *first, size_t cap)
{
	const size_t prev = *first;

	*first = (prev + 1) & (cap - 1);

	return prev;
}
/**
* @param q queue
* @return delete the first element from the queue and return its value.
* If queue is empty, result is undefined.
*/
#define sc_queue_del_first(q) \
(q)->elems[sc_queue_inc_first(&(q)->first, (q)->cap)]
/**
* For each loop,
*
* int *queue;
* sc_queue_create(queue, 4);"
*
* int elem;
* sc_queue_foreach(queue, elem) {
* printf("Elem : %d \n, elem);
* }
*/
#define sc_queue_foreach(q, elem) \
for (size_t _k = 1, _i = sc_queue_first(q); \
_k && _i != sc_queue_last(q); \
_k = !_k, _i = sc_queue_next(q, _i)) \
for ((elem) = (q)->elems[_i]; _k; _k = !_k)
// (type, name)
sc_queue_def(int, int);
sc_queue_def(unsigned int, uint);
sc_queue_def(long, long);
sc_queue_def(unsigned long, ulong);
sc_queue_def(unsigned long long, ull);
sc_queue_def(uint32_t, 32);
sc_queue_def(uint64_t, 64);
sc_queue_def(double, double);
sc_queue_def(const char *, str);
sc_queue_def(void *, ptr);
#endif

View File

@ -0,0 +1,33 @@
# Build configuration for extra sc utility modules (non data-structure).
# Only the mutex module is enabled; other upstream sc modules can be
# re-added as subdirectories when the engine needs them.
cmake_minimum_required(VERSION 3.5.1)
project(sc_extra C)

include(CTest)
include(CheckCCompilerFlag)

# Default to an optimized build when the user did not pick one.
if (NOT CMAKE_BUILD_TYPE)
    set(CMAKE_BUILD_TYPE "Release")
endif ()
message(STATUS "Build type ${CMAKE_BUILD_TYPE}")

add_subdirectory(mutex)

View File

@ -0,0 +1,112 @@
# Upstream sc_mutex build script. Builds the mutex wrapper as a shared
# library and, when SC_BUILD_TEST is set, wires up its unit test with
# optional clang-tidy, linker --wrap fault injection, sanitizers,
# valgrind, and lcov coverage targets.
cmake_minimum_required(VERSION 3.5.1)
project(sc_mutex C)

set(CMAKE_C_STANDARD 99)
set(CMAKE_C_STANDARD_REQUIRED ON)
set(CMAKE_C_EXTENSIONS OFF)

add_library(
    sc_mutex SHARED
    sc_mutex.c
    sc_mutex.h)

target_include_directories(sc_mutex PUBLIC ${CMAKE_CURRENT_LIST_DIR})

# Strict warnings-as-errors everywhere except MSVC; -pthread is needed
# for the Posix backend.
if (NOT CMAKE_C_COMPILER_ID MATCHES "MSVC")
    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g -Wall -Wextra -pedantic -Werror -pthread")
endif ()

# --------------------------------------------------------------------------- #
# --------------------- Test Configuration Start ---------------------------- #
# --------------------------------------------------------------------------- #
if (SC_BUILD_TEST)
    include(CTest)
    include(CheckCCompilerFlag)

    # Optional static analysis restricted to the library's own files.
    if (SC_CLANG_TIDY)
        message(STATUS "Enabled CLANG_TIDY")
        set(CMAKE_C_CLANG_TIDY
            clang-tidy;
            -line-filter=[{"name":"${PROJECT_NAME}.h"},{"name":"${PROJECT_NAME}.c"}];
            -checks=clang-analyzer-*,misc-*,portability-*,bugprone-*,-bugprone-reserved-identifier*;
            -warnings-as-errors=clang-analyzer-*,misc-*,portability-*,bugprone-*;)
    endif ()

    enable_testing()

    add_executable(${PROJECT_NAME}_test mutex_test.c sc_mutex.c)
    target_compile_options(${PROJECT_NAME}_test PRIVATE -DSC_SIZE_MAX=1400000ul)

    # Wrap pthread entry points with the linker's --wrap so the test can
    # inject failures (Linux + GCC/Clang only).
    if (${CMAKE_SYSTEM_NAME} STREQUAL "Linux" AND SC_USE_WRAP)
        if ("${CMAKE_C_COMPILER_ID}" STREQUAL "Clang" OR
            "${CMAKE_C_COMPILER_ID}" STREQUAL "GNU")
            target_compile_options(${PROJECT_NAME}_test PRIVATE -DSC_HAVE_WRAP)
            target_compile_options(${PROJECT_NAME}_test PRIVATE -fno-builtin)
            target_link_options(${PROJECT_NAME}_test PRIVATE
                -Wl,--wrap=pthread_mutexattr_init -Wl,--wrap=pthread_mutex_destroy
                -Wl,--wrap=pthread_mutex_init)
        endif ()
    endif ()

    # Optional sanitizer instrumentation, e.g. -DSANITIZER=address.
    if ("${CMAKE_C_COMPILER_ID}" STREQUAL "Clang" OR
        "${CMAKE_C_COMPILER_ID}" STREQUAL "AppleClang" OR
        "${CMAKE_C_COMPILER_ID}" STREQUAL "GNU")
        target_compile_options(${PROJECT_NAME}_test PRIVATE -fno-omit-frame-pointer)
        if (SANITIZER)
            target_compile_options(${PROJECT_NAME}_test PRIVATE -fsanitize=${SANITIZER})
            target_link_options(${PROJECT_NAME}_test PRIVATE -fsanitize=${SANITIZER})
        endif ()
    endif ()

    add_test(NAME ${PROJECT_NAME}_test COMMAND ${PROJECT_NAME}_test)

    # Convenience targets: run the tests under valgrind, or plainly.
    set(MEMORYCHECK_COMMAND_OPTIONS
        "-q --log-fd=2 --trace-children=yes --track-origins=yes \
         --leak-check=full --show-leak-kinds=all \
         --error-exitcode=255")
    add_custom_target(valgrind_${PROJECT_NAME} ${CMAKE_COMMAND}
        -E env CTEST_OUTPUT_ON_FAILURE=1
        ${CMAKE_CTEST_COMMAND} -C $<CONFIG>
        --overwrite MemoryCheckCommandOptions=${MEMORYCHECK_COMMAND_OPTIONS}
        --verbose -T memcheck WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
    add_custom_target(check_${PROJECT_NAME} ${CMAKE_COMMAND}
        -E env CTEST_OUTPUT_ON_FAILURE=1
        ${CMAKE_CTEST_COMMAND} -C $<CONFIG> --verbose
        WORKING_DIRECTORY ${CMAKE_BINARY_DIR})

    # ------------------------- Code Coverage Start ----------------------------- #
    if (${CMAKE_BUILD_TYPE} MATCHES "Coverage")
        if ("${CMAKE_C_COMPILER_ID}" STREQUAL "GNU")
            target_compile_options(${PROJECT_NAME}_test PRIVATE --coverage)
            target_link_libraries(${PROJECT_NAME}_test gcov)
        else ()
            message(FATAL_ERROR "Only GCC is supported for coverage")
        endif ()
    endif ()

    add_custom_target(coverage_${PROJECT_NAME})
    add_custom_command(
        TARGET coverage_${PROJECT_NAME}
        COMMAND lcov --capture --directory .
        --output-file coverage.info --rc lcov_branch_coverage=1 --rc lcov_excl_br_line='assert'
        COMMAND lcov --remove coverage.info '/usr/*' '*example*' '*test*'
        --output-file coverage.info --rc lcov_branch_coverage=1 --rc lcov_excl_br_line='assert'
        COMMAND lcov --list coverage.info --rc lcov_branch_coverage=1 --rc lcov_excl_br_line='assert'
    )
    add_dependencies(coverage_${PROJECT_NAME} check_${PROJECT_NAME})
    # -------------------------- Code Coverage End ------------------------------ #
endif ()
# ----------------------- Test Configuration End ---------------------------- #

View File

@ -0,0 +1,25 @@
### Mutex
- A basic mutex wrapper for POSIX and Windows.
```c
#include "sc_mutex.h"
int main(int argc, char *argv[])
{
struct sc_mutex mutex;
sc_mutex_init(&mutex); // Init mutex
sc_mutex_lock(&mutex);
sc_mutex_unlock(&mutex);
sc_mutex_term(&mutex); // destroy mutex
return 0;
}
```

View File

@ -0,0 +1,122 @@
/*
* BSD-3-Clause
*
* Copyright 2021 Ozan Tezcan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _XOPEN_SOURCE
#define _XOPEN_SOURCE 700
#endif
#include "sc_mutex.h"
#include <assert.h>
#if defined(_WIN32) || defined(_WIN64)
int sc_mutex_init(struct sc_mutex *mtx)
{
InitializeCriticalSection(&mtx->mtx);
return 0;
}
int sc_mutex_term(struct sc_mutex *mtx)
{
DeleteCriticalSection(&mtx->mtx);
return 0;
}
void sc_mutex_lock(struct sc_mutex *mtx)
{
EnterCriticalSection(&mtx->mtx);
}
void sc_mutex_unlock(struct sc_mutex *mtx)
{
LeaveCriticalSection(&mtx->mtx);
}
#else
/* Initialize *mtx as a non-recursive (PTHREAD_MUTEX_NORMAL) mutex.
 * Returns 0 on success, -1 on failure (attr/mutex init may fail on OOM). */
int sc_mutex_init(struct sc_mutex *mtx)
{
	int rc, rv;
	pthread_mutexattr_t attr;
	pthread_mutex_t mut = PTHREAD_MUTEX_INITIALIZER;
	/* Copy the static initializer first so *mtx is in a defined state
	 * even if pthread_mutex_init() fails below. */
	mtx->mtx = mut;
	// May fail on OOM
	rc = pthread_mutexattr_init(&attr);
	if (rc != 0) {
		return -1;
	}
	// This won't fail as long as we pass correct params.
	rc = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_NORMAL);
	assert(rc == 0);
	// May fail on OOM
	rc = pthread_mutex_init(&mtx->mtx, &attr);
	// This won't fail as long as we pass correct param.
	rv = pthread_mutexattr_destroy(&attr);
	assert(rv == 0);
	(void) rv;
	return rc != 0 ? -1 : 0;
}
int sc_mutex_term(struct sc_mutex *mtx)
{
int rc;
rc = pthread_mutex_destroy(&mtx->mtx);
return rc != 0 ? -1 : 0;
}
void sc_mutex_lock(struct sc_mutex *mtx)
{
int rc;
// This won't fail as long as we pass correct param.
rc = pthread_mutex_lock(&mtx->mtx);
assert(rc == 0);
(void) rc;
}
void sc_mutex_unlock(struct sc_mutex *mtx)
{
int rc;
// This won't fail as long as we pass correct param.
rc = pthread_mutex_unlock(&mtx->mtx);
assert(rc == 0);
(void) rc;
}
#endif

View File

@ -0,0 +1,81 @@
/*
* BSD-3-Clause
*
* Copyright 2021 Ozan Tezcan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef SC_MUTEX_H
#define SC_MUTEX_H
#define SC_MUTEX_VERSION "2.0.0"
#if defined(_WIN32) || defined(_WIN64)
#include <windows.h>
#else
#include <pthread.h>
#endif
struct sc_mutex {
#if defined(_WIN32) || defined(_WIN64)
CRITICAL_SECTION mtx;
#else
pthread_mutex_t mtx;
#endif
};
/**
* Create mutex.
*
* Be warned on Windows, mutexes are recursive, on Posix default
* mutex type is not recursive. Edit code if that bothers you. Pass
* PTHREAD_MUTEX_RECURSIVE instead of PTHREAD_MUTEX_NORMAL.
*
* @param mtx mtx
* @return '0' on success, '-1' on error.
*/
int sc_mutex_init(struct sc_mutex *mtx);
/**
* Destroy mutex
*
* @param mtx mtx
* @return '0' on success, '-1' on error.
*/
int sc_mutex_term(struct sc_mutex *mtx);
/**
* @param mtx mtx
*/
void sc_mutex_lock(struct sc_mutex *mtx);
/**
* @param mtx mtx
*/
void sc_mutex_unlock(struct sc_mutex *mtx);
#endif

View File

@ -0,0 +1,24 @@
/* Compile-time capacity limits for engine subsystems.
 * NOTE(review): guard renamed from _ENGINE_CONF_H — identifiers starting
 * with an underscore followed by an uppercase letter are reserved in C. */
#ifndef ENGINE_CONF_H
#define ENGINE_CONF_H

// Take care tuning these params. Web build doesn't work
// if memory used too high

/* Rendering */
#define MAX_SCENES_TO_RENDER 8
#define MAX_RENDER_LAYERS 4
#define MAX_RENDERMANAGER_DEPTH 4

/* Entities and assets */
#define MAX_ENTITIES 2047
#define MAX_TEXTURES 16
#define MAX_SPRITES 127
#define MAX_SOUNDS 32
#define MAX_FONTS 4
#define N_SFX 32

/* Particle system */
#define MAX_EMITTER_CONF 8
#define MAX_ACTIVE_PARTICLE_EMITTER 255
#define MAX_PARTICLES 32

/* ECS tags and components; component pools are sized to the entity cap. */
#define N_TAGS 10
#define N_COMPONENTS 20
#define MAX_COMP_POOL_SIZE MAX_ENTITIES

#endif // ENGINE_CONF_H

20
engine/memory.c 100644
View File

@ -0,0 +1,20 @@
#include "memory.h"

/* Engine component-pool memory system.
 *
 * Each component type owns a memoryPool whose `free_set` (an ordered set
 * of uint32_t indices — presumably the free slot indices of that pool;
 * confirm against the allocator once it lands) lives in the
 * externally-defined `mem_impl` table. */

/* Initialize the free-index set of every component memory pool.
 * `mem_impl.comp_mempools` must already point at `n_components` pools. */
void init_memory_system(void) {
    for (uint32_t i = 0; i < mem_impl.n_components; ++i) {
        cc_init(&mem_impl.comp_mempools[i].free_set);
    }
}

/* Release the free-index set of every component memory pool. */
void free_memory_system(void) {
    for (uint32_t i = 0; i < mem_impl.n_components; ++i) {
        cc_cleanup(&mem_impl.comp_mempools[i].free_set);
    }
}

22
engine/memory.h 100644
View File

@ -0,0 +1,22 @@
#ifndef ENGINE_MEMORY_H
#define ENGINE_MEMORY_H

#include <stdint.h>
#define CC_NO_SHORT_NAMES
#include "cc.h"

/* Per-component-type pool bookkeeping.
 * free_set is an ordered set of uint32_t indices (presumably the free
 * slot indices of the pool — TODO confirm against the allocator). */
typedef struct memoryPool {
    cc_oset( uint32_t ) free_set;
} memoryPool;

/* Initialize / tear down the free sets of every pool in mem_impl. */
void init_memory_system(void);
void free_memory_system(void);

/* Global pool table: `comp_mempools` points at `n_components` pools,
 * one per component type. Defined elsewhere in the engine. */
extern struct memoryImpl {
    memoryPool* comp_mempools;
    uint32_t n_components;
} mem_impl;

#endif // ENGINE_MEMORY_H

View File

@ -0,0 +1 @@
add_subdirectory(base)

View File

@ -0,0 +1,2 @@
add_subdirectory(manual)
add_subdirectory(unit)

View File

@ -0,0 +1,16 @@
# Manual (run-by-hand) test executables, placed under bin/manual in the
# build tree and not registered with CTest.

# Adds a manual test executable `name` built from `src`, linked PRIVATE
# against any libraries passed after `src`.
function(engine_add_manual_test name src)
    add_executable(${name} ${src})
    target_link_libraries(${name} PRIVATE ${ARGN})
    set_target_properties(${name}
        PROPERTIES
        RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin/manual)
endfunction()

engine_add_manual_test(memArenaTest mem_arena_test.c mem_arena sc_queue)
engine_add_manual_test(ccTest cc_test.c base)

View File

@ -0,0 +1,27 @@
#include <stdio.h>
#include "base.h"
#include <assert.h>

/* Manual smoke test: exercise a cc map backed by the engine memory
 * arena, printing arena diagnostics before and after each phase. */
int main(void) {
    assert(mem_arena_init(4)); /* 4 MB arena */
    mem_arena_print();

    cc_map( int, int ) int_map;
    cc_init(&int_map);
    cc_reserve(&int_map, 2048);

    /* Same key/value pairs as before, now table-driven. */
    const int keys[] = { 1, 2, 7, 4 };
    const int vals[] = { 4, 7, 1, 8 };
    for (size_t i = 0; i < sizeof(keys) / sizeof(keys[0]); ++i) {
        cc_insert(&int_map, keys[i], vals[i]);
    }
    mem_arena_print();

    cc_cleanup(&int_map);
    mem_arena_print();

    mem_arena_deinit();
    return 0;
}

View File

@ -0,0 +1,29 @@
#include <stdio.h>
#include "memory_arena.h"
#include "sc_queue.h"
#include <assert.h>

/* Manual smoke test: a raw arena malloc/free round-trip followed by an
 * sc_queue session, with arena diagnostics printed at every step. */
int main(void) {
    assert(mem_arena_init(4));
    mem_arena_print();

    /* Raw allocation round-trip. */
    void* block = mem_arena_malloc(64);
    mem_arena_print();
    mem_arena_free(block);
    mem_arena_print();

    /* Queue allocations are routed through the arena as well. */
    struct sc_queue_32 queue;
    sc_queue_init(&queue);
    for (uint32_t value = 1; value <= 3; ++value) {
        sc_queue_add_last(&queue, value);
    }
    mem_arena_print();
    sc_queue_term(&queue);
    mem_arena_print();

    mem_arena_deinit();
    return 0;
}

View File

@ -0,0 +1,11 @@
# cmocka-based unit tests, registered with CTest; binaries go to bin/unit.
add_executable(MemArenaUnitTest mem_arena_unit.c)
target_link_libraries(MemArenaUnitTest PRIVATE
    cmocka
    mem_arena
)
set_target_properties(MemArenaUnitTest
    PROPERTIES
    RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin/unit)
add_test(NAME MemArenaUnitTest COMMAND MemArenaUnitTest)

View File

@ -0,0 +1,54 @@
#include "memory_arena.h"
#include <stdio.h>
#include <stdarg.h>
#include <stddef.h>
#include <stdint.h>
#include <setjmp.h>
#include <cmocka.h>

/**
 * Memory arena Test
 *
 * At this level, the function should behave similarly to the standard lib's
 * malloc, calloc, realloc, and free
 */

/* Per-test setup: create an 8 MB arena. Returning non-zero tells cmocka
 * the fixture failed, so the test is not run against a dead arena
 * (previously the bool result of mem_arena_init() was ignored). */
static int setup_mem_arena(void** state)
{
    (void)state;
    // Test with 8 MB
    if (!mem_arena_init(8)) {
        return -1;
    }
    return 0;
}

/* Per-test teardown: destroy the arena created by setup_mem_arena(). */
static int teardown_mem_arena(void** state)
{
    (void)state;
    mem_arena_deinit();
    return 0;
}

/* A single malloc/free round-trip must be reflected exactly in the
 * arena's allocated-byte counter. */
static void test_simple_malloc(void **state)
{
    (void)state;
    void* buf = mem_arena_malloc(16);
    assert_non_null(buf);
    assert_int_equal(mem_arena_get_allocated(), 16);
    mem_arena_print();
    mem_arena_free(buf);
    assert_int_equal(mem_arena_get_allocated(), 0);
}

int main(void)
{
    const struct CMUnitTest tests[] = {
        cmocka_unit_test_setup_teardown(test_simple_malloc, setup_mem_arena, teardown_mem_arena),
    };
    return cmocka_run_group_tests(tests, NULL, NULL);
}