diff --git a/.gitignore b/.gitignore index 1d14dff9aa..92909c9f84 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,5 @@ +.* +!.gitignore .cache .clangd diff --git a/core/iwasm/common/wasm_memory.c b/core/iwasm/common/wasm_memory.c index 628a032370..3d1f148118 100644 --- a/core/iwasm/common/wasm_memory.c +++ b/core/iwasm/common/wasm_memory.c @@ -1030,6 +1030,24 @@ wasm_runtime_free_internal(void *ptr) } } +static inline void * +wasm_runtime_aligned_alloc_internal(unsigned int size, unsigned int alignment) +{ + if (memory_mode == MEMORY_MODE_UNKNOWN) { + LOG_ERROR("wasm_runtime_aligned_alloc failed: memory hasn't been " + "initialized.\n"); + return NULL; + } + + if (memory_mode != MEMORY_MODE_POOL) { + LOG_ERROR("wasm_runtime_aligned_alloc failed: only supported in POOL " + "memory mode.\n"); + return NULL; + } + + return mem_allocator_malloc_aligned(pool_allocator, size, alignment); +} + void * wasm_runtime_malloc(unsigned int size) { @@ -1052,6 +1070,35 @@ wasm_runtime_malloc(unsigned int size) return wasm_runtime_malloc_internal(size); } +void * +wasm_runtime_aligned_alloc(unsigned int size, unsigned int alignment) +{ + if (alignment == 0) { + LOG_WARNING( + "warning: wasm_runtime_aligned_alloc with zero alignment\n"); + return NULL; + } + + if (size == 0) { + LOG_WARNING("warning: wasm_runtime_aligned_alloc with size zero\n"); + /* Allocate at least alignment bytes (smallest multiple of alignment) */ + size = alignment; +#if BH_ENABLE_GC_VERIFY != 0 + exit(-1); +#endif + } + +#if WASM_ENABLE_FUZZ_TEST != 0 + if (size >= WASM_MEM_ALLOC_MAX_SIZE) { + LOG_WARNING( + "warning: wasm_runtime_aligned_alloc with too large size\n"); + return NULL; + } +#endif + + return wasm_runtime_aligned_alloc_internal(size, alignment); +} + void * wasm_runtime_realloc(void *ptr, unsigned int size) { diff --git a/core/iwasm/include/wasm_export.h b/core/iwasm/include/wasm_export.h index 86f7c22b17..830c5c030c 100644 --- a/core/iwasm/include/wasm_export.h +++ 
b/core/iwasm/include/wasm_export.h @@ -422,6 +422,22 @@ wasm_runtime_destroy(void); WASM_RUNTIME_API_EXTERN void * wasm_runtime_malloc(unsigned int size); +/** + * Allocate memory with specified alignment from runtime memory environment. + * This function mimics aligned_alloc() behavior in WebAssembly context. + * + * Note: Only supported in POOL memory mode. Other modes will return NULL. + * Note: Allocated memory cannot be reallocated with wasm_runtime_realloc(). + * + * @param size bytes need to allocate (must be multiple of alignment) + * @param alignment alignment requirement (must be power of 2, >= 8, <= page + * size) + * + * @return the pointer to aligned memory allocated, or NULL on failure + */ +WASM_RUNTIME_API_EXTERN void * +wasm_runtime_aligned_alloc(unsigned int size, unsigned int alignment); + /** * Reallocate memory from runtime memory environment * diff --git a/core/shared/mem-alloc/ems/ems_alloc.c b/core/shared/mem-alloc/ems/ems_alloc.c index 74214b2246..ffd543690a 100644 --- a/core/shared/mem-alloc/ems/ems_alloc.c +++ b/core/shared/mem-alloc/ems/ems_alloc.c @@ -552,6 +552,21 @@ alloc_hmu_ex(gc_heap_t *heap, gc_size_t size) return alloc_hmu(heap, size); } +/* Convert object pointer to HMU pointer - handles aligned allocations */ +MEM_ALLOC_API_INTER hmu_t * +obj_to_hmu(gc_object_t obj) +{ + /* Check for aligned allocation magic signature */ + if (gc_is_aligned_allocation(obj)) { + /* This is an aligned allocation, read offset */ + uint32_t *offset_ptr = (uint32_t *)((char *)obj - 8); + return (hmu_t *)((char *)obj - *offset_ptr); + } + + /* Normal allocation: standard offset */ + return (hmu_t *)((gc_uint8 *)(obj)-OBJ_PREFIX_SIZE) - 1; +} + #if BH_ENABLE_GC_VERIFY == 0 gc_object_t gc_alloc_vo(void *vheap, gc_size_t size) @@ -612,6 +627,124 @@ gc_alloc_vo_internal(void *vheap, gc_size_t size, const char *file, int line) return ret; } +#if BH_ENABLE_GC_VERIFY == 0 +gc_object_t +gc_alloc_vo_aligned(void *vheap, gc_size_t size, gc_size_t alignment) 
+#else +gc_object_t +gc_alloc_vo_aligned_internal(void *vheap, gc_size_t size, gc_size_t alignment, + const char *file, int line) +#endif +{ + gc_heap_t *heap = (gc_heap_t *)vheap; + hmu_t *hmu = NULL; + gc_object_t ret = NULL; + gc_size_t tot_size, tot_size_unaligned; + gc_uint8 *base_obj; + uintptr_t aligned_addr; + uint32_t offset, alignment_log2; + uint32_t max_alignment; + + /* Get system page size for maximum alignment check */ + max_alignment = (uint32_t)os_getpagesize(); + + /* Validation */ + if (alignment == 0 || (alignment & (alignment - 1)) != 0) { + /* Zero or not power of 2 */ + return NULL; + } + + if (alignment < GC_MIN_ALIGNMENT) { + alignment = GC_MIN_ALIGNMENT; + } + + if (alignment > max_alignment) { + /* Exceeds page size */ + return NULL; + } + + if (size % alignment != 0) { + /* POSIX requirement: size must be multiple of alignment */ + return NULL; + } + + if (size > SIZE_MAX - alignment - HMU_SIZE - OBJ_PREFIX_SIZE + - OBJ_SUFFIX_SIZE - 8) { + /* Would overflow */ + return NULL; + } + +#if BH_ENABLE_GC_CORRUPTION_CHECK != 0 + if (heap->is_heap_corrupted) { + LOG_ERROR("[GC_ERROR]Heap is corrupted, allocate memory failed.\n"); + return NULL; + } +#endif + + /* Calculate total size needed */ + tot_size_unaligned = + HMU_SIZE + OBJ_PREFIX_SIZE + size + OBJ_SUFFIX_SIZE + alignment - 1 + 8; + tot_size = GC_ALIGN_8(tot_size_unaligned); + + if (tot_size < size) { + /* Integer overflow */ + return NULL; + } + + LOCK_HEAP(heap); + + hmu = alloc_hmu_ex(heap, tot_size); + if (!hmu) + goto finish; + + bh_assert(hmu_get_size(hmu) >= tot_size); + tot_size = hmu_get_size(hmu); + +#if GC_STAT_DATA != 0 + heap->total_size_allocated += tot_size; +#endif + + /* Get base object pointer */ + base_obj = (gc_uint8 *)hmu + HMU_SIZE + OBJ_PREFIX_SIZE; + + /* Find next aligned address, leaving 8 bytes for metadata */ + aligned_addr = (((uintptr_t)base_obj + 8 + alignment - 1) + & ~(uintptr_t)(alignment - 1)); + ret = (gc_object_t)aligned_addr; + + /* Verify we have 
enough space */ + bh_assert((gc_uint8 *)ret + size + OBJ_SUFFIX_SIZE + <= (gc_uint8 *)hmu + tot_size); + + /* Calculate offset from HMU to returned pointer */ + offset = (uint32_t)((char *)ret - (char *)hmu); + + /* Calculate log2 of alignment for magic value */ + alignment_log2 = 0; + while ((1U << alignment_log2) < alignment) { + alignment_log2++; + } + + /* Store offset 8 bytes before returned pointer */ + *((uint32_t *)((char *)ret - 8)) = offset; + + /* Store magic with encoded alignment */ + *((uint32_t *)((char *)ret - 4)) = + ALIGNED_ALLOC_MAGIC_VALUE | alignment_log2; + + /* Initialize HMU */ + hmu_set_ut(hmu, HMU_VO); + hmu_unfree_vo(hmu); + +#if BH_ENABLE_GC_VERIFY != 0 + hmu_init_prefix_and_suffix(hmu, tot_size, file, line); +#endif + +finish: + UNLOCK_HEAP(heap); + return ret; +} + #if BH_ENABLE_GC_VERIFY == 0 gc_object_t gc_realloc_vo(void *vheap, void *ptr, gc_size_t size) @@ -644,6 +777,13 @@ gc_realloc_vo_internal(void *vheap, void *ptr, gc_size_t size, const char *file, } #endif + /* Check if this is an aligned allocation - not supported */ + if (gc_is_aligned_allocation(obj_old)) { + LOG_ERROR("[GC_ERROR]gc_realloc_vo does not support aligned " + "allocations\n"); + return NULL; + } + if (obj_old) { hmu_old = obj_to_hmu(obj_old); tot_size_old = hmu_get_size(hmu_old); diff --git a/core/shared/mem-alloc/ems/ems_gc.h b/core/shared/mem-alloc/ems/ems_gc.h index 9913ca2b6a..f42bdb07a3 100644 --- a/core/shared/mem-alloc/ems/ems_gc.h +++ b/core/shared/mem-alloc/ems/ems_gc.h @@ -193,6 +193,9 @@ gc_alloc_vo(void *heap, gc_size_t size); gc_object_t gc_realloc_vo(void *heap, void *ptr, gc_size_t size); +gc_object_t +gc_alloc_vo_aligned(void *heap, gc_size_t size, gc_size_t alignment); + int gc_free_vo(void *heap, gc_object_t obj); @@ -213,6 +216,10 @@ gc_object_t gc_realloc_vo_internal(void *heap, void *ptr, gc_size_t size, const char *file, int line); +gc_object_t +gc_alloc_vo_aligned_internal(void *heap, gc_size_t size, gc_size_t alignment, + const char 
*file, int line); + int gc_free_vo_internal(void *heap, gc_object_t obj, const char *file, int line); @@ -231,6 +238,9 @@ gc_free_wo_internal(void *vheap, void *ptr, const char *file, int line); #define gc_realloc_vo(heap, ptr, size) \ gc_realloc_vo_internal(heap, ptr, size, __FILE__, __LINE__) +#define gc_alloc_vo_aligned(heap, size, alignment) \ + gc_alloc_vo_aligned_internal(heap, size, alignment, __FILE__, __LINE__) + #define gc_free_vo(heap, obj) \ gc_free_vo_internal(heap, obj, __FILE__, __LINE__) diff --git a/core/shared/mem-alloc/ems/ems_gc_internal.h b/core/shared/mem-alloc/ems/ems_gc_internal.h index 605d764dfa..db6eff4018 100644 --- a/core/shared/mem-alloc/ems/ems_gc_internal.h +++ b/core/shared/mem-alloc/ems/ems_gc_internal.h @@ -13,6 +13,11 @@ extern "C" { #include "bh_platform.h" #include "ems_gc.h" +/* Test visibility macro for internal functions */ +#ifndef MEM_ALLOC_API_INTER +#define MEM_ALLOC_API_INTER WASM_RUNTIME_API_INTERN +#endif + /* HMU (heap memory unit) basic block type */ typedef enum hmu_type_enum { HMU_TYPE_MIN = 0, @@ -81,12 +86,132 @@ hmu_verify(void *vheap, hmu_t *hmu); #define GC_ALIGN_8(s) (((uint32)(s) + 7) & (uint32)~7) +/* Minimum alignment for allocations */ +#ifndef GC_MIN_ALIGNMENT +#define GC_MIN_ALIGNMENT 8 +#endif + #define GC_SMALLEST_SIZE \ GC_ALIGN_8(HMU_SIZE + OBJ_PREFIX_SIZE + OBJ_SUFFIX_SIZE + 8) #define GC_GET_REAL_SIZE(x) \ GC_ALIGN_8(HMU_SIZE + OBJ_PREFIX_SIZE + OBJ_SUFFIX_SIZE \ + (((x) > 8) ? (x) : 8)) +/* + * ============================================================================ + * Aligned Memory Allocation + * ============================================================================ + * + * This module implements aligned memory allocation similar to C11 + * aligned_alloc() and POSIX posix_memalign() for WAMR's garbage collector. 
+ * + * POSIX aligned_alloc() Specification: + * ------------------------------------ + * From C11 §7.22.3.1 and POSIX.1-2017: + * void *aligned_alloc(size_t alignment, size_t size); + * + * Requirements: + * - alignment: Must be a valid alignment supported by the implementation, + * typically a power of 2 + * - size: Must be an integral multiple of alignment + * - Returns: Pointer aligned to the specified alignment boundary, or NULL + * - Memory must be freed with free() (not realloc'd) + * - Behavior: If size is 0, may return NULL or unique pointer (impl-defined) + * + * IMPORTANT: POSIX does not require realloc() to preserve alignment. + * Calling realloc() on aligned_alloc() memory has undefined behavior. + * + * WAMR Implementation Strategy: + * ----------------------------- + * We implement alignment through over-allocation with metadata tracking: + * + * 1. **Validation Phase**: + * - Check alignment is power-of-2, >= 8 bytes, <= system page size + * - Check size is multiple of alignment + * - Return NULL if validation fails + * + * 2. **Over-Allocation**: + * - Allocate (size + alignment + metadata_overhead) bytes + * - Extra space allows us to find an aligned boundary within the block + * - Calculate log2(alignment) for encoding in the magic marker + * + * 3. **Alignment Adjustment**: + * - Find the next aligned address within the allocated block, reserving + * 8 bytes of metadata immediately before the returned pointer + * - Calculate offset from the HMU header to the aligned address + * - Store offset in metadata so free() can recover the HMU + * + * 4. **Metadata Storage (8 bytes immediately before user pointer)**: + * - Bytes [ptr-8, ptr-4): full 32-bit offset from HMU to user pointer + * - Bytes [ptr-4, ptr): magic marker (0xA11C0000 | log2(alignment)) + * - Upper 16 bits: 0xA11C identifies aligned allocation + * - Lower 16 bits: log2 of the alignment (NOT the offset) + * - The marker lets gc_is_aligned_allocation() detect aligned blocks + * and prevents unsafe realloc() operations + * + * 5. 
**Realloc Prevention**: + * - gc_realloc_vo_internal() checks for magic marker + * - Returns NULL if realloc attempted on aligned allocation + * - User must manually allocate new memory and copy data + * + * Memory Layout Diagram: + * ---------------------- + * + * Low Address High Address + * ┌─────────────┬──────────┬────────────────┬──────────────┬─────────────┐ + * │ HMU Header │ Padding │ Offset + Magic │ Aligned Data │ Padding │ + * │ (meta) │ (0-align)│ (8 bytes) │ (size) │ (overhead) │ + * └─────────────┴──────────┴────────────────┴──────────────┴─────────────┘ + * ▲ ▲ + * │ │ + * magic_ptr user_ptr (returned, aligned) + * + * Constraints and Limitations: + * ---------------------------- + * - Minimum alignment: 8 bytes (GC_MIN_ALIGNMENT) + * - Maximum alignment: System page size (os_getpagesize(), typically 4KB) + * - Offset storage: full 32-bit value at ptr-8 (not packed in the marker) + * - Realloc support: None - returns NULL (prevents alignment loss) + * - Free support: Full - use mem_allocator_free() / wasm_runtime_free() + * - Thread safety: Protected by LOCK_HEAP/UNLOCK_HEAP + * + * Usage Example: + * -------------- + * // Allocate 256 bytes aligned to 64-byte boundary (e.g., for SIMD) + * void *ptr = wasm_runtime_aligned_alloc(256, 64); + * assert((uintptr_t)ptr % 64 == 0); // Guaranteed aligned + * + * // Use the memory... + * + * // Free normally (alignment metadata handled automatically) + * wasm_runtime_free(ptr); + * + * // INVALID: Cannot realloc aligned memory + * void *new_ptr = wasm_runtime_realloc(ptr, 512); // Returns NULL! + */ + +/* Aligned allocation magic markers */ +#define ALIGNED_ALLOC_MAGIC_MASK 0xFFFF0000 +#define ALIGNED_ALLOC_MAGIC_VALUE 0xA11C0000 + +/** + * Check if a gc_object was allocated with alignment requirements. + * + * Aligned allocations store a magic marker (0xA11C0000) in the 4 bytes + * immediately before the object pointer. This marker is used to identify + * aligned allocations to prevent unsafe realloc operations. 
+ * + * @param obj the gc_object to check (user-visible pointer) + * @return true if obj is an aligned allocation, false otherwise + */ +static inline bool +gc_is_aligned_allocation(gc_object_t obj) +{ + if (!obj) + return false; + + uint32_t *magic_ptr = (uint32_t *)((char *)obj - 4); + return ((*magic_ptr & ALIGNED_ALLOC_MAGIC_MASK) + == ALIGNED_ALLOC_MAGIC_VALUE); +} + /** * hmu bit operation */ @@ -107,14 +232,57 @@ hmu_verify(void *vheap, hmu_t *hmu); (((v) & (((((uint32)1 << size) - 1) << offset))) >> offset) /* clang-format on */ +/* clang-format off */ /** * gc object layout definition + * + * #### Header Bit Layout + * + * ``` + * 31 30 29 28 27 0 + * ┌──┬──┬──┬──┬───────────────────────────────────────────────────┐ + * │UT│UT│ P│ *│ Size or Type-Specific Data │ + * └──┴──┴──┴──┴───────────────────────────────────────────────────┘ + * ``` + * + * #### Bit Fields Breakdown + * + * | Bits | Field | Description | + * | --------- | ----------------------- | -------------------------------------------- | + * | **31-30** | **UT** (Usage Type) | 2 bits for chunk type | + * | **29** | **P** (Previous In Use) | 1 bit indicating if previous chunk is in use | + * | **28** | **Type-specific** | Meaning depends on UT field | + * | **27-0** | **Type-specific** | Size or other data depending on UT | + * + * #### Memory Layout in Heap + * + * ``` + * ┌─────────────────────────────────────────────────────────────┐ + * │ HMU Header (4 bytes) │ + * ├─────────────────────────────────────────────────────────────┤ + * │ OBJ_PREFIX (if BH_ENABLE_GC_VERIFY) │ + * │ - file_name pointer │ + * │ - line_no │ + * │ - size │ + * │ - padding values (for corruption detection) │ + * ├─────────────────────────────────────────────────────────────┤ + * │ User Data (aligned to 8 bytes) │ + * │ ... 
│ + * ├─────────────────────────────────────────────────────────────┤ + * │ OBJ_SUFFIX (if BH_ENABLE_GC_VERIFY) │ + * │ - padding values (for corruption detection) │ + * └─────────────────────────────────────────────────────────────┘ + * ``` */ +/* clang-format on */ #define HMU_SIZE (sizeof(hmu_t)) #define hmu_to_obj(hmu) (gc_object_t)(SKIP_OBJ_PREFIX((hmu_t *)(hmu) + 1)) -#define obj_to_hmu(obj) ((hmu_t *)((gc_uint8 *)(obj)-OBJ_PREFIX_SIZE) - 1) + +/* obj_to_hmu function - handles both normal and aligned allocations */ +MEM_ALLOC_API_INTER hmu_t * +obj_to_hmu(gc_object_t obj); #define HMU_UT_SIZE 2 #define HMU_UT_OFFSET 30 diff --git a/core/shared/mem-alloc/mem_alloc.c b/core/shared/mem-alloc/mem_alloc.c index df1a4de4cf..c17a69ee28 100644 --- a/core/shared/mem-alloc/mem_alloc.c +++ b/core/shared/mem-alloc/mem_alloc.c @@ -57,6 +57,24 @@ mem_allocator_free(mem_allocator_t allocator, void *ptr) gc_free_vo((gc_handle_t)allocator, ptr); } +#if BH_ENABLE_GC_VERIFY == 0 +void * +mem_allocator_malloc_aligned(mem_allocator_t allocator, uint32_t size, + uint32_t alignment) +{ + return gc_alloc_vo_aligned((gc_handle_t)allocator, size, alignment); +} +#else +void * +mem_allocator_malloc_aligned_internal(mem_allocator_t allocator, uint32_t size, + uint32_t alignment, const char *file, + int line) +{ + return gc_alloc_vo_aligned_internal((gc_handle_t)allocator, size, alignment, + file, line); +} +#endif + #if WASM_ENABLE_GC != 0 void * mem_allocator_malloc_with_gc(mem_allocator_t allocator, uint32_t size) diff --git a/core/shared/mem-alloc/mem_alloc.h b/core/shared/mem-alloc/mem_alloc.h index 97e87d4a5e..3e55d49ed5 100644 --- a/core/shared/mem-alloc/mem_alloc.h +++ b/core/shared/mem-alloc/mem_alloc.h @@ -46,6 +46,34 @@ mem_allocator_realloc(mem_allocator_t allocator, void *ptr, uint32_t size); void mem_allocator_free(mem_allocator_t allocator, void *ptr); +/* Aligned allocation support */ +#ifndef GC_MIN_ALIGNMENT +#define GC_MIN_ALIGNMENT 8 +#endif + +#if BH_ENABLE_GC_VERIFY 
== 0 + +void * +mem_allocator_malloc_aligned(mem_allocator_t allocator, uint32_t size, + uint32_t alignment); + +#define mem_allocator_malloc_aligned_internal(allocator, size, alignment, \ + file, line) \ + mem_allocator_malloc_aligned(allocator, size, alignment) + +#else /* BH_ENABLE_GC_VERIFY != 0 */ + +void * +mem_allocator_malloc_aligned_internal(mem_allocator_t allocator, uint32_t size, + uint32_t alignment, const char *file, + int line); + +#define mem_allocator_malloc_aligned(allocator, size, alignment) \ + mem_allocator_malloc_aligned_internal(allocator, size, alignment, \ + __FILE__, __LINE__) + +#endif /* end of BH_ENABLE_GC_VERIFY */ + int mem_allocator_migrate(mem_allocator_t allocator, char *pool_buf_new, uint32 pool_buf_size); diff --git a/core/shared/utils/bh_platform.h b/core/shared/utils/bh_platform.h index 86aef839dd..4774a7cabb 100644 --- a/core/shared/utils/bh_platform.h +++ b/core/shared/utils/bh_platform.h @@ -18,6 +18,24 @@ #include "bh_vector.h" #include "runtime_timer.h" +/** + * API visibility macros for WAMR internal functions + * + * WASM_RUNTIME_API_EXTERN - Public exported APIs (defined in wasm_export.h) + * WASM_RUNTIME_API_INTERN - Internal APIs visible across WAMR components + * + * In test builds (WAMR_BUILD_TEST=1), internal APIs are exposed for unit + * testing. In production builds, internal APIs are static (file-scoped) for + * encapsulation. + */ +#ifndef WASM_RUNTIME_API_INTERN +#ifdef WAMR_BUILD_TEST +#define WASM_RUNTIME_API_INTERN +#else +#define WASM_RUNTIME_API_INTERN static +#endif +#endif + /** * WA_MALLOC/WA_FREE need to be redefined for both * runtime native and WASM app respectively. 
diff --git a/tests/unit/CMakeLists.txt b/tests/unit/CMakeLists.txt index d99d991bbf..225f1e386a 100644 --- a/tests/unit/CMakeLists.txt +++ b/tests/unit/CMakeLists.txt @@ -73,6 +73,21 @@ endif() set(gtest_force_shared_crt ON CACHE BOOL "" FORCE) FetchContent_MakeAvailable(googletest) +# Fetch CMocka for C unit tests +if(${CMAKE_VERSION} VERSION_GREATER_EQUAL "3.24") + FetchContent_Declare( + cmocka + URL https://git.cryptomilk.org/projects/cmocka.git/snapshot/cmocka-2.0.1.tar.gz + DOWNLOAD_EXTRACT_TIMESTAMP ON + ) +else() + FetchContent_Declare( + cmocka + URL https://git.cryptomilk.org/projects/cmocka.git/snapshot/cmocka-2.0.1.tar.gz + ) +endif() +FetchContent_MakeAvailable(cmocka) + include(GoogleTest) enable_testing() @@ -90,6 +105,7 @@ add_subdirectory(unsupported-features) add_subdirectory(smart-tests) add_subdirectory(exception-handling) add_subdirectory(running-modes) +add_subdirectory(mem-alloc) if(WAMR_BUILD_TARGET STREQUAL "X86_64") add_subdirectory(aot-stack-frame) diff --git a/tests/unit/mem-alloc/CMakeLists.txt b/tests/unit/mem-alloc/CMakeLists.txt new file mode 100644 index 0000000000..9431c11a0f --- /dev/null +++ b/tests/unit/mem-alloc/CMakeLists.txt @@ -0,0 +1,60 @@ +# Copyright (C) 2019 Intel Corporation. All rights reserved. 
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + +cmake_minimum_required(VERSION 3.14) + +project(test-mem-alloc) + +# Enable test build flag +add_definitions(-DWAMR_BUILD_TEST=1) + +# Test-specific feature configuration +set(WAMR_BUILD_AOT 0) +set(WAMR_BUILD_FAST_INTERP 0) +set(WAMR_BUILD_INTERP 1) +set(WAMR_BUILD_JIT 0) +set(WAMR_BUILD_LIBC_WASI 0) +set(WAMR_BUILD_APP_FRAMEWORK 0) + +include(../unit_common.cmake) + +# Test source files +set(TEST_SOURCES + test_runner.c + ${WAMR_RUNTIME_LIB_SOURCE} +) + +# +# Create test executable +# + +## Normal test executable +add_executable(mem-alloc-test ${TEST_SOURCES}) + +# Add include directories for mem-alloc internals +target_include_directories(mem-alloc-test PRIVATE + ${WAMR_ROOT_DIR}/core/shared/mem-alloc + ${WAMR_ROOT_DIR}/core/shared/mem-alloc/ems +) + +## GC test executable +add_executable(mem-alloc-gc-test ${TEST_SOURCES}) + +target_include_directories(mem-alloc-gc-test PRIVATE + ${WAMR_ROOT_DIR}/core/shared/mem-alloc + ${WAMR_ROOT_DIR}/core/shared/mem-alloc/ems +) + +target_compile_options(mem-alloc-gc-test PRIVATE -DWAMR_BUILD_GC=1 -DWAMR_BUILD_GC_VERIFY=1) + + +# Link dependencies +target_link_libraries(mem-alloc-test cmocka::cmocka m) +target_link_libraries(mem-alloc-gc-test cmocka::cmocka m) + +# Add to ctest +add_test(NAME mem-alloc-test COMMAND mem-alloc-test) +set_tests_properties(mem-alloc-test PROPERTIES TIMEOUT 60) + +add_test(NAME mem-alloc-gc-test COMMAND mem-alloc-gc-test) +set_tests_properties(mem-alloc-gc-test PROPERTIES TIMEOUT 60) diff --git a/tests/unit/mem-alloc/mem_alloc_test.c b/tests/unit/mem-alloc/mem_alloc_test.c new file mode 100644 index 0000000000..878e0e0d49 --- /dev/null +++ b/tests/unit/mem-alloc/mem_alloc_test.c @@ -0,0 +1,519 @@ +/* + * Copyright (C) 2019 Intel Corporation. All rights reserved. 
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + */ + +#include +#include +#include +#include +#include +#include + +#if WAMR_BUILD_TEST != 1 +#error "WAMR_BUILD_TEST must be defined as 1" +#endif + +#include "mem_alloc.h" +#include "ems_gc_internal.h" +#include "wasm_export.h" + +/* Test helper: Check if pointer is aligned */ +static inline bool +is_aligned(void *ptr, size_t alignment) +{ + return ((uintptr_t)ptr % alignment) == 0; +} + +/* Test helper: Check if allocation is aligned (has magic value) */ +static inline bool +is_aligned_allocation(gc_object_t obj) +{ + uint32_t *magic_ptr = (uint32_t *)((char *)obj - 4); + return ((*magic_ptr & ALIGNED_ALLOC_MAGIC_MASK) + == ALIGNED_ALLOC_MAGIC_VALUE); +} + +/* Test: Normal allocation still works (regression) */ +static void +test_normal_alloc_basic(void **state) +{ + mem_allocator_t allocator; + char heap_buf[64 * 1024]; + void *ptr; + + allocator = mem_allocator_create(heap_buf, sizeof(heap_buf)); + assert_non_null(allocator); + + /* Normal allocation should still work */ + ptr = mem_allocator_malloc(allocator, 128); + assert_non_null(ptr); + + /* Should be 8-byte aligned */ + assert_true(is_aligned(ptr, 8)); + + /* Should NOT be marked as aligned allocation */ + assert_false(is_aligned_allocation(ptr)); + + /* Free should work */ + mem_allocator_free(allocator, ptr); + + mem_allocator_destroy(allocator); +} + +/* Test: Valid alignment powers of 2 */ +static void +test_aligned_alloc_valid_alignments(void **state) +{ + mem_allocator_t allocator; + char heap_buf[128 * 1024]; + void *ptr; + + allocator = mem_allocator_create(heap_buf, sizeof(heap_buf)); + assert_non_null(allocator); + + /* Test each valid alignment */ + int alignments[] = { 8, 16, 32, 64, 128, 256, 512, 1024 }; + int num_alignments = sizeof(alignments) / sizeof(alignments[0]); + for (int i = 0; i < num_alignments; i++) { + int align = alignments[i]; + + /* Allocate with size = multiple of alignment */ + ptr = 
mem_allocator_malloc_aligned(allocator, align * 2, align); + assert_non_null(ptr); + + /* Verify alignment */ + assert_true(is_aligned(ptr, align)); + + /* Verify marked as aligned */ + assert_true(is_aligned_allocation(ptr)); + + /* Free */ + mem_allocator_free(allocator, ptr); + } + + mem_allocator_destroy(allocator); +} + +/* Test: Realloc rejects aligned allocations */ +static void +test_realloc_rejects_aligned(void **state) +{ + mem_allocator_t allocator; + char heap_buf[64 * 1024]; + void *ptr, *new_ptr; + + allocator = mem_allocator_create(heap_buf, sizeof(heap_buf)); + assert_non_null(allocator); + + /* Allocate aligned */ + ptr = mem_allocator_malloc_aligned(allocator, 128, 64); + assert_non_null(ptr); + assert_true(is_aligned_allocation(ptr)); + + /* Realloc should reject aligned allocation */ + new_ptr = mem_allocator_realloc(allocator, ptr, 256); + assert_null(new_ptr); + + /* Original pointer should still be valid - free it */ + mem_allocator_free(allocator, ptr); + + mem_allocator_destroy(allocator); +} + +/* Test: Realloc still works for normal allocations */ +static void +test_normal_realloc_works(void **state) +{ + mem_allocator_t allocator; + char heap_buf[64 * 1024]; + void *ptr, *new_ptr; + + allocator = mem_allocator_create(heap_buf, sizeof(heap_buf)); + assert_non_null(allocator); + + /* Allocate normal */ + ptr = mem_allocator_malloc(allocator, 128); + assert_non_null(ptr); + + /* Write some data */ + memset(ptr, 0xAB, 128); + + /* Realloc should work */ + new_ptr = mem_allocator_realloc(allocator, ptr, 256); + assert_non_null(new_ptr); + + /* Data should be preserved */ + for (int i = 0; i < 128; i++) { + assert_int_equal(((unsigned char *)new_ptr)[i], 0xAB); + } + + mem_allocator_free(allocator, new_ptr); + mem_allocator_destroy(allocator); +} + +/* Test: Invalid alignments (not power of 2 or zero) */ +static void +test_aligned_alloc_invalid_not_power_of_2(void **state) +{ + mem_allocator_t allocator; + char heap_buf[64 * 1024]; + void 
*ptr; + + allocator = mem_allocator_create(heap_buf, sizeof(heap_buf)); + assert_non_null(allocator); + + /* These should all fail (zero or not power of 2) */ + int invalid_alignments[] = { 0, 3, 5, 7, 9, 15, 17, 100 }; + int num_invalid = + sizeof(invalid_alignments) / sizeof(invalid_alignments[0]); + for (int i = 0; i < num_invalid; i++) { + ptr = + mem_allocator_malloc_aligned(allocator, 128, invalid_alignments[i]); + assert_null(ptr); + } + + /* Small powers of 2 should succeed (adjusted to GC_MIN_ALIGNMENT) */ + ptr = mem_allocator_malloc_aligned(allocator, 8, 1); + assert_non_null(ptr); + mem_allocator_free(allocator, ptr); + + ptr = mem_allocator_malloc_aligned(allocator, 8, 2); + assert_non_null(ptr); + mem_allocator_free(allocator, ptr); + + ptr = mem_allocator_malloc_aligned(allocator, 8, 4); + assert_non_null(ptr); + mem_allocator_free(allocator, ptr); + + mem_allocator_destroy(allocator); +} + +/* Test: Size must be multiple of alignment */ +static void +test_aligned_alloc_size_not_multiple(void **state) +{ + mem_allocator_t allocator; + char heap_buf[64 * 1024]; + void *ptr; + + allocator = mem_allocator_create(heap_buf, sizeof(heap_buf)); + assert_non_null(allocator); + + /* Size not multiple of alignment - should fail */ + ptr = mem_allocator_malloc_aligned(allocator, 100, 64); + assert_null(ptr); + + ptr = mem_allocator_malloc_aligned(allocator, 65, 64); + assert_null(ptr); + + /* Size is multiple - should succeed */ + ptr = mem_allocator_malloc_aligned(allocator, 128, 64); + assert_non_null(ptr); + mem_allocator_free(allocator, ptr); + + mem_allocator_destroy(allocator); +} + +/* Test: Mixed normal and aligned allocations */ +static void +test_mixed_alloc_interleaved(void **state) +{ + mem_allocator_t allocator; + char heap_buf[128 * 1024]; + void *normal1, *aligned1, *normal2, *aligned2; + + allocator = mem_allocator_create(heap_buf, sizeof(heap_buf)); + assert_non_null(allocator); + + /* Allocate: normal -> aligned -> normal -> aligned */ + 
normal1 = mem_allocator_malloc(allocator, 64); + assert_non_null(normal1); + assert_false(is_aligned_allocation(normal1)); + + aligned1 = mem_allocator_malloc_aligned(allocator, 128, 64); + assert_non_null(aligned1); + assert_true(is_aligned_allocation(aligned1)); + assert_true(is_aligned(aligned1, 64)); + + normal2 = mem_allocator_malloc(allocator, 96); + assert_non_null(normal2); + assert_false(is_aligned_allocation(normal2)); + + aligned2 = mem_allocator_malloc_aligned(allocator, 256, 128); + assert_non_null(aligned2); + assert_true(is_aligned_allocation(aligned2)); + assert_true(is_aligned(aligned2, 128)); + + /* Free in mixed order */ + mem_allocator_free(allocator, normal1); + mem_allocator_free(allocator, aligned2); + mem_allocator_free(allocator, normal2); + mem_allocator_free(allocator, aligned1); + + mem_allocator_destroy(allocator); +} + +/* Test: obj_to_hmu works correctly for both types */ +static void +test_mixed_obj_to_hmu(void **state) +{ + mem_allocator_t allocator; + char heap_buf[64 * 1024]; + void *normal, *aligned; + hmu_t *hmu_normal, *hmu_aligned; + + allocator = mem_allocator_create(heap_buf, sizeof(heap_buf)); + assert_non_null(allocator); + + /* Allocate both types */ + normal = mem_allocator_malloc(allocator, 128); + assert_non_null(normal); + + aligned = mem_allocator_malloc_aligned(allocator, 128, 64); + assert_non_null(aligned); + + /* Get HMU pointers */ + hmu_normal = obj_to_hmu(normal); + hmu_aligned = obj_to_hmu(aligned); + + assert_non_null(hmu_normal); + assert_non_null(hmu_aligned); + + /* Both should have HMU_VO type */ + assert_int_equal(hmu_get_ut(hmu_normal), HMU_VO); + assert_int_equal(hmu_get_ut(hmu_aligned), HMU_VO); + + /* Sizes should be reasonable */ + assert_true(hmu_get_size(hmu_normal) >= 128); + assert_true(hmu_get_size(hmu_aligned) >= 128); + + /* Free both */ + mem_allocator_free(allocator, normal); + mem_allocator_free(allocator, aligned); + + mem_allocator_destroy(allocator); +} + +/* Test: Many aligned 
allocations */ +static void +test_aligned_alloc_many(void **state) +{ + mem_allocator_t allocator; + char heap_buf[512 * 1024]; + void *ptrs[100]; + int count = 0; + + allocator = mem_allocator_create(heap_buf, sizeof(heap_buf)); + assert_non_null(allocator); + + /* Allocate as many as possible */ + for (int i = 0; i < 100; i++) { + int align = (i % 4 == 0) ? 64 : 32; + ptrs[i] = mem_allocator_malloc_aligned(allocator, align * 2, align); + if (ptrs[i]) { + assert_true(is_aligned(ptrs[i], align)); + count++; + } + else { + break; + } + } + + assert_true(count > 10); /* At least some should succeed */ + + /* Free all */ + for (int i = 0; i < count; i++) { + mem_allocator_free(allocator, ptrs[i]); + } + + mem_allocator_destroy(allocator); +} + +/* Test: Many mixed allocations */ +static void +test_mixed_alloc_many(void **state) +{ + mem_allocator_t allocator; + char heap_buf[512 * 1024]; + void *ptrs[200]; + int count = 0; + + allocator = mem_allocator_create(heap_buf, sizeof(heap_buf)); + assert_non_null(allocator); + + /* Alternate normal and aligned */ + for (int i = 0; i < 200; i++) { + if (i % 2 == 0) { + /* Normal allocation */ + ptrs[i] = mem_allocator_malloc(allocator, 64); + } + else { + /* Aligned allocation */ + ptrs[i] = mem_allocator_malloc_aligned(allocator, 64, 32); + } + + if (ptrs[i]) { + count++; + } + else { + break; + } + } + + assert_true(count > 20); + + /* Free in reverse order */ + for (int i = count - 1; i >= 0; i--) { + mem_allocator_free(allocator, ptrs[i]); + } + + mem_allocator_destroy(allocator); +} + +/* Test: wasm_runtime_aligned_alloc with valid inputs in POOL mode */ +static void +test_wasm_runtime_aligned_alloc_valid(void **state) +{ + RuntimeInitArgs init_args; + void *ptr; + + memset(&init_args, 0, sizeof(RuntimeInitArgs)); + init_args.mem_alloc_type = Alloc_With_Pool; + init_args.mem_alloc_option.pool.heap_buf = malloc(256 * 1024); + init_args.mem_alloc_option.pool.heap_size = 256 * 1024; + + 
assert_true(wasm_runtime_full_init(&init_args)); + + /* Test valid aligned allocation */ + ptr = wasm_runtime_aligned_alloc(128, 64); + assert_non_null(ptr); + assert_true(is_aligned(ptr, 64)); + + /* Free should work */ + wasm_runtime_free(ptr); + + wasm_runtime_destroy(); + free(init_args.mem_alloc_option.pool.heap_buf); +} + +/* Test: wasm_runtime_aligned_alloc with zero size */ +static void +test_wasm_runtime_aligned_alloc_zero_size(void **state) +{ + RuntimeInitArgs init_args; + void *ptr; + + memset(&init_args, 0, sizeof(RuntimeInitArgs)); + init_args.mem_alloc_type = Alloc_With_Pool; + init_args.mem_alloc_option.pool.heap_buf = malloc(256 * 1024); + init_args.mem_alloc_option.pool.heap_size = 256 * 1024; + + assert_true(wasm_runtime_full_init(&init_args)); + + /* Zero size should allocate alignment bytes (like malloc(0) behavior) */ + ptr = wasm_runtime_aligned_alloc(0, 64); + assert_non_null(ptr); + assert_true(is_aligned(ptr, 64)); + + wasm_runtime_free(ptr); + wasm_runtime_destroy(); + free(init_args.mem_alloc_option.pool.heap_buf); +} + +/* Test: wasm_runtime_aligned_alloc with zero alignment */ +static void +test_wasm_runtime_aligned_alloc_zero_alignment(void **state) +{ + RuntimeInitArgs init_args; + void *ptr; + + memset(&init_args, 0, sizeof(RuntimeInitArgs)); + init_args.mem_alloc_type = Alloc_With_Pool; + init_args.mem_alloc_option.pool.heap_buf = malloc(256 * 1024); + init_args.mem_alloc_option.pool.heap_size = 256 * 1024; + + assert_true(wasm_runtime_full_init(&init_args)); + + /* Zero alignment should return NULL */ + ptr = wasm_runtime_aligned_alloc(128, 0); + assert_null(ptr); + + wasm_runtime_destroy(); + free(init_args.mem_alloc_option.pool.heap_buf); +} + +/* Test: wasm_runtime_aligned_alloc in SYSTEM_ALLOCATOR mode returns NULL */ +static void +test_wasm_runtime_aligned_alloc_system_mode(void **state) +{ + RuntimeInitArgs init_args; + void *ptr; + + memset(&init_args, 0, sizeof(RuntimeInitArgs)); + init_args.mem_alloc_type = 
Alloc_With_System_Allocator; + + assert_true(wasm_runtime_full_init(&init_args)); + + /* Should return NULL in non-POOL mode */ + ptr = wasm_runtime_aligned_alloc(128, 64); + assert_null(ptr); + + wasm_runtime_destroy(); +} + +/* Test: wasm_runtime_realloc rejects aligned allocations */ +static void +test_wasm_runtime_realloc_rejects_aligned(void **state) +{ + RuntimeInitArgs init_args; + void *ptr, *new_ptr; + + memset(&init_args, 0, sizeof(RuntimeInitArgs)); + init_args.mem_alloc_type = Alloc_With_Pool; + init_args.mem_alloc_option.pool.heap_buf = malloc(256 * 1024); + init_args.mem_alloc_option.pool.heap_size = 256 * 1024; + + assert_true(wasm_runtime_full_init(&init_args)); + + /* Allocate with alignment */ + ptr = wasm_runtime_aligned_alloc(128, 64); + assert_non_null(ptr); + + /* Realloc should return NULL */ + new_ptr = wasm_runtime_realloc(ptr, 256); + assert_null(new_ptr); + + /* Original pointer still valid */ + wasm_runtime_free(ptr); + + wasm_runtime_destroy(); + free(init_args.mem_alloc_option.pool.heap_buf); +} + +/* Test: wasm_runtime_aligned_alloc with various alignments */ +static void +test_wasm_runtime_aligned_alloc_multiple_alignments(void **state) +{ + RuntimeInitArgs init_args; + int alignments[] = { 8, 16, 32, 64, 128, 256 }; + int num_alignments = sizeof(alignments) / sizeof(alignments[0]); + + memset(&init_args, 0, sizeof(RuntimeInitArgs)); + init_args.mem_alloc_type = Alloc_With_Pool; + init_args.mem_alloc_option.pool.heap_buf = malloc(512 * 1024); + init_args.mem_alloc_option.pool.heap_size = 512 * 1024; + + assert_true(wasm_runtime_full_init(&init_args)); + + for (int i = 0; i < num_alignments; i++) { + int align = alignments[i]; + void *ptr = wasm_runtime_aligned_alloc(align * 2, align); + assert_non_null(ptr); + assert_true(is_aligned(ptr, align)); + wasm_runtime_free(ptr); + } + + wasm_runtime_destroy(); + free(init_args.mem_alloc_option.pool.heap_buf); +} diff --git a/tests/unit/mem-alloc/test_runner.c 
b/tests/unit/mem-alloc/test_runner.c new file mode 100644 index 0000000000..d9621f73ad --- /dev/null +++ b/tests/unit/mem-alloc/test_runner.c @@ -0,0 +1,38 @@ +/* + * Copyright (C) 2019 Intel Corporation. All rights reserved. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + */ + +#include +#include +#include +#include +#include + +/* Include test implementations */ +#include "mem_alloc_test.c" + +int +main(void) +{ + const struct CMUnitTest tests[] = { + cmocka_unit_test(test_normal_alloc_basic), + cmocka_unit_test(test_aligned_alloc_valid_alignments), + cmocka_unit_test(test_realloc_rejects_aligned), + cmocka_unit_test(test_normal_realloc_works), + cmocka_unit_test(test_aligned_alloc_invalid_not_power_of_2), + cmocka_unit_test(test_aligned_alloc_size_not_multiple), + cmocka_unit_test(test_mixed_alloc_interleaved), + cmocka_unit_test(test_mixed_obj_to_hmu), + cmocka_unit_test(test_aligned_alloc_many), + cmocka_unit_test(test_mixed_alloc_many), + cmocka_unit_test(test_wasm_runtime_aligned_alloc_valid), + cmocka_unit_test(test_wasm_runtime_aligned_alloc_zero_size), + cmocka_unit_test(test_wasm_runtime_aligned_alloc_zero_alignment), + cmocka_unit_test(test_wasm_runtime_aligned_alloc_system_mode), + cmocka_unit_test(test_wasm_runtime_realloc_rejects_aligned), + cmocka_unit_test(test_wasm_runtime_aligned_alloc_multiple_alignments), + }; + + return cmocka_run_group_tests(tests, NULL, NULL); +}