From be9cca36ac7b614fc6c24d2e99ed9c9ea8304df7 Mon Sep 17 00:00:00 2001 From: "liang.he@intel.com" Date: Thu, 19 Mar 2026 21:25:27 +0800 Subject: [PATCH 01/19] feat(mem-alloc): add aligned allocation API declaration Add mem_allocator_malloc_aligned() API with support for both GC_VERIFY enabled and disabled modes. Related to DESIGN_ALIGNED_ALLOC.md --- core/shared/mem-alloc/mem_alloc.h | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/core/shared/mem-alloc/mem_alloc.h b/core/shared/mem-alloc/mem_alloc.h index 97e87d4a5e..0304e1397d 100644 --- a/core/shared/mem-alloc/mem_alloc.h +++ b/core/shared/mem-alloc/mem_alloc.h @@ -46,6 +46,34 @@ mem_allocator_realloc(mem_allocator_t allocator, void *ptr, uint32_t size); void mem_allocator_free(mem_allocator_t allocator, void *ptr); +/* Aligned allocation support */ +#ifndef GC_MIN_ALIGNMENT +#define GC_MIN_ALIGNMENT 8 +#endif + +#if BH_ENABLE_GC_VERIFY == 0 + +void * +mem_allocator_malloc_aligned(mem_allocator_t allocator, uint32_t size, + uint32_t alignment); + +#define mem_allocator_malloc_aligned_internal(allocator, size, alignment, \ + file, line) \ + mem_allocator_malloc_aligned(allocator, size, alignment) + +#else /* BH_ENABLE_GC_VERIFY != 0 */ + +void * +mem_allocator_malloc_aligned_internal(mem_allocator_t allocator, + uint32_t size, uint32_t alignment, + const char *file, int line); + +#define mem_allocator_malloc_aligned(allocator, size, alignment) \ + mem_allocator_malloc_aligned_internal(allocator, size, alignment, \ + __FILE__, __LINE__) + +#endif /* end of BH_ENABLE_GC_VERIFY */ + int mem_allocator_migrate(mem_allocator_t allocator, char *pool_buf_new, uint32 pool_buf_size); From 74723316efca7a2ebf940b7a80489cab233dfa16 Mon Sep 17 00:00:00 2001 From: "liang.he@intel.com" Date: Thu, 19 Mar 2026 21:26:13 +0800 Subject: [PATCH 02/19] feat(mem-alloc): add test visibility and magic constants Add MEM_ALLOC_API_INTER macro for exposing internal functions in test builds and magic value 
constants for aligned allocation detection. --- core/shared/mem-alloc/ems/ems_gc_internal.h | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/core/shared/mem-alloc/ems/ems_gc_internal.h b/core/shared/mem-alloc/ems/ems_gc_internal.h index 605d764dfa..770ccca5cc 100644 --- a/core/shared/mem-alloc/ems/ems_gc_internal.h +++ b/core/shared/mem-alloc/ems/ems_gc_internal.h @@ -13,6 +13,15 @@ extern "C" { #include "bh_platform.h" #include "ems_gc.h" +/* Test visibility macro for internal functions */ +#ifndef MEM_ALLOC_API_INTER +#ifdef WAMR_BUILD_TEST +#define MEM_ALLOC_API_INTER +#else +#define MEM_ALLOC_API_INTER static +#endif +#endif + /* HMU (heap memory unit) basic block type */ typedef enum hmu_type_enum { HMU_TYPE_MIN = 0, @@ -87,6 +96,10 @@ hmu_verify(void *vheap, hmu_t *hmu); GC_ALIGN_8(HMU_SIZE + OBJ_PREFIX_SIZE + OBJ_SUFFIX_SIZE \ + (((x) > 8) ? (x) : 8)) +/* Magic value for aligned allocation detection */ +#define ALIGNED_ALLOC_MAGIC_MASK 0xFFFF0000 +#define ALIGNED_ALLOC_MAGIC_VALUE 0xA11C0000 + /** * hmu bit operation */ From 03003503b56c6c438d8c3e46dcd99aa607ca8c20 Mon Sep 17 00:00:00 2001 From: "liang.he@intel.com" Date: Thu, 19 Mar 2026 21:27:34 +0800 Subject: [PATCH 03/19] feat(mem-alloc): modify obj_to_hmu for aligned detection Convert obj_to_hmu to function that detects aligned allocations via magic value and calculates correct HMU offset. 
--- core/shared/mem-alloc/ems/ems_alloc.c | 17 +++++++++++++++++ core/shared/mem-alloc/ems/ems_gc_internal.h | 5 ++++- 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/core/shared/mem-alloc/ems/ems_alloc.c b/core/shared/mem-alloc/ems/ems_alloc.c index 74214b2246..f8476f9d84 100644 --- a/core/shared/mem-alloc/ems/ems_alloc.c +++ b/core/shared/mem-alloc/ems/ems_alloc.c @@ -552,6 +552,23 @@ alloc_hmu_ex(gc_heap_t *heap, gc_size_t size) return alloc_hmu(heap, size); } +/* Convert object pointer to HMU pointer - handles aligned allocations */ +MEM_ALLOC_API_INTER hmu_t * +obj_to_hmu(gc_object_t obj) +{ + uint32_t *magic_ptr = (uint32_t *)((char *)obj - 4); + + /* Check for aligned allocation magic signature */ + if ((*magic_ptr & ALIGNED_ALLOC_MAGIC_MASK) == ALIGNED_ALLOC_MAGIC_VALUE) { + /* This is an aligned allocation, read offset */ + uint32_t *offset_ptr = (uint32_t *)((char *)obj - 8); + return (hmu_t *)((char *)obj - *offset_ptr); + } + + /* Normal allocation: standard offset */ + return (hmu_t *)((gc_uint8 *)(obj) - OBJ_PREFIX_SIZE) - 1; +} + #if BH_ENABLE_GC_VERIFY == 0 gc_object_t gc_alloc_vo(void *vheap, gc_size_t size) diff --git a/core/shared/mem-alloc/ems/ems_gc_internal.h b/core/shared/mem-alloc/ems/ems_gc_internal.h index 770ccca5cc..dfcd096c73 100644 --- a/core/shared/mem-alloc/ems/ems_gc_internal.h +++ b/core/shared/mem-alloc/ems/ems_gc_internal.h @@ -127,7 +127,10 @@ hmu_verify(void *vheap, hmu_t *hmu); #define HMU_SIZE (sizeof(hmu_t)) #define hmu_to_obj(hmu) (gc_object_t)(SKIP_OBJ_PREFIX((hmu_t *)(hmu) + 1)) -#define obj_to_hmu(obj) ((hmu_t *)((gc_uint8 *)(obj)-OBJ_PREFIX_SIZE) - 1) + +/* obj_to_hmu function - handles both normal and aligned allocations */ +MEM_ALLOC_API_INTER hmu_t * +obj_to_hmu(gc_object_t obj); #define HMU_UT_SIZE 2 #define HMU_UT_OFFSET 30 From 5ad5633bc932486b748247ed7f2795d48031d34d Mon Sep 17 00:00:00 2001 From: "liang.he@intel.com" Date: Thu, 19 Mar 2026 21:28:33 +0800 Subject: [PATCH 04/19] test(mem-alloc): add 
test infrastructure Add CMocka support and mem-alloc test subdirectory. --- tests/unit/CMakeLists.txt | 16 +++++++++++++ tests/unit/mem-alloc/CMakeLists.txt | 35 +++++++++++++++++++++++++++++ 2 files changed, 51 insertions(+) create mode 100644 tests/unit/mem-alloc/CMakeLists.txt diff --git a/tests/unit/CMakeLists.txt b/tests/unit/CMakeLists.txt index d99d991bbf..225f1e386a 100644 --- a/tests/unit/CMakeLists.txt +++ b/tests/unit/CMakeLists.txt @@ -73,6 +73,21 @@ endif() set(gtest_force_shared_crt ON CACHE BOOL "" FORCE) FetchContent_MakeAvailable(googletest) +# Fetch CMocka for C unit tests +if(${CMAKE_VERSION} VERSION_GREATER_EQUAL "3.24") + FetchContent_Declare( + cmocka + URL https://git.cryptomilk.org/projects/cmocka.git/snapshot/cmocka-2.0.1.tar.gz + DOWNLOAD_EXTRACT_TIMESTAMP ON + ) +else() + FetchContent_Declare( + cmocka + URL https://git.cryptomilk.org/projects/cmocka.git/snapshot/cmocka-2.0.1.tar.gz + ) +endif() +FetchContent_MakeAvailable(cmocka) + include(GoogleTest) enable_testing() @@ -90,6 +105,7 @@ add_subdirectory(unsupported-features) add_subdirectory(smart-tests) add_subdirectory(exception-handling) add_subdirectory(running-modes) +add_subdirectory(mem-alloc) if(WAMR_BUILD_TARGET STREQUAL "X86_64") add_subdirectory(aot-stack-frame) diff --git a/tests/unit/mem-alloc/CMakeLists.txt b/tests/unit/mem-alloc/CMakeLists.txt new file mode 100644 index 0000000000..8ac1bd9ffe --- /dev/null +++ b/tests/unit/mem-alloc/CMakeLists.txt @@ -0,0 +1,35 @@ +# Copyright (C) 2019 Intel Corporation. All rights reserved. 
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + +cmake_minimum_required(VERSION 3.14) + +project(test-mem-alloc) + +# Enable test build flag +add_definitions(-DWAMR_BUILD_TEST=1) + +# Test-specific feature configuration +set(WAMR_BUILD_AOT 0) +set(WAMR_BUILD_FAST_INTERP 0) +set(WAMR_BUILD_INTERP 0) +set(WAMR_BUILD_JIT 0) +set(WAMR_BUILD_LIBC_WASI 0) +set(WAMR_BUILD_APP_FRAMEWORK 0) + +include(../unit_common.cmake) + +# Test source files +set(TEST_SOURCES + test_runner.c + ${WAMR_RUNTIME_LIB_SOURCE} +) + +# Create test executable +add_executable(mem-alloc-test ${TEST_SOURCES}) + +# Link dependencies +target_link_libraries(mem-alloc-test cmocka::cmocka m) + +# Add to ctest +add_test(NAME mem-alloc-test COMMAND mem-alloc-test) +set_tests_properties(mem-alloc-test PROPERTIES TIMEOUT 60) From 526ff65e29e18de7675a8cb0a1df4f82a43b2c7e Mon Sep 17 00:00:00 2001 From: "liang.he@intel.com" Date: Thu, 19 Mar 2026 21:35:23 +0800 Subject: [PATCH 05/19] test(mem-alloc): add test runner and stubs Create basic test infrastructure with helper functions. Fix INTERP setting and add include directories for ems headers. 
--- tests/unit/mem-alloc/CMakeLists.txt | 8 +++++- tests/unit/mem-alloc/mem_alloc_test.c | 35 +++++++++++++++++++++++++++ tests/unit/mem-alloc/test_runner.c | 23 ++++++++++++++++++ 3 files changed, 65 insertions(+), 1 deletion(-) create mode 100644 tests/unit/mem-alloc/mem_alloc_test.c create mode 100644 tests/unit/mem-alloc/test_runner.c diff --git a/tests/unit/mem-alloc/CMakeLists.txt b/tests/unit/mem-alloc/CMakeLists.txt index 8ac1bd9ffe..745fd25dea 100644 --- a/tests/unit/mem-alloc/CMakeLists.txt +++ b/tests/unit/mem-alloc/CMakeLists.txt @@ -11,7 +11,7 @@ add_definitions(-DWAMR_BUILD_TEST=1) # Test-specific feature configuration set(WAMR_BUILD_AOT 0) set(WAMR_BUILD_FAST_INTERP 0) -set(WAMR_BUILD_INTERP 0) +set(WAMR_BUILD_INTERP 1) set(WAMR_BUILD_JIT 0) set(WAMR_BUILD_LIBC_WASI 0) set(WAMR_BUILD_APP_FRAMEWORK 0) @@ -27,6 +27,12 @@ set(TEST_SOURCES # Create test executable add_executable(mem-alloc-test ${TEST_SOURCES}) +# Add include directories for mem-alloc internals +target_include_directories(mem-alloc-test PRIVATE + ${WAMR_ROOT_DIR}/core/shared/mem-alloc + ${WAMR_ROOT_DIR}/core/shared/mem-alloc/ems +) + # Link dependencies target_link_libraries(mem-alloc-test cmocka::cmocka m) diff --git a/tests/unit/mem-alloc/mem_alloc_test.c b/tests/unit/mem-alloc/mem_alloc_test.c new file mode 100644 index 0000000000..9e9fc321cd --- /dev/null +++ b/tests/unit/mem-alloc/mem_alloc_test.c @@ -0,0 +1,35 @@ +/* + * Copyright (C) 2019 Intel Corporation. All rights reserved. 
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + */ + +#include +#include +#include +#include +#include +#include + +#if WAMR_BUILD_TEST != 1 + #error "WAMR_BUILD_TEST must be defined as 1" +#endif + +#include "mem_alloc.h" +#include "ems_gc_internal.h" + +/* Test helper: Check if pointer is aligned */ +static inline bool +is_aligned(void *ptr, size_t alignment) +{ + return ((uintptr_t)ptr % alignment) == 0; +} + +/* Test helper: Check if allocation is aligned (has magic value) */ +static inline bool +is_aligned_allocation(gc_object_t obj) +{ + uint32_t *magic_ptr = (uint32_t *)((char *)obj - 4); + return ((*magic_ptr & ALIGNED_ALLOC_MAGIC_MASK) == ALIGNED_ALLOC_MAGIC_VALUE); +} + +/* Tests will be added incrementally */ diff --git a/tests/unit/mem-alloc/test_runner.c b/tests/unit/mem-alloc/test_runner.c new file mode 100644 index 0000000000..4713cd3943 --- /dev/null +++ b/tests/unit/mem-alloc/test_runner.c @@ -0,0 +1,23 @@ +/* + * Copyright (C) 2019 Intel Corporation. All rights reserved. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + */ + +#include +#include +#include +#include +#include + +/* Include test implementations */ +#include "mem_alloc_test.c" + +int +main(void) +{ + const struct CMUnitTest tests[] = { + /* Tests will be added incrementally */ + }; + + return cmocka_run_group_tests(tests, NULL, NULL); +} From 74589ee986f646434f4dfebc7544be45236946c9 Mon Sep 17 00:00:00 2001 From: "liang.he@intel.com" Date: Thu, 19 Mar 2026 21:37:19 +0800 Subject: [PATCH 06/19] test(mem-alloc): add normal allocation regression test Verify existing mem_allocator_malloc still works correctly. 
--- tests/unit/mem-alloc/mem_alloc_test.c | 27 ++++++++++++++++++++++++++- tests/unit/mem-alloc/test_runner.c | 2 +- 2 files changed, 27 insertions(+), 2 deletions(-) diff --git a/tests/unit/mem-alloc/mem_alloc_test.c b/tests/unit/mem-alloc/mem_alloc_test.c index 9e9fc321cd..8c4fcde110 100644 --- a/tests/unit/mem-alloc/mem_alloc_test.c +++ b/tests/unit/mem-alloc/mem_alloc_test.c @@ -32,4 +32,29 @@ is_aligned_allocation(gc_object_t obj) return ((*magic_ptr & ALIGNED_ALLOC_MAGIC_MASK) == ALIGNED_ALLOC_MAGIC_VALUE); } -/* Tests will be added incrementally */ +/* Test: Normal allocation still works (regression) */ +static void +test_normal_alloc_basic(void **state) +{ + mem_allocator_t allocator; + char heap_buf[64 * 1024]; + void *ptr; + + allocator = mem_allocator_create(heap_buf, sizeof(heap_buf)); + assert_non_null(allocator); + + /* Normal allocation should still work */ + ptr = mem_allocator_malloc(allocator, 128); + assert_non_null(ptr); + + /* Should be 8-byte aligned */ + assert_true(is_aligned(ptr, 8)); + + /* Should NOT be marked as aligned allocation */ + assert_false(is_aligned_allocation(ptr)); + + /* Free should work */ + mem_allocator_free(allocator, ptr); + + mem_allocator_destroy(allocator); +} diff --git a/tests/unit/mem-alloc/test_runner.c b/tests/unit/mem-alloc/test_runner.c index 4713cd3943..33ad23fceb 100644 --- a/tests/unit/mem-alloc/test_runner.c +++ b/tests/unit/mem-alloc/test_runner.c @@ -16,7 +16,7 @@ int main(void) { const struct CMUnitTest tests[] = { - /* Tests will be added incrementally */ + cmocka_unit_test(test_normal_alloc_basic), }; return cmocka_run_group_tests(tests, NULL, NULL); From a5230be48381e275bedfbcf548397997070a6cd0 Mon Sep 17 00:00:00 2001 From: "liang.he@intel.com" Date: Fri, 20 Mar 2026 11:24:23 +0800 Subject: [PATCH 07/19] feat(mem-alloc): implement gc_alloc_vo_aligned Add aligned allocation with over-allocation and metadata storage. Supports alignments from 8 bytes to page size. 
Co-Authored-By: Claude Sonnet 4.5 --- core/shared/mem-alloc/ems/ems_alloc.c | 117 ++++++++++++++++++++ core/shared/mem-alloc/ems/ems_gc.h | 10 ++ core/shared/mem-alloc/ems/ems_gc_internal.h | 5 + core/shared/mem-alloc/mem_alloc.c | 18 +++ 4 files changed, 150 insertions(+) diff --git a/core/shared/mem-alloc/ems/ems_alloc.c b/core/shared/mem-alloc/ems/ems_alloc.c index f8476f9d84..751f3012c9 100644 --- a/core/shared/mem-alloc/ems/ems_alloc.c +++ b/core/shared/mem-alloc/ems/ems_alloc.c @@ -629,6 +629,123 @@ gc_alloc_vo_internal(void *vheap, gc_size_t size, const char *file, int line) return ret; } +#if BH_ENABLE_GC_VERIFY == 0 +gc_object_t +gc_alloc_vo_aligned(void *vheap, gc_size_t size, gc_size_t alignment) +#else +gc_object_t +gc_alloc_vo_aligned_internal(void *vheap, gc_size_t size, gc_size_t alignment, + const char *file, int line) +#endif +{ + gc_heap_t *heap = (gc_heap_t *)vheap; + hmu_t *hmu = NULL; + gc_object_t ret = NULL; + gc_size_t tot_size, tot_size_unaligned; + gc_uint8 *base_obj; + uintptr_t aligned_addr; + uint32_t offset, alignment_log2; + uint32_t max_alignment; + + /* Get system page size for maximum alignment check */ + max_alignment = (uint32_t)os_getpagesize(); + + /* Validation */ + if ((alignment & (alignment - 1)) != 0) { + /* Not power of 2 */ + return NULL; + } + + if (alignment < GC_MIN_ALIGNMENT) { + alignment = GC_MIN_ALIGNMENT; + } + + if (alignment > max_alignment) { + /* Exceeds page size */ + return NULL; + } + + if (size % alignment != 0) { + /* POSIX requirement: size must be multiple of alignment */ + return NULL; + } + + if (size > SIZE_MAX - alignment - HMU_SIZE - OBJ_PREFIX_SIZE + - OBJ_SUFFIX_SIZE - 8) { + /* Would overflow */ + return NULL; + } + +#if BH_ENABLE_GC_CORRUPTION_CHECK != 0 + if (heap->is_heap_corrupted) { + LOG_ERROR("[GC_ERROR]Heap is corrupted, allocate memory failed.\n"); + return NULL; + } +#endif + + /* Calculate total size needed */ + tot_size_unaligned = HMU_SIZE + OBJ_PREFIX_SIZE + size + + 
OBJ_SUFFIX_SIZE + alignment - 1 + 8; + tot_size = GC_ALIGN_8(tot_size_unaligned); + + if (tot_size < size) { + /* Integer overflow */ + return NULL; + } + + LOCK_HEAP(heap); + + hmu = alloc_hmu_ex(heap, tot_size); + if (!hmu) + goto finish; + + bh_assert(hmu_get_size(hmu) >= tot_size); + tot_size = hmu_get_size(hmu); + +#if GC_STAT_DATA != 0 + heap->total_size_allocated += tot_size; +#endif + + /* Get base object pointer */ + base_obj = (gc_uint8 *)hmu + HMU_SIZE + OBJ_PREFIX_SIZE; + + /* Find next aligned address, leaving 8 bytes for metadata */ + aligned_addr = (((uintptr_t)base_obj + 8 + alignment - 1) + & ~(uintptr_t)(alignment - 1)); + ret = (gc_object_t)aligned_addr; + + /* Verify we have enough space */ + bh_assert((gc_uint8 *)ret + size + OBJ_SUFFIX_SIZE + <= (gc_uint8 *)hmu + tot_size); + + /* Calculate offset from HMU to returned pointer */ + offset = (uint32_t)((char *)ret - (char *)hmu); + + /* Calculate log2 of alignment for magic value */ + alignment_log2 = 0; + while ((1U << alignment_log2) < alignment) { + alignment_log2++; + } + + /* Store offset 8 bytes before returned pointer */ + *((uint32_t *)((char *)ret - 8)) = offset; + + /* Store magic with encoded alignment */ + *((uint32_t *)((char *)ret - 4)) = ALIGNED_ALLOC_MAGIC_VALUE | alignment_log2; + + /* Initialize HMU */ + hmu_set_ut(hmu, HMU_VO); + hmu_unfree_vo(hmu); + +#if BH_ENABLE_GC_VERIFY != 0 + hmu_init_prefix_and_suffix(hmu, tot_size, file, line); +#endif + +finish: + UNLOCK_HEAP(heap); + return ret; +} + #if BH_ENABLE_GC_VERIFY == 0 gc_object_t gc_realloc_vo(void *vheap, void *ptr, gc_size_t size) diff --git a/core/shared/mem-alloc/ems/ems_gc.h b/core/shared/mem-alloc/ems/ems_gc.h index 9913ca2b6a..f42bdb07a3 100644 --- a/core/shared/mem-alloc/ems/ems_gc.h +++ b/core/shared/mem-alloc/ems/ems_gc.h @@ -193,6 +193,9 @@ gc_alloc_vo(void *heap, gc_size_t size); gc_object_t gc_realloc_vo(void *heap, void *ptr, gc_size_t size); +gc_object_t +gc_alloc_vo_aligned(void *heap, gc_size_t size, 
gc_size_t alignment); + int gc_free_vo(void *heap, gc_object_t obj); @@ -213,6 +216,10 @@ gc_object_t gc_realloc_vo_internal(void *heap, void *ptr, gc_size_t size, const char *file, int line); +gc_object_t +gc_alloc_vo_aligned_internal(void *heap, gc_size_t size, gc_size_t alignment, + const char *file, int line); + int gc_free_vo_internal(void *heap, gc_object_t obj, const char *file, int line); @@ -231,6 +238,9 @@ gc_free_wo_internal(void *vheap, void *ptr, const char *file, int line); #define gc_realloc_vo(heap, ptr, size) \ gc_realloc_vo_internal(heap, ptr, size, __FILE__, __LINE__) +#define gc_alloc_vo_aligned(heap, size, alignment) \ + gc_alloc_vo_aligned_internal(heap, size, alignment, __FILE__, __LINE__) + #define gc_free_vo(heap, obj) \ gc_free_vo_internal(heap, obj, __FILE__, __LINE__) diff --git a/core/shared/mem-alloc/ems/ems_gc_internal.h b/core/shared/mem-alloc/ems/ems_gc_internal.h index dfcd096c73..05a26e2d76 100644 --- a/core/shared/mem-alloc/ems/ems_gc_internal.h +++ b/core/shared/mem-alloc/ems/ems_gc_internal.h @@ -90,6 +90,11 @@ hmu_verify(void *vheap, hmu_t *hmu); #define GC_ALIGN_8(s) (((uint32)(s) + 7) & (uint32)~7) +/* Minimum alignment for allocations */ +#ifndef GC_MIN_ALIGNMENT +#define GC_MIN_ALIGNMENT 8 +#endif + #define GC_SMALLEST_SIZE \ GC_ALIGN_8(HMU_SIZE + OBJ_PREFIX_SIZE + OBJ_SUFFIX_SIZE + 8) #define GC_GET_REAL_SIZE(x) \ diff --git a/core/shared/mem-alloc/mem_alloc.c b/core/shared/mem-alloc/mem_alloc.c index df1a4de4cf..5cb0f4ec00 100644 --- a/core/shared/mem-alloc/mem_alloc.c +++ b/core/shared/mem-alloc/mem_alloc.c @@ -57,6 +57,24 @@ mem_allocator_free(mem_allocator_t allocator, void *ptr) gc_free_vo((gc_handle_t)allocator, ptr); } +#if BH_ENABLE_GC_VERIFY == 0 +void * +mem_allocator_malloc_aligned(mem_allocator_t allocator, uint32_t size, + uint32_t alignment) +{ + return gc_alloc_vo_aligned((gc_handle_t)allocator, size, alignment); +} +#else +void * +mem_allocator_malloc_aligned_internal(mem_allocator_t allocator, + uint32_t 
size, uint32_t alignment, + const char *file, int line) +{ + return gc_alloc_vo_aligned_internal((gc_handle_t)allocator, size, alignment, + file, line); +} +#endif + #if WASM_ENABLE_GC != 0 void * mem_allocator_malloc_with_gc(mem_allocator_t allocator, uint32_t size) From 24952cdd6d6d8a6e7e32479f968bb828609534b0 Mon Sep 17 00:00:00 2001 From: "liang.he@intel.com" Date: Fri, 20 Mar 2026 11:26:04 +0800 Subject: [PATCH 08/19] test(mem-alloc): add aligned allocation validation test Verify aligned allocations work for powers of 2. Co-Authored-By: Claude Sonnet 4.5 --- tests/unit/mem-alloc/mem_alloc_test.c | 33 +++++++++++++++++++++++++++ tests/unit/mem-alloc/test_runner.c | 1 + 2 files changed, 34 insertions(+) diff --git a/tests/unit/mem-alloc/mem_alloc_test.c b/tests/unit/mem-alloc/mem_alloc_test.c index 8c4fcde110..f410633bb4 100644 --- a/tests/unit/mem-alloc/mem_alloc_test.c +++ b/tests/unit/mem-alloc/mem_alloc_test.c @@ -58,3 +58,36 @@ test_normal_alloc_basic(void **state) mem_allocator_destroy(allocator); } + +/* Test: Valid alignment powers of 2 */ +static void +test_aligned_alloc_valid_alignments(void **state) +{ + mem_allocator_t allocator; + char heap_buf[128 * 1024]; + void *ptr; + + allocator = mem_allocator_create(heap_buf, sizeof(heap_buf)); + assert_non_null(allocator); + + /* Test each valid alignment */ + int alignments[] = {8, 16, 32, 64, 128, 256, 512, 1024}; + for (int i = 0; i < sizeof(alignments) / sizeof(alignments[0]); i++) { + int align = alignments[i]; + + /* Allocate with size = multiple of alignment */ + ptr = mem_allocator_malloc_aligned(allocator, align * 2, align); + assert_non_null(ptr); + + /* Verify alignment */ + assert_true(is_aligned(ptr, align)); + + /* Verify marked as aligned */ + assert_true(is_aligned_allocation(ptr)); + + /* Free */ + mem_allocator_free(allocator, ptr); + } + + mem_allocator_destroy(allocator); +} diff --git a/tests/unit/mem-alloc/test_runner.c b/tests/unit/mem-alloc/test_runner.c index 33ad23fceb..dffe195892 
100644 --- a/tests/unit/mem-alloc/test_runner.c +++ b/tests/unit/mem-alloc/test_runner.c @@ -17,6 +17,7 @@ main(void) { const struct CMUnitTest tests[] = { cmocka_unit_test(test_normal_alloc_basic), + cmocka_unit_test(test_aligned_alloc_valid_alignments), }; return cmocka_run_group_tests(tests, NULL, NULL); From 9cb8bb6620788502d9e73f1b6d87378dbc7150c4 Mon Sep 17 00:00:00 2001 From: "liang.he@intel.com" Date: Fri, 20 Mar 2026 11:30:46 +0800 Subject: [PATCH 09/19] feat(mem-alloc): add realloc rejection for aligned allocs gc_realloc_vo now detects and rejects aligned allocations per POSIX behavior. Co-Authored-By: Claude Sonnet 4.5 --- core/shared/mem-alloc/ems/ems_alloc.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/core/shared/mem-alloc/ems/ems_alloc.c b/core/shared/mem-alloc/ems/ems_alloc.c index 751f3012c9..8442f220a4 100644 --- a/core/shared/mem-alloc/ems/ems_alloc.c +++ b/core/shared/mem-alloc/ems/ems_alloc.c @@ -778,6 +778,16 @@ gc_realloc_vo_internal(void *vheap, void *ptr, gc_size_t size, const char *file, } #endif + /* Check if this is an aligned allocation - not supported */ + if (obj_old) { + uint32_t *magic_ptr = (uint32_t *)((char *)obj_old - 4); + if ((*magic_ptr & ALIGNED_ALLOC_MAGIC_MASK) == ALIGNED_ALLOC_MAGIC_VALUE) { + LOG_ERROR("[GC_ERROR]gc_realloc_vo does not support aligned " + "allocations\n"); + return NULL; + } + } + if (obj_old) { hmu_old = obj_to_hmu(obj_old); tot_size_old = hmu_get_size(hmu_old); From 4f17f3aadf953576e462cb08e59addd7995132e8 Mon Sep 17 00:00:00 2001 From: "liang.he@intel.com" Date: Fri, 20 Mar 2026 11:37:15 +0800 Subject: [PATCH 10/19] test(mem-alloc): add realloc rejection tests Verify realloc correctly rejects aligned allocations and still works for normal allocations. 
Co-Authored-By: Claude Sonnet 4.5 --- tests/unit/mem-alloc/mem_alloc_test.c | 57 +++++++++++++++++++++++++++ tests/unit/mem-alloc/test_runner.c | 2 + 2 files changed, 59 insertions(+) diff --git a/tests/unit/mem-alloc/mem_alloc_test.c b/tests/unit/mem-alloc/mem_alloc_test.c index f410633bb4..bfd7b8be24 100644 --- a/tests/unit/mem-alloc/mem_alloc_test.c +++ b/tests/unit/mem-alloc/mem_alloc_test.c @@ -91,3 +91,60 @@ test_aligned_alloc_valid_alignments(void **state) mem_allocator_destroy(allocator); } + +/* Test: Realloc rejects aligned allocations */ +static void +test_realloc_rejects_aligned(void **state) +{ + mem_allocator_t allocator; + char heap_buf[64 * 1024]; + void *ptr, *new_ptr; + + allocator = mem_allocator_create(heap_buf, sizeof(heap_buf)); + assert_non_null(allocator); + + /* Allocate aligned */ + ptr = mem_allocator_malloc_aligned(allocator, 128, 64); + assert_non_null(ptr); + assert_true(is_aligned_allocation(ptr)); + + /* Realloc should reject aligned allocation */ + new_ptr = mem_allocator_realloc(allocator, ptr, 256); + assert_null(new_ptr); + + /* Original pointer should still be valid - free it */ + mem_allocator_free(allocator, ptr); + + mem_allocator_destroy(allocator); +} + +/* Test: Realloc still works for normal allocations */ +static void +test_normal_realloc_works(void **state) +{ + mem_allocator_t allocator; + char heap_buf[64 * 1024]; + void *ptr, *new_ptr; + + allocator = mem_allocator_create(heap_buf, sizeof(heap_buf)); + assert_non_null(allocator); + + /* Allocate normal */ + ptr = mem_allocator_malloc(allocator, 128); + assert_non_null(ptr); + + /* Write some data */ + memset(ptr, 0xAB, 128); + + /* Realloc should work */ + new_ptr = mem_allocator_realloc(allocator, ptr, 256); + assert_non_null(new_ptr); + + /* Data should be preserved */ + for (int i = 0; i < 128; i++) { + assert_int_equal(((unsigned char *)new_ptr)[i], 0xAB); + } + + mem_allocator_free(allocator, new_ptr); + mem_allocator_destroy(allocator); +} diff --git 
a/tests/unit/mem-alloc/test_runner.c b/tests/unit/mem-alloc/test_runner.c index dffe195892..64e2de22d3 100644 --- a/tests/unit/mem-alloc/test_runner.c +++ b/tests/unit/mem-alloc/test_runner.c @@ -18,6 +18,8 @@ main(void) const struct CMUnitTest tests[] = { cmocka_unit_test(test_normal_alloc_basic), cmocka_unit_test(test_aligned_alloc_valid_alignments), + cmocka_unit_test(test_realloc_rejects_aligned), + cmocka_unit_test(test_normal_realloc_works), }; return cmocka_run_group_tests(tests, NULL, NULL); From 50477a421f634654edc795a8dca4035387c792e2 Mon Sep 17 00:00:00 2001 From: "liang.he@intel.com" Date: Fri, 20 Mar 2026 11:45:03 +0800 Subject: [PATCH 11/19] test(mem-alloc): add alignment validation tests Verify invalid alignments and size requirements are enforced. Fix alignment validation to reject zero. Co-Authored-By: Claude Sonnet 4.5 --- core/shared/mem-alloc/ems/ems_alloc.c | 4 +- tests/unit/mem-alloc/mem_alloc_test.c | 60 +++++++++++++++++++++++++++ tests/unit/mem-alloc/test_runner.c | 2 + 3 files changed, 64 insertions(+), 2 deletions(-) diff --git a/core/shared/mem-alloc/ems/ems_alloc.c b/core/shared/mem-alloc/ems/ems_alloc.c index 8442f220a4..aacc29c4a8 100644 --- a/core/shared/mem-alloc/ems/ems_alloc.c +++ b/core/shared/mem-alloc/ems/ems_alloc.c @@ -651,8 +651,8 @@ gc_alloc_vo_aligned_internal(void *vheap, gc_size_t size, gc_size_t alignment, max_alignment = (uint32_t)os_getpagesize(); /* Validation */ - if ((alignment & (alignment - 1)) != 0) { - /* Not power of 2 */ + if (alignment == 0 || (alignment & (alignment - 1)) != 0) { + /* Zero or not power of 2 */ return NULL; } diff --git a/tests/unit/mem-alloc/mem_alloc_test.c b/tests/unit/mem-alloc/mem_alloc_test.c index bfd7b8be24..ddb5222c8b 100644 --- a/tests/unit/mem-alloc/mem_alloc_test.c +++ b/tests/unit/mem-alloc/mem_alloc_test.c @@ -148,3 +148,63 @@ test_normal_realloc_works(void **state) mem_allocator_free(allocator, new_ptr); mem_allocator_destroy(allocator); } + +/* Test: Invalid alignments (not 
power of 2 or zero) */ +static void +test_aligned_alloc_invalid_not_power_of_2(void **state) +{ + mem_allocator_t allocator; + char heap_buf[64 * 1024]; + void *ptr; + + allocator = mem_allocator_create(heap_buf, sizeof(heap_buf)); + assert_non_null(allocator); + + /* These should all fail (zero or not power of 2) */ + int invalid_alignments[] = {0, 3, 5, 7, 9, 15, 17, 100}; + for (int i = 0; i < sizeof(invalid_alignments) / sizeof(invalid_alignments[0]); i++) { + ptr = mem_allocator_malloc_aligned(allocator, 128, invalid_alignments[i]); + assert_null(ptr); + } + + /* Small powers of 2 should succeed (adjusted to GC_MIN_ALIGNMENT) */ + ptr = mem_allocator_malloc_aligned(allocator, 8, 1); + assert_non_null(ptr); + mem_allocator_free(allocator, ptr); + + ptr = mem_allocator_malloc_aligned(allocator, 8, 2); + assert_non_null(ptr); + mem_allocator_free(allocator, ptr); + + ptr = mem_allocator_malloc_aligned(allocator, 8, 4); + assert_non_null(ptr); + mem_allocator_free(allocator, ptr); + + mem_allocator_destroy(allocator); +} + +/* Test: Size must be multiple of alignment */ +static void +test_aligned_alloc_size_not_multiple(void **state) +{ + mem_allocator_t allocator; + char heap_buf[64 * 1024]; + void *ptr; + + allocator = mem_allocator_create(heap_buf, sizeof(heap_buf)); + assert_non_null(allocator); + + /* Size not multiple of alignment - should fail */ + ptr = mem_allocator_malloc_aligned(allocator, 100, 64); + assert_null(ptr); + + ptr = mem_allocator_malloc_aligned(allocator, 65, 64); + assert_null(ptr); + + /* Size is multiple - should succeed */ + ptr = mem_allocator_malloc_aligned(allocator, 128, 64); + assert_non_null(ptr); + mem_allocator_free(allocator, ptr); + + mem_allocator_destroy(allocator); +} diff --git a/tests/unit/mem-alloc/test_runner.c b/tests/unit/mem-alloc/test_runner.c index 64e2de22d3..0d583f88e0 100644 --- a/tests/unit/mem-alloc/test_runner.c +++ b/tests/unit/mem-alloc/test_runner.c @@ -20,6 +20,8 @@ main(void) 
cmocka_unit_test(test_aligned_alloc_valid_alignments), cmocka_unit_test(test_realloc_rejects_aligned), cmocka_unit_test(test_normal_realloc_works), + cmocka_unit_test(test_aligned_alloc_invalid_not_power_of_2), + cmocka_unit_test(test_aligned_alloc_size_not_multiple), }; return cmocka_run_group_tests(tests, NULL, NULL); From c1294f632a7a73e5ddbd2e5022f81b8b9cfa4a2d Mon Sep 17 00:00:00 2001 From: "liang.he@intel.com" Date: Fri, 20 Mar 2026 11:47:44 +0800 Subject: [PATCH 12/19] test(mem-alloc): add mixed allocation tests Verify normal and aligned allocations can coexist and obj_to_hmu works correctly for both. Co-Authored-By: Claude Sonnet 4.5 --- tests/unit/mem-alloc/mem_alloc_test.c | 80 +++++++++++++++++++++++++++ tests/unit/mem-alloc/test_runner.c | 2 + 2 files changed, 82 insertions(+) diff --git a/tests/unit/mem-alloc/mem_alloc_test.c b/tests/unit/mem-alloc/mem_alloc_test.c index ddb5222c8b..a3d87506f7 100644 --- a/tests/unit/mem-alloc/mem_alloc_test.c +++ b/tests/unit/mem-alloc/mem_alloc_test.c @@ -208,3 +208,83 @@ test_aligned_alloc_size_not_multiple(void **state) mem_allocator_destroy(allocator); } + +/* Test: Mixed normal and aligned allocations */ +static void +test_mixed_alloc_interleaved(void **state) +{ + mem_allocator_t allocator; + char heap_buf[128 * 1024]; + void *normal1, *aligned1, *normal2, *aligned2; + + allocator = mem_allocator_create(heap_buf, sizeof(heap_buf)); + assert_non_null(allocator); + + /* Allocate: normal -> aligned -> normal -> aligned */ + normal1 = mem_allocator_malloc(allocator, 64); + assert_non_null(normal1); + assert_false(is_aligned_allocation(normal1)); + + aligned1 = mem_allocator_malloc_aligned(allocator, 128, 64); + assert_non_null(aligned1); + assert_true(is_aligned_allocation(aligned1)); + assert_true(is_aligned(aligned1, 64)); + + normal2 = mem_allocator_malloc(allocator, 96); + assert_non_null(normal2); + assert_false(is_aligned_allocation(normal2)); + + aligned2 = mem_allocator_malloc_aligned(allocator, 256, 128); + 
assert_non_null(aligned2); + assert_true(is_aligned_allocation(aligned2)); + assert_true(is_aligned(aligned2, 128)); + + /* Free in mixed order */ + mem_allocator_free(allocator, normal1); + mem_allocator_free(allocator, aligned2); + mem_allocator_free(allocator, normal2); + mem_allocator_free(allocator, aligned1); + + mem_allocator_destroy(allocator); +} + +/* Test: obj_to_hmu works correctly for both types */ +static void +test_mixed_obj_to_hmu(void **state) +{ + mem_allocator_t allocator; + char heap_buf[64 * 1024]; + void *normal, *aligned; + hmu_t *hmu_normal, *hmu_aligned; + + allocator = mem_allocator_create(heap_buf, sizeof(heap_buf)); + assert_non_null(allocator); + + /* Allocate both types */ + normal = mem_allocator_malloc(allocator, 128); + assert_non_null(normal); + + aligned = mem_allocator_malloc_aligned(allocator, 128, 64); + assert_non_null(aligned); + + /* Get HMU pointers */ + hmu_normal = obj_to_hmu(normal); + hmu_aligned = obj_to_hmu(aligned); + + assert_non_null(hmu_normal); + assert_non_null(hmu_aligned); + + /* Both should have HMU_VO type */ + assert_int_equal(hmu_get_ut(hmu_normal), HMU_VO); + assert_int_equal(hmu_get_ut(hmu_aligned), HMU_VO); + + /* Sizes should be reasonable */ + assert_true(hmu_get_size(hmu_normal) >= 128); + assert_true(hmu_get_size(hmu_aligned) >= 128); + + /* Free both */ + mem_allocator_free(allocator, normal); + mem_allocator_free(allocator, aligned); + + mem_allocator_destroy(allocator); +} diff --git a/tests/unit/mem-alloc/test_runner.c b/tests/unit/mem-alloc/test_runner.c index 0d583f88e0..300f583955 100644 --- a/tests/unit/mem-alloc/test_runner.c +++ b/tests/unit/mem-alloc/test_runner.c @@ -22,6 +22,8 @@ main(void) cmocka_unit_test(test_normal_realloc_works), cmocka_unit_test(test_aligned_alloc_invalid_not_power_of_2), cmocka_unit_test(test_aligned_alloc_size_not_multiple), + cmocka_unit_test(test_mixed_alloc_interleaved), + cmocka_unit_test(test_mixed_obj_to_hmu), }; return cmocka_run_group_tests(tests, NULL, 
NULL); From 57ccefd1e8b3eb53a973a859d6c16f33e3325946 Mon Sep 17 00:00:00 2001 From: "liang.he@intel.com" Date: Fri, 20 Mar 2026 12:26:27 +0800 Subject: [PATCH 13/19] test(mem-alloc): add stress tests Test many allocations and mixed allocation patterns. Co-Authored-By: Claude Sonnet 4.5 --- tests/unit/mem-alloc/mem_alloc_test.c | 73 +++++++++++++++++++++++++++ tests/unit/mem-alloc/test_runner.c | 2 + 2 files changed, 75 insertions(+) diff --git a/tests/unit/mem-alloc/mem_alloc_test.c b/tests/unit/mem-alloc/mem_alloc_test.c index a3d87506f7..d3a1b75d40 100644 --- a/tests/unit/mem-alloc/mem_alloc_test.c +++ b/tests/unit/mem-alloc/mem_alloc_test.c @@ -288,3 +288,76 @@ test_mixed_obj_to_hmu(void **state) mem_allocator_destroy(allocator); } + +/* Test: Many aligned allocations */ +static void +test_aligned_alloc_many(void **state) +{ + mem_allocator_t allocator; + char heap_buf[512 * 1024]; + void *ptrs[100]; + int count = 0; + + allocator = mem_allocator_create(heap_buf, sizeof(heap_buf)); + assert_non_null(allocator); + + /* Allocate as many as possible */ + for (int i = 0; i < 100; i++) { + int align = (i % 4 == 0) ? 
64 : 32; + ptrs[i] = mem_allocator_malloc_aligned(allocator, align * 2, align); + if (ptrs[i]) { + assert_true(is_aligned(ptrs[i], align)); + count++; + } else { + break; + } + } + + assert_true(count > 10); /* At least some should succeed */ + + /* Free all */ + for (int i = 0; i < count; i++) { + mem_allocator_free(allocator, ptrs[i]); + } + + mem_allocator_destroy(allocator); +} + +/* Test: Many mixed allocations */ +static void +test_mixed_alloc_many(void **state) +{ + mem_allocator_t allocator; + char heap_buf[512 * 1024]; + void *ptrs[200]; + int count = 0; + + allocator = mem_allocator_create(heap_buf, sizeof(heap_buf)); + assert_non_null(allocator); + + /* Alternate normal and aligned */ + for (int i = 0; i < 200; i++) { + if (i % 2 == 0) { + /* Normal allocation */ + ptrs[i] = mem_allocator_malloc(allocator, 64); + } else { + /* Aligned allocation */ + ptrs[i] = mem_allocator_malloc_aligned(allocator, 64, 32); + } + + if (ptrs[i]) { + count++; + } else { + break; + } + } + + assert_true(count > 20); + + /* Free in reverse order */ + for (int i = count - 1; i >= 0; i--) { + mem_allocator_free(allocator, ptrs[i]); + } + + mem_allocator_destroy(allocator); +} diff --git a/tests/unit/mem-alloc/test_runner.c b/tests/unit/mem-alloc/test_runner.c index 300f583955..b1ec1ddffe 100644 --- a/tests/unit/mem-alloc/test_runner.c +++ b/tests/unit/mem-alloc/test_runner.c @@ -24,6 +24,8 @@ main(void) cmocka_unit_test(test_aligned_alloc_size_not_multiple), cmocka_unit_test(test_mixed_alloc_interleaved), cmocka_unit_test(test_mixed_obj_to_hmu), + cmocka_unit_test(test_aligned_alloc_many), + cmocka_unit_test(test_mixed_alloc_many), }; return cmocka_run_group_tests(tests, NULL, NULL); From 2b0db6e93c1a2dd3b6c9540c9532a100665a6d53 Mon Sep 17 00:00:00 2001 From: "liang.he@intel.com" Date: Fri, 20 Mar 2026 15:47:40 +0800 Subject: [PATCH 14/19] feat(mem-alloc): enhance memory allocation tests and add GC test executable --- .gitignore | 2 ++ tests/unit/mem-alloc/CMakeLists.txt | 19 
+++++++++++++++++++ tests/unit/mem-alloc/mem_alloc_test.c | 7 +++++-- 3 files changed, 26 insertions(+), 2 deletions(-) diff --git a/.gitignore b/.gitignore index 1d14dff9aa..92909c9f84 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,5 @@ +.* +!.gitignore .cache .clangd diff --git a/tests/unit/mem-alloc/CMakeLists.txt b/tests/unit/mem-alloc/CMakeLists.txt index 745fd25dea..9431c11a0f 100644 --- a/tests/unit/mem-alloc/CMakeLists.txt +++ b/tests/unit/mem-alloc/CMakeLists.txt @@ -24,7 +24,11 @@ set(TEST_SOURCES ${WAMR_RUNTIME_LIB_SOURCE} ) +# # Create test executable +# + +## Normal test executable add_executable(mem-alloc-test ${TEST_SOURCES}) # Add include directories for mem-alloc internals @@ -33,9 +37,24 @@ target_include_directories(mem-alloc-test PRIVATE ${WAMR_ROOT_DIR}/core/shared/mem-alloc/ems ) +## GC test executable +add_executable(mem-alloc-gc-test ${TEST_SOURCES}) + +target_include_directories(mem-alloc-gc-test PRIVATE + ${WAMR_ROOT_DIR}/core/shared/mem-alloc + ${WAMR_ROOT_DIR}/core/shared/mem-alloc/ems +) + +target_compile_options(mem-alloc-gc-test PRIVATE -DWAMR_BUILD_GC=1 -DWAMR_BUILD_GC_VERIFY=1) + + # Link dependencies target_link_libraries(mem-alloc-test cmocka::cmocka m) +target_link_libraries(mem-alloc-gc-test cmocka::cmocka m) # Add to ctest add_test(NAME mem-alloc-test COMMAND mem-alloc-test) set_tests_properties(mem-alloc-test PROPERTIES TIMEOUT 60) + +add_test(NAME mem-alloc-gc-test COMMAND mem-alloc-gc-test) +set_tests_properties(mem-alloc-gc-test PROPERTIES TIMEOUT 60) diff --git a/tests/unit/mem-alloc/mem_alloc_test.c b/tests/unit/mem-alloc/mem_alloc_test.c index d3a1b75d40..8ef510759c 100644 --- a/tests/unit/mem-alloc/mem_alloc_test.c +++ b/tests/unit/mem-alloc/mem_alloc_test.c @@ -72,7 +72,8 @@ test_aligned_alloc_valid_alignments(void **state) /* Test each valid alignment */ int alignments[] = {8, 16, 32, 64, 128, 256, 512, 1024}; - for (int i = 0; i < sizeof(alignments) / sizeof(alignments[0]); i++) { + int num_alignments = 
sizeof(alignments) / sizeof(alignments[0]); + for (int i = 0; i < num_alignments; i++) { int align = alignments[i]; /* Allocate with size = multiple of alignment */ @@ -162,7 +163,9 @@ test_aligned_alloc_invalid_not_power_of_2(void **state) /* These should all fail (zero or not power of 2) */ int invalid_alignments[] = {0, 3, 5, 7, 9, 15, 17, 100}; - for (int i = 0; i < sizeof(invalid_alignments) / sizeof(invalid_alignments[0]); i++) { + int num_invalid = + sizeof(invalid_alignments) / sizeof(invalid_alignments[0]); + for (int i = 0; i < num_invalid; i++) { ptr = mem_allocator_malloc_aligned(allocator, 128, invalid_alignments[i]); assert_null(ptr); } From 125d303d34037e5cdc018cfeb669a4eae8faf574 Mon Sep 17 00:00:00 2001 From: "liang.he@intel.com" Date: Fri, 20 Mar 2026 17:21:43 +0800 Subject: [PATCH 15/19] feat(mem-alloc): enhance aligned allocation metadata and documentation --- core/shared/mem-alloc/ems/ems_gc_internal.h | 61 +++++++++++++++++++-- 1 file changed, 57 insertions(+), 4 deletions(-) diff --git a/core/shared/mem-alloc/ems/ems_gc_internal.h b/core/shared/mem-alloc/ems/ems_gc_internal.h index 05a26e2d76..fcde9c9996 100644 --- a/core/shared/mem-alloc/ems/ems_gc_internal.h +++ b/core/shared/mem-alloc/ems/ems_gc_internal.h @@ -92,7 +92,7 @@ hmu_verify(void *vheap, hmu_t *hmu); /* Minimum alignment for allocations */ #ifndef GC_MIN_ALIGNMENT -#define GC_MIN_ALIGNMENT 8 +#define GC_MIN_ALIGNMENT 8 #endif #define GC_SMALLEST_SIZE \ @@ -101,9 +101,22 @@ hmu_verify(void *vheap, hmu_t *hmu); GC_ALIGN_8(HMU_SIZE + OBJ_PREFIX_SIZE + OBJ_SUFFIX_SIZE \ + (((x) > 8) ? 
(x) : 8)) -/* Magic value for aligned allocation detection */ -#define ALIGNED_ALLOC_MAGIC_MASK 0xFFFF0000 -#define ALIGNED_ALLOC_MAGIC_VALUE 0xA11C0000 +/* + * Aligned allocation uses metadata in the header to store the offset + * + * ### Memory Layout + * + * Aligned allocations use over-allocation with metadata storage: + * + * ``` + * [HMU][PREFIX][...padding...][METADATA][ALIGNED_OBJ][SUFFIX] + * ^8 bytes ^returned pointer (aligned) + * ``` + * + * Magic value for aligned allocation detection + */ +#define ALIGNED_ALLOC_MAGIC_MASK 0xFFFF0000 +#define ALIGNED_ALLOC_MAGIC_VALUE 0xA11C0000 /** * hmu bit operation @@ -125,9 +138,49 @@ hmu_verify(void *vheap, hmu_t *hmu); (((v) & (((((uint32)1 << size) - 1) << offset))) >> offset) /* clang-format on */ +/* clang-format off */ /** * gc object layout definition + * + * #### Header Bit Layout + * + * ``` + * 31 30 29 28 27 0 + * ┌──┬──┬──┬──┬───────────────────────────────────────────────────┐ + * │UT│UT│ P│ *│ Size or Type-Specific Data │ + * └──┴──┴──┴──┴───────────────────────────────────────────────────┘ + * ``` + * + * #### Bit Fields Breakdown + * + * | Bits | Field | Description | + * | --------- | ----------------------- | -------------------------------------------- | + * | **31-30** | **UT** (Usage Type) | 2 bits for chunk type | + * | **29** | **P** (Previous In Use) | 1 bit indicating if previous chunk is in use | + * | **28** | **Type-specific** | Meaning depends on UT field | + * | **27-0** | **Type-specific** | Size or other data depending on UT | + * + * #### Memory Layout in Heap + * + * ``` + * ┌─────────────────────────────────────────────────────────────┐ + * │ HMU Header (4 bytes) │ + * ├─────────────────────────────────────────────────────────────┤ + * │ OBJ_PREFIX (if BH_ENABLE_GC_VERIFY) │ + * │ - file_name pointer │ + * │ - line_no │ + * │ - size │ + * │ - padding values (for corruption detection) │ + * ├─────────────────────────────────────────────────────────────┤ + * │ User Data (aligned to 
8 bytes) │ + * │ ... │ + * ├─────────────────────────────────────────────────────────────┤ + * │ OBJ_SUFFIX (if BH_ENABLE_GC_VERIFY) │ + * │ - padding values (for corruption detection) │ + * └─────────────────────────────────────────────────────────────┘ + * ``` */ +/* clang-format on */ #define HMU_SIZE (sizeof(hmu_t)) From ba0af6ce03089a90060ac074539efa0c1b895b8f Mon Sep 17 00:00:00 2001 From: "liang.he@intel.com" Date: Wed, 25 Mar 2026 14:02:14 +0800 Subject: [PATCH 16/19] feat(api): expose aligned allocation through wasm_runtime_aligned_alloc Add public API for aligned memory allocation, exposing the existing mem_allocator_malloc_aligned infrastructure through wasm_export.h. - Add wasm_runtime_aligned_alloc() API declaration with documentation - Implement internal helper wasm_runtime_aligned_alloc_internal() - Add public function with size/alignment validation - POOL mode only, returns NULL for other memory modes - Follows wasm_runtime_malloc() patterns for consistency Co-Authored-By: Claude Sonnet 4.5 --- core/iwasm/common/wasm_memory.c | 41 ++++++++++++++++++++++++++++++++ core/iwasm/include/wasm_export.h | 15 ++++++++++++ 2 files changed, 56 insertions(+) diff --git a/core/iwasm/common/wasm_memory.c b/core/iwasm/common/wasm_memory.c index 628a032370..0745f79144 100644 --- a/core/iwasm/common/wasm_memory.c +++ b/core/iwasm/common/wasm_memory.c @@ -1030,6 +1030,24 @@ wasm_runtime_free_internal(void *ptr) } } +static inline void * +wasm_runtime_aligned_alloc_internal(unsigned int size, unsigned int alignment) +{ + if (memory_mode == MEMORY_MODE_UNKNOWN) { + LOG_ERROR("wasm_runtime_aligned_alloc failed: memory hasn't been " + "initialized.\n"); + return NULL; + } + else if (memory_mode == MEMORY_MODE_POOL) { + return mem_allocator_malloc_aligned(pool_allocator, size, alignment); + } + else { + LOG_ERROR("wasm_runtime_aligned_alloc failed: only supported in POOL " + "memory mode.\n"); + return NULL; + } +} + void * wasm_runtime_malloc(unsigned int size) { @@ 
-1052,6 +1070,29 @@ wasm_runtime_malloc(unsigned int size) return wasm_runtime_malloc_internal(size); } +void * +wasm_runtime_aligned_alloc(unsigned int size, unsigned int alignment) +{ + if (size == 0) { + LOG_WARNING("warning: wasm_runtime_aligned_alloc with size zero\n"); + return NULL; + } + + if (alignment == 0) { + LOG_WARNING("warning: wasm_runtime_aligned_alloc with zero alignment\n"); + return NULL; + } + +#if WASM_ENABLE_FUZZ_TEST != 0 + if (size >= WASM_MEM_ALLOC_MAX_SIZE) { + LOG_WARNING("warning: wasm_runtime_aligned_alloc with too large size\n"); + return NULL; + } +#endif + + return wasm_runtime_aligned_alloc_internal(size, alignment); +} + void * wasm_runtime_realloc(void *ptr, unsigned int size) { diff --git a/core/iwasm/include/wasm_export.h b/core/iwasm/include/wasm_export.h index 86f7c22b17..690cd206ef 100644 --- a/core/iwasm/include/wasm_export.h +++ b/core/iwasm/include/wasm_export.h @@ -422,6 +422,21 @@ wasm_runtime_destroy(void); WASM_RUNTIME_API_EXTERN void * wasm_runtime_malloc(unsigned int size); +/** + * Allocate memory with specified alignment from runtime memory environment. + * This function mimics aligned_alloc() behavior in WebAssembly context. + * + * Note: Only supported in POOL memory mode. Other modes will return NULL. + * Note: Allocated memory cannot be reallocated with wasm_runtime_realloc(). 
+ * + * @param size bytes need to allocate (must be multiple of alignment) + * @param alignment alignment requirement (must be power of 2, >= 8, <= page size) + * + * @return the pointer to aligned memory allocated, or NULL on failure + */ +WASM_RUNTIME_API_EXTERN void * +wasm_runtime_aligned_alloc(unsigned int size, unsigned int alignment); + /** * Reallocate memory from runtime memory environment * From 76ee1d301317a5d966712a535bde738736dedc13 Mon Sep 17 00:00:00 2001 From: "liang.he@intel.com" Date: Wed, 25 Mar 2026 14:21:41 +0800 Subject: [PATCH 17/19] test(mem-alloc): add wasm_runtime_aligned_alloc API tests Add comprehensive tests for the new public aligned allocation API: - Valid allocation in POOL mode with alignment verification - Zero size/alignment rejection - Non-POOL mode returns NULL - Realloc rejection for aligned allocations - Multiple alignment values (8, 16, 32, 64, 128, 256) Tests cover all scenarios from the implementation plan including error handling and mode-specific behavior. 
Co-Authored-By: Claude Sonnet 4.5 --- tests/unit/mem-alloc/mem_alloc_test.c | 152 ++++++++++++++++++++++++++ tests/unit/mem-alloc/test_runner.c | 6 + 2 files changed, 158 insertions(+) diff --git a/tests/unit/mem-alloc/mem_alloc_test.c b/tests/unit/mem-alloc/mem_alloc_test.c index 8ef510759c..57caa84f9e 100644 --- a/tests/unit/mem-alloc/mem_alloc_test.c +++ b/tests/unit/mem-alloc/mem_alloc_test.c @@ -16,6 +16,7 @@ #include "mem_alloc.h" #include "ems_gc_internal.h" +#include "wasm_export.h" /* Test helper: Check if pointer is aligned */ static inline bool @@ -364,3 +365,154 @@ test_mixed_alloc_many(void **state) mem_allocator_destroy(allocator); } + +/* Test: wasm_runtime_aligned_alloc with valid inputs in POOL mode */ +static void +test_wasm_runtime_aligned_alloc_valid(void **state) +{ + RuntimeInitArgs init_args; + void *ptr; + + memset(&init_args, 0, sizeof(RuntimeInitArgs)); + init_args.mem_alloc_type = Alloc_With_Pool; + init_args.mem_alloc_option.pool.heap_buf = malloc(256 * 1024); + init_args.mem_alloc_option.pool.heap_size = 256 * 1024; + + assert_true(wasm_runtime_init()); + assert_true(wasm_runtime_full_init(&init_args)); + + /* Test valid aligned allocation */ + ptr = wasm_runtime_aligned_alloc(128, 64); + assert_non_null(ptr); + assert_true(is_aligned(ptr, 64)); + + /* Free should work */ + wasm_runtime_free(ptr); + + wasm_runtime_destroy(); + free(init_args.mem_alloc_option.pool.heap_buf); +} + +/* Test: wasm_runtime_aligned_alloc with zero size */ +static void +test_wasm_runtime_aligned_alloc_zero_size(void **state) +{ + RuntimeInitArgs init_args; + void *ptr; + + memset(&init_args, 0, sizeof(RuntimeInitArgs)); + init_args.mem_alloc_type = Alloc_With_Pool; + init_args.mem_alloc_option.pool.heap_buf = malloc(256 * 1024); + init_args.mem_alloc_option.pool.heap_size = 256 * 1024; + + assert_true(wasm_runtime_init()); + assert_true(wasm_runtime_full_init(&init_args)); + + /* Zero size should return NULL */ + ptr = wasm_runtime_aligned_alloc(0, 64); + 
assert_null(ptr); + + wasm_runtime_destroy(); + free(init_args.mem_alloc_option.pool.heap_buf); +} + +/* Test: wasm_runtime_aligned_alloc with zero alignment */ +static void +test_wasm_runtime_aligned_alloc_zero_alignment(void **state) +{ + RuntimeInitArgs init_args; + void *ptr; + + memset(&init_args, 0, sizeof(RuntimeInitArgs)); + init_args.mem_alloc_type = Alloc_With_Pool; + init_args.mem_alloc_option.pool.heap_buf = malloc(256 * 1024); + init_args.mem_alloc_option.pool.heap_size = 256 * 1024; + + assert_true(wasm_runtime_init()); + assert_true(wasm_runtime_full_init(&init_args)); + + /* Zero alignment should return NULL */ + ptr = wasm_runtime_aligned_alloc(128, 0); + assert_null(ptr); + + wasm_runtime_destroy(); + free(init_args.mem_alloc_option.pool.heap_buf); +} + +/* Test: wasm_runtime_aligned_alloc in SYSTEM_ALLOCATOR mode returns NULL */ +static void +test_wasm_runtime_aligned_alloc_system_mode(void **state) +{ + RuntimeInitArgs init_args; + void *ptr; + + memset(&init_args, 0, sizeof(RuntimeInitArgs)); + init_args.mem_alloc_type = Alloc_With_System_Allocator; + + assert_true(wasm_runtime_init()); + assert_true(wasm_runtime_full_init(&init_args)); + + /* Should return NULL in non-POOL mode */ + ptr = wasm_runtime_aligned_alloc(128, 64); + assert_null(ptr); + + wasm_runtime_destroy(); +} + +/* Test: wasm_runtime_realloc rejects aligned allocations */ +static void +test_wasm_runtime_realloc_rejects_aligned(void **state) +{ + RuntimeInitArgs init_args; + void *ptr, *new_ptr; + + memset(&init_args, 0, sizeof(RuntimeInitArgs)); + init_args.mem_alloc_type = Alloc_With_Pool; + init_args.mem_alloc_option.pool.heap_buf = malloc(256 * 1024); + init_args.mem_alloc_option.pool.heap_size = 256 * 1024; + + assert_true(wasm_runtime_init()); + assert_true(wasm_runtime_full_init(&init_args)); + + /* Allocate with alignment */ + ptr = wasm_runtime_aligned_alloc(128, 64); + assert_non_null(ptr); + + /* Realloc should return NULL */ + new_ptr = wasm_runtime_realloc(ptr, 
256); + assert_null(new_ptr); + + /* Original pointer still valid */ + wasm_runtime_free(ptr); + + wasm_runtime_destroy(); + free(init_args.mem_alloc_option.pool.heap_buf); +} + +/* Test: wasm_runtime_aligned_alloc with various alignments */ +static void +test_wasm_runtime_aligned_alloc_multiple_alignments(void **state) +{ + RuntimeInitArgs init_args; + int alignments[] = {8, 16, 32, 64, 128, 256}; + int num_alignments = sizeof(alignments) / sizeof(alignments[0]); + + memset(&init_args, 0, sizeof(RuntimeInitArgs)); + init_args.mem_alloc_type = Alloc_With_Pool; + init_args.mem_alloc_option.pool.heap_buf = malloc(512 * 1024); + init_args.mem_alloc_option.pool.heap_size = 512 * 1024; + + assert_true(wasm_runtime_init()); + assert_true(wasm_runtime_full_init(&init_args)); + + for (int i = 0; i < num_alignments; i++) { + int align = alignments[i]; + void *ptr = wasm_runtime_aligned_alloc(align * 2, align); + assert_non_null(ptr); + assert_true(is_aligned(ptr, align)); + wasm_runtime_free(ptr); + } + + wasm_runtime_destroy(); + free(init_args.mem_alloc_option.pool.heap_buf); +} diff --git a/tests/unit/mem-alloc/test_runner.c b/tests/unit/mem-alloc/test_runner.c index b1ec1ddffe..d9621f73ad 100644 --- a/tests/unit/mem-alloc/test_runner.c +++ b/tests/unit/mem-alloc/test_runner.c @@ -26,6 +26,12 @@ main(void) cmocka_unit_test(test_mixed_obj_to_hmu), cmocka_unit_test(test_aligned_alloc_many), cmocka_unit_test(test_mixed_alloc_many), + cmocka_unit_test(test_wasm_runtime_aligned_alloc_valid), + cmocka_unit_test(test_wasm_runtime_aligned_alloc_zero_size), + cmocka_unit_test(test_wasm_runtime_aligned_alloc_zero_alignment), + cmocka_unit_test(test_wasm_runtime_aligned_alloc_system_mode), + cmocka_unit_test(test_wasm_runtime_realloc_rejects_aligned), + cmocka_unit_test(test_wasm_runtime_aligned_alloc_multiple_alignments), }; return cmocka_run_group_tests(tests, NULL, NULL); From 601e8fb2736bafdf650b76118a3b39b5397b2ec3 Mon Sep 17 00:00:00 2001 From: "liang.he@intel.com" Date: Wed, 
25 Mar 2026 15:15:05 +0800 Subject: [PATCH 18/19] refactor: improve code quality in aligned allocation - Simplify wasm_runtime_aligned_alloc_internal control flow using guard clause - Remove redundant wasm_runtime_init() calls in tests (wasm_runtime_full_init handles it) No functional changes, improves code readability and follows existing patterns. Co-Authored-By: Claude Sonnet 4.5 --- core/iwasm/common/wasm_memory.c | 8 ++++---- tests/unit/mem-alloc/mem_alloc_test.c | 6 ------ 2 files changed, 4 insertions(+), 10 deletions(-) diff --git a/core/iwasm/common/wasm_memory.c b/core/iwasm/common/wasm_memory.c index 0745f79144..fc9842497c 100644 --- a/core/iwasm/common/wasm_memory.c +++ b/core/iwasm/common/wasm_memory.c @@ -1038,14 +1038,14 @@ wasm_runtime_aligned_alloc_internal(unsigned int size, unsigned int alignment) "initialized.\n"); return NULL; } - else if (memory_mode == MEMORY_MODE_POOL) { - return mem_allocator_malloc_aligned(pool_allocator, size, alignment); - } - else { + + if (memory_mode != MEMORY_MODE_POOL) { LOG_ERROR("wasm_runtime_aligned_alloc failed: only supported in POOL " "memory mode.\n"); return NULL; } + + return mem_allocator_malloc_aligned(pool_allocator, size, alignment); } void * diff --git a/tests/unit/mem-alloc/mem_alloc_test.c b/tests/unit/mem-alloc/mem_alloc_test.c index 57caa84f9e..aeb1aaf6b9 100644 --- a/tests/unit/mem-alloc/mem_alloc_test.c +++ b/tests/unit/mem-alloc/mem_alloc_test.c @@ -378,7 +378,6 @@ test_wasm_runtime_aligned_alloc_valid(void **state) init_args.mem_alloc_option.pool.heap_buf = malloc(256 * 1024); init_args.mem_alloc_option.pool.heap_size = 256 * 1024; - assert_true(wasm_runtime_init()); assert_true(wasm_runtime_full_init(&init_args)); /* Test valid aligned allocation */ @@ -405,7 +404,6 @@ test_wasm_runtime_aligned_alloc_zero_size(void **state) init_args.mem_alloc_option.pool.heap_buf = malloc(256 * 1024); init_args.mem_alloc_option.pool.heap_size = 256 * 1024; - assert_true(wasm_runtime_init()); 
assert_true(wasm_runtime_full_init(&init_args)); /* Zero size should return NULL */ @@ -428,7 +426,6 @@ test_wasm_runtime_aligned_alloc_zero_alignment(void **state) init_args.mem_alloc_option.pool.heap_buf = malloc(256 * 1024); init_args.mem_alloc_option.pool.heap_size = 256 * 1024; - assert_true(wasm_runtime_init()); assert_true(wasm_runtime_full_init(&init_args)); /* Zero alignment should return NULL */ @@ -449,7 +446,6 @@ test_wasm_runtime_aligned_alloc_system_mode(void **state) memset(&init_args, 0, sizeof(RuntimeInitArgs)); init_args.mem_alloc_type = Alloc_With_System_Allocator; - assert_true(wasm_runtime_init()); assert_true(wasm_runtime_full_init(&init_args)); /* Should return NULL in non-POOL mode */ @@ -471,7 +467,6 @@ test_wasm_runtime_realloc_rejects_aligned(void **state) init_args.mem_alloc_option.pool.heap_buf = malloc(256 * 1024); init_args.mem_alloc_option.pool.heap_size = 256 * 1024; - assert_true(wasm_runtime_init()); assert_true(wasm_runtime_full_init(&init_args)); /* Allocate with alignment */ @@ -502,7 +497,6 @@ test_wasm_runtime_aligned_alloc_multiple_alignments(void **state) init_args.mem_alloc_option.pool.heap_buf = malloc(512 * 1024); init_args.mem_alloc_option.pool.heap_size = 512 * 1024; - assert_true(wasm_runtime_init()); assert_true(wasm_runtime_full_init(&init_args)); for (int i = 0; i < num_alignments; i++) { From 6d08a77eb1b91e8f66d08152b284f67a7cf450d3 Mon Sep 17 00:00:00 2001 From: "liang.he@intel.com" Date: Thu, 26 Mar 2026 11:44:35 +0800 Subject: [PATCH 19/19] feat: add POSIX-like aligned_alloc() - a new API: wasm_runtime_aligned_alloc() - gc_alloc_vo_aligned() in ems-alloc - unit test cases Co-Authored-By: Claude Sonnet 4.5 --- core/iwasm/common/wasm_memory.c | 18 ++- core/iwasm/include/wasm_export.h | 3 +- core/shared/mem-alloc/ems/ems_alloc.c | 26 ++--- core/shared/mem-alloc/ems/ems_gc_internal.h | 120 +++++++++++++++++--- core/shared/mem-alloc/mem_alloc.c | 8 +- core/shared/mem-alloc/mem_alloc.h | 14 +-- 
core/shared/utils/bh_platform.h | 18 +++ tests/unit/mem-alloc/mem_alloc_test.c | 31 +++-- 8 files changed, 180 insertions(+), 58 deletions(-) diff --git a/core/iwasm/common/wasm_memory.c b/core/iwasm/common/wasm_memory.c index fc9842497c..3d1f148118 100644 --- a/core/iwasm/common/wasm_memory.c +++ b/core/iwasm/common/wasm_memory.c @@ -1073,19 +1073,25 @@ wasm_runtime_malloc(unsigned int size) void * wasm_runtime_aligned_alloc(unsigned int size, unsigned int alignment) { - if (size == 0) { - LOG_WARNING("warning: wasm_runtime_aligned_alloc with size zero\n"); + if (alignment == 0) { + LOG_WARNING( + "warning: wasm_runtime_aligned_alloc with zero alignment\n"); return NULL; } - if (alignment == 0) { - LOG_WARNING("warning: wasm_runtime_aligned_alloc with zero alignment\n"); - return NULL; + if (size == 0) { + LOG_WARNING("warning: wasm_runtime_aligned_alloc with size zero\n"); + /* Allocate at least alignment bytes (smallest multiple of alignment) */ + size = alignment; +#if BH_ENABLE_GC_VERIFY != 0 + exit(-1); +#endif } #if WASM_ENABLE_FUZZ_TEST != 0 if (size >= WASM_MEM_ALLOC_MAX_SIZE) { - LOG_WARNING("warning: wasm_runtime_aligned_alloc with too large size\n"); + LOG_WARNING( + "warning: wasm_runtime_aligned_alloc with too large size\n"); return NULL; } #endif diff --git a/core/iwasm/include/wasm_export.h b/core/iwasm/include/wasm_export.h index 690cd206ef..830c5c030c 100644 --- a/core/iwasm/include/wasm_export.h +++ b/core/iwasm/include/wasm_export.h @@ -430,7 +430,8 @@ wasm_runtime_malloc(unsigned int size); * Note: Allocated memory cannot be reallocated with wasm_runtime_realloc(). 
* * @param size bytes need to allocate (must be multiple of alignment) - * @param alignment alignment requirement (must be power of 2, >= 8, <= page size) + * @param alignment alignment requirement (must be power of 2, >= 8, <= page + * size) * * @return the pointer to aligned memory allocated, or NULL on failure */ diff --git a/core/shared/mem-alloc/ems/ems_alloc.c b/core/shared/mem-alloc/ems/ems_alloc.c index aacc29c4a8..ffd543690a 100644 --- a/core/shared/mem-alloc/ems/ems_alloc.c +++ b/core/shared/mem-alloc/ems/ems_alloc.c @@ -556,17 +556,15 @@ alloc_hmu_ex(gc_heap_t *heap, gc_size_t size) MEM_ALLOC_API_INTER hmu_t * obj_to_hmu(gc_object_t obj) { - uint32_t *magic_ptr = (uint32_t *)((char *)obj - 4); - /* Check for aligned allocation magic signature */ - if ((*magic_ptr & ALIGNED_ALLOC_MAGIC_MASK) == ALIGNED_ALLOC_MAGIC_VALUE) { + if (gc_is_aligned_allocation(obj)) { /* This is an aligned allocation, read offset */ uint32_t *offset_ptr = (uint32_t *)((char *)obj - 8); return (hmu_t *)((char *)obj - *offset_ptr); } /* Normal allocation: standard offset */ - return (hmu_t *)((gc_uint8 *)(obj) - OBJ_PREFIX_SIZE) - 1; + return (hmu_t *)((gc_uint8 *)(obj)-OBJ_PREFIX_SIZE) - 1; } #if BH_ENABLE_GC_VERIFY == 0 @@ -671,7 +669,7 @@ gc_alloc_vo_aligned_internal(void *vheap, gc_size_t size, gc_size_t alignment, } if (size > SIZE_MAX - alignment - HMU_SIZE - OBJ_PREFIX_SIZE - - OBJ_SUFFIX_SIZE - 8) { + - OBJ_SUFFIX_SIZE - 8) { /* Would overflow */ return NULL; } @@ -684,8 +682,8 @@ gc_alloc_vo_aligned_internal(void *vheap, gc_size_t size, gc_size_t alignment, #endif /* Calculate total size needed */ - tot_size_unaligned = HMU_SIZE + OBJ_PREFIX_SIZE + size - + OBJ_SUFFIX_SIZE + alignment - 1 + 8; + tot_size_unaligned = + HMU_SIZE + OBJ_PREFIX_SIZE + size + OBJ_SUFFIX_SIZE + alignment - 1 + 8; tot_size = GC_ALIGN_8(tot_size_unaligned); if (tot_size < size) { @@ -731,7 +729,8 @@ gc_alloc_vo_aligned_internal(void *vheap, gc_size_t size, gc_size_t alignment, *((uint32_t *)((char 
*)ret - 8)) = offset; /* Store magic with encoded alignment */ - *((uint32_t *)((char *)ret - 4)) = ALIGNED_ALLOC_MAGIC_VALUE | alignment_log2; + *((uint32_t *)((char *)ret - 4)) = + ALIGNED_ALLOC_MAGIC_VALUE | alignment_log2; /* Initialize HMU */ hmu_set_ut(hmu, HMU_VO); @@ -779,13 +778,10 @@ gc_realloc_vo_internal(void *vheap, void *ptr, gc_size_t size, const char *file, #endif /* Check if this is an aligned allocation - not supported */ - if (obj_old) { - uint32_t *magic_ptr = (uint32_t *)((char *)obj_old - 4); - if ((*magic_ptr & ALIGNED_ALLOC_MAGIC_MASK) == ALIGNED_ALLOC_MAGIC_VALUE) { - LOG_ERROR("[GC_ERROR]gc_realloc_vo does not support aligned " - "allocations\n"); - return NULL; - } + if (gc_is_aligned_allocation(obj_old)) { + LOG_ERROR("[GC_ERROR]gc_realloc_vo does not support aligned " + "allocations\n"); + return NULL; } if (obj_old) { diff --git a/core/shared/mem-alloc/ems/ems_gc_internal.h b/core/shared/mem-alloc/ems/ems_gc_internal.h index fcde9c9996..db6eff4018 100644 --- a/core/shared/mem-alloc/ems/ems_gc_internal.h +++ b/core/shared/mem-alloc/ems/ems_gc_internal.h @@ -15,11 +15,7 @@ extern "C" { /* Test visibility macro for internal functions */ #ifndef MEM_ALLOC_API_INTER -#ifdef WAMR_BUILD_TEST -#define MEM_ALLOC_API_INTER -#else -#define MEM_ALLOC_API_INTER static -#endif +#define MEM_ALLOC_API_INTER WASM_RUNTIME_API_INTERN #endif /* HMU (heap memory unit) basic block type */ @@ -102,22 +98,120 @@ hmu_verify(void *vheap, hmu_t *hmu); + (((x) > 8) ? (x) : 8)) /* - * Aligned allocation uses metadata in the header to store the offset + * ============================================================================ + * Aligned Memory Allocation + * ============================================================================ * - * ### Memory Layout + * This module implements aligned memory allocation similar to C11 + * aligned_alloc() and POSIX posix_memalign() for WAMR's garbage collector. 
 * - * Aligned allocations use over-allocation with metadata storage: + * POSIX aligned_alloc() Specification: + * ------------------------------------ + * From C11 §7.22.3.1 and POSIX.1-2017: + * void *aligned_alloc(size_t alignment, size_t size); * - * ``` - * [HMU][PREFIX][...padding...][METADATA][ALIGNED_OBJ][SUFFIX] - * ^8 bytes ^returned pointer (aligned) - * ``` + * Requirements: + * - alignment: Must be a valid alignment supported by the implementation, + * typically a power of 2 + * - size: Must be an integral multiple of alignment + * - Returns: Pointer aligned to the specified alignment boundary, or NULL + * - Memory must be freed with free() (not realloc'd) + * - Behavior: If size is 0, may return NULL or unique pointer (impl-defined) + * + * IMPORTANT: POSIX does not require realloc() to preserve alignment. + * Calling realloc() on aligned_alloc() memory has undefined behavior. + * + * WAMR Implementation Strategy: + * ----------------------------- + * We implement alignment through over-allocation with metadata tracking: + * + * 1. **Validation Phase**: + * - Check alignment is power-of-2, >= 8 bytes, <= system page size + * - Check size is multiple of alignment + * - Return NULL if validation fails + * + * 2. **Over-Allocation**: + * - Allocate (size + alignment + metadata_overhead) bytes + * - Extra space allows us to find an aligned boundary within the block + * - Calculate log2(alignment) to encode in the magic marker + * + * 3. **Alignment Adjustment**: + * - Find next aligned address within allocated block + * - Calculate offset from original allocation to aligned address + * - Store offset in metadata for later free() operation + * + * 4. **Magic Marker Storage**: + * - Store magic marker (0xA11C0000 | log2(alignment)) in 4 bytes before user pointer + * - Upper 16 bits: 0xA11C identifies aligned allocation + * - Lower 16 bits: log2(alignment); the HMU offset is stored as a full 32-bit value at (user pointer - 8) + * - This marker prevents unsafe realloc() operations + * + * 5. 
**Realloc Prevention**: + * - gc_realloc_vo_internal() checks for magic marker + * - Returns NULL if realloc attempted on aligned allocation + * - User must manually allocate new memory and copy data + * + * Memory Layout Diagram: + * ---------------------- + * + * Low Address High Address + * ┌─────────────┬──────────┬────────────────┬──────────────┬─────────────┐ + * │ HMU Header │ Padding │ Offset + Magic │ Aligned Data │ Padding │ + * │ (meta) │ (0-align)│ (8 bytes) │ (size) │ (overhead) │ + * └─────────────┴──────────┴────────────────┴──────────────┴─────────────┘ + * ▲ ▲ + * │ │ + * magic_ptr user_ptr (returned, aligned) * - * Magic value for aligned allocation detection + * Constraints and Limitations: + * ---------------------------- + * - Minimum alignment: 8 bytes (GC_MIN_ALIGNMENT) + * - Maximum alignment: System page size (os_getpagesize(), typically 4KB) + * - Offset storage: full 32-bit value in the 4 bytes at user pointer - 8 + * - Realloc support: None - returns NULL (prevents alignment loss) + * - Free support: Full - use mem_allocator_free() / wasm_runtime_free() + * - Thread safety: Protected by LOCK_HEAP/UNLOCK_HEAP + * + * Usage Example: + * -------------- + * // Allocate 256 bytes aligned to 64-byte boundary (e.g., for SIMD) + * void *ptr = wasm_runtime_aligned_alloc(256, 64); + * assert((uintptr_t)ptr % 64 == 0); // Guaranteed aligned + * + * // Use the memory... + * + * // Free normally (alignment metadata handled automatically) + * wasm_runtime_free(ptr); + * + * // INVALID: Cannot realloc aligned memory + * void *new_ptr = wasm_runtime_realloc(ptr, 512); // Returns NULL! */ + +/* Aligned allocation magic markers */ #define ALIGNED_ALLOC_MAGIC_MASK 0xFFFF0000 #define ALIGNED_ALLOC_MAGIC_VALUE 0xA11C0000 +/** + * Check if a gc_object was allocated with alignment requirements. + * + * Aligned allocations store a magic marker (0xA11C0000) in the 4 bytes + * immediately before the object pointer. 
This marker is used to identify + * aligned allocations to prevent unsafe realloc operations. + * + * @param obj the gc_object to check (user-visible pointer) + * @return true if obj is an aligned allocation, false otherwise + */ +static inline bool +gc_is_aligned_allocation(gc_object_t obj) +{ + if (!obj) + return false; + + uint32_t *magic_ptr = (uint32_t *)((char *)obj - 4); + return ((*magic_ptr & ALIGNED_ALLOC_MAGIC_MASK) + == ALIGNED_ALLOC_MAGIC_VALUE); +} + /** * hmu bit operation */ diff --git a/core/shared/mem-alloc/mem_alloc.c b/core/shared/mem-alloc/mem_alloc.c index 5cb0f4ec00..c17a69ee28 100644 --- a/core/shared/mem-alloc/mem_alloc.c +++ b/core/shared/mem-alloc/mem_alloc.c @@ -60,15 +60,15 @@ mem_allocator_free(mem_allocator_t allocator, void *ptr) #if BH_ENABLE_GC_VERIFY == 0 void * mem_allocator_malloc_aligned(mem_allocator_t allocator, uint32_t size, - uint32_t alignment) + uint32_t alignment) { return gc_alloc_vo_aligned((gc_handle_t)allocator, size, alignment); } #else void * -mem_allocator_malloc_aligned_internal(mem_allocator_t allocator, - uint32_t size, uint32_t alignment, - const char *file, int line) +mem_allocator_malloc_aligned_internal(mem_allocator_t allocator, uint32_t size, + uint32_t alignment, const char *file, + int line) { return gc_alloc_vo_aligned_internal((gc_handle_t)allocator, size, alignment, file, line); diff --git a/core/shared/mem-alloc/mem_alloc.h b/core/shared/mem-alloc/mem_alloc.h index 0304e1397d..3e55d49ed5 100644 --- a/core/shared/mem-alloc/mem_alloc.h +++ b/core/shared/mem-alloc/mem_alloc.h @@ -48,27 +48,27 @@ mem_allocator_free(mem_allocator_t allocator, void *ptr); /* Aligned allocation support */ #ifndef GC_MIN_ALIGNMENT -#define GC_MIN_ALIGNMENT 8 +#define GC_MIN_ALIGNMENT 8 #endif #if BH_ENABLE_GC_VERIFY == 0 void * mem_allocator_malloc_aligned(mem_allocator_t allocator, uint32_t size, - uint32_t alignment); + uint32_t alignment); #define mem_allocator_malloc_aligned_internal(allocator, size, alignment, \ - 
file, line) \ + file, line) \ mem_allocator_malloc_aligned(allocator, size, alignment) #else /* BH_ENABLE_GC_VERIFY != 0 */ void * -mem_allocator_malloc_aligned_internal(mem_allocator_t allocator, - uint32_t size, uint32_t alignment, - const char *file, int line); +mem_allocator_malloc_aligned_internal(mem_allocator_t allocator, uint32_t size, + uint32_t alignment, const char *file, + int line); -#define mem_allocator_malloc_aligned(allocator, size, alignment) \ +#define mem_allocator_malloc_aligned(allocator, size, alignment) \ mem_allocator_malloc_aligned_internal(allocator, size, alignment, \ __FILE__, __LINE__) diff --git a/core/shared/utils/bh_platform.h b/core/shared/utils/bh_platform.h index 86aef839dd..4774a7cabb 100644 --- a/core/shared/utils/bh_platform.h +++ b/core/shared/utils/bh_platform.h @@ -18,6 +18,24 @@ #include "bh_vector.h" #include "runtime_timer.h" +/** + * API visibility macros for WAMR internal functions + * + * WASM_RUNTIME_API_EXTERN - Public exported APIs (defined in wasm_export.h) + * WASM_RUNTIME_API_INTERN - Internal APIs visible across WAMR components + * + * In test builds (WAMR_BUILD_TEST=1), internal APIs are exposed for unit + * testing. In production builds, internal APIs are static (file-scoped) for + * encapsulation. + */ +#ifndef WASM_RUNTIME_API_INTERN +#ifdef WAMR_BUILD_TEST +#define WASM_RUNTIME_API_INTERN +#else +#define WASM_RUNTIME_API_INTERN static +#endif +#endif + /** * WA_MALLOC/WA_FREE need to be redefined for both * runtime native and WASM app respectively. 
diff --git a/tests/unit/mem-alloc/mem_alloc_test.c b/tests/unit/mem-alloc/mem_alloc_test.c index aeb1aaf6b9..878e0e0d49 100644 --- a/tests/unit/mem-alloc/mem_alloc_test.c +++ b/tests/unit/mem-alloc/mem_alloc_test.c @@ -11,7 +11,7 @@ #include #if WAMR_BUILD_TEST != 1 - #error "WAMR_BUILD_TEST must be defined as 1" +#error "WAMR_BUILD_TEST must be defined as 1" #endif #include "mem_alloc.h" @@ -30,7 +30,8 @@ static inline bool is_aligned_allocation(gc_object_t obj) { uint32_t *magic_ptr = (uint32_t *)((char *)obj - 4); - return ((*magic_ptr & ALIGNED_ALLOC_MAGIC_MASK) == ALIGNED_ALLOC_MAGIC_VALUE); + return ((*magic_ptr & ALIGNED_ALLOC_MAGIC_MASK) + == ALIGNED_ALLOC_MAGIC_VALUE); } /* Test: Normal allocation still works (regression) */ @@ -72,7 +73,7 @@ test_aligned_alloc_valid_alignments(void **state) assert_non_null(allocator); /* Test each valid alignment */ - int alignments[] = {8, 16, 32, 64, 128, 256, 512, 1024}; + int alignments[] = { 8, 16, 32, 64, 128, 256, 512, 1024 }; int num_alignments = sizeof(alignments) / sizeof(alignments[0]); for (int i = 0; i < num_alignments; i++) { int align = alignments[i]; @@ -163,11 +164,12 @@ test_aligned_alloc_invalid_not_power_of_2(void **state) assert_non_null(allocator); /* These should all fail (zero or not power of 2) */ - int invalid_alignments[] = {0, 3, 5, 7, 9, 15, 17, 100}; + int invalid_alignments[] = { 0, 3, 5, 7, 9, 15, 17, 100 }; int num_invalid = sizeof(invalid_alignments) / sizeof(invalid_alignments[0]); for (int i = 0; i < num_invalid; i++) { - ptr = mem_allocator_malloc_aligned(allocator, 128, invalid_alignments[i]); + ptr = + mem_allocator_malloc_aligned(allocator, 128, invalid_alignments[i]); assert_null(ptr); } @@ -312,12 +314,13 @@ test_aligned_alloc_many(void **state) if (ptrs[i]) { assert_true(is_aligned(ptrs[i], align)); count++; - } else { + } + else { break; } } - assert_true(count > 10); /* At least some should succeed */ + assert_true(count > 10); /* At least some should succeed */ /* Free all */ 
for (int i = 0; i < count; i++) { @@ -344,14 +347,16 @@ test_mixed_alloc_many(void **state) if (i % 2 == 0) { /* Normal allocation */ ptrs[i] = mem_allocator_malloc(allocator, 64); - } else { + } + else { /* Aligned allocation */ ptrs[i] = mem_allocator_malloc_aligned(allocator, 64, 32); } if (ptrs[i]) { count++; - } else { + } + else { break; } } @@ -406,10 +411,12 @@ test_wasm_runtime_aligned_alloc_zero_size(void **state) assert_true(wasm_runtime_full_init(&init_args)); - /* Zero size should return NULL */ + /* Zero size should allocate alignment bytes (like malloc(0) behavior) */ ptr = wasm_runtime_aligned_alloc(0, 64); - assert_null(ptr); + assert_non_null(ptr); + assert_true(is_aligned(ptr, 64)); + wasm_runtime_free(ptr); wasm_runtime_destroy(); free(init_args.mem_alloc_option.pool.heap_buf); } @@ -489,7 +496,7 @@ static void test_wasm_runtime_aligned_alloc_multiple_alignments(void **state) { RuntimeInitArgs init_args; - int alignments[] = {8, 16, 32, 64, 128, 256}; + int alignments[] = { 8, 16, 32, 64, 128, 256 }; int num_alignments = sizeof(alignments) / sizeof(alignments[0]); memset(&init_args, 0, sizeof(RuntimeInitArgs));