Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
19 commits
Select commit Hold shift + click to select a range
be9cca3
feat(mem-alloc): add aligned allocation API declaration
lum1n0us Mar 19, 2026
7472331
feat(mem-alloc): add test visibility and magic constants
lum1n0us Mar 19, 2026
0300350
feat(mem-alloc): modify obj_to_hmu for aligned detection
lum1n0us Mar 19, 2026
5ad5633
test(mem-alloc): add test infrastructure
lum1n0us Mar 19, 2026
526ff65
test(mem-alloc): add test runner and stubs
lum1n0us Mar 19, 2026
74589ee
test(mem-alloc): add normal allocation regression test
lum1n0us Mar 19, 2026
a5230be
feat(mem-alloc): implement gc_alloc_vo_aligned
lum1n0us Mar 20, 2026
24952cd
test(mem-alloc): add aligned allocation validation test
lum1n0us Mar 20, 2026
9cb8bb6
feat(mem-alloc): add realloc rejection for aligned allocs
lum1n0us Mar 20, 2026
4f17f3a
test(mem-alloc): add realloc rejection tests
lum1n0us Mar 20, 2026
50477a4
test(mem-alloc): add alignment validation tests
lum1n0us Mar 20, 2026
c1294f6
test(mem-alloc): add mixed allocation tests
lum1n0us Mar 20, 2026
57ccefd
test(mem-alloc): add stress tests
lum1n0us Mar 20, 2026
2b0db6e
feat(mem-alloc): enhance memory allocation tests and add GC test exec…
lum1n0us Mar 20, 2026
125d303
feat(mem-alloc): enhance aligned allocation metadata and documentation
lum1n0us Mar 20, 2026
ba0af6c
feat(api): expose aligned allocation through wasm_runtime_aligned_alloc
lum1n0us Mar 25, 2026
76ee1d3
test(mem-alloc): add wasm_runtime_aligned_alloc API tests
lum1n0us Mar 25, 2026
601e8fb
refactor: improve code quality in aligned allocation
lum1n0us Mar 25, 2026
6d08a77
feat: add POSIX-like aligned_alloc()
lum1n0us Mar 26, 2026
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
.*
!.gitignore

.cache
.clangd
Expand Down
47 changes: 47 additions & 0 deletions core/iwasm/common/wasm_memory.c
Original file line number Diff line number Diff line change
Expand Up @@ -1030,6 +1030,24 @@ wasm_runtime_free_internal(void *ptr)
}
}

static inline void *
wasm_runtime_aligned_alloc_internal(unsigned int size, unsigned int alignment)
{
    /* Aligned allocation is only implemented by the pool allocator, so
       dispatch on the current memory mode and reject everything else. */
    switch (memory_mode) {
        case MEMORY_MODE_UNKNOWN:
            /* Runtime memory has not been set up yet. */
            LOG_ERROR("wasm_runtime_aligned_alloc failed: memory hasn't been "
                      "initialized.\n");
            return NULL;

        case MEMORY_MODE_POOL:
            /* Delegate to the pool allocator's aligned-malloc entry point. */
            return mem_allocator_malloc_aligned(pool_allocator, size,
                                                alignment);

        default:
            /* System-allocator / custom-allocator modes are unsupported. */
            LOG_ERROR("wasm_runtime_aligned_alloc failed: only supported in "
                      "POOL "
                      "memory mode.\n");
            return NULL;
    }
}

void *
wasm_runtime_malloc(unsigned int size)
{
Expand All @@ -1052,6 +1070,35 @@ wasm_runtime_malloc(unsigned int size)
return wasm_runtime_malloc_internal(size);
}

/**
 * Allocate `size` bytes aligned to `alignment` from the runtime memory
 * environment (POOL memory mode only — see
 * wasm_runtime_aligned_alloc_internal).
 *
 * @param size bytes to allocate; 0 is bumped to `alignment` so the request
 *        stays a multiple of the alignment
 * @param alignment required alignment; must be non-zero and a power of 2
 *
 * @return pointer to aligned memory, or NULL on invalid arguments or
 *         allocation failure
 */
void *
wasm_runtime_aligned_alloc(unsigned int size, unsigned int alignment)
{
    if (alignment == 0) {
        LOG_WARNING(
            "warning: wasm_runtime_aligned_alloc with zero alignment\n");
        return NULL;
    }

    /* Fail fast (with a log) on a non-power-of-2 alignment instead of
       letting the allocator reject it silently deep inside
       gc_alloc_vo_aligned — consistent with the other argument checks in
       this wrapper. */
    if ((alignment & (alignment - 1)) != 0) {
        LOG_WARNING("warning: wasm_runtime_aligned_alloc with non-power-of-2 "
                    "alignment\n");
        return NULL;
    }

    if (size == 0) {
        LOG_WARNING("warning: wasm_runtime_aligned_alloc with size zero\n");
        /* Allocate at least alignment bytes (smallest multiple of alignment) */
        size = alignment;
#if BH_ENABLE_GC_VERIFY != 0
        /* In verify builds a zero-size request is treated as a caller bug. */
        exit(-1);
#endif
    }

#if WASM_ENABLE_FUZZ_TEST != 0
    /* Cap request size under fuzzing to avoid OOM-driven false positives. */
    if (size >= WASM_MEM_ALLOC_MAX_SIZE) {
        LOG_WARNING(
            "warning: wasm_runtime_aligned_alloc with too large size\n");
        return NULL;
    }
#endif

    return wasm_runtime_aligned_alloc_internal(size, alignment);
}

void *
wasm_runtime_realloc(void *ptr, unsigned int size)
{
Expand Down
16 changes: 16 additions & 0 deletions core/iwasm/include/wasm_export.h
Original file line number Diff line number Diff line change
Expand Up @@ -422,6 +422,22 @@ wasm_runtime_destroy(void);
WASM_RUNTIME_API_EXTERN void *
wasm_runtime_malloc(unsigned int size);

/**
 * Allocate memory with a specified alignment from the runtime memory
 * environment. This function mimics aligned_alloc() behavior in a
 * WebAssembly context.
 *
 * Note: Only supported in POOL memory mode; other modes return NULL.
 * Note: Memory allocated by this function cannot be reallocated with
 * wasm_runtime_realloc().
 *
 * @param size bytes to allocate (must be a multiple of alignment)
 * @param alignment alignment requirement (must be a power of 2 and no
 * larger than the system page size; values below the allocator's minimum
 * alignment are rounded up to that minimum rather than rejected)
 *
 * @return the pointer to aligned memory allocated, or NULL on failure
 */
WASM_RUNTIME_API_EXTERN void *
wasm_runtime_aligned_alloc(unsigned int size, unsigned int alignment);

/**
* Reallocate memory from runtime memory environment
*
Expand Down
140 changes: 140 additions & 0 deletions core/shared/mem-alloc/ems/ems_alloc.c
Original file line number Diff line number Diff line change
Expand Up @@ -552,6 +552,21 @@ alloc_hmu_ex(gc_heap_t *heap, gc_size_t size)
return alloc_hmu(heap, size);
}

/* Convert object pointer to HMU pointer - handles aligned allocations.
 *
 * Aligned allocations (produced by gc_alloc_vo_aligned) store a 32-bit
 * offset at obj-8 and a magic/alignment word at obj-4; the offset is the
 * distance from the HMU header to the returned object pointer, so the
 * header is recovered by walking back `offset` bytes.
 *
 * NOTE(review): gc_is_aligned_allocation() presumably detects the magic
 * word at obj-4. For a NORMAL allocation that word is ordinary heap/user
 * data, so a coincidental match with the magic value would misroute this
 * lookup — confirm the magic encoding makes such collisions impossible
 * (or acceptably improbable) for normal allocations. */
MEM_ALLOC_API_INTER hmu_t *
obj_to_hmu(gc_object_t obj)
{
    /* Check for aligned allocation magic signature */
    if (gc_is_aligned_allocation(obj)) {
        /* This is an aligned allocation, read offset */
        uint32_t *offset_ptr = (uint32_t *)((char *)obj - 8);
        return (hmu_t *)((char *)obj - *offset_ptr);
    }

    /* Normal allocation: standard offset — the object sits immediately
       after the HMU header plus the (possibly empty) object prefix, so
       step back OBJ_PREFIX_SIZE bytes and then one hmu_t. */
    return (hmu_t *)((gc_uint8 *)(obj)-OBJ_PREFIX_SIZE) - 1;
}

#if BH_ENABLE_GC_VERIFY == 0
gc_object_t
gc_alloc_vo(void *vheap, gc_size_t size)
Expand Down Expand Up @@ -612,6 +627,124 @@ gc_alloc_vo_internal(void *vheap, gc_size_t size, const char *file, int line)
return ret;
}

/* Allocate a VO (value object) whose returned pointer is aligned to
 * `alignment` bytes.
 *
 * Strategy: over-allocate by (alignment - 1 + 8) bytes, pick the first
 * address >= base_obj + 8 that satisfies the alignment, and stash two
 * 32-bit metadata words immediately before the returned pointer:
 *   obj-8: offset from the HMU header to the returned pointer
 *   obj-4: ALIGNED_ALLOC_MAGIC_VALUE | log2(alignment)
 * obj_to_hmu() uses the magic word to detect aligned objects and the
 * offset word to recover the header for free/GC bookkeeping.
 *
 * Returns NULL if alignment is 0 / not a power of 2 / larger than the
 * page size, if size is not a multiple of alignment (POSIX
 * aligned_alloc contract), on arithmetic overflow, or if the heap has
 * no suitable free chunk. */
#if BH_ENABLE_GC_VERIFY == 0
gc_object_t
gc_alloc_vo_aligned(void *vheap, gc_size_t size, gc_size_t alignment)
#else
gc_object_t
gc_alloc_vo_aligned_internal(void *vheap, gc_size_t size, gc_size_t alignment,
                             const char *file, int line)
#endif
{
    gc_heap_t *heap = (gc_heap_t *)vheap;
    hmu_t *hmu = NULL;
    gc_object_t ret = NULL;
    gc_size_t tot_size, tot_size_unaligned;
    gc_uint8 *base_obj;
    uintptr_t aligned_addr;
    uint32_t offset, alignment_log2;
    uint32_t max_alignment;

    /* Get system page size for maximum alignment check */
    max_alignment = (uint32_t)os_getpagesize();

    /* Validation */
    if (alignment == 0 || (alignment & (alignment - 1)) != 0) {
        /* Zero or not power of 2 */
        return NULL;
    }

    /* Small alignments are rounded up to the allocator's minimum rather
       than rejected. */
    if (alignment < GC_MIN_ALIGNMENT) {
        alignment = GC_MIN_ALIGNMENT;
    }

    if (alignment > max_alignment) {
        /* Exceeds page size */
        return NULL;
    }

    if (size % alignment != 0) {
        /* POSIX requirement: size must be multiple of alignment */
        return NULL;
    }

    /* NOTE(review): SIZE_MAX is size_t's maximum; if gc_size_t is narrower
       than size_t (e.g. 32-bit gc_size_t on a 64-bit host) this guard does
       not prevent the gc_size_t additions below from wrapping — the
       tot_size < size check after GC_ALIGN_8 only partially covers that.
       Confirm gc_size_t's width or compare against its own max. */
    if (size > SIZE_MAX - alignment - HMU_SIZE - OBJ_PREFIX_SIZE
                   - OBJ_SUFFIX_SIZE - 8) {
        /* Would overflow */
        return NULL;
    }

#if BH_ENABLE_GC_CORRUPTION_CHECK != 0
    if (heap->is_heap_corrupted) {
        LOG_ERROR("[GC_ERROR]Heap is corrupted, allocate memory failed.\n");
        return NULL;
    }
#endif

    /* Calculate total size needed: header + prefix + payload + suffix,
       plus worst-case alignment slack and 8 bytes for the metadata words. */
    tot_size_unaligned =
        HMU_SIZE + OBJ_PREFIX_SIZE + size + OBJ_SUFFIX_SIZE + alignment - 1 + 8;
    tot_size = GC_ALIGN_8(tot_size_unaligned);

    if (tot_size < size) {
        /* Integer overflow */
        return NULL;
    }

    LOCK_HEAP(heap);

    hmu = alloc_hmu_ex(heap, tot_size);
    if (!hmu)
        goto finish;

    bh_assert(hmu_get_size(hmu) >= tot_size);
    /* The allocator may hand back a larger chunk; account for its real
       size from here on. */
    tot_size = hmu_get_size(hmu);

#if GC_STAT_DATA != 0
    heap->total_size_allocated += tot_size;
#endif

    /* Get base object pointer */
    base_obj = (gc_uint8 *)hmu + HMU_SIZE + OBJ_PREFIX_SIZE;

    /* Find next aligned address, leaving 8 bytes for metadata */
    aligned_addr = (((uintptr_t)base_obj + 8 + alignment - 1)
                    & ~(uintptr_t)(alignment - 1));
    ret = (gc_object_t)aligned_addr;

    /* Verify we have enough space */
    bh_assert((gc_uint8 *)ret + size + OBJ_SUFFIX_SIZE
              <= (gc_uint8 *)hmu + tot_size);

    /* Calculate offset from HMU to returned pointer */
    offset = (uint32_t)((char *)ret - (char *)hmu);

    /* Calculate log2 of alignment for magic value */
    alignment_log2 = 0;
    while ((1U << alignment_log2) < alignment) {
        alignment_log2++;
    }

    /* Store offset 8 bytes before returned pointer */
    *((uint32_t *)((char *)ret - 8)) = offset;

    /* Store magic with encoded alignment */
    *((uint32_t *)((char *)ret - 4)) =
        ALIGNED_ALLOC_MAGIC_VALUE | alignment_log2;

    /* Initialize HMU */
    hmu_set_ut(hmu, HMU_VO);
    hmu_unfree_vo(hmu);

#if BH_ENABLE_GC_VERIFY != 0
    /* NOTE(review): the verify prefix/suffix helpers presumably assume the
       object sits at the standard hmu + HMU_SIZE + OBJ_PREFIX_SIZE
       position, but `ret` has been shifted to `aligned_addr` — confirm
       prefix/suffix verification on free handles aligned objects. */
    hmu_init_prefix_and_suffix(hmu, tot_size, file, line);
#endif

finish:
    UNLOCK_HEAP(heap);
    return ret;
}

#if BH_ENABLE_GC_VERIFY == 0
gc_object_t
gc_realloc_vo(void *vheap, void *ptr, gc_size_t size)
Expand Down Expand Up @@ -644,6 +777,13 @@ gc_realloc_vo_internal(void *vheap, void *ptr, gc_size_t size, const char *file,
}
#endif

/* Check if this is an aligned allocation - not supported.
   NOTE(review): obj_old may be NULL here — the `if (obj_old)` guard just
   below shows NULL is a legal input (realloc(NULL, n) acts as malloc) —
   so confirm gc_is_aligned_allocation() tolerates NULL before it reads
   the magic word stored 4 bytes below the object pointer. */
if (gc_is_aligned_allocation(obj_old)) {
LOG_ERROR("[GC_ERROR]gc_realloc_vo does not support aligned "
"allocations\n");
return NULL;
}

if (obj_old) {
hmu_old = obj_to_hmu(obj_old);
tot_size_old = hmu_get_size(hmu_old);
Expand Down
10 changes: 10 additions & 0 deletions core/shared/mem-alloc/ems/ems_gc.h
Original file line number Diff line number Diff line change
Expand Up @@ -193,6 +193,9 @@ gc_alloc_vo(void *heap, gc_size_t size);
gc_object_t
gc_realloc_vo(void *heap, void *ptr, gc_size_t size);

gc_object_t
gc_alloc_vo_aligned(void *heap, gc_size_t size, gc_size_t alignment);

int
gc_free_vo(void *heap, gc_object_t obj);

Expand All @@ -213,6 +216,10 @@ gc_object_t
gc_realloc_vo_internal(void *heap, void *ptr, gc_size_t size, const char *file,
int line);

gc_object_t
gc_alloc_vo_aligned_internal(void *heap, gc_size_t size, gc_size_t alignment,
const char *file, int line);

int
gc_free_vo_internal(void *heap, gc_object_t obj, const char *file, int line);

Expand All @@ -231,6 +238,9 @@ gc_free_wo_internal(void *vheap, void *ptr, const char *file, int line);
#define gc_realloc_vo(heap, ptr, size) \
gc_realloc_vo_internal(heap, ptr, size, __FILE__, __LINE__)

#define gc_alloc_vo_aligned(heap, size, alignment) \
gc_alloc_vo_aligned_internal(heap, size, alignment, __FILE__, __LINE__)

#define gc_free_vo(heap, obj) \
gc_free_vo_internal(heap, obj, __FILE__, __LINE__)

Expand Down
Loading
Loading