diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
index b5b6d3ee56..f7d41e009e 100644
--- a/mm/kasan/generic.c
+++ b/mm/kasan/generic.c
@@ -25,6 +25,7 @@
  ****************************************************************************/
 
 #include
+#include <nuttx/mm/mm.h>
 #include
 #include
 #include
@@ -44,10 +45,8 @@
 #define KASAN_LAST_WORD_MASK(end) \
         (UINTPTR_MAX >> (-(end) & (KASAN_BITS_PER_WORD - 1)))
 
-#define KASAN_SHADOW_SCALE (sizeof(uintptr_t))
-
 #define KASAN_SHADOW_SIZE(size) \
-        (KASAN_BYTES_PER_WORD * ((size) / KASAN_SHADOW_SCALE / KASAN_BITS_PER_WORD))
+        (KASAN_BYTES_PER_WORD * ((size) / MM_ALIGN / KASAN_BITS_PER_WORD))
 
 #define KASAN_REGION_SIZE(size) \
         (sizeof(struct kasan_region_s) + KASAN_SHADOW_SIZE(size))
@@ -87,7 +86,7 @@ kasan_mem_to_shadow(FAR const void *ptr, size_t size,
     {
       DEBUGASSERT(addr + size <= g_region[i]->end);
       addr -= g_region[i]->begin;
-      addr /= KASAN_SHADOW_SCALE;
+      addr /= MM_ALIGN;
       *bit  = addr % KASAN_BITS_PER_WORD;
       return &g_region[i]->shadow[addr / KASAN_BITS_PER_WORD];
     }
@@ -110,15 +109,15 @@ kasan_is_poisoned(FAR const void *addr, size_t size)
       return kasan_global_is_poisoned(addr, size);
     }
 
-  if (size <= KASAN_SHADOW_SCALE)
+  if (size <= MM_ALIGN)
     {
       return ((*p >> bit) & 1);
     }
 
   nbit = KASAN_BITS_PER_WORD - bit % KASAN_BITS_PER_WORD;
   mask = KASAN_FIRST_WORD_MASK(bit);
-  size = ALIGN_UP(size, KASAN_SHADOW_SCALE);
-  size /= KASAN_SHADOW_SCALE;
+  size = ALIGN_UP(size, MM_ALIGN);
+  size /= MM_ALIGN;
 
   while (size >= nbit)
     {
@@ -155,6 +154,9 @@ static void kasan_set_poison(FAR const void *addr, size_t size,
   unsigned int nbit;
   uintptr_t mask;
 
+  DEBUGASSERT((uintptr_t)addr % MM_ALIGN == 0);
+  DEBUGASSERT(size % MM_ALIGN == 0);
+
   p = kasan_mem_to_shadow(addr, size, &bit);
   if (p == NULL)
     {
@@ -163,7 +165,7 @@ static void kasan_set_poison(FAR const void *addr, size_t size,
 
   nbit = KASAN_BITS_PER_WORD - bit % KASAN_BITS_PER_WORD;
   mask = KASAN_FIRST_WORD_MASK(bit);
-  size /= KASAN_SHADOW_SCALE;
+  size /= MM_ALIGN;
 
   flags = spin_lock_irqsave(&g_lock);
   while (size >= nbit)
diff --git a/mm/kasan/sw_tags.c b/mm/kasan/sw_tags.c
index d1ba3bf73a..503779ca1f 100644
--- a/mm/kasan/sw_tags.c
+++ b/mm/kasan/sw_tags.c
@@ -22,6 +22,7 @@
  * Included Files
  ****************************************************************************/
 
+#include <nuttx/mm/mm.h>
 #include
 #include
 #include
@@ -45,10 +46,8 @@
 
 #define kasan_random_tag() (1 + rand() % ((1 << (64 - KASAN_TAG_SHIFT)) - 2))
 
-#define KASAN_SHADOW_SCALE (sizeof(uintptr_t))
-
 #define KASAN_SHADOW_SIZE(size) \
-        ((size) + KASAN_SHADOW_SCALE - 1) / KASAN_SHADOW_SCALE
+        ((size) + MM_ALIGN - 1) / MM_ALIGN
 
 #define KASAN_REGION_SIZE(size) \
         (sizeof(struct kasan_region_s) + KASAN_SHADOW_SIZE(size))
@@ -89,7 +88,7 @@ kasan_mem_to_shadow(FAR const void *ptr, size_t size)
     {
       DEBUGASSERT(addr + size <= g_region[i]->end);
       addr -= g_region[i]->begin;
-      return &g_region[i]->shadow[addr / KASAN_SHADOW_SCALE];
+      return &g_region[i]->shadow[addr / MM_ALIGN];
     }
 
 }
@@ -135,6 +134,9 @@ static void kasan_set_poison(FAR const void *addr,
   irqstate_t flags;
   FAR uint8_t *p;
 
+  DEBUGASSERT((uintptr_t)addr % MM_ALIGN == 0);
+  DEBUGASSERT(size % MM_ALIGN == 0);
+
   p = kasan_mem_to_shadow(addr, size);
   if (p == NULL)
     {
diff --git a/mm/mm_heap/mm.h b/mm/mm_heap/mm.h
index 6c06f733db..3d08112d62 100644
--- a/mm/mm_heap/mm.h
+++ b/mm/mm_heap/mm.h
@@ -143,7 +143,7 @@
  * previous freenode
  */
 
-#define MM_ALLOCNODE_OVERHEAD (MM_SIZEOF_ALLOCNODE - sizeof(mmsize_t))
+#define MM_ALLOCNODE_OVERHEAD (MM_SIZEOF_ALLOCNODE - MM_ALIGN)
 
 /* Get the node size */
 
@@ -173,7 +173,12 @@ typedef size_t mmsize_t;
 
 struct mm_allocnode_s
 {
-  mmsize_t preceding;                       /* Physical preceding chunk size */
+  union
+  {
+    mmsize_t preceding;                     /* Physical preceding chunk size */
+    uint8_t align[MM_ALIGN];
+  };
+
   mmsize_t size;                            /* Size of this chunk */
 #if CONFIG_MM_BACKTRACE >= 0
   pid_t pid;                                /* The pid for caller */
@@ -182,13 +187,19 @@ struct mm_allocnode_s
   FAR void *backtrace[CONFIG_MM_BACKTRACE]; /* The backtrace buffer for caller */
 # endif
 #endif
-};
+}
+aligned_data(MM_ALIGN);
 
 /* This describes a free chunk */
 
 struct mm_freenode_s
 {
-  mmsize_t preceding;                       /* Physical preceding chunk size */
+  union
+  {
+    mmsize_t preceding;                     /* Physical preceding chunk size */
+    uint8_t align[MM_ALIGN];
+  };
+
   mmsize_t size;                            /* Size of this chunk */
 #if CONFIG_MM_BACKTRACE >= 0
   pid_t pid;                                /* The pid for caller */
@@ -199,7 +210,8 @@ struct mm_freenode_s
 #endif
   FAR struct mm_freenode_s *flink;          /* Supports a doubly linked list */
   FAR struct mm_freenode_s *blink;
-};
+}
+aligned_data(MM_ALIGN);
 
 static_assert(MM_SIZEOF_ALLOCNODE <= MM_MIN_CHUNK,
               "Error size for struct mm_allocnode_s\n");
diff --git a/mm/mm_heap/mm_initialize.c b/mm/mm_heap/mm_initialize.c
index 61b1b91a6c..91102b2b9c 100644
--- a/mm/mm_heap/mm_initialize.c
+++ b/mm/mm_heap/mm_initialize.c
@@ -150,7 +150,7 @@ void mm_addregion(FAR struct mm_heap_s *heap, FAR void *heapstart,
 
   heapbase = MM_ALIGN_UP((uintptr_t)heapstart + 2 * MM_SIZEOF_ALLOCNODE) -
              2 * MM_SIZEOF_ALLOCNODE;
-  heapsize = heapsize - (heapbase - (uintptr_t)heapstart);
+  heapsize = MM_ALIGN_DOWN(heapsize - (heapbase - (uintptr_t)heapstart));
 
   /* Register KASan for access rights check. We need to register after
    * address alignment.
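
Note on the shadow math in the generic.c hunks: after this patch one shadow bit covers MM_ALIGN bytes of heap instead of sizeof(uintptr_t) bytes, so the poison granularity tracks the allocator's configured alignment. The standalone sketch below models the same word/bit lookup that kasan_mem_to_shadow() performs; the fixed MM_ALIGN value, region base, and shadow-array size here are illustrative assumptions, not the NuttX definitions.

/* Minimal model (not NuttX code) of the generic KASan lookup after this
 * change: one shadow bit per MM_ALIGN-byte granule, bits packed into
 * uintptr_t words.
 */

#include <stdint.h>
#include <stdio.h>

#define MM_ALIGN          8                        /* assumed granule size */
#define BITS_PER_WORD     (8 * sizeof(uintptr_t))  /* cf. KASAN_BITS_PER_WORD */

static uintptr_t shadow[64];                       /* covers 64 * BITS_PER_WORD granules */
static const uintptr_t region_begin = 0x10000000;  /* assumed heap base */

/* Map a heap address to its shadow word and bit index, mirroring what
 * kasan_mem_to_shadow() now does with MM_ALIGN as the scale.
 */

static uintptr_t *mem_to_shadow(uintptr_t addr, unsigned int *bit)
{
  uintptr_t granule = (addr - region_begin) / MM_ALIGN;

  *bit = granule % BITS_PER_WORD;
  return &shadow[granule / BITS_PER_WORD];
}

int main(void)
{
  unsigned int bit;
  uintptr_t *word;

  /* Poison one MM_ALIGN-byte granule... */

  word = mem_to_shadow(0x10000040, &bit);
  *word |= (uintptr_t)1 << bit;

  /* ...then read the poison bit back, as kasan_is_poisoned() would. */

  word = mem_to_shadow(0x10000040, &bit);
  printf("poisoned: %u\n", (unsigned int)((*word >> bit) & 1));
  return 0;
}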
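
Likewise, the union added to mm_allocnode_s/mm_freenode_s appears to exist for the case where mmsize_t is narrower than MM_ALIGN: padding the preceding slot with align[MM_ALIGN] pins the size field exactly one MM_ALIGN slot into the node, which is what lets MM_ALLOCNODE_OVERHEAD be rewritten as MM_SIZEOF_ALLOCNODE - MM_ALIGN. Below is a compile-time sketch of that layout; the 32-bit mmsize_t, the MM_ALIGN value, and the node_s name are assumptions for illustration only.

/* Layout check (illustrative, not the NuttX header): with a 32-bit
 * mmsize_t and MM_ALIGN == 8, the union widens the preceding slot to a
 * full MM_ALIGN bytes, so size lands at offset MM_ALIGN.
 */

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define MM_ALIGN 8                /* assumed allocator alignment */

typedef uint32_t mmsize_t;        /* assumed narrow chunk-size type */

struct node_s
{
  union
  {
    mmsize_t preceding;           /* physical preceding chunk size */
    uint8_t align[MM_ALIGN];      /* pads the slot to MM_ALIGN bytes */
  };

  mmsize_t size;                  /* size of this chunk */
} __attribute__((aligned(MM_ALIGN)));  /* stand-in for aligned_data(MM_ALIGN) */

static_assert(offsetof(struct node_s, size) == MM_ALIGN,
              "size must sit one MM_ALIGN slot into the node");

int main(void)
{
  return 0;
}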