Compare commits

...

3 commits

Author SHA1 Message Date
Mike.
5b50ea7d34
Merge 0f70c44b4a into aa0aecbd80 2025-01-12 01:29:17 +08:00
wangmingrong1
aa0aecbd80 mempool: addbacktrace should be before kasan_unpoison
If thread 1 is executing kasan_unpoison but is preempted by the scheduler and the block is trampled on, the displayed backtrace may still be the one recorded for the previous allocation

Signed-off-by: wangmingrong1 <wangmingrong1@xiaomi.com>
2025-01-12 01:29:14 +08:00
wangmingrong1
0f70c44b4a mm: Add pending configuration for mm node struct and preceding
Pad the preceding field of the memory block node and the entire struct; after turning on this bit, the size of each memory block will be aligned to MM_ALIGN

Signed-off-by: wangmingrong1 <wangmingrong1@xiaomi.com>
2025-01-10 11:50:47 +08:00
3 changed files with 44 additions and 7 deletions

View file

@ -69,6 +69,14 @@ config MM_DEFAULT_ALIGNMENT
memory default alignment is equal to sizeof(uintptr), if this value
is not 0, this value must be 2^n and at least sizeof(uintptr).
config MM_NODE_PENDING
	bool "Enable pending memory node"
	default n
	---help---
		Pad the preceding field of the memory block node and the
		entire struct.  After turning on this option, the size of
		each memory block will be aligned to MM_ALIGN.
config MM_SMALL
bool "Small memory model"
default n

View file

@ -397,16 +397,17 @@ retry:
pool->nalloc++;
spin_unlock_irqrestore(&pool->lock, flags);
blk = kasan_unpoison(blk, pool->blocksize);
#ifdef CONFIG_MM_FILL_ALLOCATIONS
memset(blk, MM_ALLOC_MAGIC, pool->blocksize);
#endif
#if CONFIG_MM_BACKTRACE >= 0
mempool_add_backtrace(pool, (FAR struct mempool_backtrace_s *)
((FAR char *)blk + pool->blocksize));
#endif
blk = kasan_unpoison(blk, pool->blocksize);
#ifdef CONFIG_MM_FILL_ALLOCATIONS
memset(blk, MM_ALLOC_MAGIC, pool->blocksize);
#endif
return blk;
}

View file

@ -143,7 +143,17 @@
* previous freenode
*/
#define MM_ALLOCNODE_OVERHEAD (MM_SIZEOF_ALLOCNODE - sizeof(mmsize_t))
/* MM_NODE_STRUCT_PENDING is appended to the node struct definitions below.
 * When CONFIG_MM_NODE_PENDING is enabled it expands to
 * aligned_data(MM_ALIGN), padding/aligning each node to MM_ALIGN bytes;
 * otherwise it expands to nothing.
 */

#ifdef CONFIG_MM_NODE_PENDING
# define MM_NODE_STRUCT_PENDING aligned_data(MM_ALIGN)
#else
# define MM_NODE_STRUCT_PENDING
#endif

/* Node overhead beyond the payload.  With CONFIG_MM_NODE_PENDING the
 * preceding field is widened (via the union below) to MM_ALIGN bytes, so
 * that amount is subtracted instead of sizeof(mmsize_t).
 */

#ifdef CONFIG_MM_NODE_PENDING
# define MM_ALLOCNODE_OVERHEAD (MM_SIZEOF_ALLOCNODE - MM_ALIGN)
#else
# define MM_ALLOCNODE_OVERHEAD (MM_SIZEOF_ALLOCNODE - sizeof(mmsize_t))
#endif
/* Get the node size */
@ -173,7 +183,16 @@ typedef size_t mmsize_t;
/* Describes one allocated chunk.  When CONFIG_MM_NODE_PENDING is set, the
 * preceding field shares storage with an MM_ALIGN-byte array so the field
 * (and, via MM_NODE_STRUCT_PENDING, the whole struct) is padded to
 * MM_ALIGN.
 */

struct mm_allocnode_s
{
#ifdef CONFIG_MM_NODE_PENDING
union
{
mmsize_t preceding; /* Physical preceding chunk size */
uint8_t align[MM_ALIGN]; /* Pads the preceding field up to MM_ALIGN bytes */
};
#else
mmsize_t preceding; /* Physical preceding chunk size */
#endif
mmsize_t size; /* Size of this chunk */
#if CONFIG_MM_BACKTRACE >= 0
pid_t pid; /* The pid for caller */
@ -182,13 +201,22 @@ struct mm_allocnode_s
FAR void *backtrace[CONFIG_MM_BACKTRACE]; /* The backtrace buffer for caller */
# endif
#endif
};
}MM_NODE_STRUCT_PENDING; /* Aligned to MM_ALIGN when CONFIG_MM_NODE_PENDING */
/* This describes a free chunk */
struct mm_freenode_s
{
#ifdef CONFIG_MM_NODE_PENDING
union
{
mmsize_t preceding; /* Physical preceding chunk size */
uint8_t align[MM_ALIGN]; /* Pads the preceding field up to MM_ALIGN bytes */
};
#else
mmsize_t preceding; /* Physical preceding chunk size */
#endif
mmsize_t size; /* Size of this chunk */
#if CONFIG_MM_BACKTRACE >= 0
pid_t pid; /* The pid for caller */
@ -199,7 +227,7 @@ struct mm_freenode_s
#endif
FAR struct mm_freenode_s *flink; /* Supports a doubly linked list */
FAR struct mm_freenode_s *blink; /* Backward link of the free list */
};
}MM_NODE_STRUCT_PENDING; /* Aligned to MM_ALIGN when CONFIG_MM_NODE_PENDING */
/* The allocated-node header must fit within the minimum chunk size */

static_assert(MM_SIZEOF_ALLOCNODE <= MM_MIN_CHUNK,
"Error size for struct mm_allocnode_s\n");