mirror of
https://github.com/apache/nuttx.git
synced 2025-01-12 22:08:35 +08:00
mm: support custom the mm alignment and default to be 8
Signed-off-by: wangbowen6 <wangbowen6@xiaomi.com>
This commit is contained in:
parent
97b4900c59
commit
70083168f1
12 changed files with 73 additions and 35 deletions
|
@ -61,6 +61,15 @@ config MM_KERNEL_HEAPSIZE
|
|||
user-mode heap. This value may need to be aligned to units of the
|
||||
size of the smallest memory protection region.
|
||||
|
||||
config MM_DFAULT_ALIGNMENT
|
||||
int "Memory default alignment in bytes"
|
||||
default 8
|
||||
range 0 64
|
||||
---help---
|
||||
The memory default alignment in bytes. If this value is 0, the real
|
||||
memory default alignment is equal to sizeof(uintptr_t); if this value
|
||||
is not 0, this value must be 2^n and at least sizeof(uintptr_t).
|
||||
|
||||
config MM_SMALL
|
||||
bool "Small memory model"
|
||||
default n
|
||||
|
|
|
@ -110,7 +110,12 @@
|
|||
#define MM_MAX_CHUNK (1 << MM_MAX_SHIFT)
|
||||
#define MM_NNODES (MM_MAX_SHIFT - MM_MIN_SHIFT + 1)
|
||||
|
||||
#define MM_GRAN_MASK (MM_MIN_CHUNK - 1)
|
||||
#if CONFIG_MM_DFAULT_ALIGNMENT == 0
|
||||
# define MM_ALIGN sizeof(uintptr_t)
|
||||
#else
|
||||
# define MM_ALIGN CONFIG_MM_DFAULT_ALIGNMENT
|
||||
#endif
|
||||
#define MM_GRAN_MASK (MM_ALIGN - 1)
|
||||
#define MM_ALIGN_UP(a) (((a) + MM_GRAN_MASK) & ~MM_GRAN_MASK)
|
||||
#define MM_ALIGN_DOWN(a) ((a) & ~MM_GRAN_MASK)
|
||||
|
||||
|
@ -138,10 +143,6 @@
|
|||
|
||||
#define OVERHEAD_MM_ALLOCNODE (SIZEOF_MM_ALLOCNODE - sizeof(mmsize_t))
|
||||
|
||||
/* What is the size of the freenode? */
|
||||
|
||||
#define SIZEOF_MM_FREENODE sizeof(struct mm_freenode_s)
|
||||
|
||||
/* Get the node size */
|
||||
|
||||
#define SIZEOF_MM_NODE(node) ((node)->size & (~MM_MASK_BIT))
|
||||
|
@ -194,8 +195,9 @@ struct mm_freenode_s
|
|||
static_assert(SIZEOF_MM_ALLOCNODE <= MM_MIN_CHUNK,
|
||||
"Error size for struct mm_allocnode_s\n");
|
||||
|
||||
static_assert(SIZEOF_MM_FREENODE <= MM_MIN_CHUNK,
|
||||
"Error size for struct mm_freenode_s\n");
|
||||
static_assert(MM_ALIGN >= sizeof(uintptr_t) &&
|
||||
(MM_ALIGN & MM_GRAN_MASK) == 0,
|
||||
"Error memory aligment\n");
|
||||
|
||||
struct mm_delaynode_s
|
||||
{
|
||||
|
|
|
@ -51,7 +51,7 @@ void mm_addfreechunk(FAR struct mm_heap_s *heap,
|
|||
size_t nodesize = SIZEOF_MM_NODE(node);
|
||||
int ndx;
|
||||
|
||||
DEBUGASSERT(nodesize >= SIZEOF_MM_FREENODE);
|
||||
DEBUGASSERT(nodesize >= MM_MIN_CHUNK);
|
||||
DEBUGASSERT((node->size & MM_ALLOC_BIT) == 0);
|
||||
|
||||
/* Convert the size to a nodelist index */
|
||||
|
|
|
@ -50,7 +50,7 @@ static void checkcorruption_handler(FAR struct mm_allocnode_s *node,
|
|||
{
|
||||
FAR struct mm_freenode_s *fnode = (FAR void *)node;
|
||||
|
||||
assert(nodesize >= SIZEOF_MM_FREENODE);
|
||||
assert(nodesize >= MM_MIN_CHUNK);
|
||||
assert(fnode->blink->flink == fnode);
|
||||
assert(SIZEOF_MM_NODE(fnode->blink) <= nodesize);
|
||||
assert(fnode->flink == NULL ||
|
||||
|
|
|
@ -139,7 +139,7 @@ void mm_addregion(FAR struct mm_heap_s *heap, FAR void *heapstart,
|
|||
heap->mm_heapstart[IDX]->size = SIZEOF_MM_ALLOCNODE | MM_ALLOC_BIT;
|
||||
node = (FAR struct mm_freenode_s *)
|
||||
(heapbase + SIZEOF_MM_ALLOCNODE);
|
||||
DEBUGASSERT((((uintptr_t)node + SIZEOF_MM_ALLOCNODE) % MM_MIN_CHUNK) == 0);
|
||||
DEBUGASSERT((((uintptr_t)node + SIZEOF_MM_ALLOCNODE) % MM_ALIGN) == 0);
|
||||
node->size = heapsize - 2 * SIZEOF_MM_ALLOCNODE;
|
||||
heap->mm_heapend[IDX] = (FAR struct mm_allocnode_s *)
|
||||
(heapend - SIZEOF_MM_ALLOCNODE);
|
||||
|
@ -204,7 +204,6 @@ FAR struct mm_heap_s *mm_initialize(FAR const char *name,
|
|||
heapsize -= sizeof(struct mm_heap_s);
|
||||
heapstart = (FAR char *)heap_adj + sizeof(struct mm_heap_s);
|
||||
|
||||
DEBUGASSERT(MM_MIN_CHUNK >= SIZEOF_MM_FREENODE);
|
||||
DEBUGASSERT(MM_MIN_CHUNK >= SIZEOF_MM_ALLOCNODE);
|
||||
|
||||
/* Set up global variables */
|
||||
|
|
|
@ -57,7 +57,7 @@ static void mallinfo_handler(FAR struct mm_allocnode_s *node, FAR void *arg)
|
|||
{
|
||||
FAR struct mm_freenode_s *fnode = (FAR void *)node;
|
||||
|
||||
DEBUGASSERT(nodesize >= SIZEOF_MM_FREENODE);
|
||||
DEBUGASSERT(nodesize >= MM_MIN_CHUNK);
|
||||
DEBUGASSERT(fnode->blink->flink == fnode);
|
||||
DEBUGASSERT(SIZEOF_MM_NODE(fnode->blink) <= nodesize);
|
||||
DEBUGASSERT(fnode->flink == NULL ||
|
||||
|
|
|
@ -130,9 +130,15 @@ FAR void *mm_malloc(FAR struct mm_heap_s *heap, size_t size)
|
|||
#endif
|
||||
|
||||
/* Adjust the size to account for (1) the size of the allocated node and
|
||||
* (2) to make sure that it is an even multiple of our granule size.
|
||||
* (2) to make sure that it is aligned with MM_ALIGN and its size is at
|
||||
* least MM_MIN_CHUNK.
|
||||
*/
|
||||
|
||||
if (size < MM_MIN_CHUNK - OVERHEAD_MM_ALLOCNODE)
|
||||
{
|
||||
size = MM_MIN_CHUNK - OVERHEAD_MM_ALLOCNODE;
|
||||
}
|
||||
|
||||
alignsize = MM_ALIGN_UP(size + OVERHEAD_MM_ALLOCNODE);
|
||||
if (alignsize < size)
|
||||
{
|
||||
|
@ -141,8 +147,7 @@ FAR void *mm_malloc(FAR struct mm_heap_s *heap, size_t size)
|
|||
return NULL;
|
||||
}
|
||||
|
||||
DEBUGASSERT(alignsize >= MM_MIN_CHUNK);
|
||||
DEBUGASSERT(alignsize >= SIZEOF_MM_FREENODE);
|
||||
DEBUGASSERT(alignsize >= MM_ALIGN);
|
||||
|
||||
/* We need to hold the MM mutex while we muck with the nodelist. */
|
||||
|
||||
|
@ -204,7 +209,7 @@ FAR void *mm_malloc(FAR struct mm_heap_s *heap, size_t size)
|
|||
*/
|
||||
|
||||
remaining = nodesize - alignsize;
|
||||
if (remaining >= SIZEOF_MM_FREENODE)
|
||||
if (remaining >= MM_MIN_CHUNK)
|
||||
{
|
||||
/* Create the remainder node */
|
||||
|
||||
|
@ -277,6 +282,6 @@ FAR void *mm_malloc(FAR struct mm_heap_s *heap, size_t size)
|
|||
}
|
||||
#endif
|
||||
|
||||
DEBUGASSERT(ret == NULL || ((uintptr_t)ret) % MM_MIN_CHUNK == 0);
|
||||
DEBUGASSERT(ret == NULL || ((uintptr_t)ret) % MM_ALIGN == 0);
|
||||
return ret;
|
||||
}
|
||||
|
|
|
@ -54,7 +54,7 @@ FAR void *mm_memalign(FAR struct mm_heap_s *heap, size_t alignment,
|
|||
FAR struct mm_allocnode_s *node;
|
||||
uintptr_t rawchunk;
|
||||
uintptr_t alignedchunk;
|
||||
size_t mask = alignment - 1;
|
||||
size_t mask;
|
||||
size_t allocsize;
|
||||
size_t newsize;
|
||||
|
||||
|
@ -84,16 +84,22 @@ FAR void *mm_memalign(FAR struct mm_heap_s *heap, size_t alignment,
|
|||
* alignment of malloc, then just let malloc do the work.
|
||||
*/
|
||||
|
||||
if (alignment <= MM_MIN_CHUNK)
|
||||
if (alignment <= MM_ALIGN)
|
||||
{
|
||||
FAR void *ptr = mm_malloc(heap, size);
|
||||
DEBUGASSERT(ptr == NULL || ((uintptr_t)ptr) % alignment == 0);
|
||||
return ptr;
|
||||
}
|
||||
else if (alignment < MM_MIN_CHUNK)
|
||||
{
|
||||
alignment = MM_MIN_CHUNK;
|
||||
}
|
||||
|
||||
/* Adjust the size to account for (1) the size of the allocated node, (2)
|
||||
* to make sure that it is an even multiple of our granule size, and to
|
||||
* include the alignment amount.
|
||||
mask = alignment - 1;
|
||||
|
||||
/* Adjust the size to account for (1) the size of the allocated node and
|
||||
* (2) to make sure that it is aligned with MM_ALIGN and its size is at
|
||||
* least MM_MIN_CHUNK.
|
||||
*
|
||||
* Notice that we increase the allocation size by twice the requested
|
||||
* alignment. We do this so that there will be at least two valid
|
||||
|
@ -103,6 +109,11 @@ FAR void *mm_memalign(FAR struct mm_heap_s *heap, size_t alignment,
|
|||
* not include SIZEOF_MM_ALLOCNODE.
|
||||
*/
|
||||
|
||||
if (size < MM_MIN_CHUNK - OVERHEAD_MM_ALLOCNODE)
|
||||
{
|
||||
size = MM_MIN_CHUNK - OVERHEAD_MM_ALLOCNODE;
|
||||
}
|
||||
|
||||
newsize = MM_ALIGN_UP(size); /* Make multiples of our granule size */
|
||||
allocsize = newsize + 2 * alignment; /* Add double full alignment size */
|
||||
|
||||
|
@ -154,13 +165,6 @@ FAR void *mm_memalign(FAR struct mm_heap_s *heap, size_t alignment,
|
|||
next = (FAR struct mm_allocnode_s *)
|
||||
((FAR char *)node + SIZEOF_MM_NODE(node));
|
||||
|
||||
/* Make sure that there is space to convert the preceding
|
||||
* mm_allocnode_s into an mm_freenode_s. I think that this should
|
||||
* always be true
|
||||
*/
|
||||
|
||||
DEBUGASSERT(alignedchunk >= rawchunk + 8);
|
||||
|
||||
newnode = (FAR struct mm_allocnode_s *)
|
||||
(alignedchunk - SIZEOF_MM_ALLOCNODE);
|
||||
|
||||
|
@ -178,7 +182,7 @@ FAR void *mm_memalign(FAR struct mm_heap_s *heap, size_t alignment,
|
|||
* alignment point.
|
||||
*/
|
||||
|
||||
if (precedingsize < SIZEOF_MM_FREENODE)
|
||||
if (precedingsize < MM_MIN_CHUNK)
|
||||
{
|
||||
alignedchunk += alignment;
|
||||
newnode = (FAR struct mm_allocnode_s *)
|
||||
|
|
|
@ -95,7 +95,7 @@ static void memdump_handler(FAR struct mm_allocnode_s *node, FAR void *arg)
|
|||
{
|
||||
FAR struct mm_freenode_s *fnode = (FAR void *)node;
|
||||
|
||||
DEBUGASSERT(nodesize >= SIZEOF_MM_FREENODE);
|
||||
DEBUGASSERT(nodesize >= MM_MIN_CHUNK);
|
||||
DEBUGASSERT(fnode->blink->flink == fnode);
|
||||
DEBUGASSERT(SIZEOF_MM_NODE(fnode->blink) <= nodesize);
|
||||
DEBUGASSERT(fnode->flink == NULL ||
|
||||
|
|
|
@ -110,9 +110,15 @@ FAR void *mm_realloc(FAR struct mm_heap_s *heap, FAR void *oldmem,
|
|||
#endif
|
||||
|
||||
/* Adjust the size to account for (1) the size of the allocated node and
|
||||
* (2) to make sure that it is an even multiple of our granule size.
|
||||
* (2) to make sure that it is aligned with MM_ALIGN and its size is at
|
||||
* least MM_MIN_CHUNK.
|
||||
*/
|
||||
|
||||
if (size < MM_MIN_CHUNK - OVERHEAD_MM_ALLOCNODE)
|
||||
{
|
||||
size = MM_MIN_CHUNK - OVERHEAD_MM_ALLOCNODE;
|
||||
}
|
||||
|
||||
newsize = MM_ALIGN_UP(size + OVERHEAD_MM_ALLOCNODE);
|
||||
if (newsize < size)
|
||||
{
|
||||
|
@ -256,6 +262,13 @@ FAR void *mm_realloc(FAR struct mm_heap_s *heap, FAR void *oldmem,
|
|||
prev->flink->blink = prev->blink;
|
||||
}
|
||||
|
||||
/* Make sure the new previous node has enough space */
|
||||
|
||||
if (prevsize < takeprev + MM_MIN_CHUNK)
|
||||
{
|
||||
takeprev = prevsize;
|
||||
}
|
||||
|
||||
/* Extend the node into the previous free chunk */
|
||||
|
||||
newnode = (FAR struct mm_allocnode_s *)
|
||||
|
@ -270,7 +283,6 @@ FAR void *mm_realloc(FAR struct mm_heap_s *heap, FAR void *oldmem,
|
|||
*/
|
||||
|
||||
prevsize -= takeprev;
|
||||
DEBUGASSERT(prevsize >= SIZEOF_MM_FREENODE);
|
||||
prev->size = prevsize | (prev->size & MM_MASK_BIT);
|
||||
nodesize += takeprev;
|
||||
newnode->size = nodesize | MM_ALLOC_BIT | MM_PREVFREE_BIT;
|
||||
|
@ -323,6 +335,13 @@ FAR void *mm_realloc(FAR struct mm_heap_s *heap, FAR void *oldmem,
|
|||
next->flink->blink = next->blink;
|
||||
}
|
||||
|
||||
/* Make sure the new next node has enough space */
|
||||
|
||||
if (nextsize < takenext + MM_MIN_CHUNK)
|
||||
{
|
||||
takenext = nextsize;
|
||||
}
|
||||
|
||||
/* Extend the node into the next chunk */
|
||||
|
||||
nodesize += takenext;
|
||||
|
@ -339,7 +358,6 @@ FAR void *mm_realloc(FAR struct mm_heap_s *heap, FAR void *oldmem,
|
|||
newnode = (FAR struct mm_freenode_s *)
|
||||
((FAR char *)oldnode + nodesize);
|
||||
newnode->size = nextsize - takenext;
|
||||
DEBUGASSERT(newnode->size >= SIZEOF_MM_FREENODE);
|
||||
andbeyond->preceding = newnode->size;
|
||||
|
||||
/* Add the new free node to the nodelist (with the new size) */
|
||||
|
|
|
@ -106,7 +106,7 @@ void mm_shrinkchunk(FAR struct mm_heap_s *heap,
|
|||
* chunk to be shrunk.
|
||||
*/
|
||||
|
||||
else if (nodesize >= size + SIZEOF_MM_FREENODE)
|
||||
else if (nodesize >= size + MM_MIN_CHUNK)
|
||||
{
|
||||
FAR struct mm_freenode_s *newnode;
|
||||
|
||||
|
|
|
@ -44,6 +44,7 @@
|
|||
|
||||
int mm_size2ndx(size_t size)
|
||||
{
|
||||
DEBUGASSERT(size >= MM_MIN_CHUNK);
|
||||
if (size >= MM_MAX_CHUNK)
|
||||
{
|
||||
return MM_NNODES - 1;
|
||||
|
|
Loading…
Reference in a new issue