forked from nuttx/nuttx-update
mm/mm_heap: add mempool to optimize small block performance
There are many small memory blocks in the NuttX system, e.g. struct tcb_s, struct inode, etc., and they have several disadvantages: 1. Their frequent allocation and freeing cause system memory fragmentation. 2. Since each memory block carries an overhead, the utilization of small memory blocks is relatively low, which wastes memory. So we can use a mempool to allocate small blocks, to improve allocation speed and utilization and to reduce fragmentation. Signed-off-by: dongjiuzhu1 <dongjiuzhu1@xiaomi.com>
This commit is contained in:
parent
7cd325f3be
commit
c82f44c4f3
8 changed files with 112 additions and 1 deletions
16
mm/Kconfig
16
mm/Kconfig
|
@ -172,6 +172,22 @@ config MM_SHM
|
|||
Build in support for the shared memory interfaces shmget(), shmat(),
|
||||
shmctl(), and shmdt().
|
||||
|
||||
config MM_HEAP_MEMPOOL_THRESHOLD
|
||||
int "The size of threshold to avoid using multiple mempool in heap"
|
||||
default 0
|
||||
---help---
|
||||
If the size of the memory requested by the user is less
|
||||
than the threshold, the memory will be requested from the
|
||||
multiple mempool by default.
|
||||
|
||||
config MM_HEAP_MEMPOOL_EXPAND
|
||||
int "The expand size for each mempool in multiple mempool"
|
||||
default 1024
|
||||
depends on MM_HEAP_MEMPOOL_THRESHOLD != 0
|
||||
---help---
|
||||
This size describes the size of each expansion of each memory
|
||||
pool with insufficient memory in the multi-level memory pool.
|
||||
|
||||
config FS_PROCFS_EXCLUDE_MEMPOOL
|
||||
bool "Exclude mempool"
|
||||
default DEFAULT_SMALL
|
||||
|
|
|
@ -31,6 +31,7 @@
|
|||
#include <nuttx/sched.h>
|
||||
#include <nuttx/fs/procfs.h>
|
||||
#include <nuttx/lib/math32.h>
|
||||
#include <nuttx/mm/mempool.h>
|
||||
|
||||
#include <assert.h>
|
||||
#include <execinfo.h>
|
||||
|
@ -133,6 +134,11 @@
|
|||
|
||||
#define SIZEOF_MM_FREENODE sizeof(struct mm_freenode_s)
|
||||
|
||||
#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
|
||||
# define MM_IS_FROM_MEMPOOL(mem) \
|
||||
((*((FAR mmsize_t *)mem - 1) & MM_ALLOC_BIT) == 0)
|
||||
#endif
|
||||
|
||||
/****************************************************************************
|
||||
* Public Types
|
||||
****************************************************************************/
|
||||
|
@ -225,6 +231,14 @@ struct mm_heap_s
|
|||
|
||||
FAR struct mm_delaynode_s *mm_delaylist[CONFIG_SMP_NCPUS];
|
||||
|
||||
/* This is a multiple mempool of the heap */
|
||||
|
||||
#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
|
||||
struct mempool_multiple_s mm_mpool;
|
||||
struct mempool_s mm_pools[CONFIG_MM_HEAP_MEMPOOL_THRESHOLD /
|
||||
sizeof(uintptr_t)];
|
||||
#endif
|
||||
|
||||
#if defined(CONFIG_FS_PROCFS) && !defined(CONFIG_FS_PROCFS_EXCLUDE_MEMINFO)
|
||||
struct procfs_meminfo_entry_s mm_procfs;
|
||||
#endif
|
||||
|
|
|
@ -82,6 +82,14 @@ void mm_free(FAR struct mm_heap_s *heap, FAR void *mem)
|
|||
return;
|
||||
}
|
||||
|
||||
#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
|
||||
if (MM_IS_FROM_MEMPOOL(mem))
|
||||
{
|
||||
mempool_multiple_free(&heap->mm_mpool, mem);
|
||||
return;
|
||||
}
|
||||
#endif
|
||||
|
||||
if (mm_lock(heap) < 0)
|
||||
{
|
||||
/* Meet -ESRCH return, which means we are in situations
|
||||
|
|
|
@ -226,6 +226,20 @@ FAR struct mm_heap_s *mm_initialize(FAR const char *name,
|
|||
# endif
|
||||
#endif
|
||||
|
||||
/* Initialize the multiple mempool in heap */
|
||||
|
||||
#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
|
||||
heap->mm_mpool.pools = heap->mm_pools;
|
||||
heap->mm_mpool.npools = sizeof(heap->mm_pools) / sizeof(heap->mm_pools[0]);
|
||||
for (i = 0; i < heap->mm_mpool.npools; i++)
|
||||
{
|
||||
heap->mm_pools[i].blocksize = (i + 1) * sizeof(uintptr_t);
|
||||
heap->mm_pools[i].expandsize = CONFIG_MM_HEAP_MEMPOOL_EXPAND;
|
||||
}
|
||||
|
||||
mempool_multiple_init(&heap->mm_mpool, name);
|
||||
#endif
|
||||
|
||||
/* Add the initial region of memory to the heap */
|
||||
|
||||
mm_addregion(heap, heapstart, heapsize);
|
||||
|
|
|
@ -120,6 +120,14 @@ FAR void *mm_malloc(FAR struct mm_heap_s *heap, size_t size)
|
|||
return NULL;
|
||||
}
|
||||
|
||||
#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
|
||||
ret = mempool_multiple_alloc(&heap->mm_mpool, size);
|
||||
if (ret != NULL)
|
||||
{
|
||||
return ret;
|
||||
}
|
||||
#endif
|
||||
|
||||
/* Adjust the size to account for (1) the size of the allocated node and
|
||||
* (2) to make sure that it is an even multiple of our granule size.
|
||||
*/
|
||||
|
|
|
@ -46,6 +46,13 @@ size_t mm_malloc_size(FAR void *mem)
|
|||
return 0;
|
||||
}
|
||||
|
||||
#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
|
||||
if (MM_IS_FROM_MEMPOOL(mem))
|
||||
{
|
||||
return mempool_multiple_alloc_size(mem);
|
||||
}
|
||||
#endif
|
||||
|
||||
/* Map the memory chunk into a free node */
|
||||
|
||||
node = (FAR struct mm_freenode_s *)((FAR char *)mem - SIZEOF_MM_ALLOCNODE);
|
||||
|
|
|
@ -43,7 +43,7 @@
|
|||
* within that chunk that meets the alignment request and then frees any
|
||||
* leading or trailing space.
|
||||
*
|
||||
* The alignment argument must be a power of two. 8-byte alignment is
|
||||
* The alignment argument must be a power of two. 16-byte alignment is
|
||||
* guaranteed by normal malloc calls.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
@ -72,6 +72,14 @@ FAR void *mm_memalign(FAR struct mm_heap_s *heap, size_t alignment,
|
|||
return NULL;
|
||||
}
|
||||
|
||||
#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
|
||||
node = mempool_multiple_memalign(&heap->mm_mpool, alignment, size);
|
||||
if (node != NULL)
|
||||
{
|
||||
return node;
|
||||
}
|
||||
#endif
|
||||
|
||||
/* If this requested alignment is less than or equal to the natural
|
||||
* alignment of malloc, then just let malloc do the work.
|
||||
*/
|
||||
|
|
|
@ -34,6 +34,12 @@
|
|||
#include "mm_heap/mm.h"
|
||||
#include "kasan/kasan.h"
|
||||
|
||||
/****************************************************************************
|
||||
* Pre-processor Definitions
|
||||
****************************************************************************/
|
||||
|
||||
#define MIN(x, y) ((x) < (y) ? (x) : (y))
|
||||
|
||||
/****************************************************************************
|
||||
* Public Functions
|
||||
****************************************************************************/
|
||||
|
@ -88,6 +94,36 @@ FAR void *mm_realloc(FAR struct mm_heap_s *heap, FAR void *oldmem,
|
|||
return NULL;
|
||||
}
|
||||
|
||||
#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
|
||||
if (MM_IS_FROM_MEMPOOL(oldmem))
|
||||
{
|
||||
newmem = mempool_multiple_realloc(&heap->mm_mpool, oldmem, size);
|
||||
if (newmem != NULL)
|
||||
{
|
||||
return newmem;
|
||||
}
|
||||
|
||||
newmem = mm_malloc(heap, size);
|
||||
if (newmem != NULL)
|
||||
{
|
||||
memcpy(newmem, oldmem, mempool_multiple_alloc_size(oldmem));
|
||||
mempool_multiple_free(&heap->mm_mpool, oldmem);
|
||||
}
|
||||
|
||||
return newmem;
|
||||
}
|
||||
else
|
||||
{
|
||||
newmem = mempool_multiple_alloc(&heap->mm_mpool, size);
|
||||
if (newmem != NULL)
|
||||
{
|
||||
memcpy(newmem, oldmem, MIN(size, mm_malloc_size(oldmem)));
|
||||
mm_free(heap, oldmem);
|
||||
return newmem;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
/* Adjust the size to account for (1) the size of the allocated node and
|
||||
* (2) to make sure that it is an even multiple of our granule size.
|
||||
*/
|
||||
|
|
Loading…
Reference in a new issue