1
0
Fork 0
forked from nuttx/nuttx-update

mm/dump: make macros more generic, fix help prompt

Signed-off-by: buxiasen <buxiasen@xiaomi.com>
This commit is contained in:
buxiasen 2024-07-25 22:26:57 +08:00 committed by GUIDINGLI
parent 1fdb3f5107
commit fd6634ecb5
6 changed files with 159 additions and 130 deletions

View file

@ -58,7 +58,7 @@
* to handle the longest line generated by this logic.
*/
#define MEMINFO_LINELEN 256
#define MEMINFO_LINELEN 512
/****************************************************************************
* Private Types
@ -429,23 +429,33 @@ static ssize_t memdump_read(FAR struct file *filep, FAR char *buffer,
procfile = (FAR struct meminfo_file_s *)filep->f_priv;
DEBUGASSERT(procfile);
#if CONFIG_MM_BACKTRACE >= 0
linesize = procfs_snprintf(procfile->line, MEMINFO_LINELEN,
"usage: <on/off/pid/used/free/leak>"
"<seqmin> <seqmax>\n"
"on/off backtrace\n"
"pid: dump pid allocated node\n"
"used: dump all allocated node\n"
"free: dump all free node\n"
"leak: dump all leaked node\n"
"The current sequence number %lu\n",
g_mm_seqno);
#else
linesize = procfs_snprintf(procfile->line, MEMINFO_LINELEN,
"usage: <used/free>\n"
"used: dump all allocated node\n"
"free: dump all free node\n");
"usage: <used/free"
#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD > 0
"/mempool"
#endif
#if CONFIG_MM_BACKTRACE > 0
"/on/off"
#endif
#if CONFIG_MM_BACKTRACE >= 0
"/leak/pid> <seqmin> <seqmax"
#endif
">\n"
"used: dump all allocated node\n"
"free: dump all free node\n"
#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD > 0
"mempool: dump all mempool alloc node\n"
#endif
#if CONFIG_MM_BACKTRACE > 0
"on/off: set backtrace enabled state\n"
#endif
#if CONFIG_MM_BACKTRACE >= 0
"leak: dump all leaked node\n"
"pid: dump pid allocated node\n"
"The current sequence number %lu\n",
g_mm_seqno
#endif
);
copysize = procfs_memcpy(procfile->line, linesize, buffer, buflen,
&offset);
@ -554,14 +564,14 @@ static ssize_t memdump_write(FAR struct file *filep, FAR const char *buffer,
#endif
break;
#if CONFIG_MM_BACKTRACE >= 0
case 'l':
dump.pid = PID_MM_LEAK;
#if CONFIG_MM_BACKTRACE >= 0
p = (FAR char *)buffer + 4;
goto dump;
#endif
break;
#endif
#if CONFIG_MM_BACKTRACE >= 0
default:

View file

@ -139,11 +139,23 @@
# define MM_INTERNAL_HEAP(heap) ((heap) == USR_HEAP)
#endif
#define MM_DUMP_ASSIGN(dump, pid) ((dump) == (pid))
#define MM_DUMP_ALLOC(dump, pid) \
((dump) == PID_MM_ALLOC && (pid) != PID_MM_MEMPOOL)
#define MM_DUMP_LEAK(dump, pid) \
((dump) == PID_MM_LEAK && (pid) >= 0 && nxsched_get_tcb(pid) == NULL)
#if CONFIG_MM_BACKTRACE >= 0
/* Node-match predicates used by the memdump/mallinfo walkers.  Each takes
 * the dump filter and the per-node backtrace record; a node is reported
 * when any predicate matches AND its sequence number is in range.
 */

# define MM_DUMP_ALLOC(dump, node) \
  ((node) != NULL && (dump)->pid == PID_MM_ALLOC && \
   (node)->pid != PID_MM_MEMPOOL)
# define MM_DUMP_SEQNO(dump, node) \
  ((node)->seqno >= (dump)->seqmin && (node)->seqno <= (dump)->seqmax)
# define MM_DUMP_ASSIGN(dump, node) \
  ((node) != NULL && (dump)->pid == (node)->pid)

/* Leaked: owner pid looks valid but no such task exists any more */

# define MM_DUMP_LEAK(dump, node) \
  ((node) != NULL && (dump)->pid == PID_MM_LEAK && (node)->pid >= 0 && \
   nxsched_get_tcb((node)->pid) == NULL)
#else
/* No backtrace record: only "dump everything allocated" is meaningful */

# define MM_DUMP_ALLOC(dump,node)  ((dump)->pid == PID_MM_ALLOC)
# define MM_DUMP_SEQNO(dump,node)  (true)
# define MM_DUMP_ASSIGN(dump,node) (false)
# define MM_DUMP_LEAK(dump,node)   (false)
#endif
#define MM_INIT_MAGIC 0xcc
#define MM_ALLOC_MAGIC 0xaa

View file

@ -176,10 +176,8 @@ static void mempool_info_task_callback(FAR struct mempool_s *pool,
return;
}
if ((MM_DUMP_ASSIGN(task->pid, buf->pid) ||
MM_DUMP_ALLOC(task->pid, buf->pid) ||
MM_DUMP_LEAK(task->pid, buf->pid)) &&
buf->seqno >= task->seqmin && buf->seqno <= task->seqmax)
if ((MM_DUMP_ASSIGN(task, buf) || MM_DUMP_ALLOC(task, buf) ||
MM_DUMP_LEAK(task, buf)) && MM_DUMP_SEQNO(task, buf))
{
info->aordblks++;
info->uordblks += blocksize;
@ -198,10 +196,8 @@ static void mempool_memdump_callback(FAR struct mempool_s *pool,
return;
}
if ((MM_DUMP_ASSIGN(dump->pid, buf->pid) ||
MM_DUMP_ALLOC(dump->pid, buf->pid) ||
MM_DUMP_LEAK(dump->pid, buf->pid)) &&
buf->seqno >= dump->seqmin && buf->seqno <= dump->seqmax)
if ((MM_DUMP_ASSIGN(dump, buf) || MM_DUMP_ALLOC(dump, buf) ||
MM_DUMP_LEAK(dump, buf)) && MM_DUMP_SEQNO(dump, buf))
{
char tmp[CONFIG_MM_BACKTRACE * BACKTRACE_PTR_FMT_WIDTH + 1] = "";

View file

@ -99,22 +99,12 @@ static void mallinfo_task_handler(FAR struct mm_allocnode_s *node,
if (MM_NODE_IS_ALLOC(node))
{
DEBUGASSERT(nodesize >= MM_SIZEOF_ALLOCNODE);
#if CONFIG_MM_BACKTRACE < 0
if (task->pid == PID_MM_ALLOC)
if ((MM_DUMP_ASSIGN(task, node) || MM_DUMP_ALLOC(task, node) ||
MM_DUMP_LEAK(task, node)) && MM_DUMP_SEQNO(task, node))
{
info->aordblks++;
info->uordblks += nodesize;
}
#else
if ((MM_DUMP_ASSIGN(task->pid, node->pid) ||
MM_DUMP_ALLOC(task->pid, node->pid) ||
MM_DUMP_LEAK(task->pid, node->pid)) &&
node->seqno >= task->seqmin && node->seqno <= task->seqmax)
{
info->aordblks++;
info->uordblks += nodesize;
}
#endif
}
else if (task->pid == PID_MM_FREE)
{

View file

@ -43,52 +43,54 @@
* Private Types
****************************************************************************/
/* Context handed to the heap-walk callback; wraps the caller-supplied
 * dump filter (pid and seqno range — see struct mm_memdump_s).
 */

struct mm_memdump_priv_s
{
  FAR const struct mm_memdump_s *dump; /* Caller's dump filter parameters */
};
/****************************************************************************
* Private Functions
****************************************************************************/
/* Log a single allocated heap node.  The amount of detail depends on
 * CONFIG_MM_BACKTRACE: size/address only (< 0), plus owner pid and
 * sequence number (== 0), plus a formatted backtrace (> 0).
 */

static void memdump_allocnode(FAR struct mm_allocnode_s *node)
{
  size_t nodesize = MM_SIZEOF_NODE(node);

  /* Payload starts just past the allocation node header */

  FAR const char *payload = (FAR const char *)node + MM_SIZEOF_ALLOCNODE;

#if CONFIG_MM_BACKTRACE < 0
  syslog(LOG_INFO, "%12zu%*p\n",
         nodesize, BACKTRACE_PTR_FMT_WIDTH, payload);
#elif CONFIG_MM_BACKTRACE == 0
  syslog(LOG_INFO, "%6d%12zu%12lu%*p\n",
         node->pid, nodesize, node->seqno,
         BACKTRACE_PTR_FMT_WIDTH, payload);
#else
  char buf[BACKTRACE_BUFFER_SIZE(CONFIG_MM_BACKTRACE)];

  backtrace_format(buf, sizeof(buf), node->backtrace,
                   CONFIG_MM_BACKTRACE);
  syslog(LOG_INFO, "%6d%12zu%12lu%*p %s\n",
         node->pid, nodesize, node->seqno,
         BACKTRACE_PTR_FMT_WIDTH, payload, buf);
#endif
}
static void memdump_handler(FAR struct mm_allocnode_s *node, FAR void *arg)
{
FAR const struct mm_memdump_s *dump = arg;
FAR struct mm_memdump_priv_s *priv = arg;
FAR const struct mm_memdump_s *dump = priv->dump;
size_t nodesize = MM_SIZEOF_NODE(node);
if (MM_NODE_IS_ALLOC(node))
{
DEBUGASSERT(nodesize >= MM_SIZEOF_ALLOCNODE);
#if CONFIG_MM_BACKTRACE < 0
if (dump->pid == PID_MM_ALLOC)
if ((MM_DUMP_ASSIGN(dump, node) || MM_DUMP_ALLOC(dump, node) ||
MM_DUMP_LEAK(dump, node)) && MM_DUMP_SEQNO(dump, node))
{
syslog(LOG_INFO, "%12zu%*p\n",
nodesize, BACKTRACE_PTR_FMT_WIDTH,
((FAR char *)node + MM_SIZEOF_ALLOCNODE));
memdump_allocnode(node);
}
#elif CONFIG_MM_BACKTRACE == 0
if ((MM_DUMP_ASSIGN(dump->pid, node->pid) ||
MM_DUMP_ALLOC(dump->pid, node->pid) ||
MM_DUMP_LEAK(dump->pid, node->pid)) &&
node->seqno >= dump->seqmin && node->seqno <= dump->seqmax)
{
syslog(LOG_INFO, "%6d%12zu%12lu%*p\n",
node->pid, nodesize, node->seqno,
BACKTRACE_PTR_FMT_WIDTH,
((FAR char *)node + MM_SIZEOF_ALLOCNODE));
}
#else
if ((MM_DUMP_ASSIGN(dump->pid, node->pid) ||
MM_DUMP_ALLOC(dump->pid, node->pid) ||
MM_DUMP_LEAK(dump->pid, node->pid)) &&
node->seqno >= dump->seqmin && node->seqno <= dump->seqmax)
{
char buf[BACKTRACE_BUFFER_SIZE(CONFIG_MM_BACKTRACE)];
backtrace_format(buf, sizeof(buf), node->backtrace,
CONFIG_MM_BACKTRACE);
syslog(LOG_INFO, "%6d%12zu%12lu%*p %s\n",
node->pid, nodesize, node->seqno,
BACKTRACE_PTR_FMT_WIDTH,
((FAR char *)node + MM_SIZEOF_ALLOCNODE), buf);
}
#endif
}
else if (dump->pid == PID_MM_FREE)
{
@ -127,8 +129,16 @@ static void memdump_handler(FAR struct mm_allocnode_s *node, FAR void *arg)
void mm_memdump(FAR struct mm_heap_s *heap,
FAR const struct mm_memdump_s *dump)
{
struct mm_memdump_priv_s priv;
struct mallinfo_task info;
info = mm_mallinfo_task(heap, dump);
if (info.aordblks == 0 && dump->pid >= PID_MM_FREE)
{
return;
}
if (dump->pid >= PID_MM_ALLOC)
{
syslog(LOG_INFO, "Dump all used memory node info:\n");
@ -140,7 +150,7 @@ void mm_memdump(FAR struct mm_heap_s *heap,
BACKTRACE_PTR_FMT_WIDTH, "Address", "Backtrace");
#endif
}
else
else if (dump->pid == PID_MM_FREE)
{
syslog(LOG_INFO, "Dump all free memory node info:\n");
syslog(LOG_INFO, "%12s%*s\n", "Size", BACKTRACE_PTR_FMT_WIDTH,
@ -150,10 +160,14 @@ void mm_memdump(FAR struct mm_heap_s *heap,
#ifdef CONFIG_MM_HEAP_MEMPOOL
mempool_multiple_memdump(heap->mm_mpool, dump);
#endif
mm_foreach(heap, memdump_handler, (FAR void *)dump);
memset(&priv, 0, sizeof(struct mm_memdump_priv_s));
priv.dump = dump;
info = mm_mallinfo_task(heap, dump);
mm_foreach(heap, memdump_handler, &priv);
syslog(LOG_INFO, "%12s%12s\n", "Total Blks", "Total Size");
syslog(LOG_INFO, "%12d%12d\n", info.aordblks, info.uordblks);
if (dump->pid == PID_MM_FREE)
{
syslog(LOG_INFO, "%12s%12s\n", "Total Blks", "Total Size");
syslog(LOG_INFO, "%12d%12d\n", info.aordblks, info.uordblks);
}
}

View file

@ -131,6 +131,11 @@ struct mm_mallinfo_handler_s
FAR struct mallinfo_task *info;
};
/* Context handed to the tlsf pool-walk callback; wraps the
 * caller-supplied dump filter (pid and seqno range).
 */

struct mm_memdump_priv_s
{
  FAR const struct mm_memdump_s *dump; /* Caller's dump filter parameters */
};
/****************************************************************************
* Private Function Prototypes
****************************************************************************/
@ -141,6 +146,28 @@ static void mm_delayfree(struct mm_heap_s *heap, void *mem, bool delay);
* Private Functions
****************************************************************************/
/* Log a single allocated tlsf block.  Detail depends on
 * CONFIG_MM_BACKTRACE: size/address only (< 0), plus owner pid and
 * sequence number (== 0), plus a formatted backtrace (> 0).
 *
 * Fix: the CONFIG_MM_BACKTRACE == 0 branch used buf->pid/buf->seqno
 * without declaring buf (it was only declared in the > 0 branch),
 * breaking the build in that configuration.  Declare it here too —
 * the memdump_backtrace_s record is appended to the allocation.
 */

static void memdump_allocnode(FAR void *ptr, size_t size)
{
#if CONFIG_MM_BACKTRACE < 0
  syslog(LOG_INFO, "%12zu%*p\n", size, BACKTRACE_PTR_FMT_WIDTH, ptr);
#elif CONFIG_MM_BACKTRACE == 0
  FAR struct memdump_backtrace_s *buf =
    ptr + size - sizeof(struct memdump_backtrace_s);

  syslog(LOG_INFO, "%6d%12zu%12lu%*p\n",
         buf->pid, size, buf->seqno, BACKTRACE_PTR_FMT_WIDTH, ptr);
#else
  char tmp[BACKTRACE_BUFFER_SIZE(CONFIG_MM_BACKTRACE)];
  FAR struct memdump_backtrace_s *buf =
    ptr + size - sizeof(struct memdump_backtrace_s);

  backtrace_format(tmp, sizeof(tmp), buf->backtrace,
                   CONFIG_MM_BACKTRACE);
  syslog(LOG_INFO, "%6d%12zu%12lu%*p %s\n",
         buf->pid, size, buf->seqno, BACKTRACE_PTR_FMT_WIDTH,
         ptr, tmp);
#endif
}
#if CONFIG_MM_BACKTRACE >= 0
/****************************************************************************
@ -321,25 +348,20 @@ static void mallinfo_task_handler(FAR void *ptr, size_t size, int used,
if (used)
{
#if CONFIG_MM_BACKTRACE < 0
if (task->pid == PID_MM_ALLOC)
{
info->aordblks++;
info->uordblks += size;
}
#else
#if CONFIG_MM_BACKTRACE >= 0
FAR struct memdump_backtrace_s *buf =
ptr + size - sizeof(struct memdump_backtrace_s);
#else
# define buf NULL
#endif
if ((MM_DUMP_ASSIGN(task->pid, buf->pid) ||
MM_DUMP_ALLOC(task->pid, buf->pid) ||
MM_DUMP_LEAK(task->pid, buf->pid)) &&
buf->seqno >= task->seqmin && buf->seqno <= task->seqmax)
if ((MM_DUMP_ASSIGN(task, buf) || MM_DUMP_ALLOC(task, buf) ||
MM_DUMP_LEAK(task, buf)) && MM_DUMP_SEQNO(task, buf))
{
info->aordblks++;
info->uordblks += size;
}
#endif
#undef buf
}
else if (task->pid == PID_MM_FREE)
{
@ -434,45 +456,23 @@ static void mm_unlock(FAR struct mm_heap_s *heap)
static void memdump_handler(FAR void *ptr, size_t size, int used,
FAR void *user)
{
FAR const struct mm_memdump_s *dump = user;
FAR struct mm_memdump_priv_s *priv = user;
FAR const struct mm_memdump_s *dump = priv->dump;
if (used)
{
#if CONFIG_MM_BACKTRACE < 0
if (dump->pid == PID_MM_ALLOC)
{
syslog(LOG_INFO, "%12zu%*p\n", size, BACKTRACE_PTR_FMT_WIDTH, ptr);
}
#elif CONFIG_MM_BACKTRACE == 0
#if CONFIG_MM_BACKTRACE >= 0
FAR struct memdump_backtrace_s *buf =
ptr + size - sizeof(struct memdump_backtrace_s);
if ((MM_DUMP_ASSIGN(dump->pid, buf->pid) ||
MM_DUMP_ALLOC(dump->pid, buf->pid) ||
MM_DUMP_LEAK(dump->pid, buf->pid)) &&
buf->seqno >= dump->seqmin && buf->seqno <= dump->seqmax)
{
syslog(LOG_INFO, "%6d%12zu%12lu%*p\n",
buf->pid, size, buf->seqno, BACKTRACE_PTR_FMT_WIDTH, ptr);
}
#else
FAR struct memdump_backtrace_s *buf =
ptr + size - sizeof(struct memdump_backtrace_s);
if ((MM_DUMP_ASSIGN(dump->pid, buf->pid) ||
MM_DUMP_ALLOC(dump->pid, buf->pid) ||
MM_DUMP_LEAK(dump->pid, buf->pid)) &&
buf->seqno >= dump->seqmin && buf->seqno <= dump->seqmax)
{
char tmp[BACKTRACE_BUFFER_SIZE(CONFIG_MM_BACKTRACE)];
backtrace_format(tmp, sizeof(tmp), buf->backtrace,
CONFIG_MM_BACKTRACE);
syslog(LOG_INFO, "%6d%12zu%12lu%*p %s\n",
buf->pid, size, buf->seqno, BACKTRACE_PTR_FMT_WIDTH,
ptr, tmp);
}
# define buf NULL
#endif
if ((MM_DUMP_ASSIGN(dump, buf) || MM_DUMP_ALLOC(dump, buf) ||
MM_DUMP_LEAK(dump, buf)) && MM_DUMP_SEQNO(dump, buf))
{
memdump_allocnode(ptr, size);
}
#undef buf
}
else if (dump->pid == PID_MM_FREE)
{
@ -494,6 +494,7 @@ static void mm_delayfree(FAR struct mm_heap_s *heap, FAR void *mem,
if (mm_lock(heap) == 0)
{
size_t size = mm_malloc_size(heap, mem);
UNUSED(size);
#ifdef CONFIG_MM_FILL_ALLOCATIONS
memset(mem, MM_FREE_MAGIC, size);
#endif
@ -1062,6 +1063,7 @@ void mm_memdump(FAR struct mm_heap_s *heap,
#else
# define region 0
#endif
struct mm_memdump_priv_s priv;
struct mallinfo_task info;
if (dump->pid >= PID_MM_ALLOC)
@ -1075,7 +1077,7 @@ void mm_memdump(FAR struct mm_heap_s *heap,
BACKTRACE_PTR_FMT_WIDTH, "Address", "Backtrace");
#endif
}
else
else if (dump->pid == PID_MM_FREE)
{
syslog(LOG_INFO, "Dump all free memory node info:\n");
syslog(LOG_INFO, "%12s%*s\n", "Size", BACKTRACE_PTR_FMT_WIDTH,
@ -1086,20 +1088,25 @@ void mm_memdump(FAR struct mm_heap_s *heap,
mempool_multiple_memdump(heap->mm_mpool, dump);
#endif
memset(&priv, 0, sizeof(struct mm_memdump_priv_s));
priv.dump = dump;
#if CONFIG_MM_REGIONS > 1
for (region = 0; region < heap->mm_nregions; region++)
#endif
{
DEBUGVERIFY(mm_lock(heap));
tlsf_walk_pool(heap->mm_heapstart[region],
memdump_handler, (FAR void *)dump);
memdump_handler, &priv);
mm_unlock(heap);
}
#undef region
info = mm_mallinfo_task(heap, dump);
syslog(LOG_INFO, "%12s%12s\n", "Total Blks", "Total Size");
syslog(LOG_INFO, "%12d%12d\n", info.aordblks, info.uordblks);
if (dump->pid == PID_MM_FREE)
{
info = mm_mallinfo_task(heap, dump);
syslog(LOG_INFO, "%12s%12s\n", "Total Blks", "Total Size");
syslog(LOG_INFO, "%12d%12d\n", info.aordblks, info.uordblks);
}
}
/****************************************************************************