1
0
Fork 0
forked from nuttx/nuttx-update

tools/gdb: cache gdb.Type result

Signed-off-by: xuxingliang <xuxingliang@xiaomi.com>
This commit is contained in:
xuxingliang 2024-08-14 11:49:49 +08:00 committed by Xiang Xiao
parent 1d25eed55c
commit 3a46b6e6af
5 changed files with 52 additions and 56 deletions

View file

@@ -23,16 +23,16 @@
import gdb
import utils
list_node = utils.CachedType("struct list_node")
sq_queue = utils.CachedType("sq_queue_t")
dq_queue = utils.CachedType("dq_queue_t")
list_node_type = utils.lookup_type("struct list_node")
sq_queue_type = utils.lookup_type("sq_queue_t")
dq_queue_type = utils.lookup_type("dq_queue_t")
def list_for_each(head):
"""Iterate over a list"""
if head.type == list_node.get_type().pointer():
if head.type == list_node_type.pointer():
head = head.dereference()
elif head.type != list_node.get_type():
elif head.type != list_node_type:
raise TypeError("Must be struct list_node not {}".format(head.type))
if head["next"] == 0:
@@ -59,9 +59,9 @@ def list_check(head):
"""Check the consistency of a list"""
nb = 0
if head.type == list_node.get_type().pointer():
if head.type == list_node_type.pointer():
head = head.dereference()
elif head.type != list_node.get_type():
elif head.type != list_node_type:
raise gdb.GdbError("argument must be of type (struct list_node [*])")
c = head
try:
@@ -123,9 +123,9 @@ def list_check(head):
def sq_for_every(sq, entry):
"""Iterate over a singly linked list"""
if sq.type == sq_queue.get_type().pointer():
if sq.type == sq_queue_type.pointer():
sq = sq.dereference()
elif sq.type != sq_queue.get_type():
elif sq.type != sq_queue_type:
gdb.write("Must be struct sq_queue not {}".format(sq.type))
return
@@ -141,9 +141,9 @@ def sq_for_every(sq, entry):
def sq_is_empty(sq):
"""Check if a singly linked list is empty"""
if sq.type == sq_queue.get_type().pointer():
if sq.type == sq_queue_type.pointer():
sq = sq.dereference()
elif sq.type != sq_queue.get_type():
elif sq.type != sq_queue_type:
return False
if sq["head"] == 0:
@@ -155,9 +155,9 @@ def sq_is_empty(sq):
def sq_check(sq):
"""Check the consistency of a singly linked list"""
nb = 0
if sq.type == sq_queue.get_type().pointer():
if sq.type == sq_queue_type.pointer():
sq = sq.dereference()
elif sq.type != sq_queue.get_type():
elif sq.type != sq_queue_type:
gdb.write("Must be struct sq_queue not {}".format(sq.type))
return
@@ -179,9 +179,9 @@ def sq_check(sq):
def dq_for_every(dq, entry):
"""Iterate over a doubly linked list"""
if dq.type == dq_queue.get_type().pointer():
if dq.type == dq_queue_type.pointer():
dq = dq.dereference()
elif dq.type != dq_queue.get_type():
elif dq.type != dq_queue_type:
gdb.write("Must be struct dq_queue not {}".format(dq.type))
return
@@ -197,9 +197,9 @@ def dq_for_every(dq, entry):
def dq_check(dq):
"""Check the consistency of a doubly linked list"""
nb = 0
if dq.type == dq_queue.get_type().pointer():
if dq.type == dq_queue_type.pointer():
dq = dq.dereference()
elif dq.type != dq_queue.get_type():
elif dq.type != dq_queue_type:
gdb.write("Must be struct dq_queue not {}".format(dq.type))
return
@@ -232,9 +232,9 @@ class Nxlistcheck(gdb.Command):
raise gdb.GdbError("nx-list-check takes one argument")
obj = gdb.parse_and_eval(argv[0])
if obj.type == list_node.get_type().pointer():
if obj.type == list_node_type.pointer():
list_check(obj)
elif obj.type == sq_queue.get_type().pointer():
elif obj.type == sq_queue_type.pointer():
sq_check(obj)
else:
raise gdb.GdbError("Invalid argument type: {}".format(obj.type))

View file

@@ -24,8 +24,8 @@ import math
import time
import gdb
from lists import sq_for_every, sq_queue
from utils import get_long_type, get_symbol_value, read_ulong
from lists import sq_for_every, sq_queue_type
from utils import get_long_type, get_symbol_value, lookup_type, read_ulong
try:
import numpy as np
@@ -44,9 +44,9 @@ PID_MM_ALLOC = -3
PID_MM_LEAK = -2
PID_MM_MEMPOOL = -1
mm_allocnode_type = gdb.lookup_type("struct mm_allocnode_s")
sizeof_size_t = gdb.lookup_type("size_t").sizeof
mempool_backtrace_type = gdb.lookup_type("struct mempool_backtrace_s")
mm_allocnode_type = lookup_type("struct mm_allocnode_s")
sizeof_size_t = lookup_type("size_t").sizeof
mempool_backtrace_type = lookup_type("struct mempool_backtrace_s")
CONFIG_MM_BACKTRACE = get_symbol_value("CONFIG_MM_BACKTRACE")
CONFIG_MM_DFAULT_ALIGNMENT = get_symbol_value("CONFIG_MM_DFAULT_ALIGNMENT")
@@ -206,7 +206,7 @@ def get_count(element):
def mempool_foreach(pool):
"""Iterate over all block in a mempool"""
sq_entry_type = gdb.lookup_type("sq_entry_t")
sq_entry_type = lookup_type("sq_entry_t")
blocksize = mempool_realblocksize(pool)
if pool["ibase"] != 0:
@@ -217,7 +217,7 @@ def mempool_foreach(pool):
yield buf
nblk -= 1
entry = sq_queue.get_type().pointer()
entry = sq_queue_type.pointer()
for entry in sq_for_every(pool["equeue"], entry):
nblk = (pool["expandsize"] - sq_entry_type.sizeof) / blocksize
base = int(entry) - nblk * blocksize
@@ -229,7 +229,7 @@ def mempool_foreach(pool):
def mempool_dumpbuf(buf, blksize, count, align, simple, detail, alive):
charnode = gdb.Value(buf).cast(gdb.lookup_type("char").pointer())
charnode = gdb.Value(buf).cast(lookup_type("char").pointer())
if not alive:
# if pid is not alive put a red asterisk.
@@ -295,7 +295,7 @@ class Memdump(gdb.Command):
"""Dump the mempool memory"""
for pool in mempool_multiple_foreach(mpool):
if pid == PID_MM_FREE:
entry = sq_queue.get_type().pointer()
entry = sq_queue_type.pointer()
for entry in sq_for_every(pool["queue"], entry):
gdb.write("%12u%#*x\n" % (pool["blocksize"], self.align, entry))
@@ -807,7 +807,7 @@ class Memmap(gdb.Command):
heap = gdb.parse_and_eval("g_mmheap")
for node in mm_foreach(heap):
if node["size"] & MM_ALLOC_BIT != 0:
allocnode = gdb.Value(node).cast(gdb.lookup_type("char").pointer())
allocnode = gdb.Value(node).cast(lookup_type("char").pointer())
info.append(
{
"addr": int(allocnode),
@@ -863,7 +863,7 @@ class Memfrag(gdb.Command):
heap = gdb.parse_and_eval("g_mmheap")
for node in mm_foreach(heap):
if node["size"] & MM_ALLOC_BIT == 0:
freenode = gdb.Value(node).cast(gdb.lookup_type("char").pointer())
freenode = gdb.Value(node).cast(lookup_type("char").pointer())
info.append(
{
"addr": int(freenode),

View file

@@ -57,7 +57,7 @@ class Stack(object):
gdb.write("An overflow detected, dumping the stack:\n")
ptr_4bytes = gdb.Value(self._stack_base).cast(
gdb.lookup_type("unsigned int").pointer()
utils.lookup_type("unsigned int").pointer()
)
for i in range(0, self._stack_size // 4):
@@ -80,7 +80,7 @@ class Stack(object):
def check_max_usage(self):
ptr_4bytes = gdb.Value(self._stack_base).cast(
gdb.lookup_type("unsigned int").pointer()
utils.lookup_type("unsigned int").pointer()
)
spare = 0

View file

@@ -93,12 +93,12 @@ class Nxsetregs(gdb.Command):
if arg[0] != "":
regs = gdb.parse_and_eval(f"{arg[0]}").cast(
gdb.lookup_type("char").pointer()
utils.lookup_type("char").pointer()
)
else:
gdb.execute("set $_current_regs=tcbinfo_current_regs()")
current_regs = gdb.parse_and_eval("$_current_regs")
regs = current_regs.cast(gdb.lookup_type("char").pointer())
regs = current_regs.cast(utils.lookup_type("char").pointer())
if regs == 0:
gdb.write("regs is NULL\n")
@@ -114,7 +114,7 @@ class Nxsetregs(gdb.Command):
gdb.execute("select-frame 0")
if tcbinfo["reg_off"]["p"][i] != UINT16_MAX:
value = gdb.Value(regs + tcbinfo["reg_off"]["p"][i]).cast(
gdb.lookup_type("uintptr_t").pointer()
utils.lookup_type("uintptr_t").pointer()
)[0]
gdb.execute(f"set ${reg.name} = {value}")
@@ -162,9 +162,9 @@ class Nxinfothreads(gdb.Command):
statename = f'\x1b{"[32;1m" if statename == "Running" else "[33;1m"}{statename}\x1b[m'
if tcb["task_state"] == gdb.parse_and_eval("TSTATE_WAIT_SEM"):
mutex = tcb["waitobj"].cast(gdb.lookup_type("sem_t").pointer())
mutex = tcb["waitobj"].cast(utils.lookup_type("sem_t").pointer())
if mutex["flags"] & SEM_TYPE_MUTEX:
mutex = tcb["waitobj"].cast(gdb.lookup_type("mutex_t").pointer())
mutex = tcb["waitobj"].cast(utils.lookup_type("mutex_t").pointer())
statename = f"Waiting,Mutex:{mutex['holder']}"
try:

View file

@@ -26,30 +26,26 @@ import gdb
from macros import fetch_macro_info
g_symbol_cache = {}
g_type_cache = {}
class CachedType:
"""Cache a type object, so that we can reconnect to the new_objfile event"""
def lookup_type(name, block=None) -> gdb.Type:
"""Return the type object of a type name"""
global g_type_cache
def __init__(self, name):
self._type = None
self._name = name
key = (name, block)
if key not in g_type_cache:
try:
g_type_cache[key] = (
gdb.lookup_type(name, block=block) if block else gdb.lookup_type(name)
)
except gdb.error:
g_type_cache[key] = None
def _new_objfile_handler(self, event):
self._type = None
gdb.events.new_objfile.disconnect(self._new_objfile_handler)
def get_type(self):
if self._type is None:
self._type = gdb.lookup_type(self._name)
if self._type is None:
raise gdb.GdbError("cannot resolve type '{0}'".format(self._name))
if hasattr(gdb, "events") and hasattr(gdb.events, "new_objfile"):
gdb.events.new_objfile.connect(self._new_objfile_handler)
return self._type
return g_type_cache[key]
long_type = CachedType("long")
long_type = lookup_type("long")
class MacroCtx:
@@ -93,7 +89,7 @@ else:
def get_long_type():
"""Return the cached long type object"""
global long_type
return long_type.get_type()
return long_type
def offset_of(typeobj, field):