arch/x86_64: this_task is stored in the CPU private data

By default in SMP, obtaining this_task requires disabling interrupts, obtaining the current CPU index, reading a global variable indexed by that CPU, and re-enabling interrupts. Storing this_task in the per-CPU private data (struct intel64_cpu_s, reachable through %gs) reduces the lookup to a single gs-relative load.
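
For illustration, a minimal sketch of the two lookup paths follows. The slow path is a simplification of the generic SMP lookup, not the exact scheduler code; g_running_tasks, up_irq_save()/up_irq_restore() and up_cpu_index() are assumed here to be the relevant NuttX names.

#include <nuttx/irq.h>    /* irqstate_t, up_irq_save(), up_irq_restore() */
#include <nuttx/sched.h>  /* struct tcb_s */
#include "sched/sched.h"  /* g_running_tasks[] */

/* Generic SMP lookup (simplified sketch): the running task pointer lives
 * in a global per-CPU array, so the read has to pin the CPU by masking
 * interrupts around the index-and-load sequence.
 */

static struct tcb_s *this_task_generic(void)
{
  struct tcb_s *tcb;
  irqstate_t flags;

  flags = up_irq_save();                    /* Disable interrupts       */
  tcb   = g_running_tasks[up_cpu_index()];  /* Index the global array   */
  up_irq_restore(flags);                    /* Re-enable interrupts     */

  return tcb;
}

/* With this_task cached in struct intel64_cpu_s (which %gs points at),
 * up_this_task() below is a single gs-relative movq and needs no
 * interrupt masking at all.
 */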

Signed-off-by: liwenxiang1 <liwenxiang1@xiaomi.com>
Authored by liwenxiang1 on 2024-12-09 14:09:45 +08:00; committed by Xiang Xiao
Parent: 6485093277
Commit: 1fad0f1654
4 changed files with 28 additions and 27 deletions


@@ -72,13 +72,9 @@ struct intel64_cpu_s
   bool interrupt_context;
 
-  /* current_regs holds a references to the current interrupt level
-   * register storage structure. If is non-NULL only during interrupt
-   * processing. Access to current_regs must be through
-   * up_current_regs() and up_set_current_regs() functions
-   */
+  /* Current task */
 
-  uint64_t *current_regs;
+  struct tcb_s *this_task;
 
 #ifdef CONFIG_LIB_SYSCALL
   /* Current user RSP for syscall */
@@ -143,22 +139,6 @@ static inline_function int up_cpu_index(void)
  * Inline functions
  ****************************************************************************/
 
-static inline_function uint64_t *up_current_regs(void)
-{
-  uint64_t *regs;
-
-  __asm__ volatile("movq %%gs:(%c1), %0"
-                   : "=r" (regs)
-                   : "i" (offsetof(struct intel64_cpu_s, current_regs)));
-  return regs;
-}
-
-static inline_function void up_set_current_regs(uint64_t *regs)
-{
-  __asm__ volatile("movq %0, %%gs:(%c1)"
-                   :: "r" (regs), "i" (offsetof(struct intel64_cpu_s,
-                                                current_regs)));
-}
-
 static inline_function bool up_interrupt_context(void)
 {
   bool flag;
@@ -177,12 +157,30 @@ static inline_function void up_set_interrupt_context(bool flag)
                                                 interrupt_context)));
 }
 
+/****************************************************************************
+ * Schedule acceleration macros
+ ****************************************************************************/
+
+#define up_this_task() \
+  ({ \
+     struct tcb_s *this_task; \
+     __asm__ volatile("movq %%gs:(%c1), %0" \
+                      : "=r" (this_task) \
+                      : "i" (offsetof(struct intel64_cpu_s, this_task))); \
+     this_task; \
+  })
+
+#define up_update_task(t) \
+  __asm__ volatile("movq %0, %%gs:(%c1)" \
+                   :: "r" ((struct tcb_s *)t), \
+                      "i" (offsetof(struct intel64_cpu_s, this_task)))
+
 /****************************************************************************
  * Name: up_getusrpc
  ****************************************************************************/
 
 #define up_getusrpc(regs) \
-  (((uint64_t *)((regs) ? (regs) : up_current_regs()))[REG_RIP])
+  (((uint64_t *)((regs) ? (regs) : running_regs()))[REG_RIP])
 
 #undef EXTERN
 #ifdef __cplusplus
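
As a hypothetical usage sketch (not part of this change), and assuming GSBASE already points at this CPU's struct intel64_cpu_s, as x86_64_cpu_priv_set() appears to arrange in the AP boot path further below, each macro reduces to one gs-relative movq:

#include <nuttx/irq.h>    /* pulls in the arch irq.h with the macros */
#include <nuttx/sched.h>  /* struct tcb_s, UNUSED() */

/* Hypothetical caller, for illustration only; the real scheduler hooks
 * differ.  Assumes the per-CPU area is already installed in GSBASE.
 */

static void example_switch_to(struct tcb_s *next)
{
  struct tcb_s *prev = up_this_task();  /* movq %gs:offset, %reg */

  UNUSED(prev);                         /* context save/restore goes here */

  up_update_task(next);                 /* movq %reg, %gs:offset */
}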


@@ -145,8 +145,8 @@ int up_backtrace(struct tcb_s *tcb,
         {
           ret += backtrace(rtcb->stack_base_ptr,
                            rtcb->stack_base_ptr + rtcb->adj_stack_size,
-                           (void *)up_current_regs()[REG_RBP],
-                           (void *)up_current_regs()[REG_RIP],
+                           (void *)running_regs()[REG_RBP],
+                           (void *)running_regs()[REG_RIP],
                            &buffer[ret], size - ret, &skip);
         }
     }


@@ -148,7 +148,7 @@ void x86_64_ap_boot(void)
   x86_64_cpu_priv_set(cpu);
 
-  tcb = this_task();
+  tcb = current_task(cpu);
   UNUSED(tcb);
 
   /* Configure interrupts */
@@ -192,6 +192,8 @@ void x86_64_ap_boot(void)
       __revoke_low_memory();
     }
 
+  up_update_task(tcb);
+
   /* Then transfer control to the IDLE task */
 
   nx_idle_trampoline();


@@ -30,6 +30,7 @@
 #include <debug.h>
 #include <nuttx/irq.h>
 
+#include "sched/sched.h"
 #include "x86_64_internal.h"
 
 /****************************************************************************
@@ -113,7 +114,7 @@ void backtrace(uint64_t rbp)
 void up_dump_register(void *dumpregs)
 {
-  volatile uint64_t *regs = dumpregs ? dumpregs : up_current_regs();
+  volatile uint64_t *regs = dumpregs ? dumpregs : running_regs();
   uint64_t mxcsr;
   uint64_t cr2;