sched: add parameters to restore_critical_section
reason: In SMP, when a context switch occurs, restore_critical_section is
executed. To reduce the time taken for context switching, we directly pass
the required parameters to restore_critical_section instead of acquiring
them repeatedly.

Signed-off-by: hujun5 <hujun5@xiaomi.com>
This commit is contained in:
parent
8439296a50
commit
1aab457b4c
18 changed files with 67 additions and 31 deletions
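In short, the macro now receives the current TCB and CPU index from the caller instead of looking them up itself. A minimal sketch of the new calling pattern, condensed from the hunks below (names follow the patch; surrounding code is omitted):

  int cpu = this_cpu();                  /* index of the CPU we are running on   */
  struct tcb_s *tcb = current_task(cpu); /* task currently running on that CPU   */

  g_running_tasks[cpu] = tcb;            /* record it for crash/assertion logic  */
  restore_critical_section(tcb, cpu);    /* drop the cpu lock if irqcount <= 0   */

Call sites that do not already hold these values simply pass them inline, as in restore_critical_section(this_task(), this_cpu()).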
@@ -54,7 +54,9 @@
 
 uint32_t *arm_syscall(uint32_t *regs)
 {
+  struct tcb_s *tcb;
   uint32_t cmd;
+  int cpu;
 
   /* Nested interrupts are not supported */
 
@@ -158,9 +160,13 @@ uint32_t *arm_syscall(uint32_t *regs)
        * assertion logic for reporting crashes.
        */
 
-      g_running_tasks[this_cpu()] = this_task();
+      cpu = this_cpu();
+      tcb = current_task(cpu);
+      g_running_tasks[cpu] = tcb;
+
+      /* Restore the cpu lock */
 
-      restore_critical_section();
+      restore_critical_section(tcb, cpu);
       regs = (uint32_t *)CURRENT_REGS;
     }
 
@@ -478,7 +478,7 @@ int arm_svcall(int irq, void *context, void *arg)
 
   if (regs != CURRENT_REGS)
     {
-      restore_critical_section();
+      restore_critical_section(this_task(), this_cpu());
     }
 
   return OK;
@@ -160,7 +160,9 @@ static void dispatch_syscall(void)
 
 uint32_t *arm_syscall(uint32_t *regs)
 {
+  struct tcb_s *tcb;
   uint32_t cmd;
+  int cpu;
 #ifdef CONFIG_BUILD_KERNEL
   uint32_t cpsr;
 #endif
@@ -590,9 +592,13 @@ uint32_t *arm_syscall(uint32_t *regs)
        * assertion logic for reporting crashes.
        */
 
-      g_running_tasks[this_cpu()] = this_task();
+      cpu = this_cpu();
+      tcb = current_task(cpu);
+      g_running_tasks[cpu] = tcb;
+
+      /* Restore the cpu lock */
 
-      restore_critical_section();
+      restore_critical_section(tcb, cpu);
       regs = (uint32_t *)CURRENT_REGS;
     }
 
@@ -487,7 +487,7 @@ int arm_svcall(int irq, void *context, void *arg)
 
   if (regs != CURRENT_REGS)
     {
-      restore_critical_section();
+      restore_critical_section(this_task(), this_cpu());
    }
 
   return OK;
@@ -156,7 +156,9 @@ static void dispatch_syscall(void)
 
 uint32_t *arm_syscall(uint32_t *regs)
 {
+  struct tcb_s *tcb;
   uint32_t cmd;
+  int cpu;
 #ifdef CONFIG_BUILD_PROTECTED
   uint32_t cpsr;
 #endif
@@ -567,9 +569,13 @@ uint32_t *arm_syscall(uint32_t *regs)
        * assertion logic for reporting crashes.
        */
 
-      g_running_tasks[this_cpu()] = this_task();
+      cpu = this_cpu();
+      tcb = current_task(cpu);
+      g_running_tasks[cpu] = tcb;
+
+      /* Restore the cpu lock */
 
-      restore_critical_section();
+      restore_critical_section(tcb, cpu);
       regs = (uint32_t *)CURRENT_REGS;
     }
 
@@ -488,7 +488,7 @@ int arm_svcall(int irq, void *context, void *arg)
 
   if (regs != CURRENT_REGS)
     {
-      restore_critical_section();
+      restore_critical_section(this_task(), this_cpu());
    }
 
   return OK;
@@ -156,7 +156,9 @@ static void dispatch_syscall(void)
 
 uint32_t *arm_syscall(uint32_t *regs)
 {
+  struct tcb_s *tcb;
   uint32_t cmd;
+  int cpu;
 #ifdef CONFIG_BUILD_PROTECTED
   uint32_t cpsr;
 #endif
@@ -567,9 +569,13 @@ uint32_t *arm_syscall(uint32_t *regs)
        * assertion logic for reporting crashes.
        */
 
-      g_running_tasks[this_cpu()] = this_task();
+      cpu = this_cpu();
+      tcb = current_task(cpu);
+      g_running_tasks[cpu] = tcb;
+
+      /* Restore the cpu lock */
 
-      restore_critical_section();
+      restore_critical_section(tcb, cpu);
       regs = (uint32_t *)CURRENT_REGS;
     }
 
@@ -161,6 +161,8 @@ uint64_t *arm64_syscall_switch(uint64_t * regs)
   uint64_t cmd;
   struct regs_context *f_regs;
   uint64_t *ret_regs;
+  struct tcb_s *tcb;
+  int cpu;
 
   /* Nested interrupts are not supported */
 
@@ -252,11 +254,13 @@ uint64_t *arm64_syscall_switch(uint64_t * regs)
        * assertion logic for reporting crashes.
        */
 
-      g_running_tasks[this_cpu()] = this_task();
+      cpu = this_cpu();
+      tcb = current_task(cpu);
+      g_running_tasks[cpu] = tcb;
 
       /* Restore the cpu lock */
 
-      restore_critical_section();
+      restore_critical_section(tcb, cpu);
     }
 
   return ret_regs;
@@ -501,7 +501,7 @@ int riscv_swint(int irq, void *context, void *arg)
 
   if (regs != CURRENT_REGS)
     {
-      restore_critical_section();
+      restore_critical_section(this_task(), this_cpu());
    }
 
   return OK;
@@ -37,6 +37,9 @@
 
 void *riscv_perform_syscall(uintreg_t *regs)
 {
+  struct tcb_s *tcb;
+  int cpu;
+
   /* Set up the interrupt register set needed by swint() */
 
   CURRENT_REGS = regs;
@@ -64,11 +67,13 @@ void *riscv_perform_syscall(uintreg_t *regs)
        * assertion logic for reporting crashes.
        */
 
-      g_running_tasks[this_cpu()] = this_task();
+      cpu = this_cpu();
+      tcb = current_task(cpu);
+      g_running_tasks[cpu] = tcb;
 
       /* Restore the cpu lock */
 
-      restore_critical_section();
+      restore_critical_section(tcb, cpu);
 
       /* If a context switch occurred while processing the interrupt then
        * CURRENT_REGS may have change value. If we return any value
@@ -77,7 +77,7 @@ void up_exit(int status)
 
   /* Restore the cpu lock */
 
-  restore_critical_section();
+  restore_critical_section(tcb, this_cpu());
 
   /* Then switch contexts */
 
@@ -246,7 +246,7 @@ int up_cpu_paused_restore(void)
 
   /* Restore the cpu lock */
 
-  restore_critical_section();
+  restore_critical_section(tcb, this_cpu());
 
   /* Then switch contexts. Any necessary address environment changes
    * will be made when the interrupt returns.
@@ -32,6 +32,7 @@
 
 #include "clock/clock.h"
 #include "sim_internal.h"
+#include "sched/sched.h"
 
 /****************************************************************************
  * Public Functions
@@ -77,7 +78,7 @@ void up_switch_context(struct tcb_s *tcb, struct tcb_s *rtcb)
 
       /* Restore the cpu lock */
 
-      restore_critical_section();
+      restore_critical_section(tcb, this_cpu());
 
       /* Then switch contexts */
 
@@ -102,7 +103,7 @@ void up_switch_context(struct tcb_s *tcb, struct tcb_s *rtcb)
 
       /* Restore the cpu lock */
 
-      restore_critical_section();
+      restore_critical_section(tcb, this_cpu());
 
       /* Then switch contexts */
 
@@ -92,7 +92,7 @@ void up_exit(int status)
 
   /* Restore the cpu lock */
 
-  restore_critical_section();
+  restore_critical_section(tcb, this_cpu());
 
   /* Then switch contexts */
 
@@ -111,7 +111,7 @@ void up_switch_context(struct tcb_s *tcb, struct tcb_s *rtcb)
 
   /* Restore the cpu lock */
 
-  restore_critical_section();
+  restore_critical_section(tcb, this_cpu());
 
   /* Record the new "running" task. g_running_tasks[] is only used by
    * assertion logic for reporting crashes.
@@ -62,6 +62,9 @@
 #ifndef CONFIG_SUPPRESS_INTERRUPTS
 static uint64_t *common_handler(int irq, uint64_t *regs)
 {
+  struct tcb_s *tcb;
+  int cpu;
+
   /* Current regs non-zero indicates that we are processing an interrupt;
    * g_current_regs is also used to manage interrupt level context switches.
    *
@@ -99,11 +102,13 @@ static uint64_t *common_handler(int irq, uint64_t *regs)
        * crashes.
        */
 
-      g_running_tasks[this_cpu()] = this_task();
+      cpu = this_cpu();
+      tcb = current_task(cpu);
+      g_running_tasks[cpu] = tcb;
 
       /* Restore the cpu lock */
 
-      restore_critical_section();
+      restore_critical_section(tcb, cpu);
     }
 
   /* If a context switch occurred while processing the interrupt then
@@ -442,7 +442,7 @@ int xtensa_swint(int irq, void *context, void *arg)
 
   if (regs != CURRENT_REGS)
     {
-      restore_critical_section();
+      restore_critical_section(this_task(), this_cpu());
    }
 
   return OK;
@@ -312,21 +312,18 @@ void leave_critical_section(irqstate_t flags) noinstrument_function;
  ****************************************************************************/
 
 #ifdef CONFIG_SMP
-#  define restore_critical_section() \
+#  define restore_critical_section(tcb, cpu) \
   do { \
-      FAR struct tcb_s *tcb; \
-      int me = this_cpu(); \
-      tcb = current_task(me); \
       if (tcb->irqcount <= 0) \
        {\
-          if ((g_cpu_irqset & (1 << me)) != 0) \
+          if ((g_cpu_irqset & (1 << cpu)) != 0) \
            { \
              cpu_irqlock_clear(); \
            } \
        } \
    } while (0)
 #else
-#  define restore_critical_section()
+#  define restore_critical_section(tcb, cpu)
 #endif
 
 #undef EXTERN