sched: add nxsched_remove_self
reason: In the scenario of active waiting, a context switch is inevitable, so we can eliminate redundant checks. Code size before: hujun5@hujun5-OptiPlex-7070:~/downloads1/vela_sim/nuttx$ size nuttx text data bss dec hex filename 262848 49985 63893 376726 5bf96 nuttx after: hujun5@hujun5-OptiPlex-7070:~/downloads1/vela_sim/nuttx$ size nuttx text data bss dec hex filename 263324 49985 63893 377202 5c172 nuttx Code size increases by 476 bytes (263324 - 262848). Configuring NuttX and compiling: $ ./tools/configure.sh -l qemu-armv8a:nsh_smp $ make Running with qemu: $ qemu-system-aarch64 -cpu cortex-a53 -smp 4 -nographic \ -machine virt,virtualization=on,gic-version=3 \ -net none -chardev stdio,id=con,mux=on -serial chardev:con \ -mon chardev=con,mode=readline -kernel ./nuttx Signed-off-by: hujun5 <hujun5@xiaomi.com>
This commit is contained in:
parent
dc6eeba453
commit
e13d255345
11 changed files with 201 additions and 214 deletions
|
@ -138,7 +138,6 @@ int nxmq_wait_receive(FAR struct mqueue_inode_s *msgq,
|
|||
{
|
||||
FAR struct mqueue_msg_s *newmsg;
|
||||
FAR struct tcb_s *rtcb;
|
||||
bool switch_needed;
|
||||
|
||||
DEBUGASSERT(rcvmsg != NULL);
|
||||
|
||||
|
@ -186,21 +185,18 @@ int nxmq_wait_receive(FAR struct mqueue_inode_s *msgq,
|
|||
|
||||
DEBUGASSERT(!is_idle_task(rtcb));
|
||||
|
||||
/* Remove the tcb task from the ready-to-run list. */
|
||||
/* Remove the tcb task from the running list. */
|
||||
|
||||
switch_needed = nxsched_remove_readytorun(rtcb, true);
|
||||
nxsched_remove_self(rtcb);
|
||||
|
||||
/* Add the task to the specified blocked task list */
|
||||
|
||||
rtcb->task_state = TSTATE_WAIT_MQNOTEMPTY;
|
||||
nxsched_add_prioritized(rtcb, MQ_WNELIST(msgq->cmn));
|
||||
|
||||
/* Now, perform the context switch if one is needed */
|
||||
/* Now, perform the context switch */
|
||||
|
||||
if (switch_needed)
|
||||
{
|
||||
up_switch_context(this_task(), rtcb);
|
||||
}
|
||||
|
||||
/* When we resume at this point, either (1) the message queue
|
||||
* is no longer empty, or (2) the wait has been interrupted by
|
||||
|
|
|
@ -215,7 +215,6 @@ FAR struct mqueue_msg_s *nxmq_alloc_msg(void)
|
|||
int nxmq_wait_send(FAR struct mqueue_inode_s *msgq, int oflags)
|
||||
{
|
||||
FAR struct tcb_s *rtcb;
|
||||
bool switch_needed;
|
||||
|
||||
#ifdef CONFIG_CANCELLATION_POINTS
|
||||
/* nxmq_wait_send() is not a cancellation point, but may be called via
|
||||
|
@ -271,21 +270,18 @@ int nxmq_wait_send(FAR struct mqueue_inode_s *msgq, int oflags)
|
|||
|
||||
DEBUGASSERT(!is_idle_task(rtcb));
|
||||
|
||||
/* Remove the tcb task from the ready-to-run list. */
|
||||
/* Remove the tcb task from the running list. */
|
||||
|
||||
switch_needed = nxsched_remove_readytorun(rtcb, true);
|
||||
nxsched_remove_self(rtcb);
|
||||
|
||||
/* Add the task to the specified blocked task list */
|
||||
|
||||
rtcb->task_state = TSTATE_WAIT_MQNOTFULL;
|
||||
nxsched_add_prioritized(rtcb, MQ_WNFLIST(msgq->cmn));
|
||||
|
||||
/* Now, perform the context switch if one is needed */
|
||||
/* Now, perform the context switch */
|
||||
|
||||
if (switch_needed)
|
||||
{
|
||||
up_switch_context(this_task(), rtcb);
|
||||
}
|
||||
|
||||
/* When we resume at this point, either (1) the message queue
|
||||
* is no longer empty, or (2) the wait has been interrupted by
|
||||
|
|
|
@ -45,7 +45,6 @@ static int msgrcv_wait(FAR struct msgq_s *msgq, FAR struct msgbuf_s **rcvmsg,
|
|||
FAR struct msgbuf_s *newmsg = NULL;
|
||||
FAR struct msgbuf_s *tmp;
|
||||
FAR struct tcb_s *rtcb;
|
||||
bool switch_needed;
|
||||
|
||||
#ifdef CONFIG_CANCELLATION_POINTS
|
||||
/* msgrcv_wait() is not a cancellation point, but it may be called
|
||||
|
@ -129,21 +128,18 @@ static int msgrcv_wait(FAR struct msgq_s *msgq, FAR struct msgbuf_s **rcvmsg,
|
|||
|
||||
DEBUGASSERT(NULL != rtcb->flink);
|
||||
|
||||
/* Remove the tcb task from the ready-to-run list. */
|
||||
/* Remove the tcb task from the running list. */
|
||||
|
||||
switch_needed = nxsched_remove_readytorun(rtcb, true);
|
||||
nxsched_remove_self(rtcb);
|
||||
|
||||
/* Add the task to the specified blocked task list */
|
||||
|
||||
rtcb->task_state = TSTATE_WAIT_MQNOTEMPTY;
|
||||
nxsched_add_prioritized(rtcb, MQ_WNELIST(msgq->cmn));
|
||||
|
||||
/* Now, perform the context switch if one is needed */
|
||||
/* Now, perform the context switch */
|
||||
|
||||
if (switch_needed)
|
||||
{
|
||||
up_switch_context(this_task(), rtcb);
|
||||
}
|
||||
|
||||
/* When we resume at this point, either (1) the message queue
|
||||
* is no longer empty, or (2) the wait has been interrupted by
|
||||
|
|
|
@ -42,7 +42,6 @@
|
|||
static int msgsnd_wait(FAR struct msgq_s *msgq, int msgflg)
|
||||
{
|
||||
FAR struct tcb_s *rtcb;
|
||||
bool switch_needed;
|
||||
|
||||
#ifdef CONFIG_CANCELLATION_POINTS
|
||||
/* msgsnd_wait() is not a cancellation point, but may be called via
|
||||
|
@ -95,21 +94,18 @@ static int msgsnd_wait(FAR struct msgq_s *msgq, int msgflg)
|
|||
|
||||
DEBUGASSERT(NULL != rtcb->flink);
|
||||
|
||||
/* Remove the tcb task from the ready-to-run list. */
|
||||
/* Remove the tcb task from the running list. */
|
||||
|
||||
switch_needed = nxsched_remove_readytorun(rtcb, true);
|
||||
nxsched_remove_self(rtcb);
|
||||
|
||||
/* Add the task to the specified blocked task list */
|
||||
|
||||
rtcb->task_state = TSTATE_WAIT_MQNOTFULL;
|
||||
nxsched_add_prioritized(rtcb, MQ_WNFLIST(msgq->cmn));
|
||||
|
||||
/* Now, perform the context switch if one is needed */
|
||||
/* Now, perform the context switch */
|
||||
|
||||
if (switch_needed)
|
||||
{
|
||||
up_switch_context(this_task(), rtcb);
|
||||
}
|
||||
|
||||
/* When we resume at this point, either (1) the message queue
|
||||
* is no longer empty, or (2) the wait has been interrupted by
|
||||
|
|
|
@ -112,7 +112,6 @@ void pg_miss(void)
|
|||
{
|
||||
FAR struct tcb_s *ftcb = this_task();
|
||||
FAR struct tcb_s *wtcb;
|
||||
bool switch_needed;
|
||||
|
||||
/* Sanity checking
|
||||
*
|
||||
|
@ -138,21 +137,18 @@ void pg_miss(void)
|
|||
|
||||
DEBUGASSERT(!is_idle_task(ftcb));
|
||||
|
||||
/* Remove the tcb task from the ready-to-run list. */
|
||||
/* Remove the tcb task from the running list. */
|
||||
|
||||
switch_needed = nxsched_remove_readytorun(ftcb, true);
|
||||
nxsched_remove_self(ftcb);
|
||||
|
||||
/* Add the task to the specified blocked task list */
|
||||
|
||||
ftcb->task_state = TSTATE_WAIT_PAGEFILL;
|
||||
nxsched_add_prioritized(ftcb, list_waitingforfill());
|
||||
|
||||
/* Now, perform the context switch if one is needed */
|
||||
/* Now, perform the context switch */
|
||||
|
||||
if (switch_needed)
|
||||
{
|
||||
up_switch_context(this_task(), ftcb);
|
||||
}
|
||||
|
||||
/* Boost the page fill worker thread priority.
|
||||
* - Check the priority of the task at the head of the g_waitingforfill
|
||||
|
|
|
@ -321,6 +321,7 @@ int nxthread_create(FAR const char *name, uint8_t ttype, int priority,
|
|||
|
||||
bool nxsched_add_readytorun(FAR struct tcb_s *rtrtcb);
|
||||
bool nxsched_remove_readytorun(FAR struct tcb_s *rtrtcb, bool merge);
|
||||
void nxsched_remove_self(FAR struct tcb_s *rtrtcb);
|
||||
bool nxsched_add_prioritized(FAR struct tcb_s *tcb, DSEG dq_queue_t *list);
|
||||
void nxsched_merge_prioritized(FAR dq_queue_t *list1, FAR dq_queue_t *list2,
|
||||
uint8_t task_state);
|
||||
|
|
|
@ -106,6 +106,11 @@ bool nxsched_remove_readytorun(FAR struct tcb_s *rtcb, bool merge)
|
|||
|
||||
return doswitch;
|
||||
}
|
||||
|
||||
void nxsched_remove_self(FAR struct tcb_s *tcb)
|
||||
{
|
||||
nxsched_remove_readytorun(tcb, true);
|
||||
}
|
||||
#endif /* !CONFIG_SMP */
|
||||
|
||||
/****************************************************************************
|
||||
|
@ -132,20 +137,22 @@ bool nxsched_remove_readytorun(FAR struct tcb_s *rtcb, bool merge)
|
|||
****************************************************************************/
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
bool nxsched_remove_readytorun(FAR struct tcb_s *rtcb, bool merge)
|
||||
void nxsched_remove_running(FAR struct tcb_s *tcb)
|
||||
{
|
||||
FAR dq_queue_t *tasklist;
|
||||
bool doswitch = false;
|
||||
FAR struct tcb_s *nxttcb;
|
||||
FAR struct tcb_s *rtrtcb = NULL;
|
||||
int cpu;
|
||||
|
||||
/* Which CPU (if any) is the task running on? Which task list holds the
|
||||
* TCB?
|
||||
*/
|
||||
|
||||
cpu = rtcb->cpu;
|
||||
tasklist = TLIST_HEAD(rtcb, cpu);
|
||||
DEBUGASSERT(tcb->task_state == TSTATE_TASK_RUNNING);
|
||||
cpu = tcb->cpu;
|
||||
tasklist = &g_assignedtasks[cpu];
|
||||
|
||||
/* Check if the TCB to be removed is at the head of a ready-to-run list.
|
||||
/* Check if the TCB to be removed is at the head of a running list.
|
||||
* For the case of SMP, there are two lists involved: (1) the
|
||||
* g_readytorun list that holds non-running tasks that have not been
|
||||
* assigned to a CPU, and (2) and the g_assignedtasks[] lists which hold
|
||||
|
@ -153,44 +160,31 @@ bool nxsched_remove_readytorun(FAR struct tcb_s *rtcb, bool merge)
|
|||
* that CPU. Only this latter list contains the currently active task
|
||||
* only removing the head of that list can result in a context switch.
|
||||
*
|
||||
* rtcb->blink == NULL will tell us if the TCB is at the head of the
|
||||
* ready-to-run list and, hence, a candidate for the new running task.
|
||||
* tcb->blink == NULL will tell us if the TCB is at the head of the
|
||||
* running list and, hence, a candidate for the new running task.
|
||||
*
|
||||
* If so, then the tasklist RUNNABLE attribute will inform us if the list
|
||||
* holds the currently executing task and, hence, if a context switch
|
||||
* should occur.
|
||||
*/
|
||||
|
||||
if (rtcb->blink == NULL && TLIST_ISRUNNABLE(rtcb->task_state))
|
||||
{
|
||||
FAR struct tcb_s *nxttcb;
|
||||
FAR struct tcb_s *rtrtcb = NULL;
|
||||
int me;
|
||||
DEBUGASSERT(tcb->blink == NULL);
|
||||
DEBUGASSERT(TLIST_ISRUNNABLE(tcb->task_state));
|
||||
|
||||
/* There must always be at least one task in the list (the IDLE task)
|
||||
* after the TCB being removed.
|
||||
*/
|
||||
|
||||
nxttcb = rtcb->flink;
|
||||
nxttcb = tcb->flink;
|
||||
DEBUGASSERT(nxttcb != NULL);
|
||||
|
||||
/* If we are modifying the head of some assigned task list other than
|
||||
* our own, we will need to stop that CPU.
|
||||
*/
|
||||
|
||||
me = this_cpu();
|
||||
if (cpu != me)
|
||||
{
|
||||
DEBUGVERIFY(up_cpu_pause(cpu));
|
||||
}
|
||||
|
||||
/* The task is running but the CPU that it was running on has been
|
||||
* paused. We can now safely remove its TCB from the ready-to-run
|
||||
* paused. We can now safely remove its TCB from the running
|
||||
* task list. In the SMP case this may be either the g_readytorun()
|
||||
* or the g_assignedtasks[cpu] list.
|
||||
*/
|
||||
|
||||
dq_rem_head((FAR dq_entry_t *)rtcb, tasklist);
|
||||
dq_rem_head((FAR dq_entry_t *)tcb, tasklist);
|
||||
|
||||
/* Find the highest priority non-running tasks in the g_assignedtasks
|
||||
* list of other CPUs, and also non-idle tasks, place them in the
|
||||
|
@ -243,7 +237,7 @@ bool nxsched_remove_readytorun(FAR struct tcb_s *rtcb, bool merge)
|
|||
* CPU.
|
||||
*/
|
||||
|
||||
for (rtrtcb = (FAR struct tcb_s *)list_readytorun()->head;
|
||||
for (rtrtcb = (FAR struct tcb_s *)g_readytorun.head;
|
||||
rtrtcb != NULL && !CPU_ISSET(cpu, &rtrtcb->affinity);
|
||||
rtrtcb = rtrtcb->flink);
|
||||
|
||||
|
@ -261,7 +255,7 @@ bool nxsched_remove_readytorun(FAR struct tcb_s *rtcb, bool merge)
|
|||
* list and add to the head of the g_assignedtasks[cpu] list.
|
||||
*/
|
||||
|
||||
dq_rem((FAR dq_entry_t *)rtrtcb, list_readytorun());
|
||||
dq_rem((FAR dq_entry_t *)rtrtcb, &g_readytorun);
|
||||
dq_addfirst_nonempty((FAR dq_entry_t *)rtrtcb, tasklist);
|
||||
|
||||
rtrtcb->cpu = cpu;
|
||||
|
@ -293,32 +287,59 @@ bool nxsched_remove_readytorun(FAR struct tcb_s *rtcb, bool merge)
|
|||
|
||||
nxttcb->task_state = TSTATE_TASK_RUNNING;
|
||||
|
||||
/* All done, restart the other CPU (if it was paused). */
|
||||
/* Since the TCB is no longer in any list, it is now invalid */
|
||||
|
||||
doswitch = true;
|
||||
tcb->task_state = TSTATE_TASK_INVALID;
|
||||
}
|
||||
|
||||
void nxsched_remove_self(FAR struct tcb_s *tcb)
|
||||
{
|
||||
nxsched_remove_running(tcb);
|
||||
if (g_pendingtasks.head)
|
||||
{
|
||||
nxsched_merge_pending();
|
||||
}
|
||||
}
|
||||
|
||||
bool nxsched_remove_readytorun(FAR struct tcb_s *tcb, bool merge)
|
||||
{
|
||||
bool doswitch = false;
|
||||
|
||||
if (tcb->task_state == TSTATE_TASK_RUNNING)
|
||||
{
|
||||
int me = this_cpu();
|
||||
int cpu = tcb->cpu;
|
||||
if (cpu != me)
|
||||
{
|
||||
/* In this we will not want to report a context switch to this
|
||||
* CPU. Only the other CPU is affected.
|
||||
*/
|
||||
|
||||
DEBUGVERIFY(up_cpu_resume(cpu));
|
||||
doswitch = false;
|
||||
up_cpu_pause(tcb->cpu);
|
||||
nxsched_remove_running(tcb);
|
||||
up_cpu_resume(tcb->cpu);
|
||||
}
|
||||
else
|
||||
{
|
||||
nxsched_remove_running(tcb);
|
||||
doswitch = true;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
/* The task is not running. Just remove its TCB from the ready-to-run
|
||||
FAR dq_queue_t *tasklist;
|
||||
|
||||
tasklist = TLIST_HEAD(tcb, tcb->cpu);
|
||||
|
||||
DEBUGASSERT(tcb->task_state != TSTATE_TASK_RUNNING);
|
||||
|
||||
/* The task is not running. Just remove its TCB from the task
|
||||
* list. In the SMP case this may be either the g_readytorun() or the
|
||||
* g_assignedtasks[cpu] list.
|
||||
*/
|
||||
|
||||
dq_rem((FAR dq_entry_t *)rtcb, tasklist);
|
||||
}
|
||||
dq_rem((FAR dq_entry_t *)tcb, tasklist);
|
||||
|
||||
/* Since the TCB is no longer in any list, it is now invalid */
|
||||
|
||||
rtcb->task_state = TSTATE_TASK_INVALID;
|
||||
tcb->task_state = TSTATE_TASK_INVALID;
|
||||
}
|
||||
|
||||
if (list_pendingtasks()->head && merge)
|
||||
{
|
||||
|
|
|
@ -73,7 +73,6 @@ int nxsem_wait(FAR sem_t *sem)
|
|||
{
|
||||
FAR struct tcb_s *rtcb = this_task();
|
||||
irqstate_t flags;
|
||||
bool switch_needed;
|
||||
int ret;
|
||||
|
||||
/* This API should not be called from interrupt handlers & idleloop */
|
||||
|
@ -168,21 +167,18 @@ int nxsem_wait(FAR sem_t *sem)
|
|||
|
||||
DEBUGASSERT(!is_idle_task(rtcb));
|
||||
|
||||
/* Remove the tcb task from the ready-to-run list. */
|
||||
/* Remove the tcb task from the running list. */
|
||||
|
||||
switch_needed = nxsched_remove_readytorun(rtcb, true);
|
||||
nxsched_remove_self(rtcb);
|
||||
|
||||
/* Add the task to the specified blocked task list */
|
||||
|
||||
rtcb->task_state = TSTATE_WAIT_SEM;
|
||||
nxsched_add_prioritized(rtcb, SEM_WAITLIST(sem));
|
||||
|
||||
/* Now, perform the context switch if one is needed */
|
||||
/* Now, perform the context switch */
|
||||
|
||||
if (switch_needed)
|
||||
{
|
||||
up_switch_context(this_task(), rtcb);
|
||||
}
|
||||
|
||||
/* When we resume at this point, either (1) the semaphore has been
|
||||
* assigned to this thread of execution, or (2) the semaphore wait
|
||||
|
|
|
@ -82,7 +82,6 @@ int sigsuspend(FAR const sigset_t *set)
|
|||
FAR struct tcb_s *rtcb = this_task();
|
||||
sigset_t saved_sigprocmask;
|
||||
irqstate_t flags;
|
||||
bool switch_needed;
|
||||
|
||||
/* sigsuspend() is a cancellation point */
|
||||
|
||||
|
@ -127,21 +126,18 @@ int sigsuspend(FAR const sigset_t *set)
|
|||
|
||||
DEBUGASSERT(!is_idle_task(rtcb));
|
||||
|
||||
/* Remove the tcb task from the ready-to-run list. */
|
||||
/* Remove the tcb task from the running list. */
|
||||
|
||||
switch_needed = nxsched_remove_readytorun(rtcb, true);
|
||||
nxsched_remove_self(rtcb);
|
||||
|
||||
/* Add the task to the specified blocked task list */
|
||||
|
||||
rtcb->task_state = TSTATE_WAIT_SIG;
|
||||
dq_addlast((FAR dq_entry_t *)rtcb, list_waitingforsignal());
|
||||
|
||||
/* Now, perform the context switch if one is needed */
|
||||
/* Now, perform the context switch */
|
||||
|
||||
if (switch_needed)
|
||||
{
|
||||
up_switch_context(this_task(), rtcb);
|
||||
}
|
||||
|
||||
/* We are running again, restore the original sigprocmask */
|
||||
|
||||
|
|
|
@ -251,7 +251,6 @@ int nxsig_timedwait(FAR const sigset_t *set, FAR struct siginfo *info,
|
|||
FAR sigpendq_t *sigpend;
|
||||
irqstate_t flags;
|
||||
sclock_t waitticks;
|
||||
bool switch_needed;
|
||||
siginfo_t unbinfo;
|
||||
int ret;
|
||||
|
||||
|
@ -364,7 +363,7 @@ int nxsig_timedwait(FAR const sigset_t *set, FAR struct siginfo *info,
|
|||
|
||||
/* Remove the tcb task from the ready-to-run list. */
|
||||
|
||||
switch_needed = nxsched_remove_readytorun(rtcb, true);
|
||||
nxsched_remove_self(rtcb);
|
||||
|
||||
/* Add the task to the specified blocked task list */
|
||||
|
||||
|
@ -373,10 +372,7 @@ int nxsig_timedwait(FAR const sigset_t *set, FAR struct siginfo *info,
|
|||
|
||||
/* Now, perform the context switch if one is needed */
|
||||
|
||||
if (switch_needed)
|
||||
{
|
||||
up_switch_context(this_task(), rtcb);
|
||||
}
|
||||
|
||||
/* We no longer need the watchdog */
|
||||
|
||||
|
@ -406,22 +402,19 @@ int nxsig_timedwait(FAR const sigset_t *set, FAR struct siginfo *info,
|
|||
|
||||
DEBUGASSERT(!is_idle_task(rtcb));
|
||||
|
||||
/* Remove the tcb task from the ready-to-run list. */
|
||||
/* Remove the tcb task from the running list. */
|
||||
|
||||
switch_needed = nxsched_remove_readytorun(rtcb, true);
|
||||
nxsched_remove_self(rtcb);
|
||||
|
||||
/* Add the task to the specified blocked task list */
|
||||
|
||||
rtcb->task_state = TSTATE_WAIT_SIG;
|
||||
dq_addlast((FAR dq_entry_t *)rtcb, list_waitingforsignal());
|
||||
|
||||
/* Now, perform the context switch if one is needed */
|
||||
/* Now, perform the context switch */
|
||||
|
||||
if (switch_needed)
|
||||
{
|
||||
up_switch_context(this_task(), rtcb);
|
||||
}
|
||||
}
|
||||
|
||||
/* We are running again, clear the sigwaitmask */
|
||||
|
||||
|
|
|
@ -110,7 +110,7 @@ int nxtask_exit(void)
|
|||
* ready-to-run with state == TSTATE_TASK_RUNNING
|
||||
*/
|
||||
|
||||
nxsched_remove_readytorun(dtcb, true);
|
||||
nxsched_remove_self(dtcb);
|
||||
|
||||
/* Get the new task at the head of the ready to run list */
|
||||
|
||||
|
|
Loading…
Reference in a new issue