sched_unlock: remove redundant/stale logic and comment
parent fb92b60000
commit b4d8ac862e
1 changed file with 1 addition and 29 deletions
@@ -98,10 +98,6 @@ int sched_unlock(void)
           sched_note_premption(rtcb, false);
 #endif
 
-          /* Set the lock count to zero */
-
-          rtcb->lockcount = 0;
-
           /* Release any ready-to-run tasks that have collected in
            * g_pendingtasks.
            *
@@ -109,27 +105,7 @@ int sched_unlock(void)
            * this task to be switched out!
            */
 
-          /* In the SMP case, the tasks remains pend(1) if we are
-           * in a critical section, i.e., g_cpu_irqlock is locked by other
-           * CPUs, or (2) other CPUs still have pre-emption disabled, i.e.,
-           * g_cpu_lockset is locked. In those cases, the release of the
-           * pending tasks must be deferred until those conditions are met.
-           *
-           * There are certain conditions that we must avoid by preventing
-           * releasing the pending tasks while within the critical section
-           * of other CPUs. This logic does that and there is matching
-           * logic in nxsched_add_readytorun to avoid starting new tasks
-           * within the critical section (unless the CPU is the holder of
-           * the lock).
-           *
-           * REVISIT: If this CPU is only one that holds the IRQ lock, then
-           * we should go ahead and release the pending tasks. See the logic
-           * leave_critical_section(): It will call nxsched_merge_pending()
-           * BEFORE it clears IRQ lock.
-           */
-
-          if (!nxsched_islocked_tcb(rtcb) &&
-              list_pendingtasks()->head != NULL)
+          if (list_pendingtasks()->head != NULL)
             {
               if (nxsched_merge_pending())
                 {
@@ -247,10 +223,6 @@ int sched_unlock(void)
           sched_note_premption(rtcb, false);
 #endif
 
-          /* Set the lock count to zero */
-
-          rtcb->lockcount = 0;
-
           /* Release any ready-to-run tasks that have collected in
            * g_pendingtasks.
            *
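For context, here is a minimal sketch of how the tail of sched_unlock() reads after this change. It is not the literal NuttX source: the includes, the outer guard, and the context-switch step are assumptions or elisions, while list_pendingtasks(), nxsched_merge_pending(), and rtcb->lockcount are taken from the diff above.

#include <nuttx/arch.h>    /* up_interrupt_context(); assumed include set */
#include <nuttx/sched.h>   /* struct tcb_s, this_task() */

int sched_unlock(void)
{
  FAR struct tcb_s *rtcb = this_task();  /* Currently running task */

  if (rtcb != NULL && !up_interrupt_context())  /* Assumed guard */
    {
      /* Re-enable pre-emption once the lock count drops back to zero.
       * The explicit "rtcb->lockcount = 0;" reset and the
       * !nxsched_islocked_tcb(rtcb) test removed by this commit are
       * treated as redundant: the pending list is merged whenever it
       * is non-empty.
       */

      if (--rtcb->lockcount <= 0)
        {
          if (list_pendingtasks()->head != NULL)
            {
              if (nxsched_merge_pending())
                {
                  /* One of the merged tasks preempts us; the context
                   * switch itself is elided in this sketch.
                   */
                }
            }
        }
    }

  return OK;
}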