If SMP is enabled and any interrupt handler calls enter_critical_section(), it should take the spinlock.

Gregory Nutt 2016-11-15 08:37:58 -06:00
parent b53866c872
commit 65ab12fbb9

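As an illustration of what the change affects, here is a sketch of a hypothetical device interrupt handler that brackets access to shared state with the critical-section API (my_device_isr and its body are illustrative, not part of this commit). With SMP enabled, the enter_critical_section() call below now takes g_cpu_irqlock even though it runs in interrupt context, where it was previously skipped.

#include <nuttx/config.h>
#include <nuttx/irq.h>

static int my_device_isr(int irq, FAR void *context)   /* hypothetical */
{
  irqstate_t flags;

  /* In SMP, this now spins on g_cpu_irqlock if another CPU is in a
   * critical section, instead of doing nothing in interrupt context.
   */

  flags = enter_critical_section();

  /* ... touch data shared with tasks and with the other CPU(s) ... */

  leave_critical_section(flags);
  return 0;
}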

@@ -102,51 +102,72 @@ irqstate_t enter_critical_section(void)
   ret = up_irq_save();
-  /* Check if we were called from an interrupt handler and that the tasks
-   * lists have been initialized.
+  /* Verify that the system has sufficient initialized so that the task lists
+   * are valid.
    */
-  if (!up_interrupt_context() && g_os_initstate >= OSINIT_TASKLISTS)
+  if (g_os_initstate >= OSINIT_TASKLISTS)
     {
-      /* Do we already have interrupts disabled? */
+      /* If called from an interrupt handler, then just take the spinlock.
+       * If we are already in a critical section, this will lock the CPU
+       * in the interrupt handler.  Sounds worse than it is.
+       */
-      rtcb = this_task();
-      DEBUGASSERT(rtcb != NULL);
-      if (rtcb->irqcount > 0)
+      if (up_interrupt_context())
         {
-          /* Yes... make sure that the spinlock is set and increment the IRQ
-           * lock count.
+          /* We are in an interrupt handler but within a critical section.
+           * Wait until we can get the spinlock (meaning that we are no
+           * longer in the critical section).
            */
-          DEBUGASSERT(g_cpu_irqlock == SP_LOCKED && rtcb->irqcount < INT16_MAX);
-          rtcb->irqcount++;
+          spin_lock(&g_cpu_irqlock);
         }
       else
         {
-          /* NO.. Take the spinlock to get exclusive access and set the lock
-           * count to 1.
-           *
-           * We must avoid that case where a context occurs between taking the
-           * g_cpu_irqlock and disabling interrupts.  Also interrupts disables
-           * must follow a stacked order.  We cannot other context switches to
-           * re-order the enabling/disabling of interrupts.
-           *
-           * The scheduler accomplishes this by treating the irqcount like
-           * lockcount:  Both will disable pre-emption.
-           */
+          /* Normal tasking environment. */
+          /* Do we already have interrupts disabled? */
-          spin_setbit(&g_cpu_irqset, this_cpu(), &g_cpu_irqsetlock,
-                      &g_cpu_irqlock);
-          rtcb->irqcount = 1;
+          rtcb = this_task();
+          DEBUGASSERT(rtcb != NULL);
+          if (rtcb->irqcount > 0)
+            {
+              /* Yes... make sure that the spinlock is set and increment the
+               * IRQ lock count.
+               */
+              DEBUGASSERT(g_cpu_irqlock == SP_LOCKED &&
+                          rtcb->irqcount < INT16_MAX);
+              rtcb->irqcount++;
+            }
+          else
+            {
+              /* NO.. Take the spinlock to get exclusive access and set the
+               * lock count to 1.
+               *
+               * We must avoid that case where a context occurs between
+               * taking the g_cpu_irqlock and disabling interrupts.  Also
+               * interrupts disables must follow a stacked order.  We
+               * cannot other context switches to re-order the enabling/
+               * disabling of interrupts.
+               *
+               * The scheduler accomplishes this by treating the irqcount
+               * like lockcount:  Both will disable pre-emption.
+               */
+              spin_setbit(&g_cpu_irqset, this_cpu(), &g_cpu_irqsetlock,
+                          &g_cpu_irqlock);
+              rtcb->irqcount = 1;
 #ifdef CONFIG_SCHED_INSTRUMENTATION_CSECTION
-          /* Note that we have entered the critical section */
+              /* Note that we have entered the critical section */
-          sched_note_csection(rtcb, true);
+              sched_note_csection(rtcb, true);
 #endif
-        }
+            }
+        }
     }
   /* Return interrupt status */
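For readers skimming the hunk above, a condensed sketch of the enter_critical_section() flow after this change (my summary, not text from the commit): the DEBUGASSERT and CONFIG_SCHED_INSTRUMENTATION_CSECTION details are trimmed, and it relies on the same kernel-internal globals as the file itself, so it is not standalone.

irqstate_t enter_critical_section(void)
{
  FAR struct tcb_s *rtcb;
  irqstate_t ret;

  ret = up_irq_save();
  if (g_os_initstate >= OSINIT_TASKLISTS)
    {
      if (up_interrupt_context())
        {
          /* Interrupt handler: spin until g_cpu_irqlock can be taken */

          spin_lock(&g_cpu_irqlock);
        }
      else
        {
          rtcb = this_task();
          if (rtcb->irqcount > 0)
            {
              rtcb->irqcount++;            /* Nested: lock is already held */
            }
          else
            {
              spin_setbit(&g_cpu_irqset, this_cpu(), &g_cpu_irqsetlock,
                          &g_cpu_irqlock); /* First entry: take the lock */
              rtcb->irqcount = 1;
            }
        }
    }

  return ret;                              /* Previous interrupt state */
}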
@@ -187,59 +208,79 @@ irqstate_t enter_critical_section(void)
 #ifdef CONFIG_SMP
 void leave_critical_section(irqstate_t flags)
 {
-  /* Check if we were called from an interrupt handler and that the tasks
-   * lists have been initialized.
+  /* Verify that the system has sufficient initialized so that the task lists
+   * are valid.
    */
-  if (!up_interrupt_context() && g_os_initstate >= OSINIT_TASKLISTS)
+  if (g_os_initstate >= OSINIT_TASKLISTS)
     {
-      FAR struct tcb_s *rtcb = this_task();
-      DEBUGASSERT(rtcb != 0 && rtcb->irqcount > 0);
-      /* Will we still have interrupts disabled after decrementing the
-       * count?
+      /* If called from an interrupt handler, then just take the spinlock.
+       * If we are already in a critical section, this will lock the CPU
+       * in the interrupt handler.  Sounds worse than it is.
        */
-      if (rtcb->irqcount > 1)
+      if (up_interrupt_context())
         {
-          /* Yes... make sure that the spinlock is set */
+          /* We are in an interrupt handler but within a critical section.
+           * Wait until we can get the spinlock (meaning that we are no
+           * longer in the critical section).
+           */
-          DEBUGASSERT(g_cpu_irqlock == SP_LOCKED);
-          rtcb->irqcount--;
+          spin_unlock(&g_cpu_irqlock);
         }
       else
         {
-#ifdef CONFIG_SCHED_INSTRUMENTATION_CSECTION
-          /* No.. Note that we have entered the critical section */
+          FAR struct tcb_s *rtcb = this_task();
+          DEBUGASSERT(rtcb != 0 && rtcb->irqcount > 0);
-          sched_note_csection(rtcb, false);
-#endif
-          /* Decrement our count on the lock.  If all CPUs have released,
-           * then unlock the spinlock.
-           */
+          /* Normal tasking context */
+          /* Will we still have interrupts disabled after decrementing the
+           * count?
+           */
-          rtcb->irqcount = 0;
-          spin_clrbit(&g_cpu_irqset, this_cpu(), &g_cpu_irqsetlock,
-                      &g_cpu_irqlock);
-          /* Have all CPUs release the lock? */
-          if (!spin_islocked(&g_cpu_irqlock))
+          if (rtcb->irqcount > 1)
             {
-              /* Check if there are pending tasks and that pre-emption is
-               * also enabled.
+              /* Yes... make sure that the spinlock is set */
+              DEBUGASSERT(g_cpu_irqlock == SP_LOCKED);
+              rtcb->irqcount--;
+            }
+          else
+            {
+#ifdef CONFIG_SCHED_INSTRUMENTATION_CSECTION
+              /* No.. Note that we have entered the critical section */
+              sched_note_csection(rtcb, false);
+#endif
+              /* Decrement our count on the lock.  If all CPUs have
+               * released, then unlock the spinlock.
               */
-              if (g_pendingtasks.head != NULL && !spin_islocked(&g_cpu_schedlock))
+              rtcb->irqcount = 0;
+              spin_clrbit(&g_cpu_irqset, this_cpu(), &g_cpu_irqsetlock,
+                          &g_cpu_irqlock);
+              /* Have all CPUs release the lock? */
+              if (!spin_islocked(&g_cpu_irqlock))
                 {
-                  /* Release any ready-to-run tasks that have collected in
-                   * g_pendingtasks if the scheduler is not locked.
-                   *
-                   * NOTE: This operation has a very high likelihood of causing
-                   * this task to be switched out!
+                  /* Check if there are pending tasks and that pre-emption
+                   * is also enabled.
                    */
-                  up_release_pending();
+                  if (g_pendingtasks.head != NULL &&
+                      !spin_islocked(&g_cpu_schedlock))
+                    {
+                      /* Release any ready-to-run tasks that have collected
+                       * in g_pendingtasks if the scheduler is not locked.
+                       *
+                       * NOTE: This operation has a very high likelihood of
+                       * causing this task to be switched out!
+                       */
+                      up_release_pending();
+                    }
                 }
             }
         }
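A matching condensation of the new leave_critical_section() flow (again a summary, not the commit's text): the two nested checks at the end are folded into one condition, and the trailing up_irq_restore(flags) lies outside the hunk shown above, so it is an assumption based on the rest of the function.

void leave_critical_section(irqstate_t flags)
{
  if (g_os_initstate >= OSINIT_TASKLISTS)
    {
      if (up_interrupt_context())
        {
          /* Interrupt handler: drop the spinlock taken on entry */

          spin_unlock(&g_cpu_irqlock);
        }
      else
        {
          FAR struct tcb_s *rtcb = this_task();

          if (rtcb->irqcount > 1)
            {
              rtcb->irqcount--;            /* Still nested on this task */
            }
          else
            {
              rtcb->irqcount = 0;
              spin_clrbit(&g_cpu_irqset, this_cpu(), &g_cpu_irqsetlock,
                          &g_cpu_irqlock); /* This CPU releases the lock */

              /* Run any pending tasks once no CPU holds the lock and the
               * scheduler itself is not locked.
               */

              if (!spin_islocked(&g_cpu_irqlock) &&
                  g_pendingtasks.head != NULL &&
                  !spin_islocked(&g_cpu_schedlock))
                {
                  up_release_pending();    /* May switch this task out */
                }
            }
        }
    }

  up_irq_restore(flags);                   /* Assumed; outside the hunk */
}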