1
0
Fork 0
forked from nuttx/nuttx-update

sched/spinlock: remove nesting spinlock support

Developers must be careful when holding spinlocks and must ensure that
all of the protected code is under control, so remove support for nested
spinlocks to improve performance.

Signed-off-by: chao an <anchao@lixiang.com>
This commit is contained in:
chao an 2024-10-11 12:40:50 +08:00 committed by Xiang Xiao
parent 302d2f3b56
commit 5aa13bc490
2 changed files with 6 additions and 33 deletions

View file

@ -152,10 +152,6 @@ void sched_note_spinlock_unlock(FAR volatile spinlock_t *spinlock);
extern volatile spinlock_t g_irq_spin;
/* Handles nested calls to spin_lock_irqsave and spin_unlock_irqrestore */
extern volatile uint8_t g_irq_spin_count[CONFIG_SMP_NCPUS];
/****************************************************************************
 * Name: up_testset
 *
@ -532,14 +528,7 @@ irqstate_t spin_lock_irqsave_wo_note(FAR volatile spinlock_t *lock)
if (NULL == lock)
  {
int me = this_cpu(); spin_lock_wo_note(&g_irq_spin);
if (0 == g_irq_spin_count[me])
{
spin_lock_wo_note(&g_irq_spin);
}
g_irq_spin_count[me]++;
DEBUGASSERT(0 != g_irq_spin_count[me]);
  }
else
  {
@ -558,10 +547,7 @@ irqstate_t spin_lock_irqsave_wo_note(FAR volatile spinlock_t *lock)
 * Description:
 *   If SMP is enabled:
 *     If the argument lock is not specified (i.e. NULL),
* disable local interrupts and take the global spinlock (g_irq_spin) * disable local interrupts and take the global spinlock (g_irq_spin).
* if the call counter (g_irq_spin_count[cpu]) equals to 0. Then the
* counter on the CPU is incremented to allow nested calls and return
* the interrupt state.
 *
 *     If the argument lock is specified,
 *     disable local interrupts and take the lock spinlock and return
@ -699,14 +685,7 @@ void spin_unlock_irqrestore_wo_note(FAR volatile spinlock_t *lock,
{
  if (NULL == lock)
    {
int me = this_cpu(); spin_unlock_wo_note(&g_irq_spin);
DEBUGASSERT(0 < g_irq_spin_count[me]);
g_irq_spin_count[me]--;
if (0 == g_irq_spin_count[me])
{
spin_unlock_wo_note(&g_irq_spin);
}
    }
  else
    {
@ -724,11 +703,9 @@ void spin_unlock_irqrestore_wo_note(FAR volatile spinlock_t *lock,
 *
 * Description:
 *   If SMP is enabled:
* If the argument lock is not specified (i.e. NULL), * If the argument lock is not specified (i.e. NULL), release the
* decrement the call counter (g_irq_spin_count[cpu]) and if it * spinlock (g_irq_spin) and restore the interrupt state as it was
* decrements to zero then release the spinlock (g_irq_spin) and * prior to the previous call to spin_lock_irqsave(NULL).
* restore the interrupt state as it was prior to the previous call to
* spin_lock_irqsave(NULL).
 *
 *     If the argument lock is specified, release the lock and
 *     restore the interrupt state as it was prior to the previous call to

View file

@ -43,10 +43,6 @@
volatile spinlock_t g_irq_spin = SP_UNLOCKED;
/* Handles nested calls to spin_lock_irqsave and spin_unlock_irqrestore */
volatile uint8_t g_irq_spin_count[CONFIG_SMP_NCPUS];
#ifdef CONFIG_RW_SPINLOCK
/* Used for access control */