sched: remove spin_lock_irqsave(NULL) support

Signed-off-by: hujun5 <hujun5@xiaomi.com>
Author:  hujun5 <hujun5@xiaomi.com>
Date:    2025-01-08 16:56:34 +08:00
Commit:  4bd8d9eac7 (parent 69ee240b45), committed by Xiang Xiao

2 changed files with 10 additions and 70 deletions
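For reviewers, a minimal sketch of what this change means at a call site. It is illustrative only and assumes a hypothetical per-driver lock named g_mydev_lock; nothing below is part of the patch itself:

#include <nuttx/irq.h>
#include <nuttx/spinlock.h>

/* Hypothetical caller-specific lock that replaces the implicit use of the
 * global g_irq_spin when NULL was passed.
 */

static spinlock_t g_mydev_lock = SP_UNLOCKED;

static void mydev_update(void)
{
  irqstate_t flags;

  /* Before this patch a caller could write
   *   flags = spin_lock_irqsave(NULL);
   * and implicitly share g_irq_spin.  A caller-specific lock is now
   * mandatory.
   */

  flags = spin_lock_irqsave(&g_mydev_lock);

  /* ... touch the data protected by g_mydev_lock ... */

  spin_unlock_irqrestore(&g_mydev_lock, flags);
}

Pushing the lock into the caller keeps the lock scope small and removes the hidden contention that every NULL caller previously had on the single global g_irq_spin.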

--- a/include/nuttx/spinlock.h
+++ b/include/nuttx/spinlock.h

@@ -106,14 +106,6 @@ void sched_note_spinlock_unlock(FAR volatile spinlock_t *spinlock);
  * Public Data Types
  ****************************************************************************/
 
-/* Used for access control */
-
-extern volatile spinlock_t g_irq_spin;
-
-/* Handles nested calls to spin_lock_irqsave and spin_unlock_irqrestore */
-
-extern volatile uint8_t g_irq_spin_count[CONFIG_SMP_NCPUS];
-
 /****************************************************************************
  * Name: up_testset
  *
@ -448,26 +440,12 @@ static inline_function void spin_unlock(FAR volatile spinlock_t *lock)
static inline_function static inline_function
irqstate_t spin_lock_irqsave_wo_note(FAR volatile spinlock_t *lock) irqstate_t spin_lock_irqsave_wo_note(FAR volatile spinlock_t *lock)
{ {
irqstate_t ret; irqstate_t flags;
ret = up_irq_save(); flags = up_irq_save();
if (NULL == lock)
{
int me = this_cpu();
if (0 == g_irq_spin_count[me])
{
spin_lock_wo_note(&g_irq_spin);
}
g_irq_spin_count[me]++;
DEBUGASSERT(0 != g_irq_spin_count[me]);
}
else
{
spin_lock_wo_note(lock); spin_lock_wo_note(lock);
}
return ret; return flags;
} }
#else #else
# define spin_lock_irqsave_wo_note(l) ((void)(l), up_irq_save()) # define spin_lock_irqsave_wo_note(l) ((void)(l), up_irq_save())
@@ -478,14 +456,7 @@ irqstate_t spin_lock_irqsave_wo_note(FAR volatile spinlock_t *lock)
  *
  * Description:
  *   If SMP is enabled:
- *     If the argument lock is not specified (i.e. NULL),
- *     disable local interrupts and take the global spinlock (g_irq_spin)
- *     if the call counter (g_irq_spin_count[cpu]) equals to 0. Then the
- *     counter on the CPU is incremented to allow nested calls and return
- *     the interrupt state.
- *
- *     If the argument lock is specified,
- *     disable local interrupts and take the lock spinlock and return
+ *     Disable local interrupts and take the lock spinlock and return
  *     the interrupt state.
  *
  *   NOTE: This API is very simple to protect data (e.g. H/W register
@@ -496,9 +467,7 @@ irqstate_t spin_lock_irqsave_wo_note(FAR volatile spinlock_t *lock)
  *     This function is equivalent to up_irq_save().
  *
  * Input Parameters:
- *   lock - Caller specific spinlock. If specified NULL, g_irq_spin is used
- *          and can be nested. Otherwise, nested call for the same lock
- *          would cause a deadlock
+ *   lock - Caller specific spinlock. not NULL.
  *
  * Returned Value:
  *   An opaque, architecture-specific value that represents the state of
@@ -617,22 +586,8 @@ irqstate_t spin_lock_irqsave(FAR volatile spinlock_t *lock)
 static inline_function
 void spin_unlock_irqrestore_wo_note(FAR volatile spinlock_t *lock,
                                     irqstate_t flags)
 {
-  if (NULL == lock)
-    {
-      int me = this_cpu();
-      DEBUGASSERT(0 < g_irq_spin_count[me]);
-      g_irq_spin_count[me]--;
-
-      if (0 == g_irq_spin_count[me])
-        {
-          spin_unlock_wo_note(&g_irq_spin);
-        }
-    }
-  else
-    {
-      spin_unlock_wo_note(lock);
-    }
+  spin_unlock_wo_note(lock);
 
   up_irq_restore(flags);
 }
@@ -645,21 +600,14 @@ void spin_unlock_irqrestore_wo_note(FAR volatile spinlock_t *lock,
  *
  * Description:
  *   If SMP is enabled:
- *     If the argument lock is not specified (i.e. NULL),
- *     decrement the call counter (g_irq_spin_count[cpu]) and if it
- *     decrements to zero then release the spinlock (g_irq_spin) and
- *     restore the interrupt state as it was prior to the previous call to
- *     spin_lock_irqsave(NULL).
- *
- *     If the argument lock is specified, release the lock and
- *     restore the interrupt state as it was prior to the previous call to
- *     spin_lock_irqsave(lock).
+ *     Release the lock and restore the interrupt state as it was prior
+ *     to the previous call to spin_lock_irqsave(lock).
  *
  *   If SMP is not enabled:
  *     This function is equivalent to up_irq_restore().
  *
  * Input Parameters:
- *   lock - Caller specific spinlock. If specified NULL, g_irq_spin is used.
+ *   lock - Caller specific spinlock. not NULL
  *
  *   flags - The architecture-specific value that represents the state of
  *           the interrupts prior to the call to spin_lock_irqsave(lock);
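The removed parameter documentation already warned that a nested spin_lock_irqsave() on the same lock deadlocks; the only path that could nest was the NULL/g_irq_spin one through g_irq_spin_count[], and that path is now gone. A minimal sketch of the usual locked/unlocked split that avoids the problem (all names hypothetical, not from this patch):

#include <nuttx/irq.h>
#include <nuttx/spinlock.h>

static spinlock_t g_state_lock = SP_UNLOCKED;  /* hypothetical lock */

static void state_update_locked(void)
{
  /* Assumes g_state_lock is already held by the caller, so it is NOT
   * taken again here: re-acquiring a spinlock this CPU already holds
   * spins forever now that no nesting counter exists.
   */
}

static void state_update(void)
{
  irqstate_t flags;

  flags = spin_lock_irqsave(&g_state_lock);
  state_update_locked();
  spin_unlock_irqrestore(&g_state_lock, flags);
}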

--- a/sched/irq/irq_spinlock.c
+++ b/sched/irq/irq_spinlock.c

@@ -39,14 +39,6 @@
  * Public Data
  ****************************************************************************/
 
-/* Used for access control */
-
-volatile spinlock_t g_irq_spin = SP_UNLOCKED;
-
-/* Handles nested calls to spin_lock_irqsave and spin_unlock_irqrestore */
-
-volatile uint8_t g_irq_spin_count[CONFIG_SMP_NCPUS];
-
 /****************************************************************************
  * Public Functions
  ****************************************************************************/