spinlock: use spin_lock_init to replace spin_initialize
reason:
  1. spin_lock_init and spin_initialize have similar functionality.
  2. spin_lock and spin_unlock should be called in matching pairs.

Signed-off-by: hujun5 <hujun5@xiaomi.com>
commit 34e79f9618 (parent e69903c939)
10 changed files with 19 additions and 29 deletions
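The change follows two simple call-site patterns. As a minimal sketch (not taken from the patch; the lock and function names are illustrative only), locks that used to be initialized unlocked switch directly to spin_lock_init, while locks that used to be initialized locked are now initialized unlocked and then taken explicitly, so every later spin_unlock has a matching spin_lock:

    #include <nuttx/spinlock.h>

    static spinlock_t g_example_lock;     /* illustrative lock, not from the patch */

    void example_init_unlocked(void)
    {
      /* Before: spin_initialize(&g_example_lock, SP_UNLOCKED); */

      spin_lock_init(&g_example_lock);
    }

    void example_init_locked(void)
    {
      /* Before: spin_initialize(&g_example_lock, SP_LOCKED); */

      spin_lock_init(&g_example_lock);    /* start unlocked...          */
      spin_lock(&g_example_lock);         /* ...then take it explicitly */
    }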
@@ -236,7 +236,7 @@ void *esp_coex_common_spin_lock_create_wrapper(void)
       DEBUGPANIC();
     }

-  spin_initialize(lock, SP_UNLOCKED);
+  spin_lock_init(lock);

   return lock;
 }
@@ -236,7 +236,7 @@ void *esp_coex_common_spin_lock_create_wrapper(void)
       DEBUGPANIC();
     }

-  spin_initialize(lock, SP_UNLOCKED);
+  spin_lock_init(lock);

   return lock;
 }
@@ -241,7 +241,8 @@ int up_cpu_start(int cpu)
    * try to lock it but spins until the APP CPU starts and unlocks it.
    */

-  spin_initialize(&g_appcpu_interlock, SP_LOCKED);
+  spin_lock_init(&g_appcpu_interlock);
+  spin_lock(&g_appcpu_interlock);

   /* Unstall the APP CPU */

@@ -288,6 +289,10 @@ int up_cpu_start(int cpu)
   /* And wait until the APP CPU starts and releases the spinlock. */

   spin_lock(&g_appcpu_interlock);

+  /* prev cpu boot done */
+
+  spin_unlock(&g_appcpu_interlock);
+
   DEBUGASSERT(g_appcpu_started);
 }
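Taken together, the two up_cpu_start hunks above turn the CPU-start interlock into the following handshake. This is a simplified sketch, assuming (as the surrounding code suggests) that the APP CPU's startup path calls spin_unlock(&g_appcpu_interlock) once it has finished booting:

    #include <nuttx/spinlock.h>

    static volatile spinlock_t g_appcpu_interlock;

    void example_boot_handshake(void)     /* illustrative name, not the real function */
    {
      spin_lock_init(&g_appcpu_interlock);  /* start unlocked                        */
      spin_lock(&g_appcpu_interlock);       /* take the lock before starting the CPU */

      /* Unstall/start the APP CPU here.  Its startup code is assumed to call
       * spin_unlock(&g_appcpu_interlock) when it has finished booting.
       */

      spin_lock(&g_appcpu_interlock);       /* spins until the APP CPU unlocks       */
      spin_unlock(&g_appcpu_interlock);     /* matching unlock keeps the pairs balanced */
    }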
@@ -920,7 +920,7 @@ static void *esp_spin_lock_create(void)
       DEBUGPANIC();
     }

-  spin_initialize(lock, SP_UNLOCKED);
+  spin_lock_init(lock);

   return lock;
 }
@@ -879,7 +879,7 @@ static void *esp_spin_lock_create(void)
       DEBUGPANIC();
     }

-  spin_initialize(lock, SP_UNLOCKED);
+  spin_lock_init(lock);

   return lock;
 }
@@ -226,7 +226,8 @@ int up_cpu_start(int cpu)
    * try to lock it but spins until the APP CPU starts and unlocks it.
    */

-  spin_initialize(&g_appcpu_interlock, SP_LOCKED);
+  spin_lock_init(&g_appcpu_interlock);
+  spin_lock(&g_appcpu_interlock);

   /* OpenOCD might have already enabled clock gating and taken APP CPU
    * out of reset. Don't reset the APP CPU if that's the case as this
@@ -272,6 +273,10 @@ int up_cpu_start(int cpu)
   /* And wait until the APP CPU starts and releases the spinlock. */

   spin_lock(&g_appcpu_interlock);

+  /* prev cpu boot done */
+
+  spin_unlock(&g_appcpu_interlock);
+
   DEBUGASSERT(g_appcpu_started);
 }
@@ -923,7 +923,7 @@ static void *esp_spin_lock_create(void)
       DEBUGPANIC();
     }

-  spin_initialize(lock, SP_UNLOCKED);
+  spin_lock_init(lock);

   return lock;
 }
@@ -495,26 +495,6 @@ static inline_function void spin_unlock(FAR volatile spinlock_t *lock)
 #  define spin_is_locked(l)   (*(l) == SP_LOCKED)
 #endif

-/****************************************************************************
- * Name: spin_initialize
- *
- * Description:
- *   Initialize a non-reentrant spinlock object to its initial,
- *   unlocked state.
- *
- * Input Parameters:
- *   lock  - A reference to the spinlock object to be initialized.
- *   state - Initial state of the spinlock {SP_LOCKED or SP_UNLOCKED)
- *
- * Returned Value:
- *   None.
- *
- ****************************************************************************/
-
-/* void spin_initialize(FAR spinlock_t *lock, spinlock_t state); */
-
-#define spin_initialize(l,s) do { SP_DMB(); *(l) = (s); } while (0)
-
 /****************************************************************************
  * Name: spin_lock_irqsave_wo_note
  *
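The removed macro simply wrote the requested state into the lock after a data memory barrier (SP_DMB). With it gone, spin_lock_init is the only initializer; it is assumed here to leave the lock unlocked, roughly along these lines (an assumed equivalent with an illustrative name, not the actual header definition):

    /* Assumed behaviour only; see include/nuttx/spinlock.h for the real macro */

    #define example_spin_lock_init(l)  do { *(l) = SP_UNLOCKED; } while (0)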
@@ -94,7 +94,7 @@ int pthread_spin_init(FAR pthread_spinlock_t *lock, int pshared)
   DEBUGASSERT(lock != NULL);
   if (lock != NULL)
     {
-      spin_initialize(&lock->sp_lock, SP_UNLOCKED);
+      spin_lock_init(&lock->sp_lock);
       lock->sp_holder = IMPOSSIBLE_THREAD;
       ret = OK;
     }
@@ -304,7 +304,7 @@ int mempool_init(FAR struct mempool_s *pool, FAR const char *name)
       kasan_poison(base, size);
     }

-  spin_initialize(&pool->lock, SP_UNLOCKED);
+  spin_lock_init(&pool->lock);
   if (pool->wait && pool->expandsize == 0)
     {
       nxsem_init(&pool->waitsem, 0, 0);