esp32: Update libc stubs to properly acquire/release locks.
Avoid using the static mutex and recursive mutex as the resources to be acquired/released. Instead, create a specific lock for each call if it does not already exist.
parent 44e2e9011f
commit d56e6de628

1 changed file with 70 additions and 28 deletions
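Every stub changes in the same way: the _lock_t handle (an int wide enough to hold a pointer on this 32-bit target) starts out as 0, and a dedicated mutex is allocated and initialized the first time the handle is used, instead of routing every caller through one shared static mutex. Below is a minimal restatement of that pattern, using the same NuttX nxmutex and kmm_malloc calls that appear in the diff, with explanatory comments added:

    void _lock_acquire(_lock_t *lock)
    {
      if ((*lock) == 0)
        {
          /* First use of this handle: allocate a dedicated mutex,
           * initialize it, and stash its address in the handle.
           */

          mutex_t *mutex = (mutex_t *)kmm_malloc(sizeof(mutex_t));

          nxmutex_init(mutex);

          *lock = (_lock_t)mutex;
        }

      /* Lock the per-handle mutex instead of a shared static one. */

      nxmutex_lock((mutex_t *)(*lock));
    }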
@@ -38,18 +38,12 @@
 #include "rom/esp32_libc_stubs.h"

-/****************************************************************************
- * Pre-processor Definitions
- ****************************************************************************/
-
-#define _lock_t int
-
 /****************************************************************************
  * Private Types
  ****************************************************************************/

-static mutex_t g_nxlock_common;
-static mutex_t g_nxlock_recursive;
+static mutex_t g_nxlock_common = NXMUTEX_INITIALIZER;
+static mutex_t g_nxlock_recursive = NXMUTEX_INITIALIZER;

 /* Forward declaration */

@@ -171,64 +165,112 @@ void _raise_r(struct _reent *r)

 void _lock_init(_lock_t *lock)
 {
-  nxmutex_init(&g_nxlock_common);
-  nxsem_get_value(&g_nxlock_common.sem, lock);
+  *lock = 0;
+
+  mutex_t *mutex = (mutex_t *)kmm_malloc(sizeof(mutex_t));
+
+  nxmutex_init(mutex);
+
+  *lock = (_lock_t)mutex;
 }

 void _lock_init_recursive(_lock_t *lock)
 {
-  nxmutex_init(&g_nxlock_recursive);
-  nxsem_get_value(&g_nxlock_recursive.sem, lock);
+  *lock = 0;
+
+  rmutex_t *rmutex = (rmutex_t *)kmm_malloc(sizeof(rmutex_t));
+
+  nxrmutex_init(rmutex);
+
+  *lock = (_lock_t)rmutex;
 }

 void _lock_close(_lock_t *lock)
 {
-  nxmutex_destroy(&g_nxlock_common);
+  mutex_t *mutex = (mutex_t *)(*lock);
+
+  nxmutex_destroy(mutex);
+  kmm_free((void *)(*lock));
   *lock = 0;
 }

 void _lock_close_recursive(_lock_t *lock)
 {
-  nxmutex_destroy(&g_nxlock_recursive);
+  rmutex_t *rmutex = (rmutex_t *)(*lock);
+
+  nxrmutex_destroy(rmutex);
+  kmm_free((void *)(*lock));
   *lock = 0;
 }

 void _lock_acquire(_lock_t *lock)
 {
-  nxmutex_lock(&g_nxlock_common);
-  nxsem_get_value(&g_nxlock_common.sem, lock);
+  if ((*lock) == 0)
+    {
+      mutex_t *mutex = (mutex_t *)kmm_malloc(sizeof(mutex_t));
+
+      nxmutex_init(mutex);
+
+      *lock = (_lock_t)mutex;
+    }
+
+  nxmutex_lock((mutex_t *)(*lock));
 }

 void _lock_acquire_recursive(_lock_t *lock)
 {
-  nxmutex_lock(&g_nxlock_recursive);
-  nxsem_get_value(&g_nxlock_recursive.sem, lock);
+  if ((*lock) == 0)
+    {
+      rmutex_t *rmutex = (rmutex_t *)kmm_malloc(sizeof(rmutex_t));
+
+      nxrmutex_init(rmutex);
+
+      *lock = (_lock_t)rmutex;
+    }
+
+  nxrmutex_lock((rmutex_t *)(*lock));
 }

 int _lock_try_acquire(_lock_t *lock)
 {
-  nxmutex_trylock(&g_nxlock_common);
-  nxsem_get_value(&g_nxlock_common.sem, lock);
-  return 0;
+  if ((*lock) == 0)
+    {
+      mutex_t *mutex = (mutex_t *)kmm_malloc(sizeof(mutex_t));
+
+      nxmutex_init(mutex);
+
+      *lock = (_lock_t)mutex;
+    }
+
+  return nxmutex_trylock((mutex_t *)(*lock));
 }

 int _lock_try_acquire_recursive(_lock_t *lock)
 {
-  nxmutex_trylock(&g_nxlock_recursive);
-  nxsem_get_value(&g_nxlock_recursive.sem, lock);
-  return 0;
+  if ((*lock) == 0)
+    {
+      rmutex_t *rmutex = (rmutex_t *)kmm_malloc(sizeof(rmutex_t));
+
+      nxrmutex_init(rmutex);
+
+      *lock = (_lock_t)rmutex;
+    }
+
+  return nxrmutex_trylock((rmutex_t *)(*lock));
 }

 void _lock_release(_lock_t *lock)
 {
-  nxmutex_unlock(&g_nxlock_common);
-  nxsem_get_value(&g_nxlock_common.sem, lock);
+  mutex_t *mutex = (mutex_t *)(*lock);
+
+  nxmutex_unlock(mutex);
 }

 void _lock_release_recursive(_lock_t *lock)
 {
-  nxmutex_unlock(&g_nxlock_recursive);
-  nxsem_get_value(&g_nxlock_recursive.sem, lock);
+  rmutex_t *rmutex = (rmutex_t *)(*lock);
+
+  nxrmutex_unlock(rmutex);
 }

 struct _reent *__getreent(void)
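For illustration only: callers of these stubs only ever see the integer handle. The snippet below sketches the lifecycle that the new per-handle allocation supports; the _lock_acquire, _lock_release, and _lock_close calls are the stubs from this diff, while g_my_lock and the wrapper functions are invented for the example.

    static _lock_t g_my_lock;        /* 0 until first use: no mutex allocated yet */

    void protected_update(void)
    {
      _lock_acquire(&g_my_lock);     /* Allocates and locks the mutex on first call */
      /* ... critical section ... */
      _lock_release(&g_my_lock);
    }

    void protected_teardown(void)
    {
      _lock_close(&g_my_lock);       /* Destroys and frees the mutex, resets the handle to 0 */
    }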