Mirror of https://github.com/apache/nuttx.git, synced 2025-01-12 20:58:44 +08:00
use small lock in following files:

    arch/arm/src/armv7-a/arm_gicv2.c
    arch/arm/src/imx6/imx_serial.c
    arch/arm/src/imxrt/imxrt_flexcan.c
    arch/xtensa/src/esp32/esp32_himem.c
    arch/xtensa/src/esp32s3/esp32s3_himem.c

Signed-off-by: hujun5 <hujun5@xiaomi.com>
This commit is contained in:

    parent 0fe3aebd6a
    commit 67b95d0142

5 changed files with 46 additions and 38 deletions
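Background: in NuttX, calling spin_lock_irqsave(NULL) takes a single shared
IRQ spinlock, so unrelated code paths serialize against each other on SMP.
The pattern applied throughout this commit is to give each driver or
subsystem its own small spinlock_t instead. A minimal sketch of that pattern
follows; struct my_dev_s and my_dev_op() are hypothetical names used only
for illustration.

#include <nuttx/irq.h>
#include <nuttx/spinlock.h>

struct my_dev_s
{
  spinlock_t lock;               /* Small per-device lock */
  /* ... other device state ... */
};

static struct my_dev_s g_mydev =
{
  .lock = SP_UNLOCKED,           /* Compile-time initialization */
};

static void my_dev_op(struct my_dev_s *priv)
{
  irqstate_t flags;

  /* Before: flags = spin_lock_irqsave(NULL);  (shared global lock) */

  flags = spin_lock_irqsave(&priv->lock);

  /* ... touch only this device's state ... */

  spin_unlock_irqrestore(&priv->lock, flags);
}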
--- a/arch/arm/src/armv7-a/arm_gicv2.c
+++ b/arch/arm/src/armv7-a/arm_gicv2.c
@@ -52,7 +52,7 @@
  ****************************************************************************/
 
 #if defined(CONFIG_SMP) && CONFIG_SMP_NCPUS > 1
-static volatile cpu_set_t g_gic_init_done;
+static atomic_t g_gic_init_done;
 #endif
 
 /****************************************************************************
@@ -72,11 +72,7 @@ static volatile cpu_set_t g_gic_init_done;
 #if defined(CONFIG_SMP) && CONFIG_SMP_NCPUS > 1
 static void arm_gic_init_done(void)
 {
-  irqstate_t flags;
-
-  flags = spin_lock_irqsave(NULL);
-  CPU_SET(this_cpu(), &g_gic_init_done);
-  spin_unlock_irqrestore(NULL, flags);
+  atomic_fetch_or(&g_gic_init_done, 1 << this_cpu());
 }
 
 static void arm_gic_wait_done(cpu_set_t cpuset)
@@ -85,7 +81,7 @@ static void arm_gic_wait_done(cpu_set_t cpuset)
 
   do
     {
-      CPU_AND(&tmpset, &g_gic_init_done, &cpuset);
+      tmpset = (cpu_set_t)atomic_read(&g_gic_init_done) & cpuset;
     }
   while (!CPU_EQUAL(&tmpset, &cpuset));
 }
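Note that arm_gicv2.c goes one step further: the per-CPU "init done" flag
needs no lock at all once it is an atomic bitmask. Each CPU sets its own bit
with a single atomic read-modify-write, and the waiter polls with plain
atomic reads. A minimal sketch of the same idea, with illustrative names:

#include <stdint.h>
#include <nuttx/atomic.h>

static atomic_t g_done;                  /* One bit per CPU */

static void mark_done(int cpu)
{
  atomic_fetch_or(&g_done, 1 << cpu);    /* Lock-free bit set */
}

static void wait_done(uint32_t cpuset)
{
  /* Busy-wait until every bit in cpuset is set */

  while (((uint32_t)atomic_read(&g_done) & cpuset) != cpuset)
    {
    }
}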
--- a/arch/arm/src/imx6/imx_serial.c
+++ b/arch/arm/src/imx6/imx_serial.c
@@ -196,6 +196,7 @@ struct imx_uart_s
   uint32_t ucr1;          /* Saved UCR1 value */
   uint8_t  irq;           /* IRQ associated with this UART */
   uint8_t  parity;        /* 0=none, 1=odd, 2=even */
+  spinlock_t lock;        /* Spinlock */
   uint8_t  bits;          /* Number of bits (7 or 8) */
   uint8_t  stopbits2:1;   /* 1: Configure with 2 stop bits vs 1 */
 #ifdef CONFIG_SERIAL_IFLOWCONTROL
@@ -295,6 +296,7 @@ static struct imx_uart_s g_uart1priv =
   .baud      = CONFIG_UART1_BAUD,
   .irq       = IMX_IRQ_UART1,
   .parity    = CONFIG_UART1_PARITY,
+  .lock      = SP_UNLOCKED,
   .bits      = CONFIG_UART1_BITS,
   .stopbits2 = CONFIG_UART1_2STOP,
 };
@@ -325,6 +327,7 @@ static struct imx_uart_s g_uart2priv =
   .baud      = CONFIG_UART2_BAUD,
   .irq       = IMX_IRQ_UART2,
   .parity    = CONFIG_UART2_PARITY,
+  .lock      = SP_UNLOCKED,
   .bits      = CONFIG_UART2_BITS,
   .stopbits2 = CONFIG_UART2_2STOP,
 };
@@ -353,6 +356,7 @@ static struct imx_uart_s g_uart3priv =
   .baud      = IMX_UART3_VBASE,
   .irq       = IMX_IRQ_UART3,
   .parity    = CONFIG_UART3_PARITY,
+  .lock      = SP_UNLOCKED,
   .bits      = CONFIG_UART3_BITS,
   .stopbits2 = CONFIG_UART3_2STOP,
 };
@@ -381,6 +385,7 @@ static struct imx_uart_s g_uart4priv =
   .baud      = IMX_UART4_VBASE,
   .irq       = IMX_IRQ_UART4,
   .parity    = CONFIG_UART4_PARITY,
+  .lock      = SP_UNLOCKED,
   .bits      = CONFIG_UART4_BITS,
   .stopbits2 = CONFIG_UART4_2STOP,
 };
@@ -409,6 +414,7 @@ static struct imx_uart_s g_uart5priv =
   .baud      = IMX_UART5_VBASE,
   .irq       = IMX_IRQ_UART5,
   .parity    = CONFIG_UART5_PARITY,
+  .lock      = SP_UNLOCKED,
   .bits      = CONFIG_UART5_BITS,
   .stopbits2 = CONFIG_UART5_2STOP,
 };
@@ -877,7 +883,7 @@ static int imx_ioctl(struct file *filep, int cmd, unsigned long arg)
          * implement TCSADRAIN / TCSAFLUSH
          */
 
-        flags = spin_lock_irqsave(NULL);
+        flags = spin_lock_irqsave(&priv->lock);
         imx_disableuartint(priv, &ie);
         ret = imx_setup(dev);
 
@@ -885,7 +891,7 @@ static int imx_ioctl(struct file *filep, int cmd, unsigned long arg)
 
         imx_restoreuartint(priv, ie);
         priv->ie = ie;
-        spin_unlock_irqrestore(NULL, flags);
+        spin_unlock_irqrestore(&priv->lock, flags);
       }
     }
     break;
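Because the imx6 UART instances are statically allocated, each lock can be
initialized at compile time with the SP_UNLOCKED initializer in the
g_uartNpriv tables above; no runtime setup is needed, and the ioctl path now
serializes only against its own port rather than against every user of the
global lock.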
--- a/arch/arm/src/imxrt/imxrt_flexcan.c
+++ b/arch/arm/src/imxrt/imxrt_flexcan.c
@@ -267,6 +267,7 @@ struct imxrt_driver_s
   bool bifup;                  /* true:ifup false:ifdown */
   bool canfd_capable;
   int mb_address_offset;
+  spinlock_t lock;
 #ifdef TX_TIMEOUT_WQ
   struct wdog_s txtimeout[TXMBCOUNT]; /* TX timeout timer */
 #endif
@@ -777,7 +778,7 @@ static int imxrt_txpoll(struct net_driver_s *dev)
    * the field d_len is set to a value > 0.
    */
 
-  flags = spin_lock_irqsave(NULL);
+  flags = spin_lock_irqsave(&priv->lock);
 
   if (priv->dev.d_len > 0)
     {
@@ -793,12 +794,12 @@ static int imxrt_txpoll(struct net_driver_s *dev)
 
       if (imxrt_txringfull(priv))
         {
-          spin_unlock_irqrestore(NULL, flags);
+          spin_unlock_irqrestore(&priv->lock, flags);
           return -EBUSY;
         }
     }
 
-  spin_unlock_irqrestore(NULL, flags);
+  spin_unlock_irqrestore(&priv->lock, flags);
 
   /* If zero is returned, the polling will continue until all connections
    * have been examined.
@@ -2032,6 +2033,7 @@ int imxrt_caninitialize(int intf)
   priv->dev.d_ioctl   = imxrt_ioctl;    /* Support CAN ioctl() calls */
 #endif
   priv->dev.d_private = (void *)priv;   /* Used to recover private state from dev */
+  spin_lock_init(&priv->lock);
 
   /* Put the interface in the down state. This usually amounts to resetting
    * the device and/or calling imxrt_ifdown().
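In imxrt_flexcan.c the lock is instead initialized at runtime in
imxrt_caninitialize() via spin_lock_init(). Both forms leave the lock in the
unlocked state; a sketch of the two options, using a hypothetical struct
name:

#include <nuttx/spinlock.h>

struct can_priv_s
{
  spinlock_t lock;
};

/* Option 1: compile time, for statically allocated instances */

static struct can_priv_s g_can0 =
{
  .lock = SP_UNLOCKED,
};

/* Option 2: runtime, e.g. in the driver's initialize routine */

static void can_priv_setup(struct can_priv_s *priv)
{
  spin_lock_init(&priv->lock);   /* Resets the lock to SP_UNLOCKED */
}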
--- a/arch/xtensa/src/esp32/esp32_himem.c
+++ b/arch/xtensa/src/esp32/esp32_himem.c
@@ -126,6 +126,7 @@ typedef struct
   unsigned int ram_block: 16;
 } rangeblock_t;
 
+static spinlock_t g_descriptor_lock = SP_UNLOCKED;
 static ramblock_t *g_ram_descriptor = NULL;
 static rangeblock_t *g_range_descriptor = NULL;
 static int g_ramblockcnt = 0;
@@ -325,11 +326,11 @@ int esp_himem_alloc(size_t size, esp_himem_handle_t *handle_out)
       goto nomem;
     }
 
-  spinlock_flags = spin_lock_irqsave(NULL);
+  spinlock_flags = spin_lock_irqsave(&g_descriptor_lock);
 
   ok = allocate_blocks(blocks, r->block);
 
-  spin_unlock_irqrestore(NULL, spinlock_flags);
+  spin_unlock_irqrestore(&g_descriptor_lock, spinlock_flags);
   if (!ok)
     {
       goto nomem;
@@ -365,13 +366,13 @@ int esp_himem_free(esp_himem_handle_t handle)
 
   /* Mark blocks as free */
 
-  spinlock_flags = spin_lock_irqsave(NULL);
+  spinlock_flags = spin_lock_irqsave(&g_descriptor_lock);
   for (i = 0; i < handle->block_ct; i++)
     {
       g_ram_descriptor[handle->block[i]].is_alloced = false;
     }
 
-  spin_unlock_irqrestore(NULL, spinlock_flags);
+  spin_unlock_irqrestore(&g_descriptor_lock, spinlock_flags);
 
   /* Free handle */
 
@@ -407,7 +408,7 @@ int esp_himem_alloc_map_range(size_t size,
   r->block_start = -1;
 
   start_free = 0;
-  spinlock_flags = spin_lock_irqsave(NULL);
+  spinlock_flags = spin_lock_irqsave(&g_descriptor_lock);
 
   for (i = 0; i < g_rangeblockcnt; i++)
     {
@@ -431,10 +432,11 @@ int esp_himem_alloc_map_range(size_t size,
 
   if (r->block_start == -1)
     {
+      spin_unlock_irqrestore(&g_descriptor_lock, spinlock_flags);
+
       /* Couldn't find enough free blocks */
 
       kmm_free(r);
-      spin_unlock_irqrestore(NULL, spinlock_flags);
       return -ENOMEM;
     }
 
@@ -445,7 +447,7 @@ int esp_himem_alloc_map_range(size_t size,
       g_range_descriptor[r->block_start + i].is_alloced = 1;
     }
 
-  spin_unlock_irqrestore(NULL, spinlock_flags);
+  spin_unlock_irqrestore(&g_descriptor_lock, spinlock_flags);
 
   /* All done. */
 
@@ -474,14 +476,14 @@ int esp_himem_free_map_range(esp_himem_rangehandle_t handle)
 
   /* We should be good to free this. Mark blocks as free. */
 
-  spinlock_flags = spin_lock_irqsave(NULL);
+  spinlock_flags = spin_lock_irqsave(&g_descriptor_lock);
 
   for (i = 0; i < handle->block_ct; i++)
     {
       g_range_descriptor[i + handle->block_start].is_alloced = 0;
     }
 
-  spin_unlock_irqrestore(NULL, spinlock_flags);
+  spin_unlock_irqrestore(&g_descriptor_lock, spinlock_flags);
   kmm_free(handle);
   return OK;
 }
@@ -537,7 +539,7 @@ int esp_himem_map(esp_himem_handle_t handle,
 
   /* Map and mark as mapped */
 
-  spinlock_flags = spin_lock_irqsave(NULL);
+  spinlock_flags = spin_lock_irqsave(&g_descriptor_lock);
 
   for (i = 0; i < blockcount; i++)
     {
@@ -548,7 +550,7 @@ int esp_himem_map(esp_himem_handle_t handle,
         handle->block[i + ram_block];
     }
 
-  spin_unlock_irqrestore(NULL, spinlock_flags);
+  spin_unlock_irqrestore(&g_descriptor_lock, spinlock_flags);
 
   for (i = 0; i < blockcount; i++)
     {
@@ -589,7 +591,7 @@ int esp_himem_unmap(esp_himem_rangehandle_t range, void *ptr,
   HIMEM_CHECK(range_block + blockcount > range->block_ct,
               "range out of bounds for handle", -EINVAL);
 
-  spinlock_flags = spin_lock_irqsave(NULL);
+  spinlock_flags = spin_lock_irqsave(&g_descriptor_lock);
 
   for (i = 0; i < blockcount; i++)
     {
@@ -602,7 +604,7 @@ int esp_himem_unmap(esp_himem_rangehandle_t range, void *ptr,
     }
 
   esp_spiram_writeback_cache();
-  spin_unlock_irqrestore(NULL, spinlock_flags);
+  spin_unlock_irqrestore(&g_descriptor_lock, spinlock_flags);
   return OK;
 }
 
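One behavioral fix rides along in esp_himem_alloc_map_range(): on the
out-of-blocks error path the descriptor spinlock is now released before
kmm_free() is called, where previously the free ran under the lock.
kmm_free() may need to take the heap's own lock, which should not happen
while a spinlock is held with interrupts disabled; the reordering also keeps
the critical section minimal. The esp32s3 copy of the driver below receives
the identical treatment.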
--- a/arch/xtensa/src/esp32s3/esp32s3_himem.c
+++ b/arch/xtensa/src/esp32s3/esp32s3_himem.c
@@ -148,6 +148,7 @@ static int himem_ioctl(struct file *filep, int cmd,
  * Private Data
  ****************************************************************************/
 
+static spinlock_t g_descriptor_lock = SP_UNLOCKED;
 static ramblock_t *g_ram_descriptor = NULL;
 static rangeblock_t *g_range_descriptor = NULL;
 static int g_ramblockcnt = 0;
@@ -626,11 +627,11 @@ int esp_himem_alloc(size_t size, esp_himem_handle_t *handle_out)
       goto nomem;
     }
 
-  spinlock_flags = spin_lock_irqsave(NULL);
+  spinlock_flags = spin_lock_irqsave(&g_descriptor_lock);
 
   ok = allocate_blocks(blocks, r->block);
 
-  spin_unlock_irqrestore(NULL, spinlock_flags);
+  spin_unlock_irqrestore(&g_descriptor_lock, spinlock_flags);
   if (!ok)
     {
       goto nomem;
@@ -682,13 +683,13 @@ int esp_himem_free(esp_himem_handle_t handle)
 
   /* Mark blocks as free */
 
-  spinlock_flags = spin_lock_irqsave(NULL);
+  spinlock_flags = spin_lock_irqsave(&g_descriptor_lock);
   for (i = 0; i < handle->block_ct; i++)
     {
       g_ram_descriptor[handle->block[i]].is_alloced = false;
     }
 
-  spin_unlock_irqrestore(NULL, spinlock_flags);
+  spin_unlock_irqrestore(&g_descriptor_lock, spinlock_flags);
 
   /* Free handle */
 
@@ -742,7 +743,7 @@ int esp_himem_alloc_map_range(size_t size, esp_himem_rangehandle_t
   r->block_start = -1;
 
   start_free = 0;
-  spinlock_flags = spin_lock_irqsave(NULL);
+  spinlock_flags = spin_lock_irqsave(&g_descriptor_lock);
 
   for (i = 0; i < g_rangeblockcnt; i++)
     {
@@ -766,10 +767,11 @@ int esp_himem_alloc_map_range(size_t size, esp_himem_rangehandle_t
 
   if (r->block_start == -1)
     {
+      spin_unlock_irqrestore(&g_descriptor_lock, spinlock_flags);
+
       /* Couldn't find enough free blocks */
 
       kmm_free(r);
-      spin_unlock_irqrestore(NULL, spinlock_flags);
       return -ENOMEM;
     }
 
@@ -780,7 +782,7 @@ int esp_himem_alloc_map_range(size_t size, esp_himem_rangehandle_t
       g_range_descriptor[r->block_start + i].is_alloced = 1;
     }
 
-  spin_unlock_irqrestore(NULL, spinlock_flags);
+  spin_unlock_irqrestore(&g_descriptor_lock, spinlock_flags);
 
   /* All done. */
 
@@ -826,14 +828,14 @@ int esp_himem_free_map_range(esp_himem_rangehandle_t handle)
 
   /* We should be good to free this. Mark blocks as free. */
 
-  spinlock_flags = spin_lock_irqsave(NULL);
+  spinlock_flags = spin_lock_irqsave(&g_descriptor_lock);
 
   for (i = 0; i < handle->block_ct; i++)
     {
       g_range_descriptor[i + handle->block_start].is_alloced = 0;
     }
 
-  spin_unlock_irqrestore(NULL, spinlock_flags);
+  spin_unlock_irqrestore(&g_descriptor_lock, spinlock_flags);
   kmm_free(handle);
   return OK;
 }
@@ -917,7 +919,7 @@ int esp_himem_map(esp_himem_handle_t handle,
 
   /* Map and mark as mapped */
 
-  spinlock_flags = spin_lock_irqsave(NULL);
+  spinlock_flags = spin_lock_irqsave(&g_descriptor_lock);
 
   for (i = 0; i < blockcount; i++)
     {
@@ -928,7 +930,7 @@ int esp_himem_map(esp_himem_handle_t handle,
         handle->block[i + ram_block];
     }
 
-  spin_unlock_irqrestore(NULL, spinlock_flags);
+  spin_unlock_irqrestore(&g_descriptor_lock, spinlock_flags);
   for (i = 0; i < blockcount; i++)
     {
       virt_bank = himem_mmu_start + range->block_start + i + range_block;
@@ -983,7 +985,7 @@ int esp_himem_unmap(esp_himem_rangehandle_t range, void *ptr, size_t len)
   HIMEM_CHECK(range_block + blockcount > range->block_ct,
               "range out of bounds for handle", -EINVAL);
 
-  spinlock_flags = spin_lock_irqsave(NULL);
+  spinlock_flags = spin_lock_irqsave(&g_descriptor_lock);
 
   for (i = 0; i < blockcount; i++)
     {
@@ -996,7 +998,7 @@ int esp_himem_unmap(esp_himem_rangehandle_t range, void *ptr, size_t len)
     }
 
   esp_spiram_writeback_cache();
-  spin_unlock_irqrestore(NULL, spinlock_flags);
+  spin_unlock_irqrestore(&g_descriptor_lock, spinlock_flags);
   return OK;
 }
 