Mirror of https://github.com/apache/nuttx.git (synced 2025-01-13 01:38:36 +08:00)
Merge 8550dbd1d0 into aa0aecbd80 (contained in commit d63d3123dd)

20 changed files with 76 additions and 76 deletions
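Every hunk below follows the same mechanical pattern: a scheduler-internal spinlock taken with spin_lock_irqsave()/spin_unlock_irqrestore() is switched to the _wo_note ("without note") variants, which, as the names suggest, acquire and release the lock the same way but skip the sched-note instrumentation. A minimal sketch of that pattern follows; g_example_lock and example_update() are illustrative names only and are not part of this diff (the real code uses g_basetime_lock, g_irqlock, tg_lock, and so on).

/* Sketch only: hypothetical lock and function names, shown to contrast the
 * old and new call forms used throughout this change.
 */

#include <nuttx/irq.h>
#include <nuttx/spinlock.h>

static spinlock_t g_example_lock = SP_UNLOCKED;
static int g_example_counter;

static void example_update(void)
{
  irqstate_t flags;

  /* Old form: instrumented lock/unlock */

  flags = spin_lock_irqsave(&g_example_lock);
  g_example_counter++;
  spin_unlock_irqrestore(&g_example_lock, flags);

  /* New form used throughout this change: same locking and IRQ-masking
   * semantics, but no instrumentation note is emitted.
   */

  flags = spin_lock_irqsave_wo_note(&g_example_lock);
  g_example_counter++;
  spin_unlock_irqrestore_wo_note(&g_example_lock, flags);
}
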
@@ -58,7 +58,7 @@ static clock_t clock_process_runtime(FAR struct tcb_s *tcb)
 
   group = tcb->group;
 
-  flags = spin_lock_irqsave(&group->tg_lock);
+  flags = spin_lock_irqsave_wo_note(&group->tg_lock);
   sq_for_every(&group->tg_members, curr)
     {
       tcb = container_of(curr, struct tcb_s, member);
@@ -66,7 +66,7 @@ static clock_t clock_process_runtime(FAR struct tcb_s *tcb)
       runtime += tcb->run_time;
     }
 
-  spin_unlock_irqrestore(&group->tg_lock, flags);
+  spin_unlock_irqrestore_wo_note(&group->tg_lock, flags);
   return runtime;
 #else /* HAVE_GROUP_MEMBERS */
   return tcb->run_time;
@@ -109,9 +109,9 @@ void nxclock_gettime(clockid_t clock_id, FAR struct timespec *tp)
       * was last set, this gives us the current time.
       */
 
-      flags = spin_lock_irqsave(&g_basetime_lock);
+      flags = spin_lock_irqsave_wo_note(&g_basetime_lock);
       clock_timespec_add(&g_basetime, &ts, tp);
-      spin_unlock_irqrestore(&g_basetime_lock, flags);
+      spin_unlock_irqrestore_wo_note(&g_basetime_lock, flags);
 #else
       clock_timekeeping_get_wall_time(tp);
 #endif

@@ -163,7 +163,7 @@ static void clock_inittime(FAR const struct timespec *tp)
   struct timespec ts;
   irqstate_t flags;
 
-  flags = spin_lock_irqsave(&g_basetime_lock);
+  flags = spin_lock_irqsave_wo_note(&g_basetime_lock);
   if (tp)
     {
       memcpy(&g_basetime, tp, sizeof(struct timespec));
@@ -173,11 +173,11 @@ static void clock_inittime(FAR const struct timespec *tp)
       clock_basetime(&g_basetime);
     }
 
-  spin_unlock_irqrestore(&g_basetime_lock, flags);
+  spin_unlock_irqrestore_wo_note(&g_basetime_lock, flags);
 
   clock_systime_timespec(&ts);
 
-  flags = spin_lock_irqsave(&g_basetime_lock);
+  flags = spin_lock_irqsave_wo_note(&g_basetime_lock);
 
   /* Adjust base time to hide initial timer ticks. */
 
@@ -189,7 +189,7 @@ static void clock_inittime(FAR const struct timespec *tp)
       g_basetime.tv_sec--;
     }
 
-  spin_unlock_irqrestore(&g_basetime_lock, flags);
+  spin_unlock_irqrestore_wo_note(&g_basetime_lock, flags);
 #else
   clock_inittimekeeping(tp);
 #endif
@@ -351,9 +351,9 @@ void clock_resynchronize(FAR struct timespec *rtc_diff)
       * was last set, this gives us the current time.
       */
 
-      flags = spin_lock_irqsave(&g_basetime_lock);
+      flags = spin_lock_irqsave_wo_note(&g_basetime_lock);
       clock_timespec_add(&bias, &g_basetime, &curr_ts);
-      spin_unlock_irqrestore(&g_basetime_lock, flags);
+      spin_unlock_irqrestore_wo_note(&g_basetime_lock, flags);
 
       /* Check if RTC has advanced past system time. */
 

@@ -95,7 +95,7 @@ clock_t perf_gettime(void)
 {
   FAR struct perf_s *perf = &g_perf;
   clock_t now = up_perf_gettime();
-  irqstate_t flags = spin_lock_irqsave(&perf->lock);
+  irqstate_t flags = spin_lock_irqsave_wo_note(&perf->lock);
   clock_t result;
 
   /* Check if overflow */
@@ -107,7 +107,7 @@ clock_t perf_gettime(void)
 
   perf->last = now;
   result = (clock_t)now | (clock_t)perf->overflow << 32;
-  spin_unlock_irqrestore(&perf->lock, flags);
+  spin_unlock_irqrestore_wo_note(&perf->lock, flags);
   return result;
 }
 

@@ -78,11 +78,11 @@ void nxclock_settime(clockid_t clock_id, FAR const struct timespec *tp)
 
       clock_systime_timespec(&bias);
 
-      flags = spin_lock_irqsave(&g_basetime_lock);
+      flags = spin_lock_irqsave_wo_note(&g_basetime_lock);
 
       clock_timespec_subtract(tp, &bias, &g_basetime);
 
-      spin_unlock_irqrestore(&g_basetime_lock, flags);
+      spin_unlock_irqrestore_wo_note(&g_basetime_lock, flags);
 
       /* Setup the RTC (lo- or high-res) */
 

@@ -68,9 +68,9 @@ int clock_systime_timespec(FAR struct timespec *ts)
 
       up_rtc_gettime(ts);
 
-      flags = spin_lock_irqsave(&g_basetime_lock);
+      flags = spin_lock_irqsave_wo_note(&g_basetime_lock);
       clock_timespec_subtract(ts, &g_basetime, ts);
-      spin_unlock_irqrestore(&g_basetime_lock, flags);
+      spin_unlock_irqrestore_wo_note(&g_basetime_lock, flags);
     }
   else
     {

@@ -102,9 +102,9 @@ void group_join(FAR struct pthread_tcb_s *tcb)
 
   /* Add the member to the group */
 
-  flags = spin_lock_irqsave(&group->tg_lock);
+  flags = spin_lock_irqsave_wo_note(&group->tg_lock);
   sq_addfirst(&tcb->cmn.member, &group->tg_members);
-  spin_unlock_irqrestore(&group->tg_lock, flags);
+  spin_unlock_irqrestore_wo_note(&group->tg_lock, flags);
 }
 
 #endif /* !CONFIG_DISABLE_PTHREAD */

@@ -185,9 +185,9 @@ void group_leave(FAR struct tcb_s *tcb)
   /* Remove the member from group. */
 
 #ifdef HAVE_GROUP_MEMBERS
-  flags = spin_lock_irqsave(&group->tg_lock);
+  flags = spin_lock_irqsave_wo_note(&group->tg_lock);
   sq_rem(&tcb->member, &group->tg_members);
-  spin_unlock_irqrestore(&group->tg_lock, flags);
+  spin_unlock_irqrestore_wo_note(&group->tg_lock, flags);
 
   /* Have all of the members left the group? */
 

@@ -68,13 +68,13 @@ int irq_to_ndx(int irq)
 {
   DEBUGASSERT(g_irqmap_count < CONFIG_ARCH_NUSER_INTERRUPTS);
 
-  irqstate_t flags = spin_lock_irqsave(&g_irqlock);
+  irqstate_t flags = spin_lock_irqsave_wo_note(&g_irqlock);
   if (g_irqmap[irq] == 0)
     {
       g_irqmap[irq] = g_irqmap_count++;
     }
 
-  spin_unlock_irqrestore(&g_irqlock, flags);
+  spin_unlock_irqrestore_wo_note(&g_irqlock, flags);
   return g_irqmap[irq];
 }
 #endif
@@ -108,7 +108,7 @@ int irq_attach(int irq, xcpt_t isr, FAR void *arg)
       * to the unexpected interrupt handler.
       */
 
-      flags = spin_lock_irqsave(&g_irqlock);
+      flags = spin_lock_irqsave_wo_note(&g_irqlock);
       if (isr == NULL)
         {
           /* Disable the interrupt if we can before detaching it. We might
@@ -142,7 +142,7 @@ int irq_attach(int irq, xcpt_t isr, FAR void *arg)
       if (is_irqchain(ndx, isr))
         {
           ret = irqchain_attach(ndx, isr, arg);
-          spin_unlock_irqrestore(&g_irqlock, flags);
+          spin_unlock_irqrestore_wo_note(&g_irqlock, flags);
           return ret;
         }
 #endif
@@ -157,7 +157,7 @@ int irq_attach(int irq, xcpt_t isr, FAR void *arg)
       g_irqvector[ndx].count = 0;
 #endif
 
-      spin_unlock_irqrestore(&g_irqlock, flags);
+      spin_unlock_irqrestore_wo_note(&g_irqlock, flags);
       ret = OK;
     }
 

@@ -149,14 +149,14 @@ int irqchain_attach(int ndx, xcpt_t isr, FAR void *arg)
   FAR struct irqchain_s *curr;
   irqstate_t flags;
 
-  flags = spin_lock_irqsave(&g_irqchainlock);
+  flags = spin_lock_irqsave_wo_note(&g_irqchainlock);
   if (isr != irq_unexpected_isr)
     {
       if (g_irqvector[ndx].handler != irqchain_dispatch)
         {
           if (sq_count(&g_irqchainfreelist) < 2)
             {
-              spin_unlock_irqrestore(&g_irqchainlock, flags);
+              spin_unlock_irqrestore_wo_note(&g_irqchainlock, flags);
               return -ENOMEM;
             }
 
@@ -174,7 +174,7 @@ int irqchain_attach(int ndx, xcpt_t isr, FAR void *arg)
       node = (FAR struct irqchain_s *)sq_remfirst(&g_irqchainfreelist);
       if (node == NULL)
         {
-          spin_unlock_irqrestore(&g_irqchainlock, flags);
+          spin_unlock_irqrestore_wo_note(&g_irqchainlock, flags);
           return -ENOMEM;
         }
 
@@ -195,7 +195,7 @@ int irqchain_attach(int ndx, xcpt_t isr, FAR void *arg)
       irqchain_detach_all(ndx);
     }
 
-  spin_unlock_irqrestore(&g_irqchainlock, flags);
+  spin_unlock_irqrestore_wo_note(&g_irqchainlock, flags);
   return OK;
 }
 
@@ -217,7 +217,7 @@ int irqchain_detach(int irq, xcpt_t isr, FAR void *arg)
       return ndx;
     }
 
-  flags = spin_lock_irqsave(&g_irqchainlock);
+  flags = spin_lock_irqsave_wo_note(&g_irqchainlock);
 
   if (g_irqvector[ndx].handler == irqchain_dispatch)
     {
@@ -263,7 +263,7 @@ int irqchain_detach(int irq, xcpt_t isr, FAR void *arg)
           ret = irq_detach(irq);
         }
 
-      spin_unlock_irqrestore(&g_irqchainlock, flags);
+      spin_unlock_irqrestore_wo_note(&g_irqchainlock, flags);
     }
 
   return ret;

@@ -839,7 +839,7 @@ void _assert(FAR const char *filename, int linenum,
   flags = 0; /* suppress GCC warning */
   if (os_ready)
     {
-      flags = spin_lock_irqsave(&g_assert_lock);
+      flags = spin_lock_irqsave_wo_note(&g_assert_lock);
     }
 
 #if CONFIG_BOARD_RESET_ON_ASSERT < 2
@@ -912,6 +912,6 @@ void _assert(FAR const char *filename, int linenum,
 
   if (os_ready)
     {
-      spin_unlock_irqrestore(&g_assert_lock, flags);
+      spin_unlock_irqrestore_wo_note(&g_assert_lock, flags);
     }
 }

@@ -69,9 +69,9 @@ void nxmq_free_msg(FAR struct mqueue_msg_s *mqmsg)
       * list from interrupt handlers.
       */
 
-      flags = spin_lock_irqsave(&g_msgfreelock);
+      flags = spin_lock_irqsave_wo_note(&g_msgfreelock);
       list_add_tail(&g_msgfree, &mqmsg->node);
-      spin_unlock_irqrestore(&g_msgfreelock, flags);
+      spin_unlock_irqrestore_wo_note(&g_msgfreelock, flags);
     }
 
   /* If this is a message pre-allocated for interrupts,
@@ -84,9 +84,9 @@ void nxmq_free_msg(FAR struct mqueue_msg_s *mqmsg)
       * list from interrupt handlers.
       */
 
-      flags = spin_lock_irqsave(&g_msgfreelock);
+      flags = spin_lock_irqsave_wo_note(&g_msgfreelock);
       list_add_tail(&g_msgfreeirq, &mqmsg->node);
-      spin_unlock_irqrestore(&g_msgfreelock, flags);
+      spin_unlock_irqrestore_wo_note(&g_msgfreelock, flags);
     }
 
   /* Otherwise, deallocate it. Note: interrupt handlers

@@ -139,9 +139,9 @@ static FAR struct mqueue_msg_s *nxmq_alloc_msg(uint16_t msgsize)
 
   /* Try to get the message from the generally available free list. */
 
-  flags = spin_lock_irqsave(&g_msgfreelock);
+  flags = spin_lock_irqsave_wo_note(&g_msgfreelock);
   mqmsg = (FAR struct mqueue_msg_s *)list_remove_head(&g_msgfree);
-  spin_unlock_irqrestore(&g_msgfreelock, flags);
+  spin_unlock_irqrestore_wo_note(&g_msgfreelock, flags);
   if (mqmsg == NULL)
     {
       /* If we were called from an interrupt handler, then try to get the
@@ -153,9 +153,9 @@ static FAR struct mqueue_msg_s *nxmq_alloc_msg(uint16_t msgsize)
         {
           /* Try the free list reserved for interrupt handlers */
 
-          flags = spin_lock_irqsave(&g_msgfreelock);
+          flags = spin_lock_irqsave_wo_note(&g_msgfreelock);
           mqmsg = (FAR struct mqueue_msg_s *)list_remove_head(&g_msgfreeirq);
-          spin_unlock_irqrestore(&g_msgfreelock, flags);
+          spin_unlock_irqrestore_wo_note(&g_msgfreelock, flags);
         }
 
       /* We were not called from an interrupt handler. */

@@ -75,7 +75,7 @@ static int profil_timer_handler_cpu(FAR void *arg)
   uintptr_t pc = up_getusrpc(NULL);
   irqstate_t flags;
 
-  flags = spin_lock_irqsave(&prof->lock);
+  flags = spin_lock_irqsave_wo_note(&prof->lock);
   if (pc >= prof->lowpc && pc < prof->highpc)
     {
       size_t idx = (pc - prof->lowpc) / 2;
@@ -87,7 +87,7 @@ static int profil_timer_handler_cpu(FAR void *arg)
       prof->counter[idx]++;
     }
 
-  spin_unlock_irqrestore(&prof->lock, flags);
+  spin_unlock_irqrestore_wo_note(&prof->lock, flags);
   return OK;
 }
 
@@ -156,12 +156,12 @@ int profil(FAR unsigned short *buf, size_t bufsiz,
   memset(buf, 0, bufsiz);
   highpc = (uintmax_t)bufsiz * 65536 / scale;
 
-  flags = spin_lock_irqsave(&prof->lock);
+  flags = spin_lock_irqsave_wo_note(&prof->lock);
   prof->counter = buf;
   prof->lowpc = offset;
   prof->highpc = offset + highpc;
   prof->scale = scale;
-  spin_unlock_irqrestore(&prof->lock, flags);
+  spin_unlock_irqrestore_wo_note(&prof->lock, flags);
 
   wd_start(&prof->timer, PROFTICK, profil_timer_handler,
            (wdparm_t)(uintptr_t)prof);

@@ -77,13 +77,13 @@ static void nxsched_smp_call_add(int cpu,
 {
   irqstate_t flags;
 
-  flags = spin_lock_irqsave(&g_smp_call_lock);
+  flags = spin_lock_irqsave_wo_note(&g_smp_call_lock);
   if (!sq_inqueue(&data->node[cpu], &g_smp_call_queue[cpu]))
     {
       sq_addlast(&data->node[cpu], &g_smp_call_queue[cpu]);
     }
 
-  spin_unlock_irqrestore(&g_smp_call_lock, flags);
+  spin_unlock_irqrestore_wo_note(&g_smp_call_lock, flags);
 }
 
 /****************************************************************************
@@ -114,7 +114,7 @@ int nxsched_smp_call_handler(int irq, FAR void *context,
   FAR sq_entry_t *next;
   int cpu = this_cpu();
 
-  irqstate_t flags = spin_lock_irqsave(&g_smp_call_lock);
+  irqstate_t flags = spin_lock_irqsave_wo_note(&g_smp_call_lock);
 
   call_queue = &g_smp_call_queue[cpu];
 
@@ -126,11 +126,11 @@ int nxsched_smp_call_handler(int irq, FAR void *context,
 
       sq_rem(&data->node[cpu], call_queue);
 
-      spin_unlock_irqrestore(&g_smp_call_lock, flags);
+      spin_unlock_irqrestore_wo_note(&g_smp_call_lock, flags);
 
       ret = data->func(data->arg);
 
-      flags = spin_lock_irqsave(&g_smp_call_lock);
+      flags = spin_lock_irqsave_wo_note(&g_smp_call_lock);
 
       if (data->cookie != NULL)
         {
@@ -143,7 +143,7 @@ int nxsched_smp_call_handler(int irq, FAR void *context,
         }
     }
 
-  spin_unlock_irqrestore(&g_smp_call_lock, flags);
+  spin_unlock_irqrestore_wo_note(&g_smp_call_lock, flags);
   return OK;
 }
 

@@ -89,7 +89,7 @@ static void nxsig_alloc_actionblock(void)
   /* Use pre-allocated instances only once */
 
 #if CONFIG_SIG_PREALLOC_ACTIONS > 0
-  flags = spin_lock_irqsave(&g_sigaction_spin);
+  flags = spin_lock_irqsave_wo_note(&g_sigaction_spin);
   if (!g_sigactions_used)
     {
       for (i = 0; i < CONFIG_SIG_PREALLOC_ACTIONS; i++)
@@ -100,7 +100,7 @@ static void nxsig_alloc_actionblock(void)
       g_sigactions_used = true;
     }
 
-  spin_unlock_irqrestore(&g_sigaction_spin, flags);
+  spin_unlock_irqrestore_wo_note(&g_sigaction_spin, flags);
 #endif
 
   /* Allocate a block of signal actions */
@@ -108,14 +108,14 @@ static void nxsig_alloc_actionblock(void)
   sigact = kmm_malloc((sizeof(sigactq_t)) * CONFIG_SIG_ALLOC_ACTIONS);
   if (sigact != NULL)
     {
-      flags = spin_lock_irqsave(&g_sigaction_spin);
+      flags = spin_lock_irqsave_wo_note(&g_sigaction_spin);
 
       for (i = 0; i < CONFIG_SIG_ALLOC_ACTIONS; i++)
         {
           sq_addlast((FAR sq_entry_t *)sigact++, &g_sigfreeaction);
         }
 
-      spin_unlock_irqrestore(&g_sigaction_spin, flags);
+      spin_unlock_irqrestore_wo_note(&g_sigaction_spin, flags);
     }
 }
 
@@ -134,9 +134,9 @@ static FAR sigactq_t *nxsig_alloc_action(void)
 
   /* Try to get the signal action structure from the free list */
 
-  flags = spin_lock_irqsave(&g_sigaction_spin);
+  flags = spin_lock_irqsave_wo_note(&g_sigaction_spin);
   sigact = (FAR sigactq_t *)sq_remfirst(&g_sigfreeaction);
-  spin_unlock_irqrestore(&g_sigaction_spin, flags);
+  spin_unlock_irqrestore_wo_note(&g_sigaction_spin, flags);
 
   /* Check if we got one via loop as not in critical section now */
 
@@ -148,9 +148,9 @@ static FAR sigactq_t *nxsig_alloc_action(void)
 
       /* And try again */
 
-      flags = spin_lock_irqsave(&g_sigaction_spin);
+      flags = spin_lock_irqsave_wo_note(&g_sigaction_spin);
       sigact = (FAR sigactq_t *)sq_remfirst(&g_sigfreeaction);
-      spin_unlock_irqrestore(&g_sigaction_spin, flags);
+      spin_unlock_irqrestore_wo_note(&g_sigaction_spin, flags);
     }
 
   return sigact;
@@ -459,9 +459,9 @@ void nxsig_release_action(FAR sigactq_t *sigact)
     {
       /* Non-preallocated instances will never return to heap! */
 
-      flags = spin_lock_irqsave(&g_sigaction_spin);
+      flags = spin_lock_irqsave_wo_note(&g_sigaction_spin);
       sq_addlast((FAR sq_entry_t *)sigact, &g_sigfreeaction);
-      spin_unlock_irqrestore(&g_sigaction_spin, flags);
+      spin_unlock_irqrestore_wo_note(&g_sigaction_spin, flags);
     }
   else
     {

@@ -534,9 +534,9 @@ _sa_handler_t nxsig_default(FAR struct tcb_s *tcb, int signo, bool defaction)
         {
           /* nxsig_addset() is not atomic (but neither is sigaction()) */
 
-          flags = spin_lock_irqsave(&group->tg_lock);
+          flags = spin_lock_irqsave_wo_note(&group->tg_lock);
           nxsig_addset(&group->tg_sigdefault, signo);
-          spin_unlock_irqrestore(&group->tg_lock, flags);
+          spin_unlock_irqrestore_wo_note(&group->tg_lock, flags);
         }
     }
 
@@ -546,9 +546,9 @@ _sa_handler_t nxsig_default(FAR struct tcb_s *tcb, int signo, bool defaction)
       * atomic (but neither is sigaction()).
      */
 
-      flags = spin_lock_irqsave(&group->tg_lock);
+      flags = spin_lock_irqsave_wo_note(&group->tg_lock);
       nxsig_delset(&group->tg_sigdefault, signo);
-      spin_unlock_irqrestore(&group->tg_lock, flags);
+      spin_unlock_irqrestore_wo_note(&group->tg_lock, flags);
     }
 
   return handler;

@@ -57,7 +57,7 @@ FAR sigactq_t *nxsig_find_action(FAR struct task_group_s *group, int signo)
       * protection.
       */
 
-      flags = spin_lock_irqsave(&group->tg_lock);
+      flags = spin_lock_irqsave_wo_note(&group->tg_lock);
 
       /* Search the list for a sigaction on this signal */
 
@@ -65,7 +65,7 @@ FAR sigactq_t *nxsig_find_action(FAR struct task_group_s *group, int signo)
            ((sigact) && (sigact->signo != signo));
            sigact = sigact->flink);
 
-      spin_unlock_irqrestore(&group->tg_lock, flags);
+      spin_unlock_irqrestore_wo_note(&group->tg_lock, flags);
     }
 
   return sigact;

@@ -63,10 +63,10 @@ static FAR struct posix_timer_s *timer_allocate(void)
   /* Try to get a preallocated timer from the free list */
 
 #if CONFIG_PREALLOC_TIMERS > 0
-  flags = spin_lock_irqsave(&g_locktimers);
+  flags = spin_lock_irqsave_wo_note(&g_locktimers);
   ret = (FAR struct posix_timer_s *)
     sq_remfirst((FAR sq_queue_t *)&g_freetimers);
-  spin_unlock_irqrestore(&g_locktimers, flags);
+  spin_unlock_irqrestore_wo_note(&g_locktimers, flags);
 
   /* Did we get one? */
 
@@ -95,9 +95,9 @@ static FAR struct posix_timer_s *timer_allocate(void)
 
       /* And add it to the end of the list of allocated timers */
 
-      flags = spin_lock_irqsave(&g_locktimers);
+      flags = spin_lock_irqsave_wo_note(&g_locktimers);
       sq_addlast((FAR sq_entry_t *)ret, (FAR sq_queue_t *)&g_alloctimers);
-      spin_unlock_irqrestore(&g_locktimers, flags);
+      spin_unlock_irqrestore_wo_note(&g_locktimers, flags);
     }
 
   return ret;

@@ -175,7 +175,7 @@ FAR struct posix_timer_s *timer_gethandle(timer_t timerid)
 
   if (timerid != NULL)
     {
-      flags = spin_lock_irqsave(&g_locktimers);
+      flags = spin_lock_irqsave_wo_note(&g_locktimers);
 
       sq_for_every(&g_alloctimers, entry)
         {
@@ -186,7 +186,7 @@ FAR struct posix_timer_s *timer_gethandle(timer_t timerid)
             }
         }
 
-      spin_unlock_irqrestore(&g_locktimers, flags);
+      spin_unlock_irqrestore_wo_note(&g_locktimers, flags);
     }
 
   return timer;

@@ -57,7 +57,7 @@ static inline void timer_free(struct posix_timer_s *timer)
 
   /* Remove the timer from the allocated list */
 
-  flags = spin_lock_irqsave(&g_locktimers);
+  flags = spin_lock_irqsave_wo_note(&g_locktimers);
   sq_rem((FAR sq_entry_t *)timer, (FAR sq_queue_t *)&g_alloctimers);
 
   /* Return it to the free list if it is one of the preallocated timers */
@@ -66,14 +66,14 @@ static inline void timer_free(struct posix_timer_s *timer)
   if ((timer->pt_flags & PT_FLAGS_PREALLOCATED) != 0)
     {
       sq_addlast((FAR sq_entry_t *)timer, (FAR sq_queue_t *)&g_freetimers);
-      spin_unlock_irqrestore(&g_locktimers, flags);
+      spin_unlock_irqrestore_wo_note(&g_locktimers, flags);
     }
   else
 #endif
     {
       /* Otherwise, return it to the heap */
 
-      spin_unlock_irqrestore(&g_locktimers, flags);
+      spin_unlock_irqrestore_wo_note(&g_locktimers, flags);
       kmm_free(timer);
     }
 }