Mirror of https://github.com/apache/nuttx.git, synced 2025-01-13 01:38:36 +08:00

Compare commits: f14378a83a ... 6da278a639 (2 commits)

Commits in this range:
  6da278a639
  d0972396af

6 changed files with 106 additions and 55 deletions
@@ -1393,10 +1393,12 @@ void sched_note_irqhandler(int irq, FAR void *handler, bool enter)
void sched_note_wdog(uint8_t event, FAR void *handler, FAR const void *arg)
{
  FAR struct note_driver_s **driver;
  irqstate_t flags;
  struct note_wdog_s note;
  bool formatted = false;
  FAR struct tcb_s *tcb = this_task();

  flags = enter_critical_section_wo_note();
  for (driver = g_note_drivers; *driver; driver++)
    {
      if (note_wdog(*driver, event, handler, arg))

@@ -1421,6 +1423,8 @@ void sched_note_wdog(uint8_t event, FAR void *handler, FAR const void *arg)

      note_add(*driver, &note, sizeof(note));
    }

  leave_critical_section_wo_note(flags);
}
#endif
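The net effect of these two hunks is that sched_note_wdog() now brackets its driver loop with the new raw critical-section primitives rather than the instrumented ones, presumably so that emitting a watchdog note cannot itself produce critical-section instrumentation notes and recurse back into the note subsystem. A minimal sketch of that pattern follows; it is not part of the diff, and note_emit(), note_backend_put() and subsystem_work() are hypothetical names.

#include <stddef.h>
#include <nuttx/irq.h>

void note_backend_put(FAR const void *buf, size_t len);  /* hypothetical backend write */

/* Ordinary kernel code keeps taking the instrumented critical section. */

void subsystem_work(void)
{
  irqstate_t flags = enter_critical_section();   /* may emit a csection note */
  /* ... normal protected work ... */
  leave_critical_section(flags);
}

/* Code on the instrumentation path uses the *_wo_note flavor so that
 * serializing one note does not generate further csection notes.
 */

void note_emit(FAR const void *buf, size_t len)
{
  irqstate_t flags = enter_critical_section_wo_note();
  note_backend_put(buf, len);
  leave_critical_section_wo_note(flags);
}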
@@ -258,9 +258,19 @@ int irqchain_detach(int irq, xcpt_t isr, FAR void *arg);
 ****************************************************************************/

#ifdef CONFIG_IRQCOUNT

#  if (defined(CONFIG_SCHED_CRITMONITOR_MAXTIME_CSECTION) && \
       CONFIG_SCHED_CRITMONITOR_MAXTIME_CSECTION >= 0) || \
      defined(CONFIG_SCHED_INSTRUMENTATION_CSECTION)
irqstate_t enter_critical_section(void) noinstrument_function;
#  else
#    define enter_critical_section() enter_critical_section_wo_note()
#  endif

irqstate_t enter_critical_section_wo_note(void) noinstrument_function;
#else
#  define enter_critical_section() up_irq_save()
#  define enter_critical_section_wo_note() up_irq_save()
#endif

/****************************************************************************

@@ -288,9 +298,19 @@ irqstate_t enter_critical_section(void) noinstrument_function;
 ****************************************************************************/

#ifdef CONFIG_IRQCOUNT

#  if (defined(CONFIG_SCHED_CRITMONITOR_MAXTIME_CSECTION) && \
       CONFIG_SCHED_CRITMONITOR_MAXTIME_CSECTION >= 0) || \
      defined(CONFIG_SCHED_INSTRUMENTATION_CSECTION)
void leave_critical_section(irqstate_t flags) noinstrument_function;
#  else
#    define leave_critical_section(f) leave_critical_section_wo_note(f)
#  endif

void leave_critical_section_wo_note(irqstate_t flags) noinstrument_function;
#else
#  define leave_critical_section(f) up_irq_restore(f)
#  define leave_critical_section_wo_note(f) up_irq_restore(f)
#endif

/****************************************************************************
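With these declarations in place, callers of enter_critical_section()/leave_critical_section() do not change at all: when the critical-section monitor or csection instrumentation is enabled they get the instrumented wrappers, otherwise the macros collapse directly to the *_wo_note functions, and with CONFIG_IRQCOUNT disabled everything reduces to up_irq_save()/up_irq_restore(). A hypothetical, configuration-independent caller for illustration (not part of the diff):

#include <nuttx/irq.h>

static int g_shared_counter;   /* example state shared with interrupt handlers */

void shared_counter_increment(void)
{
  /* The same source compiles under every configuration; only what the
   * enter/leave calls expand to differs.
   */

  irqstate_t flags = enter_critical_section();
  g_shared_counter++;
  leave_critical_section(flags);
}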
@@ -80,7 +80,7 @@ volatile uint8_t g_cpu_nestcount[CONFIG_SMP_NCPUS];
 ****************************************************************************/

/****************************************************************************
 * Name: enter_critical_section
 * Name: enter_critical_section_wo_note
 *
 * Description:
 *   Take the CPU IRQ lock and disable interrupts on all CPUs. A thread-

@@ -90,7 +90,7 @@ volatile uint8_t g_cpu_nestcount[CONFIG_SMP_NCPUS];
 ****************************************************************************/

#ifdef CONFIG_SMP
irqstate_t enter_critical_section(void)
irqstate_t enter_critical_section_wo_note(void)
{
  FAR struct tcb_s *rtcb;
  irqstate_t ret;
@@ -246,15 +246,6 @@ irqstate_t enter_critical_section(void)

          cpu_irqlock_set(cpu);
          rtcb->irqcount = 1;

          /* Note that we have entered the critical section */

#if CONFIG_SCHED_CRITMONITOR_MAXTIME_CSECTION >= 0
          nxsched_critmon_csection(rtcb, true, return_address(0));
#endif
#ifdef CONFIG_SCHED_INSTRUMENTATION_CSECTION
          sched_note_csection(rtcb, true);
#endif
        }
    }

@@ -265,7 +256,7 @@ irqstate_t enter_critical_section(void)

#else

irqstate_t enter_critical_section(void)
irqstate_t enter_critical_section_wo_note(void)
{
  irqstate_t ret;
@@ -285,10 +276,28 @@ irqstate_t enter_critical_section(void)
   */

  DEBUGASSERT(rtcb->irqcount >= 0 && rtcb->irqcount < INT16_MAX);
  if (++rtcb->irqcount == 1)
    {
      /* Note that we have entered the critical section */
  rtcb->irqcount++;
    }

  /* Return interrupt status */

  return ret;
}
#endif

#if CONFIG_SCHED_CRITMONITOR_MAXTIME_CSECTION >= 0 ||\
    defined(CONFIG_SCHED_INSTRUMENTATION_CSECTION)
irqstate_t enter_critical_section(void)
{
  FAR struct tcb_s *rtcb;
  irqstate_t flags;
  flags = enter_critical_section_wo_note();

  if (!up_interrupt_context())
    {
      rtcb = this_task();
      if (rtcb->irqcount == 1)
        {
#if CONFIG_SCHED_CRITMONITOR_MAXTIME_CSECTION >= 0
          nxsched_critmon_csection(rtcb, true, return_address(0));
#endif
@@ -298,14 +307,12 @@ irqstate_t enter_critical_section(void)
        }
    }

  /* Return interrupt status */

  return ret;
  return flags;
}
#endif

/****************************************************************************
 * Name: leave_critical_section
 * Name: leave_critical_section_wo_note
 *
 * Description:
 *   Decrement the IRQ lock count and if it decrements to zero then release

@@ -314,7 +321,7 @@ irqstate_t enter_critical_section(void)
 ****************************************************************************/

#ifdef CONFIG_SMP
void leave_critical_section(irqstate_t flags)
void leave_critical_section_wo_note(irqstate_t flags)
{
  int cpu;
@@ -388,14 +395,6 @@ void leave_critical_section(irqstate_t flags)
        }
      else
        {
          /* No.. Note that we have left the critical section */

#if CONFIG_SCHED_CRITMONITOR_MAXTIME_CSECTION >= 0
          nxsched_critmon_csection(rtcb, false, return_address(0));
#endif
#ifdef CONFIG_SCHED_INSTRUMENTATION_CSECTION
          sched_note_csection(rtcb, false);
#endif
          /* Decrement our count on the lock. If all CPUs have
           * released, then unlock the spinlock.
           */

@@ -421,10 +420,8 @@ void leave_critical_section(irqstate_t flags)

  up_irq_restore(flags);
}

#else

void leave_critical_section(irqstate_t flags)
void leave_critical_section_wo_note(irqstate_t flags)
{
  /* Check if we were called from an interrupt handler and that the tasks
   * lists have been initialized.
@@ -440,17 +437,7 @@ void leave_critical_section(irqstate_t flags)
       */

      DEBUGASSERT(rtcb->irqcount > 0);
      if (--rtcb->irqcount <= 0)
        {
          /* Note that we have left the critical section */

#if CONFIG_SCHED_CRITMONITOR_MAXTIME_CSECTION >= 0
          nxsched_critmon_csection(rtcb, false, return_address(0));
#endif
#ifdef CONFIG_SCHED_INSTRUMENTATION_CSECTION
          sched_note_csection(rtcb, false);
#endif
        }
      --rtcb->irqcount;
    }

  /* Restore the previous interrupt state. */
@@ -458,4 +445,29 @@ void leave_critical_section(irqstate_t flags)
  up_irq_restore(flags);
}
#endif

#if CONFIG_SCHED_CRITMONITOR_MAXTIME_CSECTION >= 0 ||\
    defined(CONFIG_SCHED_INSTRUMENTATION_CSECTION)
void leave_critical_section(irqstate_t flags)
{
  FAR struct tcb_s *rtcb;

  if (!up_interrupt_context())
    {
      rtcb = this_task();
      if (rtcb->irqcount == 1)
        {
#  if CONFIG_SCHED_CRITMONITOR_MAXTIME_CSECTION >= 0
          nxsched_critmon_csection(rtcb, false, return_address(0));
#  endif
#  ifdef CONFIG_SCHED_INSTRUMENTATION_CSECTION
          sched_note_csection(rtcb, false);
#  endif
        }
    }

  leave_critical_section_wo_note(flags);
}
#endif

#endif /* CONFIG_IRQCOUNT */
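Taken together, the hunks above split each primitive in two: enter_critical_section_wo_note()/leave_critical_section_wo_note() keep only the locking and per-thread irqcount bookkeeping, while thin enter_critical_section()/leave_critical_section() wrappers add the critical-section monitor and scheduler-instrumentation hooks, and only at the outermost nesting level (irqcount == 1) outside interrupt context. A condensed sketch of that shape follows; it is not part of the diff, the my_cs_* names and the do_lock()/do_unlock()/instrument_*() helpers are hypothetical, and it assumes the same kernel-internal environment as the file above (this_task(), up_interrupt_context(), struct tcb_s with an irqcount field).

#include <nuttx/irq.h>

irqstate_t do_lock(void);           /* hypothetical: disable IRQs, take the lock */
void do_unlock(irqstate_t flags);   /* hypothetical: release the lock, restore IRQs */
void instrument_enter(void);        /* hypothetical critmon/sched_note hooks */
void instrument_leave(void);

irqstate_t my_cs_enter_wo_note(void)
{
  irqstate_t flags = do_lock();
  this_task()->irqcount++;          /* nesting bookkeeping only, no notes */
  return flags;
}

void my_cs_leave_wo_note(irqstate_t flags)
{
  this_task()->irqcount--;
  do_unlock(flags);
}

irqstate_t my_cs_enter(void)
{
  irqstate_t flags = my_cs_enter_wo_note();

  /* Instrument only the outermost entry of a thread, never an ISR. */

  if (!up_interrupt_context() && this_task()->irqcount == 1)
    {
      instrument_enter();
    }

  return flags;
}

void my_cs_leave(irqstate_t flags)
{
  /* Mirror image: record the outermost exit before actually dropping it. */

  if (!up_interrupt_context() && this_task()->irqcount == 1)
    {
      instrument_leave();
    }

  my_cs_leave_wo_note(flags);
}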
@@ -38,6 +38,8 @@
#include "sched/sched.h"
#include "wdog/wdog.h"

spinlock_t g_wdog_spinlock = SP_UNLOCKED;

/****************************************************************************
 * Public Functions
 ****************************************************************************/

@@ -60,15 +62,10 @@

int wd_cancel(FAR struct wdog_s *wdog)
{
  irqstate_t flags;
  int ret;

  flags = enter_critical_section();

  ret = wd_cancel_irq(wdog);

  leave_critical_section(flags);

  return ret;
}
@@ -91,12 +88,16 @@ int wd_cancel(FAR struct wdog_s *wdog)

int wd_cancel_irq(FAR struct wdog_s *wdog)
{
  irqstate_t flags;
  bool head;

  flags = spin_lock_irqsave(&g_wdog_spinlock);

  /* Make sure that the watchdog is valid and still active. */

  if (wdog == NULL || !WDOG_ISACTIVE(wdog))
    {
      spin_unlock_irqrestore(&g_wdog_spinlock, flags);
      return -EINVAL;
    }

@@ -116,6 +117,7 @@ int wd_cancel_irq(FAR struct wdog_s *wdog)
  /* Mark the watchdog inactive */

  wdog->func = NULL;
  spin_unlock_irqrestore(&g_wdog_spinlock, flags);

  if (head)
    {
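The result is a two-layer structure: wd_cancel() keeps serializing against the rest of the scheduler with the critical section and simply delegates, while wd_cancel_irq() protects the watchdog list itself with g_wdog_spinlock and releases the lock on every exit path, including the early -EINVAL return for a NULL or inactive watchdog. For callers the contract appears unchanged. A hypothetical driver-side usage sketch (not part of the diff; mydrv_stop_timeout() and g_timeout_wdog are invented names):

#include <assert.h>
#include <errno.h>
#include <nuttx/wdog.h>

static struct wdog_s g_timeout_wdog;    /* hypothetical driver watchdog */

void mydrv_stop_timeout(void)
{
  int ret = wd_cancel(&g_timeout_wdog);

  /* -EINVAL still just means the watchdog was not active; the locking
   * rework does not appear to alter the return values.
   */

  DEBUGASSERT(ret == 0 || ret == -EINVAL);
}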
@@ -112,10 +112,11 @@ static unsigned int g_wdtimernested;
static inline_function void wd_expiration(clock_t ticks)
{
  FAR struct wdog_s *wdog;
  wdparm_t arg;
  irqstate_t flags;
  wdentry_t func;

  flags = enter_critical_section();
  flags = spin_lock_irqsave(&g_wdog_spinlock);

#ifdef CONFIG_SCHED_TICKLESS
  /* Increment the nested watchdog timer count to handle cases where wd_start

@@ -147,12 +148,17 @@ static inline_function void wd_expiration(clock_t ticks)
      /* Indicate that the watchdog is no longer active. */

      func = wdog->func;
      arg = wdog->arg;
      wdog->func = NULL;

      /* Execute the watchdog function */

      up_setpicbase(wdog->picbase);
      CALL_FUNC(func, wdog->arg);
      spin_unlock_irqrestore(&g_wdog_spinlock, flags);

      CALL_FUNC(func, arg);

      flags = spin_lock_irqsave(&g_wdog_spinlock);
    }

#ifdef CONFIG_SCHED_TICKLESS
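The key detail in wd_expiration() is that the spinlock is not held across the callback: func and arg are copied to locals and the watchdog is marked inactive while g_wdog_spinlock is held, the lock is dropped, the callback runs, and the lock is re-taken before the list walk continues, so the callback can re-enter the watchdog API (the nearby comment about nested wd_start calls points the same way). A minimal sketch of that copy/unlock/call/relock pattern in isolation (not part of the diff; expire_one() is a hypothetical name):

#include <nuttx/irq.h>
#include <nuttx/spinlock.h>
#include <nuttx/wdog.h>

extern spinlock_t g_wdog_spinlock;       /* as declared in wdog.h below */

void expire_one(FAR struct wdog_s *wdog)
{
  irqstate_t flags;
  wdentry_t func;
  wdparm_t arg;

  flags = spin_lock_irqsave(&g_wdog_spinlock);

  func = wdog->func;                     /* copy out while holding the lock */
  arg  = wdog->arg;
  wdog->func = NULL;                     /* mark inactive before unlocking */

  spin_unlock_irqrestore(&g_wdog_spinlock, flags);

  func(arg);                             /* callback may call wd_start()/wd_cancel() */

  flags = spin_lock_irqsave(&g_wdog_spinlock);
  /* ... continue walking g_wdactivelist under the lock ... */
  spin_unlock_irqrestore(&g_wdog_spinlock, flags);
}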
@@ -161,7 +167,7 @@ static inline_function void wd_expiration(clock_t ticks)
  g_wdtimernested--;
#endif

  leave_critical_section(flags);
  spin_unlock_irqrestore(&g_wdog_spinlock, flags);
}

/****************************************************************************

@@ -293,7 +299,7 @@ int wd_start_abstick(FAR struct wdog_s *wdog, clock_t ticks,
   * the critical section is established.
   */

  flags = enter_critical_section();
  flags = spin_lock_irqsave(&g_wdog_spinlock);
#ifdef CONFIG_SCHED_TICKLESS
  /* We need to reassess timer if the watchdog list head has changed. */
@@ -314,8 +320,13 @@ int wd_start_abstick(FAR struct wdog_s *wdog, clock_t ticks,
       * then this will pick that new delay.
       */

      spin_unlock_irqrestore(&g_wdog_spinlock, flags);
      nxsched_reassess_timer();
    }
  else
    {
      spin_unlock_irqrestore(&g_wdog_spinlock, flags);
    }
#else
  UNUSED(reassess);

@@ -328,8 +339,8 @@ int wd_start_abstick(FAR struct wdog_s *wdog, clock_t ticks,
    }

  wd_insert(wdog, ticks, wdentry, arg);
  spin_unlock_irqrestore(&g_wdog_spinlock, flags);
#endif
  leave_critical_section(flags);

  sched_note_wdog(NOTE_WDOG_START, wdentry, (FAR void *)(uintptr_t)ticks);
  return OK;
@@ -425,13 +436,13 @@ clock_t wd_timer(clock_t ticks, bool noswitches)
      wd_expiration(ticks);
    }

  flags = enter_critical_section();
  flags = spin_lock_irqsave(&g_wdog_spinlock);

  /* Return the delay for the next watchdog to expire */

  if (list_is_empty(&g_wdactivelist))
    {
      leave_critical_section(flags);
      spin_unlock_irqrestore(&g_wdog_spinlock, flags);
      return 0;
    }

@@ -442,7 +453,7 @@ clock_t wd_timer(clock_t ticks, bool noswitches)
  wdog = list_first_entry(&g_wdactivelist, struct wdog_s, node);
  ret = wdog->expired - ticks;

  leave_critical_section(flags);
  spin_unlock_irqrestore(&g_wdog_spinlock, flags);

  /* Return the delay for the next watchdog to expire */
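From the point of view of watchdog users none of this changes the API: wd_start()/wd_start_abstick() and wd_cancel() keep their signatures, and because wd_expiration() drops g_wdog_spinlock before invoking the callback, re-arming a watchdog from inside its own expiration callback remains possible. A hypothetical periodic-poll user for illustration (not part of the diff; poll_expiry(), poll_begin() and g_poll_wdog are invented names):

#include <nuttx/clock.h>
#include <nuttx/wdog.h>

static struct wdog_s g_poll_wdog;        /* hypothetical periodic watchdog */

static void poll_expiry(wdparm_t arg)
{
  /* ... do the periodic work ... */

  /* Re-arming from inside the expiration callback: the callback runs
   * with g_wdog_spinlock released.
   */

  wd_start(&g_poll_wdog, MSEC2TICK(100), poll_expiry, arg);
}

void poll_begin(void)
{
  wd_start(&g_poll_wdog, MSEC2TICK(100), poll_expiry, 0);
}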
@@ -37,6 +37,7 @@
#include <nuttx/queue.h>
#include <nuttx/wdog.h>
#include <nuttx/list.h>
#include <nuttx/spinlock_type.h>

/****************************************************************************
 * Pre-processor Definitions

@@ -64,6 +65,7 @@ extern "C"
 */

extern struct list_node g_wdactivelist;
extern spinlock_t g_wdog_spinlock;

/****************************************************************************
 * Public Function Prototypes
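The spinlock itself follows the usual define-once/declare-everywhere pattern: the definition (spinlock_t g_wdog_spinlock = SP_UNLOCKED;) lives in one of the .c hunks earlier in this diff, and this header exports it to the rest of the wdog code next to g_wdactivelist. A hypothetical helper in another wdog translation unit would then take the lock around any list access, for example (not part of the diff; wd_anything_active() is an invented name):

#include <stdbool.h>

#include <nuttx/irq.h>
#include <nuttx/list.h>
#include <nuttx/spinlock.h>

#include "wdog/wdog.h"   /* brings in extern g_wdactivelist and g_wdog_spinlock */

/* Hypothetical helper: readers of the active list take g_wdog_spinlock
 * instead of the global critical section.
 */

bool wd_anything_active(void)
{
  irqstate_t flags = spin_lock_irqsave(&g_wdog_spinlock);
  bool active = !list_is_empty(&g_wdactivelist);
  spin_unlock_irqrestore(&g_wdog_spinlock, flags);
  return active;
}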