Miscellaneous fixes from astyle tool.
parent 777169385c
commit b274a97840
63 changed files with 422 additions and 418 deletions
@@ -71,7 +71,7 @@ static long compare_timespec(FAR const struct timespec *a,
 return 1;
 }
 
-return (long)a->tv_nsec -(long)b->tv_nsec;
+return (long)a->tv_nsec - (long)b->tv_nsec;
 }
 
 /****************************************************************************
@@ -80,5 +80,5 @@ static const uint8_t g_lookup[12] = {2, 5, 7, 10, 12, 15, 17, 20, 23, 25, 28, 30
 int clock_dow(int year, int month, int day)
 {
 day += month < 2 ? year-- : year - 2;
-return ((int)g_lookup[month] + day + 4 + year/4 - year/100 + year/400) % 7;
+return ((int)g_lookup[month] + day + 4 + year / 4 - year / 100 + year / 400) % 7;
 }
@@ -76,7 +76,7 @@
 */
 
 # define INITIAL_SYSTEM_TIMER_TICKS \
-((uint64_t)(UINT32_MAX - (TICK_PER_SEC * 5)))
+((uint64_t)(UINT32_MAX - (TICK_PER_SEC * 5)))
 #else
 # define INITIAL_SYSTEM_TIMER_TICKS 0
 #endif
@@ -102,7 +102,7 @@ int putenv(FAR const char *string)
 /* Then let setenv do all of the work */
 
 *pequal = '\0';
-ret = setenv(pname, pequal+1, TRUE);
+ret = setenv(pname, pequal + 1, TRUE);
 }
 
 kmm_free(pname);
@@ -142,7 +142,7 @@ int group_addrenv(FAR struct tcb_s *tcb)
 
 oldgroup = group_findbygid(g_gid_current);
 DEBUGASSERT(oldgroup &&
-(oldgroup->tg_flags & GROUP_FLAG_ADDRENV) != 0);
+(oldgroup->tg_flags & GROUP_FLAG_ADDRENV) != 0);
 
 if (oldgroup)
 {
@@ -119,7 +119,7 @@ static void group_dumpchildren(FAR struct task_group_s *group,
 for (i = 0, child = group->tg_children; child; i++, child = child->flink)
 {
 _info(" %d. ch_flags=%02x ch_pid=%d ch_status=%d\n",
-i, child->ch_flags, child->ch_pid, child->ch_status);
+i, child->ch_flags, child->ch_pid, child->ch_status);
 }
 }
 #else
@@ -251,7 +251,7 @@ void group_freechild(FAR struct child_status_s *child)
 ****************************************************************************/
 
 void group_addchild(FAR struct task_group_s *group,
-FAR struct child_status_s *child)
+FAR struct child_status_s *child)
 {
 /* Add the entry into the TCB list of children */
 
@@ -296,7 +296,7 @@ int group_initialize(FAR struct task_tcb_s *tcb)
 #ifdef HAVE_GROUP_MEMBERS
 /* Allocate space to hold GROUP_INITIAL_MEMBERS members of the group */
 
-group->tg_members = (FAR pid_t *)kmm_malloc(GROUP_INITIAL_MEMBERS*sizeof(pid_t));
+group->tg_members = (FAR pid_t *)kmm_malloc(GROUP_INITIAL_MEMBERS * sizeof(pid_t));
 if (!group->tg_members)
 {
 kmm_free(group);
@@ -105,7 +105,7 @@ static int group_killchildren_handler(pid_t pid, FAR void *arg)
 int group_killchildren(FAR struct task_tcb_s *tcb)
 {
 return group_foreachchild(tcb->cmn.group, group_killchildren_handler,
-(FAR void *)((uintptr_t)tcb->cmn.pid));
+(FAR void *)((uintptr_t)tcb->cmn.pid));
 }
 
 #endif /* HAVE_GROUP_MEMBERS */
@@ -37,43 +37,43 @@
 * Included Files
 ****************************************************************************/
 
-#include <sys/types.h>
-#include <stdbool.h>
-#include <stdio.h>
-#include <string.h>
-#include <assert.h>
-#include <debug.h>
+#include <sys/types.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+#include <debug.h>
 
-#include <nuttx/arch.h>
-#include <nuttx/compiler.h>
-#include <nuttx/sched.h>
-#include <nuttx/fs/fs.h>
-#include <nuttx/net/net.h>
-#include <nuttx/lib/lib.h>
-#include <nuttx/mm/mm.h>
-#include <nuttx/mm/shm.h>
-#include <nuttx/kmalloc.h>
-#include <nuttx/sched_note.h>
-#include <nuttx/syslog/syslog.h>
-#include <nuttx/init.h>
+#include <nuttx/arch.h>
+#include <nuttx/compiler.h>
+#include <nuttx/sched.h>
+#include <nuttx/fs/fs.h>
+#include <nuttx/net/net.h>
+#include <nuttx/lib/lib.h>
+#include <nuttx/mm/mm.h>
+#include <nuttx/mm/shm.h>
+#include <nuttx/kmalloc.h>
+#include <nuttx/sched_note.h>
+#include <nuttx/syslog/syslog.h>
+#include <nuttx/init.h>
 
-#include "sched/sched.h"
-#include "signal/signal.h"
-#include "wdog/wdog.h"
-#include "semaphore/semaphore.h"
+#include "sched/sched.h"
+#include "signal/signal.h"
+#include "wdog/wdog.h"
+#include "semaphore/semaphore.h"
 #ifndef CONFIG_DISABLE_MQUEUE
-# include "mqueue/mqueue.h"
+# include "mqueue/mqueue.h"
 #endif
 #ifndef CONFIG_DISABLE_PTHREAD
-# include "pthread/pthread.h"
+# include "pthread/pthread.h"
 #endif
-#include "clock/clock.h"
-#include "timer/timer.h"
-#include "irq/irq.h"
+#include "clock/clock.h"
+#include "timer/timer.h"
+#include "irq/irq.h"
 #ifdef HAVE_TASK_GROUP
-#include "group/group.h"
+# include "group/group.h"
 #endif
-#include "init/init.h"
+#include "init/init.h"
 
 /****************************************************************************
 * Pre-processor Definitions
@@ -100,7 +100,7 @@
 
 /* This is the list of all tasks that are ready to run. This is a
 * prioritized list with head of the list holding the highest priority
-* (unassigned) task. In the non-SMP cae, the head of this list is the
+* (unassigned) task. In the non-SMP case, the head of this list is the
 * currently active task and the tail of this list, the lowest priority
 * task, is always the IDLE task.
 */
@@ -109,7 +109,7 @@ volatile dq_queue_t g_readytorun;
 
 #ifdef CONFIG_SMP
 /* In order to support SMP, the function of the g_readytorun list changes,
-* The g_readytorun is still used but in the SMP cae it will contain only:
+* The g_readytorun is still used but in the SMP case it will contain only:
 *
 * - Only tasks/threads that are eligible to run, but not currently running,
 * and
@@ -488,7 +488,7 @@ void os_start(void)
 * the IDLE task.
 */
 
-g_idletcb[cpu].cmn.affinity = SCHED_ALL_CPUS;
+g_idletcb[cpu].cmn.affinity = SCHED_ALL_CPUS;
 #endif
 
 #if CONFIG_TASK_NAME_SIZE > 0
@@ -736,7 +736,7 @@ void os_start(void)
 
 #if CONFIG_NFILE_DESCRIPTORS > 0 || CONFIG_NSOCKET_DESCRIPTORS > 0
 #ifdef CONFIG_SMP
-if (cpu > 0)
+if (cpu > 0)
 {
 /* Clone stdout, stderr, stdin from the CPU0 IDLE task. */
 
@@ -105,8 +105,8 @@ int irq_attach(int irq, xcpt_t isr, FAR void *arg)
 * unexpected exception handler.
 */
 
-isr = irq_unexpected_isr;
-arg = NULL;
+isr = irq_unexpected_isr;
+arg = NULL;
 }
 
 /* Save the new ISR and its argument in the table. */
@@ -311,10 +311,10 @@ try_again:
 /* Normal tasking environment. */
 /* Do we already have interrupts disabled? */
 
-rtcb = this_task();
-DEBUGASSERT(rtcb != NULL);
+rtcb = this_task();
+DEBUGASSERT(rtcb != NULL);
 
-if (rtcb->irqcount > 0)
+if (rtcb->irqcount > 0)
 {
 /* Yes... make sure that the spinlock is set and increment the
 * IRQ lock count.
@@ -496,7 +496,7 @@ void leave_critical_section(irqstate_t flags)
 }
 else
 {
-#ifdef CONFIG_SCHED_INSTRUMENTATION_CSECTION
+#ifdef CONFIG_SCHED_INSTRUMENTATION_CSECTION
 /* No.. Note that we have left the critical section */
 
 sched_note_csection(rtcb, false);
@@ -87,15 +87,15 @@ static void mod_dumploadinfo(FAR struct mod_loadinfo_s *loadinfo)
 
 binfo("ELF Header:\n");
 binfo(" e_ident: %02x %02x %02x %02x\n",
-loadinfo->ehdr.e_ident[0], loadinfo->ehdr.e_ident[1],
-loadinfo->ehdr.e_ident[2], loadinfo->ehdr.e_ident[3]);
+loadinfo->ehdr.e_ident[0], loadinfo->ehdr.e_ident[1],
+loadinfo->ehdr.e_ident[2], loadinfo->ehdr.e_ident[3]);
 binfo(" e_type: %04x\n", loadinfo->ehdr.e_type);
 binfo(" e_machine: %04x\n", loadinfo->ehdr.e_machine);
 binfo(" e_version: %08x\n", loadinfo->ehdr.e_version);
 binfo(" e_entry: %08lx\n", (long)loadinfo->ehdr.e_entry);
 binfo(" e_phoff: %d\n", loadinfo->ehdr.e_phoff);
 binfo(" e_shoff: %d\n", loadinfo->ehdr.e_shoff);
-binfo(" e_flags: %08x\n" , loadinfo->ehdr.e_flags);
+binfo(" e_flags: %08x\n", loadinfo->ehdr.e_flags);
 binfo(" e_ehsize: %d\n", loadinfo->ehdr.e_ehsize);
 binfo(" e_phentsize: %d\n", loadinfo->ehdr.e_phentsize);
 binfo(" e_phnum: %d\n", loadinfo->ehdr.e_phnum);
@@ -295,9 +295,9 @@ ssize_t mq_doreceive(mqd_t mqdes, FAR struct mqueue_msg_s *mqmsg,
 
 ASSERT(btcb);
 
-btcb->msgwaitq = NULL;
-msgq->nwaitnotfull--;
-up_unblock_task(btcb);
+btcb->msgwaitq = NULL;
+msgq->nwaitnotfull--;
+up_unblock_task(btcb);
 
 leave_critical_section(flags);
 }
@@ -57,7 +57,7 @@
 
 #include "sched/sched.h"
 #ifndef CONFIG_DISABLE_SIGNALS
-# include "signal/signal.h"
+# include "signal/signal.h"
 #endif
 #include "mqueue/mqueue.h"
 
@@ -417,10 +417,10 @@ int mq_dosend(mqd_t mqdes, FAR struct mqueue_msg_s *mqmsg, FAR const char *msg,
 
 #ifdef CONFIG_CAN_PASS_STRUCTS
 DEBUGVERIFY(sig_mqnotempty(pid, event.sigev_signo,
-event.sigev_value));
+event.sigev_value));
 #else
 DEBUGVERIFY(sig_mqnotempty(pid, event.sigev_signo,
-event.sigev_value.sival_ptr));
+event.sigev_value.sival_ptr));
 #endif
 }
 
@@ -331,19 +331,19 @@ int mq_timedsend(mqd_t mqdes, FAR const char *msg, size_t msglen, int prio,
 leave_cancellation_point();
 return ret;
 
-/* Exit here with (1) the scheduler locked, (2) a message allocated, (3) a
-* wdog allocated, and (4) interrupts disabled. The error code is in
-* 'result'
-*/
+/* Exit here with (1) the scheduler locked, (2) a message allocated, (3) a
+* wdog allocated, and (4) interrupts disabled. The error code is in
+* 'result'
+*/
 
 errout_in_critical_section:
 leave_critical_section(flags);
 wd_delete(rtcb->waitdog);
 rtcb->waitdog = NULL;
 
-/* Exit here with (1) the scheduler locked and 2) a message allocated. The
-* error code is in 'result'
-*/
+/* Exit here with (1) the scheduler locked and 2) a message allocated. The
+* error code is in 'result'
+*/
 
 errout_with_mqmsg:
 mq_msgfree(mqmsg);
@@ -64,7 +64,7 @@
 /* Configuration ************************************************************/
 
 #ifdef CONFIG_DISABLE_SIGNALS
-# warning "Signals needed by this function (CONFIG_DISABLE_SIGNALS=n)"
+# warning "Signals needed by this function (CONFIG_DISABLE_SIGNALS=n)"
 #endif
 
 /****************************************************************************
@@ -626,8 +626,8 @@ int pg_worker(int argc, char *argv[])
 * g_pftcb).
 */
 
-pginfo("Calling pg_startfill\n");
-(void)pg_startfill();
+pginfo("Calling pg_startfill\n");
+(void)pg_startfill();
 }
 #else
 /* Are there tasks blocked and waiting for a fill? Loop until all
@@ -174,7 +174,7 @@ void pthread_cleanup_push(pthread_cleanup_t routine, FAR void *arg)
 
 sched_lock();
 if ((tcb->cmn.flags & TCB_FLAG_TTYPE_MASK) == TCB_FLAG_TTYPE_PTHREAD &&
-tcb->tos < CONFIG_PTHREAD_CLEANUP_STACKSIZE)
+tcb->tos < CONFIG_PTHREAD_CLEANUP_STACKSIZE)
 {
 unsigned int ndx = tcb->tos;
 
@@ -79,7 +79,7 @@ static bool pthread_notifywaiters(FAR struct join_s *pjoin)
 * awakened when all waiting tasks receive the data
 */
 
-(void)sem_init(&pjoin->data_sem, 0, (ntasks_waiting+1));
+(void)sem_init(&pjoin->data_sem, 0, (ntasks_waiting + 1));
 
 /* Post the semaphore to restart each thread that is waiting
 * on the semaphore
@@ -239,7 +239,7 @@ int pthread_completejoin(pid_t pid, FAR void *exit_value)
 
 if (!waiters && pjoin->detached)
 {
-pthread_destroyjoin(group, pjoin);
+pthread_destroyjoin(group, pjoin);
 }
 
 /* Giving the following semaphore will allow the waiters
@@ -425,10 +425,10 @@ int pthread_create(FAR pthread_t *thread, FAR const pthread_attr_t *attr,
 * parent thread's affinity mask.
 */
 
-if (attr->affinity != 0)
-{
-ptcb->cmn.affinity = attr->affinity;
-}
+if (attr->affinity != 0)
+{
+ptcb->cmn.affinity = attr->affinity;
+}
 #endif
 
 /* Configure the TCB for a pthread receiving on parameter
@@ -560,9 +560,9 @@ int pthread_create(FAR pthread_t *thread, FAR const pthread_attr_t *attr,
 /* Return the thread information to the caller */
 
 if (thread)
-{
-*thread = (pthread_t)pid;
-}
+{
+*thread = (pthread_t)pid;
+}
 
 if (!pjoin->started)
 {
@@ -95,20 +95,20 @@ void pthread_exit(FAR void *exit_value)
 #endif
 
 #ifdef CONFIG_CANCELLATION_POINTS
-/* Mark the pthread as non-cancelable to avoid additional calls to
-* pthread_exit() due to any cancellation point logic that might get
-* kicked off by actions taken during pthread_exit processing.
-*/
+/* Mark the pthread as non-cancelable to avoid additional calls to
+* pthread_exit() due to any cancellation point logic that might get
+* kicked off by actions taken during pthread_exit processing.
+*/
 
-tcb->flags |= TCB_FLAG_NONCANCELABLE;
-tcb->flags &= ~TCB_FLAG_CANCEL_PENDING;
-tcb->cpcount = 0;
+tcb->flags |= TCB_FLAG_NONCANCELABLE;
+tcb->flags &= ~TCB_FLAG_CANCEL_PENDING;
+tcb->cpcount = 0;
 #endif
 
 #ifdef CONFIG_PTHREAD_CLEANUP
-/* Perform any stack pthread clean-up callbacks */
+/* Perform any stack pthread clean-up callbacks */
 
-pthread_cleanup_popall((FAR struct pthread_tcb_s *)tcb);
+pthread_cleanup_popall((FAR struct pthread_tcb_s *)tcb);
 #endif
 
 /* Complete pending join operations */
@@ -203,7 +203,7 @@ int pthread_join(pthread_t thread, FAR pthread_addr_t *pexit_value)
 
 if (pexit_value)
 {
-*pexit_value = pjoin->exit_value;
+*pexit_value = pjoin->exit_value;
 sinfo("exit_value=0x%p\n", pjoin->exit_value);
 }
 
@@ -162,7 +162,7 @@ errout:
 ret = kill((pid_t)thread, signo);
 if (ret != OK)
 {
-ret = get_errno();
+ret = get_errno();
 }
 
 return ret;
@@ -345,49 +345,49 @@ int pthread_mutex_give(FAR struct pthread_mutex_s *mutex)
 #ifdef CONFIG_CANCELLATION_POINTS
 uint16_t pthread_disable_cancel(void)
 {
-FAR struct tcb_s *tcb = this_task();
-irqstate_t flags;
-uint16_t old;
+FAR struct tcb_s *tcb = this_task();
+irqstate_t flags;
+uint16_t old;
 
-/* We need perform the following operations from within a critical section
-* because it can compete with interrupt level activity.
-*/
+/* We need perform the following operations from within a critical section
+* because it can compete with interrupt level activity.
+*/
 
-flags = enter_critical_section();
-old = tcb->flags & (TCB_FLAG_CANCEL_PENDING | TCB_FLAG_NONCANCELABLE);
-tcb->flags &= ~(TCB_FLAG_CANCEL_PENDING | TCB_FLAG_NONCANCELABLE);
-leave_critical_section(flags);
-return old;
+flags = enter_critical_section();
+old = tcb->flags & (TCB_FLAG_CANCEL_PENDING | TCB_FLAG_NONCANCELABLE);
+tcb->flags &= ~(TCB_FLAG_CANCEL_PENDING | TCB_FLAG_NONCANCELABLE);
+leave_critical_section(flags);
+return old;
 }
 
 void pthread_enable_cancel(uint16_t cancelflags)
 {
-FAR struct tcb_s *tcb = this_task();
-irqstate_t flags;
+FAR struct tcb_s *tcb = this_task();
+irqstate_t flags;
 
-/* We need perform the following operations from within a critical section
-* because it can compete with interrupt level activity.
-*/
+/* We need perform the following operations from within a critical section
+* because it can compete with interrupt level activity.
+*/
 
-flags = enter_critical_section();
-tcb->flags |= cancelflags;
+flags = enter_critical_section();
+tcb->flags |= cancelflags;
 
-/* What should we do if there is a pending cancellation?
-*
-* If the thread is executing with deferred cancellation, we need do
-* nothing more; the cancellation cannot occur until the next
-* cancellation point.
-*
-* However, if the thread is executing in asynchronous cancellation mode,
-* then we need to terminate now by simply calling pthread_exit().
-*/
+/* What should we do if there is a pending cancellation?
+*
+* If the thread is executing with deferred cancellation, we need do
+* nothing more; the cancellation cannot occur until the next
+* cancellation point.
+*
+* However, if the thread is executing in asynchronous cancellation mode,
+* then we need to terminate now by simply calling pthread_exit().
+*/
 
-if ((tcb->flags & TCB_FLAG_CANCEL_DEFERRED) == 0 &&
-(tcb->flags & TCB_FLAG_CANCEL_PENDING) != 0)
-{
-pthread_exit(NULL);
-}
+if ((tcb->flags & TCB_FLAG_CANCEL_DEFERRED) == 0 &&
+(tcb->flags & TCB_FLAG_CANCEL_PENDING) != 0)
+{
+pthread_exit(NULL);
+}
 
-leave_critical_section(flags);
+leave_critical_section(flags);
 }
 #endif /* CONFIG_CANCELLATION_POINTS */
@@ -165,70 +165,70 @@ int pthread_mutex_lock(FAR pthread_mutex_t *mutex)
 #endif /* CONFIG_PTHREAD_MUTEX_TYPES */
 
 #ifndef CONFIG_PTHREAD_MUTEX_UNSAFE
-/* The calling thread does not hold the semaphore. The correct
-* behavior for the 'robust' mutex is to verify that the holder of the
-* mutex is still valid. This is protection from the case
-* where the holder of the mutex has exitted without unlocking it.
-*/
+/* The calling thread does not hold the semaphore. The correct
+* behavior for the 'robust' mutex is to verify that the holder of the
+* mutex is still valid. This is protection from the case
+* where the holder of the mutex has exitted without unlocking it.
+*/
 
 #ifdef CONFIG_PTHREAD_MUTEX_BOTH
 #ifdef CONFIG_PTHREAD_MUTEX_TYPES
-/* Include check if this is a NORMAL mutex and that it is robust */
+/* Include check if this is a NORMAL mutex and that it is robust */
 
-if (mutex->pid > 0 &&
-((mutex->flags & _PTHREAD_MFLAGS_ROBUST) != 0 ||
-mutex->type != PTHREAD_MUTEX_NORMAL) &&
-sched_gettcb(mutex->pid) == NULL)
+if (mutex->pid > 0 &&
+((mutex->flags & _PTHREAD_MFLAGS_ROBUST) != 0 ||
+mutex->type != PTHREAD_MUTEX_NORMAL) &&
+sched_gettcb(mutex->pid) == NULL)
 
 #else /* CONFIG_PTHREAD_MUTEX_TYPES */
-/* This can only be a NORMAL mutex. Include check if it is robust */
+/* This can only be a NORMAL mutex. Include check if it is robust */
 
-if (mutex->pid > 0 &&
-(mutex->flags & _PTHREAD_MFLAGS_ROBUST) != 0 &&
-sched_gettcb(mutex->pid) == NULL)
+if (mutex->pid > 0 &&
+(mutex->flags & _PTHREAD_MFLAGS_ROBUST) != 0 &&
+sched_gettcb(mutex->pid) == NULL)
 
 #endif /* CONFIG_PTHREAD_MUTEX_TYPES */
 #else /* CONFIG_PTHREAD_MUTEX_ROBUST */
-/* This mutex is always robust, whatever type it is. */
+/* This mutex is always robust, whatever type it is. */
 
-if (mutex->pid > 0 && sched_gettcb(mutex->pid) == NULL)
+if (mutex->pid > 0 && sched_gettcb(mutex->pid) == NULL)
 #endif
-{
-DEBUGASSERT(mutex->pid != 0); /* < 0: available, >0 owned, ==0 error */
-DEBUGASSERT((mutex->flags & _PTHREAD_MFLAGS_INCONSISTENT) != 0);
+{
+DEBUGASSERT(mutex->pid != 0); /* < 0: available, >0 owned, ==0 error */
+DEBUGASSERT((mutex->flags & _PTHREAD_MFLAGS_INCONSISTENT) != 0);
 
-/* A thread holds the mutex, but there is no such thread. POSIX
-* requires that the 'robust' mutex return EOWNERDEAD in this case.
-* It is then the caller's responsibility to call pthread_mutx_consistent()
-* fo fix the mutex.
-*/
+/* A thread holds the mutex, but there is no such thread. POSIX
+* requires that the 'robust' mutex return EOWNERDEAD in this case.
+* It is then the caller's responsibility to call pthread_mutx_consistent()
+* fo fix the mutex.
+*/
 
-mutex->flags |= _PTHREAD_MFLAGS_INCONSISTENT;
-ret = EOWNERDEAD;
-}
-else
+mutex->flags |= _PTHREAD_MFLAGS_INCONSISTENT;
+ret = EOWNERDEAD;
+}
+else
 #endif /* !CONFIG_PTHREAD_MUTEX_UNSAFE */
 
-{
-/* Take the underlying semaphore, waiting if necessary. NOTE that
-* is required to deadlock for the case of the non-robust NORMAL or
-* default mutex.
-*/
+{
+/* Take the underlying semaphore, waiting if necessary. NOTE that
+* is required to deadlock for the case of the non-robust NORMAL or
+* default mutex.
+*/
 
-ret = pthread_mutex_take(mutex, true);
+ret = pthread_mutex_take(mutex, true);
 
-/* If we successfully obtained the semaphore, then indicate
-* that we own it.
-*/
+/* If we successfully obtained the semaphore, then indicate
+* that we own it.
+*/
 
-if (ret == OK)
-{
-mutex->pid = mypid;
+if (ret == OK)
+{
+mutex->pid = mypid;
 #ifdef CONFIG_PTHREAD_MUTEX_TYPES
-mutex->nlocks = 1;
+mutex->nlocks = 1;
 #endif
 }
 }
 }
 }
 
 sched_unlock();
 }
@@ -120,9 +120,9 @@ int pthread_mutex_trylock(FAR pthread_mutex_t *mutex)
 ret = OK;
 }
 
-/* pthread_mutex_trytake failed. Did it fail because the semaphore
-* was not avaialable?
-*/
+/* pthread_mutex_trytake failed. Did it fail because the semaphore
+* was not avaialable?
+*/
 
 else if (status == EAGAIN)
 {
@@ -147,55 +147,55 @@ int pthread_mutex_trylock(FAR pthread_mutex_t *mutex)
 #endif
 
 #ifndef CONFIG_PTHREAD_MUTEX_UNSAFE
-/* The calling thread does not hold the semaphore. The correct
-* behavior for the 'robust' mutex is to verify that the holder of
-* the mutex is still valid. This is protection from the case
-* where the holder of the mutex has exitted without unlocking it.
-*/
+/* The calling thread does not hold the semaphore. The correct
+* behavior for the 'robust' mutex is to verify that the holder of
+* the mutex is still valid. This is protection from the case
+* where the holder of the mutex has exitted without unlocking it.
+*/
 
 #ifdef CONFIG_PTHREAD_MUTEX_BOTH
 #ifdef CONFIG_PTHREAD_MUTEX_TYPES
-/* Check if this NORMAL mutex is robust */
+/* Check if this NORMAL mutex is robust */
 
-if (mutex->pid > 0 &&
-((mutex->flags & _PTHREAD_MFLAGS_ROBUST) != 0 ||
-mutex->type != PTHREAD_MUTEX_NORMAL) &&
-sched_gettcb(mutex->pid) == NULL)
+if (mutex->pid > 0 &&
+((mutex->flags & _PTHREAD_MFLAGS_ROBUST) != 0 ||
+mutex->type != PTHREAD_MUTEX_NORMAL) &&
+sched_gettcb(mutex->pid) == NULL)
 
 #else /* CONFIG_PTHREAD_MUTEX_TYPES */
-/* Check if this NORMAL mutex is robust */
+/* Check if this NORMAL mutex is robust */
 
-if (mutex->pid > 0 &&
-(mutex->flags & _PTHREAD_MFLAGS_ROBUST) != 0 &&
-sched_gettcb(mutex->pid) == NULL)
+if (mutex->pid > 0 &&
+(mutex->flags & _PTHREAD_MFLAGS_ROBUST) != 0 &&
+sched_gettcb(mutex->pid) == NULL)
 
 #endif /* CONFIG_PTHREAD_MUTEX_TYPES */
 #else /* CONFIG_PTHREAD_MUTEX_ROBUST */
-/* This mutex is always robust, whatever type it is. */
+/* This mutex is always robust, whatever type it is. */
 
-if (mutex->pid > 0 && sched_gettcb(mutex->pid) == NULL)
+if (mutex->pid > 0 && sched_gettcb(mutex->pid) == NULL)
 #endif
-{
-DEBUGASSERT(mutex->pid != 0); /* < 0: available, >0 owned, ==0 error */
-DEBUGASSERT((mutex->flags & _PTHREAD_MFLAGS_INCONSISTENT) != 0);
+{
+DEBUGASSERT(mutex->pid != 0); /* < 0: available, >0 owned, ==0 error */
+DEBUGASSERT((mutex->flags & _PTHREAD_MFLAGS_INCONSISTENT) != 0);
 
-/* A thread holds the mutex, but there is no such thread.
-* POSIX requires that the 'robust' mutex return EOWNERDEAD
-* in this case. It is then the caller's responsibility to
-* call pthread_mutx_consistent() fo fix the mutex.
-*/
+/* A thread holds the mutex, but there is no such thread.
+* POSIX requires that the 'robust' mutex return EOWNERDEAD
+* in this case. It is then the caller's responsibility to
+* call pthread_mutx_consistent() fo fix the mutex.
+*/
 
-mutex->flags |= _PTHREAD_MFLAGS_INCONSISTENT;
-ret = EOWNERDEAD;
-}
+mutex->flags |= _PTHREAD_MFLAGS_INCONSISTENT;
+ret = EOWNERDEAD;
+}
 
 /* The mutex is locked by another, active thread */
 
-else
+else
 #endif /* CONFIG_PTHREAD_MUTEX_UNSAFE */
-{
-ret = EBUSY;
-}
+{
+ret = EBUSY;
+}
 }
 
 /* Some other, unhandled error occurred */
@@ -199,40 +199,40 @@ int pthread_mutex_unlock(FAR pthread_mutex_t *mutex)
 #endif /* !CONFIG_PTHREAD_MUTEX_UNSAFE || CONFIG_PTHREAD_MUTEX_TYPES */
 
 #ifdef CONFIG_PTHREAD_MUTEX_TYPES
-/* Yes, the caller owns the semaphore.. Is this a recursive mutex? */
+/* Yes, the caller owns the semaphore.. Is this a recursive mutex? */
 
-if (mutex->type == PTHREAD_MUTEX_RECURSIVE && mutex->nlocks > 1)
-{
-/* This is a recursive mutex and we there are multiple locks held. Retain
-* the mutex lock, just decrement the count of locks held, and return
-* success.
-*/
+if (mutex->type == PTHREAD_MUTEX_RECURSIVE && mutex->nlocks > 1)
+{
+/* This is a recursive mutex and we there are multiple locks held. Retain
+* the mutex lock, just decrement the count of locks held, and return
+* success.
+*/
 
-mutex->nlocks--;
-ret = OK;
-}
-else
+mutex->nlocks--;
+ret = OK;
+}
+else
 
 #endif /* CONFIG_PTHREAD_MUTEX_TYPES */
 
-/* This is either a non-recursive mutex or is the outermost unlock of
-* a recursive mutex.
-*
-* In the case where the calling thread is NOT the holder of the thread,
-* the behavior is undefined per POSIX. Here we do the same as GLIBC:
-* We allow the other thread to release the mutex even though it does
-* not own it.
-*/
+/* This is either a non-recursive mutex or is the outermost unlock of
+* a recursive mutex.
+*
+* In the case where the calling thread is NOT the holder of the thread,
+* the behavior is undefined per POSIX. Here we do the same as GLIBC:
+* We allow the other thread to release the mutex even though it does
+* not own it.
+*/
 
-{
-/* Nullify the pid and lock count then post the semaphore */
+{
+/* Nullify the pid and lock count then post the semaphore */
 
-mutex->pid = -1;
+mutex->pid = -1;
 #ifdef CONFIG_PTHREAD_MUTEX_TYPES
-mutex->nlocks = 0;
+mutex->nlocks = 0;
 #endif
-ret = pthread_mutex_give(mutex);
-}
+ret = pthread_mutex_give(mutex);
+}
 }
 
 sched_unlock();
@@ -152,7 +152,7 @@ struct tasklist_s
 
 /* This is the list of all tasks that are ready to run. This is a
 * prioritized list with head of the list holding the highest priority
-* (unassigned) task. In the non-SMP cae, the head of this list is the
+* (unassigned) task. In the non-SMP case, the head of this list is the
 * currently active task and the tail of this list, the lowest priority
 * task, is always the IDLE task.
 */
@@ -92,7 +92,7 @@ bool sched_addprioritized(FAR struct tcb_s *tcb, DSEG dq_queue_t *list)
 */
 
 for (next = (FAR struct tcb_s *)list->head;
-(next && sched_priority <= next->sched_priority);
+(next && sched_priority <= next->sched_priority);
 next = next->flink);
 
 /* Add the tcb to the spot found in the list. Check if the tcb
@@ -259,8 +259,8 @@ bool sched_addreadytorun(FAR struct tcb_s *btcb)
 
 (void)sched_addprioritized(btcb, (FAR dq_queue_t *)&g_readytorun);
 
-btcb->task_state = TSTATE_TASK_READYTORUN;
-doswitch = false;
+btcb->task_state = TSTATE_TASK_READYTORUN;
+doswitch = false;
 }
 else /* (task_state == TSTATE_TASK_ASSIGNED || task_state == TSTATE_TASK_RUNNING) */
 {
@@ -342,7 +342,7 @@ bool sched_addreadytorun(FAR struct tcb_s *btcb)
 /* Release our hold on the IRQ lock. */
 
 spin_clrbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
-&g_cpu_irqlock);
+&g_cpu_irqlock);
 }
 
 /* Sanity check. g_cpu_netcount should be greater than zero
@@ -72,13 +72,13 @@
 
 #ifdef CONFIG_SMP
 # define CPULOAD_TIMECONSTANT \
-(CONFIG_SMP_NCPUS * \
-CONFIG_SCHED_CPULOAD_TIMECONSTANT * \
-CPULOAD_TICKSPERSEC)
+(CONFIG_SMP_NCPUS * \
+CONFIG_SCHED_CPULOAD_TIMECONSTANT * \
+CPULOAD_TICKSPERSEC)
 #else
 # define CPULOAD_TIMECONSTANT \
-(CONFIG_SCHED_CPULOAD_TIMECONSTANT * \
-CPULOAD_TICKSPERSEC)
+(CONFIG_SCHED_CPULOAD_TIMECONSTANT * \
+CPULOAD_TICKSPERSEC)
 #endif
 
 /****************************************************************************
@@ -75,8 +75,8 @@ void sched_ufree(FAR void *address)
 * collect garbage on a group-by-group basis.
 */
 
-ASSERT(!up_interrupt_context());
-kumm_free(address);
+ASSERT(!up_interrupt_context());
+kumm_free(address);
 
 #else
 /* Check if this is an attempt to deallocate memory from an exception
@@ -73,8 +73,8 @@ static inline void sched_kucleanup(void)
 */
 
 #else
-irqstate_t flags;
-FAR void *address;
+irqstate_t flags;
+FAR void *address;
 
 /* Test if the delayed deallocation queue is empty. No special protection
 * is needed because this is an atomic test.
@@ -37,7 +37,7 @@
 * Included Files
 ****************************************************************************/
 
-#include <nuttx/config.h>
+#include <nuttx/config.h>
 
 #include <stdbool.h>
 
@@ -200,7 +200,7 @@ int sched_lock(void)
 {
 /* Note that we have pre-emption locked */
 
-sched_note_premption(rtcb, true);
+sched_note_premption(rtcb, true);
 }
 #endif
 
@@ -42,8 +42,8 @@
 #include <time.h>
 
 #if CONFIG_RR_INTERVAL > 0
-# include <sched.h>
-# include <nuttx/arch.h>
+# include <sched.h>
+# include <nuttx/arch.h>
 #endif
 
 #include "sched/sched.h"
@@ -286,7 +286,7 @@ bool sched_removereadytorun(FAR struct tcb_s *rtcb)
 /* Release our hold on the IRQ lock. */
 
 spin_clrbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
-&g_cpu_irqlock);
+&g_cpu_irqlock);
 }
 
 /* Sanity check. g_cpu_netcount should be greater than zero
@@ -102,7 +102,7 @@ int sched_setscheduler(pid_t pid, int policy,
 #ifdef CONFIG_SCHED_SPORADIC
 && policy != SCHED_SPORADIC
 #endif
-)
+)
 {
 set_errno(EINVAL);
 return ERROR;
@@ -588,7 +588,7 @@ static void sporadic_replenish_expire(int argc, wdparm_t arg1, ...)
 /* This should not be the main timer */
 
 DEBUGASSERT((repl->flags & (SPORADIC_FLAG_MAIN | SPORADIC_FLAG_REPLENISH))
-== SPORADIC_FLAG_REPLENISH);
+== SPORADIC_FLAG_REPLENISH);
 
 /* As a special case, we can do nothing here if scheduler has been locked.
 * We cannot drop the priority because that might cause a context switch,
@@ -45,8 +45,8 @@
 #include <debug.h>
 
 #if CONFIG_RR_INTERVAL > 0
-# include <sched.h>
-# include <nuttx/arch.h>
+# include <sched.h>
+# include <nuttx/arch.h>
 #endif
 
 #include "sched/sched.h"
@@ -54,7 +54,7 @@
 #include "clock/clock.h"
 
 #ifdef CONFIG_CLOCK_TIMEKEEPING
-# include "clock/clock_timekeeping.h"
+# include "clock/clock_timekeeping.h"
 #endif
 
 #ifdef CONFIG_SCHED_TICKLESS
@@ -115,7 +115,7 @@ int sched_unlock(void)
 */
 
 DEBUGASSERT(g_cpu_schedlock == SP_LOCKED &&
-(g_cpu_lockset & (1 << cpu)) != 0);
+(g_cpu_lockset & (1 << cpu)) != 0);
 
 spin_clrbit(&g_cpu_lockset, cpu, &g_cpu_locksetlock,
 &g_cpu_schedlock);
@@ -84,7 +84,7 @@ pid_t wait(FAR int *stat_loc)
 * trivial case.
 */
 
-return waitpid((pid_t)-1, stat_loc, 0);
+return waitpid((pid_t) - 1, stat_loc, 0);
 }
 
 #endif /* CONFIG_SCHED_WAITPID && CONFIG_SCHED_HAVE_PARENT */
@@ -66,7 +66,7 @@
 
 #ifdef CONFIG_SCHED_CHILD_STATUS
 static void exited_child(FAR struct tcb_s *rtcb, FAR struct child_status_s *child,
-FAR siginfo_t *info)
+FAR siginfo_t *info)
 {
 /* The child has exited. Return the saved exit status (and some fudged
 * information).
@@ -224,7 +224,7 @@ int waitid(idtype_t idtype, id_t id, FAR siginfo_t *info, int options)
 #ifdef HAVE_GROUP_MEMBERS
 if (ctcb == NULL || ctcb->group->tg_pgid != rtcb->group->tg_gid)
 #else
-if (ctcb == NULL || ctcb->group>tg_ppid != rtcb->pid)
+if (ctcb == NULL || ctcb->group->tg_ppid != rtcb->pid)
 #endif
 {
 errcode = ECHILD;
@@ -384,9 +384,9 @@ int waitid(idtype_t idtype, id_t id, FAR siginfo_t *info, int options)
 
 if (info->si_pid == (pid_t)id)
 {
-/* Yes... return success */
+/* Yes... return success */
 
-break;
+break;
 }
 }
 
@@ -467,7 +467,7 @@ static int sem_dumpholder(FAR struct semholder_s *pholder, FAR sem_t *sem,
 {
 #if CONFIG_SEM_PREALLOCHOLDERS > 0
 _info(" %08x: %08x %08x %04x\n",
-pholder, pholder->flink, pholder->htcb, pholder->counts);
+pholder, pholder->flink, pholder->htcb, pholder->counts);
 #else
 _info(" %08x: %08x %04x\n", pholder, pholder->htcb, pholder->counts);
 #endif
@@ -530,7 +530,7 @@ static int sem_restoreholderprio(FAR struct tcb_s *htcb,
 */
 
 DEBUGASSERT(/* htcb->sched_priority == stcb->sched_priority && */
-htcb->npend_reprio == 0);
+htcb->npend_reprio == 0);
 
 /* Reset the holder's priority back to the base priority. */
 
@@ -605,8 +605,8 @@ static int sem_restoreholderprio(FAR struct tcb_s *htcb,
 htcb->pend_reprios[i] = htcb->pend_reprios[j];
 }
 
-htcb->npend_reprio = j;
-break;
+htcb->npend_reprio = j;
+break;
 }
 }
 }
@@ -632,7 +632,7 @@ static int sem_restoreholderprio(FAR struct tcb_s *htcb,
 ****************************************************************************/
 
 static int sem_restoreholderprioall(FAR struct semholder_s *pholder,
-FAR sem_t *sem, FAR void *arg)
+FAR sem_t *sem, FAR void *arg)
 {
 return sem_restoreholderprio(pholder->htcb, sem, arg);
 }
@@ -673,7 +673,7 @@ static int sem_restoreholderprioB(FAR struct semholder_s *pholder,
 if (pholder->htcb == rtcb)
 {
 
-/* The running task has given up a count on the semaphore */
+/* The running task has given up a count on the semaphore */
 
 #if CONFIG_SEM_PREALLOCHOLDERS == 0
 /* In the case where there are only 2 holders. This step
@@ -682,7 +682,7 @@ static int sem_restoreholderprioB(FAR struct semholder_s *pholder,
 * causes a context switch.
 */
 
-sem_findandfreeholder(sem, rtcb);
+sem_findandfreeholder(sem, rtcb);
 #endif
 (void)sem_restoreholderprio(rtcb, sem, arg);
 return 1;
@@ -861,12 +861,12 @@ void sem_initholders(void)
 /* Put all of the pre-allocated holder structures into the free list */
 
 g_freeholders = g_holderalloc;
-for (i = 0; i < (CONFIG_SEM_PREALLOCHOLDERS-1); i++)
+for (i = 0; i < (CONFIG_SEM_PREALLOCHOLDERS - 1); i++)
 {
-g_holderalloc[i].flink = &g_holderalloc[i+1];
+g_holderalloc[i].flink = &g_holderalloc[i + 1];
 }
 
-g_holderalloc[CONFIG_SEM_PREALLOCHOLDERS-1].flink = NULL;
+g_holderalloc[CONFIG_SEM_PREALLOCHOLDERS - 1].flink = NULL;
 #endif
 }
 
@@ -1012,7 +1012,7 @@ void sem_boostpriority(FAR sem_t *sem)
 * count.
 */
 
-(void)sem_foreachholder(sem, sem_boostholderprio, rtcb);
+(void)sem_foreachholder(sem, sem_boostholderprio, rtcb);
 }
 
 /****************************************************************************
@@ -1189,7 +1189,11 @@ int sem_nfreeholders(void)
 FAR struct semholder_s *pholder;
 int n;
 
-for (pholder = g_freeholders, n = 0; pholder; pholder = pholder->flink) n++;
+for (pholder = g_freeholders, n = 0; pholder; pholder = pholder->flink)
+{
+n++;
+}
 
 return n;
 #else
 return 0;
@@ -70,8 +70,8 @@ FAR sigactq_t *sig_findaction(FAR struct task_group_s *group, int signo)
 /* Seach the list for a sigaction on this signal */
 
 for (sigact = (FAR sigactq_t *)group->tg_sigactionq.head;
-((sigact) && (sigact->signo != signo));
-sigact = sigact->flink);
+((sigact) && (sigact->signo != signo));
+sigact = sigact->flink);
 
 sched_unlock();
 }
@@ -176,7 +176,7 @@ static sigpendq_t *sig_allocatependingsignalblock(sq_queue_t *siglist,
 /* Allocate a block of pending signal structures */
 
 sigpendalloc =
-(FAR sigpendq_t *)kmm_malloc((sizeof(sigpendq_t)) * nsigs);
+(FAR sigpendq_t *)kmm_malloc((sizeof(sigpendq_t)) * nsigs);
 
 sigpend = sigpendalloc;
 for (i = 0; i < nsigs; i++)
@@ -213,26 +213,26 @@ void sig_initialize(void)
 /* Add a block of signal structures to each list */
 
 g_sigpendingactionalloc =
-sig_allocateblock(&g_sigpendingaction,
-NUM_PENDING_ACTIONS,
-SIG_ALLOC_FIXED);
+sig_allocateblock(&g_sigpendingaction,
+NUM_PENDING_ACTIONS,
+SIG_ALLOC_FIXED);
 
 g_sigpendingirqactionalloc =
-sig_allocateblock(&g_sigpendingirqaction,
-NUM_PENDING_INT_ACTIONS,
-SIG_ALLOC_IRQ);
+sig_allocateblock(&g_sigpendingirqaction,
+NUM_PENDING_INT_ACTIONS,
+SIG_ALLOC_IRQ);
 
 sig_allocateactionblock();
 
 g_sigpendingsignalalloc =
-sig_allocatependingsignalblock(&g_sigpendingsignal,
-NUM_SIGNALS_PENDING,
-SIG_ALLOC_FIXED);
+sig_allocatependingsignalblock(&g_sigpendingsignal,
+NUM_SIGNALS_PENDING,
+SIG_ALLOC_FIXED);
 
 g_sigpendingirqsignalalloc =
-sig_allocatependingsignalblock(&g_sigpendingirqsignal,
-NUM_INT_SIGNALS_PENDING,
-SIG_ALLOC_IRQ);
+sig_allocatependingsignalblock(&g_sigpendingirqsignal,
+NUM_INT_SIGNALS_PENDING,
+SIG_ALLOC_IRQ);
 }
 
 /****************************************************************************
@@ -252,7 +252,7 @@ void sig_allocateactionblock(void)
 /* Allocate a block of signal actions */
 
 g_sigactionalloc =
-(FAR sigactq_t *)kmm_malloc((sizeof(sigactq_t)) * NUM_SIGNAL_ACTIONS);
+(FAR sigactq_t *)kmm_malloc((sizeof(sigactq_t)) * NUM_SIGNAL_ACTIONS);
 
 sigact = g_sigactionalloc;
 for (i = 0; i < NUM_SIGNAL_ACTIONS; i++)
@@ -157,6 +157,6 @@ int sigprocmask(int how, FAR const sigset_t *set, FAR sigset_t *oset)
 sig_unmaskpendingsignal();
 }
 
-sched_unlock();
-return ret;
+sched_unlock();
+return ret;
 }
@@ -97,10 +97,10 @@ static void sig_timeout(int argc, wdparm_t itcb)
 */
 
 union
-{
-FAR struct tcb_s *wtcb;
-wdparm_t itcb;
-} u;
+{
+FAR struct tcb_s *wtcb;
+wdparm_t itcb;
+} u;
 
 u.itcb = itcb;
 ASSERT(u.wtcb);
@@ -264,7 +264,7 @@ int sigtimedwait(FAR const sigset_t *set, FAR struct siginfo *info,
 #ifdef CONFIG_HAVE_LONG_LONG
 uint64_t waitticks64 = ((uint64_t)timeout->tv_sec * NSEC_PER_SEC +
 (uint64_t)timeout->tv_nsec + NSEC_PER_TICK - 1) /
-NSEC_PER_TICK;
+NSEC_PER_TICK;
 DEBUGASSERT(waitticks64 <= UINT32_MAX);
 waitticks = (uint32_t)waitticks64;
 #else
@@ -272,7 +272,7 @@ int sigtimedwait(FAR const sigset_t *set, FAR struct siginfo *info,
 
 DEBUGASSERT(timeout->tv_sec < UINT32_MAX / MSEC_PER_SEC);
 waitmsec = timeout->tv_sec * MSEC_PER_SEC +
-(timeout->tv_nsec + NSEC_PER_MSEC - 1) / NSEC_PER_MSEC;
+(timeout->tv_nsec + NSEC_PER_MSEC - 1) / NSEC_PER_MSEC;
 waitticks = MSEC2TICK(waitmsec);
 #endif
 
@@ -132,7 +132,7 @@ bool enter_cancellation_point(void)
 
 if (((tcb->flags & TCB_FLAG_NONCANCELABLE) == 0 &&
 (tcb->flags & TCB_FLAG_CANCEL_DEFERRED) != 0) ||
-tcb->cpcount > 0)
+tcb->cpcount > 0)
 {
 /* Check if there is a pending cancellation */
 
@@ -150,15 +150,15 @@ bool enter_cancellation_point(void)
 if (tcb->cpcount == 0)
 {
 #ifndef CONFIG_DISABLE_PTHREAD
-if ((tcb->flags & TCB_FLAG_TTYPE_MASK) == TCB_FLAG_TTYPE_PTHREAD)
-{
-pthread_exit(PTHREAD_CANCELED);
-}
-else
+if ((tcb->flags & TCB_FLAG_TTYPE_MASK) == TCB_FLAG_TTYPE_PTHREAD)
+{
+pthread_exit(PTHREAD_CANCELED);
+}
+else
 #endif
-{
-exit(EXIT_FAILURE);
-}
+{
+exit(EXIT_FAILURE);
+}
 }
 }
 
@@ -236,22 +236,22 @@ void leave_cancellation_point(void)
 if ((tcb->flags & TCB_FLAG_CANCEL_PENDING) != 0)
 {
 #ifndef CONFIG_DISABLE_PTHREAD
-if ((tcb->flags & TCB_FLAG_TTYPE_MASK) == TCB_FLAG_TTYPE_PTHREAD)
-{
-pthread_exit(PTHREAD_CANCELED);
-}
-else
+if ((tcb->flags & TCB_FLAG_TTYPE_MASK) == TCB_FLAG_TTYPE_PTHREAD)
+{
+pthread_exit(PTHREAD_CANCELED);
+}
+else
 #endif
-{
-exit(EXIT_FAILURE);
-}
+{
+exit(EXIT_FAILURE);
+}
 }
 }
 else
 {
-/* We are not at the outermost nesting level. Just decrment the
-* nesting level count.
-*/
+/* We are not at the outermost nesting level. Just decrment the
+* nesting level count.
+*/
 
 tcb->cpcount--;
 }
@@ -259,7 +259,7 @@ int task_create(FAR const char *name, int priority,
 ****************************************************************************/
 
 int kernel_thread(FAR const char *name, int priority,
-int stack_size, main_t entry, FAR char * const argv[])
+int stack_size, main_t entry, FAR char *const argv[])
 {
 return thread_create(name, TCB_FLAG_TTYPE_KERNEL, priority, stack_size,
 entry, argv);
@@ -44,7 +44,7 @@
 #include "sched/sched.h"
 
 #ifndef CONFIG_DISABLE_SIGNALS
-# include "signal/signal.h"
+# include "signal/signal.h"
 #endif
 #include "task/task.h"
 
@@ -94,7 +94,7 @@ static inline void task_atexit(FAR struct tcb_s *tcb)
 * group exits, i.e., from higher to lower indices.
 */
 
-for (index = CONFIG_SCHED_ATEXIT_MAX-1; index >= 0; index--)
+for (index = CONFIG_SCHED_ATEXIT_MAX - 1; index >= 0; index--)
 {
 if (group->tg_atexitfunc[index])
 {
@@ -162,13 +162,13 @@ static inline void task_onexit(FAR struct tcb_s *tcb, int status)
 * when the task group exits, i.e., from higher to lower indices.
 */
 
-for (index = CONFIG_SCHED_ONEXIT_MAX-1; index >= 0; index--)
+for (index = CONFIG_SCHED_ONEXIT_MAX - 1; index >= 0; index--)
 {
 if (group->tg_onexitfunc[index])
 {
 /* Call the on_exit function */
 
-(*group->tg_onexitfunc[index])(status, group->tg_onexitarg[index]);
+(*group->tg_onexitfunc[index])(status, group->tg_onexitarg[index]);
 
 /* Nullify the on_exit function to prevent its reuse. */
 
@@ -613,14 +613,14 @@ void task_exithook(FAR struct tcb_s *tcb, int status, bool nonblocking)
 }
 
 #ifdef CONFIG_CANCELLATION_POINTS
-/* Mark the task as non-cancelable to avoid additional calls to exit()
-* due to any cancellation point logic that might get kicked off by
-* actions taken during exit processing.
-*/
+/* Mark the task as non-cancelable to avoid additional calls to exit()
+* due to any cancellation point logic that might get kicked off by
+* actions taken during exit processing.
+*/
 
-tcb->flags |= TCB_FLAG_NONCANCELABLE;
-tcb->flags &= ~TCB_FLAG_CANCEL_PENDING;
-tcb->cpcount = 0;
+tcb->flags |= TCB_FLAG_NONCANCELABLE;
+tcb->flags &= ~TCB_FLAG_CANCEL_PENDING;
+tcb->cpcount = 0;
 #endif
 
 #if defined(CONFIG_SCHED_ATEXIT) || defined(CONFIG_SCHED_ONEXIT)
@@ -100,7 +100,7 @@ int task_init(FAR struct tcb_s *tcb, const char *name, int priority,
 
 #ifndef CONFIG_DISABLE_PTHREAD
 DEBUGASSERT(tcb &&
-(tcb->flags & TCB_FLAG_TTYPE_MASK) != TCB_FLAG_TTYPE_PTHREAD);
+(tcb->flags & TCB_FLAG_TTYPE_MASK) != TCB_FLAG_TTYPE_PTHREAD);
 #endif
 
 /* Create a new task group */
@@ -187,7 +187,7 @@ static int posix_spawn_proxy(int argc, FAR char *argv[])
 #ifndef CONFIG_DISABLE_SIGNALS
 DEBUGASSERT(g_spawn_parms.file_actions ||
 (g_spawn_parms.attr &&
-(g_spawn_parms.attr->flags & POSIX_SPAWN_SETSIGMASK) != 0));
+(g_spawn_parms.attr->flags & POSIX_SPAWN_SETSIGMASK) != 0));
 #else
 DEBUGASSERT(g_spawn_parms.file_actions);
 #endif
@@ -82,81 +82,81 @@ int prctl(int option, ...)
 va_start(ap, option);
 switch (option)
 {
-case PR_SET_NAME:
-case PR_GET_NAME:
+case PR_SET_NAME:
+case PR_GET_NAME:
 #if CONFIG_TASK_NAME_SIZE > 0
-{
-/* Get the prctl arguments */
+{
+/* Get the prctl arguments */
 
-FAR char *name = va_arg(ap, FAR char *);
-int pid = va_arg(ap, int);
-FAR struct tcb_s *tcb;
+FAR char *name = va_arg(ap, FAR char *);
+int pid = va_arg(ap, int);
+FAR struct tcb_s *tcb;
 
-/* Get the TCB associated with the PID (handling the special case of
-* pid==0 meaning "this thread")
-*/
+/* Get the TCB associated with the PID (handling the special case of
+* pid==0 meaning "this thread")
+*/
 
-if (!pid)
-{
-tcb = this_task();
-}
-else
-{
-tcb = sched_gettcb(pid);
-}
+if (!pid)
+{
+tcb = this_task();
+}
+else
+{
+tcb = sched_gettcb(pid);
+}
 
-/* An invalid pid will be indicated by a NULL TCB returned from
-* sched_gettcb()
-*/
+/* An invalid pid will be indicated by a NULL TCB returned from
+* sched_gettcb()
+*/
 
-if (!tcb)
-{
-serr("ERROR: Pid does not correspond to a task: %d\n", pid);
-errcode = ESRCH;
-goto errout;
-}
+if (!tcb)
+{
+serr("ERROR: Pid does not correspond to a task: %d\n", pid);
+errcode = ESRCH;
+goto errout;
+}
 
-/* A pointer to the task name storage must also be provided */
+/* A pointer to the task name storage must also be provided */
 
-if (!name)
-{
-serr("ERROR: No name provide\n");
-errcode = EFAULT;
-goto errout;
-}
+if (!name)
+{
+serr("ERROR: No name provide\n");
+errcode = EFAULT;
+goto errout;
+}
 
-/* Now get or set the task name */
+/* Now get or set the task name */
 
-if (option == PR_SET_NAME)
-{
-/* Ensure that tcb->name will be null-terminated, truncating if
-* necessary.
-*/
+if (option == PR_SET_NAME)
+{
+/* Ensure that tcb->name will be null-terminated, truncating if
+* necessary.
+*/
 
-strncpy(tcb->name, name, CONFIG_TASK_NAME_SIZE);
-tcb->name[CONFIG_TASK_NAME_SIZE] = '\0';
-}
-else
-{
-/* The returned value will be null-terminated, truncating if
-* necessary.
-*/
+strncpy(tcb->name, name, CONFIG_TASK_NAME_SIZE);
+tcb->name[CONFIG_TASK_NAME_SIZE] = '\0';
+}
+else
+{
+/* The returned value will be null-terminated, truncating if
+* necessary.
+*/
 
-strncpy(name, tcb->name, CONFIG_TASK_NAME_SIZE-1);
-name[CONFIG_TASK_NAME_SIZE-1] = '\0';
-}
-}
-break;
+strncpy(name, tcb->name, CONFIG_TASK_NAME_SIZE - 1);
+name[CONFIG_TASK_NAME_SIZE - 1] = '\0';
+}
+}
+break;
 #else
-serr("ERROR: Option not enabled: %d\n", option);
-errcode = ENOSYS;
-goto errout;
+serr("ERROR: Option not enabled: %d\n", option);
+errcode = ENOSYS;
+goto errout;
 #endif
 
-default:
-serr("ERROR: Unrecognized option: %d\n", option);
-errcode = EINVAL;
-goto errout;
+default:
+serr("ERROR: Unrecognized option: %d\n", option);
+errcode = EINVAL;
+goto errout;
 }
 
 /* Not reachable unless CONFIG_TASK_NAME_SIZE is > 0. NOTE: This might
@@ -398,7 +398,7 @@ static int thread_schedsetup(FAR struct tcb_s *tcb, int priority,
 * affinity mask in this case.
 */
 
-task_inherit_affinity(tcb);
+task_inherit_affinity(tcb);
 #endif
 
 #ifndef CONFIG_DISABLE_SIGNALS
@@ -592,10 +592,10 @@ static inline int task_stackargsetup(FAR struct task_tcb_s *tcb,
 * argument and its NUL terminator in the string buffer.
 */
 
-stackargv[i+1] = str;
-nbytes = strlen(argv[i]) + 1;
+stackargv[i + 1] = str;
+nbytes = strlen(argv[i]) + 1;
 strcpy(str, argv[i]);
-str += nbytes;
+str += nbytes;
 }
 
 /* Put a terminator entry at the end of the argv[] array. Then save the
@@ -210,7 +210,7 @@ static int task_spawn_proxy(int argc, FAR char *argv[])
 #ifndef CONFIG_DISABLE_SIGNALS
 DEBUGASSERT(g_spawn_parms.file_actions ||
 (g_spawn_parms.attr &&
-(g_spawn_parms.attr->flags & POSIX_SPAWN_SETSIGMASK) != 0));
+(g_spawn_parms.attr->flags & POSIX_SPAWN_SETSIGMASK) != 0));
 #else
 DEBUGASSERT(g_spawn_parms.file_actions);
 #endif
@@ -323,9 +323,9 @@ static int task_spawn_proxy(int argc, FAR char *argv[])
 ****************************************************************************/
 
 int task_spawn(FAR pid_t *pid, FAR const char *name, main_t entry,
-FAR const posix_spawn_file_actions_t *file_actions,
-FAR const posix_spawnattr_t *attr,
-FAR char *const argv[], FAR char *const envp[])
+FAR const posix_spawn_file_actions_t *file_actions,
+FAR const posix_spawnattr_t *attr,
+FAR char *const argv[], FAR char *const envp[])
 {
 struct sched_param param;
 pid_t proxy;
@@ -333,23 +333,23 @@ int spawn_proxyattrs(FAR const posix_spawnattr_t *attr,
 {
 switch (entry->action)
 {
-case SPAWN_FILE_ACTION_CLOSE:
-ret = spawn_close((FAR struct spawn_close_file_action_s *)entry);
-break;
+case SPAWN_FILE_ACTION_CLOSE:
+ret = spawn_close((FAR struct spawn_close_file_action_s *)entry);
+break;
 
-case SPAWN_FILE_ACTION_DUP2:
-ret = spawn_dup2((FAR struct spawn_dup2_file_action_s *)entry);
-break;
+case SPAWN_FILE_ACTION_DUP2:
+ret = spawn_dup2((FAR struct spawn_dup2_file_action_s *)entry);
+break;
 
-case SPAWN_FILE_ACTION_OPEN:
-ret = spawn_open((FAR struct spawn_open_file_action_s *)entry);
-break;
+case SPAWN_FILE_ACTION_OPEN:
+ret = spawn_open((FAR struct spawn_open_file_action_s *)entry);
+break;
 
-case SPAWN_FILE_ACTION_NONE:
-default:
-serr("ERROR: Unknown action: %d\n", entry->action);
-ret = EINVAL;
-break;
+case SPAWN_FILE_ACTION_NONE:
+default:
+serr("ERROR: Unknown action: %d\n", entry->action);
+ret = EINVAL;
+break;
 }
 }
 }
@@ -77,7 +77,7 @@ void task_starthook(FAR struct task_tcb_s *tcb, starthook_t starthook,
 
 #ifndef CONFIG_DISABLE_PTHREAD
 DEBUGASSERT(tcb &&
-(tcb->cmn.flags & TCB_FLAG_TTYPE_MASK) != TCB_FLAG_TTYPE_PTHREAD);
+(tcb->cmn.flags & TCB_FLAG_TTYPE_MASK) != TCB_FLAG_TTYPE_PTHREAD);
 #endif
 
 /* Set up the start hook */
@@ -50,7 +50,7 @@
 
 #include "sched/sched.h"
 #ifndef CONFIG_DISABLE_SIGNALS
-# include "signal/signal.h"
+# include "signal/signal.h"
 #endif
 #include "task/task.h"
 
@@ -332,15 +332,15 @@ int timer_settime(timer_t timerid, int flags,
 
 if (value->it_interval.tv_sec > 0 || value->it_interval.tv_nsec > 0)
 {
-(void)clock_time2ticks(&value->it_interval, &delay);
+(void)clock_time2ticks(&value->it_interval, &delay);
 
-/* REVISIT: Should pt_delay be ssystime_t? */
+/* REVISIT: Should pt_delay be ssystime_t? */
 
-timer->pt_delay = (int)delay;
+timer->pt_delay = (int)delay;
 }
 else
 {
-timer->pt_delay = 0;
+timer->pt_delay = 0;
 }
 
 /* We need to disable timer interrupts through the following section so
@@ -293,8 +293,8 @@ static void lpwork_restoreworker(pid_t wpid, uint8_t reqprio)
 wtcb->pend_reprios[index] = wtcb->pend_reprios[selected];
 }
 
-wtcb->npend_reprio = selected;
-break;
+wtcb->npend_reprio = selected;
+break;
 }
 }
 }
@@ -201,10 +201,10 @@ int work_lpstart(void)
 
 for (wndx = 0; wndx < CONFIG_SCHED_LPNTHREADS; wndx++)
 {
-pid = kernel_thread(LPWORKNAME, CONFIG_SCHED_LPWORKPRIORITY,
-CONFIG_SCHED_LPWORKSTACKSIZE,
-(main_t)work_lpthread,
-(FAR char * const *)NULL);
+pid = kernel_thread(LPWORKNAME, CONFIG_SCHED_LPWORKPRIORITY,
+CONFIG_SCHED_LPWORKSTACKSIZE,
+(main_t)work_lpthread,
+(FAR char * const *)NULL);
 
 DEBUGASSERT(pid > 0);
 if (pid < 0)
@@ -233,7 +233,7 @@ void work_process(FAR struct kwork_wqueue_s *wqueue, systime_t period, int wndx)
 
 wqueue->worker[wndx].busy = false;
 DEBUGVERIFY(sigwaitinfo(&set, NULL));
-wqueue->worker[wndx].busy = true;
+wqueue->worker[wndx].busy = true;
 }
 else
 #endif