Mirror of https://github.com/apache/nuttx.git (synced 2025-01-12 22:08:35 +08:00)

Compare commits: 9 commits, 16bd7ad7ed ... 1fa95394c0
Commits:
1fa95394c0
aa0aecbd80
39780fdae1
ee2f3df2ff
ff488133c9
48846954d8
657247bda8
be40c01ddd
cedd250c1f

18 changed files with 177 additions and 75 deletions
@@ -546,7 +546,9 @@ ifeq ($(CONFIG_PIC),y)
# Generate an executable elf, need to ignore undefined symbols
LDELFFLAGS += --unresolved-symbols=ignore-in-object-files --emit-relocs
else
LDELFFLAGS += -r
ifneq ($(CONFIG_BINFMT_ELF_EXECUTABLE),y)
LDELFFLAGS += -r
endif
endif

LDELFFLAGS += -e main -T $(call CONVERT_PATH,$(TOPDIR)$(DELIM)libs$(DELIM)libc$(DELIM)modlib$(DELIM)gnu-elf.ld)

@@ -15,6 +15,7 @@ config ARCH_CHIP_QEMU_CORTEXA7
	bool "Qemu virtual Processor (cortex-a7)"
	select ARCH_CORTEXA7
	select ARCH_HAVE_ADDRENV
	select ARCH_HAVE_ELF_EXECUTABLE
	select ARCH_HAVE_LOWVECTORS
	select ARCH_HAVE_MULTICPU
	select ARCH_NEED_ADDRENV_MAPPING

@@ -3215,8 +3215,6 @@ static void sam_callback(void *arg)
  ret = work_cancel(LPWORK, &priv->cbwork);
  if (ret < 0)
    {
      /* NOTE: Currently, work_cancel only returns success */

      lcderr("ERROR: Failed to cancel work: %d\n", ret);
    }

@@ -3225,8 +3223,6 @@ static void sam_callback(void *arg)
                   priv->cbarg, 0);
  if (ret < 0)
    {
      /* NOTE: Currently, work_queue only returns success */

      lcderr("ERROR: Failed to schedule work: %d\n", ret);
    }
}

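For orientation, the surrounding sam_callback() logic in these hunks follows a cancel-then-requeue pattern on the low-priority work queue. A minimal sketch with hypothetical struct and field names; only work_cancel(), work_queue(), LPWORK and lcderr() come from the hunks above:

/* Sketch only: cancel any pending callback work, then reschedule it.
 * struct example_dev_s and its fields are illustrative placeholders.
 */

static void example_callback(FAR void *arg)
{
  FAR struct example_dev_s *priv = (FAR struct example_dev_s *)arg;
  int ret;

  ret = work_cancel(LPWORK, &priv->cbwork);
  if (ret < 0)
    {
      lcderr("ERROR: Failed to cancel work: %d\n", ret);
    }

  ret = work_queue(LPWORK, &priv->cbwork, priv->callback,
                   priv->cbarg, 0);
  if (ret < 0)
    {
      lcderr("ERROR: Failed to schedule work: %d\n", ret);
    }
}
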
@@ -3355,8 +3355,6 @@ static void sam_callback(void *arg)
  ret = work_cancel(LPWORK, &priv->cbwork);
  if (ret < 0)
    {
      /* NOTE: Currently, work_cancel only returns success */

      mcerr("ERROR: Failed to cancel work: %d\n", ret);
    }

@@ -3365,8 +3363,6 @@ static void sam_callback(void *arg)
                   priv->cbarg, 0);
  if (ret < 0)
    {
      /* NOTE: Currently, work_queue only returns success */

      mcerr("ERROR: Failed to schedule work: %d\n", ret);
    }
}

@@ -30,6 +30,7 @@
#include <stdlib.h>

#include <nuttx/kmalloc.h>
#include <nuttx/spinlock.h>
#include <nuttx/vhost/vhost.h>
#include <nuttx/wqueue.h>

@@ -151,7 +152,7 @@ static int vhost_rng_probe(FAR struct vhost_device *hdev)

  vqnames[0] = "virtio_rng";
  callback[0] = vhost_rng_handler;
  ret = vhost_create_virtqueues(hdev, 0, 1, vqnames, callback);
  ret = vhost_create_virtqueues(hdev, 0, 1, vqnames, callback, NULL);
  if (ret < 0)
    {
      vhosterr("virtio_device_create_virtqueue failed, ret=%d\n", ret);

@@ -83,8 +83,15 @@ static struct vhost_bus_s g_vhost_bus =

static bool vhost_status_driver_ok(FAR struct vhost_device *hdev)
{
  uint8_t status = vhost_get_status(hdev);
  bool driver_ok = false;
  uint8_t status;
  int ret;

  ret = vhost_get_status(hdev, &status);
  if (ret)
    {
      return driver_ok;
    }

  /* Busy wait until the remote is ready */

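For reference, a minimal sketch of how a caller can check the virtio DRIVER_OK bit with the reworked accessor, assuming (as the hunk above shows) that vhost_get_status() now returns non-zero on failure and writes the status byte through its second argument; the helper name and the VIRTIO_CONFIG_STATUS_DRIVER_OK constant are illustrative:

/* Sketch only: returns true once the remote driver has set DRIVER_OK */

static bool example_driver_ready(FAR struct vhost_device *hdev)
{
  uint8_t status;

  if (vhost_get_status(hdev, &status) != 0)
    {
      return false;
    }

  return (status & VIRTIO_CONFIG_STATUS_DRIVER_OK) != 0;
}
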
@@ -659,8 +659,6 @@ static void automount_timeout(wdparm_t arg)
  ret = work_queue(LPWORK, &priv->work, automount_worker, priv, 0);
  if (ret < 0)
    {
      /* NOTE: Currently, work_queue only returns success */

      ferr("ERROR: Failed to schedule work: %d\n", ret);
    }
}

@@ -772,8 +770,6 @@ static int automount_interrupt(FAR const struct automount_lower_s *lower,
                   priv->lower->ddelay);
  if (ret < 0)
    {
      /* NOTE: Currently, work_queue only returns success */

      ferr("ERROR: Failed to schedule work: %d\n", ret);
    }
  else

@@ -848,8 +844,6 @@ FAR void *automount_initialize(FAR const struct automount_lower_s *lower)
                   priv->lower->ddelay);
  if (ret < 0)
    {
      /* NOTE: Currently, work_queue only returns success */

      ferr("ERROR: Failed to schedule work: %d\n", ret);
    }

@@ -29,6 +29,8 @@

#include <nuttx/config.h>

#ifndef __ASSEMBLY__

#ifdef CONFIG_BUILD_KERNEL
#  include <signal.h>
#endif

@@ -40,6 +42,8 @@

#include <arch/arch.h>

#endif /* __ASSEMBLY__ */

#ifdef CONFIG_ARCH_ADDRENV

/****************************************************************************

@@ -241,6 +245,9 @@
  (CONFIG_ARCH_PGPOOL_VBASE + CONFIG_ARCH_PGPOOL_SIZE)

#endif

#ifndef __ASSEMBLY__

/****************************************************************************
 * Public Type Definitions
 ****************************************************************************/

@@ -251,8 +258,6 @@ struct tcb_s; /* Forward reference to TCB */
 * Public Types
 ****************************************************************************/

#ifndef __ASSEMBLY__

struct addrenv_s
{
  struct arch_addrenv_s addrenv; /* The address environment page directory */

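The new __ASSEMBLY__ guards let this header be included from the preprocessed linker-script template introduced later in this diff, where only macros are wanted. A minimal sketch of that usage, assuming ARCH_DATA_RESERVE_SIZE is among the macros that remain visible:

/* Sketch only: with __ASSEMBLY__ defined, signal.h, arch/arch.h and the
 * C type definitions above are skipped, so a linker-script template can
 * safely pull in just the address-environment macros.
 */

#define __ASSEMBLY__
#include <nuttx/addrenv.h>

#define DATA (CONFIG_ARCH_DATA_VBASE + ARCH_DATA_RESERVE_SIZE)
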
libs/libc/.gitignore (vendored, 1 line changed)

@@ -1,2 +1,3 @@
/exec_symtab.c
/modlib_symtab.c
modlib/gnu-elf.ld

@@ -183,6 +183,10 @@ context:: bin kbin
ifeq ($(CONFIG_LIBC_ZONEINFO_ROMFS),y)
	$(Q) $(MAKE) -C zoneinfo context BIN=$(BIN)
endif
ifeq ($(CONFIG_LIBC_MODLIB),y)
	$(Q) $(MAKE) -C modlib context
endif

# Dependencies

@@ -210,6 +214,7 @@ depend:: .depend

clean::
	$(Q) $(MAKE) -C zoneinfo clean BIN=$(BIN)
	$(Q) $(MAKE) -C modlib clean
	$(call DELFILE, $(BIN))
	$(call DELFILE, $(KBIN))
	$(call CLEAN)

@@ -218,6 +223,7 @@ clean::

distclean:: clean
	$(Q) $(MAKE) -C zoneinfo distclean BIN=$(BIN)
	$(Q) $(MAKE) -C modlib distclean
	$(call DELFILE, exec_symtab.c)
	$(call DELFILE, .depend)
	$(call DELDIR, bin)

libs/libc/modlib/Makefile (new file, 40 lines)

@@ -0,0 +1,40 @@
############################################################################
# libs/libc/modlib/Makefile
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership. The
# ASF licenses this file to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
############################################################################

include $(TOPDIR)/Make.defs

# Generate gnu-elf.ld from gnu-elf.ld.in

gnu-elf.ld: gnu-elf.ld.in
	$(call PREPROCESS, $<, $@)

# Create initial context

context: gnu-elf.ld

.PHONY: context clean distclean

clean:
	$(call CLEAN)

distclean: clean
	$(call DELFILE, gnu-elf.ld)

@@ -1,5 +1,5 @@
/****************************************************************************
 * libs/libc/modlib/gnu-elf.ld
 * libs/libc/modlib/gnu-elf.ld.in
 *
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with

@@ -18,9 +18,23 @@
 *
 ****************************************************************************/

#include <nuttx/config.h>

#if defined(CONFIG_BUILD_KERNEL) && defined(CONFIG_BINFMT_ELF_EXECUTABLE)
#  define __ASSEMBLY__
#  include <nuttx/addrenv.h>

#  define TEXT CONFIG_ARCH_TEXT_VBASE
#  define DATA CONFIG_ARCH_DATA_VBASE + ARCH_DATA_RESERVE_SIZE
#else
#  define TEXT 0x0
#  define DATA
#endif

SECTIONS
{
  .text 0x00000000 :
  .text TEXT :
    {
      _stext = . ;
      *(.text)

@@ -58,7 +72,7 @@ SECTIONS
      _erodata = . ;
    }

  .data :
  .data DATA :
    {
      _sdata = . ;
      *(.data)

@@ -397,16 +397,17 @@ retry:

  pool->nalloc++;
  spin_unlock_irqrestore(&pool->lock, flags);
  blk = kasan_unpoison(blk, pool->blocksize);
#ifdef CONFIG_MM_FILL_ALLOCATIONS
  memset(blk, MM_ALLOC_MAGIC, pool->blocksize);
#endif

#if CONFIG_MM_BACKTRACE >= 0
  mempool_add_backtrace(pool, (FAR struct mempool_backtrace_s *)
                              ((FAR char *)blk + pool->blocksize));
#endif

  blk = kasan_unpoison(blk, pool->blocksize);
#ifdef CONFIG_MM_FILL_ALLOCATIONS
  memset(blk, MM_ALLOC_MAGIC, pool->blocksize);
#endif

  return blk;
}

@@ -58,23 +58,20 @@ static int work_qcancel(FAR struct kwork_wqueue_s *wqueue, bool sync,
   * new work is typically added to the work queue from interrupt handlers.
   */

  flags = enter_critical_section();
  flags = spin_lock_irqsave(&g_wqueue_lock);
  if (work->worker != NULL)
    {
      /* Remove the entry from the work queue and make sure that it is
       * marked as available (i.e., the worker field is nullified).
       */

      if (WDOG_ISACTIVE(&work->u.timer))
        {
          wd_cancel(&work->u.timer);
        }
      else
      work->worker = NULL;
      wd_cancel(&work->u.timer);
      if (dq_inqueue((FAR dq_entry_t *)work, &wqueue->q))
        {
          dq_rem((FAR dq_entry_t *)work, &wqueue->q);
        }

      work->worker = NULL;
      ret = OK;
    }
  else if (!up_interrupt_context() && !sched_idletask() && sync)

@@ -86,14 +83,15 @@ static int work_qcancel(FAR struct kwork_wqueue_s *wqueue, bool sync,
          if (wqueue->worker[wndx].work == work &&
              wqueue->worker[wndx].pid != nxsched_gettid())
            {
              wqueue->worker[wndx].wait_count--;
              spin_unlock_irqrestore(&g_wqueue_lock, flags);
              nxsem_wait_uninterruptible(&wqueue->worker[wndx].wait);
              ret = 1;
              break;
              return 1;
            }
        }
    }

  leave_critical_section(flags);
  spin_unlock_irqrestore(&g_wqueue_lock, flags);
  return ret;
}

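Across these scheduler hunks the conversion is the same: a global enter_critical_section()/leave_critical_section() pair is replaced with a dedicated spinlock. A minimal sketch of the before/after shape, assuming g_wqueue_lock as declared later in this diff and a placeholder protected_update():

irqstate_t flags;

/* Before: a global critical section serializes against interrupts
 * and every other CPU.
 */

flags = enter_critical_section();
protected_update();                          /* placeholder */
leave_critical_section(flags);

/* After: disable local interrupts and take only the work queue lock */

flags = spin_lock_irqsave(&g_wqueue_lock);
protected_update();                          /* placeholder */
spin_unlock_irqrestore(&g_wqueue_lock, flags);
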
@@ -87,6 +87,8 @@ static dq_queue_t g_notifier_free;

static dq_queue_t g_notifier_pending;

static spinlock_t g_work_notifier_lock;

/****************************************************************************
 * Private Functions
 ****************************************************************************/

@@ -166,17 +168,21 @@ static void work_notifier_worker(FAR void *arg)

  /* Disable interrupts very briefly. */

  flags = enter_critical_section();
  flags = spin_lock_irqsave(&g_work_notifier_lock);

  /* Remove the notification from the pending list */

  dq_rem(&notifier->entry, &g_notifier_pending);
  notifier = work_notifier_find(notifier->key);
  if (notifier != NULL)
    {
      dq_rem(&notifier->entry, &g_notifier_pending);

      /* Put the notification to the free list */
      /* Put the notification to the free list */

      dq_addlast(&notifier->entry, &g_notifier_free);
      dq_addlast(&notifier->entry, &g_notifier_free);
    }

  leave_critical_section(flags);
  spin_unlock_irqrestore(&g_work_notifier_lock, flags);
}

/****************************************************************************

@@ -213,14 +219,14 @@ int work_notifier_setup(FAR struct work_notifier_s *info)

  /* Disable interrupts very briefly. */

  flags = enter_critical_section();
  flags = spin_lock_irqsave(&g_work_notifier_lock);

  /* Try to get the entry from the free list */

  notifier = (FAR struct work_notifier_entry_s *)
             dq_remfirst(&g_notifier_free);

  leave_critical_section(flags);
  spin_unlock_irqrestore(&g_work_notifier_lock, flags);

  if (notifier == NULL)
    {

@@ -245,7 +251,7 @@ int work_notifier_setup(FAR struct work_notifier_s *info)

  /* Disable interrupts very briefly. */

  flags = enter_critical_section();
  flags = spin_lock_irqsave(&g_work_notifier_lock);

  /* Generate a unique key for this notification */

@@ -262,7 +268,7 @@ int work_notifier_setup(FAR struct work_notifier_s *info)
      dq_addlast(&notifier->entry, &g_notifier_pending);
      ret = notifier->key;

      leave_critical_section(flags);
      spin_unlock_irqrestore(&g_work_notifier_lock, flags);
    }

  return ret;

@@ -293,7 +299,7 @@ void work_notifier_teardown(int key)

  /* Disable interrupts very briefly. */

  flags = enter_critical_section();
  flags = spin_lock_irqsave(&g_work_notifier_lock);

  /* Find the entry matching this key in the g_notifier_pending list.  We
   * assume that there is only one.

@@ -304,19 +310,18 @@ void work_notifier_teardown(int key)
    {
      /* Cancel the work, this may be waiting */

      if (work_cancel_sync(notifier->info.qid, &notifier->work) != 1)
        {
          /* Remove the notification from the pending list */
      work_cancel(notifier->info.qid, &notifier->work);

          dq_rem(&notifier->entry, &g_notifier_pending);
      /* Remove the notification from the pending list */

          /* Put the notification to the free list */
      dq_rem(&notifier->entry, &g_notifier_pending);

          dq_addlast(&notifier->entry, &g_notifier_free);
        }
      /* Put the notification to the free list */

      dq_addlast(&notifier->entry, &g_notifier_free);
    }

  leave_critical_section(flags);
  spin_unlock_irqrestore(&g_work_notifier_lock, flags);
}

/****************************************************************************

@@ -352,7 +357,7 @@ void work_notifier_signal(enum work_evtype_e evtype,
   * the notifications have been sent.
   */

  flags = enter_critical_section();
  flags = spin_lock_irqsave(&g_work_notifier_lock);
  sched_lock();

  /* Process the notification at the head of the pending list until the

@@ -397,7 +402,7 @@ void work_notifier_signal(enum work_evtype_e evtype,
    }

  sched_unlock();
  leave_critical_section(flags);
  spin_unlock_irqrestore(&g_work_notifier_lock, flags);
}

#endif /* CONFIG_WQUEUE_NOTIFIER */

@@ -47,11 +47,10 @@
#define queue_work(wqueue, work) \
  do \
    { \
      int sem_count; \
      dq_addlast((FAR dq_entry_t *)(work), &(wqueue)->q); \
      nxsem_get_value(&(wqueue)->sem, &sem_count); \
      if (sem_count < 0) /* There are threads waiting for sem. */ \
      if ((wqueue)->wait_count < 0) /* There are threads waiting for sem. */ \
        { \
          (wqueue)->wait_count++; \
          nxsem_post(&(wqueue)->sem); \
        } \
    } \

@@ -68,24 +67,30 @@
static void work_timer_expiry(wdparm_t arg)
{
  FAR struct work_s *work = (FAR struct work_s *)arg;
  irqstate_t flags = enter_critical_section();
  irqstate_t flags = spin_lock_irqsave(&g_wqueue_lock);
  sched_lock();

  queue_work(work->wq, work);
  leave_critical_section(flags);
  /* We have been canceled */

  if (work->worker != NULL)
    {
      queue_work(work->wq, work);
    }

  spin_unlock_irqrestore(&g_wqueue_lock, flags);
  sched_unlock();
}

static bool work_is_canceling(FAR struct kworker_s *kworkers, int nthreads,
                              FAR struct work_s *work)
{
  int semcount;
  int wndx;

  for (wndx = 0; wndx < nthreads; wndx++)
    {
      if (kworkers[wndx].work == work)
        {
          nxsem_get_value(&kworkers[wndx].wait, &semcount);
          if (semcount < 0)
          if (kworkers[wndx].wait_count < 0)
            {
              return true;
            }

@@ -145,13 +150,23 @@ int work_queue_wq(FAR struct kwork_wqueue_s *wqueue,
   * task logic or from interrupt handling logic.
   */

  flags = enter_critical_section();
  flags = spin_lock_irqsave(&g_wqueue_lock);
  sched_lock();

  /* Remove the entry from the timer and work queue. */

  if (work->worker != NULL)
    {
      work_cancel_wq(wqueue, work);
      /* Remove the entry from the work queue and make sure that it is
       * marked as available (i.e., the worker field is nullified).
       */

      work->worker = NULL;
      wd_cancel(&work->u.timer);
      if (dq_inqueue((FAR dq_entry_t *)work, &wqueue->q))
        {
          dq_rem((FAR dq_entry_t *)work, &wqueue->q);
        }
    }

  if (work_is_canceling(wqueue->worker, wqueue->nthreads, work))

@@ -177,7 +192,8 @@ int work_queue_wq(FAR struct kwork_wqueue_s *wqueue,
    }

out:
  leave_critical_section(flags);
  spin_unlock_irqrestore(&g_wqueue_lock, flags);
  sched_unlock();
  return ret;
}

@@ -104,6 +104,8 @@ struct lp_wqueue_s g_lpwork =

#endif /* CONFIG_SCHED_LPWORK */

spinlock_t g_wqueue_lock = SP_UNLOCKED;

/****************************************************************************
 * Private Functions
 ****************************************************************************/

@@ -138,7 +140,6 @@ static int work_thread(int argc, FAR char *argv[])
  worker_t worker;
  irqstate_t flags;
  FAR void *arg;
  int semcount;

  /* Get the handle from argv */

@@ -147,7 +148,8 @@ static int work_thread(int argc, FAR char *argv[])
  kworker = (FAR struct kworker_s *)
            ((uintptr_t)strtoul(argv[2], NULL, 16));

  flags = enter_critical_section();
  flags = spin_lock_irqsave(&g_wqueue_lock);
  sched_lock();

  /* Loop forever */

@@ -189,9 +191,12 @@ static int work_thread(int argc, FAR char *argv[])
           * performed... we don't have any idea how long this will take!
           */

          leave_critical_section(flags);
          spin_unlock_irqrestore(&g_wqueue_lock, flags);
          sched_unlock();

          CALL_WORKER(worker, arg);
          flags = enter_critical_section();
          flags = spin_lock_irqsave(&g_wqueue_lock);
          sched_lock();

          /* Mark the thread un-busy */

@@ -199,9 +204,9 @@ static int work_thread(int argc, FAR char *argv[])

          /* Check if someone is waiting; if so, wake it up */

          nxsem_get_value(&kworker->wait, &semcount);
          while (semcount++ < 0)
          while (kworker->wait_count < 0)
            {
              kworker->wait_count++;
              nxsem_post(&kworker->wait);
            }
        }

@@ -211,10 +216,17 @@ static int work_thread(int argc, FAR char *argv[])
       * posted.
       */

      wqueue->wait_count--;
      spin_unlock_irqrestore(&g_wqueue_lock, flags);
      sched_unlock();

      nxsem_wait_uninterruptible(&wqueue->sem);
      flags = spin_lock_irqsave(&g_wqueue_lock);
      sched_lock();
    }

  leave_critical_section(flags);
  spin_unlock_irqrestore(&g_wqueue_lock, flags);
  sched_unlock();

  nxsem_post(&wqueue->exsem);
  return OK;

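The wait_count bookkeeping added in these hunks replaces the nxsem_get_value() probes. A minimal sketch of the handshake, performed under g_wqueue_lock and using only names that appear in the hunks:

/* Worker side (work_thread): advertise that this thread is about to
 * block on the queue semaphore, then sleep outside the lock.
 */

wqueue->wait_count--;
spin_unlock_irqrestore(&g_wqueue_lock, flags);
nxsem_wait_uninterruptible(&wqueue->sem);
flags = spin_lock_irqsave(&g_wqueue_lock);

/* Producer side (queue_work macro): wake a worker only if one is
 * actually waiting, and consume its wait slot while still holding the
 * lock so posts and waits stay balanced.
 */

dq_addlast((FAR dq_entry_t *)work, &wqueue->q);
if (wqueue->wait_count < 0)
  {
    wqueue->wait_count++;
    nxsem_post(&wqueue->sem);
  }
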
@@ -277,6 +289,7 @@ static int work_thread_create(FAR const char *name, int priority,
        }

      wqueue->worker[wndx].pid = pid;
      wqueue->worker[wndx].wait_count = 0;
    }

  sched_unlock();

@@ -337,6 +350,7 @@ FAR struct kwork_wqueue_s *work_queue_create(FAR const char *name,
  nxsem_init(&wqueue->sem, 0, 0);
  nxsem_init(&wqueue->exsem, 0, 0);
  wqueue->nthreads = nthreads;
  wqueue->wait_count = 0;

  /* Create the work queue thread pool */

@@ -35,6 +35,7 @@
#include <nuttx/clock.h>
#include <nuttx/queue.h>
#include <nuttx/wqueue.h>
#include <nuttx/spinlock.h>

#ifdef CONFIG_SCHED_WORKQUEUE

@@ -58,6 +59,7 @@ struct kworker_s
  pid_t pid;                  /* The task ID of the worker thread */
  FAR struct work_s *work;    /* The work structure */
  sem_t wait;                 /* Sync waiting for worker done */
  volatile int16_t wait_count;
};

/* This structure defines the state of one kernel-mode work queue */

@@ -69,6 +71,7 @@ struct kwork_wqueue_s
  sem_t exsem;                /* Sync waiting for thread exit */
  uint8_t nthreads;           /* Number of worker threads */
  bool exit;                  /* A flag to request the thread to exit */
  volatile int16_t wait_count;
  struct kworker_s worker[0]; /* Describes a worker thread */
};

@@ -126,6 +129,8 @@ extern struct hp_wqueue_s g_hpwork;
extern struct lp_wqueue_s g_lpwork;
#endif

extern spinlock_t g_wqueue_lock;

/****************************************************************************
 * Public Function Prototypes
 ****************************************************************************/