mirror of https://github.com/apache/nuttx.git
If there are multiple low-priority worker threads, only one needs to perform garbage collection
parent 370b48d1e6
commit 0218f01f12

4 changed files with 94 additions and 53 deletions
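In outline, the change gives each low-priority worker one of two behaviors: worker 0 keeps the periodic behavior (garbage collection plus a timed poll of the queue), while every other worker passes a poll period of zero to work_process() and blocks until it is signalled. Before the diffs, here is a minimal user-space sketch of that pattern. It is illustrative only: it uses POSIX pthreads, SIGUSR1 stands in for NuttX's SIGWORK, and names such as worker() and NWORKERS are not from the commit.

/* Illustrative sketch only; not NuttX code. Build with -pthread. */

#include <pthread.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define NWORKERS 3
#define SIGWORK  SIGUSR1              /* Stand-in for NuttX's SIGWORK */

static void *worker(void *arg)
{
  int wndx = (int)(intptr_t)arg;
  sigset_t set;

  sigemptyset(&set);
  sigaddset(&set, SIGWORK);

  for (;;)
    {
      if (wndx == 0)
        {
          /* Only worker 0 performs the periodic clean-up and polls the
           * queue on a fixed period (period > 0 in work_process terms).
           */

          puts("worker 0: garbage collection + timed poll");
          usleep(500 * 1000);         /* Stand-in for g_lpwork.delay */
        }
      else
        {
          /* The other workers block indefinitely until signalled that new
           * work is available (period == 0 in work_process terms).
           */

          sigwaitinfo(&set, NULL);
          printf("worker %d: woken by SIGWORK\n", wndx);
        }
    }

  return NULL;
}

int main(void)
{
  pthread_t tid[NWORKERS];
  sigset_t set;
  int i;

  /* Block SIGWORK in every thread so that it can only be consumed by the
   * sigwaitinfo() calls in the idle workers.
   */

  sigemptyset(&set);
  sigaddset(&set, SIGWORK);
  pthread_sigmask(SIG_BLOCK, &set, NULL);

  for (i = 0; i < NWORKERS; i++)
    {
      pthread_create(&tid[i], NULL, worker, (void *)(intptr_t)i);
    }

  /* Wake one idle worker, as queueing new work would */

  sleep(2);
  kill(getpid(), SIGWORK);

  pthread_join(tid[0], NULL);         /* Workers loop forever */
  return 0;
}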
@@ -108,22 +108,27 @@ static int work_hpthread(int argc, char *argv[])
   for (;;)
     {
-      /* First, perform garbage collection. This cleans-up memory de-allocations
-       * that were queued because they could not be freed in that execution
-       * context (for example, if the memory was freed from an interrupt handler).
+#ifndef CONFIG_SCHED_LPWORK
+      /* First, perform garbage collection. This cleans-up memory
+       * de-allocations that were queued because they could not be freed in
+       * that execution context (for example, if the memory was freed from
+       * an interrupt handler).
+       *
        * NOTE: If the work thread is disabled, this clean-up is performed by
-       * the IDLE thread (at a very, very low priority).
+       * the IDLE thread (at a very, very low priority). If the low-priority
+       * work thread is enabled, then the garbage collection is done on that
+       * thread instead.
        */
 
-#ifndef CONFIG_SCHED_LPWORK
       sched_garbagecollection();
 #endif
 
-      /* Then process queued work. We need to keep interrupts disabled while
-       * we process items in the work list.
+      /* Then process queued work. work_process will not return until: (1)
+       * there is no further work in the work queue, and (2) the polling
+       * period provided by g_hpwork.delay expires.
        */
 
-      work_process((FAR struct kwork_wqueue_s *)&g_hpwork, 0);
+      work_process((FAR struct kwork_wqueue_s *)&g_hpwork, g_hpwork.delay, 0);
     }
 
   return OK; /* To keep some compilers happy */
@@ -106,52 +106,65 @@ struct lp_wqueue_s g_lpwork;
 static int work_lpthread(int argc, char *argv[])
 {
 #if CONFIG_SCHED_LPNTHREADS > 0
   int wndx;
-  {
-    pid_t me = getpid();
-    int i;
-
-    /* Check each entry if we have to */
-
-    for (wndx = 0, i = 0; i < CONFIG_SCHED_LPNTHREADS; i++)
-      {
-        if (g_lpwork.worker[i].pid == me)
-          {
-            wndx = i;
-            break;
-          }
-      }
-
-    DEBUGASSERT(i < CONFIG_SCHED_LPNTHREADS);
-  }
+  pid_t me = getpid();
+  int i;
+
+  /* Find out the thread index by searching the workers in g_lpwork */
+
+  for (wndx = 0, i = 0; i < CONFIG_SCHED_LPNTHREADS; i++)
+    {
+      if (g_lpwork.worker[i].pid == me)
+        {
+          wndx = i;
+          break;
+        }
+    }
+
+  DEBUGASSERT(i < CONFIG_SCHED_LPNTHREADS);
 #endif
 
   /* Loop forever */
 
   for (;;)
     {
-      /* First, perform garbage collection. This cleans-up memory de-allocations
-       * that were queued because they could not be freed in that execution
-       * context (for example, if the memory was freed from an interrupt handler).
-       * NOTE: If the work thread is disabled, this clean-up is performed by
-       * the IDLE thread (at a very, very low priority).
-       *
-       * In the event of multiple low priority threads, on index == 0 will do
-       * the garbage collection.
-       */
-
 #if CONFIG_SCHED_LPNTHREADS > 0
-      if (wndx == 0)
+      /* Thread 0 is special. Only thread 0 performs periodic garbage collection */
+
+      if (wndx > 0)
         {
-          sched_garbagecollection();
+          /* The other threads will perform work, waiting indefinitely until
+           * signalled for the next work availability.
+           *
+           * The special value of zero for the poll period instructs work_process
+           * to wait indefinitely until a signal is received.
+           */
+
+          work_process((FAR struct kwork_wqueue_s *)&g_lpwork, 0, wndx);
         }
+      else
 #endif
+        {
+          /* Perform garbage collection. This cleans-up memory de-allocations
+           * that were queued because they could not be freed in that execution
+           * context (for example, if the memory was freed from an interrupt handler).
+           * NOTE: If the work thread is disabled, this clean-up is performed by
+           * the IDLE thread (at a very, very low priority).
+           *
+           * In the event of multiple low-priority threads, only index == 0 will do
+           * the garbage collection.
+           */
 
-      /* Then process queued work. We need to keep interrupts disabled while
-       * we process items in the work list.
-       */
+          sched_garbagecollection();
 
-      work_process((FAR struct kwork_wqueue_s *)&g_lpwork, wndx);
+          /* Then process queued work. work_process will not return until:
+           * (1) there is no further work in the work queue, and (2) the polling
+           * period provided by g_lpwork.delay expires.
+           */
+
+          work_process((FAR struct kwork_wqueue_s *)&g_lpwork, g_lpwork.delay, 0);
+        }
     }
 
   return OK; /* To keep some compilers happy */
@@ -41,6 +41,7 @@
 
+#include <stdint.h>
 #include <unistd.h>
 #include <signal.h>
 #include <queue.h>
 
 #include <nuttx/clock.h>
@@ -107,7 +108,7 @@
  *
  ****************************************************************************/
 
-void work_process(FAR struct kwork_wqueue_s *wqueue, int wndx)
+void work_process(FAR struct kwork_wqueue_s *wqueue, uint32_t period, int wndx)
 {
   volatile FAR struct work_s *work;
   worker_t worker;
@@ -123,7 +124,7 @@ void work_process(FAR struct kwork_wqueue_s *wqueue, int wndx)
    * we process items in the work list.
    */
 
-  next = wqueue->delay;
+  next = period;
   flags = irqsave();
 
   /* Get the time that we started this polling cycle in clock ticks. */
@@ -221,21 +222,41 @@ void work_process(FAR struct kwork_wqueue_s *wqueue, int wndx)
         }
     }
 
-  /* Get the delay (in clock ticks) since we started the sampling */
-
-  elapsed = clock_systimer() - stick;
-  if (elapsed <= wqueue->delay)
+#if defined(CONFIG_SCHED_LPWORK) && CONFIG_SCHED_LPNTHREADS > 0
+  /* Value of zero for period means that we should wait indefinitely until
+   * signalled. This option is used only for the case where there are
+   * multiple, low-priority worker threads. In that case, only one of
+   * the threads does the poll... the others simply wait. In all other cases
+   * period will be non-zero and equal to wqueue->delay.
+   */
+
+  if (period == 0)
     {
-      /* How much time would we need to delay to get to the end of the
-       * sampling period? The amount of time we delay should be the smaller
-       * of the time to the end of the sampling period and the time to the
-       * next work expiry.
-       */
+      sigset_t set;
 
-      remaining = wqueue->delay - elapsed;
-      next = MIN(next, remaining);
-      if (next > 0)
+      /* Wait indefinitely until signalled with SIGWORK */
+
+      sigemptyset(&set);
+      sigaddset(&set, SIGWORK);
+      DEBUGVERIFY(sigwaitinfo(&set, NULL));
+    }
+  else
+#endif
+    {
+      /* Get the delay (in clock ticks) since we started the sampling */
+
+      elapsed = clock_systimer() - stick;
+      if (elapsed < period && next > 0)
         {
+          /* How much time would we need to delay to get to the end of the
+           * sampling period? The amount of time we delay should be the smaller
+           * of the time to the end of the sampling period and the time to the
+           * next work expiry.
+           */
+
+          remaining = period - elapsed;
+          next = MIN(next, remaining);
+
           /* Wait awhile to check the work list. We will wait here until
            * either the time elapses or until we are awakened by a signal.
            * Interrupts will be re-enabled while we wait.
@@ -180,13 +180,15 @@ int work_lpstart(void);
  *
  * Input parameters:
  *   wqueue - Describes the work queue to be processed
+ *   period - The polling period in clock ticks
  *   wndx   - The worker thread index
  *
  * Returned Value:
  *   None
  *
  ****************************************************************************/
 
-void work_process(FAR struct kwork_wqueue_s *wqueue, int wndx);
+void work_process(FAR struct kwork_wqueue_s *wqueue, uint32_t period, int wndx);
 
 #endif /* CONFIG_SCHED_WORKQUEUE */
 #endif /* __SCHED_WQUEUE_WQUEUE_H */
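Seen from the caller's side, the new period argument selects the wait policy. Both call patterns below are quoted from the hunks above; only the surrounding comments are editorial:

/* Worker 0 (and the high-priority thread): poll on the queue's configured
 * period, processing work and returning when the period expires.
 */

work_process((FAR struct kwork_wqueue_s *)&g_lpwork, g_lpwork.delay, 0);

/* Any additional low-priority worker: a period of zero makes work_process
 * block in sigwaitinfo() until SIGWORK announces new work.
 */

work_process((FAR struct kwork_wqueue_s *)&g_lpwork, 0, wndx);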