sched: Dynamically extend the pid hash table

Change-Id: I3a719bc575cd408cd530a39efa203f507b44fa9c
Signed-off-by: Jiuzhu Dong <dongjiuzhu1@xiaomi.com>
Jiuzhu Dong 2021-06-15 16:45:40 +08:00 committed by Xiang Xiao
parent bb867a38fc
commit 9b1f554429
15 changed files with 142 additions and 107 deletions

@ -86,15 +86,7 @@ there are three important configuration options that can change that.
allocations and to improve allocation performance, child task exit
status structures are pre-allocated when the system boots. This
setting determines the number of child status structures that will be
pre-allocated. If this setting is not defined or if it is defined to
be zero then a value of 2\*\ ``MAX_TASKS`` is used.
Note that there cannot be more than ``CONFIG_MAX_TASKS`` tasks in
total. However, the number of child status structures may need to be
significantly larger because this number includes the maximum number
of tasks that are running PLUS the number of tasks that have exit'ed
without having their exit status reaped (via :c:func:`wait`,
:c:func:`waitpid` or :c:func:`waitid`).
pre-allocated.
Obviously, if tasks spawn children indefinitely and never have the
exit status reaped, then you may have a memory leak! (See **Warning**

@ -318,6 +318,13 @@ config Z180_TOOLCHAIN_SDCCW
endchoice
config Z180_MAX_TASKS
int "Max number of tasks for arch chip z180"
default 8
---help---
The maximum number of simultaneously active tasks. This value must be
a power of two.
config LINKER_HOME_AREA
hex "Physical start of _HOME area"
default 0x0000

@ -58,7 +58,7 @@
****************************************************************************/
static GRAN_HANDLE g_physhandle;
static struct z180_cbr_s g_cbrs[CONFIG_MAX_TASKS];
static struct z180_cbr_s g_cbrs[CONFIG_Z180_MAX_TASKS];
/****************************************************************************
* Private Functions
@ -79,7 +79,7 @@ static inline FAR struct z180_cbr_s *z180_mmu_alloccbr(void)
{
int i;
for (i = 0; i < CONFIG_MAX_TASKS; i++)
for (i = 0; i < CONFIG_Z180_MAX_TASKS; i++)
{
FAR struct z180_cbr_s *cbr = &g_cbrs[i];
if (cbr->crefs == 0)

@ -21,6 +21,12 @@ config FS_PROCFS_REGISTER
Support run-time registration of the new entries in the procfs file
system.
config FS_PROCFS_MAX_TASKS
int "The maxinum number of active tasks for procfs snapshot"
default 128
---help---
The maximum number of active tasks for procfs snapshot.
menu "Exclude individual procfs entries"
config FS_PROCFS_EXCLUDE_PROCESS

@ -279,9 +279,9 @@ struct procfs_level0_s
/* Our private data */
uint8_t lastlen; /* length of last reported static dir */
pid_t pid[CONFIG_MAX_TASKS]; /* Snapshot of all active task IDs */
FAR const char *lastread; /* Pointer to last static dir read */
uint8_t lastlen; /* length of last reported static dir */
pid_t pid[CONFIG_FS_PROCFS_MAX_TASKS]; /* Snapshot of all active task IDs */
FAR const char *lastread; /* Pointer to last static dir read */
};
/* Level 1 is an internal virtual directory (such as /proc/fs) which
@ -321,7 +321,10 @@ static void procfs_enum(FAR struct tcb_s *tcb, FAR void *arg)
/* Add the PID to the list */
index = dir->base.nentries;
DEBUGASSERT(index < CONFIG_MAX_TASKS);
if (index >= CONFIG_FS_PROCFS_MAX_TASKS)
{
return;
}
dir->pid[index] = tcb->pid;
dir->base.nentries = index + 1;

@ -483,13 +483,6 @@ config TASK_NAME_SIZE
will be TASK_NAME_SIZE + 1. The default of 31 then results in
an align-able 32-byte allocation.
config MAX_TASKS
int "Max number of tasks"
default 32
---help---
The maximum number of simultaneously active tasks. This value must be
a power of two.
config SCHED_HAVE_PARENT
bool "Support parent/child task relationships"
default n
@ -551,11 +544,8 @@ config PREALLOC_CHILDSTATUS
To prevent runaway child status allocations and to improve
allocation performance, child task exit status structures are pre-
allocated when the system boots. This setting determines the number
of child status structures that will be pre-allocated. If this
setting is not defined or if it is defined to be zero then a value
of 2*MAX_TASKS is used.
of child status structures that will be pre-allocated.
Note that there cannot be more than MAX_TASKS tasks in total.
However, the number of child status structures may need to be
significantly larger because this number includes the maximum number
of tasks that are running PLUS the number of tasks that have exit'ed

@ -38,19 +38,9 @@
* Pre-processor Definitions
****************************************************************************/
/* Note that there cannot be more than CONFIG_MAX_TASKS tasks in total.
* However, the number of child status structures may need to be
* significantly larger because this number includes the maximum number of
* tasks that are running PLUS the number of tasks that have exit'ed without
* having their exit status reaped (via wait(), waitid(), or waitpid()).
*
* Obviously, if tasks spawn children indefinitely and never have the exit
* status reaped, then you have a memory leak!
*/
#if !defined(CONFIG_PREALLOC_CHILDSTATUS) || CONFIG_PREALLOC_CHILDSTATUS == 0
# undef CONFIG_PREALLOC_CHILDSTATUS
# define CONFIG_PREALLOC_CHILDSTATUS (2*CONFIG_MAX_TASKS)
# define CONFIG_PREALLOC_CHILDSTATUS 16
#endif
#ifndef CONFIG_DEBUG_INFO
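
To make the "reap or leak" warning from the help text above concrete, here is a minimal sketch of the reaping pattern. It assumes a NuttX build with CONFIG_SCHED_HAVE_PARENT, CONFIG_SCHED_CHILD_STATUS and CONFIG_SCHED_WAITPID enabled; the function and task names are illustrative and are not part of this commit.

#include <nuttx/config.h>

#include <sys/wait.h>
#include <sched.h>
#include <stdio.h>

/* Illustrative child entry point: exit immediately with a known status */

static int child_main(int argc, FAR char *argv[])
{
  return 42;
}

/* Spawn a child and reap its exit status so that the pre-allocated child
 * status structure is returned to the pool.
 */

int reap_child_example(void)
{
  int status;
  pid_t pid;

  pid = task_create("child", 100, 2048, child_main, NULL);
  if (pid < 0)
    {
      return pid;
    }

  /* Without this waitpid() call the child's exit status structure would
   * stay allocated, which is the "memory leak" the help text warns about.
   */

  waitpid(pid, &status, 0);
  printf("child exited with status %d\n", WEXITSTATUS(status));
  return 0;
}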

@ -197,12 +197,11 @@ volatile pid_t g_lastpid;
* 1. This hash table greatly speeds the determination of a new unique
* process ID for a task, and
* 2. Is used to quickly map a process ID into a TCB.
*
* It has the side effects of using more memory and limiting
* the number of tasks to CONFIG_MAX_TASKS.
*/
struct pidhash_s g_pidhash[CONFIG_MAX_TASKS];
FAR struct pidhash_s *g_pidhash;
volatile int g_npidhash;
/* This is a table of task lists. This table is indexed by the task state
* enumeration type (tstate_t) and provides a pointer to the associated
@ -378,27 +377,11 @@ void nx_start(void)
}
#endif
/* Initialize the logic that determine unique process IDs. */
g_lastpid = 0;
for (i = 0; i < CONFIG_MAX_TASKS; i++)
{
g_pidhash[i].tcb = NULL;
g_pidhash[i].pid = INVALID_PROCESS_ID;
}
/* Initialize the IDLE task TCB *******************************************/
for (i = 0; i < CONFIG_SMP_NCPUS; i++)
{
FAR dq_queue_t *tasklist;
int hashndx;
/* Assign the process ID(s) of ZERO to the idle task(s) */
hashndx = PIDHASH(i);
g_pidhash[hashndx].tcb = &g_idletcb[i].cmn;
g_pidhash[hashndx].pid = i;
/* Initialize a TCB for this thread of execution. NOTE: The default
* value for most components of the g_idletcb are zero. The entire
@ -510,8 +493,6 @@ void nx_start(void)
}
}
g_lastpid = CONFIG_SMP_NCPUS - 1;
/* Task lists are initialized */
g_nx_initstate = OSINIT_TASKLISTS;
@ -574,6 +555,36 @@ void nx_start(void)
iob_initialize();
#endif
/* Initialize the logic that determine unique process IDs. */
g_npidhash = 4;
while (g_npidhash <= CONFIG_SMP_NCPUS)
{
g_npidhash <<= 1;
}
g_pidhash = kmm_malloc(sizeof(struct pidhash_s) * g_npidhash);
DEBUGASSERT(g_pidhash);
for (i = 0; i < g_npidhash; i++)
{
g_pidhash[i].tcb = NULL;
g_pidhash[i].pid = INVALID_PROCESS_ID;
}
for (i = 0; i < CONFIG_SMP_NCPUS; i++)
{
int hashndx;
/* Assign the process ID(s) of ZERO to the idle task(s) */
hashndx = PIDHASH(i);
g_pidhash[hashndx].tcb = &g_idletcb[i].cmn;
g_pidhash[hashndx].pid = i;
}
g_lastpid = CONFIG_SMP_NCPUS - 1;
/* The memory manager is available */
g_nx_initstate = OSINIT_MEMORY;
@ -757,7 +768,7 @@ void nx_start(void)
/* A few basic sanity checks */
DEBUGASSERT(this_cpu() == 0 && CONFIG_MAX_TASKS > CONFIG_SMP_NCPUS);
DEBUGASSERT(this_cpu() == 0);
/* Then start the other CPUs */
@ -790,7 +801,7 @@ void nx_start(void)
/* Check stack in idle thread */
for (i = 0; i < CONFIG_MAX_TASKS; i++)
for (i = 0; i < g_npidhash; i++)
{
FAR struct tcb_s *tcb;
irqstate_t flags;
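
As a side note on the sizing loop added to nx_start() above, the sketch below reproduces the "start at 4 and double until the size exceeds CONFIG_SMP_NCPUS" rule in plain C so the resulting table sizes can be checked. The helper name is illustrative; nothing here is kernel code.

#include <stdio.h>

/* Illustrative helper: start at four entries and keep doubling until the
 * table is strictly larger than the CPU count, so the idle tasks with
 * PIDs 0..ncpus-1 each land in a distinct slot.
 */

static int initial_pidhash_size(int ncpus)
{
  int npidhash = 4;

  while (npidhash <= ncpus)
    {
      npidhash <<= 1;
    }

  return npidhash;
}

int main(void)
{
  printf("%d\n", initial_pidhash_size(1));  /* prints 4  */
  printf("%d\n", initial_pidhash_size(4));  /* prints 8  */
  printf("%d\n", initial_pidhash_size(8));  /* prints 16 */
  return 0;
}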

@ -40,20 +40,7 @@
* Pre-processor Definitions
****************************************************************************/
/* Although task IDs can take the (positive, non-zero)
* range of pid_t, the number of tasks that will be supported
* at any one time is (artificially) limited by the CONFIG_MAX_TASKS
* configuration setting. Limiting the number of tasks speeds certain
* OS functions (this is the only limitation in the number of
* tasks built into the design).
*/
#if CONFIG_MAX_TASKS & (CONFIG_MAX_TASKS - 1)
# error CONFIG_MAX_TASKS must be power of 2
#endif
#define MAX_TASKS_MASK (CONFIG_MAX_TASKS-1)
#define PIDHASH(pid) ((pid) & MAX_TASKS_MASK)
#define PIDHASH(pid) ((pid) & (g_npidhash - 1))
/* These are macros to access the current CPU and the current task on a CPU.
* These macros are intended to support a future SMP implementation.
@ -254,12 +241,11 @@ extern volatile pid_t g_lastpid;
* 1. This hash table greatly speeds the determination of a new unique
* process ID for a task, and
* 2. Is used to quickly map a process ID into a TCB.
*
* It has the side effects of using more memory and limiting the number
* of tasks to CONFIG_MAX_TASKS.
*/
extern struct pidhash_s g_pidhash[CONFIG_MAX_TASKS];
extern FAR struct pidhash_s *g_pidhash;
extern volatile int g_npidhash;
/* This is a table of task lists. This table is indexed by the task state
* enumeration type (tstate_t) and provides a pointer to the associated
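
A short standalone sketch of the masking that the new PIDHASH() definition relies on may help here: because g_npidhash is kept a power of two, pid % size reduces to a single AND with (size - 1), and the example also shows why a size change forces a rehash. The function below is illustrative only and is not the kernel macro.

#include <assert.h>
#include <stdio.h>

static int pidhash(int pid, int npidhash)
{
  /* Valid only while npidhash is a power of two */

  assert((npidhash & (npidhash - 1)) == 0);
  return pid & (npidhash - 1);
}

int main(void)
{
  /* With a 32-entry table, PID 37 lands in slot 5 (37 % 32 == 37 & 31) */

  printf("%d\n", pidhash(37, 32));  /* prints 5 */

  /* After the table doubles to 64 entries the same PID maps to slot 37,
   * which is why nxtask_assign_pid() rehashes every live entry after
   * growing the table.
   */

  printf("%d\n", pidhash(37, 64));  /* prints 37 */
  return 0;
}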

@ -160,13 +160,13 @@ static inline void nxsched_cpu_process_cpuload(int cpu)
void weak_function nxsched_process_cpuload(void)
{
int i;
#ifdef CONFIG_SMP
irqstate_t flags;
/* Perform scheduler operations on all CPUs. */
flags = enter_critical_section();
#ifdef CONFIG_SMP
for (i = 0; i < CONFIG_SMP_NCPUS; i++)
{
nxsched_cpu_process_cpuload(i);
@ -191,7 +191,7 @@ void weak_function nxsched_process_cpuload(void)
* total.
*/
for (i = 0; i < CONFIG_MAX_TASKS; i++)
for (i = 0; i < g_npidhash; i++)
{
g_pidhash[i].ticks >>= 1;
total += g_pidhash[i].ticks;
@ -202,9 +202,7 @@ void weak_function nxsched_process_cpuload(void)
g_cpuload_total = total;
}
#ifdef CONFIG_SMP
leave_critical_section(flags);
#endif
}
/****************************************************************************
@ -230,7 +228,7 @@ void weak_function nxsched_process_cpuload(void)
int clock_cpuload(int pid, FAR struct cpuload_s *cpuload)
{
irqstate_t flags;
int hash_index = PIDHASH(pid);
int hash_index;
int ret = -ESRCH;
DEBUGASSERT(cpuload);
@ -241,6 +239,7 @@ int clock_cpuload(int pid, FAR struct cpuload_s *cpuload)
*/
flags = enter_critical_section();
hash_index = PIDHASH(pid);
/* Make sure that the entry is valid (TCB field is not NULL) and matches
* the requested PID. The first check is needed if the thread has exited.

@ -62,16 +62,16 @@ void nxsched_foreach(nxsched_foreach_t handler, FAR void *arg)
/* Visit each active task */
for (ndx = 0; ndx < CONFIG_MAX_TASKS; ndx++)
flags = enter_critical_section();
for (ndx = 0; ndx < g_npidhash; ndx++)
{
/* This test and the function call must be atomic */
flags = enter_critical_section();
if (g_pidhash[ndx].tcb)
{
handler(g_pidhash[ndx].tcb, arg);
}
leave_critical_section(flags);
}
leave_critical_section(flags);
}
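
For reference, a minimal sketch of how a caller might use nxsched_foreach() now that the table bound is dynamic. The critical section shown in the diff covers the whole walk, so the callback runs with interrupts disabled and should stay short. The struct and function names below are illustrative, not kernel APIs.

#include <nuttx/config.h>
#include <nuttx/sched.h>

/* Illustrative callback argument */

struct count_arg_s
{
  int ntasks;
};

static void count_cb(FAR struct tcb_s *tcb, FAR void *arg)
{
  FAR struct count_arg_s *carg = (FAR struct count_arg_s *)arg;

  carg->ntasks++;  /* Invoked once per active TCB */
}

/* Count the currently active tasks, much as procfs builds its snapshot */

int count_active_tasks(void)
{
  struct count_arg_s arg =
  {
    .ntasks = 0
  };

  nxsched_foreach(count_cb, &arg);
  return arg.ntasks;
}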

@ -60,10 +60,6 @@ FAR struct tcb_s *nxsched_get_tcb(pid_t pid)
if (pid >= 0)
{
/* Get the hash_ndx associated with the pid */
hash_ndx = PIDHASH(pid);
/* The test and the return setup should be atomic. This still does
* not provide proper protection if the recipient of the TCB does not
* also protect against the task associated with the TCB from
@ -72,6 +68,10 @@ FAR struct tcb_s *nxsched_get_tcb(pid_t pid)
flags = enter_critical_section();
/* Get the hash_ndx associated with the pid */
hash_ndx = PIDHASH(pid);
/* Verify that the correct TCB was found. */
if (pid == g_pidhash[hash_ndx].pid)

@ -48,6 +48,7 @@
static void nxsched_releasepid(pid_t pid)
{
irqstate_t flags = enter_critical_section();
int hash_ndx = PIDHASH(pid);
/* Make any pid associated with this hash available. Note:
@ -67,6 +68,8 @@ static void nxsched_releasepid(pid_t pid)
g_cpuload_total -= g_pidhash[hash_ndx].ticks;
g_pidhash[hash_ndx].ticks = 0;
#endif
leave_critical_section(flags);
}
/****************************************************************************

@ -68,5 +68,12 @@ bool nxsched_verify_tcb(FAR struct tcb_s *tcb)
* information available.
*/
return tcb == g_pidhash[PIDHASH(tcb->pid)].tcb;
irqstate_t flags;
bool valid;
flags = enter_critical_section();
valid = tcb == g_pidhash[PIDHASH(tcb->pid)].tcb;
leave_critical_section(flags);
return valid;
}

@ -79,10 +79,10 @@ static const char g_noname[] = "<noname>";
static int nxtask_assign_pid(FAR struct tcb_s *tcb)
{
FAR struct pidhash_s *pidhash;
pid_t next_pid;
int hash_ndx;
int tries;
int ret = ERROR;
int i;
/* NOTE:
* ERROR means that the g_pidhash[] table is completely full.
@ -97,17 +97,17 @@ static int nxtask_assign_pid(FAR struct tcb_s *tcb)
/* We'll try every allowable pid */
for (tries = 0; tries < CONFIG_MAX_TASKS; tries++)
retry:
/* Get the next process ID candidate */
next_pid = g_lastpid + 1;
for (i = 0; i < g_npidhash; i++)
{
/* Get the next process ID candidate */
next_pid = ++g_lastpid;
/* Verify that the next_pid is in the valid range */
if (next_pid <= 0)
{
g_lastpid = 1;
next_pid = 1;
}
@ -127,16 +127,57 @@ static int nxtask_assign_pid(FAR struct tcb_s *tcb)
g_pidhash[hash_ndx].ticks = 0;
#endif
tcb->pid = next_pid;
g_lastpid = next_pid;
ret = OK;
goto out;
leave_critical_section(flags);
return OK;
}
next_pid++;
}
out:
/* If we get here, then the g_pidhash[] table is completely full.
* We will allocate a larger table and copy the original g_pidhash
* into it to expand the space.
*/
leave_critical_section(flags);
return ret;
pidhash = kmm_malloc(g_npidhash * 2 * sizeof(struct pidhash_s));
if (pidhash == NULL)
{
leave_critical_section(flags);
return -ENOMEM;
}
g_npidhash *= 2;
/* Reset the new hash table to the initial state */
for (i = 0; i < g_npidhash; i++)
{
pidhash[i].tcb = NULL;
pidhash[i].pid = INVALID_PROCESS_ID;
}
/* All of the original pid-to-hash_ndx mappings are stale under the
* new table size, so we need to rebuild their relationship
*/
for (i = 0; i < g_npidhash / 2; i++)
{
hash_ndx = PIDHASH(g_pidhash[i].pid);
DEBUGASSERT(pidhash[hash_ndx].tcb == NULL);
pidhash[hash_ndx].tcb = g_pidhash[i].tcb;
pidhash[hash_ndx].pid = g_pidhash[i].pid;
}
/* Release the original g_pidhash and switch over to the new table */
kmm_free(g_pidhash);
g_pidhash = pidhash;
/* Let's try every allowable pid again */
goto retry;
}
/****************************************************************************
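
Finally, a self-contained sketch of the grow-and-rehash step that nxtask_assign_pid() now performs when every slot is occupied. The types and helper below are illustrative stand-ins for struct pidhash_s and the in-kernel logic, and they assume the same invariant the kernel maintains: each live entry sits at index pid & (size - 1).

#include <stdlib.h>

/* Illustrative entry type; the kernel's equivalent is struct pidhash_s */

struct entry_s
{
  void *tcb;   /* NULL marks a free slot */
  int   pid;   /* Process ID stored in this slot */
};

/* Double the table and re-insert every live entry under the wider mask.
 * Because each live PID already satisfies pid & (oldsize - 1) == slot,
 * the low-order bits stay distinct under the wider mask and no two
 * entries can collide in the new table.
 */

int grow_table(struct entry_s **table, int *size)
{
  int newsize = *size * 2;
  struct entry_s *newtab;
  int i;

  newtab = malloc(newsize * sizeof(struct entry_s));
  if (newtab == NULL)
    {
      return -1;          /* Mirrors the -ENOMEM path in the commit */
    }

  /* Reset the new table to the initial (empty) state */

  for (i = 0; i < newsize; i++)
    {
      newtab[i].tcb = NULL;
      newtab[i].pid = -1;
    }

  /* The hash index is pid & (size - 1); the mask changed, so rebuild */

  for (i = 0; i < *size; i++)
    {
      if ((*table)[i].tcb != NULL)
        {
          newtab[(*table)[i].pid & (newsize - 1)] = (*table)[i];
        }
    }

  free(*table);
  *table = newtab;
  *size  = newsize;
  return 0;
}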