global change: replace sched_xfree() with kxmm_free()

Changes:
  sched_xfree() => kxmm_free()
  remove the garbage-related APIs
  remove ARCH_HAVE_GARBAGE

The garbage-collection feature has moved into mm_heap, so the scheduler
no longer needs its own delayed-free ("garbage") handling.

Change-Id: If310790a3208155ca8ab319e8d038cb6ff92c518
Signed-off-by: ligd <liguiding@fishsemi.com>
This commit is contained in:
parent cec53bb133
commit 231ad202ee

54 changed files with 81 additions and 767 deletions
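The substitution applied across the files below is mechanical at every call site: the scheduler's deferred-free wrappers give way to direct allocator calls, because (per the commit message) the deferral logic now lives in mm_heap itself. A minimal sketch of the migration pattern, assuming a clean-up routine that may run from an interrupt handler — the function and parameter names here are illustrative, not taken from the diff:

#include <nuttx/kmalloc.h>

/* Hypothetical clean-up routine, for illustration only. */

static void example_release(FAR void *kobj, FAR void *uobj)
{
  /* Old API: queue the address on g_delayed_kfree/g_delayed_kufree and
   * let sched_garbage_collection() free it later from the worker or
   * IDLE thread:
   *
   *   sched_kfree(kobj);    kernel-heap object
   *   sched_ufree(uobj);    user-heap object
   */

  /* New API: call the heap directly; mm_heap defers internally when the
   * call happens in interrupt context.
   */

  kmm_free(kobj);   /* kernel heap */
  kumm_free(uobj);  /* user heap */
}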
@@ -276,10 +276,6 @@ config ARCH_HAVE_RTC_SUBSECONDS
	bool
	default n

-config ARCH_HAVE_GARBAGE
-	bool
-	default n
-
config ARCH_GLOBAL_IRQDISABLE
	bool
	default n
@@ -106,7 +106,7 @@ void up_release_stack(FAR struct tcb_s *dtcb, uint8_t ttype)
    {
      if (kmm_heapmember(dtcb->stack_alloc_ptr))
        {
-         sched_kfree(dtcb->stack_alloc_ptr);
+         kmm_free(dtcb->stack_alloc_ptr);
        }
    }
  else

@@ -116,7 +116,7 @@ void up_release_stack(FAR struct tcb_s *dtcb, uint8_t ttype)

      if (umm_heapmember(dtcb->stack_alloc_ptr))
        {
-         sched_ufree(dtcb->stack_alloc_ptr);
+         kumm_free(dtcb->stack_alloc_ptr);
        }
    }

@@ -103,7 +103,7 @@ void up_release_stack(FAR struct tcb_s *dtcb, uint8_t ttype)
    {
      if (kmm_heapmember(dtcb->stack_alloc_ptr))
        {
-         sched_kfree(dtcb->stack_alloc_ptr);
+         kmm_free(dtcb->stack_alloc_ptr);
        }
    }
  else

@@ -113,7 +113,7 @@ void up_release_stack(FAR struct tcb_s *dtcb, uint8_t ttype)

      if (umm_heapmember(dtcb->stack_alloc_ptr))
        {
-         sched_ufree(dtcb->stack_alloc_ptr);
+         kumm_free(dtcb->stack_alloc_ptr);
        }
    }

@@ -103,7 +103,7 @@ void up_release_stack(FAR struct tcb_s *dtcb, uint8_t ttype)
    {
      if (kmm_heapmember(dtcb->stack_alloc_ptr))
        {
-         sched_kfree(dtcb->stack_alloc_ptr);
+         kmm_free(dtcb->stack_alloc_ptr);
        }
    }
  else

@@ -113,7 +113,7 @@ void up_release_stack(FAR struct tcb_s *dtcb, uint8_t ttype)

      if (umm_heapmember(dtcb->stack_alloc_ptr))
        {
-         sched_ufree(dtcb->stack_alloc_ptr);
+         kumm_free(dtcb->stack_alloc_ptr);
        }
    }

@@ -103,7 +103,7 @@ void up_release_stack(FAR struct tcb_s *dtcb, uint8_t ttype)
    {
      if (kmm_heapmember(dtcb->stack_alloc_ptr))
        {
-         sched_kfree(dtcb->stack_alloc_ptr);
+         kmm_free(dtcb->stack_alloc_ptr);
        }
    }
  else

@@ -113,7 +113,7 @@ void up_release_stack(FAR struct tcb_s *dtcb, uint8_t ttype)

      if (umm_heapmember(dtcb->stack_alloc_ptr))
        {
-         sched_ufree(dtcb->stack_alloc_ptr);
+         kumm_free(dtcb->stack_alloc_ptr);
        }
    }

@@ -96,7 +96,7 @@ void up_release_stack(FAR struct tcb_s *dtcb, uint8_t ttype)
    {
      if (kmm_heapmember(dtcb->stack_alloc_ptr))
        {
-         sched_kfree(dtcb->stack_alloc_ptr);
+         kmm_free(dtcb->stack_alloc_ptr);
        }
    }
  else

@@ -106,7 +106,7 @@ void up_release_stack(FAR struct tcb_s *dtcb, uint8_t ttype)

      if (umm_heapmember(dtcb->stack_alloc_ptr))
        {
-         sched_ufree(dtcb->stack_alloc_ptr);
+         kumm_free(dtcb->stack_alloc_ptr);
        }
    }

@@ -96,7 +96,7 @@ void up_release_stack(FAR struct tcb_s *dtcb, uint8_t ttype)
    {
      if (kmm_heapmember(dtcb->stack_alloc_ptr))
        {
-         sched_kfree(dtcb->stack_alloc_ptr);
+         kmm_free(dtcb->stack_alloc_ptr);
        }
    }
  else

@@ -106,7 +106,7 @@ void up_release_stack(FAR struct tcb_s *dtcb, uint8_t ttype)

      if (umm_heapmember(dtcb->stack_alloc_ptr))
        {
-         sched_ufree(dtcb->stack_alloc_ptr);
+         kumm_free(dtcb->stack_alloc_ptr);
        }
    }

@@ -106,7 +106,7 @@ void up_release_stack(FAR struct tcb_s *dtcb, uint8_t ttype)
    {
      if (kmm_heapmember(dtcb->stack_alloc_ptr))
        {
-         sched_kfree(dtcb->stack_alloc_ptr);
+         kmm_free(dtcb->stack_alloc_ptr);
        }
    }
  else

@@ -116,7 +116,7 @@ void up_release_stack(FAR struct tcb_s *dtcb, uint8_t ttype)

      if (umm_heapmember(dtcb->stack_alloc_ptr))
        {
-         sched_ufree(dtcb->stack_alloc_ptr);
+         kumm_free(dtcb->stack_alloc_ptr);
        }
    }

@@ -103,7 +103,7 @@ void up_release_stack(FAR struct tcb_s *dtcb, uint8_t ttype)
    {
      if (kmm_heapmember(dtcb->stack_alloc_ptr))
        {
-         sched_kfree(dtcb->stack_alloc_ptr);
+         kmm_free(dtcb->stack_alloc_ptr);
        }
    }
  else

@@ -113,7 +113,7 @@ void up_release_stack(FAR struct tcb_s *dtcb, uint8_t ttype)

      if (umm_heapmember(dtcb->stack_alloc_ptr))
        {
-         sched_ufree(dtcb->stack_alloc_ptr);
+         kumm_free(dtcb->stack_alloc_ptr);
        }
    }

@@ -103,7 +103,7 @@ void up_release_stack(FAR struct tcb_s *dtcb, uint8_t ttype)
    {
      if (kmm_heapmember(dtcb->stack_alloc_ptr))
        {
-         sched_kfree(dtcb->stack_alloc_ptr);
+         kmm_free(dtcb->stack_alloc_ptr);
        }
    }
  else

@@ -113,7 +113,7 @@ void up_release_stack(FAR struct tcb_s *dtcb, uint8_t ttype)

      if (umm_heapmember(dtcb->stack_alloc_ptr))
        {
-         sched_ufree(dtcb->stack_alloc_ptr);
+         kumm_free(dtcb->stack_alloc_ptr);
        }
    }

@@ -83,7 +83,7 @@ void up_release_stack(FAR struct tcb_s *dtcb, uint8_t ttype)
    {
      if (umm_heapmember(dtcb->stack_alloc_ptr))
        {
-         sched_ufree(dtcb->stack_alloc_ptr);
+         kumm_free(dtcb->stack_alloc_ptr);
        }
    }

@@ -103,7 +103,7 @@ void up_release_stack(FAR struct tcb_s *dtcb, uint8_t ttype)
    {
      if (kmm_heapmember(dtcb->stack_alloc_ptr))
        {
-         sched_kfree(dtcb->stack_alloc_ptr);
+         kmm_free(dtcb->stack_alloc_ptr);
        }
    }
  else

@@ -113,7 +113,7 @@ void up_release_stack(FAR struct tcb_s *dtcb, uint8_t ttype)

      if (umm_heapmember(dtcb->stack_alloc_ptr))
        {
-         sched_ufree(dtcb->stack_alloc_ptr);
+         kumm_free(dtcb->stack_alloc_ptr);
        }
    }

@@ -91,14 +91,14 @@ void up_release_stack(FAR struct tcb_s *dtcb, uint8_t ttype)

      if (ttype == TCB_FLAG_TTYPE_KERNEL)
        {
-         sched_kfree(dtcb->stack_alloc_ptr);
+         kmm_free(dtcb->stack_alloc_ptr);
        }
      else
#endif
        {
          /* Use the user-space allocator if this is a task or pthread */

-         sched_ufree(dtcb->stack_alloc_ptr);
+         kumm_free(dtcb->stack_alloc_ptr);
        }

      /* Mark the stack freed */

@@ -95,7 +95,7 @@ void up_release_stack(FAR struct tcb_s *dtcb, uint8_t ttype)
    {
      if (kmm_heapmember(dtcb->stack_alloc_ptr))
        {
-         sched_kfree(dtcb->stack_alloc_ptr);
+         kmm_free(dtcb->stack_alloc_ptr);
        }
    }
  else

@@ -105,7 +105,7 @@ void up_release_stack(FAR struct tcb_s *dtcb, uint8_t ttype)

      if (umm_heapmember(dtcb->stack_alloc_ptr))
        {
-         sched_ufree(dtcb->stack_alloc_ptr);
+         kumm_free(dtcb->stack_alloc_ptr);
        }
    }

@@ -92,7 +92,7 @@ void up_release_stack(FAR struct tcb_s *dtcb, uint8_t ttype)
    {
      if (umm_heapmember(dtcb->stack_alloc_ptr))
        {
-         sched_ufree(dtcb->stack_alloc_ptr);
+         kumm_free(dtcb->stack_alloc_ptr);
        }

      /* Mark the stack freed */

@@ -88,7 +88,7 @@ void up_release_stack(FAR struct tcb_s *dtcb, uint8_t ttype)
    {
      if (kmm_heapmember(dtcb->stack_alloc_ptr))
        {
-         sched_kfree(dtcb->stack_alloc_ptr);
+         kmm_free(dtcb->stack_alloc_ptr);
        }
    }
  else

@@ -98,7 +98,7 @@ void up_release_stack(FAR struct tcb_s *dtcb, uint8_t ttype)

      if (umm_heapmember(dtcb->stack_alloc_ptr))
        {
-         sched_ufree(dtcb->stack_alloc_ptr);
+         kumm_free(dtcb->stack_alloc_ptr);
        }
    }

@@ -599,12 +599,12 @@ static void usbhost_freeclass(FAR struct usbhost_cdcacm_s *usbclass)
{
  DEBUGASSERT(usbclass != NULL);

-  /* Free the class instance (calling sched_kfree() in case we are executing
+  /* Free the class instance (calling kmm_free() in case we are executing
   * from an interrupt handler.
   */

  uinfo("Freeing: %p\n", usbclass);
-  sched_kfree(usbclass);
+  kmm_free(usbclass);
}
#endif

@@ -749,7 +749,7 @@ static inline void usbhost_freeclass(FAR struct usbhost_state_s *usbclass)
  /* Free the class instance. */

  uinfo("Freeing: %p\n", usbclass);
-  sched_kfree(usbclass);
+  kmm_free(usbclass);
}

/****************************************************************************
@@ -538,7 +538,7 @@ static inline void usbhost_freeclass(FAR struct usbhost_state_s *usbclass)
  /* Free the class instance. */

  uinfo("Freeing: %p\n", usbclass);
-  sched_kfree(usbclass);
+  kmm_free(usbclass);
}

/****************************************************************************
@@ -440,12 +440,12 @@ static inline void usbhost_freeclass(FAR struct usbhost_state_s *usbclass)
{
  DEBUGASSERT(usbclass != NULL);

-  /* Free the class instance (calling sched_kfree() in case we are executing
+  /* Free the class instance (calling kmm_free() in case we are executing
   * from an interrupt handler.
   */

  uinfo("Freeing: %p\n", usbclass);
-  sched_kfree(usbclass);
+  kmm_free(usbclass);
}
#endif

@@ -767,25 +767,6 @@ FAR void *up_module_text_alloc(size_t size);
void up_module_text_free(FAR void *p);
#endif

-/****************************************************************************
- * Name: up_sched_have_garbage and up_sched_garbage_collection
- *
- * Description:
- *   Some architectures may support unique memory allocators.  If
- *   CONFIG_ARCH_HAVE_GARBAGE is defined, those architectures must provide
- *   both up_sched_have_garbage and up_sched_garbage_collection.  These will
- *   be tied into the NuttX memory garbage collection logic.
- *
- ****************************************************************************/
-
-#ifdef CONFIG_ARCH_HAVE_GARBAGE
-bool up_sched_have_garbage(void);
-void up_sched_garbage_collection(void);
-#else
-#  define up_sched_have_garbage() false
-#  define up_sched_garbage_collection()
-#endif
-
/****************************************************************************
 * Name: up_setpicbase and up_getpicbase
 *
@@ -191,48 +191,6 @@ void group_free(FAR struct task_group_s *group, FAR void *mem);

#endif

-/* Functions defined in sched/sched_kfree.c **********************************/
-
-/* Handles memory freed from an interrupt handler.  In that context, kmm_free()
- * (or kumm_free()) cannot be called.  Instead, the allocations are saved in a
- * list of delayed allocations that will be periodically cleaned up by
- * sched_garbage_collection().
- */
-
-void sched_ufree(FAR void *address);
-
-#if defined(CONFIG_MM_KERNEL_HEAP) && defined(__KERNEL__)
-void sched_kfree(FAR void *address);
-#else
-#  define sched_kfree(a) sched_ufree(a)
-#endif
-
-/* Signal the worker thread that is has some clean up to do */
-
-void sched_signal_free(void);
-
-/* Functions defined in sched/sched_garbage *********************************/
-
-/* Must be called periodically to clean up deallocations delayed by
- * sched_kmm_free().  This may be done from either the IDLE thread or from a
- * worker thread.  The IDLE thread has very low priority and could starve
- * the system for memory in some context.
- */
-
-void sched_garbage_collection(void);
-
-/* Is is not a good idea for the IDLE threads to take the KMM semaphore.
- * That can cause the IDLE thread to take processing time from higher
- * priority tasks.  The IDLE threads will only take the KMM semaphore if
- * there is garbage to be collected.
- *
- * Certainly there is a race condition involved in sampling the garbage
- * state.  The looping nature of the IDLE loops should catch any missed
- * garbage from the test on the next time around.
- */
-
-bool sched_have_garbage(void);
-
#undef KMALLOC_EXTERN
#if defined(__cplusplus)
}
@@ -61,7 +61,6 @@
 *   handle delayed processing from interrupt handlers.  This feature
 *   is required for some drivers but, if there are not complaints,
 *   can be safely disabled.  The worker thread also performs
-*   garbage collection -- completing any delayed memory deallocations
 *   from interrupt handlers.  If the worker thread is disabled,
 *   then that clean will be performed by the IDLE thread instead
 *   (which runs at the lowest of priority and may not be appropriate
@@ -159,7 +159,7 @@ void lib_stream_release(FAR struct task_group_s *group)
#ifndef CONFIG_BUILD_KERNEL
      /* Release memory from the user heap */

-      sched_ufree(stream->fs_bufstart);
+      kumm_free(stream->fs_bufstart);
#else
      /* If the exiting group is unprivileged, then it has an address
       * environment.  Don't bother to release the memory in this case...

@@ -171,7 +171,7 @@ void lib_stream_release(FAR struct task_group_s *group)

      if ((group->tg_flags & GROUP_FLAG_PRIVILEGED) != 0)
        {
-         sched_kfree(stream->fs_bufstart);
+         kmm_free(stream->fs_bufstart);
        }
#endif
    }
@@ -215,6 +215,6 @@ void bluetooth_container_free(FAR struct bluetooth_container_s *container)
      /* Otherwise, deallocate it. */

      net_unlock();
-      sched_kfree(container);
+      kmm_free(container);
    }
}
@@ -215,6 +215,6 @@ void ieee802154_container_free(FAR struct ieee802154_container_s *container)
      /* Otherwise, deallocate it. */

      net_unlock();
-      sched_kfree(container);
+      kmm_free(container);
    }
}
@@ -246,8 +246,8 @@ void igmp_grpfree(FAR struct net_driver_s *dev, FAR struct igmp_group_s *group)

  /* Then release the group structure resources. */

-  grpinfo("Call sched_kfree()\n");
-  sched_kfree(group);
+  grpinfo("Call kmm_free()\n");
+  kmm_free(group);
}

#endif /* CONFIG_NET_IGMP */
@@ -267,7 +267,7 @@ void mld_grpfree(FAR struct net_driver_s *dev, FAR struct mld_group_s *group)

  /* Then release the group structure resources. */

-  mldinfo("Call sched_kfree()\n");
+  mldinfo("Call kmm_free()\n");
  kmm_free(group);

#ifndef CONFIG_NET_MLD_ROUTER
@@ -446,7 +446,7 @@ void sixlowpan_reass_free(FAR struct sixlowpan_reassbuf_s *reass)

  /* Otherwise, deallocate it. */

-  sched_kfree(reass);
+  kmm_free(reass);
#endif
}

@@ -83,7 +83,7 @@ void env_release(FAR struct task_group_s *group)
    {
      /* Free the environment */

-      sched_ufree(group->tg_envp);
+      kumm_free(group->tg_envp);
    }

  /* In any event, make sure that all environment-related variables in the
@@ -217,7 +217,7 @@ static inline void group_release(FAR struct task_group_s *group)

  if (group->tg_members)
    {
-      sched_kfree(group->tg_members);
+      kmm_free(group->tg_members);
      group->tg_members = NULL;
    }
#endif

@@ -234,7 +234,7 @@ static inline void group_release(FAR struct task_group_s *group)
   * and freed from the single, global user allocator.
   */

-  sched_ufree(group->tg_streamlist);
+  kumm_free(group->tg_streamlist);

#  elif defined(CONFIG_BUILD_KERNEL)
  /* In the kernel build, the unprivileged process's stream list will be

@@ -251,7 +251,7 @@ static inline void group_release(FAR struct task_group_s *group)
       * must explicitly freed here.
       */

-      sched_kfree(group->tg_streamlist);
+      kmm_free(group->tg_streamlist);
    }

# endif

@@ -285,7 +285,7 @@ static inline void group_release(FAR struct task_group_s *group)
    {
      /* Release the group container itself */

-      sched_kfree(group);
+      kmm_free(group);
    }
}

@@ -91,7 +91,7 @@ void group_delwaiter(FAR struct task_group_s *group)
       * freed).
       */

-      sched_kfree(group);
+      kmm_free(group);
    }
}

@@ -119,32 +119,6 @@ int nx_idle_task(int argc, FAR char *argv[])

  for (; ; )
    {
-      /* Perform garbage collection (if it is not being done by the worker
-       * thread).  This cleans-up memory de-allocations that were queued
-       * because they could not be freed in that execution context (for
-       * example, if the memory was freed from an interrupt handler).
-       */
-
-#ifndef CONFIG_SCHED_WORKQUEUE
-      /* We must have exclusive access to the memory manager to do this
-       * BUT the idle task cannot wait on a semaphore.  So we only do
-       * the cleanup now if we can get the semaphore -- this should be
-       * possible because if the IDLE thread is running, no other task is!
-       *
-       * WARNING: This logic could have undesirable side-effects if priority
-       * inheritance is enabled.  Imagine the possible issues if the
-       * priority of the IDLE thread were to get boosted!  Moral: If you
-       * use priority inheritance, then you should also enable the work
-       * queue so that is done in a safer context.
-       */
-
-      if (sched_have_garbage() && kmm_trysemaphore() == 0)
-        {
-          sched_garbage_collection();
-          kmm_givesemaphore();
-        }
-#endif
-
      /* Perform any processor-specific idle state operations */

      up_idle();
@@ -204,29 +204,6 @@ volatile dq_queue_t g_stoppedtasks;

volatile dq_queue_t g_inactivetasks;

-#if (defined(CONFIG_BUILD_PROTECTED) || defined(CONFIG_BUILD_KERNEL)) && \
-     defined(CONFIG_MM_KERNEL_HEAP)
-/* These are lists of delayed memory deallocations that need to be handled
- * within the IDLE loop or worker thread.  These deallocations get queued
- * by sched_kufree and sched_kfree() if the OS needs to deallocate memory
- * while it is within an interrupt handler.
- */
-
-volatile sq_queue_t g_delayed_kfree;
-#endif
-
-#ifndef CONFIG_BUILD_KERNEL
-/* REVISIT:  It is not safe to defer user allocation in the kernel mode
- * build.  Why?  Because the correct user context will not be in place
- * when these deferred de-allocations are performed.  In order to make this
- * work, we would need to do something like:  (1) move g_delayed_kufree
- * into the group structure, then traverse the groups to collect garbage
- * on a group-by-group basis.
- */
-
-volatile sq_queue_t g_delayed_kufree;
-#endif
-
/* This is the value of the last process ID assigned to a task */

volatile pid_t g_lastpid;

@@ -418,13 +395,6 @@ void nx_start(void)
  dq_init(&g_stoppedtasks);
#endif
  dq_init(&g_inactivetasks);
-#if (defined(CONFIG_BUILD_PROTECTED) || defined(CONFIG_BUILD_KERNEL)) && \
-     defined(CONFIG_MM_KERNEL_HEAP)
-  sq_init(&g_delayed_kfree);
-#endif
-#ifndef CONFIG_BUILD_KERNEL
-  sq_init(&g_delayed_kufree);
-#endif

#ifdef CONFIG_SMP
  for (i = 0; i < CONFIG_SMP_NCPUS; i++)

@@ -850,32 +820,6 @@ void nx_start(void)
  sinfo("CPU0: Beginning Idle Loop\n");
  for (; ; )
    {
-      /* Perform garbage collection (if it is not being done by the worker
-       * thread).  This cleans-up memory de-allocations that were queued
-       * because they could not be freed in that execution context (for
-       * example, if the memory was freed from an interrupt handler).
-       */
-
-#ifndef CONFIG_SCHED_WORKQUEUE
-      /* We must have exclusive access to the memory manager to do this
-       * BUT the idle task cannot wait on a semaphore.  So we only do
-       * the cleanup now if we can get the semaphore -- this should be
-       * possible because if the IDLE thread is running, no other task is!
-       *
-       * WARNING: This logic could have undesirable side-effects if priority
-       * inheritance is enabled.  Imagine the possible issues if the
-       * priority of the IDLE thread were to get boosted!  Moral: If you
-       * use priority inheritance, then you should also enable the work
-       * queue so that is done in a safer context.
-       */
-
-      if (sched_have_garbage() && kmm_trysemaphore() == 0)
-        {
-          sched_garbage_collection();
-          kmm_givesemaphore();
-        }
-#endif
-
      /* Perform any processor-specific idle state operations */

      up_idle();
@@ -93,7 +93,7 @@ void nxmq_free_msg(FAR struct mqueue_msg_s *mqmsg)

  else if (mqmsg->type == MQ_ALLOC_DYN)
    {
-      sched_kfree(mqmsg);
+      kmm_free(mqmsg);
    }
  else
    {
@@ -69,5 +69,5 @@ void nxmq_free_msgq(FAR struct mqueue_inode_s *msgq)

  /* Then deallocate the message queue itself */

-  sched_kfree(msgq);
+  kmm_free(msgq);
}
@@ -284,5 +284,5 @@ void pthread_destroyjoin(FAR struct task_group_s *group,

  /* And deallocate the pjoin structure */

-  sched_kfree(pjoin);
+  kmm_free(pjoin);
}
@@ -612,7 +612,7 @@ int pthread_create(FAR pthread_t *thread, FAR const pthread_attr_t *attr,
  return ret;

errout_with_join:
-  sched_kfree(pjoin);
+  kmm_free(pjoin);
  ptcb->joininfo = NULL;

errout_with_tcb:
@@ -94,7 +94,7 @@ void pthread_release(FAR struct task_group_s *group)

      /* And deallocate the join structure */

-      sched_kfree(join);
+      kmm_free(join);
    }

  /* Destroy the join list semaphore */
@@ -33,11 +33,11 @@
#
############################################################################

-CSRCS += sched_garbage.c sched_getfiles.c
+CSRCS += sched_getfiles.c
CSRCS += sched_addreadytorun.c sched_removereadytorun.c
CSRCS += sched_addprioritized.c sched_mergeprioritized.c sched_mergepending.c
CSRCS += sched_addblocked.c sched_removeblocked.c
-CSRCS += sched_free.c sched_gettcb.c sched_verifytcb.c sched_releasetcb.c
+CSRCS += sched_gettcb.c sched_verifytcb.c sched_releasetcb.c
CSRCS += sched_getsockets.c sched_getstreams.c
CSRCS += sched_setparam.c sched_setpriority.c sched_getparam.c
CSRCS += sched_setscheduler.c sched_getscheduler.c
@@ -250,29 +250,6 @@ extern volatile dq_queue_t g_waitingforfill;

extern volatile dq_queue_t g_inactivetasks;

-/* These are lists of dayed memory deallocations that need to be handled
- * within the IDLE loop or worker thread.  These deallocations get queued
- * by sched_kufree and sched_kfree() if the OS needs to deallocate memory
- * while it is within an interrupt handler.
- */
-
-#if (defined(CONFIG_BUILD_PROTECTED) || defined(CONFIG_BUILD_KERNEL)) && \
-     defined(CONFIG_MM_KERNEL_HEAP)
-extern volatile sq_queue_t g_delayed_kfree;
-#endif
-
-#ifndef CONFIG_BUILD_KERNEL
-/* REVISIT:  It is not safe to defer user allocation in the kernel mode
- * build.  Why?  Because the correct user context will not be in place
- * when these deferred de-allocations are performed.  In order to make
- * this work, we would need to do something like:  (1) move g_delayed_kufree
- * into the group structure, then traverse the groups to collect garbage on
- * a group-by-group basis.
- */
-
-extern volatile sq_queue_t g_delayed_kufree;
-#endif
-
/* This is the value of the last process ID assigned to a task */

extern volatile pid_t g_lastpid;
@@ -1,176 +0,0 @@
-/****************************************************************************
- * sched/sched/sched_free.c
- *
- *   Copyright (C) 2007, 2009, 2012-2013, 2015-2016, 2018 Gregory Nutt.  All
- *     rights reserved.
- *   Author: Gregory Nutt <gnutt@nuttx.org>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- * 3. Neither the name NuttX nor the names of its contributors may be
- *    used to endorse or promote products derived from this software
- *    without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
- * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
- * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- ****************************************************************************/
-
-/****************************************************************************
- * Included Files
- ****************************************************************************/
-
-#include <nuttx/config.h>
-
-#include <queue.h>
-#include <assert.h>
-
-#include <nuttx/irq.h>
-#include <nuttx/kmalloc.h>
-#include <nuttx/arch.h>
-#include <nuttx/wqueue.h>
-
-#include "sched/sched.h"
-
-/****************************************************************************
- * Public Functions
- ****************************************************************************/
-
-/****************************************************************************
- * Name: sched_ufree and sched_kfree
- *
- * Description:
- *   These function performs deallocations that the operating system may
- *   need to make.  This special interface to free is used in handling
- *   corner cases where the operating system may have to perform
- *   deallocations from within an interrupt handler.
- *
- ****************************************************************************/
-
-void sched_ufree(FAR void *address)
-{
-#ifdef CONFIG_BUILD_KERNEL
-  /* REVISIT:  It is not safe to defer user allocation in the kernel mode
-   * build.  Why?  Because the correct user context is in place now but
-   * will not be in place when the deferred de-allocation is performed.  In
-   * order to make this work, we would need to do something like:  (1) move
-   * g_delayed_kufree into the group structure, then traverse the groups to
-   * collect garbage on a group-by-group basis.
-   */
-
-  DEBUGASSERT(!up_interrupt_context());
-  kumm_free(address);
-
-#else
-  /* Check if this is an attempt to deallocate memory from an exception
-   * handler.  If this function is called from the IDLE task, then we
-   * must have exclusive access to the memory manager to do this.
-   */
-
-  if (up_interrupt_context() || kumm_trysemaphore() != 0)
-    {
-      irqstate_t flags;
-
-      /* Yes.. Make sure that this is not a attempt to free kernel memory
-       * using the user deallocator.
-       */
-
-      flags = enter_critical_section();
-#if (defined(CONFIG_BUILD_PROTECTED) || defined(CONFIG_BUILD_KERNEL)) && \
-     defined(CONFIG_MM_KERNEL_HEAP)
-      DEBUGASSERT(!kmm_heapmember(address));
-#endif
-
-      /* Delay the deallocation until a more appropriate time. */
-
-      sq_addlast((FAR sq_entry_t *)address,
-                 (FAR sq_queue_t *)&g_delayed_kufree);
-
-      /* Signal the worker thread that is has some clean up to do */
-
-      sched_signal_free();
-      leave_critical_section(flags);
-    }
-  else
-    {
-      /* No.. just deallocate the memory now. */
-
-      kumm_free(address);
-      kumm_givesemaphore();
-    }
-#endif
-}
-
-#ifdef CONFIG_MM_KERNEL_HEAP
-void sched_kfree(FAR void *address)
-{
-  irqstate_t flags;
-
-  /* Check if this is an attempt to deallocate memory from an exception
-   * handler.  If this function is called from the IDLE task, then we
-   * must have exclusive access to the memory manager to do this.
-   */
-
-  if (up_interrupt_context() || kmm_trysemaphore() != 0)
-    {
-      /* Yes.. Make sure that this is not a attempt to free user memory
-       * using the kernel deallocator.
-       */
-
-      flags = enter_critical_section();
-      DEBUGASSERT(kmm_heapmember(address));
-
-      /* Delay the deallocation until a more appropriate time. */
-
-      sq_addlast((FAR sq_entry_t *)address,
-                 (FAR sq_queue_t *)&g_delayed_kfree);
-
-      /* Signal the worker thread that is has some clean up to do */
-
-      sched_signal_free();
-      leave_critical_section(flags);
-    }
-  else
-    {
-      /* No.. just deallocate the memory now. */
-
-      kmm_free(address);
-      kmm_givesemaphore();
-    }
-}
-#endif
-
-/****************************************************************************
- * Name: sched_signal_free
- *
- * Description:
- *   Signal the worker thread that is has some clean up to do.
- *
- ****************************************************************************/
-
-void sched_signal_free(void)
-{
-#ifdef CONFIG_SCHED_WORKQUEUE
-  /* Signal the worker thread that is has some clean up to do */
-
-  work_signal(LPWORK);
-#endif
-}
@@ -1,273 +0,0 @@
-/****************************************************************************
- * sched/sched/sched_garbage.c
- *
- *   Copyright (C) 2009, 2011, 2013, 2016 Gregory Nutt. All rights reserved.
- *   Author: Gregory Nutt <gnutt@nuttx.org>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- * 3. Neither the name NuttX nor the names of its contributors may be
- *    used to endorse or promote products derived from this software
- *    without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
- * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
- * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- ****************************************************************************/
-
-/****************************************************************************
- * Included Files
- ****************************************************************************/
-
-#include <nuttx/config.h>
-#include <nuttx/irq.h>
-#include <nuttx/kmalloc.h>
-
-#include "sched/sched.h"
-
-/****************************************************************************
- * Private Functions
- ****************************************************************************/
-
-/****************************************************************************
- * Name: nxsched_kucleanup
- *
- * Description:
- *   Clean-up deferred de-allocations of user memory
- *
- * Input Parameters:
- *   None
- *
- * Returned Value:
- *   None
- *
- ****************************************************************************/
-
-static inline void nxsched_kucleanup(void)
-{
-#ifdef CONFIG_BUILD_KERNEL
-  /* REVISIT:  It is not safe to defer user allocation in the kernel mode
-   * build.  Why?  Because the correct user context will not be in place
-   * when these deferred de-allocations are performed.  In order to make
-   * this work, we would need to do something like:  (1) move
-   * g_delayed_kufree into the group structure, then traverse the groups to
-   * collect garbage on a group-by-group basis.
-   */
-
-#else
-  irqstate_t flags;
-  FAR void *address;
-
-  /* Test if the delayed deallocation queue is empty.  No special protection
-   * is needed because this is an atomic test.
-   */
-
-  while (g_delayed_kufree.head)
-    {
-      /* Remove the first delayed deallocation.  This is not atomic and so
-       * we must disable interrupts around the queue operation.
-       */
-
-      flags = enter_critical_section();
-      address = (FAR void *)sq_remfirst((FAR sq_queue_t *)&g_delayed_kufree);
-      leave_critical_section(flags);
-
-      /* The address should always be non-NULL since that was checked in the
-       * 'while' condition above.
-       */
-
-      if (address)
-        {
-          /* Return the memory to the user heap */
-
-          kumm_free(address);
-        }
-    }
-#endif
-}
-
-/****************************************************************************
- * Name: nxsched_have_kugarbage
- *
- * Description:
- *   Return TRUE if there is user heap garbage to be collected.
- *
- * Input Parameters:
- *   None
- *
- * Returned Value:
- *   TRUE if there is kernel heap garbage to be collected.
- *
- ****************************************************************************/
-
-#ifndef CONFIG_BUILD_KERNEL
-static inline bool nxsched_have_kugarbage(void)
-{
-  return (g_delayed_kufree.head != NULL);
-}
-#else
-#  define nxsched_have_kugarbage() false
-#endif
-
-/****************************************************************************
- * Name: nxsched_kcleanup
- *
- * Description:
- *   Clean-up deferred de-allocations of kernel memory
- *
- * Input Parameters:
- *   None
- *
- * Returned Value:
- *   None
- *
- ****************************************************************************/
-
-#if (defined(CONFIG_BUILD_PROTECTED) || defined(CONFIG_BUILD_KERNEL)) && \
-     defined(CONFIG_MM_KERNEL_HEAP)
-static inline void nxsched_kcleanup(void)
-{
-  irqstate_t flags;
-  FAR void *address;
-
-  /* Test if the delayed deallocation queue is empty.  No special protection
-   * is needed because this is an atomic test.
-   */
-
-  while (g_delayed_kfree.head)
-    {
-      /* Remove the first delayed deallocation.  This is not atomic and so
-       * we must disable interrupts around the queue operation.
-       */
-
-      flags = enter_critical_section();
-      address = (FAR void *)sq_remfirst((FAR sq_queue_t *)&g_delayed_kfree);
-      leave_critical_section(flags);
-
-      /* The address should always be non-NULL since that was checked in the
-       * 'while' condition above.
-       */
-
-      if (address)
-        {
-          /* Return the memory to the kernel heap */
-
-          kmm_free(address);
-        }
-    }
-}
-#else
-#  define nxsched_kcleanup()
-#endif
-
-/****************************************************************************
- * Name: nxsched_have_kgarbage
- *
- * Description:
- *   Return TRUE if there is kernel heap garbage to be collected.
- *
- * Input Parameters:
- *   None
- *
- * Returned Value:
- *   TRUE if there is kernel heap garbage to be collected.
- *
- ****************************************************************************/
-
-#if (defined(CONFIG_BUILD_PROTECTED) || defined(CONFIG_BUILD_KERNEL)) && \
-     defined(CONFIG_MM_KERNEL_HEAP)
-static inline bool nxsched_have_kgarbage(void)
-{
-  return (g_delayed_kfree.head != NULL);
-}
-#else
-#  define nxsched_have_kgarbage() false
-#endif
-
-/****************************************************************************
- * Public Functions
- ****************************************************************************/
-
-/****************************************************************************
- * Name: sched_garbage_collection
- *
- * Description:
- *   Clean-up memory de-allocations that we queued because they could not
- *   be freed in that execution context (for example, if the memory was freed
- *   from an interrupt handler).
- *
- *   This logic may be called from the worker thread (see work_thread.c).
- *   If, however, CONFIG_SCHED_WORKQUEUE is not defined, then this logic will
- *   be called from the IDLE thread.  It is less optimal for the garbage
- *   collection to be called from the IDLE thread because it runs at a very
- *   low priority and could cause false memory out conditions.
- *
- * Input Parameters:
- *   None
- *
- * Returned Value:
- *   None
- *
- ****************************************************************************/
-
-void sched_garbage_collection(void)
-{
-  /* Handle deferred deallocations for the kernel heap */
-
-  nxsched_kcleanup();
-
-  /* Handle deferred deallocations for the user heap */
-
-  nxsched_kucleanup();
-
-  /* Handle the architecure-specific garbage collection */
-
-  up_sched_garbage_collection();
-}
-
-/****************************************************************************
- * Name: sched_have_garbage
- *
- * Description:
- *   Return TRUE if there is garbage to be collected.
- *
- *   Is is not a good idea for the IDLE threads to take the KMM semaphore.
- *   That can cause the IDLE thread to take processing time from higher
- *   priority tasks.  The IDLE threads will only take the KMM semaphore if
- *   there is garbage to be collected.
- *
- *   Certainly there is a race condition involved in sampling the garbage
- *   state.  The looping nature of the IDLE loops should catch any missed
- *   garbage from the test on the next time around.
- *
- * Input Parameters:
- *   None
- *
- * Returned Value:
- *   TRUE if there is garbage to be collected.
- *
- ****************************************************************************/
-
-bool sched_have_garbage(void)
-{
-  return (nxsched_have_kgarbage() || nxsched_have_kugarbage() ||
-          up_sched_have_garbage());
-}
@@ -172,7 +172,7 @@ int sched_releasetcb(FAR struct tcb_s *tcb, uint8_t ttype)
    {
      if (tcb->dspace->crefs <= 1)
        {
-         sched_kfree(tcb->dspace);
+         kmm_free(tcb->dspace);
        }
      else
        {

@@ -199,7 +199,7 @@ int sched_releasetcb(FAR struct tcb_s *tcb, uint8_t ttype)

      /* And, finally, release the TCB itself */

-      sched_kfree(tcb);
+      kmm_free(tcb);
    }

  return ret;
@@ -900,7 +900,7 @@ int sched_sporadic_stop(FAR struct tcb_s *tcb)

  /* The free the container holder the sporadic scheduling parameters */

-  sched_kfree(tcb->sporadic);
+  kmm_free(tcb->sporadic);
  tcb->sporadic = NULL;
  return OK;
}
@@ -98,6 +98,6 @@ void nxsig_release_pendingsigaction(FAR sigq_t *sigq)

  else if (sigq->type == SIG_ALLOC_DYN)
    {
-      sched_kfree(sigq);
+      kmm_free(sigq);
    }
}
@@ -107,6 +107,6 @@ void nxsig_release_pendingsignal(FAR sigpendq_t *sigpend)

  else if (sigpend->type == SIG_ALLOC_DYN)
    {
-      sched_kfree(sigpend);
+      kmm_free(sigpend);
    }
}
@@ -71,7 +71,7 @@ static inline void timer_free(struct posix_timer_s *timer)
      /* Otherwise, return it to the heap */

      leave_critical_section(flags);
-      sched_kfree(timer);
+      kmm_free(timer);
    }
}

@@ -87,16 +87,16 @@ int wd_delete(WDOG_ID wdog)

  if (WDOG_ISALLOCED(wdog))
    {
-      /* It was allocated from the heap.  Use sched_kfree() to release the
+      /* It was allocated from the heap.  Use kmm_free() to release the
       * memory.  If the timer was released from an interrupt handler,
-       * sched_kfree() will defer the actual deallocation of the memory
+       * kmm_free() will defer the actual deallocation of the memory
       * until a more appropriate time.
       *
       * We don't need interrupts disabled to do this.
       */

      leave_critical_section(flags);
-      sched_kfree(wdog);
+      kmm_free(wdog);
    }

  /* Check if this is pre-allocated timer. */
@@ -60,11 +60,7 @@ struct hp_wqueue_s g_hpwork;
 *   high priority work queue.
 *
 *   These, along with the lower priority worker thread(s) are the kernel
-*   mode work queues (also build in the flat build).  One of these threads
-*   also performs periodic garbage collection (that would otherwise be
-*   performed by the idle thread if CONFIG_SCHED_WORKQUEUE is not defined).
-*   That will be the higher priority worker thread only if a lower priority
-*   worker thread is available.
+*   mode work queues (also build in the flat build).
 *
 *   All kernel mode worker threads are started by the OS during normal
 *   bring up.  This entry point is referenced by OS internally and should

@@ -80,8 +76,8 @@ struct hp_wqueue_s g_hpwork;

static int work_hpthread(int argc, char *argv[])
{
+  int wndx = 0;
#if CONFIG_SCHED_HPNTHREADS > 1
-  int wndx;
  pid_t me = getpid();
  int i;

@@ -103,42 +99,12 @@ static int work_hpthread(int argc, char *argv[])

  for (; ; )
    {
-#if CONFIG_SCHED_HPNTHREADS > 1
-      /* Thread 0 is special.  Only thread 0 performs period garbage collection */
+      /* Then process queued work.  work_process will not return until: (1)
+       * there is no further work in the work queue, and (2) signal is
+       * triggered, or delayed work expires.
+       */

-      if (wndx > 0)
-        {
-          /* The other threads will perform work, waiting indefinitely until
-           * signalled for the next work availability.
-           */
-
-          work_process((FAR struct kwork_wqueue_s *)&g_hpwork, wndx);
-        }
-      else
-#endif
-        {
-#ifndef CONFIG_SCHED_LPWORK
-          /* First, perform garbage collection.  This cleans-up memory
-           * de-allocations that were queued because they could not be freed
-           * in that execution context (for example, if the memory was freed
-           * from an interrupt handler).
-           *
-           * NOTE: If the work thread is disabled, this clean-up is
-           * performed by the IDLE thread (at a very, very low priority).
-           * If the low-priority work thread is enabled, then the garbage
-           * collection is done on that thread instead.
-           */
-
-          sched_garbage_collection();
-#endif
-
-          /* Then process queued work.  work_process will not return until:
-           * (1) there is no further work in the work queue, and (2) signal
-           * is triggered, or delayed work expires.
-           */
-
-          work_process((FAR struct kwork_wqueue_s *)&g_hpwork, 0);
-        }
+      work_process((FAR struct kwork_wqueue_s *)&g_hpwork, wndx);
    }

  return OK; /* To keep some compilers happy */
@@ -60,10 +60,7 @@ struct lp_wqueue_s g_lpwork;
 *   low priority work queue.
 *
 *   These, along with the higher priority worker thread are the kernel mode
-*   work queues (also build in the flat build).  One of these threads also
-*   performs periodic garbage collection (that would otherwise be performed
-*   by the idle thread if CONFIG_SCHED_WORKQUEUE is not defined).  That will
-*   be the lower priority worker thread if it is available.
+*   work queues (also build in the flat build).
 *
 *   All kernel mode worker threads are started by the OS during normal
 *   bring up.  This entry point is referenced by OS internally and should

@@ -79,8 +76,8 @@ struct lp_wqueue_s g_lpwork;

static int work_lpthread(int argc, char *argv[])
{
+  int wndx = 0;
#if CONFIG_SCHED_LPNTHREADS > 1
-  int wndx;
  pid_t me = getpid();
  int i;

@@ -102,41 +99,12 @@ static int work_lpthread(int argc, char *argv[])

  for (; ; )
    {
-#if CONFIG_SCHED_LPNTHREADS > 1
-      /* Thread 0 is special.  Only thread 0 performs period garbage collection */
+      /* Then process queued work.  work_process will not return until:
+       * (1) there is no further work in the work queue, and (2) signal is
+       * triggered, or delayed work expires.
+       */

-      if (wndx > 0)
-        {
-          /* The other threads will perform work, waiting indefinitely until
-           * signalled for the next work availability.
-           */
-
-          work_process((FAR struct kwork_wqueue_s *)&g_lpwork, wndx);
-        }
-      else
-#endif
-        {
-          /* Perform garbage collection.  This cleans-up memory de-
-           * allocations that were queued because they could not be freed in
-           * that execution context (for example, if the memory was freed
-           * from an interrupt handler).
-           *
-           * NOTE: If the work thread is disabled, this clean-up is
-           * performed by the IDLE thread (at a very, very low priority).
-           *
-           * In the event of multiple low priority threads, on index == 0
-           * will do the garbage collection.
-           */
-
-          sched_garbage_collection();
-
-          /* Then process queued work.  work_process will not return until:
-           * (1) there is no further work in the work queue, and (2) signal
-           * is triggered, or delayed work expires.
-           */
-
-          work_process((FAR struct kwork_wqueue_s *)&g_lpwork, 0);
-        }
+      work_process((FAR struct kwork_wqueue_s *)&g_lpwork, wndx);
    }

  return OK; /* To keep some compilers happy */
@@ -453,7 +453,7 @@ void bt_buf_release(FAR struct bt_buf_s *buf)
      /* Otherwise, deallocate it. */

      DEBUGASSERT(buf->pool == POOL_BUFFER_DYNAMIC);
-      sched_kfree(buf);
+      kmm_free(buf);
    }

  wlinfo("Buffer freed: %p\n", buf);
@@ -409,7 +409,7 @@ void ieee802154_primitive_free(FAR struct ieee802154_primitive_s *prim)
      /* Otherwise, deallocate it. */

      DEBUGASSERT(priv->pool == POOL_PRIMITIVE_DYNAMIC);
-      sched_kfree(priv);
+      kmm_free(priv);
    }
#endif

@@ -237,6 +237,6 @@ void pktradio_metadata_free(FAR struct pktradio_metadata_s *metadata)
      /* Otherwise, deallocate it.  We won't access the free list */

      nxsem_post(&g_metadata_sem);
-      sched_kfree(metadata);
+      kmm_free(metadata);
    }
}