Create sched_ufree and sched_kfree from sched_free; use the user-accessible heap to allocate stacks

git-svn-id: svn://svn.code.sf.net/p/nuttx/code/trunk@5725 42af7a65-404d-4744-a932-0658087f49c3

parent 1c52dce216
commit 1ef904003e

56 changed files with 283 additions and 154 deletions
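
In outline: sched_free() splits into sched_ufree() for user-heap memory and sched_kfree() for kernel-heap memory, and per-thread stacks move to the user-accessible allocators kumalloc()/kuzalloc(). A condensed view of the new declarations, taken from the include/nuttx/kmalloc.h hunk below:

void sched_ufree(FAR void *address);        /* free user-heap memory */

#if defined(CONFIG_NUTTX_KERNEL) && defined(CONFIG_MM_KERNEL_HEAP)
void sched_kfree(FAR void *address);        /* free kernel-heap memory */
#else
#  define sched_kfree(a) sched_ufree(a)     /* flat build: a single heap */
#endif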
@@ -1,4 +1,4 @@
-/************************************************************
+/************************************************************************
  * up_idle.c
  *
  * Copyright (C) 2007, 2009 Gregory Nutt. All rights reserved.
@@ -33,7 +33,7 @@
  *
  ************************************************************************/

-/************************************************************
+/************************************************************************
  * Included Files
  ************************************************************************/

@@ -44,27 +44,27 @@

 #include "up_internal.h"

-/************************************************************
- * Private Definitions
+/************************************************************************
+ * Pre-processor Definitions
  ************************************************************************/

-/************************************************************
+/************************************************************************
  * Private Data
  ************************************************************************/

 #if defined(CONFIG_ARCH_LEDS) && defined(CONFIG_ARCH_BRINGUP)
 static uint8_t g_ledtoggle = 0;
 #endif

-/************************************************************
+/************************************************************************
  * Private Functions
  ************************************************************************/

-/************************************************************
+/************************************************************************
  * Public Functions
  ************************************************************************/

-/************************************************************
+/************************************************************************
  * Name: up_idle
  *
  * Description:
@@ -93,4 +93,3 @@ void up_idle(void)
     }
 #endif
 }
-
@@ -136,7 +136,7 @@ int up_create_stack(struct tcb_s *tcb, size_t stack_size)
     {
       /* Yes.. free it */

-      sched_free(tcb->stack_alloc_ptr);
+      sched_ufree(tcb->stack_alloc_ptr);
       tcb->stack_alloc_ptr = NULL;
     }

@@ -149,9 +149,9 @@ int up_create_stack(struct tcb_s *tcb, size_t stack_size)
    */

 #if defined(CONFIG_DEBUG) && !defined(CONFIG_DEBUG_STACK)
-  tcb->stack_alloc_ptr = (uint32_t *)kzalloc(stack_size);
+  tcb->stack_alloc_ptr = (uint32_t *)kuzalloc(stack_size);
 #else
-  tcb->stack_alloc_ptr = (uint32_t *)kmalloc(stack_size);
+  tcb->stack_alloc_ptr = (uint32_t *)kumalloc(stack_size);
 #endif
 #ifdef CONFIG_DEBUG
   if (!tcb->stack_alloc_ptr)
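
The two hunks above are the template repeated across every architecture below: stack memory now comes from the user heap so that, under CONFIG_NUTTX_KERNEL, the task can touch its own stack. A standalone sketch of the debug-aware allocation choice, with calloc()/malloc() as assumed stand-ins for kuzalloc()/kumalloc() (illustration only, not NuttX code):

#include <stdlib.h>

/* Plain debug builds get a zeroed stack, which is easier to inspect;
 * release builds, and builds where stack coloring overwrites the memory
 * anyway, take the cheaper uninitialized allocation. */

static void *alloc_task_stack(size_t stack_size, int zero_fill)
{
  return zero_fill ? calloc(1, stack_size)   /* ~ kuzalloc() */
                   : malloc(stack_size);     /* ~ kumalloc() */
}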
@@ -71,7 +71,7 @@ void up_release_stack(struct tcb_s *dtcb)
 {
   if (dtcb->stack_alloc_ptr)
     {
-      sched_free(dtcb->stack_alloc_ptr);
+      sched_ufree(dtcb->stack_alloc_ptr);
       dtcb->stack_alloc_ptr = NULL;
     }

@@ -116,7 +116,7 @@ int up_use_stack(struct tcb_s *tcb, void *stack, size_t stack_size)

   if (tcb->stack_alloc_ptr)
     {
-      sched_free(tcb->stack_alloc_ptr);
+      sched_ufree(tcb->stack_alloc_ptr);
     }

   /* Save the stack allocation */

@@ -95,7 +95,7 @@ int up_create_stack(struct tcb_s *tcb, size_t stack_size)
     {
       /* Yes.. free it */

-      sched_free(tcb->stack_alloc_ptr);
+      sched_ufree(tcb->stack_alloc_ptr);
       tcb->stack_alloc_ptr = NULL;
     }

@@ -108,9 +108,9 @@ int up_create_stack(struct tcb_s *tcb, size_t stack_size)
    */

 #if defined(CONFIG_DEBUG) && !defined(CONFIG_DEBUG_STACK)
-  tcb->stack_alloc_ptr = (FAR void *)kzalloc(stack_size);
+  tcb->stack_alloc_ptr = (FAR void *)kuzalloc(stack_size);
 #else
-  tcb->stack_alloc_ptr = (FAR void *)kmalloc(stack_size);
+  tcb->stack_alloc_ptr = (FAR void *)kumalloc(stack_size);
 #endif
 #ifdef CONFIG_DEBUG
   if (!tcb->stack_alloc_ptr)
@@ -92,7 +92,7 @@ int up_use_stack(struct tcb_s *tcb, void *stack, size_t stack_size)
     {
       /* Yes.. free it */

-      sched_free(tcb->stack_alloc_ptr);
+      sched_ufree(tcb->stack_alloc_ptr);
     }

   /* Save the stack allocation */

@@ -94,7 +94,7 @@ int up_create_stack(struct tcb_s *tcb, size_t stack_size)
     {
       /* Yes.. free it */

-      sched_free(tcb->stack_alloc_ptr);
+      sched_ufree(tcb->stack_alloc_ptr);
       tcb->stack_alloc_ptr = NULL;
     }

@@ -107,9 +107,9 @@ int up_create_stack(struct tcb_s *tcb, size_t stack_size)
    */

 #if defined(CONFIG_DEBUG) && !defined(CONFIG_DEBUG_STACK)
-  tcb->stack_alloc_ptr = (FAR void *)kzalloc(stack_size);
+  tcb->stack_alloc_ptr = (FAR void *)kuzalloc(stack_size);
 #else
-  tcb->stack_alloc_ptr = (FAR void *)kmalloc(stack_size);
+  tcb->stack_alloc_ptr = (FAR void *)kumalloc(stack_size);
 #endif
 #ifdef CONFIG_DEBUG
   if (!tcb->stack_alloc_ptr)
@@ -89,7 +89,7 @@ int up_use_stack(struct tcb_s *tcb, void *stack, size_t stack_size)

   if (tcb->stack_alloc_ptr)
     {
-      sched_free(tcb->stack_alloc_ptr);
+      sched_ufree(tcb->stack_alloc_ptr);
     }

   /* Save the stack allocation */

@@ -71,7 +71,7 @@ void up_release_stack(struct tcb_s *dtcb)
 {
   if (dtcb->stack_alloc_ptr)
     {
-      sched_free(dtcb->stack_alloc_ptr);
+      sched_ufree(dtcb->stack_alloc_ptr);
       dtcb->stack_alloc_ptr = NULL;
     }

@@ -91,7 +91,7 @@ int up_create_stack(struct tcb_s *tcb, size_t stack_size)
     {
       /* Yes.. free it */

-      sched_free(tcb->stack_alloc_ptr);
+      sched_ufree(tcb->stack_alloc_ptr);
       tcb->stack_alloc_ptr = NULL;
     }

@@ -104,9 +104,9 @@ int up_create_stack(struct tcb_s *tcb, size_t stack_size)
    */

 #if defined(CONFIG_DEBUG) && !defined(CONFIG_DEBUG_STACK)
-  tcb->stack_alloc_ptr = (uint32_t *)kzalloc(stack_size);
+  tcb->stack_alloc_ptr = (uint32_t *)kuzalloc(stack_size);
 #else
-  tcb->stack_alloc_ptr = (uint32_t *)kmalloc(stack_size);
+  tcb->stack_alloc_ptr = (uint32_t *)kumalloc(stack_size);
 #endif
 #ifdef CONFIG_DEBUG
   if (!tcb->stack_alloc_ptr)
@@ -72,7 +72,7 @@ void up_release_stack(struct tcb_s *dtcb)
 {
   if (dtcb->stack_alloc_ptr)
     {
-      sched_free(dtcb->stack_alloc_ptr);
+      sched_ufree(dtcb->stack_alloc_ptr);
       dtcb->stack_alloc_ptr = NULL;
     }

@@ -87,7 +87,7 @@ int up_use_stack(struct tcb_s *tcb, void *stack, size_t stack_size)

   if (tcb->stack_alloc_ptr && tcb->adj_stack_size != stack_size)
     {
-      sched_free(tcb->stack_alloc_ptr);
+      sched_ufree(tcb->stack_alloc_ptr);
       tcb->stack_alloc_ptr = NULL;
     }

@@ -112,7 +112,7 @@ int up_create_stack(struct tcb_s *tcb, size_t stack_size)
     {
       /* Yes.. free it */

-      sched_free(tcb->stack_alloc_ptr);
+      sched_ufree(tcb->stack_alloc_ptr);
       tcb->stack_alloc_ptr = NULL;
     }

@@ -125,9 +125,9 @@ int up_create_stack(struct tcb_s *tcb, size_t stack_size)
    */

 #if defined(CONFIG_DEBUG) && !defined(CONFIG_DEBUG_STACK)
-  tcb->stack_alloc_ptr = (uint32_t *)kzalloc(stack_size);
+  tcb->stack_alloc_ptr = (uint32_t *)kuzalloc(stack_size);
 #else
-  tcb->stack_alloc_ptr = (uint32_t *)kmalloc(stack_size);
+  tcb->stack_alloc_ptr = (uint32_t *)kumalloc(stack_size);
 #endif
 #ifdef CONFIG_DEBUG
   if (!tcb->stack_alloc_ptr)
@@ -71,7 +71,7 @@ void up_release_stack(struct tcb_s *dtcb)
 {
   if (dtcb->stack_alloc_ptr)
     {
-      sched_free(dtcb->stack_alloc_ptr);
+      sched_ufree(dtcb->stack_alloc_ptr);
       dtcb->stack_alloc_ptr = NULL;
     }

@@ -89,7 +89,7 @@ int up_use_stack(struct tcb_s *tcb, void *stack, size_t stack_size)

   if (tcb->stack_alloc_ptr)
     {
-      sched_free(tcb->stack_alloc_ptr);
+      sched_ufree(tcb->stack_alloc_ptr);
     }

   /* Save the stack allocation */

@@ -123,7 +123,7 @@ int up_create_stack(struct tcb_s *tcb, size_t stack_size)

   /* Allocate the memory for the stack */

-  uint32_t *stack_alloc_ptr = (uint32_t*)kmalloc(adj_stack_size);
+  uint32_t *stack_alloc_ptr = (uint32_t*)kumalloc(adj_stack_size);
   if (stack_alloc_ptr) {
     /* This is the address of the last word in the allocation */

@@ -161,7 +161,7 @@ int up_use_stack(struct tcb_s *tcb, void *stack, size_t stack_size)
 void up_release_stack(struct tcb_s *dtcb)
 {
   if (dtcb->stack_alloc_ptr) {
-    kfree(dtcb->stack_alloc_ptr);
+    kufree(dtcb->stack_alloc_ptr);
   }

   dtcb->stack_alloc_ptr = NULL;
@@ -91,7 +91,7 @@ int up_create_stack(struct tcb_s *tcb, size_t stack_size)
     {
       /* Yes.. free it */

-      sched_free(tcb->stack_alloc_ptr);
+      sched_ufree(tcb->stack_alloc_ptr);
       tcb->stack_alloc_ptr = NULL;
     }

@@ -104,9 +104,9 @@ int up_create_stack(struct tcb_s *tcb, size_t stack_size)
    */

 #if defined(CONFIG_DEBUG) && !defined(CONFIG_DEBUG_STACK)
-  tcb->stack_alloc_ptr = (uint32_t *)kzalloc(stack_size);
+  tcb->stack_alloc_ptr = (uint32_t *)kuzalloc(stack_size);
 #else
-  tcb->stack_alloc_ptr = (uint32_t *)kmalloc(stack_size);
+  tcb->stack_alloc_ptr = (uint32_t *)kumalloc(stack_size);
 #endif
 #ifdef CONFIG_DEBUG
   if (!tcb->stack_alloc_ptr)
@@ -71,7 +71,7 @@ void up_release_stack(struct tcb_s *dtcb)
 {
   if (dtcb->stack_alloc_ptr)
     {
-      sched_free(dtcb->stack_alloc_ptr);
+      sched_ufree(dtcb->stack_alloc_ptr);
       dtcb->stack_alloc_ptr = NULL;
     }

@@ -89,7 +89,7 @@ int up_use_stack(struct tcb_s *tcb, void *stack, size_t stack_size)

   if (tcb->stack_alloc_ptr)
     {
-      sched_free(tcb->stack_alloc_ptr);
+      sched_ufree(tcb->stack_alloc_ptr);
     }

   /* Save the stack allocation */

@@ -96,7 +96,7 @@ int up_create_stack(struct tcb_s *tcb, size_t stack_size)

   /* Allocate the memory for the stack */

-  uint32_t *stack_alloc_ptr = (uint32_t*)kmalloc(adj_stack_size);
+  uint32_t *stack_alloc_ptr = (uint32_t*)kumalloc(adj_stack_size);
   if (stack_alloc_ptr)
     {
       /* This is the address of the last word in the allocation */

@@ -93,7 +93,7 @@ int up_create_stack(struct tcb_s *tcb, size_t stack_size)
     {
       /* Yes.. free it */

-      sched_free(tcb->stack_alloc_ptr);
+      sched_ufree(tcb->stack_alloc_ptr);
       tcb->stack_alloc_ptr = NULL;
     }

@@ -106,9 +106,9 @@ int up_create_stack(struct tcb_s *tcb, size_t stack_size)
    */

 #if defined(CONFIG_DEBUG) && !defined(CONFIG_DEBUG_STACK)
-  tcb->stack_alloc_ptr = (uint32_t *)kzalloc(stack_size);
+  tcb->stack_alloc_ptr = (uint32_t *)kuzalloc(stack_size);
 #else
-  tcb->stack_alloc_ptr = (uint32_t *)kmalloc(stack_size);
+  tcb->stack_alloc_ptr = (uint32_t *)kumalloc(stack_size);
 #endif
 #ifdef CONFIG_DEBUG
   if (!tcb->stack_alloc_ptr)
@@ -71,7 +71,7 @@ void up_release_stack(struct tcb_s *dtcb)
 {
   if (dtcb->stack_alloc_ptr)
     {
-      sched_free(dtcb->stack_alloc_ptr);
+      sched_ufree(dtcb->stack_alloc_ptr);
       dtcb->stack_alloc_ptr = NULL;
     }

@@ -88,7 +88,7 @@ int up_use_stack(struct tcb_s *tcb, void *stack, size_t stack_size)

   if (tcb->stack_alloc_ptr)
     {
-      sched_free(tcb->stack_alloc_ptr);
+      sched_ufree(tcb->stack_alloc_ptr);
     }

   /* Save the stack allocation */

@@ -92,7 +92,7 @@ int up_create_stack(struct tcb_s *tcb, size_t stack_size)
     {
       /* Yes.. free it */

-      sched_free(tcb->stack_alloc_ptr);
+      sched_ufree(tcb->stack_alloc_ptr);
       tcb->stack_alloc_ptr = NULL;
     }

@@ -105,9 +105,9 @@ int up_create_stack(struct tcb_s *tcb, size_t stack_size)
    */

 #if defined(CONFIG_DEBUG) && !defined(CONFIG_DEBUG_STACK)
-  tcb->stack_alloc_ptr = (uint32_t *)kzalloc(stack_size);
+  tcb->stack_alloc_ptr = (uint32_t *)kuzalloc(stack_size);
 #else
-  tcb->stack_alloc_ptr = (uint32_t *)kmalloc(stack_size);
+  tcb->stack_alloc_ptr = (uint32_t *)kumalloc(stack_size);
 #endif
 #ifdef CONFIG_DEBUG
   if (!tcb->stack_alloc_ptr)
@@ -71,7 +71,7 @@ void up_release_stack(struct tcb_s *dtcb)
 {
   if (dtcb->stack_alloc_ptr)
     {
-      sched_free(dtcb->stack_alloc_ptr);
+      sched_ufree(dtcb->stack_alloc_ptr);
       dtcb->stack_alloc_ptr = NULL;
     }

@@ -89,7 +89,7 @@ int up_use_stack(struct tcb_s *tcb, void *stack, size_t stack_size)

   if (tcb->stack_alloc_ptr)
     {
-      sched_free(tcb->stack_alloc_ptr);
+      sched_ufree(tcb->stack_alloc_ptr);
     }

   /* Save the stack allocation */

@@ -91,7 +91,7 @@ int up_create_stack(struct tcb_s *tcb, size_t stack_size)
     {
       /* Yes.. free it */

-      sched_free(tcb->stack_alloc_ptr);
+      sched_ufree(tcb->stack_alloc_ptr);
       tcb->stack_alloc_ptr = NULL;
     }

@@ -104,9 +104,9 @@ int up_create_stack(struct tcb_s *tcb, size_t stack_size)
    */

 #if defined(CONFIG_DEBUG) && !defined(CONFIG_DEBUG_STACK)
-  tcb->stack_alloc_ptr = (uint32_t *)kzalloc(stack_size);
+  tcb->stack_alloc_ptr = (uint32_t *)kuzalloc(stack_size);
 #else
-  tcb->stack_alloc_ptr = (uint32_t *)kmalloc(stack_size);
+  tcb->stack_alloc_ptr = (uint32_t *)kumalloc(stack_size);
 #endif
 #ifdef CONFIG_DEBUG
   if (!tcb->stack_alloc_ptr)
@@ -71,7 +71,7 @@ void up_release_stack(struct tcb_s *dtcb)
 {
   if (dtcb->stack_alloc_ptr)
     {
-      sched_free(dtcb->stack_alloc_ptr);
+      sched_ufree(dtcb->stack_alloc_ptr);
       dtcb->stack_alloc_ptr = NULL;
     }

@@ -88,7 +88,7 @@ int up_use_stack(struct tcb_s *tcb, void *stack, size_t stack_size)

   if (tcb->stack_alloc_ptr)
     {
-      sched_free(tcb->stack_alloc_ptr);
+      sched_ufree(tcb->stack_alloc_ptr);
     }

   /* Save the stack allocation */

@@ -276,7 +276,7 @@ static inline void usbhost_freeclass(FAR struct usbhost_state_s *class)
 {
   DEBUGASSERT(class != NULL);

-  /* Free the class instance (perhaps calling sched_free() in case we are
+  /* Free the class instance (perhaps calling sched_kfree() in case we are
    * executing from an interrupt handler.
    */

@@ -424,7 +424,7 @@ static inline void usbhost_freeclass(FAR struct usbhost_state_s *class)
 {
   DEBUGASSERT(class != NULL);

-  /* Free the class instance (calling sched_free() in case we are executing
+  /* Free the class instance (calling sched_kfree() in case we are executing
    * from an interrupt handler.
    */

@@ -1,7 +1,7 @@
 /****************************************************************************
  * include/nuttx/kmalloc.h
  *
- * Copyright (C) 2007, 2008, 2011 Gregory Nutt. All rights reserved.
+ * Copyright (C) 2007-2008, 2011, 2013 Gregory Nutt. All rights reserved.
  * Author: Gregory Nutt <gnutt@nuttx.org>
  *
  * Redistribution and use in source and binary forms, with or without
@@ -145,24 +145,31 @@ FAR void *kzalloc(size_t size);
 FAR void *krealloc(FAR void *oldmem, size_t newsize);
 void kfree(FAR void *mem);

 #ifdef CONFIG_DEBUG
 bool kmm_heapmember(FAR void *mem);

 #endif
 #endif

-/* Functions defined in sched/sched_free.c **********************************/
+/* Functions defined in sched/sched_kfree.c *********************************/

 /* Handles memory freed from an interrupt handler.  In that context, kfree()
- * cannot be called.  Instead, the allocations are saved in a list of
- * delayed allocations that will be periodically cleaned up by
+ * (or kufree()) cannot be called.  Instead, the allocations are saved in a
+ * list of delayed allocations that will be periodically cleaned up by
  * sched_garbagecollection().
  */

-void sched_free(FAR void *address);
+void sched_ufree(FAR void *address);
+
+#if defined(CONFIG_NUTTX_KERNEL) && defined(CONFIG_MM_KERNEL_HEAP)
+void sched_kfree(FAR void *address);
+#else
+#  define sched_kfree(a) sched_ufree(a)
+#endif

 /* Functions defined in sched/sched_garbage *********************************/

 /* Must be called periodically to clean up deallocations delayed by
- * sched_free().  This may be done from either the IDLE thread or from a
+ * sched_kfree().  This may be done from either the IDLE thread or from a
  * worker thread.  The IDLE thread has very low priority and could starve
  * the system for memory in some context.
  */
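
The comment block above describes the deferred-free protocol. As a runnable illustration of the same technique in plain C (nothing here is NuttX code; names are invented for the demo, and the interrupt locking the real code uses is omitted), a block freed where free() is unsafe is chained through its own storage and reclaimed by a later collection pass:

#include <stdlib.h>

/* Each deferred block must be at least pointer-sized, since the link
 * node is overlaid on the freed storage itself (as with sq_entry_t). */

struct delayed_node { struct delayed_node *flink; };

static struct delayed_node *g_delayed_head;   /* ~ g_delayed_kufree */

static void deferred_free(void *address)      /* ~ sched_ufree(), IRQ path */
{
  struct delayed_node *node = address;        /* reuse the dead block */
  node->flink = g_delayed_head;
  g_delayed_head = node;
}

static void garbage_collect(void)             /* ~ sched_garbagecollection() */
{
  while (g_delayed_head)
    {
      struct delayed_node *node = g_delayed_head;
      g_delayed_head = node->flink;
      free(node);                             /* safe in this context */
    }
}

int main(void)
{
  deferred_free(malloc(64));
  deferred_free(malloc(128));
  garbage_collect();                          /* both blocks released here */
  return 0;
}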
@@ -49,6 +49,8 @@

 #include "lib_internal.h"

+#if !defined(CONFIG_NUTTX_KERNEL) || defined(__KERNEL__)
+
 /************************************************************
  * Definitions
  ************************************************************/
@@ -138,12 +140,13 @@ void lib_releaselist(FAR struct streamlist *list)

       if (list->sl_streams[i].fs_bufstart)
         {
-          sched_free(list->sl_streams[i].fs_bufstart);
+          sched_ufree(list->sl_streams[i].fs_bufstart);
         }
     }
 #endif
 }

+#endif /* !CONFIG_NUTTX_KERNEL || __KERNEL__ */
 #endif /* CONFIG_NFILE_STREAMS */

@@ -239,6 +239,7 @@ void kmm_givesemaphore(void)
  *
  ************************************************************************/

+#ifdef CONFIG_DEBUG
 bool kmm_heapmember(FAR void *mem)
 {
 #if CONFIG_MM_REGIONS > 1
@@ -278,5 +279,6 @@ bool kmm_heapmember(FAR void *mem)

 #endif
 }
+#endif

 #endif /* CONFIG_NUTTX_KERNEL && CONFIG_MM_KERNEL_HEAP && __KERNEL__ */
@@ -378,13 +378,13 @@ void uip_grpfree(FAR struct uip_driver_s *dev, FAR struct igmp_group_s *group)
   else
 #endif
     {
-      /* No.. deallocate the group structure.  Use sched_free() just in case
+      /* No.. deallocate the group structure.  Use sched_kfree() just in case
        * this function is executing within an interrupt handler.
        */

       uip_unlock(flags);
-      grplldbg("Call sched_free()\n");
-      sched_free(group);
+      grplldbg("Call sched_kfree()\n");
+      sched_kfree(group);
     }
 }

@@ -101,7 +101,7 @@ int env_dup(FAR struct task_group_s *group)
       /* Yes..The parent task has an environment, duplicate it */

       envlen = ptcb->group->tg_envsize;
-      envp   = (FAR char *)kmalloc(envlen);
+      envp   = (FAR char *)kumalloc(envlen);
       if (!envp)
         {
           ret = -ENOMEM;
@@ -85,7 +85,7 @@ void env_release(FAR struct task_group_s *group)
     {
       /* Free the environment */

-      sched_free(group->tg_envp);
+      sched_ufree(group->tg_envp);
     }

   /* In any event, make sure that all environment-related variables in the
@@ -161,7 +161,7 @@ int setenv(FAR const char *name, FAR const char *value, int overwrite)
   if (group->tg_envp)
     {
       newsize = group->tg_envsize + varlen;
-      newenvp = (FAR char *)krealloc(group->tg_envp, newsize);
+      newenvp = (FAR char *)kurealloc(group->tg_envp, newsize);
       if (!newenvp)
         {
           ret = ENOMEM;
@@ -173,7 +173,7 @@ int setenv(FAR const char *name, FAR const char *value, int overwrite)
   else
     {
       newsize = varlen;
-      newenvp = (FAR char *)kmalloc(varlen);
+      newenvp = (FAR char *)kumalloc(varlen);
       if (!newenvp)
         {
           ret = ENOMEM;
@@ -98,7 +98,7 @@ int unsetenv(FAR const char *name)
       /* Reallocate the new environment buffer */

       newsize = group->tg_envsize;
-      newenvp = (FAR char *)krealloc(group->tg_envp, newsize);
+      newenvp = (FAR char *)kurealloc(group->tg_envp, newsize);
       if (!newenvp)
         {
           set_errno(ENOMEM);
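
Taken together, these hunks draw a consistent line: memory the user task must access directly (stacks, the environment, stdio buffers) moves to the user-heap calls, while OS-private objects (TCBs, groups, message queues, semaphores) keep kernel-side frees. A hedged sketch of that call-site policy follows; the function and its pairing of calls are illustrative only, not code from this commit (signatures are those declared in the kmalloc.h hunk above):

#include <nuttx/kmalloc.h>
#include <nuttx/sched.h>

/* Illustrative only: allocate-and-free pairings as applied across the
 * hunks.  The environment buffer mirrors env_dup()/env_release(); the
 * TCB free mirrors sched_releasetcb() later in this commit.           */

static void allocation_policy_example(FAR struct tcb_s *tcb, size_t envlen)
{
  FAR char *envp = (FAR char *)kumalloc(envlen); /* user-visible: environ */
  sched_ufree(envp);                             /* user heap, IRQ-safe   */
  sched_kfree(tcb);                              /* kernel-only structure */
}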
@@ -213,14 +213,14 @@ static inline void group_release(FAR struct task_group_s *group)

   if (group->tg_members)
     {
-      sched_free(group->tg_members);
+      sched_kfree(group->tg_members);
       group->tg_members = NULL;
     }
 #endif

   /* Release the group container itself */

-  sched_free(group);
+  sched_kfree(group);
 }

 /*****************************************************************************
@@ -125,7 +125,7 @@ void mq_msgfree(FAR mqmsg_t *mqmsg)

   else if (mqmsg->type == MQ_ALLOC_DYN)
     {
-      sched_free(mqmsg);
+      sched_kfree(mqmsg);
     }
   else
     {
@@ -104,5 +104,5 @@ void mq_msgqfree(FAR msgq_t *msgq)

   /* Then deallocate the message queue itself */

-  sched_free(msgq);
+  sched_kfree(msgq);
 }
@@ -215,7 +215,7 @@ mqd_t mq_open(const char *mq_name, int oflags, ...)
            * uninitialized, mq_deallocate() is not used.
            */

-          sched_free(msgq);
+          sched_kfree(msgq);
         }
     }
 }
@@ -216,13 +216,17 @@ extern volatile dq_queue_t g_waitingforfill;

 extern volatile dq_queue_t g_inactivetasks;

-/* This is the list of dayed memory deallocations that need to be handled
- * within the IDLE loop.  These deallocations get queued by sched_free()
- * if the OS attempts to deallocate memory while it is within an interrupt
- * handler.
+/* These are lists of delayed memory deallocations that need to be handled
+ * within the IDLE loop or worker thread.  These deallocations get queued
+ * by sched_ufree() and sched_kfree() if the OS needs to deallocate memory
+ * while it is within an interrupt handler.
  */

-extern volatile sq_queue_t g_delayeddeallocations;
+extern volatile sq_queue_t g_delayed_kufree;
+
+#if defined(CONFIG_NUTTX_KERNEL) && defined(CONFIG_MM_KERNEL_HEAP)
+extern volatile sq_queue_t g_delayed_kfree;
+#endif

 /* This is the value of the last process ID assigned to a task */

@@ -141,13 +141,17 @@ volatile dq_queue_t g_waitingforfill;

 volatile dq_queue_t g_inactivetasks;

-/* This is the list of dayed memory deallocations that need to be handled
- * within the IDLE loop.  These deallocations get queued by sched_free()
- * if the OS attempts to deallocate memory while it is within an interrupt
- * handler.
+/* These are lists of delayed memory deallocations that need to be handled
+ * within the IDLE loop or worker thread.  These deallocations get queued
+ * by sched_ufree() and sched_kfree() if the OS needs to deallocate memory
+ * while it is within an interrupt handler.
  */

-volatile sq_queue_t g_delayeddeallocations;
+volatile sq_queue_t g_delayed_kufree;
+
+#if defined(CONFIG_NUTTX_KERNEL) && defined(CONFIG_MM_KERNEL_HEAP)
+volatile sq_queue_t g_delayed_kfree;
+#endif

 /* This is the value of the last process ID assigned to a task */

@@ -249,7 +253,10 @@ void os_start(void)
   dq_init(&g_waitingforfill);
 #endif
   dq_init(&g_inactivetasks);
-  sq_init(&g_delayeddeallocations);
+  sq_init(&g_delayed_kufree);
+#if defined(CONFIG_NUTTX_KERNEL) && defined(CONFIG_MM_KERNEL_HEAP)
+  sq_init(&g_delayed_kfree);
+#endif

   /* Initialize the logic that determines unique process IDs. */

@@ -300,6 +300,6 @@ void pthread_destroyjoin(FAR struct task_group_s *group,

   /* And deallocate the pjoin structure */

-  sched_free(pjoin);
+  sched_kfree(pjoin);
 }

@@ -441,7 +441,7 @@ int pthread_create(FAR pthread_t *thread, FAR pthread_attr_t *attr,
   return ret;

 errout_with_join:
-  sched_free(pjoin);
+  sched_kfree(pjoin);
   ptcb->joininfo = NULL;

 errout_with_tcb:
@@ -114,7 +114,7 @@ void pthread_release(FAR struct task_group_s *group)

       /* And deallocate the join structure */

-      sched_free(join);
+      sched_kfree(join);
     }

   /* Destroy the join list semaphore */

@@ -1,7 +1,7 @@
 /************************************************************************
  * sched/sched_free.c
  *
- * Copyright (C) 2007, 2009, 2012 Gregory Nutt. All rights reserved.
+ * Copyright (C) 2007, 2009, 2012-2013 Gregory Nutt. All rights reserved.
  * Author: Gregory Nutt <gnutt@nuttx.org>
  *
  * Redistribution and use in source and binary forms, with or without
@@ -72,18 +72,61 @@
  ************************************************************************/

 /************************************************************************
- * Name: sched_free
+ * Name: sched_ufree and sched_kfree
  *
  * Description:
- *   This function performs deallocations that the operating system may
- *   need to make.  This special interface to free is used to handling
+ *   These functions perform deallocations that the operating system may
+ *   need to make.  This special interface to free is used in handling
  *   corner cases where the operating system may have to perform
  *   deallocations from within an interrupt handler.
  *
  ************************************************************************/

-void sched_free(FAR void *address)
+void sched_ufree(FAR void *address)
 {
+  irqstate_t flags;
+
+  /* Check if this is an attempt to deallocate memory from an exception
+   * handler.  If this function is called from the IDLE task, then we
+   * must have exclusive access to the memory manager to do this.
+   */
+
+  if (up_interrupt_context() || kumm_trysemaphore() != 0)
+    {
+      /* Yes.. Make sure that this is not an attempt to free kernel memory
+       * using the user deallocator.
+       */
+
+      flags = irqsave();
+#if defined(CONFIG_NUTTX_KERNEL) && defined(CONFIG_MM_KERNEL_HEAP)
+      DEBUGASSERT(!kmm_heapmember(address));
+#endif
+
+      /* Delay the deallocation until a more appropriate time. */
+
+      sq_addlast((FAR sq_entry_t*)address, (sq_queue_t*)&g_delayed_kufree);
+
+      /* Signal the worker thread that it has some clean up to do */
+
+#ifdef CONFIG_SCHED_WORKQUEUE
+      work_signal(LPWORK);
+#endif
+      irqrestore(flags);
+    }
+  else
+    {
+      /* No.. just deallocate the memory now. */
+
+      kufree(address);
+      kumm_givesemaphore();
+    }
+}
+
+#if defined(CONFIG_NUTTX_KERNEL) && defined(CONFIG_MM_KERNEL_HEAP)
+void sched_kfree(FAR void *address)
+{
+  irqstate_t flags;
+
   /* Check if this is an attempt to deallocate memory from an exception
    * handler.  If this function is called from the IDLE task, then we
    * must have exclusive access to the memory manager to do this.
@@ -91,17 +134,23 @@ void sched_free(FAR void *address)

   if (up_interrupt_context() || kmm_trysemaphore() != 0)
     {
-      /* Yes.. Delay the deallocation until a more appropriate time. */
+      /* Yes.. Make sure that this is not an attempt to free user memory
+       * using the kernel deallocator.
+       */

-      irqstate_t saved_state = irqsave();
-      sq_addlast((FAR sq_entry_t*)address, (sq_queue_t*)&g_delayeddeallocations);
+      flags = irqsave();
+      DEBUGASSERT(kmm_heapmember(address));
+
+      /* Delay the deallocation until a more appropriate time. */
+
+      sq_addlast((FAR sq_entry_t*)address, (sq_queue_t*)&g_delayed_kfree);

       /* Signal the worker thread that it has some clean up to do */

 #ifdef CONFIG_SCHED_WORKQUEUE
       work_signal(LPWORK);
 #endif
-      irqrestore(saved_state);
+      irqrestore(flags);
     }
   else
     {
@@ -111,4 +160,4 @@ void sched_free(FAR void *address)
       kmm_givesemaphore();
     }
 }
-
+#endif
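
Note that the two paths assert heap membership in opposite directions: sched_ufree() insists the address is not in the kernel heap, while sched_kfree() insists it is. A minimal standalone model of such a membership predicate, assuming a single contiguous region (NuttX's kmm_heapmember() additionally handles multiple regions; this is a sketch, not its implementation):

#include <stdbool.h>
#include <stdint.h>

/* True when the address falls inside one contiguous heap region.  The
 * bounds would be recorded when the heap is initialized. */

static uintptr_t g_heap_start;
static uintptr_t g_heap_end;     /* one past the last heap byte */

static bool heap_member(const void *mem)
{
  uintptr_t addr = (uintptr_t)mem;
  return addr >= g_heap_start && addr < g_heap_end;
}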
@@ -62,6 +62,102 @@
  * Private Functions
  ****************************************************************************/

+/****************************************************************************
+ * Name: sched_kucleanup
+ *
+ * Description:
+ *   Clean-up deferred de-allocations of user memory
+ *
+ * Input parameters:
+ *   None
+ *
+ * Returned Value:
+ *   None
+ *
+ ****************************************************************************/
+
+static inline void sched_kucleanup(void)
+{
+  irqstate_t flags;
+  FAR void *address;
+
+  /* Test if the delayed deallocation queue is empty.  No special protection
+   * is needed because this is an atomic test.
+   */
+
+  while (g_delayed_kufree.head)
+    {
+      /* Remove the first delayed deallocation.  This is not atomic and so
+       * we must disable interrupts around the queue operation.
+       */
+
+      flags = irqsave();
+      address = (FAR void*)sq_remfirst((FAR sq_queue_t*)&g_delayed_kufree);
+      irqrestore(flags);
+
+      /* The address should always be non-NULL since that was checked in the
+       * 'while' condition above.
+       */
+
+      if (address)
+        {
+          /* Return the memory to the user heap */
+
+          kufree(address);
+        }
+    }
+}
+
+/****************************************************************************
+ * Name: sched_kcleanup
+ *
+ * Description:
+ *   Clean-up deferred de-allocations of kernel memory
+ *
+ * Input parameters:
+ *   None
+ *
+ * Returned Value:
+ *   None
+ *
+ ****************************************************************************/
+
+#if defined(CONFIG_NUTTX_KERNEL) && defined(CONFIG_MM_KERNEL_HEAP)
+static inline void sched_kcleanup(void)
+{
+  irqstate_t flags;
+  FAR void *address;
+
+  /* Test if the delayed deallocation queue is empty.  No special protection
+   * is needed because this is an atomic test.
+   */
+
+  while (g_delayed_kfree.head)
+    {
+      /* Remove the first delayed deallocation.  This is not atomic and so
+       * we must disable interrupts around the queue operation.
+       */
+
+      flags = irqsave();
+      address = (FAR void*)sq_remfirst((FAR sq_queue_t*)&g_delayed_kfree);
+      irqrestore(flags);
+
+      /* The address should always be non-NULL since that was checked in the
+       * 'while' condition above.
+       */
+
+      if (address)
+        {
+          /* Return the memory to the kernel heap */
+
+          kfree(address);
+        }
+    }
+}
+#else
+#  define sched_kcleanup()
+#endif
+
 /****************************************************************************
  * Public Functions
  ****************************************************************************/
@@ -89,49 +185,11 @@

 void sched_garbagecollection(void)
 {
-  irqstate_t flags;
-  FAR void *address;
+  /* Handle deferred deallocations for the kernel heap */

-  /* Test if the delayed deallocation queue is empty.  No special protection
-   * is needed because this is an atomic test.
-   */
+  sched_kcleanup();

-  while (g_delayeddeallocations.head)
-    {
-      /* Remove the first delayed deallocation.  This is not atomic and so
-       * we must disable interrupts around the queue operation.
-       */
+  /* Handle deferred deallocations for the user heap */

-      flags = irqsave();
-      address = (FAR void*)sq_remfirst((FAR sq_queue_t*)&g_delayeddeallocations);
-      irqrestore(flags);
-
-      /* The address should always be non-NULL since that was checked in the
-       * 'while' condition above.
-       */
-
-      if (address)
-        {
-#if defined(CONFIG_NUTTX_KERNEL) && defined(CONFIG_MM_KERNEL_HEAP)
-          /* Does the address to be freed lie in the kernel heap? */
-
-          if (kmm_heapmember(address))
-            {
-              /* Yes.. return the memory to the kernel heap */
-
-              kfree(address);
-            }
-
-          /* No.. then the address must lie in the user heap (unchecked) */
-
-          else
-#endif
-            {
-              /* Return the memory to the user heap */
-
-              kufree(address);
-            }
-        }
-    }
+  sched_kucleanup();
 }
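
With the rewrite, the collector body reduces to two calls; what remains is how it gets driven. Per the work_signal(LPWORK) calls in the sched_ufree()/sched_kfree() hunks, it runs on the low-priority worker when the work queue is configured, otherwise from the IDLE loop. A sketch of the latter follows; the real up_idle() implementations are per-architecture, so this is an assumed shape, not a quote:

/* Assumed shape of an IDLE loop driving the collector. */

void idle_task(void)
{
  for (;;)
    {
      sched_garbagecollection();  /* drain g_delayed_kfree, g_delayed_kufree */
      /* ...then wait for the next interrupt or scheduling event */
    }
}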
@@ -142,7 +142,7 @@ int sched_releasetcb(FAR struct tcb_s *tcb)
     {
       if (tcb->dspace->crefs <= 1)
         {
-          sched_free(tcb->dspace);
+          sched_kfree(tcb->dspace);
         }
       else
         {
@@ -162,7 +162,7 @@ int sched_releasetcb(FAR struct tcb_s *tcb)
       FAR struct task_tcb_s *ttcb = (FAR struct task_tcb_s *)tcb;
       for (i = 1; i < CONFIG_MAX_TASK_ARGS+1 && ttcb->argv[i]; i++)
         {
-          sched_free((FAR void*)ttcb->argv[i]);
+          sched_kfree((FAR void*)ttcb->argv[i]);
         }
     }

@@ -179,7 +179,7 @@ int sched_releasetcb(FAR struct tcb_s *tcb)
 #endif
       /* And, finally, release the TCB itself */

-      sched_free(tcb);
+      sched_kfree(tcb);
     }

   return ret;
@@ -128,7 +128,7 @@ int sem_close(FAR sem_t *sem)
       if (!psem->nconnect && psem->unlinked)
         {
           dq_rem((FAR dq_entry_t*)psem, &g_nsems);
-          sched_free(psem);
+          sched_kfree(psem);
         }
       ret = OK;
     }
@@ -118,7 +118,7 @@ int sem_unlink(FAR const char *name)
       if (!psem->nconnect)
         {
           dq_rem((FAR dq_entry_t*)psem, &g_nsems);
-          sched_free(psem);
+          sched_kfree(psem);
         }

       /* If one or more process still has the semaphore open,
@@ -115,6 +115,6 @@ void sig_releasependingsigaction(FAR sigq_t *sigq)

   else if (sigq->type == SIG_ALLOC_DYN)
     {
-      sched_free(sigq);
+      sched_kfree(sigq);
     }
 }
@@ -126,6 +126,6 @@ void sig_releasependingsignal(FAR sigpendq_t *sigpend)

   else if (sigpend->type == SIG_ALLOC_DYN)
     {
-      sched_free(sigpend);
+      sched_kfree(sigpend);
     }
 }
@@ -97,7 +97,7 @@ static inline void timer_free(struct posix_timer_s *timer)
       /* Otherwise, return it to the heap */

       irqrestore(flags);
-      sched_free(timer);
+      sched_kfree(timer);
     }
 }