1
0
Fork 0
forked from nuttx/nuttx-update

Completes the implementation of sbrk() (untested)

This commit is contained in:
Gregory Nutt 2014-09-01 10:46:51 -06:00
parent a33c0533f4
commit 45c31d633c
16 changed files with 364 additions and 40 deletions

View file

@ -135,9 +135,7 @@ struct group_addrenv_s
{
FAR uintptr_t *text[ARCH_TEXT_NSECTS];
FAR uintptr_t *data[ARCH_DATA_NSECTS];
#if 0 /* Not yet implemented */
FAR uintptr_t *heap[ARCH_HEAP_NSECTS];
#endif
};
typedef struct group_addrenv_s group_addrenv_t;

View file

@ -87,7 +87,7 @@ CMN_CSRCS += up_task_start.c up_pthread_start.c up_stackframe.c
endif
ifeq ($(CONFIG_ARCH_ADDRENV),y)
CMN_CSRCS += arm_addrenv.c
CMN_CSRCS += arm_addrenv.c arm_pgalloc.c
endif
ifeq ($(CONFIG_ELF),y)

View file

@ -199,7 +199,7 @@ static int up_addrenv_create_region(FAR uintptr_t **list,
}
DEBUGASSERT(MM_ISALIGNED(paddr));
list[i] = (FAR uint32_t *)paddr;
list[i] = (FAR uintptr_t *)paddr;
/* Temporarily map the page into the virtual address space */
@ -241,7 +241,7 @@ static int up_addrenv_create_region(FAR uintptr_t **list,
(uintptr_t)l2table +
ENTRIES_PER_L2TABLE * sizeof(uint32_t));
/* Restore the original L1 page table entry */
/* Restore the scratch section L1 page table entry */
mmu_l1_restore(ARCH_SCRATCH_VBASE, l1save);
irqrestore(flags);
@ -300,7 +300,7 @@ static void up_addrenv_destroy_region(FAR uintptr_t **list,
}
}
/* Restore the original L1 page table entry */
/* Restore the scratch section L1 page table entry */
mmu_l1_restore(ARCH_SCRATCH_VBASE, l1save);
irqrestore(flags);

View file

@ -0,0 +1,280 @@
/****************************************************************************
* arch/arm/src/armv7/arm_pgalloc.c
*
* Copyright (C) 2014 Gregory Nutt. All rights reserved.
* Author: Gregory Nutt <gnutt@nuttx.org>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* 3. Neither the name NuttX nor the names of its contributors may be
* used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <string.h>
#include <errno.h>
#include <debug.h>
#include <nuttx/sched.h>
#include <nuttx/arch.h>
#include <nuttx/addrenv.h>
#include "cache.h"
#include "mmu.h"
#if defined(CONFIG_MM_PGALLOC) && defined(CONFIG_ARCH_USE_MMU)
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
/****************************************************************************
* Private Data
****************************************************************************/
/****************************************************************************
* Private Functions
****************************************************************************/
/****************************************************************************
* Name: alloc_pgtable
*
* Description:
* Add one page table to a memory region.
*
****************************************************************************/
static uintptr_t alloc_pgtable(void)
{
irqstate_t flags;
uintptr_t paddr;
FAR uint32_t *l2table;
uint32_t l1save;
/* Allocate one physical page for the L2 page table */
paddr = mm_pgalloc(1);
if (paddr)
{
DEBUGASSERT(MM_ISALIGNED(paddr));
/* Temporarily map the page into the virtual address space */
flags = irqsave();
l1save = mmu_l1_getentry(ARCH_SCRATCH_VBASE);
mmu_l1_setentry(paddr & ~SECTION_MASK, ARCH_SCRATCH_VBASE, MMU_MEMFLAGS);
l2table = (FAR uint32_t *)(ARCH_SCRATCH_VBASE | (paddr & SECTION_MASK));
/* Initialize the page table */
memset(l2table, 0, MM_PGSIZE);
/* Make sure that the initialized L2 table is flushed to physical
* memory.
*/
arch_flush_dcache((uintptr_t)l2table,
(uintptr_t)l2table + MM_PGSIZE);
/* Restore the scratch section page table entry */
mmu_l1_restore(ARCH_SCRATCH_VBASE, l1save);
irqrestore(flags);
}
return paddr;
}
/****************************************************************************
* Name: get_pgtable
*
* Description:
* Return the physical address of the L2 page table corresponding to
* 'vaddr'
*
****************************************************************************/
/****************************************************************************
 * Name: get_pgtable
 *
 * Description:
 *   Return the physical address of the L2 page table corresponding to
 *   'vaddr', allocating (and installing) a new L2 table if none exists yet.
 *
 * Input Parameters:
 *   addrenv - The address environment of the task group
 *   vaddr   - A virtual address within the user heap region
 *
 * Returned Value:
 *   The physical address of the L2 page table, or zero on failure (vaddr
 *   out of range or page table allocation failure).
 *
 ****************************************************************************/

/* REVIEW: return type changed from int to uintptr_t -- the value is a
 * physical address and the caller assigns it to a uintptr_t; returning int
 * relies on implementation-defined signed conversion for high addresses.
 */

static uintptr_t get_pgtable(FAR group_addrenv_t *addrenv, uintptr_t vaddr)
{
  uintptr_t l1entry;
  uintptr_t paddr;
  unsigned int hpoffset;
  unsigned int hpndx;

  /* The current implementation only supports extending the user heap
   * region as part of the implementation of user sbrk().
   */

  /* FIX: was 'vadddr' (undeclared identifier) -- would not compile with
   * DEBUGASSERT enabled.
   */

  DEBUGASSERT(vaddr >= CONFIG_ARCH_HEAP_VBASE && vaddr < ARCH_HEAP_VEND);

  /* Get the current level 1 entry corresponding to this vaddr */

  hpoffset = vaddr - CONFIG_ARCH_HEAP_VBASE;
  if (hpoffset >= ARCH_HEAP_SIZE)
    {
      return 0;
    }

  /* Each L1 entry spans 1MiB (20 bits) of virtual address space */

  hpndx = hpoffset >> 20;
  l1entry = (uintptr_t)addrenv->heap[hpndx];
  if (l1entry == 0)
    {
      /* No page table has been allocated... allocate one now */

      paddr = alloc_pgtable();
      if (paddr != 0)
        {
          /* Set the new level 1 page table entry in the address
           * environment.
           */

          l1entry = paddr | MMU_L1_PGTABFLAGS;
          addrenv->heap[hpndx] = (FAR uintptr_t *)l1entry;

          /* And instantiate the modified environment */

          (void)up_addrenv_select(addrenv, NULL);
        }
    }

  /* Strip the MMU flag bits, leaving only the page table physical
   * address.  Zero is returned if allocation failed above.
   */

  return l1entry & ~SECTION_MASK;
}
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
* Name: pgalloc
*
* Description:
* If there is a page allocator in the configuration and if and MMU is
* available to map physical addresses to virtual address, then function
* must be provided by the platform-specific code. This is part of the
* implementation of sbrk(). This function will allocate the requested
* number of pages using the page allocator and map them into consecutive
* virtual addresses beginning with 'brkaddr'
*
* NOTE: This function does not use the up_ naming standard because it
* is indirectly callable from user-space code via a system trap.
* Therefore, it is a system interface and follows a different naming
* convention.
*
****************************************************************************/
/****************************************************************************
 * Name: pgalloc
 *
 * Description:
 *   Allocate 'npages' pages using the page allocator and map them into
 *   consecutive virtual addresses beginning with 'vaddr'.  Part of the
 *   implementation of sbrk().
 *
 * Input Parameters:
 *   vaddr  - The first virtual address to back, or zero meaning that no
 *            heap has yet been allocated (CONFIG_ARCH_HEAP_VBASE is used)
 *   npages - The number of pages to allocate and map
 *
 * Returned Value:
 *   OK on success; a negated errno value on failure:
 *     -ENOMEM - Could not allocate an L2 page table
 *     -EAGAIN - Could not allocate a physical page to back vaddr
 *
 ****************************************************************************/

int pgalloc(uintptr_t vaddr, unsigned int npages)
{
  FAR struct tcb_s *tcb = sched_self();
  FAR struct task_group_s *group;
  FAR uint32_t *l2table;
  irqstate_t flags;
  uintptr_t paddr;
  uint32_t l1save;
  unsigned int index;

  DEBUGASSERT(tcb && tcb->group);
  group = tcb->group;

  /* The current implementation only supports extending the user heap
   * region as part of the implementation of user sbrk().  This function
   * needs to be expanded to also handle (1) extending the user stack
   * space and (2) extending the kernel memory regions as well.
   */

  DEBUGASSERT((group->flags & GROUP_FLAG_ADDRENV) != 0);

  /* vaddr = 0 means that no heap has yet been allocated */

  if (vaddr == 0)
    {
      vaddr = CONFIG_ARCH_HEAP_VBASE;
    }

  /* FIX: was 'vadddr' (undeclared identifier) -- would not compile with
   * DEBUGASSERT enabled.
   */

  DEBUGASSERT(vaddr >= CONFIG_ARCH_HEAP_VBASE && vaddr < ARCH_HEAP_VEND);
  DEBUGASSERT(MM_ISALIGNED(vaddr));

  for (; npages > 0; npages--)
    {
      /* Get the physical address of the level 2 page table */

      paddr = get_pgtable(&group->addrenv, vaddr);
      if (paddr == 0)
        {
          return -ENOMEM; /* ENOMEM might have correct meaning for sbrk? */
        }

      /* Temporarily map the level 2 page table into the "scratch" virtual
       * address space
       */

      flags = irqsave();
      l1save = mmu_l1_getentry(ARCH_SCRATCH_VBASE);
      mmu_l1_setentry(paddr & ~SECTION_MASK, ARCH_SCRATCH_VBASE, MMU_MEMFLAGS);
      l2table = (FAR uint32_t *)(ARCH_SCRATCH_VBASE | (paddr & SECTION_MASK));

      /* Back up L2 entry with physical memory */

      paddr = mm_pgalloc(1);
      if (paddr == 0)
        {
          mmu_l1_restore(ARCH_SCRATCH_VBASE, l1save);
          irqrestore(flags);
          return -EAGAIN; /* ENOMEM has different meaning for sbrk */
        }

      /* The table divides a 1Mb address space up into 256 entries, each
       * corresponding to 4Kb of address space.  The page table index is
       * related to the offset from the beginning of 1Mb region.
       */

      index = (vaddr & 0x000ff000) >> 12;

      /* FIX: comment said ".text region" (copy-paste); this maps a heap
       * page.  Map the heap region virtual address to this physical
       * address.
       */

      DEBUGASSERT(l2table[index] == 0);
      l2table[index] = paddr | MMU_L2_UDATAFLAGS;
      vaddr += MM_PGSIZE;

      /* Make sure that the modified L2 table is flushed to physical
       * memory.
       */

      arch_flush_dcache((uintptr_t)&l2table[index],
                        (uintptr_t)&l2table[index] + sizeof(uint32_t));

      /* Restore the scratch L1 page table entry */

      mmu_l1_restore(ARCH_SCRATCH_VBASE, l1save);
      irqrestore(flags);
    }

  return OK;
}
#endif /* CONFIG_MM_PGALLOC && CONFIG_ARCH_USE_MMU */

View file

@ -89,7 +89,7 @@ CMN_CSRCS += up_task_start.c up_pthread_start.c up_stackframe.c
endif
ifeq ($(CONFIG_ARCH_ADDRENV),y)
CMN_CSRCS += arm_addrenv.c
CMN_CSRCS += arm_addrenv.c arm_pgalloc.c
endif
ifeq ($(CONFIG_ELF),y)

View file

@ -74,13 +74,14 @@
# define CONFIG_ARCH_TEXT_NPAGES 1
#endif
#define CONFIG_ARCH_TEXT_SIZE (CONFIG_ARCH_TEXT_NPAGES * CONFIG_MM_PGSIZE)
#define ARCH_TEXT_SIZE (CONFIG_ARCH_TEXT_NPAGES * CONFIG_MM_PGSIZE)
#define ARCH_TEXT_VEND (CONFIG_ARCH_TEXT_VBASE + ARCH_TEXT_SIZE)
/* .bss/.data region */
#ifndef CONFIG_ARCH_DATA_VBASE
# error CONFIG_ARCH_DATA_VBASE not defined
# define CONFIG_ARCH_DATA_VBASE (CONFIG_ARCH_TEXT_VBASE + CONFIG_ARCH_TEXT_SIZE)
# define CONFIG_ARCH_DATA_VBASE (CONFIG_ARCH_TEXT_VBASE + ARCH_TEXT_SIZE)
#endif
#if (CONFIG_ARCH_DATA_VBASE & CONFIG_MM_MASK) != 0
@ -92,13 +93,14 @@
# define CONFIG_ARCH_DATA_NPAGES 1
#endif
#define CONFIG_ARCH_DATA_SIZE (CONFIG_ARCH_DATA_NPAGES * CONFIG_MM_PGSIZE)
#define ARCH_DATA_SIZE (CONFIG_ARCH_DATA_NPAGES * CONFIG_MM_PGSIZE)
#define ARCH_DATA_VEND (CONFIG_ARCH_DATA_VBASE + ARCH_DATA_SIZE)
/* Heap region */
#ifndef CONFIG_ARCH_HEAP_VBASE
# error CONFIG_ARCH_HEAP_VBASE not defined
# define CONFIG_ARCH_HEAP_VBASE (CONFIG_ARCH_DATA_VBASE + CONFIG_ARCH_DATA_SIZE)
# define CONFIG_ARCH_HEAP_VBASE (CONFIG_ARCH_DATA_VBASE + ARCH_DATA_SIZE)
#endif
#if (CONFIG_ARCH_HEAP_VBASE & CONFIG_MM_MASK) != 0
@ -110,13 +112,14 @@
# define CONFIG_ARCH_HEAP_NPAGES 1
#endif
#define CONFIG_ARCH_HEAP_SIZE (CONFIG_ARCH_HEAP_NPAGES * CONFIG_MM_PGSIZE)
#define ARCH_HEAP_SIZE (CONFIG_ARCH_HEAP_NPAGES * CONFIG_MM_PGSIZE)
#define ARCH_HEAP_VEND (CONFIG_ARCH_HEAP_VBASE + ARCH_HEAP_SIZE)
/* Stack region */
#ifndef CONFIG_ARCH_STACK_VBASE
# error CONFIG_ARCH_STACK_VBASE not defined
# define CONFIG_ARCH_STACK_VBASE (CONFIG_ARCH_HEAP_VBASE + CONFIG_ARCH_HEAP_SIZE)
# define CONFIG_ARCH_STACK_VBASE (CONFIG_ARCH_HEAP_VBASE + ARCH_HEAP_SIZE)
#endif
#if (CONFIG_ARCH_STACK_VBASE & CONFIG_MM_MASK) != 0
@ -128,11 +131,12 @@
# define CONFIG_ARCH_STACK_NPAGES 1
#endif
#define CONFIG_ARCH_STACK_SIZE (CONFIG_ARCH_STACK_NPAGES * CONFIG_MM_PGSIZE)
#define ARCH_STACK_SIZE (CONFIG_ARCH_STACK_NPAGES * CONFIG_MM_PGSIZE)
#define ARCH_STACK_VEND (CONFIG_ARCH_STACK_VBASE + ARCH_STACK_SIZE)
/* A single page scratch region used for temporary mappings */
#define ARCH_SCRATCH_VBASE (CONFIG_ARCH_STACK_VBASE + CONFIG_ARCH_STACK_SIZE)
#define ARCH_SCRATCH_VBASE (CONFIG_ARCH_STACK_VBASE + ARCH_STACK_SIZE)
/****************************************************************************
* Private Data

View file

@ -689,7 +689,7 @@ void up_allocate_pgheap(FAR void **heap_start, size_t *heap_size);
*
****************************************************************************/
#if defined(CONFIG_MM_PGALLOC) && defined(CONFIG_ARCH_USE_MMU)
#ifdef CONFIG_ARCH_ADDRENV
int pgalloc(uintptr_t vaddr, unsigned int npages);
#endif

View file

@ -397,14 +397,14 @@ FAR void *kmm_brkaddr(int region);
/* Functions contained in mm_sbrk.c *****************************************/
#if defined(CONFIG_MM_PGALLOC) && defined(CONFIG_ARCH_USE_MMU)
#ifdef CONFIG_ARCH_ADDRENV
FAR void *mm_sbrk(FAR struct mm_heap_s *heap, intptr_t incr,
uintptr_t maxbreak);
#endif
/* Functions contained in kmm_sbrk.c ****************************************/
#if defined(CONFIG_MM_PGALLOC) && defined(CONFIG_ARCH_USE_MMU)
#ifdef CONFIG_ARCH_ADDRENV
FAR void *kmm_sbrk(intptr_t incr);
#endif

View file

@ -47,7 +47,7 @@
* Pre-processor Definitions
****************************************************************************/
/* The number of functions that may be registerd to be called
/* The number of functions that may be registered to be called
* at program exit.
*/
@ -149,7 +149,7 @@ ssize_t write(int fd, FAR const void *buf, size_t nbytes);
/* Memory management */
#if defined(CONFIG_MM_PGALLOC) && defined(CONFIG_ARCH_USE_MMU)
#ifdef CONFIG_ARCH_ADDRENV
FAR void *sbrk(intptr_t incr);
#endif

View file

@ -56,11 +56,9 @@ CSRCS += mm_shrinkchunk.c
CSRCS += mm_brkaddr.c mm_calloc.c mm_extend.c mm_free.c mm_mallinfo.c
CSRCS += mm_malloc.c mm_memalign.c mm_realloc.c mm_zalloc.c
ifeq ($(CONFIG_MM_PGALLOC),y)
ifeq ($(CONFIG_ARCH_USE_MMU),y)
ifeq ($(CONFIG_ARCH_ADDRENV),y)
CSRCS += mm_sbrk.c
endif
endif
# User allocator
@ -68,11 +66,9 @@ CSRCS += umm_initialize.c umm_addregion.c umm_sem.c
CSRCS += umm_brkaddr.c umm_calloc.c umm_extend.c umm_free.c umm_mallinfo.c
CSRCS += umm_malloc.c umm_memalign.c umm_realloc.c umm_zalloc.c
ifeq ($(CONFIG_MM_PGALLOC),y)
ifeq ($(CONFIG_ARCH_USE_MMU),y)
ifeq ($(CONFIG_ARCH_ADDRENV),y)
CSRCS += umm_sbrk.c
endif
endif
# Kernel allocator
@ -81,11 +77,9 @@ CSRCS += kmm_initialize.c kmm_addregion.c kmm_sem.c
CSRCS += kmm_brkaddr.c kmm_calloc.c kmm_extend.c kmm_free.c kmm_mallinfo.c
CSRCS += kmm_malloc.c kmm_memalign.c kmm_realloc.c kmm_zalloc.c
ifeq ($(CONFIG_MM_PGALLOC),y)
ifeq ($(CONFIG_ARCH_USE_MMU),y)
ifeq ($(CONFIG_ARCH_ADDRENV),y)
CSRCS += kmm_sbrk.c
endif
endif
ifeq ($(CONFIG_DEBUG),y)
CSRCS += kmm_heapmember.c

View file

@ -41,8 +41,7 @@
#include <nuttx/mm.h>
#if defined(CONFIG_MM_KERNEL_HEAP) && defined(CONFIG_MM_PGALLOC) && \
defined(CONFIG_ARCH_USE_MMU)
#if defined(CONFIG_MM_KERNEL_HEAP) && defined(CONFIG_ARCH_ADDRENV)
/****************************************************************************
* Pre-processor Definitions
@ -89,4 +88,4 @@ FAR void *kmm_sbrk(intptr_t incr)
return mm_sbrk(&g_kmmheap, incr, UINTPTR_MAX);
}
#endif /* CONFIG_MM_USER_HEAP && CONFIG_MM_PGALLOC && CONFIG_ARCH_USE_MMU */
#endif /* CONFIG_MM_USER_HEAP && CONFIG_ARCH_ADDRENV */

View file

@ -55,7 +55,8 @@
* Name: mm_brkaddr
*
* Description:
* Return the break address of a heap region
* Return the break address of a heap region. Zero is returned if the
* memory region is not initialized.
*
****************************************************************************/
@ -65,5 +66,5 @@ FAR void *mm_brkaddr(FAR struct mm_heap_s *heap, int region)
DEBUGASSERT(heap && region < heap->mm_nregions);
brkaddr = (uintptr_t)heap->mm_heapend[region];
return (FAR void *)(brkaddr + SIZEOF_MM_ALLOCNODE);
return brkaddr ? (FAR void *)(brkaddr + SIZEOF_MM_ALLOCNODE) : 0;
}

View file

@ -47,7 +47,7 @@
#include <nuttx/mm.h>
#include <nuttx/pgalloc.h>
#if defined(CONFIG_MM_PGALLOC) && defined(CONFIG_ARCH_USE_MMU)
#ifdef CONFIG_ARCH_ADDRENV
/****************************************************************************
* Pre-processor Definitions
@ -115,7 +115,7 @@ FAR void *mm_sbrk(FAR struct mm_heap_s *heap, intptr_t incr,
/* Check if this increment would exceed the maximum break value */
if ((maxbreak - brkaddr) < (pgincr << MM_PGSHIFT))
if ((brkaddr > 0) && ((maxbreak - brkaddr) < (pgincr << MM_PGSHIFT)))
{
err = ENOMEM;
goto errout;
@ -143,4 +143,4 @@ errout:
set_errno(err);
return (FAR void *)-1;
}
#endif /* CONFIG_MM_PGALLOC && CONFIG_ARCH_USE_MMU */
#endif /* CONFIG_ARCH_ADDRENV */

View file

@ -40,6 +40,7 @@
#include <nuttx/config.h>
#include <stdlib.h>
#include <unistd.h>
#include <nuttx/mm.h>
@ -85,7 +86,39 @@
/* Allocate 'size' bytes from the user heap.  In the CONFIG_ARCH_ADDRENV
 * case the heap can grow on demand via sbrk(); otherwise this is a thin
 * wrapper around mm_malloc().
 */

FAR void *malloc(size_t size)
{
#ifdef CONFIG_ARCH_ADDRENV
FAR void *brkaddr;
FAR void *mem;
/* Loop until we successfully allocate the memory or until an error
 * occurs.  If we fail to allocate memory on the first pass, then call
 * sbrk to extend the heap.  This may require several passes if the size
 * of the allocation is more than what a single sbrk() call adds.
 *
 * An alternative would be to increase the size of the heap by the
 * full requested allocation in sbrk().  Then the loop should never
 * execute more than twice (but more memory than we need may be
 * allocated).
 */
do
{
mem = mm_malloc(&g_mmheap, size);
if (!mem)
{
/* Heap exhausted: try to extend the break.  sbrk() returns
 * (void *)-1 and sets errno on failure, terminating the loop.
 */
brkaddr = sbrk(size);
if (brkaddr == (FAR void *)-1)
{
return NULL;
}
}
}
while (mem == NULL);
return mem;
#else
return mm_malloc(&g_mmheap, size);
#endif
}
#endif /* CONFIG_MM_USER_HEAP */

View file

@ -44,8 +44,7 @@
#include <nuttx/mm.h>
#include <nuttx/pgalloc.h>
#if defined(CONFIG_MM_USER_HEAP) && defined(CONFIG_MM_PGALLOC) && \
defined(CONFIG_ARCH_USE_MMU)
#if defined(CONFIG_MM_USER_HEAP) && defined(CONFIG_ARCH_ADDRENV)
/****************************************************************************
* Pre-processor Definitions
@ -92,4 +91,4 @@ FAR void *sbrk(intptr_t incr)
return mm_sbrk(&g_mmheap, incr, CONFIG_ARCH_STACK_NPAGES << MM_PGSHIFT);
}
#endif /* CONFIG_MM_USER_HEAP && CONFIG_MM_PGALLOC && CONFIG_ARCH_USE_MMU */
#endif /* CONFIG_MM_USER_HEAP && CONFIG_ARCH_ADDRENV */

View file

@ -40,6 +40,7 @@
#include <nuttx/config.h>
#include <stdlib.h>
#include <string.h>
#include <nuttx/mm.h>
@ -69,7 +70,22 @@
/* Allocate 'size' bytes from the user heap and zero the allocation. */

FAR void *zalloc(size_t size)
{
#ifdef CONFIG_ARCH_ADDRENV
/* Use malloc() because it implements the sbrk() logic */
FAR void *alloc = malloc(size);
if (alloc)
{
memset(alloc, 0, size);
}
return alloc;
#else
/* Use mm_zalloc() because it implements the clear */
return mm_zalloc(&g_mmheap, size);
#endif
}
#endif /* CONFIG_MM_USER_HEAP */