diff --git a/arch/risc-v/src/common/pgalloc.h b/arch/risc-v/src/common/pgalloc.h
index 40da29805660b..be39da0d3a0d5 100644
--- a/arch/risc-v/src/common/pgalloc.h
+++ b/arch/risc-v/src/common/pgalloc.h
@@ -75,6 +75,10 @@ static inline uintptr_t riscv_pgvaddr(uintptr_t paddr)
     {
       return paddr - CONFIG_ARCH_PGPOOL_PBASE + CONFIG_ARCH_PGPOOL_VBASE;
     }
+  else if (paddr >= CONFIG_RAM_START && paddr < CONFIG_RAM_END)
+    {
+      return paddr - CONFIG_RAM_START + CONFIG_RAM_VSTART;
+    }
 
   return 0;
 }
diff --git a/arch/risc-v/src/common/riscv_addrenv.c b/arch/risc-v/src/common/riscv_addrenv.c
index cb49c1a51daf0..dfdcfb6aea2fb 100644
--- a/arch/risc-v/src/common/riscv_addrenv.c
+++ b/arch/risc-v/src/common/riscv_addrenv.c
@@ -65,6 +65,7 @@
 #include <nuttx/irq.h>
 #include <nuttx/pgalloc.h>
 #include <nuttx/sched.h>
+#include <nuttx/tls.h>
 
 #include <arch/barriers.h>
 
@@ -421,6 +422,12 @@ int up_addrenv_create(size_t textsize, size_t datasize, size_t heapsize,
 
   heapsize = heapsize + MM_PGALIGNUP(CONFIG_DEFAULT_TASK_STACKSIZE);
 
+#ifdef CONFIG_TLS_ALIGNED
+  /* Need more stack for TLS alignment */
+
+  heapsize += MM_PGALIGNUP(2 * TLS_MAXSTACK);
+#endif
+
   /* Map the reserved area */
 
   ret = create_region(addrenv, resvbase, resvsize, MMU_UDATA_FLAGS);
diff --git a/arch/risc-v/src/common/riscv_addrenv_pgmap.c b/arch/risc-v/src/common/riscv_addrenv_pgmap.c
index 9dbac8592fecc..edf9687649105 100644
--- a/arch/risc-v/src/common/riscv_addrenv_pgmap.c
+++ b/arch/risc-v/src/common/riscv_addrenv_pgmap.c
@@ -202,7 +202,7 @@ int up_addrenv_kmap_pages(void **pages, unsigned int npages, uintptr_t vaddr,
 }
 
 /****************************************************************************
- * Name: riscv_unmap_pages
+ * Name: up_addrenv_kunmap_pages
  *
  * Description:
  *   Unmap a previously mapped virtual memory region.
diff --git a/arch/risc-v/src/common/riscv_addrenv_utils.c b/arch/risc-v/src/common/riscv_addrenv_utils.c
index 810563aedd47a..e13fb89ff9e64 100644
--- a/arch/risc-v/src/common/riscv_addrenv_utils.c
+++ b/arch/risc-v/src/common/riscv_addrenv_utils.c
@@ -63,13 +63,22 @@ uintptr_t riscv_get_pgtable(arch_addrenv_t *addrenv, uintptr_t vaddr)
   uintptr_t paddr;
   uintptr_t ptprev;
   uint32_t  ptlevel;
+  uint32_t  flags;
 
   /* Get the current level MAX_LEVELS-1 entry corresponding to this vaddr */
 
   ptlevel = ARCH_SPGTS;
   ptprev  = riscv_pgvaddr(addrenv->spgtables[ARCH_SPGTS - 1]);
-  paddr = mmu_pte_to_paddr(mmu_ln_getentry(ptlevel, ptprev, vaddr));
+  if (!ptprev)
+    {
+      /* Something is very wrong */
+
+      return 0;
+    }
+
+  /* Find the physical address of the final level page table */
+  paddr = mmu_pte_to_paddr(mmu_ln_getentry(ptlevel, ptprev, vaddr));
 
   if (!paddr)
     {
       /* No page table has been allocated... allocate one now */
@@ -77,10 +86,21 @@ uintptr_t riscv_get_pgtable(arch_addrenv_t *addrenv, uintptr_t vaddr)
       paddr = mm_pgalloc(1);
       if (paddr)
         {
+          /* Determine page table flags */
+
+          if (riscv_uservaddr(vaddr))
+            {
+              flags = MMU_UPGT_FLAGS;
+            }
+          else
+            {
+              flags = MMU_KPGT_FLAGS;
+            }
+
           /* Wipe the page and assign it */
 
           riscv_pgwipe(paddr);
-          mmu_ln_setentry(ptlevel, ptprev, paddr, vaddr, MMU_UPGT_FLAGS);
+          mmu_ln_setentry(ptlevel, ptprev, paddr, vaddr, flags);
         }
     }
 
diff --git a/arch/risc-v/src/common/riscv_mmu.h b/arch/risc-v/src/common/riscv_mmu.h
index 0c0c6b5373749..6c9d1baebb997 100644
--- a/arch/risc-v/src/common/riscv_mmu.h
+++ b/arch/risc-v/src/common/riscv_mmu.h
@@ -59,6 +59,10 @@
 
 #define MMU_IO_FLAGS        (PTE_R | PTE_W | PTE_G)
 
+/* Flags for kernel page tables */
+
+#define MMU_KPGT_FLAGS      (PTE_G)
+
 /* Kernel FLASH and RAM are mapped globally */
 
 #define MMU_KTEXT_FLAGS     (PTE_R | PTE_X | PTE_G)
diff --git a/include/nuttx/arch.h b/include/nuttx/arch.h
index 4a647afedb2af..9f4bdf78221ad 100644
--- a/include/nuttx/arch.h
+++ b/include/nuttx/arch.h
@@ -1352,7 +1352,7 @@ int
 up_addrenv_kmap_pages(FAR void **pages, unsigned int npages, uintptr_t vaddr,
 #endif
 /****************************************************************************
- * Name: riscv_unmap_pages
+ * Name: up_addrenv_kunmap_pages
  *
  * Description:
  *   Unmap a previously mapped virtual memory region.
diff --git a/mm/kmap/kmm_map.c b/mm/kmap/kmm_map.c
index 1312e6af0e31c..cb270706f137b 100644
--- a/mm/kmap/kmm_map.c
+++ b/mm/kmap/kmm_map.c
@@ -82,7 +82,7 @@ static int get_user_pages(FAR void **pages, size_t npages, uintptr_t vaddr)
 
   for (i = 0; i < npages; i++, vaddr += MM_PGSIZE)
     {
-      page = up_addrenv_find_page(&tcb->addrenv_own->addrenv, vaddr);
+      page = up_addrenv_find_page(&tcb->addrenv_curr->addrenv, vaddr);
       if (!page)
         {
           /* Something went wrong, get out */
@@ -125,7 +125,7 @@ static FAR void *map_pages(FAR void **pages, size_t npages, int prot)
 
   /* Find a virtual memory area that fits */
 
-  vaddr = gran_alloc(&g_kmm_map_vpages, size);
+  vaddr = gran_alloc(g_kmm_map_vpages, size);
   if (!vaddr)
     {
       return NULL;
@@ -182,7 +182,7 @@ static FAR void *map_single_user_page(uintptr_t vaddr)
 
   /* Find the page associated with this virtual address */
 
-  page = up_addrenv_find_page(&tcb->addrenv_own->addrenv, vaddr);
+  page = up_addrenv_find_page(&tcb->addrenv_curr->addrenv, vaddr);
   if (!page)
     {
       return NULL;
@@ -192,6 +192,47 @@ static FAR void *map_single_user_page(uintptr_t vaddr)
   return (FAR void *)vaddr;
 }
 
+/****************************************************************************
+ * Name: map_single_page
+ *
+ * Description:
+ *   Map (find) a single page from the kernel addressable virtual memory
+ *   pool.
+ *
+ * Input Parameters:
+ *   page - The physical page.
+ *
+ * Returned Value:
+ *   The kernel virtual address for the page, or NULL if page is not kernel
+ *   addressable.
+ *
+ ****************************************************************************/
+
+static FAR void *map_single_page(uintptr_t page)
+{
+  return (FAR void *)up_addrenv_page_vaddr(page);
+}
+
+/****************************************************************************
+ * Name: is_kmap_vaddr
+ *
+ * Description:
+ *   Return true if the virtual address, vaddr, lies in the kmap address
+ *   space.
+ *
+ * Input Parameters:
+ *   vaddr - The kernel virtual address where the mapping begins.
+ *
+ * Returned Value:
+ *   True if vaddr is in the kmap address space; false otherwise.
+ *
+ ****************************************************************************/
+
+static bool is_kmap_vaddr(uintptr_t vaddr)
+{
+  return (vaddr >= CONFIG_ARCH_KMAP_VBASE && vaddr < ARCH_KMAP_VEND);
+}
+
 /****************************************************************************
  * Name: kmm_map_lock
  *
@@ -270,6 +311,13 @@ FAR void *kmm_map(FAR void **pages, size_t npages, int prot)
       return NULL;
     }
 
+  /* A single page can be addressed directly, if it is a kernel page */
+
+  if (npages == 1)
+    {
+      return map_single_page((uintptr_t)pages[0]);
+    }
+
   /* Attempt to map the pages */
 
   vaddr = (uintptr_t)map_pages(pages, npages, prot);
@@ -301,6 +349,15 @@ void kmm_unmap(FAR void *kaddr)
   unsigned int npages;
   int ret;
 
+  /* Speed optimization: check that addr is within kmap area */
+
+  if (!is_kmap_vaddr((uintptr_t)kaddr))
+    {
+      /* Nope: get out */
+
+      return;
+    }
+
   /* Lock the mapping list when we fiddle around with it */
 
   ret = kmm_map_lock();
@@ -348,7 +405,7 @@ void kmm_unmap(FAR void *kaddr)
 
 FAR void *kmm_map_user(FAR void *uaddr, size_t size)
 {
-  FAR void *pages;
+  FAR void **pages;
   uintptr_t vaddr;
   uintptr_t offset;
   size_t npages;
@@ -385,7 +442,7 @@ FAR void *kmm_map_user(FAR void *uaddr, size_t size)
 
   /* No, the area must be mapped into kernel virtual address space */
 
-  pages = kmm_zalloc(npages * sizeof(FAR void *));
+  pages = (FAR void **)kmm_zalloc(npages * sizeof(FAR void *));
   if (!pages)
     {
       return NULL;
@@ -393,7 +450,7 @@ FAR void *kmm_map_user(FAR void *uaddr, size_t size)
 
   /* Fetch the physical pages for the user virtual address range */
 
-  ret = get_user_pages(&pages, npages, vaddr);
+  ret = get_user_pages(pages, npages, vaddr);
   if (ret < 0)
     {
       goto errout_with_pages;
@@ -401,7 +458,7 @@ FAR void *kmm_map_user(FAR void *uaddr, size_t size)
 
   /* Map the physical pages to kernel memory */
 
-  vaddr = (uintptr_t)map_pages(&pages, npages, PROT_READ | PROT_WRITE);
+  vaddr = (uintptr_t)map_pages(pages, npages, PROT_READ | PROT_WRITE);
   if (!vaddr)
     {
       goto errout_with_pages;
@@ -409,6 +466,7 @@ FAR void *kmm_map_user(FAR void *uaddr, size_t size)
 
   /* Ok, we have a virtual memory area, add the offset back */
 
+  kmm_free(pages);
   return (FAR void *)(vaddr + offset);
 
 errout_with_pages:
diff --git a/sched/misc/assert.c b/sched/misc/assert.c
index 1cbe45aef76b5..95ec7ce5a428a 100644
--- a/sched/misc/assert.c
+++ b/sched/misc/assert.c
@@ -553,11 +553,21 @@ void _assert(FAR const char *filename, int linenum, FAR const char *msg,
              FAR void *regs)
 {
   FAR struct tcb_s *rtcb = running_task();
+#if CONFIG_TASK_NAME_SIZE > 0
+  FAR struct tcb_s *ptcb = NULL;
+#endif
   struct panic_notifier_s notifier_data;
   struct utsname name;
   bool fatal = true;
   int flags;
 
+#if CONFIG_TASK_NAME_SIZE > 0
+  if (rtcb->group && !(rtcb->flags & TCB_FLAG_TTYPE_KERNEL))
+    {
+      ptcb = nxsched_get_tcb(rtcb->group->tg_pid);
+    }
+#endif
+
   flags = enter_critical_section();
 
   sched_lock();
@@ -602,6 +612,7 @@ void _assert(FAR const char *filename, int linenum,
          ": "
 #if CONFIG_TASK_NAME_SIZE > 0
          "%s "
+         "process: %s "
 #endif
          "%p\n",
          msg ? msg : "",
@@ -611,6 +622,7 @@ void _assert(FAR const char *filename, int linenum,
 #endif
 #if CONFIG_TASK_NAME_SIZE > 0
          rtcb->name,
+         ptcb ? ptcb->name : "Kernel",
 #endif
          rtcb->entry.main);
 