You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@nuttx.apache.org by ma...@apache.org on 2022/09/12 09:01:16 UTC

[incubator-nuttx] branch master updated: risc-v/mmu: Fix L3 mappings for kernel, and mpfs protected mode userspace

This is an automated email from the ASF dual-hosted git repository.

masayuki pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-nuttx.git


The following commit(s) were added to refs/heads/master by this push:
     new ff05cc593f risc-v/mmu: Fix L3 mappings for kernel, and mpfs protected mode userspace
ff05cc593f is described below

commit ff05cc593fa91d4cb57a0c746bf2bd62cec0d8de
Author: Ville Juven <vi...@unikie.com>
AuthorDate: Fri Sep 9 14:31:31 2022 +0300

    risc-v/mmu: Fix L3 mappings for kernel, and mpfs protected mode userspace
    
    The L3 mapping function was just way too simplistic. Depending on the
    memory configuration, it either works or it doesn't.
    
    Noticed that with icicle:pnsh the software crashes due to an instruction
    page fault; the reason is the map_region() implementation, which does not
    work for regions that are not aligned to 2MB (the L2 page size).
    
    Implemented an extremely simplistic page table allocator for the L3
    references, which should once and for all get rid of the L3 mapping issue.
    
    NOTE: gran_alloc() cannot be used at this point, it is too early for it.
---
 arch/risc-v/src/common/riscv_mmu.h        |   1 +
 arch/risc-v/src/mpfs/mpfs_mm_init.c       | 119 +++++++++++++++++++---
 arch/risc-v/src/mpfs/mpfs_userspace.c     | 158 +++++++++++++++++++++++-------
 arch/risc-v/src/qemu-rv/qemu_rv_mm_init.c | 119 +++++++++++++++++++---
 4 files changed, 332 insertions(+), 65 deletions(-)

diff --git a/arch/risc-v/src/common/riscv_mmu.h b/arch/risc-v/src/common/riscv_mmu.h
index 4eae21645f..9b4173597d 100644
--- a/arch/risc-v/src/common/riscv_mmu.h
+++ b/arch/risc-v/src/common/riscv_mmu.h
@@ -25,6 +25,7 @@
 
 #define RV_MMU_PAGE_SHIFT       (12)
 #define RV_MMU_PAGE_SIZE        (1 << RV_MMU_PAGE_SHIFT) /* 4K pages */
+#define RV_MMU_PAGE_MASK        (RV_MMU_PAGE_SIZE - 1)
 
 /* Entries per PGT */
 
diff --git a/arch/risc-v/src/mpfs/mpfs_mm_init.c b/arch/risc-v/src/mpfs/mpfs_mm_init.c
index 2e2e135a64..cc5cf5eab3 100644
--- a/arch/risc-v/src/mpfs/mpfs_mm_init.c
+++ b/arch/risc-v/src/mpfs/mpfs_mm_init.c
@@ -58,6 +58,19 @@
 #define PGT_L2_SIZE     (512)  /* Enough to map 1 GiB */
 #define PGT_L3_SIZE     (1024) /* Enough to map 4 MiB */
 
+#define SLAB_COUNT      (sizeof(m_l3_pgtable) / RV_MMU_PAGE_SIZE)
+
+/****************************************************************************
+ * Private Types
+ ****************************************************************************/
+
+struct pgalloc_slab_s
+{
+  sq_entry_t  *next;
+  void        *memory;
+};
+typedef struct pgalloc_slab_s pgalloc_slab_t;
+
 /****************************************************************************
  * Private Data
  ****************************************************************************/
@@ -73,37 +86,107 @@ static uint64_t         m_l3_pgtable[PGT_L3_SIZE] locate_data(".pgtables");
 uintptr_t               g_kernel_mappings  = PGT_L1_VBASE;
 uintptr_t               g_kernel_pgt_pbase = PGT_L1_PBASE;
 
+/* L3 page table allocator */
+
+static sq_queue_t       g_free_slabs;
+static pgalloc_slab_t   g_slabs[SLAB_COUNT];
+
 /****************************************************************************
  * Private Functions
  ****************************************************************************/
 
+/****************************************************************************
+ * Name: slab_init
+ *
+ * Description:
+ *   Initialize slab allocator for L3 page table entries
+ *
+ * Input Parameters:
+ *   start - Beginning of the L3 page table pool
+ *
+ ****************************************************************************/
+
+static void slab_init(uintptr_t start)
+{
+  int i;
+
+  sq_init(&g_free_slabs);
+
+  for (i = 0; i < SLAB_COUNT; i++)
+    {
+      g_slabs[i].memory = (void *)start;
+      sq_addlast((sq_entry_t *)&g_slabs[i], (sq_queue_t *)&g_free_slabs);
+      start += RV_MMU_PAGE_SIZE;
+    }
+}
+
+/****************************************************************************
+ * Name: slab_alloc
+ *
+ * Description:
+ *   Allocate single slab for L3 page table entry
+ *
+ ****************************************************************************/
+
+static uintptr_t slab_alloc(void)
+{
+  pgalloc_slab_t *slab = (pgalloc_slab_t *)sq_remfirst(&g_free_slabs);
+  return slab ? (uintptr_t)slab->memory : (uintptr_t)NULL;
+}
+
+/****************************************************************************
+ * Name: map_region
+ *
+ * Description:
+ *   Map a region of physical memory to the L3 page table
+ *
+ * Input Parameters:
+ *   paddr - Beginning of the physical address mapping
+ *   vaddr - Beginning of the virtual address mapping
+ *   size - Size of the region in bytes
+ *   mmuflags - The MMU flags to use in the mapping
+ *
+ ****************************************************************************/
+
 static void map_region(uintptr_t paddr, uintptr_t vaddr, size_t size,
                        uint32_t mmuflags)
 {
-  uintptr_t offset;
-  uintptr_t l3base;
-  uintptr_t end_vaddr;
+  uintptr_t endaddr;
+  uintptr_t l3pbase;
+  int npages;
+  int i;
+  int j;
 
-  /* Start offset for the L3 table, kernel flash is always first */
+  /* How many pages */
 
-  offset = ((paddr - KFLASH_START) / RV_MMU_L2_PAGE_SIZE) * RV_MMU_PAGE_SIZE;
+  npages = (size + RV_MMU_PAGE_MASK) >> RV_MMU_PAGE_SHIFT;
+  endaddr = vaddr + size;
 
-  /* L3 base address per 2MiB boundary */
+  for (i = 0; i < npages; i += RV_MMU_PAGE_ENTRIES)
+    {
+      /* See if a L3 mapping exists ? */
 
-  l3base = PGT_L3_PBASE + offset;
+      l3pbase = mmu_pte_to_paddr(mmu_ln_getentry(2, PGT_L2_VBASE, vaddr));
+      if (!l3pbase)
+        {
+          /* No, allocate 1 page, this must not fail */
 
-  /* Map the region to the L3 table as a whole */
+          l3pbase = slab_alloc();
+          DEBUGASSERT(l3pbase);
 
-  mmu_ln_map_region(3, l3base, paddr, vaddr, size, mmuflags);
+          /* Map it to the L3 table */
 
-  /* Connect to L2 table */
+          mmu_ln_setentry(2, PGT_L2_VBASE, l3pbase, vaddr, MMU_UPGT_FLAGS);
+        }
 
-  end_vaddr = vaddr + size;
-  while (vaddr < end_vaddr)
-    {
-      mmu_ln_setentry(2, PGT_L2_VBASE, l3base, vaddr, PTE_G);
-      l3base += RV_MMU_L3_PAGE_SIZE;
-      vaddr += RV_MMU_L2_PAGE_SIZE;
+      /* Then add the L3 mappings */
+
+      for (j = 0; j < RV_MMU_PAGE_ENTRIES && vaddr < endaddr; j++)
+        {
+          mmu_ln_setentry(3, l3pbase, paddr, vaddr, mmuflags);
+          paddr += RV_MMU_L3_PAGE_SIZE;
+          vaddr += RV_MMU_L3_PAGE_SIZE;
+        }
     }
 }
 
@@ -122,6 +205,10 @@ static void map_region(uintptr_t paddr, uintptr_t vaddr, size_t size,
 
 void mpfs_kernel_mappings(void)
 {
+  /* Initialize slab allocator for L3 page tables */
+
+  slab_init(PGT_L3_PBASE);
+
   /* Begin mapping memory to MMU; note that at this point the MMU is not yet
    * active, so the page table virtual addresses are actually physical
    * addresses and so forth. M-mode does not perform translations anyhow, so
diff --git a/arch/risc-v/src/mpfs/mpfs_userspace.c b/arch/risc-v/src/mpfs/mpfs_userspace.c
index e6e7189fb4..e63e683149 100644
--- a/arch/risc-v/src/mpfs/mpfs_userspace.c
+++ b/arch/risc-v/src/mpfs/mpfs_userspace.c
@@ -26,6 +26,7 @@
 
 #include <stdint.h>
 #include <assert.h>
+#include <queue.h>
 
 #include <nuttx/userspace.h>
 
@@ -45,19 +46,30 @@
 
 #define PGT_L1_PBASE    (uint64_t)&m_l1_pgtable
 #define PGT_L2_PBASE    (uint64_t)&m_l2_pgtable
-#define PGT_L3_ROMPBASE (uint64_t)&m_l3_romtbl
-#define PGT_L3_RAMPBASE (uint64_t)&m_l3_ramtbl
+#define PGT_L3_PBASE    (uint64_t)&m_l3_pgtable
 #define PGT_L1_VBASE    PGT_L1_PBASE
 #define PGT_L2_VBASE    PGT_L2_PBASE
-#define PGT_L3_ROMVBASE PGT_L3_ROMPBASE
-#define PGT_L3_RAMVBASE PGT_L3_RAMPBASE
+#define PGT_L3_VBASE    PGT_L3_PBASE
 
 #define PGT_L1_SIZE     (512)  /* Enough to map 512 GiB */
 #define PGT_L2_SIZE     (512)  /* Enough to map 1 GiB */
-#define PGT_L3_SIZE     (512)  /* Enough to map 2 MiB */
+#define PGT_L3_SIZE     (1024) /* Enough to map 4 MiB */
+
+#define SLAB_COUNT      (sizeof(m_l3_pgtable) / RV_MMU_PAGE_SIZE)
 
 /****************************************************************************
- * Private Functions
+ * Private Types
+ ****************************************************************************/
+
+struct pgalloc_slab_s
+{
+  sq_entry_t  *next;
+  void        *memory;
+};
+typedef struct pgalloc_slab_s pgalloc_slab_t;
+
+/****************************************************************************
+ * Private Function Prototypes
  ****************************************************************************/
 
 /****************************************************************************
@@ -82,6 +94,29 @@ static void configure_mpu(void);
 
 static void configure_mmu(void);
 
+/****************************************************************************
+ * Name: slab_init
+ *
+ * Description:
+ *   Initialize slab allocator for L3 page table entries
+ *
+ * Input Parameters:
+ *   start - Beginning of the L3 page table pool
+ *
+ ****************************************************************************/
+
+static void slab_init(uintptr_t start);
+
+/****************************************************************************
+ * Name: slab_alloc
+ *
+ * Description:
+ *   Allocate single slab for L3 page table entry
+ *
+ ****************************************************************************/
+
+static uintptr_t slab_alloc(void);
+
 /****************************************************************************
  * Name: map_region
  *
@@ -89,7 +124,6 @@ static void configure_mmu(void);
  *   Map a region of physical memory to the L3 page table
  *
  * Input Parameters:
- *   l3base - L3 page table physical base address
  *   paddr - Beginning of the physical address mapping
  *   vaddr - Beginning of the virtual address mapping
  *   size - Size of the region in bytes
@@ -97,8 +131,8 @@ static void configure_mmu(void);
  *
  ****************************************************************************/
 
-static void map_region(uintptr_t l3base, uintptr_t paddr, uintptr_t vaddr,
-                       size_t size, uint32_t mmuflags);
+static void map_region(uintptr_t paddr, uintptr_t vaddr, size_t size,
+                       uint32_t mmuflags);
 
 /****************************************************************************
  * Private Data
@@ -115,11 +149,12 @@ static void map_region(uintptr_t l3base, uintptr_t paddr, uintptr_t vaddr,
 
 static uint64_t         m_l1_pgtable[PGT_L1_SIZE] locate_data(".pgtables");
 static uint64_t         m_l2_pgtable[PGT_L2_SIZE] locate_data(".pgtables");
+static uint64_t         m_l3_pgtable[PGT_L3_SIZE] locate_data(".pgtables");
 
-/* Allocate separate tables for ROM/RAM mappings */
+/* L3 page table allocator */
 
-static uint64_t         m_l3_romtbl[PGT_L3_SIZE]  locate_data(".pgtables");
-static uint64_t         m_l3_ramtbl[PGT_L3_SIZE]  locate_data(".pgtables");
+static sq_queue_t       g_free_slabs;
+static pgalloc_slab_t   g_slabs[SLAB_COUNT];
 
 /****************************************************************************
  * Public Functions
@@ -204,17 +239,14 @@ static void configure_mpu(void)
 
 static void configure_mmu(void)
 {
-  /* Setup MMU for user */
-
-  /* Setup the L3 references for executable memory */
+  /* Setup MMU for user. First granule allocator for L3 entries */
 
-  map_region(PGT_L3_ROMPBASE, UFLASH_START, UFLASH_START, UFLASH_SIZE,
-             MMU_UTEXT_FLAGS);
+  slab_init(PGT_L3_PBASE);
 
-  /* Setup the L3 references for data memory */
+  /* Setup the L3 references for text and data */
 
-  map_region(PGT_L3_RAMPBASE, USRAM_START, USRAM_START, USRAM_SIZE,
-             MMU_UDATA_FLAGS);
+  map_region(UFLASH_START, UFLASH_START, UFLASH_SIZE, MMU_UTEXT_FLAGS);
+  map_region(USRAM_START, USRAM_START, USRAM_SIZE, MMU_UDATA_FLAGS);
 
   /* Connect the L1 and L2 page tables */
 
@@ -225,6 +257,45 @@ static void configure_mmu(void)
   mmu_enable(PGT_L1_PBASE, 0);
 }
 
+/****************************************************************************
+ * Name: slab_init
+ *
+ * Description:
+ *   Initialize slab allocator for L3 page table entries
+ *
+ * Input Parameters:
+ *   start - Beginning of the L3 page table pool
+ *
+ ****************************************************************************/
+
+static void slab_init(uintptr_t start)
+{
+  int i;
+
+  sq_init(&g_free_slabs);
+
+  for (i = 0; i < SLAB_COUNT; i++)
+    {
+      g_slabs[i].memory = (void *)start;
+      sq_addlast((sq_entry_t *)&g_slabs[i], (sq_queue_t *)&g_free_slabs);
+      start += RV_MMU_PAGE_SIZE;
+    }
+}
+
+/****************************************************************************
+ * Name: slab_alloc
+ *
+ * Description:
+ *   Allocate single slab for L3 page table entry
+ *
+ ****************************************************************************/
+
+static uintptr_t slab_alloc(void)
+{
+  pgalloc_slab_t *slab = (pgalloc_slab_t *)sq_remfirst(&g_free_slabs);
+  return slab ? (uintptr_t)slab->memory : (uintptr_t)NULL;
+}
+
 /****************************************************************************
  * Name: map_region
  *
@@ -232,7 +303,6 @@ static void configure_mmu(void)
  *   Map a region of physical memory to the L3 page table
  *
  * Input Parameters:
- *   l3base - L3 page table physical base address
  *   paddr - Beginning of the physical address mapping
  *   vaddr - Beginning of the virtual address mapping
  *   size - Size of the region in bytes
@@ -240,23 +310,45 @@ static void configure_mmu(void)
  *
  ****************************************************************************/
 
-static void map_region(uintptr_t l3base, uintptr_t paddr, uintptr_t vaddr,
-                       size_t size, uint32_t mmuflags)
+static void map_region(uintptr_t paddr, uintptr_t vaddr, size_t size,
+                       uint32_t mmuflags)
 {
-  uintptr_t end_vaddr;
-
-  /* Map the region to the L3 table as a whole */
+  uintptr_t endaddr;
+  uintptr_t l3pbase;
+  int npages;
+  int i;
+  int j;
 
-  mmu_ln_map_region(3, l3base, paddr, vaddr, size, mmuflags);
+  /* How many pages */
 
-  /* Connect to L2 table */
+  npages = (size + RV_MMU_PAGE_MASK) >> RV_MMU_PAGE_SHIFT;
+  endaddr = vaddr + size;
 
-  end_vaddr = vaddr + size;
-  while (vaddr < end_vaddr)
+  for (i = 0; i < npages; i += RV_MMU_PAGE_ENTRIES)
     {
-      mmu_ln_setentry(2, PGT_L2_VBASE, l3base, vaddr, PTE_G);
-      l3base += RV_MMU_L3_PAGE_SIZE;
-      vaddr += RV_MMU_L2_PAGE_SIZE;
+      /* See if a L3 mapping exists ? */
+
+      l3pbase = mmu_pte_to_paddr(mmu_ln_getentry(2, PGT_L2_VBASE, vaddr));
+      if (!l3pbase)
+        {
+          /* No, allocate 1 page, this must not fail */
+
+          l3pbase = slab_alloc();
+          DEBUGASSERT(l3pbase);
+
+          /* Map it to the L3 table */
+
+          mmu_ln_setentry(2, PGT_L2_VBASE, l3pbase, vaddr, MMU_UPGT_FLAGS);
+        }
+
+      /* Then add the L3 mappings */
+
+      for (j = 0; j < RV_MMU_PAGE_ENTRIES && vaddr < endaddr; j++)
+        {
+          mmu_ln_setentry(3, l3pbase, paddr, vaddr, mmuflags);
+          paddr += RV_MMU_L3_PAGE_SIZE;
+          vaddr += RV_MMU_L3_PAGE_SIZE;
+        }
     }
 }
 
diff --git a/arch/risc-v/src/qemu-rv/qemu_rv_mm_init.c b/arch/risc-v/src/qemu-rv/qemu_rv_mm_init.c
index 3673721053..5b4d35cd55 100644
--- a/arch/risc-v/src/qemu-rv/qemu_rv_mm_init.c
+++ b/arch/risc-v/src/qemu-rv/qemu_rv_mm_init.c
@@ -58,6 +58,19 @@
 #define PGT_L2_SIZE     (512)  /* Enough to map 1 GiB */
 #define PGT_L3_SIZE     (1024) /* Enough to map 4 MiB (2MiB x 2) */
 
+#define SLAB_COUNT      (sizeof(m_l3_pgtable) / RV_MMU_PAGE_SIZE)
+
+/****************************************************************************
+ * Private Types
+ ****************************************************************************/
+
+struct pgalloc_slab_s
+{
+  sq_entry_t  *next;
+  void        *memory;
+};
+typedef struct pgalloc_slab_s pgalloc_slab_t;
+
 /****************************************************************************
  * Private Data
  ****************************************************************************/
@@ -73,37 +86,107 @@ static uint64_t         m_l3_pgtable[PGT_L3_SIZE] locate_data(".pgtables");
 uintptr_t               g_kernel_mappings  = PGT_L1_VBASE;
 uintptr_t               g_kernel_pgt_pbase = PGT_L1_PBASE;
 
+/* L3 page table allocator */
+
+static sq_queue_t       g_free_slabs;
+static pgalloc_slab_t   g_slabs[SLAB_COUNT];
+
 /****************************************************************************
  * Private Functions
  ****************************************************************************/
 
+/****************************************************************************
+ * Name: slab_init
+ *
+ * Description:
+ *   Initialize slab allocator for L3 page table entries
+ *
+ * Input Parameters:
+ *   start - Beginning of the L3 page table pool
+ *
+ ****************************************************************************/
+
+static void slab_init(uintptr_t start)
+{
+  int i;
+
+  sq_init(&g_free_slabs);
+
+  for (i = 0; i < SLAB_COUNT; i++)
+    {
+      g_slabs[i].memory = (void *)start;
+      sq_addlast((sq_entry_t *)&g_slabs[i], (sq_queue_t *)&g_free_slabs);
+      start += RV_MMU_PAGE_SIZE;
+    }
+}
+
+/****************************************************************************
+ * Name: slab_alloc
+ *
+ * Description:
+ *   Allocate single slab for L3 page table entry
+ *
+ ****************************************************************************/
+
+static uintptr_t slab_alloc(void)
+{
+  pgalloc_slab_t *slab = (pgalloc_slab_t *)sq_remfirst(&g_free_slabs);
+  return slab ? (uintptr_t)slab->memory : (uintptr_t)NULL;
+}
+
+/****************************************************************************
+ * Name: map_region
+ *
+ * Description:
+ *   Map a region of physical memory to the L3 page table
+ *
+ * Input Parameters:
+ *   paddr - Beginning of the physical address mapping
+ *   vaddr - Beginning of the virtual address mapping
+ *   size - Size of the region in bytes
+ *   mmuflags - The MMU flags to use in the mapping
+ *
+ ****************************************************************************/
+
 static void map_region(uintptr_t paddr, uintptr_t vaddr, size_t size,
                        uint32_t mmuflags)
 {
-  uintptr_t offset;
-  uintptr_t l3base;
-  uintptr_t end_vaddr;
+  uintptr_t endaddr;
+  uintptr_t l3pbase;
+  int npages;
+  int i;
+  int j;
 
-  /* Start offset for the L3 table, kernel flash is always first */
+  /* How many pages */
 
-  offset = ((paddr - KFLASH_START) / RV_MMU_L2_PAGE_SIZE) * RV_MMU_PAGE_SIZE;
+  npages = (size + RV_MMU_PAGE_MASK) >> RV_MMU_PAGE_SHIFT;
+  endaddr = vaddr + size;
 
-  /* L3 base address per 2MiB boundary */
+  for (i = 0; i < npages; i += RV_MMU_PAGE_ENTRIES)
+    {
+      /* See if a L3 mapping exists ? */
 
-  l3base = PGT_L3_PBASE + offset;
+      l3pbase = mmu_pte_to_paddr(mmu_ln_getentry(2, PGT_L2_VBASE, vaddr));
+      if (!l3pbase)
+        {
+          /* No, allocate 1 page, this must not fail */
 
-  /* Map the region to the L3 table as a whole */
+          l3pbase = slab_alloc();
+          DEBUGASSERT(l3pbase);
 
-  mmu_ln_map_region(3, l3base, paddr, vaddr, size, mmuflags);
+          /* Map it to the L3 table */
 
-  /* Connect to L2 table */
+          mmu_ln_setentry(2, PGT_L2_VBASE, l3pbase, vaddr, MMU_UPGT_FLAGS);
+        }
 
-  end_vaddr = vaddr + size;
-  while (vaddr < end_vaddr)
-    {
-      mmu_ln_setentry(2, PGT_L2_VBASE, l3base, vaddr, PTE_G);
-      l3base += RV_MMU_L3_PAGE_SIZE;
-      vaddr += RV_MMU_L2_PAGE_SIZE;
+      /* Then add the L3 mappings */
+
+      for (j = 0; j < RV_MMU_PAGE_ENTRIES && vaddr < endaddr; j++)
+        {
+          mmu_ln_setentry(3, l3pbase, paddr, vaddr, mmuflags);
+          paddr += RV_MMU_L3_PAGE_SIZE;
+          vaddr += RV_MMU_L3_PAGE_SIZE;
+        }
     }
 }
 
@@ -122,6 +205,10 @@ static void map_region(uintptr_t paddr, uintptr_t vaddr, size_t size,
 
 void qemu_rv_kernel_mappings(void)
 {
+  /* Initialize slab allocator for L3 page tables */
+
+  slab_init(PGT_L3_PBASE);
+
   /* Begin mapping memory to MMU; note that at this point the MMU is not yet
    * active, so the page table virtual addresses are actually physical
    * addresses and so forth. M-mode does not perform translations anyhow, so