MonkOS v0.1
A simple 64-bit operating system (x86_64)
kmem.c
//============================================================================
/// @file kmem.c
/// @brief Kernel physical (and virtual) memory map.
//
// Copyright 2016 Brett Vickers.
// Use of this source code is governed by a BSD-style license that can
// be found in the MonkOS LICENSE file.
//============================================================================

#include <core.h>
#include <libc/string.h>
#include <kernel/x86/cpu.h>
#include <kernel/mem/kmem.h>
#include <kernel/mem/paging.h>
#include <kernel/mem/pmap.h>

/// Return flags for large-page leaf entries in level 3 (PDPT) and level 2
/// (PDT) tables.
static uint64_t
get_pdflags(uint32_t memtype)
{
    switch (memtype)
    {
        case PMEMTYPE_ACPI_NVS:
        case PMEMTYPE_UNCACHED:
            return PF_PRESENT | PF_GLOBAL | PF_SYSTEM |
                   PF_RW | PF_PS | PF_PWT | PF_PCD;

        case PMEMTYPE_BAD:
        case PMEMTYPE_UNMAPPED:
            return 0;

        case PMEMTYPE_USABLE:
        case PMEMTYPE_RESERVED:
        case PMEMTYPE_ACPI:
            return PF_PRESENT | PF_GLOBAL | PF_SYSTEM | PF_RW | PF_PS;

        default:
            fatal();
            return 0;
    }
}

/// Return flags for entries in level 1 (PT) tables.
static uint64_t
get_ptflags(uint32_t memtype)
{
    switch (memtype)
    {
        case PMEMTYPE_ACPI_NVS:
        case PMEMTYPE_UNCACHED:
            return PF_PRESENT | PF_GLOBAL | PF_SYSTEM |
                   PF_RW | PF_PWT | PF_PCD;

        case PMEMTYPE_BAD:
        case PMEMTYPE_UNMAPPED:
            return 0;

        case PMEMTYPE_USABLE:
        case PMEMTYPE_RESERVED:
        case PMEMTYPE_ACPI:
            return PF_PRESENT | PF_GLOBAL | PF_SYSTEM | PF_RW;

        default:
            fatal();
            return 0;
    }
}

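// Explanatory note on the flag combinations above: PF_PS marks a PDPT/PDT
// entry as a 1GiB/2MiB leaf page (4KiB PT entries have no page-size bit,
// which is the only difference between get_pdflags() and get_ptflags()),
// while PF_PWT | PF_PCD makes accesses write-through and uncached, so only
// the uncacheable region types (PMEMTYPE_UNCACHED, PMEMTYPE_ACPI_NVS,
// usually I/O or firmware memory) receive them.
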
/// Allocate the next available page in the kernel page table and return
/// its virtual address.
static inline uint64_t
alloc_page(pagetable_t *pt)
{
    if (pt->vnext >= pt->vterm)
        fatal();

    uint64_t vaddr = pt->vnext;
    pt->vnext += PAGE_SIZE;

    // Return the page's virtual address tagged with flags, so the caller can
    // store the result directly as a page table entry.
    return vaddr | PF_SYSTEM | PF_PRESENT | PF_RW;
}

/// Create a 1GiB page entry in the kernel page table.
static void
create_huge_page(pagetable_t *pt, uint64_t addr, uint32_t memtype)
{
    uint64_t pml4te = PML4E(addr);
    uint64_t pdpte = PDPTE(addr);

    page_t *pml4t = (page_t *)pt->proot;
    if (pml4t->entry[pml4te] == 0)
        pml4t->entry[pml4te] = alloc_page(pt);

    page_t *pdpt = PGPTR(pml4t->entry[pml4te]);
    pdpt->entry[pdpte] = addr | get_pdflags(memtype);
}

/// Create a 2MiB page entry in the kernel page table.
static void
create_large_page(pagetable_t *pt, uint64_t addr, uint32_t memtype)
{
    uint64_t pml4te = PML4E(addr);
    uint64_t pdpte = PDPTE(addr);
    uint64_t pde = PDE(addr);

    page_t *pml4t = (page_t *)pt->proot;
    if (pml4t->entry[pml4te] == 0)
        pml4t->entry[pml4te] = alloc_page(pt);

    page_t *pdpt = PGPTR(pml4t->entry[pml4te]);
    if (pdpt->entry[pdpte] == 0)
        pdpt->entry[pdpte] = alloc_page(pt);

    page_t *pdt = PGPTR(pdpt->entry[pdpte]);
    pdt->entry[pde] = addr | get_pdflags(memtype);
}

/// Create a 4KiB page entry in the kernel page table.
static void
create_small_page(pagetable_t *pt, uint64_t addr, uint32_t memtype)
{
    uint64_t pml4te = PML4E(addr);
    uint64_t pdpte = PDPTE(addr);
    uint64_t pde = PDE(addr);
    uint64_t pte = PTE(addr);

    page_t *pml4t = (page_t *)pt->proot;
    if (pml4t->entry[pml4te] == 0)
        pml4t->entry[pml4te] = alloc_page(pt);

    page_t *pdpt = PGPTR(pml4t->entry[pml4te]);
    if (pdpt->entry[pdpte] == 0)
        pdpt->entry[pdpte] = alloc_page(pt);

    page_t *pdt = PGPTR(pdpt->entry[pdpte]);
    if (pdt->entry[pde] == 0)
        pdt->entry[pde] = alloc_page(pt);

    page_t *ptt = PGPTR(pdt->entry[pde]);
    ptt->entry[pte] = addr | get_ptflags(memtype);
}

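// Worked example (illustrative, assuming the PML4E/PDPTE/PDE/PTE macros
// extract the standard x86-64 index fields from bits 39-47, 30-38, 21-29
// and 12-20 of the address): for addr = 0x00000000c0201000,
//   PML4E = 0, PDPTE = 3, PDE = 1, PTE = 1,
// so create_small_page() walks that path, allocating any missing PDPT, PDT
// and PT pages, and writes the 4KiB leaf entry into slot 1 of that PT.
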
/// Map a region of memory into the kernel page table, using the largest
/// page sizes possible.
static void
map_region(pagetable_t *pt, const pmap_t *map, const pmapregion_t *region)
{
    // Don't map bad (or unmapped) memory.
    if (region->type == PMEMTYPE_UNMAPPED || region->type == PMEMTYPE_BAD)
        return;

    // Don't map reserved regions beyond the last usable physical address.
    if (region->type == PMEMTYPE_RESERVED &&
        region->addr >= map->last_usable)
        return;

    uint64_t addr = region->addr;
    uint64_t term = region->addr + region->size;

    // Create a series of pages that cover the region. Try to use the largest
    // page sizes possible to keep the page table small.
    while (addr < term) {
        uint64_t remain = term - addr;

        // Create a huge page (1GiB) if possible.
        if ((addr & (PAGE_SIZE_HUGE - 1)) == 0 &&
            (remain >= PAGE_SIZE_HUGE)) {
            create_huge_page(pt, addr, region->type);
            addr += PAGE_SIZE_HUGE;
        }

        // Create a large page (2MiB) if possible.
        else if ((addr & (PAGE_SIZE_LARGE - 1)) == 0 &&
                 (remain >= PAGE_SIZE_LARGE)) {
            create_large_page(pt, addr, region->type);
            addr += PAGE_SIZE_LARGE;
        }

        // Create a small page (4KiB).
        else {
            create_small_page(pt, addr, region->type);
            addr += PAGE_SIZE;
        }
    }
}

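// Worked example (illustrative): mapping a usable region that starts at
// physical address 0x0 and spans 0x40203000 bytes (1GiB + 2MiB + 12KiB)
// produces:
//   one 1GiB page at 0x00000000 (address is 1GiB-aligned, at least 1GiB left),
//   one 2MiB page at 0x40000000,
//   three 4KiB pages at 0x40200000, 0x40201000 and 0x40202000,
// which keeps the kernel page table as small as possible.
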
void
kmem_init(pagetable_t *pt)
{
    // Zero all kernel page table memory.
    memzero((void *)KMEM_KERNEL_PAGETABLE, KMEM_KERNEL_PAGETABLE_SIZE);

    // Initialize the kernel page table. The root PML4T occupies the first
    // page of the region; pages after it are handed out by alloc_page().
    pt->proot = KMEM_KERNEL_PAGETABLE;
    pt->vroot = KMEM_KERNEL_PAGETABLE;
    pt->vnext = KMEM_KERNEL_PAGETABLE + PAGE_SIZE;
    pt->vterm = KMEM_KERNEL_PAGETABLE_END;

    // For each region in the physical memory map, create appropriate page
    // table entries.
    const pmap_t *map = pmap();
    for (uint64_t r = 0; r < map->count; r++)
        map_region(pt, map, &map->region[r]);
}
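
// Usage sketch (illustrative; this helper is an assumption added for
// demonstration and is not part of the original kmem.c): a boot path might
// build and then activate the kernel page table roughly as follows.
static pagetable_t example_kernel_pagetable;

static __attribute__((unused)) void
example_kmem_setup(void)
{
    // Identity map all physical memory described by the BIOS memory map.
    kmem_init(&example_kernel_pagetable);

    // Activate the new table by loading CR3 with the PML4T's physical
    // address. Inline assembly stands in for whatever CR3-loading helper
    // the kernel actually provides.
    __asm__ volatile("mov %0, %%cr3"
                     :
                     : "r"(example_kernel_pagetable.proot)
                     : "memory");
}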