/*
 * Copyright (c) 2009, 2011 ETH Zurich.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
 */
#include <kernel.h>             // panic(), printf(), assert()
#include <dispatch.h>           // struct dcb
#include <string.h>             // memcpy(), memset()
#include <paging_kernel_arch.h>
#include <cap_predicates.h>
// ------------------------------------------------------------------------
// Internal declarations

union l1_entry {
    uint32_t raw;

    /// Invalid L1 entry
    struct {
        uint32_t type          :2;      // == 0
    } invalid;

    /// L1 entry for 256 4K L2 entries
    struct {
        uint32_t base_address  :22;
        uint32_t sbz1          :1;      // Should-be-zero
        uint32_t domain        :4;
        uint32_t mb1           :1;      // Must-be-one
        uint32_t sbz0          :2;      // Should-be-zero
        uint32_t type          :2;      // == 1
    } coarse;

    /// L1 entry for 1MB mapped section
    struct {
        uint32_t base_address  :12;
        uint32_t sbz1          :8;      // Should-be-zero
        uint32_t ap            :2;      // Access permissions
        uint32_t sbz0          :1;      // Should-be-zero
        uint32_t domain        :4;
        uint32_t mb1           :1;      // Must-be-one
        uint32_t cacheable     :1;
        uint32_t bufferable    :1;
        uint32_t type          :2;      // == 2
    } section;

    /// L1 entry for 1024 1K L2 descriptors
    struct {
        uint32_t base_address  :20;
        uint32_t sbz1          :3;      // Should-be-zero
        uint32_t domain        :4;
        uint32_t mb1           :1;      // Must-be-one
        uint32_t sbz0          :2;      // Should-be-zero
        uint32_t type          :2;      // == 3
    } fine;
};

STATIC_ASSERT_SIZEOF(union l1_entry, 4);

#define L1_TYPE_INVALID_ENTRY   0
#define L1_TYPE_COARSE_ENTRY    1
#define L1_TYPE_SECTION_ENTRY   2
#define L1_TYPE_FINE_ENTRY      3
#define L1_TYPE(x)              ((x) & 3)

union l2_entry {
    uint32_t raw;

    /// Invalid L2 entry
    struct {
        uint32_t type          :2;      // == 0
    } invalid;

    /// Descriptor for a 64K page
    struct {
        uint32_t base_address  :16;
        uint32_t sbz           :4;      // Should-be-zero
        uint32_t ap3           :2;      // Access permissions, subpage 3
        uint32_t ap2           :2;      // Access permissions, subpage 2
        uint32_t ap1           :2;      // Access permissions, subpage 1
        uint32_t ap0           :2;      // Access permissions, subpage 0
        uint32_t cacheable     :1;
        uint32_t bufferable    :1;
        uint32_t type          :2;      // == 1
    } large_page;

    /// Descriptor for a 4K page
    struct {
        uint32_t base_address  :20;
        uint32_t ap3           :2;      // Access permissions, subpage 3
        uint32_t ap2           :2;      // Access permissions, subpage 2
        uint32_t ap1           :2;      // Access permissions, subpage 1
        uint32_t ap0           :2;      // Access permissions, subpage 0
        uint32_t cacheable     :1;
        uint32_t bufferable    :1;
        uint32_t type          :2;      // == 2
    } small_page;

    /// Descriptor for a 1K page
    struct {
        uint32_t base_address  :22;
        uint32_t sbz           :4;      // Should-be-zero
        uint32_t ap            :2;      // Access permissions
        uint32_t cacheable     :1;
        uint32_t bufferable    :1;
        uint32_t type          :2;      // == 3
    } tiny_page;
};

STATIC_ASSERT_SIZEOF(union l2_entry, 4);

#define L2_TYPE_INVALID_PAGE    0
#define L2_TYPE_LARGE_PAGE      1
#define L2_TYPE_SMALL_PAGE      2
#define L2_TYPE_TINY_PAGE       3
#define L2_TYPE(x)              ((x) & 3)

#define BYTES_PER_SECTION       0x100000
#define BYTES_PER_PAGE          0x1000
#define BYTES_PER_SMALL_PAGE    0x400
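
// Size relationships used throughout this file: a 1MB section (0x100000)
// covers 256 4K pages, and a hardware L2 coarse table holds 256 4-byte
// entries, i.e. 1K (0x400). BYTES_PER_SMALL_PAGE therefore doubles as the
// size and alignment of a single hardware L2 table.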

// ------------------------------------------------------------------------
// Utility declarations

inline static uintptr_t paging_round_down(uintptr_t address, uintptr_t size)
{
    return address & ~(size - 1);
}

inline static uintptr_t paging_round_up(uintptr_t address, uintptr_t size)
{
    return (address + size - 1) & ~(size - 1);
}

// Returns non-zero iff address is aligned to bytes (a power of two).
inline static int aligned(uintptr_t address, uintptr_t bytes)
{
    return (address & (bytes - 1)) == 0;
}
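
// Worked examples (size must be a power of two in all three helpers):
//   paging_round_down(0x12345, BYTES_PER_PAGE) == 0x12000
//   paging_round_up  (0x12345, BYTES_PER_PAGE) == 0x13000
//   aligned          (0x12000, BYTES_PER_PAGE) == 1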

// ------------------------------------------------------------------------
// Exported functions

static void
paging_write_section_entry(uintptr_t ttbase, lvaddr_t va, union l1_entry l1)
{
    union l1_entry *l1_table;
    if (ttbase == 0) {
        // No table supplied: default to the currently active L1 table.
        ttbase = cp15_read_ttbr() + KERNEL_OFFSET;
    }
    l1_table = (union l1_entry *) ttbase;
    l1_table[va >> 20u] = l1;
}

void paging_map_kernel_section(uintptr_t ttbase, lvaddr_t va, lpaddr_t pa)
{
    union l1_entry l1;

    l1.raw = 0;
    l1.section.type         = L1_TYPE_SECTION_ENTRY;
    l1.section.bufferable   = 1;
    l1.section.cacheable    = 1;
    l1.section.ap           = 1;    // supervisor read/write, no user access
    l1.section.base_address = pa >> 20u;

    paging_write_section_entry(ttbase, va, l1);
}

void paging_map_memory(uintptr_t ttbase, lpaddr_t paddr, size_t bytes)
{
    // Only whole 1MB sections are mapped; a trailing partial section is
    // rounded down and left unmapped.
    lpaddr_t pend = paging_round_down(paddr + bytes, BYTES_PER_SECTION);
    while (paddr < pend) {
        paging_map_kernel_section(0, paddr + MEMORY_OFFSET, paddr);
        paddr += BYTES_PER_SECTION;
    }
}

static void paging_map_device_section(uintptr_t ttbase, lvaddr_t va, lpaddr_t pa)
{
    union l1_entry l1;

    l1.raw = 0;
    l1.section.type         = L1_TYPE_SECTION_ENTRY;
    // Device registers must be mapped uncached and unbuffered.
    l1.section.bufferable   = 0;
    l1.section.cacheable    = 0;
    l1.section.ap           = 1;    // supervisor read/write, no user access
    l1.section.base_address = pa >> 20u;

    paging_write_section_entry(ttbase, va, l1);
}

lvaddr_t paging_map_device(lpaddr_t device_base, size_t device_bytes)
{
    // HACK to put device in high memory.
    // Should likely track these allocations.
    static lvaddr_t dev_alloc = KERNEL_OFFSET;
    assert(device_bytes <= BYTES_PER_SECTION);
    dev_alloc -= BYTES_PER_SECTION;

    paging_map_device_section(0, dev_alloc, device_base);

    return dev_alloc;
}
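
// Usage sketch (illustrative only; the physical address below is an assumed
// example, not a real platform constant). Each call hands out the next 1MB
// window below the previous one, starting just under KERNEL_OFFSET:
//
//   lpaddr_t uart_section = 0x10000000;   // hypothetical, section-aligned
//   lvaddr_t va = paging_map_device(uart_section, BYTES_PER_SECTION);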

void paging_make_good(lvaddr_t new_table_base, size_t new_table_bytes)
{
    assert(new_table_base >= MEMORY_OFFSET);
    assert(new_table_bytes == ARM_L1_ALIGN);
    assert(aligned(new_table_base, ARM_L1_ALIGN));

    lvaddr_t ttbr = local_phys_to_mem(cp15_read_ttbr());
    size_t st = (MEMORY_OFFSET / ARM_L1_SECTION_BYTES) * ARM_L1_BYTES_PER_ENTRY;

    // Copy kernel pages (everything from MEMORY_OFFSET upwards)
    memcpy((void*)new_table_base + st, (void*)ttbr + st,
           ARM_L1_MAX_ENTRIES * ARM_L1_BYTES_PER_ENTRY - st);
}

void paging_map_user_pages_l1(lvaddr_t table_base, lvaddr_t va, lpaddr_t pa)
{
    assert(aligned(table_base, ARM_L1_ALIGN));
    assert(aligned(va, BYTES_PER_SECTION));
    assert(aligned(pa, BYTES_PER_SMALL_PAGE));

    union l1_entry e;

    e.raw = 0;
    e.coarse.type         = L1_TYPE_COARSE_ENTRY;
    e.coarse.mb1          = 1;
    e.coarse.domain       = 0;
    e.coarse.base_address = (pa >> 10);

    uintptr_t* l1table = (uintptr_t*)table_base;
    l1table[va / BYTES_PER_SECTION] = e.raw;
}
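
// Example (illustrative values): va == 0x00100000 selects L1 slot 1, and a
// coarse table at pa == 0x81000400 gives e.coarse.base_address == 0x204001;
// the table itself only needs the 1K alignment asserted above.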

void paging_set_l2_entry(uintptr_t* l2e, lpaddr_t addr, uintptr_t flags)
{
    // flags may only carry attribute bits: no address bits (31:12) and no
    // type bits (1:0).
    assert(0 == (flags & 0xfffff000));
    assert(0 == (flags & 0x3));
    assert(0 == (addr & 0xfff));

    union l2_entry e;
    e.raw = flags;

    assert(e.small_page.ap0 == e.small_page.ap1 &&
           e.small_page.ap0 == e.small_page.ap2 &&
           e.small_page.ap0 == e.small_page.ap3);

    e.small_page.type = L2_TYPE_SMALL_PAGE;
    e.small_page.base_address = (addr >> 12);

    *l2e = e.raw;
}
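
// Example (illustrative, assuming the field layout above with base_address
// in bits 31:12 and type in bits 1:0): flags == 0xffc sets ap3..ap0 == 3
// (read/write), cacheable == 1 and bufferable == 1, so
// paging_set_l2_entry(&pte, 0x80042000, 0xffc) stores 0x80042ffe.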

void paging_context_switch(lpaddr_t ttbr)
{
    assert(ttbr < MEMORY_OFFSET);
    assert((ttbr & 0x3fff) == 0);   // TTBR must be 16K-aligned

    lpaddr_t old_ttbr = cp15_read_ttbr();
    if (ttbr != old_ttbr)
    {
        cp15_write_ttbr(ttbr);
        cp15_invalidate_tlb();
        cp15_invalidate_i_and_d_caches();
    }
}

static void
paging_set_flags(union l2_entry *entry, uintptr_t kpi_paging_flags)
{
    entry->small_page.bufferable = 1;
    entry->small_page.cacheable =
        (kpi_paging_flags & KPI_PAGING_FLAGS_NOCACHE) ? 0 : 1;

    entry->small_page.ap0 =
        (kpi_paging_flags & KPI_PAGING_FLAGS_READ)  ? 2 : 0;
    entry->small_page.ap0 |=
        (kpi_paging_flags & KPI_PAGING_FLAGS_WRITE) ? 3 : 0;
    entry->small_page.ap1 = entry->small_page.ap0;
    entry->small_page.ap2 = entry->small_page.ap0;
    entry->small_page.ap3 = entry->small_page.ap0;
}
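
// ARMv5 AP encoding (with S == R == 0): 0 == no access, 1 == supervisor-only
// read/write, 2 == supervisor read/write + user read-only, 3 == read/write
// for both. Hence READ alone yields AP == 2 and any WRITE yields AP == 3.
// The four AP fields cover the four 1K subpages of a 4K page and are set
// identically here, so the whole page gets uniform permissions.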

static errval_t
caps_map_l1(struct capability* dest,
            cslot_t            slot,
            struct capability* src,
            uintptr_t          kpi_paging_flags,
            uintptr_t          offset,
            uintptr_t          pte_count)
{
    //
    // Note:
    //
    // We have a chicken-and-egg problem in initializing resources, so
    // instead of treating an L2 table at its actual 1K size, we treat it
    // as being 4K. As a result, when we map an "L2" table we actually map
    // a page of memory as if it were 4 consecutive L2 tables.
    //
    // See lib/barrelfish/arch/arm/pmap_arch.c for more discussion.
    //
    const int ARM_L1_SCALE = 4;
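
    // Concretely: one 4K frame == 4 * 1K hardware L2 tables, so "L2" slot s
    // fills the 4 consecutive L1 entries [s*4 .. s*4+3], whose base addresses
    // step through the frame in BASE_PAGE_SIZE / ARM_L1_SCALE == 1K strides.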

    if (slot >= 1024) {
        printf("slot = %"PRIuCSLOT"\n", slot);
        panic("oops: slot id >= 1024");
        return SYS_ERR_VNODE_SLOT_INVALID;
    }

    // Only single mappings are supported at the L1 level.
    if (pte_count != 1) {
        printf("pte_count = %zu\n", (size_t)pte_count);
        panic("oops: pte_count");
        return SYS_ERR_VM_MAP_SIZE;
    }

    if (src->type != ObjType_VNode_ARM_l2) {
        panic("oops: wrong src type");
        return SYS_ERR_WRONG_MAPPING;
    }

    // Slots covering MEMORY_OFFSET and above are reserved for the kernel
    // (caller slots are in scaled, 4-entry units).
    if (slot >= ARM_L1_OFFSET(MEMORY_OFFSET) / ARM_L1_SCALE) {
        printf("slot = %"PRIuCSLOT"\n", slot);
        panic("oops: slot id");
        return SYS_ERR_VNODE_SLOT_RESERVED;
    }

    // Destination
    lpaddr_t dest_lpaddr = gen_phys_to_local_phys(get_address(dest));
    lvaddr_t dest_lvaddr = local_phys_to_mem(dest_lpaddr);

    union l1_entry* entry = (union l1_entry*)dest_lvaddr + (slot * ARM_L1_SCALE);

    // Source
    genpaddr_t src_gpaddr = get_address(src);
    lpaddr_t   src_lpaddr = gen_phys_to_local_phys(src_gpaddr);

    assert(aligned(src_lpaddr, 1u << 10));
    // The L2 table must not lie within the 16K L1 table itself.
    assert((src_lpaddr < dest_lpaddr) || (src_lpaddr >= dest_lpaddr + 16384));

    struct cte *src_cte = cte_for_cap(src);
    src_cte->mapping_info.pte_count = pte_count;
    src_cte->mapping_info.pte = dest_lpaddr + (slot * ARM_L1_SCALE);
    src_cte->mapping_info.offset = 0;

    for (int i = 0; i < 4; i++, entry++)
    {
        entry->raw = 0;
        entry->coarse.type   = L1_TYPE_COARSE_ENTRY;
        entry->coarse.mb1    = 1;
        entry->coarse.domain = 0;
        entry->coarse.base_address =
            (src_lpaddr + i * BASE_PAGE_SIZE / ARM_L1_SCALE) >> 10;
        debug(SUBSYS_PAGING, "L1 mapping %ld. @%p = %08"PRIx32"\n",
              slot * ARM_L1_SCALE + i, entry, entry->raw);
    }

    cp15_invalidate_tlb();

    return SYS_ERR_OK;
}

static errval_t
caps_map_l2(struct capability* dest,
            cslot_t            slot,
            struct capability* src,
            uintptr_t          kpi_paging_flags,
            uintptr_t          offset,
            uintptr_t          pte_count)
{
    assert(0 == (kpi_paging_flags & ~KPI_PAGING_FLAGS_MASK));

    // ARM L2 has 256 entries, but we treat a 4K page as a consecutive
    // region of L2 with a single index. 4K == 4 * 1K
    if (slot >= (256 * 4)) {
        return SYS_ERR_VNODE_SLOT_INVALID;
    }

    if (src->type != ObjType_Frame && src->type != ObjType_DevFrame) {
        return SYS_ERR_WRONG_MAPPING;
    }

    // check offset within frame
    if ((offset + BYTES_PER_PAGE > get_size(src)) ||
        ((offset % BYTES_PER_PAGE) != 0)) {
        return SYS_ERR_FRAME_OFFSET_INVALID;
    }

    // check that the mapping does not overrun the leaf page table
    if (slot + pte_count > (256 * 4)) {
        return SYS_ERR_VM_MAP_SIZE;
    }

    // Destination
    lpaddr_t dest_lpaddr = gen_phys_to_local_phys(get_address(dest));
    lvaddr_t dest_lvaddr = local_phys_to_mem(dest_lpaddr);

    union l2_entry* entry = (union l2_entry*)dest_lvaddr + slot;
    if (entry->small_page.type != L2_TYPE_INVALID_PAGE) {
        panic("Remapping valid page.");
    }

    // Source, at the requested offset into the frame.
    lpaddr_t src_lpaddr = gen_phys_to_local_phys(get_address(src) + offset);
    if ((src_lpaddr & (BASE_PAGE_SIZE - 1))) {
        panic("Invalid target");
    }

    struct cte *src_cte = cte_for_cap(src);
    src_cte->mapping_info.pte_count = pte_count;
    src_cte->mapping_info.pte = dest_lpaddr;
    src_cte->mapping_info.offset = offset;

    for (int i = 0; i < pte_count; i++) {
        entry->raw = 0;

        entry->small_page.type = L2_TYPE_SMALL_PAGE;
        paging_set_flags(entry, kpi_paging_flags);
        // Advance one 4K page per entry through the source frame.
        entry->small_page.base_address = (src_lpaddr + i * BYTES_PER_PAGE) >> 12;

        debug(SUBSYS_PAGING,
              "L2 mapping %08"PRIxLVADDR"[%"PRIuCSLOT"] @%p = %08"PRIx32"\n",
              dest_lvaddr, slot, entry, entry->raw);

        entry++;
    }

    // Flush TLB if remapping.
    cp15_invalidate_tlb();

    return SYS_ERR_OK;
}

/// Create page mappings
errval_t caps_copy_to_vnode(struct cte *dest_vnode_cte, cslot_t dest_slot,
                            struct cte *src_cte, uintptr_t flags,
                            uintptr_t offset, uintptr_t pte_count)
{
    struct capability *src_cap  = &src_cte->cap;
    struct capability *dest_cap = &dest_vnode_cte->cap;

    if (ObjType_VNode_ARM_l1 == dest_cap->type) {
        return caps_map_l1(dest_cap, dest_slot, src_cap,
                           flags,
                           offset,
                           pte_count);
    }
    else if (ObjType_VNode_ARM_l2 == dest_cap->type) {
        return caps_map_l2(dest_cap, dest_slot, src_cap,
                           flags,
                           offset,
                           pte_count);
    }
    else {
        panic("ObjType not VNode");
    }
}

static size_t do_unmap(lvaddr_t pt, cslot_t slot, size_t num_pages)
{
    size_t unmapped_pages = 0;
    union l2_entry *ptentry = (union l2_entry *)pt + slot;
    for (int i = 0; i < num_pages; i++) {
        // Clearing the raw word sets the type to L2_TYPE_INVALID_PAGE.
        ptentry->raw = 0;
        ptentry++;
        unmapped_pages++;
    }
    return unmapped_pages;
}

static inline void read_pt_entry(struct capability *pgtable, size_t slot,
                                 genpaddr_t *paddr)
{
    assert(type_is_vnode(pgtable->type));

    genpaddr_t gp = get_address(pgtable);
    lpaddr_t   lp = gen_phys_to_local_phys(gp);
    lvaddr_t   lv = local_phys_to_mem(lp);

    switch (pgtable->type) {
    case ObjType_VNode_ARM_l1:
    {
        union l1_entry *e = (union l1_entry*)lv;
        *paddr = (genpaddr_t)(e[slot].coarse.base_address) << 10;
        return;
    }
    case ObjType_VNode_ARM_l2:
    {
        union l2_entry *e = (union l2_entry*)lv;
        *paddr = (genpaddr_t)(e[slot].small_page.base_address) << 12;
        return;
    }
    default:
        assert(!"Should not get here");
    }
}

errval_t page_mappings_unmap(struct capability *pgtable, struct cte *mapping,
                             size_t slot, size_t num_pages)
{
    assert(type_is_vnode(pgtable->type));
    //printf("page_mappings_unmap(%zd pages, slot = %zd)\n", num_pages, slot);

    // get page table entry data
    genpaddr_t paddr;
    read_pt_entry(pgtable, slot, &paddr);
    lvaddr_t pt = local_phys_to_mem(gen_phys_to_local_phys(get_address(pgtable)));

    // get virtual address of first page
    // TODO: error checking
    genvaddr_t vaddr;
    struct cte *leaf_pt = cte_for_cap(pgtable);
    compile_vaddr(leaf_pt, slot, &vaddr);
    //genvaddr_t vend = vaddr + num_pages * BASE_PAGE_SIZE;
    // printf("vaddr = 0x%"PRIxGENVADDR"\n", vaddr);
    // printf("num_pages = %zu\n", num_pages);

    // get cap for mapping
    /*
    struct cte *mem;
    errval_t err = lookup_cap_for_mapping(paddr, pte, &mem);
    if (err_is_fail(err)) {
        printf("page_mappings_unmap: %ld\n", err);
        return err;
    }
    */
    //printf("state before unmap: mapped_pages = %zd\n", mem->mapping_info.mapped_pages);
    //printf("state before unmap: num_pages = %zd\n", num_pages);

    if (num_pages != mapping->mapping_info.pte_count) {
        printf("num_pages = %zu, mapping = %zu\n", num_pages,
               mapping->mapping_info.pte_count);
        // want to unmap a different number of pages than was mapped
        return SYS_ERR_VM_MAP_SIZE;
    }

    do_unmap(pt, slot, num_pages);

    // flush TLB for unmapped pages
    // TODO: selective TLB flush
    cp15_invalidate_tlb();

    // update mapping info
    memset(&mapping->mapping_info, 0, sizeof(struct mapping_info));

    return SYS_ERR_OK;
}

errval_t paging_modify_flags(struct capability *frame, uintptr_t offset,
                             uintptr_t pages, uintptr_t kpi_paging_flags)
{
    assert(0 == (kpi_paging_flags & ~KPI_PAGING_FLAGS_MASK));

    struct cte *mapping = cte_for_cap(frame);
    struct mapping_info *info = &mapping->mapping_info;

    /* Calculate location of page table entries we need to modify.
     * info->pte holds the local physical address of the first entry, so it
     * must be translated before the kernel can dereference it. */
    lvaddr_t base = local_phys_to_mem(info->pte) + offset;

    for (int i = 0; i < pages; i++) {
        union l2_entry *entry =
            (union l2_entry *)base + i;
        paging_set_flags(entry, kpi_paging_flags);
    }

    return SYS_ERR_OK;
}

void paging_dump_tables(struct dcb *dispatcher)
{
    printf("dump_hw_page_tables\n");
    lvaddr_t l1 = local_phys_to_mem(dispatcher->vspace);

    for (int l1_index = 0; l1_index < ARM_L1_MAX_ENTRIES; l1_index++) {
        // get level 2 table
        union l1_entry *l2 = (union l1_entry *)l1 + l1_index;
        if (!l2->raw) { continue; }
        genpaddr_t ptable_gp = (genpaddr_t)(l2->coarse.base_address) << 10;
        lvaddr_t ptable_lv = local_phys_to_mem(gen_phys_to_local_phys(ptable_gp));

        for (int entry = 0; entry < ARM_L2_MAX_ENTRIES; entry++) {
            union l2_entry *e =
                (union l2_entry *)ptable_lv + entry;
            genpaddr_t paddr = (genpaddr_t)(e->small_page.base_address) << BASE_PAGE_BITS;
            if (!paddr) {
                continue;
            }
            printf("%d.%d: 0x%"PRIxGENPADDR"\n", l1_index, entry, paddr);
        }
    }
}