 * Copyright (c) 2009-2013,2016 ETH Zurich.
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.

#include <paging_kernel_arch.h>
#include <exceptions.h>
#include <cap_predicates.h>
#include <mdb/mdb_tree.h>
#include <barrelfish_kpi/paging_arch.h>

#define MSG(format, ...) printk( LOG_NOTE, "ARMv7-A: "format, ## __VA_ARGS__ )
inline static uintptr_t paging_round_down(uintptr_t address, uintptr_t size)
    return address & ~(size - 1);

inline static uintptr_t paging_round_up(uintptr_t address, uintptr_t size)
    return (address + size - 1) & ~(size - 1);

inline static int aligned(uintptr_t address, uintptr_t bytes)
    return (address & (bytes - 1)) == 0;
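/*
 * Worked example for the helpers above, assuming a power-of-two size (they
 * are not valid for arbitrary sizes): with size = 0x1000, ~(size - 1) is
 * 0xFFFFF000, so
 *
 *   paging_round_down(0x12345, 0x1000) == 0x12000
 *   paging_round_up(0x12345, 0x1000)   == 0x13000
 *   aligned(0x12000, 0x1000)           == 1
 */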
paging_set_flags(union arm_l2_entry *entry, uintptr_t kpi_paging_flags)
    entry->small_page.tex = 1;       /* Write-allocate. */
    entry->small_page.shareable = 1; /* Coherent. */
    entry->small_page.bufferable = 1;
    entry->small_page.cacheable =
        (kpi_paging_flags & KPI_PAGING_FLAGS_NOCACHE) ? 0 : 1;
    entry->small_page.ap10 =
        (kpi_paging_flags & KPI_PAGING_FLAGS_READ) ? 2 : 0;
    entry->small_page.ap10 |=
        (kpi_paging_flags & KPI_PAGING_FLAGS_WRITE) ? 3 : 0;
    entry->small_page.ap2 = 0;
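/*
 * How the KPI flags above map onto the ARMv7 short-descriptor access
 * permissions (AP2:AP1:AP0, with AP2 held at 0 here):
 *
 *   kpi_paging_flags               ap10   meaning
 *   neither READ nor WRITE          0     no access at any level
 *   KPI_PAGING_FLAGS_READ           2     PL1 read/write, PL0 read-only
 *   KPI_PAGING_FLAGS_WRITE          3     PL1 read/write, PL0 read/write
 *
 * TEX=1 with C=B=1 selects Normal, write-back write-allocate memory, and
 * 'shareable' keeps the mapping coherent across cores.
 */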
static void map_kernel_section_hi(lvaddr_t va, union arm_l1_entry l1);
static union arm_l1_entry make_dev_section(lpaddr_t pa);
static void paging_print_l1_pte(lvaddr_t va, union arm_l1_entry pte);

void paging_print_l1(void);

/* In the non-boot paging code, these are pointers to be set to the values
 * passed from the boot driver. */
union arm_l1_entry *l1_low;
union arm_l1_entry *l1_high;
union arm_l2_entry *l2_vec;

void paging_load_pointers(struct arm_core_data *boot_core_data) {
    l1_low= (union arm_l1_entry *)
        local_phys_to_mem(boot_core_data->kernel_l1_low);
    l1_high= (union arm_l1_entry *)
        local_phys_to_mem(boot_core_data->kernel_l1_high);
    l2_vec= (union arm_l2_entry *)
        local_phys_to_mem(boot_core_data->kernel_l2_vec);
static void map_kernel_section_hi(lvaddr_t va, union arm_l1_entry l1)
    assert( va >= MEMORY_OFFSET );
    l1_high[ARM_L1_OFFSET(va)] = l1;
 * \brief Return an L1 page table entry to map a 1MB 'section' of
 * device memory located at physical address 'pa'.
static union arm_l1_entry make_dev_section(lpaddr_t pa)
    union arm_l1_entry l1;
    l1.section.type = L1_TYPE_SECTION_ENTRY;
    // l1.section.tex = 1;
    l1.section.bufferable = 0;
    l1.section.cacheable = 0;
    l1.section.ap10 = 3; // 3: kernel RW, user RW
    // l1.section.ap10 = 1; // 1: kernel RW, user no access
    l1.section.base_address = ARM_L1_SECTION_NUMBER(pa);
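/*
 * Note on the attributes above: assuming the rest of the entry starts out
 * zeroed, TEX, C and B are all zero, which gives a Strongly-ordered mapping
 * in the ARMv7 memory model - a conservative but safe choice for device
 * registers.  A typical use, as in paging_map_device() below, is
 *
 *   map_kernel_section_hi(dev_virt, make_dev_section(dev_base));
 *
 * where the low 20 bits of dev_base are presumably discarded by
 * ARM_L1_SECTION_NUMBER(), so the section always starts on a 1MB boundary.
 */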
 * \brief Return whether we have enabled the MMU. Useful for
 * initialization assertions.
bool paging_mmu_enabled(void)
 * \brief Perform a context switch. Reload TTBR0 with the new
 * address, and invalidate the TLBs and caches.
void paging_context_switch(lpaddr_t ttbr)
    assert(ttbr >= phys_memory_start &&
           ttbr < phys_memory_start + RAM_WINDOW_SIZE);
    lpaddr_t old_ttbr = cp15_read_ttbr0();
    if (ttbr != old_ttbr)
        dsb(); isb(); /* Make sure any page table updates have completed. */
        cp15_write_ttbr0(ttbr);
        isb(); /* The update must occur before we invalidate. */
        /* With no ASIDs, we've got to flush everything. */
        /* Clean and invalidate. */
        invalidate_data_caches_pouu(true);
        invalidate_instruction_cache();
        /* Make sure the invalidates are completed and visible before any
         * user-level code can execute. */
/* Map the exception vectors at VECTORS_BASE. */
paging_map_vectors(void) {
    /* The addresses installed into the page tables must be physical. */
    lpaddr_t vectors_phys= mem_to_local_phys((lvaddr_t)exception_vectors);
    lpaddr_t l2_vec_phys= mem_to_local_phys((lvaddr_t)l2_vec);

    MSG("Mapping vectors at P:%"PRIxLPADDR" to %"PRIxLVADDR
        " using L2 table at P:%"PRIxLPADDR"\n",
        vectors_phys, VECTORS_BASE, l2_vec_phys);

     * Install a single small page mapping to cover the vectors.
     * The mapping fields are set exactly as for the kernel's RAM sections -
     * see make_ram_section() for details.
    union arm_l2_entry *e_l2= &l2_vec[ARM_L2_OFFSET(VECTORS_BASE)];
    e_l2->small_page.type= L2_TYPE_SMALL_PAGE;
    e_l2->small_page.tex= 1;
    e_l2->small_page.cacheable= 1;
    e_l2->small_page.bufferable= 1;
    e_l2->small_page.not_global= 0;
    e_l2->small_page.shareable= 1;
    e_l2->small_page.ap10= 1;
    e_l2->small_page.ap2= 0;

    /* The vectors must be at the beginning of a frame. */
    assert((vectors_phys & BASE_PAGE_MASK) == 0);
    e_l2->small_page.base_address= vectors_phys >> BASE_PAGE_BITS;

    /* Clean the modified entry to L2 cache. */

     * Map the L2 table to hold the high vectors mapping.
    union arm_l1_entry *e_l1= &l1_high[ARM_L1_OFFSET(VECTORS_BASE)];
    e_l1->page_table.type= L1_TYPE_PAGE_TABLE_ENTRY;
    e_l1->page_table.base_address= l2_vec_phys >> ARM_L2_TABLE_BITS;

    /* Clean the modified entry to L2 cache. */

    /* We shouldn't need to invalidate any TLB entries, as this entry has
     * never been mapped. */
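/*
 * Worked example, assuming VECTORS_BASE is the ARMv7 high-vectors address
 * 0xFFFF0000: ARM_L1_OFFSET(0xFFFF0000) = 0xFFF, i.e. the very last entry of
 * l1_high, and ARM_L2_OFFSET(0xFFFF0000) = 0xF0, so the vectors occupy a
 * single small-page entry near the end of l2_vec.  With ap10 = 1 the page is
 * kernel read/write and inaccessible from user mode.
 */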
 * \brief Map a device into the kernel's address space.
 * \param dev_base is the physical address of the device.
 * \param dev_size is the number of bytes of physical address space
 *        the device occupies.
 * \return the kernel virtual address of the mapped device, or panic.
lvaddr_t paging_map_device(lpaddr_t dev_base, size_t dev_size)
    // We map all hardware devices in the kernel using sections in the
    // top quarter (0xC0000000-0xFE000000) of the address space, just
    // below the exception vectors.
    // It makes sense to use sections since (1) we don't map many
    // devices in the CPU driver anyway, and (2) if we did, it might
    // save a wee bit of TLB space.

    // First, we make sure that the device fits into a single section.
    if (ARM_L1_SECTION_NUMBER(dev_base) != ARM_L1_SECTION_NUMBER(dev_base+dev_size-1)) {
        panic("Attempt to map device spanning >1 section 0x%"PRIxLPADDR"+0x%zx\n",
              dev_base, dev_size );
    // Now, walk down the page table looking for either (a) an
    // existing mapping, in which case return the address the device
    // is already mapped to, or (b) an invalid mapping, in which case map it.
    uint32_t dev_section = ARM_L1_SECTION_NUMBER(dev_base);
    uint32_t dev_offset  = ARM_L1_SECTION_OFFSET(dev_base);
    lvaddr_t dev_virt    = 0;

    for( size_t i = ARM_L1_OFFSET( DEVICE_OFFSET - 1); i > ARM_L1_MAX_ENTRIES / 4 * 3; i-- ) {

        // Work out the virtual address we're looking at
        dev_virt = (lvaddr_t)(i << ARM_L1_SECTION_BITS);

        // If we already have a mapping for that address, return it.
        if ( L1_TYPE(l1_high[i].raw) == L1_TYPE_SECTION_ENTRY &&
             l1_high[i].section.base_address == dev_section ) {
            return dev_virt + dev_offset;

        // Otherwise, if it's free, map it.
        if ( L1_TYPE(l1_high[i].raw) == L1_TYPE_INVALID_ENTRY ) {
            map_kernel_section_hi(dev_virt, make_dev_section(dev_base));
            invalidate_data_caches_pouu(true);
            invalidate_tlb(); /* XXX selective */
            return dev_virt + dev_offset;

    // We're all out of section entries :-(
    panic("Ran out of section entries to map a kernel device");
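/*
 * Illustrative call (the device address is made up): mapping a UART whose
 * registers live at physical 0x48020000 gives dev_section = 0x480 and
 * dev_offset = 0x20000.  The loop above scans the L1 entries just below
 * DEVICE_OFFSET downwards, reuses an existing section that already maps
 * section 0x480 if it finds one, and otherwise installs a fresh device
 * section and returns that section's virtual base plus 0x20000.
 */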
 * \brief Print out an L1 page table entry 'pte', interpreted relative
 * to a given virtual address 'va'.
static void paging_print_l1_pte(lvaddr_t va, union arm_l1_entry pte)
    printf("(memory offset=%"PRIxLVADDR"):\n", va);
    if ( L1_TYPE(pte.raw) == L1_TYPE_INVALID_ENTRY) {

    printf( " %"PRIxLVADDR"-%"PRIxLVADDR": ", va, va + ARM_L1_SECTION_BYTES - 1);
    switch( L1_TYPE(pte.raw) ) {
        case L1_TYPE_INVALID_ENTRY:

        case L1_TYPE_PAGE_TABLE_ENTRY:
            printf("L2 PT 0x%"PRIxLPADDR" pxn=%d ns=%d sbz=%d dom=0x%04x sbz1=%d \n",
                   pte.page_table.base_address << 10,
                   pte.page_table.domain,
                   pte.page_table.sbz1 );

        case L1_TYPE_SECTION_ENTRY:
            printf("SECTION 0x%"PRIxLPADDR" buf=%d cache=%d xn=%d dom=0x%04x\n",
                   pte.section.base_address << 20,
                   pte.section.bufferable,
                   pte.section.cacheable,
                   pte.section.execute_never,
                   pte.section.domain );
            printf(" sbz0=%d ap=0x%03x tex=0x%03x shr=%d ng=%d mbz0=%d ns=%d\n",
                   (pte.section.ap2) << 2 | pte.section.ap10,
                   pte.section.shareable,
                   pte.section.not_global,
        case L1_TYPE_SUPER_SECTION_ENTRY:
            printf("SUPERSECTION 0x%"PRIxLPADDR" buf=%d cache=%d xn=%d dom=0x%04x\n",
                   pte.super_section.base_address << 24,
                   pte.super_section.bufferable,
                   pte.super_section.cacheable,
                   pte.super_section.execute_never,
                   pte.super_section.domain );
            printf(" sbz0=%d ap=0x%03x tex=0x%03x shr=%d ng=%d mbz0=%d ns=%d\n",
                   pte.super_section.sbz0,
                   (pte.super_section.ap2) << 2 | pte.super_section.ap10,
                   pte.super_section.tex,
                   pte.super_section.shareable,
                   pte.super_section.not_global,
                   pte.super_section.mbz0,
                   pte.super_section.ns );
 * \brief Print out the CPU driver's two static page tables.  Note:
 * 1) This is a lot of output.  Each table has 4096 entries, each of
 *    which takes one or two lines of output.
 * 2) The first half of the TTBR1 table is never used, and is
 *    probably (hopefully) all empty.
 * 3) The second half of the TTBR0 table is similarly never used, and
 *    probably also empty.
 * 4) The TTBR0 table is only used anyway at boot, since thereafter it
 *    is replaced by a user page table.
 * Otherwise, go ahead and knock yourself out.
void paging_print_l1(void)
    printf("TTBR1 table:\n");
    for(i = 0; i < ARM_L1_MAX_ENTRIES; i++, base += ARM_L1_SECTION_BYTES ) {
        paging_print_l1_pte(base, l1_high[i]);

    printf("TTBR0 table:\n");
    for(i = 0; i < ARM_L1_MAX_ENTRIES; i++, base += ARM_L1_SECTION_BYTES ) {
        paging_print_l1_pte(base, l1_low[i]);
caps_map_l1(struct capability* dest,
            struct capability* src,
            uintptr_t kpi_paging_flags,
            struct cte* mapping_cte)
    if (src->type != ObjType_VNode_ARM_l2) {
        // large page mapping goes here
        assert(0 == (kpi_paging_flags & ~KPI_PAGING_FLAGS_MASK));

        // ARM L1 has 4K entries, we need to fill in individual entries for
        // XXX: magic constant
            panic("oops: slot >= 4096");
            return SYS_ERR_VNODE_SLOT_INVALID;

        if (src->type != ObjType_Frame && src->type != ObjType_DevFrame) {
            panic("oops: src->type != ObjType_Frame && src->type != ObjType_DevFrame");
            return SYS_ERR_WRONG_MAPPING;

        // check offset within frame
        if ((offset + pte_count * BYTES_PER_SECTION > get_size(src)) ||
            ((offset % BYTES_PER_SECTION) != 0)) {
            printf("offset = %"PRIuPTR", pte_count=%"PRIuPTR
                   ", src->size = %"PRIuGENSIZE", src->type = %d\n",
                   offset, pte_count, get_size(src), src->type);
            panic("oops: frame offset invalid");
            return SYS_ERR_FRAME_OFFSET_INVALID;

        // check mapping does not overlap leaf page table
        if (slot + pte_count > 4096) {
            return SYS_ERR_VM_MAP_SIZE;
        lpaddr_t dest_lpaddr = gen_phys_to_local_phys(get_address(dest));
        lvaddr_t dest_lvaddr = local_phys_to_mem(dest_lpaddr);

        union arm_l1_entry* entry = ((union arm_l1_entry*)dest_lvaddr) + slot;
        if (entry->invalid.type != L1_TYPE_INVALID_ENTRY) {
            panic("Remapping valid page.");

        lpaddr_t src_lpaddr = gen_phys_to_local_phys(get_address(src) + offset);
        if ((src_lpaddr & (LARGE_PAGE_SIZE - 1))) {
            panic("Invalid target");

        create_mapping_cap(mapping_cte, src,
                           dest_lpaddr + slot * sizeof(union arm_l1_entry),

        for (int i = 0; i < pte_count; i++) {
            entry->section.type = L1_TYPE_SECTION_ENTRY;
            entry->section.bufferable = 1;
            entry->section.cacheable = (kpi_paging_flags & KPI_PAGING_FLAGS_NOCACHE)? 0: 1;
            entry->section.ap10 = (kpi_paging_flags & KPI_PAGING_FLAGS_READ)? 2:0;
            entry->section.ap10 |= (kpi_paging_flags & KPI_PAGING_FLAGS_WRITE)? 3:0;
            entry->section.ap2 = 0;
            entry->section.base_address = (src_lpaddr + i * BYTES_PER_SECTION) >> 20;

            /* Clean the modified entry to L2 cache. */

            debug(SUBSYS_PAGING, "L2 mapping %08"PRIxLVADDR"[%"PRIuCSLOT
                  "] @%p = %08"PRIx32"\n",
                  dest_lvaddr, slot, entry, entry->raw);

        // Flush TLB if remapping.
        invalidate_tlb(); /* XXX selective */
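/*
 * Each entry written above maps one 1MB section: entry i covers
 * src_lpaddr + i * BYTES_PER_SECTION, and only bits [31:20] of that address
 * (hence the >> 20) end up in section.base_address.  For example, a 2MB
 * frame mapped at slot 0x400 covers virtual addresses 0x40000000-0x401FFFFF.
 */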
    // XXX: magic constant
    if (slot >= ARM_L1_MAX_ENTRIES) {
        printf("slot = %"PRIuCSLOT"\n", slot);
        return SYS_ERR_VNODE_SLOT_INVALID;

    // check offset within frame
    if ((offset + pte_count * ARM_L2_TABLE_BYTES > get_size(src)) ||
        ((offset % ARM_L2_TABLE_BYTES) != 0)) {
        printf("offset = %"PRIuPTR", pte_count=%"PRIuPTR
               ", src->size = %"PRIuGENSIZE", src->type = %d\n",
               offset, pte_count, get_size(src), src->type);
        return SYS_ERR_FRAME_OFFSET_INVALID;

    // check mapping does not overlap leaf page table
    if (slot + pte_count > 4096) {
        return SYS_ERR_VM_MAP_SIZE;

    if (slot >= ARM_L1_OFFSET(MEMORY_OFFSET)) {
        printf("slot = %"PRIuCSLOT"\n", slot);
        return SYS_ERR_VNODE_SLOT_RESERVED;
    debug(SUBSYS_PAGING, "caps_map_l1: mapping %"PRIuPTR" L2 tables @%"PRIuCSLOT"\n",

    lpaddr_t dest_lpaddr = gen_phys_to_local_phys(get_address(dest));
    lvaddr_t dest_lvaddr = local_phys_to_mem(dest_lpaddr);

    union arm_l1_entry* entry = (union arm_l1_entry*)dest_lvaddr + slot;

    genpaddr_t src_gpaddr = get_address(src);
    lpaddr_t src_lpaddr = gen_phys_to_local_phys(src_gpaddr) + offset;

    assert(aligned(src_lpaddr, 1u << 10));
    assert((src_lpaddr < dest_lpaddr) || (src_lpaddr >= dest_lpaddr + 16384));

    create_mapping_cap(mapping_cte, src,
                       dest_lpaddr + slot * sizeof(union arm_l1_entry),

    for (int i = 0; i < pte_count; i++, entry++)
        entry->page_table.type = L1_TYPE_PAGE_TABLE_ENTRY;
        entry->page_table.domain = 0;
        entry->page_table.base_address =
            (src_lpaddr + i * ARM_L2_TABLE_BYTES) >> 10;

        /* Clean the modified entry to L2 cache. */

        debug(SUBSYS_PAGING, "L1 mapping %"PRIuCSLOT". @%p = %08"PRIx32"\n",
              slot + i, entry, entry->raw);

    invalidate_tlb(); /* XXX selective */
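/*
 * A hardware L2 table is 1KB (256 four-byte entries) and translates 1MB of
 * virtual address space, which is why src_lpaddr must be 1KB-aligned and why
 * only bits [31:10] (the >> 10 above) are stored in the L1 descriptor.
 * Mapping pte_count consecutive tables therefore hands the next pte_count
 * megabytes of the address space over to those tables.
 */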
caps_map_l2(struct capability* dest,
            struct capability* src,
            uintptr_t kpi_paging_flags,
            struct cte* mapping_cte)
    assert(0 == (kpi_paging_flags & ~KPI_PAGING_FLAGS_MASK));

    if (slot >= ARM_L2_MAX_ENTRIES) {
        panic("oops: slot >= 256");
        return SYS_ERR_VNODE_SLOT_INVALID;

    if (src->type != ObjType_Frame && src->type != ObjType_DevFrame) {
        panic("oops: src->type != ObjType_Frame && src->type != ObjType_DevFrame");
        return SYS_ERR_WRONG_MAPPING;

    // check offset within frame
    if ((offset + pte_count * BASE_PAGE_SIZE > get_size(src)) ||
        ((offset % BASE_PAGE_SIZE) != 0)) {
        return SYS_ERR_FRAME_OFFSET_INVALID;

    // check mapping does not overlap leaf page table
    if (slot + pte_count > ARM_L2_MAX_ENTRIES) {
        return SYS_ERR_VM_MAP_SIZE;

    lpaddr_t dest_lpaddr = gen_phys_to_local_phys(get_address(dest));
    lvaddr_t dest_lvaddr = local_phys_to_mem(dest_lpaddr);

    union arm_l2_entry* entry = (union arm_l2_entry*)dest_lvaddr + slot;
    if (entry->small_page.type != L2_TYPE_INVALID_PAGE) {
        panic("Remapping valid page.");

    lpaddr_t src_lpaddr = gen_phys_to_local_phys(get_address(src) + offset);
    if ((src_lpaddr & (BASE_PAGE_SIZE - 1))) {
        panic("Invalid target");

    create_mapping_cap(mapping_cte, src,
                       dest_lpaddr + slot * sizeof(union arm_l2_entry),

    for (int i = 0; i < pte_count; i++) {
        entry->small_page.type = L2_TYPE_SMALL_PAGE;
        paging_set_flags(entry, kpi_paging_flags);
        entry->small_page.base_address = (src_lpaddr + i * BASE_PAGE_SIZE) >> 12;

        /* Clean the modified entry to L2 cache. */

        debug(SUBSYS_PAGING, "L2 mapping %08"PRIxLVADDR"[%"PRIuCSLOT"] @%p = %08"PRIx32"\n",
              dest_lvaddr, slot, entry, entry->raw);

    // Flush TLB if remapping.
    invalidate_tlb(); /* XXX selective */
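/*
 * Small-page arithmetic: entry i maps the 4KB page at
 * src_lpaddr + i * BASE_PAGE_SIZE, and base_address keeps bits [31:12]
 * (the >> 12 above).  For instance, offset 0x3000 into a frame at physical
 * 0x80010000 gives src_lpaddr = 0x80013000 and base_address = 0x80013.
 */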
/// Create page mappings
errval_t caps_copy_to_vnode(struct cte *dest_vnode_cte, cslot_t dest_slot,
                            struct cte *src_cte, uintptr_t flags,
                            uintptr_t offset, uintptr_t pte_count,
                            struct cte *mapping_cte)
    struct capability *src_cap = &src_cte->cap;
    struct capability *dest_cap = &dest_vnode_cte->cap;
    assert(mapping_cte->cap.type == ObjType_Null);

    if (ObjType_VNode_ARM_l1 == dest_cap->type) {
        //printf("caps_map_l1: %zu\n", (size_t)pte_count);
        err = caps_map_l1(dest_cap, dest_slot, src_cap,
    else if (ObjType_VNode_ARM_l2 == dest_cap->type) {
        //printf("caps_map_l2: %zu\n", (size_t)pte_count);
        err = caps_map_l2(dest_cap, dest_slot, src_cap,

        panic("ObjType not VNode");

    if (err_is_fail(err)) {
        memset(mapping_cte, 0, sizeof(*mapping_cte));

    assert(type_is_mapping(mapping_cte->cap.type));
    err = mdb_insert(mapping_cte);
    if (err_is_fail(err)) {
        printk(LOG_ERR, "%s: mdb_insert: %"PRIuERRV"\n", __FUNCTION__, err);

    TRACE_CAP_MSG("created", mapping_cte);
size_t do_unmap(lvaddr_t pt, cslot_t slot, size_t num_pages)
    size_t unmapped_pages = 0;
    union arm_l2_entry *ptentry = (union arm_l2_entry *)pt + slot;
    for (int i = 0; i < num_pages; i++) {

    return unmapped_pages;
errval_t paging_modify_flags(struct capability *mapping, uintptr_t offset,
                             uintptr_t pages, uintptr_t kpi_paging_flags)
    // XXX: modify flags for sections?
    assert(type_is_mapping(mapping->type));
    assert(0 == (kpi_paging_flags & ~KPI_PAGING_FLAGS_MASK));

    struct Frame_Mapping *info = &mapping->u.frame_mapping;

    /* Calculate location of page table entries we need to modify */
    lvaddr_t base = local_phys_to_mem(info->pte) +
        offset * sizeof(union arm_l2_entry);

    for (int i = 0; i < pages; i++) {
        union arm_l2_entry *entry =
            (union arm_l2_entry *)base + i;
        paging_set_flags(entry, kpi_paging_flags);

        /* Clean the modified entry to L2 cache. */

    return paging_tlb_flush_range(cte_for_cap(mapping), offset, pages);
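/*
 * A sketch of a typical use, assuming 'mapping_cap' (a hypothetical name)
 * refers to an existing frame-mapping capability and the caller wants to
 * revoke user write access on page-table entries 2-5 of that mapping:
 *
 *   paging_modify_flags(mapping_cap, 2, 4, KPI_PAGING_FLAGS_READ);
 *
 * Each affected PTE keeps its base address but has its permission and
 * cacheability bits rewritten by paging_set_flags(), after which the
 * corresponding TLB range is flushed.
 */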
void paging_dump_tables(struct dcb *dispatcher)
    if (!local_phys_is_valid(dispatcher->vspace)) {
        printk(LOG_ERR, "dispatcher->vspace = 0x%"PRIxLPADDR": too high!\n",

    lvaddr_t l1 = local_phys_to_mem(dispatcher->vspace);

    for (int l1_index = 0; l1_index < ARM_L1_MAX_ENTRIES; l1_index++) {
        union arm_l1_entry *l1_e = (union arm_l1_entry *)l1 + l1_index;
        if (!l1_e->raw) { continue; }
        if (l1_e->invalid.type == L1_TYPE_SECTION_ENTRY) {
            genpaddr_t paddr = (genpaddr_t)(l1_e->section.base_address) << 20;
            printf("%d: (section) 0x%"PRIxGENPADDR"\n", l1_index, paddr);
        } else if (l1_e->invalid.type == L1_TYPE_PAGE_TABLE_ENTRY) {
            genpaddr_t ptable_gp = (genpaddr_t)(l1_e->page_table.base_address) << 10;
            lvaddr_t ptable_lv = local_phys_to_mem(gen_phys_to_local_phys(ptable_gp));

            printf("%d: (l2table) 0x%"PRIxGENPADDR"\n", l1_index, ptable_gp);

            for (int entry = 0; entry < ARM_L2_MAX_ENTRIES; entry++) {
                union arm_l2_entry *e =
                    (union arm_l2_entry *)ptable_lv + entry;
                genpaddr_t paddr = (genpaddr_t)(e->small_page.base_address) << BASE_PAGE_BITS;

                printf("%d.%d: 0x%"PRIxGENPADDR" (rw=%d%d)\n", l1_index, entry, paddr,
                       (e->small_page.ap10 >> 1) & 1, e->small_page.ap10 & 1);
 * \brief Install a page table pointer in a level 1 page table
 * located at 'table_base' to map addresses starting at virtual
 * address 'va'. The level 2 page table to be used is assumed to be
 * located at physical address 'pa'.
void paging_map_user_pages_l1(lvaddr_t table_base, lvaddr_t va, lpaddr_t pa)
    assert(aligned(table_base, ARM_L1_ALIGN));
    assert(aligned(pa, BYTES_PER_SMALL_PAGE));

    union arm_l1_entry e;
    union arm_l1_entry *l1_table;

    e.page_table.type = L1_TYPE_PAGE_TABLE_ENTRY;
    e.page_table.domain = 0;
    e.page_table.base_address = ARM_L2_TABLE_PPN(pa);

    if (table_base == 0) {
        if(va < MEMORY_OFFSET) {
            table_base = cp15_read_ttbr0() + MEMORY_OFFSET;

            table_base = cp15_read_ttbr1() + MEMORY_OFFSET;

    l1_table = (union arm_l1_entry *) table_base;
    l1_table[ARM_L1_OFFSET(va)] = e;

    /* Clean the modified entry to L2 cache. */
    clean_to_pou(&l1_table[ARM_L1_OFFSET(va)]);
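/*
 * Example of the index arithmetic used above, assuming ARM_L2_TABLE_PPN()
 * reduces the physical address to its 1KB-aligned table number (pa >> 10):
 * installing an L2 table for va = 0x40100000 writes L1 slot
 * ARM_L1_OFFSET(0x40100000) = 0x401, and that single slot then translates
 * the 1MB region 0x40100000-0x401FFFFF through the new table.
 */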
 * \brief Install a level 2 page table entry located at l2e, to map
 * physical address 'addr', with flags 'flags'. 'flags' here is in the
 * form of a prototype 32-bit L2 *invalid* PTE with address 0.
void paging_set_l2_entry(uintptr_t* l2e, lpaddr_t addr, uintptr_t flags)
    union arm_l2_entry e;

    assert( L2_TYPE(e.raw) == L2_TYPE_INVALID_PAGE );
    assert( e.small_page.base_address == 0);
    assert( ARM_PAGE_OFFSET(addr) == 0 );

    e.small_page.type = L2_TYPE_SMALL_PAGE;
    e.small_page.base_address = (addr >> 12);

    /* Clean the modified entry to L2 cache. */
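/*
 * The 'flags' argument is a prototype PTE: the caller encodes only the
 * attribute bits in an otherwise-invalid entry with a zero base address, and
 * this function adds the type and the page frame number.  A hedged sketch of
 * a caller (the flag-building helper and all names here are hypothetical):
 *
 *   uintptr_t proto = make_prototype_flags();   // attribute bits only
 *   paging_set_l2_entry(&l2_table[ARM_L2_OFFSET(va)], frame_pa, proto);
 */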