2 * Copyright (c) 2009 - 2012 ETH Zurich.
5 * This file is distributed under the terms in the attached LICENSE file.
6 * If you do not find this file, copies can be found by writing to:
7 * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
13 #include <paging_kernel_arch.h>
15 #include <exceptions.h>
17 #include <cap_predicates.h>
/**
 * \brief Round 'address' down to the nearest multiple of 'size'.
 *
 * \param address Address to round.
 * \param size    Alignment granularity; must be a power of two.
 *
 * \return Largest multiple of 'size' that is <= 'address'.
 */
inline static uintptr_t paging_round_down(uintptr_t address, uintptr_t size)
{
    // For power-of-two sizes, clearing the low bits rounds down.
    uintptr_t mask = size - 1;
    return address & ~mask;
}
/**
 * \brief Round 'address' up to the nearest multiple of 'size'.
 *
 * \param address Address to round.
 * \param size    Alignment granularity; must be a power of two.
 *
 * \return Smallest multiple of 'size' that is >= 'address'.
 */
inline static uintptr_t paging_round_up(uintptr_t address, uintptr_t size)
{
    // Bias by (size - 1) so any partial granule bumps to the next one,
    // then clear the low bits.
    uintptr_t mask = size - 1;
    return (address + mask) & ~mask;
}
/**
 * \brief Test whether 'address' is aligned to a 'bytes' boundary.
 *
 * \param address Address to test.
 * \param bytes   Alignment granularity; must be a power of two.
 *
 * \return Non-zero (1) iff the address is aligned, 0 otherwise.
 */
inline static int aligned(uintptr_t address, uintptr_t bytes)
{
    // Aligned exactly when no low-order bits are set.
    return !(address & (bytes - 1));
}
/**
 * \brief Apply KPI paging flags to an ARM L2 small-page entry.
 *
 * Only the attribute bits (bufferable/cacheable/AP) are written; the
 * entry's type and base address fields are left untouched.
 *
 * \param entry            L2 small-page entry to modify.
 * \param kpi_paging_flags Combination of KPI_PAGING_FLAGS_* bits.
 */
paging_set_flags(union arm_l2_entry *entry, uintptr_t kpi_paging_flags)
    // All mappings are made bufferable unconditionally.
    entry->small_page.bufferable = 1;
    // NOCACHE strips the cacheable attribute; everything else is cached.
    entry->small_page.cacheable =
        (kpi_paging_flags & KPI_PAGING_FLAGS_NOCACHE) ? 0 : 1;
    // AP[1:0]: READ alone yields 2, WRITE (alone or combined) forces 3.
    // NOTE(review): presumably the ARMv7 short-descriptor encoding where,
    // with AP[2]=0, 0b10 = user read-only and 0b11 = full read/write —
    // confirm against the ARM ARM.
    entry->small_page.ap10 =
        (kpi_paging_flags & KPI_PAGING_FLAGS_READ) ? 2 : 0;
    entry->small_page.ap10 |=
        (kpi_paging_flags & KPI_PAGING_FLAGS_WRITE) ? 3 : 0;
    // AP[2] = 0: do not force the mapping read-only.
    entry->small_page.ap2 = 0;
/**
 * \brief Map an L2 page-table capability into a slot of an L1 page table.
 *
 * Because user-level L2 tables are handed out as 4KB pages while the
 * hardware L2 table is only 1KB, a single "L2" mapping installs
 * ARM_L1_SCALE (4) consecutive hardware L1 entries.
 *
 * \param dest             L1 VNode capability to install into.
 * \param src              L2 VNode capability being mapped.
 * \param kpi_paging_flags Requested KPI_PAGING_FLAGS_* bits.
 *
 * \return SYS_ERR_OK on success, SYS_ERR_* on validation failure.
 */
caps_map_l1(struct capability* dest,
            struct capability* src,
            uintptr_t kpi_paging_flags,
    // We have chicken-and-egg problem in initializing resources so
    // instead of treating an L2 table it's actual 1K size, we treat
    // it as being 4K. As a result when we map an "L2" table we actually
    // map a page of memory as if it is 4 consecutive L2 tables.
    // See lib/barrelfish/arch/arm/pmap_arch.c for more discussion.
    const int ARM_L1_SCALE = 4;

        // Reject out-of-range destination slots.
        printf("slot = %"PRIuCSLOT"\n",slot);
        panic("oops: slot id >= 1024");
        return SYS_ERR_VNODE_SLOT_INVALID;

        // Reject unsupported mapping sizes.
        printf("pte_count = %zu\n",(size_t)pte_count);
        panic("oops: pte_count");
        return SYS_ERR_VM_MAP_SIZE;

    // Only an L2 VNode may be installed into an L1 slot.
    if (src->type != ObjType_VNode_ARM_l2) {
        panic("oops: wrong src type");
        return SYS_ERR_WRONG_MAPPING;

    // Slots covering the kernel window (at/above MEMORY_OFFSET) are reserved.
    if (slot >= ARM_L1_OFFSET(MEMORY_OFFSET) / ARM_L1_SCALE) {
        printf("slot = %"PRIuCSLOT"\n",slot);
        panic("oops: slot id");
        return SYS_ERR_VNODE_SLOT_RESERVED;

    // Kernel-virtual address of the destination L1 table.
    lpaddr_t dest_lpaddr = gen_phys_to_local_phys(get_address(dest));
    lvaddr_t dest_lvaddr = local_phys_to_mem(dest_lpaddr);

    // First of the ARM_L1_SCALE hardware entries backing this "slot".
    union arm_l1_entry* entry = (union arm_l1_entry*)dest_lvaddr + (slot * ARM_L1_SCALE);

    genpaddr_t src_gpaddr = get_address(src);
    lpaddr_t src_lpaddr = gen_phys_to_local_phys(src_gpaddr);

    // Hardware L2 tables are 1KB-aligned; the source must not overlap the
    // 16KB L1 table we are writing into.
    assert(aligned(src_lpaddr, 1u << 10));
    assert((src_lpaddr < dest_lpaddr) || (src_lpaddr >= dest_lpaddr + 16384));

    // Record the mapping in the source cap's CTE so it can be unmapped later.
    struct cte *src_cte = cte_for_cap(src);
    src_cte->mapping_info.pte_count = pte_count;
    // NOTE(review): this adds the entry *index* (slot * ARM_L1_SCALE) to a
    // physical address rather than the entry's byte offset
    // (index * sizeof(union arm_l1_entry)) — verify against the consumers
    // of mapping_info.pte before relying on it.
    src_cte->mapping_info.pte = dest_lpaddr + (slot * ARM_L1_SCALE);
    src_cte->mapping_info.offset = 0;

    // Install 4 consecutive 1KB hardware L2 tables backing the 4KB page.
    for (int i = 0; i < 4; i++, entry++)
        entry->page_table.type = L1_TYPE_PAGE_TABLE_ENTRY;
        entry->page_table.domain = 0;
        entry->page_table.base_address =
            (src_lpaddr + i * BASE_PAGE_SIZE / ARM_L1_SCALE) >> 10;
        debug(SUBSYS_PAGING, "L1 mapping %"PRIuCSLOT". @%p = %08"PRIx32"\n",
              slot * ARM_L1_SCALE + i, entry, entry->raw);

    // Conservative full TLB invalidate; no selective flush here.
    cp15_invalidate_tlb();
/**
 * \brief Map a (device-)frame capability into small-page entries of an
 *        L2 page table.
 *
 * \param dest             L2 VNode capability to install into.
 * \param src              Frame or DevFrame capability to map.
 * \param kpi_paging_flags Requested KPI_PAGING_FLAGS_* bits.
 *
 * \return SYS_ERR_OK on success, SYS_ERR_* on validation failure.
 */
caps_map_l2(struct capability* dest,
            struct capability* src,
            uintptr_t kpi_paging_flags,
    // Reject flag bits outside the supported mask.
    assert(0 == (kpi_paging_flags & ~KPI_PAGING_FLAGS_MASK));

    // ARM L2 has 256 entries, but we treat a 4K page as a consecutive
    // region of L2 with a single index. 4K == 4 * 1K
    if (slot >= (256 * 4)) {
        panic("oops: slot >= (256 * 4)");
        return SYS_ERR_VNODE_SLOT_INVALID;

    // Only mappable memory types may be installed as small pages.
    if (src->type != ObjType_Frame && src->type != ObjType_DevFrame) {
        panic("oops: src->type != ObjType_Frame && src->type != ObjType_DevFrame");
        return SYS_ERR_WRONG_MAPPING;

    // check offset within frame
    if ((offset + BYTES_PER_PAGE > get_size(src)) ||
        ((offset % BYTES_PER_PAGE) != 0)) {
        panic("oops: frame offset invalid");
        return SYS_ERR_FRAME_OFFSET_INVALID;

    // check mapping does not overlap leaf page table
    if (slot + pte_count > (256 * 4)) {
        return SYS_ERR_VM_MAP_SIZE;

    // Kernel-virtual address of the destination L2 table.
    lpaddr_t dest_lpaddr = gen_phys_to_local_phys(get_address(dest));
    lvaddr_t dest_lvaddr = local_phys_to_mem(dest_lpaddr);

    union arm_l2_entry* entry = (union arm_l2_entry*)dest_lvaddr + slot;
    // Refuse to overwrite an entry that is already valid.
    if (entry->small_page.type != L2_TYPE_INVALID_PAGE) {
        panic("Remapping valid page.");

    lpaddr_t src_lpaddr = gen_phys_to_local_phys(get_address(src) + offset);
    // The mapped physical region must be page-aligned.
    if ((src_lpaddr & (BASE_PAGE_SIZE - 1))) {
        panic("Invalid target");

    // Record the mapping in the source cap's CTE so it can be unmapped later.
    // NOTE(review): pte records the table base only, not the first entry's
    // address (dest_lpaddr + slot * sizeof entry) — verify against the
    // consumers of mapping_info.pte.
    struct cte *src_cte = cte_for_cap(src);
    src_cte->mapping_info.pte_count = pte_count;
    src_cte->mapping_info.pte = dest_lpaddr;
    src_cte->mapping_info.offset = offset;

    // Install one small-page entry per page, consecutive in both the
    // table and the frame.
    for (int i = 0; i < pte_count; i++) {
        entry->small_page.type = L2_TYPE_SMALL_PAGE;
        paging_set_flags(entry, kpi_paging_flags);
        entry->small_page.base_address = (src_lpaddr + i * BYTES_PER_PAGE) >> 12;

        debug(SUBSYS_PAGING, "L2 mapping %08"PRIxLVADDR"[%"PRIuCSLOT"] @%p = %08"PRIx32"\n",
              dest_lvaddr, slot, entry, entry->raw);

    // Flush TLB if remapping.
    cp15_invalidate_tlb();
/// Create page mappings
/**
 * \brief Dispatch a mapping request to the L1 or L2 mapper according to
 *        the destination VNode's type.
 *
 * \param dest_vnode_cte CTE of the destination VNode (L1 or L2 table).
 * \param dest_slot      Slot within the destination table.
 * \param src_cte        CTE of the capability being mapped.
 * \param flags          KPI paging flags for the new mapping.
 * \param offset         Byte offset into the source capability.
 * \param pte_count      Number of page-table entries to install.
 *
 * \return Result of caps_map_l1()/caps_map_l2(), or
 *         SYS_ERR_VM_ALREADY_MAPPED if the source is already mapped.
 */
errval_t caps_copy_to_vnode(struct cte *dest_vnode_cte, cslot_t dest_slot,
                            struct cte *src_cte, uintptr_t flags,
                            uintptr_t offset, uintptr_t pte_count)
    struct capability *src_cap = &src_cte->cap;
    struct capability *dest_cap = &dest_vnode_cte->cap;

    // A capability records at most one mapping; refuse a second one.
    if (src_cte->mapping_info.pte) {
        return SYS_ERR_VM_ALREADY_MAPPED;

    if (ObjType_VNode_ARM_l1 == dest_cap->type) {
        //printf("caps_map_l1: %zu\n", (size_t)pte_count);
        return caps_map_l1(dest_cap, dest_slot, src_cap,

    else if (ObjType_VNode_ARM_l2 == dest_cap->type) {
        //printf("caps_map_l2: %zu\n", (size_t)pte_count);
        return caps_map_l2(dest_cap, dest_slot, src_cap,

    // Any other destination type is a caller bug.
    panic("ObjType not VNode");
233 size_t do_unmap(lvaddr_t pt, cslot_t slot, size_t num_pages)
235 size_t unmapped_pages = 0;
236 union arm_l2_entry *ptentry = (union arm_l2_entry *)pt + slot;
237 for (int i = 0; i < num_pages; i++) {
241 return unmapped_pages;
244 static inline void read_pt_entry(struct capability *pgtable, size_t slot, genpaddr_t *paddr)
246 assert(type_is_vnode(pgtable->type));
249 genpaddr_t gp = get_address(pgtable);
250 lpaddr_t lp = gen_phys_to_local_phys(gp);
251 lvaddr_t lv = local_phys_to_mem(lp);
253 switch (pgtable->type) {
254 case ObjType_VNode_ARM_l1:
256 union arm_l1_entry *e = (union arm_l1_entry*)lv;
257 *paddr = (genpaddr_t)(e->page_table.base_address) << 10;
260 case ObjType_VNode_ARM_l2:
262 union arm_l2_entry *e = (union arm_l2_entry*)lv;
263 *paddr = (genpaddr_t)(e->small_page.base_address) << 12;
267 assert(!"Should not get here");
/**
 * \brief Tear down 'num_pages' page mappings starting at 'slot' of the
 *        given page table.
 *
 * \param pgtable   VNode capability containing the mappings.
 * \param mapping   CTE of the mapped capability; its mapping_info must
 *                  match the requested unmap extent exactly.
 * \param slot      First slot to unmap.
 * \param num_pages Number of pages to unmap (must equal the originally
 *                  mapped pte_count).
 *
 * \return SYS_ERR_VM_MAP_SIZE on a partial-unmap request; otherwise the
 *         outcome of the lookup/unmap sequence.
 */
errval_t page_mappings_unmap(struct capability *pgtable, struct cte *mapping, size_t slot, size_t num_pages)
    assert(type_is_vnode(pgtable->type));
    //printf("page_mappings_unmap(%zd pages, slot = %zd)\n", num_pages, slot);

    // get page table entry data
    read_pt_entry(pgtable, slot, &paddr);
    lvaddr_t pt = local_phys_to_mem(gen_phys_to_local_phys(get_address(pgtable)));

    // get virtual address of first page
    // TODO: error checking
    struct cte *leaf_pt = cte_for_cap(pgtable);
    compile_vaddr(leaf_pt, slot, &vaddr);
    //genvaddr_t vend = vaddr + num_pages * BASE_PAGE_SIZE;
    // printf("vaddr = 0x%"PRIxGENVADDR"\n", vaddr);
    // printf("num_pages = %zu\n", num_pages);

    // get cap for mapping
    errval_t err = lookup_cap_for_mapping(paddr, pte, &mem);
    if (err_is_fail(err)) {
        printf("page_mappings_unmap: %ld\n", err);

    //printf("state before unmap: mapped_pages = %zd\n", mem->mapping_info.mapped_pages);
    //printf("state before unmap: num_pages = %zd\n", num_pages);

    // Refuse partial unmaps: the count must match what was mapped.
    if (num_pages != mapping->mapping_info.pte_count) {
        printf("num_pages = %zu, mapping = %zu\n", num_pages, mapping->mapping_info.pte_count);
        // want to unmap a different amount of pages than was mapped
        return SYS_ERR_VM_MAP_SIZE;

    // Clear the hardware entries.
    do_unmap(pt, slot, num_pages);

    // flush TLB for unmapped pages
    // TODO: selective TLB flush
    cp15_invalidate_tlb();

    // update mapping info
    memset(&mapping->mapping_info, 0, sizeof(struct mapping_info));
321 errval_t paging_modify_flags(struct capability *frame, uintptr_t offset,
322 uintptr_t pages, uintptr_t kpi_paging_flags)
325 assert(0 == (kpi_paging_flags & ~KPI_PAGING_FLAGS_MASK));
327 struct cte *mapping = cte_for_cap(frame);
328 struct mapping_info *info = &mapping->mapping_info;
330 /* Calculate location of page table entries we need to modify */
331 lvaddr_t base = local_phys_to_mem(info->pte) + offset;
333 for (int i = 0; i < pages; i++) {
334 union arm_l2_entry *entry =
335 (union arm_l2_entry *)base + i;
336 paging_set_flags(entry, kpi_paging_flags);
339 return paging_tlb_flush_range(mapping, pages);
/**
 * \brief Dump the hardware page tables of a dispatcher to the console.
 *
 * Walks every L1 entry of the dispatcher's address space and, for each
 * non-empty entry, prints the physical address held by each L2 entry of
 * the referenced leaf table.
 *
 * \param dispatcher Dispatcher whose 'vspace' root table is dumped.
 */
void paging_dump_tables(struct dcb *dispatcher)
    printf("dump_hw_page_tables\n");
    lvaddr_t l1 = local_phys_to_mem(dispatcher->vspace);

    for (int l1_index = 0; l1_index < ARM_L1_MAX_ENTRIES; l1_index++) {
        // Skip empty L1 entries (raw word of zero).
        union arm_l1_entry *l1_e = (union arm_l1_entry *)l1 + l1_index;
        if (!l1_e->raw) { continue; }
        // L1 page-table entries store a 1KB-aligned table base (bits 31:10).
        genpaddr_t ptable_gp = (genpaddr_t)(l1_e->page_table.base_address) << 10;
        lvaddr_t ptable_lv = local_phys_to_mem(gen_phys_to_local_phys(ptable_gp));

        for (int entry = 0; entry < ARM_L2_MAX_ENTRIES; entry++) {
            union arm_l2_entry *e =
                (union arm_l2_entry *)ptable_lv + entry;
            // Small-page base is 4KB-aligned (shift by BASE_PAGE_BITS).
            genpaddr_t paddr = (genpaddr_t)(e->small_page.base_address) << BASE_PAGE_BITS;
            printf("%d.%d: 0x%"PRIxGENPADDR"\n", l1_index, entry, paddr);