type == ObjType_VNode_x86_32_pdir ||
type == ObjType_VNode_x86_32_ptable)
{
- return 12; // BASE_PAGE_BITS
+ return 12;
}
- else if (type == ObjType_VNode_AARCH64_l1 ||
+ else if (type == ObjType_VNode_AARCH64_l0 ||
+ type == ObjType_VNode_AARCH64_l1 ||
type == ObjType_VNode_AARCH64_l2 ||
type == ObjType_VNode_AARCH64_l3)
{
// round up to page size for caps
remain = ROUND_UP(remain, BASE_PAGE_SIZE);
+ assert((base_addr & BASE_PAGE_MASK) == 0);
+ assert((remain & BASE_PAGE_MASK) == 0);
- // Create max-sized caps to multiboot module in module cnode
- while (remain > 0) {
- assert((base_addr & BASE_PAGE_MASK) == 0);
- assert((remain & BASE_PAGE_MASK) == 0);
-
- // determine size of next chunk
- uint8_t block_size = bitaddralign(remain, base_addr);
-
- assert(st->modulecn_slot < (1U << st->modulecn->cap.u.cnode.bits));
- // create as DevFrame cap to avoid zeroing memory contents
- err = caps_create_new(ObjType_DevFrame, base_addr, block_size,
- block_size, my_core_id,
- caps_locate_slot(CNODE(st->modulecn),
- st->modulecn_slot++));
- assert(err_is_ok(err));
-
- // Advance by that chunk
- base_addr += ((genpaddr_t)1 << block_size);
- remain -= ((genpaddr_t)1 << block_size);
- }
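+ // Cap sizes are no longer restricted to powers of two, so one cap covers the whole module.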
+ assert(st->modulecn_slot < (1U << st->modulecn->cap.u.cnode.bits));
+ // create as DevFrame cap to avoid zeroing memory contents
+ err = caps_create_new(ObjType_DevFrame, base_addr, remain,
+ remain, my_core_id,
+ caps_locate_slot(CNODE(st->modulecn),
+ st->modulecn_slot++));
+ assert(err_is_ok(err));
// Copy multiboot module string to mmstrings area
- strcpy((char *)mmstrings, MBADDR_ASSTRING(m->string));
- mmstrings += strlen(MBADDR_ASSTRING(m->string)) + 1;
+ strcpy((char *)mmstrings, module->cmdline);
+ mmstrings += strlen(module->cmdline) + 1;
assert(mmstrings < mmstrings_base + BASE_PAGE_SIZE);
+
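+ // Advance to the next MULTIBOOT_TAG_TYPE_MODULE_64 tag.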
+ position += module->size;
+ module = ((void *) module) + module->size;
+ module = (struct multiboot_tag_module_64 *) multiboot2_find_header(
+ (struct multiboot_header_tag *) module, size - position,
+ MULTIBOOT_TAG_TYPE_MODULE_64);
}
}
/*
* AARCH64 has:
*
- * L1 has 4 entries (4KB).
- * L2 Coarse has 512 entries (512 * 8B = 4KB).
- * L3 Coarse has 512 entries (512 * 8B = 4KB).
+ * L0 has 1 entry.
+ * L1 has 1 entry.
+ * L2 Coarse has 16 tables (each 512 entries * 8B = 4KB).
+ * L3 Coarse has 16*512 tables (each 512 entries * 8B = 4KB).
*
*/
+
+ printk(LOG_NOTE, "init page tables: l0=%p, l1=%p, l2=%p, l3=%p\n",
+ init_l0, init_l1, init_l2, init_l3);
+
caps_create_new(
- ObjType_VNode_AARCH64_l1,
- mem_to_local_phys((lvaddr_t)init_l1),
- vnode_objsize(ObjType_VNode_AARCH64_l1), 0,
+ ObjType_VNode_AARCH64_l0,
+ mem_to_local_phys((lvaddr_t)init_l0),
+ vnode_objsize(ObjType_VNode_AARCH64_l0), 0,
my_core_id,
caps_locate_slot(CNODE(spawn_state.pagecn), pagecn_pagemap++)
);
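+ // Map L1 into successive slots in pagecn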
+ for (size_t i = 0; i < INIT_L1_SIZE; i++) {
+ size_t objsize_vnode = vnode_objsize(ObjType_VNode_AARCH64_l1);
+ assert(objsize_vnode == BASE_PAGE_SIZE);
+ caps_create_new(
+ ObjType_VNode_AARCH64_l1,
+ mem_to_local_phys((lvaddr_t)init_l1) + (i * objsize_vnode),
+ objsize_vnode, 0, my_core_id,
+ caps_locate_slot(CNODE(spawn_state.pagecn), pagecn_pagemap++)
+ );
+ }
+
- for (size_t i = 0; i < INIT_L2_SIZE; i++) {
- size_t objbits_vnode = vnode_objbits(ObjType_VNode_AARCH64_l2);
- assert(objbits_vnode == BASE_PAGE_BITS);
+ //STARTUP_PROGRESS();
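+ // Map L2 into successive slots in pagecn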
+ for(size_t i = 0; i < INIT_L2_SIZE; i++) {
+ size_t objsize_vnode = vnode_objsize(ObjType_VNode_AARCH64_l2);
+ assert(objsize_vnode == BASE_PAGE_SIZE);
caps_create_new(
ObjType_VNode_AARCH64_l2,
- mem_to_local_phys((lvaddr_t)init_l2) + (i << objbits_vnode),
- objbits_vnode, 0, my_core_id,
+ mem_to_local_phys((lvaddr_t)init_l2) + (i * objsize_vnode),
+ objsize_vnode, 0, my_core_id,
caps_locate_slot(CNODE(spawn_state.pagecn), pagecn_pagemap++)
);
}
// Map L3 into successive slots in pagecn
for(size_t i = 0; i < INIT_L3_SIZE; i++) {
- size_t objbits_vnode = vnode_objbits(ObjType_VNode_AARCH64_l3);
- assert(objbits_vnode == BASE_PAGE_BITS);
- caps_create_new(
- ObjType_VNode_AARCH64_l3,
- mem_to_local_phys((lvaddr_t)init_l3) + (i << objbits_vnode),
- objbits_vnode, 0, my_core_id,
- caps_locate_slot(CNODE(spawn_state.pagecn), pagecn_pagemap++)
- );
- }
+ size_t objsize_vnode = vnode_objsize(ObjType_VNode_AARCH64_l3);
+ assert(objsize_vnode == BASE_PAGE_SIZE);
+ caps_create_new(
+ ObjType_VNode_AARCH64_l3,
+ mem_to_local_phys((lvaddr_t)init_l3) + (i * objsize_vnode),
+ objsize_vnode, 0,
+ my_core_id,
+ caps_locate_slot(CNODE(spawn_state.pagecn), pagecn_pagemap++)
+ );
+ }
/*
+ * Initialize init page tables - this just wires the L0
+ * entries through to the corresponding L1 entries.
+ */
+ for(lvaddr_t vaddr = INIT_VBASE;
+ vaddr < INIT_SPACE_LIMIT;
+ vaddr += VMSAv8_64_L0_SIZE)
+ {
+ uintptr_t section = (vaddr - INIT_VBASE) / VMSAv8_64_L0_SIZE;
+ uintptr_t l1_off = section * VMSAv8_64_PTABLE_SIZE;
+ lpaddr_t paddr = mem_to_local_phys((lvaddr_t)init_l1) + l1_off;
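+ // Point this L0 entry at the corresponding L1 table in the contiguous init_l1 block.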
+ paging_map_table_l0(init_l0, vaddr, paddr);
+ }
+ /*
* Initialize init page tables - this just wires the L1
* entries through to the corresponding L2 entries.
*/
// If you create more capability types you need to deal with them
// in the table below.
-STATIC_ASSERT(46 == ObjType_Num, "Knowledge of all cap types");
+STATIC_ASSERT(48 == ObjType_Num, "Knowledge of all cap types");
-
- static size_t caps_numobjs(enum objtype type, uint8_t bits, uint8_t objbits)
+ static size_t caps_max_numobjs(enum objtype type, gensize_t srcsize, gensize_t objsize)
{
switch(type) {
case ObjType_PhysAddr:
*
* For the meaning of the parameters, see the 'caps_create' function.
*/
-STATIC_ASSERT(46 == ObjType_Num, "Knowledge of all cap types");
+STATIC_ASSERT(48 == ObjType_Num, "Knowledge of all cap types");
- static errval_t caps_init_objects(enum objtype type, lpaddr_t lpaddr, uint8_t
- bits, uint8_t objbits, size_t numobjs)
+ static errval_t caps_zero_objects(enum objtype type, lpaddr_t lpaddr,
+ gensize_t objsize, size_t count)
{
+ assert(type < ObjType_Num);
+
// Virtual address of the memory the kernel object resides in
// XXX: A better of doing this,
// this is creating caps that the kernel cannot address.
break;
case ObjType_CNode:
+ // scale objsize by size of slot for CNodes; objsize for CNodes given
+ // in slots.
+ objsize *= sizeof(struct cte);
+ debug(SUBSYS_CAPS, "CNode: zeroing %zu bytes @%#"PRIxLPADDR"\n",
+ (size_t)objsize * count, lpaddr);
+ TRACE(KERNEL, BZERO, 1);
+ memset((void*)lvaddr, 0, objsize * count);
+ TRACE(KERNEL, BZERO, 0);
+ break;
+
case ObjType_VNode_ARM_l1:
case ObjType_VNode_ARM_l2:
+ case ObjType_VNode_AARCH64_l0:
case ObjType_VNode_AARCH64_l1:
case ObjType_VNode_AARCH64_l2:
case ObjType_VNode_AARCH64_l3:
*/
// If you create more capability types you need to deal with them
// in the table below.
-STATIC_ASSERT(46 == ObjType_Num, "Knowledge of all cap types");
+STATIC_ASSERT(48 == ObjType_Num, "Knowledge of all cap types");
- static errval_t caps_create(enum objtype type, lpaddr_t lpaddr, uint8_t bits,
- uint8_t objbits, size_t numobjs, coreid_t owner,
+ static errval_t caps_create(enum objtype type, lpaddr_t lpaddr, gensize_t size,
+ gensize_t objsize, size_t count, coreid_t owner,
struct cte *dest_caps)
{
errval_t err;
break;
}
+ case ObjType_VNode_AARCH64_l0:
+ {
+ size_t objsize_vnode = vnode_objsize(type);
+
+ for(dest_i = 0; dest_i < count; dest_i++) {
+ // Initialize type specific fields
+ temp_cap.u.vnode_aarch64_l0.base =
+ genpaddr + dest_i * objsize_vnode;
+
+ // Insert the capability
+ err = set_cap(&dest_caps[dest_i].cap, &temp_cap);
+ if (err_is_fail(err)) {
+ break;
+ }
+ }
+
+ break;
+ }
+
case ObjType_VNode_AARCH64_l1:
{
- size_t objbits_vnode = vnode_objbits(type);
-
- TRACE(KERNEL, BZERO, 1);
- memset((void*)lvaddr, 0, 1UL << bits);
- TRACE(KERNEL, BZERO, 0);
+ size_t objsize_vnode = vnode_objsize(type);
- for(dest_i = 0; dest_i < numobjs; dest_i++) {
+ for(dest_i = 0; dest_i < count; dest_i++) {
// Initialize type specific fields
- src_cap.u.vnode_aarch64_l1.base =
- genpaddr + dest_i * ((genpaddr_t)1 << objbits_vnode);
+ temp_cap.u.vnode_aarch64_l1.base =
+ genpaddr + dest_i * objsize_vnode;
-#ifdef __aarch64__
- // Insert kernel/mem mappings into new table.
- lpaddr_t var = gen_phys_to_local_phys(temp_cap.u.vnode_aarch64_l1.base);
- paging_make_good(var);
-#endif
-
// Insert the capability
- err = set_cap(&dest_caps[dest_i].cap, &src_cap);
+ err = set_cap(&dest_caps[dest_i].cap, &temp_cap);
if (err_is_fail(err)) {
break;
}
return SYS_ERR_OK;
}
-
-STATIC_ASSERT(46 == ObjType_Num, "Knowledge of all cap types");
+STATIC_ASSERT(48 == ObjType_Num, "Knowledge of all cap types");
/// Retype caps
- errval_t caps_retype(enum objtype type, size_t objbits,
+ /// Create `count` new caps of `type` from `offset` in src, and put them in
+ /// `dest_cnode` starting at `dest_slot`.
+ /// Note: currently objsize is in slots for type == ObjType_CNode
+ errval_t caps_retype(enum objtype type, gensize_t objsize, size_t count,
struct capability *dest_cnode, cslot_t dest_slot,
- struct cte *src_cte, bool from_monitor)
+ struct cte *src_cte, gensize_t offset,
+ bool from_monitor)
{
+ // TODO List for this:
+ // * do not complain if there's non-overlapping descendants,
+ // only complain about overlapping descendants
TRACE(KERNEL, CAP_RETYPE, 0);
- size_t numobjs;
- uint8_t bits = 0;
+ size_t maxobjs;
genpaddr_t base = 0;
+ gensize_t size = 0;
errval_t err;
+ bool do_range_check = false;
/* Parameter checking */
assert(type != ObjType_Null);
-- Rules to build assorted boot images
--
+ -- Build a boot image for the ARM FVP
+ Rule ([ In BuildTree "tools" "/bin/mkmb_fvp",
+ In SrcTree "tools" "/hake/fvp.cfg",
+ Str "0x100000000", -- 4GiB of RAM
+ In BuildTree "armv8" "/sbin/fvp_shim",
+ In BuildTree "root" "/",
+ Out "root" "/armv8_fvp_image"
+ ] ++ [ (Dep BuildTree "armv8" m) | m <- modules_fvp ]),
+
+ -- Emit the debug addresses for the FVP
+ Rule ([ In BuildTree "tools" "/bin/mkmb_fvp",
+ In SrcTree "tools" "/hake/fvp.cfg",
+ Str "0x100000000", -- 4GiB of RAM
+ In BuildTree "armv8" "/sbin/fvp_shim",
+ In BuildTree "root" "/",
+ Out "root" "/armv8_fvp_debug",
+ Str "-d"
+ ] ++ [ (Dep BuildTree "armv8" m) | m <- modules_fvp ]),
+
-- Build the default PandaBoard boot image
Rule ([ In SrcTree "tools" "/tools/arm_molly/build_pandaboard_image.sh",
- Str "--srcdir", NoDep SrcTree "root" "/.",
- Str "--builddir", NoDep BuildTree "root" "/.",
+ Str "--srcdir", NoDep SrcTree "root" "/.",
+ Str "--builddir", NoDep BuildTree "root" "/.",
Str "--arch armv7-a",
Str "--menu", In SrcTree "tools" "/hake/menu.lst.pandaboard",
Str "--baseaddr", Str "0x82001000",