/.dist-buildwrapper
toolchain/*
/build
+.cproject
+.project
+
/* well-known cnodes */
extern struct cnoderef cnode_root, cnode_task, cnode_base,
- cnode_super0, cnode_super1, cnode_page, cnode_module;
+ cnode_super, cnode_page, cnode_module;
/* well-known capabilities */
extern struct capref cap_root, cap_monitorep, cap_irq, cap_io, cap_dispatcher,
/**
* Size of bootinfo mapping.
- */
+ */
#define BOOTINFO_SIZEBITS (BASE_PAGE_BITS + 2)
#define BOOTINFO_SIZE (1UL << BOOTINFO_SIZEBITS)
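For concreteness, a worked instance of these two defines, assuming BASE_PAGE_BITS is 12 (4 KiB base pages) on this architecture:

/* Illustration only, assuming BASE_PAGE_BITS == 12:
 *   BOOTINFO_SIZEBITS = 12 + 2 = 14
 *   BOOTINFO_SIZE     = 1UL << 14 = 16 KiB   (four base pages)
 */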
#define ROOTCN_SLOT_TASKCN 0 ///< Taskcn slot in root cnode
#define ROOTCN_SLOT_PAGECN 1 ///< Pagecn slot in root cnode
#define ROOTCN_SLOT_BASE_PAGE_CN 2 ///< Slot for a cnode of BASE_PAGE_SIZE frames
-#define ROOTCN_SLOT_SUPERCN0 3 ///< Slot for a cnode of SUPER frames
-#define ROOTCN_SLOT_SUPERCN1 4 ///< Slot for a cnode of SUPER frames
-#define ROOTCN_SLOT_SEGCN 5 ///< SegCN slot in root cnode
-#define ROOTCN_SLOT_PACN 6 ///< PhysAddr cnode slot in root cnode
-#define ROOTCN_SLOT_MODULECN 7 ///< Multiboot modules cnode slot in root cnode
-#define ROOTCN_SLOT_SLOT_ALLOC0 8 ///< Root of slot alloc0
-#define ROOTCN_SLOT_SLOT_ALLOC1 9 ///< Root of slot alloc1
-#define ROOTCN_SLOT_SLOT_ALLOC2 10 ///< Root of slot alloc2
-#define ROOTCN_SLOT_ARGCN 11 ///< Argcn slot in root cnode
-#define ROOTCN_SLOTS_USER 12 ///< First free slot in root cnode for user
+#define ROOTCN_SLOT_SUPERCN 3 ///< Slot for a cnode of SUPER frames
+#define ROOTCN_SLOT_SEGCN 4 ///< SegCN slot in root cnode
+#define ROOTCN_SLOT_PACN 5 ///< PhysAddr cnode slot in root cnode
+#define ROOTCN_SLOT_MODULECN 6 ///< Multiboot modules cnode slot in root cnode
+#define ROOTCN_SLOT_SLOT_ALLOC0 7 ///< Root of slot alloc0
+#define ROOTCN_SLOT_SLOT_ALLOC1 8 ///< Root of slot alloc1
+#define ROOTCN_SLOT_SLOT_ALLOC2 9 ///< Root of slot alloc2
+#define ROOTCN_SLOT_ARGCN 10 ///< Argcn slot in root cnode
+#define ROOTCN_SLOTS_USER 11 ///< First free slot in root cnode for user
/* Size of CNodes in Root CNode if not the default size */
#define SLOT_ALLOC_CNODE_BITS (DEFAULT_CNODE_BITS * 2)
#define DEFAULT_CN_ADDR_BITS (CPTR_BITS - DEFAULT_CNODE_BITS)
#define CPTR_BASE_PAGE_CN_BASE (ROOTCN_SLOT_BASE_PAGE_CN << DEFAULT_CN_ADDR_BITS)
-#define CPTR_SUPERCN0_BASE (ROOTCN_SLOT_SUPERCN0 << DEFAULT_CN_ADDR_BITS)
-#define CPTR_SUPERCN1_BASE (ROOTCN_SLOT_SUPERCN1 << DEFAULT_CN_ADDR_BITS)
+#define CPTR_SUPERCN_BASE (ROOTCN_SLOT_SUPERCN << DEFAULT_CN_ADDR_BITS)
#define CPTR_PHYADDRCN_BASE (ROOTCN_SLOT_PACN << DEFAULT_CN_ADDR_BITS)
#define CPTR_MODULECN_BASE (ROOTCN_SLOT_MODULECN << DEFAULT_CN_ADDR_BITS)
#define CPTR_PML4_BASE (ROOTCN_SLOT_PAGECN << (CPTR_BITS - PAGE_CNODE_BITS))
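As a rough sketch of how these bases compose a capability address, assuming illustrative values CPTR_BITS == 32 and DEFAULT_CNODE_BITS == 8 (the real values are whatever the build configuration defines):

/* Example only, with CPTR_BITS = 32 and DEFAULT_CNODE_BITS = 8:
 *   DEFAULT_CN_ADDR_BITS = 32 - 8 = 24
 *   CPTR_SUPERCN_BASE    = ROOTCN_SLOT_SUPERCN  << 24 = 3 << 24 = 0x03000000
 *   CPTR_MODULECN_BASE   = ROOTCN_SLOT_MODULECN << 24 = 6 << 24 = 0x06000000
 * The root-CNode slot number occupies the top DEFAULT_CNODE_BITS bits of the
 * capability address; the remaining low bits select a slot inside that CNode.
 */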
/// The maximum number of coprocessor cards in a system
#define XEON_PHI_NUM_MAX 8
-
-#define XEON_PHY_HOST_MEM_OFFSET 0x8000000000UL
-
-#define XEON_PHI_HOST_TO_CARD_MEM(x) \
- ((lpaddr_t)(x)+XEON_PHY_HOST_MEM_OFFSET)
-
-
-/**
- * this struct represents the information passed from the host to the
- * coprocessor kernels
- */
-struct xeon_phi_info {
- uintptr_t comm_base; // TODO: communication base
-
- uint8_t present; ///< flag indicating the present cards in the system
-};
-
#endif // XEON_PHI_XEON_PHI_H_
/* Load pointer to endpoint cap */
shl $OBJBITS_CTE, %rsi
mov OFFSETOF_CAP_CNODE_CNODE(%rdi), %rcx
- mov $0xfffffe0000000000, %rdi // phys_to_mem()
+ mov $0xffffff8000000000, %rdi // phys_to_mem()
add %rdi, %rcx
add %rsi, %rcx
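What this sequence computes, rendered as a C sketch (assumptions: %rdi initially points to the invoked CNode capability, %rsi holds the endpoint's slot index, OBJBITS_CTE is the log2 size of a struct cte, and 0xffffff8000000000 is the phys_to_mem() offset named in the inline comment; cnode_cap, slot, and ep_cte are hypothetical names for illustration):

    struct cte *ep_cte = (struct cte *)
        (cnode_cap->u.cnode.cnode               // physical base of the CNode
         + 0xffffff8000000000UL                 // phys_to_mem(): kernel window
         + ((uintptr_t)slot << OBJBITS_CTE));   // byte offset of the slot's cte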
xor %eax, %eax
mov %eax, %fs
mov %eax, %gs
-
+
/* Get new dispatcher pointer */
mov OFFSETOF_DCB_DISP(%rsi), %rax
/* Disable target dispatcher -- gotta do it here for TLB hit reasons */
je load_ldt_invalid
cmpq $0, %r14
je load_ldt_invalid
-
+
/* Update segment descriptor for LDT */
movq %r11, current_ldt_base(%rip)
// Store new descriptor (low half) to GDT
mov %rcx, (gdt + 8*LDT_LO_SEL)(%rip)
-
+
// Construct segment selector and load it
- mov $LDT_SELECTOR, %cx
+ mov $LDT_SELECTOR, %cx
lldt %cx
jmp load_ldt_continue
struct spawn_state {
/// Init's cnodes
- struct cte *taskcn, *segcn,*supercn0,*supercn1, *physaddrcn, *modulecn,
+ struct cte *taskcn, *segcn, *supercn, *physaddrcn, *modulecn,
*pagecn, *basepagecn,
*slot_alloc_cn0, *slot_alloc_cn1, *slot_alloc_cn2;
/// Next slot in each cnode
- cslot_t segcn_slot, supercn0_slot, supercn1_slot, physaddrcn_slot, modulecn_slot;
+ cslot_t segcn_slot, supercn_slot, physaddrcn_slot, modulecn_slot;
/// Address of arguments page
lpaddr_t args_page;
* for a bigger physical address space. We set this to 37-bit,
* i.e. 128 GBytes.
*/
-#define X86_64_PADDR_SPACE_LIMIT ((genpaddr_t)1 << 40)
+#define X86_64_PADDR_SPACE_LIMIT ((genpaddr_t)1 << 37)
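A quick check on the constant: 1UL << 37 is 2^37 bytes = 128 * 2^30 bytes = 128 GiB, matching the "128 GBytes" stated in the comment above.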
/**
* Static address space limit for the init user-space domain. The
* Aligns an address to the nearest PML4 entry by masking out lower 39
* bits.
*/
-#define X86_64_PML4_ALIGN(addr) ((addr) & ((genpaddr_t)0x7fffff << 41))
+#define X86_64_PML4_ALIGN(addr) ((addr) & ((genpaddr_t)0x1ffffff << 39))
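To see that the mask does what the comment says (each x86-64 PML4 entry covers 2^39 bytes, i.e. 512 GiB):

/* 0x1ffffff << 39 keeps bits 39..63 and clears bits 0..38, so for example:
 *   X86_64_PML4_ALIGN(0x8000001000) == 0x8000000000   (rounded down to a
 *                                                       512 GiB boundary)
 *   X86_64_PML4_ALIGN(0x7fffffffff) == 0
 */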
/**
* Absolute offset of mapped physical memory within virtual address
switch(type) {
case RegionType_Empty:
cap_type = ObjType_RAM;
- cnode = &st->supercn0->cap;
- slot = &st->supercn0_slot;
- if (*slot >= 1UL << cnode->u.cnode.bits) {
- slot = &st->supercn1_slot;
- cnode = &st->supercn1->cap;
- }
+ cnode = &st->supercn->cap;
+ slot = &st->supercn_slot;
break;
case RegionType_PhyAddr:
while (remain > 0) {
/* Cannot insert anymore into this cnode */
if (*slot >= 1UL << cnode->u.cnode.bits) {
- /*
- * it may be the case that we run over so switch to the other
- * supercn1 the switching should only happen once during this loop
- */
- if (cnode == &st->supercn0->cap) {
- slot = &st->supercn1_slot;
- cnode = &st->supercn1->cap;
- assert(*slot < 1UL << cnode->u.cnode.bits);
- } else {
- printk(LOG_WARN, "create_caps_to_cnode: Cannot create more caps "
- "in CNode\n");
- return -1;
- }
+ printk(LOG_WARN, "create_caps_to_cnode: Cannot create more caps "
+ "in CNode\n");
+ return -1;
}
/* Cannot insert anymore into the mem_region */
if (*regions_index >= MAX_MEM_REGIONS) {
assert(err_is_ok(err));
// Super cnode in root cnode
- st->supercn0 = caps_locate_slot(CNODE(&rootcn), ROOTCN_SLOT_SUPERCN0);
+ st->supercn = caps_locate_slot(CNODE(&rootcn), ROOTCN_SLOT_SUPERCN);
err = caps_create_new(ObjType_CNode, alloc_phys(BASE_PAGE_SIZE),
- BASE_PAGE_BITS, DEFAULT_CNODE_BITS, st->supercn0);
+ BASE_PAGE_BITS, DEFAULT_CNODE_BITS, st->supercn);
assert(err_is_ok(err));
- st->supercn1 = caps_locate_slot(CNODE(&rootcn), ROOTCN_SLOT_SUPERCN1);
- err = caps_create_new(ObjType_CNode, alloc_phys(BASE_PAGE_SIZE),
- BASE_PAGE_BITS, DEFAULT_CNODE_BITS, st->supercn1);
- assert(err_is_ok(err));
-
// slot_alloc cnodes in root cnode
st->slot_alloc_cn0 = caps_locate_slot(CNODE(&rootcn), ROOTCN_SLOT_SLOT_ALLOC0);
};
/// Super CNode
-struct cnoderef cnode_super0 = {
- .address = CPTR_SUPERCN0_BASE,
- .address_bits = DEFAULT_CNODE_BITS,
- .size_bits = DEFAULT_CNODE_BITS,
- .guard_size = 0
-};
-struct cnoderef cnode_super1 = {
- .address = CPTR_SUPERCN1_BASE,
+struct cnoderef cnode_super = {
+ .address = CPTR_SUPERCN_BASE,
.address_bits = DEFAULT_CNODE_BITS,
.size_bits = DEFAULT_CNODE_BITS,
.guard_size = 0
* or else the next closest less than MM_MAXSIZEBITS */
int mem_region = -1, mem_slot = 0;
struct capref mem_cap = {
- .cnode = cnode_super0,
+ .cnode = cnode_super,
.slot = 0,
};
for (int i = 0; i < bi->regions_length; i++) {
assert(!bi->regions[i].mr_consumed);
if (bi->regions[i].mr_type == RegionType_Empty) {
- if (bi->regions[i].mr_bits >= MM_REQUIREDBITS
+ if (bi->regions[i].mr_bits >= MM_REQUIREDBITS
&& bi->regions[i].mr_bits <= MM_MAXSIZEBITS && (mem_region == -1
|| bi->regions[i].mr_bits < bi->regions[mem_region].mr_bits)) {
mem_region = i;
/* copy supercn to memory server */;
struct capref init_supercn_cap = {
.cnode = cnode_root,
- .slot = ROOTCN_SLOT_SUPERCN0
+ .slot = ROOTCN_SLOT_SUPERCN
};
struct capref child_supercn_cap = {
.cnode = si->rootcn,
- .slot = ROOTCN_SLOT_SUPERCN0
+ .slot = ROOTCN_SLOT_SUPERCN
};
err = cap_copy(child_supercn_cap, init_supercn_cap);
if (err_is_fail(err)) {
return err_push(err, INIT_ERR_COPY_SUPERCN_CAP);
}
- /* copy supercn to memory server */;
- init_supercn_cap.slot = ROOTCN_SLOT_SUPERCN1;
- child_supercn_cap.slot = ROOTCN_SLOT_SUPERCN1;
-
- err = cap_copy(child_supercn_cap, init_supercn_cap);
- if (err_is_fail(err)) {
- return err_push(err, INIT_ERR_COPY_SUPERCN_CAP);
- }
-
return SYS_ERR_OK;
}
// architecture, we use paddr_t as the type to represent region
// limits, which constrains the size we can represent.
#if defined(__x86_64__)
-# define MAXSIZEBITS 40 ///< Max size of memory in allocator
+# define MAXSIZEBITS 38 ///< Max size of memory in allocator
#elif defined(__i386__)
# define MAXSIZEBITS 32
#elif defined(__arm__)
/* walk bootinfo and add all unused RAM caps to allocator */
struct capref mem_cap = {
- .cnode = cnode_super0,
+ .cnode = cnode_super,
.slot = 0,
};
if (bi->regions[i].mr_type == RegionType_Empty) {
dump_ram_region(i, bi->regions + i);
- /*
- * we may have more memory regions than we have space in a single
- * CNode, thus we switch to the second.
- *
- */
- if (mem_cap.slot >= (1UL << mem_cap.cnode.size_bits)) {
- mem_cap.slot=0;
- mem_cap.cnode = cnode_super1;
- }
-
mem_total += ((size_t)1) << bi->regions[i].mr_bits;
if (bi->regions[i].mr_consumed) {