3 * \brief System calls implementation.
7 * Copyright (c) 2007, 2008, 2009, 2010, 2012, ETH Zurich.
10 * This file is distributed under the terms in the attached LICENSE file.
11 * If you do not find this file, copies can be found by writing to:
12 * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
17 #include <sys_debug.h>
19 #include <barrelfish_kpi/syscalls.h>
21 #include <mdb/mdb_tree.h>
23 #include <paging_kernel_arch.h>
24 #include <paging_generic.h>
27 #include <arch/x86/x86.h>
28 #include <arch/x86/apic.h>
29 #include <arch/x86/global.h>
30 #include <arch/x86/perfmon.h>
31 #include <arch/x86/debugregs.h>
32 #include <arch/x86/syscall.h>
33 #include <arch/x86/timing.h>
34 #include <arch/x86/ipi_notify.h>
35 #include <barrelfish_kpi/sys_debug.h>
36 #include <barrelfish_kpi/lmp.h>
37 #include <barrelfish_kpi/dispatcher_shared_target.h>
38 #include <trace/trace.h>
41 #include <dev/amd_vmcb_dev.h>
// NOTE(review): classic MIN macro — evaluates each argument twice, so callers
// must not pass side-effecting expressions (the LMP length clamp below passes
// plain values, which is safe).
44 #define MIN(a,b) ((a) < (b) ? (a) : (b))
// Saved user-mode stack pointer — defined in the arch entry assembly,
// presumably entry.S; TODO confirm the defining translation unit.
46 extern uint64_t user_stack_save;
48 /* FIXME: lots of missing argument checks in this function */
/**
 * \brief Invocation handler for DispatcherCmd_Setup on a Dispatcher cap.
 *
 * Unpacks capability addresses from the argument words and forwards to
 * sys_dispatcher_setup(), bracketed by TRACE points for profiling.
 * NOTE(review): this excerpt is missing the lines declaring `depth` (args[1])
 * and `run` (args[4]) that the call below uses, plus braces and the
 * `return sr;` — compare against the full file before relying on it.
 */
49 static struct sysret handle_dispatcher_setup(struct capability *to,
50 int cmd, uintptr_t *args)
52 capaddr_t cptr = args[0];   // cspace root cap address for the new dispatcher
54 capaddr_t vptr = args[2];   // vspace root cap address
55 capaddr_t dptr = args[3];   // dispatcher frame cap address
57 capaddr_t odptr = args[5];  // other dispatcher cap address
59 TRACE(KERNEL, SC_DISP_SETUP, 0);
60 struct sysret sr = sys_dispatcher_setup(to, cptr, depth, vptr, dptr, run, odptr);
61 TRACE(KERNEL, SC_DISP_SETUP, 1);
/**
 * \brief Invocation handler for DispatcherCmd_Properties.
 *
 * Forwards scheduling parameters (type, deadline, wcet, period, release,
 * weight) to sys_dispatcher_properties(). NOTE(review): the call at the end
 * is truncated in this excerpt (continuation line and return are elided).
 */
65 static struct sysret handle_dispatcher_properties(struct capability *to,
66 int cmd, uintptr_t *args)
68 enum task_type type = args[0];
69 unsigned long deadline = args[1];
70 unsigned long wcet = args[2];      // worst-case execution time
71 unsigned long period = args[3];
72 unsigned long release = args[4];
73 unsigned short weight = args[5];
75 TRACE(KERNEL, SC_DISP_PROPS, 0);
76 struct sysret sr = sys_dispatcher_properties(to, type, deadline, wcet, period,
78 TRACE(KERNEL, SC_DISP_PROPS, 1);
/**
 * \brief Shared implementation of the retype invocation.
 *
 * Used both for user retypes (from_monitor == false, via handle_retype) and
 * monitor-initiated cross-core retypes (monitor_handle_retype). Argument
 * words name the source cap, target type/size, and destination slot.
 * NOTE(review): the parameter list's remaining lines (args/from_monitor) and
 * braces are elided in this excerpt.
 */
82 static struct sysret handle_retype_common(struct capability *root,
86 uint64_t source_cptr = args[0];
87 uint64_t type = args[1];           // target ObjType
88 uint64_t objbits = args[2];        // size of each new object, log2
89 uint64_t dest_cnode_cptr = args[3];
90 uint64_t dest_slot = args[4];
91 uint64_t dest_vbits = args[5];     // valid bits of dest cnode address
93 TRACE(KERNEL, SC_RETYPE, 0);
94 struct sysret sr = sys_retype(root, source_cptr, type, objbits, dest_cnode_cptr,
95 dest_slot, dest_vbits, from_monitor);
96 TRACE(KERNEL, SC_RETYPE, 1);
// CNodeCmd_Retype: user-level retype — never treated as monitor-initiated.
100 static struct sysret handle_retype(struct capability *root,
101 int cmd, uintptr_t *args)
103 return handle_retype_common(root, args, false);
/**
 * \brief CNodeCmd_Create: create a capability from nothing.
 *
 * Only allowed for cap types that can be created this way (enforced in
 * sys_create, presumably — TODO confirm). The sys_create call's final
 * argument line and the return are elided in this excerpt.
 */
106 static struct sysret handle_create(struct capability *root,
107 int cmd, uintptr_t *args)
109 /* Retrieve arguments */
110 enum objtype type = args[0];
111 uint8_t objbits = args[1];
112 capaddr_t dest_cnode_cptr = args[2];
113 cslot_t dest_slot = args[3];
114 uint8_t dest_vbits = args[4];
116 TRACE(KERNEL, SC_CREATE, 0);
117 struct sysret sr = sys_create(root, type, objbits, dest_cnode_cptr, dest_slot,
119 TRACE(KERNEL, SC_CREATE, 1);
125 * Common code for copying and minting except the mint flag and param passing
/**
 * NOTE(review): the lines that read param1/param2 from args when `mint` is
 * set (original lines ~138-144) are elided here; for a plain copy they are
 * presumably left unused by sys_copy_or_mint — confirm against the full file.
 */
127 static struct sysret copy_or_mint(struct capability *root,
128 uintptr_t *args, bool mint)
130 /* Retrieve arguments */
131 capaddr_t destcn_cptr = args[0];
132 uint64_t dest_slot = args[1];
133 capaddr_t source_cptr = args[2];
134 int destcn_vbits = args[3];
135 int source_vbits = args[4];
136 uint64_t param1, param2;
137 // params only sent if mint operation
145 TRACE(KERNEL, SC_COPY_OR_MINT, 0);
146 struct sysret sr = sys_copy_or_mint(root, destcn_cptr, dest_slot, source_cptr,
147 destcn_vbits, source_vbits, param1, param2, mint);
148 TRACE(KERNEL, SC_COPY_OR_MINT, 1);
/**
 * \brief VNodeCmd_Map: map a frame/vnode cap into a page table.
 *
 * Nine argument words: destination slot, source cap (+vbits), page flags,
 * offset into source, number of PTEs, and the cnode/slot where the resulting
 * mapping cap is placed. Return line elided in this excerpt.
 */
152 static struct sysret handle_map(struct capability *ptable,
153 int cmd, uintptr_t *args)
155 /* Retrieve arguments */
156 uint64_t slot = args[0];
157 capaddr_t source_cptr = args[1];
158 int source_vbits = args[2];
159 uint64_t flags = args[3];
160 uint64_t offset = args[4];
161 uint64_t pte_count = args[5];
162 capaddr_t mapping_cnptr = args[6];  // cnode to receive the mapping cap
163 int mapping_cnvbits = args[7];
164 cslot_t mapping_slot = args[8];
166 TRACE(KERNEL, SC_MAP, 0);
167 struct sysret sr = sys_map(ptable, slot, source_cptr, source_vbits, flags,
168 offset, pte_count, mapping_cnptr, mapping_cnvbits,
170 TRACE(KERNEL, SC_MAP, 1);
// CNodeCmd_Mint: copy with new parameters (mint == true).
174 static struct sysret handle_mint(struct capability *root,
175 int cmd, uintptr_t *args)
177 return copy_or_mint(root, args, true);
// CNodeCmd_Copy: plain copy (mint == false).
180 static struct sysret handle_copy(struct capability *root,
181 int cmd, uintptr_t *args)
183 return copy_or_mint(root, args, false);
// CNodeCmd_Delete: delete the cap at cptr. NOTE(review): the `bits`
// declaration (presumably args[1]) is on an elided line in this excerpt.
186 static struct sysret handle_delete(struct capability *root,
187 int cmd, uintptr_t *args)
189 capaddr_t cptr = args[0];
191 return sys_delete(root, cptr, bits);
// CNodeCmd_Revoke: revoke all descendants/copies of the cap at cptr.
194 static struct sysret handle_revoke(struct capability *root,
195 int cmd, uintptr_t *args)
197 capaddr_t cptr = args[0];
199 return sys_revoke(root, cptr, bits);
// CNodeCmd_GetState: query distributed-cap state of the cap at cptr.
202 static struct sysret handle_get_state(struct capability *root,
203 int cmd, uintptr_t *args)
205 capaddr_t cptr = args[0];
207 return sys_get_state(root, cptr, bits);
/**
 * \brief VNodeCmd_Unmap: remove a mapping from a page table.
 *
 * Looks up the mapping cap in the current dispatcher's cspace, then calls
 * page_mappings_unmap(). The declarations of `bits`, `mapping` and `err`,
 * plus the final return, are elided in this excerpt.
 */
210 static struct sysret handle_unmap(struct capability *pgtable,
211 int cmd, uintptr_t *args)
213 capaddr_t cptr = args[0];
218 err = caps_lookup_slot(&dcb_current->cspace.cap, cptr, bits,
219 &mapping, CAPRIGHTS_READ_WRITE);
220 if (err_is_fail(err)) {
// Wrap lookup failure so callers can distinguish "cap not found" here.
221 return SYSRET(err_push(err, SYS_ERR_CAP_NOT_FOUND));
224 TRACE(KERNEL, SC_UNMAP, 0);
225 err = page_mappings_unmap(pgtable, mapping);
226 TRACE(KERNEL, SC_UNMAP, 1);
// MappingCmd_Destroy: currently a stub that reports success without doing
// anything visible in this excerpt (body line 232-233 elided — TODO confirm).
230 static struct sysret handle_mapping_destroy(struct capability *mapping,
231 int cmd, uintptr_t *args)
234 return SYSRET(SYS_ERR_OK);
/**
 * \brief MappingCmd_Modify: change flags on (part of) a mapped region.
 *
 * Delegates to page_mappings_modify_flags(); the returned struct sysret
 * initializer is elided in this excerpt.
 */
237 static struct sysret handle_mapping_modify(struct capability *mapping,
238 int cmd, uintptr_t *args)
240 // Modify flags of (part of) mapped region of frame
241 assert(type_is_mapping(mapping->type));
244 size_t offset = args[0]; // in pages; of first page to modify from first
245 // page in mapped region
246 size_t pages = args[1]; // #pages to modify
247 size_t flags = args[2]; // new flags
248 genvaddr_t va = args[3]; // virtual addr hint
250 errval_t err = page_mappings_modify_flags(mapping, offset, pages, flags, va);
252 return (struct sysret) {
258 /// Different handler for cap operations performed by the monitor
/**
 * \brief KernelCmd_Retype: monitor-initiated retype in another domain's
 * cspace. Resolves the target root CNode first, then reuses
 * handle_retype_common with from_monitor == true.
 */
259 static struct sysret monitor_handle_retype(struct capability *kernel_cap,
260 int cmd, uintptr_t *args)
264 capaddr_t root_caddr = args[0];
265 capaddr_t root_vbits = args[1];
267 struct capability *root;
268 err = caps_lookup_cap(&dcb_current->cspace.cap, root_caddr, root_vbits,
269 &root, CAPRIGHTS_READ);
270 if (err_is_fail(err)) {
271 return SYSRET(err_push(err, SYS_ERR_ROOT_CAP_LOOKUP));
274 /* XXX: this hides the first two arguments */
275 return handle_retype_common(root, &args[2], true);
/**
 * \brief KernelCmd_Has_descendants: test whether a cap (passed by value in
 * the args buffer) has descendants in the local MDB.
 *
 * mdb_find_greater(src, false) yields the next cap in MDB order; a
 * descendant, if any, is that immediate successor.
 */
278 static struct sysret monitor_handle_has_descendants(struct capability *kernel_cap,
279 int cmd, uintptr_t *args)
281 struct capability *src = (struct capability *)args;
283 struct cte *next = mdb_find_greater(src, false);
285 return (struct sysret) {
287 .value = (next && is_ancestor(&next->cap, src)),
/**
 * \brief KernelCmd_Delete_last: monitor deletes the last copy of a cap,
 * optionally moving a reclaimable remnant into retcn/ret_slot.
 * (Final argument line of the call is elided in this excerpt.)
 */
291 static struct sysret monitor_handle_delete_last(struct capability *kernel_cap,
292 int cmd, uintptr_t *args)
294 capaddr_t root_caddr = args[0];
295 uint8_t root_vbits = args[1];
296 capaddr_t target_caddr = args[2];
297 uint8_t target_vbits = args[3];
298 capaddr_t retcn_caddr = args[4];
299 uint8_t retcn_vbits = args[5];
300 cslot_t ret_slot = args[6];
302 return sys_monitor_delete_last(root_caddr, root_vbits, target_caddr,
303 target_vbits, retcn_caddr, retcn_vbits,
// KernelCmd_Delete_foreigns: delete all foreign-owned caps under caddr.
307 static struct sysret monitor_handle_delete_foreigns(struct capability *kernel_cap,
308 int cmd, uintptr_t *args)
310 capaddr_t caddr = args[0];
311 uint8_t bits = args[1];
312 return sys_monitor_delete_foreigns(caddr, bits);
// KernelCmd_Revoke_mark_target: mark a revoke target in another cspace.
315 static struct sysret monitor_handle_revoke_mark_tgt(struct capability *kernel_cap,
316 int cmd, uintptr_t *args)
318 capaddr_t root_caddr = args[0];
319 uint8_t root_vbits = args[1];
320 capaddr_t target_caddr = args[2];
321 uint8_t target_vbits = args[3];
323 return sys_monitor_revoke_mark_tgt(root_caddr, root_vbits,
324 target_caddr, target_vbits);
// KernelCmd_Revoke_mark_relations: mark relations of a cap passed by value.
327 static struct sysret monitor_handle_revoke_mark_rels(struct capability *kernel_cap,
328 int cmd, uintptr_t *args)
330 struct capability *base = (struct capability*)args;
332 return sys_monitor_revoke_mark_rels(base);
// KernelCmd_Delete_step: perform one step of a distributed delete,
// placing any resulting RAM cap into the given return slot.
335 static struct sysret monitor_handle_delete_step(struct capability *kernel_cap,
336 int cmd, uintptr_t *args)
338 capaddr_t ret_cn_addr = args[0];
339 capaddr_t ret_cn_bits = args[1];
340 capaddr_t ret_slot = args[2];
341 return sys_monitor_delete_step(ret_cn_addr, ret_cn_bits, ret_slot);
// KernelCmd_Clear_step: perform one step of clearing the delete list.
344 static struct sysret monitor_handle_clear_step(struct capability *kernel_cap,
345 int cmd, uintptr_t *args)
347 capaddr_t ret_cn_addr = args[0];
348 capaddr_t ret_cn_bits = args[1];
349 capaddr_t ret_slot = args[2];
350 return sys_monitor_clear_step(ret_cn_addr, ret_cn_bits, ret_slot);
// KernelCmd_Register: register the monitor's endpoint for kernel upcalls.
353 static struct sysret monitor_handle_register(struct capability *kernel_cap,
354 int cmd, uintptr_t *args)
356 capaddr_t ep_caddr = args[0];
358 TRACE(KERNEL, SC_MONITOR_REGISTER, 0);
359 struct sysret sr = sys_monitor_register(ep_caddr);
360 TRACE(KERNEL, SC_MONITOR_REGISTER, 1);
// KernelCmd_Get_core_id: return the logical (Barrelfish) core id.
364 static struct sysret monitor_get_core_id(struct capability *kernel_cap,
365 int cmd, uintptr_t *args)
367 return (struct sysret){.error = SYS_ERR_OK, .value = my_core_id};
// KernelCmd_Get_arch_id: return the hardware (APIC) id of this core.
370 static struct sysret monitor_get_arch_id(struct capability *kernel_cap,
371 int cmd, uintptr_t *args)
373 return (struct sysret){.error = SYS_ERR_OK, .value = apic_id};
/**
 * \brief Shared helper: identify a cap relative to the given cspace root,
 * writing the raw capability representation to the user buffer in args[2].
 * NOTE(review): args[2] is a user-supplied pointer; validation presumably
 * happens in sys_monitor_identify_cap — confirm.
 */
376 static struct sysret monitor_identify_cap_common(struct capability *kernel_cap,
377 struct capability *root,
380 capaddr_t cptr = args[0];
381 uint8_t bits = args[1];
383 struct capability *retbuf = (void *)args[2];
385 return sys_monitor_identify_cap(root, cptr, bits, retbuf);
// KernelCmd_Identify_cap: identify in the current dispatcher's own cspace.
388 static struct sysret monitor_identify_cap(struct capability *kernel_cap,
389 int cmd, uintptr_t *args)
391 return monitor_identify_cap_common(kernel_cap, &dcb_current->cspace.cap, args);
// KernelCmd_Identify_domains_cap: identify in another domain's cspace,
// resolved via the root cap named by args[0]/args[1].
394 static struct sysret monitor_identify_domains_cap(struct capability *kernel_cap,
395 int cmd, uintptr_t *args)
399 capaddr_t root_caddr = args[0];
400 capaddr_t root_vbits = args[1];
402 struct capability *root;
403 err = caps_lookup_cap(&dcb_current->cspace.cap, root_caddr, root_vbits,
404 &root, CAPRIGHTS_READ);
406 if (err_is_fail(err)) {
407 return SYSRET(err_push(err, SYS_ERR_ROOT_CAP_LOOKUP));
410 /* XXX: this hides the first two arguments */
411 return monitor_identify_cap_common(kernel_cap, root, &args[2]);
// KernelCmd_Cap_has_relations: query local copy/descendant/ancestor
// relations of a cap, filtered by `mask`.
414 static struct sysret monitor_cap_has_relations(struct capability *kernel_cap,
415 int cmd, uintptr_t *args)
417 capaddr_t caddr = args[0];
418 uint8_t vbits = args[1];
419 uint8_t mask = args[2];
421 return sys_cap_has_relations(caddr, vbits, mask);
// KernelCmd_Remote_relations: get/set remote-relation bits; args[4] packs
// the relation bits (low byte) and the mask (next byte). The `bits`
// declaration (args[3], presumably) is on an elided line.
424 static struct sysret monitor_remote_relations(struct capability *kernel_cap,
425 int cmd, uintptr_t *args)
427 capaddr_t root_addr = args[0];
428 int root_bits = args[1];
429 capaddr_t cptr = args[2];
431 uint8_t relations = args[4] & 0xFF;
432 uint8_t mask = (args[4] >> 8) & 0xFF;
434 return sys_monitor_remote_relations(root_addr, root_bits, cptr, bits,
/**
 * \brief KernelCmd_Create_cap: create a cap from raw metadata sent by the
 * monitor (cross-core cap transfer).
 *
 * The raw struct capability sits at the start of args; the destination
 * cnode/slot and owner core follow it at 64-bit alignment (`pos`).
 * Types whose state is core-local (endpoints, dispatchers, kernel, IRQ
 * table) may only be created here as *foreign* copies.
 */
439 static struct sysret monitor_create_cap(struct capability *kernel_cap,
440 int cmd, uintptr_t *args)
442 /* XXX: Get the raw metadata of the capability to create */
443 struct capability *src = (struct capability *)args;
444 int pos = ROUND_UP(sizeof(struct capability), sizeof(uint64_t)) / sizeof(uint64_t);
446 /* Cannot create null caps */
447 if (src->type == ObjType_Null) {
448 return SYSRET(SYS_ERR_ILLEGAL_DEST_TYPE);
451 coreid_t owner = args[pos + 3];
453 /* For certain types, only foreign copies can be created here */
454 if ((src->type == ObjType_EndPoint || src->type == ObjType_Dispatcher
455 || src->type == ObjType_Kernel || src->type == ObjType_IRQTable)
456 && owner == my_core_id)
458 return SYSRET(SYS_ERR_ILLEGAL_DEST_TYPE);
461 /* Create the cap in the destination */
462 capaddr_t cnode_cptr = args[pos];
463 int cnode_vbits = args[pos + 1];
464 size_t slot = args[pos + 2];
465 assert(cnode_vbits < CPTR_BITS);
// Final argument lines of this call (src, owner, presumably) are elided.
467 return SYSRET(caps_create_from_existing(&dcb_current->cspace.cap,
468 cnode_cptr, cnode_vbits,
// KernelCmd_Copy_existing: copy a cap identified by raw metadata into the
// given cnode/slot — same args layout as monitor_create_cap.
472 static struct sysret monitor_copy_existing(struct capability *kernel_cap,
473 int cmd, uintptr_t *args)
475 /* XXX: Get the raw metadata of the capability to create */
476 struct capability *src = (struct capability *)args;
477 int pos = ROUND_UP(sizeof(struct capability), sizeof(uint64_t)) / sizeof(uint64_t);
479 capaddr_t cnode_cptr = args[pos];
480 int cnode_vbits = args[pos + 1];
481 size_t slot = args[pos + 2];
483 return sys_monitor_copy_existing(src, cnode_cptr, cnode_vbits, slot);
// KernelCmd_Nullify_cap: overwrite the cap at cptr with a null cap.
486 static struct sysret monitor_nullify_cap(struct capability *kernel_cap,
487 int cmd, uintptr_t *args)
489 capaddr_t cptr = args[0];
490 uint8_t bits = args[1];
492 return sys_monitor_nullify_cap(cptr, bits);
// KernelCmd_Sync_timer: synchronize this core's timer with `synctime`.
495 static struct sysret monitor_handle_sync_timer(struct capability *kern_cap,
496 int cmd, uintptr_t *args)
498 uint64_t synctime = args[0];
499 return sys_monitor_handle_sync_timer(synctime);
/**
 * \brief FrameCmd_Identify: return the frame's physical base address with
 * its size (log2 bits) packed into the low bits — safe because the base is
 * asserted to be page-aligned.
 */
502 static struct sysret handle_frame_identify(struct capability *to,
503 int cmd, uintptr_t *args)
505 // Return with physical base address of frame
506 // XXX: pack size into bottom bits of base address
507 assert(to->type == ObjType_Frame || to->type == ObjType_DevFrame);
508 assert((to->u.frame.base & BASE_PAGE_MASK) == 0);
509 return (struct sysret) {
511 .value = to->u.frame.base | to->u.frame.bits,
/**
 * \brief VNodeCmd_Identify: return a page table's physical base address with
 * the ObjType packed into the low (page-offset) bits.
 * NOTE(review): the switch head, break statements, and default case are
 * elided in this excerpt.
 */
515 static struct sysret handle_vnode_identify(struct capability *to,
516 int cmd, uintptr_t *args)
518 // Return with physical base address of the VNode
519 // XXX: pack type into bottom bits of base address
520 assert(to->type == ObjType_VNode_x86_64_pml4 ||
521 to->type == ObjType_VNode_x86_64_pdpt ||
522 to->type == ObjType_VNode_x86_64_pdir ||
523 to->type == ObjType_VNode_x86_64_ptable);
525 uint64_t base_addr = 0;
527 case ObjType_VNode_x86_64_pml4:
528 base_addr = (uint64_t)(to->u.vnode_x86_64_pml4.base);
530 case ObjType_VNode_x86_64_pdpt:
531 base_addr = (uint64_t)(to->u.vnode_x86_64_pdpt.base);
533 case ObjType_VNode_x86_64_pdir:
534 base_addr = (uint64_t)(to->u.vnode_x86_64_pdir.base);
536 case ObjType_VNode_x86_64_ptable:
537 base_addr = (uint64_t)(to->u.vnode_x86_64_ptable.base);
542 assert((base_addr & BASE_PAGE_MASK) == 0);
544 return (struct sysret) {
546 .value = (genpaddr_t)base_addr | ((uint8_t)to->type),
// IOCmd_*: legacy x86 port I/O; `cmd` selects in/out and width, `data` is
// ignored for input commands.
551 static struct sysret handle_io(struct capability *to, int cmd, uintptr_t *args)
553 uint64_t port = args[0];
554 uint64_t data = args[1]; // ignored for input
556 return sys_io(to, cmd, port, data);
/**
 * \brief DispatcherCmd_Vmread: VMREAD from the guest's VMCS.
 *
 * Compiled out (returns SYS_ERR_VMKIT_UNAVAIL) on Xeon Phi (__k1om__) and
 * when the AMD SVM backend is configured. Fails unless the dispatcher's
 * VMCS is the one currently loaded (VMPTRST check). args[0] is the VMCS
 * field encoding; args[1] the user destination pointer.
 * NOTE(review): #else/#endif lines, `err` declaration, and return are
 * elided in this excerpt.
 */
559 static struct sysret handle_vmread(struct capability *to,
560 int cmd, uintptr_t *args)
562 #if defined(__k1om__) || defined(CONFIG_SVM)
563 return SYSRET(SYS_ERR_VMKIT_UNAVAIL);
566 struct dcb *dcb = to->u.dispatcher.dcb;
567 lpaddr_t vmcs_base = dcb->guest_desc.vmcb.cap.u.frame.base;
568 if (vmcs_base != vmptrst()) {
569 err = SYS_ERR_VMKIT_VMX_VMFAIL_INVALID;
571 err = vmread(args[0], (lvaddr_t *)args[1]);
// DispatcherCmd_Vmwrite: VMWRITE args[1] into VMCS field args[0]; same
// current-VMCS guard as handle_vmread.
577 static struct sysret handle_vmwrite(struct capability *to,
578 int cmd, uintptr_t *args)
580 #if defined(__k1om__) || defined(CONFIG_SVM)
581 return SYSRET(SYS_ERR_VMKIT_UNAVAIL);
584 struct dcb *dcb = to->u.dispatcher.dcb;
585 lpaddr_t vmcs_base = dcb->guest_desc.vmcb.cap.u.frame.base;
586 if (vmcs_base != vmptrst()) {
587 err = SYS_ERR_VMKIT_VMX_VMFAIL_INVALID;
589 err = vmwrite(args[0], args[1]);
// DispatcherCmd_Vmptrld: make this dispatcher's VMCS current.
595 static struct sysret handle_vmptrld(struct capability *to,
596 int cmd, uintptr_t *args)
598 #if defined(__k1om__) || defined(CONFIG_SVM)
599 return SYSRET(SYS_ERR_VMKIT_UNAVAIL);
602 struct dcb *dcb = to->u.dispatcher.dcb;
603 lpaddr_t vmcs_base = dcb->guest_desc.vmcb.cap.u.frame.base;
604 err = vmptrld(vmcs_base);
// DispatcherCmd_Vmclear: clear this dispatcher's VMCS (deactivates it).
609 static struct sysret handle_vmclear(struct capability *to,
610 int cmd, uintptr_t *args)
612 #if defined(__k1om__) || defined(CONFIG_SVM)
613 return SYSRET(SYS_ERR_VMKIT_UNAVAIL);
616 struct dcb *dcb = to->u.dispatcher.dcb;
617 lpaddr_t vmcs_base = dcb->guest_desc.vmcb.cap.u.frame.base;
618 err = vmclear(vmcs_base);
/**
 * \brief DispatcherCmd_SetupGuest: configure a dispatcher as a VM guest.
 *
 * Validates and installs four caps from the caller's cspace: the monitor
 * endpoint (exit notifications), the guest's PML4 vspace root, the VMCB/VMCS
 * frame, and the control-structure frame; then initializes the VMCS and
 * flags the DCB as a VM guest. Several error-return and #ifdef lines are
 * elided in this excerpt.
 */
625 handle_dispatcher_setup_guest (struct capability *to, int cmd, uintptr_t *args)
628 struct dcb *dcb = to->u.dispatcher.dcb;
630 capaddr_t epp = args[0];     // monitor endpoint cap
631 capaddr_t vnodep = args[1];  // guest vspace root (PML4)
632 capaddr_t vmcbp = args[2];   // VMCB/VMCS frame
633 capaddr_t ctrlp = args[3];   // guest control frame
635 // 0. Enable VM extensions
636 err = vmkit_enable_virtualization();
637 if (err != SYS_ERR_OK) {
641 // 1. Check arguments
642 // Monitor endpoint for exits of this guest
645 err = caps_lookup_slot(&dcb_current->cspace.cap, epp, CPTR_BITS,
646 &ep_cte, CAPRIGHTS_READ_WRITE);
647 if (err_is_fail(err)) {
650 if (ep_cte->cap.type != ObjType_EndPoint) {
651 return SYSRET(SYS_ERR_VMKIT_ENDPOINT_INVALID);
// Install a copy of the endpoint into the guest descriptor.
653 err = caps_copy_to_cte(&dcb->guest_desc.monitor_ep, ep_cte, false, 0, 0);
654 if (err_is_fail(err)) {
655 return SYSRET(err_push(err, SYS_ERR_VMKIT_ENDPOINT));
// Guest vspace root: must be an x86-64 PML4.
659 struct capability *vnode_cap;
660 err = caps_lookup_cap(&dcb_current->cspace.cap, vnodep, CPTR_BITS,
661 &vnode_cap, CAPRIGHTS_WRITE);
662 if (err_is_fail(err)) {
665 if (vnode_cap->type != ObjType_VNode_x86_64_pml4) {
666 return SYSRET(SYS_ERR_DISP_VSPACE_INVALID);
669 assert(vnode_cap->type == ObjType_VNode_x86_64_pml4);
// VMCB/VMCS frame: at least one page.
672 struct cte *vmcb_cte;
673 err = caps_lookup_slot(&dcb_current->cspace.cap, vmcbp, CPTR_BITS,
674 &vmcb_cte, CAPRIGHTS_READ_WRITE);
675 if (err_is_fail(err)) {
678 if (vmcb_cte->cap.type != ObjType_Frame ||
679 vmcb_cte->cap.u.frame.bits < BASE_PAGE_BITS) {
680 return SYSRET(SYS_ERR_VMKIT_VMCB_INVALID);
682 err = caps_copy_to_cte(&dcb->guest_desc.vmcb, vmcb_cte, false, 0, 0);
683 if (err_is_fail(err)) {
684 return SYSRET(err_push(err, SYS_ERR_VMKIT_VMCB));
// Control frame: at least one page.
688 struct cte *ctrl_cte;
689 err = caps_lookup_slot(&dcb_current->cspace.cap, ctrlp, CPTR_BITS,
690 &ctrl_cte, CAPRIGHTS_READ_WRITE);
691 if (err_is_fail(err)) {
694 if (ctrl_cte->cap.type != ObjType_Frame ||
695 ctrl_cte->cap.u.frame.bits < BASE_PAGE_BITS) {
696 return SYSRET(SYS_ERR_VMKIT_CTRL_INVALID);
698 err = caps_copy_to_cte(&dcb->guest_desc.ctrl, ctrl_cte, false, 0, 0);
699 if (err_is_fail(err)) {
700 return SYSRET(err_push(err, SYS_ERR_VMKIT_CTRL));
704 // Initialize VMCS for the single virtual-CPU here instead of in
705 // userspace, where the privilege level is not 0.
706 err = initialize_vmcs(vmcb_cte->cap.u.frame.base);
707 assert(err_is_ok(err));
710 // 2. Set up the target DCB
711 /* dcb->guest_desc.monitor_ep = ep_cap; */
712 dcb->vspace = vnode_cap->u.vnode_x86_64_pml4.base;
713 dcb->is_vm_guest = true;
714 /* dcb->guest_desc.vmcb = vmcb_cap->u.frame.base; */
715 /* dcb->guest_desc.ctrl = (void *)x86_64_phys_to_mem(ctrl_cap->u.frame.base); */
717 return SYSRET(SYS_ERR_OK);
// KernelCmd_Domain_Id: bind a domain id to the dispatcher cap at cptr.
721 static struct sysret monitor_handle_domain_id(struct capability *monitor_cap,
722 int cmd, uintptr_t *args)
724 capaddr_t cptr = args[0];
725 domainid_t domain_id = args[1];
727 return sys_monitor_domain_id(cptr, domain_id);
// KernelCmd_Get_cap_owner: query the owning core of a cap in the cspace
// rooted at root_addr/root_bits.
730 static struct sysret monitor_get_cap_owner(struct capability *monitor_cap,
731 int cmd, uintptr_t *args)
733 capaddr_t root_addr = args[0];
734 uint8_t root_bits = args[1];
735 capaddr_t cptr = args[2];
736 uint8_t bits = args[3];
738 return sys_get_cap_owner(root_addr, root_bits, cptr, bits);
// KernelCmd_Set_cap_owner: set the owning core of a cap (args[4]).
741 static struct sysret monitor_set_cap_owner(struct capability *monitor_cap,
742 int cmd, uintptr_t *args)
744 capaddr_t root_addr = args[0];
745 uint8_t root_bits = args[1];
746 capaddr_t cptr = args[2];
747 uint8_t bits = args[3];
748 coreid_t owner = args[4];
750 return sys_set_cap_owner(root_addr, root_bits, cptr, bits, owner);
// KernelCmd_Lock_cap: lock a cap for a distributed operation.
753 static struct sysret monitor_lock_cap(struct capability *monitor_cap,
754 int cmd, uintptr_t *args)
756 capaddr_t root_addr = args[0];
757 uint8_t root_bits = args[1];
758 capaddr_t cptr = args[2];
759 uint8_t bits = args[3];
761 return sys_lock_cap(root_addr, root_bits, cptr, bits);
// KernelCmd_Unlock_cap: release the lock taken by monitor_lock_cap.
764 static struct sysret monitor_unlock_cap(struct capability *monitor_cap,
765 int cmd, uintptr_t *args)
767 capaddr_t root_addr = args[0];
768 uint8_t root_bits = args[1];
769 capaddr_t cptr = args[2];
770 uint8_t bits = args[3];
772 return sys_unlock_cap(root_addr, root_bits, cptr, bits);
776 * \brief Set up tracing in the kernel
/**
 * KernelCmd_Setup_trace: point the kernel trace buffer at the frame cap in
 * args[0] and seed it with the boot applications.
 * NOTE(review): no type check that the looked-up cap is actually a Frame
 * before reading u.frame.base — rights check only; confirm upstream whether
 * this is intentional.
 */
778 static struct sysret handle_trace_setup(struct capability *cap,
779 int cmd, uintptr_t *args)
781 struct capability *frame;
784 /* lookup passed cap */
785 capaddr_t cptr = args[0];
786 err = caps_lookup_cap(&dcb_current->cspace.cap, cptr, CPTR_BITS, &frame,
787 CAPRIGHTS_READ_WRITE);
788 if (err_is_fail(err)) {
792 lpaddr_t lpaddr = gen_phys_to_local_phys(frame->u.frame.base);
793 kernel_trace_buf = local_phys_to_mem(lpaddr);
794 //printf("kernel.%u: handle_trace_setup at %lx\n", apic_id, kernel_trace_buf);
796 // Copy boot applications.
797 trace_copy_boot_applications();
799 return SYSRET(SYS_ERR_OK);
// IRQCmd_GetVector: report the interrupt line stored in the IRQ cap.
802 static struct sysret handle_irq_get_vector(struct capability * to, int cmd,
806 ret.error = SYS_ERR_OK;
807 ret.value = to->u.irq.line;
// IRQVectorCmd_GetVector: report the vector stored in the IRQVector cap.
812 static struct sysret handle_irqvector_get_vector(struct capability *to, int cmd,
816 ret.error = SYS_ERR_OK;
817 ret.value = to->u.irqvector.vector;
// IRQVectorCmd_Connect: connect an endpoint (args[0]) to this IRQ vector.
821 static struct sysret handle_irqvector_connect(struct capability *to, int cmd,
824 return SYSRET(irq_connect(to, args[0]));
// IRQTableCmd_Alloc: allocate a free interrupt vector (returned via outvec).
827 static struct sysret handle_irq_table_alloc(struct capability *to, int cmd,
832 ret.error = irq_table_alloc(&outvec);
// IRQTableCmd_AllocDestCap: allocate a destination IRQ cap into the
// cnode/slot named by args[0..2].
837 static struct sysret handle_irq_table_alloc_dest_cap(struct capability *to, int cmd,
840 return SYSRET(irq_table_alloc_dest_cap(args[0],args[1],args[2]));
// IRQTableCmd_Set: route vector args[0] to the endpoint cap args[1].
844 static struct sysret handle_irq_table_set(struct capability *to, int cmd,
847 return SYSRET(irq_table_set(args[0], args[1]));
// IRQTableCmd_Delete: remove the routing entry for vector args[0].
850 static struct sysret handle_irq_table_delete(struct capability *to, int cmd,
853 return SYSRET(irq_table_delete(args[0]));
// NotifyCmd_Send: raise an IPI notification on the core/channel stored in
// the Notify_IPI cap.
856 static struct sysret handle_ipi_notify_send(struct capability *cap,
857 int cmd, uintptr_t *args)
859 assert(cap->type == ObjType_Notify_IPI);
860 return ipi_raise_notify(cap->u.notify_ipi.coreid, cap->u.notify_ipi.chanid);
// KernelCmd_IPI_Register: register endpoint `ep` for notifications on IPI
// channel `chanid`.
863 static struct sysret kernel_ipi_register(struct capability *cap,
864 int cmd, uintptr_t *args)
866 assert(cap->type == ObjType_Kernel);
867 capaddr_t ep = args[0];
868 int chanid = args[1];
869 return SYSRET(ipi_register_notification(ep, chanid));
// KernelCmd_IPI_Delete: stub — body elided in this excerpt; visibly just
// reports success. TODO confirm against the full file.
872 static struct sysret kernel_ipi_delete(struct capability *cap,
873 int cmd, uintptr_t *args)
875 assert(cap->type == ObjType_Kernel);
877 return SYSRET(SYS_ERR_OK);
// DispatcherCmd_DumpPTables: debug aid — print the dispatcher's page tables
// to the kernel console.
880 static struct sysret dispatcher_dump_ptables(struct capability *cap,
881 int cmd, uintptr_t *args)
883 assert(cap->type == ObjType_Dispatcher);
885 printf("kernel_dump_ptables\n");
887 struct dcb *dispatcher = cap->u.dispatcher.dcb;
889 paging_dump_tables(dispatcher);
891 return SYSRET(SYS_ERR_OK);
// DispatcherCmd_DumpCapabilities: debug aid — print the dispatcher's caps.
// NOTE(review): "debug_print_cababilities" is the (misspelled) project API
// name; renaming it here would break linkage — fix belongs at the callee.
894 static struct sysret dispatcher_dump_capabilities(struct capability *cap,
895 int cmd, uintptr_t *args)
897 assert(cap->type == ObjType_Dispatcher);
899 printf("dispatcher_dump_capabilities\n");
901 struct dcb *dispatcher = cap->u.dispatcher.dcb;
903 errval_t err = debug_print_cababilities(dispatcher);
909 * \brief Activate performance monitoring
911 * Activates performance monitoring.
912 * \param xargs Expected parameters in args:
913 * - performance monitoring type
914 * - mask for given type
916 * - Also count in privileged mode
917 * - Number of counts before overflow. This parameter may be used to
918 * set tradeoff between accuracy and overhead. Set the counter to 0
919 * to deactivate the usage of APIC.
920 * - Endpoint capability to be invoked when the counter overflows.
921 * The buffer associated with the endpoint needs to be large enough
922 * to hold several overflow notifications depending on the overflow
/**
 * PerfmonCmd_Activate handler. Starts the hardware counter and, when an
 * overflow endpoint is supplied (ep_addr != 0), installs it as the global
 * perfmon callback endpoint. Lines around the lookup/return are elided.
 */
925 static struct sysret performance_counter_activate(struct capability *cap,
926 int cmd, uintptr_t *args)
928 uint8_t event = args[0];
929 uint8_t umask = args[1];
930 uint8_t counter_id = args[2];
931 bool kernel = args[3];           // also count in privileged mode
932 uint64_t counter_value = args[4]; // counts before overflow (0 = no APIC)
933 capaddr_t ep_addr = args[5];
936 struct capability *ep;
937 extern struct capability perfmon_callback_ep;
// An overflow endpoint is required whenever overflow interrupts are enabled.
940 assert(ep_addr!=0 || counter_value==0);
943 perfmon_measure_start(event, umask, counter_id, kernel, counter_value);
947 err = caps_lookup_cap(&dcb_current->cspace.cap, ep_addr, CPTR_BITS, &ep,
949 if(err_is_fail(err)) {
// Store the endpoint by value as the global overflow-notification target.
953 perfmon_callback_ep = *ep;
956 return SYSRET(SYS_ERR_OK);
960 * \brief Write counter values.
962 static struct sysret performance_counter_write(struct capability *cap,
963 int cmd, uintptr_t *args)
965 uint8_t counter_id = args[0];
966 uint64_t counter_value = args[1];
968 perfmon_measure_write(counter_id, counter_value);
969 return SYSRET(SYS_ERR_OK);
973 * \brief Deactivate performance counters again.
975 static struct sysret performance_counter_deactivate(struct capability *cap,
976 int cmd, uintptr_t *args)
978 perfmon_measure_stop();
979 return SYSRET(SYS_ERR_OK);
983 * \brief Return system-wide unique ID of this ID cap.
// IDCmd_Identify: the id is written into `id` (declaration elided) and
// presumably copied into the sysret value on the elided lines below.
985 static struct sysret handle_idcap_identify(struct capability *cap, int cmd,
989 struct sysret sysret = sys_idcap_identify(cap, &id);
// IPICmd_Send_Init: send INIT assert/deassert to `destination` — the first
// half of the x86 INIT/SIPI core-bringup sequence.
995 static struct sysret kernel_send_init_ipi(struct capability *cap, int cmd,
998 coreid_t destination = args[0];
999 // printk(LOG_DEBUG, "%s:%s:%d: destination=%"PRIuCOREID"\n",
1000 // __FILE__, __FUNCTION__, __LINE__, destination);
1002 apic_send_init_assert(destination, xapic_none);
1003 apic_send_init_deassert();
1005 return SYSRET(SYS_ERR_OK);
// IPICmd_Send_Start: send the startup IPI (SIPI) with the real-mode entry
// page derived from the fixed real-mode segment.
1008 static struct sysret kernel_send_start_ipi(struct capability *cap,
1012 coreid_t destination = args[0];
1013 genvaddr_t start_vector = X86_64_REAL_MODE_SEGMENT_TO_REAL_MODE_PAGE(X86_64_REAL_MODE_SEGMENT);
1014 // printk(LOG_DEBUG, "%s:%d: destination=%"PRIuCOREID" start_vector=%"PRIxGENVADDR"\n",
1015 // __FILE__, __LINE__, destination, start_vector);
1017 apic_send_start_up(destination, xapic_none, start_vector);
1019 return SYSRET(SYS_ERR_OK);
// KernelCmd_GetGlobalPhys: return the local physical address of the
// kernel's `global` structure (return statement elided in this excerpt).
1022 static struct sysret kernel_get_global_phys(struct capability *cap,
1027 struct sysret sysret;
1028 sysret.value = mem_to_local_phys((lvaddr_t)global);
1029 sysret.error = SYS_ERR_OK;
// KernelCmd_Add_kcb: register an additional kernel control block for
// multi-KCB scheduling. NOTE(review): kcb_addr is used as a kernel pointer
// without visible validation here — presumably checked in sys_kernel_add_kcb.
1034 static struct sysret kernel_add_kcb(struct capability *kern_cap,
1035 int cmd, uintptr_t *args)
1037 uintptr_t kcb_addr = args[0];
1038 struct kcb *new_kcb = (struct kcb *)kcb_addr;
1040 return sys_kernel_add_kcb(new_kcb);
// KernelCmd_Remove_kcb: remove a previously registered KCB.
1043 static struct sysret kernel_remove_kcb(struct capability *kern_cap,
1044 int cmd, uintptr_t *args)
1046 printk(LOG_NOTE, "in kernel_remove_kcb invocation!\n");
1047 uintptr_t kcb_addr = args[0];
1048 struct kcb *to_remove = (struct kcb *)kcb_addr;
1050 return sys_kernel_remove_kcb(to_remove);
// KernelCmd_Suspend_kcb_sched: suspend/resume round-robin KCB scheduling.
1053 static struct sysret kernel_suspend_kcb_sched(struct capability *kern_cap,
1054 int cmd, uintptr_t *args)
1056 printk(LOG_NOTE, "in kernel_suspend_kcb_sched invocation!\n");
1057 return sys_kernel_suspend_kcb_sched((bool)args[0]);
// FrameCmd_Identify on a KCB cap: report the KCB's identity.
1060 static struct sysret handle_kcb_identify(struct capability *to,
1061 int cmd, uintptr_t *args)
1063 return sys_handle_kcb_identify(to);
// Signature shared by every invocation handler in the dispatch table below.
1067 typedef struct sysret (*invocation_handler_t)(struct capability *to,
1068 int cmd, uintptr_t *args);
/**
 * Dispatch table: invocations[cap type][command] -> handler.
 * Unlisted entries are NULL (designated-initializer default); sys_syscall
 * presumably rejects those as unsupported invocations — confirm there.
 * NOTE(review): several sub-table headers (e.g. ObjType_Frame, ObjType_CNode,
 * ObjType_IPI, ObjType_IRQ, ObjType_IO, ObjType_ID) and closing braces are
 * elided in this excerpt.
 */
1070 static invocation_handler_t invocations[ObjType_Num][CAP_MAX_CMD] = {
1071 [ObjType_Dispatcher] = {
1072 [DispatcherCmd_Setup] = handle_dispatcher_setup,
1073 [DispatcherCmd_Properties] = handle_dispatcher_properties,
1075 [DispatcherCmd_SetupGuest] = handle_dispatcher_setup_guest,
1077 [DispatcherCmd_DumpPTables] = dispatcher_dump_ptables,
1078 [DispatcherCmd_DumpCapabilities] = dispatcher_dump_capabilities,
1079 [DispatcherCmd_Vmread] = handle_vmread,
1080 [DispatcherCmd_Vmwrite] = handle_vmwrite,
1081 [DispatcherCmd_Vmptrld] = handle_vmptrld,
1082 [DispatcherCmd_Vmclear] = handle_vmclear,
1084 [ObjType_KernelControlBlock] = {
1085 [FrameCmd_Identify] = handle_kcb_identify,
1088 [FrameCmd_Identify] = handle_frame_identify,
1090 [ObjType_DevFrame] = {
1091 [FrameCmd_Identify] = handle_frame_identify,
1094 [CNodeCmd_Copy] = handle_copy,
1095 [CNodeCmd_Mint] = handle_mint,
1096 [CNodeCmd_Retype] = handle_retype,
1097 [CNodeCmd_Create] = handle_create,
1098 [CNodeCmd_Delete] = handle_delete,
1099 [CNodeCmd_Revoke] = handle_revoke,
1100 [CNodeCmd_GetState] = handle_get_state,
1102 [ObjType_VNode_x86_64_pml4] = {
1103 [VNodeCmd_Identify] = handle_vnode_identify,
1104 [VNodeCmd_Map] = handle_map,
1105 [VNodeCmd_Unmap] = handle_unmap,
1107 [ObjType_VNode_x86_64_pdpt] = {
1108 [VNodeCmd_Identify] = handle_vnode_identify,
1109 [VNodeCmd_Map] = handle_map,
1110 [VNodeCmd_Unmap] = handle_unmap,
1112 [ObjType_VNode_x86_64_pdir] = {
1113 [VNodeCmd_Identify] = handle_vnode_identify,
1114 [VNodeCmd_Map] = handle_map,
1115 [VNodeCmd_Unmap] = handle_unmap,
1117 [ObjType_VNode_x86_64_ptable] = {
1118 [VNodeCmd_Identify] = handle_vnode_identify,
1119 [VNodeCmd_Map] = handle_map,
1120 [VNodeCmd_Unmap] = handle_unmap,
1122 [ObjType_Frame_Mapping] = {
1123 [MappingCmd_Destroy] = handle_mapping_destroy,
1124 [MappingCmd_Modify] = handle_mapping_modify,
1126 [ObjType_DevFrame_Mapping] = {
1127 [MappingCmd_Destroy] = handle_mapping_destroy,
1128 [MappingCmd_Modify] = handle_mapping_modify,
1130 [ObjType_VNode_x86_64_pml4_Mapping] = {
1131 [MappingCmd_Destroy] = handle_mapping_destroy,
1132 [MappingCmd_Modify] = handle_mapping_modify,
1134 [ObjType_VNode_x86_64_pdpt_Mapping] = {
1135 [MappingCmd_Destroy] = handle_mapping_destroy,
1136 [MappingCmd_Modify] = handle_mapping_modify,
1138 [ObjType_VNode_x86_64_pdir_Mapping] = {
1139 [MappingCmd_Destroy] = handle_mapping_destroy,
1140 [MappingCmd_Modify] = handle_mapping_modify,
1142 [ObjType_VNode_x86_64_ptable_Mapping] = {
1143 [MappingCmd_Destroy] = handle_mapping_destroy,
1144 [MappingCmd_Modify] = handle_mapping_modify,
// Kernel cap: privileged monitor-only operations.
1146 [ObjType_Kernel] = {
1147 [KernelCmd_Get_core_id] = monitor_get_core_id,
1148 [KernelCmd_Get_arch_id] = monitor_get_arch_id,
1149 [KernelCmd_Identify_cap] = monitor_identify_cap,
1150 [KernelCmd_Identify_domains_cap] = monitor_identify_domains_cap,
1151 [KernelCmd_Remote_relations] = monitor_remote_relations,
1152 [KernelCmd_Cap_has_relations] = monitor_cap_has_relations,
1153 [KernelCmd_Create_cap] = monitor_create_cap,
1154 [KernelCmd_Copy_existing] = monitor_copy_existing,
1155 [KernelCmd_Nullify_cap] = monitor_nullify_cap,
1156 [KernelCmd_Setup_trace] = handle_trace_setup,
1157 [KernelCmd_Register] = monitor_handle_register,
1158 [KernelCmd_Domain_Id] = monitor_handle_domain_id,
1159 [KernelCmd_Get_cap_owner] = monitor_get_cap_owner,
1160 [KernelCmd_Set_cap_owner] = monitor_set_cap_owner,
1161 [KernelCmd_Lock_cap] = monitor_lock_cap,
1162 [KernelCmd_Unlock_cap] = monitor_unlock_cap,
1163 [KernelCmd_Retype] = monitor_handle_retype,
1164 [KernelCmd_Has_descendants] = monitor_handle_has_descendants,
1165 [KernelCmd_Delete_last] = monitor_handle_delete_last,
1166 [KernelCmd_Delete_foreigns] = monitor_handle_delete_foreigns,
1167 [KernelCmd_Revoke_mark_target] = monitor_handle_revoke_mark_tgt,
1168 [KernelCmd_Revoke_mark_relations] = monitor_handle_revoke_mark_rels,
1169 [KernelCmd_Delete_step] = monitor_handle_delete_step,
1170 [KernelCmd_Clear_step] = monitor_handle_clear_step,
1171 [KernelCmd_Sync_timer] = monitor_handle_sync_timer,
1172 [KernelCmd_IPI_Register] = kernel_ipi_register,
1173 [KernelCmd_IPI_Delete] = kernel_ipi_delete,
1174 [KernelCmd_GetGlobalPhys] = kernel_get_global_phys,
1175 [KernelCmd_Add_kcb] = kernel_add_kcb,
1176 [KernelCmd_Remove_kcb] = kernel_remove_kcb,
1177 [KernelCmd_Suspend_kcb_sched] = kernel_suspend_kcb_sched,
1180 [IPICmd_Send_Start] = kernel_send_start_ipi,
1181 [IPICmd_Send_Init] = kernel_send_init_ipi,
1184 [IRQCmd_GetVector] = handle_irq_get_vector
1186 [ObjType_IRQVector] = {
1187 [IRQVectorCmd_Connect] = handle_irqvector_connect,
1188 [IRQVectorCmd_GetVector] = handle_irqvector_get_vector
1190 [ObjType_IRQTable] = {
1191 [IRQTableCmd_Alloc] = handle_irq_table_alloc,
1192 [IRQTableCmd_AllocDestCap] = handle_irq_table_alloc_dest_cap,
1193 [IRQTableCmd_Set] = handle_irq_table_set,
1194 [IRQTableCmd_Delete] = handle_irq_table_delete
1197 [IOCmd_Outb] = handle_io,
1198 [IOCmd_Outw] = handle_io,
1199 [IOCmd_Outd] = handle_io,
1200 [IOCmd_Inb] = handle_io,
1201 [IOCmd_Inw] = handle_io,
1202 [IOCmd_Ind] = handle_io
1204 [ObjType_Notify_IPI] = {
1205 [NotifyCmd_Send] = handle_ipi_notify_send
1207 [ObjType_PerfMon] = {
1208 [PerfmonCmd_Activate] = performance_counter_activate,
1209 [PerfmonCmd_Deactivate] = performance_counter_deactivate,
1210 [PerfmonCmd_Write] = performance_counter_write,
1213 [IDCmd_Identify] = handle_idcap_identify,
1217 /* syscall C entry point; called only from entry.S so no prototype in header */
1218 struct sysret sys_syscall(uint64_t syscall, uint64_t arg0, uint64_t arg1,
1219 uint64_t *args, uint64_t rflags, uint64_t rip);
1220 struct sysret sys_syscall(uint64_t syscall, uint64_t arg0, uint64_t arg1,
1221 uint64_t *args, uint64_t rflags, uint64_t rip)
1223 struct sysret retval = { .error = SYS_ERR_OK, .value = 0 };
1226 case SYSCALL_INVOKE: /* Handle capability invocation */
1228 // unpack "header" word
1229 capaddr_t invoke_cptr = arg0 >> 32;
1230 uint8_t send_bits = arg0 >> 24;
1231 uint8_t invoke_bits = arg0 >> 16;
1232 uint8_t length_words = arg0 >> 8;
1233 uint8_t flags = arg0;
1235 debug(SUBSYS_SYSCALL, "sys_invoke(0x%x(%d), 0x%lx)\n",
1236 invoke_cptr, invoke_bits, arg1);
1238 // Capability to invoke
1239 struct capability *to = NULL;
1240 retval.error = caps_lookup_cap(&dcb_current->cspace.cap, invoke_cptr,
1241 invoke_bits, &to, CAPRIGHTS_READ);
1242 if (err_is_fail(retval.error)) {
1247 assert(to->type < ObjType_Num);
1249 // Endpoint cap, do LMP
1250 if (to->type == ObjType_EndPoint) {
1251 struct dcb *listener = to->u.endpoint.listener;
1252 assert(listener != NULL);
1254 if (listener->disp == 0) {
1255 retval.error = SYS_ERR_LMP_NO_TARGET;
1259 /* limit length of message from buggy/malicious sender */
1260 length_words = MIN(length_words, LMP_MSG_LENGTH);
1262 // does the sender want to yield their timeslice on success?
1263 bool sync = flags & LMP_FLAG_SYNC;
1264 // does the sender want to yield to the target if undeliverable?
1265 bool yield = flags & LMP_FLAG_YIELD;
1266 // is the cap (if present) to be deleted on send?
1267 bool give_away = flags & LMP_FLAG_GIVEAWAY;
1269 // try to deliver message
1270 retval.error = lmp_deliver(to, dcb_current, args, length_words,
1271 arg1, send_bits, give_away);
1273 /* Switch to receiver upon successful delivery with sync flag,
1274 * or (some cases of) unsuccessful delivery with yield flag */
1275 enum err_code err_code = err_no(retval.error);
1276 if ((sync && err_is_ok(retval.error)) ||
1277 (yield && (err_code == SYS_ERR_LMP_BUF_OVERFLOW
1278 || err_code == SYS_ERR_LMP_CAPTRANSFER_DST_CNODE_LOOKUP
1279 || err_code == SYS_ERR_LMP_CAPTRANSFER_DST_CNODE_INVALID
1280 || err_code == SYS_ERR_LMP_CAPTRANSFER_DST_SLOT_OCCUPIED))
1282 if (err_is_fail(retval.error)) {
1283 struct dispatcher_shared_generic *current_disp =
1284 get_dispatcher_shared_generic(dcb_current->disp);
1285 struct dispatcher_shared_generic *listener_disp =
1286 get_dispatcher_shared_generic(listener->disp);
1287 debug(SUBSYS_DISPATCH, "LMP failed; %.*s yields to %.*s: %u\n",
1288 DISP_NAME_LEN, current_disp->name,
1289 DISP_NAME_LEN, listener_disp->name, err_code);
1292 // special-case context switch: ensure correct state in current DCB
1293 dispatcher_handle_t handle = dcb_current->disp;
1294 struct dispatcher_shared_x86_64 *disp =
1295 get_dispatcher_shared_x86_64(handle);
1296 dcb_current->disabled = dispatcher_is_disabled_ip(handle, rip);
1297 struct registers_x86_64 *save_area;
1298 if (dcb_current->disabled) {
1299 save_area = &disp->disabled_save_area;
1301 save_area = &disp->enabled_save_area;
1304 // Should be enabled. Else, how do we do an invocation??
1305 if(dcb_current->disabled) {
1306 panic("Dispatcher needs to be enabled for this invocation");
1309 // save calling dispatcher's registers, so that when the dispatcher
1310 // next runs, it has a valid state in the relevant save area.
1311 // Save RIP, RFLAGS, RSP and set RAX (return value) for later resume
1312 save_area->rax = retval.error; // XXX: x86 1st return register
1313 save_area->rip = rip;
1314 save_area->eflags = rflags;
1315 save_area->rsp = user_stack_save;
1317 if(!dcb_current->is_vm_guest) {
1318 /* save and zero FS/GS selectors (they're unmodified by the syscall path) */
1319 __asm ("mov %%fs, %[fs] \n\t"
1320 "mov %%gs, %[gs] \n\t"
1321 "mov %[zero], %%fs \n\t"
1322 "mov %[zero], %%gs \n\t"
1325 [fs] "m" (save_area->fs),
1326 [gs] "m" (save_area->gs),
1332 lpaddr_t lpaddr = gen_phys_to_local_phys(dcb_current->guest_desc.vmcb.cap.u.frame.base);
1334 amd_vmcb_initialize(&vmcb, (void *)local_phys_to_mem(lpaddr));
1335 save_area->fs = amd_vmcb_fs_selector_rd(&vmcb);
1336 save_area->gs = amd_vmcb_gs_selector_rd(&vmcb);
1339 err = vmread(VMX_GUEST_FS_SEL, (uint64_t *)&save_area->fs);
1340 err += vmread(VMX_GUEST_GS_SEL, (uint64_t *)&save_area->gs);
1341 assert(err_is_ok(err));
1344 panic("VM Guests not supported on Xeon Phi");
1348 dispatch(to->u.endpoint.listener);
1349 panic("dispatch returned");
1351 } else { // not endpoint cap, call kernel handler through dispatch table
1352 uint64_t cmd = args[0];
1353 if (cmd >= CAP_MAX_CMD) {
1354 retval.error = SYS_ERR_ILLEGAL_INVOCATION;
1358 // Call the invocation
1359 invocation_handler_t invocation = invocations[to->type][cmd];
1360 if(invocation == NULL) {
1361 printk(LOG_WARN, "invocation not found. type: %"PRIu32", cmd: %"PRIu64"\n",
1363 retval.error = SYS_ERR_ILLEGAL_INVOCATION;
1365 retval = invocation(to, cmd, &args[1]);
1371 // Yield the CPU to the next dispatcher
1373 TRACE(KERNEL, SC_YIELD, 0);
1374 retval = sys_yield((capaddr_t)arg0);
1375 TRACE(KERNEL, SC_YIELD, 1);
1378 // NOP system call for benchmarking purposes
1382 // Debug print system call
1384 TRACE(KERNEL, SC_PRINT, 0);
1385 retval.error = sys_print((char *)arg0, arg1);
1386 TRACE(KERNEL, SC_PRINT, 1);
1390 // FIXME: this should be a kernel cap invocation or similarly restricted
1391 case SYSCALL_REBOOT:
1395 case SYSCALL_X86_FPU_TRAP_ON:
1399 case SYSCALL_X86_RELOAD_LDT:
1400 maybe_reload_ldt(dcb_current, true);
1403 // Temporarily suspend the CPU
1404 case SYSCALL_SUSPEND:
1405 TRACE(KERNEL, SC_SUSPEND, 0);
1406 retval = sys_suspend((bool)arg0);
1407 TRACE(KERNEL, SC_SUSPEND, 1);
1410 case SYSCALL_GET_ABS_TIME:
1411 retval = sys_get_absolute_time();
1416 case DEBUG_CONTEXT_COUNTER_RESET:
1417 dispatch_csc_reset();
1420 case DEBUG_CONTEXT_COUNTER_READ:
1421 retval.value = dispatch_get_csc();
1424 case DEBUG_TIMESLICE_COUNTER_READ:
1425 retval.value = kernel_now;
1428 case DEBUG_FLUSH_CACHE:
1432 case DEBUG_SEND_IPI:
1433 apic_send_std_ipi(arg1, args[0], args[1]);
1436 case DEBUG_SET_BREAKPOINT:
1437 debugregs_set_breakpoint(arg1, args[0], args[1]);
1440 case DEBUG_GET_TSC_PER_MS:
1441 retval.value = timing_get_tsc_per_ms();
1444 case DEBUG_GET_APIC_TIMER:
1445 retval.value = apic_timer_get_count();
1448 case DEBUG_GET_APIC_TICKS_PER_SEC:
1449 retval.value = timing_get_apic_ticks_per_sec();
1452 case DEBUG_TRACE_PMEM_CTRL:
1453 #ifdef TRACE_PMEM_CAPS
1455 caps_trace_ctrl(arg1, args[0], args[1]);
1457 caps_trace_ctrl(arg1, 0, 0);
1461 retval.error = SYS_ERR_OK;
1465 case DEBUG_GET_APIC_ID:
1466 retval.value = apic_get_id();
1469 case DEBUG_CREATE_IRQ_SRC_CAP:
1470 retval.error = irq_debug_create_src_cap(arg1, args[0], args[1], args[2]);
1474 printk(LOG_ERR, "invalid sys_debug msg type\n");
1479 printk(LOG_ERR, "sys_syscall: Illegal system call! "
1480 "(0x%lx, 0x%lx, 0x%lx)\n", syscall, arg0, arg1);
1481 retval.error = SYS_ERR_ILLEGAL_SYSCALL;
1485 // If dcb_current got removed, dispatch someone else
1486 if (dcb_current == NULL) {
1487 assert(err_is_ok(retval.error));
1488 dispatch(schedule());
1491 if (syscall == SYSCALL_INVOKE) {
1492 debug(SUBSYS_SYSCALL, "invoke returning 0x%lx 0x%lx\n",
1493 retval.error, retval.value);