3 * \brief System calls implementation.
7 * Copyright (c) 2007, 2008, 2009, 2010, 2012, ETH Zurich.
10 * This file is distributed under the terms in the attached LICENSE file.
11 * If you do not find this file, copies can be found by writing to:
12 * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
17 #include <sys_debug.h>
19 #include <barrelfish_kpi/syscalls.h>
21 #include <mdb/mdb_tree.h>
23 #include <paging_kernel_arch.h>
24 #include <paging_generic.h>
27 #include <arch/x86/x86.h>
28 #include <arch/x86/apic.h>
29 #include <arch/x86/global.h>
30 #include <arch/x86/perfmon.h>
31 #include <arch/x86/debugregs.h>
32 #include <arch/x86/syscall.h>
33 #include <arch/x86/timing.h>
34 #include <arch/x86/ipi_notify.h>
35 #include <barrelfish_kpi/sys_debug.h>
36 #include <barrelfish_kpi/lmp.h>
37 #include <barrelfish_kpi/dispatcher_shared_target.h>
38 #include <trace/trace.h>
39 #include <useraccess.h>
42 #include <dev/amd_vmcb_dev.h>
45 #define MIN(a,b) ((a) < (b) ? (a) : (b))
47 extern uint64_t user_stack_save;
49 /* FIXME: lots of missing argument checks in this function */
50 static struct sysret handle_dispatcher_setup(struct capability *to,
51 int cmd, uintptr_t *args)
53 capaddr_t cptr = args[0];
55 capaddr_t vptr = args[2];
56 capaddr_t dptr = args[3];
58 capaddr_t odptr = args[5];
60 TRACE(KERNEL, SC_DISP_SETUP, 0);
61 struct sysret sr = sys_dispatcher_setup(to, cptr, depth, vptr, dptr, run, odptr);
62 TRACE(KERNEL, SC_DISP_SETUP, 1);
66 static struct sysret handle_dispatcher_properties(struct capability *to,
67 int cmd, uintptr_t *args)
69 enum task_type type = args[0];
70 unsigned long deadline = args[1];
71 unsigned long wcet = args[2];
72 unsigned long period = args[3];
73 unsigned long release = args[4];
74 unsigned short weight = args[5];
76 TRACE(KERNEL, SC_DISP_PROPS, 0);
77 struct sysret sr = sys_dispatcher_properties(to, type, deadline, wcet, period,
79 TRACE(KERNEL, SC_DISP_PROPS, 1);
83 static struct sysret handle_retype_common(struct capability *root,
87 uint64_t source_cptr = args[0];
88 uint64_t type = args[1];
89 uint64_t objbits = args[2];
90 uint64_t dest_cnode_cptr = args[3];
91 uint64_t dest_slot = args[4];
92 uint64_t dest_vbits = args[5];
94 TRACE(KERNEL, SC_RETYPE, 0);
95 struct sysret sr = sys_retype(root, source_cptr, type, objbits, dest_cnode_cptr,
96 dest_slot, dest_vbits, from_monitor);
97 TRACE(KERNEL, SC_RETYPE, 1);
101 static struct sysret handle_retype_common2(struct capability *root,
105 uint64_t source_cptr = args[0];
106 uint64_t offset = args[1];
107 uint64_t type = args[2];
108 uint64_t objsize = args[3];
109 uint64_t objcount = args[4];
110 uint64_t dest_cnode_cptr = args[5];
111 uint64_t dest_slot = args[6];
112 uint64_t dest_vbits = args[7];
114 TRACE(KERNEL, SC_RETYPE, 0);
115 struct sysret sr = sys_retype2(root, source_cptr, offset, type, objsize,
116 objcount, dest_cnode_cptr, dest_slot, dest_vbits,
118 TRACE(KERNEL, SC_RETYPE, 1);
122 static struct sysret handle_retype(struct capability *root,
123 int cmd, uintptr_t *args)
125 return handle_retype_common(root, args, false);
128 static struct sysret handle_retype2(struct capability *root,
129 int cmd, uintptr_t *args)
131 return handle_retype_common2(root, args, false);
134 static struct sysret handle_create(struct capability *root,
135 int cmd, uintptr_t *args)
137 /* Retrieve arguments */
138 enum objtype type = args[0];
139 uint8_t objbits = args[1];
140 capaddr_t dest_cnode_cptr = args[2];
141 cslot_t dest_slot = args[3];
142 uint8_t dest_vbits = args[4];
144 TRACE(KERNEL, SC_CREATE, 0);
145 struct sysret sr = sys_create(root, type, objbits, dest_cnode_cptr, dest_slot,
147 TRACE(KERNEL, SC_CREATE, 1);
153 * Common code for copying and minting except the mint flag and param passing
155 static struct sysret copy_or_mint(struct capability *root,
156 uintptr_t *args, bool mint)
158 /* Retrive arguments */
159 capaddr_t destcn_cptr = args[0];
160 uint64_t dest_slot = args[1];
161 capaddr_t source_cptr = args[2];
162 int destcn_vbits = args[3];
163 int source_vbits = args[4];
164 uint64_t param1, param2;
165 // params only sent if mint operation
173 TRACE(KERNEL, SC_COPY_OR_MINT, 0);
174 struct sysret sr = sys_copy_or_mint(root, destcn_cptr, dest_slot, source_cptr,
175 destcn_vbits, source_vbits, param1, param2, mint);
176 TRACE(KERNEL, SC_COPY_OR_MINT, 1);
180 static struct sysret handle_map(struct capability *ptable,
181 int cmd, uintptr_t *args)
183 /* Retrieve arguments */
184 uint64_t slot = args[0];
185 capaddr_t source_cptr = args[1];
186 int source_vbits = args[2];
187 uint64_t flags = args[3];
188 uint64_t offset = args[4];
189 uint64_t pte_count = args[5];
190 capaddr_t mapping_cnptr = args[6];
191 int mapping_cnvbits = args[7];
192 cslot_t mapping_slot = args[8];
194 TRACE(KERNEL, SC_MAP, 0);
195 struct sysret sr = sys_map(ptable, slot, source_cptr, source_vbits, flags,
196 offset, pte_count, mapping_cnptr, mapping_cnvbits,
198 TRACE(KERNEL, SC_MAP, 1);
202 static struct sysret handle_mint(struct capability *root,
203 int cmd, uintptr_t *args)
205 return copy_or_mint(root, args, true);
208 static struct sysret handle_copy(struct capability *root,
209 int cmd, uintptr_t *args)
211 return copy_or_mint(root, args, false);
214 static struct sysret handle_delete(struct capability *root,
215 int cmd, uintptr_t *args)
217 capaddr_t cptr = args[0];
219 return sys_delete(root, cptr, bits);
222 static struct sysret handle_revoke(struct capability *root,
223 int cmd, uintptr_t *args)
225 capaddr_t cptr = args[0];
227 return sys_revoke(root, cptr, bits);
230 static struct sysret handle_get_state(struct capability *root,
231 int cmd, uintptr_t *args)
233 capaddr_t cptr = args[0];
235 return sys_get_state(root, cptr, bits);
238 static struct sysret handle_unmap(struct capability *pgtable,
239 int cmd, uintptr_t *args)
241 capaddr_t cptr = args[0];
246 err = caps_lookup_slot(&dcb_current->cspace.cap, cptr, bits,
247 &mapping, CAPRIGHTS_READ_WRITE);
248 if (err_is_fail(err)) {
249 return SYSRET(err_push(err, SYS_ERR_CAP_NOT_FOUND));
252 TRACE(KERNEL, SC_UNMAP, 0);
253 err = page_mappings_unmap(pgtable, mapping);
254 TRACE(KERNEL, SC_UNMAP, 1);
258 static struct sysret handle_mapping_destroy(struct capability *mapping,
259 int cmd, uintptr_t *args)
262 return SYSRET(SYS_ERR_OK);
265 static struct sysret handle_mapping_modify(struct capability *mapping,
266 int cmd, uintptr_t *args)
268 // Modify flags of (part of) mapped region of frame
269 assert(type_is_mapping(mapping->type));
272 size_t offset = args[0]; // in pages; of first page to modify from first
273 // page in mapped region
274 size_t pages = args[1]; // #pages to modify
275 size_t flags = args[2]; // new flags
276 genvaddr_t va = args[3]; // virtual addr hint
278 errval_t err = page_mappings_modify_flags(mapping, offset, pages, flags, va);
280 return (struct sysret) {
286 /// Different handler for cap operations performed by the monitor
287 static struct sysret monitor_handle_retype(struct capability *kernel_cap,
288 int cmd, uintptr_t *args)
292 capaddr_t root_caddr = args[0];
293 capaddr_t root_vbits = args[1];
295 struct capability *root;
296 err = caps_lookup_cap(&dcb_current->cspace.cap, root_caddr, root_vbits,
297 &root, CAPRIGHTS_READ);
298 if (err_is_fail(err)) {
299 return SYSRET(err_push(err, SYS_ERR_ROOT_CAP_LOOKUP));
302 /* XXX: this hides the first two arguments */
303 return handle_retype_common(root, &args[2], true);
306 static struct sysret monitor_handle_has_descendants(struct capability *kernel_cap,
307 int cmd, uintptr_t *args)
309 struct capability *src = (struct capability *)args;
311 struct cte *next = mdb_find_greater(src, false);
313 return (struct sysret) {
315 .value = (next && is_ancestor(&next->cap, src)),
319 static struct sysret monitor_handle_delete_last(struct capability *kernel_cap,
320 int cmd, uintptr_t *args)
322 capaddr_t root_caddr = args[0];
323 uint8_t root_vbits = args[1];
324 capaddr_t target_caddr = args[2];
325 uint8_t target_vbits = args[3];
326 capaddr_t retcn_caddr = args[4];
327 uint8_t retcn_vbits = args[5];
328 cslot_t ret_slot = args[6];
330 return sys_monitor_delete_last(root_caddr, root_vbits, target_caddr,
331 target_vbits, retcn_caddr, retcn_vbits,
335 static struct sysret monitor_handle_delete_foreigns(struct capability *kernel_cap,
336 int cmd, uintptr_t *args)
338 capaddr_t caddr = args[0];
339 uint8_t bits = args[1];
340 return sys_monitor_delete_foreigns(caddr, bits);
343 static struct sysret monitor_handle_revoke_mark_tgt(struct capability *kernel_cap,
344 int cmd, uintptr_t *args)
346 capaddr_t root_caddr = args[0];
347 uint8_t root_vbits = args[1];
348 capaddr_t target_caddr = args[2];
349 uint8_t target_vbits = args[3];
351 return sys_monitor_revoke_mark_tgt(root_caddr, root_vbits,
352 target_caddr, target_vbits);
355 static struct sysret monitor_handle_revoke_mark_rels(struct capability *kernel_cap,
356 int cmd, uintptr_t *args)
358 struct capability *base = (struct capability*)args;
360 return sys_monitor_revoke_mark_rels(base);
363 static struct sysret monitor_handle_delete_step(struct capability *kernel_cap,
364 int cmd, uintptr_t *args)
366 capaddr_t ret_cn_addr = args[0];
367 capaddr_t ret_cn_bits = args[1];
368 capaddr_t ret_slot = args[2];
369 return sys_monitor_delete_step(ret_cn_addr, ret_cn_bits, ret_slot);
372 static struct sysret monitor_handle_clear_step(struct capability *kernel_cap,
373 int cmd, uintptr_t *args)
375 capaddr_t ret_cn_addr = args[0];
376 capaddr_t ret_cn_bits = args[1];
377 capaddr_t ret_slot = args[2];
378 return sys_monitor_clear_step(ret_cn_addr, ret_cn_bits, ret_slot);
381 static struct sysret monitor_handle_register(struct capability *kernel_cap,
382 int cmd, uintptr_t *args)
384 capaddr_t ep_caddr = args[0];
386 TRACE(KERNEL, SC_MONITOR_REGISTER, 0);
387 struct sysret sr = sys_monitor_register(ep_caddr);
388 TRACE(KERNEL, SC_MONITOR_REGISTER, 1);
392 static struct sysret monitor_get_core_id(struct capability *kernel_cap,
393 int cmd, uintptr_t *args)
395 return (struct sysret){.error = SYS_ERR_OK, .value = my_core_id};
398 static struct sysret monitor_get_arch_id(struct capability *kernel_cap,
399 int cmd, uintptr_t *args)
401 return (struct sysret){.error = SYS_ERR_OK, .value = apic_id};
404 static struct sysret monitor_identify_cap_common(struct capability *kernel_cap,
405 struct capability *root,
408 capaddr_t cptr = args[0];
409 uint8_t bits = args[1];
411 struct capability *retbuf = (void *)args[2];
413 return sys_monitor_identify_cap(root, cptr, bits, retbuf);
416 static struct sysret monitor_identify_cap(struct capability *kernel_cap,
417 int cmd, uintptr_t *args)
419 return monitor_identify_cap_common(kernel_cap, &dcb_current->cspace.cap, args);
422 static struct sysret monitor_identify_domains_cap(struct capability *kernel_cap,
423 int cmd, uintptr_t *args)
427 capaddr_t root_caddr = args[0];
428 capaddr_t root_vbits = args[1];
430 struct capability *root;
431 err = caps_lookup_cap(&dcb_current->cspace.cap, root_caddr, root_vbits,
432 &root, CAPRIGHTS_READ);
434 if (err_is_fail(err)) {
435 return SYSRET(err_push(err, SYS_ERR_ROOT_CAP_LOOKUP));
438 /* XXX: this hides the first two arguments */
439 return monitor_identify_cap_common(kernel_cap, root, &args[2]);
442 static struct sysret monitor_cap_has_relations(struct capability *kernel_cap,
443 int cmd, uintptr_t *args)
445 capaddr_t caddr = args[0];
446 uint8_t vbits = args[1];
447 uint8_t mask = args[2];
449 return sys_cap_has_relations(caddr, vbits, mask);
452 static struct sysret monitor_remote_relations(struct capability *kernel_cap,
453 int cmd, uintptr_t *args)
455 capaddr_t root_addr = args[0];
456 int root_bits = args[1];
457 capaddr_t cptr = args[2];
459 uint8_t relations = args[4] & 0xFF;
460 uint8_t mask = (args[4] >> 8) & 0xFF;
462 return sys_monitor_remote_relations(root_addr, root_bits, cptr, bits,
467 static struct sysret monitor_create_cap(struct capability *kernel_cap,
468 int cmd, uintptr_t *args)
470 /* XXX: Get the raw metadata of the capability to create */
471 struct capability *src = (struct capability *)args;
472 int pos = ROUND_UP(sizeof(struct capability), sizeof(uint64_t)) / sizeof(uint64_t);
474 /* Cannot create null caps */
475 if (src->type == ObjType_Null) {
476 return SYSRET(SYS_ERR_ILLEGAL_DEST_TYPE);
479 coreid_t owner = args[pos + 3];
481 /* For certain types, only foreign copies can be created here */
482 if ((src->type == ObjType_EndPoint || src->type == ObjType_Dispatcher
483 || src->type == ObjType_Kernel || src->type == ObjType_IRQTable)
484 && owner == my_core_id)
486 return SYSRET(SYS_ERR_ILLEGAL_DEST_TYPE);
489 /* Create the cap in the destination */
490 capaddr_t cnode_cptr = args[pos];
491 int cnode_vbits = args[pos + 1];
492 size_t slot = args[pos + 2];
493 assert(cnode_vbits < CPTR_BITS);
495 return SYSRET(caps_create_from_existing(&dcb_current->cspace.cap,
496 cnode_cptr, cnode_vbits,
500 static struct sysret monitor_copy_existing(struct capability *kernel_cap,
501 int cmd, uintptr_t *args)
503 /* XXX: Get the raw metadata of the capability to create */
504 struct capability *src = (struct capability *)args;
505 int pos = ROUND_UP(sizeof(struct capability), sizeof(uint64_t)) / sizeof(uint64_t);
507 capaddr_t cnode_cptr = args[pos];
508 int cnode_vbits = args[pos + 1];
509 size_t slot = args[pos + 2];
511 return sys_monitor_copy_existing(src, cnode_cptr, cnode_vbits, slot);
514 static struct sysret monitor_nullify_cap(struct capability *kernel_cap,
515 int cmd, uintptr_t *args)
517 capaddr_t cptr = args[0];
518 uint8_t bits = args[1];
520 return sys_monitor_nullify_cap(cptr, bits);
523 static struct sysret monitor_handle_sync_timer(struct capability *kern_cap,
524 int cmd, uintptr_t *args)
526 uint64_t synctime = args[0];
527 return sys_monitor_handle_sync_timer(synctime);
530 static struct sysret handle_frame_identify(struct capability *to,
531 int cmd, uintptr_t *args)
533 // Return with physical base address of frame
534 // XXX: pack size into bottom bits of base address
535 assert(to->type == ObjType_Frame || to->type == ObjType_DevFrame);
536 assert((to->u.frame.base & BASE_PAGE_MASK) == 0);
538 struct frame_identity *fi = (struct frame_identity *)args[0];
540 if (!access_ok(ACCESS_WRITE, (lvaddr_t)fi, sizeof(struct frame_identity))) {
541 return SYSRET(SYS_ERR_INVALID_USER_BUFFER);
544 fi->base = get_address(to);
545 fi->bytes = get_size(to);
547 return SYSRET(SYS_ERR_OK);
550 static struct sysret handle_vnode_identify(struct capability *to,
551 int cmd, uintptr_t *args)
553 // Return with physical base address of the VNode
554 // XXX: pack type into bottom bits of base address
555 assert(to->type == ObjType_VNode_x86_64_pml4 ||
556 to->type == ObjType_VNode_x86_64_pdpt ||
557 to->type == ObjType_VNode_x86_64_pdir ||
558 to->type == ObjType_VNode_x86_64_ptable);
560 uint64_t base_addr = 0;
562 case ObjType_VNode_x86_64_pml4:
563 base_addr = (uint64_t)(to->u.vnode_x86_64_pml4.base);
565 case ObjType_VNode_x86_64_pdpt:
566 base_addr = (uint64_t)(to->u.vnode_x86_64_pdpt.base);
568 case ObjType_VNode_x86_64_pdir:
569 base_addr = (uint64_t)(to->u.vnode_x86_64_pdir.base);
571 case ObjType_VNode_x86_64_ptable:
572 base_addr = (uint64_t)(to->u.vnode_x86_64_ptable.base);
577 assert((base_addr & BASE_PAGE_MASK) == 0);
579 return (struct sysret) {
581 .value = (genpaddr_t)base_addr | ((uint8_t)to->type),
586 static struct sysret handle_io(struct capability *to, int cmd, uintptr_t *args)
588 uint64_t port = args[0];
589 uint64_t data = args[1]; // ignored for input
591 return sys_io(to, cmd, port, data);
594 static struct sysret handle_vmread(struct capability *to,
595 int cmd, uintptr_t *args)
597 #if defined(__k1om__) || defined(CONFIG_SVM)
598 return SYSRET(SYS_ERR_VMKIT_UNAVAIL);
601 struct dcb *dcb = to->u.dispatcher.dcb;
602 lpaddr_t vmcs_base = dcb->guest_desc.vmcb.cap.u.frame.base;
603 if (vmcs_base != vmptrst()) {
604 err = SYS_ERR_VMKIT_VMX_VMFAIL_INVALID;
606 err = vmread(args[0], (lvaddr_t *)args[1]);
612 static struct sysret handle_vmwrite(struct capability *to,
613 int cmd, uintptr_t *args)
615 #if defined(__k1om__) || defined(CONFIG_SVM)
616 return SYSRET(SYS_ERR_VMKIT_UNAVAIL);
619 struct dcb *dcb = to->u.dispatcher.dcb;
620 lpaddr_t vmcs_base = dcb->guest_desc.vmcb.cap.u.frame.base;
621 if (vmcs_base != vmptrst()) {
622 err = SYS_ERR_VMKIT_VMX_VMFAIL_INVALID;
624 err = vmwrite(args[0], args[1]);
630 static struct sysret handle_vmptrld(struct capability *to,
631 int cmd, uintptr_t *args)
633 #if defined(__k1om__) || defined(CONFIG_SVM)
634 return SYSRET(SYS_ERR_VMKIT_UNAVAIL);
637 struct dcb *dcb = to->u.dispatcher.dcb;
638 lpaddr_t vmcs_base = dcb->guest_desc.vmcb.cap.u.frame.base;
639 err = vmptrld(vmcs_base);
644 static struct sysret handle_vmclear(struct capability *to,
645 int cmd, uintptr_t *args)
647 #if defined(__k1om__) || defined(CONFIG_SVM)
648 return SYSRET(SYS_ERR_VMKIT_UNAVAIL);
651 struct dcb *dcb = to->u.dispatcher.dcb;
652 lpaddr_t vmcs_base = dcb->guest_desc.vmcb.cap.u.frame.base;
653 err = vmclear(vmcs_base);
// Invocation handler: configure a dispatcher as a VM guest.
// Looks up and validates four caps from the caller's cspace (monitor
// endpoint, guest PML4, VMCB frame, control frame), copies them into the
// guest descriptor, initializes the VMCS and flags the DCB as a VM guest.
// NOTE(review): several interior lines (opening brace, local decls, some
// error-return bodies, #ifdef guards) were lost in the paste; the remaining
// lines are preserved byte-for-byte below.
660 handle_dispatcher_setup_guest (struct capability *to, int cmd, uintptr_t *args)
663 struct dcb *dcb = to->u.dispatcher.dcb;
665 capaddr_t epp = args[0];
666 capaddr_t vnodep = args[1];
667 capaddr_t vmcbp = args[2];
668 capaddr_t ctrlp = args[3];
670 // 0. Enable VM extensions
671 err = vmkit_enable_virtualization();
672 if (err != SYS_ERR_OK) {
676 // 1. Check arguments
677 // Monitor endpoint for exits of this geust
680 err = caps_lookup_slot(&dcb_current->cspace.cap, epp, CPTR_BITS,
681 &ep_cte, CAPRIGHTS_READ_WRITE);
682 if (err_is_fail(err)) {
// endpoint must really be an endpoint before it is installed as the
// guest's monitor channel
685 if (ep_cte->cap.type != ObjType_EndPoint) {
686 return SYSRET(SYS_ERR_VMKIT_ENDPOINT_INVALID);
688 err = caps_copy_to_cte(&dcb->guest_desc.monitor_ep, ep_cte, false, 0, 0);
689 if (err_is_fail(err)) {
690 return SYSRET(err_push(err, SYS_ERR_VMKIT_ENDPOINT));
// guest vspace root: must be an x86_64 PML4 vnode
694 struct capability *vnode_cap;
695 err = caps_lookup_cap(&dcb_current->cspace.cap, vnodep, CPTR_BITS,
696 &vnode_cap, CAPRIGHTS_WRITE);
697 if (err_is_fail(err)) {
700 if (vnode_cap->type != ObjType_VNode_x86_64_pml4) {
701 return SYSRET(SYS_ERR_DISP_VSPACE_INVALID);
704 assert(vnode_cap->type == ObjType_VNode_x86_64_pml4);
// VMCB frame: at least one page of real frame memory
707 struct cte *vmcb_cte;
708 err = caps_lookup_slot(&dcb_current->cspace.cap, vmcbp, CPTR_BITS,
709 &vmcb_cte, CAPRIGHTS_READ_WRITE);
710 if (err_is_fail(err)) {
713 if (vmcb_cte->cap.type != ObjType_Frame ||
714 vmcb_cte->cap.u.frame.bytes < BASE_PAGE_SIZE) {
715 return SYSRET(SYS_ERR_VMKIT_VMCB_INVALID);
717 err = caps_copy_to_cte(&dcb->guest_desc.vmcb, vmcb_cte, false, 0, 0);
718 if (err_is_fail(err)) {
719 return SYSRET(err_push(err, SYS_ERR_VMKIT_VMCB));
// guest control frame: same validation as the VMCB frame
723 struct cte *ctrl_cte;
724 err = caps_lookup_slot(&dcb_current->cspace.cap, ctrlp, CPTR_BITS,
725 &ctrl_cte, CAPRIGHTS_READ_WRITE);
726 if (err_is_fail(err)) {
729 if (ctrl_cte->cap.type != ObjType_Frame ||
730 ctrl_cte->cap.u.frame.bytes < BASE_PAGE_SIZE) {
731 return SYSRET(SYS_ERR_VMKIT_CTRL_INVALID);
733 err = caps_copy_to_cte(&dcb->guest_desc.ctrl, ctrl_cte, false, 0, 0);
734 if (err_is_fail(err)) {
735 return SYSRET(err_push(err, SYS_ERR_VMKIT_CTRL));
739 // Initialize VMCS for the single virtual-CPU here instead of in
740 // userspace, where the privilege level is not 0.
741 err = initialize_vmcs(vmcb_cte->cap.u.frame.base);
742 assert(err_is_ok(err));
745 // 2. Set up the target DCB
746 /* dcb->guest_desc.monitor_ep = ep_cap; */
747 dcb->vspace = vnode_cap->u.vnode_x86_64_pml4.base;
748 dcb->is_vm_guest = true;
749 /* dcb->guest_desc.vmcb = vmcb_cap->u.frame.base; */
750 /* dcb->guest_desc.ctrl = (void *)x86_64_phys_to_mem(ctrl_cap->u.frame.base); */
752 return SYSRET(SYS_ERR_OK);
756 static struct sysret monitor_handle_domain_id(struct capability *monitor_cap,
757 int cmd, uintptr_t *args)
759 capaddr_t cptr = args[0];
760 domainid_t domain_id = args[1];
762 return sys_monitor_domain_id(cptr, domain_id);
765 static struct sysret monitor_get_cap_owner(struct capability *monitor_cap,
766 int cmd, uintptr_t *args)
768 capaddr_t root_addr = args[0];
769 uint8_t root_bits = args[1];
770 capaddr_t cptr = args[2];
771 uint8_t bits = args[3];
773 return sys_get_cap_owner(root_addr, root_bits, cptr, bits);
776 static struct sysret monitor_set_cap_owner(struct capability *monitor_cap,
777 int cmd, uintptr_t *args)
779 capaddr_t root_addr = args[0];
780 uint8_t root_bits = args[1];
781 capaddr_t cptr = args[2];
782 uint8_t bits = args[3];
783 coreid_t owner = args[4];
785 return sys_set_cap_owner(root_addr, root_bits, cptr, bits, owner);
788 static struct sysret monitor_lock_cap(struct capability *monitor_cap,
789 int cmd, uintptr_t *args)
791 capaddr_t root_addr = args[0];
792 uint8_t root_bits = args[1];
793 capaddr_t cptr = args[2];
794 uint8_t bits = args[3];
796 return sys_lock_cap(root_addr, root_bits, cptr, bits);
799 static struct sysret monitor_unlock_cap(struct capability *monitor_cap,
800 int cmd, uintptr_t *args)
802 capaddr_t root_addr = args[0];
803 uint8_t root_bits = args[1];
804 capaddr_t cptr = args[2];
805 uint8_t bits = args[3];
807 return sys_unlock_cap(root_addr, root_bits, cptr, bits);
811 * \brief Set up tracing in the kernel
813 static struct sysret handle_trace_setup(struct capability *cap,
814 int cmd, uintptr_t *args)
816 struct capability *frame;
819 /* lookup passed cap */
820 capaddr_t cptr = args[0];
821 err = caps_lookup_cap(&dcb_current->cspace.cap, cptr, CPTR_BITS, &frame,
822 CAPRIGHTS_READ_WRITE);
823 if (err_is_fail(err)) {
827 lpaddr_t lpaddr = gen_phys_to_local_phys(frame->u.frame.base);
828 kernel_trace_buf = local_phys_to_mem(lpaddr);
829 //printf("kernel.%u: handle_trace_setup at %lx\n", apic_id, kernel_trace_buf);
831 // Copy boot applications.
832 trace_copy_boot_applications();
834 return SYSRET(SYS_ERR_OK);
837 static struct sysret handle_irqsrc_get_vector(struct capability * to, int cmd,
841 ret.error = SYS_ERR_OK;
842 ret.value = to->u.irqsrc.vector;
848 static struct sysret handle_irqdest_get_vector(struct capability *to, int cmd,
852 ret.error = SYS_ERR_OK;
853 ret.value = to->u.irqdest.vector;
857 static struct sysret handle_irqdest_connect(struct capability *to, int cmd,
860 return SYSRET(irq_connect(to, args[0]));
863 static struct sysret handle_irq_table_alloc(struct capability *to, int cmd,
868 ret.error = irq_table_alloc(&outvec);
873 static struct sysret handle_irq_table_alloc_dest_cap(struct capability *to, int cmd,
876 return SYSRET(irq_table_alloc_dest_cap(args[0],args[1],args[2]));
880 static struct sysret handle_irq_table_set(struct capability *to, int cmd,
883 return SYSRET(irq_table_set(args[0], args[1]));
886 static struct sysret handle_irq_table_delete(struct capability *to, int cmd,
889 return SYSRET(irq_table_delete(args[0]));
892 static struct sysret handle_ipi_notify_send(struct capability *cap,
893 int cmd, uintptr_t *args)
895 assert(cap->type == ObjType_Notify_IPI);
896 return ipi_raise_notify(cap->u.notify_ipi.coreid, cap->u.notify_ipi.chanid);
899 static struct sysret kernel_ipi_register(struct capability *cap,
900 int cmd, uintptr_t *args)
902 assert(cap->type == ObjType_Kernel);
903 capaddr_t ep = args[0];
904 int chanid = args[1];
905 return SYSRET(ipi_register_notification(ep, chanid));
908 static struct sysret kernel_ipi_delete(struct capability *cap,
909 int cmd, uintptr_t *args)
911 assert(cap->type == ObjType_Kernel);
913 return SYSRET(SYS_ERR_OK);
916 static struct sysret dispatcher_dump_ptables(struct capability *cap,
917 int cmd, uintptr_t *args)
919 assert(cap->type == ObjType_Dispatcher);
921 printf("kernel_dump_ptables\n");
923 struct dcb *dispatcher = cap->u.dispatcher.dcb;
925 paging_dump_tables(dispatcher);
927 return SYSRET(SYS_ERR_OK);
930 static struct sysret dispatcher_dump_capabilities(struct capability *cap,
931 int cmd, uintptr_t *args)
933 assert(cap->type == ObjType_Dispatcher);
935 printf("dispatcher_dump_capabilities\n");
937 struct dcb *dispatcher = cap->u.dispatcher.dcb;
939 errval_t err = debug_print_cababilities(dispatcher);
945 * \brief Activate performance monitoring
947 * Activates performance monitoring.
948 * \param xargs Expected parameters in args:
949 * - performance monitoring type
950 * - mask for given type
952 * - Also count in privileged mode
953 * - Number of counts before overflow. This parameter may be used to
954 * set tradeoff between accuracy and overhead. Set the counter to 0
955 * to deactivate the usage of APIC.
956 * - Endpoint capability to be invoked when the counter overflows.
957 * The buffer associated with the endpoint needs to be large enough
958 * to hold several overflow notifications depending on the overflow
961 static struct sysret performance_counter_activate(struct capability *cap,
962 int cmd, uintptr_t *args)
964 uint8_t event = args[0];
965 uint8_t umask = args[1];
966 uint8_t counter_id = args[2];
967 bool kernel = args[3];
968 uint64_t counter_value = args[4];
969 capaddr_t ep_addr = args[5];
972 struct capability *ep;
973 extern struct capability perfmon_callback_ep;
976 assert(ep_addr!=0 || counter_value==0);
979 perfmon_measure_start(event, umask, counter_id, kernel, counter_value);
983 err = caps_lookup_cap(&dcb_current->cspace.cap, ep_addr, CPTR_BITS, &ep,
985 if(err_is_fail(err)) {
989 perfmon_callback_ep = *ep;
992 return SYSRET(SYS_ERR_OK);
996 * \brief Write counter values.
998 static struct sysret performance_counter_write(struct capability *cap,
999 int cmd, uintptr_t *args)
1001 uint8_t counter_id = args[0];
1002 uint64_t counter_value = args[1];
1004 perfmon_measure_write(counter_id, counter_value);
1005 return SYSRET(SYS_ERR_OK);
1009 * \brief Deactivate performance counters again.
1011 static struct sysret performance_counter_deactivate(struct capability *cap,
1012 int cmd, uintptr_t *args)
1014 perfmon_measure_stop();
1015 return SYSRET(SYS_ERR_OK);
1019 * \brief Return system-wide unique ID of this ID cap.
1021 static struct sysret handle_idcap_identify(struct capability *cap, int cmd,
1025 struct sysret sysret = sys_idcap_identify(cap, &id);
1031 static struct sysret kernel_send_init_ipi(struct capability *cap, int cmd,
1034 coreid_t destination = args[0];
1035 // printk(LOG_DEBUG, "%s:%s:%d: destination=%"PRIuCOREID"\n",
1036 // __FILE__, __FUNCTION__, __LINE__, destination);
1038 apic_send_init_assert(destination, xapic_none);
1039 apic_send_init_deassert();
1041 return SYSRET(SYS_ERR_OK);
1044 static struct sysret kernel_send_start_ipi(struct capability *cap,
1048 coreid_t destination = args[0];
1049 genvaddr_t start_vector = X86_64_REAL_MODE_SEGMENT_TO_REAL_MODE_PAGE(X86_64_REAL_MODE_SEGMENT);
1050 // printk(LOG_DEBUG, "%s:%d: destination=%"PRIuCOREID" start_vector=%"PRIxGENVADDR"\n",
1051 // __FILE__, __LINE__, destination, start_vector);
1053 apic_send_start_up(destination, xapic_none, start_vector);
1055 return SYSRET(SYS_ERR_OK);
1058 static struct sysret kernel_get_global_phys(struct capability *cap,
1063 struct sysret sysret;
1064 sysret.value = mem_to_local_phys((lvaddr_t)global);
1065 sysret.error = SYS_ERR_OK;
1070 static struct sysret kernel_add_kcb(struct capability *kern_cap,
1071 int cmd, uintptr_t *args)
1073 uintptr_t kcb_addr = args[0];
1074 struct kcb *new_kcb = (struct kcb *)kcb_addr;
1076 return sys_kernel_add_kcb(new_kcb);
1079 static struct sysret kernel_remove_kcb(struct capability *kern_cap,
1080 int cmd, uintptr_t *args)
1082 printk(LOG_NOTE, "in kernel_remove_kcb invocation!\n");
1083 uintptr_t kcb_addr = args[0];
1084 struct kcb *to_remove = (struct kcb *)kcb_addr;
1086 return sys_kernel_remove_kcb(to_remove);
1089 static struct sysret kernel_suspend_kcb_sched(struct capability *kern_cap,
1090 int cmd, uintptr_t *args)
1092 printk(LOG_NOTE, "in kernel_suspend_kcb_sched invocation!\n");
1093 return sys_kernel_suspend_kcb_sched((bool)args[0]);
1096 static struct sysret handle_kcb_identify(struct capability *to,
1097 int cmd, uintptr_t *args)
1099 return sys_handle_kcb_identify(to, (struct frame_identity *)args[0]);
// Dispatch table mapping (capability type, command) -> invocation handler.
// Indexed first by ObjType, then by the per-type command enum.
// NOTE(review): several `[ObjType_*] = {` group-header lines and closing
// `},` lines were lost in the paste (e.g. the headers for ObjType_Frame,
// ObjType_CNode, ObjType_IPI, ObjType_IO, ObjType_ID); the surviving lines
// are preserved byte-for-byte — restore the headers from upstream.
1103 typedef struct sysret (*invocation_handler_t)(struct capability *to,
1104 int cmd, uintptr_t *args);
1106 static invocation_handler_t invocations[ObjType_Num][CAP_MAX_CMD] = {
1107 [ObjType_Dispatcher] = {
1108 [DispatcherCmd_Setup] = handle_dispatcher_setup,
1109 [DispatcherCmd_Properties] = handle_dispatcher_properties,
// NOTE(review): SetupGuest was likely guarded by a preprocessor
// conditional on the dropped line 1110 — confirm upstream.
1111 [DispatcherCmd_SetupGuest] = handle_dispatcher_setup_guest,
1113 [DispatcherCmd_DumpPTables] = dispatcher_dump_ptables,
1114 [DispatcherCmd_DumpCapabilities] = dispatcher_dump_capabilities,
1115 [DispatcherCmd_Vmread] = handle_vmread,
1116 [DispatcherCmd_Vmwrite] = handle_vmwrite,
1117 [DispatcherCmd_Vmptrld] = handle_vmptrld,
1118 [DispatcherCmd_Vmclear] = handle_vmclear,
1120 [ObjType_KernelControlBlock] = {
1121 [FrameCmd_Identify] = handle_kcb_identify,
// NOTE(review): missing [ObjType_Frame] = { header (dropped line ~1123)
1124 [FrameCmd_Identify] = handle_frame_identify,
1126 [ObjType_DevFrame] = {
1127 [FrameCmd_Identify] = handle_frame_identify,
// NOTE(review): missing [ObjType_CNode] = { header (dropped line ~1129)
1130 [CNodeCmd_Copy] = handle_copy,
1131 [CNodeCmd_Mint] = handle_mint,
1132 [CNodeCmd_Retype] = handle_retype,
1133 [CNodeCmd_Retype2] = handle_retype2,
1134 [CNodeCmd_Create] = handle_create,
1135 [CNodeCmd_Delete] = handle_delete,
1136 [CNodeCmd_Revoke] = handle_revoke,
1137 [CNodeCmd_GetState] = handle_get_state,
1139 [ObjType_VNode_x86_64_pml4] = {
1140 [VNodeCmd_Identify] = handle_vnode_identify,
1141 [VNodeCmd_Map] = handle_map,
1142 [VNodeCmd_Unmap] = handle_unmap,
1144 [ObjType_VNode_x86_64_pdpt] = {
1145 [VNodeCmd_Identify] = handle_vnode_identify,
1146 [VNodeCmd_Map] = handle_map,
1147 [VNodeCmd_Unmap] = handle_unmap,
1149 [ObjType_VNode_x86_64_pdir] = {
1150 [VNodeCmd_Identify] = handle_vnode_identify,
1151 [VNodeCmd_Map] = handle_map,
1152 [VNodeCmd_Unmap] = handle_unmap,
1154 [ObjType_VNode_x86_64_ptable] = {
1155 [VNodeCmd_Identify] = handle_vnode_identify,
1156 [VNodeCmd_Map] = handle_map,
1157 [VNodeCmd_Unmap] = handle_unmap,
1159 [ObjType_Frame_Mapping] = {
1160 [MappingCmd_Destroy] = handle_mapping_destroy,
1161 [MappingCmd_Modify] = handle_mapping_modify,
1163 [ObjType_DevFrame_Mapping] = {
1164 [MappingCmd_Destroy] = handle_mapping_destroy,
1165 [MappingCmd_Modify] = handle_mapping_modify,
1167 [ObjType_VNode_x86_64_pml4_Mapping] = {
1168 [MappingCmd_Destroy] = handle_mapping_destroy,
1169 [MappingCmd_Modify] = handle_mapping_modify,
1171 [ObjType_VNode_x86_64_pdpt_Mapping] = {
1172 [MappingCmd_Destroy] = handle_mapping_destroy,
1173 [MappingCmd_Modify] = handle_mapping_modify,
1175 [ObjType_VNode_x86_64_pdir_Mapping] = {
1176 [MappingCmd_Destroy] = handle_mapping_destroy,
1177 [MappingCmd_Modify] = handle_mapping_modify,
1179 [ObjType_VNode_x86_64_ptable_Mapping] = {
1180 [MappingCmd_Destroy] = handle_mapping_destroy,
1181 [MappingCmd_Modify] = handle_mapping_modify,
// monitor-only operations, invoked through the Kernel cap
1183 [ObjType_Kernel] = {
1184 [KernelCmd_Get_core_id] = monitor_get_core_id,
1185 [KernelCmd_Get_arch_id] = monitor_get_arch_id,
1186 [KernelCmd_Identify_cap] = monitor_identify_cap,
1187 [KernelCmd_Identify_domains_cap] = monitor_identify_domains_cap,
1188 [KernelCmd_Remote_relations] = monitor_remote_relations,
1189 [KernelCmd_Cap_has_relations] = monitor_cap_has_relations,
1190 [KernelCmd_Create_cap] = monitor_create_cap,
1191 [KernelCmd_Copy_existing] = monitor_copy_existing,
1192 [KernelCmd_Nullify_cap] = monitor_nullify_cap,
1193 [KernelCmd_Setup_trace] = handle_trace_setup,
1194 [KernelCmd_Register] = monitor_handle_register,
1195 [KernelCmd_Domain_Id] = monitor_handle_domain_id,
1196 [KernelCmd_Get_cap_owner] = monitor_get_cap_owner,
1197 [KernelCmd_Set_cap_owner] = monitor_set_cap_owner,
1198 [KernelCmd_Lock_cap] = monitor_lock_cap,
1199 [KernelCmd_Unlock_cap] = monitor_unlock_cap,
1200 [KernelCmd_Retype] = monitor_handle_retype,
1201 [KernelCmd_Has_descendants] = monitor_handle_has_descendants,
1202 [KernelCmd_Delete_last] = monitor_handle_delete_last,
1203 [KernelCmd_Delete_foreigns] = monitor_handle_delete_foreigns,
1204 [KernelCmd_Revoke_mark_target] = monitor_handle_revoke_mark_tgt,
1205 [KernelCmd_Revoke_mark_relations] = monitor_handle_revoke_mark_rels,
1206 [KernelCmd_Delete_step] = monitor_handle_delete_step,
1207 [KernelCmd_Clear_step] = monitor_handle_clear_step,
1208 [KernelCmd_Sync_timer] = monitor_handle_sync_timer,
1209 [KernelCmd_IPI_Register] = kernel_ipi_register,
1210 [KernelCmd_IPI_Delete] = kernel_ipi_delete,
1211 [KernelCmd_GetGlobalPhys] = kernel_get_global_phys,
1212 [KernelCmd_Add_kcb] = kernel_add_kcb,
1213 [KernelCmd_Remove_kcb] = kernel_remove_kcb,
1214 [KernelCmd_Suspend_kcb_sched] = kernel_suspend_kcb_sched,
// NOTE(review): missing [ObjType_IPI] = { header (dropped line ~1216)
1217 [IPICmd_Send_Start] = kernel_send_start_ipi,
1218 [IPICmd_Send_Init] = kernel_send_init_ipi,
1220 [ObjType_IRQDest] = {
1221 [IRQDestCmd_Connect] = handle_irqdest_connect,
1222 [IRQDestCmd_GetVector] = handle_irqdest_get_vector
1224 [ObjType_IRQSrc] = {
1225 [IRQSrcCmd_GetVector] = handle_irqsrc_get_vector,
1227 [ObjType_IRQTable] = {
1228 [IRQTableCmd_Alloc] = handle_irq_table_alloc,
1229 [IRQTableCmd_AllocDestCap] = handle_irq_table_alloc_dest_cap,
1230 [IRQTableCmd_Set] = handle_irq_table_set,
1231 [IRQTableCmd_Delete] = handle_irq_table_delete
// NOTE(review): missing [ObjType_IO] = { header (dropped line ~1233)
1234 [IOCmd_Outb] = handle_io,
1235 [IOCmd_Outw] = handle_io,
1236 [IOCmd_Outd] = handle_io,
1237 [IOCmd_Inb] = handle_io,
1238 [IOCmd_Inw] = handle_io,
1239 [IOCmd_Ind] = handle_io
1241 [ObjType_Notify_IPI] = {
1242 [NotifyCmd_Send] = handle_ipi_notify_send
1244 [ObjType_PerfMon] = {
1245 [PerfmonCmd_Activate] = performance_counter_activate,
1246 [PerfmonCmd_Deactivate] = performance_counter_deactivate,
1247 [PerfmonCmd_Write] = performance_counter_write,
// NOTE(review): missing [ObjType_ID] = { header (dropped line ~1249)
1250 [IDCmd_Identify] = handle_idcap_identify,
/**
 * \brief C entry point for system calls.
 *
 * Called only from the assembly stub in entry.S, so there is no prototype
 * in any header; this local declaration exists solely to satisfy
 * missing-prototype warnings for the definition below.
 */
struct sysret sys_syscall(uint64_t syscall, uint64_t arg0, uint64_t arg1,
                          uint64_t *args, uint64_t rflags, uint64_t rip);
1257 struct sysret sys_syscall(uint64_t syscall, uint64_t arg0, uint64_t arg1,
1258 uint64_t *args, uint64_t rflags, uint64_t rip)
1260 struct sysret retval = { .error = SYS_ERR_OK, .value = 0 };
1263 case SYSCALL_INVOKE: /* Handle capability invocation */
1265 // unpack "header" word
1266 capaddr_t invoke_cptr = arg0 >> 32;
1267 uint8_t send_bits = arg0 >> 24;
1268 uint8_t invoke_bits = arg0 >> 16;
1269 uint8_t length_words = arg0 >> 8;
1270 uint8_t flags = arg0;
1272 debug(SUBSYS_SYSCALL, "sys_invoke(0x%x(%d), 0x%lx)\n",
1273 invoke_cptr, invoke_bits, arg1);
1275 // Capability to invoke
1276 struct capability *to = NULL;
1277 retval.error = caps_lookup_cap(&dcb_current->cspace.cap, invoke_cptr,
1278 invoke_bits, &to, CAPRIGHTS_READ);
1279 if (err_is_fail(retval.error)) {
1284 assert(to->type < ObjType_Num);
1286 // Endpoint cap, do LMP
1287 if (to->type == ObjType_EndPoint) {
1288 struct dcb *listener = to->u.endpoint.listener;
1289 assert(listener != NULL);
1291 if (listener->disp == 0) {
1292 retval.error = SYS_ERR_LMP_NO_TARGET;
1296 /* limit length of message from buggy/malicious sender */
1297 length_words = MIN(length_words, LMP_MSG_LENGTH);
1299 // does the sender want to yield their timeslice on success?
1300 bool sync = flags & LMP_FLAG_SYNC;
1301 // does the sender want to yield to the target if undeliverable?
1302 bool yield = flags & LMP_FLAG_YIELD;
1303 // is the cap (if present) to be deleted on send?
1304 bool give_away = flags & LMP_FLAG_GIVEAWAY;
1306 // try to deliver message
1307 retval.error = lmp_deliver(to, dcb_current, args, length_words,
1308 arg1, send_bits, give_away);
/* Switch to receiver upon successful delivery with sync flag,
 * or (some cases of) unsuccessful delivery with yield flag */
1312 enum err_code err_code = err_no(retval.error);
1313 if ((sync && err_is_ok(retval.error)) ||
1314 (yield && (err_code == SYS_ERR_LMP_BUF_OVERFLOW
1315 || err_code == SYS_ERR_LMP_CAPTRANSFER_DST_CNODE_LOOKUP
1316 || err_code == SYS_ERR_LMP_CAPTRANSFER_DST_CNODE_INVALID
1317 || err_code == SYS_ERR_LMP_CAPTRANSFER_DST_SLOT_OCCUPIED))
1319 if (err_is_fail(retval.error)) {
1320 struct dispatcher_shared_generic *current_disp =
1321 get_dispatcher_shared_generic(dcb_current->disp);
1322 struct dispatcher_shared_generic *listener_disp =
1323 get_dispatcher_shared_generic(listener->disp);
1324 debug(SUBSYS_DISPATCH, "LMP failed; %.*s yields to %.*s: %u\n",
1325 DISP_NAME_LEN, current_disp->name,
1326 DISP_NAME_LEN, listener_disp->name, err_code);
1329 // special-case context switch: ensure correct state in current DCB
1330 dispatcher_handle_t handle = dcb_current->disp;
1331 struct dispatcher_shared_x86_64 *disp =
1332 get_dispatcher_shared_x86_64(handle);
1333 dcb_current->disabled = dispatcher_is_disabled_ip(handle, rip);
1334 struct registers_x86_64 *save_area;
1335 if (dcb_current->disabled) {
1336 save_area = &disp->disabled_save_area;
1338 save_area = &disp->enabled_save_area;
// Invariant: the dispatcher must be enabled here — invocations can only
// be issued from enabled (user-level) context, so a disabled state at
// this point indicates kernel/dispatcher corruption.
1342 if(dcb_current->disabled) {
1343 panic("Dispatcher needs to be enabled for this invocation");
1346 // save calling dispatcher's registers, so that when the dispatcher
1347 // next runs, it has a valid state in the relevant save area.
1348 // Save RIP, RFLAGS, RSP and set RAX (return value) for later resume
1349 save_area->rax = retval.error; // XXX: x86 1st return register
1350 save_area->rip = rip;
1351 save_area->eflags = rflags;
1352 save_area->rsp = user_stack_save;
1354 if(!dcb_current->is_vm_guest) {
1355 /* save and zero FS/GS selectors (they're unmodified by the syscall path) */
1356 __asm ("mov %%fs, %[fs] \n\t"
1357 "mov %%gs, %[gs] \n\t"
1358 "mov %[zero], %%fs \n\t"
1359 "mov %[zero], %%gs \n\t"
1362 [fs] "m" (save_area->fs),
1363 [gs] "m" (save_area->gs),
1369 lpaddr_t lpaddr = gen_phys_to_local_phys(dcb_current->guest_desc.vmcb.cap.u.frame.base);
1371 amd_vmcb_initialize(&vmcb, (void *)local_phys_to_mem(lpaddr));
1372 save_area->fs = amd_vmcb_fs_selector_rd(&vmcb);
1373 save_area->gs = amd_vmcb_gs_selector_rd(&vmcb);
1376 err = vmread(VMX_GUEST_FS_SEL, (uint64_t *)&save_area->fs);
1377 err += vmread(VMX_GUEST_GS_SEL, (uint64_t *)&save_area->gs);
1378 assert(err_is_ok(err));
1381 panic("VM Guests not supported on Xeon Phi");
1385 dispatch(to->u.endpoint.listener);
1386 panic("dispatch returned");
1388 } else { // not endpoint cap, call kernel handler through dispatch table
1389 uint64_t cmd = args[0];
1390 if (cmd >= CAP_MAX_CMD) {
1391 retval.error = SYS_ERR_ILLEGAL_INVOCATION;
1395 // Call the invocation
1396 invocation_handler_t invocation = invocations[to->type][cmd];
1397 if(invocation == NULL) {
1398 printk(LOG_WARN, "invocation not found. type: %"PRIu32", cmd: %"PRIu64"\n",
1400 retval.error = SYS_ERR_ILLEGAL_INVOCATION;
1402 retval = invocation(to, cmd, &args[1]);
1408 // Yield the CPU to the next dispatcher
1410 TRACE(KERNEL, SC_YIELD, 0);
1411 retval = sys_yield((capaddr_t)arg0);
1412 TRACE(KERNEL, SC_YIELD, 1);
1415 // NOP system call for benchmarking purposes
1419 // Debug print system call
1421 TRACE(KERNEL, SC_PRINT, 0);
1422 retval.error = sys_print((char *)arg0, arg1);
1423 TRACE(KERNEL, SC_PRINT, 1);
1427 // FIXME: this should be a kernel cap invocation or similarly restricted
1428 case SYSCALL_REBOOT:
1432 case SYSCALL_X86_FPU_TRAP_ON:
1436 case SYSCALL_X86_RELOAD_LDT:
1437 maybe_reload_ldt(dcb_current, true);
1440 // Temporarily suspend the CPU
1441 case SYSCALL_SUSPEND:
1442 TRACE(KERNEL, SC_SUSPEND, 0);
1443 retval = sys_suspend((bool)arg0);
1444 TRACE(KERNEL, SC_SUSPEND, 1);
1447 case SYSCALL_GET_ABS_TIME:
1448 retval = sys_get_absolute_time();
1453 case DEBUG_CONTEXT_COUNTER_RESET:
1454 dispatch_csc_reset();
1457 case DEBUG_CONTEXT_COUNTER_READ:
1458 retval.value = dispatch_get_csc();
1461 case DEBUG_TIMESLICE_COUNTER_READ:
1462 retval.value = kernel_now;
1465 case DEBUG_FLUSH_CACHE:
1469 case DEBUG_SEND_IPI:
1470 apic_send_std_ipi(arg1, args[0], args[1]);
1473 case DEBUG_SET_BREAKPOINT:
1474 debugregs_set_breakpoint(arg1, args[0], args[1]);
1477 case DEBUG_GET_TSC_PER_MS:
1478 retval.value = timing_get_tsc_per_ms();
1481 case DEBUG_GET_APIC_TIMER:
1482 retval.value = apic_timer_get_count();
1485 case DEBUG_GET_APIC_TICKS_PER_SEC:
1486 retval.value = timing_get_apic_ticks_per_sec();
1489 case DEBUG_TRACE_PMEM_CTRL:
1490 #ifdef TRACE_PMEM_CAPS
1492 caps_trace_ctrl(arg1, args[0], args[1]);
1494 caps_trace_ctrl(arg1, 0, 0);
1498 retval.error = SYS_ERR_OK;
1502 case DEBUG_GET_APIC_ID:
1503 retval.value = apic_get_id();
1506 case DEBUG_CREATE_IRQ_SRC_CAP:
1507 retval.error = irq_debug_create_src_cap(arg1, args[0], args[1], args[2]);
1511 printk(LOG_ERR, "invalid sys_debug msg type\n");
1516 printk(LOG_ERR, "sys_syscall: Illegal system call! "
1517 "(0x%lx, 0x%lx, 0x%lx)\n", syscall, arg0, arg1);
1518 retval.error = SYS_ERR_ILLEGAL_SYSCALL;
1522 // If dcb_current got removed, dispatch someone else
1523 if (dcb_current == NULL) {
1524 assert(err_is_ok(retval.error));
1525 dispatch(schedule());
1528 if (syscall == SYSCALL_INVOKE) {
1529 debug(SUBSYS_SYSCALL, "invoke returning 0x%lx 0x%lx\n",
1530 retval.error, retval.value);