 * \brief Boot driver arch-specific parts for x86 CPUs
 * Copyright (c) 2013, ETH Zurich.
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
#include <target/x86/barrelfish_kpi/coredata_target.h>
#include <target/x86_32/barrelfish_kpi/paging_target.h>
#include <target/x86_64/barrelfish_kpi/paging_target.h>
#include <barrelfish/deferred.h>

#include <barrelfish_kpi/asm_inlines_arch.h>

#include <arch/x86/start_aps.h>
#include <target/x86_64/offsets_target.h>
#include <target/x86_32/offsets_target.h>
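/* Size of the monitor URPC channel buffer: room for 32 UMP messages. */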
#define MON_URPC_CHANNEL_LEN  (32 * UMP_MSG_BYTES)
 * start_ap and start_ap_end mark the start and the end
 * of the assembler startup code to be copied
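 *
 * The number of bytes to copy is the difference of the two symbol
 * addresses; the start_aps_*_start functions below memcpy() that range
 * into the real-mode frame. A minimal sketch of the pattern (len is an
 * illustrative local, not part of this file):
 *
 *   size_t len = (uint8_t *)&x86_64_start_ap_end - (uint8_t *)&x86_64_start_ap;
 *   memcpy(real_dest, &x86_64_start_ap, len);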
extern uint64_t x86_64_start_ap;
extern uint64_t x86_64_start_ap_end;
extern uint64_t x86_64_init_ap_absolute_entry;
extern uint64_t x86_64_init_ap_wait;
extern uint64_t x86_64_init_ap_lock;
extern uint64_t x86_64_start;
extern uint64_t x86_64_init_ap_global;

extern uint64_t x86_32_start_ap;
extern uint64_t x86_32_start_ap_end;
extern uint64_t x86_32_init_ap_absolute_entry;
extern uint64_t x86_32_init_ap_wait;
extern uint64_t x86_32_init_ap_lock;
extern uint64_t x86_32_start;
extern uint64_t x86_32_init_ap_global;
volatile uint64_t *ap_dispatch;
extern coreid_t my_arch_id;
extern struct capref ipi_cap;
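/**
 * \brief Look up the APIC ID and CPU type for a given core ID.
 *
 * Descriptive summary of the code below: the APIC ID is either derived
 * directly from the core ID using a fixed stride of 1, 2 or 4, or it is
 * read, together with the CPU type, from the core's hw.processor record
 * in octopus.
 */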
errval_t get_core_info(coreid_t core_id, archid_t* apic_id, enum cpu_type* cpu_type)
    assert(step == 1 || step == 2 || step == 4);
    *apic_id = (core_id * step);
    if (*apic_id == my_arch_id) {

    errval_t err = oct_get(&record, "hw.processor.%" PRIuCOREID, core_id);
    if (err_is_fail(err)) {

    uint64_t apic, enabled, type;
    err = oct_read(record, "_ { apic_id: %d, enabled: %d, type: %d}",
                   &apic, &enabled, &type);
    if (err_is_fail(err)) {

    *apic_id = (archid_t) apic;
    *cpu_type = (enum cpu_type) type;
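/**
 * \brief Select the base page size and binary paths for a CPU type.
 *
 * Returns the architecture's base page size and the paths of the monitor
 * and CPU driver binaries. The defaults below can be overridden via the
 * command-line-supplied names cmd_monitor_binary and cmd_kernel_binary,
 * which get_binary_path() expands relative to the architecture's sbin
 * directory.
 */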
errval_t get_architecture_config(enum cpu_type type,
                                 genpaddr_t *arch_page_size,
                                 const char **monitor_binary,
                                 const char **cpu_binary)
    extern char* cmd_kernel_binary;
    extern char* cmd_monitor_binary;

        *arch_page_size = X86_64_BASE_PAGE_SIZE;
        *monitor_binary = (cmd_monitor_binary == NULL) ?
            "/" BF_BINARY_PREFIX "x86_64/sbin/monitor" :
            get_binary_path("/" BF_BINARY_PREFIX "x86_64/sbin/%s",
        *cpu_binary = (cmd_kernel_binary == NULL) ?
            "/" BF_BINARY_PREFIX "x86_64/sbin/cpu" :
            get_binary_path("/" BF_BINARY_PREFIX "x86_64/sbin/%s",
        *arch_page_size = X86_32_BASE_PAGE_SIZE;
        *monitor_binary = (cmd_monitor_binary == NULL) ?
            "/" BF_BINARY_PREFIX "x86_32/sbin/monitor" :
            get_binary_path("/" BF_BINARY_PREFIX "x86_32/sbin/%s",
        *cpu_binary = (cmd_kernel_binary == NULL) ?
            "/" BF_BINARY_PREFIX "x86_32/sbin/cpu" :
            get_binary_path("/" BF_BINARY_PREFIX "x86_32/sbin/%s",
        *arch_page_size = X86_64_BASE_PAGE_SIZE;
        *monitor_binary = (cmd_monitor_binary == NULL) ?
            "/" BF_BINARY_PREFIX "k1om/sbin/monitor" :
            get_binary_path("/" BF_BINARY_PREFIX "k1om/sbin/%s",
        *cpu_binary = (cmd_kernel_binary == NULL) ?
            "/" BF_BINARY_PREFIX "k1om/sbin/cpu" :
            get_binary_path("/" BF_BINARY_PREFIX "k1om/sbin/%s",

    return SPAWN_ERR_UNKNOWN_TARGET_ARCH;
 * \brief Boot an app core of x86_64 type
 *
 * The processors are started by a sequence of INIT and STARTUP IPIs
 * sent by this function.
 * CMOS writes to the shutdown status byte are used to execute
 * different memory locations.
 *
 * \param core_id APIC ID of the core to try booting
 * \param entry   Entry address for the new kernel in the destination
 *                architecture's lvaddr_t, passed as a genvaddr_t
 *
 * \returns Zero on successful boot, non-zero (error code) on failure
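 *
 * Outline of the sequence implemented below (a sketch of the calls made
 * further down, not additional API): copy the real-mode trampoline into
 * low memory, patch the entry point and the global pointer into it, then
 *
 *   invoke_send_init_ipi(ipi_cap, core_id);          // INIT
 *   invoke_send_start_ipi(ipi_cap, core_id, entry);  // STARTUP (SIPI)
 *
 * and finally spin until the new core reports AP_STARTED.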
int start_aps_x86_64_start(uint8_t core_id, genvaddr_t entry)
    DEBUG("%s:%d: start_aps_x86_64_start\n", __FILE__, __LINE__);

    // Copy the startup code to the real-mode address
    uint8_t *real_src = (uint8_t *) &x86_64_start_ap;
    uint8_t *real_end = (uint8_t *) &x86_64_start_ap_end;

    struct capref bootcap;

    struct capref realmodecap;

    realmodecap.cnode = cnode_root;
    realmodecap.slot = ROOTCN_SLOT_ARGCN;
    err = slot_alloc(&bootcap);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "Allocating a new slot");

    err = cap_copy(bootcap, realmodecap);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "Copying capability");

    struct acpi_rpc_client* acl = get_acpi_rpc_client();
    err = acl->vtbl.mm_realloc_range_proxy(acl, 16, 0x0,
                                           &bootcap, &error_code);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "mm_realloc_range_proxy failed.");

    if (err_is_fail(error_code)) {
        USER_PANIC_ERR(error_code, "mm_realloc_range_proxy returned failure.");
    err = vspace_map_one_frame(&real_base, 1 << 16, bootcap, NULL, NULL);
    uint8_t* real_dest = (uint8_t*) real_base + X86_64_REAL_MODE_LINEAR_OFFSET;

    memcpy(real_dest, real_src, real_end - real_src);
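    /* The variables patched below live inside the trampoline blob that was
     * just copied, so each pointer is computed by taking the symbol's
     * offset from the start of the blob and adding it to the copy's
     * destination. A minimal sketch of the pattern (illustrative only):
     *
     *   ptr = real_dest + ((lpaddr_t) &symbol - (lpaddr_t) &x86_64_start_ap);
     */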
    /* Pointer to the entry point called from init_ap.S */
    volatile uint64_t *absolute_entry_ptr = (volatile uint64_t *)
        (lpaddr_t) &x86_64_init_ap_absolute_entry -
        (lpaddr_t) &x86_64_start_ap

    // Copy the address of the function start (in boot.S) into the
    // long-mode assembler code to be able to perform an absolute jump
    *absolute_entry_ptr = entry;

    // Pointer to the global variable shared amongst all kernels
    volatile uint64_t *ap_global = (volatile uint64_t *)
        (lpaddr_t) &x86_64_init_ap_global -
        (lpaddr_t) &x86_64_start_ap
    struct monitor_blocking_rpc_client *mc =
        get_monitor_blocking_rpc_client();
    err = mc->vtbl.get_global_paddr(mc, &global);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "invoke spawn core");
        return err_push(err, MON_ERR_SPAWN_CORE);

    *ap_global = (uint64_t) (genpaddr_t) global;

    // Pointer to the pseudo-lock used to detect boot-up of the new core
    volatile uint32_t *ap_wait = (volatile uint32_t *)
        ((lpaddr_t) &x86_64_init_ap_wait -
         ((lpaddr_t) &x86_64_start_ap) +

    // Pointer to the lock variable in the realmode code
    volatile uint8_t *ap_lock = (volatile uint8_t *)
        ((lpaddr_t) &x86_64_init_ap_lock -
         ((lpaddr_t) &x86_64_start_ap) +

    *ap_wait = AP_STARTING_UP;
#if defined(__k1om__)

    err = invoke_send_init_ipi(ipi_cap, core_id);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "invoke send init ipi");

#if defined(__k1om__)

    // The x86 MP boot protocol actually asks for the STARTUP IPI to be
    // sent twice
    err = invoke_send_start_ipi(ipi_cap, core_id, entry);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "invoke sipi");
    // Give the new core a bit of time to start up and set the lock
    for (uint64_t i = 0; i < STARTUP_TIMEOUT; i++) {

    // If the lock is set, the core has been started; otherwise assume
    // that a core with this APIC ID doesn't exist.

    while (*ap_wait != AP_STARTED);
    trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_CORE_START_REQUEST_ACK, core_id);
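/**
 * \brief Boot an app core of x86_32 type.
 *
 * Follows the same trampoline-copy and INIT/STARTUP IPI sequence as
 * start_aps_x86_64_start() above, using the x86_32 symbols and offsets.
 */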
int start_aps_x86_32_start(uint8_t core_id, genvaddr_t entry)
    DEBUG("%s:%d: start_aps_x86_32_start\n", __FILE__, __LINE__);

    // Copy the startup code to the real-mode address
    uint8_t *real_src = (uint8_t *) &x86_32_start_ap;
    uint8_t *real_end = (uint8_t *) &x86_32_start_ap_end;

    struct capref bootcap;
    struct acpi_rpc_client* acl = get_acpi_rpc_client();

    errval_t err = acl->vtbl.mm_realloc_range_proxy(acl, 16, 0x0,
                                                    &bootcap, &error_code);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "mm_realloc_range_proxy failed.");

    if (err_is_fail(error_code)) {
        USER_PANIC_ERR(error_code, "mm_realloc_range_proxy returned failure.");
    err = vspace_map_one_frame(&real_base, 1 << 16, bootcap, NULL, NULL);
    uint8_t* real_dest = (uint8_t*) real_base + X86_32_REAL_MODE_LINEAR_OFFSET;

    memcpy(real_dest, real_src, real_end - real_src);
    /* Pointer to the entry point called from init_ap.S */
    volatile uint64_t *absolute_entry_ptr = (volatile uint64_t *)
        (lpaddr_t) &x86_32_init_ap_absolute_entry -
        (lpaddr_t) &x86_32_start_ap

    // Copy the address of the function start (in boot.S) into the
    // assembler startup code to be able to perform an absolute jump
    *absolute_entry_ptr = entry;

    // Pointer to the global variable shared amongst all kernels
    volatile uint64_t *ap_global = (volatile uint64_t *)
        (lpaddr_t) &x86_32_init_ap_global -
        (lpaddr_t) &x86_32_start_ap
    struct monitor_blocking_rpc_client *mc =
        get_monitor_blocking_rpc_client();
    err = mc->vtbl.get_global_paddr(mc, &global);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "invoke spawn core");
        return err_push(err, MON_ERR_SPAWN_CORE);

    *ap_global = (uint64_t) (genpaddr_t) global;

    // Pointer to the pseudo-lock used to detect boot-up of the new core
    volatile uint32_t *ap_wait = (volatile uint32_t *)
        ((lpaddr_t) &x86_32_init_ap_wait -
         ((lpaddr_t) &x86_32_start_ap) +

    // Pointer to the lock variable in the realmode code
    volatile uint8_t *ap_lock = (volatile uint8_t *)
        ((lpaddr_t) &x86_32_init_ap_lock -
         ((lpaddr_t) &x86_32_start_ap) +

    *ap_wait = AP_STARTING_UP;
    err = invoke_send_init_ipi(ipi_cap, core_id);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "invoke send init ipi");

    err = invoke_send_start_ipi(ipi_cap, core_id, entry);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "invoke sipi");
    // Give the new core a bit of time to start up and set the lock
    for (uint64_t i = 0; i < STARTUP_TIMEOUT; i++) {

    // If the lock is set, the core has been started; otherwise assume
    // that a core with this APIC ID doesn't exist.

    while (*ap_wait != AP_STARTED);
    trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_CORE_START_REQUEST_ACK, core_id);
 * Allocates memory for the kernel binary.
 *
 * For x86, the app kernel can only be loaded in the first 4 GB
 * of memory. Further, it must not overlap the GB boundaries,
 * i.e. 0-1 GB, 1-2 GB, 2-3 GB, or 3-4 GB.
 *
 * Probably because we identity-map this region during the boot phase,
 * so we can't access anything higher. Not sure about the overlap
 * restriction, though.
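 *
 * To honour both constraints the loop below slides a 1 GB affinity
 * window through the first 4 GB and allocates within it. For example
 * (sizes purely illustrative), a 16 MB kernel allocated with the window
 * [1 GB, 2 GB) is guaranteed to sit below 4 GB and not to straddle a GB
 * boundary.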
static errval_t allocate_kernel_memory(lvaddr_t cpu_binary, genpaddr_t page_size,
                                       struct capref* cpu_memory_cap, size_t* cpu_memory,
                                       struct frame_identity* id)
    *cpu_memory = elf_virtual_size(cpu_binary) + page_size;

    uint64_t old_minbase;
    uint64_t old_maxlimit;
    ram_get_affinity(&old_minbase, &old_maxlimit);
    DEBUG("%s:%d: \n", __FILE__, __LINE__);
    for (uint64_t minbase = 0, maxlimit = (uint64_t)1 << 30;
         minbase < (uint64_t)4 << 30;
         minbase += (uint64_t)1 << 30, maxlimit += (uint64_t)1 << 30) {

        ram_set_affinity(minbase, maxlimit);
        err = frame_alloc_identify(cpu_memory_cap, *cpu_memory, cpu_memory, id);
        if (err_is_fail(err)) {

    USER_PANIC("No memory in the first 4GB, cannot continue booting cores");

    ram_set_affinity(old_minbase, old_maxlimit);
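/**
 * \brief Relocate the loaded CPU driver image to its physical load address.
 *
 * Summary of the code below: the kernel runs one page into the allocated
 * frame, so relocations are applied against frameid.base + arch_page_size.
 * 64-bit images are processed via their SHT_RELA and SHT_DYNSYM sections,
 * 32-bit images via SHT_REL and the corresponding symbol table.
 */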
static errval_t relocate_cpu_binary(lvaddr_t cpu_binary,
                                    struct Elf64_Ehdr *cpu_head,
                                    struct elf_allocate_state state,
                                    struct frame_identity frameid,
                                    genpaddr_t arch_page_size)
    switch (cpu_head->e_machine) {

        struct Elf64_Shdr *rela, *symtab, *symhead =
            (struct Elf64_Shdr *)(cpu_binary + (uintptr_t)cpu_head->e_shoff);

        assert(cpu_head->e_shoff != 0);
        rela = elf64_find_section_header_type(symhead, cpu_head->e_shnum, SHT_RELA);
        assert(rela != NULL);
        symtab = elf64_find_section_header_type(symhead, cpu_head->e_shnum, SHT_DYNSYM);
        assert(symtab != NULL);
        elf64_relocate(frameid.base + arch_page_size, state.elfbase,
                       (struct Elf64_Rela *)(uintptr_t)(cpu_binary + rela->sh_offset),
                       (struct Elf64_Sym *)(uintptr_t)(cpu_binary + symtab->sh_offset),
                       state.elfbase, state.vbase);

        struct Elf32_Ehdr *head32 = (struct Elf32_Ehdr *)cpu_binary;

        struct Elf32_Shdr *rel, *symtab, *symhead =
            (struct Elf32_Shdr *)(cpu_binary + (uintptr_t)head32->e_shoff);

        rel = elf32_find_section_header_type(symhead, head32->e_shnum, SHT_REL);

        symtab = elf32_find_section_header_type(symhead, head32->e_shnum,

        assert(symtab != NULL);
        elf32_relocate(frameid.base + arch_page_size, state.elfbase,
                       (struct Elf32_Rel *)(uintptr_t)(cpu_binary + rel->sh_offset),
                       (struct Elf32_Sym *)(uintptr_t)(cpu_binary + symtab->sh_offset),
                       state.elfbase, state.vbase);

    return SPAWN_ERR_UNKNOWN_TARGET_ARCH;
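/**
 * \brief Spawn a CPU driver and monitor on another core.
 *
 * Rough outline of the code below: look up (and cache) the CPU driver and
 * monitor modules, allocate kernel memory below 4 GB, load and relocate
 * the CPU driver ELF, fill in the x86_core_data handoff page at the start
 * of the kernel frame, and finally start the core via the INIT/STARTUP
 * IPI sequence.
 */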
errval_t spawn_xcore_monitor(coreid_t coreid, int hwid,
                             enum cpu_type cpu_type,
                             struct frame_identity urpc_frame_id,
    const char *monitorname = NULL, *cpuname = NULL;
    genpaddr_t arch_page_size;

    err = get_architecture_config(cpu_type, &arch_page_size,
                                  &monitorname, &cpuname);
    assert(err_is_ok(err));

    DEBUG("loading kernel: %s\n", cpuname);
    DEBUG("loading 1st app: %s\n", monitorname);

    // compute size of frame needed and allocate it
    DEBUG("%s:%s:%d: urpc_frame_id.base=%"PRIxGENPADDR"\n",
          __FILE__, __FUNCTION__, __LINE__, urpc_frame_id.base);
    DEBUG("%s:%s:%d: urpc_frame_id.size=0x%" PRIuGENSIZE "\n",
          __FILE__, __FUNCTION__, __LINE__, urpc_frame_id.bytes);
    if (benchmark_flag) {

    static size_t cpu_binary_size;
    static lvaddr_t cpu_binary = 0;
    static genpaddr_t cpu_binary_phys;
    static const char* cached_cpuname = NULL;
    if (cpu_binary == 0) {
        cached_cpuname = cpuname;
        // XXX: Caching these for now, until we have unmap
        err = lookup_module(cpuname, &cpu_binary, &cpu_binary_phys,
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "Can not lookup module");

    // Ensure caching actually works and we're always loading the same
    // binary. If this starts to fail, get rid of the caching.
    assert(strcmp(cached_cpuname, cpuname) == 0);
    static size_t monitor_binary_size;
    static lvaddr_t monitor_binary = 0;
    static genpaddr_t monitor_binary_phys;
    static const char* cached_monitorname = NULL;
    if (monitor_binary == 0) {
        cached_monitorname = monitorname;
        // XXX: Caching these for now, until we have unmap
        err = lookup_module(monitorname, &monitor_binary,
                            &monitor_binary_phys, &monitor_binary_size);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "Can not lookup module");

    // Again, ensure caching actually worked (see above)
    assert(strcmp(cached_monitorname, monitorname) == 0);

    if (benchmark_flag) {
        bench_data->load = bench_tsc() - start;
    struct capref cpu_memory_cap;
    struct frame_identity frameid;

    err = allocate_kernel_memory(cpu_binary, arch_page_size,
                                 &cpu_memory_cap, &cpu_memory, &frameid);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "Can not allocate space for new app kernel.");

    err = cap_mark_remote(cpu_memory_cap);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "Can not mark cap remote.");

    void *cpu_buf_memory;
    err = vspace_map_one_frame(&cpu_buf_memory, cpu_memory, cpu_memory_cap,
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_VSPACE_MAP);

    if (benchmark_flag) {
        bench_data->alloc_cpu = bench_tsc() - start;
    /* Chunk of memory to load the monitor on the app core */
    struct capref spawn_memory_cap;
    struct frame_identity spawn_memory_identity;

    err = frame_alloc_identify(&spawn_memory_cap,
                               X86_CORE_DATA_PAGES * arch_page_size,
                               NULL, &spawn_memory_identity);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_FRAME_ALLOC);

    err = cap_mark_remote(spawn_memory_cap);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "Can not mark cap remote.");

    if (benchmark_flag) {
        bench_data->alloc_mon = bench_tsc() - start;
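    /* Layout of the kernel frame as set up below: the first page holds the
     * struct x86_core_data handoff area, the CPU driver ELF is loaded at
     * cpu_buf_memory + arch_page_size and relocated to run from
     * frameid.base + arch_page_size. */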
    struct elf_allocate_state state;
    state.vbase = (char *)cpu_buf_memory + arch_page_size;
    assert(sizeof(struct x86_core_data) <= arch_page_size);
    state.elfbase = elf_virtual_base(cpu_binary);

    struct Elf64_Ehdr *cpu_head = (struct Elf64_Ehdr *)cpu_binary;
    genvaddr_t cpu_entry;

    err = elf_load(cpu_head->e_machine, elfload_allocate, &state,
                   cpu_binary, cpu_binary_size, &cpu_entry);
    if (err_is_fail(err)) {

    if (benchmark_flag) {
        bench_data->elf_load = bench_tsc() - start;

    err = relocate_cpu_binary(cpu_binary, cpu_head, state, frameid, arch_page_size);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "Can not relocate new kernel.");

    if (benchmark_flag) {
        bench_data->elf_reloc = bench_tsc() - start;
    genvaddr_t cpu_reloc_entry = cpu_entry - state.elfbase
                                 + frameid.base + arch_page_size;
    /* Compute the entry point in the foreign address space */
    forvaddr_t foreign_cpu_reloc_entry = (forvaddr_t) cpu_reloc_entry;

    /* Set up the core_data struct in the new kernel */
    struct x86_core_data *core_data = (struct x86_core_data *)cpu_buf_memory;
    switch (cpu_head->e_machine) {

        core_data->elf.size = sizeof(struct Elf64_Shdr);
        core_data->elf.addr = cpu_binary_phys + (uintptr_t)cpu_head->e_shoff;
        core_data->elf.num = cpu_head->e_shnum;

        core_data->elf.size = sizeof(struct Elf32_Shdr);
        struct Elf32_Ehdr *head32 = (struct Elf32_Ehdr *)cpu_binary;
        core_data->elf.addr = cpu_binary_phys + (uintptr_t)head32->e_shoff;
        core_data->elf.num = head32->e_shnum;

        return SPAWN_ERR_UNKNOWN_TARGET_ARCH;

    core_data->module_start = cpu_binary_phys;
    core_data->module_end = cpu_binary_phys + cpu_binary_size;
    core_data->urpc_frame_base = urpc_frame_id.base;
    assert((1UL << log2ceil(urpc_frame_id.bytes)) == urpc_frame_id.bytes);
    core_data->urpc_frame_bits = log2ceil(urpc_frame_id.bytes);
    core_data->monitor_binary = monitor_binary_phys;
    core_data->monitor_binary_size = monitor_binary_size;
    core_data->memory_base_start = spawn_memory_identity.base;
    assert((1UL << log2ceil(spawn_memory_identity.bytes)) == spawn_memory_identity.bytes);
    core_data->memory_bits = log2ceil(spawn_memory_identity.bytes);
    core_data->src_core_id = disp_get_core_id();
    core_data->src_arch_id = my_arch_id;
    core_data->dst_core_id = coreid;
    struct frame_identity fid;
    err = invoke_frame_identify(kcb, &fid);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "Invoke frame identity for KCB failed. "
                       "Did you add the syscall handler for that architecture?");

    DEBUG("%s:%s:%d: fid.base is 0x%"PRIxGENPADDR"\n",
          __FILE__, __FUNCTION__, __LINE__, fid.base);
    core_data->kcb = (genpaddr_t) fid.base;
#ifdef CONFIG_FLOUNDER_BACKEND_UMP_IPI
    core_data->chan_id = chanid;

    if (cmdline != NULL) {
        // Copy as much of the command line as will fit
        snprintf(core_data->kernel_cmdline, sizeof(core_data->kernel_cmdline),
                 "%s %s", cpuname, cmdline);
        // Ensure termination
        core_data->kernel_cmdline[sizeof(core_data->kernel_cmdline) - 1] = '\0';

        DEBUG("%s:%s:%d: %s\n", __FILE__, __FUNCTION__, __LINE__, core_data->kernel_cmdline);
    /* Invoke kernel capability to boot new core */
    if (cpu_type == CPU_X86_64 || cpu_type == CPU_K1OM) {
        start_aps_x86_64_start(hwid, foreign_cpu_reloc_entry);

    else if (cpu_type == CPU_X86_32) {
        start_aps_x86_32_start(hwid, foreign_cpu_reloc_entry);
    // XXX: Should not delete the remote caps?
    err = cap_destroy(spawn_memory_cap);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "cap_destroy failed");

    err = vspace_unmap(cpu_buf_memory);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "vspace unmap CPU driver memory failed");

    err = cap_destroy(cpu_memory_cap);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "cap_destroy failed");