]
--
+-- Build a Pleco file
+--
+plecoFile :: Options -> String -> HRule
+plecoFile opts file =
+ let arch = optArch opts
+ cfile = file ++ ".c"
+ hfile = "/include/trace_definitions/" ++ file ++ ".h"
+ jsonfile = "/trace_definitions/" ++ file ++ ".json"
+ in
+ Rules [ Rule [In InstallTree "tools" "/bin/pleco",
+ In SrcTree "src" (file++".pleco"),
+ Out arch hfile,
+ Out arch jsonfile,
+ Out arch cfile ],
+ compileGeneratedCFile opts cfile
+ ]
+
+--
-- Build a Hamlet file
--
hamletFile :: Options -> String -> HRule
"xmplthc",
"unixsock",
"bcache",
- "replay" ],
+ "replay",
+ "empty"],
arch <- allArchitectures
] ++
--- /dev/null
+/** \file
+ * \brief Empty interface
+ */
+
+/*
+ * Copyright (c) 2012, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+interface empty "Empty interface" {
+ message unused(); // Make compiler work
+};
\ No newline at end of file
message multihop_message(vci_t vci, uint8 direction, uint8 flags, uint32 ack, uint8 payload[size]);
message multihop_cap_send(vci_t vci, uint8 direction, capid_t capid, errval err, caprep cap, bool null_cap);
+
+ /* Tracing Framework */
+
+ // Notify a core that it should prepare the tracing state. The origin core
+ // is the one who initiated the preparation request.
+ message trace_prepare(coreid origin_core);
+
+ // Notify the core who initiated the preparation that it is finished.
+ message trace_prepare_finished();
+
+ // Notify core 0 that you want to make a time measurement (Network Time
+ // Protocol). The origin core is the one who initiated the trace preparation.
+ call trace_measure(coreid_t origin_core, uint64 t0);
+
+ // The response to a measurement call from a core.
+ response trace_measure_ack(coreid origin_core, uint64 t0, uint64 t1, uint64 t2);
+
+ /* bfscope - the tracing server */
+
+ // Forward a trace-flush command to the monitor on the core on which bfscope
+ // is running.
+ call bfscope_flush_send_forward(iref iref);
+
+ // This message is sent, once bfscope has finished flushing.
+ response bfscope_flush_ack_forward();
};
// cap transfer
call multihop_cap_send(vci_t vci, uint8 direction, errval err, cap cap, capid_t capid);
+
+ /* Tracing Framework */
+
+ // Notify a core that it should prepare the tracing state. The origin core
+ // is the one who initiated the preparation request.
+ message trace_prepare(coreid origin_core);
+
+ // Notify the core who initiated the preparation that it is finished.
+ message trace_prepare_finished();
+
+ /* bfscope - the tracing server */
+
+ // Send a message to bfscope, to notify that it should flush
+ call bfscope_flush_send(iref iref);
+
+ // Notify the initiator of the flush request that it has been completed.
+ response bfscope_flush_ack();
};
#include <barrelfish/dispatcher_arch.h>
#include <barrelfish/curdispatcher_arch.h>
#else // IN_KERNEL
+#include <string.h>
#include <arch/x86/apic.h> // XXX!
#endif // IN_KERNEL
#endif // __x86_64__
#endif
/**
- * \brief Constants for trace subsystems and events.
- *
- * Please try and keep this under control. Each subsystem should be
- * preceeded by two blank lines and followed by its events. If events
- * within a subsystem need to be grouped with whitespace then use a
- * single blank line.
- *
+ * The constants for the subsystems and events are generated from the file
+ * trace_definitions/trace_defs.pleco that can be included after compiling with
+ * #include <trace_definitions/trace_defs.h>
+ * .
*/
-#define TRACE_SUBSYS_KERNEL 0xFFFF
-#define TRACE_EVENT_CSWITCH 0xCCCC
-#define TRACE_EVENT_BZERO 0xB0
-#define TRACE_EVENT_TIMER 0x1
-#define TRACE_EVENT_TIMER_SYNC 0x2
-
-#define TRACE_EVENT_SCHED_MAKE_RUNNABLE 0xED00
-#define TRACE_EVENT_SCHED_REMOVE 0xED01
-#define TRACE_EVENT_SCHED_YIELD 0xED02
-#define TRACE_EVENT_SCHED_SCHEDULE 0xED03
-#define TRACE_EVENT_SCHED_CURRENT 0xED04
-
-#define TRACE_SUBSYS_THREADS 0xEEEE
-
-#define TRACE_EVENT_BARRIER_ENTER 0x0100
-#define TRACE_EVENT_BARRIER_LEAVE 0x0101
-
-#define TRACE_EVENT_MUTEX_LOCK_ENTER 0x0200
-#define TRACE_EVENT_MUTEX_LOCK_LEAVE 0x0201
-#define TRACE_EVENT_MUTEX_LOCK_NESTED_ENTER 0x0202
-#define TRACE_EVENT_MUTEX_LOCK_NESTED_LEAVE 0x0203
-#define TRACE_EVENT_MUTEX_TRYLOCK 0x0204
-#define TRACE_EVENT_MUTEX_UNLOCK 0x0205
-
-#define TRACE_EVENT_COND_WAIT_ENTER 0x0300
-#define TRACE_EVENT_COND_WAIT_LEAVE 0x0301
-#define TRACE_EVENT_COND_SIGNAL 0x0302
-#define TRACE_EVENT_COND_BROADCAST 0x0303
-
-#define TRACE_EVENT_SEM_WAIT_ENTER 0x0400
-#define TRACE_EVENT_SEM_WAIT_LEAVE 0x0401
-#define TRACE_EVENT_SEM_TRYWAIT 0x0402
-#define TRACE_EVENT_SEM_POST 0x0403
-
-
-#define TRACE_SUBSYS_MEMSERV 0xA000
-#define TRACE_EVENT_ALLOC 0x0001
-
-
-#define TRACE_SUBSYS_MONITOR 0xB000
-#define TRACE_EVENT_SPAN0 0x0000
-#define TRACE_EVENT_SPAN1 0x0001
-#define TRACE_EVENT_SPAN 0x0002
-#define TRACE_EVENT_PCREQ 0x0003
-#define TRACE_EVENT_PCREPLY 0x0004
-#define TRACE_EVENT_PCREQ_INTER 0x0005
-#define TRACE_EVENT_PCREPLY_INTER 0x0006
-#define TRACE_EVENT_URPC_BLOCK 0x0007
-#define TRACE_EVENT_URPC_UNBLOCK 0x0008
-#define TRACE_EVENT_REMOTE_CAP_RETYPE 0x0009
-#define TRACE_EVENT_REMOTE_CAP_RETYPE_RETRY 0x0010
-#define TRACE_EVENT_REMOTE_CAP_RETYPE_MSG 0x0011
-#define TRACE_EVENT_REMOTE_CAP_RETYPE_END 0x0012
-#define TRACE_EVENT_POLLING 0xBBBB
-
-
-#define TRACE_SUBSYS_CHIPS 0xC000
-#define TRACE_EVENT_CHIPS_LISTENCB 0x0001
-
-
-#define TRACE_SUBSYS_BFLIB 0xBFBF
-
-
-#define TRACE_SUBSYS_TWEED 0x2000
-#define TRACE_EVENT_TWEED_START 0x0000
-#define TRACE_EVENT_TWEED_END 0x0001
-#define TRACE_EVENT_STEAL 0x0002
-#define TRACE_EVENT_STEAL_END 0x0003
-#define TRACE_EVENT_WAIT 0x0004
-#define TRACE_EVENT_WAIT_END 0x0005
-#define TRACE_EVENT_LOCKING 0x0006
-#define TRACE_EVENT_LOCKING_END 0x0007
-
-
-#define TRACE_SUBSYS_ROUTE 0x3000
-#define TRACE_EVENT_BCAST_WITH_CCAST_SEND 0x0001
-#define TRACE_EVENT_BCAST_WITH_CCAST 0x0002
-#define TRACE_EVENT_RECV_BCAST_WITH_CCAST 0x0003
-#define TRACE_EVENT_RECV_CCAST 0x0004
-#define TRACE_EVENT_ROUTE_BENCH_START 0x0005
-#define TRACE_EVENT_ROUTE_BENCH_STOP 0x0006
-#define TRACE_EVENT_ROUTE_SEND_PING 0x0007
-#define TRACE_EVENT_ROUTE_SEND_PONG 0x0008
-#define TRACE_EVENT_ROUTE_RECV_PING 0x0009
-#define TRACE_EVENT_ROUTE_RECV_PONG 0x000A
-#define TRACE_EVENT_ROUTE_POLL 0x000B
-
-
-#define TRACE_SUBSYS_BENCH 0x1234
-#define TRACE_EVENT_PCBENCH 0x0000
-#define TRACE_EVENT_RXPING 0x0001
-#define TRACE_EVENT_RXPONG 0x0002
-
-
-#define TRACE_SUBSYS_BOMP 0x4000
-#define TRACE_EVENT_BOMP_START 0x0001
-#define TRACE_EVENT_BOMP_STOP 0x0002
-#define TRACE_EVENT_BOMP_ITER 0x0003
-
-
-#define TRACE_SUBSYS_BARRIERS 0x5000
-#define TRACE_EVENT_BARRIERS_START 0X0001
-#define TRACE_EVENT_BARRIERS_STOP 0X0002
-#define TRACE_EVENT_BARRIERS_BARRIER_WAIT 0X0003
-#define TRACE_EVENT_BARRIERS_CENTRAL_REQ 0X0004
-#define TRACE_EVENT_BARRIERS_CENTRAL_REP 0X0005
-#define TRACE_EVENT_BARRIERS_TREE_REQ 0X0006
-#define TRACE_EVENT_BARRIERS_TREE_REP 0X0007
-#define TRACE_EVENT_BARRIERS_DIST 0X0008
-#define TRACE_EVENT_BARRIERS_SEND 0X0009
-#define TRACE_EVENT_BARRIERS_POLL1 0X000A
-#define TRACE_EVENT_BARRIERS_POLL2 0X000B
-#define TRACE_EVENT_BARRIERS_HEAP_REQ 0X000C
-#define TRACE_EVENT_BARRIERS_HEAP_REP 0X000D
-#define TRACE_EVENT_BARRIERS_SEQ_BCAST_REQ 0X000E
-#define TRACE_EVENT_BARRIERS_SEQ_BCAST_RECV 0X000F
-#define TRACE_EVENT_BARRIERS_TREE_BCAST_REQ 0X0010
-#define TRACE_EVENT_BARRIERS_TREE_BCAST_RECV 0X0011
-
-/* Following constants are used in network subsystem. */
-#define TRACE_SUBSYS_NET 0x6000
-#define TRACE_EVENT_NET_START 0X0001
-#define TRACE_EVENT_NET_STOP 0X0002
-#define TRACE_EVENT_NET_NI_AI 0X0012 /* added, 0 */
-#define TRACE_EVENT_NET_NI_I 0X0010 /* added, 0 */
-#define TRACE_EVENT_NET_NI_A 0X0003 /* added, pkt data location */
-#define TRACE_EVENT_NET_NI_FILTER_FRAG 0X0018 /* added, pkt data location */
-#define TRACE_EVENT_NET_NI_FILTER_EX_1 0X0015 /* added, pkt data location */
-#define TRACE_EVENT_NET_NI_ARP 0X0011 /* added, pkt data location */
-#define TRACE_EVENT_NET_NI_FILTER_EX_2 0X0016 /* added, pkt data location */
-#define TRACE_EVENT_NET_NI_PKT_CPY_1 0X0019 /* added, pkt data location */
-#define TRACE_EVENT_NET_NI_PKT_CPY_2 0X001A /* added, pkt data location */
-#define TRACE_EVENT_NET_NI_PKT_CPY_3 0X001B /* added, pkt data location */
-#define TRACE_EVENT_NET_NI_PKT_CPY_4 0X001C /* added, pkt data location */
-
-#define TRACE_EVENT_NET_NI_PKT_CPY 0X0017 /* added, pkt data location */
-#define TRACE_EVENT_NET_NI_P 0X0004 /* added, pbuf_id */
-#define TRACE_EVENT_NET_NI_S 0X0005 /* added, pbuf_id */
-#define TRACE_EVENT_NET_AI_A 0X0006 /* added, pbuf_id */
-#define TRACE_EVENT_NET_AI_P 0X0007 /* added, pbuf_addr */
-#define TRACE_EVENT_NET_AO_C 0X0008 /* added, pbuf_addr */
-#define TRACE_EVENT_NET_AO_Q 0X0009 /* added, pbuf_addr */
-#define TRACE_EVENT_NET_AO_S 0X000A /* added, pbuf_addr (as client_data ) */
-#define TRACE_EVENT_NET_NO_A 0X000B /* added, client_data (pbuf_address in lwip) */
-#define TRACE_EVENT_NET_NO_S 0X000C /* added, e1000n.c client_data (pbuf_address in lwip) */
-
-/* FIXME: Add the timings of when does NIC gets TX_done */
-#define TRACE_EVENT_NET_NO_TXD 0X0013 /* yet to add */
-#define TRACE_EVENT_NET_AIR_R 0x000E /* added, pbuf_addr (TX DONE in app) */
-
-/* Response flow */
-#define TRACE_EVENT_NET_AOR_S 0x000D /* added, pbuf_id ( register_pbuf from APP)*/
-#define TRACE_EVENT_NET_NIR_REG_PBUF 0x0014 /* commented pbuf_id ( register_pbuf in NIC)*/
-
-#define TRACE_SUBSYS_MULTIHOP 0x7000
-#define TRACE_EVENT_MULTIHOP_BENCH_START 0x0001
-#define TRACE_EVENT_MULTIHOP_BENCH_STOP 0x0002
-#define TRACE_EVENT_MULTIHOP_MESSAGE_SEND 0x0003
-#define TRACE_EVENT_MULTIHOP_MESSAGE_RECEIVE 0x0004
-
-/* Following constants are used in network benchmark. */
-#define TRACE_SUBSYS_BNET 0x8000
-#define TRACE_EVENT_BNET_START 0x0001
-#define TRACE_EVENT_BNET_STOP 0x0002
-#define TRACE_EVENT_BNET_DRV_SEE 0x0003
-#define TRACE_EVENT_BNET_APP_SEE 0x0004
-#define TRACE_EVENT_BNET_DRV_INT 0x0005
-#define TRACE_EVENT_BNET_DRV_POLL 0x0006
-#define TRACE_EVENT_BNET_YIELD 0x0007
-#define TRACE_EVENT_BNET_I 0x0008
-
-
-/* Following constans are used for profiling modified stack */
-#define TRACE_SUBSYS_NNET 0x9000
-#define TRACE_EVENT_NNET_START 0x0001
-#define TRACE_EVENT_NNET_STOP 0x0002
-#define TRACE_EVENT_NNET_RXDRVSEE 0x0003 // Driver saw pkg (RX)
-#define TRACE_EVENT_NNET_RXESVSEE 0x0004 // Ethersrv saw pkg
-#define TRACE_EVENT_NNET_RXESVFRGDONE 0x0005 // Ethersrv checked frag
-#define TRACE_EVENT_NNET_RXESVAPPFDONE 0x0006 // Ethersrv app filtered
-#define TRACE_EVENT_NNET_RXESVAPPCSTART 0x0007 // Ethersrv app c2u started
-#define TRACE_EVENT_NNET_RXESVCOPIED 0x0008 // Ethersrv copied pkg
-#define TRACE_EVENT_NNET_RXESVSPPDONE 0x000D // Ethersrv spp produce done
-#define TRACE_EVENT_NNET_RXESVAPPNOTIF 0x0009 // Ethersrv app notify
-#define TRACE_EVENT_NNET_RXLWIINCOM 0x000A // LWIP handle_incoming_
-#define TRACE_EVENT_NNET_RXLWIRECH 0x000B // LWIP call rec_handler
-#define TRACE_EVENT_NNET_RXAPPRCV 0x000C // APP received
-
-#define TRACE_EVENT_NNET_TXAPPSNT 0x0020 // APP sent
-#define TRACE_EVENT_NNET_TXLWISEE 0x0021 // LWIP idc_send_packet
-#define TRACE_EVENT_NNET_TXLWIBFFENCE 0x0029 // LWIP before mfence
-#define TRACE_EVENT_NNET_TXLWIAFFENCE 0x002A // LWIP after mfence
-#define TRACE_EVENT_NNET_TXLWIFLUSHED 0x002B // LWIP payload flushed
-#define TRACE_EVENT_NNET_TXLWIBDESC 0x002C // LWIP bufferdesc fetched
-#define TRACE_EVENT_NNET_TXLWISPPSND 0x0022 // LWIP spp produced
-#define TRACE_EVENT_NNET_TXLWISPPIDX 0x0023 // LWIP update spp index
-#define TRACE_EVENT_NNET_TXLWITXWORK 0x002D // LWIP pending TX work
-#define TRACE_EVENT_NNET_TXLWINOTIF 0x0024 // LWIP notify driver
-#define TRACE_EVENT_NNET_TXESVNOTIF 0x0025 // Ethersrv notify recieved
-#define TRACE_EVENT_NNET_TXESVSPOW 0x002E // Ethersrv send_pkt_on_w..
-#define TRACE_EVENT_NNET_TXESVSSPOW 0x0026 // Ethersrv send_sng_pkt..
-#define TRACE_EVENT_NNET_TXDRVADD 0x0027 // Driver add pkg (TX)
-#define TRACE_EVENT_NNET_TXDRVSEE 0x0028 // Driver saw pkg done (TX)
-#define TRACE_EVENT_NNET_TX_TCP_WRITE 0x0030 // tcp_write done
-#define TRACE_EVENT_NNET_TX_TCP_OUTPUT 0x0031 // tcp_output done
-#define TRACE_EVENT_NNET_TX_TCP_RECV 0x0032 // tcp_recved done
-#define TRACE_EVENT_NNET_TX_TCP_FREE 0x0033 // tx pbuf freed
-#define TRACE_EVENT_NNET_TX_MEMP 0x0034 // tx pbuf memp start
-#define TRACE_EVENT_NNET_TX_MEMP_D 0x0035 // tx pbuf memp done
+#include <trace_definitions/trace_defs.h>
#define TRACE_EVENT(s,e,a) ((uint64_t)(s)<<48|(uint64_t)(e)<<32|(a))
#define TRACE_COREID_LIMIT 64
#define TRACE_EVENT_SIZE 16
#define TRACE_MAX_EVENTS 8000 // max number of events
+#define TRACE_MAX_APPLICATIONS 128
#define TRACE_PERCORE_BUF_SIZE 0x1ff00
// (TRACE_EVENT_SIZE * TRACE_MAX_EVENTS + (sizeof (struct trace_buffer flags)))
-#define TRACE_BUF_SIZE (TRACE_COREID_LIMIT*TRACE_PERCORE_BUF_SIZE)
+#define TRACE_BUF_SIZE (TRACE_COREID_LIMIT*TRACE_PERCORE_BUF_SIZE) // Size of all trace buffers
+// Size of the array storing which subsystems are enabled
+#define TRACE_SUBSYS_ENABLED_BUF_SIZE (TRACE_NUM_SUBSYSTEMS * sizeof(bool))
-#if defined(__x86_64__)
-#define TRACE_TIMESTAMP() rdtsc()
+#define TRACE_ALLOC_SIZE (TRACE_BUF_SIZE + TRACE_SUBSYS_ENABLED_BUF_SIZE)
-// XXX These are defined in xapic.dev, not sure we should repro them here
-#ifndef xapic_none
-#define xapic_none 0x00
-#define xapic_self 0x01
-#define xapic_all_inc 0x02
-#define xapic_all_exc 0x03
-#endif
+#define TRACE_MAX_BOOT_APPLICATIONS 16
-#define IPI_TRACE_COMPLETE 62
-#define IPI_TRACE_START 63
-#define TRACE_COMPLETE_IPI_IRQ (62-32)
+
+#if defined(__x86_64__)
+#define TRACE_TIMESTAMP() rdtsc()
/*
* \brief compare and set. If the value at address
} u;
};
+// Trace information about an application
+struct trace_application {
+
+ char name[8]; ///< Name of the application
+ uint64_t dcb; ///< DCB address of the application
+};
+
/// Trace buffer
struct trace_buffer {
volatile uintptr_t head_index;
// ... flags...
struct trace_buffer *master; // Pointer to the trace master
volatile bool running;
- volatile bool done_rundown; // Core specific
+ volatile bool autoflush; // Are we flushing automatically?
volatile uint64_t start_trigger;
volatile uint64_t stop_trigger;
volatile uint64_t stop_time;
+ int64_t t_offset; // Time offset relative to core 0
uint64_t t0; // Start time of trace
uint64_t duration; // Max trace duration
- uint8_t ipi_dest; // Core for completion IPI
// ... events ...
struct trace_event events[TRACE_MAX_EVENTS];
+
+ // ... applications ...
+ volatile uint8_t num_applications;
+ struct trace_application applications[TRACE_MAX_APPLICATIONS];
};
#ifndef IN_KERNEL
uint64_t stop_trigger,
uint64_t duration);
errval_t trace_wait(void);
-size_t trace_dump(char *buf, size_t buflen);
+size_t trace_dump(char *buf, size_t buflen, int *number_of_events);
+void trace_flush(struct event_closure callback);
+void trace_set_autoflush(bool enabled);
+errval_t trace_prepare(struct event_closure callback);
errval_t trace_my_setup(void);
+errval_t trace_set_subsys_enabled(uint16_t subsys, bool enabled);
+errval_t trace_set_all_subsys_enabled(bool enabled);
+
/**
* \brief Compute fixed trace buffer address according to
* given core_id
do {
i = buf->head_index;
- /* XXX For now we're going to stop tracing if the buffer fills up
- This will give us a useful one-shot tracing mode, rather than
- a continuous circular buffer */
- if (i == TRACE_MAX_EVENTS) return i;
- nw = i + 1;
- //nw = (i + 1) % TRACE_MAX_EVENTS;
+ if (buf->tail_index - buf->head_index == 1 ||
+ (buf->tail_index == 0 && (buf->head_index == TRACE_MAX_EVENTS-1))) {
+ // Buffer is full, overwrite last event
+ return i;
+ }
+
+ nw = (i + 1) % TRACE_MAX_EVENTS;
} while (!trace_cas(&buf->head_index, i, nw));
}
struct trace_buffer *trace_buf = (struct trace_buffer*) (kernel_trace_buf
+ my_core_id * TRACE_PERCORE_BUF_SIZE);
- if (!trace_buf->done_rundown) {
- /* We set it before we trace_snapshot since we only want one,
- * and in particular because trace_snapshot uses
- * trace_write_event... */
- trace_buf->done_rundown = true;
- }
(void) trace_reserve_and_fill_slot(ev, trace_buf);
if (ev->u.raw == master->stop_trigger ||
ev->timestamp > master->stop_time)) {
master->stop_trigger = 0;
master->running = false;
-#ifdef __x86_64__
- apic_send_std_ipi(master->ipi_dest, xapic_none, IPI_TRACE_COMPLETE);
-#endif
}
-#endif // __x86_64__
+#endif // TRACING_EXISTS
return SYS_ERR_OK;
}
-#else
+
+// Call this function when a new application has been created.
+// dcb: pointer to the domain control block struct of the application.
+static inline errval_t trace_new_application(char *new_application_name, uintptr_t dcb)
+{
+#ifdef TRACING_EXISTS
+
+ if (kernel_trace_buf == 0 || my_core_id >= TRACE_COREID_LIMIT) {
+ return TRACE_ERR_NO_BUFFER;
+ }
+
+ struct trace_buffer *trace_buf = (struct trace_buffer*) (kernel_trace_buf
+ + my_core_id * TRACE_PERCORE_BUF_SIZE);
+
+ int i;
+ int new_value;
+ do {
+ i = trace_buf->num_applications;
+
+ if (i == TRACE_MAX_APPLICATIONS)
+ return -1; // TODO error code
+
+ new_value = i + 1;
+
+ } while (!trace_cas((uintptr_t*)&trace_buf->num_applications, i, new_value));
+
+ trace_buf->applications[i].dcb = (uint64_t) dcb;
+ memcpy(&trace_buf->applications[i].name, new_application_name, 8);
+
+#endif // TRACING_EXISTS
+ return SYS_ERR_OK;
+}
+
+// During boot of a core the trace buffer is not yet mapped, but we still want
+// to store the applications that are started at this time. This would be fixed
+// if the kernel were responsible for mapping the trace buffer, but currently
+// it is the job of the monitor.
+
+extern struct trace_application kernel_trace_boot_applications[];
+extern int kernel_trace_num_boot_applications;
+
+static inline void trace_new_boot_application(char* name, uintptr_t dcb)
+{
+ if (kernel_trace_num_boot_applications < TRACE_MAX_BOOT_APPLICATIONS) {
+
+
+ kernel_trace_boot_applications[kernel_trace_num_boot_applications].dcb = (uint64_t) dcb;
+ memcpy(kernel_trace_boot_applications[kernel_trace_num_boot_applications].name, name, 8);
+
+ kernel_trace_num_boot_applications++;
+ }
+}
+
+static inline void trace_copy_boot_applications(void)
+{
+
+ int i;
+ for (i = 0; i < kernel_trace_num_boot_applications; i++) {
+ trace_new_application(kernel_trace_boot_applications[i].name, kernel_trace_boot_applications[i].dcb);
+ }
+}
+#else // !IN_KERNEL
/*
static inline coreid_t get_my_core_id(void)
// Make sure the trigger event is first in the buffer
(void) trace_reserve_and_fill_slot(ev, trace_buf);
-#ifdef __x86_64__
- sys_debug_send_ipi(xapic_none, xapic_all_inc, IPI_TRACE_START);
-#endif
return SYS_ERR_OK;
} else {
return SYS_ERR_OK;
}
}
- if (!trace_buf->done_rundown) {
- /* We set it before we trace_snapshot since we only want one,
- * and in particular because sys_rundown uses
- * trace_write_event... */
- trace_buf->done_rundown = true;
- }
(void) trace_reserve_and_fill_slot(ev, trace_buf);
if (ev->u.raw == master->stop_trigger ||
ev->timestamp > master->stop_time) {
master->stop_trigger = 0;
master->running = false;
-#ifdef __x86_64__
- sys_debug_send_ipi(master->ipi_dest, xapic_none, IPI_TRACE_COMPLETE);
-#endif
}
-#endif // __x86_64__
+#endif // TRACING_EXISTS
return SYS_ERR_OK;
}
-#endif
+#endif // !IN_KERNEL
#endif
}
+#ifdef TRACING_EXISTS
+/// Is the subsystem enabled, i.e. should we log events for it?
+static inline bool trace_is_subsys_enabled(uint16_t subsys)
+{
+#ifdef CONFIG_TRACE
+ assert(subsys < TRACE_NUM_SUBSYSTEMS);
+
+ uint8_t* base_pointer;
+#ifdef IN_KERNEL
+ base_pointer = (uint8_t*) kernel_trace_buf;
+#else // !IN_KERNEL
+ base_pointer = (uint8_t*) trace_buffer_master;
+#endif // !IN_KERNEL
+
+ if (base_pointer == NULL) {
+ // The trace buffer is not even mapped.
+ return false;
+ }
+
+ bool* subsystem_states = (bool*) (base_pointer + TRACE_BUF_SIZE);
+
+ return subsystem_states[subsys];
+#else // !CONFIG_TRACE
+ return false;
+#endif // !CONFIG_TRACE
+}
+#endif // TRACING_EXISTS
+
static inline errval_t trace_event(uint16_t subsys, uint16_t event, uint32_t arg)
{
#ifdef CONFIG_TRACE
+
+ // Check if the subsystem is enabled, i.e. we log events for it
+ if (!trace_is_subsys_enabled(subsys)) {
+ return SYS_ERR_OK;
+ }
+
struct trace_event ev;
ev.timestamp = TRACE_TIMESTAMP();
ev.u.ev.subsystem = subsys;
*/
lvaddr_t kernel_trace_buf = 0;
-/**
- * Put rundown of current state in the trace buffer
- * This would typically be called at the start of tracing
- */
-void trace_snapshot(void)
-{
- // DCBS
- struct dcb *dcb = dcbs_list;
- struct trace_event ev;
- errval_t err;
-
- while (dcb != NULL) {
- struct dispatcher_shared_generic *disp =
- get_dispatcher_shared_generic(dcb->disp);
- //printf("%d DCB: %p %.*s\n", my_core_id, dcb, DISP_NAME_LEN, disp->name);
- // Top bit of timestamp is flag to indicate dcb rundown events
- ev.timestamp = (1ULL << 63) | (uintptr_t)dcb;
- assert(sizeof(ev.u.raw) <= sizeof(disp->name));
- memcpy(&ev.u.raw, disp->name, sizeof(ev.u.raw));
- err = trace_write_event(&ev);
- dcb = dcb->next_all;
- }
-
- // TO DO: currently running domain
-}
+struct trace_application kernel_trace_boot_applications[TRACE_MAX_BOOT_APPLICATIONS];
+int kernel_trace_num_boot_applications = 0;
init_dcb = spawn_app_init(core_data, APP_INIT_PROG_NAME, app_alloc_phys);
}
- // Add to tracing
- init_dcb->next_all = dcbs_list;
- dcbs_list = init_dcb;
-
// Should not return
dispatch(init_dcb);
panic("Error spawning init!");
#include <barrelfish_kpi/dispatcher_shared_target.h>
#include <asmoffsets.h>
#include <trace/trace.h>
+#include <trace_definitions/trace_defs.h>
#include <exec.h>
#ifdef __scc__
# include <rck.h>
kernel_now += kernel_timeslice;
}
tsc_lasttime = tsc_now;
- trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_TIMER, kernel_now);
+ trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_TIMER, kernel_now);
wakeup_check(kernel_now);
} else if (vector == APIC_ERROR_INTERRUPT_VECTOR) {
printk(LOG_ERR, "APIC error interrupt fired!\n"); // XXX: do something?
#else
ipi_handle_notify();
#endif
- } else if (vector == 63) {
- // Record snapshot of current state
- trace_snapshot();
-
- // Record the running domain at the start of a trace
-#ifdef TRACE_CSWITCH
- trace_event(TRACE_SUBSYS_KERNEL,
- TRACE_EVENT_CSWITCH,
- (uint32_t)(lvaddr_t)dcb_current & 0xFFFFFFFF);
-#endif
-
- apic_eoi();
}
#if 0
else if (irq >= 0 && irq <= 15) { // classic PIC device interrupt
lpaddr_t lpaddr = gen_phys_to_local_phys(frame->u.frame.base);
kernel_trace_buf = local_phys_to_mem(lpaddr);
//printf("kernel.%u: handle_trace_setup at %lx\n", apic_id, kernel_trace_buf);
+
+ // Copy boot applications.
+ trace_copy_boot_applications();
+
return SYSRET(SYS_ERR_OK);
}
#include <barrelfish_kpi/dispatcher_shared_target.h>
#include <asmoffsets.h>
#include <trace/trace.h>
+#include <trace_definitions/trace_defs.h>
#include <arch/x86/timing.h>
#include <arch/x86/syscall.h>
#include <arch/x86/ipi_notify.h>
apic_eoi();
assert(kernel_ticks_enabled);
update_kernel_now();
- trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_TIMER, kernel_now);
+ trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_TIMER, kernel_now);
wakeup_check(kernel_now);
} else if (vector == APIC_PERFORMANCE_INTERRUPT_VECTOR) {
// Handle performance counter overflow
} else if (vector == APIC_INTER_CORE_VECTOR) {
apic_eoi();
ipi_handle_notify();
- } else if (vector == 63) {
- // Record snapshot of current state
- trace_snapshot();
-
- // Record the running domain at the start of a trace
-#ifdef TRACE_CSWITCH
-//#if TRACE_N_BM
-
-//#else
- trace_event(TRACE_SUBSYS_KERNEL,
- TRACE_EVENT_CSWITCH,
- (uint32_t)(lvaddr_t)dcb_current & 0xFFFFFFFF);
-//#endif // TRACE_N_BM
-#endif
-
- apic_eoi();
}
#if 0
else if (irq >= 0 && irq <= 15) { // classic PIC device interrupt
lpaddr_t lpaddr = gen_phys_to_local_phys(frame->u.frame.base);
kernel_trace_buf = local_phys_to_mem(lpaddr);
//printf("kernel.%u: handle_trace_setup at %lx\n", apic_id, kernel_trace_buf);
+
+ // Copy boot applications.
+ trace_copy_boot_applications();
+
return SYSRET(SYS_ERR_OK);
}
#include <mdb/mdb.h>
#include <mdb/mdb_tree.h>
#include <trace/trace.h>
+#include <trace_definitions/trace_defs.h>
#include <wakeup.h>
/// Ignore remote capabilities if this is defined
/* Set the type specific fields and insert into #dest_caps */
switch(type) {
case ObjType_Frame:
- trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_BZERO, 1);
+ trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 1);
// XXX: SCC hack, while we don't have a devframe allocator
if(lpaddr + ((lpaddr_t)1 << bits) < PADDR_SPACE_LIMIT) {
memset((void*)lvaddr, 0, (lvaddr_t)1 << bits);
printk(LOG_WARN, "Allocating RAM at 0x%" PRIxLPADDR
" uninitialized\n", lpaddr);
}
- trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_BZERO, 0);
+ trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 0);
for(size_t i = 0; i < numobjs; i++) {
// Initialize type specific fields
src_cap.u.frame.base = genpaddr + i * ((genpaddr_t)1 << objbits);
case ObjType_CNode:
assert((1UL << OBJBITS_CTE) >= sizeof(struct cte));
- trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_BZERO, 1);
+ trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 1);
memset((void*)lvaddr, 0, 1UL << bits);
- trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_BZERO, 0);
+ trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 0);
for(size_t i = 0; i < numobjs; i++) {
// Initialize type specific fields
{
size_t objbits_vnode = vnode_objbits(type);
- trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_BZERO, 1);
+ trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 1);
memset((void*)lvaddr, 0, 1UL << bits);
- trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_BZERO, 0);
+ trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 0);
for(size_t i = 0; i < numobjs; i++) {
// Initialize type specific fields
{
size_t objbits_vnode = vnode_objbits(type);
- trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_BZERO, 1);
+ trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 1);
memset((void*)lvaddr, 0, 1UL << bits);
- trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_BZERO, 0);
+ trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 0);
for(size_t i = 0; i < numobjs; i++) {
// Initialize type specific fields
{
size_t objbits_vnode = vnode_objbits(type);
- trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_BZERO, 1);
+ trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 1);
memset((void*)lvaddr, 0, 1UL << bits);
- trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_BZERO, 0);
+ trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 0);
for(size_t i = 0; i < numobjs; i++) {
// Initialize type specific fields
{
size_t objbits_vnode = vnode_objbits(type);
- trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_BZERO, 1);
+ trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 1);
memset((void*)lvaddr, 0, 1UL << bits);
- trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_BZERO, 0);
+ trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 0);
for(size_t i = 0; i < numobjs; i++) {
// Initialize type specific fields
{
size_t objbits_vnode = vnode_objbits(type);
- trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_BZERO, 1);
+ trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 1);
memset((void*)lvaddr, 0, 1UL << bits);
- trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_BZERO, 0);
+ trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 0);
for(size_t i = 0; i < numobjs; i++) {
// Initialize type specific fields
{
size_t objbits_vnode = vnode_objbits(type);
- trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_BZERO, 1);
+ trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 1);
memset((void*)lvaddr, 0, 1UL << bits);
- trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_BZERO, 0);
+ trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 0);
for(size_t i = 0; i < numobjs; i++) {
// Initialize type specific fields
{
size_t objbits_vnode = vnode_objbits(type);
- trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_BZERO, 1);
+ trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 1);
memset((void*)lvaddr, 0, 1UL << bits);
- trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_BZERO, 0);
+ trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 0);
for(size_t i = 0; i < numobjs; i++) {
// Initialize type specific fields
{
size_t objbits_vnode = vnode_objbits(type);
- trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_BZERO, 1);
+ trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 1);
memset((void*)lvaddr, 0, 1UL << bits);
- trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_BZERO, 0);
+ trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 0);
for(size_t i = 0; i < numobjs; i++) {
// Initialize type specific fields
{
size_t objbits_vnode = vnode_objbits(type);
- trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_BZERO, 1);
+ trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 1);
memset((void*)lvaddr, 0, 1UL << bits);
- trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_BZERO, 0);
+ trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 0);
for(size_t i = 0; i < numobjs; i++) {
// Initialize type specific fields
case ObjType_Dispatcher:
assert((1UL << OBJBITS_DISPATCHER) >= sizeof(struct dcb));
- trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_BZERO, 1);
+ trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 1);
memset((void*)lvaddr, 0, 1UL << bits);
- trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_BZERO, 0);
+ trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 0);
for(size_t i = 0; i < numobjs; i++) {
// Initialize type specific fields
#include <barrelfish_kpi/syscalls.h>
#include <barrelfish_kpi/lmp.h>
#include <trace/trace.h>
+#include <trace_definitions/trace_defs.h>
#include <barrelfish_kpi/dispatcher_shared_target.h>
#include <barrelfish_kpi/cpu_arch.h>
#include <barrelfish_kpi/registers_arch.h>
//#else
trace_event(TRACE_SUBSYS_KERNEL,
- TRACE_EVENT_CSWITCH,
+ TRACE_EVENT_KERNEL_CSWITCH,
(uint32_t)(lvaddr_t)dcb & 0xFFFFFFFF);
//#endif // TRACE_N_BM
#endif // TRACE_CSWITCH
bool is_vm_guest;
struct guest guest_desc; ///< Descriptor of the VM Guest
uint64_t domain_id; ///< ID of dispatcher's domain
- struct dcb *next_all; ///< Next DCB in list of all DCBs (for tracing)
systime_t wakeup_time; ///< Time to wakeup this dispatcher
struct dcb *wakeup_prev, *wakeup_next; ///< Next/prev in timeout queue
extern struct capability monitor_ep;
-/// Keep track of all DCBs for tracing snapshot
-/// XXX this is never garbage-collected at the moment
-extern struct dcb *dcbs_list;
-void trace_snapshot(void);
-
/*
* Variant based on Padraig Brady's implementation
* http://www.pixelbeat.org/programming/gcc/static_assert.html
# include <kernel.h>
# include <dispatch.h>
# include <trace/trace.h>
+# include <trace_definitions/trace_defs.h>
# include <timer.h> // update_sched_timer
#endif
// If nothing changed, run whatever ran last (task might have
// yielded to another), unless it is blocked
if(lastdisp == todisp && dcb_current != NULL && in_queue(dcb_current)) {
- /* trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_SCHED_CURRENT, */
+ /* trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_SCHED_CURRENT, */
/* (uint32_t)(lvaddr_t)dcb_current & 0xFFFFFFFF); */
return dcb_current;
}
- /* trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_SCHED_SCHEDULE, */
+ /* trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_SCHED_SCHEDULE, */
/* (uint32_t)(lvaddr_t)todisp & 0xFFFFFFFF); */
// Remember who we run next
return;
}
- trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_SCHED_MAKE_RUNNABLE,
+ trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_SCHED_MAKE_RUNNABLE,
(uint32_t)(lvaddr_t)dcb & 0xFFFFFFFF);
// Keep counters up to date
queue_remove(dcb);
- trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_SCHED_REMOVE,
+ trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_SCHED_REMOVE,
(uint32_t)(lvaddr_t)dcb & 0xFFFFFFFF);
// Update counters
return;
}
- /* trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_SCHED_YIELD, */
+ /* trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_SCHED_YIELD, */
/* (uint32_t)(lvaddr_t)dcb & 0xFFFFFFFF); */
queue_remove(dcb);
void scheduler_reset_time(void)
{
- trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_TIMER_SYNC, 0);
+ trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_TIMER_SYNC, 0);
kernel_now = 0;
// XXX: Currently, we just re-release everything now
*/
#include <string.h>
+#include <stdio.h>
#include <kernel.h>
#include <startup.h>
#include <exec.h>
#include <barrelfish_kpi/init.h>
#include <barrelfish_kpi/paging_arch.h>
#include <barrelfish_kpi/domain_params.h>
+#include <trace/trace.h>
coreid_t my_core_id;
lpaddr_t bootinfo, lvaddr_t args_base,
alloc_phys_func alloc_phys, lvaddr_t *retparamaddr)
{
- printf("spawn_module\n");
errval_t err;
+ printf("spawn module: %s\n", name);
+
// check for reuse of static state
#ifndef NDEBUG
static bool once_only;
assert(err_is_ok(err));
}
+ // Store the application in the boot applications.
+ trace_new_boot_application((char*) name, (uintptr_t) init_dcb);
+
return init_dcb;
}
#include <exec.h>
#include <irq.h>
#include <trace/trace.h>
-
-/// Keep track of all DCBs for tracing rundown
-/// XXX this is never garbage-collected at the moment
-struct dcb *dcbs_list = NULL;
+#include <trace_definitions/trace_defs.h>
errval_t sys_print(const char *str, size_t length)
{
dcb->domain_id = odisp->u.dispatcher.dcb->domain_id;
}
- // Remember the DCB for tracing purposes
- // When we have proper process management, dead dcbs should be removed from this list
- if (dcb->next_all == NULL) {
- dcb->next_all = dcbs_list;
- dcbs_list = dcb;
- }
-
if(!dcb->is_vm_guest) {
- struct trace_event ev;
- // Top bit of timestamp is flag to indicate dcb rundown events
- ev.timestamp = (1ULL << 63) | (uintptr_t)dcb;
struct dispatcher_shared_generic *disp =
- get_dispatcher_shared_generic(dcb->disp);
- assert(sizeof(ev.u.raw) <= sizeof(disp->name));
- memcpy(&ev.u.raw, disp->name, sizeof(ev.u.raw));
- err = trace_write_event(&ev);
+ get_dispatcher_shared_generic(dcb->disp);
+ err = trace_new_application(disp->name, (uintptr_t) dcb);
+
+ if (err == TRACE_ERR_NO_BUFFER) {
+ // Try to use the boot buffer.
+ trace_new_boot_application(disp->name, (uintptr_t) dcb);
+ }
}
return SYSRET(SYS_ERR_OK);
return SYS_ERR_OK;
}
- err = vspace_map_one_frame((void**)&trace_buffer_master, TRACE_BUF_SIZE,
+ err = vspace_map_one_frame((void**)&trace_buffer_master, TRACE_ALLOC_SIZE,
cap, NULL, NULL);
if (err_is_fail(err)) {
DEBUG_ERR(err, "vspace_map_one_frame failed");
#include <barrelfish/dispatch.h>
#include <barrelfish/dispatcher_arch.h>
#include <trace/trace.h>
+#include <trace_definitions/trace_defs.h>
#include "threads_priv.h"
#ifndef TRACE_THREADS
{
dispatcher_handle_t disp = disp_disable();
- trace_event(TRACE_SUBSYS_THREADS, TRACE_EVENT_COND_WAIT_ENTER,
+ trace_event(TRACE_SUBSYS_THREADS, TRACE_EVENT_THREADS_COND_WAIT_ENTER,
(uintptr_t)cond);
acquire_spinlock(&cond->lock);
thread_mutex_lock(mutex);
}
- trace_event(TRACE_SUBSYS_THREADS, TRACE_EVENT_COND_WAIT_LEAVE,
+ trace_event(TRACE_SUBSYS_THREADS, TRACE_EVENT_THREADS_COND_WAIT_LEAVE,
(uintptr_t)cond);
}
struct thread *wakeup = NULL;
errval_t err = SYS_ERR_OK;
- trace_event(TRACE_SUBSYS_THREADS, TRACE_EVENT_COND_SIGNAL,
+ trace_event(TRACE_SUBSYS_THREADS, TRACE_EVENT_THREADS_COND_SIGNAL,
(uintptr_t)cond);
// Wakeup one waiting thread
struct thread *wakeupq = NULL;
bool foreignwakeup = false;
- trace_event(TRACE_SUBSYS_THREADS, TRACE_EVENT_COND_BROADCAST,
+ trace_event(TRACE_SUBSYS_THREADS, TRACE_EVENT_THREADS_COND_BROADCAST,
(uintptr_t)cond);
// Wakeup all waiting threads
dispatcher_handle_t handle = disp_disable();
struct dispatcher_generic *disp_gen = get_dispatcher_generic(handle);
- trace_event(TRACE_SUBSYS_THREADS, TRACE_EVENT_MUTEX_LOCK_ENTER,
+ trace_event(TRACE_SUBSYS_THREADS, TRACE_EVENT_THREADS_MUTEX_LOCK_ENTER,
(uintptr_t)mutex);
acquire_spinlock(&mutex->lock);
disp_enable(handle);
}
- trace_event(TRACE_SUBSYS_THREADS, TRACE_EVENT_MUTEX_LOCK_LEAVE,
+ trace_event(TRACE_SUBSYS_THREADS, TRACE_EVENT_THREADS_MUTEX_LOCK_LEAVE,
(uintptr_t)mutex);
}
dispatcher_handle_t handle = disp_disable();
struct dispatcher_generic *disp_gen = get_dispatcher_generic(handle);
- trace_event(TRACE_SUBSYS_THREADS, TRACE_EVENT_MUTEX_LOCK_NESTED_ENTER,
+ trace_event(TRACE_SUBSYS_THREADS, TRACE_EVENT_THREADS_MUTEX_LOCK_NESTED_ENTER,
(uintptr_t)mutex);
acquire_spinlock(&mutex->lock);
disp_enable(handle);
}
- trace_event(TRACE_SUBSYS_THREADS, TRACE_EVENT_MUTEX_LOCK_NESTED_LEAVE,
+ trace_event(TRACE_SUBSYS_THREADS, TRACE_EVENT_THREADS_MUTEX_LOCK_NESTED_LEAVE,
(uintptr_t)mutex);
}
*/
bool thread_mutex_trylock(struct thread_mutex *mutex)
{
- trace_event(TRACE_SUBSYS_THREADS, TRACE_EVENT_MUTEX_TRYLOCK,
+ trace_event(TRACE_SUBSYS_THREADS, TRACE_EVENT_THREADS_MUTEX_TRYLOCK,
(uintptr_t)mutex);
// Try first to avoid contention
{
struct thread *ft = NULL;
- trace_event(TRACE_SUBSYS_THREADS, TRACE_EVENT_MUTEX_UNLOCK,
+ trace_event(TRACE_SUBSYS_THREADS, TRACE_EVENT_THREADS_MUTEX_UNLOCK,
(uintptr_t)mutex);
acquire_spinlock(&mutex->lock);
{
assert(sem != NULL);
- trace_event(TRACE_SUBSYS_THREADS, TRACE_EVENT_SEM_WAIT_ENTER,
+ trace_event(TRACE_SUBSYS_THREADS, TRACE_EVENT_THREADS_SEM_WAIT_ENTER,
(uintptr_t)sem);
dispatcher_handle_t disp = disp_disable();
disp_enable(disp);
}
- trace_event(TRACE_SUBSYS_THREADS, TRACE_EVENT_SEM_WAIT_LEAVE,
+ trace_event(TRACE_SUBSYS_THREADS, TRACE_EVENT_THREADS_SEM_WAIT_LEAVE,
(uintptr_t)sem);
}
assert(sem != NULL);
bool ret = false;
- trace_event(TRACE_SUBSYS_THREADS, TRACE_EVENT_SEM_TRYWAIT,
+ trace_event(TRACE_SUBSYS_THREADS, TRACE_EVENT_THREADS_SEM_TRYWAIT,
(uintptr_t)sem);
dispatcher_handle_t disp = disp_disable();
{
assert(sem != NULL);
- trace_event(TRACE_SUBSYS_THREADS, TRACE_EVENT_SEM_POST, (uintptr_t)sem);
+ trace_event(TRACE_SUBSYS_THREADS, TRACE_EVENT_THREADS_SEM_POST, (uintptr_t)sem);
dispatcher_handle_t disp = disp_disable();
struct thread *wakeup = NULL;
#include <stdio.h>
#include <assert.h>
#include <trace/trace.h>
+#include <trace_definitions/trace_defs.h>
#include <contmng/contmng.h>
#include <contmng/netbench.h>
#include <procon/procon.h>
#if CONFIG_TRACE && NETWORK_STACK_TRACE
#include <barrelfish/barrelfish.h>
#include <trace/trace.h>
+#include <trace_definitions/trace_defs.h>
#define LWIP_TRACE_MODE 1
#endif // CONFIG_TRACE && NETWORK_STACK_TRACE
#include <stdio.h>
#include <string.h>
#include <trace/trace.h>
+#include <trace_definitions/trace_defs.h>
#include <net_queue_manager/net_queue_manager.h>
#include <bfdmuxvm/vm.h>
#include <if/net_soft_filters_defs.h>
#include <stdio.h>
#include <string.h>
#include <trace/trace.h>
+#include <trace_definitions/trace_defs.h>
#include <net_queue_manager/net_queue_manager.h>
#include <if/net_queue_manager_defs.h>
--
--------------------------------------------------------------------------
-[ build library { target = "trace", cFiles = [ "trace.c", "control.c" ] } ]
+[ build library {
+ target = "trace",
+ cFiles = [ "trace.c", "control.c" ],
+ flounderDefs = [ "monitor" ]
+} ]
#include <barrelfish/dispatch.h>
#include <barrelfish/dispatcher_arch.h>
#include <barrelfish/curdispatcher_arch.h>
+#include <barrelfish/nameservice_client.h>
+#include <barrelfish/event_queue.h>
+#include <flounder/flounder.h>
+#include <if/monitor_defs.h>
#include <trace/trace.h>
#include <inttypes.h>
#include <stdio.h>
+#define CONSOLE_DUMP_BUFLEN (2<<20)
+
/**
* \brief Reset the trace buffer on the current core.
*
//buf->master = (struct trace_buffer *)trace_buffer_master;
do {
i = buf->head_index;
- new = 0;
+ new = 1;
} while (!trace_cas(&buf->head_index, i, new));
buf->tail_index = 0;
+
+ buf->num_applications = 0;
}
/**
{
for (coreid_t core = 0; core < TRACE_COREID_LIMIT; core++) {
struct trace_buffer *tbuf = (struct trace_buffer *)compute_trace_buf_addr(core);
- tbuf->head_index = 0;
+ tbuf->head_index = 1;
tbuf->tail_index = 0;
- tbuf->done_rundown = false;
}
}
/**
* \brief Dump the contents of the trace buffers
*
+ * buf : The buffer to write the trace log into.
+ * buflen : Length of buf.
+ * number_of_events_dumped : (optional) Returns how many events have been
+ * written into the buffer.
+ *
*/
-size_t trace_dump(char *buf, size_t buflen)
+size_t trace_dump(char *buf, size_t buflen, int *number_of_events_dumped)
{
if (buf == NULL) return TRACE_ERR_NO_BUFFER;
size_t totlen = 0;
size_t len;
- uint64_t t0 = master->t0;
-
- /* Ensure tracing is stopped */
- master->start_trigger = 0;
- master->stop_trigger = 0;
- master->running = false;
-
len = snprintf(ptr, buflen-totlen,
"# Start %" PRIu64 " Duration %" PRIu64 " Stop %" PRIu64
"\n",
master->t0, master->duration, master->stop_time);
ptr += len; totlen += len;
+ if (number_of_events_dumped != NULL) {
+ *number_of_events_dumped = 0;
+ }
+
+ // Determine the minimum timestamp for which an event has been recorded.
+ uint64_t min_timestamp = 0xFFFFFFFFFFFFFFFFULL;
+ for (coreid_t core = 0; core < TRACE_COREID_LIMIT; core++) {
+ struct trace_buffer *tbuf = (struct trace_buffer *)compute_trace_buf_addr(core);
+
+ int num_events;
+ if (tbuf->head_index > tbuf->tail_index) {
+ num_events = tbuf->head_index - tbuf->tail_index - 1;
+ } else {
+ num_events = (TRACE_MAX_EVENTS - tbuf->tail_index) + tbuf->head_index - 1;
+ }
+
+ if (num_events == 0) {
+ // Ringbuffer is empty.
+ continue;
+ }
+
+ // Get the first event
+ int index = tbuf->tail_index + 1;
+ if (index == TRACE_MAX_EVENTS) {
+ index = 0;
+ }
+
+ uint64_t timestamp = tbuf->events[index].timestamp;
+ if (timestamp <= min_timestamp) {
+ min_timestamp = timestamp;
+ }
+
+ }
+
+ len = snprintf(ptr, buflen-totlen,
+ "# Min_timestamp %" PRIu64 "\n",
+ min_timestamp);
+ ptr += len; totlen += len;
+
+ // Create dumps for each core.
for (coreid_t core = 0; core < TRACE_COREID_LIMIT; core++) {
struct trace_buffer *tbuf = (struct trace_buffer *)compute_trace_buf_addr(core);
- if (tbuf->head_index == 0) continue;
+
+ int num_events;
+ if (tbuf->head_index > tbuf->tail_index) {
+ num_events = tbuf->head_index - tbuf->tail_index - 1;
+ //printf("> , head %ld tail %ld num_events %d\n", tbuf->head_index, tbuf->tail_index, num_events);
+ } else {
+ num_events = (TRACE_MAX_EVENTS - tbuf->tail_index) + tbuf->head_index - 1;
+ //printf("< , head %ld tail %ld num_events %d\n", tbuf->head_index, tbuf->tail_index, num_events);
+ }
+
+ if (num_events == 0) {
+ // Ringbuffer is empty.
+ continue;
+ }
len = snprintf(ptr, buflen-totlen,
"# Core %d LOG DUMP ==================================================\n", core);
ptr += len; totlen += len;
+ // Print the core time offset relative to core 0
+ len = snprintf(ptr, buflen-totlen,
+ "# Offset %d %" PRIi64 "\n",
+ core, tbuf->t_offset);
- for (int e = 0; e < tbuf->head_index; e++) {
+ ptr += len; totlen += len;
- if (tbuf->events[e].timestamp < t0) continue;
+ // Print all application names
+ for(int app_index = 0; app_index < tbuf->num_applications; app_index++ ) {
- if (tbuf->events[e].timestamp >> 63) {
- /* Top bit set means it's a DCB rundown event,
- timestamp is the DCB pointer and event data is first 8 chars
- of the domain name. */
- len = snprintf(ptr, buflen-totlen,
- "# DCB %d %" PRIx64 " %.*s\n",
- core, tbuf->events[e].timestamp,
- 8, (char*)&tbuf->events[e].u.raw);
- }
- else {
- len = snprintf(ptr, buflen-totlen,
- "%d %" PRIu64 " %" PRIx64 "\n",
- core, tbuf->events[e].timestamp - t0,
- tbuf->events[e].u.raw);
+ len = snprintf(ptr, buflen-totlen,
+ "# DCB %d %" PRIx64 " %.*s\n",
+ core, tbuf->applications[app_index].dcb,
+ 8, (char*)&tbuf->applications[app_index].name);
+
+ ptr += len; totlen += len;
+ }
+
+ for (int i = 0; i < num_events; i++) {
+
+ tbuf->tail_index++;
+ if (tbuf->tail_index == TRACE_MAX_EVENTS) {
+ tbuf->tail_index = 0;
}
+
+ len = snprintf(ptr, buflen-totlen,
+ "%d %" PRIu64 " %" PRIx64 "\n",
+ core, tbuf->events[tbuf->tail_index].timestamp,
+ //core, tbuf->events[tbuf->tail_index].timestamp - t0,
+ tbuf->events[tbuf->tail_index].u.raw);
+
ptr += len; totlen += len;
+
+ if(number_of_events_dumped != NULL) {
+ (*number_of_events_dumped)++;
+ }
+
}
}
return totlen;
}
+//------------------------------------------------------------------------------
+// Flushing functionality
+//------------------------------------------------------------------------------
+
+struct trace_flush_state
+{
+ struct event_queue_node qnode;
+ struct monitor_binding *monitor_binding;
+ iref_t iref;
+};
+
+// ------- Global Variables
+
+static struct event_closure flush_callback;
+
+// -------
+
+static void trace_notify_bfscope_cont(void* arg)
+{
+ errval_t err;
+
+ struct trace_flush_state *state = (struct trace_flush_state*) arg;
+ struct monitor_binding *monitor_binding = state->monitor_binding;
+
+ err = monitor_binding->tx_vtbl.bfscope_flush_send(monitor_binding, MKCONT(free, state), state->iref);
+
+ if (err_is_ok(err)) {
+ event_mutex_unlock(&monitor_binding->mutex);
+ } else if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
+ err = monitor_binding->register_send(monitor_binding, monitor_binding->waitset, MKCONT(&trace_notify_bfscope_cont, state));
+ assert(err_is_ok(err));
+ } else {
+ event_mutex_unlock(&monitor_binding->mutex);
+ //TODO: Error handling
+ USER_PANIC_ERR(err, "Could not send flush message to bfscope");
+ }
+}
+
+static void trace_notify_bfscope_finished(struct monitor_binding *mb)
+{
+ printf("bfscope flush returned!\n");
+
+ struct event_closure callback = flush_callback;
+
+ if (callback.handler != NULL) {
+ callback.handler(callback.arg);
+ }
+}
+
+/*
+ * Send a message to bfscope, notifying it that it should dump the trace buffer.
+ */
+static void trace_notify_bfscope(struct event_closure callback, iref_t iref)
+{
+
+ flush_callback = callback;
+
+ struct trace_flush_state *state = malloc(sizeof(struct trace_flush_state));
+ //memset(state, 0, sizeof(struct trace_broadcast_start_state));
+
+ state->monitor_binding = get_monitor_binding();
+ state->iref = iref;
+
+ state->monitor_binding->rx_vtbl.bfscope_flush_ack = &trace_notify_bfscope_finished;
+
+ event_mutex_enqueue_lock(&state->monitor_binding->mutex, &state->qnode, MKCLOSURE(&trace_notify_bfscope_cont, state));
+}
+
+/*
+ * Dump the current content of the trace buffer to the console and reset the trace
+ * buffer.
+ */
+static void trace_flush_to_console(void)
+{
+ char *trace_buf = malloc(CONSOLE_DUMP_BUFLEN);
+ assert(trace_buf);
+
+ trace_dump(trace_buf, CONSOLE_DUMP_BUFLEN, NULL);
+
+ printf("%s\n", trace_buf);
+
+ trace_reset_all();
+}
+
+/**
+ * \brief Flush the trace buffer to the "best" destination.
+ *
+ * This function automatically determines if bfscope is running and if someone
+ * is connected over the network. If so, the buffer is flushed over the network.
+ * If nobody is connected or bfscope is not running, the buffer is flushed onto
+ * the console.
+ *
+ */
+void trace_flush(struct event_closure callback)
+{
+ // Check if bfscope is currently running
+ iref_t iref;
+ errval_t err = nameservice_lookup("bfscope", &iref);
+
+ if (err_no(err) == LIB_ERR_NAMESERVICE_UNKNOWN_NAME) {
+ // Bfscope is not running
+ trace_flush_to_console();
+
+ if (callback.handler != NULL) {
+ callback.handler(callback.arg);
+ }
+
+ } else if (err_is_fail(err)) {
+ USER_PANIC_ERR(err, "nameservice_lookup failed");
+ } else {
+ // Lookup was successful, bfscope is running
+
+ // Send a message to bfscope, so that it flushes
+ trace_notify_bfscope(callback, iref);
+ }
+}
+
+/**
+ * \brief Enable/Disable the autoflush mechanism of the tracing framework.
+ *
+ * If autoflush is enabled, the tracing framework will automatically flush
+ * the content of the buffer periodically.
+ *
+ * NOTE: This only works when bfscope is running!
+ *
+ * NOTE: If you enable autoflush, it will affect your performance during the
+ * time period you are flushing, as the buffer might be flushed at any given
+ * time.
+ */
+void trace_set_autoflush(bool enabled)
+{
+ struct trace_buffer *master = (struct trace_buffer*) trace_buffer_master;
+ master->autoflush = enabled;
+}
+
+//------------------------------------------------------------------------------
+// Trace subsystem enabling/disabling functionality
+//------------------------------------------------------------------------------
+
+/**
+ * \brief Enable/Disable the logging of a given subsystem.
+ *
+ * subsys: A subsystem, i.e. the macro generated by the DSL.
+ * enabled: True iff the events should be logged.
+ */
+errval_t trace_set_subsys_enabled(uint16_t subsys, bool enabled)
+{
+ bool* subsystem_states = (bool*) (((uint8_t*) trace_buffer_master) + TRACE_BUF_SIZE);
+
+ subsystem_states[subsys] = enabled;
+
+ return SYS_ERR_OK;
+}
+
+/**
+ * \brief Enable/Disable all subsystems.
+ */
+errval_t trace_set_all_subsys_enabled(bool enabled)
+{
+ bool* subsystem_states = (bool*) (((uint8_t*) trace_buffer_master) + TRACE_BUF_SIZE);
+ int i = 0;
+ for (i = 0; i < TRACE_NUM_SUBSYSTEMS; i++) {
+ subsystem_states[i] = enabled;
+ }
+
+ return SYS_ERR_OK;
+}
+
+//------------------------------------------------------------------------------
+// Trace preparation functionality
+//------------------------------------------------------------------------------
+
+struct trace_prepare_state
+{
+ struct event_queue_node qnode;
+ struct monitor_binding *monitor_binding;
+};
+
+// ------- Global Variables
+
+/// Callback that will be invoked at the end of preparing the tracing environment.
+static struct event_closure prepare_callback;
+
+// -------
+
+/*
+ * Continuation function for sending the initial trace_prepare message.
+ */
+static void trace_prepare_cont(void *arg)
+{
+ errval_t err;
+
+ struct trace_prepare_state *state = (struct trace_prepare_state*) arg;
+ struct monitor_binding *mb = state->monitor_binding;
+
+ dispatcher_handle_t handle = curdispatcher();
+ struct dispatcher_generic *disp = get_dispatcher_generic(handle);
+ coreid_t my_coreid = disp->core_id;
+
+ err = mb->tx_vtbl.trace_prepare(mb, MKCONT(free, state), my_coreid);
+
+ if (err_is_ok(err)) {
+ event_mutex_unlock(&mb->mutex);
+ } else if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
+ err = mb->register_send(mb, mb->waitset, MKCONT(&trace_prepare_cont, state));
+ assert(err_is_ok(err));
+ } else {
+ event_mutex_unlock(&mb->mutex);
+ // TODO: error handling
+ USER_PANIC_ERR(err, "Could not send trace_prepare message");
+ }
+}
+
+/*
+ * Function that is called when we receive a message that the preparation is
+ * finished.
+ */
+static void trace_prepare_finished(struct monitor_binding *mb)
+{
+ printf("Trace prepare finished!\n");
+
+ prepare_callback.handler(prepare_callback.arg);
+}
+
+/**
+ * \brief Optional call to do "extra" preparation of the tracing framework.
+ *
+ * Call this method to prepare for tracing. This is not a preparation in a strict
+ * sense, i.e. tracing will also work when you do not call this method, but it
+ * provides some benefits.
+ *
+ * Currently it provides a mechanism for clock synchronization between cores.
+ */
+errval_t trace_prepare(struct event_closure callback)
+{
+
+ prepare_callback = callback;
+
+ struct trace_prepare_state *state = malloc(sizeof(struct trace_prepare_state));
+
+ state->monitor_binding = get_monitor_binding();
+
+ state->monitor_binding->rx_vtbl.trace_prepare_finished = &trace_prepare_finished;
+
+ return event_mutex_enqueue_lock(&state->monitor_binding->mutex, &state->qnode, MKCLOSURE(&trace_prepare_cont, state));
+}
.slot = TASKCN_SLOT_TRACEBUF
};
- err = frame_create(cap, TRACE_BUF_SIZE, &bytes);
+ err = frame_create(cap, TRACE_ALLOC_SIZE, &bytes);
if (err_is_fail(err)) {
return err_push(err, TRACE_ERR_CREATE_CAP);
}
#include <string.h>
#include <barrelfish/dispatch.h>
#include "trace/trace.h"
+#include <trace_definitions/trace_defs.h>
/** Array of worker descriptors */
static struct worker_desc * workers;
/** Initializes _tweed_top_ to start of this worker's task block
*/
struct generic_task_desc * set_top(void) {
- trace_event(TRACE_SUBSYS_TWEED, TRACE_EVENT_LOCKING, 0);
+ trace_event(TRACE_SUBSYS_TWEED, TRACE_EVENT_TWEED_LOCKING, 0);
struct worker_desc * tls = (struct worker_desc *) thread_get_tls();
LOCK(tls->lock);
- trace_event(TRACE_SUBSYS_TWEED, TRACE_EVENT_LOCKING_END, 0);
+ trace_event(TRACE_SUBSYS_TWEED, TRACE_EVENT_TWEED_LOCKING_END, 0);
tls->bot = workers[tls->id].task_desc_stack;
UNLOCK(tls->lock);
return workers[tls->id].task_desc_stack;
* workers can steal tasks from it.
*/
static inline void set_bot(struct generic_task_desc * val) {
- trace_event(TRACE_SUBSYS_TWEED, TRACE_EVENT_LOCKING, 0);
+ trace_event(TRACE_SUBSYS_TWEED, TRACE_EVENT_TWEED_LOCKING, 0);
struct worker_desc * tls = (struct worker_desc *) thread_get_tls();
LOCK(tls->lock);
- trace_event(TRACE_SUBSYS_TWEED, TRACE_EVENT_LOCKING_END, 0);
+ trace_event(TRACE_SUBSYS_TWEED, TRACE_EVENT_TWEED_LOCKING_END, 0);
tls->bot = val;
UNLOCK(tls->lock);
}
UNLOCK(victim->lock);
// and run task
- trace_event(TRACE_SUBSYS_TWEED, TRACE_EVENT_STEAL, victim->core_id);
+ trace_event(TRACE_SUBSYS_TWEED, TRACE_EVENT_TWEED_STEAL, victim->core_id);
func(_tweed_top_, stolenTask);
- trace_event(TRACE_SUBSYS_TWEED, TRACE_EVENT_STEAL_END,
+ trace_event(TRACE_SUBSYS_TWEED, TRACE_EVENT_TWEED_STEAL_END,
victim->core_id);
// signal task completion
/** Handle stolen task */
int handle_stolen_task(struct generic_task_desc * _tweed_top_) {
- trace_event(TRACE_SUBSYS_TWEED, TRACE_EVENT_WAIT,
+ trace_event(TRACE_SUBSYS_TWEED, TRACE_EVENT_TWEED_WAIT,
GET_THIEF(_tweed_top_)->core_id);
while ((_tweed_top_->balarm & TWEED_TASK_COMPLETE) == 0) {
thread_yield();
}
}
- trace_event(TRACE_SUBSYS_TWEED, TRACE_EVENT_WAIT_END,
+ trace_event(TRACE_SUBSYS_TWEED, TRACE_EVENT_TWEED_WAIT_END,
GET_THIEF(_tweed_top_)->core_id); ;
// update bot
--- /dev/null
+----------------------------------------------------------------------
+-- Copyright (c) 2013, ETH Zurich.
+-- All rights reserved.
+--
+-- This file is distributed under the terms in the attached LICENSE file.
+-- If you do not find this file, copies can be found by writing to:
+-- ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+--
+-- Hakefile for /tools/pleco
+--
+----------------------------------------------------------------------
+
+[ compileHaskellWithLibs "pleco" "Main.lhs" (find withSuffices [".hs",".lhs"])
+ ["/tools/fof"] ]
--- /dev/null
+%include polycode.fmt
+
+%if false
+ Trace Definitions: DSL for trace definitions (subsystems and events)
+
+ Copyright (c) 2013 ETH Zurich.
+ All rights reserved.
+
+ This file is distributed under the terms in the attached LICENSE file.
+ If you do not find this file, copies can be found by writing to:
+ ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+%endif
+
+> module Main where
+
+> import Text.PrettyPrint.HughesPJ as Pprinter
+
+> import System.Environment
+> import System.Exit
+> import System.Console.GetOpt
+> import System.IO
+> import System.Random
+> import System.FilePath.Posix
+
+> import Data.Char
+> import qualified Data.Map as Map
+
+> import Parser
+
+> addInBetween :: String -> [String] -> [String]
+> addInBetween _ [] = []
+> addInBetween _ (x:[]) = [x]
+> addInBetween inBetween (x:y:xs) = [x] ++ [inBetween] ++ addInBetween inBetween (y:xs)
+
+> printEventJSON :: (String, (EventField, Integer)) -> String
+> printEventJSON (subsystemName, (EventField name desc, number)) =
+> "\t\t" ++ show number ++ " : \"" ++ displayName ++ "\""
+> where
+> displayName = -- Use desc if it is not the empty string, else use the name used in the #define
+> if length desc == 0
+> then name
+> else desc
+
+> printSubsysJSON :: (SubsystemClass, Integer) -> String
+> printSubsysJSON (SubsystemClass name events, number) =
+> show number ++ " : {\n\t" ++ subsysString ++ ",\n\t\"events\" : {\n" ++ eventStrings ++ "\n\t}\n}"
+> where
+> subsysString = "\"name\" : \"" ++ name ++ "\""
+> eventStrings = concat (addInBetween ",\n" (map printEventJSON (zip (repeat name) (zip events [0..]))))
+
+As the flounder message send / receive trace events are a bit of a hack (they use the event to store
+their own payload), they are not part of the pleco file. To still be able to decode them correctly
+in Aquarium, we need to add the corresponding information here with the function "addFlounder".
+
+> addFlounder :: [String]
+> addFlounder =
+> [",\n-5632 : {\n\t\"name\" : \"ump send\"\n\t},\n-5376 : {\n\t\"name\" : \"ump receive\"\n\t}"]
+
+> printTraceFileJSON :: [SubsystemClass] -> String
+> printTraceFileJSON subsystems =
+> concat ( ["{\n"] ++ (addInBetween ",\n" (map printSubsysJSON (zip subsystems [0..]))) ++ addFlounder ++ ["\n}"] )
+
+> printEvent :: (String, (EventField, Integer)) -> String
+> printEvent (subsystemName, (EventField name _, number)) =
+> "#define TRACE_EVENT_" ++ map toUpper subsystemName ++ "_" ++ map toUpper name ++ "\t" ++ show number ++ "\n"
+
+> printSubsys :: (SubsystemClass, Integer) -> String
+> printSubsys (SubsystemClass name events, number) =
+> subsysString ++ eventStrings ++ "\n"
+> where
+> subsysString = "#define TRACE_SUBSYS_" ++ map toUpper name ++ "\t" ++ show number ++ "\n"
+> eventStrings = concat (map printEvent (zip (repeat name) (zip events [0..])))
+
+> printTraceFileC :: [SubsystemClass] -> String
+> printTraceFileC subsystems =
+> (concat (map printSubsys (zip subsystems [0..]))) ++ "\n\n#define TRACE_NUM_SUBSYSTEMS\t" ++ (show (length subsystems)) ++ "\n"
+
+
+> main :: IO ()
+> main = do
+> argv <- System.Environment.getArgs
+> case argv of
+> [ inF, hdrF, jsonF, codeF ] -> do
+> input <- Parser.parse inF
+> case input of
+> Left err -> do
+> hPutStrLn stderr "parse error at: "
+> hPutStrLn stderr (show err)
+> exitWith (ExitFailure 1)
+> Right ast -> do
+> let macro = map toUpper (takeBaseName hdrF) ++ "_BARRELFISH__"
+> let header = printTraceFileC ast
+> fileH <- openFile hdrF WriteMode
+> fileC <- openFile codeF WriteMode
+> fileJ <- openFile jsonF WriteMode
+> let pre = "#ifndef " ++ macro ++ "\n" ++
+> "#define " ++ macro ++ "\n\n"
+> let post = "\n#endif // " ++ macro
+> hPutStr fileH pre
+> hPutStr fileH header
+> hPutStrLn fileH post
+> hClose fileH
+> hPutStr fileJ (printTraceFileJSON ast)
+> hClose fileJ
+> hClose fileC
+
+> otherwise -> do
+> hPutStrLn stderr "Usage: pleco input.pleco output.h output.json output.c"
+> exitWith (ExitFailure 1)
--- /dev/null
+##########################################################################
+# Copyright (c) 2013, ETH Zurich.
+# All rights reserved.
+#
+# This file is distributed under the terms in the attached LICENSE file.
+# If you do not find this file, copies can be found by writing to:
+# ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+##########################################################################
+
+
+LHS2TEX = lhs2TeX
+PDFTEX = pdflatex
+BIBTEX = bibtex
+
+FOF_DIR = ../fof
+MAIN = Fugu.lhs
+SOURCES = $(wildcard *.lhs)
+TEXS = $(patsubst %.lhs,%.tex,$(SOURCES))
+
+
+
+all: literate
+
+
+
+.SUFFIXES: .tex .lhs
+
+.lhs.tex:
+ $(LHS2TEX) -o $*.tex $*.lhs
+
+clean:
+ rm -f *.aux *.ptb *.toc *.log *.o *.hi $(patsubst %.lhs,%.tex,$(SOURCES))
+
+cleanall: clean
+ rm -f ErrorDefinition.pdf
--- /dev/null
+{-
+
+ Parser.hs: Parser for the pleco interface definition language
+
+ Part of Pleco: a trace definition DSL for Barrelfish
+
+ Copyright (c) 2013, ETH Zurich.
+
+ All rights reserved.
+
+ This file is distributed under the terms in the attached LICENSE file.
+ If you do not find this file, copies can be found by writing to:
+ ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+-}
+
+module Parser where
+
+import Text.ParserCombinators.Parsec as Parsec
+import Text.ParserCombinators.Parsec.Expr
+import Text.ParserCombinators.Parsec.Pos
+import qualified Text.ParserCombinators.Parsec.Token as P
+import Text.ParserCombinators.Parsec.Language( javaStyle )
+import Data.Char
+import Numeric
+import Data.List
+import Text.Printf
+
+parse filename = parseFromFile traceFile filename
+
+lexer = P.makeTokenParser (javaStyle
+ { P.reservedNames = [ "subsystem",
+ "event"
+ ]
+ , P.reservedOpNames = ["*","/","+","-"]
+ , P.commentStart = "/*"
+ , P.commentEnd = "*/"
+ , P.commentLine = "//"
+ })
+
+whiteSpace = P.whiteSpace lexer
+reserved = P.reserved lexer
+identifier = P.identifier lexer
+stringLit = P.stringLiteral lexer
+comma = P.comma lexer
+commaSep = P.commaSep lexer
+commaSep1 = P.commaSep1 lexer
+parens = P.parens lexer
+braces = P.braces lexer
+squares = P.squares lexer
+semiSep = P.semiSep lexer
+symbol = P.symbol lexer
+
+data EventField = EventField String String
+data SubsystemClass = SubsystemClass String [ EventField ]
+
+traceFile =
+ do
+ whiteSpace
+ subsystems <- many1 subsystemClass
+ return subsystems
+
+
+subsystemClass =
+ do
+ reserved "subsystem"
+ name <- identifier
+ events <- braces $ many1 eventCase
+ symbol ";" <?> " ';' missing from end of " ++ name ++ " subsystem definition"
+ return $ SubsystemClass name events
+
+
+eventCase =
+ do
+ reserved "event"
+ acronym <- identifier
+ description <- stringLit
+ symbol "," <?> " ',' missing from end of " ++ acronym ++ " definition"
+ return $ EventField acronym description
--- /dev/null
+--------------------------------------------------------------------------
+-- Copyright (c) 2007-2009, ETH Zurich.
+-- All rights reserved.
+--
+-- This file is distributed under the terms in the attached LICENSE file.
+-- If you do not find this file, copies can be found by writing to:
+-- ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+--
+-- Hakefile for /errors/
+--
+--------------------------------------------------------------------------
+
+[ plecoFile (options arch) "trace_defs" | arch <- allArchitectures ]
--- /dev/null
+/*
+ * Copyright (c) 2012-2013, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+*/
+/*
+ * \brief Subsystem and Event definitions of the tracing framework.
+ *
+ * Events do always belong to their subsystem. Make sure that you only
+ * call trace_event() with matching subsystems, in order to be able to
+ * make reasonable use of Aquarium 2.
+ *
+ * Note: If you don't specify a verbose description of the event, the
+ * description will default to the name of the event.
+ *
+ * Example:
+ *
+ * The subsystem:
+ *
+ * subsystem mysubsystem {
+ * event myevent "",
+ * }
+ *
+ * will be compiled into the following two C macros:
+ *
+ * #define TRACE_SUBSYS_MYSUBSYSTEM 1
+ * #define TRACE_EVENT_MYSUBSYSTEM_MYEVENT 1
+ *
+ * The pattern is that every subsystem is mapped to the concatenation of
+ * TRACE_SUBSYS_ and its name in uppercase, and each event is mapped to
+ * the concatenation of TRACE_EVENT_ and the subsystem name in upper case,
+ * followed by a _ and the name of the event.
+ *
+*/
+
+subsystem kernel {
+
+ event CSWITCH "Context Switch",
+ event BZERO "Buffer zeroing",
+ event TIMER "",
+ event TIMER_SYNC "",
+
+ event SCHED_MAKE_RUNNABLE "",
+ event SCHED_REMOVE "",
+ event SCHED_YIELD "",
+ event SCHED_SCHEDULE "",
+ event SCHED_CURRENT "",
+
+};
+
+subsystem threads {
+
+ event BARRIER_ENTER "",
+ event BARRIER_LEAVE "",
+
+ event MUTEX_LOCK_ENTER "",
+ event MUTEX_LOCK_LEAVE "",
+ event MUTEX_LOCK_NESTED_ENTER "",
+ event MUTEX_LOCK_NESTED_LEAVE "",
+ event MUTEX_TRYLOCK "",
+ event MUTEX_UNLOCK "",
+
+ event COND_WAIT_ENTER "",
+ event COND_WAIT_LEAVE "",
+ event COND_SIGNAL "",
+ event COND_BROADCAST "",
+
+ event SEM_WAIT_ENTER "",
+ event SEM_WAIT_LEAVE "",
+ event SEM_TRYWAIT "",
+ event SEM_POST "",
+};
+
+subsystem memserv {
+
+ event ALLOC "",
+
+ event PERCORE_INIT "",
+ event PERCORE_ALLOC "",
+ event PERCORE_ALLOC_COMPLETE "",
+};
+
+subsystem memtest {
+
+ event START "",
+ event STOP "",
+ event STARTED "",
+ event WAIT "",
+ event RUN "",
+ event DONE "",
+ event ALLOC "",
+ event MASTER "",
+ event WORKER "",
+
+};
+
+
+subsystem monitor {
+ event SPAN0 "",
+ event SPAN1 "",
+ event SPAN "",
+ event PCREQ "",
+ event PCREP "",
+ event PCREQ_INTER "",
+ event PCREPLY_INTER "",
+ event URPC_BLOCK "",
+ event URPC_UNBLOCK "",
+ event REMOTE_CAP_RETYPE "",
+ event REMOTE_CAP_RETYPE_RETRY "",
+ event REMOTE_CAP_RETYPE_MSG "",
+ event REMOTE_CAP_RETYPE_END "",
+ event POLLING "",
+
+};
+
+subsystem chips {
+
+ event LISTENCB "",
+
+};
+
+subsystem bflib {
+
+ // XXX: Empty subsystems are not allowed
+ event dummy "",
+
+};
+
+
+subsystem tweed {
+
+ event START "",
+ event END "",
+ event STEAL "",
+ event STEAL_END "",
+ event WAIT "",
+ event WAIT_END "",
+ event LOCKING "",
+ event LOCKING_END "",
+
+};
+
+subsystem route {
+
+ event BCAST_WITH_CCAST_SEND "",
+ event BCAST_WITH_CCAST "",
+ event RECV_BCAST_WITH_CCAST "",
+ event RECV_CCAST "",
+ event BENCH_START "",
+ event BENCH_STOP "",
+ event SEND_PING "",
+ event SEND_PONG "",
+ event RECV_PING "",
+ event RECV_PONG "",
+ event POLL "",
+
+};
+
+subsystem bench {
+
+ event PCBENCH "",
+ event RXPING "",
+ event RXPONG "",
+
+};
+
+subsystem bomp {
+
+ event START "",
+ event STOP "",
+ event ITER "",
+
+};
+
+subsystem barriers {
+
+ event START "",
+ event STOP "",
+ event BARRIER_WAIT "",
+ event CENTRAL_REQ "",
+ event CENTRAL_REP "",
+ event TREE_REQ "",
+ event TREE_REP "",
+ event DIST "",
+ event SEND "",
+ event POLL1 "",
+ event POLL2 "",
+ event HEAP_REQ "",
+ event HEAP_REP "",
+ event SEQ_BCAST_REQ "",
+ event SEQ_BCAST_RECV "",
+ event TREE_BCAST_REQ "",
+ event TREE_BCAST_RECV "",
+
+};
+
+/* Following constants are used in network subsystem. */
+subsystem net {
+
+ event START "",
+ event STOP "",
+ event NI_AI "added, 0",
+ event NI_I "added, 0",
+ event NI_A "added, pkt data location",
+ event NI_FILTER_FRAG "added, pkt data location",
+ event NI_FILTER_EX_1 "added, pkt data location",
+ event NI_ARP "added, pkt data location",
+ event NI_FILTER_EX_2 "added, pkt data location",
+ event NI_PKT_CPY_1 "added, pkt data location",
+ event NI_PKT_CPY_2 "added, pkt data location",
+ event NI_PKT_CPY_3 "added, pkt data location",
+ event NI_PKT_CPY_4 "added, pkt data location",
+
+ event NI_PKT_CPY "added, pkt data location",
+ event NI_P "added, pbuf_id",
+ event NI_S "added, pbuf_id",
+ event AI_A "added, pbuf_id",
+ event AI_P "added, pbuf_addr",
+ event AO_C "added, pbuf_addr",
+ event AO_Q "added, pbuf_addr",
+ event AO_S "added, pbuf_addr (as client_data )",
+ event NO_A "added, client_data (pbuf_address in lwip)",
+ event NO_S "added, e1000n.c client_data (pbuf_address in lwip)",
+
+/* FIXME: Add the timings of when does NIC gets TX_done */
+ event NO_TXD "yet to add",
+ event AIR_R "added, pbuf_addr (TX DONE in app)",
+
+/* Response flow */
+ event AOR_S "added, pbuf_id ( register_pbuf from APP)",
+ event NIR_REG_PBUF "commented pbuf_id ( register_pbuf in NIC)",
+};
+
+subsystem multihop {
+
+ event BENCH_START "",
+ event BENCH_STOP "",
+ event MESSAGE_SEND "",
+ event MESSAGE_RECEIVE "",
+
+};
+
+/* Following constants are used in network benchmark. */
+subsystem bnet {
+
+ event START "",
+ event STOP "",
+ event DRV_SEE "",
+ event APP_SEE "",
+ event DRV_INT "",
+ event DRV_POLL "",
+ event YIELD "",
+ event I "",
+
+};
+
+/* Following constants are used for profiling modified stack */
+subsystem nnet {
+
+ event START "",
+ event STOP "",
+ event RXDRVSEE "Driver saw pkg (RX)",
+ event RXESVSEE "Ethersrv saw pkg",
+ event RXESVFRGDONE "Ethersrv checked frag",
+ event RXESVAPPFDONE "Ethersrv app filtered",
+ event RXESVAPPCSTART "Ethersrv app c2u started",
+ event RXESVCOPIED "Ethersrv copied pkg",
+ event RXESVSPPDONE "Ethersrv spp produce done",
+ event RXESVAPPNOTIF "Ethersrv app notify",
+ event RXLWIINCOM "LWIP handle_incoming_",
+ event RXLWIRECH "LWIP call rec_handler",
+ event RXAPPRCV "APP received",
+
+ event TXAPPSNT "APP sent",
+ event TXLWISEE "LWIP idc_send_packet",
+ event TXLWIBFFENCE "LWIP before mfence",
+ event TXLWIAFFENCE "LWIP after mfence",
+ event TXLWIFLUSHED "LWIP payload flushed",
+ event TXLWIBDESC "LWIP bufferdesc fetched",
+ event TXLWISPPSND "LWIP spp produced",
+ event TXLWISPPIDX "LWIP update spp index",
+ event TXLWITXWORK "LWIP pending TX work",
+ event TXLWINOTIF "LWIP notify driver",
+ event TXESVNOTIF "Ethersrv notify recieved",
+ event TXESVSPOW "Ethersrv send_pkt_on_w..",
+ event TXESVSSPOW "Ethersrv send_sng_pkt..",
+ event TXDRVADD "Driver add pkg (TX)",
+ event TXDRVSEE "Driver saw pkg done (TX)",
+ event TX_TCP_WRITE "tcp_write done",
+ event TX_TCP_OUTPUT "tcp_output done",
+ event TX_TCP_RECV "tcp_recved done",
+ event TX_TCP_FREE "tx pbuf freed",
+ event TX_MEMP "tx pbuf memp start",
+ event TX_MEMP_D "tx pbuf memp done",
+
+};
+
+/* The example subsystem is used to demonstrate how the tracing framework
+ * works. It is used by the program in "examples/xmpl-trace". */
+subsystem xmpl {
+
+ event START "",
+ event STOP "",
+ event EV1 "",
+ event EV2 "",
+
+};
+
#include <assert.h>
#include <barrelfish/barrelfish.h>
#include <trace/trace.h>
+#include <trace_definitions/trace_defs.h>
#define PERIOD 2500000000UL
#define ITERATIONS 10
trace_event(TRACE_SUBSYS_BOMP, TRACE_EVENT_BOMP_STOP, 0);
char *buf = malloc(4096*4096);
- trace_dump(buf, 4096*4096);
+ trace_dump(buf, 4096*4096, NULL);
printf("%s\n", buf);
abort();
#endif
#include <barrelfish/resource_ctrl.h>
#include <barrelfish/waitset.h>
#include <trace/trace.h>
+#include <trace_definitions/trace_defs.h>
#define POLL_CYCLES (cycles_t)0xfffffffff
#define MAX_COUNT 100
static void ping(struct ping_pong_binding *b, uint64_t arg)
{
errval_t err;
- err = trace_event(TRACE_SUBSYS_ROUTE, TRACE_EVENT_BCAST_WITH_CCAST_SEND, 1);
+ err = trace_event(TRACE_SUBSYS_ROUTE, TRACE_EVENT_ROUTE_BCAST_WITH_CCAST_SEND, 1);
if (err_is_fail(err)) {
USER_PANIC_ERR(err, "trace_event failed");
}
static void pong(struct ping_pong_binding *b, uint64_t arg)
{
- trace_event(TRACE_SUBSYS_ROUTE, TRACE_EVENT_BCAST_WITH_CCAST_SEND, 0);
+ trace_event(TRACE_SUBSYS_ROUTE, TRACE_EVENT_ROUTE_BCAST_WITH_CCAST_SEND, 0);
request_done = true;
timestamp[idx].time1 = bench_tsc();
}
#if CONFIG_TRACE
if (exp_count == EXPERIMENT_COUNT - 1) {
char *buf = malloc(4096*4096);
- size_t length = trace_dump(buf, 4096*4096);
+ size_t length = trace_dump(buf, 4096*4096, NULL);
printf("%s\n", buf);
printf("length of buffer %lu\n", length);
}
--------------------------------------------------------------------------
--- Copyright (c) 2007-2009, ETH Zurich.
+-- Copyright (c) 2007-2013, ETH Zurich.
-- All rights reserved.
--
-- This file is distributed under the terms in the attached LICENSE file.
[ build application { target = "bfscope",
cFiles = [ "bfscope.c" ],
- addLibraries = [ "lwip", "contmng", "procon", "trace" ]
+ addLibraries = [ "lwip", "contmng", "trace" ],
+ flounderBindings = [ "empty" ]
}
]
/**
* \file
- * \brief Barrelfish trace server
+ * \brief Barrelfish trace server, Version 2
*/
/*
- * Copyright (c) 2007, 2008, 2009, 2010, ETH Zurich.
+ * Copyright (c) 2007-2013, ETH Zurich.
* All rights reserved.
*
* This file is distributed under the terms in the attached LICENSE file.
#include <barrelfish/barrelfish.h>
#include <barrelfish/dispatch.h>
#include <barrelfish/lmp_endpoints.h>
+#include <barrelfish/event_queue.h>
+#include <barrelfish/nameservice_client.h>
#include <trace/trace.h>
-/* LWIP Network stack includes... */
+#include <flounder/flounder.h>
+#include <if/monitor_defs.h>
+
+#include <if/empty_defs.h>
+
+/* LWIP Network stack includes */
#include <lwip/init.h>
#include <lwip/netif.h>
#include <lwip/dhcp.h>
#define BFSCOPE_BUFLEN (2<<20)
-extern void idc_print_statistics(void);
-extern void idc_print_cardinfo(void);
-extern void network_polling_loop(void);
+extern struct waitset *lwip_waitset;
static char *trace_buf = NULL;
static size_t trace_length = 0;
static size_t trace_sent = 0;
+static bool dump_in_progress = false;
+/// Timestamp when the sending of a trace dump over the network started
static uint64_t timestamp_start = 0;
-static struct trace_buffer *bfscope_save_tracebuf = NULL;
-static bool bfscope_trace_acquired = false;
-
+/// The client that connected to this bfscope instance.
static struct tcp_pcb *bfscope_client = NULL;
+/// If autoflush is enabled, bfscope can itself decide to flush. In that
+/// case, we don't want to notify anyone after doing a locally initiated flush.
+static bool local_flush = false;
+
#define DEBUG if (0) printf
+static void bfscope_send_flush_ack_to_monitor(void);
+
/*
* \brief Close the specified TCP connection
*/
DEBUG("bfscope: TCP(%p) error %d\n", arg, err);
- if (tpcb) bfscope_connection_close(tpcb);
+ if (tpcb) {
+ bfscope_connection_close(tpcb);
+ }
+}
+
+/*
+ * Call this method when you finished dumping the current trace buffer.
+ */
+static void bfscope_trace_dump_finished(void)
+{
+ trace_length = 0;
+ trace_sent = 0;
+ dump_in_progress = false;
+
+ if (!local_flush) {
+ bfscope_send_flush_ack_to_monitor();
+ } else {
+ // Locally initiated flush is finished.
+ local_flush = false;
+ }
}
/*
}
/* Give the data to LWIP until it runs out of buffer space */
- int r = tcp_write(tpcb, bufptr, len,
+ err_t lwip_err = tcp_write(tpcb, bufptr, len,
TCP_WRITE_FLAG_COPY | (more ? TCP_WRITE_FLAG_MORE : 0));
//DEBUG("%d %ld+%d\n", r, trace_sent, len);
- if (r == ERR_MEM) {
- printf("BAD!\n");
+ if (lwip_err == ERR_MEM) {
+ printf("bfscope: lwip: out of memory\n");
return;
}
+
trace_sent += len;
if (trace_sent >= trace_length) {
uint64_t timestamp_stop = rdtsc();
DEBUG("bfscope: done (%lu bytes) in %ld cycles\n",
trace_sent, timestamp_stop - timestamp_start);
- trace_length = 0;
- trace_sent = 0;
+
+ bfscope_trace_dump_finished();
}
}
//printf("send_cb %d\n", length);
/* If we haven't finished sending the trace, then send more data */
- if (trace_length) bfscope_trace_send(tpcb);
+ if (trace_length) {
+ bfscope_trace_send(tpcb);
+ }
return ERR_OK;
}
-
-static void bfscope_trace_complete(void)
+/*
+ * \brief This method should be called when a trace should be dumped on the network.
+ */
+static void bfscope_trace_dump_network(void)
{
- dispatcher_handle_t handle = curdispatcher();
- struct dispatcher_generic *disp = get_dispatcher_generic(handle);
- int len;
-
- bfscope_trace_acquired = true;
-
- /* Re-enable tracing if necessary */
- if (disp->trace_buf == NULL &&
- bfscope_save_tracebuf != NULL) disp->trace_buf = bfscope_save_tracebuf;
-
- if (bfscope_client == NULL) return;
-
-
- /* Format the trace into global trace buffer */
- trace_length = trace_dump(trace_buf, BFSCOPE_BUFLEN);
+ assert(bfscope_client != NULL);
+ assert(trace_length > 0);
- DEBUG("bfscope: trace length %lu\n", trace_length);
+ printf("bfscope: sending %lu bytes to network...\n", trace_length);
/* Send length field */
char tmpbuf[10];
+ int len;
len = snprintf(tmpbuf, 9, "%08ld", trace_length);
tcp_write(bfscope_client, tmpbuf, 8, TCP_WRITE_FLAG_COPY);
tcp_output(bfscope_client);
}
-static void ipi_handler(void *arg)
-{
- sys_print("!", 1);
- DEBUG("bfscope_ipi\n");
-#if 0
- // consume IDC message
- struct idc_recv_msg msg;
- idc_endpoint_poll(arg, &msg, NULL);
-#endif
-
- // Handle the trace completion
- bfscope_trace_complete();
-}
-
-/**
- * \brief Wait for a trace completion IPI
+/*
+ * \brief This method should be called when a trace should be dumped on the console.
*/
-static errval_t bfscope_trace_wait_ipi(void)
+static void bfscope_trace_dump_console(void)
{
- dispatcher_handle_t handle = curdispatcher();
- struct dispatcher_generic *disp = get_dispatcher_generic(handle);
- struct trace_buffer *buf = disp->trace_buf;
- if (buf == NULL) return TRACE_ERR_NO_BUFFER;
+ printf("%s\n", trace_buf);
- ((struct trace_buffer *)trace_buffer_master)->ipi_dest = disp_get_core_id();
-
- /* XXX Temporarily disable tracing for this process */
- bfscope_save_tracebuf = buf;
- disp->trace_buf = NULL;
-
- return SYS_ERR_OK;
+ bfscope_trace_dump_finished();
}
/*
- * \brief Take a real trace and dump it ito the trace_buf
+ * \brief This method should be called when a trace should be dumped.
+ *
+ * (Based upon a different application calling trace_flush() or so.)
*/
-static void trace_acquire(struct tcp_pcb *tpcb,
- uint64_t start_trigger,
- uint64_t stop_trigger)
+static void bfscope_trace_dump(void)
{
- errval_t err;
-
- if (trace_buf == NULL) {
- trace_buf = malloc(BFSCOPE_BUFLEN);
+ if(dump_in_progress) {
+ // Currently there is already a dump in progress, do nothing.
+ return;
}
- assert(trace_buf);
- trace_reset_all();
+ int number_of_events = 0;
+ // Acquire the trace buffer
+ trace_length = trace_dump(trace_buf, BFSCOPE_BUFLEN, &number_of_events);
- bfscope_trace_acquired = false;
+ DEBUG("bfscope: trace length %lu, nr. of events %d\n", trace_length, number_of_events);
- err = trace_control(start_trigger, stop_trigger, 10 * 30 * 2000000);
+ if (trace_length <= 0 || number_of_events <= 0) {
+ DEBUG("bfscope: trace length too small, not dumping.\n");
+ return;
+ }
- err = bfscope_trace_wait_ipi();
-}
+ dump_in_progress = true;
+ if (bfscope_client != NULL) {
+ // We have a connected client, dump to network
+ bfscope_trace_dump_network();
+ } else {
+ // There is no client, just dump to console
+ bfscope_trace_dump_console();
+ }
+}
+
/*
* \brief Callback from LWIP when we receive TCP data
*/
assert(p->next == 0);
+
if ((p->tot_len > 2) && (p->tot_len < 200)) {
- if (strncmp(p->payload, "stat", 4) == 0) {
- idc_print_statistics();
- }
- if (strncmp(p->payload, "cardinfo", 8) == 0) {
- idc_print_cardinfo();
- }
if (strncmp(p->payload, "trace", strlen("trace")) == 0) {
DEBUG("bfscope: trace request\n");
- if (trace_length == 0) {
- sys_print("T",1);
- trace_acquire((struct tcp_pcb *)arg,
- TRACE_EVENT(TRACE_SUBSYS_BENCH, TRACE_EVENT_PCBENCH, 1),
- TRACE_EVENT(TRACE_SUBSYS_BENCH, TRACE_EVENT_PCBENCH, 0));
- } else {
- printf("trace already in progress\n");
- //sys_print("X",1);
- tcp_write(tpcb, "000000", 6, TCP_WRITE_FLAG_COPY);
- }
+ // NOOP
+
+ } else {
+ DEBUG("bfscope: could not understand request\n");
}
}
+
/* Done with the incoming data */
tcp_recved(tpcb, p->len);
pbuf_free(p);
static err_t accept_cb(void *arg, struct tcp_pcb *tpcb, err_t err)
{
printf("bfscope: connected\n");
+
assert(err == ERR_OK);
+
tcp_recv(tpcb, recv_cb);
tcp_sent(tpcb, send_cb);
tcp_err(tpcb, error_cb);
tcp_arg(tpcb, (void*)tpcb);
+
tcp_accepted(tpcb);
bfscope_client = tpcb;
/*
* \brief Start listening on the bfscope server port
*/
-static int bfscope_server_init(void)
+static err_t bfscope_server_init(void)
{
- err_t r;
+ err_t err;
uint16_t bind_port = BFSCOPE_TCP_PORT;
return ERR_MEM;
}
- r = tcp_bind(pcb, IP_ADDR_ANY, bind_port);
- if(r != ERR_OK) {
- return(r);
+ err = tcp_bind(pcb, IP_ADDR_ANY, bind_port);
+ if(err != ERR_OK) {
+ return(err);
}
struct tcp_pcb *pcb2 = tcp_listen(pcb);
printf("bfscope: listening on port %d\n", BFSCOPE_TCP_PORT);
- return (0);
+ return ERR_OK;
}
+//------------------------------------------------------------------------------
+// Monitor Messaging Interface
+//------------------------------------------------------------------------------
+/*
+ * This function is called when we receive a flush message from our monitor.
+ */
+static void bfscope_handle_flush_msg(struct monitor_binding *mb, iref_t iref)
+{
+ printf("bfscope flush request message received!\n");
+
+ bfscope_trace_dump();
+}
-//******************************************************************************
-// irq handling stuff
-//******************************************************************************
-static struct lmp_endpoint *idcep;
+struct bfscope_ack_send_state {
+ struct event_queue_node qnode;
+ struct monitor_binding *monitor_binding;
+};
-static void generic_interrupt_handler(void *arg)
+static void bfscope_send_flush_ack_cont(void* arg)
{
errval_t err;
- // consume message
- struct lmp_recv_buf buf = { .buflen = 0 };
- err = lmp_endpoint_recv(idcep, &buf, NULL);
- assert(err_is_ok(err));
+ struct bfscope_ack_send_state *state = (struct bfscope_ack_send_state*) arg;
+ struct monitor_binding *monitor_binding = state->monitor_binding;
- ipi_handler(NULL);
+ err = monitor_binding->tx_vtbl.bfscope_flush_ack(monitor_binding, MKCONT(free, state));
- // re-register
- struct event_closure cl = {
- .handler = generic_interrupt_handler,
- .arg = arg,
- };
- err = lmp_endpoint_register(idcep, get_default_waitset(), cl);
- assert(err_is_ok(err));
+ if (err_is_ok(err)) {
+ event_mutex_unlock(&monitor_binding->mutex);
+ } else if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
+ err = monitor_binding->register_send(monitor_binding, monitor_binding->waitset, MKCONT(&bfscope_send_flush_ack_cont, state));
+ assert(err_is_ok(err));
+ } else {
+ event_mutex_unlock(&monitor_binding->mutex);
+ //TODO: Error handling
+ USER_PANIC_ERR(err, "Could not send flush ack message to monitor of bfscope");
+ }
}
-static errval_t register_interrupt(int irqvector)
-{
- struct capref epcap;
- errval_t err;
+/*
+ * Call this method when bfscope is done with flushing and wants to notify
+ * the initiator of the flush request.
+ */
+static void bfscope_send_flush_ack_to_monitor(void) {
- // use minimum-sized endpoint, because we don't need to buffer >1 interrupt
- err = endpoint_create(LMP_RECV_LENGTH, &epcap, &idcep);
- if (err_is_fail(err)) {
- return err;
- }
- err = invoke_irqtable_set(cap_irq, irqvector, epcap);
+ struct bfscope_ack_send_state *state = malloc(sizeof(struct bfscope_ack_send_state));
+ //memset(state, 0, sizeof(struct trace_broadcast_start_state));
+
+ state->monitor_binding = get_monitor_binding();
+
+ event_mutex_enqueue_lock(&state->monitor_binding->mutex, &state->qnode, MKCLOSURE(&bfscope_send_flush_ack_cont, state));
+}
+
+//------------------------------------------------------------------------------
+// Interface Exporting
+//------------------------------------------------------------------------------
+
+static void export_cb(void *st, errval_t err, iref_t iref)
+{
if (err_is_fail(err)) {
- if (err_no(err) == SYS_ERR_CAP_NOT_FOUND) {
- printf("cope: Error registering IRQ, check that cope " \
- "is spawned by the monitor by having 'boot' as its " \
- "first argument\n");
- } else {
- DEBUG_ERR(err, "bfscope, irq set");
- }
- return err;
+ USER_PANIC_ERR(err, "export failed");
}
- // register to receive on this endpoint
- struct event_closure cl = {
- .handler = generic_interrupt_handler,
- .arg = NULL,
- };
- err = lmp_endpoint_register(idcep, get_default_waitset(), cl);
+ printf("bfscope: exported at iref %"PRIuIREF"\n", iref);
+
+ // register this iref with the name service
+ err = nameservice_register("bfscope", iref);
if (err_is_fail(err)) {
- lmp_endpoint_free(idcep);
- // TODO: release vector
- return err;
+ USER_PANIC_ERR(err, "nameservice_register failed");
}
- return SYS_ERR_OK;
}
-//******************************************************************************
+static errval_t connect_cb(void *st, struct empty_binding *b)
+{
+ USER_PANIC("bfscope: connect_cb got called");
+}
+//------------------------------------------------------------------------------
+// Main
+//------------------------------------------------------------------------------
int main(int argc, char**argv)
{
return -1;
#endif
+ // Allocate the outgoing buffer
+ if (trace_buf == NULL) {
+ trace_buf = malloc(BFSCOPE_BUFLEN);
+ }
+ assert(trace_buf);
+
+ /* Disable tracing for bfscope */
+ dispatcher_handle_t handle = curdispatcher();
+ struct dispatcher_generic *disp = get_dispatcher_generic(handle);
+ disp->trace_buf = NULL;
+
printf("%.*s running on core %d\n", DISP_NAME_LEN, disp_name(),
disp_get_core_id());
- // Register IRQ
- errval_t err = register_interrupt(TRACE_COMPLETE_IPI_IRQ);
- assert(err_is_ok(err)); //XXX
- printf("Registered IRQ\n");
-
/* Connect to e1000 driver */
printf("%.*s: trying to connect to the e1000 driver...\n",
DISP_NAME_LEN, disp_name());
lwip_init_auto();
- int r = bfscope_server_init();
- assert(r == 0);
- network_polling_loop();
+ err_t lwip_err = bfscope_server_init();
+
+ assert(lwip_err == ERR_OK);
+
+
+ // Export our empty interface
+ errval_t err;
+ err = empty_export(NULL /* state pointer for connect/export callbacks */,
+ export_cb, connect_cb, get_default_waitset(),
+ IDC_EXPORT_FLAGS_DEFAULT);
+ if (err_is_fail(err)) {
+ USER_PANIC_ERR(err, "export failed");
+ }
+
+ // Register our message handlers with the monitor
+ struct monitor_binding *monitor_binding;
+ monitor_binding = get_monitor_binding();
+ monitor_binding->rx_vtbl.bfscope_flush_send = &bfscope_handle_flush_msg;
+
+
+ while (1) {
+ //err = event_dispatch(lwip_waitset);
+ err = event_dispatch_non_block(lwip_waitset);
+
+ if (err == LIB_ERR_NO_EVENT) {
+ // It is ok that no event is dispatched.
+ err = ERR_OK;
+ }
+
+ DEBUG("bfscope: dispatched event, autoflush: %d\n",((struct trace_buffer*) trace_buffer_master)->autoflush);
+
+ // Check if we are in autoflush mode
+ if(((struct trace_buffer*) trace_buffer_master)->autoflush) {
+ local_flush = true;
+ bfscope_trace_dump();
+ }
+
+ thread_yield_dispatcher(NULL_CAP);
+
+
+ if (err_is_fail(err)) {
+ DEBUG_ERR(err, "in event_dispatch");
+ break;
+ }
+ }
+
return 0;
}
#include <stdlib.h>
#include <barrelfish/barrelfish.h>
#include <trace/trace.h>
+#include <trace_definitions/trace_defs.h>
#include <if/nameservice_defs.h>
#include <if/monitor_defs.h>
#include <net_queue_manager/net_queue_manager.h>
#include <if/net_queue_manager_defs.h>
#include <trace/trace.h>
+#include <trace_definitions/trace_defs.h>
#include "e1000n.h"
#if CONFIG_TRACE && NETWORK_STACK_TRACE
/*
char *buf = malloc(4096*4096);
- trace_dump(buf, 4096*4096);
+ trace_dump(buf, 4096*4096, NULL);
printf("%s\n", buf);
*/
#endif // TRACE_N_BM
#include <ipv4/lwip/inet.h>
#include <barrelfish/debug.h>
#include <trace/trace.h>
+#include <trace_definitions/trace_defs.h>
#include "e10k.h"
#include <barrelfish/inthandler.h>
#include <net_queue_manager/net_queue_manager.h>
#include <trace/trace.h>
+#include <trace_definitions/trace_defs.h>
#include "eMAC_dev.h"
#include <lwip/tcp.h>
#include <netif/bfeth.h>
#include <trace/trace.h>
+#include <trace_definitions/trace_defs.h>
#include "echoserver.h"
} else if (n64b == 8) {
trace_event(TRACE_SUBSYS_NNET, TRACE_EVENT_NNET_STOP, 0);
char* trbuf = malloc(4096*4096);
- size_t length = trace_dump(trbuf, 4096*4096);
+ size_t length = trace_dump(trbuf, 4096*4096, NULL);
printf("%s\n", trbuf);
printf("length of buffer %zu\n", length);
free(trbuf);
*/
/*
- * Copyright (c) 2010, ETH Zurich.
+ * Copyright (c) 2010-2013, ETH Zurich.
* All rights reserved.
*
* This file is distributed under the terms in the attached LICENSE file.
#include <barrelfish/barrelfish.h>
#include <trace/trace.h>
+#include <trace_definitions/trace_defs.h>
-/*
- * Trace identifiers. Typically these are defined in trace/trace.h
- * have a look in there to see which ones are used already.
- */
-#define TRACE_SUBSYS_XMPL 0x7337
-#define TRACE_EVENT_XMPL_START 0x0001
-#define TRACE_EVENT_XMPL_STOP 0x0002
-#define TRACE_EVENT_XMPL_EV1 0x0003
-#define TRACE_EVENT_XMPL_EV2 0x0004
+// Enable to benchmark the tracing framework.
+#define TRACE_EXAMPLE_BENCHMARK 0
+
+static void after_prepare(void *arg);
+
+static volatile bool finished;
+
+static volatile int counter = 0;
static errval_t init_tracing(void)
{
trace_reset_all();
+ debug_printf("after trace reset\n");
+
// Tell the trace system when to start and stop. We can also
// provide an overriding maximum duration (in cycles) as the last parameter.
return trace_control(TRACE_EVENT(TRACE_SUBSYS_XMPL,
trace_event(TRACE_SUBSYS_XMPL, TRACE_EVENT_XMPL_STOP, 0);
}
+
+#if TRACE_EXAMPLE_BENCHMARK
+
+#define NUM_EVENTS 1000
+
+static void benchmark(void)
+{
+ uint64_t times[NUM_EVENTS];
+
+ int i = 0;
+ for (i=0; i < NUM_EVENTS; i++) {
+ uint64_t start = rdtsc();
+ trace_event(TRACE_SUBSYS_XMPL, TRACE_EVENT_XMPL_EV1, i);
+ uint64_t end = rdtsc();
+ times[i] = end-start;
+ }
+
+ printf("Call times:\n");
+ for (i=0; i < NUM_EVENTS; i++) {
+ printf("%" PRIu64 "\n", times[i]);
+ }
+ printf("Call time end.\n");
+
+}
+
+#endif
+
+// This callback is invoked when the flushing process of the tracing framework
+// is finished, so that you can continue your operations.
+static void callback(void *arg)
+{
+ debug_printf("callback invoked\n");
+
+ finished = true;
+}
+
static void dump_trace(void)
{
// dump the trace on the output. We can copy and paste it
debug_printf("the trace dump\n");
- char *buf = malloc(4096*4096);
- trace_dump(buf, 4096*4096);
- printf("%s\n", buf);
+ // Let the trace framework decide where to flush to
+ trace_flush(MKCLOSURE(callback, NULL));
debug_printf("finished trace dump\n");
+
}
static void do_stuff(void)
{
// generate our own traces
- trace_event(TRACE_SUBSYS_XMPL, TRACE_EVENT_XMPL_EV1, 0);
+#if TRACE_EXAMPLE_BENCHMARK
+
+ benchmark();
+
+#else
+
+ trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 0);
+
+ trace_set_subsys_enabled(TRACE_SUBSYS_KERNEL, false);
+
+ trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 1);
+
+ trace_set_subsys_enabled(TRACE_SUBSYS_KERNEL, true);
+
+ trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 2);
- trace_event(TRACE_SUBSYS_XMPL, TRACE_EVENT_XMPL_EV2, 1);
+ trace_event(TRACE_SUBSYS_XMPL, TRACE_EVENT_XMPL_EV1, 1);
- trace_event(TRACE_SUBSYS_XMPL, TRACE_EVENT_XMPL_EV1, 2);
+ trace_event(TRACE_SUBSYS_XMPL, TRACE_EVENT_XMPL_EV2, 2);
- trace_event(TRACE_SUBSYS_XMPL, TRACE_EVENT_XMPL_EV2, 3);
+ trace_event(TRACE_SUBSYS_XMPL, TRACE_EVENT_XMPL_EV1, 3);
+
+ trace_event(TRACE_SUBSYS_XMPL, TRACE_EVENT_XMPL_EV2, 4);
trace_event(TRACE_SUBSYS_XMPL, TRACE_EVENT_XMPL_EV1, 10);
trace_event(TRACE_SUBSYS_XMPL, TRACE_EVENT_XMPL_EV1, 12);
trace_event(TRACE_SUBSYS_XMPL, TRACE_EVENT_XMPL_EV2, 13);
+
+#endif
+}
+
+// Callback that is invoked after the tracing framework
+// has been prepared.
+static void after_prepare(void *arg)
+{
+ debug_printf("after_prepare starts");
+
+ start_tracing();
+
+ debug_printf("we are tracing now\n");
+
+ // do stuff to generate traces
+ do_stuff();
+
+ stop_tracing();
+
+ // flush the trace buffer
+ dump_trace();
+
}
int main(int argc, char *argv[])
{
+#ifndef CONFIG_TRACE
+ // bail - no tracing support
+ printf("%.*s: Error, no tracing support, cannot start xmpl-trace\n",
+ DISP_NAME_LEN, disp_name());
+ printf("%.*s: recompile with trace = TRUE in build/hake/Config.hs\n",
+ DISP_NAME_LEN, disp_name());
+ return -1;
+#endif
+
errval_t err;
debug_printf("starting\n");
+ // Enable this line if you want to flush automatically.
+ // trace_set_autoflush(true);
+
+ finished = false;
+
err = init_tracing();
if (err_is_fail(err)) {
DEBUG_ERR(err, "initialising tracing");
return EXIT_FAILURE;
}
-
- start_tracing();
-
- debug_printf("we are tracing now\n");
- // do stuff to generate traces
- do_stuff();
+ // Make sure all subsystems get logged.
+ trace_set_all_subsys_enabled(true);
- stop_tracing();
+ debug_printf("after init tracing\n");
+
+ // Prepare the tracing framework. This is optional.
+ trace_prepare(MKCLOSURE(after_prepare, NULL));
+
+ while(!finished) {
+ // Make sure this program is not exited before everything
+ // is completed.
+ event_dispatch_non_block(get_default_waitset());
+ thread_yield_dispatcher(NULL_CAP);
+ }
- dump_trace();
-
return EXIT_SUCCESS;
}
#include <barrelfish/spawn_client.h>
#include <barrelfish/terminal.h>
#include <trace/trace.h>
+#include <trace_definitions/trace_defs.h>
#include <acpi_client/acpi_client.h>
#include <skb/skb.h>
#include <vfs/vfs.h>
// Repeat each frame a few times to slow down scrolling!
for (int f = 0; f < frames; f++) {
- trace_event(TRACE_SUBSYS_BENCH, TRACE_EVENT_PCBENCH, 1);
+ trace_event(TRACE_SUBSYS_BENCH, TRACE_EVENT_BENCH_PCBENCH, 1);
for(int i = 0; i < RENDER_WIDTH; i++) {
int xpos = (x + i)%width;
while (rdtsc() - now < pixwidth) ;
}
- trace_event(TRACE_SUBSYS_BENCH, TRACE_EVENT_PCBENCH, 0);
+ trace_event(TRACE_SUBSYS_BENCH, TRACE_EVENT_BENCH_PCBENCH, 0);
}
}
return 0;
/* Initialize tracing */
err = trace_init();
if (err_is_fail(err)) {
- DEBUG_ERR(err, "error initialising trace buffer");
+ DEBUG_ERR(err, "error initializing trace buffer");
printf("Warning: tracing not available\n");
}
+ #if defined(CONFIG_TRACE)
+ err = trace_my_setup();
+ if (err_is_fail(err)) {
+ DEBUG_ERR(err, "error setting up tracing in init");
+ printf("Warning: tracing not available\n");
+ } else {
+ // Initialize the pointers
+ trace_reset_all();
+ // Enable all subsystems by default.
+ trace_set_all_subsys_enabled(true);
+ }
+ #endif
/* Load mem_serv */
printf("Spawning memory server (%s)...\n", MEM_SERV_NAME);
#include <barrelfish/dispatch.h>
#include <mm/mm.h>
#include <trace/trace.h>
+#include <trace_definitions/trace_defs.h>
#include <barrelfish/morecore.h>
#include <barrelfish/monitor_client.h>
struct capref *cap = malloc(sizeof(struct capref));
errval_t err, ret;
- trace_event(TRACE_SUBSYS_MEMSERV, TRACE_EVENT_ALLOC, bits);
+ trace_event(TRACE_SUBSYS_MEMSERV, TRACE_EVENT_MEMSERV_ALLOC, bits);
/* refill slot allocator if needed */
err = slot_prealloc_refill(mm_ram.slot_alloc_inst);
#include <barrelfish/barrelfish.h>
#include <barrelfish/dispatch.h>
#include <trace/trace.h>
+#include <trace_definitions/trace_defs.h>
#include <barrelfish/monitor_client.h>
#include <barrelfish/spawn_client.h>
#include <barrelfish/nameservice_client.h>
}
}
- trace_event(TRACE_SUBSYS_PERCORE_MEMSERV, TRACE_EVENT_ALLOC_COMPLETE, 0);
+ trace_event(TRACE_SUBSYS_MEMSERV, TRACE_EVENT_MEMSERV_PERCORE_ALLOC_COMPLETE, 0);
}
static void percore_allocate_handler(struct mem_binding *b,
}
}
- trace_event(TRACE_SUBSYS_PERCORE_MEMSERV, TRACE_EVENT_ALLOC_COMPLETE, 0);
+ trace_event(TRACE_SUBSYS_MEMSERV, TRACE_EVENT_MEMSERV_PERCORE_ALLOC_COMPLETE, 0);
}
#include <barrelfish/barrelfish.h>
#include <barrelfish/nameservice_client.h>
#include <trace/trace.h>
+#include <trace_definitions/trace_defs.h>
#include "memtest_trace.h"
#include <barrelfish/spawn_client.h>
#include <trace/trace.h>
+#include <trace_definitions/trace_defs.h>
#include "memtest_trace.h"
#include <barrelfish/spawn_client.h>
#include <trace/trace.h>
+#include <trace_definitions/trace_defs.h>
#include "memtest_trace.h"
#include <barrelfish/spawn_client.h>
#include <trace/trace.h>
+#include <trace_definitions/trace_defs.h>
#include "memtest_trace.h"
#include <barrelfish/spawn_client.h>
#include <trace/trace.h>
+#include <trace_definitions/trace_defs.h>
#include "memtest_trace.h"
#include <barrelfish/dispatch.h>
#include <mm/mm.h>
#include <trace/trace.h>
+#include <trace_definitions/trace_defs.h>
#include <barrelfish/morecore.h>
#include <barrelfish/monitor_client.h>
#include <barrelfish/spawn_client.h>
// debug_printf("percore alloc request: bits: %d\n", bits);
- trace_event(TRACE_SUBSYS_PERCORE_MEMSERV, TRACE_EVENT_ALLOC, bits);
+ trace_event(TRACE_SUBSYS_MEMSERV, TRACE_EVENT_MEMSERV_PERCORE_ALLOC, bits);
// refill slot allocator if needed
err = do_slot_prealloc_refill(mm_slots->slot_alloc_inst);
try_steal(&ret, &cap, bits, minbase, maxlimit);
}
- trace_event(TRACE_SUBSYS_PERCORE_MEMSERV, TRACE_EVENT_ALLOC_COMPLETE, bits);
+ trace_event(TRACE_SUBSYS_MEMSERV, TRACE_EVENT_MEMSERV_PERCORE_ALLOC_COMPLETE, bits);
*retcap = cap;
return ret;
mem_avail = 0;
mem_total = 0;
- trace_event(TRACE_SUBSYS_PERCORE_MEMSERV, TRACE_EVENT_INIT, 0);
+ trace_event(TRACE_SUBSYS_MEMSERV, TRACE_EVENT_MEMSERV_PERCORE_INIT, 0);
err = init_slot_allocator(&percore_slot_alloc, mm_slots);
if (err_is_fail(err)) {
mem_avail / 1024 / 1024, mem_total / 1024 / 1024);
- trace_event(TRACE_SUBSYS_PERCORE_MEMSERV, TRACE_EVENT_INIT, 9);
+ trace_event(TRACE_SUBSYS_MEMSERV, TRACE_EVENT_MEMSERV_PERCORE_INIT, 9);
return SYS_ERR_OK;
}
# define MEMSERV_AFFINITY
#endif
-// FIXME: this should end up in trace.h
-//#define TRACE_SUBSYS_PERCORE_MEMSERV (TRACE_SUBSYS_MEMSERV + 0xDDD)
-#define TRACE_SUBSYS_PERCORE_MEMSERV 0xADDD
-#define TRACE_EVENT_ALLOC_COMPLETE 0x0002
-#define TRACE_EVENT_INIT 0x0000
-
// appropriate size type for available RAM
typedef genpaddr_t memsize_t;
#define PRIuMEMSIZE PRIuGENPADDR
#include <barrelfish/barrelfish.h>
#include <trace/trace.h>
+#include <trace_definitions/trace_defs.h>
#include "memtest_trace.h"
if (buf == NULL) {
prepare_dump();
}
- size_t len = trace_dump(buf, TRACE_SIZE);
+ size_t len = trace_dump(buf, TRACE_SIZE, NULL);
printf("%s\n", buf);
debug_printf("dump finished. len: %d\n", (int)len);
free(buf);
#define __MEMTEST_TRACE_H__
#include <barrelfish/barrelfish.h>
-
-// #define TRACE_SUBSYS_MEMTEST (TRACE_SUBSYS_MEMSERV | 0x0FF)
-#define TRACE_SUBSYS_MEMTEST 0xA0FF
-#define TRACE_EVENT_MEMTEST_START 0x0000
-#define TRACE_EVENT_MEMTEST_STOP 0x0001
-#define TRACE_EVENT_MEMTEST_STARTED 0x0020
-#define TRACE_EVENT_MEMTEST_WAIT 0x0030
-#define TRACE_EVENT_MEMTEST_RUN 0x0040
-#define TRACE_EVENT_MEMTEST_DONE 0x0050
-#define TRACE_EVENT_MEMTEST_ALLOC 0x0066
-#define TRACE_EVENT_NSTEST_MASTER 0x0444
-#define TRACE_EVENT_NSTEST_WORKER 0x0555
+#include <trace_definitions/trace_defs.h>
errval_t init_tracing(void);
void start_tracing(void);
#include <inttypes.h>
#include <barrelfish/barrelfish.h>
#include <trace/trace.h>
+#include <trace_definitions/trace_defs.h>
#include <mm/mm.h>
#include <if/mem_defs.h>
#include <if/monitor_blocking_rpcclient_defs.h>
struct capref cap;
errval_t err, ret;
- trace_event(TRACE_SUBSYS_PERCORE_MEMSERV, TRACE_EVENT_ALLOC, bits);
+ trace_event(TRACE_SUBSYS_MEMSERV, TRACE_EVENT_MEMSERV_PERCORE_ALLOC, bits);
/* debug_printf("%d: percore steal request: bits: %d\n", disp_get_core_id(), bits); */
// refill slot allocator if needed
cap = NULL_CAP;
}
- trace_event(TRACE_SUBSYS_PERCORE_MEMSERV, TRACE_EVENT_ALLOC_COMPLETE, bits);
+ trace_event(TRACE_SUBSYS_MEMSERV, TRACE_EVENT_MEMSERV_PERCORE_ALLOC_COMPLETE, bits);
*retcap = cap;
return ret;
#include <barrelfish/barrelfish.h>
#include <barrelfish/dispatch.h>
#include <trace/trace.h>
+#include <trace_definitions/trace_defs.h>
#include <barrelfish/monitor_client.h>
#include <dist/barrier.h>
ret = percore_steal_handler_common(bits, minbase, maxlimit, &cap);
sv->send.steal(sv, ret, cap);
- trace_event(TRACE_SUBSYS_PERCORE_MEMSERV, TRACE_EVENT_ALLOC_COMPLETE, 0);
+ trace_event(TRACE_SUBSYS_MEMSERV, TRACE_EVENT_MEMSERV_PERCORE_ALLOC_COMPLETE, 0);
}
}
}
- trace_event(TRACE_SUBSYS_PERCORE_MEMSERV, TRACE_EVENT_ALLOC_COMPLETE, 0);
+ trace_event(TRACE_SUBSYS_MEMSERV, TRACE_EVENT_MEMSERV_PERCORE_ALLOC_COMPLETE, 0);
}
// Various startup procedures
Config.RCAP_DB_NULL -> []
Config.RCAP_DB_CENTRAL-> [ "collections" ]
Config.RCAP_DB_TWOPC -> [ "collections", "cap_predicates" ]
- common_srcs = [ "ram_alloc.c", "inter.c", "spawn.c", "invocations.c", "iref.c",
+ common_srcs = [ "trace_support.c", "bfscope_support.c", "ram_alloc.c", "inter.c", "spawn.c", "invocations.c", "iref.c",
"main.c", "monitor_server.c", "monitor_rpc_server.c",
"boot.c", "queue.c", "domain.c", "intermon_bindings.c",
"rcap_db_common.c", "resource_ctrl.c", "timing.c", rcap_db ]
--- /dev/null
+/**
+ * \file
+ * \brief Bfscope (trace server) support for the monitor.
+ */
+
+/*
+ * Copyright (c) 2012 ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#include <barrelfish/barrelfish.h>
+#include <barrelfish/nameservice_client.h>
+#include <barrelfish/event_queue.h>
+#include <trace/trace.h>
+#include <sys/time.h>
+#include "monitor.h"
+
+// Per-request state for forwarding a flush request to the core that runs
+// bfscope: the intermonitor binding to use and the iref of the bfscope service.
+struct bfscope_flush_state
+{
+ struct intermon_binding *ib;
+ iref_t iref;
+};
+
+// Per-request state for routing the flush ack back to the monitor on the
+// core that initiated the flush.
+struct bfscope_ack_state
+{
+ struct intermon_binding* to_initiator_binding;
+};
+
+// State used while notifying the local bfscope process about a flush;
+// qnode serializes the send on the monitor binding's event queue.
+struct notify_bfscope_state
+{
+ struct monitor_binding *monitor_binding;
+ struct event_queue_node qnode;
+};
+
+// --------- Global variables
+
+// Monitor binding to the process that initiated the trace_start
+static struct monitor_binding *requester_monitor_binding = NULL;
+
+// Intermonitor binding between monitor on the core that initiated the flush request
+// and monitor on the core that runs bfscope
+// NOTE(review): single global slot — concurrent flush requests would overwrite
+// each other; verify flushes are serialized by the callers.
+static struct intermon_binding *requester_intermon_binding = NULL;
+
+// ----------
+
+//-----------
+// Receiving a flush ack message from bfscope
+//-----------
+
+static void bfscope_intermon_flush_ack_continue(void *arg);
+static void bfscope_intermon_flush_forward_handler(struct intermon_binding *imb, iref_t iref);
+static void bfscope_intermon_flush_ack(struct intermon_binding *ib);
+
+/*
+ * We received a message from bfscope, telling us that the flushing has been
+ * completed.
+ *
+ * So let's forward this information to the initiator: either directly over
+ * the local monitor binding (same-core case, requester_intermon_binding is
+ * NULL) or via the stored intermonitor binding.
+ */
+static void bfscope_monitor_flush_ack_handler(struct monitor_binding *mb)
+{
+ if(requester_intermon_binding == NULL) {
+ // The initiator and bfscope run on the same core, do no intermon communication
+
+ bfscope_intermon_flush_ack(NULL);
+ return;
+ }
+
+ // State is freed by the MKCONT(free, state) continuation once the
+ // forwarded ack has been sent (see bfscope_intermon_flush_ack_continue).
+ // NOTE(review): malloc return value is not checked.
+ struct bfscope_ack_state *state = malloc(sizeof (struct bfscope_ack_state));
+ state->to_initiator_binding = requester_intermon_binding;
+
+ // Clear the global slot so a subsequent flush can reuse it.
+ requester_intermon_binding = NULL;
+
+ bfscope_intermon_flush_ack_continue(state);
+}
+
+// -------
+
+/*
+ * Transmit (or re-transmit) the flush_send_forward intermonitor message held
+ * in the bfscope_flush_state. Re-registers itself as the send continuation
+ * while the channel reports FLOUNDER_ERR_TX_BUSY; state is freed by the
+ * MKCONT(free, state) continuation on successful send.
+ */
+static void bfscope_monitor_flush_send_continue(void* arg)
+{
+
+ struct bfscope_flush_state *state = (struct bfscope_flush_state*) arg;
+
+ assert(state->ib != NULL);
+
+ // Send the intermonitor message
+ errval_t err;
+ err = state->ib->tx_vtbl.bfscope_flush_send_forward(state->ib, MKCONT(free, state), state->iref);
+
+ if(err_no(err) == FLOUNDER_ERR_TX_BUSY) {
+ // Sending failed, must be repeated for this core
+ err = state->ib->register_send(state->ib, state->ib->waitset, MKCONT(&bfscope_monitor_flush_send_continue, state));
+ assert(err_is_ok(err));
+ } else if(err_is_fail(err)) {
+ //TODO: Error handling
+ // NOTE(review): panic message names trace_monitor_broadcast_start_continue,
+ // which is not this function — message text looks copy-pasted.
+ USER_PANIC_ERR(err, "Could not broadcast trace_monitor_broadcast_start_continue");
+ } else {
+ // Everything was ok, do nothing.
+ }
+
+}
+
+/*
+ * This function is called on the monitor that initiated the "trace_flush" command.
+ *
+ * Let's forward the message to the core on which bfscope is running. If
+ * bfscope runs on this very core, skip the intermonitor hop and call the
+ * forward handler directly with a NULL binding.
+ */
+static void bfscope_monitor_flush_send_handler(struct monitor_binding *mb, iref_t iref)
+{
+ printf("bfscope_monitor_flush_send_handler\n");
+
+ // Remember who asked, so the eventual ack can be routed back.
+ requester_monitor_binding = mb;
+
+ // NOTE(review): malloc return value is not checked; state is freed by the
+ // send continuation in bfscope_monitor_flush_send_continue.
+ struct bfscope_flush_state *state = malloc(sizeof(struct bfscope_flush_state));
+ state->iref = iref;
+
+ // Get the coreid on which bfscope is running
+ coreid_t coreid;
+ errval_t err = iref_get_core_id(iref, &coreid);
+ if (err_is_fail(err)) {
+ USER_PANIC_ERR(err, "iref_get_core_id for bfscope failed");
+ }
+
+ printf("bfscope runs on core: %d\n", coreid);
+
+ if(coreid == my_core_id) {
+ printf("bfscope runs on the same core as the initiator of the flush request\n");
+
+ // Send message to bfscope directly
+ // NOTE(review): `state` is leaked on this early-return path.
+ bfscope_intermon_flush_forward_handler(NULL, iref);
+ return;
+ }
+
+ err = intermon_binding_get(coreid, &state->ib);
+
+ if(err_is_fail(err)) {
+ USER_PANIC_ERR(err, "intermon_binding_get failed");
+ }
+
+ bfscope_monitor_flush_send_continue(state);
+}
+
+
+/*
+ * This function is called on the monitor on which the initiator of the flush
+ * message is running, once the flush has been performed and the ack has been
+ * received.
+ */
+static void bfscope_monitor_flush_finished_successfully(void *arg)
+{
+ // Reset the global state
+
+ requester_monitor_binding = NULL;
+}
+
+/*
+ * Transmit (or re-transmit on FLOUNDER_ERR_TX_BUSY) the flush ack back to the
+ * monitor on the initiator's core. State is freed by the MKCONT(free, state)
+ * continuation once the send succeeds.
+ */
+static void bfscope_intermon_flush_ack_continue(void* arg)
+{
+ struct bfscope_ack_state *state = (struct bfscope_ack_state*) arg;
+ struct intermon_binding *intermon_binding = state->to_initiator_binding;
+
+ errval_t err;
+
+ err = intermon_binding->tx_vtbl.bfscope_flush_ack_forward(intermon_binding, MKCONT(free, state));
+
+ if(err_no(err) == FLOUNDER_ERR_TX_BUSY) {
+ err = intermon_binding->register_send(intermon_binding, intermon_binding->waitset, MKCONT(&bfscope_intermon_flush_ack_continue, state));
+ assert(err_is_ok(err));
+ } else if(err_is_fail(err)) {
+ //TODO: Error handling
+ USER_PANIC_ERR(err, "Could not forward ack in bfscope_intermon_flush_ack_continue");
+ } else {
+ // Everything was ok, do nothing.
+ }
+}
+
+/*
+ * Deliver the flush request to the local bfscope process over its monitor
+ * binding. Runs under the binding's event mutex (taken by the caller via
+ * event_mutex_enqueue_lock); the mutex is released on success and on hard
+ * failure, but kept while re-registering on FLOUNDER_ERR_TX_BUSY.
+ */
+static void bfscope_send_flush_msg_to_bfscope(void* arg)
+{
+ errval_t err;
+
+ struct notify_bfscope_state *state = (struct notify_bfscope_state*) arg;
+ struct monitor_binding *monitor_binding = state->monitor_binding;
+
+ // Payload 0: the flush_send message carries no meaningful argument here.
+ err = monitor_binding->tx_vtbl.bfscope_flush_send(monitor_binding, MKCONT(free, state), 0);
+
+ if (err_is_ok(err)) {
+ event_mutex_unlock(&monitor_binding->mutex);
+ } else if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
+ err = monitor_binding->register_send(monitor_binding, monitor_binding->waitset, MKCONT(&bfscope_send_flush_msg_to_bfscope, state));
+ assert(err_is_ok(err));
+ } else {
+ event_mutex_unlock(&monitor_binding->mutex);
+ //TODO: Error handling
+ USER_PANIC_ERR(err, "Could not send flush message to bfscope");
+ }
+}
+
+/*
+ * The flush message has been received on the monitor where bfscope is running.
+ *
+ * Let's notify bfscope about the flush request. imb may be NULL when the
+ * initiator runs on this same core (see bfscope_monitor_flush_send_handler).
+ */
+static void bfscope_intermon_flush_forward_handler(struct intermon_binding *imb, iref_t iref)
+{
+ printf("bfscope_intermon_flush_forward_handler on core %d.\n", my_core_id);
+
+ // Store the intermonitor binding so that we can later reply
+ requester_intermon_binding = imb;
+
+ // Notify bfscope
+
+ // NOTE(review): malloc return value is not checked; state->qnode is used
+ // uninitialized only as an event-queue node, which the queue fills in.
+ struct notify_bfscope_state *state = malloc(sizeof(struct notify_bfscope_state));
+ //memset(state, 0, sizeof(struct trace_broadcast_start_state));
+
+ // Get the monitor binding to bfscope
+ errval_t err = iref_get_binding(iref, &state->monitor_binding);
+ if(err_is_fail(err)) {
+ USER_PANIC_ERR(err, "iref_get_binding for bfscope failed");
+ }
+ // Send the message to bfscope
+ event_mutex_enqueue_lock(&state->monitor_binding->mutex, &state->qnode, MKCLOSURE(&bfscope_send_flush_msg_to_bfscope, state));
+
+}
+
+/*
+ * Forward the flush ack to the user program that initiated the flush, over
+ * the monitor binding stored in requester_monitor_binding. On success the
+ * continuation resets that global (bfscope_monitor_flush_finished_successfully).
+ */
+static void bfscope_monitor_flush_forward_ack_to_requester(void *arg)
+{
+
+ assert(requester_monitor_binding != NULL);
+
+ errval_t err;
+
+ err = requester_monitor_binding->tx_vtbl.bfscope_flush_ack(requester_monitor_binding, MKCONT(bfscope_monitor_flush_finished_successfully, NULL));
+
+ if(err_no(err) == FLOUNDER_ERR_TX_BUSY) {
+ err = requester_monitor_binding->register_send(requester_monitor_binding,
+ requester_monitor_binding->waitset, MKCONT(&bfscope_monitor_flush_forward_ack_to_requester, NULL));
+ assert(err_is_ok(err));
+ } else if(err_is_fail(err)) {
+ //TODO: Error handling
+ USER_PANIC_ERR(err, "Could not reply to flush bfscope_monitor_flush_forward_ack_to_requester");
+ } else {
+ // Everything was ok, do nothing.
+ }
+}
+
+// This function is automatically called when the initiating monitor receives an ack from another monitor
+// (also called directly with ib == NULL in the same-core case).
+static void bfscope_intermon_flush_ack(struct intermon_binding *ib)
+{
+ printf("bfscope_intermon_flush_ack\n");
+
+ bfscope_monitor_flush_forward_ack_to_requester(NULL);
+}
+
+//------------------------------------------------------------------------------
+// Message Table Initialization
+//------------------------------------------------------------------------------
+
+
+// set up receive vtable in the intermonitor interface
+errval_t bfscope_intermon_init(struct intermon_binding *ib)
+{
+ ib->rx_vtbl.bfscope_flush_send_forward = &bfscope_intermon_flush_forward_handler;
+ ib->rx_vtbl.bfscope_flush_ack_forward = &bfscope_intermon_flush_ack;
+
+ return SYS_ERR_OK;
+}
+
+// set up receive vtable in the monitor interface
+errval_t bfscope_monitor_init(struct monitor_binding *mb)
+{
+ mb->rx_vtbl.bfscope_flush_send = &bfscope_monitor_flush_send_handler;
+ mb->rx_vtbl.bfscope_flush_ack = &bfscope_monitor_flush_ack_handler;
+
+ return SYS_ERR_OK;
+}
#include <inttypes.h>
#include "monitor.h"
#include <trace/trace.h>
+#include <trace_definitions/trace_defs.h>
#define MIN(x,y) ((x<y) ? (x) : (y))
#define MAX(x,y) ((x>y) ? (x) : (y))
struct intermon_state *st = b->st;
coreid_t core_id = st->core_id;
- trace_event(TRACE_SUBSYS_MONITOR, TRACE_EVENT_SPAN, disp_get_core_id());
+ trace_event(TRACE_SUBSYS_MONITOR, TRACE_EVENT_MONITOR_SPAN, disp_get_core_id());
/* Contruct vroot */
struct capability vnode_cap = {
}
#endif
+#if CONFIG_TRACE
+ err = trace_intermon_init(b);
+ if (err_is_fail(err)) {
+ USER_PANIC_ERR(err, "trace_intermon_init failed");
+ return err;
+ }
+
+ err = bfscope_intermon_init(b);
+ if (err_is_fail(err)) {
+ USER_PANIC_ERR(err, "bfscope_intermon_init failed");
+ return err;
+ }
+#endif
+
err = arch_intermon_init(b);
if (err_is_fail(err)) {
USER_PANIC_ERR(err, "arch_intermon_init failed");
errval_t multihop_monitor_init(struct monitor_binding *mb);
errval_t multihop_request_routing_table(struct intermon_binding *b);
+/* trace_support.c */
+errval_t trace_intermon_init(struct intermon_binding *ib);
+errval_t trace_monitor_init(struct monitor_binding *mb);
+
+/* bfscope_support.c */
+errval_t bfscope_intermon_init(struct intermon_binding *ib);
+errval_t bfscope_monitor_init(struct monitor_binding *mb);
+
/* rck_support.c */
errval_t rck_intermon_init(struct intermon_binding *ib);
errval_t rck_monitor_init(struct monitor_binding *mb);
#include <barrelfish/debug.h> // XXX: To set the cap_identify_reply handler
#include <barrelfish/sys_debug.h> // XXX: for sys_debug_send_ipi
#include <trace/trace.h>
+#include <trace_definitions/trace_defs.h>
#include <if/mem_defs.h>
#include <barrelfish/monitor_client.h>
#include <if/monitor_loopback_defs.h>
{
errval_t err, err2;
- trace_event(TRACE_SUBSYS_MONITOR, TRACE_EVENT_SPAN0, core_id);
+ trace_event(TRACE_SUBSYS_MONITOR, TRACE_EVENT_MONITOR_SPAN0, core_id);
struct span_state *state;
uintptr_t state_id;
state->mb = mb;
state->domain_id = domain_id;
- trace_event(TRACE_SUBSYS_MONITOR, TRACE_EVENT_SPAN1, core_id);
+ trace_event(TRACE_SUBSYS_MONITOR, TRACE_EVENT_MONITOR_SPAN1, core_id);
/* Look up the destination monitor */
struct intermon_binding *ib;
}
#endif // CONFIG_INTERCONNECT_DRIVER_MULTIHOP
+#ifdef CONFIG_TRACE
+ errval_t err3;
+ err3 = bfscope_monitor_init(b);
+ if (err_is_fail(err3)) {
+ USER_PANIC_ERR(err3, "bfscope_monitor_init failed");
+ }
+
+ err3 = trace_monitor_init(b);
+ if (err_is_fail(err3)) {
+ USER_PANIC_ERR(err3, "trace_monitor_init failed");
+ }
+#endif // CONFIG_TRACE
+
return monitor_server_arch_init(b);
}
--- /dev/null
+/**
+ * \file
+ * \brief Tracing support for the monitor.
+ */
+
+/*
+ * Copyright (c) 2012 ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#include <barrelfish/barrelfish.h>
+#include <barrelfish/nameservice_client.h>
+#include <barrelfish/event_queue.h>
+#include <trace/trace.h>
+#include <sys/time.h>
+#include "monitor.h"
+
+// Forward declarations: send helpers double as queue continuations so a
+// message blocked with FLOUNDER_ERR_TX_BUSY can be retried from the queue.
+static void trace_intermon_send_time_measurement_request(struct intermon_binding *ib, struct intermon_msg_queue_elem *elem);
+static void trace_intermon_send_time_measurement_ack(struct intermon_binding *ib, struct intermon_msg_queue_elem *elem);
+
+static void trace_intermon_send_prepare(struct intermon_binding *ib, struct intermon_msg_queue_elem *elem);
+static void trace_intermon_notify_next_core(coreid_t origin_core);
+static void trace_intermon_send_prepare_finished(struct intermon_binding *ib, struct intermon_msg_queue_elem *elem);
+static void trace_monitor_send_prepare_finish(struct monitor_binding *mb, struct monitor_msg_queue_elem *elem);
+static void trace_monitor_prepare_finished_successfully(void *arg);
+
+// Global variables for communication with initiator program
+// NOTE(review): single global slot — only one trace preparation may be in
+// flight at a time; verify initiators are serialized.
+static struct monitor_binding *initiator_to_monitor_binding;
+static struct monitor_msg_queue_elem *initiator_monitor_elem;
+
+//------------------------------------------------------------------------------
+// Time measurement
+//------------------------------------------------------------------------------
+
+// Queueable state for one NTP-style time measurement exchange; elem must be
+// the first member so the queue element can be cast back to this struct.
+struct trace_measure_time_state {
+ struct intermon_msg_queue_elem elem;
+ coreid_t origin_core;
+ uint64_t t0;
+ uint64_t t1;
+};
+
+/*
+ * Perform a time measurement, relative to core 0.
+ *
+ * Core 0 is the time reference: it sets its own offset to 0 and immediately
+ * passes the prepare chain on. Any other core asks core 0 for timestamps
+ * (trace_measure) and computes its offset in trace_intermon_measure_ack_recv.
+ */
+static void trace_monitor_measure_time(coreid_t origin_core)
+{
+ printf("trace_monitor_measure_time on core: %d\n", my_core_id);
+
+ if(my_core_id == 0) {
+
+ dispatcher_handle_t handle = curdispatcher();
+ struct dispatcher_generic *disp = get_dispatcher_generic(handle);
+ struct trace_buffer *trace_buf = disp->trace_buf;
+
+ // Core 0 is the reference clock; offset is zero by definition.
+ trace_buf->t_offset = 0;
+
+ // Notify next core
+ trace_intermon_notify_next_core(origin_core);
+
+ } else {
+
+ // Measure time relative to core 0
+
+ // TODO implement
+ // NOTE(review): the TODO above looks stale — the measurement request
+ // below appears to be the implementation; confirm and remove.
+
+ // Freed by the MKCONT(free, state) continuation on successful send.
+ struct trace_measure_time_state *state = malloc(sizeof(struct trace_measure_time_state));
+
+ struct intermon_binding *ib;
+ errval_t err = intermon_binding_get(0, &ib);
+ assert(err_is_ok(err));
+
+ state->elem.cont = trace_intermon_send_time_measurement_request;
+ state->origin_core = origin_core;
+
+ trace_intermon_send_time_measurement_request(ib, &state->elem);
+
+ }
+}
+
+/*
+ * Send a time measurement request to core 0.
+ *
+ * t0 (local rdtsc at send time) travels with the request; on
+ * FLOUNDER_ERR_TX_BUSY the element is queued and this function is re-run as
+ * the queue continuation, which re-reads rdtsc for a fresh t0.
+ */
+static void trace_intermon_send_time_measurement_request(struct intermon_binding *ib, struct intermon_msg_queue_elem *elem)
+{
+ errval_t err;
+
+ struct trace_measure_time_state *state = (struct trace_measure_time_state*) elem;
+
+ err = ib->tx_vtbl.trace_measure(ib, MKCONT(free, state), state->origin_core, rdtsc());
+
+ if(err_no(err) == FLOUNDER_ERR_TX_BUSY) {
+ // Sending failed, so it must be repeated
+ struct intermon_state *ist = ib->st;
+ err = intermon_enqueue_send(ib, &ist->queue, get_default_waitset(), &state->elem.queue);
+ assert(err_is_ok(err));
+ } else if(err_is_fail(err)) {
+ USER_PANIC_ERR(err, "Sending trace_intermon_send_time_measurement_request failed");
+ // TODO error handling
+ } // Else: Everything is ok, do nothing
+
+
+}
+
+/*
+ * We received a message for a time measurement.
+ *
+ * Records the receive timestamp t1 and schedules the ack carrying (t0, t1)
+ * plus a fresh t2 taken at actual send time.
+ */
+static void trace_intermon_measure_recv(struct intermon_binding *ib, coreid_t origin_core, uint64_t t0)
+{
+ // All measurements are relative to core 0, thus this monitor must be running
+ // on core 0.
+ assert(my_core_id == 0);
+
+ uint64_t t1 = rdtsc();
+
+ // Freed by the MKCONT(free, state) continuation on successful send.
+ // NOTE(review): malloc return value is not checked.
+ struct trace_measure_time_state *state = malloc(sizeof(struct trace_measure_time_state));
+
+ state->elem.cont = trace_intermon_send_time_measurement_ack;
+ state->origin_core = origin_core;
+ state->t0 = t0;
+ state->t1 = t1;
+
+ trace_intermon_send_time_measurement_ack(ib, &state->elem);
+}
+
+/*
+ * Send a time measurement back to the core who started the time measurement.
+ * The third timestamp (t2) is taken at the actual moment of transmission.
+ */
+static void trace_intermon_send_time_measurement_ack(struct intermon_binding *ib, struct intermon_msg_queue_elem *elem)
+{
+ errval_t err;
+
+ struct trace_measure_time_state *state = (struct trace_measure_time_state*) elem;
+
+ err = ib->tx_vtbl.trace_measure_ack(ib, MKCONT(free, state), state->origin_core, state->t0, state->t1, rdtsc());
+
+ if(err_no(err) == FLOUNDER_ERR_TX_BUSY) {
+ // Sending failed, so it must be repeated
+ struct intermon_state *ist = ib->st;
+ err = intermon_enqueue_send(ib, &ist->queue, get_default_waitset(), &state->elem.queue);
+ assert(err_is_ok(err));
+ } else if(err_is_fail(err)) {
+ USER_PANIC_ERR(err, "Sending trace_intermon_send_time_measurement_ack failed");
+ // TODO error handling
+ } // Else: Everything is ok, do nothing
+
+
+}
+
+/*
+ * The monitor who started a time measurement received the response from core 0.
+ *
+ * Computes the local clock's offset to core 0 using the NTP on-wire formula
+ * theta = ((t1-t0)+(t2-t3))/2 and stores it in the local trace buffer.
+ */
+static void trace_intermon_measure_ack_recv(struct intermon_binding* ib, coreid_t origin_core, uint64_t t0, uint64_t t1, uint64_t t2)
+{
+ uint64_t t3 = rdtsc();
+
+ printf("NTP result: %" PRIu64 " %" PRIu64 " %" PRIu64 " %" PRIu64 "\n", t0,t1,t2,t3);
+
+ // Network Time Protocol formula ( *-1 because we calculated the offset of
+ // core 0 to this one, but we want the opposite).
+ // NOTE(review): the sum is evaluated in unsigned uint64 arithmetic before
+ // the division; if (t1-t0)+(t2-t3) is logically negative, the unsigned
+ // division by 2 does not equal signed division — verify intended behavior.
+ int64_t offset = -1 * (((t1-t0)+(t2-t3))/2);
+
+ dispatcher_handle_t handle = curdispatcher();
+ struct dispatcher_generic *disp = get_dispatcher_generic(handle);
+ struct trace_buffer *trace_buf = disp->trace_buf;
+
+ trace_buf->t_offset = offset;
+
+ // Notify next core
+ trace_intermon_notify_next_core(origin_core);
+}
+
+//------------------------------------------------------------------------------
+// Intermonitor Communication for Preparation
+//------------------------------------------------------------------------------
+
+// Queueable state for one hop of the prepare chain; elem must be the first
+// member so the queue element can be cast back to this struct.
+struct trace_prepare_state {
+ struct intermon_msg_queue_elem elem;
+ coreid_t origin_core;
+};
+
+/*
+ * Notify either the next core to send prepare, or if there is no next core,
+ * notify the origin core that the preparation is finished.
+ *
+ * "Next" is the lowest core id above my_core_id for which an intermonitor
+ * binding exists, so the prepare message walks the cores in ascending order
+ * starting from core 0.
+ */
+static void trace_intermon_notify_next_core(coreid_t origin_core)
+{
+ coreid_t next_core = my_core_id;
+
+ // NOTE(review): malloc return value is not checked, and `state` is leaked
+ // on the no-more-cores branch below where it is never handed to a send.
+ struct trace_prepare_state *state = malloc(sizeof (struct trace_prepare_state));
+ state->origin_core = origin_core;
+ state->elem.cont = trace_intermon_send_prepare;
+
+ bool has_more_cores = true;
+ errval_t err = SYS_ERR_OK;
+
+ struct intermon_binding *ib;
+
+ // Search for the next core to send the prepare message to
+ do {
+
+ if (next_core == MAX_COREID) {
+ // There are no more cores to notify
+ has_more_cores = false;
+ break;
+ }
+
+ next_core++;
+
+ // A failing lookup means no binding to that core; keep scanning upward.
+ err = intermon_binding_get(next_core, &ib);
+
+ } while(err_is_fail(err));
+
+
+ if (has_more_cores) {
+ // There is a another core
+
+ assert(ib != NULL);
+ assert(next_core <= MAX_COREID);
+
+ printf("Sending a prepare to core: %d\n", next_core);
+
+ trace_intermon_send_prepare(ib, &state->elem);
+
+ } else {
+ // There is no more core, notify the origin core
+
+ if (my_core_id == origin_core) {
+ // We are the origin core, just notify the user program
+
+ assert(initiator_to_monitor_binding != NULL);
+
+ initiator_monitor_elem = malloc(sizeof(struct monitor_msg_queue_elem));
+ initiator_monitor_elem->cont = trace_monitor_send_prepare_finish;
+
+ trace_monitor_send_prepare_finish(initiator_to_monitor_binding, initiator_monitor_elem);
+
+ } else {
+ // Notify the monitor on the core on which the initiator is running
+
+ err = intermon_binding_get(origin_core, &ib);
+
+ assert(err_is_ok(err));
+
+ // NOTE(review): elem->cont is never assigned; if the send below hits
+ // FLOUNDER_ERR_TX_BUSY, the queued element has no continuation —
+ // verify this path against the queue implementation.
+ struct intermon_msg_queue_elem *elem = malloc(sizeof(struct intermon_msg_queue_elem));
+
+ trace_intermon_send_prepare_finished(ib, elem);
+
+ }
+
+ }
+
+}
+
+/*
+ * Send a message from a monitor to the monitor on which the initiator is running,
+ * to tell the monitor that it should tell the initiator that the preparation is
+ * finished. elem is freed by the MKCONT(free, elem) continuation on success.
+ */
+static void trace_intermon_send_prepare_finished(struct intermon_binding *ib, struct intermon_msg_queue_elem *elem)
+{
+ errval_t err;
+
+ err = ib->tx_vtbl.trace_prepare_finished(ib, MKCONT(free, elem));
+
+ if(err_no(err) == FLOUNDER_ERR_TX_BUSY) {
+ // Sending failed, so it must be repeated
+ struct intermon_state *ist = ib->st;
+ err = intermon_enqueue_send(ib, &ist->queue, get_default_waitset(), &elem->queue);
+ assert(err_is_ok(err));
+ } else if(err_is_fail(err)) {
+ USER_PANIC_ERR(err, "Sending trace_prepare_finished failed");
+ // TODO error handling
+ } // Else: Everything is ok, do nothing
+
+
+}
+
+/*
+ * The monitor on which the initiator is running received a message from a
+ * different monitor, telling it that the preparation is finished. Forward this
+ * information to the user program.
+ */
+static void trace_intermon_prepare_finished_recv(struct intermon_binding *ib)
+{
+ assert(initiator_to_monitor_binding != NULL);
+
+ // Freed in trace_monitor_prepare_finished_successfully once the message
+ // to the user program has been sent.
+ initiator_monitor_elem = malloc(sizeof(struct monitor_msg_queue_elem));
+ initiator_monitor_elem->cont = trace_monitor_send_prepare_finish;
+
+ trace_monitor_send_prepare_finish(initiator_to_monitor_binding, initiator_monitor_elem);
+}
+
+/*
+ * Send a prepare message to the next monitor.
+ *
+ * Doubles as the queue continuation for FLOUNDER_ERR_TX_BUSY retries; state
+ * is freed by the MKCONT(free, state) continuation on successful send.
+ */
+static void trace_intermon_send_prepare(struct intermon_binding *ib, struct intermon_msg_queue_elem *elem)
+{
+ errval_t err;
+
+ struct trace_prepare_state *state = (struct trace_prepare_state*) elem;
+
+ err = ib->tx_vtbl.trace_prepare(ib, MKCONT(free, state), state->origin_core);
+
+ if(err_no(err) == FLOUNDER_ERR_TX_BUSY) {
+ // Sending failed, so it must be repeated
+ printf("trace_intermon_send_prepare flounder busy\n");
+ struct intermon_state *ist = ib->st;
+ err = intermon_enqueue_send(ib, &ist->queue,
+ get_default_waitset(), &state->elem.queue);
+ assert(err_is_ok(err));
+ } else if(err_is_fail(err)) {
+ USER_PANIC_ERR(err, "Sending trace prepare failed");
+ // TODO error handling
+ } // Else: Everything is ok, do nothing
+
+}
+
+/*
+ * This monitor has received a prepare message.
+ * Triggers the local time measurement, which passes the chain on when done.
+ */
+static void trace_intermon_prepare_recv(struct intermon_binding *ib, coreid_t origin_core)
+{
+ trace_monitor_measure_time(origin_core);
+}
+
+//------------------------------------------------------------------------------
+// Monitor communicating with user program
+//------------------------------------------------------------------------------
+
+/*
+ * This is the function that is invoked when we receive a prepare message from
+ * a user program.
+ *
+ * Core 0 starts the measure/prepare chain locally; any other core first asks
+ * core 0 to start, since all time measurements are relative to core 0.
+ */
+static void trace_monitor_prepare_recv(struct monitor_binding *mb, coreid_t origin_core)
+{
+ printf("trace_monitor_prepare_recv\n");
+
+ // Remember the initiator's binding so the finished message can be routed back.
+ initiator_to_monitor_binding = mb;
+
+ if (my_core_id == 0) {
+ // Perform time measurement, and notify next core afterwards
+
+ return trace_monitor_measure_time(origin_core);
+ } else {
+ // Notify core 0 to start measurement
+
+ // Freed by the MKCONT(free, state) continuation on successful send.
+ struct trace_prepare_state *state = malloc(sizeof(struct trace_prepare_state));
+ state->origin_core = origin_core;
+ state->elem.cont = trace_intermon_send_prepare;
+
+ struct intermon_binding *ib;
+
+ errval_t err = intermon_binding_get(0, &ib);
+
+ assert(err_is_ok(err));
+
+ trace_intermon_send_prepare(ib, &state->elem);
+ }
+}
+
+/*
+ * This function sends the prepare_finished message from the monitor to the
+ * user program.
+ *
+ * NOTE(review): on success the continuation frees the global
+ * initiator_monitor_elem rather than the elem argument — these are the same
+ * object on current call paths, but verify.
+ */
+static void trace_monitor_send_prepare_finish(struct monitor_binding *mb, struct monitor_msg_queue_elem *elem)
+{
+
+ errval_t err;
+
+ err = mb->tx_vtbl.trace_prepare_finished(mb,
+ MKCONT(trace_monitor_prepare_finished_successfully, NULL));
+
+ if(err_no(err) == FLOUNDER_ERR_TX_BUSY) {
+ // Sending failed, so it must be repeated
+ struct monitor_state *ist = mb->st;
+ err = monitor_enqueue_send(mb, &ist->queue, get_default_waitset(), &elem->queue);
+ assert(err_is_ok(err));
+ } else if(err_is_fail(err)) {
+ USER_PANIC_ERR(err, "Sending trace_prepare_finished failed");
+ // TODO error handling
+ } // Else: everything is ok, do nothing
+}
+
+/*
+ * This function is called to reset the state, once the preparation is finished.
+ * Frees the queued element and clears the initiator bookkeeping globals.
+ */
+static void trace_monitor_prepare_finished_successfully(void *arg)
+{
+ free(initiator_monitor_elem);
+ initiator_monitor_elem = NULL;
+ initiator_to_monitor_binding = NULL;
+}
+
+//------------------------------------------------------------------------------
+// Message Table Initialization
+//------------------------------------------------------------------------------
+
+
+// set up receive vtable in the intermonitor interface
+errval_t trace_intermon_init(struct intermon_binding *ib)
+{
+ ib->rx_vtbl.trace_prepare = &trace_intermon_prepare_recv;
+ ib->rx_vtbl.trace_prepare_finished = &trace_intermon_prepare_finished_recv;
+ ib->rx_vtbl.trace_measure = &trace_intermon_measure_recv;
+ ib->rx_vtbl.trace_measure_ack = &trace_intermon_measure_ack_recv;
+
+ return SYS_ERR_OK;
+}
+
+// set up receive vtable in the monitor interface
+// (only trace_prepare arrives from user programs; the finished message is
+// outbound-only on this binding)
+errval_t trace_monitor_init(struct monitor_binding *mb)
+{
+ mb->rx_vtbl.trace_prepare = &trace_monitor_prepare_recv;
+
+ return SYS_ERR_OK;
+}
#include <barrelfish/barrelfish.h>
#include <bench/bench.h>
#include <trace/trace.h>
+#include <trace_definitions/trace_defs.h>
#include <inttypes.h>
#define STACK_SIZE (64 * 1024)
#if CONFIG_TRACE
char *buf = malloc(4096*4096);
- trace_dump(buf, 4096*4096);
+ trace_dump(buf, 4096*4096, NULL);
printf("%s\n", buf);
#endif
#include <if/bench_defs.h>
#include <unistd.h>
#include <trace/trace.h>
+#include <trace_definitions/trace_defs.h>
static char my_name[100];
#if CONFIG_TRACE
// dump trace
char *buf = malloc(50*4096*4096);
- size_t length = trace_dump(buf, 20*4096*4096);
+ size_t length = trace_dump(buf, 20*4096*4096, NULL);
printf("%s\n", buf);
printf("length of buffer %lu\n", length);
#endif
#include <barrelfish/waitset.h>
#include <barrelfish/nameservice_client.h>
#include <trace/trace.h>
+#include <trace_definitions/trace_defs.h>
#include <stdio.h>
#include <lwip/pbuf.h>
#include <lwip/udp.h>
#include <barrelfish/barrelfish.h>
#include <barrelfish/dispatch.h>
#include <trace/trace.h>
+#include <trace_definitions/trace_defs.h>
#define LOCKDEC(x) spinlock_t x;
uint64_t before = rdtsc();
times[0] = before;
- trace_event(TRACE_SUBSYS_BENCH, TRACE_EVENT_PCBENCH, 1);
+ trace_event(TRACE_SUBSYS_BENCH, TRACE_EVENT_BENCH_PCBENCH, 1);
for (int i = 1; i < cores; i++) {
err = domain_new_dispatcher(i + disp_get_core_id(),
domain_spanned_callback,
}
uint64_t finish = rdtsc();
- trace_event(TRACE_SUBSYS_BENCH, TRACE_EVENT_PCBENCH, 0);
+ trace_event(TRACE_SUBSYS_BENCH, TRACE_EVENT_BENCH_PCBENCH, 0);
//sys_print("\nDone\n", 6);
printf("spantest: Done in %ld cycles\n", finish-before);
#include <barrelfish/threads.h>
#include "trace/trace.h"
+#include <trace_definitions/trace_defs.h>
#include <arch/x86/barrelfish_kpi/asm_inlines_arch.h>
}
MAIN_TASK(main, args) {
- trace_event(TRACE_SUBSYS_BENCH, TRACE_EVENT_PCBENCH, 1);
+ trace_event(TRACE_SUBSYS_BENCH, TRACE_EVENT_BENCH_PCBENCH, 1);
trace_event(TRACE_SUBSYS_TWEED, TRACE_EVENT_TWEED_START, 0);
uint64_t start = rdtsc();
int fib_res = CALL(fib, 1, 35);
uint64_t end = rdtsc();
trace_event(TRACE_SUBSYS_TWEED, TRACE_EVENT_TWEED_END, 0);
- trace_event(TRACE_SUBSYS_BENCH, TRACE_EVENT_PCBENCH, 0);
+ trace_event(TRACE_SUBSYS_BENCH, TRACE_EVENT_BENCH_PCBENCH, 0);
printf("cycles taken - %ld\n", (end - start));
printf("result - %d\n", (fib_res));
return 0;
#include <if/xcorecap_defs.h>
#include <barrelfish/nameservice_client.h>
#include <trace/trace.h>
+#include <trace_definitions/trace_defs.h>
#include "xcorecap.h"
/* --- Binding handlers --- */
{
create_ram_cap(&ram_cap);
- trace_event(TRACE_SUBSYS_BENCH, TRACE_EVENT_PCBENCH, 1);
+ trace_event(TRACE_SUBSYS_BENCH, TRACE_EVENT_BENCH_PCBENCH, 1);
/* send cap */
errval_t err = b->tx_vtbl.send_cap(b, NOP_CONT, ram_cap);
#include <lwip/init.h>
#include <lwip/ip_addr.h>
#include <trace/trace.h>
+#include <trace_definitions/trace_defs.h>
#include <timer/timer.h>
#include <contmng/netbench.h>
#include "webserver_network.h"
trace_event(TRACE_SUBSYS_NET, TRACE_EVENT_NET_STOP, 0);
char *buf = malloc(4096*4096);
- trace_dump(buf, 4096*4096);
+ trace_dump(buf, 4096*4096, NULL);
printf("%s\n", buf);
#endif // ENABLE_WEB_TRACING