initial design and layout of the Xeon Phi DMA functionality.
"xeon_phi/xeon_phi_apic",
"xeon_phi/xeon_phi_smpt",
"xeon_phi/xeon_phi_irq",
+ "xeon_phi/xeon_phi_dma",
"pci_sr_iov_cap"
], arch <- allArchitectures
] ++
* description: register definitions for the Xeon Phi DMA
*/
-device xeon_phi_dma lsbfirst ( addr base ) "Intel Xeon Phi DMA System" {
-
- regarray car rw addr(base, 0xA000) [0x0, 0x40, 0x80, 0xC0, 0x100, 0x140, 0x180, 0x1C0] "DMA Channel Attribute Register" {
- _ 23 "";
- apic_irq 1 "APIC Interrupt mask bit";
- msix_irq 1 "MSI-X Interrupt mask bit";
- irq_status 1 "Interrupt status";
- _ 7 "";
+device xeon_phi_dma lsbfirst(addr base) "Intel Xeon Phi DMA System" {
+
+ regarray car rw addr(base, 0xA000) [8; 0x40] "DMA Channel Attribute Register" {
+ _ 23 "";
+ apic_irq 1 "APIC Interrupt mask bit";
+ msix_irq 1 "MSI-X Interrupt mask bit";
+ irq_status 1 "Interrupt status";
+ _ 6 "";
};
-
+
/*
* Protection Level: Ring 0
* Visibility: Host / Coprocessor
* Register Access: CRU
* Number: 8
*/
- regarray dhpr rw addr(base, 0xA004) [0x0, 0x40, 0x80, 0xC0, 0x100, 0x140, 0x180, 0x1C0] "DMA Descriptor Head Pointer Register" {
- r 32 "";
+ regarray dhpr rw addr(base, 0xA004) [8; 0x40] "DMA Descriptor Head Pointer Register" {
+ index 16 "Index of the head pointer";
+ _ 16 "Reserved";
};
-
+
/*
* Protection Level: Ring 0
* Visibility: Host / Coprocessor
* Register Access: CRU
* Number: 8
*/
- regarray dtpr rw addr(base, 0xA008) [0x0, 0x40, 0x80, 0xC0, 0x100, 0x140, 0x180, 0x1C0] "DMA Descriptor Tail Pointer Register" {
- r 32 "";
+ regarray dtpr rw addr(base, 0xA008) [8; 0x40] "DMA Descriptor Tail Pointer Register" {
+ index 16 "Index of the tail pointer";
+ _ 16 "Reserved";
};
-
- /*
+
+ /*
* Protection Level: Ring 0
* Visibility: Host / Coprocessor
* Reset Domain: GRPB_Reset
* Register Access: CRU
* Number: 8
*/
- regarray aux_lo rw addr(base, 0xA00C) [0x0, 0x40, 0x80, 0xC0, 0x100, 0x140, 0x180, 0x1C0] "DMA Auxiliary Register 0 Lo" {
+ regarray aux_lo rw addr(base, 0xA00C) [8; 0x40] "DMA Auxiliary Register 0 Lo" {
r 32 "";
};
-
- /*
+
+ /*
* Protection Level: Ring 0
* Visibility: Host / Coprocessor
* Reset Domain: GRPB_Reset
* Register Access: CRU
* Number: 8
*/
- regarray aux_hi rw addr(base, 0xA010) [0x0, 0x40, 0x80, 0xC0, 0x100, 0x140, 0x180, 0x1C0] "DMA Auxiliary Register 0 Hi" {
+ regarray aux_hi rw addr(base, 0xA010) [8; 0x40] "DMA Auxiliary Register 0 Hi" {
r 32 "";
};
-
- /*
+
+ /*
* Protection Level: Ring 0
* Visibility: Host / Coprocessor
* Reset Domain: GRPB_Reset
* Register Access: CRU
* Number: 8
*/
- regarray drar rw addr(base, 0xA014) [0x0, 0x40, 0x80, 0xC0, 0x100, 0x140, 0x180, 0x1C0] "DMA Descriptor Ring Attributes Register Lo" {
- _ 6 "";
+ regarray drar rw addr(base, 0xA014) [8; 0x40] "DMA Descriptor Ring Attributes Register Lo" {
+ _ 6 "";
base 30 "base address";
- _ 2 "";
+ _ 2 "";
size 15 "size of the descriptor ring";
- page 5 "";
- _ 6 "";
+ page 5 "";
+ _ 6 "";
};
-
-
- /*
+
+ /*
* Protection Level: Ring 0
* Visibility: Host / Coprocessor
* Reset Domain: GRPB_Reset
* Register Access: CRU
* Number: 8
*/
- regarray ditr rw addr(base, 0xA01C) [0x0, 0x40, 0x80, 0xC0, 0x100, 0x140, 0x180, 0x1C0] "DMA Interrupt Timer Register" {
+ regarray ditr rw addr(base, 0xA01C) [8; 0x40] "DMA Interrupt Timer Register" {
r 32 "";
};
-
- /*
+
+ /*
* Protection Level: Ring 0
* Visibility: Host / Coprocessor
* Reset Domain: GRPB_Reset
* Register Access: CRU
* Number: 8
*/
- regarray dstat rw addr(base, 0xA020) [0x0, 0x40, 0x80, 0xC0, 0x100, 0x140, 0x180, 0x1C0] "DMA Status Channel Register" {
+ regarray dstat rw addr(base, 0xA020) [8; 0x40] "DMA Status Channel Register" {
r 32 "";
};
-
- /*
+
+ /*
* Protection Level: Ring 0
* Visibility: Host / Coprocessor
* Reset Domain: GRPB_Reset
* Register Access: CRU
* Number: 8
*/
- regarray dstatwb_lo rw addr(base, 0xA024) [0x0, 0x40, 0x80, 0xC0, 0x100, 0x140, 0x180, 0x1C0] "DMA Tail Pointer Write Back Register Lo" {
+ regarray dstatwb_lo rw addr(base, 0xA024) [8; 0x40] "DMA Tail Pointer Write Back Register Lo" {
r 32 "";
};
-
- /*
+
+ /*
* Protection Level: Ring 0
* Visibility: Host / Coprocessor
* Reset Domain: GRPB_Reset
* Register Access: CRU
* Number: 8
*/
- regarray dstatwb_hi rw addr(base, 0xA028) [0x0, 0x40, 0x80, 0xC0, 0x100, 0x140, 0x180, 0x1C0] "DMA Tail Pointer Write Back Register Hi" {
+ regarray dstatwb_hi rw addr(base, 0xA028) [8; 0x40] "DMA Tail Pointer Write Back Register Hi" {
r 32 "";
};
-
- /*
+
+ /*
* Protection Level: Ring 0
* Visibility: Host / Coprocessor
* Reset Domain: GRPB_Reset
* Register Access: CRU
* Number: 8
*/
- regarray dcherr rw addr(base, 0xA02C) [0x0, 0x40, 0x80, 0xC0, 0x100, 0x140, 0x180, 0x1C0] "DMA Channel Error Register" {
+ regarray dcherr rw addr(base, 0xA02C) [8; 0x40] "DMA Channel Error Register" {
r 32 "";
};
-
- /*
+
+ /*
* Protection Level: Ring 0
* Visibility: Host / Coprocessor
* Reset Domain: GRPB_Reset
* Register Access: CRU
* Number: 8
*/
- regarray dcherrmsk rw addr(base, 0xA030) [0x0, 0x40, 0x80, 0xC0, 0x100, 0x140, 0x180, 0x1C0] "DMA Channel Error Register Mask" {
+ regarray dcherrmsk rw addr(base, 0xA030) [8; 0x40] "DMA Channel Error Register Mask" {
r 32 "";
};
-
+
/*
* Protection Level: Ring 0
* Visibility: Host / Coprocessor
* Register Access: CRU
*/
register dcr rw addr(base, 0xA280) "DMA Configuration Register" {
- co0 1 "DMA Channel 0 Owner";
- ce0 1 "DMA Channel 0 Enable";
- co1 1 "DMA Channel 0 Owner";
- ce1 1 "DMA Channel 1 Enable";
- co2 1 "DMA Channel 0 Owner";
- ce2 1 "DMA Channel 2 Enable";
- co3 1 "DMA Channel 0 Owner";
- ce3 1 "DMA Channel 3 Enable";
- co4 1 "DMA Channel 0 Owner";
- ce4 1 "DMA Channel 4 Enable";
- co5 1 "DMA Channel 0 Owner";
- ce5 1 "DMA Channel 5 Enable";
- co6 1 "DMA Channel 0 Owner";
- ce6 1 "DMA Channel 6 Enable";
- co7 1 "DMA Channel 0 Owner";
- ce7 1 "DMA Channel 7 Enable";
- arb_h 8 "Arb H";
- arb_l 7 "Arb L";
- p 1 "Priority EN";
+ co0 1 "DMA Channel 0 Owner";
+ ce0 1 "DMA Channel 0 Enable";
+ co1 1 "DMA Channel 1 Owner";
+ ce1 1 "DMA Channel 1 Enable";
+ co2 1 "DMA Channel 2 Owner";
+ ce2 1 "DMA Channel 2 Enable";
+ co3 1 "DMA Channel 3 Owner";
+ ce3 1 "DMA Channel 3 Enable";
+ co4 1 "DMA Channel 4 Owner";
+ ce4 1 "DMA Channel 4 Enable";
+ co5 1 "DMA Channel 5 Owner";
+ ce5 1 "DMA Channel 5 Enable";
+ co6 1 "DMA Channel 6 Owner";
+ ce6 1 "DMA Channel 6 Enable";
+ co7 1 "DMA Channel 7 Owner";
+ ce7 1 "DMA Channel 7 Enable";
+ arb_h 8 "Arb H";
+ arb_l 7 "Arb L";
+ p 1 "Priority EN";
};
-
+
/*
* Protection Level: Ring 0
* Visibility: Host / Coprocessor
register dqar rw addr(base, 0xA284) "Descriptor Queue Access Register" {
r 32 "";
};
-
+
/*
* Protection Level: Ring 0
* Visibility: Host / Coprocessor
register dqdr_tl rw addr(base, 0xA288) "Descriptor Queue Data Register Top Left" {
r 32 "";
};
-
+
/*
* Protection Level: Ring 0
* Visibility: Host / Coprocessor
register dqdr_tr rw addr(base, 0xA28C) "Descriptor Queue Data Register Top Right" {
r 32 "";
};
-
+
/*
* Protection Level: Ring 0
* Visibility: Host / Coprocessor
register dqdr_bl rw addr(base, 0xA290) "Descriptor Queue Data Register Bottom Left" {
r 32 "";
};
-
+
/*
* Protection Level: Ring 0
* Visibility: Host / Coprocessor
register dqdr_br rw addr(base, 0xA294) "Descriptor Queue Data Register Bottom Right" {
r 32 "";
};
-
+
/*
* Protection Level: Ring 0
* Visibility: Host / Coprocessor
register misc rw addr(base, 0xA2A4) "Misc DMA Bits" {
r 32 "";
};
-
+
/*
* Protection Level: Ring 0
* Visibility: Host / Coprocessor
r 32 "";
};
+ /*
+ * Xeon Phi Descriptors
+ * 16 bytes in length
+ *
+ */
+
+ constants descriptor_type width(4) "" {
+ desc_nop=0;
+ desc_memcpy=1;
+ desc_status = 2;
+ desc_general = 3;
+ desc_keynoncecent = 4;
+ desc_key = 5;
+ };
+
+ datatype desc_nop "No-Op descriptor type" {
+ _ 124 "";
+ dtype 4 "Descriptor type";
+ };
+
+ datatype desc_memcpy "Memory Copy descriptor type" {
+ src 40 "Source address";
+ index 3 "";
+ _ 3 "";
+ length 14 "";
+ _ 4 "";
+ dst 40 "Destination address";
+ resd 15 "";
+ twb 1 "";
+ intr 1 "";
+ c 1 "";
+ co 1 "";
+ ecy 1 "";
+ dtype 4 "";
+ };
+
+ datatype desc_status "Status request descriptor type" {
+ data 64 "";
+ dst 40 "";
+ resvdr 19 "";
+ intr 1 "";
+ dtype 4 "";
+ };
+
+ datatype desc_general "General descriptor type" {
+ data 32 "";
+ _ 32 "";
+ dst 40 "";
+ _ 20 "";
+ dtype 4 "";
+ };
+
+ datatype desc_keynoncecent "" {
+ data 64 "";
+ _ 53 "";
+ cs 1 "";
+ index 3 "";
+ h 1 "";
+ sel 2 "";
+ dtype 4 "";
+ };
+
+ datatype desc_key "" {
+ skap 40 "";
+ ski 3 "";
+ _ 72 "";
+ di 3 "";
+ _ 6 "";
+ dtype 4 "";
+ };
+
};
\ No newline at end of file
failure NOT_VIRTIO_DEVICE "The device is not a VirtIO device",
failure VERSION_MISMATCH "The VirtIO versions do mismatch",
failure DEVICE_STATUS "VirtIO device has the wrong status",
- failure DEVICE_TYPE "The VirtIO device is not of the expected type",
+ failure DEVICE_TYPE "The VirtIO device is not of the expected type",
failure DEVICE_IDLE "The VirtIO device is idle. No new requests.",
failure QUEUE_ACTIVE "The selected qeueue is already activated",
failure QUEUE_INVALID "The selected queue does not exist",
- failure QUEUE_BUSY "The queue is busy.",
+ failure QUEUE_BUSY "The queue is busy.",
failure BUFFER_SIZE "The buffer size is invalid.",
failure BUFFER_STATE "The state of the buffer / buffer list is invalid",
failure ARG_INVALID "The given argument is invalid.",
failure NO_DESC_AVAIL "There is no descriptor availabe",
failure DEQ_CHAIN "Not the entire chain could be dequeued",
failure INVALID_RING_INDEX "The supplied index is not valid",
- failure BLK_REQ_IOERR "The request ended in an IO error",
- failure BLK_REQ_UNSUP "The request type was not supported",
+ failure BLK_REQ_IOERR "The request ended in an IO error",
+ failure BLK_REQ_UNSUP "The request type was not supported",
};
errors xeon_phi XEON_PHI_ERR_ {
- failure DMA_ID_NOT_EXISTS "The DMA transfer with that ID does not exist ",
- failure DMA_BUSY "All DMA channels are busy",
- failure DMA_MEM_REGISTERED "The memory has not been registered",
+ failure DMA_ID_NOT_EXISTS "The DMA transfer with that ID does not exist ",
+ failure DMA_BUSY "All DMA channels are busy",
+ failure DMA_MEM_REGISTERED "The memory has not been registered",
+ failure DMA_RPC_IN_PROGRESS "There is currently an RPC going on",
+ failure DMA_MEM_ALIGN "The address / bytes has a wrong alignment",
+ failure DMA_NO_DESC "There are not enough DMA descriptors left",
+ failure DMA_REQ_SIZE "The requested transfer size is too big",
+ failure DMA_OUT_OF_RANGE "The physical address is out of range",
};
#define XEON_PHI_SYSMEM_SIZE_BITS 39
#define XEON_PHI_SYSMEM_SIZE (1ULL << XEON_PHI_SYSMEM_SIZE_BITS)
+#define XEON_PHI_MEM_MASK 0xFFFFFFFFFFULL
+
struct xeon_phi_boot_params
{
uint8_t reserved[0x54];
#define XEON_PHI_DMA_H
-#define XEON_PHI_DMA_SERVICE_NAME "xeon_phi_dma_svc"
-
-
/* for xeon_phi_dma_id_t */
#include <if/xeon_phi_dma_defs.h>
+/// the base name of the exported dma service
+#define XEON_PHI_DMA_SERVICE_NAME "xeon_phi_dma_svc"
+
+/// alignment for size and memory addresses
+#define XEON_PHI_DMA_ALIGNMENT 64
+
/**
* type definition of the xeon phi done callback
*/
/**
+ * \brief stops a previously started DMA transfer based on its ID
*
+ * \param id the ID of the transfer to stop
+ *
+ * \returns SYS_ERR_OK on success
+ * XEON_PHI_ERR_DMA_* on failure
*/
errval_t xeon_phi_dma_client_stop(xeon_phi_dma_id_t id);
struct xdma_req
{
xeon_phi_dma_id_t id;
+ uint8_t xphi_id;
struct xeon_phi_dma_cont cont;
struct xdma_req *next;
struct xdma_req *prev;
};
-#ifdef __k1om__
-/// service iref
-static iref_t xdma_svc_iref;
-
-/// service binding
-struct xeon_phi_dma_binding *xdma_binding;
-
-/// wait reply flag for RPC like functionality where needed
-uint8_t xdma_wait_reply = 0;
-#else
/// service irefs
static iref_t xdma_svc_iref[XEON_PHI_NUM_MAX];
struct xeon_phi_dma_binding *xdma_binding[XEON_PHI_NUM_MAX];
/// wait reply flags for RPC like functionality where needed
-uint8_t xdma_wait_reply[XEON_PHI_NUM_MAX]
-#endif
-
+uint8_t xdma_wait_reply[XEON_PHI_NUM_MAX];
/// pointer to all DMA requests
static struct xdma_req *requests;
/// the number of free requests
static uint32_t requests_free_count;
-
-
#define DEBUG_XDMA(x...) debug_printf(" [xdma] " x)
enum xpm_state
static enum xpm_state conn_state = XPM_STATE_NSLOOKUP;
+/**
+ * \brief looks up a pending request by its transfer ID and unlinks it
+ *        from the doubly linked pending list
+ *
+ * \param id the DMA transfer ID to look for
+ *
+ * \returns the unlinked request on success
+ *          NULL if no pending request has this ID
+ */
+static struct xdma_req *xdma_get_pending_request(xeon_phi_dma_id_t id)
+{
+    if (requests_pending_count == 0) {
+        return NULL;
+    }
+    struct xdma_req *req = requests_pending;
+    while (req) {
+        if (req->id == id) {
+            if (req->prev == NULL) {
+                /* request is at the head of the list */
+                requests_pending = req->next;
+                if (requests_pending) {
+                    /* guard: with a single element the new head is NULL */
+                    requests_pending->prev = NULL;
+                }
+            } else if (req->next == NULL) {
+                /* request is at the tail of the list */
+                req->prev->next = NULL;
+            } else {
+                /* request is in the middle of the list */
+                req->prev->next = req->next;
+                req->next->prev = req->prev;
+            }
+            req->next = NULL;
+            req->prev = NULL;
+
+            requests_pending_count--;
+
+            return req;
+        }
+        /* advance to the next element; without this the loop never
+         * terminates when the ID is not at the head of the list */
+        req = req->next;
+    }
+    return NULL;
+}
+
+/**
+ * \brief inserts a request at the head of the pending-request list
+ *
+ * \param req the request to insert; becomes the new list head
+ */
+static void xdma_insert_pending_request(struct xdma_req *req)
+{
+    if (requests_pending == NULL) {
+        requests_pending = req;
+        req->next = NULL;
+    } else {
+        /* push onto the head of the doubly linked list */
+        requests_pending->prev = req;
+        req->next = requests_pending;
+        requests_pending = req;
+    }
+    /* the head element never has a predecessor */
+    req->prev = NULL;
+
+    requests_pending_count++;
+}
+
+/**
+ * \brief takes a request structure off the free list
+ *
+ * \returns pointer to an unlinked request on success
+ *          NULL if the free list is empty
+ */
+static struct xdma_req *xdma_get_free_request(void)
+{
+    struct xdma_req *head;
+
+    if (!requests_free_count) {
+        return NULL;
+    }
+
+    /* pop the head of the singly linked free list */
+    head = requests_free;
+    requests_free = head->next;
+    head->next = NULL;
+    requests_free_count--;
+
+    return head;
+}
+
+/**
+ * \brief returns a request structure to the head of the free list
+ *
+ * \param req the request to recycle
+ */
+static void xdma_insert_free_request(struct xdma_req *req)
+{
+    req->next = requests_free;
+    requests_free = req;
+    requests_free_count++;
+}
+
/*
* forward declarations for the recv messages
*/
+static void xdma_register_response_rx(struct xeon_phi_dma_binding *_binding,
+ xeon_phi_dma_errval_t msgerr);
+static void xdma_deregister_response_rx(struct xeon_phi_dma_binding *_binding,
+ xeon_phi_dma_errval_t msgerr);
+static void xdma_exec_response_rx(struct xeon_phi_dma_binding *_binding,
+ xeon_phi_dma_errval_t err,
+ xeon_phi_dma_id_t id);
+static void xdma_stop_response_rx(struct xeon_phi_dma_binding *_binding,
+ xeon_phi_dma_errval_t err);
+static void xdma_done_rx(struct xeon_phi_dma_binding *_binding,
+ xeon_phi_dma_id_t id,
+ xeon_phi_dma_errval_t err);
struct xeon_phi_dma_rx_vtbl xdma_rx_vtbl = {
- .register_call = NULL
+ .register_response = xdma_register_response_rx,
+ .deregister_response = xdma_deregister_response_rx,
+ .exec_response = xdma_exec_response_rx,
+ .stop_response = xdma_stop_response_rx,
+ .done = xdma_done_rx
};
/**
errval_t err,
struct xeon_phi_dma_binding *b)
{
-
if (err_is_fail(err)) {
conn_state = XPM_STATE_BIND_FAIL;
return;
}
b->rx_vtbl = xdma_rx_vtbl;
-#ifdef __k1om__
- xdma_binding = b;
-#else
- uint8_t xphi_id = (uint8_t)(uintptr_t)st;
+
+ uint8_t xphi_id = (uint8_t) (uintptr_t) st;
xdma_binding[xphi_id] = b;
-#endif
+
DEBUG_XDMA("Binding to xdma service ok.\n");
conn_state = XPM_STATE_BIND_OK;
/**
* \brief
*/
-#ifdef __k1om__
-static errval_t xdma_connect(void)
-{
- errval_t err;
-
- if (xdma_binding != NULL) {
- return SYS_ERR_OK;
- }
-
-#else
static errval_t xdma_connect(uint8_t xphi_id)
{
errval_t err;
return SYS_ERR_OK;
}
-#endif
-
char buf[50];
-#if !defined(__k1om__)
snprintf(buf, 50, "%s.%u", XEON_PHI_DMA_SERVICE_NAME, xphi_id);
-#else
- snprintf(buf, 50, "%s", XEON_PHI_DMA_SERVICE_NAME);
-#endif
iref_t svc_iref;
conn_state = XPM_STATE_BINDING;
DEBUG_XDMA("binding to iref [%u]... \n", svc_iref);
-#ifdef __k1om__
- xdma_svc_iref = svc_iref;
- err = xeon_phi_dma_bind(xdma_svc_iref, xdma_bind_cb,
- NULL,
- get_default_waitset(),
- IDC_BIND_FLAGS_DEFAULT);
-#else
xdma_svc_iref[xphi_id] = svc_iref;
- err = xeon_phi_dma_bind(svc_iref, xdma_bind_cb,
- (void *)(uintptr_t)xphi_id,
- get_default_waitset(),
- IDC_BIND_FLAGS_DEFAULT);
-#endif
+ err = xeon_phi_dma_bind(svc_iref,
+ xdma_bind_cb,
+ (void *) (uintptr_t) xphi_id,
+ get_default_waitset(),
+ IDC_BIND_FLAGS_DEFAULT);
if (err_is_fail(err)) {
return err;
}
* ---------------------------------------------------------------------------
*/
-struct xdma_reg_msg_st
+static struct xdma_reg_msg_st
{
+ struct capref mem;
+ errval_t err;
+ uint8_t xphi_id;
+} xdma_reg_msg_st[XEON_PHI_NUM_MAX];
+static void xdma_register_response_rx(struct xeon_phi_dma_binding *_binding,
+ xeon_phi_dma_errval_t msgerr)
+{
+ uint8_t xphi_id = (uint8_t) (uintptr_t) _binding->st;
-};
+ DEBUG_XDMA("received register response [%u, %lu]\n", xphi_id, msgerr);
+
+ assert(xdma_reg_msg_st[xphi_id].xphi_id == xphi_id);
+
+ xdma_reg_msg_st[xphi_id].err = msgerr;
+ xdma_wait_reply[xphi_id] = 0x0;
+}
+
+/**
+ * \brief transmits the register call to the DMA service; re-arms itself
+ *        via register_send when the flounder channel is busy
+ *
+ * \param a pointer to the per-card struct xdma_reg_msg_st
+ */
+static void xdma_register_call_tx(void *a)
+{
+    errval_t err;
+
+    struct xdma_reg_msg_st *msg_st = a;
+
+    struct xeon_phi_dma_binding *b = xdma_binding[msg_st->xphi_id];
+
+    struct event_closure txcont = MKCONT(NULL, a);
+    err = xeon_phi_dma_register_call__tx(b, txcont, msg_st->mem);
+    if (err_is_fail(err)) {
+        if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
+            /* channel busy: retry once a send slot becomes available */
+            txcont = MKCONT(xdma_register_call_tx, a);
+            err = b->register_send(b, get_default_waitset(), txcont);
+        }
+        if (err_is_fail(err)) {
+            /* report any unrecoverable error and release the waiting
+             * caller; previously non-BUSY errors were dropped and the
+             * caller spun on xdma_wait_reply forever */
+            msg_st->err = err;
+            xdma_wait_reply[msg_st->xphi_id] = 0x0;
+        }
+    }
+}
/**
* \brief registers a physical memory region to be used for DMA transfers
struct capref mem)
{
errval_t err = SYS_ERR_OK;
- struct xeon_phi_dma_binding *bind;
#ifdef __k1om__
- if (xdma_binding == NULL) {
- err = xdma_connect();
- }
- bind = xdma_binding;
-#else
+ assert(xphi_id == 0);
+#endif
if (xdma_binding[xphi_id] == NULL) {
err = xdma_connect(xphi_id);
+ if (err_is_fail(err)) {
+ return err;
+ }
}
- bind = xdma_binding[xphi_id];
-#endif
- if(err_is_fail(err)) {
- return err;
+
+ if (xdma_wait_reply[xphi_id]) {
+ return XEON_PHI_ERR_DMA_RPC_IN_PROGRESS;
}
- return SYS_ERR_OK;
-}
+ xdma_wait_reply[xphi_id] = 0x1;
+
+ struct xdma_reg_msg_st *st = xdma_reg_msg_st + xphi_id;
+ st->xphi_id = xphi_id;
+ st->err = SYS_ERR_OK;
+ st->mem = mem;
+ xdma_register_call_tx(st);
+
+ while (xdma_wait_reply[xphi_id]) {
+ messages_wait_and_handle_next();
+ }
+
+ return st->err;
+}
/*
* ---------------------------------------------------------------------------
* ---------------------------------------------------------------------------
*/
-struct xdma_dereg_msg_st
+static struct xdma_dereg_msg_st
{
+ struct capref mem;
+ errval_t err;
+ uint8_t xphi_id;
+} xdma_dereg_msg_st[XEON_PHI_NUM_MAX];
+static void xdma_deregister_response_rx(struct xeon_phi_dma_binding *_binding,
+ xeon_phi_dma_errval_t msgerr)
+{
+ uint8_t xphi_id = (uint8_t) (uintptr_t) _binding->st;
-};
+ DEBUG_XDMA("received deregister response [%u, %lu]\n", xphi_id, msgerr);
+ xdma_wait_reply[xphi_id] = 0x0;
+}
-/*
- * ---------------------------------------------------------------------------
- * DMA start new transfer
- * ---------------------------------------------------------------------------
+/**
+ * \brief transmits the deregister call to the DMA service; re-arms itself
+ *        via register_send when the flounder channel is busy
+ *
+ * \param a pointer to the per-card struct xdma_dereg_msg_st
+ */
+static void xdma_deregister_call_tx(void *a)
+{
+    errval_t err;
+
+    struct xdma_dereg_msg_st *msg_st = a;
+
+    struct xeon_phi_dma_binding *b = xdma_binding[msg_st->xphi_id];
+
+    struct event_closure txcont = MKCONT(NULL, a);
+
+    err = xeon_phi_dma_deregister_call__tx(b, txcont, msg_st->mem);
+    if (err_is_fail(err)) {
+        if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
+            /* channel busy: retry once a send slot becomes available */
+            txcont = MKCONT(xdma_deregister_call_tx, a);
+            err = b->register_send(b, get_default_waitset(), txcont);
+        }
+        if (err_is_fail(err)) {
+            /* report any unrecoverable error and release the waiting
+             * caller; previously non-BUSY errors were dropped and the
+             * caller spun on xdma_wait_reply forever */
+            msg_st->err = err;
+            xdma_wait_reply[msg_st->xphi_id] = 0x0;
+        }
+    }
+}
+
+/**
+ * \brief deregisters a physical memory region to be used for DMA transfers
+ * this memory region can be in host or card memory
+ *
+ * \param xphi_id id of the xeon phi
+ * \param mem the memory to be deregistered
+ *
+ * \returns SYS_ERR_OK on success
+ * XEON_PHI_ERR_DMA_* on error
+ *
+ * NOTE: this prevents the memory region from being used in future requests
+ * current active DMA transfers using this memory regions are not stopped.
*/
-struct xdma_reg_start_st
+errval_t xeon_phi_dma_client_deregister(uint8_t xphi_id,
+                                        struct capref mem)
{
+    errval_t err;
+#ifdef __k1om__
+    /* on the card there is only the local Xeon Phi */
+    assert(xphi_id == 0);
+#endif
+    /* lazily establish the connection to the DMA service */
+    if (xdma_binding[xphi_id] == NULL) {
+        err = xdma_connect(xphi_id);
+        if (err_is_fail(err)) {
+            return err;
+        }
+    }
-};
+    /* only one RPC to a card may be in flight at a time */
+    if (xdma_wait_reply[xphi_id]) {
+        return XEON_PHI_ERR_DMA_RPC_IN_PROGRESS;
+    }
+
+    xdma_wait_reply[xphi_id] = 0x1;
+
+    struct xdma_dereg_msg_st *st = xdma_dereg_msg_st + xphi_id;
+
+    st->xphi_id = xphi_id;
+    st->err = SYS_ERR_OK;
+    st->mem = mem;
+
+    xdma_deregister_call_tx(st);
+
+    /* block until the deregister response clears the wait flag */
+    while (xdma_wait_reply[xphi_id]) {
+        messages_wait_and_handle_next();
+    }
+
+    return st->err;
+
+    /* NOTE(review): unreachable -- the function returns st->err above;
+     * this trailing return is dead code and should be removed */
+    return SYS_ERR_OK;
+}
/*
* ---------------------------------------------------------------------------
- * DMA stop transfer
+ * DMA start new transfer
* ---------------------------------------------------------------------------
*/
+static struct xdma_reg_start_st
+{
+ lpaddr_t src;
+ lpaddr_t dst;
+ uint64_t bytes;
+ uint8_t xphi_id;
+ xeon_phi_dma_id_t id;
+ errval_t err;
+} xdma_reg_start_st[XEON_PHI_NUM_MAX];
-struct xdma_reg_stop_st
+static void xdma_exec_response_rx(struct xeon_phi_dma_binding *_binding,
+ xeon_phi_dma_errval_t err,
+ xeon_phi_dma_id_t id)
{
+ uint8_t xphi_id = (uint8_t) (uintptr_t) _binding->st;
+ DEBUG_XDMA("received exec response [%u, %u]\n", xphi_id, id);
-};
+ assert(xdma_reg_msg_st[xphi_id].xphi_id == xphi_id);
+
+ xdma_reg_start_st[xphi_id].err = err;
+ xdma_reg_start_st[xphi_id].id = id;
+
+ xdma_wait_reply[xphi_id] = 0x0;
+}
+
+/**
+ * \brief transmits the exec (start transfer) call to the DMA service;
+ *        re-arms itself via register_send when the channel is busy
+ *
+ * \param a pointer to the per-card struct xdma_reg_start_st
+ */
+static void xdma_exec_call_tx(void *a)
+{
+    errval_t err;
+
+    struct xdma_reg_start_st *msg_st = a;
+
+    struct xeon_phi_dma_binding *b = xdma_binding[msg_st->xphi_id];
+
+    struct event_closure txcont = MKCONT(NULL, a);
+
+    err = xeon_phi_dma_exec_call__tx(b,
+                                     txcont,
+                                     msg_st->src,
+                                     msg_st->dst,
+                                     msg_st->bytes);
+    if (err_is_fail(err)) {
+        if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
+            /* channel busy: retry once a send slot becomes available */
+            txcont = MKCONT(xdma_exec_call_tx, a);
+            err = b->register_send(b, get_default_waitset(), txcont);
+        }
+        if (err_is_fail(err)) {
+            /* report any unrecoverable error and release the waiting
+             * caller; previously non-BUSY errors were dropped and the
+             * caller spun on xdma_wait_reply forever */
+            msg_st->err = err;
+            xdma_wait_reply[msg_st->xphi_id] = 0x0;
+        }
+    }
+}
+
+/**
+ * \brief starts a new DMA transfer
+ *
+ * \param xphi_id id of the xeon phi
+ * \param info pointer to the DMA transfer info structure
+ * \param cont continuation to be called when transfer is done
+ * \param id returns the ID of the transfer
+ *
+ * \returns SYS_ERR_OK on success
+ * XEON_PHI_ERR_DMA_* on error
+ */
+errval_t xeon_phi_dma_client_start(uint8_t xphi_id,
+                                   struct xeon_phi_dma_info *info,
+                                   struct xeon_phi_dma_cont cont,
+                                   xeon_phi_dma_id_t *id)
+{
+    errval_t err;
+
+    /*
+     * The Xeon Phi DMA controller only supports 64-byte granularity in
+     * alignment and size, so source, destination and length must all be
+     * multiples of XEON_PHI_DMA_ALIGNMENT.
+     */
+    if ((info->dest & (XEON_PHI_DMA_ALIGNMENT - 1))
+        || (info->src & (XEON_PHI_DMA_ALIGNMENT - 1))
+        || (info->size & (XEON_PHI_DMA_ALIGNMENT - 1))) {
+        return XEON_PHI_ERR_DMA_MEM_ALIGN;
+    }
+
+#ifdef __k1om__
+    /* on the card there is only the local Xeon Phi */
+    assert(xphi_id == 0);
+#endif
+    /* lazily establish the connection to the DMA service */
+    if (xdma_binding[xphi_id] == NULL) {
+        err = xdma_connect(xphi_id);
+        if (err_is_fail(err)) {
+            return err;
+        }
+    }
+
+    /* only one RPC to a card may be in flight at a time */
+    if (xdma_wait_reply[xphi_id]) {
+        return XEON_PHI_ERR_DMA_RPC_IN_PROGRESS;
+    }
+
+    struct xdma_req *req = xdma_get_free_request();
+    if (req == NULL) {
+        return XEON_PHI_ERR_DMA_BUSY;
+    }
+
+    struct xdma_reg_start_st *msg_st = xdma_reg_start_st + xphi_id;
+
+    msg_st->bytes = info->size;
+    msg_st->dst = info->dest;
+    msg_st->src = info->src;
+    msg_st->xphi_id = xphi_id;
+
+    xdma_wait_reply[xphi_id] = 0x1;
+
+    xdma_exec_call_tx(msg_st);
+
+    /* block until the exec response clears the wait flag */
+    while (xdma_wait_reply[xphi_id]) {
+        messages_wait_and_handle_next();
+    }
+
+    if (err_is_fail(msg_st->err)) {
+        /* the transfer was not started: recycle the request structure
+         * instead of leaking it */
+        xdma_insert_free_request(req);
+        return msg_st->err;
+    }
+
+    /* record the transfer in the pending list so xdma_done_rx and
+     * xeon_phi_dma_client_stop can look it up by ID; without this the
+     * request leaked and the done handler asserted on an unknown ID */
+    req->id = msg_st->id;
+    req->xphi_id = xphi_id;
+    req->cont = cont;
+    xdma_insert_pending_request(req);
+
+    if (id) {
+        *id = msg_st->id;
+    }
+
+    return SYS_ERR_OK;
+}
/*
* ---------------------------------------------------------------------------
* ---------------------------------------------------------------------------
*/
-struct xdma_reg_exec_st
-{
-
-
-};
+/// spin flag for xeon_phi_dma_client_exec; cleared by xdma_execute_handler
+static uint8_t execute_wait_flag;
+/* done-callback for the blocking exec wrapper: releases the wait loop.
+ * NOTE(review): the completion error `err` is discarded here, so
+ * xeon_phi_dma_client_exec cannot observe a failed transfer -- confirm
+ * whether the error needs to be propagated. */
+static void xdma_execute_handler(xeon_phi_dma_id_t id,
+                                 errval_t err,
+                                 void *st)
+{
+    execute_wait_flag = 0x0;
+}
/**
* \brief executes a DMA transfer and waits for its completion
errval_t xeon_phi_dma_client_exec(uint8_t xphi_id,
                                  struct xeon_phi_dma_info *info)
{
+    errval_t err;
+    xeon_phi_dma_id_t id;
+    /* blocking wrapper: use a local handler that just clears the flag */
+    struct xeon_phi_dma_cont cont = {
+        .arg = NULL,
+        .cb = xdma_execute_handler
+    };
+
+    execute_wait_flag = 0x1;
+
+    err = xeon_phi_dma_client_start(xphi_id, info, cont, &id);
+    if (err_is_fail(err)) {
+        /* the transfer was never started: the done callback will not
+         * fire, so waiting on execute_wait_flag would hang forever */
+        execute_wait_flag = 0x0;
+        return err;
+    }
+
+    /* block until the done callback releases the flag */
+    while (execute_wait_flag) {
+        messages_wait_and_handle_next();
+    }
+    return SYS_ERR_OK;
}
+
+/*
+ * ---------------------------------------------------------------------------
+ * DMA stop transfer
+ * ---------------------------------------------------------------------------
+ */
+
+static struct xdma_reg_stop_st
+{
+ struct xdma_req *req;
+ xeon_phi_dma_id_t id;
+ errval_t err;
+} xdma_reg_stop_st[XEON_PHI_NUM_MAX];
+
+/**
+ * \brief handler for the stop response; records the outcome and releases
+ *        the blocking caller (mirrors the other *_response_rx handlers)
+ */
+static void xdma_stop_response_rx(struct xeon_phi_dma_binding *_binding,
+                                  xeon_phi_dma_errval_t err)
+{
+    uint8_t xphi_id = (uint8_t) (uintptr_t) _binding->st;
+
+    struct xdma_reg_stop_st *req_st = xdma_reg_stop_st + xphi_id;
+
+    DEBUG_XDMA("received stop response [%u, %u]\n", xphi_id, req_st->id);
+
+    /* without these two stores, xeon_phi_dma_client_stop spins on
+     * xdma_wait_reply forever and returns an uninitialized error */
+    req_st->err = err;
+    xdma_wait_reply[xphi_id] = 0x0;
+}
+
+/* transmits the stop request for a DMA transfer.
+ * NOTE(review): this is an unimplemented stub -- no stop_call message is
+ * ever sent, so xeon_phi_dma_client_stop will wait forever for a response
+ * that cannot arrive. Needs an implementation analogous to
+ * xdma_exec_call_tx before the stop path can work -- confirm. */
+static void xdma_stop_call_tx(void *a)
+{
+
+}
+
+/**
+ * \brief stops a previously started DMA transfer based on its ID
+ *
+ * \param id the ID of the transfer to stop
+ *
+ * \returns SYS_ERR_OK on success
+ * XEON_PHI_ERR_DMA_* on failure
+ */
+/* NOTE(review): xdma_stop_call_tx is currently an empty stub and the
+ * xdma_reg_stop_st entry (req / id) is never filled in before the call,
+ * so this function blocks forever waiting for a stop response that is
+ * never requested -- confirm before relying on the stop path. */
+errval_t xeon_phi_dma_client_stop(xeon_phi_dma_id_t id)
+{
+
+    /* unlink the request from the pending list; fails for unknown IDs */
+    struct xdma_req *req = xdma_get_pending_request(id);
+    if (req == NULL) {
+        return XEON_PHI_ERR_DMA_ID_NOT_EXISTS;
+    }
+
+#ifdef __k1om__
+    /* on the card there is only the local Xeon Phi */
+    assert(req->xphi_id == 0);
+#endif
+
+    assert(xdma_binding[req->xphi_id] != NULL);
+
+    if (xdma_wait_reply[req->xphi_id]) {
+        /* another RPC is in flight: put the request back untouched */
+        xdma_insert_pending_request(req);
+        return XEON_PHI_ERR_DMA_RPC_IN_PROGRESS;
+    }
+
+    xdma_wait_reply[req->xphi_id] = 0x1;
+
+    struct xdma_reg_stop_st *msg_st = xdma_reg_stop_st + req->xphi_id;
+
+    xdma_stop_call_tx(msg_st);
+
+    /* block until the stop response clears the wait flag */
+    while (xdma_wait_reply[req->xphi_id]) {
+        messages_wait_and_handle_next();
+    }
+
+    xdma_insert_free_request(req);
+
+    return msg_st->err;
}
/*
* ---------------------------------------------------------------------------
*/
+/**
+ * \brief handler for the done message sent by the DMA service when a
+ *        transfer completes; invokes the caller-supplied continuation
+ *        and recycles the request structure
+ */
+static void xdma_done_rx(struct xeon_phi_dma_binding *_binding,
+                         xeon_phi_dma_id_t id,
+                         xeon_phi_dma_errval_t err)
+{
+    uint8_t xphi_id = (uint8_t) (uintptr_t) _binding->st;
+
+    DEBUG_XDMA("received done message [%u, %u]\n", xphi_id, id);
+
+    /* NOTE(review): asserts if the ID is not in the pending list, i.e. if
+     * a done message arrives for a transfer we do not track -- confirm */
+    struct xdma_req *req = xdma_get_pending_request(id);
+    assert(req);
+
+    /* the continuation is optional */
+    if (req->cont.cb) {
+        req->cont.cb(id, err, req->cont.arg);
+    }
+
+    xdma_insert_free_request(req);
+}
"xeon_phi.c",
"boot.c",
"serial.c",
- "host_bootstrap.c",
- "dma.c",
+ -- "host_bootstrap.c",
"interrupts.c",
"service.c",
"spawn.c",
"smpt.c",
"sysmem_caps.c",
"messaging.c",
- "sleep.c" ],
+ "sleep.c",
+ "dma/dma_service.c",
+ "dma/dma_channel.c",
+ "dma/dma_descriptor_ring.c" ],
addLibraries = libDeps ["skb",
"pci",
"spawndomain",
"xeon_phi_messaging",
"mm"],
flounderExtraDefs = [ ("monitor_blocking",["rpcclient"]) ],
- flounderDefs = ["monitor", "xeon_phi_manager", "xeon_phi", "xeon_phi_messaging"],
- flounderBindings = ["xeon_phi", "xeon_phi_messaging"],
+ flounderDefs = ["monitor", "xeon_phi_manager", "xeon_phi", "xeon_phi_messaging", "xeon_phi_dma"],
+ flounderBindings = ["xeon_phi", "xeon_phi_messaging", "xeon_phi_dma"],
architectures= ["x86_64"],
mackerelDevices = [ "xeon_phi/xeon_phi_apic",
"xeon_phi/xeon_phi_boot",
"xeon_phi/xeon_phi_serial",
"xeon_phi/xeon_phi_smpt",
- "xeon_phi/xeon_phi_irq"]
+ "xeon_phi/xeon_phi_irq",
+ "xeon_phi/xeon_phi_dma"]
},
build application { target = "xeon_phi",
cFiles = [ "mgr_main.c",
"sysmem_caps.c",
"spawn.c",
- "messaging.c" ],
+ "messaging.c",
+ "dma/dma_service.c",
+ "dma/dma_channel.c",
+ "dma/dma_descriptor_ring.c" ],
-- flounderExtraDefs = [ ("monitor_blocking",["rpcclient"]) ],
- flounderDefs = ["xeon_phi_messaging"],
- flounderBindings = ["xeon_phi_messaging"],
+ flounderDefs = ["xeon_phi_messaging", "xeon_phi_dma"],
+ flounderBindings = ["xeon_phi_messaging", "xeon_phi_dma"],
addLibraries = libDeps [ "mm", "spawndomain", "xeon_phi_messaging" ],
architectures= ["k1om"],
mackerelDevices = [ "xeon_phi/xeon_phi_apic",
"xeon_phi/xeon_phi_boot",
"xeon_phi/xeon_phi_serial",
"xeon_phi/xeon_phi_smpt",
- "xeon_phi/xeon_phi_irq"]
+ "xeon_phi/xeon_phi_irq",
+ "xeon_phi/xeon_phi_dma"]
}
]
#define XDEBUG_MESSAGING 1
#define XDEBUG_SYSMEM 1
#define XDEBUG_SPAWN 1
+#define XDEBUG_DMA 1
/*
* --------------------------------------------------------------------------
#else
#define XSPAWN_DEBUG(x...)
#endif
-
+#if XDEBUG_DMA
+#define XDMA_DEBUG(x...) XDEBUG(" DMA | " x)
+#else
+#define XDMA_DEBUG(x...)
+#endif
#endif /* XEON_PHI_DEBUG_H_ */
+++ /dev/null
-/**
- * \file
- * \brief Driver for booting the Xeon Phi Coprocessor card on a Barrelfish Host
- */
-
-/*
- * Copyright (c) 2014 ETH Zurich.
- * All rights reserved.
- *
- * This file is distributed under the terms in the attached LICENSE file.
- * If you do not find this file, copies can be found by writing to:
- * ETH Zurich D-INFK, Universitaetsstrasse 6, CH-8092 Zurich. Attn: Systems Group.
- */
-
-#include <stdio.h>
-#include <string.h>
-#include <barrelfish/barrelfish.h>
-
-#include "xeon_phi.h"
-#include "dma.h"
-
-/**
- * \brief Initializes the DMA structure for the Xeon Phi
- *
- * \param phi the xeon phi DMA structure
- *
- * \return SYS_ERR_OK on success,
- */
-errval_t dma_init(struct xeon_phi *phi)
-{
-
- return SYS_ERR_OK;
-}
-
+++ /dev/null
-/**
- * \file
- * \brief Driver for booting the Xeon Phi Coprocessor card on a Barrelfish Host
- */
-
-/*
- * Copyright (c) 2014 ETH Zurich.
- * All rights reserved.
- *
- * This file is distributed under the terms in the attached LICENSE file.
- * If you do not find this file, copies can be found by writing to:
- * ETH Zurich D-INFK, Universitaetsstrasse 6, CH-8092 Zurich. Attn: Systems Group.
- */
-#ifndef XEON_PHI_DMA_H
-#define XEON_PHI_DMA_H
-
-//#include <dev/xeon_phi/xeon_phi_dma_dev.h>
-
-
-#define XEON_PHI_DMA_CHAN_NUM 8
-
-struct dma_channel {
-
-
-};
-
-struct dma_info {
- struct dma_channel chan[XEON_PHI_DMA_CHAN_NUM];
-};
-
-errval_t dma_request_alloc(void);
-
-errval_t dma_request_exec(void);
-
-errval_t dma_request_free(void);
-
-
-/**
- * \brief Initializes the DMA structure for the Xeon Phi
- *
- * \param phi the xeon phi DMA structure
- *
- * \return SYS_ERR_OK on success,
- */
-errval_t dma_init(struct xeon_phi *phi);
-
-#endif /* XEON_PHI_DMA_H */
--- /dev/null
+/**
+ * \file
+ * \brief Driver for booting the Xeon Phi Coprocessor card on a Barrelfish Host
+ */
+
+/*
+ * Copyright (c) 2014 ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Universitaetsstrasse 6, CH-8092 Zurich. Attn: Systems Group.
+ */
+#ifndef XEON_PHI_DMA_H
+#define XEON_PHI_DMA_H
+
+/// the base name of the exported dma service
+#define XEON_PHI_DMA_SERVICE_NAME "xeon_phi_dma_svc"
+
+/// alignment for size and memory addresses
+#define XEON_PHI_DMA_ALIGNMENT 64
+
+/// shift corresponding to XEON_PHI_DMA_ALIGNMENT (64 == 1 << 6)
+#define XEON_PHI_DMA_ALIGN_SHIFT 6
+
+/// the maximum number of DMA channels on the card
+#define XEON_PHI_DMA_CHAN_NUM_MAX 8
+
+/// the number of DMA channels for the host
+#define XEON_PHI_DMA_CHAN_NUM_HOST 4
+
+#ifdef __k1om__
+#define XEON_PHI_DMA_CHAN_NUM (XEON_PHI_DMA_CHAN_NUM_MAX- XEON_PHI_DMA_CHAN_NUM_HOST)
+#define XEON_PHI_DMA_CHAN_OFFSET XEON_PHI_DMA_CHAN_NUM_HOST
+#else
+#define XEON_PHI_DMA_CHAN_NUM XEON_PHI_DMA_CHAN_NUM_HOST
+#define XEON_PHI_DMA_CHAN_OFFSET 0
+#endif
+#define XEON_PHI_DMA_OWNER_HOST 1
+#define XEON_PHI_DMA_OWNER_CARD 2
+
+
+
+
+/// alignment constraint of the descriptor ring (cache line)
+#define XEON_PHI_DMA_DESC_RING_ALIGN 64
+
+
+/**
+ * \brief Initializes the DMA structure for the Xeon Phi
+ *
+ * \param phi the xeon phi DMA structure
+ *
+ * \return SYS_ERR_OK on success,
+ */
+errval_t dma_init(struct xeon_phi *phi);
+
+#endif /* XEON_PHI_DMA_H */
--- /dev/null
+/**
+ * \file
+ * \brief Driver for booting the Xeon Phi Coprocessor card on a Barrelfish Host
+ */
+
+/*
+ * Copyright (c) 2014 ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Universitaetsstrasse 6, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <barrelfish/barrelfish.h>
+#include <xeon_phi/xeon_phi.h>
+
+#include <if/xeon_phi_dma_defs.h>
+#include <dev/xeon_phi/xeon_phi_dma_dev.h>
+
+#include "xeon_phi.h"
+#include "dma_channel.h"
+#include "dma_descriptor_ring.h"
+#include "debug.h"
+
+
+/// writes `entry` into the channel's descriptor head pointer register (DHPR)
+static inline void xdma_channel_set_headptr(struct xdma_channel *chan,
+                                            uint16_t entry)
+{
+    xeon_phi_dma_dhpr_index_wrf(chan->regs, chan->chanid, entry);
+}
+
+/// reads the channel's current descriptor head pointer index from the DHPR
+static inline uint16_t xdma_channel_get_headptr(struct xdma_channel *chan)
+{
+    return xeon_phi_dma_dhpr_index_rdf(chan->regs, chan->chanid);
+}
+
+/// writes `entry` into the channel's descriptor tail pointer register (DTPR)
+static inline void xdma_channel_set_tailptr(struct xdma_channel *chan,
+                                            uint16_t entry)
+{
+    xeon_phi_dma_dtpr_index_wrf(chan->regs, chan->chanid, entry);
+}
+
+/// reads the channel's current descriptor tail pointer index from the DTPR
+static inline uint16_t xdma_channel_get_tailptr(struct xdma_channel *chan)
+{
+    return xeon_phi_dma_dtpr_index_rdf(chan->regs, chan->chanid);
+}
+
+/// returns the number of free descriptor slots in the channel's ring
+/// (not yet implemented — currently aborts via assert and returns 0)
+static uint16_t xdma_channel_get_desc_avail_count(struct xdma_channel *chan)
+{
+    assert(!"NYI");
+    return 0;
+}
+
+/*
+ * ----------------------------------------------------------------------------
+ * Public Interface
+ * ----------------------------------------------------------------------------
+ */
+
+/**
+ * \brief initializes a DMA channel
+ *
+ * \param chan   where to initialize the DMA channel
+ * \param ndesc  number of descriptors in the ring
+ * \param regs   pointer to the Mackerel information structure
+ * \param chanid id of the channel
+ *
+ * \returns SYS_ERR_OK on success
+ */
+errval_t xdma_channel_init(struct xdma_channel *chan,
+                           uint16_t ndesc,
+                           xeon_phi_dma_t *regs,
+                           uint8_t chanid)
+{
+    assert(chan);
+
+    /* record the static channel parameters so the fields of struct
+     * xdma_channel already reflect the arguments after init */
+    chan->regs = regs;
+    chan->chanid = chanid;
+    chan->size = ndesc;
+
+    /* TODO: allocate the descriptor ring (xeon_phi_dma_desc_ring_alloc)
+     *       and program the channel registers once that is implemented.
+     *
+     * NOTE: the previous stub fell off the end of this non-void function,
+     *       which is undefined behavior when the caller reads the result. */
+    return SYS_ERR_OK;
+}
+
+/**
+ * \brief frees up the resources used by the channel
+ *
+ * \param chan the DMA channel to be freed
+ *
+ * \returns SYS_ERR_OK on success
+ */
+errval_t xdma_channel_free(struct xdma_channel *chan)
+{
+    assert(chan);
+
+    /* TODO: release the descriptor ring once xdma_channel_init allocates
+     *       one (xeon_phi_dma_desc_ring_free).
+     *
+     * NOTE: the previous stub fell off the end of this non-void function,
+     *       which is undefined behavior when the caller reads the result. */
+    return SYS_ERR_OK;
+}
+
+/**
+ * \brief issues a memcpy request on the DMA channel
+ *
+ * \param chan  the DMA channel to issue the request on
+ * \param setup request parameters; must be of type XDMA_REQ_TYPE_MEMCPY
+ * \param id    returns the DMA request id (may be NULL)
+ *
+ * \returns SYS_ERR_OK on success
+ *          XEON_PHI_ERR_DMA_* on failure
+ */
+errval_t xdma_channel_req_memcpy(struct xdma_channel *chan,
+                                 struct xdma_req_setup *setup,
+                                 xeon_phi_dma_id_t *id)
+{
+    assert(setup->type == XDMA_REQ_TYPE_MEMCPY);
+    /* addresses must not exceed the Xeon Phi address range */
+    assert(!(setup->info.mem.dst & XEON_PHI_MEM_MASK));
+    assert(!(setup->info.mem.src & XEON_PHI_MEM_MASK));
+
+    /* one descriptor covers at most XEON_PHI_DMA_REQ_SIZE_MAX bytes,
+     * so round the byte count up to whole descriptors */
+    uint32_t num_desc_needed = (setup->info.mem.bytes
+                    + XEON_PHI_DMA_REQ_SIZE_MAX - 1) / XEON_PHI_DMA_REQ_SIZE_MAX;
+
+    if (num_desc_needed > XEON_PHI_DMA_DESC_RING_MAX
+        || num_desc_needed > xdma_channel_get_desc_avail_count(chan)) {
+        /* we do not support huge requests at this stage... */
+        assert(!"NYI: splitting up huge DMA requests");
+    }
+
+    /* BUG FIX: was XDEBUG_DMA (undefined); the macro declared in debug.h is
+     * XDMA_DEBUG. Specifiers fixed: %zu for size_t, %u for uint32_t. */
+    XDMA_DEBUG("memcpy request: %zu bytes, %u descriptors\n",
+               setup->info.mem.bytes, num_desc_needed);
+
+    for (uint16_t i = 0; i < num_desc_needed; ++i) {
+        /* TODO: fill in one memcpy descriptor per chunk
+         *       (xdma_desc_set_memcpy) and record the request info */
+    }
+
+    if (id) {
+        /* TODO: hand back the request id (xdma_chan_generate_id) once
+         *       the descriptors are actually enqueued */
+    }
+
+    /* NOTE: the previous stub fell off the end of this non-void function */
+    return SYS_ERR_OK;
+}
--- /dev/null
+/**
+ * \file
+ * \brief Driver for booting the Xeon Phi Coprocessor card on a Barrelfish Host
+ */
+
+/*
+ * Copyright (c) 2014 ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Universitaetsstrasse 6, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef XEON_PHI_DMA_CHANNEL_H
+#define XEON_PHI_DMA_CHANNEL_H
+
+#include <if/xeon_phi_dma_defs.h>
+
+#include "dma_descriptor_ring.h"
+
+
+#define XEON_PHI_DMA_REQ_SIZE_MAX (((1U) * 1024 * 1024) >> 1)
+
+/// per-descriptor bookkeeping for an in-flight DMA request
+struct xdma_req_info
+{
+    struct xeon_phi_dma_binding *binding;  ///< binding that issued the request
+    errval_t err;           ///< outcome of the request
+    xeon_phi_dma_id_t id;   ///< DMA request ID
+    uint32_t head :1;       ///< flag indicating this is the first entry
+    uint32_t last :1;       ///< flag indicating this is the last entry
+    uint32_t done :1;       ///< flag indicating that this transfer is done
+    struct xdma_req_info *next;  ///< next entry of the same request chain
+};
+
+/// state of one Xeon Phi DMA channel
+struct xdma_channel
+{
+    xeon_phi_dma_t *regs;   ///< Mackerel base
+    struct xdma_ring ring;  ///< descriptor ring of this channel
+    uint16_t tail;          ///< the tail pointer of the ring (cached)
+    uint16_t head;          ///< the head pointer of the ring
+    uint16_t size;          ///< size of the channel (elements in descriptor ring)
+    struct xdma_req *reqs;  ///< stores request information for the descriptors
+    uint8_t chanid;         ///< channel id
+    uint32_t reqcoutner;    ///< request counter (sic; see xdma_chan_generate_id)
+};
+
+/// type of a DMA request; selects which member of xdma_req_setup.info is used
+enum xdma_req_type
+{
+    XDMA_REQ_TYPE_NOP = 0,   ///< no operation
+    XDMA_REQ_TYPE_MEMCPY,    ///< memory-to-memory copy (info.mem)
+    XDMA_REQ_TYPE_STATUS,    ///< status write (info.status)
+    XDMA_REQ_TYPE_GENERAL,   ///< general-purpose descriptor (info.general)
+    XDMA_REQ_TYPE_KEYNON,    ///< key/nonce descriptor (info.keynon)
+    XDMA_REQ_TYPE_KEY        ///< key descriptor (info.key)
+};
+
+/// parameters for issuing a DMA request on a channel
+struct xdma_req_setup
+{
+    enum xdma_req_type type;              ///< selects the union member below
+    struct xeon_phi_dma_binding *binding; ///< binding issuing the request
+    union
+    {
+        /* memcpy request: physical source/destination and length */
+        struct
+        {
+            lpaddr_t src;   ///< physical source address
+            lpaddr_t dst;   ///< physical destination address
+            size_t bytes;   ///< number of bytes to copy
+        } mem;
+        /* the remaining request types carry no parameters yet */
+        struct
+        {
+
+        } status;
+        struct
+        {
+
+        } general;
+        struct {
+
+        }keynon;
+        struct {
+
+        }key;
+    } info;
+};
+
+/// generates a new request id for the channel
+/// layout: bits [63:56] channel id, [47:32] current head pointer,
+///         [31:0] a per-channel running counter (incremented on each call)
+static inline xeon_phi_dma_id_t xdma_chan_generate_id(struct xdma_channel *chan)
+{
+    return ((((uint64_t) chan->chanid) << 56) | (((uint64_t) chan->head) << 32)
+                    | (chan->reqcoutner++));
+}
+
+/**
+ * \brief initializes a DMA channel
+ *
+ * \param chan where to initialize the DMA channel
+ * \param ndesc number of descriptors in the ring
+ * \param regs pointer to the Mackerel information structure
+ * \param chanid id of the channel
+ */
+errval_t xdma_channel_init(struct xdma_channel *chan,
+ uint16_t ndesc,
+ xeon_phi_dma_t *regs,
+ uint8_t chanid);
+
+/**
+ * \brief frees up the resources used by the channel
+ *
+ * \param chan the DMA channel to be freed
+ *
+ * \returns SYS_ERR_OK on success
+ */
+errval_t xdma_channel_free(struct xdma_channel *chan);
+
+/**
+ * \brief
+ *
+ * \param chan
+ *
+ * \returns SYS_ERR_OK on success
+ * XEON_PHI_ERR_DMA_* on failure
+ *
+ */
+errval_t xdma_channel_req_memcpy(struct xdma_channel *chan,
+ struct xdma_req_setup *setup,
+ xeon_phi_dma_id_t *id);
+
+/**
+ * \brief issues a request of type XDMA_REQ_TYPE_STATUS on the channel
+ */
+errval_t xdma_channel_req_status(struct xdma_channel *chan,
+ struct xdma_req_setup *setup,
+ xeon_phi_dma_id_t *id);
+
+/**
+ * \brief issues a request of type XDMA_REQ_TYPE_GENERAL on the channel
+ */
+errval_t xdma_channel_req_general(struct xdma_channel *chan,
+ struct xdma_req_setup *setup,
+ xeon_phi_dma_id_t *id);
+
+/**
+ * \brief issues a request of type XDMA_REQ_TYPE_KEYNON on the channel
+ */
+errval_t xdma_channel_req_keynoncecent(struct xdma_channel *chan,
+ struct xdma_req_setup *setup,
+ xeon_phi_dma_id_t *id);
+
+/**
+ * \brief issues a request of type XDMA_REQ_TYPE_KEY on the channel
+ */
+errval_t xdma_channel_req_key(struct xdma_channel *chan,
+ struct xdma_req_setup *setup,
+ xeon_phi_dma_id_t *id);
+
+/**
+ * \brief stops the DMA request identified by req on the channel
+ */
+errval_t xdma_channel_req_stop(struct xdma_channel *chan,
+ xeon_phi_dma_id_t req);
+
+/**
+ * \brief polls the channel, presumably for completed transfers (declaration only)
+ */
+errval_t xdma_channel_poll(struct xdma_channel *chan);
+
+#endif /* XEON_PHI_DMA_CHANNEL_H */
--- /dev/null
+/**
+ * \file
+ * \brief Driver for booting the Xeon Phi Coprocessor card on a Barrelfish Host
+ */
+
+/*
+ * Copyright (c) 2014 ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Universitaetsstrasse 6, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <barrelfish/barrelfish.h>
+
+#include "xeon_phi.h"
+#include "dma.h"
+#include "dma_descriptor_ring.h"
+#include "debug.h"
+
+/**
+ * \brief initializes a dma descriptor ring and allocates memory for it
+ *
+ * \param ring the ring structure to initialize
+ * \param size number of elements in the ring
+ *
+ * \returns SYS_ERR_OK on success
+ *          errval on error
+ */
+errval_t xeon_phi_dma_desc_ring_alloc(struct xdma_ring *ring,
+                                      uint16_t size)
+{
+    errval_t err;
+
+    memset(ring, 0, sizeof(*ring));
+
+    assert(size < (XEON_PHI_DMA_DESC_RING_MAX));
+
+#ifndef __k1om__
+    /*
+     * we set the ram affinity to the maximum range mapped by the system memory
+     * page tables when being on the host. Otherwise the card cannot access it.
+     */
+    uint64_t minbase, maxlimit;
+    ram_get_affinity(&minbase, &maxlimit);
+    ram_set_affinity(0, XEON_PHI_SYSMEM_SIZE);
+#endif
+
+    /* each descriptor is XEON_PHI_DMA_DESC_SIZE bytes */
+    size_t frame_size = ((size_t) size) * XEON_PHI_DMA_DESC_SIZE;
+    err = frame_alloc(&ring->cap, frame_size, NULL);
+
+    /* restore the previous affinity before checking the result */
+#ifndef __k1om__
+    ram_set_affinity(minbase, maxlimit);
+#endif
+
+    if (err_is_fail(err)) {
+        return err;
+    }
+
+    err = vspace_map_one_frame_attr(&ring->vbase,
+                                    frame_size,
+                                    ring->cap,
+                                    VREGION_FLAGS_READ_WRITE,
+                                    NULL,
+                                    NULL);
+    if (err_is_fail(err)) {
+        cap_destroy(ring->cap);
+        return err;
+    }
+
+    /* obtain the physical base address of the ring for the DMA engine */
+    struct frame_identity id;
+    err = invoke_frame_identify(ring->cap, &id);
+    assert(err_is_ok(err));
+#ifdef __k1om__
+    ring->pbase = id.base;
+#else
+    /* on the host the address must be translated into the card's view of
+     * system memory before the DMA engine can use it */
+    ring->pbase = xdma_desc_ring_host2guest(id.base);
+#endif
+    ring->size = size;
+
+    return SYS_ERR_OK;
+}
+
+/**
+ * \brief frees up the resources used by the ring.
+ *
+ * \param ring the descriptor ring to be freed
+ *
+ * \returns SYS_ERR_OK on success
+ */
+errval_t xeon_phi_dma_desc_ring_free(struct xdma_ring *ring)
+{
+    errval_t err;
+
+    /* nothing was allocated for this ring */
+    if (capref_is_null(ring->cap)) {
+        return SYS_ERR_OK;
+    }
+
+    if (ring->vbase) {
+        /* NOTE(review): vspace_unmap's error is ignored here — confirm
+         * that is intentional (best effort cleanup) */
+        vspace_unmap(ring->vbase);
+    }
+
+
+    err = cap_revoke(ring->cap);
+    if (err_is_fail(err)) {
+        DEBUG_ERR(err, "revokation of ring cap failed\n");
+    }
+    return cap_destroy(ring->cap);
+}
+
+
+
--- /dev/null
+/**
+ * \file
+ * \brief Driver for booting the Xeon Phi Coprocessor card on a Barrelfish Host
+ */
+
+/*
+ * Copyright (c) 2014 ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Universitaetsstrasse 6, CH-8092 Zurich. Attn: Systems Group.
+ */
+#ifndef XEON_PHI_DMA_DESC_RING_H
+#define XEON_PHI_DMA_DESC_RING_H
+
+#include <string.h> /* for memset() */
+#include <barrelfish/barrelfish.h>
+
+#include <dev/xeon_phi/xeon_phi_dma_dev.h>
+
+#include "xeon_phi.h"
+#include "dma.h"
+
+/// the maximum length of the descriptor ring 128k-1 rounded to cache line
+#define XEON_PHI_DMA_DESC_RING_MAX (128*1024 - 64)
+
+/// the size of a descriptor entry
+#define XEON_PHI_DMA_DESC_SIZE 16
+
+/// checks if a certain value is aligned to a multiple of cache line
+#define XDMA_ASSERT_ALIGNED(x) \
+ assert((x) && (((uintptr_t)x & (XEON_PHI_DMA_ALIGNMENT - 1)) == 0))
+
+/**
+ * \brief memory region holding a channel's DMA descriptor ring
+ */
+struct xdma_ring
+{
+    uint16_t size;      ///< the maximum number of elements in the ring
+    lpaddr_t pbase;     ///< physical base address of the ring
+    void *vbase;        ///< virtual base address of the ring
+    struct capref cap;  ///< frame capability backing the ring memory
+};
+
+/**
+ * \brief calculates the virtual address of the descriptor at entry
+ *
+ * \param ring  the dma ring
+ * \param entry the entry
+ *
+ * \returns pointer to the entry
+ *          NULL if out of range
+ */
+static inline void *xdma_desc_get_entry(struct xdma_ring *ring,
+                                        uint16_t entry)
+{
+    /* reject indices beyond the end of the ring */
+    if (entry >= ring->size) {
+        return NULL;
+    }
+    uint8_t *base = ring->vbase;
+    return base + ((size_t) entry * XEON_PHI_DMA_DESC_SIZE);
+}
+
+/**
+ * \brief clears the DMA descriptor by zeroing it
+ *
+ * \param desc pointer to the memory location of the descriptor
+ */
+static inline void xdma_desc_clear(void *desc)
+{
+    memset(desc, 0, XEON_PHI_DMA_DESC_SIZE);
+}
+
+/**
+ * \brief fills in a descriptor of type memcopy
+ *
+ * \param desc  the descriptor to set
+ * \param src   source address of the transfer (PHYS)
+ * \param dst   destination address of the transfer (PHYS)
+ * \param bytes size of the transfer in bytes; must be a multiple of
+ *              XEON_PHI_DMA_ALIGNMENT, as must src and dst
+ * \param flags currently unused — TODO: wire up or remove
+ */
+static inline void xdma_desc_set_memcpy(void *desc,
+                                        lpaddr_t src,
+                                        lpaddr_t dst,
+                                        size_t bytes,
+                                        uint32_t flags)
+{
+    xdma_desc_clear(desc);
+
+    XDMA_ASSERT_ALIGNED(src);
+    XDMA_ASSERT_ALIGNED(dst);
+    XDMA_ASSERT_ALIGNED(bytes);
+
+    xeon_phi_dma_desc_memcpy_src_insert(desc, src);
+    xeon_phi_dma_desc_memcpy_dst_insert(desc, dst);
+    /* the length field is expressed in cache lines, not bytes */
+    xeon_phi_dma_desc_memcpy_length_insert(desc,
+                                           (bytes >> XEON_PHI_DMA_ALIGN_SHIFT));
+    xeon_phi_dma_desc_memcpy_dtype_insert(desc, xeon_phi_dma_desc_memcpy);
+}
+
+/**
+ * \brief fills in a descriptor of type status
+ *
+ * \param desc        the descriptor to write
+ * \param dst         where to store the data
+ * \param data        data value to be written to dst
+ * \param intr_enable enable interrupts on this one
+ */
+static inline void xdma_desc_set_status(void *desc,
+                                        lpaddr_t dst,
+                                        uint64_t data,
+                                        uint8_t intr_enable)
+{
+    xdma_desc_clear(desc);
+
+    xeon_phi_dma_desc_status_data_insert(desc, data);
+    xeon_phi_dma_desc_status_dst_insert(desc, dst);
+    if (intr_enable) {
+        xeon_phi_dma_desc_status_intr_insert(desc, 1);
+    }
+
+    xeon_phi_dma_desc_status_dtype_insert(desc, xeon_phi_dma_desc_status);
+}
+
+/**
+ * \brief fills in a descriptor of type general purpose
+ *
+ * \param desc the descriptor to write
+ * \param dst  where to store the data
+ * \param data data value to be written to dst
+ */
+static inline void xdma_desc_set_general(void *desc,
+                                         lpaddr_t dst,
+                                         uint64_t data)
+{
+    xdma_desc_clear(desc);
+
+    xeon_phi_dma_desc_general_data_insert(desc, data);
+    xeon_phi_dma_desc_general_dst_insert(desc, dst);
+
+    xeon_phi_dma_desc_general_dtype_insert(desc, xeon_phi_dma_desc_general);
+}
+
+/// fills in a descriptor of type key/nonce — not yet implemented
+static inline void xdma_desc_set_keynoncecent(void *desc)
+{
+    assert(!"NYI: xdma_desc_set_keynoncecent");
+}
+
+/// fills in a descriptor of type key — not yet implemented
+static inline void xdma_desc_set_key(void *desc)
+{
+    assert(!"NYI: xdma_desc_set_key");
+}
+
+/**
+ * \brief translates the physical address to the format the DMA engine understands
+ *
+ * Host physical addresses appear in the card's address space at offset
+ * XEON_PHI_SYSMEM_BASE; only the first XEON_PHI_SYSMEM_SIZE bytes are mapped.
+ */
+static inline lpaddr_t xdma_desc_ring_host2guest(lpaddr_t host_addr)
+{
+    assert(host_addr < XEON_PHI_SYSMEM_SIZE);
+    return (host_addr | XEON_PHI_SYSMEM_BASE);
+}
+
+
+
+/**
+ * \brief initializes a dma descriptor ring and allocates memory for it
+ *
+ * \param ring the ring structure to initialize
+ * \param size number of elements in the ring
+ *
+ * \returns SYS_ERR_OK on success
+ * errval on error
+ */
+errval_t xeon_phi_dma_desc_ring_alloc(struct xdma_ring *ring,
+ uint16_t size);
+
+/**
+ * \brief frees up the resources used by the ring.
+ *
+ * \param ring the descriptor ring to be freed
+ *
+ * \returns SYS_ERR_OK on success
+ */
+errval_t xeon_phi_dma_desc_ring_free(struct xdma_ring *ring);
+
+#endif /* XEON_PHI_DMA_DESC_RING_H */
--- /dev/null
+/**
+ * \file
+ * \brief Driver for booting the Xeon Phi Coprocessor card on a Barrelfish Host
+ */
+
+/*
+ * Copyright (c) 2014 ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Universitaetsstrasse 6, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <barrelfish/barrelfish.h>
+
+#include <dev/xeon_phi/xeon_phi_dma_dev.h>
+
+#include "xeon_phi.h"
+#include "dma.h"
+#include "dma_channel.h"
+#include "dma_descriptor_ring.h"
+#include "debug.h"
+
+
+/// per-card DMA driver state, hung off struct xeon_phi
+struct dma_info {
+    struct xdma_channel channels[XEON_PHI_DMA_CHAN_NUM]; ///< channels owned by this side
+    xeon_phi_dma_t dma_dev;                              ///< Mackerel device state
+};
+
+/**
+ * \brief Initializes the DMA structure for the Xeon Phi
+ *
+ * \param phi the xeon phi DMA structure
+ *
+ * \return SYS_ERR_OK on success,
+ *         LIB_ERR_MALLOC_FAIL if the state could not be allocated
+ */
+errval_t dma_init(struct xeon_phi *phi)
+{
+    /* check if already initialized */
+    if (phi->dma) {
+        return SYS_ERR_OK;
+    }
+
+    struct dma_info *info = calloc(1, sizeof(struct dma_info));
+    if (info == NULL) {
+        return LIB_ERR_MALLOC_FAIL;
+    }
+
+    XDMA_DEBUG("initializing %u channels\n", XEON_PHI_DMA_CHAN_NUM);
+    for (uint32_t i = 0; i < XEON_PHI_DMA_CHAN_NUM; ++i) {
+        struct xdma_channel *chan = &info->channels[i];
+        /* channel ids are global; this side owns [OFFSET, OFFSET+NUM) */
+        chan->chanid = i + XEON_PHI_DMA_CHAN_OFFSET;
+
+    }
+
+    /* BUG FIX: store the allocated state in the Xeon Phi structure.
+     * Previously `info` leaked and the already-initialized check above
+     * could never trigger. */
+    phi->dma = info;
+
+    return SYS_ERR_OK;
+}
+
#include "interrupts.h"
#include "sleep.h"
#include "smpt.h"
-#include "dma.h"
+#include "dma/dma.h"
#include "sysmem_caps.h"
static uint32_t initialized = 0;
static struct ump_chan uc;
-
-
static errval_t msg_open_cb(struct capref msgframe,
uint8_t chantype)
{
debug_printf("[%p, %p, %p]\n", inbuf, outbuf, bufs.buf);
debug_printf("initializing ump channel\n");
err = ump_chan_init(&uc, inbuf,
- XPHI_BENCH_MSG_FRAME_SIZE,
+ XPHI_BENCH_MSG_FRAME_SIZE,
outbuf,
XPHI_BENCH_MSG_FRAME_SIZE);
if (err_is_fail(err)) {
static struct ump_chan uc;
-
static errval_t msg_open_cb(struct capref msgframe,
uint8_t chantype)
{
#endif
#endif
-
bufs.num = XPHI_BENCH_BUF_NUM;
bufs.buf_size = XPHI_BENCH_BUF_SIZE;
debug_printf("[%p, %p, %p]\n", inbuf, outbuf, bufs.buf);
err = ump_chan_init(&uc, inbuf,
- XPHI_BENCH_MSG_FRAME_SIZE,
+ XPHI_BENCH_MSG_FRAME_SIZE,
outbuf,
XPHI_BENCH_MSG_FRAME_SIZE);
if (err_is_fail(err)) {
assert(alloced_size >= frame_size);
ram_set_affinity(minbase, maxlimit);
- err = vspace_map_one_frame(&host_buf,
- frame_size,
- frame,
- NULL,
+ err = vspace_map_one_frame(&host_buf, frame_size, frame,
+ NULL,
NULL);
assert(err_is_ok(err));
-
-
err = xeon_phi_messaging_open(0, iface, frame, XEON_PHI_CHAN_TYPE_UMP);
if (err_is_fail(err)) {
USER_PANIC_ERR(err, "could not open channel");
messages_wait_and_handle_next();
}
-
-
#ifdef XPHI_BENCH_PROCESS_CARD
#ifndef XPHI_BENCH_THROUGHPUT
xphi_bench_start_initator_rtt(&bufs, &uc);
#endif
#endif
-
}