*/
constants desc_opcodes "IOAT DMA Descriptor OP Codes" {
desc_op_copy = 0x00 "Copy Operation";
+ desc_op_memset = 0x01 "Memset Operation";
desc_op_xor = 0x87 "For Xor Descriptor";
desc_op_xor_val = 0x88 "For Xor descriptor";
};
DMA_REQ_TYPE_MEM_REMOVE, ///<
DMA_REQ_TYPE_NOP, ///< NULL / NOP request
DMA_REQ_TYPE_MEMCPY, ///< Memcpy request
+ DMA_REQ_TYPE_MEMSET, ///< Memset request
DMA_REQ_TYPE_STATUS,
DMA_REQ_TYPE_GENERAL,
DMA_REQ_TYPE_KEYNON,
uint8_t ctrl_intr :1; ///< do an interrupt upon completion
uint8_t ctrl_fence :1; ///< do a mem fence upon completion
} memcpy; ///< memcpy request
+
+    struct
+    {
+        lpaddr_t dst;            ///< destination physical address
+        uint64_t val;            ///< value to set
+        size_t bytes;            ///< size of the transfer in bytes
+        uint8_t ctrl_intr :1;    ///< do an interrupt upon completion
+        uint8_t ctrl_fence :1;   ///< do a mem fence upon completion
+    } memset;                    ///< memset request
struct {
} nop;
errval_t dma_request_memcpy(struct dma_device *dev,
struct dma_req_setup *setup,
dma_req_id_t *id);
+/**
+ * \brief issues a new DMA memset request based on the setup information
+ *
+ * \param chan  DMA channel
+ * \param setup DMA request setup information
+ * \param id    returns the DMA request ID
+ *
+ * \returns SYS_ERR_OK on success
+ *          errval on error
+ */
+errval_t dma_request_memset_chan(struct dma_channel *chan,
+                                 struct dma_req_setup *setup,
+                                 dma_req_id_t *id);
+
+/**
+ * \brief issues a new DMA memset request based on the setup information
+ *
+ * \param dev   DMA device
+ * \param setup DMA request setup information
+ * \param id    returns the DMA request ID
+ *
+ * \returns SYS_ERR_OK on success
+ *          errval on error
+ */
+errval_t dma_request_memset(struct dma_device *dev,
+                            struct dma_req_setup *setup,
+                            dma_req_id_t *id);
/*
* ----------------------------------------------------------------------------
lpaddr_t dst,
uint32_t size,
ioat_dma_desc_ctrl_t ctrl);
-
+/**
+ * \brief initializes the hardware specific part of the descriptor for a
+ *        memset operation
+ *
+ * \param desc IOAT DMA descriptor
+ * \param data value that is written to the destination region
+ * \param dst  destination address of the transfer
+ * \param size number of bytes to write
+ * \param ctrl control flags
+ *
+ * XXX: this function assumes that the size of the descriptor has already been
+ * checked and must match the maximum transfer size of the channel
+ */
+void ioat_dma_desc_fill_memset(struct dma_descriptor *desc,
+                               uint64_t data,
+                               lpaddr_t dst,
+                               uint32_t size,
+                               ioat_dma_desc_ctrl_t ctrl);
/**
* \brief initializes the hardware specific part of the descriptor to be used
* for nop descriptors (null descriptors)
dma_req_id_t *id);
/**
+ * \brief issues a memset request to the given channel
+ *
+ * \param chan  IOAT DMA channel
+ * \param setup request setup information
+ * \param id    returns the generated request id
+ *
+ * \returns SYS_ERR_OK on success
+ *          errval on failure
+ */
+errval_t ioat_dma_request_memset_chan(struct dma_channel *chan,
+                                      struct dma_req_setup *setup,
+                                      dma_req_id_t *id);
+/**
+ * \brief issues a memset request to a channel of the given device
+ *
+ * \param dev IOAT DMA device
+ * \param setup request setup information
+ * \param id returns the generated request id
+ *
+ * \returns SYS_ERR_OK on success
+ * errval on failure
+ */
+errval_t ioat_dma_request_memset(struct dma_device *dev,
+ struct dma_req_setup *setup,
+ dma_req_id_t *id);
+/**
* \brief issues a NOP / NULL descriptor request on the given channel
*
* \param chan IOAT DMA channel
return dma_request_memcpy_chan(chan, setup, id);
}
+/**
+ * \brief issues a new DMA memset request based on the setup information
+ *
+ * \param chan  DMA channel
+ * \param setup DMA request setup information
+ * \param id    returns the DMA request ID
+ *
+ * \returns SYS_ERR_OK on success
+ *          errval on error
+ */
+errval_t dma_request_memset_chan(struct dma_channel *chan,
+                                 struct dma_req_setup *setup,
+                                 dma_req_id_t *id)
+{
+    /* not all backends implement memset; fail gracefully if unsupported */
+    if (chan->f.memset == NULL) {
+        return DMA_ERR_REQUEST_UNSUPPORTED;
+    }
+
+    return chan->f.memset(chan, setup, id);
+}
+
+/**
+ * \brief issues a new DMA memset request based on the setup information
+ *
+ * \param dev   DMA device
+ * \param setup DMA request setup information
+ * \param id    returns the DMA request ID
+ *
+ * \returns SYS_ERR_OK on success
+ *          errval on error
+ */
+errval_t dma_request_memset(struct dma_device *dev,
+                            struct dma_req_setup *setup,
+                            dma_req_id_t *id)
+{
+    /* pick one of the device's channels and delegate to it */
+    struct dma_channel *chan = dma_device_get_channel(dev);
+    return dma_request_memset_chan(chan, setup, id);
+}
+
+
struct dma_req_setup *setup,
dma_req_id_t *id);
+/// signature of a backend-provided memset request handler
+typedef errval_t (*memset_fn_t)(struct dma_channel *chan,
+                                struct dma_req_setup *setup,
+                                dma_req_id_t *id);
+
typedef errval_t (*chan_poll_fn_t)(struct dma_channel *chan);
/**
struct dma_channel_fn
{
    memcpy_fn_t memcpy;
+    memset_fn_t memset;    ///< backend handler for memset requests
    chan_poll_fn_t poll;
};
dma_chan->state = DMA_CHAN_ST_PREPARED;
dma_chan->f.memcpy = ioat_dma_request_memcpy_chan;
+ dma_chan->f.memset = ioat_dma_request_memset_chan;
dma_chan->f.poll = ioat_dma_channel_poll;
*ret_chan = chan;
if (ioat_dma_channel_is_halted(status)) {
IOATCHAN_DEBUG("channel is in error state\n", chan->id);
+ char buf[512];
+ ioat_dma_chan_err_pr(buf, 512, &ioat_chan->channel);
+ printf("channel error: %s\n", buf);
assert(!"NYI: error event handling");
}
uint8_t *vbase = dma_desc_get_desc_handle(desc);
ioat_dma_desc_size_insert(vbase, size);
ioat_dma_desc_ctrl_insert(vbase, *((uint32_t *) ctrl));
+ ioat_dma_desc_ctrl_op_insert(ctrl, ioat_dma_desc_op_copy);
ioat_dma_desc_src_insert(vbase, src);
ioat_dma_desc_dst_insert(vbase, dst);
}
/**
+ * \brief initializes the hardware specific part of the descriptor for a
+ *        memset operation
+ *
+ * \param desc IOAT DMA descriptor
+ * \param data value that is written to the destination region
+ * \param dst  destination address of the transfer
+ * \param size number of bytes to write
+ * \param ctrl control flags
+ *
+ * XXX: this function assumes that the size of the descriptor has already been
+ * checked and must match the maximum transfer size of the channel
+ */
+inline void ioat_dma_desc_fill_memset(struct dma_descriptor *desc,
+                                      uint64_t data,
+                                      lpaddr_t dst,
+                                      uint32_t size,
+                                      ioat_dma_desc_ctrl_t ctrl)
+{
+    uint8_t *vbase = dma_desc_get_desc_handle(desc);
+    ioat_dma_desc_size_insert(vbase, size);
+    /* the op code must be set in ctrl before ctrl is copied into the
+     * descriptor on the next line */
+    ioat_dma_desc_ctrl_op_insert(ctrl, ioat_dma_desc_op_memset);
+    ioat_dma_desc_ctrl_insert(vbase, *((uint32_t *) ctrl));
+    /* for a memset descriptor the source field carries the fill value */
+    ioat_dma_desc_src_insert(vbase, data);
+    ioat_dma_desc_dst_insert(vbase, dst);
+}
+
+/**
* \brief initializes the hardware specific part of the descriptor to be used
* for nop descriptors (null descriptors)
*
req->desc_tail = desc;
ioat_dma_desc_ctrl_fence_insert(ctrl, setup->args.memcpy.ctrl_fence);
- ioat_dma_desc_ctrl_int_en_insert(ctrl, setup->args.memcpy.ctrl_intr);
+ ioat_dma_desc_ctrl_int_en_insert(ctrl, setup->args.memcpy.ctrl_intr);\
ioat_dma_desc_ctrl_compl_write_insert(ctrl, 0x1);
} else {
bytes = max_xfer_size;
}
/**
+ * \brief issues a memset request to the given channel
+ *
+ * \param chan  IOAT DMA channel
+ * \param setup request setup information
+ * \param id    returns the generated request id
+ *
+ * \returns SYS_ERR_OK on success
+ *          errval on failure
+ */
+errval_t ioat_dma_request_memset_chan(struct dma_channel *chan,
+                                      struct dma_req_setup *setup,
+                                      dma_req_id_t *id)
+{
+    assert(chan->device->type == DMA_DEV_TYPE_IOAT);
+
+    struct ioat_dma_channel *ioat_chan = (struct ioat_dma_channel *) chan;
+
+    uint32_t num_desc = req_num_desc_needed(ioat_chan, setup->args.memset.bytes);
+
+    /* the memset argument struct has no 'src' field; the fill value is 'val' */
+    IOATREQ_DEBUG("DMA Memset request: val=0x%016lx -> [0x%016lx] of %lu bytes (%u desc)\n",
+                  setup->args.memset.val, setup->args.memset.dst,
+                  setup->args.memset.bytes, num_desc);
+
+    struct dma_ring *ring = ioat_dma_channel_get_ring(ioat_chan);
+
+    if (num_desc > dma_ring_get_space(ring)) {
+        IOATREQ_DEBUG("Too less space in ring: %u / %u\n", num_desc,
+                      dma_ring_get_space(ring));
+        return DMA_ERR_NO_DESCRIPTORS;
+    }
+
+    struct ioat_dma_request *req = request_alloc();
+    if (req == NULL) {
+        IOATREQ_DEBUG("No request descriptors for holding request data\n");
+        return DMA_ERR_NO_REQUESTS;
+    }
+
+    dma_request_common_init(&req->common, chan, setup->type);
+
+    ioat_dma_desc_ctrl_array_t ctrl = {
+        0
+    };
+
+    /* split the request into descriptors of at most max_xfer_size bytes;
+     * only the last descriptor gets the fence/interrupt/completion flags */
+    struct dma_descriptor *desc;
+    size_t length = setup->args.memset.bytes;
+    lpaddr_t src_data = setup->args.memset.val;
+    lpaddr_t dst = setup->args.memset.dst;
+    size_t bytes, max_xfer_size = dma_channel_get_max_xfer_size(chan);
+    do {
+        desc = dma_ring_get_next_desc(ring);
+
+        if (!req->desc_head) {
+            req->desc_head = desc;
+        }
+        if (length <= max_xfer_size) {
+            /* the last one */
+            bytes = length;
+            req->desc_tail = desc;
+
+            ioat_dma_desc_ctrl_fence_insert(ctrl, setup->args.memset.ctrl_fence);
+            ioat_dma_desc_ctrl_int_en_insert(ctrl, setup->args.memset.ctrl_intr);
+            ioat_dma_desc_ctrl_compl_write_insert(ctrl, 0x1);
+        } else {
+            bytes = max_xfer_size;
+        }
+
+        ioat_dma_desc_fill_memset(desc, src_data, dst, bytes, ctrl);
+        dma_desc_set_request(desc, NULL);
+
+        length -= bytes;
+        dst += bytes;
+    } while (length > 0);
+
+    req->common.setup = *setup;
+
+    if (id) {
+        *id = req->common.id;
+    }
+    /* set the request pointer in the last descriptor */
+    dma_desc_set_request(desc, &req->common);
+
+    assert(req->desc_tail);
+    assert(dma_desc_get_request(req->desc_tail));
+
+    return ioat_dma_channel_submit_request(ioat_chan, req);
+}
+
+/**
+ * \brief issues a memset request to a channel of the given device
+ *
+ * \param dev   IOAT DMA device
+ * \param setup request setup information
+ * \param id    returns the generated request id
+ *
+ * \returns SYS_ERR_OK on success
+ *          errval on failure
+ */
+errval_t ioat_dma_request_memset(struct dma_device *dev,
+                                 struct dma_req_setup *setup,
+                                 dma_req_id_t *id)
+{
+    /* obtain one of the device's channels and delegate the request to it */
+    return ioat_dma_request_memset_chan(dma_device_get_channel(dev), setup, id);
+}
+
+/**
* \brief issues a NOP / NULL descriptor request on the given channel
*
* \param chan IOAT DMA channel