failure DMA_MEM_OUT_OF_RANGE "The physical address is out of range",
};
-errors ioat IOAT_ERR_ {
- failure PCI_ADDRESS "The PCI address of the device is not as expected",
+errors dma DMA_ERR_ {
+ failure PCI_ADDRESS "The PCI address of the device is not as expected",
failure DEVICE_UNSUPPORTED "Device ID not supported / wrong configuration",
failure DEVICE_IDLE "The device is idle, no transfers finished",
failure ARG_INVALID "Supplied argument was not valid",
failure NO_REQUESTS "There are no request descriptors left",
failure CHAN_BUSY "The channel is busy and cannot accept more",
failure CHAN_IDLE "There were no finished requests on the channel",
- failure REQUEST_UNFINISHED "The request is still in operation",
+ failure REQUEST_UNFINISHED "The request is still in operation",
};
"hpet",
"interdisp",
"intermon",
+ "ioat_dma_mgr",
"keyboard",
"lock",
"mem",
--- /dev/null
+/*
+ * Copyright (c) 2012, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Universitaetsstrasse 6, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+interface ioat_dma_mgr "IOAT DMA manager interface" {
+
+
+ rpc request(out uint8 devid, out cap device_frame);
+
+ rpc release(in uint8 devid);
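+
+    /*
+     * Intended protocol (a hedged sketch; the exact stub names depend on the
+     * generated Flounder bindings): a client calls request() to gain
+     * exclusive access to one DMA engine, receiving the engine's device ID
+     * and a capability to its MMIO frame, and hands the engine back with
+     * release(devid) once it is done.
+     */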
+
+};
\ No newline at end of file
/// IOAT DMA device id
typedef uint8_t dma_dev_id_t;
+#define PCI_ADDR_DONT_CARE 0x10000
+
/// representation of a PCI address (with more bits than libpci)
struct pci_addr {
uint32_t bus;
*
* \returns MMIO register vbase
*/
-lvaddr_t dma_device_get_mmio_vbase(struct dma_device *dev);
+void *dma_device_get_mmio_vbase(struct dma_device *dev);
/**
/*
* ----------------------------------------------------------------------------
+ * Getter / Setter Functions
+ * ----------------------------------------------------------------------------
+ */
+
+/**
+ * \brief returns the state of a DMA request
+ *
+ * \param req DMA request
+ *
+ * \returns DMA request state
+ */
+dma_req_st_t dma_request_get_state(struct dma_request *req);
+
+/**
+ * \brief returns the ID of a DMA request
+ *
+ * \param req DMA request
+ *
+ * \returns DMA request ID
+ */
+dma_req_id_t dma_request_get_id(struct dma_request *req);
+
+
+/**
+ * \brief returns the next DMA request of the given request
+ *
+ * \param req DMA request
+ *
+ * \returns DMA request if there was one
+ * NULL if the request is at the end of the chain
+ */
+struct dma_request *dma_request_get_next(struct dma_request *req);
+
+/*
+ * ----------------------------------------------------------------------------
* Request Execution
* ----------------------------------------------------------------------------
*/
*/
errval_t dma_request_exec(struct dma_req_setup *setup);
-/**
- *
- */
-dma_req_st_t dma_request_get_state(struct dma_request *req);
/*
* ----------------------------------------------------------------------------
#ifndef LIB_IOAT_DMA_CHANNEL_H
#define LIB_IOAT_DMA_CHANNEL_H
+#include <dev/ioat_dma_chan_dev.h>
+
+
+/**
+ * \brief pointer type conversion
+ */
+static inline struct ioat_dma_channel *dma_channel_to_ioat(struct dma_channel *chan)
+{
+ return (struct ioat_dma_channel *)chan;
+}
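+
+/* The cast above is valid because struct ioat_dma_channel embeds its generic
+ * part as the first member (struct dma_channel common); the analogous
+ * converters dma_device_to_ioat() and dma_request_to_ioat() rely on the same
+ * layout. */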
+
+/*
+ * ----------------------------------------------------------------------------
+ * Channel State Management
+ * ----------------------------------------------------------------------------
+ */
+
/**
 * \brief Resets an IOAT DMA channel
*
errval_t ioat_dma_channel_reset(struct ioat_dma_channel *chan);
/**
- * \brief Starts a channel and sets it into the running state
+ * \brief restarts an IOAT DMA channel; this updates the chain address
+ *        register and the DMA count register.
*
- * \param chan IOAT DMA channel
+ * \param chan IOAT DMA channel
+ *
+ * \returns SYS_ERR_OK on success
+ * DMA_ERR_* on failure
+ */
+errval_t ioat_dma_channel_restart(struct ioat_dma_channel *chan);
+
+/**
+ * \brief starts an IOAT DMA channel. This sets the chain address to the first
+ * entry of the ring and the DMA count to zero.
+ *
+ * \param chan IOAT DMA channel
*
* \returns SYS_ERR_OK on success
- * errval on failure
+ * DMA_ERR_* on failure
*/
errval_t ioat_dma_channel_start(struct ioat_dma_channel *chan);
/**
- * \brief Restarts a channel and sets it into the running state
- * this is to be used upon error condition
+ * \brief stops the processing of the descriptors.
*
- * \param chan IOAT DMA channel
+ * \param chan IOAT DMA channel
*
* \returns SYS_ERR_OK on success
- * errval on failure
+ * DMA_ERR_* on failure
*/
-errval_t ioat_dma_channel_restart(struct ioat_dma_channel *chan);
+errval_t ioat_dma_channel_stop(struct ioat_dma_channel *chan);
+
+/**
+ * \brief Puts the IOAT DMA channel into the suspended state
+ *
+ * \param chan IOAT DMA channel
+ *
+ * \returns SYS_ERR_OK on success
+ * DMA_ERR_* on failure
+ */
+errval_t ioat_dma_channel_suspend(struct ioat_dma_channel *chan);
/**
*
* \returns number of submitted descriptors
*/
-uint16_t ioat_dma_channel_submit_pending(struct ioat_dma_channel *chan);
+uint16_t ioat_dma_channel_issue_pending(struct ioat_dma_channel *chan);
/**
* \brief polls the IOAT DMA channel for completed events
*/
errval_t ioat_dma_channel_poll(struct ioat_dma_channel *chan);
+
+/*
+ * ----------------------------------------------------------------------------
+ * Getter / Setter Functions
+ * ----------------------------------------------------------------------------
+ */
+
+/**
+ * \brief returns the associated IOAT DMA descriptor ring of a channel
+ *
+ * \param chan IOAT DMA channel
+ *
+ * \returns IOAT DMA descriptor ring handle
+ */
+struct ioat_dma_ring *ioat_dma_channel_get_ring(struct ioat_dma_channel *chan);
+
+/**
+ * \brief updates the channel status flag by reading the CHANSTS register
+ *
+ * \param chan IOAT DMA channel
+ *
+ * \returns the raw 64-bit channel status (copy of CHANSTS)
+ */
+uint64_t ioat_dma_channel_get_status(struct ioat_dma_channel *chan);
+
/*
* ----------------------------------------------------------------------------
* Channel Status
* ----------------------------------------------------------------------------
*/
-bool ioat_dma_channel_is_active(struct ioat_dma_channel *chan);
+/**
+ * \brief checks whether the given channel status value indicates that the
+ *        channel is active
+ *
+ * \param status channel status as returned by ioat_dma_channel_get_status()
+ *
+ * \returns true if channel is active
+ * false if not
+ */
+static inline bool ioat_dma_channel_is_active(uint64_t status)
+{
+ uint32_t tr_st = ioat_dma_chan_sts_lo_dma_trans_state_extract(status);
+ return tr_st == ioat_dma_chan_trans_state_active;
+}
/**
 * \brief reads the CHANSTS register and checks if the channel is idle
* \returns true if channel is idle
* false if not
*/
-bool ioat_dma_channel_is_idle(struct ioat_dma_channel *chan);
+static inline bool ioat_dma_channel_is_idle(uint64_t status)
+{
+ uint32_t tr_st = ioat_dma_chan_sts_lo_dma_trans_state_extract(status);
+ return tr_st == ioat_dma_chan_trans_state_idle;
+}
/**
 * \brief reads the CHANSTS register and checks if the channel is halted
* \returns true if channel is halted (there was an error)
* false if not
*/
-bool ioat_dma_channel_is_halted(struct ioat_dma_channel *chan);
+static inline bool ioat_dma_channel_is_halted(uint64_t status)
+{
+ uint32_t tr_st = ioat_dma_chan_sts_lo_dma_trans_state_extract(status);
+ return tr_st == ioat_dma_chan_trans_state_halt;
+}
/**
 * \brief reads the CHANSTS register and checks if the channel is suspended
* \returns true if channel is suspended
* false if not
*/
-bool ioat_dma_channel_is_suspended(struct ioat_dma_channel *chan);
-
-
-
-/**
- * \brief returns the associated IOAT DMA descriptor ring of a channel
- *
- * \param chan IOAT DMA channel
- *
- * \returns IOAT DMA descriptor ring handle
- */
-struct ioat_dma_ring *ioat_dma_channel_get_ring(struct ioat_dma_channel *chan);
-
+static inline bool ioat_dma_channel_is_suspended(uint64_t status)
+{
+ uint32_t tr_st = ioat_dma_chan_sts_lo_dma_trans_state_extract(status);
+ return tr_st == ioat_dma_chan_trans_state_susp;
+}
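+
+/* Usage sketch: sample CHANSTS once and evaluate the predicates on that one
+ * snapshot, as the poll path does:
+ *
+ *     uint64_t status = ioat_dma_channel_get_status(chan);
+ *     if (ioat_dma_channel_is_halted(status)) {
+ *         (handle the channel error)
+ *     }
+ */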
/*
* ----------------------------------------------------------------------------
struct ioat_dma_channel;
+/**
+ * \brief pointer type conversion
+ */
+static inline struct ioat_dma_device *dma_device_to_ioat(struct dma_device *dev)
+{
+ return (struct ioat_dma_device *)dev;
+}
+
/*
* ----------------------------------------------------------------------------
* ----------------------------------------------------------------------------
*/
-/**
- * \brief gets the device state from the IOAT DMA device
- *
- * \param dev IOAT DMA device
- *
- * \returns device state enumeration
- */
-dma_dev_st_t ioat_dma_device_get_state(struct ioat_dma_device *dev);
-
-
-/**
- * \brief returns the channel count of this device
- *
- * \param dev IOAT DMA device
- *
- * \returns number of channels this device has
- */
-uint8_t ioat_dma_device_get_channel_count(struct ioat_dma_device *dev);
-
-/**
- * \brief returns the device ID from the IOAT device
- *
- * \param dev IOAT DMA device
- *
- * \returns IOAT DMA device ID
- */
-dma_dev_id_t ioat_dma_device_get_id(struct ioat_dma_device *dev);
-
-/**
- * \brief returns the channel belonging with the given ID
- *
- * \param dev IOAT DMA device
- * \param id channel id
- *
- * return IOAT DMA channel handle
- * NULL if no such channel exist
- */
-struct ioat_dma_channel *ioat_dma_device_get_channel(struct ioat_dma_device *dev,
- uint16_t id);
-
-/**
- * \brief returns a channel from the device based on a round robin fashion
- *
- * \param dev IOAT DMA device
- *
- * return IOAT DMA channel handle
- */
-struct ioat_dma_channel *ioat_dma_device_get_next_channel(struct ioat_dma_device *dev);
-
/**
* \brief polls the channels of the IOAT DMA device
struct ioat_dma_channel;
struct ioat_dma_request;
-
+/**
+ * \brief pointer type conversion
+ */
+static inline struct ioat_dma_request *dma_request_to_ioat(struct dma_request *req)
+{
+ return (struct ioat_dma_request *)req;
+}
/*
* ----------------------------------------------------------------------------
* \returns SYS_ERR_OK on success
* errval on failure
*/
-static inline errval_t ioat_dma_request_memcpy(struct ioat_dma_device *dev,
- struct dma_req_setup *setup)
-{
- struct ioat_dma_channel *chan = ioat_dma_device_get_next_channel(dev);
- return ioat_dma_request_memcpy_chan(chan, setup);
-}
+errval_t ioat_dma_request_memcpy(struct ioat_dma_device *dev,
+ struct dma_req_setup *setup);
/**
* \brief issues a NOP / NULL descriptor request on the given channel
* \returns SYS_ERR_OK on success
* errval on failure
*/
-static inline void ioat_dma_request_nop(struct ioat_dma_device *dev)
-{
- struct ioat_dma_channel *chan = ioat_dma_device_get_next_channel(dev);
- ioat_dma_request_nop_chan(chan);
-}
+void ioat_dma_request_nop(struct ioat_dma_device *dev);
/*
* ----------------------------------------------------------------------------
struct ioat_dma_descriptor *ioat_dma_ring_get_desc(struct ioat_dma_ring *ring,
uint16_t index);
+
/*
* ----------------------------------------------------------------------------
* Ring Status Queries
"dma_memory_manager.c",
"dma_mem_utils.c",
"dma_client.c",
+ "dma_device.c",
+ "dma_channel.c",
+ "dma_request.c",
"ioat/ioat_dma_channel.c",
"ioat/ioat_dma_dca.c",
"ioat/ioat_dma_descriptors.c",
"ioat/ioat_dma.c"
],
addIncludes = [ "include" ],
- flounderBindings = [ "dma" ],
+ flounderBindings = [ "dma", "ioat_dma_mgr" ],
+ flounderDefs = [ "dma" ],
mackerelDevices = [ "ioat_dma", "ioat_dma_chan" ]
},
],
addIncludes = [ "include" ],
flounderBindings = [ "dma" ]
+ },
+
+ build library {
+ target = "dma_service",
+ cFiles = [
+ "dma_service.c"
+ ],
+ addIncludes = [ "include" ],
+ flounderBindings = [ "dma" ]
}
]
* ETH Zurich D-INFK, Universitaetsstrasse 6, CH-8092 Zurich. Attn: Systems Group.
*/
-#include <string.h>
#include <barrelfish/barrelfish.h>
-#include <barrelfish/nameservice_client.h>
+
+#include <dma_internal.h>
+#include <dma_device_internal.h>
+#include <dma_channel_internal.h>
+#include <dma_request_internal.h>
+
+#include <debug.h>
+
+/*
+ * ============================================================================
+ * Library Internal Interface
+ * ============================================================================
+ */
+
+/*
+ * ----------------------------------------------------------------------------
+ * Request List Management
+ * ----------------------------------------------------------------------------
+ */
+
+/**
+ * \brief returns the first request on the submitted request queue of the channel
+ *
+ * \param chan DMA channel
+ *
+ * \returns pointer to the DMA request
+ * NULL if queue was empty
+ */
+struct dma_request *dma_channel_deq_request_head(struct dma_channel *chan)
+{
+ struct dma_request *req = chan->req_list.head;
+ if (req == NULL) {
+ assert(chan->req_list.count == 0);
+ return NULL;
+ }
+
+ chan->req_list.count--;
+
+ chan->req_list.head = dma_request_get_next(req);
+ if (chan->req_list.head == NULL) {
+ chan->req_list.tail = NULL;
+ assert(chan->req_list.count == 0);
+ }
+
+ DMACHAN_DEBUG("request : deq head [%016lx] count=%u\n", chan->id,
+ dma_request_get_id(req), chan->req_list.count);
+
+ return req;
+}
+
+/**
+ * \brief inserts a request at the head of the channel's request list
+ *
+ * \param chan DMA channel
+ * \param req DMA request to be inserted
+ */
+void dma_channel_enq_request_head(struct dma_channel *chan,
+ struct dma_request *req)
+{
+ dma_request_set_next(req, chan->req_list.head);
+
+ chan->req_list.count++;
+
+ chan->req_list.head = req;
+ if (chan->req_list.tail == NULL) {
+ chan->req_list.tail = req;
+ assert(chan->req_list.count == 1);
+ }
+
+ DMACHAN_DEBUG("request : enq head [%016lx] count=%u\n", chan->id,
+ dma_request_get_id(req), chan->req_list.count);
+}
+
+/**
+ * \brief inserts a request at the end of the channel's request list
+ *
+ * \param chan DMA channel
+ * \param req DMA request to be inserted
+ */
+void dma_channel_enq_request_tail(struct dma_channel *chan,
+ struct dma_request *req)
+{
+ dma_request_set_next(req, NULL);
+
+ if (chan->req_list.head == NULL) {
+ assert(chan->req_list.count == 0);
+ chan->req_list.head = req;
+ } else {
+ assert(chan->req_list.count > 0);
+ dma_request_set_next(chan->req_list.tail, req);
+ }
+
+ chan->req_list.tail = req;
+
+ chan->req_list.count++;
+}
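+
+/*
+ * Note: the functions above implement a FIFO discipline. Submission paths
+ * (e.g. ioat_dma_channel_submit_request()) append at the tail, while
+ * completion processing dequeues from the head, matching the in-order
+ * completion of the hardware descriptor ring.
+ */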
+
+
+/*
+ * ============================================================================
+ * Public Interface
+ * ============================================================================
+ */
+
+/*
+ * ----------------------------------------------------------------------------
+ * Getter / Setter Methods
+ * ----------------------------------------------------------------------------
+ */
+
+/**
+ * \brief gets the ID of the channel
+ *
+ * \param chan DMA channel
+ *
+ * \returns DMA channel ID
+ */
+inline dma_chan_id_t dma_channel_get_id(struct dma_channel *chan)
+{
+ return chan->id;
+}
+
+/**
+ * \brief gets the state of the channel
+ *
+ * \param chan DMA channel
+ *
+ * \returns DMA channel state
+ */
+inline dma_chan_st_t dma_channel_get_state(struct dma_channel *chan)
+{
+ return chan->state;
+}
+
+/**
+ * \brief gets the DMA device this channel belongs to
+ *
+ * \param chan DMA channel
+ *
+ * \returns DMA device
+ */
+inline struct dma_device *dma_channel_get_device(struct dma_channel *chan)
+{
+ return chan->device;
+}
+
+/**
+ * \brief gets the number of unfinished requests on this channel
+ *
+ * \param chan DMA channel
+ *
+ * \returns request count
+ */
+inline uint32_t dma_channel_get_request_count(struct dma_channel *chan)
+{
+ return chan->req_list.count;
+}
+
+/**
+ * \brief gets the address where the MMIO registers are mapped
+ *
+ * \param chan DMA channel
+ *
+ * \returns MMIO register vbase
+ */
+inline lvaddr_t dma_channel_get_mmio_vbase(struct dma_channel *chan)
+{
+ return chan->mmio.vaddr;
+}
+
+/**
+ * \brief gets the maximum transfer size of the channel
+ *
+ * \param chan DMA channel
+ *
+ * \returns maximum transfer size in bytes
+ */
+inline uint32_t dma_channel_get_max_xfer_size(struct dma_channel *chan)
+{
+ return chan->max_xfer_size;
+}
* ETH Zurich D-INFK, Universitaetsstrasse 6, CH-8092 Zurich. Attn: Systems Group.
*/
-#include <string.h>
#include <barrelfish/barrelfish.h>
-#include <barrelfish/nameservice_client.h>
+
+#include <dma_internal.h>
+#include <dma_device_internal.h>
+
+/*
+ * ===========================================================================
+ * Public Interface
+ * ===========================================================================
+ */
+
+/*
+ * ----------------------------------------------------------------------------
+ * Getters / Setters
+ * ----------------------------------------------------------------------------
+ */
+
+/**
+ * \brief gets the ID of the DMA device
+ *
+ * \param dev DMA device
+ *
+ * \returns DMA device ID
+ */
+inline dma_dev_id_t dma_device_get_id(struct dma_device *dev)
+{
+ return dev->id;
+}
+
+
+/**
+ * \brief gets the state of the DMA device
+ *
+ * \param dev DMA device
+ *
+ * \returns DMA device state
+ */
+inline dma_dev_st_t dma_device_get_state(struct dma_device *dev)
+{
+ return dev->state;
+}
+
+
+/**
+ * \brief gets the virtual address of the mapped MMIO register space
+ *
+ * \param dev DMA device
+ *
+ * \returns MMIO register vbase
+ */
+inline void *dma_device_get_mmio_vbase(struct dma_device *dev)
+{
+ return (void *)dev->mmio.vaddr;
+}
+
+
+/**
+ * \brief gets the number of channels this device has
+ *
+ * \param dev DMA device
+ *
+ * \returns DMA channel count
+ */
+inline uint8_t dma_device_get_channel_count(struct dma_device *dev)
+{
+ return dev->channels.count;
+}
+
+
+/**
+ * \brief obtains the channel associated with the given index
+ *
+ * \param dev DMA device
+ * \param idx channel index
+ *
+ * \returns DMA channel if index exists
+ *          NULL if index exceeds channel count
+ */
+struct dma_channel *dma_device_get_channel_by_idx(struct dma_device *dev,
+                                                  uint8_t idx)
+{
+ if (idx < dev->channels.count) {
+ return dev->channels.c[idx];
+ }
+ return NULL;
+}
+
+/**
+ * \brief gets a DMA channel from this device in a round robin fashion
+ *
+ * \param dev DMA device
+ *
+ * \returns DMA channel
+ */
+inline struct dma_channel *dma_device_get_channel(struct dma_device *dev)
+{
+ if (dev->channels.next >= dev->channels.count) {
+ dev->channels.next = 0;
+ }
+ return dev->channels.c[dev->channels.next++];
+}
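+
+/* Sketch of the intended use: a backend picks a channel per request,
+ *
+ *     struct dma_channel *chan = dma_device_get_channel(dev);
+ *
+ * so that successive requests are spread across all channels of the device
+ * (the former inline ioat_dma_request_memcpy() did exactly this). */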
+
+/*
+ * ----------------------------------------------------------------------------
+ * Interrupt Management
+ * ----------------------------------------------------------------------------
+ */
+
#include <barrelfish/barrelfish.h>
+#include <dma_internal.h>
#include <dma_mem_utils.h>
/**
errval_t err;
if (mem == NULL) {
- return IOAT_ERR_ARG_INVALID;
+ return DMA_ERR_ARG_INVALID;
}
err = frame_alloc(&mem->frame, bytes, &mem->bytes);
mem->paddr = id.base;
- err = vspace_map_one_frame_attr(&mem->addr, mem->bytes, mem->frame, flags, NULL,
+ void *addr;
+ err = vspace_map_one_frame_attr(&addr, mem->bytes, mem->frame, flags, NULL,
NULL);
if (err_is_fail(err)) {
dma_mem_free(mem);
return err;
}
+ mem->vaddr = (lvaddr_t)addr;
+
return SYS_ERR_OK;
}
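+
+/*
+ * Usage sketch (mirrors the completion-status allocation in the IOAT device
+ * setup code):
+ *
+ *     struct dma_mem mem;
+ *     err = dma_mem_alloc(IOAT_DMA_COMPLSTATUS_SIZE,
+ *                         IOAT_DMA_COMPLSTATUS_FLAGS, &mem);
+ *     (use mem.vaddr / mem.paddr, then dma_mem_free(&mem))
+ */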
{
errval_t err;
- if (mem->addr) {
- err = vspace_unmap(mem->addr);
+ if (mem->vaddr) {
+ err = vspace_unmap((void*)mem->vaddr);
if (err_is_fail(err)) {
/* todo: error handling ignoring for now */
}
* ETH Zurich D-INFK, Universitaetsstrasse 6, CH-8092 Zurich. Attn: Systems Group.
*/
-#include <string.h>
#include <barrelfish/barrelfish.h>
-#include <barrelfish/nameservice_client.h>
+
+#include <dma_internal.h>
+#include <dma_device_internal.h>
+#include <dma_channel_internal.h>
+#include <dma_request_internal.h>
+
+#include <debug.h>
+
+/*
+ * ---------------------------------------------------------------------------
+ * Request ID generation
+ * ---------------------------------------------------------------------------
+ */
+
+/**
+ * \brief generates the next request ID for the given channel
+ *
+ * \param chan DMA channel the request is issued on
+ *
+ * \returns new request ID, built from the channel ID and its request counter
+ */
+inline dma_req_id_t dma_request_generate_req_id(struct dma_channel *chan)
+{
+ return dma_request_id_build(chan, dma_channel_incr_req_count(chan));
+}
+
+/**
+ * \brief handles the processing of completed DMA requests
+ *
+ * \param req the DMA request to process
+ *
+ * \returns SYS_ERR_OK on success
+ * errval on failure
+ */
+errval_t dma_request_process(struct dma_request *req)
+{
+ DMAREQ_DEBUG("Processing done request [%016lx]:\n", req->id);
+
+ errval_t err;
+
+ switch (req->state) {
+ case DMA_REQ_ST_DONE:
+ err = SYS_ERR_OK;
+ break;
+ default:
+ err = -1; // todo: error code
+ break;
+ }
+
+ if (req->setup.done_cb) {
+ req->setup.done_cb(err, req->id, req->setup.cb_arg);
+ }
+
+ return SYS_ERR_OK;
+}
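+
+/* Note: the completion callback (setup.done_cb) is invoked synchronously
+ * from the polling path (ioat_dma_channel_poll() ->
+ * channel_process_descriptors() -> ioat_dma_request_process()), so
+ * callbacks should be kept short. */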
+
+/*
+ * ----------------------------------------------------------------------------
+ * Getter / Setter Functions
+ * ----------------------------------------------------------------------------
+ */
+
+/**
+ * \brief returns the state of a DMA request
+ *
+ * \param req DMA request
+ *
+ * \returns DMA request state
+ */
+inline dma_req_st_t dma_request_get_state(struct dma_request *req)
+{
+ return req->state;
+}
+
+/**
+ * \brief returns the ID of a DMA request
+ *
+ * \param req DMA request
+ *
+ * \returns DMA request ID
+ */
+inline dma_req_id_t dma_request_get_id(struct dma_request *req)
+{
+ return req->id;
+}
+
+/**
+ * \brief returns the next DMA request of the given request
+ *
+ * \param req DMA request
+ *
+ * \returns DMA request if there was one
+ * NULL if the request is at the end of the chain
+ */
+inline struct dma_request *dma_request_get_next(struct dma_request *req)
+{
+ return req->next;
+}
+
+/**
+ * \brief sets the next DMA request of the given request
+ *
+ * \param req DMA request
+ * \param next DMA request
+ */
+inline void dma_request_set_next(struct dma_request *req,
+ struct dma_request *next)
+{
+ req->next = next;
+}
#define DMACHAN_DEBUG(x...)
#endif
#if DMA_DEBUG_REQUEST_ENABLED
-#define DMAREQ_DEBUG(x...) DMA_DEBUG_PRINT("[dma req] " x)
+#define DMAREQ_DEBUG(x...) DMA_DEBUG_PRINT("[dma req] " x)
#else
#define DMAREQ_DEBUG(x...)
#endif
#if DMA_DEBUG_DEVICE_ENABLED
-#define DMADEV_DEBUG(x...) DMA_DEBUG_PRINT("[dma dev.%02x] " x)
+#define DMADEV_DEBUG(x...) DMA_DEBUG_PRINT("[dma dev.%02x] " x)
#else
#define DMADEV_DEBUG(x...)
#endif
struct dma_request *head; ///< start of the request list
struct dma_request *tail; ///< end of the request list
} req_list; ///< list of submitted requests
+
+    uint64_t req_counter;        ///< number of requests issued so far
};
+/*
+ * ----------------------------------------------------------------------------
+ * Request List Management
+ * ----------------------------------------------------------------------------
+ */
+
+/**
+ * \brief returns the first request on the submitted request queue of the channel
+ *
+ * \param chan DMA channel
+ *
+ * \returns pointer to the DMA request
+ * NULL if queue was empty
+ */
+struct dma_request *dma_channel_deq_request_head(struct dma_channel *chan);
+
+/**
+ * \brief inserts a request at the head of a channel's request list
+ *
+ * \param chan DMA channel
+ * \param req DMA request to be inserted
+ */
+void dma_channel_enq_request_head(struct dma_channel *chan,
+ struct dma_request *req);
+
+/**
+ * \brief inserts a request at the end of the channel's request list
+ *
+ * \param chan DMA channel
+ * \param req DMA request to be inserted
+ */
+void dma_channel_enq_request_tail(struct dma_channel *chan,
+ struct dma_request *req);
+
+/**
+ * \brief Submits the pending descriptors to the hardware queue
+ *
+ * \param chan DMA channel
+ *
+ * \returns number of submitted descriptors
+ */
+uint16_t dma_channel_submit_pending(struct dma_channel *chan);
+
+
+/**
+ * \brief returns the next DMA request counter value to generate the req id.
+ *
+ * \param chan DMA channel
+ *
+ * \returns request counter value
+ */
+static inline uint64_t dma_channel_incr_req_count(struct dma_channel *chan)
+{
+ return ++chan->req_counter;
+}
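+
+/* Request IDs are derived from this counter: dma_request_generate_req_id()
+ * feeds it into dma_request_id_build() together with the channel, so IDs
+ * are unique per channel. */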
#endif /* DMA_CHANNEL_INTERNAL_H */
*/
errval_t dma_request_process(struct dma_request *req);
+/**
+ * \brief sets the next DMA request of the given request
+ *
+ * \param req DMA request
+ * \param next DMA request
+ */
+void dma_request_set_next(struct dma_request *req,
+ struct dma_request *next);
+
#endif /* DMA_REQUEST_INTERNAL_H */
errval_t ioat_dma_channel_init(struct ioat_dma_device *dev,
uint8_t id,
uint32_t max_xfer,
- struct dma_channel **ret_chan);
+ struct ioat_dma_channel **ret_chan);
+/**
+ * \brief enqueues a request onto the IOAT DMA channel and submits it to the
+ * hardware
+ *
+ * \param chan IOAT DMA channel
+ * \param req IOAT DMA request to be submitted
+ *
+ * \returns SYS_ERR_OK on success
+ * DMA_ERR_* on failure
+ */
+errval_t ioat_dma_channel_submit_request(struct ioat_dma_channel *chan,
+ struct ioat_dma_request *req);
+/**
+ * \brief initializes the MSI-X interrupts for the channel
+ *
+ * \param chan IOAT DMA channel
+ *
+ * \returns SYS_ERR_OK on success
+ *          errval on failure
+ */
+errval_t ioat_dma_channel_irq_setup_msix(struct ioat_dma_channel *chan);
#endif /* IOAT_DMA_CHANNEL_INTERNAL_H */
* \returns SYS_ERR_OK on success
* errval on error
*/
-errval_t ioat_desc_alloc(uint16_t size,
- uint16_t align,
- uint16_t count,
- struct ioat_dma_descriptor **desc);
+errval_t ioat_dma_desc_alloc(uint16_t size,
+ uint16_t align,
+ uint16_t count,
+ struct ioat_dma_descriptor **desc);
/**
* \brief brief frees up the array of previously allocated descriptors
* \returns SYS_ERR_OK on success
* errval on failure
*/
-errval_t ioat_desc_free(struct ioat_dma_descriptor *desc);
+errval_t ioat_dma_desc_free(struct ioat_dma_descriptor *desc);
/*
* ----------------------------------------------------------------------------
*
* \param desc IOAT DMA descriptor
*/
-ioat_dma_desc_t ioat_desc_get_desc_handle(struct ioat_dma_descriptor *desc);
+ioat_dma_desc_t ioat_dma_desc_get_desc_handle(struct ioat_dma_descriptor *desc);
/**
* \brief sets the corresponding request
*
* \param desc IOAT DMA descriptor
*/
-void ioat_desc_set_request(struct ioat_dma_descriptor *desc,
+void ioat_dma_desc_set_request(struct ioat_dma_descriptor *desc,
struct ioat_dma_request *req);
/**
* \param desc descriptor to set the next field
* \param next following descriptor
*/
-void ioat_desc_set_next(struct ioat_dma_descriptor *desc,
- struct ioat_dma_descriptor *next);
+void ioat_dma_desc_set_next(struct ioat_dma_descriptor *desc,
+ struct ioat_dma_descriptor *next);
/**
* \brief returns the physical address of the descriptor
*
* \returns physical address of the descriptor
*/
-lpaddr_t ioat_desc_get_paddr(struct ioat_dma_descriptor *desc);
+lpaddr_t ioat_dma_desc_get_paddr(struct ioat_dma_descriptor *desc);
+
+/**
+ * \brief returns a virtual address pointer to the location where the descriptor
+ * is mapped
+ *
+ * \param desc IOAT DMA descriptor
+ */
+ioat_dma_desc_t ioat_dma_desc_get_desc(struct ioat_dma_descriptor *desc);
#endif /* IOAT_DMA_DESCRIPTORS_INTERNAL_H */
#include <dev/ioat_dma_dev.h>
-
-
-
/* device flags */
#define IOAT_DMA_DEV_F_DCA 0x00000001
#define IOAT_DMA_DEV_F_RAID 0x00000002
 * size and mapping information for the completion status field
* this is where the DMA channel will write the address of the last completed
* descriptor (a copy of CHANSTS register)
- *
*/
-#define IOAT_DMA_COMPLSTATUS_SIZE BASE_PAGE_SIZE
-#define IOAT_DMA_COMPLSTATUS_ELEMENT_SIZE 64
-#define IOAT_DMA_COMPLSTATUS_FLAGS VREGION_FLAGS_READ_WRITE
+#define IOAT_DMA_COMPLSTATUS_SIZE BASE_PAGE_SIZE
+#define IOAT_DMA_COMPLSTATUS_ELEMENT_SIZE 64
+#define IOAT_DMA_COMPLSTATUS_FLAGS VREGION_FLAGS_READ_WRITE
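+
+/*
+ * Each channel obtains its slice of this writeback region through
+ * ioat_dma_device_get_complsts_addr(), which offsets paddr/vaddr by the
+ * channel index; the hardware stores a copy of CHANSTS there, and the
+ * channel reads the completed-descriptor address from that location when
+ * polling.
+ */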
+/**
+ * \brief returns a dma_mem struct containing the memory location for the
+ * channel's completion status writeback
+ *
+ * \param dev IOAT DMA device
+ * \param mem dma_mem structure that gets filled in
+ */
+void ioat_dma_device_get_complsts_addr(struct ioat_dma_device *dev,
+ struct dma_mem *mem);
-void ioat_device_get_complsts_addr(struct ioat_dma_device *dev,
- struct dma_mem *mem);
-mackerel_addr_t ioat_device_get_mmio_base(struct ioat_dma_device *dev);
+/**
+ * \brief globally enables the interrupts for the given device
+ *
+ * \param dev IOAT DMA device
+ * \param type the interrupt type to enable
+ *
+ * \returns SYS_ERR_OK on success
+ */
+errval_t ioat_dma_device_irq_setup(struct ioat_dma_device *dev,
+ dma_irq_t type);
#endif /* IOAT_DMA_DEVICE_INTERNAL_H */
*
* \returns pointer to a DMA descriptor
*/
-struct ioat_dma_descriptor *ioat_ring_get_next_desc(struct ioat_dma_ring *ring);
+struct ioat_dma_descriptor *ioat_dma_ring_get_next_desc(struct ioat_dma_ring *ring);
/**
*
* \returns pointer to a DMA descriptor
*/
-struct ioat_dma_descriptor *ioat_ring_get_tail_desc(struct ioat_dma_ring *ring);
+struct ioat_dma_descriptor *ioat_dma_ring_get_tail_desc(struct ioat_dma_ring *ring);
/**
* \brief submits the pending descriptors to the hardware
*
* \returns the current head of the descriptors
*/
-uint16_t ioat_ring_submit_pending(struct ioat_dma_ring *ring);
+uint16_t ioat_dma_ring_submit_pending(struct ioat_dma_ring *ring);
/**
* \brief obtains the physical address of the descriptor chain
*
* \returns physical address of the pending descriptor chain
*/
-lpaddr_t ioat_ring_get_chain_addr(struct ioat_dma_ring *ring);
+lpaddr_t ioat_dma_ring_get_chain_addr(struct ioat_dma_ring *ring);
#endif /* IOAT_DMA_RING_INTERNAL_H */
#include <barrelfish/barrelfish.h>
+#include <dma_internal.h>
#include <ioat/ioat_dma_internal.h>
#include <ioat/ioat_dma_dca_internal.h>
#include <ioat/ioat_dma_channel_internal.h>
#include <ioat/ioat_dma_ring_internal.h>
#include <ioat/ioat_dma_descriptors_internal.h>
+#include <ioat/ioat_dma_request_internal.h>
#include <debug.h>
struct ioat_dma_channel
{
- ioat_dma_chan_id_t id; ///< unique channel id
+ struct dma_channel common;
+
ioat_dma_chan_t channel; ///< Mackerel address
- struct ioat_dma_device *dev; ///< the DMA device this channel belongs to
- size_t max_xfer_size; ///< maximum transfer size of this channel
- ioat_dma_chan_st_t state; ///< channel state
+
    lpaddr_t last_completion;        ///< completion address of the last processed descriptor
struct dma_mem completion;
struct ioat_dma_ring *ring; ///< Descriptor ring
uint64_t status; ///< channel status
uint8_t irq_vector;
size_t irq_msix;
- struct ioat_dma_request *req_head;
- struct ioat_dma_request *req_tail;
};
-/*
- * ============================================================================
- * Public Internal Interface
- * ============================================================================
- */
-
/**
- * \brief Resets a IOAT DMA channel
+ * \brief sets the descriptor chain address register of the DMA channel
*
- * \param chan IOAT DMA channel to be reset
- *
- * \returns SYS_ERR_OK on success
- * IOAT_ERR_CHAN_RESET on reset timeout
+ * \param chan IOAT DMA channel
*/
-errval_t ioat_dma_channel_reset(struct ioat_dma_channel *chan)
+static inline void channel_set_chain_addr(struct ioat_dma_channel *chan)
{
- IOATCHAN_DEBUG("reset channel.\n", chan->id);
+ lpaddr_t chain_addr = ioat_dma_ring_get_chain_addr(chan->ring);
- if (chan->state == IOAT_DMA_CHAN_ST_ERROR) {
- ioat_dma_chan_err_t chanerr = ioat_dma_chan_err_rd(&chan->channel);
- ioat_dma_chan_err_wr(&chan->channel, chanerr);
- IOATCHAN_DEBUG("Reseting channel from error state: [%08x]\n", chan->id,
- chanerr);
+ IOATCHAN_DEBUG("setting chain addr to [%016lx]\n", chan->common.id,
+ chain_addr);
- /*
- * TODO: clear the ioat_dma_pci_chanerr register in PCI config space
- * (same approach as above)
- * -> How to access this ?
- */
- }
- chan->state = IOAT_DMA_CHAN_ST_RESETTING;
-
- /* perform reset */
- ioat_dma_chan_cmd_reset_wrf(&chan->channel, 0x1);
-
- uint16_t reset_counter = 0xFFF;
- do {
- if (!ioat_dma_chan_cmd_reset_rdf(&chan->channel)) {
- break;
- }
- thread_yield();
- } while(reset_counter--);
-
- if (ioat_dma_chan_cmd_reset_rdf(&chan->channel)) {
- /* reset failed */
- return IOAT_ERR_RESET_TIMEOUT;
- }
-
- /* XXX: Intel BD architecture will need some additional work here */
-
- chan->state = IOAT_DMA_CHAN_ST_UNINITIALEZED;
-
- return SYS_ERR_OK;
+ ioat_dma_chan_chainaddr_lo_wr(&chan->channel, (uint32_t) chain_addr);
+ ioat_dma_chan_chainaddr_hi_wr(&chan->channel, chain_addr >> 32);
}
-/*
- * ----------------------------------------------------------------------------
- * Getter / Setter Functions
- * ----------------------------------------------------------------------------
- */
-
/**
- * \brief returns the IOAT DMA channel ID
+ * \brief reads the channel status and returns the physical address of the last
+ * completed descriptor
*
- * \param chan IOAT DMA channel
+ * \param chan IOAT DMA channel
*
- * \returns IOAT DMA channel ID of the supplied channel
+ * \returns physical address of last descriptor
*/
-inline ioat_dma_chan_id_t ioat_dma_channel_get_id(struct ioat_dma_channel *chan)
+static inline lpaddr_t channel_get_completion_addr(struct ioat_dma_channel *chan)
{
- return chan->id;
+ lpaddr_t compl_addr = *((lpaddr_t*) chan->completion.vaddr);
+
+ return (compl_addr & (~ioat_dma_chan_status_mask));
}
/**
- * \brief returns the associated IOAT DMA descriptor ring of a channel
+ * \brief checks if the channel has completed descriptors which can be processed
+ * and returns the physical address of the last one.
*
- * \param chan IOAT DMA channel
+ * \param chan IOAT DMA channel
*
- * \returns IOAT DMA descriptor ring handle
+ * \returns physical address of last descriptor
+ * 0 if there were no new descriptors to process
*/
-inline struct ioat_dma_ring *ioat_dma_channel_get_ring(struct ioat_dma_channel *chan)
+static inline lpaddr_t channel_has_completed_descr(struct ioat_dma_channel *chan)
{
- return chan->ring;
+ lpaddr_t curr_compl = channel_get_completion_addr(chan);
+ if (curr_compl != chan->last_completion) {
+ return curr_compl;
+ } else {
+ return 0;
+ }
}
/**
- * \brief returns the maximum number of bytes per DMA descritpor
+ * \brief processes the completed descriptors of a DMA channel and finishes
+ * the requests
*
- * \param chan IOAT DMA channel
+ * \param chan            IOAT DMA channel
+ * \param compl_addr_phys physical address of the last completed descriptor
 *
- * \returns maximum number of bytes
+ * \returns SYS_ERR_OK if the request was processed to completion
+ * DMA_ERR_CHAN_IDLE if there was no descriptor to process
+ * DMA_ERR_REQUEST_UNFINISHED if the request is still not finished
+ * errval on error
*/
-inline uint32_t ioat_dma_channel_get_max_xfer_size(struct ioat_dma_channel *chan)
+static errval_t channel_process_descriptors(struct ioat_dma_channel *chan,
+ lpaddr_t compl_addr_phys)
{
- return chan->max_xfer_size;
+ errval_t err;
+
+ if (!compl_addr_phys) {
+ return DMA_ERR_CHAN_IDLE;
+ }
+
+ IOATCHAN_DEBUG("processing [%016lx] head: %u, tail: %u, issued: %u\n",
+ chan->common.id, compl_addr_phys,
+ ioat_dma_ring_get_head(chan->ring),
+ ioat_dma_ring_get_tail(chan->ring),
+ ioat_dma_ring_get_issued(chan->ring));
+
+ uint16_t active_count = ioat_dma_ring_get_active(chan->ring);
+
+ struct ioat_dma_descriptor *desc;
+ struct ioat_dma_request *req;
+ struct dma_request *req_head;
+
+ uint16_t processed = 0;
+ uint8_t request_done = 0;
+
+ for (uint16_t i = 0; i < active_count; i++) {
+ desc = ioat_dma_ring_get_tail_desc(chan->ring);
+
+ /*
+ * check if there is a request associated with the descriptor
+ * this indicates the last descriptor of a request
+ */
+ req = ioat_dma_desc_get_request(desc);
+ if (req) {
+ req_head = dma_channel_deq_request_head(&chan->common);
+ assert(req_head == (struct dma_request * )req);
+ err = ioat_dma_request_process(req);
+ if (err_is_fail(err)) {
+ dma_channel_enq_request_head(&chan->common, req_head);
+ return err;
+ }
+ request_done = 1;
+ }
+
+ /* this was the last completed descriptor */
+ if (ioat_dma_desc_get_paddr(desc) == compl_addr_phys) {
+ processed = i;
+ break;
+ }
+ }
+
+ chan->last_completion = compl_addr_phys;
+
+ /* do a 5us delay per pending descriptor */
+ ioat_dma_device_set_intr_delay((struct ioat_dma_device *) chan->common.device,
+                                   (5 * (active_count - processed)));
+
+ if (request_done) {
+ return SYS_ERR_OK;
+ }
+
+ return DMA_ERR_REQUEST_UNFINISHED;
}
/*
*
* \returns SYS_ERR_OK on success
*/
-errval_t ioat_channel_init(struct ioat_dma_device *dev,
- uint8_t id,
- uint32_t max_xfer,
- struct ioat_dma_channel **ret_chan)
+errval_t ioat_dma_channel_init(struct ioat_dma_device *dev,
+ uint8_t id,
+ uint32_t max_xfer,
+ struct ioat_dma_channel **ret_chan)
{
errval_t err;
- IOATCHAN_DEBUG("initialize channel with max. xfer size of %u bytes\n", id,
- max_xfer);
-
struct ioat_dma_channel *chan = calloc(1, sizeof(*chan));
if (chan == NULL) {
return LIB_ERR_MALLOC_FAIL;
}
- chan->id = ioat_dma_channel_build_id(ioat_dma_device_get_id(dev), id);
- chan->dev = dev;
- chan->max_xfer_size = max_xfer;
+ struct dma_device *dma_dev = (struct dma_device *) dev;
+ struct dma_channel *dma_chan = &chan->common;
+
+ dma_chan->id = dma_channel_id_build(dma_device_get_id(dma_dev), id);
+ dma_chan->device = dma_dev;
+ dma_chan->max_xfer_size = max_xfer;
- mackerel_addr_t chan_base = ioat_device_get_mmio_base(dev);
+ IOATCHAN_DEBUG("initialize channel with max. xfer size of %u bytes\n",
+ dma_chan->id, max_xfer);
+
+ mackerel_addr_t chan_base = dma_device_get_mmio_vbase(dma_dev);
ioat_dma_chan_initialize(&chan->channel, chan_base + ((id + 1) * 0x80));
ioat_dma_chan_dcactrl_target_cpu_wrf(&chan->channel,
return err;
}
- ioat_device_get_complsts_addr(dev, &chan->completion);
+ ioat_dma_device_get_complsts_addr(dev, &chan->completion);
/* write the completion address */
ioat_dma_chan_cmpl_lo_wr(&chan->channel, chan->completion.paddr);
chan_ctrl = ioat_dma_chan_ctrl_intp_dis_insert(chan_ctrl, 0x1);
ioat_dma_chan_ctrl_wr(&chan->channel, chan_ctrl);
- chan->state = IOAT_DMA_CHAN_ST_PREPARED;
+ dma_chan->state = DMA_CHAN_ST_PREPARED;
/*
* do a check if the channel operates correctly by issuing a NOP
*/
-#if 0
- IOATCHAN_DEBUG("performing selftest on channel with NOP\n", chan->id);
+ IOATCHAN_DEBUG("performing selftest on channel with NOP\n", dma_chan->id);
- ioat_dma_request_nop(chan);
- err = ioat_dma_channel_submit_pending(chan);
+ ioat_dma_request_nop_chan(chan);
+ err = ioat_dma_channel_issue_pending(chan);
if (err_is_fail(err)) {
dma_mem_free(&chan->completion);
return err;
}
uint32_t j = 0xFFFF;
+ uint64_t status;
do {
+ status = ioat_dma_channel_get_status(chan);
thread_yield();
- }while (j-- && !ioat_dma_channel_is_active(chan)
- && !ioat_dma_channel_is_idle(chan));
+ } while (j-- && !ioat_dma_channel_is_active(status)
+ && !ioat_dma_channel_is_idle(status));
- if (ioat_dma_channel_is_active(chan) || ioat_dma_channel_is_idle(chan)) {
- IOATCHAN_DEBUG("channel worked properly: %016lx\n", chan->id,
- *(uint64_t*) chan->completion.addr);
+ if (ioat_dma_channel_is_active(status) || ioat_dma_channel_is_idle(status)) {
+ IOATCHAN_DEBUG("channel worked properly: %016lx\n", dma_chan->id,
+                       *(uint64_t* ) chan->completion.vaddr);
+        *ret_chan = chan;
return SYS_ERR_OK;
} else {
uint32_t error = ioat_dma_chan_err_rd(&chan->channel);
- IOATCHAN_DEBUG(" channel error ERROR: %08x\n", chan->id, error);
- ioat_dma_mem_free(&chan->completion);
- return IOAT_ERR_CHAN_ERROR;
+ IOATCHAN_DEBUG(" channel error ERROR: %08x\n", dma_chan->id, error);
+ dma_mem_free(&chan->completion);
+ return DMA_ERR_CHAN_ERROR;
+ }
+}
+
+/**
+ * \brief Submits the pending descriptors to the hardware queue
+ *
+ * \param chan IOAT DMA channel
+ *
+ * \returns number of submitted descriptors
+ */
+uint16_t ioat_dma_channel_issue_pending(struct ioat_dma_channel *chan)
+{
+ errval_t err;
+
+ uint16_t pending = ioat_dma_ring_get_pendig(chan->ring);
+
+ IOATCHAN_DEBUG("issuing %u pending descriptors to hardware\n",
+ chan->common.id, pending);
+
+ if (chan->common.state != DMA_CHAN_ST_RUNNING) {
+ err = ioat_dma_channel_start(chan);
+ }
+ if (pending > 0) {
+ uint16_t dmacnt = ioat_dma_ring_submit_pending(chan->ring);
+ ioat_dma_chan_dmacount_wr(&chan->channel, dmacnt);
+
+ IOATCHAN_DEBUG(" setting dma_count to [%u]\n", chan->common.id, dmacnt);
+ }
+
+ return pending;
+}
+
+/*
+ * ============================================================================
+ * Public Interface
+ * ============================================================================
+ */
+
+/*
+ * ----------------------------------------------------------------------------
+ * Channel State Management
+ * ----------------------------------------------------------------------------
+ */
+
+/**
+ * \brief Resets an IOAT DMA channel
+ *
+ * \param chan IOAT DMA channel to be reset
+ *
+ * \returns SYS_ERR_OK on success
+ * DMA_ERR_CHAN_RESET on reset timeout
+ */
+errval_t ioat_dma_channel_reset(struct ioat_dma_channel *chan)
+{
+ struct dma_channel *dma_chan = &chan->common;
+
+ IOATCHAN_DEBUG("reset channel.\n", dma_chan->id);
+
+ if (dma_chan->state == DMA_CHAN_ST_ERROR) {
+ ioat_dma_chan_err_t chanerr = ioat_dma_chan_err_rd(&chan->channel);
+ ioat_dma_chan_err_wr(&chan->channel, chanerr);
+ IOATCHAN_DEBUG("Reseting channel from error state: [%08x]\n",
+ dma_chan->id, chanerr);
+
+        /*
+         * TODO: clear the ioat_dma_pci_chanerr register in PCI config space
+         *       (same approach as above) -> how to access this?
+         *       Candidate accessors:
+         *       errval_t pci_read_conf_header(uint32_t dword, uint32_t *val);
+         *       errval_t pci_write_conf_header(uint32_t dword, uint32_t val);
+         */
}
-#endif
+ dma_chan->state = DMA_CHAN_ST_RESETTING;
+
+ /* perform reset */
+ ioat_dma_chan_cmd_reset_wrf(&chan->channel, 0x1);
+
+ uint16_t reset_counter = 0xFFF;
+ do {
+ if (!ioat_dma_chan_cmd_reset_rdf(&chan->channel)) {
+ break;
+ }
+ thread_yield();
+ } while (reset_counter--);
+
+ if (ioat_dma_chan_cmd_reset_rdf(&chan->channel)) {
+ /* reset failed */
+ return DMA_ERR_RESET_TIMEOUT;
+ }
+
+ /* XXX: Intel BD architecture will need some additional work here */
+
+ dma_chan->state = DMA_CHAN_ST_UNINITIALEZED;
+
+ return SYS_ERR_OK;
+}
+
+/**
+ * \brief restarts an IOAT DMA channel; this updates the chain address
+ *        register and the DMA count register.
+ *
+ * \param chan IOAT DMA channel
+ *
+ * \returns SYS_ERR_OK on success
+ * DMA_ERR_* on failure
+ */
+errval_t ioat_dma_channel_restart(struct ioat_dma_channel *chan)
+{
+ assert(!"NYI");
+ return SYS_ERR_OK;
+}
+
+/**
+ * \brief starts an IOAT DMA channel. This sets the chain address to the first
+ * entry of the ring and the DMA count to zero.
+ *
+ * \param chan IOAT DMA channel
+ *
+ * \returns SYS_ERR_OK on success
+ * DMA_ERR_* on failure
+ */
+errval_t ioat_dma_channel_start(struct ioat_dma_channel *chan)
+{
+ if (chan->common.state == DMA_CHAN_ST_ERROR) {
+ return ioat_dma_channel_restart(chan);
+ }
+
+ if (chan->common.state == DMA_CHAN_ST_RUNNING) {
+ return SYS_ERR_OK;
+ }
+
+ IOATCHAN_DEBUG("starting channel.\n", chan->common.id);
+
+ chan->common.state = DMA_CHAN_ST_RUNNING;
+ channel_set_chain_addr(chan);
+
+ return SYS_ERR_OK;
+}
+
+/**
+ * \brief stops the processing of the descriptors.
+ *
+ * \param chan IOAT DMA channel
+ *
+ * \returns SYS_ERR_OK on success
+ * DMA_ERR_* on failure
+ */
+errval_t ioat_dma_channel_stop(struct ioat_dma_channel *chan)
+{
+ assert(!"NYI");
+ return SYS_ERR_OK;
+}
+
+/**
+ * \brief Puts the IOAT DMA channel into the suspended state
+ *
+ * \param chan IOAT DMA channel
+ *
+ * \returns SYS_ERR_OK on success
+ * DMA_ERR_* on failure
+ */
+errval_t ioat_dma_channel_suspend(struct ioat_dma_channel *chan)
+{
+ assert(!"NYI");
return SYS_ERR_OK;
}
+
+/**
+ * \brief enqueues a request onto the IOAT DMA channel and submits it to the
+ * hardware
+ *
+ * \param chan IOAT DMA channel
+ * \param req IOAT DMA request to be submitted
+ *
+ * \returns SYS_ERR_OK on success
+ * DMA_ERR_* on failure
+ */
+errval_t ioat_dma_channel_submit_request(struct ioat_dma_channel *chan,
+ struct ioat_dma_request *req)
+{
+ IOATCHAN_DEBUG("submit request [%016lx]\n", chan->common.id,
+ dma_request_get_id((struct dma_request * )req));
+
+ dma_channel_enq_request_tail(&chan->common, (struct dma_request *) req);
+
+ assert(!"revise");
+ ioat_dma_channel_issue_pending(chan);
+
+ return SYS_ERR_OK;
+}
+
+/**
+ * \brief polls the IOAT DMA channel for completed events
+ *
+ * \param chan IOAT DMA channel
+ *
+ * \returns SYS_ERR_OK if there was something processed
+ *
+ */
+errval_t ioat_dma_channel_poll(struct ioat_dma_channel *chan)
+{
+ errval_t err;
+
+ uint64_t status = ioat_dma_channel_get_status(chan);
+
+ if (ioat_dma_channel_is_halted(status)) {
+ IOATCHAN_DEBUG("channel is in error state\n", chan->common.id);
+ assert(!"NYI: error event handling");
+ }
+
+ /* check if there can be something to process */
+ if (chan->common.req_list.head == NULL) {
+ return DMA_ERR_CHAN_IDLE;
+ }
+
+ lpaddr_t compl_addr_phys = channel_has_completed_descr(chan);
+ if (!compl_addr_phys) {
+ return DMA_ERR_CHAN_IDLE;
+ }
+
+ err = channel_process_descriptors(chan, compl_addr_phys);
+ switch (err_no(err)) {
+ case SYS_ERR_OK:
+ /* this means we processed a descriptor request */
+ return SYS_ERR_OK;
+ case DMA_ERR_REQUEST_UNFINISHED:
+ return DMA_ERR_CHAN_IDLE;
+ default:
+ return err;
+ }
+}
+
+/*
+ * ----------------------------------------------------------------------------
+ * Getter / Setter Functions
+ * ----------------------------------------------------------------------------
+ */
+
+/**
+ * \brief returns the associated IOAT DMA descriptor ring of a channel
+ *
+ * \param chan IOAT DMA channel
+ *
+ * \returns IOAT DMA descriptor ring handle
+ */
+inline struct ioat_dma_ring *ioat_dma_channel_get_ring(struct ioat_dma_channel *chan)
+{
+ return chan->ring;
+}
+
+/**
+ * \brief updates the channel status flag by reading the CHANSTS register
+ *
+ * \param chan IOAT DMA channel
+ *
+ * \returns the raw 64-bit channel status (copy of CHANSTS)
+ */
+inline uint64_t ioat_dma_channel_get_status(struct ioat_dma_channel *chan)
+{
+ uint32_t status_lo = ioat_dma_chan_sts_lo_rd(&chan->channel);
+ chan->status = ioat_dma_chan_sts_hi_rd(&chan->channel);
+ chan->status <<= 32;
+ chan->status |= status_lo;
+
+ return chan->status;
+}
+
* \returns SYS_ERR_OK on success
* errval on error
*/
-errval_t ioat_desc_alloc(uint16_t size,
- uint16_t align,
- uint16_t count,
- struct ioat_dma_descriptor **desc)
+errval_t ioat_dma_desc_alloc(uint16_t size,
+ uint16_t align,
+ uint16_t count,
+ struct ioat_dma_descriptor **desc)
{
errval_t err;
}
/* do the linkage */
- ioat_desc_set_next(&dma_desc[(i-1) & (count - 1)], &dma_desc[i]);
+ ioat_dma_desc_set_next(&dma_desc[(i-1) & (count - 1)], &dma_desc[i]);
/* set the entry in the array */
desc[i] = &dma_desc[i];
* \returns SYS_ERR_OK on success
* errval on failure
*/
-errval_t ioat_desc_free(struct ioat_dma_descriptor *desc)
+errval_t ioat_dma_desc_free(struct ioat_dma_descriptor *desc)
{
errval_t err;
*/
/**
- * \brief returns the physical address of the descriptor
- *
- * \param desc IOAT DMA descriptor
- *
- * \returns physical address of the descriptor
- */
-inline lpaddr_t ioat_desc_get_paddr(struct ioat_dma_descriptor *desc)
-{
- return desc->paddr;
-}
-
-/**
* \brief sets the next pointer of the descriptor and does the corresponding
* hardware linkage
*
* \param desc descriptor to set the next field
* \param next following descriptor
*/
-inline void ioat_desc_set_next(struct ioat_dma_descriptor *desc,
- struct ioat_dma_descriptor *next)
+inline void ioat_dma_desc_set_next(struct ioat_dma_descriptor *desc,
+ struct ioat_dma_descriptor *next)
{
ioat_dma_desc_next_insert(desc->desc, next->paddr);
desc->next = next;
}
/**
- * \brief sets the corresponding request
- *
- * \param desc IOAT DMA descriptor
- */
-inline void ioat_desc_set_request(struct ioat_dma_descriptor *desc,
- struct ioat_dma_request *req)
-{
- desc->req = req;
-}
-
-/**
* \brief returns a virtual address pointer to the location where the descriptor
* is mapped
*
* \param desc IOAT DMA descriptor
*/
-inline ioat_dma_desc_t ioat_desc_get_desc_handle(struct ioat_dma_descriptor *desc)
+inline ioat_dma_desc_t ioat_dma_desc_get_desc_handle(struct ioat_dma_descriptor *desc)
{
return desc->desc;
}
{
return desc->next;
}
+
+/**
+ * \brief returns the physical address of the descriptor
+ *
+ * \param desc IOAT DMA descriptor
+ *
+ * \returns physical address of the descriptor
+ */
+inline lpaddr_t ioat_dma_desc_get_paddr(struct ioat_dma_descriptor *desc)
+{
+ return desc->paddr;
+}
+
+/**
+ * \brief returns a virtual address pointer to the location where the descriptor
+ * is mapped
+ *
+ * \param desc IOAT DMA descriptor
+ */
+inline ioat_dma_desc_t ioat_dma_desc_get_desc(struct ioat_dma_descriptor *desc)
+{
+ return desc->desc;
+}
+
+/**
+ * \brief sets the corresponding request
+ *
+ * \param desc IOAT DMA descriptor
+ */
+inline void ioat_dma_desc_set_request(struct ioat_dma_descriptor *desc,
+ struct ioat_dma_request *req)
+{
+ desc->req = req;
+}
*/
struct ioat_dma_device
{
- struct dma_dev_int common;
+ struct dma_device common;
ioat_dma_t device; ///< mackerel device base
ioat_dma_cbver_t version; ///< Crystal Beach version number
{
IOATDEV_DEBUG("devices of Crystal Beach Version 1.xx are currently not supported.\n",
dev->common.id);
- return IOAT_ERR_DEVICE_UNSUPPORTED;
+ return DMA_ERR_DEVICE_UNSUPPORTED;
}
static errval_t device_init_ioat_v2(struct ioat_dma_device *dev)
{
IOATDEV_DEBUG("devices of Crystal Beach Version 2.xx are currently not supported.\n",
dev->common.id);
- return IOAT_ERR_DEVICE_UNSUPPORTED;
+ return DMA_ERR_DEVICE_UNSUPPORTED;
}
static errval_t device_init_ioat_v3(struct ioat_dma_device *dev)
} else if (ioat_dma_cbver_minor_extract(dev->version) == 3) {
IOATDEV_DEBUG("devices of Crystal Beach Version 3.3 are not supported.\n",
dev->common.id);
- return IOAT_ERR_DEVICE_UNSUPPORTED;
+ return DMA_ERR_DEVICE_UNSUPPORTED;
}
/* if DCA is enabled, we cannot support the RAID functions */
if (ioat_dma_dca_is_enabled()) {
- IOATDEV_DEBUG("Disabling XOR and PQ while DCA is enabled\n", dev->common.id);
+ IOATDEV_DEBUG("Disabling XOR and PQ while DCA is enabled\n",
+ dev->common.id);
cap = ioat_dma_dmacapability_xor_insert(cap, 0x0);
cap = ioat_dma_dmacapability_pq_insert(cap, 0x0);
}
}
/* set the interrupt type to disabled*/
- dev->irq_type = IOAT_DMA_IRQ_DISABLED;
+ dev->common.irq_type = DMA_IRQ_DISABLED;
/* allocate memory for completion status writeback */
err = dma_mem_alloc(IOAT_DMA_COMPLSTATUS_SIZE,
- IOAT_DMA_COMPLSTATUS_FLAGS,
+ IOAT_DMA_COMPLSTATUS_FLAGS,
&dev->complstatus);
if (err_is_fail(err)) {
return err;
}
- dev->common.channels.num = ioat_dma_chancnt_num_rdf(&dev->device);
+ dev->common.channels.count = ioat_dma_chancnt_num_rdf(&dev->device);
- dev->common.channels.c = calloc(dev->common.channels.num, sizeof(*dev->common.channels.c));
+ dev->common.channels.c = calloc(dev->common.channels.count,
+ sizeof(*dev->common.channels.c));
if (dev->common.channels.c == NULL) {
dma_mem_free(&dev->complstatus);
return LIB_ERR_MALLOC_FAIL;
/* channel enumeration */
- IOATDEV_DEBUG("channel enumeration. discovered %u channels\n", dev->common.id, dev->common.channels.num);
+ IOATDEV_DEBUG("channel enumeration. discovered %u channels\n", dev->common.id,
+ dev->common.channels.count);
uint32_t max_xfer_size = (1 << ioat_dma_xfercap_max_rdf(&dev->device));
- for (uint8_t i = 0; i < dev->common.channels.num; ++i) {
- err = ioat_channel_init(dev, i, max_xfer_size, &dev->common.channels.c[i]);
- }
-
+ for (uint8_t i = 0; i < dev->common.channels.count; ++i) {
+ struct dma_channel **chan = &dev->common.channels.c[i];
+ err = ioat_dma_channel_init(dev, i, max_xfer_size,
+ (struct ioat_dma_channel **) chan);
+ }
if (dev->flags & IOAT_DMA_DEV_F_DCA) {
/*TODO: DCA initialization device->dca = ioat3_dca_init(pdev, device->reg_base);*/
/*
* ===========================================================================
- * Library Internal Public Interface
+ * Library Internal Interface
* ===========================================================================
*/
-void ioat_device_get_complsts_addr(struct ioat_dma_device *dev,
+void ioat_dma_device_get_complsts_addr(struct ioat_dma_device *dev,
struct dma_mem *mem)
{
- if (dev->common.state != IOAT_DMA_DEV_ST_CHAN_ENUM) {
+ if (dev->common.state != DMA_DEV_ST_CHAN_ENUM) {
memset(mem, 0, sizeof(*mem));
}
- assert(dev->complstatus.addr);
+ assert(dev->complstatus.vaddr);
*mem = dev->complstatus;
mem->bytes = IOAT_DMA_COMPLSTATUS_SIZE;
mem->paddr += (IOAT_DMA_COMPLSTATUS_SIZE * dev->common.channels.next);
- mem->addr += (IOAT_DMA_COMPLSTATUS_SIZE * dev->common.channels.next++);
- mem->frame = NULL_CAP;
+    mem->frame = NULL_CAP;
+    mem->vaddr += (IOAT_DMA_COMPLSTATUS_SIZE * dev->common.channels.next++);
}
+/**
+ * \brief globally enables the interrupts for the given device
+ *
+ * \param dev IOAT DMA device
+ * \param type the interrupt type to enable
+ *
+ * \returns SYS_ERR_OK on success
+ */
+errval_t ioat_dma_device_irq_setup(struct ioat_dma_device *dev,
+ dma_irq_t type)
+{
+ ioat_dma_intrctrl_t intcrtl = 0;
+ intcrtl = ioat_dma_intrctrl_intp_en_insert(intcrtl, 1);
+
+ dev->common.irq_type = type;
+ switch (type) {
+ case DMA_IRQ_MSIX:
+ IOATDEV_DEBUG("Initializing MSI-X interrupts \n", dev->common.id);
+ assert(!"NYI");
+ break;
+ case DMA_IRQ_MSI:
+ IOATDEV_DEBUG("Initializing MSI interrupts \n", dev->common.id);
+ assert(!"NYI");
+ break;
+ case DMA_IRQ_INTX:
+ IOATDEV_DEBUG("Initializing INTx interrupts \n", dev->common.id);
+ assert(!"NYI");
+ break;
+ default:
+ /* disabled */
+ intcrtl = 0;
+ IOATDEV_DEBUG("Disabling interrupts \n", dev->common.id);
+ break;
+ }
+
+ ioat_dma_intrctrl_wr(&dev->device, intcrtl);
+
+ return SYS_ERR_OK;
+}
/*
* ===========================================================================
return LIB_ERR_MALLOC_FAIL;
}
- struct dma_device_int *dma_dev = &ioat_device->common;
+ struct dma_device *dma_dev = &ioat_device->common;
struct frame_identity mmio_id;
err = invoke_frame_identify(mmio, &mmio_id);
}
dma_dev->id = device_id++;
- dma_dev->mmio.pbase = mmio_id.base;
+ dma_dev->mmio.paddr = mmio_id.base;
dma_dev->mmio.bytes = (1UL << mmio_id.bits);
- dma_dev->mmio.cap = mmio;
+ dma_dev->mmio.frame = mmio;
IOATDEV_DEBUG("init device with mmio range: {paddr=0x%016lx, size=%u kB}\n",
dma_dev->id, mmio_id.base, 1 << mmio_id.bits);
- err = vspace_map_one_frame_attr(&dma_dev->mmio.vbase,
- dma_dev->mmio.bytes, dma_dev->mmio.cap,
+ err = vspace_map_one_frame_attr((void**) &dma_dev->mmio.vaddr,
+ dma_dev->mmio.bytes, dma_dev->mmio.frame,
VREGION_FLAGS_READ_WRITE_NOCACHE,
- NULL, NULL);
+ NULL,
+ NULL);
if (err_is_fail(err)) {
free(ioat_device);
return err;
}
- ioat_dma_initialize(&ioat_device->device, NULL, dma_dev->mmio.vbase);
+ ioat_dma_initialize(&ioat_device->device, NULL, (void *) dma_dev->mmio.vaddr);
ioat_device->version = ioat_dma_cbver_rd(&ioat_device->device);
IOATDEV_DEBUG("device registers mapped at 0x%016lx. IOAT version: %u.%u\n",
- dma_dev->id, (lvaddr_t )dma_dev->mmio.vbase,
+ dma_dev->id, dma_dev->mmio.vaddr,
ioat_dma_cbver_major_extract(ioat_device->version),
ioat_dma_cbver_minor_extract(ioat_device->version));
err = device_init_ioat_v3(ioat_device);
break;
default:
- err = IOAT_ERR_DEVICE_UNSUPPORTED;
+ err = DMA_ERR_DEVICE_UNSUPPORTED;
}
if (err_is_fail(err)) {
- vspace_unmap(dma_dev->mmio.vbase);
+ vspace_unmap((void*) dma_dev->mmio.vaddr);
free(ioat_device);
}
* \param arg argument supplied to the handler function
*/
errval_t ioat_dma_device_intr_enable(struct ioat_dma_device *dev,
- ioat_dma_irq_t type,
- ioat_dma_irq_fn_t fn,
+ dma_irq_t type,
+ dma_irq_fn_t fn,
void *arg)
{
assert(!"NYI");
void ioat_dma_device_set_intr_delay(struct ioat_dma_device *dev,
uint16_t usec)
{
- assert(!"NYI");
+ ioat_dma_intrdelay_delay_us_wrf(&dev->device, usec);
}
/*
*/
/**
- * \brief gets the device state from the IOAT DMA device
- *
- * \param dev IOAT DMA device
- *
- * \returns device state enumeration
- */
-ioat_dma_dev_st_t ioat_dma_device_get_state(struct ioat_dma_device *dev)
-{
- assert(!"NYI");
- return 0;
-}
-
-/**
- * \brief returns the channel count of this device
- *
- * \param dev IOAT DMA device
- *
- * \returns number of channels this device has
- */
-inline uint8_t ioat_dma_device_get_channel_count(struct ioat_dma_device *dev)
-{
- return dev->common.channels.num;
-}
-
-/**
- * \brief returns the device ID from the IOAT device
- *
- * \param dev IOAT DMA device
- *
- * \returns IOAT DMA device ID
- */
-inline ioat_dma_devid_t ioat_dma_device_get_id(struct ioat_dma_device *dev)
-{
- return dev->common.id;
-}
-
-/**
- * \brief returns the channel belonging with the given ID
- *
- * \param dev IOAT DMA device
- * \param id channel id
- *
- * return IOAT DMA channel handle
- * NULL if no such channel exist
- */
-struct ioat_dma_channel *ioat_dma_device_get_channel(struct ioat_dma_device *dev,
- uint16_t id)
-{
- /* channel ID belongs not to this device */
- if ((id >> 8) != dev->common.id) {
- return NULL;
- }
-
- /* channel index exceeds channel number */
- if ((id & 0xFF) > dev->common.channels.num) {
- return NULL;
- }
- assert(!"NYI");
-
- return NULL;
-}
-
-/**
- * \brief returns a channel from the device based on a round robin fashion
- *
- * \param dev IOAT DMA device
- *
- * return IOAT DMA channel handle
- */
-struct ioat_dma_channel *ioat_dma_device_get_next_channel(struct ioat_dma_device *dev)
-{
- if (dev->common.channels.next >= dev->common.channels.num) {
- dev->common.channels.next = 0;
- }
- return dev->common.channels.c[dev->common.channels.next++];
-}
-
-
-/**
* \brief polls the channels of the IOAT DMA device
*
* \param dev IOAT DMA device
*
* \returns SYS_ERR_OK on success
- * IOAT_ERR_DEVICE_IDLE if there is nothing completed on the channels
+ * DMA_ERR_DEVICE_IDLE if there is nothing completed on the channels
* errval on error
*/
errval_t ioat_dma_device_poll_channels(struct ioat_dma_device *dev)
{
- assert(!"NYI");
+ errval_t err;
+ struct ioat_dma_channel *chan;
+
+ uint8_t idle = 0x1;
+
+ for (uint8_t i = 0; i < dev->common.channels.count; ++i) {
+ chan = (struct ioat_dma_channel *) dev->common.channels.c[i];
+ assert(chan);
+ err = ioat_dma_channel_poll(chan);
+ switch (err_no(err)) {
+        case DMA_ERR_CHAN_IDLE:
+            /* channel is idle: leave the device-wide idle flag set */
+            break;
+ case SYS_ERR_OK:
+ idle = 0;
+ break;
+ default:
+ return err;
+ }
+ }
+
+ if (idle) {
+ return DMA_ERR_DEVICE_IDLE;
+ }
+
return SYS_ERR_OK;
}
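+/*
+ * Usage sketch (illustrative only; the event loop and the thread_yield()
+ * back-off are assumptions of this example, not part of the library):
+ *
+ *   for (;;) {
+ *       errval_t err = ioat_dma_device_poll_channels(dev);
+ *       if (err_no(err) == DMA_ERR_DEVICE_IDLE) {
+ *           thread_yield();          // nothing completed, back off
+ *           continue;
+ *       }
+ *       if (err_is_fail(err)) {
+ *           USER_PANIC_ERR(err, "polling the DMA device failed");
+ *       }
+ *   }
+ */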
-
#include <barrelfish/barrelfish.h>
-#include <dma_request_internal.h>
+#include <ioat/ioat_dma_internal.h>
#include <ioat/ioat_dma_device_internal.h>
#include <ioat/ioat_dma_channel_internal.h>
#include <ioat/ioat_dma_request_internal.h>
#include <debug.h>
/**
- *
+ * represents an IOAT-specific DMA request
*/
struct ioat_dma_request
{
- struct dma_req_int common;
+ struct dma_request common;
struct ioat_dma_descriptor *desc_head;
struct ioat_dma_descriptor *desc_tail;
- struct ioat_dma_request *next;
};
/*
* ---------------------------------------------------------------------------
*/
-static struct ioat_dma_request *req_free_list = NULL;
+/// free list caching request structures that are no longer in use
+static struct dma_request *req_free_list = NULL;
+/**
+ * \brief allocates an IOAT DMA request structure
+ *
+ * \returns IOAT DMA request
+ * NULL on failure
+ */
static struct ioat_dma_request *request_alloc(void)
{
struct ioat_dma_request *ret;
if (req_free_list) {
- ret = req_free_list;
- req_free_list = ret->next;
+ ret = (struct ioat_dma_request *) req_free_list;
+ req_free_list = ret->common.next;
- IOATREQ_DEBUG("meta: reusing request %p. freelist:%p\n", ret, req_free_list);
+ DMAREQ_DEBUG("meta: reusing request %p. freelist:%p\n", ret,
+ req_free_list);
return ret;
}
return calloc(1, sizeof(*ret));
}
+/**
+ * \brief frees a DMA request structure by returning it to the free list
+ *
+ * \param req DMA request to be freed
+ */
static void request_free(struct ioat_dma_request *req)
{
- IOATREQ_DEBUG("meta: freeing request %p.\n", req);
- req->next = req_free_list;
- req_free_list = req;
+ DMAREQ_DEBUG("meta: freeing request %p.\n", req);
+ req->common.next = req_free_list;
+ req_free_list = &req->common;
}
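+/*
+ * Design note: the free list only grows; request structures are cached
+ * for reuse rather than returned to the allocator, which avoids a
+ * calloc() on the request hot path at the cost of a little memory.
+ */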
/*
inline static uint32_t req_num_desc_needed(struct ioat_dma_channel *chan,
size_t bytes)
{
- uint32_t max_xfer_size = ioat_dma_channel_get_max_xfer_size(chan);
+ struct dma_channel *dma_chan = (struct dma_channel *) chan;
+ uint32_t max_xfer_size = dma_channel_get_max_xfer_size(dma_chan);
bytes += (max_xfer_size - 1);
return (uint32_t) (bytes / max_xfer_size);
}
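+/*
+ * Example (hypothetical numbers): with a maximum transfer size of 1 MB,
+ * a 2.5 MB memcpy needs (2.5 MB + (1 MB - 1)) / 1 MB = 3 descriptors;
+ * adding (max_xfer_size - 1) before dividing makes the division round up.
+ */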
* ===========================================================================
*/
+/**
+ * \brief handles the processing of completed DMA requests
+ *
+ * \param req the DMA request to process
+ *
+ * \returns SYS_ERR_OK on success
+ * errval on failure
+ */
+errval_t ioat_dma_request_process(struct ioat_dma_request *req)
+{
+ errval_t err;
+
+ err = dma_request_process(&req->common);
+ if (err_is_fail(err)) {
+ return err;
+ }
+
+ request_free(req);
+
+ return SYS_ERR_OK;
+}
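+
+/*
+ * Note: this is expected to run from the channel's poll path once the
+ * hardware has completed the request's tail descriptor; on success the
+ * request structure is recycled onto the free list.
+ */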
+
/*
* ===========================================================================
* Public Interface
errval_t ioat_dma_request_memcpy_chan(struct ioat_dma_channel *chan,
struct dma_req_setup *setup)
{
+ struct dma_channel *dma_chan = (struct dma_channel *) chan;
+
uint32_t num_desc = req_num_desc_needed(chan, setup->args.memcpy.bytes);
IOATREQ_DEBUG("DMA Memcpy request: [0x%016lx]->[0x%016lx] of %lu bytes (%u desc)\n",
- setup->args.memcpy.src, setup->args.memcpy.dst, setup->args.memcpy.bytes, num_desc);
+ setup->args.memcpy.src, setup->args.memcpy.dst,
+ setup->args.memcpy.bytes, num_desc);
struct ioat_dma_ring *ring = ioat_dma_channel_get_ring(chan);
if (num_desc > ioat_dma_ring_get_space(ring)) {
IOATREQ_DEBUG("Too less space in ring: %u / %u\n", num_desc,
- ioat_dma_ring_get_space(ring));
- return IOAT_ERR_NO_DESCRIPTORS;
+ ioat_dma_ring_get_space(ring));
+ return DMA_ERR_NO_DESCRIPTORS;
}
struct ioat_dma_request *req = request_alloc();
if (req == NULL) {
IOATREQ_DEBUG("No request descriptors for holding request data\n");
- return IOAT_ERR_NO_REQUESTS;
+ return DMA_ERR_NO_REQUESTS;
}
ioat_dma_desc_ctrl_array_t ctrl = {
size_t length = setup->args.memcpy.bytes;
lpaddr_t src = setup->args.memcpy.src;
lpaddr_t dst = setup->args.memcpy.dst;
- size_t bytes, max_xfer_size = ioat_dma_channel_get_max_xfer_size(chan);
+ size_t bytes, max_xfer_size = dma_channel_get_max_xfer_size(dma_chan);
do {
- desc = ioat_ring_get_next_desc(ring);
+ desc = ioat_dma_ring_get_next_desc(ring);
if (!req->desc_head) {
req->desc_head = desc;
}
ioat_dma_desc_fill_memcpy(desc, src, dst, bytes, ctrl);
- ioat_desc_set_request(desc, NULL);
+ ioat_dma_desc_set_request(desc, NULL);
length -= bytes;
src += bytes;
dst += bytes;
} while (length > 0);
- req->common.req.setup = *setup;
- req->common.req.id = generate_req_id(chan);
+ req->common.setup = *setup;
+ req->common.id = dma_request_generate_req_id((struct dma_channel *) chan);
/* set the request pointer in the last descriptor */
- ioat_desc_set_request(desc, req);
+ ioat_dma_desc_set_request(desc, req);
assert(req->desc_tail);
assert(ioat_dma_desc_get_request(req->desc_tail));
- ioat_dma_channel_enq_request(chan, req);
+ return ioat_dma_channel_submit_request(chan, req);
+}
- return SYS_ERR_OK;
+/**
+ * \brief issues a memcpy request to a channel of the given device
+ *
+ * \param dev IOAT DMA device
+ * \param setup request setup information
+ *
+ * \returns SYS_ERR_OK on success
+ * errval on failure
+ */
+inline errval_t ioat_dma_request_memcpy(struct ioat_dma_device *dev,
+ struct dma_req_setup *setup)
+{
+ struct ioat_dma_channel *chan;
+ struct dma_device *dma_dev = (struct dma_device *)dev;
+ chan = (struct ioat_dma_channel *)dma_device_get_channel(dma_dev);
+ return ioat_dma_request_memcpy_chan(chan, setup);
}
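+/*
+ * Usage sketch (illustrative only; src_paddr, dst_paddr and size are
+ * assumptions of this example and must denote valid physical memory):
+ *
+ *   struct dma_req_setup setup;
+ *   memset(&setup, 0, sizeof(setup));
+ *   setup.args.memcpy.src   = src_paddr;   // physical source address
+ *   setup.args.memcpy.dst   = dst_paddr;   // physical destination address
+ *   setup.args.memcpy.bytes = size;        // transfer size in bytes
+ *
+ *   err = ioat_dma_request_memcpy(dev, &setup);
+ */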
/**
struct ioat_dma_ring *ring = ioat_dma_channel_get_ring(chan);
assert(ring);
- struct ioat_dma_descriptor *desc = ioat_ring_get_next_desc(ring);
-
+ struct ioat_dma_descriptor *desc = ioat_dma_ring_get_next_desc(ring);
+ assert(desc);
IOATREQ_DEBUG("New DMA NOP request: descriptor=%p\n", desc);
ioat_dma_desc_fill_nop(desc);
}
+/**
+ * \brief issues a NOP / NULL descriptor request on the given device
+ *
+ * \param dev IOAT DMA device
+ */
+inline void ioat_dma_request_nop(struct ioat_dma_device *dev)
+{
+ struct ioat_dma_channel *chan;
+ struct dma_device *dma_dev = (struct dma_device *)dev;
+ chan = (struct ioat_dma_channel *)dma_device_get_channel(dma_dev);
+ ioat_dma_request_nop_chan(chan);
+}
#include <dev/ioat_dma_dev.h>
+#include <dma_internal.h>
#include <dma_mem_utils.h>
#include <ioat/ioat_dma_internal.h>
uint16_t head; ///< allocated index
uint16_t issued; ///< hardware notification point
uint16_t tail; ///< cleanup index
- uint16_t dmacount; ///< identical to 'head' except for occasionally resetting to zero
+ uint16_t dmacount; ///< value to be written into dmacount register
uint16_t alloc_order; ///< log2 of the number of allocated descriptors
uint16_t produce; ///< number of descriptors to produce at submit time
+
struct ioat_dma_descriptor **desc; ///< descriptor pointer array
- struct ioat_dma_channel *chan;
+ struct ioat_dma_channel *chan; ///< channel associated with this ring
};
/*
*
* \returns pointer to a DMA descriptor
*/
-inline struct ioat_dma_descriptor *ioat_ring_get_next_desc(struct ioat_dma_ring *ring)
+inline struct ioat_dma_descriptor *ioat_dma_ring_get_next_desc(struct ioat_dma_ring *ring)
{
struct ioat_dma_descriptor *desc = ioat_dma_ring_get_desc(ring, ring->head++);
- IOATDESC_DEBUG("ring getting next head desc:%p @ [%016lx], new head:%u\n", desc,
- ioat_desc_get_paddr(desc), ring->head);
+ IOATDESC_DEBUG("ring getting next head desc:%p @ [%016lx], new head:%u\n",
+ desc, ioat_dma_desc_get_paddr(desc), ring->head);
return desc;
}
-
/**
* \brief gets the next descriptor based on the tail pointer and increases the
* tail pointer index
* \param ring the DMA ring
*
* \returns pointer to a DMA descriptor
*/
-inline struct ioat_dma_descriptor *ioat_ring_get_tail_desc(struct ioat_dma_ring *ring)
+inline struct ioat_dma_descriptor *ioat_dma_ring_get_tail_desc(struct ioat_dma_ring *ring)
{
struct ioat_dma_descriptor *desc = ioat_dma_ring_get_desc(ring, ring->tail++);
IOATDESC_DEBUG("ring getting tail desc:%p @ [%016lx], new tail: %u\n", desc,
- ioat_desc_get_paddr(desc), ring->tail);
+ ioat_dma_desc_get_paddr(desc), ring->tail);
return desc;
}
/**
- * \brief submits the pending descriptors to the hardware
+ * \brief submits the pending descriptors and updates the DMA count value
*
* \param ring DMA ring to submit the pending descriptors
*
- * \returns the current head of the descriptors
+ * \returns the current head of the descriptors (dmacount)
*/
-uint16_t ioat_ring_submit_pending(struct ioat_dma_ring *ring)
+uint16_t ioat_dma_ring_submit_pending(struct ioat_dma_ring *ring)
{
uint16_t num_pending = ioat_dma_ring_get_pendig(ring);
if (num_pending != 0) {
ring->dmacount += num_pending;
ring->issued = ring->head;
+
+ IOATDESC_DEBUG("ring submit pending dmacount: %u, head = %u\n",
+ ring->dmacount, ring->head);
}
return ring->dmacount;
*
* \returns physical address of the pending descriptor chain
*/
-inline lpaddr_t ioat_ring_get_chain_addr(struct ioat_dma_ring *ring)
+inline lpaddr_t ioat_dma_ring_get_chain_addr(struct ioat_dma_ring *ring)
{
- return ioat_desc_get_paddr(ioat_dma_ring_get_desc(ring, ring->issued));
+ return ioat_dma_desc_get_paddr(ioat_dma_ring_get_desc(ring, ring->issued));
}
/*
ring->size = ndesc;
ring->desc = (void *) (ring + 1);
- err = ioat_desc_alloc(IOAT_DMA_DESC_SIZE, IOAT_DMA_DESC_ALIGN, ndesc,
- ring->desc);
+ err = ioat_dma_desc_alloc(IOAT_DMA_DESC_SIZE, IOAT_DMA_DESC_ALIGN, ndesc,
+ ring->desc);
if (err_is_fail(err)) {
free(ring);
return err;
IOATDESC_DEBUG("freeing descriptor ring %p\n", ring);
- err = ioat_desc_free(ring->desc[0]);
+ err = ioat_dma_desc_free(ring->desc[0]);
if (err_is_fail(err)) {
return err;
}
}
/**
+ * \brief returns the DMA count of the ring for setting the DMA count register
+ *
+ * \param ring IOAT DMA descriptor ring
+ *
+ * \returns dmacount value
+ */
+inline uint16_t ioat_dma_ring_get_dmacount(struct ioat_dma_ring *ring)
+{
+ return ring->dmacount;
+}
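+
+/*
+ * Note: this value (also returned by ioat_dma_ring_submit_pending()) is
+ * intended to be written into the channel's DMA count register to hand
+ * the newly appended descriptors over to the hardware.
+ */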
+
+/**
* \brief gets the next descriptor based on the index
*
* \param ring the DMA ring