Initial Implementation of the IOAT DMA device (device initialization)
author    Reto Achermann <acreto@student.ethz.ch>
          Mon, 14 Jul 2014 16:28:30 +0000 (18:28 +0200)
committer Stefan Kaestle <stefan.kaestle@inf.ethz.ch>
          Wed, 20 Aug 2014 21:39:53 +0000 (23:39 +0200)
13 files changed:
errors/errno.fugu
hake/symbolic_targets.mk
lib/pci/pci_client.c
usr/drivers/ioat_dma/Hakefile
usr/drivers/ioat_dma/debug.h [new file with mode: 0644]
usr/drivers/ioat_dma/ioat_dma.h
usr/drivers/ioat_dma/ioat_dma_channel.c
usr/drivers/ioat_dma/ioat_dma_channel.h
usr/drivers/ioat_dma/ioat_dma_descriptors.c
usr/drivers/ioat_dma/ioat_dma_descriptors.h
usr/drivers/ioat_dma/ioat_dma_device.c
usr/drivers/ioat_dma/ioat_dma_device.h
usr/drivers/ioat_dma/main.c

diff --git a/errors/errno.fugu b/errors/errno.fugu
index 3cc7b5e..d247897 100755 (executable)
@@ -1060,3 +1060,9 @@ errors xeon_phi XEON_PHI_ERR_ {
     failure DMA_REQ_SIZE         "The requested transfer size is too big",
     failure DMA_MEM_OUT_OF_RANGE "The physical address is out of range",
 };
+
+errors ioat IOAT_ERR_ {
+    failure PCI_ADDRESS           "The PCI address of the device is not as expected",
+    failure DEVICE_UNSUPPORTED    "Device ID not supported / wrong configuration",
+    
+};
diff --git a/hake/symbolic_targets.mk b/hake/symbolic_targets.mk
index 756d3b6..70612c3 100644 (file)
@@ -180,7 +180,8 @@ MODULES_x86_64= \
        sbin/spin \
        sbin/xeon_phi_test \
        sbin/virtio_blk_host \
-       sbin/virtio_blk
+       sbin/virtio_blk \
+       sbin/ioat_dma
 #      sbin/block_server \
 #      sbin/block_server_client \
 #      sbin/bs_user \
diff --git a/lib/pci/pci_client.c b/lib/pci/pci_client.c
index ae8f2af..b7ce2da 100644 (file)
@@ -43,7 +43,7 @@ errval_t pci_register_driver_irq(pci_driver_init_fn init_func, uint32_t class,
         if (err_is_fail(err)) {
             return err;
         }
-        printf("pci_client.c: got vector %"PRIu32"\n", vector);
+        //printf("pci_client.c: got vector %"PRIu32"\n", vector);
         assert(vector != INVALID_VECTOR);
     }
 
diff --git a/usr/drivers/ioat_dma/Hakefile b/usr/drivers/ioat_dma/Hakefile
index 9680eca..c9297de 100644 (file)
@@ -19,8 +19,8 @@
                "ioat_dma_descriptors.c"                      
        ],
        flounderBindings = [ "dma" ],
-       mackerelDevices = [ "ioat_dma" ],
+       mackerelDevices = [ "ioat_dma", "ioat_dma_chan" ],
        addLibraries = libDeps [ "pci", "dma" ],
        architectures = [ "x86_64"]
-  }
+  }  
 ]
diff --git a/usr/drivers/ioat_dma/debug.h b/usr/drivers/ioat_dma/debug.h
new file mode 100644 (file)
index 0000000..cd65c2e
--- /dev/null
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2014 ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Universitaetsstrasse 6, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef IOAT_DMA_DEBUG_H_
+#define IOAT_DMA_DEBUG_H_
+
+
+/*
+ * Debug output switches
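+ * (set a switch to 0 to compile out that subsystem's debug output)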
+ */
+#define IODEBUG_ENABLED   1
+#define IODEBUG_INT       1
+#define IODEBUG_CHAN      1
+#define IODEBUG_TRANS     1
+#define IODEBUG_SVC       1
+#define IODEBUG_DEV       1
+
+/*
+ * --------------------------------------------------------------------------
+ * Debug output generation
+ */
+#if IODEBUG_ENABLED
+#define IODEBUG(x...) debug_printf(x)
+#else
+#define IODEBUG(x...)
+#endif
+#if IODEBUG_INT
+#define IOINT_DEBUG(x...) IODEBUG("intr > " x)
+#else
+#define IOINT_DEBUG(x...)
+#endif
+#if IODEBUG_CHAN
+#define IOCHAN_DEBUG(x...) IODEBUG("chan > " x)
+#else
+#define IOCHAN_DEBUG(x...)
+#endif
+#if IODEBUG_TRANS
+#define IOTRA_DEBUG(x...) IODEBUG("xfer > " x)
+#else
+#define IOTRA_DEBUG(x...)
+#endif
+#if IODEBUG_SVC
+#define IOSVC_DEBUG(x...) IODEBUG("svc > " x)
+#else
+#define IOSVC_DEBUG(x...)
+#endif
+#if IODEBUG_DEV
+#define IODEV_DEBUG(x...) IODEBUG("device > " x)
+#else
+#define IODEV_DEBUG(x...)
+#endif
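+/* example: IODEV_DEBUG("found %u channels\n", n) expands to debug_printf("device > found %u channels\n", n) */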
+
+#endif /* IOAT_DMA_DEBUG_H_ */
diff --git a/usr/drivers/ioat_dma/ioat_dma.h b/usr/drivers/ioat_dma/ioat_dma.h
index 8e0f609..b4ec500 100644 (file)
 #ifndef IOAT_DMA_H
 #define IOAT_DMA_H
 
+
+struct ioat_dma_device;
+struct ioat_dma_channel;
+
+
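+/* use Direct Cache Access (DCA) if the device supports it */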
+#define IOAT_DMA_DCA_ENABLE 1
+
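+/* interrupt delivery mode requested by the driver (see enum ioat_dma_irq) */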
+#define IOAT_DMA_IRQ_TYPE IOAT_DMA_IRQ_DISABLED
+
 struct ioat_dma_ctrl
 {
     uint16_t device_num;
     struct ioat_dma_device *devices;
+    uint8_t dca_enabled;
 };
 
 
diff --git a/usr/drivers/ioat_dma/ioat_dma_channel.c b/usr/drivers/ioat_dma/ioat_dma_channel.c
index 9f7a974..2412a36 100644 (file)
  *
  */
 
-=
+#include <barrelfish/barrelfish.h>
+#include <pci/pci.h>
 
+#include <dev/ioat_dma_chan_dev.h>
+
+#include "ioat_dma.h"
+#include "ioat_dma_device.h"
+#include "ioat_dma_channel.h"
+
+#include "debug.h"
+
+typedef uint16_t ioat_dma_chan_id_t;
+
+struct ioat_dma_channel
+{
+    struct ioat_dma_device *dev;
+    ioat_dma_chan_id_t chan_id;
+    ioat_dma_chan_t channel;
+    uint8_t irq_vector;
+    size_t  irq_msix;
+};
+
+static void ioat_dma_chan_do_interrupt_msix(void *arg)
+{
+
+}
+
+errval_t ioat_dma_channel_irq_setup_msix(struct ioat_dma_channel *chan)
+{
+    errval_t err;
+
+    err = pci_setup_inthandler(ioat_dma_chan_do_interrupt_msix,
+                               chan,
+                               &chan->irq_vector);
+    if (err_is_fail(err)) {
+        return err;
+    }
+
+    err = pci_msix_vector_init(chan->irq_msix, 0, chan->irq_vector);
+
+    return err;
+}
+
+errval_t ioat_dma_channel_reset(struct ioat_dma_channel *chan)
+{
+    /* clear any pending channel errors (the error register is write-1-to-clear) */
+    ioat_dma_chan_err_t chanerr = ioat_dma_chan_err_rd(&chan->channel);
+    ioat_dma_chan_err_wr(&chan->channel, chanerr);
+
+    IOCHAN_DEBUG("Reseting channel %x. Chanerrors=[%08x]\n", chan->chan_id, chanerr);
+
+    /* TODO: Clear any pending errors in pci config space */
+#if 0
+
+    /* clear any pending errors */
+    err = pci_read_config_dword(pdev,
+                    IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
+    if (err) {
+        dev_err(&pdev->dev,
+                        "channel error register unreachable\n");
+        return err;
+    }
+    pci_write_config_dword(pdev,
+                    IOAT_PCI_CHANERR_INT_OFFSET, chanerr);
+
+#endif
+
+    ioat_dma_chan_cmd_reset_wrf(&chan->channel, 0x1);
+
+    while (ioat_dma_chan_cmd_reset_rdf(&chan->channel)) {
+        thread_yield();
+    }
+
+    /* broadwell may need some additional work here */
+    return SYS_ERR_OK;
+}
+
+/**
+ * \brief initializes the DMA channels of a device and allocates their resources
+ *
+ * \param dev   the device whose channels are to be initialized
+ *
+ * \returns SYS_ERR_OK on success
+ *          errval on failure
+ */
+errval_t ioat_dma_channel_init(struct ioat_dma_device *dev)
+{
+    errval_t err;
+
+    IOCHAN_DEBUG("Initializing %u channels max. xfer size is %u bytes\n",
+                 dev->chan_num,
+                 dev->xfer_size_max);
+
+    dev->channels = calloc(dev->chan_num, sizeof(struct ioat_dma_channel));
+    if (dev->channels == NULL) {
+        return LIB_ERR_MALLOC_FAIL;
+    }
+
+    /*
+     * XXX: actually there is just one channel per device...
+     */
+
+    for (uint8_t i = 0; i < dev->chan_num; ++i) {
+        struct ioat_dma_channel *chan = dev->channels + i;
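+        /* the channel id encodes the device id in the upper byte and the channel index in the lower byte */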
+        chan->chan_id = (((uint16_t) dev->devid) << 8) | i;
+        chan->dev = dev;
+        mackerel_addr_t chan_base = (mackerel_addr_t) dev->mmio.vbase;
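+        /* each channel owns a 0x80-byte register block, the first one starting at offset 0x80 */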
+        ioat_dma_chan_initialize(&chan->channel, chan_base + ((i + 1) * 0x80));
+
+        ioat_dma_chan_dcactrl_target_cpu_wrf(&chan->channel,
+                                             ioat_dma_chan_dca_ctr_target_any);
+
+        err = ioat_dma_channel_reset(chan);
+        if (err_is_fail(err)) {
+            return err;
+        }
+    }
+
+    return SYS_ERR_OK;
+}
diff --git a/usr/drivers/ioat_dma/ioat_dma_channel.h b/usr/drivers/ioat_dma/ioat_dma_channel.h
index fb83f05..245156a 100644 (file)
 #define IOAT_DMA_CHANNEL_H
 
 
-struct ioat_dma_channel
-{
-
-};
+struct ioat_dma_channel;
 
 /**
  * \brief initializes a new DMA channel and allocates the channel resources
@@ -24,9 +21,12 @@ struct ioat_dma_channel
  * \returns SYS_ERR_OK on success
  *          errval on failure
  */
-errval_t ioat_dma_chan_init(struct ioat_dma_chan **chan);
+errval_t ioat_dma_channel_init(struct ioat_dma_device *dev);
+
+errval_t ioat_dma_channel_reset(struct ioat_dma_channel *chan);
 
-void ioat_dma_chan_free(struct ioat_dma_chan *chan);
+void ioat_dma_chan_free(struct ioat_dma_channel *chan);
 
+errval_t ioat_dma_channel_irq_setup_msix(struct ioat_dma_channel *chan);
 
 #endif /* IOAT_DMA_CHANNEL_H */
diff --git a/usr/drivers/ioat_dma/ioat_dma_descriptors.c b/usr/drivers/ioat_dma/ioat_dma_descriptors.c
index 74a8b02..301f19f 100644 (file)
  *
  */
 
-int main(int argc, char *argv[])
+#include <barrelfish/barrelfish.h>
+
+#include "ioat_dma_descriptors.h"
+
+errval_t ioat_dma_desc_alloc_init(size_t element_size,
+                                  size_t align)
 {
-    return 0;
+    assert(!"NYI");
+    return SYS_ERR_OK;
 }
-
diff --git a/usr/drivers/ioat_dma/ioat_dma_descriptors.h b/usr/drivers/ioat_dma/ioat_dma_descriptors.h
index 2fce326..5eb1621 100644 (file)
 #ifndef IOAT_DMA_DESCRIPTORS_H
 #define IOAT_DMA_DESCRIPTORS_H
 
+struct ioat_dma_desc_pool;
 
 /// the minimum amount of DMA descriptors to allocate
 #define IOAT_DMA_DESC_COUNT
 
+#define IOAT_DMA_DESC_SIZE 0
 
-errval_t ioat_dma_desc_alloc();
+#define IOAT_DMA_DESC_ALIGN 64
 
-errval_t ioat_dma_desc_free();
+/**
+ * \brief initializes the allocator for DMA descriptors
+ *
+ * \param element_size  size of a single descriptor in bytes
+ * \param align         alignment requirement for the descriptors
+ */
+errval_t ioat_dma_desc_alloc_init(size_t element_size,
+                                  size_t align);
+
+
+
+void *ioat_dma_desc_alloc(void);
+
+errval_t ioat_dma_desc_free(void);
 
-errval_t ioat_dma_desc_chain_alloc();
+errval_t ioat_dma_desc_chain_alloc(void);
 
-errval_t ioat_dma_desc_chain_free();
+errval_t ioat_dma_desc_chain_free(void);
 
 
 
diff --git a/usr/drivers/ioat_dma/ioat_dma_device.c b/usr/drivers/ioat_dma/ioat_dma_device.c
index 38cd307..ce074f4 100644 (file)
 #include <pci/pci.h>
 #include <pci/devids.h>
 
+#include "ioat_dma.h"
 #include "ioat_dma_device.h"
 #include "ioat_dma_channel.h"
 
+#include "debug.h"
+
 static struct ioat_dma_device *curr_dev;
 
 static errval_t curr_err;
 
-static void ioat_intrupt_handler(void *arg)
+static void ioat_dma_do_interrupt(void *arg)
+{
+
+}
+
+static errval_t device_setup_interrupts(struct ioat_dma_device *dev)
+{
+    errval_t err;
+
+    dev->irq_type = IOAT_DMA_IRQ_TYPE;
+    switch (dev->irq_type) {
+        case IOAT_DMA_IRQ_MSIX:
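+            /* MSI-X: request one interrupt vector per DMA channel */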
+            ;
+            uint16_t msi_count;
+            err = pci_msix_enable(&msi_count);
+
+            IODEV_DEBUG("Initializing %u MSI-X Vectors\n", msi_count);
+
+            for (uint16_t i = 0; i < msi_count; ++i) {
+                if (i == dev->chan_num) {
+                    break;
+                }
+                struct ioat_dma_channel *chan = dev->channels + i;
+                err = ioat_dma_channel_irq_setup_msix(chan);
+                if (err_is_fail(err)) {
+                    return err;
+                }
+
+            }
+            break;
+        case IOAT_DMA_IRQ_MSI:
+            break;
+        case IOAT_DMA_IRQ_INTX:
+            break;
+        default:
+            /* disabled */
+            break;
+    }
+
+    return SYS_ERR_OK;
+}
+
+static errval_t device_init_ioat_v1(struct ioat_dma_device *dev)
 {
+    IODEV_DEBUG("Devices of Crystal Beach Version 1.xx are not supported.\n");
+    return IOAT_ERR_DEVICE_UNSUPPORTED;
+}
 
+static errval_t device_init_ioat_v2(struct ioat_dma_device *dev)
+{
+    IODEV_DEBUG("Devices of Crystal Beach Version 2.xx are not supported.\n");
+    return IOAT_ERR_DEVICE_UNSUPPORTED;
+}
+
+static errval_t device_init_ioat_v3(struct ioat_dma_device *dev)
+{
+    errval_t err;
+
+    IODEV_DEBUG("Initializing Crystal Beach 3 DMA Device.\n");
+
+    ioat_dma_dmacapability_t cap = ioat_dma_dmacapability_rd(&dev->device);
+
+    debug_printf("cap - %x\n", cap);
+
+    if (ioat_dma_cbver_minor_extract(dev->version) == 2) {
+        IODEV_DEBUG("Disabling XOR and PQ.\n");
+        cap = ioat_dma_dmacapability_xor_insert(cap, 0x0);
+        cap = ioat_dma_dmacapability_pq_insert(cap, 0x0);
+    } else if (ioat_dma_cbver_minor_extract(dev->version) == 3) {
+        IODEV_DEBUG("Devices of Crystal Beach Version 3.3 are not supported.\n");
+        return IOAT_ERR_DEVICE_UNSUPPORTED;
+    }
+
+    /* if DCA is enabled, we cannot support the RAID functions */
+    if (dev->flags & IOAT_DMA_DEV_F_DCA) {
+        IODEV_DEBUG("Disabling XOR and PQ.\n");
+        cap = ioat_dma_dmacapability_xor_insert(cap, 0x0);
+        cap = ioat_dma_dmacapability_pq_insert(cap, 0x0);
+    }
+
+    if (ioat_dma_dmacapability_xor_extract(cap)) {
+        IODEV_DEBUG("Device supports XOR RAID.\n");
+
+        dev->flags |= IOAT_DMA_DEV_F_RAID;
+
+        /*
+         * this may need some additional functions to prepare
+         * the specific transfers...
+         *
+         * max_xor = 8;
+         * prepare_xor, prepare_xor_val
+         */
+    }
+
+    if (ioat_dma_dmacapability_pq_extract(cap)) {
+        IODEV_DEBUG("Device supports PQ RAID.\n");
+
+        dev->flags |= IOAT_DMA_DEV_F_RAID;
+
+        /*
+         * this may need some additional functions to prepare the
+         * DMA descriptors
+         *
+         * max_xor = 8;
+         * max_pq = 8;
+         * prepare_pq, prepare_pq_val
+         *
+         * also set the prepare_xor pointers...
+         *
+         */
+    }
+
+    /* set the interrupt type */
+    dev->irq_type = IOAT_DMA_IRQ_TYPE;
+
+    /* channel enumeration */
+    dev->chan_num = ioat_dma_chancnt_num_rdf(&dev->device);
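+    /* xfercap holds the log2 of the maximum transfer size supported by the device */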
+    dev->xfer_size_max = (1UL << ioat_dma_xfercap_max_rdf(&dev->device));
+
+    err = ioat_dma_channel_init(dev);
+    if (err_is_fail(err)) {
+        return err;
+    }
+
+    err = device_setup_interrupts(dev);
+    if (err_is_fail(err)) {
+        return err;
+    }
+
+
+#if 0
+
+    err = device->self_test(device);
+
+    ioat_set_tcp_copy_break(262144);
+
+    err = ioat_register(device);
+    if (err)
+    return err;
+
+    ioat_kobject_add(device, &ioat2_ktype);
+
+    if (dca)
+    device->dca = ioat3_dca_init(pdev, device->reg_base);
+#endif
+
+    return SYS_ERR_OK;
 }
 
 static void device_init(struct device_mem* bar_info,
@@ -27,27 +174,61 @@ static void device_init(struct device_mem* bar_info,
 {
     assert(curr_dev);
 
-    if (curr_dev->state != IOAT_DMA_DEV_ST_UNINITIALIZED) {
+    errval_t err;
+
+    if (bar_count != IOAT_DMA_BAR_COUNT) {
+        curr_dev->state = IOAT_DMA_DEV_ST_ERR;
+        curr_err = IOAT_ERR_DEVICE_UNSUPPORTED;
         return;
     }
 
-    printf("Got: %i bars\n", bar_count);
-    for (int i = 0; i < bar_count; ++i) {
-        printf("> Bar[%i]: {type=%i, paddr=0x%lx, size=%u}\n",
-               i,
-               bar_info[i].type,
-               bar_info[i].paddr,
-               (uint32_t) (bar_info[i].bytes / 1024));
+    IODEV_DEBUG("IOAT DMA device BAR[0]: {paddr=0x%016lx, size=%u kB}\n",
+                bar_info[0].paddr,
+                (uint32_t ) (bar_info[0].bytes / 1024));
+
+    err = map_device(bar_info);
+    if (err_is_fail(err)) {
+        curr_err = err;
+        curr_dev->state = IOAT_DMA_DEV_ST_ERR;
+        return;
     }
 
-    if (bar_count != 1) {
-        USER_PANIC("There is something wrong. The device should have 1 MBARs.");
+    curr_dev->mmio.bits = bar_info->bits;
+    curr_dev->mmio.vbase = bar_info->vaddr;
+    curr_dev->mmio.pbase = bar_info->paddr;
+    curr_dev->mmio.bytes = bar_info->bytes;
+    curr_dev->mmio.cap = bar_info->frame_cap[0];
+
+    ioat_dma_initialize(&curr_dev->device, NULL, bar_info->vaddr);
+
+    curr_dev->version = ioat_dma_cbver_rd(&curr_dev->device);
+
+    IODEV_DEBUG("IOAT DMA device version: %u.%u\n",
+                ioat_dma_cbver_major_extract(curr_dev->version),
+                ioat_dma_cbver_minor_extract(curr_dev->version));
+
+    switch (ioat_dma_cbver_major_extract(curr_dev->version)) {
+        case ioat_dma_cbver_1x:
+            curr_err = device_init_ioat_v1(curr_dev);
+            break;
+        case ioat_dma_cbver_2x:
+            curr_err = device_init_ioat_v2(curr_dev);
+            break;
+        case ioat_dma_cbver_3x:
+            curr_err = device_init_ioat_v3(curr_dev);
+            break;
+        default:
+            curr_err = IOAT_ERR_DEVICE_UNSUPPORTED;
+    }
+
+    if (err_is_fail(curr_err)) {
+        curr_dev->state = IOAT_DMA_DEV_ST_ERR;
+        return;
     }
 
 }
 
-errval_t ioat_dma_device_init(uint16_t device_id,
-                              struct pci_address addr,
+errval_t ioat_dma_device_init(struct pci_addr addr,
+                              uint16_t device_id,
                               struct ioat_dma_device *dev)
 {
     errval_t err;
@@ -55,6 +236,10 @@ errval_t ioat_dma_device_init(uint16_t device_id,
     assert(dev);
     assert(curr_dev == NULL);
 
+    if (dev->state != IOAT_DMA_DEV_ST_UNINITIALIZED) {
+        return SYS_ERR_OK;
+    }
+
     curr_dev = dev;
     curr_err = SYS_ERR_OK;
 
@@ -67,7 +252,7 @@ errval_t ioat_dma_device_init(uint16_t device_id,
                                   addr.bus,
                                   addr.device,
                                   addr.function,
-                                  ioat_intrupt_handler,
+                                  ioat_dma_do_interrupt,
                                   dev);
     if (err_is_fail(err)) {
         dev->state = IOAT_DMA_DEV_ST_ERR;
@@ -80,7 +265,7 @@ errval_t ioat_dma_device_init(uint16_t device_id,
 /**
  *
  */
-errval_t ioat_dma_device_discovery(struct pci_address addr,
+errval_t ioat_dma_device_discovery(struct pci_addr addr,
                                    uint16_t device_id,
                                    struct ioat_dma_ctrl *ctrl)
 {
@@ -95,15 +280,16 @@ errval_t ioat_dma_device_discovery(struct pci_address addr,
     }
 
     /* there are several possible devices which implement Intel IOAT DMA */
-    switch (device & 0xFFF0) {
+    switch (device_id & 0xFFF0) {
         case PCI_DEVICE_IOAT_IVB0:
+            IODEV_DEBUG("doing device discovery: Ivy Bridge\n");
             /*
              * Ivy Bridge Platform
              */
             if (addr.device != 4) {
                 /* the IOAT DMA engine should be on device 4 on ivy bridge */
                 /* TODO: error number */
-                return -1;
+                return IOAT_ERR_PCI_ADDRESS;
             }
             dev_ids = calloc(PCI_DEVICE_IOAT_IVB_CNT, sizeof(uint16_t));
             if (dev_ids == NULL) {
@@ -124,13 +310,13 @@ errval_t ioat_dma_device_discovery(struct pci_address addr,
             break;
 
         case PCI_DEVICE_IOAT_HSW0:
+            IODEV_DEBUG("doing device discovery: Haswell\n");
             /*
              * Haswell Platform
              */
             if (addr.device != 4) {
                 /* the IOAT DMA engine should be on device 4 on ivy bridge */
-                /* TODO: error number */
-                return -1;
+                return IOAT_ERR_PCI_ADDRESS;
             }
             dev_ids = calloc(PCI_DEVICE_IOAT_HSW_CNT, sizeof(uint16_t));
             if (dev_ids == NULL) {
@@ -150,8 +336,7 @@ errval_t ioat_dma_device_discovery(struct pci_address addr,
             addr.function = 0;
             break;
         default:
-            /* TODO: return unsupported error number */
-            return -1;
+            return IOAT_ERR_DEVICE_UNSUPPORTED;
     }
 
     struct ioat_dma_device *dev = calloc(dev_cnt, sizeof(struct ioat_dma_device));
@@ -161,6 +346,10 @@ errval_t ioat_dma_device_discovery(struct pci_address addr,
     }
 
     for (uint16_t i = 0; i < dev_cnt; ++i) {
+        dev[i].dma_ctrl = ctrl;
+        if (ctrl->dca_enabled) {
+            dev[i].flags = IOAT_DMA_DEV_F_DCA;
+        }
         err = ioat_dma_device_init(addr, dev_ids[i], dev + i);
         if (err_is_fail(err)) {
             /*
@@ -175,10 +364,12 @@ errval_t ioat_dma_device_discovery(struct pci_address addr,
             dev_cnt = i + 1;
             break;
         }
-        dev[i].dma_ctrl = ctrl;
+
         addr.function++;
     }
 
+    IODEV_DEBUG("Device discovery done: got %u devices.\n", dev_cnt);
+
     ctrl->devices = dev;
     ctrl->device_num = dev_cnt;
 
@@ -186,3 +377,4 @@ errval_t ioat_dma_device_discovery(struct pci_address addr,
 
     return SYS_ERR_OK;
 }
+
diff --git a/usr/drivers/ioat_dma/ioat_dma_device.h b/usr/drivers/ioat_dma/ioat_dma_device.h
index af6ecf5..398a040 100644 (file)
 
 #include <dev/ioat_dma_dev.h>
 
+#define PCI_ADDR_DONT_CARE 0x10000
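+/* a value larger than any valid bus/device/function number, used when the address is unknown */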
+
+
+struct pci_addr {
+    uint32_t bus;
+    uint32_t device;
+    uint32_t function;
+};
 
 enum ioat_dma_dev_st {
     IOAT_DMA_DEV_ST_UNINITIALIZED,
@@ -58,31 +66,53 @@ enum ioat_dma_dev_st {
 
 #define IOAT_DMA_CHAN_COUNT 8
 
-typedef uint16_t ioat_dma_devid_t;
+#define IOAT_DMA_BAR_COUNT 1
+
+typedef uint8_t ioat_dma_devid_t;
+
+enum ioat_dma_irq {
+    IOAT_DMA_IRQ_DISABLED,
+    IOAT_DMA_IRQ_MSIX,
+    IOAT_DMA_IRQ_MSI,
+    IOAT_DMA_IRQ_INTX,
+};
+
+/* device flags */
+#define IOAT_DMA_DEV_F_DCA  0x00000001
+#define IOAT_DMA_DEV_F_RAID 0x00000002
 
+struct ioat_dma_fn
+{
+
+};
 
 struct ioat_dma_device
 {
     ioat_dma_devid_t devid;
     enum ioat_dma_dev_st state;
     struct {
-        lvaddr_t vbase;
+        void    *vbase;
         lpaddr_t pbase;
         uint8_t  bits;
         uint64_t bytes;
         struct capref cap;
     } mmio;
     ioat_dma_t device;
+    ioat_dma_cbver_t version;
+    uint32_t xfer_size_max;
+    struct ioat_dma_fn fn;
     struct ioat_dma_channel *channels;
     uint8_t chan_num;
+    uint32_t flags;
     struct ioat_dma_ctrl *dma_ctrl;
+    enum ioat_dma_irq irq_type;
 };
 
-errval_t ioat_dma_device_init(uint16_t device_id,
-                              struct pci_address addr,
+errval_t ioat_dma_device_init(struct pci_addr addr,
+                              uint16_t device_id,
                               struct ioat_dma_device *dev);
 
-errval_t ioat_dma_device_discovery(struct pci_address addr,
+errval_t ioat_dma_device_discovery(struct pci_addr addr,
                                    uint16_t device_id,
                                    struct ioat_dma_ctrl *ctrl);
 
diff --git a/usr/drivers/ioat_dma/main.c b/usr/drivers/ioat_dma/main.c
index 74a8b02..a5fb5b9 100644 (file)
  *
  */
 
-int main(int argc, char *argv[])
+#include <string.h>
+
+#include <barrelfish/barrelfish.h>
+
+#include <pci/devids.h>
+
+#include "ioat_dma.h"
+#include "ioat_dma_device.h"
+#include "ioat_dma_descriptors.h"
+
+#include "debug.h"
+
+struct ioat_dma_ctrl dma_ctrl;
+
+int main(int argc,
+         char *argv[])
 {
+    errval_t err;
+
+    debug_printf("I/O AT DMA driver started\n");
+
+    memset(&dma_ctrl, 0, sizeof(dma_ctrl));
+
+    /*
+     * Parsing of cmdline arguments.
+     *
+     * When started by Kaluga, the last element of the cmdline will contain
+     * the basic PCI information of the device.
+     * VENDORID:DEVICEID:BUS:DEV:FUN
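+     * (fields are hexadecimal, e.g. "8086:2f20:0:4:0"; the example IDs are illustrative)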
+     */
+    uint32_t vendor_id = 0, device_id = 0;
+    struct pci_addr addr = {
+        .bus = PCI_ADDR_DONT_CARE,
+        .device = PCI_ADDR_DONT_CARE,
+        .function = PCI_ADDR_DONT_CARE
+    };
+
+    if (argc > 1) {
+        uint32_t parsed = sscanf(argv[argc - 1],
+                                 "%x:%x:%x:%x:%x",
+                                 &vendor_id,
+                                 &device_id,
+                                 &addr.bus,
+                                 &addr.device,
+                                 &addr.function);
+        if (parsed != 5) {
+            IODEBUG("WARNING: cmdline parsing failed. Using PCI Address [0,0,0]");
+        } else {
+            if (vendor_id != 0x8086 || (((device_id & 0xFFF0) != PCI_DEVICE_IOAT_IVB0)
+                            && ((device_id & 0xFFF0) != PCI_DEVICE_IOAT_HSW0))) {
+                USER_PANIC("unexpected vendor / device id: [%x, %x]",
+                           vendor_id,
+                           device_id);
+                return -1;
+            }
+            IODEBUG("Initializing I/O AT DMA device with PCI address "
+                   "[%u,%u,%u]\n",
+                   addr.bus, addr.device, addr.function);
+        }
+    } else {
+        IODEBUG("WARNING: Initializing I/O AT DMA device with unknown PCI address "
+               "[0,0,0]\n");
+    }
+
+    err = ioat_dma_device_discovery(addr, device_id, &dma_ctrl);
+    if (err_is_fail(err)) {
+        USER_PANIC_ERR(err, "DMA Device discovery failed");
+    }
+
+    err = ioat_dma_desc_alloc_init(IOAT_DMA_DESC_SIZE, IOAT_DMA_DESC_ALIGN);
+    if (err_is_fail(err)) {
+        USER_PANIC_ERR(err, "DMA Descriptor allocator initialization failed.\n");
+    }
+
     return 0;
 }