failure DEVICE_INIT "Could not initialize device",
failure MEM_ALLOC "Could not allocate memory",
failure WRONG_INDEX "Invalid index for requested cap",
+ failure MSIX_NOTSUP "MSI-X is not supported by the device",
+ failure MSIX_BADVECTOR "Invalid index for MSI-X vector",
+ failure MSIX_DISABLED "MSI-X is not enabled for this device",
};
// errors in ACPI
/* write PCI conf header */
rpc write_conf_header(in uint32 dword, in uint32 val, out errval err);
+
+ /* Enable MSI-X for the specified PCI device. */
+ rpc msix_enable(out errval err,
+ out uint16 vec_count);
+
+ /* Configure the specified MSI-X vector. */
+ rpc msix_vector_init(in uint16 idx, /* Index of MSI-X vector */
+ in uint8 destination, /* Interrupt Destination */
+ in uint8 vector, /* Interrupt Vector */
+ out errval err);
};
void *handler_arg);
errval_t pci_setup_inthandler(interrupt_handler_fn handler, void *handler_arg,
- int *ret_vector);
+ uint8_t *ret_vector);
errval_t pci_read_conf_header(uint32_t dword, uint32_t *val);
errval_t pci_client_connect(void);
+
+/**
+ * Enable MSI-X for the PCI device
+ * @param count Memory location where the number of supported vectors is written
+ */
+errval_t pci_msix_enable(uint16_t *count);
+
+/**
+ * Configure an MSI-X vector
+ * @param index MSI-X Vector index
+ * @param destination Destination APIC where the interrupt should be sent
+ * @param vector Interrupt vector to send
+ */
+errval_t pci_msix_vector_init(uint16_t index, uint8_t destination,
+ uint8_t vector);
+
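+/*
+ * Illustrative usage sketch: a driver that wants MSI-X interrupts would
+ * typically enable MSI-X, allocate a local interrupt vector with
+ * pci_setup_inthandler(), and then point an MSI-X table entry at it.
+ * The handler (my_handler) and the APIC destination (0) are placeholders.
+ *
+ *   uint16_t vec_count;
+ *   uint8_t vector;
+ *   errval_t err = pci_msix_enable(&vec_count);
+ *   err = pci_setup_inthandler(my_handler, NULL, &vector);
+ *   err = pci_msix_vector_init(0, 0, vector);
+ */
+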
#endif
return msgerr;
}
+errval_t pci_setup_inthandler(interrupt_handler_fn handler, void *handler_arg,
+ uint8_t *ret_vector)
+{
+ errval_t err;
+ uint32_t vector = INVALID_VECTOR;
+ *ret_vector = 0;
+ err = inthandler_setup(handler, handler_arg, &vector);
+ if (err_is_ok(err)) {
+ *ret_vector = vector + 32; // FIXME: HACK
+ }
+ return err;
+}
+
errval_t pci_read_conf_header(uint32_t dword, uint32_t *val)
{
errval_t err, msgerr;
return err_is_fail(err) ? err : msgerr;
}
+errval_t pci_msix_enable(uint16_t *count)
+{
+ errval_t err, msgerr;
+ err = pci_client->vtbl.msix_enable(pci_client, &msgerr, count);
+ return err_is_fail(err) ? err : msgerr;
+}
+
+errval_t pci_msix_vector_init(uint16_t idx, uint8_t destination,
+ uint8_t vector)
+{
+ errval_t err, msgerr;
+ err = pci_client->vtbl.msix_vector_init(pci_client, idx, destination,
+ vector, &msgerr);
+ return err_is_fail(err) ? err : msgerr;
+}
static void bind_cont(void *st, errval_t err, struct pci_binding *b)
{
mackerelDevices = [ "pci_hdr0", "pci_hdr1", "ht_config" ],
cFiles = [ "pcimain.c", "pci.c", "pci_service.c",
- "ht_config.c", "pcie.c" ],
+ "ht_config.c", "pcie.c", "pci_msix.c" ],
addCFlags = [ "-Wno-redundant-decls" ],
addLibraries = [ "skb", "pciconfspace", "acpi_client",
"octopus", "octopus_parser", "thc" ],
memset(dev_caps, 0, sizeof(dev_caps));
}
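+/* Find the caps index that corresponds to the given BAR of a device, or -1
+ * if no capability has been assigned for that BAR. */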
+int pci_bar_to_caps_index(uint8_t bus, uint8_t dev, uint8_t fun, uint8_t BAR)
+{
+ uint8_t i;
+ for (i = 0; i < PCI_NBARS && dev_caps[bus][dev][fun][i].assigned; i++) {
+ if (dev_caps[bus][dev][fun][i].bar_nr == BAR) {
+ return i;
+ }
+ }
+ return -1;
+}
+
int pci_get_nr_caps_for_bar(uint8_t bus, uint8_t dev, uint8_t fun, uint8_t idx)
{
return(dev_caps[bus][dev][fun][idx].nr_caps);
uint32_t vendor_id, uint32_t device_id,
uint32_t *bus, uint32_t *dev,uint32_t *fun,
int *nr_allocated_bars);
+int pci_bar_to_caps_index(uint8_t bus, uint8_t dev, uint8_t fun, uint8_t BAR);
int pci_get_nr_caps_for_bar(uint8_t bus, uint8_t dev, uint8_t fun, uint8_t index);
struct capref pci_get_cap_for_device(uint8_t bus, uint8_t dev, uint8_t fun,
uint8_t index, int cap_nr);
bool pcie);
errval_t pcie_setup_confspace(void);
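+
+/* MSI-X support (implemented in pci_msix.c) */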
+errval_t pci_msix_enable(struct pci_address *addr, uint16_t *count);
+errval_t pci_msix_vector_init(struct pci_address *addr, uint16_t idx,
+ uint8_t destination, uint8_t vector);
+
#endif // PCI_H_
--- /dev/null
+/*
+ * Copyright (c) 2013, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#include <stdio.h>
+
+#include <barrelfish/barrelfish.h>
+
+#include "pci.h"
+
+#define PCI_CAP_MSIX 0x11 // PCI capability ID for MSI-X
+
+/*****************************************************************************/
+/* Context management */
+
+struct pci_msix_context {
+ bool enabled; // MSI-X already enabled for this function
+ uint16_t vectors; // Number of MSI-X vectors supported by the device
+
+ volatile uint32_t *table; // Mapped MSI-X table (4 dwords per entry)
+
+ struct pci_address addr; // PCI address of the device
+ struct pci_msix_context *next; // Next entry in the context list
+};
+
+static struct pci_msix_context *contexts = NULL;
+
+static struct pci_msix_context *get_context(struct pci_address *addr,
+ bool alloc)
+{
+ struct pci_msix_context *ctx = contexts;
+ while (ctx != NULL) {
+ if (!memcmp(addr, &ctx->addr, sizeof(*addr))) {
+ return ctx;
+ }
+ ctx = ctx->next; // advance to the next context
+ }
+
+ if (alloc) {
+ ctx = calloc(1, sizeof(*ctx));
+ assert(ctx != NULL);
+ memcpy(&ctx->addr, addr, sizeof(*addr));
+ ctx->next = contexts;
+ contexts = ctx;
+ }
+ return ctx;
+}
+
+/*****************************************************************************/
+/* PCI helpers */
+
+/**
+ * Find PCI capability by type.
+ * @return Offset of first capability with specified type
+ */
+static uint8_t pci_cap_find(pci_hdr0_t *hdr, struct pci_address *addr,
+ uint8_t type)
+{
+ pci_hdr0_status_t status;
+ uint8_t offset;
+ uint32_t header;
+ uint8_t max;
+
+ // No caplist -> abort
+ status = pci_hdr0_status_rd(hdr);
+ if (!status.caplist) {
+ return 0;
+ }
+
+ max = 48; // at most 48 dword-aligned capabilities fit above offset 0x40
+ // Spec says bottom 2 bits must be masked
+ offset = pci_hdr0_cap_ptr_rd(hdr) & ~0x3U;
+ do {
+ assert(offset >= 0x40);
+ header = pci_read_conf_header(addr, offset / 4);
+ if ((header & 0xff) == type) {
+ return offset;
+ }
+ offset = (header >> 8) & 0xfc;
+ max--;
+ } while (max > 0 && offset != 0);
+
+ return 0;
+}
+
+
+/*****************************************************************************/
+/* MSI-X implementation */
+
+errval_t pci_msix_enable(struct pci_address *addr, uint16_t *count)
+{
+ uint8_t off;
+ uint32_t cap[3];
+ uint8_t bir;
+ struct capref tablecap;
+ struct frame_identity frameid = { .base = 0, .bits = 0 };
+ errval_t err;
+ void *virt;
+ struct pci_msix_context *ctx;
+ uint16_t i;
+ int bar_index;
+ volatile uint32_t *table;
+ pci_hdr0_t hdr;
+
+ pci_hdr0_initialize(&hdr, *addr);
+
+ if (!(off = pci_cap_find(&hdr, addr, PCI_CAP_MSIX))) {
+ return PCI_ERR_MSIX_NOTSUP;
+ }
+
+ ctx = get_context(addr, true);
+
+ off /= 4;
+ cap[0] = pci_read_conf_header(addr, off);
+ cap[1] = pci_read_conf_header(addr, off + 1);
+ cap[2] = pci_read_conf_header(addr, off + 2);
+
+ // TODO: How do we do this using mackerel?
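+ // Message Control lives in the upper 16 bits of cap[0]; its Table Size
+ // field (bits 10:0) encodes the number of vectors minus one.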
+ *count = ((cap[0] >> 16) & ((1 << 11) - 1)) + 1;
+
+ if (!ctx->enabled) {
+ // Make sure MSI-X is disabled during initialization
+ cap[0] &= ~(1U << 31);
+ pci_write_conf_header(addr, off, cap[0]);
+
+
+ // Find BAR for MSI-X table and map memory
+ bir = cap[1] & 0x7;
+ // TODO map all caps
+ bar_index = pci_bar_to_caps_index(addr->bus, addr->device,
+ addr->function, bir);
+ assert(bar_index >= 0);
+ assert(pci_get_nr_caps_for_bar(addr->bus, addr->device, addr->function,
+ bar_index) == 1);
+ tablecap = pci_get_cap_for_device(addr->bus, addr->device, addr->function,
+ bar_index, 0);
+ err = invoke_frame_identify(tablecap, &frameid);
+ assert(err_is_ok(err));
+ err = vspace_map_one_frame_attr(&virt, 1UL << frameid.bits, tablecap,
+ VREGION_FLAGS_READ_WRITE_NOCACHE, NULL, NULL);
+ assert(err_is_ok(err));
+
+ // Enable MSI-X and function mask
+ cap[0] |= (1U << 31) | (1U << 30);
+ pci_write_conf_header(addr, off, cap[0]);
+
+ // Calculate address for table
+ cap[1] &= ~0x7;
+ assert(cap[1] + (*count*16) <= (1ULL << frameid.bits));
+ table = (uint32_t *) ((uintptr_t) virt + cap[1]);
+ } else {
+ table = ctx->table;
+ }
+
+
+ // Make sure all interrupts are masked
+ for (i = 0; i < *count; i++) {
+ table[4*i + 3] |= 1; // set the per-vector mask bit (Vector Control, bit 0)
+ }
+
+ // Disable INTX
+ pci_hdr0_command_int_dis_wrf(&hdr, 1);
+
+ // Disable function mask
+ cap[0] &= ~(1U << 30);
+ pci_write_conf_header(addr, off, cap[0]);
+
+ // Save context
+ ctx->enabled = true;
+ ctx->vectors = *count;
+ ctx->table = table;
+
+ return SYS_ERR_OK;
+}
+
+errval_t pci_msix_vector_init(struct pci_address *addr, uint16_t idx,
+ uint8_t destination, uint8_t vector)
+{
+ struct pci_msix_context *ctx;
+ volatile uint32_t *entry;
+
+ ctx = get_context(addr, false);
+ if (!ctx || !ctx->enabled) {
+ return PCI_ERR_MSIX_DISABLED;
+ }
+
+ if (idx >= ctx->vectors) {
+ return PCI_ERR_MSIX_BADVECTOR;
+ }
+
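+ // Each table entry is four 32-bit words: Message Address (low), Message
+ // Address (high), Message Data, and Vector Control.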
+ entry = ctx->table + 4 * idx;
+ // Message address: x86 MSI address, destination APIC ID in bits 19:12
+ entry[0] = 0xFEE00000 | ((uint32_t) destination << 12);
+ // Message address high
+ entry[1] = 0x0;
+ // Message data
+ entry[2] = vector;
+ // Message control (unmask)
+ entry[3] &= ~0x1;
+
+ return SYS_ERR_OK;
+}
+
assert(err_is_ok(err));
}
+static void msix_enable_handler(struct pci_binding *b)
+{
+ struct client_state *cc = (struct client_state *) b->st;
+ struct pci_address addr = {
+ .bus = cc->bus,
+ .device = cc->dev,
+ .function = cc->fun,
+ };
+ errval_t err;
+ uint16_t count = 0; // pci_msix_enable() only sets this on success
+
+ err = pci_msix_enable(&addr, &count);
+ err = b->tx_vtbl.msix_enable_response(b, NOP_CONT, err, count);
+ assert(err_is_ok(err));
+}
+
+static void msix_vector_init_handler(struct pci_binding *b, uint16_t idx,
+ uint8_t destination, uint8_t vector)
+{
+ struct client_state *cc = (struct client_state *) b->st;
+ struct pci_address addr = {
+ .bus = cc->bus,
+ .device = cc->dev,
+ .function = cc->fun,
+ };
+ errval_t err;
+
+ err = pci_msix_vector_init(&addr, idx, destination, vector);
+ err = b->tx_vtbl.msix_vector_init_response(b, NOP_CONT, err);
+ assert(err_is_ok(err));
+}
+
struct pci_rx_vtbl pci_rx_vtbl = {
.init_pci_device_call = init_pci_device_handler,
.init_legacy_device_call = init_legacy_device_handler,
//.get_vbe_bios_cap_call = get_vbe_bios_cap,
.read_conf_header_call = read_conf_header_handler,
.write_conf_header_call = write_conf_header_handler,
+
+ .msix_enable_call = msix_enable_handler,
+ .msix_vector_init_call = msix_vector_init_handler,
};
static void export_callback(void *st, errval_t err, iref_t iref)