/**
 * \file
 * \brief PCI service client-side logic
 */

/*
 * Copyright (c) 2007, 2008, 2009, 2010, 2011, ETH Zurich.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
 */
#include <stdlib.h>

#include <barrelfish/barrelfish.h>
#include <barrelfish/nameservice_client.h>
#include <barrelfish/dispatch.h>
#include <barrelfish/inthandler.h>

#include <pci/pci.h>

#include <if/pci_defs.h>
#include <if/pci_rpcclient_defs.h>

#define INVALID_VECTOR ((uint32_t)-1)

static struct pci_rpc_client *pci_client = NULL;
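
/**
 * \brief Re-register the interrupt of an already initialized PCI device.
 *
 * Sets up a local interrupt handler (movable if a relocation handler is
 * supplied, plain otherwise) and asks the PCI service to re-route the
 * device's interrupt to the resulting vector on the calling core.
 */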
errval_t pci_reregister_irq_for_device(uint32_t class, uint32_t subclass, uint32_t prog_if,
                                       uint32_t vendor, uint32_t device,
                                       uint32_t bus, uint32_t dev, uint32_t fun,
                                       interrupt_handler_fn handler,
                                       void *handler_arg,
                                       interrupt_handler_fn reloc_handler,
                                       void *reloc_handler_arg)
{
    uint32_t vector = INVALID_VECTOR;
    errval_t err, msgerr;

    if (handler != NULL && reloc_handler != NULL) {
        // register movable interrupt
        err = inthandler_setup_movable(handler, handler_arg, reloc_handler,
                                       reloc_handler_arg, &vector);
        if (err_is_fail(err)) {
            return err;
        }

        assert(vector != INVALID_VECTOR);
    } else if (handler != NULL) {
        // register non-movable interrupt
        err = inthandler_setup(handler, handler_arg, &vector);
        if (err_is_fail(err)) {
            return err;
        }

        assert(vector != INVALID_VECTOR);
    }

    err = pci_client->vtbl.
        reregister_interrupt(pci_client, class, subclass, prog_if, vendor,
                             device, bus, dev, fun, disp_get_current_core_id(),
                             vector, &msgerr);
    if (err_is_fail(err)) {
        return err;
    } else if (err_is_fail(msgerr)) {
        return msgerr;
    }

    return SYS_ERR_OK;
}
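
/**
 * \brief Initialize a PCI device and register a driver with a movable interrupt.
 *
 * Asks the PCI service to initialize the device, obtains the IRQ source and
 * destination capabilities, wires up the (optionally relocatable) interrupt
 * handler, requests a capability for every cap of every BAR and finally calls
 * init_func with the assembled array of struct device_mem descriptors.
 */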
errval_t pci_register_driver_movable_irq(pci_driver_init_fn init_func, uint32_t class,
                                         uint32_t subclass, uint32_t prog_if,
                                         uint32_t vendor, uint32_t device,
                                         uint32_t bus, uint32_t dev, uint32_t fun,
                                         interrupt_handler_fn handler,
                                         void *handler_arg,
                                         interrupt_handler_fn reloc_handler,
                                         void *reloc_handler_arg)
{
    pci_caps_per_bar_t *caps_per_bar = NULL;
    uint8_t nbars;
    errval_t err, msgerr;

    err = pci_client->vtbl.
        init_pci_device(pci_client, class, subclass, prog_if, vendor,
                        device, bus, dev, fun, &msgerr,
                        &nbars, &caps_per_bar);
    if (err_is_fail(err)) {
        return err;
    } else if (err_is_fail(msgerr)) {
        free(caps_per_bar);
        return msgerr;
    }

    struct capref irq_src_cap;

    // Get IRQ 0. For backward compatibility with the function interface.
    err = pci_client->vtbl.get_irq_cap(pci_client, 0, &msgerr, &irq_src_cap);
    if (err_is_fail(err) || err_is_fail(msgerr)) {
        if (err_is_ok(err)) {
            err = msgerr;
        }
        DEBUG_ERR(err, "requesting cap for IRQ %d of device", 0);
        return err;
    }

    // Get irq_dest_cap from monitor
    struct capref irq_dest_cap;
    err = alloc_dest_irq_cap(&irq_dest_cap);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "Could not allocate dest irq cap");
        return err;
    }

    // Connect endpoint to handler
    err = inthandler_setup_movable_cap(irq_dest_cap, handler, handler_arg, reloc_handler,
                                       reloc_handler_arg);
    if (err_is_fail(err)) {
        return err;
    }

    assert(nbars > 0); // otherwise we should have received an error!

    struct device_mem *bars = calloc(nbars, sizeof(struct device_mem));
    assert(bars != NULL);

    // request caps for all bars of device
    for (int nb = 0; nb < nbars; nb++) {
        struct device_mem *bar = &bars[nb];

        int ncaps = (*caps_per_bar)[nb];

        bar->nr_caps = ncaps;
        bar->frame_cap = malloc(ncaps * sizeof(struct capref)); // FIXME: leak
        assert(bar->frame_cap != NULL);

        for (int nc = 0; nc < ncaps; nc++) {
            struct capref cap;
            uint8_t type;

            err = pci_client->vtbl.get_bar_cap(pci_client, nb, nc, &msgerr, &cap,
                                               &type, &bar->bar_nr);
            if (err_is_fail(err) || err_is_fail(msgerr)) {
                if (err_is_ok(err)) {
                    err = msgerr;
                }
                DEBUG_ERR(err, "requesting cap %d for BAR %d of device", nc, nb);
                return err;
            }

            if (type == 0) { // Frame cap BAR
                bar->frame_cap[nc] = cap;
                if (nc == 0) {
                    struct frame_identity id = { .base = 0, .bits = 0 };
                    invoke_frame_identify(cap, &id);
                    bar->paddr = id.base;
                    bar->bytes = (1ul << id.bits) * ncaps;
                }
            } else { // IO cap BAR
                err = cap_copy(cap_io, cap);
                if (err_is_fail(err) && err_no(err) != SYS_ERR_SLOT_IN_USE) {
                    DEBUG_ERR(err, "cap_copy for IO cap");
                    return err;
                }
            }
        }
    }

    // initialize the device. We have all the caps now
    init_func(bars, nbars);

    return SYS_ERR_OK;
}
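
/**
 * \brief Register a driver with a fixed (non-movable) interrupt handler.
 *
 * Thin wrapper around pci_register_driver_movable_irq() that passes NULL for
 * the relocation handler and its argument.
 */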
errval_t pci_register_driver_irq(pci_driver_init_fn init_func, uint32_t class,
                                 uint32_t subclass, uint32_t prog_if,
                                 uint32_t vendor, uint32_t device,
                                 uint32_t bus, uint32_t dev, uint32_t fun,
                                 interrupt_handler_fn handler,
                                 void *handler_arg)
{
    return pci_register_driver_movable_irq(init_func, class, subclass,
            prog_if, vendor, device, bus, dev, fun, handler, handler_arg,
            NULL, NULL);
}
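
/**
 * \brief Register a driver for a device that does not use interrupts.
 */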
errval_t pci_register_driver_noirq(pci_driver_init_fn init_func, uint32_t class,
                                   uint32_t subclass, uint32_t prog_if,
                                   uint32_t vendor, uint32_t device,
                                   uint32_t bus, uint32_t dev, uint32_t fun)
{
    return pci_register_driver_irq(init_func, class, subclass, prog_if, vendor,
                                   device, bus, dev, fun, NULL, NULL);
}
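
/**
 * \brief Register a legacy driver identified by an I/O port range and a
 *        fixed IRQ rather than by PCI enumeration.
 *
 * Sets up the interrupt handler, asks the PCI service to route the IRQ to
 * this core, copies the returned I/O capability to the default cap_io slot
 * and then runs the driver's init function.
 */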
errval_t pci_register_legacy_driver_irq(legacy_driver_init_fn init_func,
                                        uint16_t iomin, uint16_t iomax, int irq,
                                        interrupt_handler_fn handler,
                                        void *handler_arg)
{
    errval_t err, msgerr;
    struct capref iocap;

    uint32_t vector = INVALID_VECTOR;
    err = inthandler_setup(handler, handler_arg, &vector);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "inthandler_setup()\n");
        return err;
    }

    err = pci_client->vtbl.init_legacy_device(pci_client, iomin, iomax, irq,
                                              disp_get_core_id(), vector,
                                              &msgerr, &iocap);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "pci_client->init_legacy_device()\n");
        return err;
    } else if (err_is_fail(msgerr)) {
        DEBUG_ERR(msgerr, "pci_client->init_legacy_device()\n");
        return msgerr;
    }

    /* copy IO cap to default location */
    err = cap_copy(cap_io, iocap);
    if (err_is_fail(err) && err_no(err) != SYS_ERR_SLOT_IN_USE) {
        DEBUG_ERR(err, "failed to copy legacy io cap to default slot\n");
        return err;
    }

    err = cap_destroy(iocap);
    assert(err_is_ok(err));

    /* run the legacy driver's init function */
    init_func();
    return SYS_ERR_OK;
}
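
/**
 * \brief Set up an interrupt handler and return the vector it was assigned.
 *
 * The returned vector is offset by 32 (see the FIXME in the body).
 */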
errval_t pci_setup_inthandler(interrupt_handler_fn handler, void *handler_arg,
                              uint8_t *ret_vector)
{
    errval_t err;
    uint32_t vector = INVALID_VECTOR;
    err = inthandler_setup(handler, handler_arg, &vector);
    if (err_is_ok(err)) {
        *ret_vector = vector + 32; // FIXME: HACK
    }
    return err;
}
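
/**
 * \brief Read a dword from the device's PCI configuration header via the
 *        PCI service.
 */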
errval_t pci_read_conf_header(uint32_t dword, uint32_t *val)
{
    errval_t err, msgerr;
    err = pci_client->vtbl.read_conf_header(pci_client, dword, &msgerr, val);
    return err_is_fail(err) ? err : msgerr;
}
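
/**
 * \brief Write a dword of the device's PCI configuration header via the
 *        PCI service.
 */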
errval_t pci_write_conf_header(uint32_t dword, uint32_t val)
{
    errval_t err, msgerr;
    err = pci_client->vtbl.write_conf_header(pci_client, dword, val, &msgerr);
    return err_is_fail(err) ? err : msgerr;
}
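
/**
 * \brief Enable MSI-X for a device and return the number of available vectors.
 *
 * If addr is NULL the request refers to the device previously initialized
 * over this binding; otherwise the device is addressed explicitly by
 * bus/device/function.
 */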
errval_t pci_msix_enable_addr(struct pci_address *addr, uint16_t *count)
{
    errval_t err, msgerr;
    if (addr == NULL) {
        err = pci_client->vtbl.msix_enable(pci_client, &msgerr, count);
    } else {
        err = pci_client->vtbl.msix_enable_addr(pci_client, addr->bus, addr->device,
                                                addr->function, &msgerr, count);
    }
    return err_is_fail(err) ? err : msgerr;
}

errval_t pci_msix_enable(uint16_t *count)
{
    return pci_msix_enable_addr(NULL, count);
}
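
/**
 * \brief Configure the MSI-X table entry idx with the given destination and
 *        vector, as interpreted by the PCI service.
 *
 * As with pci_msix_enable_addr(), a NULL addr refers to the device that was
 * initialized over this binding.
 */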
errval_t pci_msix_vector_init_addr(struct pci_address *addr, uint16_t idx,
                                   uint8_t destination, uint8_t vector)
{
    errval_t err, msgerr;
    if (addr == NULL) {
        err = pci_client->vtbl.msix_vector_init(pci_client, idx, destination,
                                                vector, &msgerr);
    } else {
        err = pci_client->vtbl.msix_vector_init_addr(pci_client, addr->bus,
                                                     addr->device, addr->function,
                                                     idx, destination, vector, &msgerr);
    }
    return err_is_fail(err) ? err : msgerr;
}

errval_t pci_msix_vector_init(uint16_t idx, uint8_t destination, uint8_t vector)
{
    return pci_msix_vector_init_addr(NULL, idx, destination, vector);
}
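
/*
 * Bind continuation: wraps the fresh pci_binding in an RPC client and
 * publishes it in pci_client; the outcome is reported back through the
 * errval_t pointer passed as st.
 */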
static void bind_cont(void *st, errval_t err, struct pci_binding *b)
{
    errval_t *reterr = st;
    if (err_is_ok(err)) {
        struct pci_rpc_client *r = malloc(sizeof(*r));
        assert(r != NULL);
        err = pci_rpc_client_init(r, b);
        if (err_is_ok(err)) {
            pci_client = r;
        } else {
            free(r);
        }
    }
    *reterr = err;
}
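
/**
 * \brief Connect to the PCI service.
 *
 * Looks up the "pci" service in the nameservice, binds to it and handles
 * messages until the RPC client is ready or binding has failed.
 *
 * A minimal usage sketch (illustrative only: my_init, MY_VENDOR_ID and
 * MY_DEVICE_ID are hypothetical driver-side names; PCI_DONT_CARE is the
 * wildcard used by existing drivers):
 *
 *   static void my_init(struct device_mem *bars, int nbars) { ... }
 *
 *   errval_t err = pci_client_connect();
 *   assert(err_is_ok(err));
 *   err = pci_register_driver_noirq(my_init, PCI_DONT_CARE, PCI_DONT_CARE,
 *                                   PCI_DONT_CARE, MY_VENDOR_ID, MY_DEVICE_ID,
 *                                   PCI_DONT_CARE, PCI_DONT_CARE, PCI_DONT_CARE);
 */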
errval_t pci_client_connect(void)
{
    iref_t iref;
    errval_t err, err2 = SYS_ERR_OK;

    /* Connect to the pci server */
    err = nameservice_blocking_lookup("pci", &iref);
    if (err_is_fail(err)) {
        return err;
    }

    /* Setup flounder connection with pci server */
    err = pci_bind(iref, bind_cont, &err2, get_default_waitset(),
                   IDC_BIND_FLAG_RPC_CAP_TRANSFER);
    if (err_is_fail(err)) {
        return err;
    }

    /* XXX: Wait for connection establishment */
    while (pci_client == NULL && err2 == SYS_ERR_OK) {
        messages_wait_and_handle_next();
    }

    return err2;
}