 * Copyright (c) 2007, 2008, 2009, 2010, 2011, 2012, ETH Zurich.
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
#include <barrelfish/barrelfish.h>
#include <barrelfish/waitset.h>
#include <barrelfish/deferred.h>
#include <barrelfish/nameservice_client.h>
#include <devif/queue_interface.h>

#include <if/sfn5122f_devif_defs.h>
#include <if/sfn5122f_devif_rpcclient_defs.h>
#include <devif/backends/net/sfn5122f_devif.h>
#include "../../../queue_interface_internal.h"
#define DEBUG_QUEUE(x...) printf("sfn5122f_q : " x)
#define DEBUG_QUEUE(x...) do {} while (0)
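/* Debug macro: the first definition prints prefixed debug output, the second
   compiles it away; which one is in effect is presumably chosen by a
   preprocessor conditional around these two definitions. */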
#define TX_ENTRIES 2048
#define RX_ENTRIES 2048

#define EV_CODE_DRV_GEN 7
#define EV_CODE_USER 8
#define EV_CODE_MCDI 12
#define EV_CODE_GLOBAL 6
#define EV_CODE_NONE 15
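/* These appear to be the event-code values carried in hardware event-queue
   entries; sfn5122f_dequeue below dispatches on them. */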
/* one event entry for each TX/RX descriptor, plus an additional 2 for MCDI
   completion and link state events */
#define EV_ENTRIES 4096
static errval_t update_rxtail(struct sfn5122f_queue* q, size_t tail)
    assert(q->device != NULL);
    reg = sfn5122f_rx_desc_upd_reg_hi_rx_desc_wptr_insert(reg, tail);
    /* don't want to push an additional rx descriptor with the write pointer */
    reg = sfn5122f_rx_desc_upd_reg_hi_rx_desc_push_cmd_insert(reg, 0);
    /* the lower register will be ignored */
    sfn5122f_rx_desc_upd_reg_lo_wr(q->device, q->id, 0);
    sfn5122f_rx_desc_upd_reg_hi_wr(q->device, q->id, reg);
static errval_t update_txtail(struct sfn5122f_queue* q, size_t tail)
    assert(q->device != NULL);
    reg = sfn5122f_tx_desc_upd_reg_hi_tx_desc_wptr_insert(reg, tail);
    /* don't want to push an additional tx descriptor with the write pointer */
    reg = sfn5122f_tx_desc_upd_reg_hi_tx_desc_push_cmd_insert(reg, 0);
    reg = sfn5122f_tx_desc_upd_reg_hi_tx_desc_insert(reg, 0);

    /* the lower register will be ignored */
    sfn5122f_tx_desc_upd_reg_lo_wr(q->device, q->id, 0);
    sfn5122f_tx_desc_upd_reg_hi_wr(q->device, q->id, reg);
static void bind_cb(void *st, errval_t err, struct sfn5122f_devif_binding *b)
    DEBUG_QUEUE("binding CB \n");
    struct sfn5122f_queue* queue = (struct sfn5122f_queue*) st;

    queue->rpc = malloc(sizeof(struct sfn5122f_devif_rpc_client));
    assert(queue->rpc != NULL);

    err = sfn5122f_devif_rpc_client_init(queue->rpc, b);
    if (err_is_fail(err)) {
static errval_t sfn5122f_register(struct devq* q, struct capref cap,
    uint64_t buftbl_idx = 0;
    struct frame_identity id;
    struct sfn5122f_queue* queue = (struct sfn5122f_queue*) q;

    if (queue->userspace) {
        err = queue->rpc->vtbl.register_region(queue->rpc, queue->id, cap,
        if (err_is_fail(err) || err_is_fail(err2)) {
            err = err_is_fail(err) ? err : err2;

    err = invoke_frame_identify(cap, &id);
    if (err_is_fail(err)) {
    // Set up the data structure for translating a region ID to a buffer table entry ID.
    // Currently this is only a linked list.
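    /* Illustrative sketch only: judging from the fields used below and in
     * sfn5122f_deregister, the externally defined region_entry looks roughly
     * like this (actual types may differ):
     *
     *   struct region_entry {
     *       regionid_t rid;            // region ID handed out by devif
     *       lpaddr_t phys;             // physical base of the frame
     *       size_t size;               // size of the frame in bytes
     *       uint64_t buftbl_idx;       // first buffer table entry of the region
     *       struct region_entry *next; // singly linked list
     *   };
     */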
    struct region_entry* entry = malloc(sizeof(struct region_entry));
    entry->phys = id.base;
    entry->size = id.bytes;
    entry->buftbl_idx = buftbl_idx;

    struct region_entry* cur = queue->regions;
        queue->regions = entry;

    while (cur->next != NULL) {
static errval_t sfn5122f_deregister(struct devq* q, regionid_t rid)
    struct sfn5122f_queue* queue = (struct sfn5122f_queue*) q;

    // find the region and translate it to a buftbl entry
    struct region_entry* cur = queue->regions;
        return SFN_ERR_DEREGISTER_REGION;

    while (cur->next != NULL && cur->rid != rid) {

    // do an RPC to inform the card driver to remove the buftbl entries
    if (queue->userspace) {
        err = queue->rpc->vtbl.deregister_region(queue->rpc, cur->buftbl_idx, cur->size,
        if (err_is_fail(err) || err_is_fail(err2)) {
            err = err_is_fail(err) ? err : err2;
static errval_t sfn5122f_control(struct devq* q, uint64_t cmd, uint64_t value)
    DEBUG_QUEUE("Control cmd=%lu value=%lu \n", cmd, value);

static errval_t sfn5122f_notify(struct devq* q)
    DEBUG_QUEUE("Notify \n");

static errval_t enqueue_rx_buf(struct sfn5122f_queue* q, regionid_t rid,
                               bufferid_t bid, lpaddr_t base, size_t len,
    DEBUG_QUEUE("Enqueueing RX buf \n");
    // check if there is space
    if (sfn5122f_queue_free_rxslots(q) == 0) {
        printf("SFN5122F_%d: Not enough space in RX ring, not adding buffer\n",
        return SFN_ERR_ENQUEUE;

    struct region_entry* entry = q->regions;

    while ((entry->next != NULL) && (entry->rid != rid)) {
        return SFN_ERR_ENQUEUE;
    // compute the buffer table entry of the RX buffer and the offset within it
    uint64_t buftbl_idx = entry->buftbl_idx + (bid / BUF_SIZE);
    uint16_t offset = bid & 0x00000FFF;

    // the buffer must still lie within the same buffer table entry
    assert(buftbl_idx == (entry->buftbl_idx + ((bid + len - 1) / BUF_SIZE)));
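    /* Worked example (assuming BUF_SIZE is 4096, consistent with the 0xFFF
     * offset mask): for bid = 9000 and len = 100, buftbl_idx is
     * entry->buftbl_idx + 2 and offset = 9000 & 0xFFF = 808; since
     * (9000 + 100 - 1) / 4096 is also 2, the buffer ends in the same buffer
     * table entry and the assert above holds. */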
    DEBUG_QUEUE("RX_BUF tbl_idx=%lu offset=%d flags=%lu \n",
                buftbl_idx, offset, flags);
        sfn5122f_queue_add_user_rxbuf_devif(q, buftbl_idx, offset,
                                            rid, bid, base, len, flags);
        sfn5122f_queue_add_rxbuf_devif(q, rid, bid, base,
    sfn5122f_queue_bump_rxtail(q);

static errval_t enqueue_tx_buf(struct sfn5122f_queue* q, regionid_t rid,
                               bufferid_t bid, lpaddr_t base, size_t len,
    DEBUG_QUEUE("Enqueueing TX buf \n");
    // check if there is space
    if (sfn5122f_queue_free_txslots(q) == 0) {
        printf("SFN5122F_%d: Not enough space in TX ring, not adding buffer\n",
        return SFN_ERR_ENQUEUE;

    struct region_entry* entry = q->regions;

    while ((entry->next != NULL) && (entry->rid != rid)) {
        return SFN_ERR_ENQUEUE;
    // compute the buffer table entry of the TX buffer and the offset within it
    uint64_t buftbl_idx = entry->buftbl_idx + (bid / BUF_SIZE);
    uint16_t offset = bid & 0x00000FFF;

    // the buffer must still lie within the same buffer table entry
    assert(buftbl_idx == (entry->buftbl_idx + ((bid + len - 1) / BUF_SIZE)));
    DEBUG_QUEUE("TX_BUF tbl_idx=%lu offset=%d flags=%lu \n", buftbl_idx, offset,
        DEBUG_QUEUE("TX_BUF tbl_idx=%lu offset=%d flags=%lu \n", buftbl_idx, offset,
        sfn5122f_queue_add_user_txbuf_devif(q, buftbl_idx, offset,
                                            rid, bid, base, len, flags);
        DEBUG_QUEUE("TX_BUF flags=%lu \n", flags);
        sfn5122f_queue_add_txbuf_devif(q, rid, bid, base,
    sfn5122f_queue_bump_txtail(q);
static errval_t sfn5122f_enqueue(struct devq* q, regionid_t rid, bufferid_t bid,
                                 lpaddr_t base, size_t len, uint64_t flags)

    struct sfn5122f_queue* queue = (struct sfn5122f_queue*) q;
    if (flags & NETIF_RXFLAG) {
        /* cannot enqueue a receive buffer larger than 2048 bytes */
        err = enqueue_rx_buf(queue, rid, bid, base, len, flags);
        if (err_is_fail(err)) {
    } else if (flags & NETIF_TXFLAG) {
        assert(len <= BASE_PAGE_SIZE);

        err = enqueue_tx_buf(queue, rid, bid, base, len, flags);
        if (err_is_fail(err)) {

static errval_t sfn5122f_dequeue(struct devq* q, regionid_t* rid, bufferid_t* bid,
                                 lpaddr_t* base, size_t* len, uint64_t* flags)
    errval_t err = DEVQ_ERR_RX_EMPTY;

    struct sfn5122f_queue* queue = (struct sfn5122f_queue*) q;

    sfn5122f_evq_rptr_reg_wr(queue->device, queue->id, queue->ev_head);
    //__sync_synchronize();

    if (queue->num_left > 0) {
        *rid = queue->bufs[queue->last_deq].rid;
        *bid = queue->bufs[queue->last_deq].bid;
        *flags = queue->bufs[queue->last_deq].flags;
        *base = queue->bufs[queue->last_deq].addr;
        *len = queue->bufs[queue->last_deq].len;

    ev_code = sfn5122f_get_event_code(queue);
        // TODO multiple packets
        err = sfn5122f_queue_handle_rx_ev_devif(queue, rid, bid, base,
        if (err_is_ok(err)) {
            DEBUG_QUEUE(" RX_EV Q_ID: %d len %ld \n", queue->id, *len);
        sfn5122f_queue_bump_evhead(queue);

        err = sfn5122f_queue_handle_tx_ev_devif(queue, rid, bid, base,
        if (err_is_ok(err)) {
            DEBUG_QUEUE("TX EVENT OK %d \n", queue->id);
            DEBUG_QUEUE("TX EVENT ERR %d \n", queue->id);
        sfn5122f_queue_bump_evhead(queue);

        //DEBUG_QUEUE("DRIVER EVENT %d\n", qi);
        sfn5122f_handle_drv_ev(queue, queue->id);
        sfn5122f_queue_bump_evhead(queue);
    case EV_CODE_DRV_GEN:
        DEBUG_QUEUE("DRIVER GENERATED EVENT \n");
        sfn5122f_queue_bump_evhead(queue);

        DEBUG_QUEUE("USER EVENT \n");
        sfn5122f_queue_bump_evhead(queue);

        //DEBUG_QUEUE("MCDI EVENT \n");
        sfn5122f_queue_handle_mcdi_event(queue);
        sfn5122f_queue_bump_evhead(queue);

        DEBUG_QUEUE("GLOBAL EVENT \n");
        sfn5122f_queue_bump_evhead(queue);

        sfn5122f_evq_rptr_reg_wr(queue->device, queue->id,
static void interrupt_handler(void* arg)
    struct sfn5122f_queue* queue = (struct sfn5122f_queue*) arg;
    // TODO check fatal interrupts
errval_t sfn5122f_queue_create(struct sfn5122f_queue** q, sfn5122f_event_cb_t cb,
                               bool userlevel, bool interrupts)
    DEBUG_QUEUE("create called \n");

    //struct capref tx_frame, rx_frame, ev_frame;
    //size_t tx_size, rx_size, ev_size;
    void *tx_virt, *rx_virt, *ev_virt;
    struct sfn5122f_queue* queue;
    struct frame_identity id;

    struct sfn5122f_queue_ops ops = {
        .update_txtail = update_txtail,
        .update_rxtail = update_rxtail
    /* Allocate memory for the descriptor rings.
       There is no difference for userspace networking. */
    // TODO too large ...
    total_size = sizeof(uint64_t) * (TX_ENTRIES + RX_ENTRIES + EV_ENTRIES);
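    /* With the defaults above this is 8 bytes * (2048 + 2048 + 4096) entries,
     * i.e. 64 KiB for all three rings together. */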
    tx_virt = alloc_map_frame(VREGION_FLAGS_READ_WRITE, total_size, &frame);
    if (tx_virt == NULL) {
        return SFN_ERR_ALLOC_QUEUE;

    rx_virt = tx_virt + (sizeof(uint64_t) * TX_ENTRIES);
    ev_virt = rx_virt + (sizeof(uint64_t) * RX_ENTRIES);

    DEBUG_QUEUE("queue init \n");

    queue = sfn5122f_queue_init(tx_virt, TX_ENTRIES, rx_virt, RX_ENTRIES,
                                ev_virt, EV_ENTRIES, &ops, userlevel);

    queue->frame = frame;
    queue->bound = false;

    const char *name = "sfn5122f_sfn5122fmng_devif";
    // Connect to the Solarflare card driver
    err = nameservice_blocking_lookup(name, &iref);
    if (err_is_fail(err)) {

    DEBUG_QUEUE("binding \n");
    err = sfn5122f_devif_bind(iref, bind_cb, queue, get_default_waitset(),
    if (err_is_fail(err)) {

    while (!queue->bound) {
        event_dispatch(get_default_waitset());

    DEBUG_QUEUE("bound \n");

    // Inform card driver about new queue and get the registers/queue id
    err = slot_alloc(&regs);
    if (err_is_fail(err)) {

        printf("Solarflare queue used in polling mode \n");
        err = queue->rpc->vtbl.create_queue(queue->rpc, frame, userlevel, interrupts,
                                            0, 0, &queue->id, &regs, &err2);
        if (err_is_fail(err) || err_is_fail(err2)) {
            err = err_is_fail(err) ? err : err2;
        printf("Solarflare queue used in interrupt mode \n");
        err = pci_setup_inthandler(interrupt_handler, queue, &queue->vector);
        assert(err_is_ok(err));

        queue->core = disp_get_core_id();

        err = queue->rpc->vtbl.create_queue(queue->rpc, frame, userlevel,
                                            interrupts, queue->core,
                                            queue->vector, &queue->id,
        if (err_is_fail(err) || err_is_fail(err2)) {
            err = err_is_fail(err) ? err : err2;
            printf("Registering interrupt failed, continuing in polling mode \n");
    DEBUG_QUEUE("rpc done \n");

    err = invoke_frame_identify(regs, &id);
    if (err_is_fail(err)) {

    err = vspace_map_one_frame_attr(&queue->device_va, id.bytes, regs,
                                    VREGION_FLAGS_READ_WRITE, NULL, NULL);
    if (err_is_fail(err)) {

    DEBUG_QUEUE("mapped \n");
    queue->device = malloc(sizeof(sfn5122f_t));
    sfn5122f_initialize(queue->device, queue->device_va);

    err = devq_init(&queue->q, false);
    if (err_is_fail(err)) {

    queue->q.f.enq = sfn5122f_enqueue;
    queue->q.f.deq = sfn5122f_dequeue;
    queue->q.f.reg = sfn5122f_register;
    queue->q.f.dereg = sfn5122f_deregister;
    queue->q.f.ctrl = sfn5122f_control;
    queue->q.f.notify = sfn5122f_notify;
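    /* Illustrative usage sketch (my_event_cb is a hypothetical callback, not
     * part of this file): a client creates the queue and then drives it
     * through the generic devif functions installed above.
     *
     *   struct sfn5122f_queue *nic_q;
     *   errval_t err = sfn5122f_queue_create(&nic_q, my_event_cb, true, false);
     *   assert(err_is_ok(err));
     *   // register a frame capability with the queue, then enqueue buffers
     *   // with NETIF_RXFLAG (receive) or NETIF_TXFLAG (transmit) set in flags
     */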
errval_t sfn5122f_queue_destroy(struct sfn5122f_queue* q)
    err = q->rpc->vtbl.destroy_queue(q->rpc, q->id, &err2);
    if (err_is_fail(err) || err_is_fail(err2)) {
        err = err_is_fail(err) ? err : err2;

    err = vspace_unmap(q->device_va);
    if (err_is_fail(err)) {

    err = devq_destroy(&(q->q));
    if (err_is_fail(err)) {

    err = sfn5122f_queue_free(q);
    if (err_is_fail(err)) {