/*
 * Copyright (c) 2007-2011, ETH Zurich.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
 */
#include <barrelfish/barrelfish.h>
#include <barrelfish/nameservice_client.h>
#include <devif/backends/net/e10k_devif.h>
#include <devif/queue_interface.h>
#include <dev/e10k_q_dev.h>
#include <dev/e10k_dev.h>
#include <dev/e10k_vf_dev.h>
#include <skb/skb.h>
#include <pci/pci.h>
#include <net_interfaces/flags.h>

#include <if/e10k_vf_defs.h>
#include <if/e10k_vf_rpcclient_defs.h>

#include "e10k_devif_vf.h"
#include "helper.h"
#include "e10k_queue.h"
#include "debug.h"

#define NUM_TX_DESC 2048
#define NUM_RX_DESC 2048

// Header lengths assumed for the checksum-offload context descriptors
#define ETHHDR_LEN 14
#define IPHDR_LEN  20
/******************************************************************************/
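/* Helpers to decode the devif buffer flags that request checksum offload.
 * The TCP header length is carried in the flags in units of 32-bit words,
 * hence the multiplication by 4 in buf_tcphdrlen(). */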
static inline bool buf_use_tcpxsm(uint64_t flags)
{
    return (flags & NETIF_TXFLAG_TCPCHECKSUM);
}

static inline bool buf_use_udpxsm(uint64_t flags)
{
    return (flags & NETIF_TXFLAG_UDPCHECKSUM);
}

static inline bool buf_use_ipxsm(uint64_t flags)
{
    return (flags & NETIF_TXFLAG_IPCHECKSUM) ||
        buf_use_tcpxsm(flags) || buf_use_udpxsm(flags);
}

static inline uint8_t buf_tcphdrlen(uint64_t flags)
{
    return ((flags & NETIF_TXFLAG_TCPHDRLEN_MASK) >>
            NETIF_TXFLAG_TCPHDRLEN_SHIFT) * 4;
}
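/* Tail-pointer update callbacks handed to the generic queue code: a VF
 * queue writes its VFTDT/VFRDT registers, while a queue obtained from the
 * PF driver writes the device's TDT/RDT registers directly. */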
static errval_t update_txtail(struct e10k_queue* q, size_t tail)
{
    assert(q->d);
    if (q->use_vf) {
        e10k_vf_vftdt_wr(q->d, q->id, tail);
    } else {
        e10k_tdt_wr(q->d, q->id, tail);
    }
    return SYS_ERR_OK;
}
static errval_t update_rxtail(struct e10k_queue* q, size_t tail)
{
    assert(q->d);
    if (q->use_vf) {
        e10k_vf_vfrdt_wr(q->d, q->id, tail);
    } else {
        e10k_rdt_1_wr(q->d, q->id, tail);
    }
    return SYS_ERR_OK;
}
static struct region_entry* get_region(struct e10k_queue* q, regionid_t rid)
{
    struct region_entry* entry = q->regions;
    while (entry != NULL) {
        if (entry->rid == rid) {
            return entry;
        }
        entry = entry->next;
    }
    return NULL;
}
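/* TX path: when checksum offload is requested, a context descriptor
 * carrying the header layout (Ethernet/IP/L4 lengths) is written ahead of
 * the data descriptor so the hardware can insert the checksums. */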
static errval_t enqueue_tx_buf(struct e10k_queue* q, regionid_t rid,
                               genoffset_t offset,
                               genoffset_t length,
                               genoffset_t valid_data,
                               genoffset_t valid_length,
                               uint64_t flags)
{
    DEBUG_QUEUE("Enqueueing TX buf \n");

    if (e10k_queue_free_txslots(q) == 0) {
        DEBUG_QUEUE("e10k_%d: Not enough space in TX ring, not adding buffer\n",
                    q->id);
        return NIC_ERR_ENQUEUE;
    }

    bool last = flags & NETIF_TXFLAG_LAST;
    bool first = flags & NETIF_TXFLAG_FIRST;

    // Prepare checksum offload
    if (buf_use_ipxsm(flags)) {
        e10k_q_l4_type_t l4t = 0;
        uint8_t l4len = 0;

        if (buf_use_tcpxsm(flags)) {
            l4t = e10k_q_tcp;
            l4len = buf_tcphdrlen(flags);
        } else if (buf_use_udpxsm(flags)) {
            l4t = e10k_q_udp;
        }

        e10k_queue_add_txcontext(q, 0, ETHHDR_LEN, IPHDR_LEN, l4len, l4t);

        if (q->use_vtd) {
            // with VT-d, the device uses the driver's virtual addresses
            struct region_entry* entry = get_region(q, rid);
            assert(entry != NULL);

            lpaddr_t addr = (lpaddr_t) entry->virt + offset + valid_data;
            e10k_queue_add_txbuf_ctx(q, addr, rid, offset, length,
                                     valid_data, valid_length, flags,
                                     first, last, length, 0, true, l4len != 0);
        } else {
            // without VT-d, get the physical address of the buffer
            struct region_entry* entry = get_region(q, rid);
            assert(entry != NULL);

            lpaddr_t addr = (lpaddr_t) entry->phys + offset + valid_data;
            e10k_queue_add_txbuf_ctx(q, addr, rid, offset, length,
                                     valid_data, valid_length, flags,
                                     first, last, length, 0, true, l4len != 0);
        }
    } else {
        if (q->use_vtd) {
            // get virtual address of buffer
            struct region_entry* entry = get_region(q, rid);
            assert(entry != NULL);

            lpaddr_t addr = (lpaddr_t) entry->virt + offset + valid_data;
            e10k_queue_add_txbuf(q, addr, rid, offset, length, valid_data,
                                 valid_length, flags, first, last, length);
        } else {
            // get physical address of buffer
            struct region_entry* entry = get_region(q, rid);
            assert(entry != NULL);

            lpaddr_t addr = (lpaddr_t) entry->phys + offset + valid_data;
            e10k_queue_add_txbuf(q, addr, rid, offset, length, valid_data,
                                 valid_length, flags, first, last, length);
        }
    }

    e10k_queue_bump_txtail(q);

    return SYS_ERR_OK;
}
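/* RX path: hand an empty buffer to the hardware; the NIC fills it and
 * reports the valid length when the buffer is dequeued again. */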
static errval_t enqueue_rx_buf(struct e10k_queue* q, regionid_t rid,
                               genoffset_t offset,
                               genoffset_t length,
                               genoffset_t valid_data,
                               genoffset_t valid_length,
                               uint64_t flags)
{
    DEBUG_QUEUE("Enqueueing RX buf \n");

    // check if there is space
    if (e10k_queue_free_rxslots(q) == 0) {
        DEBUG_QUEUE("e10k_%d: Not enough space in RX ring, not adding buffer\n",
                    q->id);
        return NIC_ERR_ENQUEUE;
    }

    if (q->use_vtd) {
        // get virtual address of buffer
        struct region_entry* entry = get_region(q, rid);
        assert(entry != NULL);

        lpaddr_t addr = (lpaddr_t) entry->virt + offset;
        e10k_queue_add_rxbuf(q, addr, rid, offset, length, valid_data,
                             valid_length, flags);
    } else {
        // get physical address of buffer
        struct region_entry* entry = get_region(q, rid);
        assert(entry != NULL);

        lpaddr_t addr = (lpaddr_t) entry->phys + offset;
        e10k_queue_add_rxbuf(q, addr, rid, offset, length, valid_data,
                             valid_length, flags);
    }

    DEBUG_QUEUE("before bump tail\n");
    e10k_queue_bump_rxtail(q);
    DEBUG_QUEUE("Enqueueing RX buf: terminated\n");

    return SYS_ERR_OK;
}
/******************************************************************************/
/* Queue functions */
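/* devif entry points. NETIF_RXFLAG/NETIF_TXFLAG in the request flags
 * select the ring; buffers must not exceed the 2048-byte hardware buffer
 * size configured for this queue. */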
static errval_t e10k_enqueue(struct devq* q, regionid_t rid, genoffset_t offset,
                             genoffset_t length, genoffset_t valid_data,
                             genoffset_t valid_length, uint64_t flags)
{
    errval_t err;

    struct e10k_queue* queue = (struct e10k_queue*) q;
    if (flags & NETIF_RXFLAG) {
        /* cannot enqueue receive buffers larger than 2048 bytes */
        assert(length <= 2048);

        err = enqueue_rx_buf(queue, rid, offset, length, valid_data,
                             valid_length, flags);
        if (err_is_fail(err)) {
            return err;
        }
    } else if (flags & NETIF_TXFLAG) {
        assert(length <= 2048);

        DEBUG_QUEUE("Enqueuing offset=%lu valid_data=%lu \n", offset, valid_data);
        err = enqueue_tx_buf(queue, rid, offset, length, valid_data,
                             valid_length, flags);
        if (err_is_fail(err)) {
            return err;
        }
    }

    return SYS_ERR_OK;
}
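/* Dequeue polls the RX ring for received packets first, then the TX ring
 * for buffers the hardware has finished transmitting. */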
static errval_t e10k_dequeue(struct devq* q, regionid_t* rid,
                             genoffset_t* offset, genoffset_t* length,
                             genoffset_t* valid_data,
                             genoffset_t* valid_length, uint64_t* flags)
{
    struct e10k_queue* que = (struct e10k_queue*) q;
    int last;
    errval_t err = SYS_ERR_OK;

    if (!e10k_queue_get_rxbuf(que, rid, offset, length, valid_data,
                              valid_length, flags, &last)) {
        err = DEVQ_ERR_QUEUE_EMPTY;
    } else {
        DEBUG_QUEUE("Received offset=%lu valid_data=%lu \n", *offset, *valid_data);
        return SYS_ERR_OK;
    }

    if (!e10k_queue_get_txbuf(que, rid, offset, length, valid_data,
                              valid_length, flags)) {
        err = DEVQ_ERR_QUEUE_EMPTY;
    } else {
        DEBUG_QUEUE("Sent offset=%lu valid_data=%lu \n", *offset, *valid_data);
        return SYS_ERR_OK;
    }

    return err;
}
static errval_t e10k_register(struct devq* q, struct capref cap, regionid_t rid)
{
    errval_t err;
    struct e10k_queue* queue = (struct e10k_queue*) q;

    struct frame_identity id;
    err = invoke_frame_identify(cap, &id);
    if (err_is_fail(err)) {
        return err;
    }

    struct capref cr;
    err = slot_alloc(&cr);
    if (err_is_fail(err)) {
        return err;
    }

    err = cap_copy(cr, cap);
    if (err_is_fail(err)) {
        return err;
    }

    void* va;
    err = vspace_map_one_frame_attr(&va, id.bytes, cr,
                                    VREGION_FLAGS_READ_WRITE_NOCACHE,
                                    NULL, NULL);
    if (err_is_fail(err)) {
        return err;
    }

    // keep track of regions since we need the virtual address ...
    struct region_entry* entry = malloc(sizeof(struct region_entry));
    assert(entry != NULL);
    entry->rid = rid;
    entry->phys = id.base;
    entry->virt = (lvaddr_t)va;
    entry->size = id.bytes;
    entry->next = NULL;

    // linked list of regions
    struct region_entry* cur = queue->regions;
    if (cur == NULL) {
        queue->regions = entry;
        return SYS_ERR_OK;
    }
    while (cur->next != NULL) {
        cur = cur->next;
    }
    cur->next = entry;

    return SYS_ERR_OK;
}
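/* Deregistering a region is not implemented yet; succeed as a no-op. */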
static errval_t e10k_deregister(struct devq* q, regionid_t rid)
{
    return SYS_ERR_OK;
}
static errval_t e10k_control(struct devq* q, uint64_t cmd, uint64_t value, uint64_t *result)
{
    struct e10k_queue* queue = (struct e10k_queue*) q;
    *result = queue->mac;
    DEBUG_QUEUE("Control cmd=%lu value=%lu \n", cmd, value);
    return SYS_ERR_OK;
}
static errval_t e10k_notify(struct devq* q)
{
    return SYS_ERR_OK;
}
/******************************************************************
 * Management functions
 ******************************************************************/
static void bind_cb(void *st, errval_t err, struct e10k_vf_binding *b)
{
    struct e10k_queue* q = (struct e10k_queue*) st;
    assert(err_is_ok(err));

    DEBUG_QUEUE("Successfully connected to management interface\n");

    q->binding = b;
    e10k_vf_rpc_client_init(q->binding);
    q->bound = true;
}
/** Connect to the management interface */
static void connect_to_mngif(struct e10k_queue* q)
{
    errval_t r;
    iref_t iref;

    q->bound = false;
    char name[strlen("e10k_vf") + 2];

    // Build label for internal management service
    sprintf(name, "%s%u", "e10k_vf", q->pci_function);

    // Connect to service
    DEBUG_QUEUE("Looking up management interface (%s)\n", name);
    r = nameservice_blocking_lookup(name, &iref);
    assert(err_is_ok(r));

    DEBUG_QUEUE("Binding to management interface\n");
    r = e10k_vf_bind(iref, bind_cb, q, get_default_waitset(),
                     IDC_BIND_FLAGS_DEFAULT);
    assert(err_is_ok(r));

    // Wait until the bind callback has fired
    while (!q->bound) {
        event_dispatch(get_default_waitset());
    }
}
/*********************************************************
 * Queue creation and destruction
 *********************************************************/
errval_t e10k_queue_destroy(struct e10k_queue* queue)
{
    // TODO: do the cleanup
    return SYS_ERR_OK;
}
static errval_t map_device_memory(struct e10k_queue* q,
                                  struct capref regs)
{
    struct frame_identity id = {.base = 0, .bytes = 0};
    errval_t err;

    err = invoke_frame_identify(regs, &id);
    if (err_is_fail(err)) {
        return err;
    }

    void* va;
    err = vspace_map_one_frame_attr(&va, id.bytes, regs,
                                    VREGION_FLAGS_READ_WRITE_NOCACHE,
                                    NULL, NULL);
    if (err_is_fail(err)) {
        return err;
    }

    DEBUG_QUEUE("mapped %zu bytes at address %p\n", id.bytes, va);

    q->d = malloc(sizeof(e10k_t));
    assert(q->d != NULL);
    e10k_initialize(q->d, va);

    return SYS_ERR_OK;
}
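/* Queue creation: allocate the descriptor rings, then either bring up the
 * hardware directly (VF case) or ask the PF driver over RPC to program the
 * queue and hand back a capability to its register region. */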
// TODO mostly cleanup when fail
errval_t e10k_queue_create(struct e10k_queue** queue, e10k_event_cb_t cb,
                           bool use_vf, bool interrupts)
{
    errval_t err;
    struct e10k_queue* q;

    q = malloc(sizeof(struct e10k_queue));
    assert(q != NULL);
    q->pci_function = 0; // TODO allow also function 1
    q->regions = NULL;
    q->use_vtd = false;

    if (use_vf) {
        USER_PANIC("NOT YET WORKING \n");

        // Start the VF driver if it is not running yet
        if (!e10k_vf_started()) {
            err = e10k_init_vf_driver(q->pci_function, interrupts);
            if (err_is_fail(err)) {
                return err;
            }
        }

        // If we cannot create any more queues, start a new VF
        if (!e10k_vf_can_create_queue()) {
            err = e10k_init_vf_driver(q->pci_function, interrupts);
            if (err_is_fail(err)) {
                return err;
            }
        }

        // TODO: VF queues only work with VT-d enabled?
        err = skb_client_connect();
        assert(err_is_ok(err));
        err = skb_execute_query("vtd_enabled(0,_).");
        if (err_is_fail(err)) {
            DEBUG_QUEUE("Assume disabled VT-d \n");
            //q->use_vtd = false;
        } else {
            DEBUG_QUEUE("Assume enabled VT-d \n");
            //q->use_vtd = true;
        }
    } else {
        // need to set up communication to PF
        connect_to_mngif(q);
    }
    // allocate memory for RX/TX rings
    struct capref tx_frame;
    size_t tx_size = e10k_q_tdesc_adv_ctx_size*NUM_TX_DESC;
    void* tx_virt = alloc_map_frame(VREGION_FLAGS_READ_WRITE, tx_size, &tx_frame);
    if (tx_virt == NULL) {
        return DEVQ_ERR_INIT_QUEUE;
    }

    struct capref rx_frame;
    // advanced RX and TX descriptors have the same 16-byte size, so the TX
    // descriptor size constant is also used for the RX ring here
    size_t rx_size = e10k_q_tdesc_adv_ctx_size*NUM_RX_DESC;
    void* rx_virt = alloc_map_frame(VREGION_FLAGS_READ_WRITE, rx_size, &rx_frame);
    if (rx_virt == NULL) {
        return DEVQ_ERR_INIT_QUEUE;
    }

    struct e10k_queue_ops ops = {
        .update_txtail = update_txtail,
        .update_rxtail = update_rxtail,
    };

    // TX head-index write-back buffer; allocated up front even though its
    // use is disabled by default below
    struct capref txhwb_frame = NULL_CAP;
    void* txhwb_virt = NULL;
    txhwb_virt = alloc_map_frame(VREGION_FLAGS_READ_WRITE, BASE_PAGE_SIZE,
                                 &txhwb_frame);
    if (txhwb_virt == NULL) {
        return DEVQ_ERR_INIT_QUEUE;
    }
    memset(txhwb_virt, 0, sizeof(uint32_t));

    e10k_queue_init(q, tx_virt, NUM_TX_DESC, txhwb_virt,
                    rx_virt, NUM_RX_DESC, &ops);
527 DEBUG_QUEUE("Local queue init done\n");
530 q->rx_frame = rx_frame;
531 q->tx_frame = tx_frame;
532 q->txhwb_frame = txhwb_frame;
533 q->use_irq = interrupts;
535 // XXX:disable by default for now
536 q->use_txhwb = false;
540 err = e10k_vf_init_queue_hw(q);
541 if (err_is_fail(err)) {
552 err = pci_setup_inthandler(interrupt_handler, NULL, &vector);
553 assert(err_is_ok(err));
554 core = disp_get_core_id();
556 // TODO setup MSI-X interrupts
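        // The create_queue RPC yields two error values: err covers the
        // transport, err2 carries the PF driver's own result; both must be
        // checked.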
        // Inform card driver about new queue and get the registers/queue id
        err = slot_alloc(&regs);
        if (err_is_fail(err)) {
            return err;
        }

        err = q->binding->rpc_tx_vtbl.create_queue(q->binding, tx_frame, txhwb_frame,
                                                   rx_frame, 2048, q->msix_intvec,
                                                   q->msix_intdest, false, false,
                                                   &q->mac, &qid, &regs, &err2);
        if (err_is_fail(err) || err_is_fail(err2)) {
            DEBUG_QUEUE("e10k rpc error\n");
            return err_is_fail(err) ? err : err2;
        }

        q->id = (uint16_t)qid;

        err = map_device_memory(q, regs);
        if (err_is_fail(err)) {
            DEBUG_QUEUE("e10k map device error\n");
            return err;
        }
    }
    err = devq_init(&q->q, false);
    if (err_is_fail(err)) {
        DEBUG_QUEUE("e10k devq_init error\n");
        return err;
    }

    q->q.f.enq = e10k_enqueue;
    q->q.f.deq = e10k_dequeue;
    q->q.f.reg = e10k_register;
    q->q.f.dereg = e10k_deregister;
    q->q.f.ctrl = e10k_control;
    q->q.f.notify = e10k_notify;

    *queue = q;

    DEBUG_QUEUE("e10k queue init done\n");
    return SYS_ERR_OK;
}

uint64_t e10k_queue_get_id(struct e10k_queue* q)
{
    return q->id;
}