 * Copyright (c) 2007-2011, 2017, ETH Zurich.
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
#include <net_interfaces/flags.h>
#include <devif/queue_interface.h>
#include "../../../queue_interface_internal.h"
#include <dev/e10k_q_dev.h>
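
// Tail-update callbacks installed by the concrete queue implementation. They
// are expected to publish the new tail index to the device, e.g. by writing
// the TX/RX tail registers (directly, or via the VF interface).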
struct e10k_queue_ops {
    errval_t (*update_txtail)(struct e10k_queue*, size_t);
    errval_t (*update_rxtail)(struct e10k_queue*, size_t);
/* Context structure for RX descriptors. This is needed to implement RSC, since
 * we need to be able to chain buffers together. */
struct e10k_queue_rxctx {
    struct e10k_queue_rxctx *previous;

    struct region_entry* next;
    bool use_vf;  // use VF for this queue
    bool use_rsc; // Receive Side Coalescing
    bool use_vtd; // Virtual addressing (required for VF)

    struct region_entry* regions;

    struct capref rx_frame;
    struct capref tx_frame;
    struct capref txhwb_frame;

    struct e10k_vf_binding *binding;
    // FIXME: Look for an appropriate type for the _head/_tail/_size fields
    e10k_q_tdesc_adv_wb_array_t* tx_ring;
    struct devq_buf* tx_bufs;

    size_t tx_tail, tx_lasttail;

    e10k_q_rdesc_adv_wb_array_t* rx_ring;
    struct devq_buf* rx_bufs;
    struct e10k_queue_rxctx* rx_context;

    struct e10k_queue_ops ops;
typedef struct e10k_queue e10k_queue_t;

// Does not initialize the queue struct itself
static inline void e10k_queue_init(struct e10k_queue* q, void* tx, size_t tx_size,
                                   uint32_t* tx_hwb, void* rx, size_t rx_size,
                                   struct e10k_queue_ops* ops)
    q->tx_bufs = calloc(tx_size, sizeof(struct devq_buf));
    q->tx_isctx = calloc(tx_size, sizeof(bool));

    q->tx_tail = q->tx_lasttail = 0;
    q->tx_size = tx_size;

    q->rx_bufs = calloc(rx_size, sizeof(struct devq_buf));
    q->rx_context = calloc(rx_size, sizeof(*q->rx_context));

    q->rx_size = rx_size;
    // Initialize ring memory to zero
    memset(tx, 0, tx_size * e10k_q_tdesc_adv_wb_size);
    memset(rx, 0, rx_size * e10k_q_rdesc_adv_wb_size);
    memset(q->tx_isctx, 0, tx_size * sizeof(bool));
    memset(q->rx_context, 0, rx_size * sizeof(*q->rx_context));
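
/* A minimal usage sketch (assumption: the caller has allocated and mapped
 * DMA-able descriptor memory for both rings and implements the tail-update
 * callbacks; my_update_txtail/my_update_rxtail and the *_mem pointers are
 * hypothetical names, not part of this interface):
 *
 *   struct e10k_queue_ops ops = {
 *       .update_txtail = my_update_txtail, // e.g. writes the TX tail register
 *       .update_rxtail = my_update_rxtail, // e.g. writes the RX tail register
 *   };
 *   e10k_queue_init(q, tx_mem, 512, txhwb_mem, rx_mem, 512, &ops);
 */
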
static inline int e10k_queue_add_txcontext(e10k_queue_t* q, uint8_t idx,
                                           uint8_t maclen, uint16_t iplen,
                                           uint8_t l4len, e10k_q_l4_type_t l4t)
    e10k_q_tdesc_adv_ctx_t d;
    size_t tail = q->tx_tail;

    memset(q->tx_ring[tail], 0, e10k_q_tdesc_adv_wb_size);

    // TODO: Check if there is room in the queue
    q->tx_isctx[tail] = true;
    d = q->tx_ring[tail];

    e10k_q_tdesc_adv_rd_dtyp_insert(d, e10k_q_adv_ctx);
    e10k_q_tdesc_adv_rd_dext_insert(d, 1);

    /* e10k_q_tdesc_adv_ctx_bcntlen_insert(d, 0x3f); */
    e10k_q_tdesc_adv_ctx_idx_insert(d, idx);
    e10k_q_tdesc_adv_ctx_maclen_insert(d, maclen);
    e10k_q_tdesc_adv_ctx_iplen_insert(d, iplen);
    e10k_q_tdesc_adv_ctx_ipv4_insert(d, 1);
    e10k_q_tdesc_adv_ctx_l4len_insert(d, l4len);
    e10k_q_tdesc_adv_ctx_l4t_insert(d, l4t);

    q->tx_lasttail = q->tx_tail;
    q->tx_tail = (tail + 1) % q->tx_size;
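
/* Example (sketch): program context slot 0 for TCP-over-IPv4 checksum offload
 * with a 14-byte Ethernet header, a 20-byte IPv4 header and a 20-byte TCP
 * header. E10K_Q_L4T_TCP stands in for the generated L4-type constant; its
 * exact name depends on the mackerel device definition:
 *
 *   e10k_queue_add_txcontext(q, 0, 14, 20, 20, E10K_Q_L4T_TCP);
 */
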
// len is the length of this descriptor's buffer only; length is the total packet length
static inline int e10k_queue_add_txbuf_ctx(e10k_queue_t* q, lpaddr_t phys,
                                           genoffset_t valid_data,
                                           genoffset_t valid_length,
                                           bool first, bool last,
                                           size_t len, uint8_t ctx,
                                           bool ixsm, bool txsm)
    e10k_q_tdesc_adv_rd_t d;
    size_t tail = q->tx_tail;

    memset(q->tx_ring[tail], 0, e10k_q_tdesc_adv_wb_size);

    // TODO: Check if there is room in the queue
    q->tx_isctx[tail] = false;
    struct devq_buf* buf = &q->tx_bufs[tail];

    buf->offset = offset;
    buf->length = length;
    buf->valid_data = valid_data;
    buf->valid_length = valid_length;

    d = q->tx_ring[tail];

    e10k_q_tdesc_adv_rd_buffer_insert(d, phys);
    e10k_q_tdesc_adv_rd_dtalen_insert(d, len);
    e10k_q_tdesc_adv_rd_paylen_insert(d, length);
    e10k_q_tdesc_adv_rd_dtyp_insert(d, e10k_q_adv_data);
    e10k_q_tdesc_adv_rd_dext_insert(d, 1);
    e10k_q_tdesc_adv_rd_rs_insert(d, (last == 1));
    e10k_q_tdesc_adv_rd_ifcs_insert(d, 1);
    e10k_q_tdesc_adv_rd_eop_insert(d, last);

    if (ctx != (uint8_t)-1) {
        e10k_q_tdesc_adv_rd_idx_insert(d, ctx);
        e10k_q_tdesc_adv_rd_cc_insert(d, 1);
        e10k_q_tdesc_adv_rd_ixsm_insert(d, ixsm);
        e10k_q_tdesc_adv_rd_txsm_insert(d, txsm);
    }

    q->tx_lasttail = q->tx_tail;
    q->tx_tail = (tail + 1) % q->tx_size;
static inline int e10k_queue_add_txbuf_legacy(e10k_queue_t* q, lpaddr_t phys,
                                              genoffset_t valid_data,
                                              genoffset_t valid_length,
                                              bool first, bool last,
    e10k_q_tdesc_legacy_t d;
    size_t tail = q->tx_tail;

    struct devq_buf* buf = &q->tx_bufs[tail];

    buf->offset = offset;
    buf->length = length;
    buf->valid_data = valid_data;
    buf->valid_length = valid_length;

    d = q->tx_ring[tail];

    e10k_q_tdesc_legacy_buffer_insert(d, phys);
    e10k_q_tdesc_legacy_length_insert(d, len);
    // OPTIMIZATION: Maybe only set rs on the last packet?
    e10k_q_tdesc_legacy_rs_insert(d, (last == 1));
    e10k_q_tdesc_legacy_ifcs_insert(d, 1);
    e10k_q_tdesc_legacy_eop_insert(d, last);

    q->tx_tail = (tail + 1) % q->tx_size;
static inline int e10k_queue_add_txbuf(e10k_queue_t* q, lpaddr_t phys,
                                       genoffset_t valid_data,
                                       genoffset_t valid_length,
                                       bool first, bool last,

        return e10k_queue_add_txbuf_legacy(q, phys, rid, offset, length,
                                           valid_data, valid_length,

        return e10k_queue_add_txbuf_ctx(q, phys, rid, offset, length,
                                        valid_data, valid_length,
                                        len, -1, false, false);
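
/* Typical TX path (sketch; argument list abbreviated): enqueue a packet that
 * fits in a single descriptor (first == last == true), then publish the new
 * tail so the NIC starts fetching descriptors:
 *
 *   e10k_queue_add_txbuf(q, phys, ...);  // fill one descriptor
 *   e10k_queue_bump_txtail(q);           // writes tail via ops.update_txtail
 */
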
/**
 * Reclaim a single buffer from the TX queue once the card has processed it.
 * Call multiple times to reclaim more buffers.
 *
 * \param q Queue to check; the out-parameters receive the metadata of the
 *          reclaimed buffer, if any
 *
 * \return true if a buffer could be reclaimed, false otherwise
 */
static inline bool e10k_queue_get_txbuf_adv(e10k_queue_t* q, regionid_t* rid,
                                            genoffset_t* valid_data,
                                            genoffset_t* valid_length,
    /* e10k_q_tdesc_adv_wb_t d; */
    size_t head = q->tx_head;

    // If head write-back is enabled, the NIC DMAs its current head index into
    // host memory, so we can skip reading the descriptor if nothing happened.
    if (q->tx_hwb && *((uint32_t*)q->tx_hwb) == head) {

    // Skip over context and non-EOP descriptors
    while (idx != q->tx_tail && q->tx_isctx[idx] &&
           !e10k_q_tdesc_adv_wb_dd_extract(q->tx_ring[idx])) {
        idx = (idx + 1) % q->tx_size;
    }

    if (idx == q->tx_tail) {

    // The last packet got written out; now reclaim from the head pointer.
    if (!q->tx_isctx[head]) {
        //*opaque = q->tx_opaque[head];
        *rid = q->tx_bufs[head].rid;
        *offset = q->tx_bufs[head].offset;
        *length = q->tx_bufs[head].length;
        *valid_data = q->tx_bufs[head].valid_data;
        *valid_length = q->tx_bufs[head].valid_length;
        *flags = q->tx_bufs[head].flags;
    }

    /* memset(q->tx_ring[head], 0, e10k_q_tdesc_adv_wb_size); */
    q->tx_head = (head + 1) % q->tx_size;
static inline bool e10k_queue_get_txbuf_legacy(e10k_queue_t* q, regionid_t* rid,
                                               genoffset_t* valid_data,
                                               genoffset_t* valid_length,
    e10k_q_tdesc_legacy_t d;
    size_t head = q->tx_head;

    d = q->tx_ring[head];
    if (e10k_q_tdesc_legacy_dd_extract(d)) {
        *rid = q->tx_bufs[head].rid;
        *offset = q->tx_bufs[head].offset;
        *length = q->tx_bufs[head].length;
        *valid_data = q->tx_bufs[head].valid_data;
        *valid_length = q->tx_bufs[head].valid_length;
        *flags = q->tx_bufs[head].flags;
        memset(d, 0, e10k_q_tdesc_legacy_size);

        q->tx_head = (head + 1) % q->tx_size;

    head = *((uint32_t*) q->tx_hwb);
    if (q->tx_head == head) {

        *rid = q->tx_bufs[q->tx_head].rid;
        *offset = q->tx_bufs[q->tx_head].offset;
        *length = q->tx_bufs[q->tx_head].length;
        *valid_data = q->tx_bufs[q->tx_head].valid_data;
        *valid_length = q->tx_bufs[q->tx_head].valid_length;
        *flags = q->tx_bufs[q->tx_head].flags;
        memset(d, 0, e10k_q_tdesc_legacy_size);

        q->tx_head = (q->tx_head + 1) % q->tx_size;
static inline bool e10k_queue_get_txbuf(e10k_queue_t* q, regionid_t* rid,
                                        genoffset_t* valid_data,
                                        genoffset_t* valid_length,

        return e10k_queue_get_txbuf_legacy(q, rid, offset, length, valid_data,
                                           valid_length, flags);

        return e10k_queue_get_txbuf_adv(q, rid, offset, length, valid_data,
                                        valid_length, flags);
static inline errval_t e10k_queue_bump_txtail(e10k_queue_t* q)
{
    return q->ops.update_txtail(q, q->tx_tail);
}

static inline size_t e10k_queue_free_txslots(e10k_queue_t* q)
{
    size_t head = q->tx_head;
    size_t tail = q->tx_tail;
    size_t size = q->tx_size;

    if (tail >= head) {
        // One slot is always kept free so that tail == head unambiguously
        // means "empty" rather than "full".
        return size - (tail - head) - 1;
    } else {
        return size - (tail + size - head) - 1;
    }
}
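
/* Worked example: size = 8, head = 2, tail = 6 gives 8 - (6 - 2) - 1 = 3 free
 * slots; with wrap-around, head = 6, tail = 2 gives 8 - (2 + 8 - 6) - 1 = 3. */
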
static inline int e10k_queue_add_rxbuf_adv(e10k_queue_t* q,
                                           genoffset_t valid_data,
                                           genoffset_t valid_length,
    e10k_q_rdesc_adv_rd_t d;
    size_t tail = q->rx_tail;
    struct e10k_queue_rxctx *ctx;

    ctx = q->rx_context + tail;
        printf("e10k: RX context already in use!\n");

    // TODO: Check if there is room in the queue

    ctx->buf.offset = offset;
    ctx->buf.length = length;
    ctx->buf.valid_data = valid_data;
    ctx->buf.valid_length = valid_length;
    ctx->buf.flags = flags;

    d = (e10k_q_rdesc_adv_rd_t) q->rx_ring[tail];

    e10k_q_rdesc_adv_rd_buffer_insert(d, phys);
    // TODO: Does this make sense for RSC?
    e10k_q_rdesc_adv_rd_hdr_buffer_insert(d, 0);

    q->rx_tail = (tail + 1) % q->rx_size;
static inline int e10k_queue_add_rxbuf_legacy(e10k_queue_t* q,
                                              genoffset_t valid_data,
                                              genoffset_t valid_length,
    e10k_q_rdesc_legacy_t d;
    size_t tail = q->rx_tail;

    struct devq_buf* buf = &q->rx_bufs[tail];

    buf->offset = offset;
    buf->length = length;
    buf->valid_data = valid_data;
    buf->valid_length = valid_length;

    d = q->rx_ring[tail];

    e10k_q_rdesc_legacy_buffer_insert(d, phys);

    q->rx_tail = (tail + 1) % q->rx_size;
static inline int e10k_queue_add_rxbuf(e10k_queue_t* q,
                                       genoffset_t valid_data,
                                       genoffset_t valid_length,

        return e10k_queue_add_rxbuf_legacy(q, phys, rid, offset, length, valid_data,
                                           valid_length, flags);

        return e10k_queue_add_rxbuf_adv(q, phys, rid, offset, length, valid_data,
                                        valid_length, flags);
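
/* Typical RX refill (sketch; argument list abbreviated): post as many free
 * buffers as the ring accepts, then publish the tail so the NIC can use them:
 *
 *   while (e10k_queue_free_rxslots(q) > 0) {
 *       e10k_queue_add_rxbuf(q, phys, ...); // hand one empty buffer to the ring
 *   }
 *   e10k_queue_bump_rxtail(q);              // writes tail via ops.update_rxtail
 */
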
static inline uint64_t e10k_queue_convert_rxflags(e10k_q_rdesc_adv_wb_t d)
{
    uint64_t flags = 0;

    if (e10k_q_rdesc_adv_wb_ipcs_extract(d)) {
        flags |= NETIF_RXFLAG_IPCHECKSUM;
        if (!e10k_q_rdesc_adv_wb_ipe_extract(d)) {
            flags |= NETIF_RXFLAG_IPCHECKSUM_GOOD;
        }
    }
    if (e10k_q_rdesc_adv_wb_l4i_extract(d)) {
        flags |= NETIF_RXFLAG_L4CHECKSUM;
        if (!e10k_q_rdesc_adv_wb_l4e_extract(d)) {
            flags |= NETIF_RXFLAG_L4CHECKSUM_GOOD;
        }
    }
    if (e10k_q_rdesc_adv_wb_pt_ipv4_extract(d)) {
        flags |= NETIF_RXFLAG_TYPE_IPV4;
    }
    if (e10k_q_rdesc_adv_wb_pt_tcp_extract(d)) {
        flags |= NETIF_RXFLAG_TYPE_TCP;
    }
    if (e10k_q_rdesc_adv_wb_pt_udp_extract(d)) {
        flags |= NETIF_RXFLAG_TYPE_UDP;
    }

    return flags;
}
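
/* Note the two-level scheme above: NETIF_RXFLAG_*CHECKSUM means the hardware
 * checked the checksum at all; NETIF_RXFLAG_*CHECKSUM_GOOD means it passed. */
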
static inline bool e10k_queue_get_rxbuf_adv(e10k_queue_t* q, regionid_t* rid,
                                            genoffset_t* valid_data,
                                            genoffset_t* valid_length,
    e10k_q_rdesc_adv_wb_t d;
    size_t head = q->rx_head;
    struct e10k_queue_rxctx *ctx;

    d = q->rx_ring[head];
    ctx = q->rx_context + head;

    if (!e10k_q_rdesc_adv_wb_dd_extract(d)) {

    // Barrier needed, according to the Linux driver, to make sure nothing
    // else is read before the DD bit. TODO: verify

    // TODO: add code for RSC

    *flags = ctx->buf.flags;
    // Set flags if this is a descriptor with EOP
    // TODO: with multi-part packets, we want these flags on the first packet
    if (e10k_q_rdesc_adv_wb_eop_extract(d)) {
        *flags = *flags | e10k_queue_convert_rxflags(d);
    }

    // TODO: Extract status (okay/error)
    *last = e10k_q_rdesc_adv_wb_eop_extract(d);
    *valid_length = e10k_q_rdesc_adv_wb_pkt_len_extract(d);

    *offset = ctx->buf.offset;
    *length = ctx->buf.length;
    *valid_data = ctx->buf.valid_data;

    memset(d, 0, e10k_q_rdesc_adv_wb_size);

    q->rx_head = (head + 1) % q->rx_size;
static inline bool e10k_queue_get_rxbuf_legacy(e10k_queue_t* q, regionid_t* rid,
                                               genoffset_t* valid_data,
                                               genoffset_t* valid_length,
    e10k_q_rdesc_legacy_t d;
    size_t head = q->rx_head;
    struct devq_buf* buf = &q->rx_bufs[head];

    d = q->rx_ring[head];
    if (e10k_q_rdesc_legacy_dd_extract(d)) {
        *last = e10k_q_rdesc_legacy_eop_extract(d);
        *valid_length = e10k_q_rdesc_legacy_length_extract(d);

        *offset = buf->offset;
        *length = buf->length;
        *valid_data = buf->valid_data;

        memset(d, 0, e10k_q_rdesc_legacy_size);

        q->rx_head = (head + 1) % q->rx_size;
static inline bool e10k_queue_get_rxbuf(e10k_queue_t* q, regionid_t* rid,
                                        genoffset_t* valid_data,
                                        genoffset_t* valid_length,

        return e10k_queue_get_rxbuf_legacy(q, rid, offset, length, valid_data, valid_length,

        return e10k_queue_get_rxbuf_adv(q, rid, offset, length, valid_data, valid_length,
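
/* Typical RX poll (sketch; argument list abbreviated): drain completed
 * descriptors before refilling the ring with e10k_queue_add_rxbuf() and
 * e10k_queue_bump_rxtail():
 *
 *   while (e10k_queue_get_rxbuf(q, &rid, &offset, &length, ...)) {
 *       // process one completed buffer; *last marks the end of a packet
 *   }
 */
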
static inline errval_t e10k_queue_bump_rxtail(e10k_queue_t* q)
{
    return q->ops.update_rxtail(q, q->rx_tail);
}

static inline size_t e10k_queue_free_rxslots(e10k_queue_t* q)
{
    size_t head = q->rx_head;
    size_t tail = q->rx_tail;
    size_t size = q->rx_size;

    if (tail >= head) {
        return size - (tail - head) - 1; // one slot kept free, as in free_txslots
    } else {
        return size - (tail + size - head) - 1;
    }
}
#endif // ndef E10K_QUEUE_H_