/*
 * Copyright (c) 2007-2011, ETH Zurich.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
 */

#ifndef SFN5122F_CHANNEL_H_
#define SFN5122F_CHANNEL_H_

#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#include <net_interfaces/net_interfaces.h>
#include <devif/queue_interface.h>
#include <dev/sfn5122f_q_dev.h>
#include <dev/sfn5122f_dev.h>

struct sfn5122f_devif_binding;
struct sfn5122f_devif_rpc_client;
struct sfn5122f_queue;

struct sfn5122f_queue_ops {
    errval_t (*update_txtail)(struct sfn5122f_queue*, size_t);
    errval_t (*update_rxtail)(struct sfn5122f_queue*, size_t);
};

// linked list of registered memory regions
struct region_entry {
    struct region_entry* next;
};

struct sfn5122f_queue {
    union {
        sfn5122f_q_tx_user_desc_array_t* user;
        sfn5122f_q_tx_ker_desc_array_t* ker;
    } tx_ring;
    struct devq_buf* tx_bufs;
    size_t tx_head;
    size_t tx_tail;
    size_t tx_size;

    union {
        sfn5122f_q_rx_user_desc_array_t* user;
        sfn5122f_q_rx_ker_desc_array_t* ker;
    } rx_ring;
    struct devq_buf* rx_bufs;
    size_t rx_head;
    size_t rx_tail;
    uint8_t rx_batch_size;
    size_t rx_size;

    sfn5122f_q_event_entry_array_t* ev_ring;
    size_t ev_head;
    size_t ev_size;

    struct sfn5122f_queue_ops ops;
    bool userspace;

    // For batching of TX events: at most 32 entries,
    // since there can be at most TX_CACHE descriptors per event
    struct devq_buf bufs[32];
    uint8_t last_deq; // last dequeued entry in bufs
    size_t num_left;  // entries of the current batch still in bufs

    // state for the devif interface
    struct sfn5122f_devif_binding* b;
    struct sfn5122f_devif_rpc_client* rpc;

    sfn5122f_event_cb_t cb;

    // Direct interface fields
    struct region_entry* regions;
};

typedef struct sfn5122f_queue sfn5122f_queue_t;

static inline sfn5122f_queue_t* sfn5122f_queue_init(void* tx,
                                                    size_t tx_size,
                                                    void* rx,
                                                    size_t rx_size,
                                                    void* ev,
                                                    size_t ev_size,
                                                    struct sfn5122f_queue_ops* ops,
                                                    bool userspace)
{
    sfn5122f_queue_t* q = malloc(sizeof(*q));

    q->tx_ring.user = tx;
    q->tx_bufs = malloc(sizeof(struct devq_buf) * tx_size);
    q->tx_head = 0;
    q->tx_tail = 0;
    q->tx_size = tx_size;

    q->rx_ring.user = rx;
    q->rx_bufs = malloc(sizeof(struct devq_buf) * rx_size);
    q->rx_head = 0;
    q->rx_tail = 0;
    q->rx_batch_size = 0;
    q->rx_size = rx_size;

    q->ev_ring = ev;
    q->ev_head = 0;
    q->ev_size = ev_size;
    q->userspace = userspace;
    q->ops = *ops;

    // Initialize ring memory with 0xff
    if (!userspace) {
        memset(tx, 0xff, tx_size * sfn5122f_q_tx_ker_desc_size);
        memset(rx, 0xff, rx_size * sfn5122f_q_rx_ker_desc_size);
    } else {
        memset(tx, 0xff, tx_size * sfn5122f_q_tx_user_desc_size);
        memset(rx, 0xff, rx_size * sfn5122f_q_rx_user_desc_size);
    }

    /* an all-zero entry could still be a valid event, so fill with 0xff */
    memset(ev, 0xff, ev_size * sfn5122f_q_event_entry_size);

    return q;
}

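/*
 * Illustrative sketch (not part of the driver): how a caller might wire up
 * the ops table and initialize a queue. The ring buffers come from malloc
 * purely for illustration; the real driver maps DMA-able frames. The
 * example_update_* functions are hypothetical stand-ins for code that
 * writes the hardware tail (doorbell) registers.
 */
static inline errval_t example_update_txtail(struct sfn5122f_queue* q, size_t tail)
{
    // would write tail to the TX descriptor ring doorbell register here
    return SYS_ERR_OK;
}

static inline errval_t example_update_rxtail(struct sfn5122f_queue* q, size_t tail)
{
    // would write tail to the RX descriptor ring doorbell register here
    return SYS_ERR_OK;
}

static inline sfn5122f_queue_t* example_setup(void)
{
    struct sfn5122f_queue_ops ops = {
        .update_txtail = example_update_txtail,
        .update_rxtail = example_update_rxtail,
    };

    // ring sizes are illustrative
    size_t tx_size = 512, rx_size = 512, ev_size = 1024;
    void* tx = malloc(tx_size * sfn5122f_q_tx_user_desc_size);
    void* rx = malloc(rx_size * sfn5122f_q_rx_user_desc_size);
    void* ev = malloc(ev_size * sfn5122f_q_event_entry_size);

    // userspace == true selects the user-level descriptor format
    return sfn5122f_queue_init(tx, tx_size, rx, rx_size, ev, ev_size,
                               &ops, true);
}
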
static inline errval_t sfn5122f_queue_free(struct sfn5122f_queue* q)
{
    errval_t err;

    // only one cap is mapped (the TX ring)
    if (q->userspace) {
        err = vspace_unmap(q->tx_ring.user);
    } else {
        err = vspace_unmap(q->tx_ring.ker);
    }
    if (err_is_fail(err)) {
        return err;
    }

    free(q->rx_bufs);
    free(q->tx_bufs);
    free(q);

    return SYS_ERR_OK;
}

static inline uint8_t sfn5122f_get_event_code(sfn5122f_queue_t* queue)
{
    sfn5122f_q_event_entry_t ev;
    ev = queue->ev_ring[queue->ev_head];
    return sfn5122f_q_event_entry_ev_code_extract(ev);
}

static inline errval_t sfn5122f_queue_bump_txtail(sfn5122f_queue_t* q)
{
    return q->ops.update_txtail(q, q->tx_tail);
}

static inline errval_t sfn5122f_queue_bump_rxtail(sfn5122f_queue_t* q)
{
    return q->ops.update_rxtail(q, q->rx_tail);
}

static inline errval_t sfn5122f_handle_drv_ev(sfn5122f_queue_t* q, uint16_t n)
{
    size_t ev_head = q->ev_head;

    sfn5122f_q_event_entry_t code;
    code = q->ev_ring[ev_head];

    if (sfn5122f_q_driver_ev_driver_ev_subcode_extract(code) == 2) {
        printf("Event queue init done %d\n", n);
    }

    if (sfn5122f_q_driver_ev_driver_ev_subcode_extract(code) == 9) {
        printf("Packet neither TCP nor UDP %d\n", n);
    }

    if (sfn5122f_q_driver_ev_driver_ev_subcode_extract(code) == 14) {
        printf("RX error %d\n", n);
        return NIC_ERR_RX_PKT;
    }

    if (sfn5122f_q_driver_ev_driver_ev_subcode_extract(code) == 15) {
        printf("TX error %d\n", n);
        return NIC_ERR_TX_PKT;
    }

    memset(code, 0xff, sfn5122f_q_event_entry_size);

    return SYS_ERR_OK;
}

static inline errval_t sfn5122f_queue_handle_mcdi_event(sfn5122f_queue_t* q)
{
    // TODO handle different events
    size_t ev_head = q->ev_head;
    sfn5122f_q_event_entry_t ev;
    uint64_t reg;

    ev = q->ev_ring[ev_head];
    reg = sfn5122f_q_event_entry_ev_data_extract(ev);
    memset(ev, 0xff, sfn5122f_q_event_entry_size);

    return SYS_ERR_OK;
}

static inline int sfn5122f_queue_add_user_rxbuf_devif(sfn5122f_queue_t* q,
                                                      uint32_t buf_id,
                                                      uint16_t b_off,
                                                      regionid_t rid,
                                                      genoffset_t offset,
                                                      genoffset_t length,
                                                      genoffset_t valid_data,
                                                      genoffset_t valid_length,
                                                      uint64_t flags)
{
    struct devq_buf* buf;
    sfn5122f_q_rx_user_desc_t d;
    size_t tail = q->rx_tail;

    d = q->rx_ring.user[tail];
    buf = &q->rx_bufs[tail];

    buf->rid = rid;
    buf->offset = offset;
    buf->length = length;
    buf->valid_data = valid_data;
    buf->valid_length = valid_length;
    buf->flags = flags;

    sfn5122f_q_rx_user_desc_rx_user_buf_id_insert(d, buf_id);
    sfn5122f_q_rx_user_desc_rx_user_2byte_offset_insert(d, b_off >> 1);
    q->rx_tail = (tail + 1) % q->rx_size;

    return 0;
}

static inline int sfn5122f_queue_add_rxbuf_devif(sfn5122f_queue_t* q,
                                                 lpaddr_t addr,
                                                 regionid_t rid,
                                                 genoffset_t offset,
                                                 genoffset_t length,
                                                 genoffset_t valid_data,
                                                 genoffset_t valid_length,
                                                 uint64_t flags)
{
    struct devq_buf* buf;
    sfn5122f_q_rx_ker_desc_t d;
    size_t tail = q->rx_tail;

    d = q->rx_ring.ker[tail];

    buf = &q->rx_bufs[tail];

    buf->rid = rid;
    buf->offset = offset;
    buf->length = length;
    buf->valid_data = valid_data;
    buf->valid_length = valid_length;
    buf->flags = flags;

    sfn5122f_q_rx_ker_desc_rx_ker_buf_addr_insert(d, addr);
    sfn5122f_q_rx_ker_desc_rx_ker_buf_region_insert(d, 0);
    sfn5122f_q_rx_ker_desc_rx_ker_buf_size_insert(d, length);
    q->rx_tail = (tail + 1) % q->rx_size;

    return 0;
}

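/*
 * Illustrative sketch (not part of the driver): refilling the RX ring in
 * physical-address mode and publishing the new tail once per batch. The
 * base address, buffer length, and region id are hypothetical; a real
 * caller would take them from a registered region.
 */
static inline errval_t example_rx_refill(sfn5122f_queue_t* q,
                                         lpaddr_t base, size_t buf_len,
                                         regionid_t rid, size_t count)
{
    for (size_t i = 0; i < count; i++) {
        // one fixed-size buffer per descriptor; offsets are illustrative
        sfn5122f_queue_add_rxbuf_devif(q, base + i * buf_len, rid,
                                       i * buf_len, buf_len, 0, 0, 0);
    }
    // a single doorbell write covers the whole batch
    return sfn5122f_queue_bump_rxtail(q);
}
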
static inline errval_t sfn5122f_queue_handle_rx_ev_devif(sfn5122f_queue_t* q,
                                                         regionid_t* rid,
                                                         genoffset_t* offset,
                                                         genoffset_t* length,
                                                         genoffset_t* valid_data,
                                                         genoffset_t* valid_length,
                                                         uint64_t* flags)
{
    /* Only one event is generated even if there is more than one
       descriptor per packet */
    struct devq_buf* buf;
    size_t rx_head;
    sfn5122f_q_rx_ev_t ev;
    //sfn5122f_q_rx_user_desc_t d_user = 0;
    //sfn5122f_q_rx_ker_desc_t d = 0;

    ev = q->ev_ring[q->ev_head];
    rx_head = sfn5122f_q_rx_ev_rx_ev_desc_ptr_extract(ev);

    buf = &q->rx_bufs[rx_head];

    *rid = buf->rid;
    *offset = buf->offset;
    *length = buf->length;
    *valid_data = buf->valid_data;
    *flags = buf->flags;

    if (!sfn5122f_q_rx_ev_rx_ev_pkt_ok_extract(ev)) {
        // TODO error handling
        q->rx_head = (rx_head + 1) % q->rx_size;
        if (sfn5122f_q_rx_ev_rx_ev_tobe_disc_extract(ev)) {
            // packet discarded by software -> ok
            return NIC_ERR_RX_DISCARD;
        }

        if (sfn5122f_q_rx_ev_rx_ev_buf_owner_id_extract(ev)) {
            printf("Wrong owner\n");
        }
        return NIC_ERR_RX_PKT;
    }

    *valid_length = sfn5122f_q_rx_ev_rx_ev_byte_ctn_extract(ev);
    /* A length of 0 is treated as 16384 bytes */
    if (*valid_length == 0) {
        *valid_length = 16384;
    }

    //d_user = q->tx_ring.user[q->tx_head];
    //d = q->tx_ring.ker[q->tx_head];

    /* only have to reset the event entry */
    memset(ev, 0xff, sfn5122f_q_event_entry_size);

    q->rx_head = (rx_head + 1) % q->rx_size;
    return SYS_ERR_OK;
}

static inline void sfn5122f_queue_bump_evhead(sfn5122f_queue_t* q)
{
    q->ev_head = (q->ev_head + 1) % q->ev_size;
}

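/*
 * Illustrative sketch (not part of the driver): a minimal event-queue poll
 * step. The numeric event-code values below (0 = RX completion, 5 = driver
 * event) are assumptions for illustration; the driver defines the real
 * constants elsewhere.
 */
static inline void example_poll_events(sfn5122f_queue_t* q)
{
    regionid_t rid;
    genoffset_t offset, length, valid_data, valid_length;
    uint64_t flags;

    switch (sfn5122f_get_event_code(q)) {
    case 0: /* assumed RX completion code */
        sfn5122f_queue_handle_rx_ev_devif(q, &rid, &offset, &length,
                                          &valid_data, &valid_length, &flags);
        break;
    case 5: /* assumed driver-event code */
        sfn5122f_handle_drv_ev(q, 0);
        break;
    default:
        break;
    }
    // consume the event entry regardless of its type
    sfn5122f_queue_bump_evhead(q);
}
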
static inline size_t sfn5122f_queue_free_rxslots(sfn5122f_queue_t* q)
{
    size_t head = q->rx_head;
    size_t tail = q->rx_tail;
    size_t size = q->rx_size;

    if (tail >= head) {
        return size - (tail - head) - 1;
    } else {
        return size - (tail + size - head) - 1;
    }
}

static inline size_t sfn5122f_queue_free_txslots(sfn5122f_queue_t* q)
{
    size_t head = q->tx_head;
    size_t tail = q->tx_tail;
    size_t size = q->tx_size;

    if (tail >= head) {
        return size - (tail - head) - 1;
    } else {
        return size - (tail + size - head) - 1;
    }
}

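/*
 * Worked example of the free-slot arithmetic above, with assumed numbers:
 * for size = 512, head = 10, tail = 500 (tail >= head) there are
 * 512 - (500 - 10) - 1 = 21 free slots; after the producer wraps to
 * tail = 5 (tail < head) there are 512 - (5 + 512 - 10) - 1 = 4. One slot
 * always stays unused so that a full ring is distinguishable from an
 * empty one.
 */
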
static inline bool is_batched(size_t size, uint16_t tx_head, uint16_t q_tx_head)
{
    if (tx_head >= q_tx_head) {
        return (tx_head - q_tx_head > 0);
    } else {
        return (((tx_head + size) - q_tx_head) > 0);
    }
}

static inline errval_t sfn5122f_queue_handle_tx_ev_devif(sfn5122f_queue_t* q,
                                                         regionid_t* rid,
                                                         genoffset_t* offset,
                                                         genoffset_t* length,
                                                         genoffset_t* valid_data,
                                                         genoffset_t* valid_length,
                                                         uint64_t* flags)
{
    /* Only one event is generated even if there is more than one
       descriptor per packet */
    uint16_t ev_head = q->ev_head;
    uint16_t tx_head;
    struct devq_buf* buf;
    sfn5122f_q_tx_ev_t ev;
    sfn5122f_q_tx_user_desc_t d_user = 0;
    sfn5122f_q_tx_ker_desc_t d = 0;

    ev = q->ev_ring[ev_head];
    tx_head = sfn5122f_q_tx_ev_tx_ev_desc_ptr_extract(ev);

    buf = &q->tx_bufs[q->tx_head];

    //printf("Tx_head %d q->tx_head %d size %ld q->tx_tail %d\n",
    //       tx_head, q->tx_head, q->tx_size, q->tx_tail);

    *rid = buf->rid;
    *offset = buf->offset;
    *length = buf->length;
    *valid_data = buf->valid_data;
    *valid_length = buf->valid_length;
    *flags = buf->flags;

    if (sfn5122f_q_tx_ev_tx_ev_pkt_err_extract(ev)) {
        q->tx_head = (tx_head + 1) % q->tx_size;
        return NIC_ERR_TX_PKT;
    }

    if (sfn5122f_q_tx_ev_tx_ev_comp_extract(ev) == 1) {
        // the event may complete a batch of descriptors
        if (is_batched(q->tx_size, tx_head, q->tx_head)) {
            size_t index = 0;
            if (q->userspace) {
                d_user = q->tx_ring.user[q->tx_head];
            } else {
                d = q->tx_ring.ker[q->tx_head];
            }

            // copy the completed descriptors of the batch into bufs
            while (q->tx_head != (tx_head + 1) % q->tx_size) {
                buf = &q->tx_bufs[q->tx_head];
                q->bufs[index].rid = buf->rid;
                q->bufs[index].offset = buf->offset;
                q->bufs[index].valid_data = buf->valid_data;
                q->bufs[index].valid_length = buf->valid_length;
                q->bufs[index].flags = buf->flags;
                q->bufs[index].length = buf->length;
                //d_user = q->tx_ring.user[tx_head];
                index++;
                q->tx_head = (q->tx_head + 1) % q->tx_size;
            }

            q->num_left = index;

            // set the descriptors to 0
            if (q->userspace) {
                memset(d_user, 0, sfn5122f_q_tx_user_desc_size * q->num_left);
            } else {
                memset(d, 0, sfn5122f_q_tx_ker_desc_size * q->num_left);
            }
        } else { // Single descriptor
            if (q->userspace) {
                d_user = q->tx_ring.user[q->tx_head];
                memset(d_user, 0, sfn5122f_q_tx_user_desc_size);
            } else {
                d = q->tx_ring.ker[q->tx_head];
                memset(d, 0, sfn5122f_q_tx_ker_desc_size);
            }
        }
    }

    // reset the event entry in the queue
    memset(ev, 0xff, sfn5122f_q_event_entry_size);
    q->tx_head = (tx_head + 1) % q->tx_size;

    return SYS_ERR_OK;
}

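/*
 * Illustrative sketch (not part of the driver): reaping one TX completion.
 * The out-parameters describe the first completed buffer; for batched
 * completions the remaining entries land in q->bufs.
 */
static inline void example_reap_tx(sfn5122f_queue_t* q)
{
    regionid_t rid;
    genoffset_t offset, length, valid_data, valid_length;
    uint64_t flags;

    errval_t err = sfn5122f_queue_handle_tx_ev_devif(q, &rid, &offset,
                                                     &length, &valid_data,
                                                     &valid_length, &flags);
    if (err == NIC_ERR_TX_PKT) {
        printf("TX packet error\n");
    }
    sfn5122f_queue_bump_evhead(q);
}
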
static inline int sfn5122f_queue_add_txbuf_devif(sfn5122f_queue_t* q,
                                                 lpaddr_t addr,
                                                 regionid_t rid,
                                                 genoffset_t offset,
                                                 genoffset_t length,
                                                 genoffset_t valid_data,
                                                 genoffset_t valid_length,
                                                 uint64_t flags)
{
    struct devq_buf* buf;
    sfn5122f_q_tx_ker_desc_t d;
    size_t tail = q->tx_tail;

    d = q->tx_ring.ker[tail];

    buf = &q->tx_bufs[tail];

    bool last = flags & NETIF_TXFLAG_LAST;

    buf->rid = rid;
    buf->offset = offset;
    buf->length = length;
    buf->valid_data = valid_data;
    buf->valid_length = valid_length;
    buf->flags = flags;

    sfn5122f_q_tx_ker_desc_tx_ker_buf_addr_insert(d, addr);
    sfn5122f_q_tx_ker_desc_tx_ker_byte_count_insert(d, valid_length);
    sfn5122f_q_tx_ker_desc_tx_ker_cont_insert(d, !last);
    sfn5122f_q_tx_ker_desc_tx_ker_buf_region_insert(d, 0);

    __sync_synchronize();

    q->tx_tail = (tail + 1) % q->tx_size;

    return 0;
}

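/*
 * Illustrative sketch (not part of the driver): queueing a two-descriptor
 * packet in physical-address mode. Only the final fragment carries
 * NETIF_TXFLAG_LAST, so the cont bit stays set on all but the last
 * descriptor, and one tail bump publishes both. Addresses, lengths, and
 * the region id are hypothetical.
 */
static inline errval_t example_send_two_fragments(sfn5122f_queue_t* q,
                                                  lpaddr_t hdr, lpaddr_t payload,
                                                  regionid_t rid)
{
    // header fragment: more descriptors follow (no LAST flag)
    sfn5122f_queue_add_txbuf_devif(q, hdr, rid, 0, 128, 0, 128, 0);
    // payload fragment: marks the end of the packet
    sfn5122f_queue_add_txbuf_devif(q, payload, rid, 128, 1400, 0, 1400,
                                   NETIF_TXFLAG_LAST);
    return sfn5122f_queue_bump_txtail(q);
}
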
static inline int sfn5122f_queue_add_user_txbuf_devif(sfn5122f_queue_t* q,
                                                      uint64_t buftbl_idx,
                                                      uint64_t b_off,
                                                      regionid_t rid,
                                                      genoffset_t offset,
                                                      genoffset_t length,
                                                      genoffset_t valid_data,
                                                      genoffset_t valid_length,
                                                      uint64_t flags)
{
    //printf("Add tx_buf %lx \n", base);
    sfn5122f_q_tx_user_desc_t d;
    struct devq_buf* buf;
    size_t tail = q->tx_tail;

    d = q->tx_ring.user[tail];
    buf = &q->tx_bufs[tail];

    bool last = flags & NETIF_TXFLAG_LAST;

    buf->rid = rid;
    buf->offset = offset;
    buf->length = length;
    buf->valid_data = valid_data;
    buf->valid_length = valid_length;
    buf->flags = flags;

    sfn5122f_q_tx_user_desc_tx_user_sw_ev_en_insert(d, 0);
    sfn5122f_q_tx_user_desc_tx_user_cont_insert(d, !last);
    sfn5122f_q_tx_user_desc_tx_user_byte_cnt_insert(d, valid_length);
    sfn5122f_q_tx_user_desc_tx_user_buf_id_insert(d, buftbl_idx);
    sfn5122f_q_tx_user_desc_tx_user_byte_ofs_insert(d, b_off);

    __sync_synchronize();

    q->tx_tail = (tail + 1) % q->tx_size;

    return 0;
}

#endif //ndef SFN5122F_CHANNEL_H_