/*
 * Copyright (c) 2007-2011, ETH Zurich.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
 */

#ifndef SFN5122F_CHANNEL_H_
#define SFN5122F_CHANNEL_H_

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <barrelfish/barrelfish.h>

#include <net_interfaces/net_interfaces.h>
#include <devif/queue_interface.h>
#include <dev/sfn5122f_q_dev.h>
#include <dev/sfn5122f_dev.h>

struct sfn5122f_devif_binding;
struct sfn5122f_devif_rpc_client;
struct sfn5122f_queue;

struct sfn5122f_queue_ops {
    errval_t (*update_txtail)(struct sfn5122f_queue*, size_t);
    errval_t (*update_rxtail)(struct sfn5122f_queue*, size_t);
};

struct region_entry {
    // ... region bookkeeping fields ...
    struct region_entry* next;
};

struct sfn5122f_queue {
    union {
        sfn5122f_q_tx_user_desc_array_t* user;
        sfn5122f_q_tx_ker_desc_array_t* ker;
    } tx_ring;
    struct devq_buf* tx_bufs;
    size_t tx_head;
    size_t tx_tail;
    size_t tx_size;

    union {
        sfn5122f_q_rx_user_desc_array_t* user;
        sfn5122f_q_rx_ker_desc_array_t* ker;
    } rx_ring;
    struct devq_buf* rx_bufs;
    size_t rx_head;
    size_t rx_tail;
    size_t rx_size;

    sfn5122f_q_event_entry_array_t* ev_ring;
    size_t ev_head;
    size_t ev_size;

    struct sfn5122f_queue_ops ops;
    bool userspace;

    // For batching of TX events, maximum of 32
    // entries since there can be a maximum of
    // TX_CACHE descriptors per event
    struct devq_buf bufs[32];
    uint8_t last_deq; // last entry dequeued from bufs
    size_t num_left;

    // state for devif interface
    struct sfn5122f_devif_binding* b;
    struct sfn5122f_devif_rpc_client* rpc;
    sfn5122f_event_cb_t cb;

    // Direct interface fields
    struct region_entry* regions;
};

typedef struct sfn5122f_queue sfn5122f_queue_t;
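
// Allocate and initialize the queue bookkeeping state for the given
// TX/RX/event rings. The ring memory itself must already be allocated
// and mapped by the caller.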
static inline sfn5122f_queue_t* sfn5122f_queue_init(void* tx, size_t tx_size,
                                                    void* rx, size_t rx_size,
                                                    void* ev, size_t ev_size,
                                                    struct sfn5122f_queue_ops* ops,
                                                    bool userspace)
{
    sfn5122f_queue_t* q = malloc(sizeof(*q));

    q->tx_ring.user = tx;
    q->tx_bufs = malloc(sizeof(struct devq_buf) * tx_size);
    q->tx_head = 0;
    q->tx_tail = 0;
    q->tx_size = tx_size;

    q->rx_ring.user = rx;
    q->rx_bufs = malloc(sizeof(struct devq_buf) * rx_size);
    q->rx_head = 0;
    q->rx_tail = 0;
    q->rx_size = rx_size;

    q->ev_ring = ev;
    q->ev_head = 0;
    q->ev_size = ev_size;
    q->userspace = userspace;
    q->ops = *ops;

    // Initialize ring memory with 0xff
    if (!userspace) {
        memset(tx, 0xff, tx_size * sfn5122f_q_tx_ker_desc_size);
        memset(rx, 0xff, rx_size * sfn5122f_q_rx_ker_desc_size);
    } else {
        memset(tx, 0xff, tx_size * sfn5122f_q_tx_user_desc_size);
        memset(rx, 0xff, rx_size * sfn5122f_q_rx_user_desc_size);
    }

    /* all 0 is potential valid event */
    memset(ev, 0xff, ev_size * sfn5122f_q_event_entry_size);

    return q;
}
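
/* Example use (sketch; ring memory and the ops struct are set up by the
 * driver, and the ring sizes here are arbitrary):
 *
 *   sfn5122f_queue_t* q = sfn5122f_queue_init(tx_mem, 512, rx_mem, 512,
 *                                             ev_mem, 512, &ops, true);
 */

// Unmap the descriptor ring and free the queue state.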
static inline errval_t sfn5122f_queue_free(struct sfn5122f_queue* q)
{
    errval_t err;

    // only one cap that is mapped (TX)
    if (q->userspace) {
        err = vspace_unmap(q->tx_ring.user);
    } else {
        err = vspace_unmap(q->tx_ring.ker);
    }
    if (err_is_fail(err)) {
        return err;
    }

    free(q->rx_bufs);
    free(q->tx_bufs);
    free(q);

    return SYS_ERR_OK;
}
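
// Event code of the entry at the current event queue head.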
static inline uint8_t sfn5122f_get_event_code(sfn5122f_queue_t* queue)
{
    sfn5122f_q_event_entry_t ev;
    ev = queue->ev_ring[queue->ev_head];
    return sfn5122f_q_event_entry_ev_code_extract(ev);
}
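
// Publish the TX tail pointer through the driver-supplied op
// (typically a doorbell write).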
static inline errval_t sfn5122f_queue_bump_txtail(sfn5122f_queue_t* q)
{
    return q->ops.update_txtail(q, q->tx_tail);
}
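
// Publish the RX tail pointer through the driver-supplied op.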
static inline errval_t sfn5122f_queue_bump_rxtail(sfn5122f_queue_t* q)
{
    return q->ops.update_rxtail(q, q->rx_tail);
}
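
// Handle a driver event on queue n: informational subcodes are logged,
// RX/TX errors are returned to the caller.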
static inline errval_t sfn5122f_handle_drv_ev(sfn5122f_queue_t* q, uint16_t n)
{
    size_t ev_head = q->ev_head;
    sfn5122f_q_event_entry_t code;
    code = q->ev_ring[ev_head];

    if (sfn5122f_q_driver_ev_driver_ev_subcode_extract(code) == 2) {
        printf("Event queue init done %d\n", n);
    }

    if (sfn5122f_q_driver_ev_driver_ev_subcode_extract(code) == 9) {
        printf("Packet neither TCP nor UDP %d\n", n);
    }

    if (sfn5122f_q_driver_ev_driver_ev_subcode_extract(code) == 14) {
        printf("RX error %d\n", n);
        return SFN_ERR_RX_PKT;
    }

    if (sfn5122f_q_driver_ev_driver_ev_subcode_extract(code) == 15) {
        printf("TX error %d\n", n);
        return SFN_ERR_TX_PKT;
    }

    memset(code, 0xff, sfn5122f_q_event_entry_size);
    return SYS_ERR_OK;
}
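
// Handle an event from the management controller (MCDI).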
static inline errval_t sfn5122f_queue_handle_mcdi_event(sfn5122f_queue_t* q)
{
    // TODO handle different events
    size_t ev_head = q->ev_head;
    sfn5122f_q_event_entry_t ev;
    uint64_t reg = 0;

    ev = q->ev_ring[ev_head];
    reg = sfn5122f_q_event_entry_ev_data_extract(ev);
    (void)reg; // event data currently unused
    memset(ev, 0xff, sfn5122f_q_event_entry_size);

    return SYS_ERR_OK;
}
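
// Enqueue an RX buffer described by a buffer-table id and offset
// (user-level descriptor format).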
static inline int sfn5122f_queue_add_user_rxbuf_devif(sfn5122f_queue_t* q,
    struct devq_buf* buf;
    sfn5122f_q_rx_user_desc_t d;
    size_t tail = q->rx_tail;

    d = q->rx_ring.user[tail];
    buf = &q->rx_bufs[tail];

    sfn5122f_q_rx_user_desc_rx_user_buf_id_insert(d, buf_id);
    sfn5122f_q_rx_user_desc_rx_user_2byte_offset_insert(d, offset >> 1);
    q->rx_tail = (tail + 1) % q->rx_size;
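
// Enqueue an RX buffer described by its physical address
// (kernel descriptor format).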
static inline int sfn5122f_queue_add_rxbuf_devif(sfn5122f_queue_t* q,
    struct devq_buf* buf;
    sfn5122f_q_rx_ker_desc_t d;
    size_t tail = q->rx_tail;

    d = q->rx_ring.ker[tail];
    buf = &q->rx_bufs[tail];

    sfn5122f_q_rx_ker_desc_rx_ker_buf_addr_insert(d, addr);
    sfn5122f_q_rx_ker_desc_rx_ker_buf_region_insert(d, 0);
    sfn5122f_q_rx_ker_desc_rx_ker_buf_size_insert(d, len);
    q->rx_tail = (tail + 1) % q->rx_size;
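
// Process an RX completion event; returns the received length via len
// and advances the RX head.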
static inline errval_t sfn5122f_queue_handle_rx_ev_devif(sfn5122f_queue_t* q,
    /* Only one event is generated even if there is more than one
       descriptor per packet */
    struct devq_buf* buf;
    size_t ev_head = q->ev_head;
    size_t rx_head;
    sfn5122f_q_rx_ev_t ev;
    sfn5122f_q_rx_user_desc_t d_user = 0;

    ev = q->ev_ring[ev_head];
    rx_head = sfn5122f_q_rx_ev_rx_ev_desc_ptr_extract(ev);
    buf = &q->rx_bufs[rx_head];

    if (!sfn5122f_q_rx_ev_rx_ev_pkt_ok_extract(ev)) {
        // TODO error handling
        q->rx_head = (rx_head + 1) % q->rx_size;
        if (sfn5122f_q_rx_ev_rx_ev_tobe_disc_extract(ev)) {
            // packet discarded by software -> ok
            return SFN_ERR_RX_DISCARD;
        }

        printf("Packet not ok\n");
        if (sfn5122f_q_rx_ev_rx_ev_buf_owner_id_extract(ev)) {
            printf("Wrong owner\n");
        }
        return SFN_ERR_RX_PKT;
    }

    *len = sfn5122f_q_rx_ev_rx_ev_byte_ctn_extract(ev);
    /* Length of 0 is treated as 16384 bytes */
    if (*len == 0) {
        *len = 16384;
    }

    rx_head = sfn5122f_q_rx_ev_rx_ev_desc_ptr_extract(ev);
    d_user = q->rx_ring.user[rx_head];
    buf = &q->rx_bufs[rx_head];

    memset(ev, 0xff, sfn5122f_q_event_entry_size);
    memset(d_user, 0, sfn5122f_q_rx_user_desc_size);

    q->rx_head = (rx_head + 1) % q->rx_size;
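
// Advance the event queue head by one entry, wrapping around.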
static inline void sfn5122f_queue_bump_evhead(sfn5122f_queue_t* q)
{
    q->ev_head = (q->ev_head + 1) % q->ev_size;
}
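
// Free RX descriptor slots; one slot stays unused to distinguish
// a full ring from an empty one.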
static inline size_t sfn5122f_queue_free_rxslots(sfn5122f_queue_t* q)
{
    size_t head = q->rx_head;
    size_t tail = q->rx_tail;
    size_t size = q->rx_size;
    if (tail >= head) {
        return size - (tail - head) - 1;
    } else {
        return size - (tail + size - head) - 1;
    }
}
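
// Free TX descriptor slots, computed like the RX variant.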
static inline size_t sfn5122f_queue_free_txslots(sfn5122f_queue_t* q)
{
    size_t head = q->tx_head;
    size_t tail = q->tx_tail;
    size_t size = q->tx_size;
    if (tail >= head) {
        return size - (tail - head) - 1;
    } else {
        return size - (tail + size - head) - 1;
    }
}
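
// True if a TX completion event acknowledges more than the single
// descriptor at q_tx_head, i.e. a batch of descriptors.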
static inline bool is_batched(size_t size, uint16_t tx_head, uint16_t q_tx_head)
{
    if (tx_head >= q_tx_head) {
        return (tx_head - q_tx_head > 0);
    } else {
        return (((tx_head + size) - q_tx_head) > 0);
    }
}
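
// Process a TX completion event, recycling all descriptors it covers;
// completed buffers are staged in q->bufs for later dequeueing.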
static inline errval_t sfn5122f_queue_handle_tx_ev_devif(sfn5122f_queue_t* q,
    /* Only one event is generated even if there is more than one
       descriptor per packet */
    uint16_t ev_head = q->ev_head;
    uint16_t tx_head;
    struct devq_buf* buf;
    sfn5122f_q_tx_ev_t ev;
    sfn5122f_q_tx_user_desc_t d_user = 0;

    ev = q->ev_ring[ev_head];
    tx_head = sfn5122f_q_tx_ev_tx_ev_desc_ptr_extract(ev);
    buf = &q->tx_bufs[q->tx_head];

    //printf("Tx_head %d q->tx_head %d size %ld \n", tx_head, q->tx_head,

    if (sfn5122f_q_tx_ev_tx_ev_pkt_err_extract(ev)) {
        q->tx_head = (tx_head + 1) % q->tx_size;
        return SFN_ERR_TX_PKT;
    }

    if (sfn5122f_q_tx_ev_tx_ev_comp_extract(ev) == 1) {
        // TX event may acknowledge a whole batch of descriptors
        if (is_batched(q->tx_size, tx_head, q->tx_head)) {
            size_t index = 0;
            q->num_left = 0;
            d_user = q->tx_ring.user[q->tx_head];
            while (q->tx_head != (tx_head + 1) % q->tx_size) {
                buf = &q->tx_bufs[q->tx_head];
                q->bufs[index].rid = buf->rid;
                q->bufs[index].bid = buf->bid;
                q->bufs[index].addr = buf->addr;
                q->bufs[index].flags = buf->flags;
                q->bufs[index].len = buf->len;
                d_user = q->tx_ring.user[tx_head];
                index++;
                q->num_left++;
                q->tx_head = (q->tx_head + 1) % q->tx_size;
            }
            memset(d_user, 0, sfn5122f_q_tx_user_desc_size * q->num_left);
        } else { // Single descriptor
            d_user = q->tx_ring.user[tx_head];
            memset(d_user, 0, sfn5122f_q_tx_user_desc_size);
        }

        // reset entry event in queue
        memset(ev, 0xff, sfn5122f_q_event_entry_size);
        q->tx_head = (tx_head + 1) % q->tx_size;
    }
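
// Enqueue a TX buffer by physical address (kernel descriptor format).
// The barrier orders the descriptor writes before the tail update.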
static inline int sfn5122f_queue_add_txbuf_devif(sfn5122f_queue_t* q,
    struct devq_buf* buf;
    sfn5122f_q_tx_ker_desc_t d;
    size_t tail = q->tx_tail;

    d = q->tx_ring.ker[tail];
    buf = &q->tx_bufs[tail];

    bool last = flags & NETIF_TXFLAG_LAST;

    sfn5122f_q_tx_ker_desc_tx_ker_buf_addr_insert(d, base);
    sfn5122f_q_tx_ker_desc_tx_ker_byte_count_insert(d, len);
    sfn5122f_q_tx_ker_desc_tx_ker_cont_insert(d, !last);
    sfn5122f_q_tx_ker_desc_tx_ker_buf_region_insert(d, 0);

    __sync_synchronize();

    q->tx_tail = (tail + 1) % q->tx_size;
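
// Enqueue a TX buffer via buffer-table id and offset (user-level
// descriptor format).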
static inline int sfn5122f_queue_add_user_txbuf_devif(sfn5122f_queue_t* q,
    //printf("Add tx_buf %lx \n", base);
    sfn5122f_q_tx_user_desc_t d;
    struct devq_buf* buf;
    size_t tail = q->tx_tail;

    // user-level variant must index the user descriptor ring
    d = q->tx_ring.user[tail];
    buf = &q->tx_bufs[tail];

    bool last = flags & NETIF_TXFLAG_LAST;

    sfn5122f_q_tx_user_desc_tx_user_sw_ev_en_insert(d, 0);
    sfn5122f_q_tx_user_desc_tx_user_cont_insert(d, !last);
    sfn5122f_q_tx_user_desc_tx_user_byte_cnt_insert(d, len);
    sfn5122f_q_tx_user_desc_tx_user_buf_id_insert(d, buftbl_idx);
    sfn5122f_q_tx_user_desc_tx_user_byte_ofs_insert(d, offset);

    __sync_synchronize();

    q->tx_tail = (tail + 1) % q->tx_size;

#endif // SFN5122F_CHANNEL_H_