3d9572aeb9b43a40692df03437f030f75a360dc1
[barrelfish] / lib / devif / backends / net / solarflare / hw_queue.h
1 /*
2  * Copyright (c) 2007-2011, ETH Zurich.
3  * All rights reserved.
4  *
5  * This file is distributed under the terms in the attached LICENSE file.
6  * If you do not find this file, copies can be found by writing to:
7  * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
8  */
9
10 #ifndef SFN5122F_CHANNEL_H_
11 #define SFN5122F_CHANNEL_H_
12
13
14
15 #include <string.h>
16 #include <stdlib.h>
17
18 #include <devif/queue_interface.h>
19 #include <dev/sfn5122f_q_dev.h>
20 #include <dev/sfn5122f_dev.h>
21 #include "helper.h"
22
23 #define MTU_MAX 2048
24
25 struct sfn5122f_devif_binding;
26 struct sfn5122f_devif_rpc_client;
27 struct sfn5122f_queue;
28 struct peridoc_event;
29
// Hooks used by the queue code to publish new TX/RX tail pointers to the
// device.  The second argument is the new tail index.
struct sfn5122f_queue_ops {
    errval_t (*update_txtail)(struct sfn5122f_queue*, size_t);
    errval_t (*update_rxtail)(struct sfn5122f_queue*, size_t);
};
34
// One registered memory region; entries form a singly-linked list
// hanging off struct sfn5122f_queue::regions.
struct region_entry {
    uint32_t rid;                 // devif region id
    lpaddr_t phys;                // physical base address of the region
    size_t size;                  // region size in bytes
    uint64_t buftbl_idx;          // buffer-table index (per naming; TODO confirm exact semantics)
    struct capref cap;            // capability backing the region
    struct region_entry* next;    // next region in the list, NULL-terminated
};
43
// Per-queue software state for a sfn5122f TX/RX/event queue triple.
struct sfn5122f_queue {
    struct devq q;                  // generic devif queue state
    // TX descriptor ring; which union member is valid depends on `userspace`
    union {
        sfn5122f_q_tx_user_desc_array_t* user;
        sfn5122f_q_tx_ker_desc_array_t* ker;
    } tx_ring;
    struct devq_buf*                tx_bufs;  // shadow copies of enqueued TX buffers
    uint16_t                        tx_head;  // oldest in-flight TX descriptor
    uint16_t                        tx_tail;  // next free TX descriptor slot
    size_t                          tx_size;  // number of TX descriptors

    // RX descriptor ring; which union member is valid depends on `userspace`
    union {
        sfn5122f_q_rx_user_desc_array_t* user;
        sfn5122f_q_rx_ker_desc_array_t* ker;
    } rx_ring;
    struct devq_buf*                rx_bufs;  // shadow copies of enqueued RX buffers
    uint16_t                        rx_head;  // oldest in-flight RX descriptor
    uint16_t                        rx_tail;  // next free RX descriptor slot
    uint16_t                        rx_size;  // number of RX descriptors

    sfn5122f_q_event_entry_array_t* ev_ring;  // event queue ring
    uint32_t                        ev_head;  // next event to consume
    uint32_t                        ev_tail;
    size_t                          ev_size;  // number of event entries

    struct sfn5122f_queue_ops       ops;      // tail-update callbacks
    void*                           opaque;
    bool                            userspace; // true -> user-level descriptor format

    // For batching of TX events, maximum of 32
    // entries since there can be a maximum of 
    // TX_CACHE descriptors per event
    struct devq_buf bufs[32];
    uint8_t last_deq; // last deq from buffer
    uint8_t num_left; // buffered TX completions not yet handed out

    // state for devif interface
    struct sfn5122f_devif_binding* b;
    struct sfn5122f_devif_rpc_client* rpc;
    volatile bool bound;  // set once the binding is established


    // interrupts
    uint8_t core;
    uint8_t vector;

    // callback 
    sfn5122f_event_cb_t cb;

    // Direct interface fields
    uint16_t id;
    struct capref frame;
    sfn5122f_t *device;
    void* device_va;
    struct region_entry* regions; // list of registered memory regions
};
100
101 typedef struct sfn5122f_queue sfn5122f_queue_t;
102
103 static inline sfn5122f_queue_t* sfn5122f_queue_init(void* tx, 
104                                                     size_t tx_size,
105                                                     void* rx, 
106                                                     size_t rx_size, 
107                                                     void* ev, 
108                                                     size_t ev_size,
109                                                     struct sfn5122f_queue_ops* ops, 
110                                                     bool userspace)
111 {
112
113     sfn5122f_queue_t* q = malloc(sizeof(*q));
114
115     if (userspace) {
116         q->tx_ring.user = tx;
117     } else {
118         q->tx_ring.ker = tx;
119     }
120     q->tx_bufs = malloc(sizeof(struct devq_buf) * tx_size);
121     q->tx_head = 0;
122     q->tx_tail = 0;
123     q->tx_size = tx_size;
124
125     if (userspace) {
126         q->rx_ring.user = rx;
127     } else {
128         q->rx_ring.ker = rx;
129     }
130     q->rx_bufs = malloc(sizeof(struct devq_buf) * rx_size);
131     q->rx_head = 0;
132     q->rx_tail = 0;
133     q->rx_size = rx_size;
134   
135     q->ev_ring = ev;
136     q->ev_head = 0;
137     q->ev_tail = 0;
138     q->ev_size = ev_size;
139     q->userspace = userspace; 
140     q->num_left = 0;
141     q->last_deq = 0;
142
143     q -> ops = *ops;
144
145     // Initialize ring memory with 0xff
146     if(!userspace){
147        memset(tx, 0xff, tx_size * sfn5122f_q_tx_ker_desc_size);
148        memset(rx, 0xff, rx_size * sfn5122f_q_rx_ker_desc_size);
149     }else{
150        memset(tx, 0xff, tx_size * sfn5122f_q_tx_user_desc_size);
151        memset(rx, 0xff, rx_size * sfn5122f_q_rx_user_desc_size);
152     }
153     /* all 0 is potential valid event */
154     memset(ev, 0xff, ev_size * sfn5122f_q_event_entry_size);
155     return q;
156 }
157
158
159 static inline errval_t sfn5122f_queue_free(struct sfn5122f_queue* q)
160 {
161     errval_t err;
162
163     // only one cap that is mapped (TX)
164     if (q->userspace) {
165         err = vspace_unmap(q->tx_ring.user);  
166     } else {
167         err = vspace_unmap(q->tx_ring.ker);  
168     }
169     if (err_is_fail(err)) {
170         return err;
171     }   
172     free(q->rx_bufs);
173     free(q->tx_bufs);
174     free(q);
175
176     return SYS_ERR_OK;
177 }
178
179
180 static inline uint8_t sfn5122f_get_event_code(sfn5122f_queue_t* queue)
181 {             
182        sfn5122f_q_event_entry_t ev;
183        ev = queue->ev_ring[queue->ev_head];
184        return sfn5122f_q_event_entry_ev_code_extract(ev);
185 }
186
187
188 static inline errval_t sfn5122f_queue_bump_txtail(sfn5122f_queue_t* q)
189 {
190     return q->ops.update_txtail(q, q->tx_tail);
191 }
192
193
194 static inline errval_t sfn5122f_queue_bump_rxtail(sfn5122f_queue_t* q)
195 {
196     return q->ops.update_rxtail(q, q->rx_tail);
197 }
198
199
200 static inline errval_t sfn5122f_handle_drv_ev(sfn5122f_queue_t* q, uint16_t n)
201 {   
202     size_t ev_head = q->ev_head;
203
204     sfn5122f_q_event_entry_t code;
205     code = q->ev_ring[ev_head]; 
206
207     if (sfn5122f_q_driver_ev_driver_ev_subcode_extract(code) == 2) {
208         printf("Event queue init done %d \n", n);
209     }
210
211     if (sfn5122f_q_driver_ev_driver_ev_subcode_extract(code) == 9) {
212         printf("Packet neither TCP nor UPD %d \n", n);
213     }
214     
215     if (sfn5122f_q_driver_ev_driver_ev_subcode_extract(code) == 14) {
216         printf("RX error %d \n", n);
217         return SFN_ERR_RX_PKT;
218     }
219
220     if (sfn5122f_q_driver_ev_driver_ev_subcode_extract(code) == 15) {
221         printf("TX error %d \n", n);
222         return SFN_ERR_TX_PKT;
223     }
224
225     memset(code, 0xff, sfn5122f_q_event_entry_size);
226     return SYS_ERR_OK;
227
228 }
229
230
231
232 static inline errval_t sfn5122f_queue_handle_mcdi_event(sfn5122f_queue_t* q)
233 {
234     // TODO handle different events    
235     size_t ev_head = q->ev_head;
236     sfn5122f_q_event_entry_t ev;
237     uint64_t reg;
238     ev = q->ev_ring[ev_head]; 
239     reg = sfn5122f_q_event_entry_ev_data_extract(ev);
240     memset(ev, 0xff, sfn5122f_q_event_entry_size);
241
242     return SYS_ERR_OK;
243
244 }
245
246 /*    RX      */
247 static inline int sfn5122f_queue_add_user_rxbuf_devif(sfn5122f_queue_t* q, 
248                                                       uint32_t buf_id,
249                                                       uint16_t offset,
250                                                       regionid_t rid,
251                                                       bufferid_t devq_bid,
252                                                       lpaddr_t base,
253                                                       size_t len,
254                                                       uint64_t flags)
255 {
256     struct devq_buf* buf;
257     sfn5122f_q_rx_user_desc_t d;
258     size_t tail = q->rx_tail;
259
260     d = q->rx_ring.user[tail];
261     buf = &q->rx_bufs[tail];
262
263     buf->rid = rid;
264     buf->bid = devq_bid;
265     buf->addr = base;
266     buf->len = len;
267     buf->flags = flags;
268     sfn5122f_q_rx_user_desc_rx_user_buf_id_insert(d, buf_id);
269     sfn5122f_q_rx_user_desc_rx_user_2byte_offset_insert(d, offset >> 1);
270     q->rx_tail = (tail + 1) % q->rx_size;
271     return 0;
272 }
273
274 static inline int sfn5122f_queue_add_rxbuf_devif(sfn5122f_queue_t* q, 
275                                                  regionid_t rid,
276                                                  bufferid_t bid,
277                                                  lpaddr_t addr,
278                                                  size_t len,
279                                                  uint64_t flags)
280 {
281     struct devq_buf* buf;
282     sfn5122f_q_rx_ker_desc_t d;
283     size_t tail = q->rx_tail;
284
285     d = q->rx_ring.ker[tail];
286
287     buf = &q->rx_bufs[tail];
288
289     buf->rid = rid;
290     buf->bid = bid;
291     buf->addr = addr;
292     buf->len = len;
293     buf->flags = flags;
294
295     sfn5122f_q_rx_ker_desc_rx_ker_buf_addr_insert(d, addr);
296     sfn5122f_q_rx_ker_desc_rx_ker_buf_region_insert(d, 0);
297     // TODO: Check size
298     sfn5122f_q_rx_ker_desc_rx_ker_buf_size_insert(d, len);
299     q->rx_tail = (tail + 1) % q->rx_size;
300     return 0;
301 }
302
/**
 * \brief Consume the RX event at the event-queue head and return the
 *        metadata of the completed buffer through the out parameters.
 *
 * \return SYS_ERR_OK on success, SFN_ERR_RX_DISCARD if the packet was
 *         discarded, SFN_ERR_RX_PKT on other RX errors.
 */
static inline errval_t sfn5122f_queue_handle_rx_ev_devif(sfn5122f_queue_t* q, 
                                                         regionid_t* rid,
                                                         bufferid_t* bid,
                                                         lpaddr_t* base,
                                                         size_t* len,
                                                         uint64_t* flags)
{   
    /*  Only one event is generated even if there is more than one
        descriptor per packet  */
    struct devq_buf* buf;
    size_t ev_head = q->ev_head;
    size_t rx_head;
    sfn5122f_q_rx_ev_t ev;
    sfn5122f_q_rx_user_desc_t d_user = 0;

    ev = q->ev_ring[ev_head];
    // Index of the completed RX descriptor, taken from the event itself.
    rx_head = sfn5122f_q_rx_ev_rx_ev_desc_ptr_extract(ev);

    buf = &q->rx_bufs[rx_head];

    // Fill out parameters early so they are valid on the error paths too.
    *rid = buf->rid;
    *bid = buf->bid;
    *base = buf->addr;
    *flags = buf->flags;

    if(!sfn5122f_q_rx_ev_rx_ev_pkt_ok_extract(ev)) {   
         // TODO error handling
         q->rx_head = (rx_head + 1) % q->rx_size;
         if (sfn5122f_q_rx_ev_rx_ev_tobe_disc_extract(ev)) {
            // packet discarded by software -> ok
            return SFN_ERR_RX_DISCARD;
         }

         printf("Packet not ok \n");
         if (sfn5122f_q_rx_ev_rx_ev_buf_owner_id_extract(ev)) {
             printf("Wrong owner \n");
         }
         return SFN_ERR_RX_PKT;
    }

    *len = sfn5122f_q_rx_ev_rx_ev_byte_ctn_extract(ev);
    /* Length of 0 is treated as 16384 bytes */
    if (*len == 0) {
        *len = 16384;
    }

    // NOTE(review): this re-extracts the same descriptor index and re-reads
    // the same shadow buffer as above — the repeated block appears redundant.
    rx_head = sfn5122f_q_rx_ev_rx_ev_desc_ptr_extract(ev);
    // NOTE(review): the user-format ring is accessed unconditionally here,
    // even though q->userspace may be false — confirm kernel-mode queues
    // never reach this path, or that the union aliasing makes this benign.
    d_user = q->rx_ring.user[rx_head];  

    buf = &q->rx_bufs[rx_head];

    *rid = buf->rid;
    *bid = buf->bid;
    *base = buf->addr;
    *flags = buf->flags;

    // Reset the consumed event (all-0xff = invalid) and clear the descriptor.
    memset(ev, 0xff, sfn5122f_q_event_entry_size);
    memset(d_user, 0 , sfn5122f_q_rx_user_desc_size);

    q->rx_head = (rx_head + 1) % q->rx_size;
    return SYS_ERR_OK;
}
365
366 static inline void sfn5122f_queue_bump_evhead(sfn5122f_queue_t* q)
367 {     
368      q->ev_head = (q->ev_head +1) % q->ev_size;
369 }
370
371 static inline size_t sfn5122f_queue_free_rxslots(sfn5122f_queue_t* q)
372 {
373     size_t head = q->rx_head;
374     size_t tail = q->rx_tail;
375     size_t size = q->rx_size;
376
377     if (tail >= head) {
378         return size - (tail - head) -1; 
379     } else {
380         return size - (tail + size - head) -1; 
381     }
382 }
383
384
385 /*   TX       */
386 static inline size_t sfn5122f_queue_free_txslots(sfn5122f_queue_t* q)
387 {
388     size_t head = q->tx_head;
389     size_t tail = q->tx_tail;
390     size_t size = q->tx_size;
391
392     if (tail >= head) {
393         return size - (tail - head) - 1; 
394     } else {
395         return size - (tail + size - head) - 1; 
396     }
397
398 }
399
/**
 * \brief Decide whether a TX completion covers more than one descriptor.
 *
 * \param size      ring size, used to unwrap tx_head when it is behind
 * \param tx_head   descriptor index reported by the completion event
 * \param q_tx_head software head (oldest in-flight descriptor)
 *
 * \return true iff the event covers a non-zero span of descriptors.
 */
static inline bool is_batched(size_t size, uint16_t tx_head, uint16_t q_tx_head)
{
    // Distance from the software head to the completed descriptor,
    // unwrapped across the ring boundary when needed.
    size_t span = (tx_head >= q_tx_head)
                      ? (size_t)(tx_head - q_tx_head)
                      : (tx_head + size) - q_tx_head;
    return span > 0;
}
408
/**
 * \brief Consume the TX completion event at the event-queue head.
 *
 * Returns the metadata of one completed buffer through the out parameters;
 * if the event completes a batch of descriptors, the remaining completions
 * are staged in q->bufs (q->num_left, q->last_deq) for later dequeueing.
 *
 * \return SYS_ERR_OK on success, SFN_ERR_TX_PKT on a TX packet error.
 */
static inline errval_t sfn5122f_queue_handle_tx_ev_devif(sfn5122f_queue_t* q, 
                                                         regionid_t* rid,
                                                         bufferid_t* bid,
                                                         lpaddr_t* base,
                                                         size_t* len,
                                                         uint64_t* flags)
{
    /*  Only one event is generated even if there is more than one
        descriptor per packet  */
    uint16_t ev_head = q->ev_head;
    uint16_t tx_head;      // descriptor index reported by the event
    struct devq_buf* buf;
    sfn5122f_q_tx_ev_t ev;
    sfn5122f_q_tx_user_desc_t d_user= 0;
   
    ev = q->ev_ring[ev_head];
    tx_head = sfn5122f_q_tx_ev_tx_ev_desc_ptr_extract(ev);
    

    // Out parameters are filled from the software head, not from tx_head.
    buf = &q->tx_bufs[q->tx_head];

    //printf("Tx_head %d q->tx_head %d size %ld \n", tx_head, q->tx_head,
    //        q->tx_size);

    *rid = buf->rid;
    *bid = buf->bid;
    *base = buf->addr;
    *flags = buf->flags;
    *len = buf->len;

    if (sfn5122f_q_tx_ev_tx_ev_pkt_err_extract(ev)){     
        q->tx_head = (tx_head +1) % q->tx_size;
        return SFN_ERR_TX_PKT;
    }

    if (sfn5122f_q_tx_ev_tx_ev_comp_extract(ev) == 1){  
        // TX Event is a batch
        if (is_batched(q->tx_size, tx_head, q->tx_head)) {
            uint8_t index = 0;
            q->num_left = 0;
            d_user = q->tx_ring.user[q->tx_head];  
            // Copy each completed buffer's metadata into the staging array
            // and advance the software head up to and including tx_head.
            while (q->tx_head != (tx_head + 1) % q->tx_size ) {
                buf = &q->tx_bufs[q->tx_head];
                q->bufs[index].rid = buf->rid;
                q->bufs[index].bid = buf->bid;
                q->bufs[index].addr = buf->addr;
                q->bufs[index].flags = buf->flags;
                q->bufs[index].len = buf->len;
                // NOTE(review): d_user is reassigned with the fixed index
                // tx_head on every iteration — presumably intended to be
                // q->tx_head; confirm against the original driver.
                d_user = q->tx_ring.user[tx_head];  
                index++;
                q->tx_head = (q->tx_head + 1) % q->tx_size;
                q->num_left++;
            }          
            q->last_deq = 0;  
            // NOTE(review): this clears num_left descriptors starting at
            // d_user without accounting for ring wrap-around — a batch that
            // crosses the end of the ring would memset past the ring buffer.
            memset(d_user, 0 , sfn5122f_q_tx_user_desc_size*q->num_left);
        } else { // Single descriptor
            d_user = q->tx_ring.user[tx_head];  
            memset(d_user, 0 , sfn5122f_q_tx_user_desc_size);
        }

        // reset entry event in queue
        memset(ev, 0xff, sfn5122f_q_event_entry_size);
        q->tx_head = (tx_head +1) % q->tx_size;
    }

    return SYS_ERR_OK;
}
476
477 static inline int sfn5122f_queue_add_txbuf_devif(sfn5122f_queue_t* q, 
478                                                  regionid_t rid,
479                                                  bufferid_t bid,
480                                                  lpaddr_t base,
481                                                  size_t len,
482                                                  uint64_t flags)
483 {
484     struct devq_buf* buf;
485     sfn5122f_q_tx_ker_desc_t d;
486     size_t tail = q->tx_tail;
487
488     d = q->tx_ring.ker[tail];
489  
490     buf = &q->tx_bufs[tail];
491    
492     bool last = flags & DEVQ_BUF_FLAG_TX_LAST;    
493     buf->rid = rid;
494     buf->bid = bid;
495     buf->addr = base;
496     buf->len = len;
497     buf->flags = flags;
498
499     sfn5122f_q_tx_ker_desc_tx_ker_buf_addr_insert(d, base);
500     sfn5122f_q_tx_ker_desc_tx_ker_byte_count_insert(d, len);
501     sfn5122f_q_tx_ker_desc_tx_ker_cont_insert(d, !last);
502     sfn5122f_q_tx_ker_desc_tx_ker_buf_region_insert(d, 0);
503
504     __sync_synchronize();
505  
506     q->tx_tail = (tail + 1) % q->tx_size;
507     return 0;
508 }
509
510
511 static inline int sfn5122f_queue_add_user_txbuf_devif(sfn5122f_queue_t* q, 
512                                                       uint64_t buftbl_idx, 
513                                                       uint64_t offset,
514                                                       regionid_t rid,
515                                                       bufferid_t devq_bid,
516                                                       lpaddr_t base,
517                                                       size_t len,
518                                                       uint64_t flags)
519 {
520     
521     //printf("Add tx_buf %lx \n", base);
522     sfn5122f_q_tx_user_desc_t d;
523     struct devq_buf* buf;
524     size_t tail = q->tx_tail;
525
526     d = q->tx_ring.ker[tail];
527     buf = &q->tx_bufs[tail];
528    
529     bool last = flags & DEVQ_BUF_FLAG_TX_LAST;    
530     buf->rid = rid;
531     buf->bid = devq_bid;
532     buf->addr = base;
533     buf->len = len;
534     buf->flags = flags;
535
536     sfn5122f_q_tx_user_desc_tx_user_sw_ev_en_insert(d, 0);
537     sfn5122f_q_tx_user_desc_tx_user_cont_insert(d, !last);
538     sfn5122f_q_tx_user_desc_tx_user_byte_cnt_insert(d, len);
539     sfn5122f_q_tx_user_desc_tx_user_buf_id_insert(d, buftbl_idx);
540     sfn5122f_q_tx_user_desc_tx_user_byte_ofs_insert(d, offset);
541
542     __sync_synchronize();
543  
544     q->tx_tail = (tail + 1) % q->tx_size;
545     return 0;
546 }
547
548 #endif //ndef SFN5122F_CHANNEL_H_
549