DeviceQueue: moved networking related flags to include/net_interfaces/flags.h
[barrelfish] / lib / devif / backends / net / solarflare / hw_queue.h
1 /*
2  * Copyright (c) 2007-2011, ETH Zurich.
3  * All rights reserved.
4  *
5  * This file is distributed under the terms in the attached LICENSE file.
6  * If you do not find this file, copies can be found by writing to:
7  * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
8  */
9
10 #ifndef SFN5122F_CHANNEL_H_
11 #define SFN5122F_CHANNEL_H_
12
13
14
15 #include <string.h>
16 #include <stdlib.h>
17
18 #include <net_interfaces/net_interfaces.h>
19 #include <devif/queue_interface.h>
20 #include <dev/sfn5122f_q_dev.h>
21 #include <dev/sfn5122f_dev.h>
22 #include "helper.h"
23
24 #define MTU_MAX 2048
25
26 struct sfn5122f_devif_binding;
27 struct sfn5122f_devif_rpc_client;
28 struct sfn5122f_queue;
29 struct peridoc_event;
30
// Backend hooks used to publish a new ring tail to the device (doorbell).
// Implementations differ for kernel-managed vs. userspace-mapped queues.
struct sfn5122f_queue_ops {
    errval_t (*update_txtail)(struct sfn5122f_queue*, size_t);
    errval_t (*update_rxtail)(struct sfn5122f_queue*, size_t);
};
35
// One memory region registered with the queue; kept in a per-queue
// singly linked list (see sfn5122f_queue.regions).
struct region_entry {
    uint32_t rid;               // devif region id
    lpaddr_t phys;              // physical base address of the region
    size_t size;                // region size in bytes
    uint64_t buftbl_idx;        // first buffer-table index used by this region
    struct capref cap;          // capability backing the region
    struct region_entry* next;  // next registered region, NULL-terminated
};
44
// All software state for one sfn5122f queue triple (TX ring, RX ring,
// event ring) plus the devif bookkeeping needed to drive it.
struct sfn5122f_queue {
    struct devq q;              // generic devif queue state
    // TX descriptor ring: one mapping viewed with either the userspace or
    // the kernel descriptor layout (selected by the 'userspace' flag below).
    union {
        sfn5122f_q_tx_user_desc_array_t* user;
        sfn5122f_q_tx_ker_desc_array_t* ker;
    } tx_ring;
    struct devq_buf*                tx_bufs; // shadow copies of enqueued TX buffers
    uint16_t                        tx_head; // next TX slot expected to complete
    uint16_t                        tx_tail; // next free TX slot
    size_t                          tx_size; // number of TX descriptors

    // RX descriptor ring; same dual view as tx_ring.
    union {
        sfn5122f_q_rx_user_desc_array_t* user;
        sfn5122f_q_rx_ker_desc_array_t* ker;
    } rx_ring;
    struct devq_buf*                rx_bufs; // shadow copies of enqueued RX buffers
    uint16_t                        rx_head; // next RX slot expected to complete
    uint16_t                        rx_tail; // next free RX slot
    uint16_t                        rx_size; // number of RX descriptors

    sfn5122f_q_event_entry_array_t* ev_ring; // event queue ring
    uint32_t                        ev_head; // next event entry to inspect
    uint32_t                        ev_tail;
    size_t                          ev_size; // number of event entries

    struct sfn5122f_queue_ops       ops;     // tail-update (doorbell) hooks
    void*                           opaque;
    bool                            userspace; // true: user ring layout, false: kernel

    // For batching of TX events, maximum of 32
    // entries since there can be a maximum of
    // TX_CACHE descriptors per event
    struct devq_buf bufs[32];
    uint8_t last_deq; // last deq from buffer
    uint8_t num_left; // entries of bufs[] not yet handed back to the caller

    // state for devif interface
    struct sfn5122f_devif_binding* b;
    struct sfn5122f_devif_rpc_client* rpc;
    volatile bool bound;     // set once the devif binding is established

    // interrupts
    uint8_t core;
    uint8_t vector;

    // callback
    sfn5122f_event_cb_t cb;

    // Direct interface fields
    uint16_t id;                  // queue id
    struct capref frame;          // frame capability backing the rings
    sfn5122f_t *device;           // device handle for register access
    void* device_va;              // mapped device register window
    struct region_entry* regions; // list of registered memory regions
};
101
// Convenience alias used by the inline helpers below.
typedef struct sfn5122f_queue sfn5122f_queue_t;
103
104 static inline sfn5122f_queue_t* sfn5122f_queue_init(void* tx, 
105                                                     size_t tx_size,
106                                                     void* rx, 
107                                                     size_t rx_size, 
108                                                     void* ev, 
109                                                     size_t ev_size,
110                                                     struct sfn5122f_queue_ops* ops, 
111                                                     bool userspace)
112 {
113
114     sfn5122f_queue_t* q = malloc(sizeof(*q));
115
116     if (userspace) {
117         q->tx_ring.user = tx;
118     } else {
119         q->tx_ring.ker = tx;
120     }
121     q->tx_bufs = malloc(sizeof(struct devq_buf) * tx_size);
122     q->tx_head = 0;
123     q->tx_tail = 0;
124     q->tx_size = tx_size;
125
126     if (userspace) {
127         q->rx_ring.user = rx;
128     } else {
129         q->rx_ring.ker = rx;
130     }
131     q->rx_bufs = malloc(sizeof(struct devq_buf) * rx_size);
132     q->rx_head = 0;
133     q->rx_tail = 0;
134     q->rx_size = rx_size;
135   
136     q->ev_ring = ev;
137     q->ev_head = 0;
138     q->ev_tail = 0;
139     q->ev_size = ev_size;
140     q->userspace = userspace; 
141     q->num_left = 0;
142     q->last_deq = 0;
143
144     q -> ops = *ops;
145
146     // Initialize ring memory with 0xff
147     if(!userspace){
148        memset(tx, 0xff, tx_size * sfn5122f_q_tx_ker_desc_size);
149        memset(rx, 0xff, rx_size * sfn5122f_q_rx_ker_desc_size);
150     }else{
151        memset(tx, 0xff, tx_size * sfn5122f_q_tx_user_desc_size);
152        memset(rx, 0xff, rx_size * sfn5122f_q_rx_user_desc_size);
153     }
154     /* all 0 is potential valid event */
155     memset(ev, 0xff, ev_size * sfn5122f_q_event_entry_size);
156     return q;
157 }
158
159
160 static inline errval_t sfn5122f_queue_free(struct sfn5122f_queue* q)
161 {
162     errval_t err;
163
164     // only one cap that is mapped (TX)
165     if (q->userspace) {
166         err = vspace_unmap(q->tx_ring.user);  
167     } else {
168         err = vspace_unmap(q->tx_ring.ker);  
169     }
170     if (err_is_fail(err)) {
171         return err;
172     }   
173     free(q->rx_bufs);
174     free(q->tx_bufs);
175     free(q);
176
177     return SYS_ERR_OK;
178 }
179
180
181 static inline uint8_t sfn5122f_get_event_code(sfn5122f_queue_t* queue)
182 {             
183        sfn5122f_q_event_entry_t ev;
184        ev = queue->ev_ring[queue->ev_head];
185        return sfn5122f_q_event_entry_ev_code_extract(ev);
186 }
187
188
189 static inline errval_t sfn5122f_queue_bump_txtail(sfn5122f_queue_t* q)
190 {
191     return q->ops.update_txtail(q, q->tx_tail);
192 }
193
194
195 static inline errval_t sfn5122f_queue_bump_rxtail(sfn5122f_queue_t* q)
196 {
197     return q->ops.update_rxtail(q, q->rx_tail);
198 }
199
200
201 static inline errval_t sfn5122f_handle_drv_ev(sfn5122f_queue_t* q, uint16_t n)
202 {   
203     size_t ev_head = q->ev_head;
204
205     sfn5122f_q_event_entry_t code;
206     code = q->ev_ring[ev_head]; 
207
208     if (sfn5122f_q_driver_ev_driver_ev_subcode_extract(code) == 2) {
209         printf("Event queue init done %d \n", n);
210     }
211
212     if (sfn5122f_q_driver_ev_driver_ev_subcode_extract(code) == 9) {
213         printf("Packet neither TCP nor UPD %d \n", n);
214     }
215     
216     if (sfn5122f_q_driver_ev_driver_ev_subcode_extract(code) == 14) {
217         printf("RX error %d \n", n);
218         return SFN_ERR_RX_PKT;
219     }
220
221     if (sfn5122f_q_driver_ev_driver_ev_subcode_extract(code) == 15) {
222         printf("TX error %d \n", n);
223         return SFN_ERR_TX_PKT;
224     }
225
226     memset(code, 0xff, sfn5122f_q_event_entry_size);
227     return SYS_ERR_OK;
228
229 }
230
231
232
233 static inline errval_t sfn5122f_queue_handle_mcdi_event(sfn5122f_queue_t* q)
234 {
235     // TODO handle different events    
236     size_t ev_head = q->ev_head;
237     sfn5122f_q_event_entry_t ev;
238     uint64_t reg;
239     ev = q->ev_ring[ev_head]; 
240     reg = sfn5122f_q_event_entry_ev_data_extract(ev);
241     memset(ev, 0xff, sfn5122f_q_event_entry_size);
242
243     return SYS_ERR_OK;
244
245 }
246
247 /*    RX      */
248 static inline int sfn5122f_queue_add_user_rxbuf_devif(sfn5122f_queue_t* q, 
249                                                       uint32_t buf_id,
250                                                       uint16_t offset,
251                                                       regionid_t rid,
252                                                       bufferid_t devq_bid,
253                                                       lpaddr_t base,
254                                                       size_t len,
255                                                       uint64_t flags)
256 {
257     struct devq_buf* buf;
258     sfn5122f_q_rx_user_desc_t d;
259     size_t tail = q->rx_tail;
260
261     d = q->rx_ring.user[tail];
262     buf = &q->rx_bufs[tail];
263
264     buf->rid = rid;
265     buf->bid = devq_bid;
266     buf->addr = base;
267     buf->len = len;
268     buf->flags = flags;
269     sfn5122f_q_rx_user_desc_rx_user_buf_id_insert(d, buf_id);
270     sfn5122f_q_rx_user_desc_rx_user_2byte_offset_insert(d, offset >> 1);
271     q->rx_tail = (tail + 1) % q->rx_size;
272     return 0;
273 }
274
275 static inline int sfn5122f_queue_add_rxbuf_devif(sfn5122f_queue_t* q, 
276                                                  regionid_t rid,
277                                                  bufferid_t bid,
278                                                  lpaddr_t addr,
279                                                  size_t len,
280                                                  uint64_t flags)
281 {
282     struct devq_buf* buf;
283     sfn5122f_q_rx_ker_desc_t d;
284     size_t tail = q->rx_tail;
285
286     d = q->rx_ring.ker[tail];
287
288     buf = &q->rx_bufs[tail];
289
290     buf->rid = rid;
291     buf->bid = bid;
292     buf->addr = addr;
293     buf->len = len;
294     buf->flags = flags;
295
296     sfn5122f_q_rx_ker_desc_rx_ker_buf_addr_insert(d, addr);
297     sfn5122f_q_rx_ker_desc_rx_ker_buf_region_insert(d, 0);
298     // TODO: Check size
299     sfn5122f_q_rx_ker_desc_rx_ker_buf_size_insert(d, len);
300     q->rx_tail = (tail + 1) % q->rx_size;
301     return 0;
302 }
303
304 static inline errval_t sfn5122f_queue_handle_rx_ev_devif(sfn5122f_queue_t* q, 
305                                                          regionid_t* rid,
306                                                          bufferid_t* bid,
307                                                          lpaddr_t* base,
308                                                          size_t* len,
309                                                          uint64_t* flags)
310 {   
311     /*  Only one event is generated even if there is more than one
312         descriptor per packet  */
313     struct devq_buf* buf;
314     size_t ev_head = q->ev_head;
315     size_t rx_head;
316     sfn5122f_q_rx_ev_t ev;
317     sfn5122f_q_rx_user_desc_t d_user = 0;
318
319     ev = q->ev_ring[ev_head];
320     rx_head = sfn5122f_q_rx_ev_rx_ev_desc_ptr_extract(ev);
321
322     buf = &q->rx_bufs[rx_head];
323
324     *rid = buf->rid;
325     *bid = buf->bid;
326     *base = buf->addr;
327     *flags = buf->flags;
328
329     if(!sfn5122f_q_rx_ev_rx_ev_pkt_ok_extract(ev)) {   
330          // TODO error handling
331          q->rx_head = (rx_head + 1) % q->rx_size;
332          if (sfn5122f_q_rx_ev_rx_ev_tobe_disc_extract(ev)) {
333             // packet discared by softare -> ok
334             return SFN_ERR_RX_DISCARD;
335          }
336
337          printf("Packet not ok \n");
338          if (sfn5122f_q_rx_ev_rx_ev_buf_owner_id_extract(ev)) {
339              printf("Wrong owner \n");
340          }
341          return SFN_ERR_RX_PKT;
342     }
343
344     *len = sfn5122f_q_rx_ev_rx_ev_byte_ctn_extract(ev);
345     /* Length of 0 is treated as 16384 bytes */
346     if (*len == 0) {
347         *len = 16384;
348     }
349
350     rx_head = sfn5122f_q_rx_ev_rx_ev_desc_ptr_extract(ev);
351     d_user = q->rx_ring.user[rx_head];  
352
353     buf = &q->rx_bufs[rx_head];
354
355     *rid = buf->rid;
356     *bid = buf->bid;
357     *base = buf->addr;
358     *flags = buf->flags;
359
360     memset(ev, 0xff, sfn5122f_q_event_entry_size);
361     memset(d_user, 0 , sfn5122f_q_rx_user_desc_size);
362
363     q->rx_head = (rx_head + 1) % q->rx_size;
364     return SYS_ERR_OK;
365 }
366
367 static inline void sfn5122f_queue_bump_evhead(sfn5122f_queue_t* q)
368 {     
369      q->ev_head = (q->ev_head +1) % q->ev_size;
370 }
371
372 static inline size_t sfn5122f_queue_free_rxslots(sfn5122f_queue_t* q)
373 {
374     size_t head = q->rx_head;
375     size_t tail = q->rx_tail;
376     size_t size = q->rx_size;
377
378     if (tail >= head) {
379         return size - (tail - head) -1; 
380     } else {
381         return size - (tail + size - head) -1; 
382     }
383 }
384
385
386 /*   TX       */
387 static inline size_t sfn5122f_queue_free_txslots(sfn5122f_queue_t* q)
388 {
389     size_t head = q->tx_head;
390     size_t tail = q->tx_tail;
391     size_t size = q->tx_size;
392
393     if (tail >= head) {
394         return size - (tail - head) - 1; 
395     } else {
396         return size - (tail + size - head) - 1; 
397     }
398
399 }
400
// True if the TX event completed more than zero descriptors beyond the
// software head, i.e. the hardware pointer moved (distance taken modulo
// the ring size).
static inline bool is_batched(size_t size, uint16_t tx_head, uint16_t q_tx_head)
{
    size_t dist;
    if (tx_head >= q_tx_head) {
        dist = (size_t)(tx_head - q_tx_head);
    } else {
        dist = (size_t)tx_head + size - q_tx_head;
    }
    return dist > 0;
}
409
/**
 * \brief Handle a TX completion event; returns the (first) completed buffer
 *        via the out-parameters and stashes any further completed buffers
 *        of a batch in q->bufs[] / q->num_left.
 *
 * \returns SYS_ERR_OK on success, SFN_ERR_TX_PKT on a packet error
 */
static inline errval_t sfn5122f_queue_handle_tx_ev_devif(sfn5122f_queue_t* q, 
                                                         regionid_t* rid,
                                                         bufferid_t* bid,
                                                         lpaddr_t* base,
                                                         size_t* len,
                                                         uint64_t* flags)
{
    /*  Only one event is generated even if there is more than one
        descriptor per packet  */
    // NOTE(review): q->ev_head is uint32_t but is narrowed to uint16_t
    // here — fine only while ev_size <= 65536; confirm.
    uint16_t ev_head = q->ev_head;
    uint16_t tx_head;
    struct devq_buf* buf;
    sfn5122f_q_tx_ev_t ev;
    sfn5122f_q_tx_user_desc_t d_user= 0;
   
    ev = q->ev_ring[ev_head];
    // Descriptor pointer from the event: the last TX slot the hardware
    // has consumed.
    tx_head = sfn5122f_q_tx_ev_tx_ev_desc_ptr_extract(ev);
    

    // NOTE(review): the out-parameters are filled from the slot at the
    // software head (q->tx_head), not from tx_head reported by the event;
    // for single completions these coincide — confirm for batches.
    buf = &q->tx_bufs[q->tx_head];

    //printf("Tx_head %d q->tx_head %d size %ld \n", tx_head, q->tx_head,
    //        q->tx_size);

    *rid = buf->rid;
    *bid = buf->bid;
    *base = buf->addr;
    *flags = buf->flags;
    *len = buf->len;

    if (sfn5122f_q_tx_ev_tx_ev_pkt_err_extract(ev)){     
        // Error: skip past the slot the event points at.
        q->tx_head = (tx_head +1) % q->tx_size;
        return SFN_ERR_TX_PKT;
    }

    if (sfn5122f_q_tx_ev_tx_ev_comp_extract(ev) == 1){  
        // TX Event is a batch
        if (is_batched(q->tx_size, tx_head, q->tx_head)) {
            uint8_t index = 0;
            q->num_left = 0;
            d_user = q->tx_ring.user[q->tx_head];  
            // Copy every completed shadow buffer into q->bufs[] so the
            // caller can drain them one at a time (see last_deq/num_left).
            while (q->tx_head != (tx_head + 1) % q->tx_size ) {
                buf = &q->tx_bufs[q->tx_head];
                q->bufs[index].rid = buf->rid;
                q->bufs[index].bid = buf->bid;
                q->bufs[index].addr = buf->addr;
                q->bufs[index].flags = buf->flags;
                q->bufs[index].len = buf->len;
                // NOTE(review): d_user is re-read with the fixed index
                // tx_head on every iteration, overwriting the q->tx_head
                // descriptor fetched above — looks suspicious; the memset
                // below assumes d_user points at the FIRST descriptor of
                // the batch. Confirm against hardware behavior.
                d_user = q->tx_ring.user[tx_head];  
                index++;
                q->tx_head = (q->tx_head + 1) % q->tx_size;
                q->num_left++;
            }          
            q->last_deq = 0;  
            // Clear all descriptors of the batch in one go; assumes the
            // batch does not wrap around the ring — TODO confirm.
            memset(d_user, 0 , sfn5122f_q_tx_user_desc_size*q->num_left);
        } else { // Single descriptor
            d_user = q->tx_ring.user[tx_head];  
            memset(d_user, 0 , sfn5122f_q_tx_user_desc_size);
        }

        // reset entry event in queue
        memset(ev, 0xff, sfn5122f_q_event_entry_size);
        q->tx_head = (tx_head +1) % q->tx_size;
    }

    return SYS_ERR_OK;
}
477
478 static inline int sfn5122f_queue_add_txbuf_devif(sfn5122f_queue_t* q, 
479                                                  regionid_t rid,
480                                                  bufferid_t bid,
481                                                  lpaddr_t base,
482                                                  size_t len,
483                                                  uint64_t flags)
484 {
485     struct devq_buf* buf;
486     sfn5122f_q_tx_ker_desc_t d;
487     size_t tail = q->tx_tail;
488
489     d = q->tx_ring.ker[tail];
490  
491     buf = &q->tx_bufs[tail];
492    
493     bool last = flags & NETIF_TXFLAG_LAST;    
494     buf->rid = rid;
495     buf->bid = bid;
496     buf->addr = base;
497     buf->len = len;
498     buf->flags = flags;
499
500     sfn5122f_q_tx_ker_desc_tx_ker_buf_addr_insert(d, base);
501     sfn5122f_q_tx_ker_desc_tx_ker_byte_count_insert(d, len);
502     sfn5122f_q_tx_ker_desc_tx_ker_cont_insert(d, !last);
503     sfn5122f_q_tx_ker_desc_tx_ker_buf_region_insert(d, 0);
504
505     __sync_synchronize();
506  
507     q->tx_tail = (tail + 1) % q->tx_size;
508     return 0;
509 }
510
511
512 static inline int sfn5122f_queue_add_user_txbuf_devif(sfn5122f_queue_t* q, 
513                                                       uint64_t buftbl_idx, 
514                                                       uint64_t offset,
515                                                       regionid_t rid,
516                                                       bufferid_t devq_bid,
517                                                       lpaddr_t base,
518                                                       size_t len,
519                                                       uint64_t flags)
520 {
521     
522     //printf("Add tx_buf %lx \n", base);
523     sfn5122f_q_tx_user_desc_t d;
524     struct devq_buf* buf;
525     size_t tail = q->tx_tail;
526
527     d = q->tx_ring.ker[tail];
528     buf = &q->tx_bufs[tail];
529    
530     bool last = flags & NETIF_TXFLAG_LAST;    
531     buf->rid = rid;
532     buf->bid = devq_bid;
533     buf->addr = base;
534     buf->len = len;
535     buf->flags = flags;
536
537     sfn5122f_q_tx_user_desc_tx_user_sw_ev_en_insert(d, 0);
538     sfn5122f_q_tx_user_desc_tx_user_cont_insert(d, !last);
539     sfn5122f_q_tx_user_desc_tx_user_byte_cnt_insert(d, len);
540     sfn5122f_q_tx_user_desc_tx_user_buf_id_insert(d, buftbl_idx);
541     sfn5122f_q_tx_user_desc_tx_user_byte_ofs_insert(d, offset);
542
543     __sync_synchronize();
544  
545     q->tx_tail = (tail + 1) % q->tx_size;
546     return 0;
547 }
548
549 #endif //ndef SFN5122F_CHANNEL_H_
550