0ef8dc01aedb5d39eb39eeef1cb3d25e9d6b3057
[barrelfish] / lib / devif / backends / net / solarflare / hw_queue.h
1 /*
 * Copyright (c) 2007-2011, ETH Zurich.
3  * All rights reserved.
4  *
5  * This file is distributed under the terms in the attached LICENSE file.
6  * If you do not find this file, copies can be found by writing to:
7  * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
8  */
9
10 #ifndef SFN5122F_CHANNEL_H_
11 #define SFN5122F_CHANNEL_H_
12
13
14
15 #include <string.h>
16 #include <stdlib.h>
17
18 #include <net_interfaces/net_interfaces.h>
19 #include <devif/queue_interface.h>
20 #include <dev/sfn5122f_q_dev.h>
21 #include <dev/sfn5122f_dev.h>
22 #include "helper.h"
23
24 #define MTU_MAX 2048
25
// Forward declarations; full definitions live in generated stubs and the
// backend implementation.
struct sfn5122f_devif_binding;
struct sfn5122f_devif_rpc_client;
struct sfn5122f_queue;
struct peridoc_event; // NOTE(review): likely a typo of "periodic_event" -- verify against users before renaming
30
/**
 * Backend-specific callbacks used to publish a new tail index of the
 * TX/RX descriptor ring to the hardware (doorbell).  The size_t
 * argument is the new tail index.
 */
struct sfn5122f_queue_ops {
    errval_t (*update_txtail)(struct sfn5122f_queue*, size_t);
    errval_t (*update_rxtail)(struct sfn5122f_queue*, size_t);
};
35
/**
 * Node of a singly linked list describing a memory region registered
 * with this queue.
 */
struct region_entry {
    uint32_t rid;                 // region id assigned by devif
    lpaddr_t phys;                // physical base address of the region
    size_t size;                  // region size in bytes
    uint64_t buftbl_idx;          // first buffer-table index used by this region
    struct capref cap;            // capability backing the region
    struct region_entry* next;    // next registered region, NULL-terminated
};
44
/**
 * Software state for one SFN5122F queue: TX descriptor ring, RX
 * descriptor ring, the associated event ring, and the bookkeeping the
 * devif backend keeps on top of them.
 */
struct sfn5122f_queue {
    struct devq q;                  // embedded generic devif queue state
    // TX descriptor ring; which union member is valid depends on the
    // 'userspace' flag below.
    union {
        sfn5122f_q_tx_user_desc_array_t* user;
        sfn5122f_q_tx_ker_desc_array_t* ker;
    } tx_ring;
    struct devq_buf*                tx_bufs;  // per-descriptor buffer metadata
    uint16_t                        tx_head;  // advanced on TX completion
    uint16_t                        tx_tail;  // advanced when enqueueing
    size_t                          tx_size;  // number of TX descriptors

    // RX descriptor ring (same userspace/kernel duality as tx_ring)
    union {
        sfn5122f_q_rx_user_desc_array_t* user;
        sfn5122f_q_rx_ker_desc_array_t* ker;
    } rx_ring;
    struct devq_buf*                rx_bufs;  // per-descriptor buffer metadata
    uint16_t                        rx_head;  // advanced on RX completion
    uint16_t                        rx_tail;  // advanced when enqueueing
    uint16_t                        rx_size;  // number of RX descriptors
    uint8_t                         rx_batch_size;


    sfn5122f_q_event_entry_array_t* ev_ring;  // hardware event ring
    uint32_t                        ev_head;
    uint32_t                        ev_tail;
    size_t                          ev_size;  // number of event entries

    struct sfn5122f_queue_ops       ops;      // doorbell callbacks
    void*                           opaque;
    bool                            userspace; // userspace descriptor format?

    // For batching of TX events: maximum of 32
    // entries since there can be a maximum of
    // TX_CACHE descriptors per event
    struct devq_buf bufs[32];
    uint8_t last_deq; // last entry dequeued from bufs
    uint8_t num_left; // batched entries not yet handed out

    // state for devif interface
    struct sfn5122f_devif_binding* b;
    struct sfn5122f_devif_rpc_client* rpc;
    volatile bool bound;            // set once the binding is established

    // interrupts
    uint8_t core;
    uint8_t vector;

    // callback invoked for events
    sfn5122f_event_cb_t cb;

    // Direct interface fields
    uint16_t id;                    // queue index
    uint64_t mac;                   // device MAC address -- TODO confirm encoding
    struct capref frame;            // capability backing the queue memory
    sfn5122f_t *device;             // mackerel handle for register access
    void* device_va;                // mapped register space
    struct region_entry* regions;   // list of registered memory regions
};

// Convenience alias used throughout this header.
typedef struct sfn5122f_queue sfn5122f_queue_t;
106
107 static inline sfn5122f_queue_t* sfn5122f_queue_init(void* tx, 
108                                                     size_t tx_size,
109                                                     void* rx, 
110                                                     size_t rx_size, 
111                                                     void* ev, 
112                                                     size_t ev_size,
113                                                     struct sfn5122f_queue_ops* ops, 
114                                                     bool userspace)
115 {
116
117     sfn5122f_queue_t* q = malloc(sizeof(*q));
118
119     if (userspace) {
120         q->tx_ring.user = tx;
121     } else {
122         q->tx_ring.ker = tx;
123     }
124     q->tx_bufs = malloc(sizeof(struct devq_buf) * tx_size);
125     q->tx_head = 0;
126     q->tx_tail = 0;
127     q->tx_size = tx_size;
128
129     if (userspace) {
130         q->rx_ring.user = rx;
131     } else {
132         q->rx_ring.ker = rx;
133     }
134     q->rx_bufs = malloc(sizeof(struct devq_buf) * rx_size);
135     q->rx_head = 0;
136     q->rx_tail = 0;
137     q->rx_batch_size = 0;
138     q->rx_size = rx_size;
139   
140     q->ev_ring = ev;
141     q->ev_head = 0;
142     q->ev_tail = 0;
143     q->ev_size = ev_size;
144     q->userspace = userspace; 
145     q->num_left = 0;
146     q->last_deq = 0;
147
148     q -> ops = *ops;
149
150     // Initialize ring memory with 0xff
151     if(!userspace){
152        memset(tx, 0xff, tx_size * sfn5122f_q_tx_ker_desc_size);
153        memset(rx, 0xff, rx_size * sfn5122f_q_rx_ker_desc_size);
154     }else{
155        memset(tx, 0xff, tx_size * sfn5122f_q_tx_user_desc_size);
156        memset(rx, 0xff, rx_size * sfn5122f_q_rx_user_desc_size);
157     }
158     /* all 0 is potential valid event */
159     memset(ev, 0xff, ev_size * sfn5122f_q_event_entry_size);
160     return q;
161 }
162
163
164 static inline errval_t sfn5122f_queue_free(struct sfn5122f_queue* q)
165 {
166     errval_t err;
167
168     // only one cap that is mapped (TX)
169     if (q->userspace) {
170         err = vspace_unmap(q->tx_ring.user);  
171     } else {
172         err = vspace_unmap(q->tx_ring.ker);  
173     }
174     if (err_is_fail(err)) {
175         return err;
176     }   
177     free(q->rx_bufs);
178     free(q->tx_bufs);
179     free(q);
180
181     return SYS_ERR_OK;
182 }
183
184
185 static inline uint8_t sfn5122f_get_event_code(sfn5122f_queue_t* queue)
186 {             
187        sfn5122f_q_event_entry_t ev;
188        ev = queue->ev_ring[queue->ev_head];
189        return sfn5122f_q_event_entry_ev_code_extract(ev);
190 }
191
192
193 static inline errval_t sfn5122f_queue_bump_txtail(sfn5122f_queue_t* q)
194 {
195     return q->ops.update_txtail(q, q->tx_tail);
196 }
197
198
199 static inline errval_t sfn5122f_queue_bump_rxtail(sfn5122f_queue_t* q)
200 {
201     return q->ops.update_rxtail(q, q->rx_tail);
202 }
203
204
205 static inline errval_t sfn5122f_handle_drv_ev(sfn5122f_queue_t* q, uint16_t n)
206 {   
207     size_t ev_head = q->ev_head;
208
209     sfn5122f_q_event_entry_t code;
210     code = q->ev_ring[ev_head]; 
211
212     if (sfn5122f_q_driver_ev_driver_ev_subcode_extract(code) == 2) {
213         printf("Event queue init done %d \n", n);
214     }
215
216     if (sfn5122f_q_driver_ev_driver_ev_subcode_extract(code) == 9) {
217         printf("Packet neither TCP nor UPD %d \n", n);
218     }
219     
220     if (sfn5122f_q_driver_ev_driver_ev_subcode_extract(code) == 14) {
221         printf("RX error %d \n", n);
222         return NIC_ERR_RX_PKT;
223     }
224
225     if (sfn5122f_q_driver_ev_driver_ev_subcode_extract(code) == 15) {
226         printf("TX error %d \n", n);
227         return NIC_ERR_TX_PKT;
228     }
229
230     memset(code, 0xff, sfn5122f_q_event_entry_size);
231     return SYS_ERR_OK;
232
233 }
234
235
236
237 static inline errval_t sfn5122f_queue_handle_mcdi_event(sfn5122f_queue_t* q)
238 {
239     // TODO handle different events    
240     size_t ev_head = q->ev_head;
241     sfn5122f_q_event_entry_t ev;
242     uint64_t reg;
243     ev = q->ev_ring[ev_head]; 
244     reg = sfn5122f_q_event_entry_ev_data_extract(ev);
245     memset(ev, 0xff, sfn5122f_q_event_entry_size);
246
247     return SYS_ERR_OK;
248
249 }
250
251 /*    RX      */
252 static inline int sfn5122f_queue_add_user_rxbuf_devif(sfn5122f_queue_t* q, 
253                                                       uint32_t buf_id,
254                                                       uint16_t b_off,
255                                                       regionid_t rid,
256                                                       genoffset_t offset,
257                                                       genoffset_t length,
258                                                       genoffset_t valid_data,
259                                                       genoffset_t valid_length,
260                                                       uint64_t flags)
261 {
262     struct devq_buf* buf;
263     sfn5122f_q_rx_user_desc_t d;
264     size_t tail = q->rx_tail;
265
266     d = q->rx_ring.user[tail];
267     buf = &q->rx_bufs[tail];
268
269     buf->rid = rid;
270     buf->offset = offset;
271     buf->length = length;
272     buf->valid_data = valid_data;
273     buf->valid_length = valid_length;
274     buf->flags = flags;
275     sfn5122f_q_rx_user_desc_rx_user_buf_id_insert(d, buf_id);
276     sfn5122f_q_rx_user_desc_rx_user_2byte_offset_insert(d, b_off >> 1);
277     q->rx_tail = (tail + 1) % q->rx_size;
278     return 0;
279 }
280
281 static inline int sfn5122f_queue_add_rxbuf_devif(sfn5122f_queue_t* q, 
282                                                  lpaddr_t addr,
283                                                  regionid_t rid,
284                                                  genoffset_t offset,
285                                                  genoffset_t length,
286                                                  genoffset_t valid_data,
287                                                  genoffset_t valid_length,
288                                                  uint64_t flags)
289 {
290     struct devq_buf* buf;
291     sfn5122f_q_rx_ker_desc_t d;
292     size_t tail = q->rx_tail;
293
294     d = q->rx_ring.ker[tail];
295
296     buf = &q->rx_bufs[tail];
297
298     buf->rid = rid;
299     buf->offset = offset;
300     buf->length = length;
301     buf->valid_data = valid_data;
302     buf->valid_length = valid_length;
303     buf->flags = flags;
304
305     sfn5122f_q_rx_ker_desc_rx_ker_buf_addr_insert(d, addr);
306     sfn5122f_q_rx_ker_desc_rx_ker_buf_region_insert(d, 0);
307     // TODO: Check size
308     sfn5122f_q_rx_ker_desc_rx_ker_buf_size_insert(d, length);
309     q->rx_tail = (tail + 1) % q->rx_size;
310     return 0;
311 }
312
/**
 * \brief Process the RX completion event at the head of the event ring.
 *
 * Returns the metadata of the buffer the packet was received into via
 * the output parameters and advances rx_head past the completed
 * descriptor.
 *
 * \returns SYS_ERR_OK on success,
 *          NIC_ERR_RX_DISCARD if the packet was discarded,
 *          NIC_ERR_RX_PKT on any other RX error.
 */
static inline errval_t sfn5122f_queue_handle_rx_ev_devif(sfn5122f_queue_t* q, 
                                                         regionid_t* rid,
                                                         genoffset_t* offset,
                                                         genoffset_t* length,
                                                         genoffset_t* valid_data,
                                                         genoffset_t* valid_length,
                                                         uint64_t* flags)
{   
    /*  Only one event is generated even if there is more than one
        descriptor per packet  */
    struct devq_buf* buf;
    size_t rx_head;
    sfn5122f_q_rx_ev_t ev;

    ev = q->ev_ring[q->ev_head];
    // index of the RX descriptor this event acknowledges
    rx_head = sfn5122f_q_rx_ev_rx_ev_desc_ptr_extract(ev);

    buf = &q->rx_bufs[rx_head];

    *rid = buf->rid;
    *offset = buf->offset;
    *length = buf->length;
    *valid_data = buf->valid_data;
    *flags = buf->flags;

    if(!sfn5122f_q_rx_ev_rx_ev_pkt_ok_extract(ev)) {   
         // TODO error handling
         // NOTE(review): *valid_length is left unassigned on these error
         // paths and the event entry is not reset -- verify callers never
         // read it after an error return.
         q->rx_head = (rx_head + 1) % q->rx_size;
         if (sfn5122f_q_rx_ev_rx_ev_tobe_disc_extract(ev)) {
            // packet discarded by software -> ok
            return NIC_ERR_RX_DISCARD;
         }

         if (sfn5122f_q_rx_ev_rx_ev_buf_owner_id_extract(ev)) {
             printf("Wrong owner \n");
         }
         return NIC_ERR_RX_PKT;
    }

    *valid_length = sfn5122f_q_rx_ev_rx_ev_byte_ctn_extract(ev);
    /* Length of 0 is treated as 16384 bytes */
    if (*valid_length == 0) {
        *valid_length = 16384;
    }

    /* only have to reset the event entry; 0xff marks it unused */
    memset(ev, 0xff, sfn5122f_q_event_entry_size);

    q->rx_head = (rx_head + 1) % q->rx_size;
    return SYS_ERR_OK;
}
376
377 static inline void sfn5122f_queue_bump_evhead(sfn5122f_queue_t* q)
378 {     
379      q->ev_head = (q->ev_head +1) % q->ev_size;
380 }
381
382 static inline size_t sfn5122f_queue_free_rxslots(sfn5122f_queue_t* q)
383 {
384     size_t head = q->rx_head;
385     size_t tail = q->rx_tail;
386     size_t size = q->rx_size;
387
388     if (tail >= head) {
389         return size - (tail - head) -1; 
390     } else {
391         return size - (tail + size - head) -1; 
392     }
393 }
394
395
396 /*   TX       */
397 static inline size_t sfn5122f_queue_free_txslots(sfn5122f_queue_t* q)
398 {
399     size_t head = q->tx_head;
400     size_t tail = q->tx_tail;
401     size_t size = q->tx_size;
402
403     if (tail >= head) {
404         return size - (tail - head) - 1; 
405     } else {
406         return size - (tail + size - head) - 1; 
407     }
408
409 }
410
/* True iff the completion event at tx_head acknowledges more than the
 * single descriptor at q_tx_head, i.e. the ring distance from
 * q_tx_head forward to tx_head is non-zero. */
static inline bool is_batched(size_t size, uint16_t tx_head, uint16_t q_tx_head)
{
    if (tx_head < q_tx_head) {
        // wrapped around the ring
        return ((tx_head + size) - q_tx_head) > 0;
    }
    return tx_head != q_tx_head;
}
419
/**
 * \brief Process the TX completion event at the head of the event ring.
 *
 * A single event may acknowledge a whole batch of TX descriptors; in
 * that case the completed buffers are copied into q->bufs and
 * q->num_left is set so the caller can drain them one by one.  The
 * metadata of the buffer at the current tx_head is returned via the
 * output parameters.
 *
 * NOTE(review): *valid_length is never assigned in this function --
 * verify that callers do not rely on it after a TX event.
 *
 * \returns SYS_ERR_OK on success, NIC_ERR_TX_PKT on a TX error event.
 */
static inline errval_t sfn5122f_queue_handle_tx_ev_devif(sfn5122f_queue_t* q, 
                                                         regionid_t* rid,
                                                         genoffset_t* offset,
                                                         genoffset_t* length,
                                                         genoffset_t* valid_data,
                                                         genoffset_t* valid_length,
                                                         uint64_t* flags)
{
    /*  Only one event is generated even if there is more than one
        descriptor per packet  */
    uint16_t ev_head = q->ev_head;
    uint16_t tx_head;
    struct devq_buf* buf;
    sfn5122f_q_tx_ev_t ev;
    // NOTE(review): d_user/d are assigned below but never read afterwards
    sfn5122f_q_tx_user_desc_t d_user= 0;
    sfn5122f_q_tx_ker_desc_t d = 0;
   
    ev = q->ev_ring[ev_head];
    // descriptor index the hardware has completed up to
    tx_head = sfn5122f_q_tx_ev_tx_ev_desc_ptr_extract(ev);
    

    buf = &q->tx_bufs[q->tx_head];

    *rid = buf->rid;
    *offset = buf->offset;
    *length = buf->length;
    *valid_data = buf->valid_data;
    *flags = buf->flags;

    if (sfn5122f_q_tx_ev_tx_ev_pkt_err_extract(ev)){     
        q->tx_head = (tx_head +1) % q->tx_size;
        return NIC_ERR_TX_PKT;
    }

    if (sfn5122f_q_tx_ev_tx_ev_comp_extract(ev) == 1){  
        // TX Event is a batch
        if (is_batched(q->tx_size, tx_head, q->tx_head)) {
            uint8_t index = 0;
            q->num_left = 0;

            if (q->userspace) {
                d_user = q->tx_ring.user[q->tx_head];  
            } else {
                d = q->tx_ring.ker[q->tx_head];  
            }

            // copy every completed buffer into the batch cache q->bufs
            // (bounded by 32 entries, see struct sfn5122f_queue)
            while (q->tx_head != (tx_head + 1) % q->tx_size ) {
                buf = &q->tx_bufs[q->tx_head];
                q->bufs[index].rid = buf->rid;
                q->bufs[index].offset = buf->offset;
                q->bufs[index].valid_data = buf->valid_data;
                q->bufs[index].valid_length = buf->valid_length;
                q->bufs[index].flags = buf->flags;
                q->bufs[index].length = buf->length;
                index++;
                q->tx_head = (q->tx_head + 1) % q->tx_size;
                q->num_left++;
            }
          
            q->last_deq = 0;

            // descriptors are not zeroed here; they are overwritten on reuse
        } else { // Single descriptor
            // nothing extra to do for a single completed descriptor
        }

        // reset the event entry in the queue (0xff marks it unused)
        memset(ev, 0xff, sfn5122f_q_event_entry_size);
        q->tx_head = (tx_head +1) % q->tx_size;
    }

    return SYS_ERR_OK;
}
512
513 static inline int sfn5122f_queue_add_txbuf_devif(sfn5122f_queue_t* q, 
514                                                  lpaddr_t addr,
515                                                  regionid_t rid,
516                                                  genoffset_t offset,
517                                                  genoffset_t length,
518                                                  genoffset_t valid_data,
519                                                  genoffset_t valid_length,
520                                                  uint64_t flags)
521 {
522     struct devq_buf* buf;
523     sfn5122f_q_tx_ker_desc_t d;
524     size_t tail = q->tx_tail;
525
526     d = q->tx_ring.ker[tail];
527  
528     buf = &q->tx_bufs[tail];
529    
530     bool last = flags & NETIF_TXFLAG_LAST;    
531     buf->rid = rid;
532     buf->offset = offset;
533     buf->length = length;
534     buf->valid_data = valid_data;
535     buf->valid_length = valid_length;
536     buf->flags = flags;
537
538     sfn5122f_q_tx_ker_desc_tx_ker_buf_addr_insert(d, addr);
539     sfn5122f_q_tx_ker_desc_tx_ker_byte_count_insert(d, valid_length);
540     sfn5122f_q_tx_ker_desc_tx_ker_cont_insert(d, !last);
541     sfn5122f_q_tx_ker_desc_tx_ker_buf_region_insert(d, 0);
542
543     __sync_synchronize();
544  
545     q->tx_tail = (tail + 1) % q->tx_size;
546     return 0;
547 }
548
549
550 static inline int sfn5122f_queue_add_user_txbuf_devif(sfn5122f_queue_t* q, 
551                                                       uint64_t buftbl_idx, 
552                                                       uint64_t b_off,
553                                                       regionid_t rid,
554                                                       genoffset_t offset,
555                                                       genoffset_t length,
556                                                       genoffset_t valid_data,
557                                                       genoffset_t valid_length,
558                                                       uint64_t flags)
559 {
560     
561     //printf("Add tx_buf %lx \n", base);
562     sfn5122f_q_tx_user_desc_t d;
563     struct devq_buf* buf;
564     size_t tail = q->tx_tail;
565
566     d = q->tx_ring.ker[tail];
567     buf = &q->tx_bufs[tail];
568    
569     bool last = flags & NETIF_TXFLAG_LAST;    
570     buf->rid = rid;
571     buf->offset = offset;
572     buf->length = length;
573     buf->valid_data = valid_data;
574     buf->valid_length = valid_length;
575     buf->flags = flags;
576
577     sfn5122f_q_tx_user_desc_tx_user_sw_ev_en_insert(d, 0);
578     sfn5122f_q_tx_user_desc_tx_user_cont_insert(d, !last);
579     sfn5122f_q_tx_user_desc_tx_user_byte_cnt_insert(d, valid_length);
580     sfn5122f_q_tx_user_desc_tx_user_buf_id_insert(d, buftbl_idx);
581     sfn5122f_q_tx_user_desc_tx_user_byte_ofs_insert(d, b_off);
582
583     __sync_synchronize();
584  
585     q->tx_tail = (tail + 1) % q->tx_size;
586     return 0;
587 }
588
589 #endif //ndef SFN5122F_CHANNEL_H_
590