1 /* Copyright (c) 2017, ETH Zurich.
4 * This file is distributed under the terms in the attached LICENSE file.
5 * If you do not find this file, copies can be found by writing to:
6 * ETH Zurich D-INFK, Universitätstrasse 6, CH-8092 Zurich. Attn: Systems Group.
12 #include <net_queue_manager/net_queue_manager.h>
14 #include <dev/sfn5122f_dev.h>
15 #include <dev/sfn5122f_q_dev.h>
19 #include "sfn5122f_queue.h"
20 #include "sfn5122f_debug.h"
21 #include "buffer_tbl.h"
22 #include "sfn5122f_qdriver.h"
/* NOTE(review): this listing is elided — the queue-index variable `qi`
 * referenced throughout the file is declared on a line not visible here. */
24 /** Queue index for this manager instance */
27 /** Mackerel handle for device */
28 static sfn5122f_t *d = NULL;
30 /** Queue handle for queue management library */
31 static sfn5122f_queue_t *q;
33 /** MAC address to be used */
34 static uint64_t mac_address = 0;
36 /** Indicates if the initialization is done */
37 static bool initialized = false;
40 * Indicates whether we should rely on cache coherence for the descriptor
/* When false, descriptor-ring mappings are made non-cacheable (see setup_queue). */
43 static bool cache_coherence = true;
45 /** Indicates whether Interrupts should be used */
46 static bool use_interrupts = true;
48 /** Capability for hardware TX ring */
49 static struct capref tx_frame;
51 /** Capability for hardware RX ring */
52 static struct capref rx_frame;
54 /** Capability for hardware EV ring */
55 static struct capref ev_frame;
57 //static void* mac_virt;
/* Backing store for per-queue MAC statistics DMA'd by the card —
 * presumably filled elsewhere; not referenced in the visible code. */
58 uint64_t mac_stats_array[NUM_MAC_STATS];
61 /******************************************************************************/
/** Returns the number of free TX descriptor slots in queue `q`
 *  (net_queue_manager callback). */
63 static uint64_t find_tx_free_slot_count_fn(void)
65 return sfn5122f_queue_free_txslots(q);
/** Enqueue a scatter/gather list of `count` buffers on the TX ring.
 *  Fails up-front with ETHERSRV_ERR_CANT_TRANSMIT if the ring cannot hold
 *  all descriptors, so the operation is all-or-nothing; afterwards the
 *  hardware tail pointer is bumped once for the whole batch.
 *  NOTE(review): parameter-list continuation, local declarations and the
 *  function tail are elided from this listing. */
68 static errval_t transmit_pbuf_list_fn(struct driver_buffer *buffers,
73 if (find_tx_free_slot_count_fn() < count) {
74 return ETHERSRV_ERR_CANT_TRANSMIT;
77 for (i = 0; i < count; i++) {
78 sfn5122f_queue_add_txbuf(q, buffers[i].pa,
79 buffers[i].len, buffers[i].opaque,
/* Single doorbell write for the whole batch — cheaper than per-buffer. */
83 sfn5122f_queue_bump_txtail(q);
/** net_queue_manager callback invoked to reclaim completed TX slots.
 *  NOTE(review): body elided from this listing — behavior cannot be
 *  documented from here; see the full source. */
88 static bool handle_free_tx_slot_fn(void)
95 /******************************************************************************/
/** Returns the number of free RX descriptor slots in queue `q`
 *  (net_queue_manager callback). */
98 static uint64_t find_rx_free_slot_count_fn(void)
100 return sfn5122f_queue_free_rxslots(q);
/** Post one receive buffer (physical address `paddr`, caller cookie
 *  `opaque`) on the RX ring and bump the hardware tail pointer.
 *  Returns ETHERSRV_ERR_TOO_MANY_BUFFERS when the ring is full.
 *  NOTE(review): `vaddr` is unused in the visible code. */
103 static errval_t register_rx_buffer_fn(uint64_t paddr, void *vaddr, void *opaque)
105 if (find_rx_free_slot_count_fn() == 0) {
106 printf("SFN5122F_%d: Not enough space in RX ring, not adding buffer\n",
108 return ETHERSRV_ERR_TOO_MANY_BUFFERS;
111 sfn5122f_queue_add_rxbuf(q, paddr, opaque);
112 sfn5122f_queue_bump_rxtail(q);
118 /* polling event queue for new events */
/** Drain up to 100 events from the hardware event queue, dispatching on
 *  the event code: RX completions are handed to process_received_packet(),
 *  TX completions reclaim in-flight buffers via handle_tx_done(), and
 *  driver/MCDI/global events are forwarded to their handlers. Finally the
 *  event-queue read pointer register is updated to `q->ev_head`.
 *  NOTE(review): listing elided — switch header, local declarations,
 *  several case labels/breaks and the return are not visible here. */
119 static size_t check_for_new_events(void)
125 struct driver_rx_buffer buf[16];
127 // need to block until initialized
129 return NIC_ERR_QDRIVER;
132 ev_code = sfn5122f_get_event_code(q);
/* 15 is presumably the "no event / queue empty" code — confirm against
 * the SFC9000 datasheet; 100 bounds work done per poll. */
133 while (ev_code != 15 && count < 100){
135 ev_code = sfn5122f_get_event_code(q);
139 // TODO multiple packets
140 if (sfn5122f_queue_handle_rx_ev(q, &op, &len) == SYS_ERR_OK) {
143 process_received_packet(buf, 1, 0);
145 DEBUG_QUEUE("Failed receiveing \n");
146 // TODO how to tell the upper layer that it can reuse
150 DEBUG_QUEUE(" RX_EV Q_ID: %d len %ld \n", qi, len);
151 sfn5122f_queue_bump_evhead(q);
154 if (sfn5122f_queue_handle_tx_ev(q) == SYS_ERR_OK) {
155 DEBUG_QUEUE("TX EVENT OK %d \n", qi);
/* Reclaim every buffer completed by this TX event. */
157 while (q->num_left > 0) {
158 handle_tx_done(q->bufs[index]);
163 DEBUG_QUEUE("TX EVENT ERR %d \n", qi);
165 sfn5122f_queue_bump_evhead(q);
168 DEBUG_QUEUE("DRIVER EVENT %d\n", qi);
169 sfn5122f_handle_drv_ev(q, qi);
170 sfn5122f_queue_bump_evhead(q);
172 case EV_CODE_DRV_GEN:
173 DEBUG_QUEUE("DRIVER GENERATED EVENT \n");
174 sfn5122f_queue_bump_evhead(q);
177 DEBUG_QUEUE("USER EVENT \n");
178 sfn5122f_queue_bump_evhead(q);
181 DEBUG_QUEUE("MCDI EVENT \n");
182 sfn5122f_queue_handle_mcdi_event(q);
183 sfn5122f_queue_bump_evhead(q);
186 DEBUG_QUEUE("GLOBAL EVENT \n");
187 sfn5122f_queue_bump_evhead(q);
192 /* update event queue tail */
/* Tell hardware how far we have consumed, so it can reuse event entries. */
194 sfn5122f_evq_rptr_reg_wr(d, qi, q->ev_head);
/** Queue-library callback: publish the RX descriptor write pointer `tail`
 *  to the card. The push command is cleared so no extra descriptor is
 *  pushed; only the high half of the doorbell register carries state, the
 *  low half is written as 0 and ignored by hardware. */
201 static errval_t update_rxtail(struct sfn5122f_queue* que, void *opaque, size_t tail)
206 reg = sfn5122f_rx_desc_upd_reg_hi_rx_desc_wptr_insert(reg, tail);
207 /* don't want to push an additional rx descriptor with the write pointer */
208 reg = sfn5122f_rx_desc_upd_reg_hi_rx_desc_push_cmd_insert(reg, 0);
209 /* the lower register will be ignored */
210 sfn5122f_rx_desc_upd_reg_lo_wr(d, qi, 0);
211 sfn5122f_rx_desc_upd_reg_hi_wr(d, qi, reg);
/** Queue-library callback: publish the TX descriptor write pointer `tail`
 *  to the card. Mirrors update_rxtail(): push command and inline
 *  descriptor field are zeroed, low doorbell half written as 0. */
216 static errval_t update_txtail(struct sfn5122f_queue* que, void *opaque, size_t tail)
220 reg = sfn5122f_tx_desc_upd_reg_hi_tx_desc_wptr_insert(reg, tail);
221 /* don't want to push an additional tx descriptor with the write pointer */
222 reg = sfn5122f_tx_desc_upd_reg_hi_tx_desc_push_cmd_insert(reg, 0);
223 reg = sfn5122f_tx_desc_upd_reg_hi_tx_desc_insert(reg, 0);
225 /* the lower register will be ignored */
226 sfn5122f_tx_desc_upd_reg_lo_wr(d, qi, 0);
227 sfn5122f_tx_desc_upd_reg_hi_wr(d, qi, reg);
231 /** Callback to get card's MAC address */
/* Copies the low 6 bytes of the cached 64-bit `mac_address` into `mac`.
 * NOTE(review): byte order depends on host endianness and on how
 * mac_address was packed by the caller of init_queue_0 — verify there. */
232 static void get_mac_address_fn(uint8_t* mac)
234 memcpy(mac, &mac_address, 6);
237 /******************************************************************************/
238 /* Device/queue initialization */
240 /** Allocate queue n and return handle for queue manager */
/** Allocate and map the TX, RX and event descriptor rings, returning their
 *  frame capabilities through `tx`/`rx`/`ev`, then initialize the queue
 *  manager handle `q` with tail-update callbacks. Mappings are cacheable
 *  only when `cache_coherence` is set.
 *  NOTE(review): the asserts mean allocation failure aborts; there is no
 *  error return from this function in the visible code. */
242 static void setup_queue(struct capref* ev, struct capref* tx, struct capref* rx)
244 size_t tx_size, rx_size, ev_size;
245 void *tx_virt, *rx_virt, *ev_virt;
246 vregion_flags_t flags_vreg;
248 struct sfn5122f_queue_ops ops = {
249 .update_txtail = update_txtail,
250 .update_rxtail = update_rxtail
253 // Decide on which flags to use for the mappings
254 flags_vreg = (cache_coherence ? VREGION_FLAGS_READ_WRITE :
255 VREGION_FLAGS_READ_WRITE_NOCACHE);
258 /* Allocate memory for descriptor rings
259 No difference for userspace networking*/
260 tx_size = sfn5122f_q_tx_ker_desc_size * TX_ENTRIES;
261 tx_virt = alloc_map_frame(flags_vreg, tx_size, tx);
263 assert(tx_virt != NULL);
265 rx_size = sfn5122f_q_rx_ker_desc_size * RX_ENTRIES;
267 rx_virt = alloc_map_frame(flags_vreg, rx_size, rx);
268 assert(rx_virt != NULL);
270 ev_size = sfn5122f_q_event_entry_size * EV_ENTRIES;
271 ev_virt = alloc_map_frame(flags_vreg, ev_size, ev);
272 assert(ev_virt != NULL);
274 // Initialize queue manager
275 q = sfn5122f_queue_init(tx_virt, TX_ENTRIES, rx_virt, RX_ENTRIES,
276 ev_virt, EV_ENTRIES, &ops, NULL, false);
/** Re-publish the current RX and TX tail pointers to the hardware
 *  (e.g. after (re)initialization). Public entry point. */
281 void write_queue_tails(void)
283 DEBUG_QUEUE("idc_write_queue_tails()\n");
285 sfn5122f_queue_bump_rxtail(q);
286 sfn5122f_queue_bump_txtail(q);
/** Public poll entry point for queue 0: runs pending ethersrv work, then
 *  drains the hardware event queue; returns check_for_new_events()'s
 *  result (event count, per its name — confirm in full source). */
289 size_t check_queue_0(void)
291 do_pending_work_for_all();
292 return check_for_new_events();
/** Callback wrapper around terminate_queue_0(); asserts success since
 *  there is no caller to propagate the error to. */
295 static void terminate_queue_fn(void)
298 err = terminate_queue_0();
299 assert(err_is_ok(err));
302 // Callback from device manager
/** Tear down queue 0: unmap the user/kernel views of the TX and RX rings
 *  and the event ring, then delete the three ring frame capabilities.
 *  NOTE(review): the error-handling bodies and the final return are
 *  elided from this listing. */
303 errval_t terminate_queue_0(void)
307 DEBUG_QUEUE("idc_queue_terminated()\n");
309 // Free memory for hardware ring buffers
311 err = vspace_unmap(q->tx_ring.user);
312 if (err_is_fail(err)) {
315 err = vspace_unmap(q->rx_ring.user);
316 if (err_is_fail(err)) {
320 err = vspace_unmap(q->tx_ring.ker);
321 if (err_is_fail(err)) {
324 err = vspace_unmap(q->rx_ring.ker);
325 if (err_is_fail(err)) {
330 err = vspace_unmap(q->ev_ring);
331 if (err_is_fail(err)) {
334 err = cap_delete(tx_frame);
335 if (err_is_fail(err)) {
338 err = cap_delete(rx_frame);
339 if (err_is_fail(err)) {
342 err = cap_delete(ev_frame);
343 if (err_is_fail(err)) {
349 errval_t init_queue_0(char* cname, uint64_t mac_addr, void* device,
350 bool interrupts, bool userspace, struct capref* ev,
351 struct capref* tx, struct capref* rx)
353 use_interrupts = interrupts;
354 mac_address = mac_addr;
356 d = malloc(sizeof(*d));
357 sfn5122f_initialize(d, device);
359 setup_queue(ev, tx, rx);
361 ethersrv_init((char*) cname, 0,
364 transmit_pbuf_list_fn,
365 find_tx_free_slot_count_fn,
366 handle_free_tx_slot_fn,
368 register_rx_buffer_fn,
369 find_rx_free_slot_count_fn);