2 * Copyright (c) 2007-2011, ETH Zurich.
5 * This file is distributed under the terms in the attached LICENSE file.
6 * If you do not find this file, copies can be found by writing to:
7 * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
14 #include <net_queue_manager/net_queue_manager.h>
15 #include <barrelfish/nameservice_client.h>
17 #include <ipv4/lwip/inet.h>
18 #include <barrelfish/debug.h>
19 #include <if/sfn5122f_defs.h>
20 #include <if/sfn5122f_devif_defs.h>
21 #include <if/sfn5122f_devif_rpcclient_defs.h>
22 #include <if/net_ARP_rpcclient_defs.h>
23 #include <if/net_ARP_defs.h>
27 #include "sfn5122f_debug.h"
28 #include "buffer_tbl.h"
/*
 * File-scope driver state for the sfn5122f card driver.
 * NOTE(review): this view of the file is incomplete (source lines are
 * missing throughout); comments only describe what is visible here.
 */
35 struct sfn5122f_binding *binding;
36 struct sfn5122f_devif_binding *devif;
/* Capabilities for the TX/RX/event queue memory regions */
37 struct capref tx_frame;
38 struct capref rx_frame;
39 struct capref ev_frame;
47 // first entries of the buffer table to make up queue
59 static bool use_msix = false;
60 static const char *service_name = "sfn5122f";
/* Mackerel handle for the memory-mapped device registers */
61 static sfn5122f_t *d = NULL;
62 //static sfn5122f_msix_t *d_msix = NULL;
/* Per-port MAC address (6 bytes used of each 64-bit entry) */
63 static uint64_t d_mac[2];
64 static int initialized = 0;
65 static struct capref *regframe;
/* Frame the card DMAs fatal-interrupt status into (INT_KER) */
67 static struct capref int_ker;
68 static void* int_ker_virt;
/* DMA region the MC writes MAC statistics into */
70 static struct capref mac_stats;
71 static void* mac_virt;
72 static uint64_t mac_phys;
74 static uint8_t pci_function = 0;
/* Per-port link state cached from MCDI GET_LINK (see get_link()) */
76 static uint32_t cap[2];
77 static uint32_t speed[2];
78 static uint32_t flags[2];
79 static uint32_t fcntl [2];
/* Per-port PHY configuration cached from MCDI GET_PHY_CFG */
82 static uint32_t phy_caps[2];
83 static uint32_t phy_flags[2];
84 static uint32_t phy_media[2];
85 /* Loopback mode none and speed */
86 static uint32_t phy_loopback_mode = 0;
87 //static uint32_t phy_loopback_speed = 0;
/* Wake-on-LAN filter id returned by CMD_WOL_FILTER_GET */
89 static uint32_t wol_filter_id = 0;
/* ARP lookup RPC client state */
92 static struct net_ARP_rpc_client arp_rpc;
93 static bool net_arp_connected = false;
94 static struct waitset rpc_ws;
/* Enable hardware TX checksum offload (used by init_txq()) */
96 static bool csum_offload = 1;
/* RSS indirection table: 128 entries mapping hash -> RX queue */
98 static uint32_t rx_indir_tbl[128];
/* Per-queue software state, indexed by hardware queue number */
101 static struct queue_state queues[1024];
102 /* PCI device address passed on command line */
103 static uint32_t pci_bus = PCI_DONT_CARE;
104 static uint32_t pci_device = PCI_DONT_CARE;
/* MSI-X allocator; cdriver_msix == (size_t)-1 means "not yet set up"
 * (checked as `== -1` in device_init()) */
105 static struct bmallocator msix_alloc;
106 static size_t cdriver_msix = -1;
107 static uint8_t cdriver_vector;
109 // first to start everything
110 static bool first = 1;
/* RSS hash key (40 bytes, controller requirement) */
113 uint8_t rx_hash_key[40];
117 static uint32_t ip = 0;
/* Filter type/descriptor definitions; bodies not visible in this view */
119 enum filter_type_ip {
128 enum filter_type_mac {
135 struct sfn5122f_filter_ip {
152 struct sfn5122f_filter_mac {
169 /* scatter and rss enable */
170 static bool rss_en = 0;
171 static bool scatter_en = 0;
/* Software mirror of the active RX IP filters (see ftqf_alloc()) */
172 static struct sfn5122f_filter_ip filters_rx_ip[NUM_FILTERS_IP];
173 //static struct sfn5122f_filter_ip filters_tx_ip[NUM_FILTERS_IP];
176 static struct sfn5122f_filter_mac filters_rx_ip[NUM_FILTERS_MAC];
177 static struct sfn5122f_filter_mac filters_tx_ip[NUM_FILTERS_MAC];
181 /******************************************************************************/
/* Weak symbols implemented by the queue-driver side when the card driver
 * and a queue driver are linked into one domain; NULL otherwise. */
183 void qd_main(void) __attribute__((weak));
184 void qd_argument(const char *arg) __attribute__((weak));
185 void qd_interrupt(void) __attribute__((weak));
186 void qd_queue_init_data(struct sfn5122f_binding *b, struct capref registers,
187 uint64_t macaddr) __attribute__((weak));
188 void qd_queue_memory_registered(struct sfn5122f_binding *b) __attribute__((weak));
189 void qd_write_queue_tails(struct sfn5122f_binding *b) __attribute__((weak));
/* Exported entry points of the card-driver management interface */
192 void cd_request_device_info(struct sfn5122f_binding *b);
193 void cd_register_queue_memory(struct sfn5122f_binding *b,
/* Internal helpers, defined below */
204 static void idc_write_queue_tails(struct sfn5122f_binding *b);
206 static void device_init(void);
207 static void start_all(void);
208 static void probe_all(void);
209 static uint32_t init_txq(uint16_t n, bool csum, bool userspace);
210 static uint32_t init_rxq(uint16_t n, bool userspace);
211 static uint32_t init_evq(uint16_t n);
212 static void queue_hw_stop(uint16_t n);
214 static void setup_interrupt(size_t *msix_index, uint8_t core, uint8_t vector);
215 static void interrupt_handler(void* arg);
217 static void bind_arp(struct waitset *ws);
218 static errval_t arp_ip_info(void);
219 /***************************************************************************/
/* Program one IP port filter (TCP or UDP) into RX filter-table slot idx.
 * Builds the lo/hi 64-bit halves of the 128-bit table entry, then writes
 * both registers.  NOTE(review): several original lines are missing from
 * this view (closing braces, some insert arguments). */
222 static void sfn5122f_filter_port_setup(int idx, struct sfn5122f_filter_ip* filter)
224 sfn5122f_rx_filter_tbl_lo_t filter_lo = 0;
225 sfn5122f_rx_filter_tbl_hi_t filter_hi = 0;
227 if (filter->type_ip == sfn5122f_PORT_UDP) {
229 // Add destination IP
230 filter_hi = sfn5122f_rx_filter_tbl_hi_dest_ip_insert(filter_hi,
232 filter_lo = sfn5122f_rx_filter_tbl_lo_src_ip_insert(filter_lo,
/* tcp_udp = 1 marks this entry as a UDP filter */
234 filter_hi = sfn5122f_rx_filter_tbl_hi_tcp_udp_insert(filter_hi, 1);
235 filter_lo = sfn5122f_rx_filter_tbl_lo_src_tcp_dest_udp_insert(
236 filter_lo, filter->dst_port);
238 filter_hi = sfn5122f_rx_filter_tbl_hi_rss_en_insert(filter_hi, 0);
239 filter_hi = sfn5122f_rx_filter_tbl_hi_scatter_en_insert(filter_hi, 0);
/* NOTE(review): "UPD" in the debug text below looks like a typo for
 * "UDP" (debug output only; left unchanged here). */
240 DEBUG("UPD filter index %d: ip_dst %x port_dst %d ip_src %x port_src %d"
242 idx, filter->dst_ip, filter->dst_port,
243 filter->src_ip, filter->src_port, filter->queue);
246 if (filter->type_ip == sfn5122f_PORT_TCP) {
247 // Add dst IP and port
248 filter_hi = sfn5122f_rx_filter_tbl_hi_dest_ip_insert(filter_hi,
250 filter_lo = sfn5122f_rx_filter_tbl_lo_src_ip_insert(filter_lo,
252 filter_lo = sfn5122f_rx_filter_tbl_lo_dest_port_tcp_insert(filter_lo,
/* tcp_udp = 0 marks this entry as a TCP filter */
254 filter_hi = sfn5122f_rx_filter_tbl_hi_tcp_udp_insert(filter_hi, 0);
255 filter_hi = sfn5122f_rx_filter_tbl_hi_rss_en_insert(filter_hi, 0);
256 filter_hi = sfn5122f_rx_filter_tbl_hi_scatter_en_insert(filter_hi, 0);
257 DEBUG("TCP filter index %d: ip_dst %x port_dst %d ip_src %x port_src %d"
259 idx, filter->dst_ip, filter->dst_port,
260 filter->src_ip, filter->src_port, filter->queue);
/* Route matches to the filter's RX queue; apply global RSS/scatter flags */
263 filter_hi = sfn5122f_rx_filter_tbl_hi_rxq_id_insert(filter_hi, filter->queue);
264 filter_hi = sfn5122f_rx_filter_tbl_hi_rss_en_insert(filter_hi, rss_en);
265 filter_hi = sfn5122f_rx_filter_tbl_hi_scatter_en_insert(filter_hi, scatter_en);
267 sfn5122f_rx_filter_tbl_lo_wr(d, idx, filter_lo);
268 sfn5122f_rx_filter_tbl_hi_wr(d, idx, filter_hi);
/* Fold a filter's address/port tuple into the 32-bit key fed to the
 * filter-table hash: XOR of four packed data words.
 * NOTE(review): the lines computing host1/port1/port2 (and the non-UDP
 * branch) are missing from this view. */
271 static uint32_t build_key(struct sfn5122f_filter_ip* f)
273 uint32_t data[4] = {0,0,0,0};
281 if (f->type_ip == sfn5122f_PORT_UDP) {
291 data[0] = host1 << 16 | port1;
292 data[1] = port2 << 16 | host1 >> 16;
295 return data[0] ^ data[1] ^ data[2] ^ data[3];
/* 16-bit LFSR-style hash of the filter key used to pick the primary
 * filter-table slot (two rounds of shift-XOR mixing over the high and
 * low halves of the key).  NOTE(review): the declaration of `tmp` and
 * opening brace are not visible in this view. */
298 static uint16_t filter_hash(uint32_t key)
302 /* First 16 rounds */
303 tmp = 0x1fff ^ key >> 16;
304 tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
305 tmp = tmp ^ tmp >> 9;
/* Last rounds mix in the low half of the key */
307 tmp = tmp ^ tmp << 13 ^ key;
308 tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
309 return tmp ^ tmp >> 9;
/* Compare two IP filters field by field (type, addresses, queue, ports).
 * NOTE(review): the return statements are missing from this view, and the
 * port comparison uses && (both ports must differ) while the other fields
 * use || — confirm the asymmetry is intended. */
312 static bool filter_equals(struct sfn5122f_filter_ip* f1,
313 struct sfn5122f_filter_ip* f2)
315 if (f1->type_ip != f2->type_ip) {
317 } else if ((f1->src_ip != f2->src_ip) ||
318 (f1->dst_ip != f2->dst_ip) ||
319 (f1->queue != f2->queue)) {
321 } else if ((f1->src_port != f2->src_port) &&
322 (f2->dst_port != f1->dst_port)) {
/* Secondary hash: per-key step size for open-addressed probing of the
 * filter table (body not visible in this view). */
329 static uint16_t filter_increment(uint32_t key)
/* Find a slot for filter f in filters_rx_ip via hash + increment probing.
 * Returns the slot index; the caller (reg_port_filter) treats a negative
 * return as "table full". */
334 static int ftqf_alloc(struct sfn5122f_filter_ip* f)
336 // Documentation suggest hashing using a certain algorithm
339 unsigned int incr = 0;
342 hash = filter_hash(key);
343 incr = filter_increment(key);
/* Mask into table range — requires NUM_FILTERS_IP to be a power of two */
345 key = hash & (NUM_FILTERS_IP - 1);
/* Free slot, or an identical filter already installed */
348 if (filters_rx_ip[key].enabled == false) {
350 } else if (filter_equals(&filters_rx_ip[key], f)){
/* Collision: advance by the per-key increment, wrapping in the table */
358 key = (key + incr) & (NUM_FILTERS_IP - 1);
/* Register IP filter f: allocate a table slot, mirror the filter in the
 * software table and program the hardware entry.  Returns
 * FILTER_ERR_NOT_ENOUGH_MEMORY when no slot is free.
 * NOTE(review): filt_ind's declaration and the fid out-parameter's
 * assignment are not visible in this view. */
366 static errval_t reg_port_filter(struct sfn5122f_filter_ip* f, uint64_t* fid)
370 DEBUG("reg_port_filter: called\n");
372 if ((filt_ind=ftqf_alloc(f)) < 0) {
373 return FILTER_ERR_NOT_ENOUGH_MEMORY;
376 filters_rx_ip[filt_ind] = *f;
377 filters_rx_ip[filt_ind].enabled = true;
379 sfn5122f_filter_port_setup(filt_ind, f);
388 /***************************************************************************/
389 /* Helper functions*/
/* Pretty-print link mode/speed/flags (as cached from MCDI GET_LINK).
 * flags1 bit 0 = link up, bit 1 = full duplex.  NOTE(review): the
 * fcntl1 switch/if structure around the MODE lines is not visible here. */
390 static void decode_link(uint32_t fcntl1 , uint32_t flags1 , uint32_t speed1)
394 DEBUG("LINK MODE: AUTO \n");
397 DEBUG("LINK MODE: RX/TX \n");
400 DEBUG("LINK MODE: RESPOND \n");
403 DEBUG("LINK MODE: NONE \n");
406 DEBUG("LINK SPEED: %"PRIu32" \n", speed1);
407 DEBUG("LINK FLAGS: %8lX \n", (long unsigned int) flags1);
408 if (!!(flags1 & 1)) {
409 DEBUG("LINK IS UP \n");
412 if (!!(flags1 & 1 << 0x1)) {
413 DEBUG("LINK IS FULL DUPLEX \n");
/* Query MC assertion state via MCDI GET_ASSERTS; if the MC reported
 * assertions, print them and reboot the MC (CMD_REBOOT) to recover.
 * NOTE(review): the in/out buffer declarations and the condition guarding
 * the reboot path are not visible in this view. */
418 static void handle_assertions(void)
425 memset(in, 0, sizeof(in));
426 in[CMD_GET_ASSERTS_IN_CLEAR_OFFSET] = 0;
428 err = mcdi_rpc(CMD_GET_ASSERTS, in , CMD_GET_ASSERTS_IN_LEN, out,
429 CMD_GET_ASSERTS_OUT_LEN, &outlen, pci_function, d);
430 assert(err_is_ok(err));
433 /* TODO handle assertions */
434 printf("THERE WERE ASSERTIONS: %"PRIu8" \n ", out[0]);
435 /* exit assertions -> special reboot*/
437 err = mcdi_rpc(CMD_REBOOT, in, CMD_REBOOT_IN_LEN ,
438 NULL, 0, NULL, pci_function, d);
439 assert(err_is_ok(err));
444 /* Get Link and write settings into global variables */
/* Reads CMD_GET_LINK for `port` and caches the capability, speed, flow
 * control and flag words in the per-port global arrays, then logs them. */
445 static void get_link(uint8_t port)
447 uint8_t out[CMD_GET_LINK_OUT_LEN];
450 err = mcdi_rpc(CMD_GET_LINK, NULL, 0 , out, CMD_GET_LINK_OUT_LEN, NULL, port,d);
451 assert(err_is_ok(err));
/* Capability word sits at offset 0 of the response */
453 memcpy(&cap[port], out, 4);
454 memcpy(&speed[port], out+CMD_GET_LINK_OUT_SPEED_OFFSET, 4);
455 memcpy(&fcntl[port], out+CMD_GET_LINK_OUT_FCNTL_OFFSET, 4);
456 memcpy(&flags[port], out+CMD_GET_LINK_OUT_FLAGS_OFFSET, 4);
458 decode_link(fcntl[port], flags[port], speed[port]);
/* Initial MAC/link setup of a port: program MAC address, MTU, drain/
 * reject/flow-control settings (CMD_SET_MAC), clear the multicast hash
 * and push the cached link capabilities (CMD_SET_LINK).
 * NOTE(review): the "®" tokens below are mojibake — almost certainly
 * "&reg" before an encoding mangle; restore against the full source. */
464 static void init_port(uint8_t port)
466 uint8_t in[CMD_SET_MAC_IN_LEN];
470 memcpy(in + CMD_SET_MAC_IN_ADR_OFFSET, &d_mac[port], 6 );
471 /* linux driver sets these bits */
476 memcpy(in + CMD_SET_MAC_IN_MTU_OFFSET , ®, 4);
478 in[CMD_SET_MAC_IN_DRAIN_OFFSET] = 0;
479 /* Reject unicast packets? */
480 in[CMD_SET_MAC_IN_REJECT_OFFSET] = 1;
481 /* Set wanted flow control of the card 2 -> bidirectional*/
482 in[CMD_SET_MAC_IN_FCTNL_OFFSET] = 2;
483 err = mcdi_rpc(CMD_SET_MAC, in, CMD_SET_MAC_IN_LEN, NULL, 0, NULL, port, d);
484 assert(err_is_ok(err));
/* Clear the multicast hash filter */
486 memset(mc_hash, 0, sizeof(mc_hash));
487 err = mcdi_rpc(CMD_SET_MCAST_HASH, mc_hash , CMD_SET_MCAST_HASH_IN_LEN,
488 NULL, 0 , NULL, port, d);
489 assert(err_is_ok(err));
/* Push the link capabilities cached by get_link()/probe_all() */
491 memset(in, 0 , sizeof(in));
492 memcpy(in + CMD_SET_LINK_IN_CAP_OFFSET, &cap[pci_function], 4);
494 err = mcdi_rpc(CMD_SET_LINK, in, CMD_SET_LINK_IN_LEN, NULL, 0, NULL, 0, d);
495 assert(err_is_ok(err));
/* (Re)start a port: clear the multicast hash, reprogram MAC address,
 * MTU and flow control via CMD_SET_MAC, then restore the multicast hash.
 * NOTE(review): "®" below is mojibake for "&reg" (encoding damage). */
498 static void start_port(uint8_t port)
500 uint8_t in[CMD_SET_MAC_IN_LEN];
504 memset(&in, 0, sizeof(in));
/* First pass: push the (cleared) multicast hash */
506 err = mcdi_rpc(CMD_SET_MCAST_HASH, mc_hash , CMD_SET_MCAST_HASH_IN_LEN,
507 NULL, 0 , NULL, port, d);
508 assert(err_is_ok(err));
511 memcpy(in + CMD_SET_MAC_IN_ADR_OFFSET, &d_mac[port], 6 );
512 /* seems like the linux driver sets all bits not set
513 from the MAC address to 1*/
518 memcpy(in + CMD_SET_MAC_IN_MTU_OFFSET , ®, 4);
519 in[CMD_SET_MAC_IN_DRAIN_OFFSET] = 0;
520 /* Reject unicast packets ? */
521 in[CMD_SET_MAC_IN_REJECT_OFFSET] = 1;
522 /* Set wanted functionality (flow control) of card -> set to 2 for RX/TX
524 in[CMD_SET_MAC_IN_FCTNL_OFFSET] = 2;
525 err = mcdi_rpc(CMD_SET_MAC, in, CMD_SET_MAC_IN_LEN, NULL, 0, NULL, port, d);
526 assert(err_is_ok(err));
/* Re-push the multicast hash after the MAC reconfiguration */
528 err = mcdi_rpc(CMD_SET_MCAST_HASH, mc_hash , CMD_SET_MCAST_HASH_IN_LEN,
529 NULL, 0 , NULL, port, d);
531 assert(err_is_ok(err));
534 /******************************************************************************
536 *****************************************************************************/
/* One-time card bring-up via MCDI: clear the MC reboot flag, attach the
 * driver, reset the port, tear down Wake-on-LAN filters, allocate the
 * INT_KER and MAC-stats DMA frames, and read board/PHY configuration
 * into the global state.  NOTE(review): several original lines (local
 * declarations, some condition lines) are missing from this view. */
538 static void probe_all(void)
548 struct frame_identity frameid = { .base = 0, .bytes = 0 };
553 // Test and clear MC-reboot flag for port/function
554 offset = MCDI_REBOOT_OFFSET(pci_function);
555 reg = sfn5122f_mc_treg_smem_rd(d,offset);
557 sfn5122f_mc_treg_smem_wr(d,offset,0);
560 /*print out any assertions */
562 // Let BMC know that driver is in charg of filter/link setttings
563 // before we can restet NIC
564 memset(&in, 0, sizeof(in));
565 memset(&out, 0 , sizeof(out));
567 r = mcdi_rpc(CMD_GET_VERSION, NULL, 0, out, CMD_GET_VERSION_OUT_LEN,
568 &outlen, pci_function, d);
569 assert(err_is_ok(r));
572 memset(&out, 0 , sizeof(out));
574 // driver is operating / + update
577 r = mcdi_rpc(CMD_DRV_ATTACH, in, CMD_DRV_ATTACH_IN_LEN, out,
578 CMD_DRV_ATTACH_OUT_LEN, &outlen, pci_function, d);
579 assert(err_is_ok(r));
/* Reset the port to a known state */
582 r = mcdi_rpc(CMD_PORT_RESET, NULL, 0, NULL, 0, NULL, pci_function, d);
583 assert(err_is_ok(r));
/* Remove any Wake-on-LAN filter the MC may still hold */
586 if(mcdi_rpc(CMD_WOL_FILTER_GET, NULL, 0, out, CMD_WOL_FILTER_GET_OUT_LEN,
587 &outlen, pci_function, d) == SYS_ERR_OK) {
588 memcpy(&wol_filter_id, out , 4);
590 // Reset filter of card
591 mcdi_rpc(CMD_WOL_FILTER_RESET, NULL, 0, NULL, 0, NULL, pci_function, d);
594 // memory for INT_KER
595 int_ker_virt = alloc_map_frame(VREGION_FLAGS_READ_WRITE,
596 2*sizeof(uint64_t), &int_ker);
597 memset(int_ker_virt, 0, 2*sizeof(uint64_t));
598 // Read in non volatile configuration
599 memset(&out, 0, sizeof(out));
600 r = mcdi_rpc(CMD_GET_BOARD_CONFIG, NULL, 0, out,
601 CMD_GET_BOARD_CONFIG_OUT_LEN, &outlen, pci_function, d);
602 assert(err_is_ok(r));
/* Cache both ports' MAC addresses from the board config */
604 memcpy(&d_mac[0], out+MCDI_MAC_PORT_OFFSET(0) ,6);
605 memcpy(&d_mac[1], out+MCDI_MAC_PORT_OFFSET(1) ,6);
607 // read phy configuration
608 r = mcdi_rpc(CMD_GET_PHY_CFG, NULL, 0, out, CMD_GET_PHY_CFG_OUT_LEN, &outlen,
610 assert(err_is_ok(r));
612 memcpy(&phy_caps[pci_function], out+CMD_GET_PHY_CFG_OUT_CAP_OFFSET, 4);
613 memcpy(&phy_flags[pci_function], out+CMD_GET_PHY_CFG_OUT_FLAGS_OFFSET, 4);
614 memcpy(&phy_media[pci_function], out+CMD_GET_PHY_CFG_OUT_MEDIA_OFFSET, 4);
616 // get loopback modes
617 r = mcdi_rpc(CMD_GET_LOOPBACK_MODES, NULL, 0, out,
618 CMD_GET_LOOPBACK_MODES_OUT_LEN, &outlen, pci_function, d);
619 assert(err_is_ok(r));
620 memcpy(&phy_loopback_mode, out+CMD_GET_LOOPBACK_MODES_SUGGESTED_OFFSET,4);
621 // loopback mode NONE is no valid condition
622 phy_loopback_mode &= ~(1);
/* DMA region for MAC statistics; record its physical base for the MC */
626 mac_virt = alloc_map_frame(VREGION_FLAGS_READ_WRITE,
627 NUM_MAC_STATS*sizeof(uint64_t),
630 assert(mac_virt != NULL);
631 r = invoke_frame_identify(mac_stats, &frameid);
632 assert(err_is_ok(r));
633 mac_phys = frameid.base;
634 memset(mac_virt, 0, NUM_MAC_STATS*sizeof(uint64_t));
/* First 8 bytes of the request carry the DMA target address */
637 memset(&in, 0, sizeof(in));
638 memcpy(in, &mac_phys, 8);
640 // Settings for DMA of MAC stats
641 in[CMD_MAC_STATS_IN_CMD_OFFSET] = 0x6;
642 in[CMD_MAC_STATS_IN_DMA_LEN_OFFSET] = 8;
643 in[CMD_MAC_STATS_IN_DMA_LEN_OFFSET+1] = 3;
644 r = mcdi_rpc(CMD_MAC_STATS, in, CMD_MAC_STATS_IN_LEN, NULL, 0, NULL,
646 assert(err_is_ok(r));
652 // Init card IP filters
/* Clear the entire RX filter table and program the global search limits
 * and no-match routing policies in RX_FILTER_CTL. */
653 static void init_rx_filter_config(void)
655 uint64_t reg_hi, reg_lo;
/* Zero every filter-table entry */
657 for (int i = 0; i < NUM_FILTERS_IP; i++) {
658 sfn5122f_rx_filter_tbl_lo_wr(d, i, 0);
659 sfn5122f_rx_filter_tbl_hi_wr(d, i, 0);
662 reg_lo = sfn5122f_rx_filter_ctl_reg_lo_rd(d);
663 reg_hi = sfn5122f_rx_filter_ctl_reg_hi_rd(d);
/* Probe depths: 1 for full-match filters, 3 for wildcard filters */
665 reg_hi = sfn5122f_rx_filter_ctl_reg_hi_ethernet_full_search_limit_insert(reg_hi, 1);
666 reg_hi = sfn5122f_rx_filter_ctl_reg_hi_ethernet_wildcard_search_limit_insert(reg_hi, 3);
/* Packets matching no filter go to queue 0, without RSS */
669 reg_lo = sfn5122f_rx_filter_ctl_reg_lo_multicast_nomatch_q_id_lo_insert(reg_lo, 0);
670 reg_lo = sfn5122f_rx_filter_ctl_reg_lo_unicast_nomatch_q_id_insert(reg_lo, 0);
671 reg_lo = sfn5122f_rx_filter_ctl_reg_lo_unicast_nomatch_rss_enabled_insert(reg_lo, 0);
672 reg_lo = sfn5122f_rx_filter_ctl_reg_lo_multicast_nomatch_rss_enabled_insert(reg_lo, 0);
/* Same full/wildcard probe depths for the UDP and TCP filter classes */
674 reg_lo = sfn5122f_rx_filter_ctl_reg_lo_udp_full_srch_limit_insert(reg_lo, 1);
675 reg_lo = sfn5122f_rx_filter_ctl_reg_lo_udp_wild_srch_limit_insert(reg_lo, 3);
676 reg_lo = sfn5122f_rx_filter_ctl_reg_lo_tcp_full_srch_limit_insert(reg_lo, 1);
677 reg_lo = sfn5122f_rx_filter_ctl_reg_lo_tcp_wild_srch_limit_insert(reg_lo, 3);
680 sfn5122f_rx_filter_ctl_reg_lo_wr(d,reg_lo);
681 sfn5122f_rx_filter_ctl_reg_hi_wr(d,reg_hi);
/* Core register-level initialization of the controller: TX/RX config
 * registers, event logging, descriptor-cache SRAM layout, interrupt
 * address/vector setup, fatal-interrupt enables, RSS indirection table
 * and reserved TX tuning, then per-port init and link readout.
 * NOTE(review): many original lines are missing from this view; comments
 * describe only the visible statements. */
685 static void device_init(void)
688 struct frame_identity frameid = { .base = 0, .bytes = 0 };
689 uint64_t reg, reg2; // tmp_key = 0;
690 uint8_t in[24]; // set length to biggest in length needed
692 memset(&in, 0, sizeof(in));
694 // recover from failed assertion post-reset
697 /* ignore TX of packets 16 bytes and less */
698 reg = sfn5122f_tx_reserved_reg_lo_rd(d);
699 reg = sfn5122f_tx_reserved_reg_lo_tx_flush_min_len_en_insert(reg, 1);
700 sfn5122f_tx_reserved_reg_lo_wr(d, reg);
701 sfn5122f_tx_reserved_reg_hi_wr(d, sfn5122f_tx_reserved_reg_hi_rd(d));
702 //Disable TX_NO_EOP_DISC_EN because else would limit packets to 16
703 reg = sfn5122f_tx_cfg_reg_lo_rd(d);
704 reg = sfn5122f_tx_cfg_reg_lo_tx_no_eop_disc_en_insert(reg, 0);
705 reg = sfn5122f_tx_cfg_reg_lo_tx_ownerr_ctl_insert(reg, 1);
706 reg = sfn5122f_tx_cfg_reg_lo_tx_filter_en_bit_insert(reg, 1);
707 sfn5122f_tx_cfg_reg_lo_wr(d, reg);
708 sfn5122f_tx_cfg_reg_hi_wr(d, sfn5122f_tx_cfg_reg_hi_rd(d));
710 reg = sfn5122f_rx_cfg_reg_lo_rd(d);
711 // unset bit and set other bit which are not in documentation (43 and 47)
712 reg = sfn5122f_rx_cfg_reg_lo_rx_desc_push_en_insert(reg, 0) ;
713 reg = sfn5122f_rx_cfg_reg_lo_rx_ingr_en_insert(reg, 1);
/* RX user buffer size is encoded in 32-byte units */
714 reg = sfn5122f_rx_cfg_reg_lo_rx_usr_buf_size_insert(reg, (MTU_MAX-256) >> 5);
715 //reg = sfn5122f_rx_cfg_reg_lo_rx_ownerr_ctl_insert(reg, 1);
716 reg = sfn5122f_rx_cfg_reg_lo_rx_ip_hash_insert(reg, 1);
717 //reg = sfn5122f_rx_cfg_reg_lo_rx_hash_insrt_hdr_insert(reg, 1);
718 reg = sfn5122f_rx_cfg_reg_lo_rx_hash_alg_insert(reg, 1);
719 sfn5122f_rx_cfg_reg_lo_wr(d, reg);
720 sfn5122f_rx_cfg_reg_hi_wr(d, sfn5122f_rx_cfg_reg_hi_rd(d));
721 /* enable event logging, no UART
722 Event destination is queue 0 */
724 r = mcdi_rpc(CMD_LOG_CTRL, in, CMD_LOG_CTRL_IN_LEN,
725 NULL, 0, NULL, pci_function, d);
726 assert(err_is_ok(r));
728 /* Set destination of TX/RX flush event */
730 sfn5122f_dp_ctrl_reg_lo_fls_evq_id_wrf(d, 0);
731 sfn5122f_dp_ctrl_reg_hi_wr(d, sfn5122f_dp_ctrl_reg_hi_rd(d));
733 /* Disalbe user events for now */
734 sfn5122f_usr_ev_cfg_lo_usrev_dis_wrf(d , 1);
735 sfn5122f_usr_ev_cfg_hi_wr(d, sfn5122f_usr_ev_cfg_hi_rd(d));
738 // This seems to be not device specific i.e. works for other
740 /* Set position of descriptor caches in SRAM */
741 sfn5122f_srm_tx_dc_cfg_reg_lo_wr(d, TX_DC_BASE);
742 sfn5122f_srm_tx_dc_cfg_reg_hi_wr(d, sfn5122f_srm_tx_dc_cfg_reg_hi_rd(d));
743 sfn5122f_srm_rx_dc_cfg_reg_lo_srm_rx_dc_base_adr_wrf(d, RX_DC_BASE);
744 sfn5122f_srm_rx_dc_cfg_reg_hi_wr(d, sfn5122f_srm_rx_dc_cfg_reg_hi_rd(d));
746 /* Set TX descriptor cache size to 16 */
747 sfn5122f_tx_dc_cfg_reg_lo_tx_dc_size_wrf(d, 1);
748 sfn5122f_tx_dc_cfg_reg_hi_wr(d, sfn5122f_tx_dc_cfg_reg_hi_rd(d));
750 /* Set RX descriptor cache size to 64 and low watermark */
751 sfn5122f_rx_dc_cfg_reg_lo_rx_dc_size_wrf(d, 3);
752 sfn5122f_rx_dc_cfg_reg_hi_wr(d, sfn5122f_rx_dc_cfg_reg_hi_rd(d));
/* NOTE(review): preceding lines (not visible) presumably reload `reg`
 * from the watermark register before this insert — confirm, otherwise
 * `reg` carries the previous register's value here. */
755 reg = sfn5122f_rx_dc_pf_wm_reg_lo_rx_dc_pf_lwm_insert(reg, RX_DESC_CACHE_SIZE -8);
756 sfn5122f_rx_dc_pf_wm_reg_lo_wr(d, reg);
757 sfn5122f_rx_dc_pf_wm_reg_hi_wr(d, sfn5122f_rx_dc_pf_wm_reg_hi_rd(d));
759 /*programm init ker address for interrupts */
760 r = invoke_frame_identify(int_ker, &frameid);
761 assert(err_is_ok(r));
763 sfn5122f_int_adr_reg_ker_lo_wr(d, frameid.base);
764 reg = sfn5122f_int_adr_reg_ker_hi_rd(d);
766 // disable vector write if we use MSI-X
768 reg = sfn5122f_int_adr_reg_ker_hi_norm_int_vec_dis_ker_insert(reg, 1);
/* (size_t)-1 sentinel: legacy-interrupt path not yet configured */
769 if (cdriver_msix == -1) {
770 r = pci_setup_inthandler(interrupt_handler, NULL, &cdriver_vector);
771 assert(err_is_ok(r));
772 setup_interrupt(&cdriver_msix, disp_get_core_id(), cdriver_vector);
775 reg = sfn5122f_int_adr_reg_ker_hi_norm_int_vec_dis_ker_insert(reg, 0);
777 sfn5122f_int_adr_reg_ker_hi_wr(d, reg);
779 /* Enable all the genuinley fatal interrupts */
780 reg = sfn5122f_fatal_intr_reg_ker_lo_ill_adr_int_ker_en_insert(reg, 1);
781 /* Enable rxbuf/txbuf interrupt fields not documented.
783 reg = sfn5122f_fatal_intr_reg_ker_lo_rxbuf_own_int_ker_en_insert(reg, 1);
784 reg = sfn5122f_fatal_intr_reg_ker_lo_txbuf_own_int_ker_en_insert(reg, 1);
786 //reg = sfn5122f_fatal_intr_reg_ker_lo_sram_perr_int_p_ker_en_insert(reg, 1);
/* Written inverted (~reg) — NOTE(review): implies the enable bits are
 * active-low in this register; confirm against the datasheet. */
787 sfn5122f_fatal_intr_reg_ker_lo_wr(d, ~reg);
788 sfn5122f_fatal_intr_reg_ker_hi_wr(d, 0XFFFFFFFFFFFFFFFF);
790 /* Setup RSS indirection table (maps from hash value to packet to RXQ) */
791 for (int i = 0; i < 128; i++) {
793 sfn5122f_rx_indirection_tbl_wr( d, i, rx_indir_tbl[i]);
796 /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
797 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
798 (from linux driver) */
799 reg = sfn5122f_tx_reserved_reg_lo_rd(d);
800 reg = sfn5122f_tx_reserved_reg_lo_tx_rx_spacer_en_insert(reg, 1);
801 reg = sfn5122f_tx_reserved_reg_lo_tx_one_pkt_per_q_insert(reg, 1);
802 reg = sfn5122f_tx_reserved_reg_lo_tx_dis_non_ip_ev_insert(reg, 1);
804 /* Enable software events */
805 reg = sfn5122f_tx_reserved_reg_lo_tx_soft_evt_en_insert(reg, 1);
806 /* Prefetch threshold 2 => fetch when descriptor cache half empty */
807 reg = sfn5122f_tx_reserved_reg_lo_tx_pref_threshold_insert(reg, 2);
808 /* Disable hardware watchdog which can misfire */
809 reg = sfn5122f_tx_reserved_reg_lo_tx_pref_wd_tmr_insert(reg, 0x3fffff);
810 /* Squash TX of packets of 16 bytes or less */
811 reg = sfn5122f_tx_reserved_reg_lo_tx_flush_min_len_en_insert(reg, 1);
813 reg2 = sfn5122f_tx_reserved_reg_hi_rd(d);
814 reg2 = sfn5122f_tx_reserved_reg_hi_tx_push_en_insert(reg2, 0);
815 reg2 = sfn5122f_tx_reserved_reg_hi_tx_push_chk_dis_insert(reg2, 0);
816 reg2 = sfn5122f_tx_reserved_reg_hi_tx_rx_spacer_insert(reg2, 0xfe);
817 sfn5122f_tx_reserved_reg_lo_wr(d, reg);
818 sfn5122f_tx_reserved_reg_hi_wr(d, reg2);
/* Per-port MAC/link configuration and initial link readout */
820 init_port(pci_function);
821 get_link(pci_function);
822 DEBUG("BASIC CARD INIT DONE \n");
/* Final bring-up: start the port, enable device interrupts and kick off
 * MAC-statistics DMA into the mac_virt region.
 * NOTE(review): local declarations (reg, pointer) and the conditional
 * around the two leve_sel inserts are not visible in this view. */
825 static void start_all(void)
828 uint8_t in[CMD_MAC_STATS_IN_LEN];
829 unsigned long long* stats = (unsigned long long *) mac_virt;
832 start_port(pci_function);
834 memset(int_ker_virt, 0, 2*sizeof(uint64_t));
835 /* Enable interrupts */
836 /* Use an interrupt level unused by event queues */
837 reg = sfn5122f_int_en_reg_ker_lo_rd(d);
839 reg = sfn5122f_int_en_reg_ker_lo_ker_int_leve_sel_insert(reg, 0);
842 reg = sfn5122f_int_en_reg_ker_lo_ker_int_leve_sel_insert(reg, 0x1f);
844 reg = sfn5122f_int_en_reg_ker_lo_drv_int_en_ker_insert(reg, 1);
846 /* undocumented field */
847 reg = sfn5122f_int_en_reg_ker_lo_ker_int_ker_insert(reg, 0);
848 sfn5122f_int_en_reg_ker_lo_wr(d, reg);
849 sfn5122f_int_en_reg_ker_hi_wr(d, sfn5122f_int_en_reg_ker_hi_rd(d));
851 /* Start MAC stats */
852 memset(in, 0, sizeof(in));
/* Pre-fill the generation slot with all-ones before DMA starts */
853 stats[0x60] = (unsigned long long) (-1);
854 memcpy(in, &mac_phys, 8);
855 pointer = (uint8_t *) &mac_phys;
/* Command flags 0xD — NOTE(review): confirm the flag meaning against
 * the MCDI MAC_STATS definition (0x6 is used during probe_all()) */
856 in[CMD_MAC_STATS_IN_CMD_OFFSET] = 0xD;
859 in[CMD_MAC_STATS_IN_DMA_LEN_OFFSET] = 8;
860 in[CMD_MAC_STATS_IN_DMA_LEN_OFFSET+1] = 3;
861 errval_t err = mcdi_rpc(CMD_MAC_STATS, in, CMD_MAC_STATS_IN_LEN,
862 NULL, 0, NULL, pci_function, d);
864 assert(err_is_ok(err));
867 /**************************************************************************
869 ***************************************************************************/
/* Stop hardware queue n: issue TX and RX flush commands, clear the
 * descriptor pointer-table entries, then free the queue's RX/TX/EV
 * buffer-table ranges via BUF_TBL_UPD clear commands.
 * NOTE(review): the buf_tbl_upd register reads/writes between the clear
 * sections are not visible in this view. */
872 static void queue_hw_stop(uint16_t n)
877 reg = sfn5122f_tx_flush_descq_reg_lo_rd(d);
878 reg = sfn5122f_tx_flush_descq_reg_lo_tx_flush_descq_insert(reg, n);
879 reg = sfn5122f_tx_flush_descq_reg_lo_tx_flush_descq_cmd_insert(reg, 1);
880 sfn5122f_tx_flush_descq_reg_lo_wr(d, reg);
881 sfn5122f_tx_flush_descq_reg_hi_wr(d, sfn5122f_tx_flush_descq_reg_hi_rd(d));
883 reg = sfn5122f_rx_flush_descq_reg_lo_rd(d);
884 reg = sfn5122f_rx_flush_descq_reg_lo_rx_flush_descq_insert(reg, n);
885 reg = sfn5122f_rx_flush_descq_reg_lo_rx_flush_descq_cmd_insert(reg, 1);
886 sfn5122f_rx_flush_descq_reg_lo_wr(d, reg);
887 sfn5122f_rx_flush_descq_reg_hi_wr(d, sfn5122f_rx_flush_descq_reg_hi_rd(d));
889 /* TODO Wait for DRIVER_EVENT */
890 /* clear pointer table entries */
891 sfn5122f_tx_desc_ptr_tbl_lo_wr(d, n, 0);
892 sfn5122f_tx_desc_ptr_tbl_hi_wr(d, n, 0);
893 sfn5122f_rx_desc_ptr_tbl_lo_wr(d, n, 0);
894 sfn5122f_rx_desc_ptr_tbl_hi_wr(d, n, 0);
896 /*Free RX queue tbl entries*/
898 reg = sfn5122f_buf_tbl_upd_reg_lo_buf_clr_cmd_insert(reg, 1);
899 reg = sfn5122f_buf_tbl_upd_reg_lo_buf_clr_start_id_insert(reg,
900 queues[n].rx_buf_tbl);
/* User-space queues use a different RX ring size */
902 if (queues[n].userspace) {
903 reg = sfn5122f_buf_tbl_upd_reg_lo_buf_clr_end_id_insert(reg,
904 queues[n].rx_buf_tbl + NUM_ENT_RX_USR);
906 reg = sfn5122f_buf_tbl_upd_reg_lo_buf_clr_end_id_insert(reg,
907 queues[n].rx_buf_tbl + NUM_ENT_RX);
911 /*Free TX queue tbl entries*/
913 reg = sfn5122f_buf_tbl_upd_reg_lo_buf_clr_cmd_insert(reg, 1);
914 reg = sfn5122f_buf_tbl_upd_reg_lo_buf_clr_end_id_insert(reg,
915 queues[n].tx_buf_tbl + NUM_ENT_TX );
916 reg = sfn5122f_buf_tbl_upd_reg_lo_buf_clr_start_id_insert(reg,
917 queues[n].tx_buf_tbl);
919 /*Free EV queue tbl entries*/
921 reg = sfn5122f_buf_tbl_upd_reg_lo_buf_clr_cmd_insert(reg, 1);
922 reg = sfn5122f_buf_tbl_upd_reg_lo_buf_clr_end_id_insert(reg,
923 queues[n].ev_buf_tbl + NUM_ENT_EVQ );
924 reg = sfn5122f_buf_tbl_upd_reg_lo_buf_clr_start_id_insert(reg,
925 queues[n].ev_buf_tbl);
/* Initialize event queue n: program its timer-table entry, map the event
 * ring frame into the buffer table and enable the queue.
 * Returns the first buffer-table entry id.
 * NOTE(review): buffer_offset is uint64_t, compared against -1 and
 * returned as uint32_t — both comparison and return are narrowing. */
930 static uint32_t init_evq(uint16_t n)
934 struct frame_identity frameid = { .base = 0, .bytes = 0 };
935 uint64_t ev_phys, reg, buffer_offset;
939 reg = sfn5122f_timer_tbl_lo_timer_q_en_insert(reg, 1);
940 // set to 0 if interrupts for receives/sends should be generated
942 reg = sfn5122f_timer_tbl_lo_host_notify_mode_insert(reg, 0);
944 reg = sfn5122f_timer_tbl_lo_int_pend_insert(reg, 0);
945 reg = sfn5122f_timer_tbl_lo_int_armd_insert(reg, 0);
946 reg = sfn5122f_timer_tbl_lo_host_notify_mode_insert(reg, 1);
948 // timer mode disabled
949 reg = sfn5122f_timer_tbl_lo_timer_mode_insert(reg, 0);
950 sfn5122f_timer_tbl_lo_wr(d, n, reg);
951 sfn5122f_timer_tbl_hi_wr(d, n, sfn5122f_timer_tbl_hi_rd(d, n));
/* Physical address of the event ring frame for this queue */
953 r = invoke_frame_identify(queues[n].ev_frame, &frameid);
954 assert(err_is_ok(r));
955 ev_phys = frameid.base;
956 ev_size = frameid.bytes;
958 buffer_offset = alloc_buf_tbl_entries(ev_phys, NUM_ENT_EVQ, 0, 0, d);
959 if (buffer_offset == -1) {
963 DEBUG("EV_QUEUE_%d: buf_off %ld, phys 0x%lx\n",n , buffer_offset, ev_phys);
/* Enable the queue and point it at its buffer-table range */
965 reg = sfn5122f_evq_ptr_tbl_lo_rd(d, n);
966 reg = sfn5122f_evq_ptr_tbl_lo_evq_en_insert(reg, 1);
967 reg = sfn5122f_evq_ptr_tbl_lo_evq_size_insert(reg, 3);
968 reg = sfn5122f_evq_ptr_tbl_lo_evq_buf_base_id_insert(reg,
971 sfn5122f_evq_ptr_tbl_lo_wr(d, n, reg);
972 sfn5122f_evq_ptr_tbl_hi_wr(d, n, sfn5122f_evq_ptr_tbl_hi_rd(d, n));
974 /* No write collection for this register */
975 reg = sfn5122f_timer_command_reg_lo_rd(d,n);
976 reg = sfn5122f_timer_command_reg_lo_tc_timer_val_insert(reg, 0);
978 reg = sfn5122f_timer_command_reg_lo_tc_timer_mode_insert(reg, 0);
980 reg = sfn5122f_timer_command_reg_lo_tc_timer_mode_insert(reg, 0);
982 sfn5122f_timer_command_reg_lo_wr(d, n, reg);
/* Publish the initial read pointer for this event queue */
984 sfn5122f_evq_rptr_reg_wr(d, n, queues[n].ev_head);
986 return buffer_offset;
/* Initialize RX descriptor queue n: map the descriptor ring frame into
 * the buffer table and program its RX_DESC_PTR_TBL entry.  Ring size
 * differs for user-space (devif) queues.  Returns the first
 * buffer-table id (narrowed from 64-bit; -1 signals allocation failure). */
989 static uint32_t init_rxq(uint16_t n, bool userspace)
995 struct frame_identity frameid = { .base = 0, .bytes = 0 };
996 uint64_t rx_phys, reg_lo, reg_hi, buffer_offset;
998 * This will define a buffer in the buffer table, allowing
999 * it to be used for event queues, descriptor rings etc.
1001 /* Get physical addresses for rx/tx rings and event queue */
1003 r = invoke_frame_identify(queues[n].rx_frame, &frameid);
1004 assert(err_is_ok(r));
1005 rx_phys = frameid.base;
1006 rx_size = frameid.bytes;
/* Ring size depends on whether this is a user-space queue */
1009 num_ent_rx = NUM_ENT_RX_USR;
1011 num_ent_rx = NUM_ENT_RX;
1015 buffer_offset = alloc_buf_tbl_entries(rx_phys, num_ent_rx, 0, 0, d);
1017 if (buffer_offset == -1) {
1021 DEBUG("RX_QUEUE_%d: buf_off %ld, phys %lx, size %lx \n", n,
1022 buffer_offset, rx_phys, rx_size);
1023 /* setup RX queue */
1024 reg_lo = sfn5122f_rx_desc_ptr_tbl_lo_rd(d, n);
1025 reg_hi = sfn5122f_rx_desc_ptr_tbl_hi_rd(d, n);
1026 /* Which buffer table entries are used (which is the first entry) */
1027 reg_lo = sfn5122f_rx_desc_ptr_tbl_lo_rx_descq_buf_base_id_insert(reg_lo, buffer_offset);
1028 /* Which event queue is associated with this queue*/
1029 reg_lo = sfn5122f_rx_desc_ptr_tbl_lo_rx_descq_evq_id_insert(reg_lo, n);
1030 reg_lo = sfn5122f_rx_desc_ptr_tbl_lo_rx_descq_owner_id_insert(reg_lo, 0);
1032 reg_lo = sfn5122f_rx_desc_ptr_tbl_lo_rx_descq_label_insert(reg_lo, n);
1034 /* 1024 entries = 1 (512 = 0; 2048 = 2 ; 4096 = 3) */
1035 reg_lo = sfn5122f_rx_desc_ptr_tbl_lo_rx_descq_size_insert(reg_lo, 1);
/* Descriptor type: 0/1 chosen by the (not visible) userspace branch */
1038 reg_lo = sfn5122f_rx_desc_ptr_tbl_lo_rx_descq_type_insert(reg_lo, 0);
1040 reg_lo = sfn5122f_rx_desc_ptr_tbl_lo_rx_descq_type_insert(reg_lo, 1);
1043 reg_lo = sfn5122f_rx_desc_ptr_tbl_lo_rx_descq_jumbo_insert(reg_lo, 0);
1045 reg_lo = sfn5122f_rx_desc_ptr_tbl_lo_rx_descq_en_insert(reg_lo, 1);
1047 /* Hardware verifies data digest */
1048 reg_hi = sfn5122f_rx_desc_ptr_tbl_hi_rx_iscsi_ddig_en_insert(reg_hi, 1);
1049 reg_hi = sfn5122f_rx_desc_ptr_tbl_hi_rx_iscsi_hdig_en_insert(reg_hi, 1);
1051 sfn5122f_rx_desc_ptr_tbl_lo_wr(d, n, reg_lo);
1052 sfn5122f_rx_desc_ptr_tbl_hi_wr(d, n, reg_hi);
1054 return buffer_offset;
/* Initialize TX descriptor queue n: map the descriptor ring frame into
 * the buffer table and program its TX_DESC_PTR_TBL entry.  `csum`
 * enables hardware IP/TCP checksum offload (the *_chksm_dis fields are
 * disable flags, hence !csum).  Returns the first buffer-table id
 * (narrowed from 64-bit; -1 signals allocation failure). */
1057 static uint32_t init_txq(uint16_t n, bool csum, bool userspace)
1062 struct frame_identity frameid = { .base = 0, .bytes = 0 };
1063 uint64_t tx_phys, reg, reg1, buffer_offset;
1064 /* Get physical addresses for rx/tx rings and event queue */
1065 r = invoke_frame_identify(queues[n].tx_frame, &frameid);
1066 assert(err_is_ok(r));
1067 tx_phys = frameid.base;
1068 tx_size = frameid.bytes;
1070 buffer_offset = alloc_buf_tbl_entries(tx_phys, NUM_ENT_TX, 0, 0, d);
1072 if (buffer_offset == -1) {
1076 DEBUG("TX_QUEUE_%d: buf_off %ld, phys %lx\n",n , buffer_offset, tx_phys);
1077 /* setup TX queue */
1078 reg = sfn5122f_tx_desc_ptr_tbl_lo_rd(d, n);
1079 reg1 = sfn5122f_tx_desc_ptr_tbl_hi_rd(d, n);
1080 /* Which buffer table entries are used (which is the first entry) */
1081 reg = sfn5122f_tx_desc_ptr_tbl_lo_tx_descq_buf_base_id_insert(reg,
1083 /* Which event queue is associated with this queue */
1084 reg = sfn5122f_tx_desc_ptr_tbl_lo_tx_descq_evq_id_insert(reg , n);
1085 reg = sfn5122f_tx_desc_ptr_tbl_lo_tx_descq_owner_id_insert(reg, 0);
1086 reg = sfn5122f_tx_desc_ptr_tbl_lo_tx_descq_label_insert(reg , n);
1087 /* 1024 entries = 1 (512 = 0; 2048 = 2 ; 4096 = 3) */
1088 reg = sfn5122f_tx_desc_ptr_tbl_lo_tx_descq_size_insert(reg , 1);
1090 /* No user lvl networking */
1092 reg = sfn5122f_tx_desc_ptr_tbl_lo_tx_descq_type_insert(reg, 0);
1094 reg = sfn5122f_tx_desc_ptr_tbl_lo_tx_descq_type_insert(reg, 1);
/* iSCSI digests off; don't drop non-IP frames; enable the queue */
1097 reg1 = sfn5122f_tx_desc_ptr_tbl_hi_tx_iscsi_ddig_en_insert(reg1, 0);
1098 reg1 = sfn5122f_tx_desc_ptr_tbl_hi_tx_iscsi_hdig_en_insert(reg1, 0);
1100 reg1 = sfn5122f_tx_desc_ptr_tbl_hi_tx_non_ip_drop_dis_insert(reg1, 1);
1103 reg1 = sfn5122f_tx_desc_ptr_tbl_hi_tx_descq_en_insert(reg1 , 1);
1105 /* Enable offload of checksum */
1106 reg1 = sfn5122f_tx_desc_ptr_tbl_hi_tx_ip_chksm_dis_insert(reg1, !csum);
1107 reg1 = sfn5122f_tx_desc_ptr_tbl_hi_tx_tcp_chksm_dis_insert(reg1, !csum);
1108 sfn5122f_tx_desc_ptr_tbl_lo_wr(d, n, reg);
1109 sfn5122f_tx_desc_ptr_tbl_hi_wr(d, n, reg1);
1111 return buffer_offset;
/* Allocate an MSI-X table slot from msix_alloc (written to *msix_index)
 * and route it to the APIC of `core` with software vector `vector`. */
1115 static void setup_interrupt(size_t *msix_index, uint8_t core, uint8_t vector)
1121 res = bmallocator_alloc(&msix_alloc, msix_index);
1124 err = get_apicid_from_core(core, &dest);
1125 assert(err_is_ok(err));
1127 err = pci_msix_vector_init(*msix_index, dest, vector);
1128 assert(err_is_ok(err));
1130 DEBUG("MSI-X vector setup index=%"PRIx64", core=%d apic=%d swvec=%x\n",
1131 *msix_index, core, dest, vector);
1134 /** Here are the global interrupts handled. */
/* Global (non-queue) interrupt handler: panics if the card flagged a
 * fatal event in the INT_KER DMA area, otherwise reads ISR0 to identify
 * the interrupting queue (queues themselves are polled). */
1135 static void interrupt_handler(void* arg)
1140 uint8_t* net_ivec_fatal = (uint8_t *) int_ker_virt;
1142 // bit 64 is indicator for a fatal event
1143 syserr = (net_ivec_fatal[8] & 0x1);
1145 // TODO handle fatal interrupt
1146 USER_PANIC("FATAL INTERRUPT");
1151 queue = sfn5122f_int_isr0_reg_lo_rd(d);
1152 DEBUG("AN INTERRUPT OCCURED %d \n", queue);
1153 // Don't need to start event queues because we're already polling
1156 /******************************************************************************/
1157 /* Management interface implemetation */
/*
 * Send the register-frame capability and the device MAC address to a queue
 * driver over its management binding.
 */
1159 static void idc_queue_init_data(struct sfn5122f_binding *b,
1160 struct capref registers,
1165 r = sfn5122f_queue_init_data__tx(b, NOP_CONT, registers, macaddr);
1166 // TODO: handle busy
1167 assert(err_is_ok(r));
1170 /** Tell queue driver that we are done initializing the queue. */
1171 static void idc_queue_memory_registered(struct sfn5122f_binding *b)
1174 r = sfn5122f_queue_memory_registered__tx(b, NOP_CONT);
1175 // TODO: handle busy
1176 assert(err_is_ok(r));
1179 /** Send request to queue driver to rewrite the tail pointers of its queues. */
1180 static void idc_write_queue_tails(struct sfn5122f_binding *b)
// Local (in-process) queue driver path.
// NOTE(review): both the local call and the IDC message appear here; the
// selecting branch is presumably between them -- verify against full source.
1184 qd_write_queue_tails(b);
1188 r = sfn5122f_write_queue_tails__tx(b, NOP_CONT);
1189 // TODO: handle busy
1190 assert(err_is_ok(r));
1193 /** Signal queue driver that the queue is stopped. */
1194 static void idc_queue_terminated(struct sfn5122f_binding *b)
1197 r = sfn5122f_queue_terminated__tx(b, NOP_CONT);
1198 // TODO: handle busy
1199 assert(err_is_ok(r));
1202 /** Request from queue driver for register memory cap */
1203 void cd_request_device_info(struct sfn5122f_binding *b)
// Reply with the BAR register frame and this port's MAC address.
// NOTE(review): both the local-queue call and the IDC reply are visible;
// presumably chosen by a branch on the binding type -- confirm in full source.
1206 qd_queue_init_data(b, *regframe, d_mac[pci_function]);
1209 idc_queue_init_data(b, *regframe, d_mac[pci_function]);
1212 /** Request from queue driver to initialize hardware queue. */
1213 void cd_register_queue_memory(struct sfn5122f_binding *b,
1215 struct capref tx_frame,
1216 struct capref rx_frame,
1217 struct capref ev_frame,
1224 // Save state so we can restore the configuration in case we need to do a
// Record the queue configuration in the driver-global queue table.
1228 queues[n].enabled = false;
1229 queues[n].tx_frame = tx_frame;
1230 queues[n].rx_frame = rx_frame;
1231 queues[n].ev_frame = ev_frame;
1232 queues[n].tx_head = 0;
1233 queues[n].rx_head = 0;
1234 queues[n].ev_head = 0;
1235 queues[n].rxbufsz = rxbufsz;
1236 queues[n].binding = b;
1237 queues[n].use_irq = use_irq;
1238 queues[n].userspace = userspace;
// -1 marks "no MSI-X table entry allocated yet".
1239 queues[n].msix_index = -1;
1240 queues[n].msix_intvec = vector;
1241 queues[n].msix_intdest = core;
// Program the hardware event/TX/RX queues; each returns its buffer-table
// entry, or -1 on allocation failure.
1243 queues[n].ev_buf_tbl = init_evq(n);
1245 queues[n].tx_buf_tbl = init_txq(n, csum_offload, userspace);
1246 queues[n].rx_buf_tbl = init_rxq(n, userspace);
1248 if(queues[n].ev_buf_tbl == -1 ||
1249 queues[n].tx_buf_tbl == -1 ||
1250 queues[n].rx_buf_tbl == -1){
1252 DEBUG("Allocating queue failed \n");
// Set up an MSI-X interrupt only if requested, a vector was supplied, and
// none has been allocated for this queue yet.
1256 if (queues[n].use_irq) {
1257 if (queues[n].msix_intvec != 0) {
1258 if (queues[n].msix_index == -1) {
1259 setup_interrupt(&queues[n].msix_index, queues[n].msix_intdest,
1260 queues[n].msix_intvec);
1265 queues[n].enabled = true;
// Ask the queue driver to (re)write its tail pointers, then acknowledge.
1267 idc_write_queue_tails(queues[n].binding);
1270 qd_queue_memory_registered(b);
1274 idc_queue_memory_registered(b);
/* Tear down queue n's software state and acknowledge the termination. */
1282 static void idc_terminate_queue(struct sfn5122f_binding *b, uint16_t n)
1284 DEBUG("idc_terminate_queue(q=%d) \n", n);
1288 queues[n].enabled = false;
1289 queues[n].binding = NULL;
1291 // TODO: Do we have to free the frame caps, or destroy the binding?
1292 idc_queue_terminated(b);
1298 /** Send response about filter registration to device manager */
1299 static void idc_filter_registered(struct sfn5122f_binding *b,
1306 r = sfn5122f_filter_registered__tx(b, NOP_CONT, buf_id_rx, buf_id_tx, err,
1308 // TODO: handle busy
1309 assert(err_is_ok(r));
1312 /** Send response about filter deregistration to device manager */
1313 static void idc_filter_unregistered(struct sfn5122f_binding *b,
1318 r = sfn5122f_filter_unregistered__tx(b, NOP_CONT, filter, err);
1319 // TODO: handle busy
1320 assert(err_is_ok(r));
/*
 * Register an IP port filter on behalf of a queue driver and report the
 * resulting filter id (or error) back over the binding.
 */
1324 static void idc_register_port_filter(struct sfn5122f_binding *b,
1328 sfn5122f_port_type_t type,
1331 DEBUG("idc_register_port_filter: called (q=%d t=%d p=%d)\n",
// Lazily initialize the waitset used for the ARP RPC.
1336 waitset_init(&rpc_ws);
// Destination IP is stored in network byte order.
1341 struct sfn5122f_filter_ip f = {
1343 .dst_ip = htonl(ip),
// Sentinel: remains -1 if registration did not produce a filter id.
1351 uint64_t fid = -1ULL;
1353 err = reg_port_filter(&f, &fid);
1354 DEBUG("filter registered: err=%"PRIu64", fid=%"PRIu64"\n", err, fid);
1356 idc_filter_registered(b, buf_id_rx, buf_id_tx, err, fid);
/* Filter removal is not implemented; always reply LIB_ERR_NOT_IMPLEMENTED. */
1360 static void idc_unregister_filter(struct sfn5122f_binding *b,
1363 DEBUG("unregister_filter: called (%"PRIx64")\n", filter);
1364 idc_filter_unregistered(b, filter, LIB_ERR_NOT_IMPLEMENTED);
/* Receive handler table for the legacy queue-driver management interface. */
1368 static struct sfn5122f_rx_vtbl rx_vtbl = {
1369 .request_device_info = cd_request_device_info,
1370 .register_queue_memory = cd_register_queue_memory,
1371 .terminate_queue = idc_terminate_queue,
1372 .register_port_filter = idc_register_port_filter,
1373 .unregister_filter = idc_unregister_filter,
/*
 * devif interface: allocate a free hardware queue, program its EV/TX/RX
 * rings from the supplied frames, and reply with the queue id and the
 * register frame cap (or SFN_ERR_ALLOC_QUEUE with NULL_CAP on failure).
 */
1377 static void cd_create_queue(struct sfn5122f_devif_binding *b, struct capref rx, struct capref tx,
1380 // Save state so we can restore the configuration in case we need to do a
// Find the first unused slot in the queue table.
1384 for (int i = 0; i < NUM_QUEUES; i++) {
1385 if (queues[i].enabled == false) {
// No free queue: report the allocation error to the client.
// NOTE(review): `err` first holds the failure code, then is overwritten by
// the send result -- intentional here, but fragile.
1392 err = SFN_ERR_ALLOC_QUEUE;
1393 err = b->tx_vtbl.create_queue_response(b, NOP_CONT, 0, NULL_CAP, err);
1394 assert(err_is_ok(err));
// devif queues are userspace-polled: no IRQ, fixed MTU-sized RX buffers.
1397 queues[n].enabled = false;
1398 queues[n].tx_frame = tx;
1399 queues[n].rx_frame = rx;
1400 queues[n].ev_frame = ev;
1401 queues[n].tx_head = 0;
1402 queues[n].rx_head = 0;
1403 queues[n].ev_head = 0;
1404 queues[n].rxbufsz = MTU_MAX;
1405 queues[n].devif = b;
1406 queues[n].use_irq = false;
1407 queues[n].userspace = true;
1408 queues[n].msix_index = -1;
// Program the hardware rings; -1 signals a buffer-table allocation failure.
1410 queues[n].ev_buf_tbl = init_evq(n);
1412 queues[n].tx_buf_tbl = init_txq(n, csum_offload, true);
1413 queues[n].rx_buf_tbl = init_rxq(n, true);
1415 if(queues[n].ev_buf_tbl == -1 ||
1416 queues[n].tx_buf_tbl == -1 ||
1417 queues[n].rx_buf_tbl == -1){
1418 err = SFN_ERR_ALLOC_QUEUE;
1419 err = b->tx_vtbl.create_queue_response(b, NOP_CONT, 0, NULL_CAP, err);
1420 assert(err_is_ok(err));
// Success: hand the client the queue id and the register frame.
1423 queues[n].enabled = true;
1424 err = b->tx_vtbl.create_queue_response(b, NOP_CONT, n, *regframe, SYS_ERR_OK);
1425 assert(err_is_ok(err));
/*
 * devif interface: register a memory region for queue qid by entering it
 * into the device buffer table.  Replies with the region's buffer-table
 * offset, or SFN_ERR_REGISTER_REGION on failure.
 */
1428 static void cd_register_region(struct sfn5122f_devif_binding *b, uint16_t qid, struct capref region)
1431 struct frame_identity id;
1432 uint64_t buffer_offset = 0;
// Resolve the frame's physical address and size.
1434 err = invoke_frame_identify(region, &id);
1435 if (err_is_fail(err)) {
1436 err = b->tx_vtbl.register_region_response(b, NOP_CONT, 0, SFN_ERR_REGISTER_REGION);
1437 assert(err_is_ok(err));
1440 size_t size = id.bytes;
1441 lpaddr_t addr = id.base;
1443 // TODO unsigned/signed not nice ...
// alloc_buf_tbl_entries returns -1 (as unsigned) when the table is full.
1444 buffer_offset = alloc_buf_tbl_entries(addr, size/BUF_SIZE, qid, true, d);
1445 if (buffer_offset == -1) {
1446 err = b->tx_vtbl.register_region_response(b, NOP_CONT, 0, SFN_ERR_REGISTER_REGION);
1447 assert(err_is_ok(err));
1450 err = b->tx_vtbl.register_region_response(b, NOP_CONT, buffer_offset, SYS_ERR_OK);
1451 assert(err_is_ok(err));
/* devif interface: queue destruction is not implemented yet. */
1454 static void cd_destroy_queue(struct sfn5122f_devif_binding *b, uint16_t qid)
1456 USER_PANIC("NIY \n");
/* Receive handler table for the devif (device-interface) management service. */
1460 static struct sfn5122f_devif_rx_vtbl rx_vtbl_devif = {
1461 .create_queue_call = cd_create_queue,
1462 .destroy_queue_call = cd_destroy_queue,
1463 .register_region_call = cd_register_region,
/*
 * Export callback for the legacy management interface: register the service
 * under "<service_name>_sfn5122fmng" with the nameservice.
 */
1466 static void export_cb(void *st, errval_t err, iref_t iref)
1468 const char *suffix = "_sfn5122fmng";
// Buffer exactly fits both strings plus the terminating NUL.
1469 char name[strlen(service_name) + strlen(suffix) + 1];
1471 assert(err_is_ok(err));
1473 // Build label for internal management service
1474 sprintf(name, "%s%s", service_name, suffix);
1476 err = nameservice_register(name, iref);
1477 assert(err_is_ok(err));
1478 DEBUG("Management interface exported\n");
/* Connect callback: install the handler table on each new client binding. */
1482 static errval_t connect_cb(void *st, struct sfn5122f_binding *b)
1484 DEBUG("New connection on management interface\n");
1485 b->rx_vtbl = rx_vtbl;
/*
 * Export callback for the devif management interface: register the service
 * under "<service_name>_sfn5122fmng_devif" with the nameservice.
 */
1489 static void export_devif_cb(void *st, errval_t err, iref_t iref)
1491 const char *suffix = "_sfn5122fmng_devif";
// Buffer exactly fits both strings plus the terminating NUL.
1492 char name[strlen(service_name) + strlen(suffix) + 1];
1494 assert(err_is_ok(err));
1496 // Build label for internal management service
1497 sprintf(name, "%s%s", service_name, suffix);
1499 err = nameservice_register(name, iref);
1500 assert(err_is_ok(err));
1501 DEBUG("Devif Management interface exported\n");
/* Connect callback: install the devif handler table on each new binding. */
1505 static errval_t connect_devif_cb(void *st, struct sfn5122f_devif_binding *b)
1507 DEBUG("New connection on devif management interface\n");
1508 b->rx_vtbl = rx_vtbl_devif;
1513  * Initialize management interface for queue drivers.
1514  * This has to be done _after_ the hardware is initialized.
1516 static void initialize_mngif(void)
// Export both the legacy and the devif management services on the default
// waitset; registration with the nameservice completes in the export_cb's.
1520 r = sfn5122f_export(NULL, export_cb, connect_cb, get_default_waitset(),
1521 IDC_BIND_FLAGS_DEFAULT);
1522 assert(err_is_ok(r));
1524 r = sfn5122f_devif_export(NULL, export_devif_cb, connect_devif_cb, get_default_waitset(),
1525 IDC_BIND_FLAGS_DEFAULT);
1526 assert(err_is_ok(r));
1530 /*****************************************************************************/
1531 /* ARP service client */
1533 /** Get information about the local TCP/IP configuration (IP, gateway,
1533  *  netmask) via the ARP service RPC. */
1534 static errval_t arp_ip_info(void)
1536 errval_t err, msgerr;
// msgerr carries the service-side result; err the transport result.
1540 err = arp_rpc.vtbl.ip_info(&arp_rpc, 0, &msgerr, &ip, &gw, &mask);
1541 if (err_is_fail(err)) {
/* ARP bind callback: wrap the binding in an RPC client and signal
 * completion to the waiter in bind_arp(). */
1547 static void a_bind_cb(void *st, errval_t err, struct net_ARP_binding *b)
1549 assert(err_is_ok(err));
1550 err = net_ARP_rpc_client_init(&arp_rpc, b);
1551 assert(err_is_ok(err));
1552 net_arp_connected = true;
1555 /** Bind to ARP service (currently blocking) */
1556 static void bind_arp(struct waitset *ws)
1561 DEBUG("bind_arp()\n");
// Blocks until the ARP service has registered itself.
1562 err = nameservice_blocking_lookup("sfn5122f_ARP", &iref);
1563 assert(err_is_ok(err));
1564 DEBUG("resolved\n");
1566 err = net_ARP_bind(iref, a_bind_cb, NULL, ws, IDC_BIND_FLAGS_DEFAULT);
1567 assert(err_is_ok(err));
1568 DEBUG("binding initiated\n");
// Spin on both waitsets until a_bind_cb flips net_arp_connected.
1570 while (!net_arp_connected) {
1571 event_dispatch_non_block(ws);
1572 event_dispatch_non_block(get_default_waitset());
1574 DEBUG("bound_arp\n");
1578 /******************************************************************************/
1579 /* Initialization code for driver */
1581 /** Callback from pci to initialize a specific PCI device. */
1582 static void pci_init_card(struct device_mem* bar_info, int bar_count)
// NOTE(review): malloc result is not checked before use -- confirm policy.
1587 d = malloc(sizeof(*d));
1588 /* Map first BAR for register access */
1589 assert(bar_count >= 1);
1590 DEBUG("BAR count %d \n", bar_count);
1591 map_device(&bar_info[0]);
1592 regframe = bar_info[0].frame_cap;
1593 DEBUG("BAR[0] mapped (v=%llx p=%llx l=%llx)\n",
1594 (unsigned long long) bar_info[0].vaddr,
1595 (unsigned long long) bar_info[0].paddr,
1596 (unsigned long long) bar_info[0].bytes);
1598 /* Initialize Mackerel binding */
1599 sfn5122f_initialize(d, (void*) bar_info[0].vaddr);
1601 // Initialize manager for MSI-X vectors
// MSI-X BAR mapping is currently disabled; only the vector-count based
// allocator below is set up.
1603 //d_msix = malloc(sizeof(*d_msix));
1604 //map_device(&bar_info[1]);
1605 //sfn5122f_msix_initialize(d_msix, (void*) bar_info[1].vaddr);
1606 DEBUG("Enabling MSI-X interrupts\n");
1607 uint16_t msix_count = 0;
1608 err = pci_msix_enable(&msix_count);
1609 assert(err_is_ok(err));
1610 assert(msix_count > 0);
1611 DEBUG("MSI-X #vecs=%d\n", msix_count);
// Allocator hands out MSI-X table slots to queues (see setup_interrupt).
1613 res = bmallocator_init(&msix_alloc, msix_count);
1616 DEBUG("Using legacy interrupts\n");
1619 /* Get all information needed */
1621 /* Initialize hardware registers etc. */
1622 /* Start interrups / mac_stats etc. */
1624 /* Init rx filters */
1625 init_rx_filter_config();
1626 /* initalize managemnt interface */
/*
 * Parse driver command-line arguments (cardname=, bus=, device=, function=,
 * msix=); anything unrecognized is forwarded to the queue driver via
 * qd_argument().
 */
1630 static void parse_cmdline(int argc, char **argv)
1634 for (i = 1; i < argc; i++) {
// NOTE(review): strlen("...") - 1 compares one character short of the
// prefix (the '=' is excluded), so e.g. "cardnameX=..." would also match.
// Present on every option below -- confirm whether intentional.
1635 if (strncmp(argv[i], "cardname=", strlen("cardname=") - 1) == 0) {
1636 service_name = argv[i] + strlen("cardname=");
1637 } else if (strncmp(argv[i], "bus=", strlen("bus=") - 1) == 0) {
1638 pci_bus = atol(argv[i] + strlen("bus="));
1639 } else if (strncmp(argv[i], "device=", strlen("device=") - 1) == 0) {
1640 pci_device = atol(argv[i] + strlen("device="));
1641 } else if (strncmp(argv[i], "function=", strlen("function=") - 1) == 0){
1642 pci_function = atol(argv[i] + strlen("function="));
// Only port/function 0 is supported.
1643 if (pci_function != 0) {
1644 USER_PANIC("Second port not implemented, please use function=0")
1646 } else if (strncmp(argv[i], "msix=", strlen("msix=") - 1) == 0){
// NOTE(review): the assignment below is unreachable -- USER_PANIC aborts
// before use_msix can be set.
1647 USER_PANIC("MSI-X not fully supported yet");
1648 use_msix = !!atol(argv[i] + strlen("msix="));
1649 //qd_rgument(argv[i]);
// Unknown arguments are handed to the queue-driver argument parser.
1651 qd_argument(argv[i]);
/* Main event loop: dispatch events on the default waitset forever. */
1656 static void eventloop(void)
1660 ws = get_default_waitset();
1661 DEBUG("SFN5122F enter event loop \n");
1672 int main(int argc, char** argv)
1674 DEBUG("SFN5122F driver started \n");
1677 parse_cmdline(argc, argv);
1678 /* Register our device driver */
1679 r = pci_client_connect();
1680 assert(err_is_ok(r));
1681 r = pci_register_driver_irq(pci_init_card, PCI_CLASS_ETHERNET,
1682 PCI_DONT_CARE, PCI_DONT_CARE,
1683 PCI_VENDOR_SOLARFLARE, DEVICE_ID,
1684 pci_bus, pci_device, pci_function,
1685 interrupt_handler, NULL);
1687 while (!initialized) {
1688 event_dispatch(get_default_waitset());