 * Copyright (c) 2007-2011, ETH Zurich.
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
#include <net_queue_manager/net_queue_manager.h>
#include <barrelfish/nameservice_client.h>
#include <barrelfish/spawn_client.h>
#include <barrelfish/deferred.h>
#include <ipv4/lwip/inet.h>
#include <barrelfish/debug.h>
#include <if/sfn5122f_defs.h>
#include <if/sfn5122f_devif_defs.h>
#include <if/net_ARP_defs.h>
#include "sfn5122f_debug.h"
#include "buffer_tbl.h"
    struct sfn5122f_binding *binding;
    struct sfn5122f_devif_binding *devif;
    struct capref tx_frame;
    struct capref rx_frame;
    struct capref ev_frame;
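    /* Note: the binding/capref fields above (and the buffer-table indices
     * referred to below) are per-queue state; judging from the queues[]
     * array declared further down, they are presumably members of
     * struct queue_state, whose surrounding declaration is elided here. */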
    // First entries of the buffer table that make up each queue

static bool use_msix = false;
static const char *service_name = "sfn5122f";
static sfn5122f_t *d = NULL;
//static sfn5122f_msix_t *d_msix = NULL;
static uint64_t d_mac[2];
static int initialized = 0;
static struct capref *regframe;

static struct capref int_ker;
static void* int_ker_virt;

static struct capref mac_stats;
static void* mac_virt;
static uint64_t mac_phys;

static uint8_t pci_function = 0;

static uint32_t cap[2];
static uint32_t speed[2];
static uint32_t flags[2];
static uint32_t fcntl[2];

static uint32_t phy_caps[2];
static uint32_t phy_flags[2];
static uint32_t phy_media[2];
/* Loopback mode none and speed */
static uint32_t phy_loopback_mode = 0;
//static uint32_t phy_loopback_speed = 0;

static uint32_t wol_filter_id = 0;

static struct net_ARP_binding *arp_binding;
static bool net_arp_connected = false;
static struct waitset rpc_ws;

static bool csum_offload = true;

static uint32_t rx_indir_tbl[128];

static struct queue_state queues[1024];
/* PCI device address passed on command line */
static uint32_t pci_bus = PCI_DONT_CARE;
static uint32_t pci_device = PCI_DONT_CARE;
static struct bmallocator msix_alloc;
static size_t cdriver_msix = -1;
static uint8_t cdriver_vector;

// first to start everything
static bool first = true;

uint8_t rx_hash_key[40];

static uint32_t ip = 0;
enum filter_type_ip {

enum filter_type_mac {

struct sfn5122f_filter_ip {

struct sfn5122f_filter_mac {

/* scatter and RSS enable */
static bool rss_en = false;
static bool scatter_en = false;
static struct sfn5122f_filter_ip filters_rx_ip[NUM_FILTERS_IP];
//static struct sfn5122f_filter_ip filters_tx_ip[NUM_FILTERS_IP];

static struct sfn5122f_filter_mac filters_rx_mac[NUM_FILTERS_MAC];
static struct sfn5122f_filter_mac filters_tx_mac[NUM_FILTERS_MAC];
/******************************************************************************/

void qd_main(void) __attribute__((weak));
void qd_argument(const char *arg) __attribute__((weak));
void qd_interrupt(void) __attribute__((weak));
void qd_queue_init_data(struct sfn5122f_binding *b, struct capref registers,
                        uint64_t macaddr) __attribute__((weak));
void qd_queue_memory_registered(struct sfn5122f_binding *b) __attribute__((weak));
void qd_write_queue_tails(struct sfn5122f_binding *b) __attribute__((weak));

void cd_request_device_info(struct sfn5122f_binding *b);
void cd_register_queue_memory(struct sfn5122f_binding *b,

static void idc_write_queue_tails(struct sfn5122f_binding *b);

static void device_init(void);
static void start_all(void);
static void probe_all(void);
static uint32_t init_txq(uint16_t n, lpaddr_t phys, bool csum, bool userspace);
static uint32_t init_rxq(uint16_t n, lpaddr_t phys, bool userspace);
static uint32_t init_evq(uint16_t n, lpaddr_t phys);
static void queue_hw_stop(uint16_t n);

static void setup_interrupt(size_t *msix_index, uint8_t core, uint8_t vector);
static void global_interrupt_handler(void* arg);

static void bind_arp(struct waitset *ws);
static errval_t arp_ip_info(void);
/***************************************************************************/
static void sfn5122f_filter_port_setup(int idx, struct sfn5122f_filter_ip* filter)
{
    sfn5122f_rx_filter_tbl_lo_t filter_lo = 0;
    sfn5122f_rx_filter_tbl_hi_t filter_hi = 0;

    if (filter->type_ip == sfn5122f_PORT_UDP) {
        // Add destination IP
        filter_hi = sfn5122f_rx_filter_tbl_hi_dest_ip_insert(filter_hi,
        filter_lo = sfn5122f_rx_filter_tbl_lo_src_ip_insert(filter_lo,
        filter_hi = sfn5122f_rx_filter_tbl_hi_tcp_udp_insert(filter_hi, 1);
        filter_lo = sfn5122f_rx_filter_tbl_lo_src_tcp_dest_udp_insert(
                        filter_lo, filter->dst_port);
        filter_hi = sfn5122f_rx_filter_tbl_hi_rss_en_insert(filter_hi, 0);
        filter_hi = sfn5122f_rx_filter_tbl_hi_scatter_en_insert(filter_hi, 0);
        DEBUG("UDP filter index %d: ip_dst %x port_dst %d ip_src %x port_src %d"
              idx, filter->dst_ip, filter->dst_port,
              filter->src_ip, filter->src_port, filter->queue);

    if (filter->type_ip == sfn5122f_PORT_TCP) {
        // Add dst IP and port
        filter_hi = sfn5122f_rx_filter_tbl_hi_dest_ip_insert(filter_hi,
        filter_lo = sfn5122f_rx_filter_tbl_lo_src_ip_insert(filter_lo,
        filter_lo = sfn5122f_rx_filter_tbl_lo_dest_port_tcp_insert(filter_lo,
        filter_hi = sfn5122f_rx_filter_tbl_hi_tcp_udp_insert(filter_hi, 0);
        filter_hi = sfn5122f_rx_filter_tbl_hi_rss_en_insert(filter_hi, 0);
        filter_hi = sfn5122f_rx_filter_tbl_hi_scatter_en_insert(filter_hi, 0);
        DEBUG("TCP filter index %d: ip_dst %x port_dst %d ip_src %x port_src %d"
              idx, filter->dst_ip, filter->dst_port,
              filter->src_ip, filter->src_port, filter->queue);

    filter_hi = sfn5122f_rx_filter_tbl_hi_rxq_id_insert(filter_hi, filter->queue);
    filter_hi = sfn5122f_rx_filter_tbl_hi_rss_en_insert(filter_hi, rss_en);
    filter_hi = sfn5122f_rx_filter_tbl_hi_scatter_en_insert(filter_hi, scatter_en);

    sfn5122f_rx_filter_tbl_lo_wr(d, idx, filter_lo);
    sfn5122f_rx_filter_tbl_hi_wr(d, idx, filter_hi);
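/* Illustrative sketch (not part of the driver): how a caller could fill in a
 * struct sfn5122f_filter_ip before registering it. Field names are taken from
 * this file; the concrete values and the zeroed source fields are assumptions
 * for illustration only. The real entry point is reg_port_filter() below.
 *
 *     struct sfn5122f_filter_ip f = {
 *         .type_ip  = sfn5122f_PORT_UDP,
 *         .dst_ip   = htonl(ip),   // local IP in network byte order
 *         .dst_port = 53,          // port to steer
 *         .src_ip   = 0,           // source not matched in this example
 *         .src_port = 0,
 *         .queue    = 0,           // RX queue that should receive matches
 *     };
 *     uint64_t fid;
 *     errval_t err = reg_port_filter(&f, &fid);
 */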
static uint32_t build_key(struct sfn5122f_filter_ip* f)
{
    uint32_t data[4] = {0, 0, 0, 0};

    if (f->type_ip == sfn5122f_PORT_UDP) {

    data[0] = host1 << 16 | port1;
    data[1] = port2 << 16 | host1 >> 16;

    return data[0] ^ data[1] ^ data[2] ^ data[3];
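/* The elided lines above presumably fill data[2]/data[3] from the remaining
 * address/type fields; XOR-folding the four words yields a single 32-bit key,
 * so two filters describing the same IP/port tuple always produce the same
 * key for the hash below. */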
static uint16_t filter_hash(uint32_t key)
{
    /* First 16 rounds */
    tmp = 0x1fff ^ key >> 16;
    tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
    tmp = tmp ^ tmp >> 9;
    tmp = tmp ^ tmp << 13 ^ key;
    tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
    return tmp ^ tmp >> 9;
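/* Sketch of how this hash is used (see ftqf_alloc below): filter_hash(key)
 * picks the initial slot and filter_increment(key) the probe step of an
 * open-addressing search over the filter table, i.e. roughly
 *
 *     idx = filter_hash(key) & (NUM_FILTERS_IP - 1);
 *     while (slot idx is occupied by a different filter)
 *         idx = (idx + filter_increment(key)) & (NUM_FILTERS_IP - 1);
 *
 * matching the scheme the documentation suggests for the RX filter table. */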
static bool filter_equals(struct sfn5122f_filter_ip* f1,
                          struct sfn5122f_filter_ip* f2)
{
    if (f1->type_ip != f2->type_ip) {
    } else if ((f1->src_ip != f2->src_ip) ||
               (f1->dst_ip != f2->dst_ip) ||
               (f1->queue != f2->queue)) {
    } else if ((f1->src_port != f2->src_port) &&
               (f2->dst_port != f1->dst_port)) {

static uint16_t filter_increment(uint32_t key)
static int ftqf_alloc(struct sfn5122f_filter_ip* f)
{
    // Documentation suggests hashing using a certain algorithm
    unsigned int incr = 0;

    hash = filter_hash(key);
    incr = filter_increment(key);

    key = hash & (NUM_FILTERS_IP - 1);

    if (filters_rx_ip[key].enabled == false) {
    } else if (filter_equals(&filters_rx_ip[key], f)) {

    key = (key + incr) & (NUM_FILTERS_IP - 1);

static errval_t reg_port_filter(struct sfn5122f_filter_ip* f, uint64_t* fid)
{
    DEBUG("reg_port_filter: called\n");

    if ((filt_ind = ftqf_alloc(f)) < 0) {
        return FILTER_ERR_NOT_ENOUGH_MEMORY;
    }

    filters_rx_ip[filt_ind] = *f;
    filters_rx_ip[filt_ind].enabled = true;

    sfn5122f_filter_port_setup(filt_ind, f);
/***************************************************************************/
/* Helper functions */

static void decode_link(uint32_t fcntl1, uint32_t flags1, uint32_t speed1)
{
    DEBUG("LINK MODE: AUTO \n");

    DEBUG("LINK MODE: RX/TX \n");

    DEBUG("LINK MODE: RESPOND \n");

    DEBUG("LINK MODE: NONE \n");

    DEBUG("LINK SPEED: %"PRIu32" \n", speed1);
    DEBUG("LINK FLAGS: %8lX \n", (long unsigned int) flags1);
    if (!!(flags1 & 1)) {
        DEBUG("LINK IS UP \n");

    if (!!(flags1 & (1 << 0x1))) {
        DEBUG("LINK IS FULL DUPLEX \n");
static void handle_assertions(void)
{
    memset(in, 0, sizeof(in));
    in[CMD_GET_ASSERTS_IN_CLEAR_OFFSET] = 0;

    err = mcdi_rpc(CMD_GET_ASSERTS, in, CMD_GET_ASSERTS_IN_LEN, out,
                   CMD_GET_ASSERTS_OUT_LEN, &outlen, pci_function, d);
    assert(err_is_ok(err));

    /* TODO handle assertions */
    printf("THERE WERE ASSERTIONS: %"PRIu8"\n", out[0]);
    /* Exit assertion state -> special reboot */
    err = mcdi_rpc(CMD_REBOOT, in, CMD_REBOOT_IN_LEN,
                   NULL, 0, NULL, pci_function, d);
    assert(err_is_ok(err));
/* Get link state and store the settings in the global variables */
static void get_link(uint8_t port)
{
    uint8_t out[CMD_GET_LINK_OUT_LEN];

    err = mcdi_rpc(CMD_GET_LINK, NULL, 0, out, CMD_GET_LINK_OUT_LEN, NULL, port, d);
    assert(err_is_ok(err));

    memcpy(&cap[port], out, 4);
    memcpy(&speed[port], out + CMD_GET_LINK_OUT_SPEED_OFFSET, 4);
    memcpy(&fcntl[port], out + CMD_GET_LINK_OUT_FCNTL_OFFSET, 4);
    memcpy(&flags[port], out + CMD_GET_LINK_OUT_FLAGS_OFFSET, 4);

    decode_link(fcntl[port], flags[port], speed[port]);
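/* All firmware interaction goes through mcdi_rpc(); judging from the call
 * sites in this file the convention is
 *
 *     err = mcdi_rpc(CMD_x,            // MCDI command code
 *                    in,  in_len,      // input buffer  (NULL/0 if unused)
 *                    out, out_len,     // output buffer (NULL/0 if unused)
 *                    &outlen,          // actual output length (may be NULL)
 *                    pci_function, d); // port/function and Mackerel device
 *
 * so get_link() above is just a thin wrapper that unpacks the GET_LINK
 * response into the global cap/speed/fcntl/flags arrays. */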
static void init_port(uint8_t port)
{
    uint8_t in[CMD_SET_MAC_IN_LEN];

    memcpy(in + CMD_SET_MAC_IN_ADR_OFFSET, &d_mac[port], 6);
    /* the linux driver sets these bits */

    memcpy(in + CMD_SET_MAC_IN_MTU_OFFSET, &reg, 4);

    in[CMD_SET_MAC_IN_DRAIN_OFFSET] = 0;
    /* Reject unicast packets? */
    in[CMD_SET_MAC_IN_REJECT_OFFSET] = 1;
    /* Set wanted flow control of the card: 2 -> bidirectional */
    in[CMD_SET_MAC_IN_FCTNL_OFFSET] = 2;
    err = mcdi_rpc(CMD_SET_MAC, in, CMD_SET_MAC_IN_LEN, NULL, 0, NULL, port, d);
    assert(err_is_ok(err));

    memset(mc_hash, 0, sizeof(mc_hash));
    err = mcdi_rpc(CMD_SET_MCAST_HASH, mc_hash, CMD_SET_MCAST_HASH_IN_LEN,
                   NULL, 0, NULL, port, d);
    assert(err_is_ok(err));

    memset(in, 0, sizeof(in));
    memcpy(in + CMD_SET_LINK_IN_CAP_OFFSET, &cap[pci_function], 4);

    err = mcdi_rpc(CMD_SET_LINK, in, CMD_SET_LINK_IN_LEN, NULL, 0, NULL, 0, d);
    assert(err_is_ok(err));
static void start_port(uint8_t port)
{
    uint8_t in[CMD_SET_MAC_IN_LEN];

    memset(&in, 0, sizeof(in));

    err = mcdi_rpc(CMD_SET_MCAST_HASH, mc_hash, CMD_SET_MCAST_HASH_IN_LEN,
                   NULL, 0, NULL, port, d);
    assert(err_is_ok(err));

    memcpy(in + CMD_SET_MAC_IN_ADR_OFFSET, &d_mac[port], 6);
    /* it seems the linux driver sets all bits not covered
       by the MAC address to 1 */

    memcpy(in + CMD_SET_MAC_IN_MTU_OFFSET, &reg, 4);
    in[CMD_SET_MAC_IN_DRAIN_OFFSET] = 0;
    /* Reject unicast packets? */
    in[CMD_SET_MAC_IN_REJECT_OFFSET] = 1;
    /* Set wanted flow control of the card: 2 -> RX/TX */
    in[CMD_SET_MAC_IN_FCTNL_OFFSET] = 2;
    err = mcdi_rpc(CMD_SET_MAC, in, CMD_SET_MAC_IN_LEN, NULL, 0, NULL, port, d);
    assert(err_is_ok(err));

    err = mcdi_rpc(CMD_SET_MCAST_HASH, mc_hash, CMD_SET_MCAST_HASH_IN_LEN,
                   NULL, 0, NULL, port, d);

    assert(err_is_ok(err));
/******************************************************************************
 *****************************************************************************/
static void probe_all(void)
{
    struct frame_identity frameid = { .base = 0, .bytes = 0 };

    // Test and clear the MC-reboot flag for this port/function
    offset = MCDI_REBOOT_OFFSET(pci_function);
    reg = sfn5122f_mc_treg_smem_rd(d, offset);

    sfn5122f_mc_treg_smem_wr(d, offset, 0);

    /* print out any assertions */

    // Let the BMC know that the driver is in charge of filter/link settings
    // before we can reset the NIC
    memset(&in, 0, sizeof(in));
    memset(&out, 0, sizeof(out));

    r = mcdi_rpc(CMD_GET_VERSION, NULL, 0, out, CMD_GET_VERSION_OUT_LEN,
                 &outlen, pci_function, d);
    assert(err_is_ok(r));

    memset(&out, 0, sizeof(out));

    // driver is operating / + update
    r = mcdi_rpc(CMD_DRV_ATTACH, in, CMD_DRV_ATTACH_IN_LEN, out,
                 CMD_DRV_ATTACH_OUT_LEN, &outlen, pci_function, d);
    assert(err_is_ok(r));

    r = mcdi_rpc(CMD_PORT_RESET, NULL, 0, NULL, 0, NULL, pci_function, d);
    assert(err_is_ok(r));

    if (mcdi_rpc(CMD_WOL_FILTER_GET, NULL, 0, out, CMD_WOL_FILTER_GET_OUT_LEN,
                 &outlen, pci_function, d) == SYS_ERR_OK) {
        memcpy(&wol_filter_id, out, 4);

    // Reset the card's filters
    mcdi_rpc(CMD_WOL_FILTER_RESET, NULL, 0, NULL, 0, NULL, pci_function, d);

    // memory for INT_KER
    int_ker_virt = alloc_map_frame(VREGION_FLAGS_READ_WRITE,
                                   2*sizeof(uint64_t), &int_ker);
    memset(int_ker_virt, 0, 2*sizeof(uint64_t));
    // Read in non-volatile configuration
    memset(&out, 0, sizeof(out));
    r = mcdi_rpc(CMD_GET_BOARD_CONFIG, NULL, 0, out,
                 CMD_GET_BOARD_CONFIG_OUT_LEN, &outlen, pci_function, d);
    assert(err_is_ok(r));

    memcpy(&d_mac[0], out + MCDI_MAC_PORT_OFFSET(0), 6);
    memcpy(&d_mac[1], out + MCDI_MAC_PORT_OFFSET(1), 6);

    // read PHY configuration
    r = mcdi_rpc(CMD_GET_PHY_CFG, NULL, 0, out, CMD_GET_PHY_CFG_OUT_LEN, &outlen,
    assert(err_is_ok(r));

    memcpy(&phy_caps[pci_function], out + CMD_GET_PHY_CFG_OUT_CAP_OFFSET, 4);
    memcpy(&phy_flags[pci_function], out + CMD_GET_PHY_CFG_OUT_FLAGS_OFFSET, 4);
    memcpy(&phy_media[pci_function], out + CMD_GET_PHY_CFG_OUT_MEDIA_OFFSET, 4);

    // get loopback modes
    r = mcdi_rpc(CMD_GET_LOOPBACK_MODES, NULL, 0, out,
                 CMD_GET_LOOPBACK_MODES_OUT_LEN, &outlen, pci_function, d);
    assert(err_is_ok(r));
    memcpy(&phy_loopback_mode, out + CMD_GET_LOOPBACK_MODES_SUGGESTED_OFFSET, 4);
    // loopback mode NONE is not a valid mode; mask it out
    phy_loopback_mode &= ~(1);

    mac_virt = alloc_map_frame(VREGION_FLAGS_READ_WRITE,
                               NUM_MAC_STATS*sizeof(uint64_t),

    assert(mac_virt != NULL);
    r = invoke_frame_identify(mac_stats, &frameid);
    assert(err_is_ok(r));
    mac_phys = frameid.base;
    memset(mac_virt, 0, NUM_MAC_STATS*sizeof(uint64_t));

    memset(&in, 0, sizeof(in));
    memcpy(in, &mac_phys, 8);

    // Settings for DMA of the MAC stats
    in[CMD_MAC_STATS_IN_CMD_OFFSET] = 0x6;
    in[CMD_MAC_STATS_IN_DMA_LEN_OFFSET] = 8;
    in[CMD_MAC_STATS_IN_DMA_LEN_OFFSET+1] = 3;
    r = mcdi_rpc(CMD_MAC_STATS, in, CMD_MAC_STATS_IN_LEN, NULL, 0, NULL,
    assert(err_is_ok(r));
// Init card IP filters
static void init_rx_filter_config(void)
{
    uint64_t reg_hi, reg_lo;

    for (int i = 0; i < NUM_FILTERS_IP; i++) {
        sfn5122f_rx_filter_tbl_lo_wr(d, i, 0);
        sfn5122f_rx_filter_tbl_hi_wr(d, i, 0);

    reg_lo = sfn5122f_rx_filter_ctl_reg_lo_rd(d);
    reg_hi = sfn5122f_rx_filter_ctl_reg_hi_rd(d);

    reg_hi = sfn5122f_rx_filter_ctl_reg_hi_ethernet_full_search_limit_insert(reg_hi, 1);
    reg_hi = sfn5122f_rx_filter_ctl_reg_hi_ethernet_wildcard_search_limit_insert(reg_hi, 3);

    reg_lo = sfn5122f_rx_filter_ctl_reg_lo_multicast_nomatch_q_id_lo_insert(reg_lo, 0);
    reg_lo = sfn5122f_rx_filter_ctl_reg_lo_unicast_nomatch_q_id_insert(reg_lo, 0);
    reg_lo = sfn5122f_rx_filter_ctl_reg_lo_unicast_nomatch_rss_enabled_insert(reg_lo, 0);
    reg_lo = sfn5122f_rx_filter_ctl_reg_lo_multicast_nomatch_rss_enabled_insert(reg_lo, 0);

    reg_lo = sfn5122f_rx_filter_ctl_reg_lo_udp_full_srch_limit_insert(reg_lo, 1);
    reg_lo = sfn5122f_rx_filter_ctl_reg_lo_udp_wild_srch_limit_insert(reg_lo, 3);
    reg_lo = sfn5122f_rx_filter_ctl_reg_lo_tcp_full_srch_limit_insert(reg_lo, 1);
    reg_lo = sfn5122f_rx_filter_ctl_reg_lo_tcp_wild_srch_limit_insert(reg_lo, 3);

    sfn5122f_rx_filter_ctl_reg_lo_wr(d, reg_lo);
    sfn5122f_rx_filter_ctl_reg_hi_wr(d, reg_hi);
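/* The *_srch_limit fields presumably bound how many probe steps the hardware
 * takes when walking the filter table for full-match vs. wildcard filters;
 * the values written above (1 full / 3 wildcard) keep lookups short, which is
 * fine since this driver only installs a handful of filters. */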
static void device_init(void)
{
    struct frame_identity frameid = { .base = 0, .bytes = 0 };
    uint64_t reg, reg2; // tmp_key = 0;
    uint8_t in[24]; // set length to the biggest input length needed

    memset(&in, 0, sizeof(in));

    // recover from a failed assertion post-reset

    /* ignore TX of packets of 16 bytes and less */
    reg = sfn5122f_tx_reserved_reg_lo_rd(d);
    reg = sfn5122f_tx_reserved_reg_lo_tx_flush_min_len_en_insert(reg, 1);
    sfn5122f_tx_reserved_reg_lo_wr(d, reg);
    sfn5122f_tx_reserved_reg_hi_wr(d, sfn5122f_tx_reserved_reg_hi_rd(d));

    // Disable TX_NO_EOP_DISC_EN, otherwise it would limit packets to 16 bytes
    reg = sfn5122f_tx_cfg_reg_lo_rd(d);
    reg = sfn5122f_tx_cfg_reg_lo_tx_no_eop_disc_en_insert(reg, 0);
    reg = sfn5122f_tx_cfg_reg_lo_tx_ownerr_ctl_insert(reg, 1);
    reg = sfn5122f_tx_cfg_reg_lo_tx_filter_en_bit_insert(reg, 1);
    sfn5122f_tx_cfg_reg_lo_wr(d, reg);
    sfn5122f_tx_cfg_reg_hi_wr(d, sfn5122f_tx_cfg_reg_hi_rd(d));

    reg = sfn5122f_rx_cfg_reg_lo_rd(d);
    // clear one bit and set other bits that are not in the documentation (43 and 47)
    reg = sfn5122f_rx_cfg_reg_lo_rx_desc_push_en_insert(reg, 0);
    reg = sfn5122f_rx_cfg_reg_lo_rx_ingr_en_insert(reg, 1);
    reg = sfn5122f_rx_cfg_reg_lo_rx_usr_buf_size_insert(reg, (MTU_MAX-256) >> 5);
    //reg = sfn5122f_rx_cfg_reg_lo_rx_usr_buf_size_insert(reg, 4096 >> 5);
    //reg = sfn5122f_rx_cfg_reg_lo_rx_ownerr_ctl_insert(reg, 1);
    reg = sfn5122f_rx_cfg_reg_lo_rx_ip_hash_insert(reg, 1);
    //reg = sfn5122f_rx_cfg_reg_lo_rx_hash_insrt_hdr_insert(reg, 1);
    reg = sfn5122f_rx_cfg_reg_lo_rx_hash_alg_insert(reg, 1);
    sfn5122f_rx_cfg_reg_lo_wr(d, reg);
    sfn5122f_rx_cfg_reg_hi_wr(d, sfn5122f_rx_cfg_reg_hi_rd(d));

    /* Enable event logging (no UART); event destination is queue 0 */
    r = mcdi_rpc(CMD_LOG_CTRL, in, CMD_LOG_CTRL_IN_LEN,
                 NULL, 0, NULL, pci_function, d);
    assert(err_is_ok(r));

    /* Set destination of TX/RX flush events */
    sfn5122f_dp_ctrl_reg_lo_fls_evq_id_wrf(d, 0);
    sfn5122f_dp_ctrl_reg_hi_wr(d, sfn5122f_dp_ctrl_reg_hi_rd(d));

    /* Disable user events for now */
    sfn5122f_usr_ev_cfg_lo_usrev_dis_wrf(d, 1);
    sfn5122f_usr_ev_cfg_hi_wr(d, sfn5122f_usr_ev_cfg_hi_rd(d));

    // This seems not to be device specific, i.e. it works for other

    /* Set position of descriptor caches in SRAM */
    sfn5122f_srm_tx_dc_cfg_reg_lo_wr(d, TX_DC_BASE);
    sfn5122f_srm_tx_dc_cfg_reg_hi_wr(d, sfn5122f_srm_tx_dc_cfg_reg_hi_rd(d));
    sfn5122f_srm_rx_dc_cfg_reg_lo_srm_rx_dc_base_adr_wrf(d, RX_DC_BASE);
    sfn5122f_srm_rx_dc_cfg_reg_hi_wr(d, sfn5122f_srm_rx_dc_cfg_reg_hi_rd(d));

    /* Set TX descriptor cache size to 16 */
    sfn5122f_tx_dc_cfg_reg_lo_tx_dc_size_wrf(d, 1);
    sfn5122f_tx_dc_cfg_reg_hi_wr(d, sfn5122f_tx_dc_cfg_reg_hi_rd(d));

    /* Set RX descriptor cache size to 64 and the low watermark */
    sfn5122f_rx_dc_cfg_reg_lo_rx_dc_size_wrf(d, 3);
    sfn5122f_rx_dc_cfg_reg_hi_wr(d, sfn5122f_rx_dc_cfg_reg_hi_rd(d));

    reg = sfn5122f_rx_dc_pf_wm_reg_lo_rx_dc_pf_lwm_insert(reg, RX_DESC_CACHE_SIZE - 8);
    sfn5122f_rx_dc_pf_wm_reg_lo_wr(d, reg);
    sfn5122f_rx_dc_pf_wm_reg_hi_wr(d, sfn5122f_rx_dc_pf_wm_reg_hi_rd(d));

    /* Program the INT_KER address for interrupts */
    r = invoke_frame_identify(int_ker, &frameid);
    assert(err_is_ok(r));

    sfn5122f_int_adr_reg_ker_lo_wr(d, frameid.base);
    reg = sfn5122f_int_adr_reg_ker_hi_rd(d);

    // disable vector write if we use MSI-X
    reg = sfn5122f_int_adr_reg_ker_hi_norm_int_vec_dis_ker_insert(reg, 1);
    if (cdriver_msix == -1) {
        r = pci_setup_inthandler(global_interrupt_handler, NULL, &cdriver_vector);
        assert(err_is_ok(r));
        setup_interrupt(&cdriver_msix, disp_get_core_id(), cdriver_vector);

    reg = sfn5122f_int_adr_reg_ker_hi_norm_int_vec_dis_ker_insert(reg, 0);

    sfn5122f_int_adr_reg_ker_hi_wr(d, reg);

    /* Enable all the genuinely fatal interrupts */
    reg = sfn5122f_fatal_intr_reg_ker_lo_ill_adr_int_ker_en_insert(reg, 1);
    /* Enable rxbuf/txbuf ownership-error interrupts; fields not documented */
    reg = sfn5122f_fatal_intr_reg_ker_lo_rxbuf_own_int_ker_en_insert(reg, 1);
    reg = sfn5122f_fatal_intr_reg_ker_lo_txbuf_own_int_ker_en_insert(reg, 1);

    //reg = sfn5122f_fatal_intr_reg_ker_lo_sram_perr_int_p_ker_en_insert(reg, 1);
    sfn5122f_fatal_intr_reg_ker_lo_wr(d, ~reg);
    sfn5122f_fatal_intr_reg_ker_hi_wr(d, 0xFFFFFFFFFFFFFFFF);

    /* Set up the RSS indirection table (maps a hash value to an RX queue) */
    for (int i = 0; i < 128; i++) {

        sfn5122f_rx_indirection_tbl_wr(d, i, rx_indir_tbl[i]);

    /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
     * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
     * (from the linux driver) */
    reg = sfn5122f_tx_reserved_reg_lo_rd(d);
    reg = sfn5122f_tx_reserved_reg_lo_tx_rx_spacer_en_insert(reg, 1);
    reg = sfn5122f_tx_reserved_reg_lo_tx_one_pkt_per_q_insert(reg, 1);
    reg = sfn5122f_tx_reserved_reg_lo_tx_dis_non_ip_ev_insert(reg, 1);

    /* Enable software events */
    reg = sfn5122f_tx_reserved_reg_lo_tx_soft_evt_en_insert(reg, 1);
    /* Prefetch threshold 2 => fetch when the descriptor cache is half empty */
    reg = sfn5122f_tx_reserved_reg_lo_tx_pref_threshold_insert(reg, 2);
    /* Disable the hardware watchdog, which can misfire */
    reg = sfn5122f_tx_reserved_reg_lo_tx_pref_wd_tmr_insert(reg, 0x3fffff);
    /* Squash TX of packets of 16 bytes or less */
    reg = sfn5122f_tx_reserved_reg_lo_tx_flush_min_len_en_insert(reg, 1);

    reg2 = sfn5122f_tx_reserved_reg_hi_rd(d);
    reg2 = sfn5122f_tx_reserved_reg_hi_tx_push_en_insert(reg2, 0);
    reg2 = sfn5122f_tx_reserved_reg_hi_tx_push_chk_dis_insert(reg2, 0);
    reg2 = sfn5122f_tx_reserved_reg_hi_tx_rx_spacer_insert(reg2, 0xfe);
    sfn5122f_tx_reserved_reg_lo_wr(d, reg);
    sfn5122f_tx_reserved_reg_hi_wr(d, reg2);

    init_port(pci_function);
    get_link(pci_function);
    DEBUG("BASIC CARD INIT DONE \n");
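/* Summary of device_init() above: recover from a pending firmware assertion,
 * configure TX (discard of tiny packets, owner-error and filter bits), RX
 * (buffer size, hashing), event/interrupt delivery (INT_KER address, fatal
 * interrupt mask), the RSS indirection table and the SRAM descriptor caches,
 * then bring up the port via init_port()/get_link(). */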
static void start_all(void)
{
    uint8_t in[CMD_MAC_STATS_IN_LEN];
    unsigned long long* stats = (unsigned long long *) mac_virt;

    start_port(pci_function);

    memset(int_ker_virt, 0, 2*sizeof(uint64_t));
    /* Enable interrupts */
    /* Use an interrupt level unused by event queues */
    reg = sfn5122f_int_en_reg_ker_lo_rd(d);

    reg = sfn5122f_int_en_reg_ker_lo_ker_int_leve_sel_insert(reg, 0);

    reg = sfn5122f_int_en_reg_ker_lo_ker_int_leve_sel_insert(reg, 0x1f);

    reg = sfn5122f_int_en_reg_ker_lo_drv_int_en_ker_insert(reg, 1);

    /* undocumented field */
    reg = sfn5122f_int_en_reg_ker_lo_ker_int_ker_insert(reg, 0);
    sfn5122f_int_en_reg_ker_lo_wr(d, reg);
    sfn5122f_int_en_reg_ker_hi_wr(d, sfn5122f_int_en_reg_ker_hi_rd(d));

    /* Start MAC stats */
    memset(in, 0, sizeof(in));
    stats[0x60] = (unsigned long long) (-1);
    memcpy(in, &mac_phys, 8);
    pointer = (uint8_t *) &mac_phys;
    in[CMD_MAC_STATS_IN_CMD_OFFSET] = 0xD;

    in[CMD_MAC_STATS_IN_DMA_LEN_OFFSET] = 8;
    in[CMD_MAC_STATS_IN_DMA_LEN_OFFSET+1] = 3;
    errval_t err = mcdi_rpc(CMD_MAC_STATS, in, CMD_MAC_STATS_IN_LEN,
                            NULL, 0, NULL, pci_function, d);
    assert(err_is_ok(err));
/**************************************************************************
 ***************************************************************************/
static void queue_hw_stop(uint16_t n)
{
    reg = sfn5122f_tx_flush_descq_reg_lo_rd(d);
    reg = sfn5122f_tx_flush_descq_reg_lo_tx_flush_descq_insert(reg, n);
    reg = sfn5122f_tx_flush_descq_reg_lo_tx_flush_descq_cmd_insert(reg, 1);
    sfn5122f_tx_flush_descq_reg_lo_wr(d, reg);
    sfn5122f_tx_flush_descq_reg_hi_wr(d, sfn5122f_tx_flush_descq_reg_hi_rd(d));

    reg = sfn5122f_rx_flush_descq_reg_lo_rd(d);
    reg = sfn5122f_rx_flush_descq_reg_lo_rx_flush_descq_insert(reg, n);
    reg = sfn5122f_rx_flush_descq_reg_lo_rx_flush_descq_cmd_insert(reg, 1);
    sfn5122f_rx_flush_descq_reg_lo_wr(d, reg);
    sfn5122f_rx_flush_descq_reg_hi_wr(d, sfn5122f_rx_flush_descq_reg_hi_rd(d));

    /* TODO Wait for DRIVER_EVENT */
    /* clear pointer table entries */
    sfn5122f_tx_desc_ptr_tbl_lo_wr(d, n, 0);
    sfn5122f_tx_desc_ptr_tbl_hi_wr(d, n, 0);
    sfn5122f_rx_desc_ptr_tbl_lo_wr(d, n, 0);
    sfn5122f_rx_desc_ptr_tbl_hi_wr(d, n, 0);

    /* Free RX queue buffer table entries */
    reg = sfn5122f_buf_tbl_upd_reg_lo_buf_clr_cmd_insert(reg, 1);
    reg = sfn5122f_buf_tbl_upd_reg_lo_buf_clr_start_id_insert(reg,
                                                              queues[n].rx_buf_tbl);

    if (queues[n].userspace) {
        reg = sfn5122f_buf_tbl_upd_reg_lo_buf_clr_end_id_insert(reg,
                                    queues[n].rx_buf_tbl + NUM_ENT_RX_USR);

        reg = sfn5122f_buf_tbl_upd_reg_lo_buf_clr_end_id_insert(reg,
                                    queues[n].rx_buf_tbl + NUM_ENT_RX);

    /* Free TX queue buffer table entries */
    reg = sfn5122f_buf_tbl_upd_reg_lo_buf_clr_cmd_insert(reg, 1);
    reg = sfn5122f_buf_tbl_upd_reg_lo_buf_clr_end_id_insert(reg,
                                    queues[n].tx_buf_tbl + NUM_ENT_TX);
    reg = sfn5122f_buf_tbl_upd_reg_lo_buf_clr_start_id_insert(reg,
                                    queues[n].tx_buf_tbl);

    /* Free EV queue buffer table entries */
    reg = sfn5122f_buf_tbl_upd_reg_lo_buf_clr_cmd_insert(reg, 1);
    reg = sfn5122f_buf_tbl_upd_reg_lo_buf_clr_end_id_insert(reg,
                                    queues[n].ev_buf_tbl + NUM_ENT_EVQ);
    reg = sfn5122f_buf_tbl_upd_reg_lo_buf_clr_start_id_insert(reg,
                                    queues[n].ev_buf_tbl);
static uint32_t init_evq(uint16_t n, lpaddr_t phys)
{
    //struct frame_identity frameid = { .base = 0, .bytes = 0 };
    uint64_t reg, buffer_offset;

    reg = sfn5122f_timer_tbl_lo_timer_q_en_insert(reg, 1);
    // set to 0 if interrupts for receives/sends should be generated

    reg = sfn5122f_timer_tbl_lo_host_notify_mode_insert(reg, 0);

    reg = sfn5122f_timer_tbl_lo_int_pend_insert(reg, 0);
    reg = sfn5122f_timer_tbl_lo_int_armd_insert(reg, 0);
    reg = sfn5122f_timer_tbl_lo_host_notify_mode_insert(reg, 1);

    // timer mode disabled
    reg = sfn5122f_timer_tbl_lo_timer_mode_insert(reg, 0);
    sfn5122f_timer_tbl_lo_wr(d, n, reg);
    sfn5122f_timer_tbl_hi_wr(d, n, sfn5122f_timer_tbl_hi_rd(d, n));

    r = invoke_frame_identify(queues[n].ev_frame, &frameid);
    assert(err_is_ok(r));
    ev_phys = frameid.base;

    buffer_offset = alloc_buf_tbl_entries(phys, NUM_ENT_EVQ, 0, 0, d);
    if (buffer_offset == -1) {

    DEBUG("EV_QUEUE_%d: buf_off %ld, phys 0x%lx\n", n, buffer_offset, phys);

    reg = sfn5122f_evq_ptr_tbl_lo_rd(d, n);
    reg = sfn5122f_evq_ptr_tbl_lo_evq_en_insert(reg, 1);
    reg = sfn5122f_evq_ptr_tbl_lo_evq_size_insert(reg, 3);
    reg = sfn5122f_evq_ptr_tbl_lo_evq_buf_base_id_insert(reg,

    sfn5122f_evq_ptr_tbl_lo_wr(d, n, reg);
    sfn5122f_evq_ptr_tbl_hi_wr(d, n, sfn5122f_evq_ptr_tbl_hi_rd(d, n));

    /* No write collection for this register */
    reg = sfn5122f_timer_command_reg_lo_rd(d, n);
    reg = sfn5122f_timer_command_reg_lo_tc_timer_val_insert(reg, 0);

    reg = sfn5122f_timer_command_reg_lo_tc_timer_mode_insert(reg, 0);

    reg = sfn5122f_timer_command_reg_lo_tc_timer_mode_insert(reg, 0);

    sfn5122f_timer_command_reg_lo_wr(d, n, reg);

    sfn5122f_evq_rptr_reg_wr(d, n, queues[n].ev_head);

    return buffer_offset;
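/* Note: alloc_buf_tbl_entries() appears to return the first allocated
 * buffer-table index, or -1 on failure; the init_*q() routines here keep the
 * result in an unsigned buffer_offset and compare it against -1, which is the
 * source of the "unsigned/signed not nice" TODO further down. */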
static uint32_t init_rxq(uint16_t n, lpaddr_t phys, bool userspace)
{
    //struct frame_identity frameid = { .base = 0, .bytes = 0 };
    uint64_t reg_lo, reg_hi, buffer_offset;
     * This will define a buffer in the buffer table, allowing
     * it to be used for event queues, descriptor rings etc.
    /* Get physical addresses for rx/tx rings and the event queue */

    r = invoke_frame_identify(queues[n].rx_frame, &frameid);
    assert(err_is_ok(r));
    rx_phys = frameid.base;
    rx_size = frameid.bytes;

    num_ent_rx = NUM_ENT_RX_USR;

    num_ent_rx = NUM_ENT_RX;

    buffer_offset = alloc_buf_tbl_entries(phys, num_ent_rx, 0, 0, d);

    if (buffer_offset == -1) {

    DEBUG("RX_QUEUE_%d: buf_off %ld, phys %lx, size %lx \n", n,
          buffer_offset, phys, rx_size);
    /* set up the RX queue */
    reg_lo = sfn5122f_rx_desc_ptr_tbl_lo_rd(d, n);
    reg_hi = sfn5122f_rx_desc_ptr_tbl_hi_rd(d, n);
    /* Which buffer table entries are used (i.e. the first entry) */
    reg_lo = sfn5122f_rx_desc_ptr_tbl_lo_rx_descq_buf_base_id_insert(reg_lo, buffer_offset);
    /* Which event queue is associated with this queue */
    reg_lo = sfn5122f_rx_desc_ptr_tbl_lo_rx_descq_evq_id_insert(reg_lo, n);

    reg_lo = sfn5122f_rx_desc_ptr_tbl_lo_rx_descq_owner_id_insert(reg_lo, 0);

    reg_lo = sfn5122f_rx_desc_ptr_tbl_lo_rx_descq_owner_id_insert(reg_lo, n+1);

    reg_lo = sfn5122f_rx_desc_ptr_tbl_lo_rx_descq_label_insert(reg_lo, n);

    /* 1024 entries = 1 (512 = 0; 2048 = 2; 4096 = 3) */
    reg_lo = sfn5122f_rx_desc_ptr_tbl_lo_rx_descq_size_insert(reg_lo, 1);

    reg_lo = sfn5122f_rx_desc_ptr_tbl_lo_rx_descq_type_insert(reg_lo, 0);

    reg_lo = sfn5122f_rx_desc_ptr_tbl_lo_rx_descq_type_insert(reg_lo, 1);

    reg_lo = sfn5122f_rx_desc_ptr_tbl_lo_rx_descq_jumbo_insert(reg_lo, 0);

    reg_lo = sfn5122f_rx_desc_ptr_tbl_lo_rx_descq_en_insert(reg_lo, 1);

    /* Hardware verifies the data digest */
    reg_hi = sfn5122f_rx_desc_ptr_tbl_hi_rx_iscsi_ddig_en_insert(reg_hi, 1);
    reg_hi = sfn5122f_rx_desc_ptr_tbl_hi_rx_iscsi_hdig_en_insert(reg_hi, 1);

    sfn5122f_rx_desc_ptr_tbl_lo_wr(d, n, reg_lo);
    sfn5122f_rx_desc_ptr_tbl_hi_wr(d, n, reg_hi);

    return buffer_offset;
static uint32_t init_txq(uint16_t n, uint64_t phys,
                         bool csum, bool userspace)
{
    //struct frame_identity frameid = { .base = 0, .bytes = 0 };
    uint64_t reg, reg1, buffer_offset;

    buffer_offset = alloc_buf_tbl_entries(phys, NUM_ENT_TX, 0, 0, d);

    if (buffer_offset == -1) {

    DEBUG("TX_QUEUE_%d: buf_off %ld, phys %lx\n", n, buffer_offset, phys);
    /* set up the TX queue */
    reg = sfn5122f_tx_desc_ptr_tbl_lo_rd(d, n);
    reg1 = sfn5122f_tx_desc_ptr_tbl_hi_rd(d, n);
    /* Which buffer table entries are used (i.e. the first entry) */
    reg = sfn5122f_tx_desc_ptr_tbl_lo_tx_descq_buf_base_id_insert(reg,
    /* Which event queue is associated with this queue */
    reg = sfn5122f_tx_desc_ptr_tbl_lo_tx_descq_evq_id_insert(reg, n);

    reg = sfn5122f_tx_desc_ptr_tbl_lo_tx_descq_owner_id_insert(reg, 0);

    reg = sfn5122f_tx_desc_ptr_tbl_lo_tx_descq_owner_id_insert(reg, n+1);

    reg = sfn5122f_tx_desc_ptr_tbl_lo_tx_descq_label_insert(reg, n);
    /* 1024 entries = 1 (512 = 0; 2048 = 2; 4096 = 3) */
    reg = sfn5122f_tx_desc_ptr_tbl_lo_tx_descq_size_insert(reg, 2);

    /* No user-level networking */
    reg = sfn5122f_tx_desc_ptr_tbl_lo_tx_descq_type_insert(reg, 0);

    reg = sfn5122f_tx_desc_ptr_tbl_lo_tx_descq_type_insert(reg, 1);

    reg1 = sfn5122f_tx_desc_ptr_tbl_hi_tx_iscsi_ddig_en_insert(reg1, 0);
    reg1 = sfn5122f_tx_desc_ptr_tbl_hi_tx_iscsi_hdig_en_insert(reg1, 0);

    reg1 = sfn5122f_tx_desc_ptr_tbl_hi_tx_non_ip_drop_dis_insert(reg1, 1);

    reg1 = sfn5122f_tx_desc_ptr_tbl_hi_tx_descq_en_insert(reg1, 1);

    /* Enable checksum offload */
    reg1 = sfn5122f_tx_desc_ptr_tbl_hi_tx_ip_chksm_dis_insert(reg1, !csum);
    reg1 = sfn5122f_tx_desc_ptr_tbl_hi_tx_tcp_chksm_dis_insert(reg1, !csum);
    sfn5122f_tx_desc_ptr_tbl_lo_wr(d, n, reg);
    sfn5122f_tx_desc_ptr_tbl_hi_wr(d, n, reg1);

    return buffer_offset;
static void setup_interrupt(size_t *msix_index, uint8_t core, uint8_t vector)
{
    res = bmallocator_alloc(&msix_alloc, msix_index);

    err = get_apicid_from_core(core, &dest);
    assert(err_is_ok(err));

    err = pci_msix_vector_init(*msix_index, dest, vector);
    assert(err_is_ok(err));

    DEBUG("MSI-X vector setup index=%"PRIx64", core=%d apic=%d swvec=%x\n",
          *msix_index, core, dest, vector);
/** Here the global interrupts are handled. */
static void global_interrupt_handler(void* arg)
{
    uint8_t* net_ivec_fatal = (uint8_t *) int_ker_virt;

    // bit 64 is the indicator for a fatal event
    syserr = (net_ivec_fatal[8] & 0x1);

    // TODO handle fatal interrupt
    USER_PANIC("FATAL INTERRUPT");

    queue = sfn5122f_int_isr0_reg_lo_rd(d);
    DEBUG("AN INTERRUPT OCCURRED %d \n", queue);
    // No need to start event queues, because we are already polling
/******************************************************************************/
/* Management interface implementation */

static void idc_queue_init_data(struct sfn5122f_binding *b,
                                struct capref registers,

    r = sfn5122f_queue_init_data__tx(b, NOP_CONT, registers, macaddr);
    // TODO: handle busy
    assert(err_is_ok(r));

/** Tell the queue driver that we are done initializing the queue. */
static void idc_queue_memory_registered(struct sfn5122f_binding *b)
{
    r = sfn5122f_queue_memory_registered__tx(b, NOP_CONT);
    // TODO: handle busy
    assert(err_is_ok(r));

/** Send a request to the queue driver to rewrite the tail pointers of its queues. */
static void idc_write_queue_tails(struct sfn5122f_binding *b)
{
    qd_write_queue_tails(b);

    r = sfn5122f_write_queue_tails__tx(b, NOP_CONT);
    // TODO: handle busy
    assert(err_is_ok(r));
/** Request from the queue driver for the register memory cap */
void cd_request_device_info(struct sfn5122f_binding *b)
{
    qd_queue_init_data(b, *regframe, d_mac[pci_function]);

    idc_queue_init_data(b, *regframe, d_mac[pci_function]);

/** Request from the queue driver to initialize a hardware queue. */
void cd_register_queue_memory(struct sfn5122f_binding *b,
                              struct capref tx_frame,
                              struct capref rx_frame,
                              struct capref ev_frame,

    // Save state so we can restore the configuration in case we need to do a

    queues[n].enabled = false;
    queues[n].tx_frame = tx_frame;
    queues[n].rx_frame = rx_frame;
    queues[n].ev_frame = ev_frame;
    queues[n].tx_head = 0;
    queues[n].rx_head = 0;
    queues[n].ev_head = 0;
    queues[n].rxbufsz = rxbufsz;
    queues[n].binding = b;
    queues[n].use_irq = use_irq;
    queues[n].userspace = userspace;
    queues[n].msix_index = -1;
    queues[n].msix_intvec = vector;
    queues[n].msix_intdest = core;

    struct frame_identity id;
    err = invoke_frame_identify(ev_frame, &id);
    assert(err_is_ok(err));
    queues[n].ev_buf_tbl = init_evq(n, id.base);

    err = invoke_frame_identify(tx_frame, &id);
    assert(err_is_ok(err));
    queues[n].tx_buf_tbl = init_txq(n, id.base, csum_offload, userspace);

    err = invoke_frame_identify(rx_frame, &id);
    assert(err_is_ok(err));
    queues[n].rx_buf_tbl = init_rxq(n, id.base, userspace);

    if (queues[n].ev_buf_tbl == -1 ||
        queues[n].tx_buf_tbl == -1 ||
        queues[n].rx_buf_tbl == -1) {

        DEBUG("Allocating queue failed \n");

    queues[n].enabled = true;

    if (queues[n].use_irq) {
        if (queues[n].msix_intvec != 0) {
            if (queues[n].msix_index == -1) {
                setup_interrupt(&queues[n].msix_index, queues[n].msix_intdest,
                                queues[n].msix_intvec);

    idc_write_queue_tails(queues[n].binding);

    qd_queue_memory_registered(b);

    idc_queue_memory_registered(b);
static errval_t idc_terminate_queue(struct sfn5122f_binding *b, uint16_t n)
{
    DEBUG("idc_terminate_queue(q=%d) \n", n);

    queues[n].enabled = false;
    queues[n].binding = NULL;

static errval_t idc_register_port_filter(struct sfn5122f_binding *b,

                                         sfn5122f_port_type_t type,

    DEBUG("idc_register_port_filter: called (q=%d t=%d p=%d)\n",

    waitset_init(&rpc_ws);

    struct sfn5122f_filter_ip f = {
        .dst_ip = htonl(ip),

    *err = reg_port_filter(&f, fid);
    DEBUG("filter registered: err=%"PRIu64", fid=%"PRIu64"\n", *err, *fid);

static errval_t idc_unregister_filter(struct sfn5122f_binding *b,
                                      uint64_t filter, errval_t *err)
{
    DEBUG("unregister_filter: called (%"PRIx64")\n", filter);
    *err = LIB_ERR_NOT_IMPLEMENTED;

static struct sfn5122f_rx_vtbl rx_vtbl = {
    .request_device_info = cd_request_device_info,
    .register_queue_memory = cd_register_queue_memory,

static struct sfn5122f_rpc_rx_vtbl rpc_rx_vtbl = {
    .terminate_queue_call = idc_terminate_queue,
    .register_port_filter_call = idc_register_port_filter,
    .unregister_filter_call = idc_unregister_filter,
static void cd_create_queue(struct sfn5122f_devif_binding *b, struct capref frame,
                            bool user, bool interrupt, uint8_t core, uint8_t msix_vector)
{
    DEBUG("cd_create_queue \n");

    struct frame_identity id;

    for (int i = 0; i < NUM_QUEUES; i++) {
        if (queues[i].enabled == false) {

        err = NIC_ERR_ALLOC_QUEUE;
        err = b->tx_vtbl.create_queue_response(b, NOP_CONT, 0, NULL_CAP, err);
        //err = b->tx_vtbl.create_queue_response(b, NOP_CONT, 0, err);
        assert(err_is_ok(err));

    queues[n].use_irq = interrupt;
    queues[n].enabled = false;
    queues[n].tx_frame = frame;
    queues[n].tx_head = 0;
    queues[n].rx_head = 0;
    queues[n].ev_head = 0;
    queues[n].rxbufsz = MTU_MAX;
    queues[n].devif = b;
    queues[n].userspace = user;
    queues[n].msix_index = -1;
    queues[n].msix_intdest = core;
    queues[n].msix_intvec = msix_vector;

    if (queues[n].use_irq) {
        if (queues[n].msix_intvec != 0) {
            if (queues[n].msix_index == -1) {
                setup_interrupt(&queues[n].msix_index, queues[n].msix_intdest,
                                queues[n].msix_intvec);

    err = invoke_frame_identify(frame, &id);
    assert(err_is_ok(err));
    queues[n].ev_buf_tbl = init_evq(n, id.base + sizeof(uint64_t)*(TX_ENTRIES+RX_ENTRIES));

    queues[n].tx_buf_tbl = init_txq(n, id.base, csum_offload, user);
    queues[n].rx_buf_tbl = init_rxq(n, id.base + sizeof(uint64_t)*TX_ENTRIES, user);

    if (queues[n].ev_buf_tbl == -1 ||
        queues[n].tx_buf_tbl == -1 ||
        queues[n].rx_buf_tbl == -1) {
        err = NIC_ERR_ALLOC_QUEUE;
        //err = b->tx_vtbl.create_queue_response(b, NOP_CONT, 0, err);
        err = b->tx_vtbl.create_queue_response(b, NOP_CONT, 0, NULL_CAP, err);
        assert(err_is_ok(err));

    queues[n].enabled = true;
    DEBUG("created queue %d \n", n);
    //err = b->tx_vtbl.create_queue_response(b, NOP_CONT, n, *regframe, SYS_ERR_OK);

    err = slot_alloc(&regs);
    assert(err_is_ok(err));
    err = cap_copy(regs, *regframe);
    assert(err_is_ok(err));

    err = b->tx_vtbl.create_queue_response(b, NOP_CONT, n, regs, SYS_ERR_OK);
    assert(err_is_ok(err));
    DEBUG("cd_create_queue end\n");
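/* Layout assumed by cd_create_queue() for the single frame passed in:
 * TX descriptors first, then RX descriptors, then the event queue, i.e.
 *
 *     [0 .. TX_ENTRIES*8)                          TX ring
 *     [TX_ENTRIES*8 .. (TX_ENTRIES+RX_ENTRIES)*8)  RX ring
 *     [(TX_ENTRIES+RX_ENTRIES)*8 .. )              event queue
 *
 * matching the offsets used in the init_txq/init_rxq/init_evq calls above. */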
static void cd_register_region(struct sfn5122f_devif_binding *b, uint16_t qid,
                               struct capref region)
{
    struct frame_identity id;
    uint64_t buffer_offset = 0;

    err = invoke_frame_identify(region, &id);
    if (err_is_fail(err)) {
        err = b->tx_vtbl.register_region_response(b, NOP_CONT, 0, NIC_ERR_REGISTER_REGION);
        assert(err_is_ok(err));

    size_t size = id.bytes;
    lpaddr_t addr = id.base;

    // TODO unsigned/signed not nice ...
    buffer_offset = alloc_buf_tbl_entries(addr, size/BUF_SIZE, qid, true, d);
    if (buffer_offset == -1) {
        err = b->tx_vtbl.register_region_response(b, NOP_CONT, 0, NIC_ERR_REGISTER_REGION);
        assert(err_is_ok(err));

    err = b->tx_vtbl.register_region_response(b, NOP_CONT, buffer_offset, SYS_ERR_OK);
    assert(err_is_ok(err));
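/* register_region pins the caller's frame into the NIC buffer table in
 * BUF_SIZE-sized chunks and returns the base buffer-table index
 * (buffer_offset), presumably so the queue can later build descriptors that
 * reference those buffers. deregister_region below releases the same range. */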
static void cd_deregister_region(struct sfn5122f_devif_binding *b, uint64_t buftbl_id,

    free_buf_tbl_entries(buftbl_id, size/BUF_SIZE, d);

    err = b->tx_vtbl.deregister_region_response(b, NOP_CONT, SYS_ERR_OK);
    assert(err_is_ok(err));

static void cd_destroy_queue(struct sfn5122f_devif_binding *b, uint16_t qid)
{
    queues[qid].enabled = false;
    queues[qid].binding = NULL;

    err = b->tx_vtbl.destroy_queue_response(b, NOP_CONT, SYS_ERR_OK);
    assert(err_is_ok(err));

static struct sfn5122f_devif_rx_vtbl rx_vtbl_devif = {
    .create_queue_call = cd_create_queue,
    .destroy_queue_call = cd_destroy_queue,
    .register_region_call = cd_register_region,
    .deregister_region_call = cd_deregister_region,
static void export_cb(void *st, errval_t err, iref_t iref)
{
    const char *suffix = "_sfn5122fmng";
    char name[strlen(service_name) + strlen(suffix) + 1];

    assert(err_is_ok(err));

    // Build the label for the internal management service
    sprintf(name, "%s%s", service_name, suffix);

    err = nameservice_register(name, iref);
    assert(err_is_ok(err));
    DEBUG("Management interface exported\n");

static errval_t connect_cb(void *st, struct sfn5122f_binding *b)
{
    DEBUG("New connection on management interface\n");
    b->rx_vtbl = rx_vtbl;
    b->rpc_rx_vtbl = rpc_rx_vtbl;

static void export_devif_cb(void *st, errval_t err, iref_t iref)
{
    const char *suffix = "_sfn5122fmng_devif";
    char name[strlen(service_name) + strlen(suffix) + 1];

    assert(err_is_ok(err));

    // Build the label for the internal management service
    sprintf(name, "%s%s", service_name, suffix);

    err = nameservice_register(name, iref);
    assert(err_is_ok(err));
    DEBUG("Devif management interface exported\n");

static errval_t connect_devif_cb(void *st, struct sfn5122f_devif_binding *b)
{
    DEBUG("New connection on devif management interface\n");
    b->rx_vtbl = rx_vtbl_devif;
 * Initialize the management interface for queue drivers.
 * This has to be done _after_ the hardware is initialized.
static void initialize_mngif(void)
{
    r = sfn5122f_export(NULL, export_cb, connect_cb, get_default_waitset(),
                        IDC_BIND_FLAGS_DEFAULT);
    assert(err_is_ok(r));

    r = sfn5122f_devif_export(NULL, export_devif_cb, connect_devif_cb,
                              get_default_waitset(), 1);
    assert(err_is_ok(r));
/*****************************************************************************/
/* ARP service client */

/** Get information about the local TCP/IP configuration */
static errval_t arp_ip_info(void)
{
    errval_t err, msgerr;

    err = arp_binding->rpc_tx_vtbl.ip_info(arp_binding, 0, &msgerr, &ip, &gw, &mask);
    if (err_is_fail(err)) {

static void a_bind_cb(void *st, errval_t err, struct net_ARP_binding *b)
{
    assert(err_is_ok(err));

    net_ARP_rpc_client_init(arp_binding);
    net_arp_connected = true;

/** Bind to the ARP service (currently blocking) */
static void bind_arp(struct waitset *ws)
{
    DEBUG("bind_arp()\n");
    err = nameservice_blocking_lookup("sfn5122f_ARP", &iref);
    assert(err_is_ok(err));
    DEBUG("resolved\n");

    err = net_ARP_bind(iref, a_bind_cb, NULL, ws, IDC_BIND_FLAGS_DEFAULT);
    assert(err_is_ok(err));
    DEBUG("binding initiated\n");

    while (!net_arp_connected) {
        event_dispatch_non_block(ws);
        event_dispatch_non_block(get_default_waitset());

    DEBUG("bound_arp\n");
/******************************************************************************/
/* Initialization code for the driver */

/** Callback from pci to initialize a specific PCI device. */
static void pci_init_card(struct device_mem* bar_info, int bar_count)
{
    d = malloc(sizeof(*d));

    /* Map the first BAR for register access */
    assert(bar_count >= 1);
    DEBUG("BAR count %d \n", bar_count);
    map_device(&bar_info[0]);
    regframe = bar_info[0].frame_cap;
    DEBUG("BAR[0] mapped (v=%llx p=%llx l=%llx)\n",
          (unsigned long long) bar_info[0].vaddr,
          (unsigned long long) bar_info[0].paddr,
          (unsigned long long) bar_info[0].bytes);

    /* Initialize the Mackerel binding */
    sfn5122f_initialize(d, (void*) bar_info[0].vaddr);

    // Initialize the manager for MSI-X vectors

    //d_msix = malloc(sizeof(*d_msix));
    //map_device(&bar_info[1]);
    //sfn5122f_msix_initialize(d_msix, (void*) bar_info[1].vaddr);
    DEBUG("Enabling MSI-X interrupts\n");
    uint16_t msix_count = 0;
    err = pci_msix_enable(&msix_count);
    assert(err_is_ok(err));
    assert(msix_count > 0);
    DEBUG("MSI-X #vecs=%d\n", msix_count);

    res = bmallocator_init(&msix_alloc, msix_count);

    DEBUG("Using legacy interrupts\n");

    /* Get all information needed */

    /* Initialize hardware registers etc. */

    /* Start interrupts / mac_stats etc. */

    /* Init RX filters */
    init_rx_filter_config();

    /* Initialize the management interface */
static void parse_cmdline(int argc, char **argv)
{
    for (i = 1; i < argc; i++) {
        if (strncmp(argv[i], "cardname=", strlen("cardname=")) == 0) {
            service_name = argv[i] + strlen("cardname=");
        } else if (strncmp(argv[i], "bus=", strlen("bus=")) == 0) {
            pci_bus = atol(argv[i] + strlen("bus="));
        } else if (strncmp(argv[i], "device=", strlen("device=")) == 0) {
            pci_device = atol(argv[i] + strlen("device="));
        } else if (strncmp(argv[i], "function=", strlen("function=")) == 0) {
            pci_function = atol(argv[i] + strlen("function="));
            if (pci_function != 0) {
                USER_PANIC("Second port not implemented, please use function=0");
            }
        } else if (strncmp(argv[i], "msix=", strlen("msix=")) == 0) {
            USER_PANIC("MSI-X not fully supported yet");
            use_msix = !!atol(argv[i] + strlen("msix="));
            //qd_argument(argv[i]);

            printf("Unrecognized argument %s ignored\n", argv[i]);
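/* Example invocation (hypothetical values), matching the options parsed
 * above; unknown arguments are reported and ignored:
 *
 *     sfn5122f cardname=sfn5122f bus=5 device=0 function=0 msix=0
 */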
static void eventloop(void)
{
    ws = get_default_waitset();
    DEBUG("SFN5122F enter event loop \n");

static void cd_main(void)

int main(int argc, char** argv)
{
    DEBUG("SFN5122F driver started \n");

    parse_cmdline(argc, argv);

    /* Register our device driver */
    r = pci_client_connect();
    assert(err_is_ok(r));

    r = pci_register_driver_irq(pci_init_card, PCI_CLASS_ETHERNET,
                                PCI_DONT_CARE, PCI_DONT_CARE,
                                PCI_VENDOR_SOLARFLARE, DEVICE_ID,
                                pci_bus, pci_device, pci_function,
                                global_interrupt_handler, NULL);

    while (!initialized) {
        event_dispatch(get_default_waitset());