/*
 * Copyright (c) 2007-2011, ETH Zurich.
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
 */

#include <net_queue_manager/net_queue_manager.h>
#include <barrelfish/nameservice_client.h>
#include <barrelfish/spawn_client.h>
#include <barrelfish/deferred.h>
#include <netd/netd.h>
#include <net_device_manager/net_device_manager.h>
#include <ipv4/lwip/inet.h>
#include <barrelfish/debug.h>
#include <if/sfn5122f_defs.h>
#include <if/sfn5122f_devif_defs.h>
#include <if/net_filter_defs.h>
#include <if/net_ARP_defs.h>
#include "sfn5122f_debug.h"
#include "buffer_tbl.h"
#include "sfn5122f_qdriver.h"
    struct sfn5122f_binding *binding;
    struct sfn5122f_devif_binding *devif;
    struct capref tx_frame;
    struct capref rx_frame;
    struct capref ev_frame;
    // first entries of the buffer table to make up queue
static bool use_msix = false;
static const char *service_name = "sfn5122f";
static sfn5122f_t *d = NULL;
//static sfn5122f_msix_t *d_msix = NULL;
static uint64_t d_mac[2];
static int initialized = 0;
static struct capref *regframe;
static struct capref int_ker;
static void* int_ker_virt;
static struct capref mac_stats;
static void* mac_virt;
static uint64_t mac_phys;
static uint32_t cap[2];
static uint32_t speed[2];
static uint32_t flags[2];
static uint32_t fcntl[2];
static uint32_t phy_caps[2];
static uint32_t phy_flags[2];
static uint32_t phy_media[2];
/* Loopback mode none and speed */
static uint32_t phy_loopback_mode = 0;
//static uint32_t phy_loopback_speed = 0;
static uint32_t wol_filter_id = 0;
//static struct net_ARP_binding *arp_binding;
//static bool net_arp_connected = false;
static bool csum_offload = true;
static uint32_t rx_indir_tbl[128];
static struct queue_state queues[1024];
/* PCI device address passed on command line */
static uint32_t pci_bus = PCI_DONT_CARE;
static uint32_t pci_device = PCI_DONT_CARE;
static uint32_t pci_vendor = PCI_DONT_CARE;
static uint32_t pci_devid = PCI_DONT_CARE;
static uint32_t pci_function = 0;
static struct bmallocator msix_alloc;
static size_t cdriver_msix = -1;
static uint8_t cdriver_vector;
static bool use_interrupt = true;
// first to start everything
static bool first = true;
uint8_t rx_hash_key[40];
static uint32_t ip = 0x2704710A;
//static uint32_t ip = 0;
enum filter_type_ip {
enum filter_type_mac {
struct sfn5122f_filter_ip {
struct sfn5122f_filter_mac {
/* scatter and rss enable */
static bool rss_en = false;
static bool scatter_en = false;
static struct sfn5122f_filter_ip filters_rx_ip[NUM_FILTERS_IP];
//static struct sfn5122f_filter_ip filters_tx_ip[NUM_FILTERS_IP];
static struct sfn5122f_filter_mac filters_rx_mac[NUM_FILTERS_MAC];
static struct sfn5122f_filter_mac filters_tx_mac[NUM_FILTERS_MAC];
/******************************************************************************/
void qd_main(void) __attribute__((weak));
void qd_argument(const char *arg) __attribute__((weak));
void qd_interrupt(void) __attribute__((weak));
void qd_queue_init_data(struct sfn5122f_binding *b, struct capref registers,
                        uint64_t macaddr) __attribute__((weak));
void qd_queue_memory_registered(struct sfn5122f_binding *b) __attribute__((weak));
void qd_write_queue_tails(struct sfn5122f_binding *b) __attribute__((weak));
void cd_request_device_info(struct sfn5122f_binding *b);
void cd_register_queue_memory(struct sfn5122f_binding *b,
static void idc_write_queue_tails(struct sfn5122f_binding *b);
static void device_init(void);
static void start_all(void);
static void probe_all(void);
static uint32_t init_txq(uint16_t n, lpaddr_t phys, bool csum, bool userspace);
static uint32_t init_rxq(uint16_t n, lpaddr_t phys, bool userspace);
static uint32_t init_evq(uint16_t n, lpaddr_t phys, bool interrupt);
static void queue_hw_stop(uint16_t n);
static void setup_interrupt(size_t *msix_index, uint8_t core, uint8_t vector);
static void global_interrupt_handler(void* arg);
static void bind_arp(struct waitset *ws);
static errval_t arp_ip_info(void);
/***************************************************************************/
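/*
 * RX filter table helpers: each IP filter occupies one entry of the
 * hardware RX_FILTER_TBL. The entry index is derived from a hash of the
 * filter fields (see build_key()/filter_hash() below) and collisions are
 * resolved by probing with a per-key increment. Register writes go out as
 * separate lo/hi 64-bit halves.
 */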
static void sfn5122f_filter_port_setup(int idx, struct sfn5122f_filter_ip* filter)
    sfn5122f_rx_filter_tbl_lo_t filter_lo = 0;
    sfn5122f_rx_filter_tbl_hi_t filter_hi = 0;

    if (filter->type_ip == net_filter_PORT_UDP) {
        // Add destination IP
        filter_hi = sfn5122f_rx_filter_tbl_hi_dest_ip_insert(filter_hi,
        filter_lo = sfn5122f_rx_filter_tbl_lo_src_ip_insert(filter_lo,
        filter_hi = sfn5122f_rx_filter_tbl_hi_tcp_udp_insert(filter_hi, 1);
        filter_lo = sfn5122f_rx_filter_tbl_lo_src_tcp_dest_udp_insert(
                        filter_lo, filter->dst_port);
        filter_hi = sfn5122f_rx_filter_tbl_hi_rss_en_insert(filter_hi, 0);
        filter_hi = sfn5122f_rx_filter_tbl_hi_scatter_en_insert(filter_hi, 0);
        DEBUG("UDP filter index %d: ip_dst %x port_dst %d ip_src %x port_src %d"
              idx, filter->dst_ip, filter->dst_port,
              filter->src_ip, filter->src_port, filter->queue);

    if (filter->type_ip == net_filter_PORT_TCP) {
        // Add dst IP and port
        filter_hi = sfn5122f_rx_filter_tbl_hi_dest_ip_insert(filter_hi,
        filter_lo = sfn5122f_rx_filter_tbl_lo_src_ip_insert(filter_lo,
        filter_lo = sfn5122f_rx_filter_tbl_lo_dest_port_tcp_insert(filter_lo,
        filter_hi = sfn5122f_rx_filter_tbl_hi_tcp_udp_insert(filter_hi, 0);
        filter_hi = sfn5122f_rx_filter_tbl_hi_rss_en_insert(filter_hi, 0);
        filter_hi = sfn5122f_rx_filter_tbl_hi_scatter_en_insert(filter_hi, 0);
        DEBUG("TCP filter index %d: ip_dst %x port_dst %d ip_src %x port_src %d"
              idx, filter->dst_ip, filter->dst_port,
              filter->src_ip, filter->src_port, filter->queue);

    filter_hi = sfn5122f_rx_filter_tbl_hi_rxq_id_insert(filter_hi, filter->queue);
    filter_hi = sfn5122f_rx_filter_tbl_hi_rss_en_insert(filter_hi, rss_en);
    filter_hi = sfn5122f_rx_filter_tbl_hi_scatter_en_insert(filter_hi, scatter_en);

    sfn5122f_rx_filter_tbl_lo_wr(d, idx, filter_lo);
    sfn5122f_rx_filter_tbl_hi_wr(d, idx, filter_hi);
static uint32_t build_key(struct sfn5122f_filter_ip* f)
    uint32_t data[4] = {0, 0, 0, 0};
    if (f->type_ip == sfn5122f_PORT_UDP) {
        data[0] = host1 << 16 | port1;
        data[1] = port2 << 16 | host1 >> 16;
    return data[0] ^ data[1] ^ data[2] ^ data[3];

static uint16_t filter_hash(uint32_t key)
    /* First 16 rounds */
    tmp = 0x1fff ^ key >> 16;
    tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
    tmp = tmp ^ tmp >> 9;
    tmp = tmp ^ tmp << 13 ^ key;
    tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
    return tmp ^ tmp >> 9;

static bool filter_equals(struct sfn5122f_filter_ip* f1,
                          struct sfn5122f_filter_ip* f2)
    if (f1->type_ip != f2->type_ip) {
    } else if ((f1->src_ip != f2->src_ip) ||
               (f1->dst_ip != f2->dst_ip) ||
               (f1->queue != f2->queue)) {
    } else if ((f1->src_port != f2->src_port) &&
               (f2->dst_port != f1->dst_port)) {

static uint16_t filter_increment(uint32_t key)

static int ftqf_alloc(struct sfn5122f_filter_ip* f)
    // Documentation suggests hashing using a certain algorithm
    unsigned int incr = 0;
    hash = filter_hash(key);
    incr = filter_increment(key);
    key = hash & (NUM_FILTERS_IP - 1);
    if (filters_rx_ip[key].enabled == false) {
        key = (key + incr) & (NUM_FILTERS_IP - 1);
static errval_t reg_port_filter(struct sfn5122f_filter_ip* f, uint64_t* fid)
    DEBUG("reg_port_filter: called\n");
    if ((filt_ind = ftqf_alloc(f)) < 0) {
        return FILTER_ERR_NOT_ENOUGH_MEMORY;
    filters_rx_ip[filt_ind] = *f;
    filters_rx_ip[filt_ind].enabled = true;
    sfn5122f_filter_port_setup(filt_ind, f);
/***************************************************************************/
/* Helper functions */
static void decode_link(uint32_t fcntl1, uint32_t flags1, uint32_t speed1)
    DEBUG("LINK MODE: AUTO \n");
    DEBUG("LINK MODE: RX/TX \n");
    DEBUG("LINK MODE: RESPOND \n");
    DEBUG("LINK MODE: NONE \n");
    DEBUG("LINK SPEED: %"PRIu32" \n", speed1);
    DEBUG("LINK FLAGS: %8lX \n", (long unsigned int) flags1);
    if (!!(flags1 & 1)) {
        DEBUG("LINK IS UP \n");
    if (!!(flags1 & 1 << 0x1)) {
        DEBUG("LINK IS FULL DUPLEX \n");

static void handle_assertions(void)
    memset(in, 0, sizeof(in));
    in[CMD_GET_ASSERTS_IN_CLEAR_OFFSET] = 0;
    err = mcdi_rpc(CMD_GET_ASSERTS, in, CMD_GET_ASSERTS_IN_LEN, out,
                   CMD_GET_ASSERTS_OUT_LEN, &outlen, pci_function, d);
    assert(err_is_ok(err));
    /* TODO handle assertions */
    printf("THERE WERE ASSERTIONS: %"PRIu8" \n ", out[0]);
    /* exit assertions -> special reboot */
    err = mcdi_rpc(CMD_REBOOT, in, CMD_REBOOT_IN_LEN,
                   NULL, 0, NULL, pci_function, d);
    assert(err_is_ok(err));
/* Get link and write settings into global variables */
static void get_link(uint8_t port)
    uint8_t out[CMD_GET_LINK_OUT_LEN];
    err = mcdi_rpc(CMD_GET_LINK, NULL, 0, out, CMD_GET_LINK_OUT_LEN, NULL, port, d);
    assert(err_is_ok(err));
    memcpy(&cap[port], out, 4);
    memcpy(&speed[port], out+CMD_GET_LINK_OUT_SPEED_OFFSET, 4);
    memcpy(&fcntl[port], out+CMD_GET_LINK_OUT_FCNTL_OFFSET, 4);
    memcpy(&flags[port], out+CMD_GET_LINK_OUT_FLAGS_OFFSET, 4);
    decode_link(fcntl[port], flags[port], speed[port]);

static void init_port(uint8_t port)
    uint8_t in[CMD_SET_MAC_IN_LEN];
    memcpy(in + CMD_SET_MAC_IN_ADR_OFFSET, &d_mac[port], 6);
    /* linux driver sets these bits */
    memcpy(in + CMD_SET_MAC_IN_MTU_OFFSET, &reg, 4);
    in[CMD_SET_MAC_IN_DRAIN_OFFSET] = 0;
    /* Reject unicast packets? */
    in[CMD_SET_MAC_IN_REJECT_OFFSET] = 1;
    /* Set wanted flow control of the card: 2 -> bidirectional */
    in[CMD_SET_MAC_IN_FCTNL_OFFSET] = 2;
    err = mcdi_rpc(CMD_SET_MAC, in, CMD_SET_MAC_IN_LEN, NULL, 0, NULL, port, d);
    assert(err_is_ok(err));
    memset(mc_hash, 0, sizeof(mc_hash));
    err = mcdi_rpc(CMD_SET_MCAST_HASH, mc_hash, CMD_SET_MCAST_HASH_IN_LEN,
                   NULL, 0, NULL, port, d);
    assert(err_is_ok(err));
    memset(in, 0, sizeof(in));
    memcpy(in + CMD_SET_LINK_IN_CAP_OFFSET, &cap[pci_function], 4);
    err = mcdi_rpc(CMD_SET_LINK, in, CMD_SET_LINK_IN_LEN, NULL, 0, NULL, 0, d);
    assert(err_is_ok(err));
static void start_port(uint8_t port)
    uint8_t in[CMD_SET_MAC_IN_LEN];
    memset(&in, 0, sizeof(in));
    err = mcdi_rpc(CMD_SET_MCAST_HASH, mc_hash, CMD_SET_MCAST_HASH_IN_LEN,
                   NULL, 0, NULL, port, d);
    assert(err_is_ok(err));
    memcpy(in + CMD_SET_MAC_IN_ADR_OFFSET, &d_mac[port], 6);
    /* it seems the Linux driver sets all bits not set
       from the MAC address to 1 */
    memcpy(in + CMD_SET_MAC_IN_MTU_OFFSET, &reg, 4);
    in[CMD_SET_MAC_IN_DRAIN_OFFSET] = 0;
    /* Reject unicast packets? */
    in[CMD_SET_MAC_IN_REJECT_OFFSET] = 1;
    /* Set wanted functionality (flow control) of the card -> 2 for RX/TX */
    in[CMD_SET_MAC_IN_FCTNL_OFFSET] = 2;
    err = mcdi_rpc(CMD_SET_MAC, in, CMD_SET_MAC_IN_LEN, NULL, 0, NULL, port, d);
    assert(err_is_ok(err));
    err = mcdi_rpc(CMD_SET_MCAST_HASH, mc_hash, CMD_SET_MCAST_HASH_IN_LEN,
                   NULL, 0, NULL, port, d);
    assert(err_is_ok(err));
/******************************************************************************
 *****************************************************************************/
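/*
 * probe_all(): bring the controller up via MCDI. It clears the MC reboot
 * flag, drains any firmware assertions, attaches the driver to the port,
 * resets the port, reads the WoL filter id, the board configuration (MAC
 * addresses), the PHY configuration and loopback modes, and finally sets
 * up a DMA buffer for the MAC statistics.
 */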
static void probe_all(void)
    struct frame_identity frameid = { .base = 0, .bytes = 0 };
    // Test and clear MC-reboot flag for port/function
    offset = MCDI_REBOOT_OFFSET(pci_function);
    reg = sfn5122f_mc_treg_smem_rd(d, offset);
    sfn5122f_mc_treg_smem_wr(d, offset, 0);
    /* print out any assertions */
    // Let the BMC know that the driver is in charge of filter/link settings
    // before we can reset the NIC
    memset(&in, 0, sizeof(in));
    memset(&out, 0, sizeof(out));
    r = mcdi_rpc(CMD_GET_VERSION, NULL, 0, out, CMD_GET_VERSION_OUT_LEN,
                 &outlen, pci_function, d);
    assert(err_is_ok(r));
    memset(&out, 0, sizeof(out));
    // driver is operating / + update
    r = mcdi_rpc(CMD_DRV_ATTACH, in, CMD_DRV_ATTACH_IN_LEN, out,
                 CMD_DRV_ATTACH_OUT_LEN, &outlen, pci_function, d);
    assert(err_is_ok(r));
    r = mcdi_rpc(CMD_PORT_RESET, NULL, 0, NULL, 0, NULL, pci_function, d);
    assert(err_is_ok(r));
    if (mcdi_rpc(CMD_WOL_FILTER_GET, NULL, 0, out, CMD_WOL_FILTER_GET_OUT_LEN,
                 &outlen, pci_function, d) == SYS_ERR_OK) {
        memcpy(&wol_filter_id, out, 4);
        // Reset filter of card
        mcdi_rpc(CMD_WOL_FILTER_RESET, NULL, 0, NULL, 0, NULL, pci_function, d);
    // memory for INT_KER
    int_ker_virt = alloc_map_frame(VREGION_FLAGS_READ_WRITE,
                                   2*sizeof(uint64_t), &int_ker);
    memset(int_ker_virt, 0, 2*sizeof(uint64_t));
    // Read in non-volatile configuration
    memset(&out, 0, sizeof(out));
    r = mcdi_rpc(CMD_GET_BOARD_CONFIG, NULL, 0, out,
                 CMD_GET_BOARD_CONFIG_OUT_LEN, &outlen, pci_function, d);
    assert(err_is_ok(r));
    memcpy(&d_mac[0], out+MCDI_MAC_PORT_OFFSET(0), 6);
    memcpy(&d_mac[1], out+MCDI_MAC_PORT_OFFSET(1), 6);
    // read phy configuration
    r = mcdi_rpc(CMD_GET_PHY_CFG, NULL, 0, out, CMD_GET_PHY_CFG_OUT_LEN, &outlen,
    assert(err_is_ok(r));
    memcpy(&phy_caps[pci_function], out+CMD_GET_PHY_CFG_OUT_CAP_OFFSET, 4);
    memcpy(&phy_flags[pci_function], out+CMD_GET_PHY_CFG_OUT_FLAGS_OFFSET, 4);
    memcpy(&phy_media[pci_function], out+CMD_GET_PHY_CFG_OUT_MEDIA_OFFSET, 4);
    // get loopback modes
    r = mcdi_rpc(CMD_GET_LOOPBACK_MODES, NULL, 0, out,
                 CMD_GET_LOOPBACK_MODES_OUT_LEN, &outlen, pci_function, d);
    assert(err_is_ok(r));
    memcpy(&phy_loopback_mode, out+CMD_GET_LOOPBACK_MODES_SUGGESTED_OFFSET, 4);
    // loopback mode NONE is not a valid condition
    phy_loopback_mode &= ~(1);
    mac_virt = alloc_map_frame(VREGION_FLAGS_READ_WRITE,
                               NUM_MAC_STATS*sizeof(uint64_t),
    assert(mac_virt != NULL);
    r = invoke_frame_identify(mac_stats, &frameid);
    assert(err_is_ok(r));
    mac_phys = frameid.base;
    memset(mac_virt, 0, NUM_MAC_STATS*sizeof(uint64_t));
    memset(&in, 0, sizeof(in));
    memcpy(in, &mac_phys, 8);
    // Settings for DMA of MAC stats
    in[CMD_MAC_STATS_IN_CMD_OFFSET] = 0x6;
    in[CMD_MAC_STATS_IN_DMA_LEN_OFFSET] = 8;
    in[CMD_MAC_STATS_IN_DMA_LEN_OFFSET+1] = 3;
    r = mcdi_rpc(CMD_MAC_STATS, in, CMD_MAC_STATS_IN_LEN, NULL, 0, NULL,
    assert(err_is_ok(r));
// Init card IP filters
static void init_rx_filter_config(void)
    uint64_t reg_hi, reg_lo;
    for (int i = 0; i < NUM_FILTERS_IP; i++) {
        sfn5122f_rx_filter_tbl_lo_wr(d, i, 0);
        sfn5122f_rx_filter_tbl_hi_wr(d, i, 0);
    reg_lo = sfn5122f_rx_filter_ctl_reg_lo_rd(d);
    reg_hi = sfn5122f_rx_filter_ctl_reg_hi_rd(d);
    reg_hi = sfn5122f_rx_filter_ctl_reg_hi_ethernet_full_search_limit_insert(reg_hi, 1);
    reg_hi = sfn5122f_rx_filter_ctl_reg_hi_ethernet_wildcard_search_limit_insert(reg_hi, 3);
    reg_lo = sfn5122f_rx_filter_ctl_reg_lo_multicast_nomatch_q_id_lo_insert(reg_lo, 0);
    reg_lo = sfn5122f_rx_filter_ctl_reg_lo_unicast_nomatch_q_id_insert(reg_lo, 0);
    reg_lo = sfn5122f_rx_filter_ctl_reg_lo_unicast_nomatch_rss_enabled_insert(reg_lo, 0);
    reg_lo = sfn5122f_rx_filter_ctl_reg_lo_multicast_nomatch_rss_enabled_insert(reg_lo, 0);
    reg_lo = sfn5122f_rx_filter_ctl_reg_lo_udp_full_srch_limit_insert(reg_lo, 1);
    reg_lo = sfn5122f_rx_filter_ctl_reg_lo_udp_wild_srch_limit_insert(reg_lo, 3);
    reg_lo = sfn5122f_rx_filter_ctl_reg_lo_tcp_full_srch_limit_insert(reg_lo, 1);
    reg_lo = sfn5122f_rx_filter_ctl_reg_lo_tcp_wild_srch_limit_insert(reg_lo, 3);
    sfn5122f_rx_filter_ctl_reg_lo_wr(d, reg_lo);
    sfn5122f_rx_filter_ctl_reg_hi_wr(d, reg_hi);
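/*
 * device_init(): one-time datapath setup. Configures the TX/RX reserved and
 * config registers, event logging, descriptor cache placement in SRAM,
 * the INT_KER interrupt address, the fatal-interrupt mask and the RSS
 * indirection table, then initializes the port and queries the link.
 */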
static void device_init(void)
    struct frame_identity frameid = { .base = 0, .bytes = 0 };
    uint64_t reg, reg2; // tmp_key = 0;
    uint8_t in[24]; // set length to the biggest input length needed
    memset(&in, 0, sizeof(in));
    // recover from failed assertion post-reset
    /* ignore TX of packets 16 bytes and less */
    reg = sfn5122f_tx_reserved_reg_lo_rd(d);
    reg = sfn5122f_tx_reserved_reg_lo_tx_flush_min_len_en_insert(reg, 1);
    sfn5122f_tx_reserved_reg_lo_wr(d, reg);
    sfn5122f_tx_reserved_reg_hi_wr(d, sfn5122f_tx_reserved_reg_hi_rd(d));
    // Disable TX_NO_EOP_DISC_EN, otherwise packets would be limited to 16 bytes
    reg = sfn5122f_tx_cfg_reg_lo_rd(d);
    reg = sfn5122f_tx_cfg_reg_lo_tx_no_eop_disc_en_insert(reg, 0);
    reg = sfn5122f_tx_cfg_reg_lo_tx_ownerr_ctl_insert(reg, 1);
    reg = sfn5122f_tx_cfg_reg_lo_tx_filter_en_bit_insert(reg, 1);
    sfn5122f_tx_cfg_reg_lo_wr(d, reg);
    sfn5122f_tx_cfg_reg_hi_wr(d, sfn5122f_tx_cfg_reg_hi_rd(d));
    reg = sfn5122f_rx_cfg_reg_lo_rd(d);
    // unset bit and set other bits which are not in the documentation (43 and 47)
    reg = sfn5122f_rx_cfg_reg_lo_rx_desc_push_en_insert(reg, 0);
    reg = sfn5122f_rx_cfg_reg_lo_rx_ingr_en_insert(reg, 1);
    //reg = sfn5122f_rx_cfg_reg_lo_rx_usr_buf_size_insert(reg, (MTU_MAX-256) >> 5);
    reg = sfn5122f_rx_cfg_reg_lo_rx_usr_buf_size_insert(reg, 4096 >> 5);
    //reg = sfn5122f_rx_cfg_reg_lo_rx_ownerr_ctl_insert(reg, 1);
    reg = sfn5122f_rx_cfg_reg_lo_rx_ip_hash_insert(reg, 1);
    //reg = sfn5122f_rx_cfg_reg_lo_rx_hash_insrt_hdr_insert(reg, 1);
    reg = sfn5122f_rx_cfg_reg_lo_rx_hash_alg_insert(reg, 1);
    sfn5122f_rx_cfg_reg_lo_wr(d, reg);
    sfn5122f_rx_cfg_reg_hi_wr(d, sfn5122f_rx_cfg_reg_hi_rd(d));
    /* enable event logging, no UART;
       event destination is queue 0 */
    r = mcdi_rpc(CMD_LOG_CTRL, in, CMD_LOG_CTRL_IN_LEN,
                 NULL, 0, NULL, pci_function, d);
    assert(err_is_ok(r));
    /* Set destination of TX/RX flush event */
    sfn5122f_dp_ctrl_reg_lo_fls_evq_id_wrf(d, 0);
    sfn5122f_dp_ctrl_reg_hi_wr(d, sfn5122f_dp_ctrl_reg_hi_rd(d));
    /* Disable user events for now */
    sfn5122f_usr_ev_cfg_lo_usrev_dis_wrf(d, 1);
    sfn5122f_usr_ev_cfg_hi_wr(d, sfn5122f_usr_ev_cfg_hi_rd(d));
    // This does not seem to be device specific, i.e. it works for other
    /* Set position of descriptor caches in SRAM */
    sfn5122f_srm_tx_dc_cfg_reg_lo_wr(d, TX_DC_BASE);
    sfn5122f_srm_tx_dc_cfg_reg_hi_wr(d, sfn5122f_srm_tx_dc_cfg_reg_hi_rd(d));
    sfn5122f_srm_rx_dc_cfg_reg_lo_srm_rx_dc_base_adr_wrf(d, RX_DC_BASE);
    sfn5122f_srm_rx_dc_cfg_reg_hi_wr(d, sfn5122f_srm_rx_dc_cfg_reg_hi_rd(d));
    /* Set TX descriptor cache size to 16 */
    sfn5122f_tx_dc_cfg_reg_lo_tx_dc_size_wrf(d, 1);
    sfn5122f_tx_dc_cfg_reg_hi_wr(d, sfn5122f_tx_dc_cfg_reg_hi_rd(d));
    /* Set RX descriptor cache size to 64 and low watermark */
    sfn5122f_rx_dc_cfg_reg_lo_rx_dc_size_wrf(d, 3);
    sfn5122f_rx_dc_cfg_reg_hi_wr(d, sfn5122f_rx_dc_cfg_reg_hi_rd(d));
    reg = sfn5122f_rx_dc_pf_wm_reg_lo_rx_dc_pf_lwm_insert(reg, RX_DESC_CACHE_SIZE - 8);
    sfn5122f_rx_dc_pf_wm_reg_lo_wr(d, reg);
    sfn5122f_rx_dc_pf_wm_reg_hi_wr(d, sfn5122f_rx_dc_pf_wm_reg_hi_rd(d));
    /* program INT_KER address for interrupts */
    r = invoke_frame_identify(int_ker, &frameid);
    assert(err_is_ok(r));
    sfn5122f_int_adr_reg_ker_lo_wr(d, frameid.base);
    reg = sfn5122f_int_adr_reg_ker_hi_rd(d);
    // disable vector write if we use MSI-X
    reg = sfn5122f_int_adr_reg_ker_hi_norm_int_vec_dis_ker_insert(reg, 1);
    if (cdriver_msix == -1) {
        r = pci_setup_inthandler(global_interrupt_handler, NULL, &cdriver_vector);
        assert(err_is_ok(r));
        setup_interrupt(&cdriver_msix, disp_get_core_id(), cdriver_vector);
    reg = sfn5122f_int_adr_reg_ker_hi_norm_int_vec_dis_ker_insert(reg, 0);
    sfn5122f_int_adr_reg_ker_hi_wr(d, reg);
    /* Enable all the genuinely fatal interrupts */
    reg = sfn5122f_fatal_intr_reg_ker_lo_ill_adr_int_ker_en_insert(reg, 1);
    /* Enable rxbuf/txbuf interrupt fields (not documented) */
    reg = sfn5122f_fatal_intr_reg_ker_lo_rxbuf_own_int_ker_en_insert(reg, 1);
    reg = sfn5122f_fatal_intr_reg_ker_lo_txbuf_own_int_ker_en_insert(reg, 1);
    //reg = sfn5122f_fatal_intr_reg_ker_lo_sram_perr_int_p_ker_en_insert(reg, 1);
    sfn5122f_fatal_intr_reg_ker_lo_wr(d, ~reg);
    sfn5122f_fatal_intr_reg_ker_hi_wr(d, 0xFFFFFFFFFFFFFFFF);
    /* Setup RSS indirection table (maps a packet's hash value to an RXQ) */
    for (int i = 0; i < 128; i++) {
        sfn5122f_rx_indirection_tbl_wr(d, i, rx_indir_tbl[i]);
    /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
     * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
     * (from the Linux driver) */
    reg = sfn5122f_tx_reserved_reg_lo_rd(d);
    reg = sfn5122f_tx_reserved_reg_lo_tx_rx_spacer_en_insert(reg, 1);
    reg = sfn5122f_tx_reserved_reg_lo_tx_one_pkt_per_q_insert(reg, 0);
    reg = sfn5122f_tx_reserved_reg_lo_tx_dis_non_ip_ev_insert(reg, 1);
    /* Enable software events */
    reg = sfn5122f_tx_reserved_reg_lo_tx_soft_evt_en_insert(reg, 1);
    /* Prefetch threshold 2 => fetch when descriptor cache half empty */
    reg = sfn5122f_tx_reserved_reg_lo_tx_pref_threshold_insert(reg, 2);
    /* Disable hardware watchdog which can misfire */
    reg = sfn5122f_tx_reserved_reg_lo_tx_pref_wd_tmr_insert(reg, 0x3fffff);
    /* Squash TX of packets of 16 bytes or less */
    reg = sfn5122f_tx_reserved_reg_lo_tx_flush_min_len_en_insert(reg, 1);
    reg2 = sfn5122f_tx_reserved_reg_hi_rd(d);
    reg2 = sfn5122f_tx_reserved_reg_hi_tx_push_en_insert(reg2, 0);
    reg2 = sfn5122f_tx_reserved_reg_hi_tx_push_chk_dis_insert(reg2, 0);
    //reg2 = sfn5122f_tx_reserved_reg_hi_tx_rx_spacer_insert(reg2, 0xfe);
    reg2 = sfn5122f_tx_reserved_reg_hi_tx_rx_spacer_insert(reg2, 0x1);
    sfn5122f_tx_reserved_reg_lo_wr(d, reg);
    sfn5122f_tx_reserved_reg_hi_wr(d, reg2);
    init_port(pci_function);
    get_link(pci_function);
    DEBUG("BASIC CARD INIT DONE \n");
static void start_all(void)
    start_port(pci_function);
    memset(int_ker_virt, 0, 2*sizeof(uint64_t));
    /* Enable interrupts */
    /* Use an interrupt level unused by event queues */
    reg = sfn5122f_int_en_reg_ker_lo_rd(d);
    reg = sfn5122f_int_en_reg_ker_lo_ker_int_leve_sel_insert(reg, 0);
    reg = sfn5122f_int_en_reg_ker_lo_ker_int_leve_sel_insert(reg, 0x1f);
    reg = sfn5122f_int_en_reg_ker_lo_drv_int_en_ker_insert(reg, 1);
    /* undocumented field */
    reg = sfn5122f_int_en_reg_ker_lo_ker_int_ker_insert(reg, 0);
    sfn5122f_int_en_reg_ker_lo_wr(d, reg);
    sfn5122f_int_en_reg_ker_hi_wr(d, sfn5122f_int_en_reg_ker_hi_rd(d));
    /* Start MAC stats */
    uint8_t in[CMD_MAC_STATS_IN_LEN];
    unsigned long long* stats = (unsigned long long *) mac_virt;
    memset(in, 0, sizeof(in));
    stats[0x60] = (unsigned long long) (-1);
    memcpy(in, &mac_phys, 8);
    pointer = (uint8_t *) &mac_phys;
    in[CMD_MAC_STATS_IN_CMD_OFFSET] = 0xD;
    in[CMD_MAC_STATS_IN_DMA_LEN_OFFSET] = 8;
    in[CMD_MAC_STATS_IN_DMA_LEN_OFFSET+1] = 3;
    errval_t err = mcdi_rpc(CMD_MAC_STATS, in, CMD_MAC_STATS_IN_LEN,
                            NULL, 0, NULL, pci_function, d);
    assert(err_is_ok(err));
/**************************************************************************
 ***************************************************************************/
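/*
 * Per-queue hardware management: queue_hw_stop() flushes the TX and RX
 * descriptor queues, clears their pointer-table entries and releases the
 * buffer table entries of the RX, TX and event rings.
 */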
static void queue_hw_stop(uint16_t n)
    reg = sfn5122f_tx_flush_descq_reg_lo_rd(d);
    reg = sfn5122f_tx_flush_descq_reg_lo_tx_flush_descq_insert(reg, n);
    reg = sfn5122f_tx_flush_descq_reg_lo_tx_flush_descq_cmd_insert(reg, 1);
    sfn5122f_tx_flush_descq_reg_lo_wr(d, reg);
    sfn5122f_tx_flush_descq_reg_hi_wr(d, sfn5122f_tx_flush_descq_reg_hi_rd(d));
    reg = sfn5122f_rx_flush_descq_reg_lo_rd(d);
    reg = sfn5122f_rx_flush_descq_reg_lo_rx_flush_descq_insert(reg, n);
    reg = sfn5122f_rx_flush_descq_reg_lo_rx_flush_descq_cmd_insert(reg, 1);
    sfn5122f_rx_flush_descq_reg_lo_wr(d, reg);
    sfn5122f_rx_flush_descq_reg_hi_wr(d, sfn5122f_rx_flush_descq_reg_hi_rd(d));
    /* TODO Wait for DRIVER_EVENT */
    /* clear pointer table entries */
    sfn5122f_tx_desc_ptr_tbl_lo_wr(d, n, 0);
    sfn5122f_tx_desc_ptr_tbl_hi_wr(d, n, 0);
    sfn5122f_rx_desc_ptr_tbl_lo_wr(d, n, 0);
    sfn5122f_rx_desc_ptr_tbl_hi_wr(d, n, 0);
    /* Free RX queue tbl entries */
    reg = sfn5122f_buf_tbl_upd_reg_lo_buf_clr_cmd_insert(reg, 1);
    reg = sfn5122f_buf_tbl_upd_reg_lo_buf_clr_start_id_insert(reg,
                                                              queues[n].rx_buf_tbl);
    if (queues[n].userspace) {
        reg = sfn5122f_buf_tbl_upd_reg_lo_buf_clr_end_id_insert(reg,
                                  queues[n].rx_buf_tbl + NUM_ENT_RX_USR);
        reg = sfn5122f_buf_tbl_upd_reg_lo_buf_clr_end_id_insert(reg,
                                  queues[n].rx_buf_tbl + NUM_ENT_RX);
    /* Free TX queue tbl entries */
    reg = sfn5122f_buf_tbl_upd_reg_lo_buf_clr_cmd_insert(reg, 1);
    reg = sfn5122f_buf_tbl_upd_reg_lo_buf_clr_end_id_insert(reg,
                              queues[n].tx_buf_tbl + NUM_ENT_TX);
    reg = sfn5122f_buf_tbl_upd_reg_lo_buf_clr_start_id_insert(reg,
                              queues[n].tx_buf_tbl);
    /* Free EV queue tbl entries */
    reg = sfn5122f_buf_tbl_upd_reg_lo_buf_clr_cmd_insert(reg, 1);
    reg = sfn5122f_buf_tbl_upd_reg_lo_buf_clr_end_id_insert(reg,
                              queues[n].ev_buf_tbl + NUM_ENT_EVQ);
    reg = sfn5122f_buf_tbl_upd_reg_lo_buf_clr_start_id_insert(reg,
                              queues[n].ev_buf_tbl);
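/*
 * init_evq(): allocate buffer table entries for an event ring, program the
 * EVQ pointer table and the per-queue timer, and return the buffer table
 * offset (or -1 on failure).
 */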
static uint32_t init_evq(uint16_t n, lpaddr_t phys, bool interrupt)
    //struct frame_identity frameid = { .base = 0, .bytes = 0 };
    uint64_t reg, buffer_offset;
    reg = sfn5122f_timer_tbl_lo_timer_q_en_insert(reg, 1);
    // set to 0 if interrupts for receives/sends should be generated
    reg = sfn5122f_timer_tbl_lo_host_notify_mode_insert(reg, 0);
    reg = sfn5122f_timer_tbl_lo_int_pend_insert(reg, 0);
    reg = sfn5122f_timer_tbl_lo_int_armd_insert(reg, 0);
    if (use_interrupt && interrupt) {
        reg = sfn5122f_timer_tbl_lo_host_notify_mode_insert(reg, 0);
        reg = sfn5122f_timer_tbl_lo_host_notify_mode_insert(reg, 1);
    // timer mode disabled
    reg = sfn5122f_timer_tbl_lo_timer_mode_insert(reg, 0);
    sfn5122f_timer_tbl_lo_wr(d, n, reg);
    sfn5122f_timer_tbl_hi_wr(d, n, sfn5122f_timer_tbl_hi_rd(d, n));
    r = invoke_frame_identify(queues[n].ev_frame, &frameid);
    assert(err_is_ok(r));
    ev_phys = frameid.base;
    buffer_offset = alloc_buf_tbl_entries(phys, NUM_ENT_EVQ, 0, 0, d);
    if (buffer_offset == -1) {
    DEBUG("EV_QUEUE_%d: buf_off %ld, phys 0x%lx\n", n, buffer_offset, phys);
    reg = sfn5122f_evq_ptr_tbl_lo_rd(d, n);
    reg = sfn5122f_evq_ptr_tbl_lo_evq_en_insert(reg, 1);
    reg = sfn5122f_evq_ptr_tbl_lo_evq_size_insert(reg, 6);
    reg = sfn5122f_evq_ptr_tbl_lo_evq_buf_base_id_insert(reg,
    sfn5122f_evq_ptr_tbl_lo_wr(d, n, reg);
    sfn5122f_evq_ptr_tbl_hi_wr(d, n, sfn5122f_evq_ptr_tbl_hi_rd(d, n));
    /* No write collection for this register */
    reg = sfn5122f_timer_command_reg_lo_rd(d, n);
    reg = sfn5122f_timer_command_reg_lo_tc_timer_val_insert(reg, 0);
    reg = sfn5122f_timer_command_reg_lo_tc_timer_mode_insert(reg, 0);
    reg = sfn5122f_timer_command_reg_lo_tc_timer_mode_insert(reg, 0);
    sfn5122f_timer_command_reg_lo_wr(d, n, reg);
    sfn5122f_evq_rptr_reg_wr(d, n, queues[n].ev_head);
    return buffer_offset;
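/*
 * init_rxq(): allocate buffer table entries for an RX descriptor ring and
 * program the RX_DESC_PTR_TBL entry (buffer base, event queue, owner,
 * size, descriptor type). Returns the buffer table offset or -1.
 */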
static uint32_t init_rxq(uint16_t n, lpaddr_t phys, bool userspace)
    //struct frame_identity frameid = { .base = 0, .bytes = 0 };
    uint64_t reg_lo, reg_hi, buffer_offset;
    /* This will define a buffer in the buffer table, allowing
     * it to be used for event queues, descriptor rings etc. */
    /* Get physical addresses for rx/tx rings and event queue */
    r = invoke_frame_identify(queues[n].rx_frame, &frameid);
    assert(err_is_ok(r));
    rx_phys = frameid.base;
    rx_size = frameid.bytes;
        buffer_offset = alloc_buf_tbl_entries(phys, NUM_ENT_RX_USR, 0, false, d);
        buffer_offset = alloc_buf_tbl_entries(phys, NUM_ENT_RX, 0, false, d);
    if (buffer_offset == -1) {
    DEBUG("RX_QUEUE_%d: buf_off %ld, phys %lx\n", n,
          buffer_offset, phys);
    /* setup RX queue */
    reg_lo = sfn5122f_rx_desc_ptr_tbl_lo_rd(d, n);
    reg_hi = sfn5122f_rx_desc_ptr_tbl_hi_rd(d, n);
    /* Which buffer table entries are used (which is the first entry) */
    reg_lo = sfn5122f_rx_desc_ptr_tbl_lo_rx_descq_buf_base_id_insert(reg_lo, buffer_offset);
    /* Which event queue is associated with this queue */
    reg_lo = sfn5122f_rx_desc_ptr_tbl_lo_rx_descq_evq_id_insert(reg_lo, n);
        reg_lo = sfn5122f_rx_desc_ptr_tbl_lo_rx_descq_owner_id_insert(reg_lo, 0);
        reg_lo = sfn5122f_rx_desc_ptr_tbl_lo_rx_descq_owner_id_insert(reg_lo, n+1);
    reg_lo = sfn5122f_rx_desc_ptr_tbl_lo_rx_descq_label_insert(reg_lo, n);
    /* 1024 entries = 1 (512 = 0; 2048 = 2; 4096 = 3) */
    reg_lo = sfn5122f_rx_desc_ptr_tbl_lo_rx_descq_size_insert(reg_lo, 3);
        reg_lo = sfn5122f_rx_desc_ptr_tbl_lo_rx_descq_type_insert(reg_lo, 0);
        reg_lo = sfn5122f_rx_desc_ptr_tbl_lo_rx_descq_type_insert(reg_lo, 1);
    reg_lo = sfn5122f_rx_desc_ptr_tbl_lo_rx_descq_jumbo_insert(reg_lo, 0);
    reg_lo = sfn5122f_rx_desc_ptr_tbl_lo_rx_descq_en_insert(reg_lo, 1);
    /* Hardware verifies data digest */
    reg_hi = sfn5122f_rx_desc_ptr_tbl_hi_rx_iscsi_ddig_en_insert(reg_hi, 0);
    reg_hi = sfn5122f_rx_desc_ptr_tbl_hi_rx_iscsi_hdig_en_insert(reg_hi, 0);
    sfn5122f_rx_desc_ptr_tbl_lo_wr(d, n, reg_lo);
    sfn5122f_rx_desc_ptr_tbl_hi_wr(d, n, reg_hi);
    return buffer_offset;
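/*
 * init_txq(): same scheme for a TX descriptor ring, additionally wiring up
 * IP/TCP checksum offload via the tx_ip_chksm_dis/tx_tcp_chksm_dis fields.
 */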
static uint32_t init_txq(uint16_t n, uint64_t phys,
                         bool csum, bool userspace)
    //struct frame_identity frameid = { .base = 0, .bytes = 0 };
    uint64_t reg, reg1, buffer_offset;
    buffer_offset = alloc_buf_tbl_entries(phys, NUM_ENT_TX, 0, 0, d);
    if (buffer_offset == -1) {
    DEBUG("TX_QUEUE_%d: buf_off %ld, phys %lx\n", n, buffer_offset, phys);
    /* setup TX queue */
    reg = sfn5122f_tx_desc_ptr_tbl_lo_rd(d, n);
    reg1 = sfn5122f_tx_desc_ptr_tbl_hi_rd(d, n);
    /* Which buffer table entries are used (which is the first entry) */
    reg = sfn5122f_tx_desc_ptr_tbl_lo_tx_descq_buf_base_id_insert(reg,
    /* Which event queue is associated with this queue */
    reg = sfn5122f_tx_desc_ptr_tbl_lo_tx_descq_evq_id_insert(reg, n);
        reg = sfn5122f_tx_desc_ptr_tbl_lo_tx_descq_owner_id_insert(reg, 0);
        reg = sfn5122f_tx_desc_ptr_tbl_lo_tx_descq_owner_id_insert(reg, n+1);
    reg = sfn5122f_tx_desc_ptr_tbl_lo_tx_descq_label_insert(reg, n);
    /* 1024 entries = 1 (512 = 0; 2048 = 2; 4096 = 3) */
    reg = sfn5122f_tx_desc_ptr_tbl_lo_tx_descq_size_insert(reg, 3);
    /* No user-level networking */
        reg = sfn5122f_tx_desc_ptr_tbl_lo_tx_descq_type_insert(reg, 0);
        reg = sfn5122f_tx_desc_ptr_tbl_lo_tx_descq_type_insert(reg, 1);
    reg1 = sfn5122f_tx_desc_ptr_tbl_hi_tx_iscsi_ddig_en_insert(reg1, 0);
    reg1 = sfn5122f_tx_desc_ptr_tbl_hi_tx_iscsi_hdig_en_insert(reg1, 0);
    reg1 = sfn5122f_tx_desc_ptr_tbl_hi_tx_non_ip_drop_dis_insert(reg1, 1);
    reg1 = sfn5122f_tx_desc_ptr_tbl_hi_tx_descq_en_insert(reg1, 1);
    /* Enable offload of checksum */
    reg1 = sfn5122f_tx_desc_ptr_tbl_hi_tx_ip_chksm_dis_insert(reg1, !csum);
    reg1 = sfn5122f_tx_desc_ptr_tbl_hi_tx_tcp_chksm_dis_insert(reg1, !csum);
    sfn5122f_tx_desc_ptr_tbl_lo_wr(d, n, reg);
    sfn5122f_tx_desc_ptr_tbl_hi_wr(d, n, reg1);
    return buffer_offset;
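/*
 * setup_interrupt(): reserve an MSI-X table slot from msix_alloc and route
 * it to the APIC of the given core with the given vector.
 */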
static void setup_interrupt(size_t *msix_index, uint8_t core, uint8_t vector)
    res = bmallocator_alloc(&msix_alloc, msix_index);
    err = get_apicid_from_core(core, &dest);
    assert(err_is_ok(err));
    err = pci_msix_vector_init(*msix_index, dest, vector);
    assert(err_is_ok(err));
    DEBUG("MSI-X vector setup index=%"PRIx64", core=%d apic=%d swvec=%x\n",
          *msix_index, core, dest, vector);

static void resend_interrupt(void* arg)
    uint64_t i = (uint64_t) arg;
    err = queues[i].devif->tx_vtbl.interrupt(queues[i].devif, NOP_CONT, i);
    // If the queue is busy, there is already an outstanding message
    if (err_is_fail(err) && err != FLOUNDER_ERR_TX_BUSY) {
        USER_PANIC("Error when sending interrupt %s \n", err_getstring(err));
/** Global interrupt handler. */
static void global_interrupt_handler(void* arg)
    uint32_t q_to_check;
    uint8_t* net_ivec_fatal = (uint8_t *) int_ker_virt;
    // bit 64 is the indicator for a fatal event
    syserr = (net_ivec_fatal[8] & 0x1);
    // TODO handle fatal interrupt
    USER_PANIC("FATAL INTERRUPT");
    q_to_check = sfn5122f_int_isr0_reg_lo_rd(d);
    for (uint64_t i = 1; i < 32; i++) {
        if ((q_to_check >> i) & 0x1) {
            if (queues[i].use_irq && queues[i].devif != NULL) {
                DEBUG("Interrupt to queue %lu \n", i);
                err = queues[i].devif->tx_vtbl.interrupt(queues[i].devif, NOP_CONT, i);
                if (err_is_fail(err)) {
                    err = queues[i].devif->register_send(queues[i].devif,
                                                         get_default_waitset(),
                                                         MKCONT(resend_interrupt, (void*)i));
    if (q_to_check & 0x1) {
        DEBUG("Interrupt to queue 0 \n");
    // Don't need to start event queues because we're already polling
/******************************************************************************/
/* Management interface implementation */
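/*
 * The idc_* helpers send notifications to the queue driver over the
 * sfn5122f binding; the cd_* handlers below service requests coming the
 * other way.
 */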
static void idc_queue_init_data(struct sfn5122f_binding *b,
                                struct capref registers,
    r = sfn5122f_queue_init_data__tx(b, NOP_CONT, registers, macaddr);
    // TODO: handle busy
    assert(err_is_ok(r));

/** Tell queue driver that we are done initializing the queue. */
static void idc_queue_memory_registered(struct sfn5122f_binding *b)
    r = sfn5122f_queue_memory_registered__tx(b, NOP_CONT);
    // TODO: handle busy
    assert(err_is_ok(r));

/** Send request to queue driver to rewrite the tail pointers of its queues. */
static void idc_write_queue_tails(struct sfn5122f_binding *b)
    qd_write_queue_tails(b);
    r = sfn5122f_write_queue_tails__tx(b, NOP_CONT);
    // TODO: handle busy
    assert(err_is_ok(r));

/** Request from queue driver for register memory cap */
void cd_request_device_info(struct sfn5122f_binding *b)
    qd_queue_init_data(b, *regframe, d_mac[pci_function]);
    idc_queue_init_data(b, *regframe, d_mac[pci_function]);
/** Request from queue driver to initialize hardware queue. */
void cd_register_queue_memory(struct sfn5122f_binding *b,
                              struct capref tx_frame,
                              struct capref rx_frame,
                              struct capref ev_frame,
    // Save state so we can restore the configuration in case we need to do a
    queues[n].enabled = false;
    queues[n].tx_frame = tx_frame;
    queues[n].rx_frame = rx_frame;
    queues[n].ev_frame = ev_frame;
    queues[n].tx_head = 0;
    queues[n].rx_head = 0;
    queues[n].ev_head = 0;
    queues[n].rxbufsz = rxbufsz;
    queues[n].binding = b;
    queues[n].use_irq = use_irq;
    queues[n].userspace = userspace;
    queues[n].msix_index = -1;
    queues[n].msix_intvec = vector;
    queues[n].msix_intdest = core;
    struct frame_identity id;
    err = invoke_frame_identify(ev_frame, &id);
    assert(err_is_ok(err));
    queues[n].ev_buf_tbl = init_evq(n, id.base, use_irq);
    err = invoke_frame_identify(tx_frame, &id);
    assert(err_is_ok(err));
    queues[n].tx_buf_tbl = init_txq(n, id.base, csum_offload, userspace);
    err = invoke_frame_identify(rx_frame, &id);
    assert(err_is_ok(err));
    queues[n].rx_buf_tbl = init_rxq(n, id.base, userspace);
    if (queues[n].ev_buf_tbl == -1 ||
        queues[n].tx_buf_tbl == -1 ||
        queues[n].rx_buf_tbl == -1) {
        DEBUG("Allocating queue failed \n");
    queues[n].enabled = true;
    if (queues[n].use_irq) {
        if (queues[n].msix_intvec != 0) {
            if (queues[n].msix_index == -1) {
                setup_interrupt(&queues[n].msix_index, queues[n].msix_intdest,
                                queues[n].msix_intvec);
    idc_write_queue_tails(queues[n].binding);
    qd_queue_memory_registered(b);
    idc_queue_memory_registered(b);
static errval_t idc_terminate_queue(struct sfn5122f_binding *b, uint16_t n)
    DEBUG("idc_terminate_queue(q=%d) \n", n);
    queues[n].enabled = false;
    queues[n].binding = NULL;

static errval_t idc_register_port_filter(struct sfn5122f_binding *b,
                                         sfn5122f_port_type_t type,
    printf("IP %d \n", ip);
    DEBUG("idc_register_port_filter: called (q=%d t=%d p=%d)\n",
    struct sfn5122f_filter_ip f = {
        .dst_ip = htonl(ip),
    *err = reg_port_filter(&f, fid);
    DEBUG("filter registered: err=%"PRIu64", fid=%"PRIu64"\n", *err, *fid);

static errval_t idc_unregister_filter(struct sfn5122f_binding *b,
                                      uint64_t filter, errval_t *err)
    DEBUG("unregister_filter: called (%"PRIx64")\n", filter);
    *err = LIB_ERR_NOT_IMPLEMENTED;

static struct sfn5122f_rx_vtbl rx_vtbl = {
    .request_device_info = cd_request_device_info,
    .register_queue_memory = cd_register_queue_memory,

static struct sfn5122f_rpc_rx_vtbl rpc_rx_vtbl = {
    .terminate_queue_call = idc_terminate_queue,
    .register_port_filter_call = idc_register_port_filter,
    .unregister_filter_call = idc_unregister_filter,
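/*
 * devif interface: cd_create_queue() hands out a hardware queue to a
 * device-queue client. The client supplies a single frame that is carved
 * into TX, RX and EV rings (in that order), and receives the MAC address,
 * the queue index and a copy of the register frame capability in return.
 */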
static void cd_create_queue(struct sfn5122f_devif_binding *b, struct capref frame,
                            bool user, bool interrupt, uint8_t core, uint8_t msix_vector)
    DEBUG("cd_create_queue \n");
    struct frame_identity id;
    for (int i = 1; i < NUM_QUEUES; i++) {
        if (queues[i].enabled == false) {
        err = NIC_ERR_ALLOC_QUEUE;
        err = b->tx_vtbl.create_queue_response(b, NOP_CONT, 0, 0, NULL_CAP, err);
        //err = b->tx_vtbl.create_queue_response(b, NOP_CONT, 0, err);
        assert(err_is_ok(err));
    queues[n].use_irq = interrupt;
    queues[n].enabled = false;
    queues[n].tx_frame = frame;
    queues[n].tx_head = 0;
    queues[n].rx_head = 0;
    queues[n].ev_head = 0;
    queues[n].rxbufsz = MTU_MAX;
    queues[n].devif = b;
    queues[n].userspace = user;
    queues[n].msix_index = -1;
    queues[n].msix_intdest = core;
    queues[n].msix_intvec = msix_vector;
    if (queues[n].use_irq && use_msix) {
        if (queues[n].msix_intvec != 0) {
            if (queues[n].msix_index == -1) {
                setup_interrupt(&queues[n].msix_index, queues[n].msix_intdest,
                                queues[n].msix_intvec);
    err = invoke_frame_identify(frame, &id);
    assert(err_is_ok(err));
    queues[n].tx_buf_tbl = init_txq(n, id.base, csum_offload, user);
    queues[n].rx_buf_tbl = init_rxq(n, id.base + sizeof(uint64_t)*TX_ENTRIES, user);
    queues[n].ev_buf_tbl = init_evq(n, id.base + sizeof(uint64_t)*(TX_ENTRIES+RX_ENTRIES),
    if (queues[n].ev_buf_tbl == -1 ||
        queues[n].tx_buf_tbl == -1 ||
        queues[n].rx_buf_tbl == -1) {
        err = NIC_ERR_ALLOC_QUEUE;
        //err = b->tx_vtbl.create_queue_response(b, NOP_CONT, 0, err);
        err = b->tx_vtbl.create_queue_response(b, NOP_CONT, 0, 0, NULL_CAP, err);
        assert(err_is_ok(err));
    queues[n].enabled = true;
    DEBUG("created queue %d \n", n);
    //err = b->tx_vtbl.create_queue_response(b, NOP_CONT, n, *regframe, SYS_ERR_OK);
    err = slot_alloc(&regs);
    assert(err_is_ok(err));
    err = cap_copy(regs, *regframe);
    assert(err_is_ok(err));
    err = b->tx_vtbl.create_queue_response(b, NOP_CONT, d_mac[pci_function], n,
    assert(err_is_ok(err));
    DEBUG("cd_create_queue end\n");
static void cd_register_region(struct sfn5122f_devif_binding *b, uint16_t qid,
                               struct capref region)
    struct frame_identity id;
    uint64_t buffer_offset = 0;
    err = invoke_frame_identify(region, &id);
    if (err_is_fail(err)) {
        err = b->tx_vtbl.register_region_response(b, NOP_CONT, 0, NIC_ERR_REGISTER_REGION);
        assert(err_is_ok(err));
    size_t size = id.bytes;
    lpaddr_t addr = id.base;
    // TODO unsigned/signed
    buffer_offset = alloc_buf_tbl_entries(addr, size/BUF_SIZE, qid, true, d);
    if (buffer_offset == -1) {
        err = b->tx_vtbl.register_region_response(b, NOP_CONT, 0, NIC_ERR_REGISTER_REGION);
        assert(err_is_ok(err));
    err = b->tx_vtbl.register_region_response(b, NOP_CONT, buffer_offset, SYS_ERR_OK);
    if (err_is_fail(err)) {

static void cd_deregister_region(struct sfn5122f_devif_binding *b, uint64_t buftbl_id,
    free_buf_tbl_entries(buftbl_id, size/BUF_SIZE, d);
    err = b->tx_vtbl.deregister_region_response(b, NOP_CONT, SYS_ERR_OK);
    assert(err_is_ok(err));

static void cd_destroy_queue(struct sfn5122f_devif_binding *b, uint16_t qid)
    queues[qid].enabled = false;
    queues[qid].binding = NULL;
    err = b->tx_vtbl.destroy_queue_response(b, NOP_CONT, SYS_ERR_OK);
    assert(err_is_ok(err));

static struct sfn5122f_devif_rx_vtbl rx_vtbl_devif = {
    .create_queue_call = cd_create_queue,
    .destroy_queue_call = cd_destroy_queue,
    .register_region_call = cd_register_region,
    .deregister_region_call = cd_deregister_region,
static void export_cb(void *st, errval_t err, iref_t iref)
    const char *suffix = "_sfn5122fmng";
    char name[strlen(service_name) + strlen(suffix) + 1];
    assert(err_is_ok(err));
    // Build label for internal management service
    sprintf(name, "%s%s", service_name, suffix);
    err = nameservice_register(name, iref);
    assert(err_is_ok(err));
    DEBUG("Management interface exported\n");

static errval_t connect_cb(void *st, struct sfn5122f_binding *b)
    DEBUG("New connection on management interface\n");
    b->rx_vtbl = rx_vtbl;
    b->rpc_rx_vtbl = rpc_rx_vtbl;

static void export_devif_cb(void *st, errval_t err, iref_t iref)
    const char *suffix = "_sfn5122fmng_devif";
    char name[strlen(service_name) + strlen(suffix) + 1];
    assert(err_is_ok(err));
    // Build label for internal management service
    sprintf(name, "%s%s", service_name, suffix);
    err = nameservice_register(name, iref);
    assert(err_is_ok(err));
    DEBUG("Devif management interface exported\n");

static errval_t connect_devif_cb(void *st, struct sfn5122f_devif_binding *b)
    DEBUG("New connection on devif management interface\n");
    b->rx_vtbl = rx_vtbl_devif;
/****************************************************************************/
/* Net filter interface implementation */
/****************************************************************************/
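/*
 * cb_install_filter()/cb_remove_filter(): install and remove IP filters on
 * behalf of the net_filter interface. Removal clears the corresponding
 * RX_FILTER_TBL entry and marks the software slot unused.
 */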
static errval_t cb_install_filter(struct net_filter_binding *b,
                                  net_filter_filter_type_t type,
    printf("IP %d \n", ip);
    struct sfn5122f_filter_ip f = {
        .dst_port = dst_port,
        .src_port = src_port,
        .dst_ip = htonl(dst_ip),
        .src_ip = htonl(src_ip),
    errval_t err = reg_port_filter(&f, fid);
    assert(err_is_ok(err));
    DEBUG("filter registered: err=%"PRIu64", fid=%"PRIu64"\n", err, *fid);

static errval_t cb_remove_filter(struct net_filter_binding *b,
                                 net_filter_filter_type_t type,
    if ((type == net_filter_PORT_UDP || type == net_filter_PORT_TCP)
        && filters_rx_ip[filter_id].enabled == true) {
        filters_rx_ip[filter_id].enabled = false;
        sfn5122f_rx_filter_tbl_lo_wr(d, filter_id, 0);
        sfn5122f_rx_filter_tbl_hi_wr(d, filter_id, 0);
        *err = NET_FILTER_ERR_NOT_FOUND;
    DEBUG("unregister_filter: called (%"PRIx64")\n", filter_id);

static struct net_filter_rpc_rx_vtbl net_filter_rpc_rx_vtbl = {
    .install_filter_ip_call = cb_install_filter,
    .remove_filter_call = cb_remove_filter,
    .install_filter_mac_call = NULL,

static void net_filter_export_cb(void *st, errval_t err, iref_t iref)
    printf("exported net filter interface\n");
    err = nameservice_register("net_filter_sfn5122f", iref);
    assert(err_is_ok(err));
    DEBUG("Net filter interface exported\n");

static errval_t net_filter_connect_cb(void *st, struct net_filter_binding *b)
    printf("New connection on net filter interface\n");
    b->rpc_rx_vtbl = net_filter_rpc_rx_vtbl;
/*
 * Initialize management interface for queue drivers.
 * This has to be done _after_ the hardware is initialized.
 */
static void initialize_mngif(void)
    r = sfn5122f_export(NULL, export_cb, connect_cb, get_default_waitset(),
                        IDC_BIND_FLAGS_DEFAULT);
    assert(err_is_ok(r));
    r = sfn5122f_devif_export(NULL, export_devif_cb, connect_devif_cb,
                              get_default_waitset(), 1);
    assert(err_is_ok(r));
    r = net_filter_export(NULL, net_filter_export_cb, net_filter_connect_cb,
                          get_default_waitset(), 1);
    assert(err_is_ok(r));
/*****************************************************************************/
/* ARP service client */

/** Get information about the local TCP/IP configuration */
static errval_t arp_ip_info(void)
    errval_t err, msgerr;
    err = arp_binding->rpc_tx_vtbl.ip_info(arp_binding, 0, &msgerr, &ip, &gw, &mask);
    if (err_is_fail(err)) {

static void a_bind_cb(void *st, errval_t err, struct net_ARP_binding *b)
    assert(err_is_ok(err));
    net_ARP_rpc_client_init(arp_binding);
    net_arp_connected = true;

static void bind_arp(struct waitset *ws)
    DEBUG("bind_arp()\n");
    err = nameservice_blocking_lookup("sfn5122f_ARP", &iref);
    assert(err_is_ok(err));
    DEBUG("resolved\n");
    err = net_ARP_bind(iref, a_bind_cb, NULL, ws, IDC_BIND_FLAGS_DEFAULT);
    assert(err_is_ok(err));
    DEBUG("binding initiated\n");
    while (!net_arp_connected) {
        event_dispatch_non_block(ws);
        event_dispatch_non_block(get_default_waitset());
    DEBUG("bound_arp\n");
/******************************************************************************/
/* Initialization code for driver */

/** Callback from pci to initialize a specific PCI device. */
static void pci_init_card(struct device_mem* bar_info, int bar_count)
    d = malloc(sizeof(*d));
    /* Map first BAR for register access */
    assert(bar_count >= 1);
    DEBUG("BAR count %d \n", bar_count);
    map_device(&bar_info[0]);
    regframe = bar_info[0].frame_cap;
    DEBUG("BAR[0] mapped (v=%llx p=%llx l=%llx)\n",
          (unsigned long long) bar_info[0].vaddr,
          (unsigned long long) bar_info[0].paddr,
          (unsigned long long) bar_info[0].bytes);
    /* Initialize Mackerel binding */
    sfn5122f_initialize(d, (void*) bar_info[0].vaddr);
    d_virt = bar_info[0].vaddr;
    // Initialize manager for MSI-X vectors
    //d_msix = malloc(sizeof(*d_msix));
    //map_device(&bar_info[1]);
    //sfn5122f_msix_initialize(d_msix, (void*) bar_info[1].vaddr);
    DEBUG("Enabling MSI-X interrupts\n");
    uint16_t msix_count = 0;
    err = pci_msix_enable(&msix_count);
    assert(err_is_ok(err));
    assert(msix_count > 0);
    DEBUG("MSI-X #vecs=%d\n", msix_count);
    res = bmallocator_init(&msix_alloc, msix_count);
    DEBUG("Using legacy interrupts\n");
    /* Get all information needed */
    /* Initialize hardware registers etc. */
    /* Start interrupts / mac_stats etc. */
    /* Init rx filters */
    init_rx_filter_config();
    /* initialize management interface */
static void parse_cmdline(int argc, char **argv)
    /* XXX: the following contains a hack to only start the driver when
     * the supplied bus/dev/funct matches the Kaluga start arguments. */
    for (i = 1; i < argc; i++) {
        if (strncmp(argv[i], "cardname=", strlen("cardname=") - 1) == 0) {
            service_name = argv[i] + strlen("cardname=");
        } else if (strncmp(argv[i], "bus=", strlen("bus=") - 1) == 0) {
            tmp = atol(argv[i] + strlen("bus="));
            if (pci_bus == PCI_DONT_CARE) {
            if (pci_bus != tmp) {
                printf("DRIVER STARTED FOR BUS: 0x%x/0x%x\n", pci_bus, tmp);
            pci_bus = atol(argv[i] + strlen("bus="));
        } else if (strncmp(argv[i], "device=", strlen("device=") - 1) == 0) {
            tmp = atol(argv[i] + strlen("device="));
            if (pci_device == PCI_DONT_CARE) {
            if (pci_device != tmp) {
                printf("DRIVER STARTED FOR DEVICE: 0x%x/0x%x\n", pci_device, tmp);
        } else if (strncmp(argv[i], "function=", strlen("function=") - 1) == 0) {
            tmp = atol(argv[i] + strlen("function="));
            if (pci_function == PCI_DONT_CARE) {
            if (pci_function != tmp) {
                printf("DRIVER STARTED FOR FUNCTION: 0x%x/0x%x\n", pci_function, tmp);
            if (pci_function != 0) {
                USER_PANIC("Second port not implemented, please use function=0");
        } else if (strncmp(argv[i], "msix=", strlen("msix=") - 1) == 0) {
            USER_PANIC("MSI-X not fully supported yet");
            use_msix = !!atol(argv[i] + strlen("msix="));
            //qd_argument(argv[i]);
            printf("Unrecognized argument %s ignored \n", argv[i]);
static void eventloop(void)
    ws = get_default_waitset();
    DEBUG("SFN5122F enter event loop \n");

static void cd_main(void)

static errval_t init_stack(void)
    struct netd_state *state;
    char* card_name = "sfn5122f";
    uint32_t allocated_queue = 0;
    uint32_t total_queues = 16;
    uint8_t filter_type = 2;
    bool do_dhcp = false;
    char* ip_addr_str = "10.113.4.39";
    char* netmask_str = "255.255.252.0";
    char* gateway_str = "10.113.4.4";
    err = init_device_manager(card_name, total_queues, filter_type);
    if (err_is_fail(err)) {
    err = netd_init(&state, card_name, allocated_queue, do_dhcp,
                    ip_addr_str, netmask_str, gateway_str);
    if (err_is_fail(err)) {
int main(int argc, char** argv)
    DEBUG("SFN5122F driver started \n");
    uint32_t parsed = sscanf(argv[argc - 1], "%x:%x:%x:%x:%x", &pci_vendor,
                             &pci_devid, &pci_bus, &pci_device, &pci_function);
        pci_vendor = PCI_DONT_CARE;
        pci_devid = PCI_DONT_CARE;
        pci_bus = PCI_DONT_CARE;
        pci_device = PCI_DONT_CARE;
    if ((pci_vendor != PCI_VENDOR_SOLARFLARE) || (pci_devid != DEVICE_ID)) {
        printf("VENDOR/DEVICE ID MISMATCH: %x/%x %x/%x\n",
               pci_vendor, PCI_VENDOR_SOLARFLARE, pci_devid, DEVICE_ID);
    parse_cmdline(argc, argv);
    /* Register our device driver */
    err = pci_client_connect();
    assert(err_is_ok(err));
    err = pci_register_driver_irq(pci_init_card, PCI_CLASS_ETHERNET,
                                  PCI_DONT_CARE, PCI_DONT_CARE,
                                  pci_vendor, pci_devid,
                                  pci_bus, pci_device, pci_function,
                                  global_interrupt_handler, NULL);
    while (!initialized) {
        event_dispatch(get_default_waitset());
    init_queue_0("sfn5122f", d_mac[pci_function], d_virt,
                 use_interrupt, false, &queues[0].ev_frame,
                 &queues[0].tx_frame, &queues[0].rx_frame);
    queues[0].enabled = false;
    queues[0].tx_head = 0;
    queues[0].rx_head = 0;
    queues[0].ev_head = 0;
    queues[0].rxbufsz = MTU_MAX;
    queues[0].binding = NULL;
    queues[0].use_irq = true;
    queues[0].userspace = false;
    struct frame_identity id;
    err = invoke_frame_identify(queues[0].ev_frame, &id);
    assert(err_is_ok(err));
    queues[0].ev_buf_tbl = init_evq(0, id.base, queues[0].use_irq);
    err = invoke_frame_identify(queues[0].tx_frame, &id);
    assert(err_is_ok(err));
    queues[0].tx_buf_tbl = init_txq(0, id.base, csum_offload, false);
    err = invoke_frame_identify(queues[0].rx_frame, &id);
    assert(err_is_ok(err));
    queues[0].rx_buf_tbl = init_rxq(0, id.base, false);
    write_queue_tails();
    if (err_is_fail(err)) {
        USER_PANIC("Failed initializing netd etc. %s \n", err_getstring(err));
    struct sfn5122f_filter_ip f = {
        .dst_ip = htonl(0x2704710A),
        .type_ip = sfn5122f_PORT_UDP,
    for (int i = 0; i < 10; i++) {
        err = reg_port_filter(&f, &fid);
        assert(err_is_ok(err));