--- /dev/null
+--------------------------------------------------------------------------
+-- Copyright (c) 2007-2012, ETH Zurich.
+-- All rights reserved.
+--
+-- This file is distributed under the terms in the attached LICENSE file.
+-- If you do not find this file, copies can be found by writing to:
+-- ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+--
+-- Hakefile for /lib/net
+--
+--------------------------------------------------------------------------
+
+[ build library {
+ target = "net",
+ cFiles = [ "net.c", "netbufs.c", "netif.c", "pbuf.c" ],
+ addIncludes = [ "include", "/lib/lwip-2.0.2/src/include/" ],
+ addLibraries = libDeps [ "lwip2", "devif", "devif_backend_idc",
+ "devif_backend_solarflare", "devif_backend_e10k",
+ "devif_backend_loopback" ]
+ },
+
+ build application {
+ target = "net_udp_echo",
+ cFiles = [ "test/udp_echo.c" ],
+ addIncludes = [ "include", "/lib/lwip-2.0.2/src/include/" ],
+ addLibraries = libDeps [ "net" ]
+ }
+]
--- /dev/null
+/**
+ * @brief debug macros for the networking library (debug.h)
+ */
+
+/*
+ * Copyright (c) 2017, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Universitaetsstrasse 6, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef LIB_NET_DEBUG_H_
+#define LIB_NET_DEBUG_H_
+
+//#define NETDEBUG(x...) debug_printf(NETDEBUG_SUBSYSTEM x);
+
+#define NETDEBUG(fmt, ...) debug_printf(NETDEBUG_SUBSYSTEM ": %s : " fmt, __FUNCTION__, ##__VA_ARGS__)
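+
+/*
+ * NETDEBUG_SUBSYSTEM must be a string literal and must be defined by the
+ * translation unit before its first use of NETDEBUG, e.g. as net.c does:
+ *
+ *     #define NETDEBUG_SUBSYSTEM "net"
+ *     ...
+ *     NETDEBUG("initializing networking...\n");
+ */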
+
+#endif /* LIB_NET_DEBUG_H_ */
--- /dev/null
+/**
+ * @brief public interface of the networking library (net.h)
+ */
+
+/*
+ * Copyright (c) 2017, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Universitaetsstrasse 6, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+
+#ifndef LIB_NET_INCLUDE_NETWORKING_H_
+#define LIB_NET_INCLUDE_NETWORKING_H_
+
+// forward declarations
+struct devq;
+struct eth_addr;
+
+errval_t networking_init_default(void);
+
+static inline errval_t networking_init_with_queue(void *q) {return SYS_ERR_OK;}
+
+errval_t networking_poll(void);
+
+
+
+errval_t networking_get_mac(struct devq *q, uint8_t *hwaddr, uint8_t hwaddrlen);
+
+
+
+/**
+ * @brief creates a queue to the given card and the queueid
+ *
+ * @param cardname network card to create the queue for
+ * @param queueid queueid of the network card
+ * @param retqueue returns the pointer to the queue
+ *
+ * @return SYS_ERR_OK on success, errval on failure
+ */
+errval_t networking_create_queue(const char *cardname, uint64_t queueid,
+ struct devq **retqueue);
+
+/**
+ * @brief obtains the default settings for initialization of the driver
+ *
+ * @param queue returns the queue to be used
+ * @param cardname returns the card name to be used
+ *
+ * @return SYS_ERR_OK on success, SKB_ERR_* on failure
+ */
+errval_t networking_get_defaults(uint64_t *queue, char **cardname);
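+
+/*
+ * Typical use (illustrative sketch, mirroring what networking_init_default()
+ * and the udp_echo test do; error handling shortened):
+ *
+ *     errval_t err = networking_init_default();
+ *     if (err_is_fail(err)) {
+ *         USER_PANIC_ERR(err, "failed to initialize the network");
+ *     }
+ *
+ *     // drive the default interface by polling
+ *     while (true) {
+ *         networking_poll();
+ *     }
+ */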
+
+
+#endif /* LIB_NET_INCLUDE_NETWORKING_H_ */
--- /dev/null
+/**
+ * @brief networking buffer pool management (netbufs.h)
+ */
+
+/*
+ * Copyright (c) 2017, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Universitaetsstrasse 6, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+
+#ifndef LIB_NET_INCLUDE_NETWORKING_BUFFER_H_
+#define LIB_NET_INCLUDE_NETWORKING_BUFFER_H_
+
+#define NETWORKING_BUFFER_DEFAULT_SIZE 2048
+
+struct net_buf_pool;
+struct pbuf;
+struct devq;
+
+/**
+ * @brief initializes the networking buffer pools
+ *
+ * @param dev_q the device queue to create the buffer pool for
+ * @param numbuf number of initial buffers
+ * @param size size of the networking buffer
+ * @param retbp buffer pool to initialize
+ *
+ * @return SYS_ERR_OK on success, errval on failure
+ */
+errval_t net_buf_init(struct devq *dev_q, size_t numbuf, size_t size,
+ struct net_buf_pool **retbp);
+
+
+/**
+ * @brief grows the number of available buffers
+ *
+ * @param bp buffer pool to grow
+ * @param numbuf number of buffers to create
+ * @param size size of a buffer
+ *
+ * @return SYS_ERR_OK on success, errval on failure
+ */
+errval_t net_buf_grow(struct net_buf_pool *bp, size_t numbuf,
+ size_t size);
+
+/**
+ * @brief adds a previously allocated frame to the buffer pool
+ *
+ * @param bp buffer pool to add the frame to
+ * @param frame frame capability
+ * @param buffersize size of a buffer
+ *
+ * @return SYS_ERR_OK on success, errval on failure
+ */
+errval_t net_buf_add(struct net_buf_pool *bp,
+ struct capref frame, size_t buffersize);
+
+struct pbuf *net_buf_alloc(struct net_buf_pool *bp);
+
+/**
+ * @brief frees a network buffer, returning it to its pool's free list
+ *
+ * @param p the pbuf to be freed (must have been allocated from a net buffer pool)
+ */
+void net_buf_free(struct pbuf *p);
+
+/**
+ * @brief obtains the pbuf that backs the buffer at (regionid, offset)
+ *
+ * @param bp        buffer pool to look up the buffer in
+ * @param regionid  devq region id the buffer belongs to
+ * @param offset    offset of the buffer within the region
+ *
+ * @return pointer to the pbuf on success, NULL if there is no such buffer
+ */
+struct pbuf *net_buf_get_by_region(struct net_buf_pool *bp,
+ uint32_t regionid, size_t offset);
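+
+/*
+ * Illustrative pool lifecycle (the queue handle and the sizes are assumptions
+ * taken from the defaults used in net.c):
+ *
+ *     struct net_buf_pool *pool;
+ *     err = net_buf_init(queue, 1024, 2048, &pool);   // queue: a struct devq *
+ *
+ *     struct pbuf *p = net_buf_alloc(pool);
+ *     if (p != NULL) {
+ *         // ... enqueue p to the device queue or hand it to LWIP ...
+ *         net_buf_free(p);                            // returns p to the pool
+ *     }
+ */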
+
+#endif /* LIB_NET_INCLUDE_NETWORKING_BUFFER_H_ */
--- /dev/null
+/**
+ * @brief network interface (netif) management for LWIP (netif.h)
+ */
+
+/*
+ * Copyright (c) 2017, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Universitaetsstrasse 6, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+
+#ifndef LIB_NET_INCLUDE_NET_NETIF_H_
+#define LIB_NET_INCLUDE_NET_NETIF_H_
+
+// forward declarations
+struct netif;
+struct devq;
+
+
+/*
+ * ===============================================================================
+ * Network Interface Management
+ * ===============================================================================
+ */
+
+
+/**
+ * @brief initializes a netif structure for LWIP with a device queue
+ *
+ * @param netif the netif to be initialized
+ * @param devq the device queue to be used
+ *
+ * @return SYS_ERR_OK on success, errval on failure
+ */
+errval_t net_if_init_devq(struct netif *netif, struct devq *devq);
+
+
+/**
+ * @brief adds the netif to LWIP
+ *
+ * @param netif the netif to be added
+ * @param state state to be passed to LWIP
+ *
+ * @return SYS_ERR_OK on success, errval on failure
+ */
+errval_t net_if_add(struct netif *netif, void *state);
+
+
+/**
+ * @brief removes a network interface
+ *
+ * @param netif the LWIP netif
+ *
+ * @return SYS_ERR_OK on success, errval on failure
+ */
+errval_t net_if_remove(struct netif *netif);
+
+/*
+ * ===============================================================================
+ * Buffer Management
+ * ===============================================================================
+ */
+
+
+/**
+ * @brief adds a new receive buffer to the interface
+ *
+ * @param netif the LWIP netif
+ * @param pbuf packet buffer to be added
+ *
+ * @return SYS_ERR_OK on success, errval on failure
+ */
+errval_t net_if_add_rx_buf(struct netif *netif, struct pbuf *pbuf);
+
+/**
+ * @brief adds a new transmit buffer to the interface
+ *
+ * @param netif the LWIP netif
+ * @param pbuf packet buffer to be transmitted
+ *
+ * @return SYS_ERR_OK on success, errval on failure
+ */
+errval_t net_if_add_tx_buf(struct netif *netif, struct pbuf *pbuf);
+
+
+/*
+ * ===============================================================================
+ * Polling the interfaces
+ * ===============================================================================
+ */
+
+
+/**
+ * @brief polls the network interface for new incoming packets
+ *
+ * @param netif the LWIP netif to be polled
+ *
+ * @return SYS_ERR_OK on success, errval on failure
+ */
+errval_t net_if_poll(struct netif *netif);
+
+/**
+ * @brief polls all added network interfaces
+ *
+ * @return SYS_ERR_OK on success, errval on failure
+ */
+errval_t net_if_poll_all(void);
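+
+/*
+ * Illustrative bring-up and poll loop, following networking_init_default()
+ * (netif, queue, pool and state are assumed to be set up by the caller):
+ *
+ *     err = net_if_init_devq(&netif, queue);    // MAC, output fns, name
+ *     err = net_if_add(&netif, &state);         // register with LWIP
+ *     netif_set_default(&netif);
+ *
+ *     struct pbuf *p = net_buf_alloc(pool);     // pre-post RX buffers
+ *     err = net_if_add_rx_buf(&netif, p);
+ *
+ *     while (true) {
+ *         net_if_poll(&netif);                  // handle RX/TX completions
+ *     }
+ */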
+
+#endif /* LIB_NET_INCLUDE_NET_NETIF_H_ */
--- /dev/null
+/*
+ * Copyright (c) 2017, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Universitaetsstrasse 6, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+// stdlib includes
+
+// barrelfish includes
+
+// lwip includes
+#include "lwip/init.h"
+#include "lwip/netif.h"
+#include "lwip/ip.h"
+#include "lwip/prot/ethernet.h"
+
+
+#include <net_interfaces/flags.h>
+#include "networking_internal.h"
+
+struct net_state state = {0};
+
+#define NETWORKING_DEFAULT_QUEUE_ID 0
+#define NETWORKING_BUFFER_COUNT 1024
+#define NETWORKING_BUFFER_SIZE 2048
+
+
+#define NETDEBUG_SUBSYSTEM "net"
+
+/**
+ * @brief obtains the default settings for initialization of the driver
+ *
+ * @param queue returns the queue to be used
+ * @param cardname returns the card name to be used
+ *
+ * @return SYS_ERR_OK on success, SKB_ERR_* on failure
+ */
+errval_t networking_get_defaults(uint64_t *queue, char **cardname)
+{
+ /* TODO: get the values from the SKB */
+
+ *queue = NETWORKING_DEFAULT_QUEUE_ID;
+ *cardname = "loopback";
+
+ return SYS_ERR_OK;
+}
+
+static errval_t create_loopback_queue (uint64_t queueid, struct devq **retqueue)
+{
+ errval_t err;
+
+ debug_printf("net: creating loopback queue.\n");
+
+ err = loopback_queue_create((struct loopback_queue **)retqueue);
+ if (err_is_fail(err)) {
+ return err;
+ }
+
+ return SYS_ERR_OK;
+}
+
+static errval_t create_driver_queue (uint64_t queueid, struct devq **retqueue)
+{
+
+ return SYS_ERR_OK;
+}
+
+static errval_t create_e10k_queue (uint64_t queueid, struct devq **retqueue)
+{
+ return SYS_ERR_OK;
+}
+
+static errval_t create_sfn5122f_queue (uint64_t queueid, struct devq **retqueue)
+{
+ return SYS_ERR_OK;
+}
+
+
+typedef errval_t (*queue_create_fn)(uint64_t queueid, struct devq **retqueue);
+struct networking_card
+{
+ char *cardname;
+ queue_create_fn createfn;
+} networking_cards [] = {
+ { "loopback", create_loopback_queue},
+ { "e1000", create_driver_queue},
+ { "e10k", create_e10k_queue},
+ { "sfn5122f", create_sfn5122f_queue},
+ { NULL, NULL}
+};
+
+
+/**
+ * @brief creates a queue to the given card and the queueid
+ *
+ * @param cardname network card to create the queue for
+ * @param queueid queueid of the network card
+ * @param retqueue returns the pointer to the queue
+ *
+ * @return SYS_ERR_OK on success, errval on failure
+ */
+errval_t networking_create_queue(const char *cardname, uint64_t queueid,
+ struct devq **retqueue)
+{
+ debug_printf("net: creating queue for card='%s', queueid=%" PRIu64 "...\n",
+ cardname, queueid);
+
+ struct networking_card *nc = networking_cards;
+ while(nc->cardname != NULL) {
+ if (strncmp(cardname, nc->cardname, strlen(nc->cardname)) == 0) {
+ return nc->createfn(queueid, retqueue);
+ }
+ nc++;
+ }
+
+ debug_printf("net: ERROR unknown queue. card='%s', queueid=%" PRIu64 "\n",
+ cardname, queueid);
+
+ return -1;
+}
+
+errval_t networking_get_mac(struct devq *q, uint8_t *hwaddr, uint8_t hwaddrlen) {
+ debug_printf("net: obtaining MAC address for card.\n");
+ return SYS_ERR_OK;
+}
+
+#define NETWORKING_POLL_MAX 100
+#include <lwip/pbuf.h>
+#include <lwip/prot/ethernet.h>
+#include <lwip/prot/ip4.h>
+#include <lwip/prot/udp.h>
+
+errval_t networking_poll(void)
+{
+ struct net_state *st = &state;
+
+ return net_if_poll(&st->netif);
+}
+
+/**
+ * @brief initializes the networking library with the default card and queue
+ *
+ * @return SYS_ERR_OK on success, errval on failure
+ */
+errval_t networking_init_default(void) {
+ errval_t err;
+
+ if(state.initialized) {
+ debug_printf("WARNING. initialize called twice. Ignoring\n");
+ return SYS_ERR_OK;
+ }
+
+ NETDEBUG("initializing networking...\n");
+
+ // obtain the settings to create the queue
+ err = networking_get_defaults(&state.queueid, &state.cardname);
+ if (err_is_fail(err)) {
+ return err;
+ }
+
+ // create the queue
+ err = networking_create_queue(state.cardname, state.queueid, &state.queue);
+ if (err_is_fail(err)) {
+ return err;
+ }
+
+ // initialize LWIP
+ NETDEBUG("initializing LWIP...\n");
+ lwip_init();
+
+ /* create buffers */
+ err = net_buf_init(state.queue, NETWORKING_BUFFER_COUNT,
+ NETWORKING_BUFFER_SIZE, &state.pool);
+ if (err_is_fail(err)) {
+ goto out_err1;
+ }
+
+ NETDEBUG("creating netif for LWIP...\n");
+ err = net_if_init_devq(&state.netif, state.queue);
+ if (err_is_fail(err)) {
+ goto out_err2;
+ }
+
+ err = net_if_add(&state.netif, &state);
+ if (err_is_fail(err)) {
+ goto out_err2;
+ }
+
+ NETDEBUG("setting default netif...\n");
+ netif_set_default(&state.netif);
+
+
+ NETDEBUG("adding RX buffers\n");
+
+ for (int i = 0; i < 10; i++) {
+ struct pbuf *p = net_buf_alloc(state.pool);
+ if (p == NULL) {
+ NETDEBUG("net: WARNING there was no buffer\n");
+ break;
+ }
+ err = net_if_add_rx_buf(&state.netif, p);
+ if (err_is_fail(err)) {
+ break;
+ }
+ }
+
+
+ NETDEBUG("starting DHCP...\n");
+
+
+ NETDEBUG("initialization complete.\n");
+
+    state.initialized = true;
+
+    return SYS_ERR_OK;
+
+ out_err2:
+ // TODO: clear buffers
+
+ out_err1:
+ // TODO: cleanup queue
+
+
+
+ return err;
+}
+
--- /dev/null
+/*
+ * Copyright (c) 2017, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Universitaetsstrasse 6, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <barrelfish/barrelfish.h>
+
+#include <devif/queue_interface.h>
+
+#include <lwip/pbuf.h>
+
+#include "networking_internal.h"
+#define NETDEBUG_SUBSYSTEM "net_buf"
+
+
+///< the default flags to map the buffers
+#define NETWORKING_DEFAULT_BUFFER_FLAGS VREGION_FLAGS_READ_WRITE
+
+///< buffer alignment
+#define NETWORKING_BUFFER_ALIGN 2048
+
+
+
+/**
+ * @brief initializes the networking buffer pools
+ *
+ * @param dev_q the device queue to create the buffer pool for
+ * @param numbuf number of initial buffers
+ * @param size size of the networking buffer
+ * @param retbp buffer pool to initialize
+ *
+ * @return SYS_ERR_OK on success, errval on failure
+ */
+errval_t net_buf_init(struct devq *dev_q, size_t numbuf, size_t size,
+ struct net_buf_pool **retbp)
+{
+ errval_t err;
+
+ assert(retbp);
+
+ NETDEBUG("initializing buffer pool with %zu x %zu buffers...\n", numbuf, size);
+
+ struct net_buf_pool *netbp = calloc(1, sizeof(*netbp));
+ if (netbp == NULL) {
+ return LIB_ERR_MALLOC_FAIL;
+ }
+
+ netbp->dev_q = dev_q;
+
+ err = net_buf_grow(netbp, numbuf, size);
+ if (err_is_fail(err)) {
+ free(netbp);
+ return err;
+ }
+
+ *retbp = netbp;
+
+ return SYS_ERR_OK;
+}
+
+/**
+ * @brief adds a previously allocated frame to the buffer pool
+ *
+ * @param bp buffer pool to add the frame to
+ * @param frame frame capability
+ * @param buffersize size of a buffer
+ *
+ * @return SYS_ERR_OK on success, errval on failure
+ */
+errval_t net_buf_add(struct net_buf_pool *bp, struct capref frame, size_t buffersize)
+{
+ errval_t err;
+
+
+ struct net_buf_region *reg = calloc(1, sizeof(struct net_buf_region));
+ if (reg == NULL) {
+ return LIB_ERR_MALLOC_FAIL;
+ }
+
+ reg->buffer_size = ROUND_UP(buffersize, NETWORKING_BUFFER_ALIGN);
+ reg->framecap = frame;
+ reg->pool = bp;
+
+    err = invoke_frame_identify(reg->framecap, &reg->frame);
+ if (err_is_fail(err)) {
+ goto out_err1;
+ }
+
+ NETDEBUG("bp=%p, framesize=%zu kB, elementsize=%zu\n", bp,
+ reg->frame.bytes >> 10, buffersize);
+
+
+ size_t numbuf = reg->frame.bytes / reg->buffer_size;
+
+
+ reg->netbufs = calloc(numbuf, sizeof(struct net_buf_p));
+ if (reg->netbufs == NULL) {
+ err = LIB_ERR_MALLOC_FAIL;
+ goto out_err1;
+ }
+
+    err = vspace_map_one_frame_attr(&reg->vbase, reg->frame.bytes, reg->framecap,
+ NETWORKING_DEFAULT_BUFFER_FLAGS, NULL, NULL);
+ if (err_is_fail(err)) {
+ goto out_err2;
+ }
+
+ NETDEBUG("netbufs mapped at %p\n", reg->vbase);
+
+ if (bp->dev_q) {
+ debug_printf("netbuf: registering region with devq...\n");
+        err = devq_register(bp->dev_q, reg->framecap, &reg->regionid);
+        if (err_is_fail(err)) {
+            goto out_err2;
+        }
+ }
+ NETDEBUG("registered region with devq. pbase=%" PRIxGENPADDR ", regionid=%" PRIx32 "\n",
+ reg->frame.base, reg->regionid);
+ }
+
+ size_t offset = 0;
+ for (size_t i = 0; i < numbuf; i++) {
+        struct net_buf_p *nb = &reg->netbufs[i];
+
+ nb->offset = offset;
+ nb->vbase = reg->vbase + offset;
+ nb->region = reg;
+ nb->pbuf.custom_free_function = net_buf_free;
+
+ /* enqueue to freelist */
+ nb->pbuf.pbuf.next = bp->pbufs;
+ bp->pbufs = &nb->pbuf.pbuf;
+ bp->buffer_count++;
+ bp->buffer_free++;
+ offset += reg->buffer_size;
+ }
+
+ reg->next = bp->regions;
+ bp->regions = reg;
+
+ assert(bp->pbufs);
+
+ NETDEBUG("new region added to pool. free count: %zu / %zu\n",
+ bp->buffer_free, bp->buffer_count);
+
+ return SYS_ERR_OK;
+
+ out_err2:
+ free(reg->netbufs);
+ out_err1:
+ free(reg);
+
+ return err;
+}
+
+/**
+ * @brief grows the number of available buffers
+ *
+ * @param bp buffer pool to grow
+ * @param numbuf number of buffers to create
+ * @param size size of a buffer
+ *
+ * @return SYS_ERR_OK on success, errval on failure
+ */
+errval_t net_buf_grow(struct net_buf_pool *bp, size_t numbuf,
+ size_t size)
+{
+ errval_t err;
+
+ NETDEBUG("bp=%p, numbuf=%zu, size=%zu\n", bp, numbuf, size);
+
+ size = ROUND_UP(size, NETWORKING_BUFFER_ALIGN);
+
+ size_t alloc_size = ROUND_UP(numbuf * size, BASE_PAGE_SIZE);
+
+ NETDEBUG("allocate frame of %zu kB\n", alloc_size >> 10);
+
+ struct capref frame;
+ err = frame_alloc(&frame, alloc_size, &alloc_size);
+ if (err_is_fail(err)) {
+ return err;
+ }
+
+ err = net_buf_add(bp, frame, size);
+ if (err_is_fail(err)) {
+ cap_destroy(frame);
+ }
+
+ return err;
+}
+
+
+struct pbuf *net_buf_alloc(struct net_buf_pool *bp)
+{
+ if (bp->pbufs) {
+ struct net_buf_p *nb = (struct net_buf_p *)bp->pbufs;
+ bp->pbufs = bp->pbufs->next;
+ bp->buffer_free--;
+ struct pbuf* p;
+ p = pbuf_alloced_custom(PBUF_RAW, 0, PBUF_REF, &nb->pbuf,
+ nb->vbase, nb->region->buffer_size);
+
+ NETDEBUG("bp=%p, allocated pbuf=%p, free count %zu / %zu\n", bp, p,
+ bp->buffer_free, bp->buffer_count);
+ return p;
+ }
+
+ NETDEBUG("bp=%p has no free buffers. Free %zu / %zu\n", bp, bp->buffer_free,
+ bp->buffer_count);
+
+ return NULL;
+}
+
+void net_buf_free(struct pbuf *p)
+{
+ NETDEBUG("pbuf=%p\n", p);
+ // TODO sanity checks ?
+ struct net_buf_p *nb = (struct net_buf_p *)p;
+ struct net_buf_pool *bp = nb->region->pool;
+ nb->pbuf.pbuf.next = bp->pbufs;
+ bp->pbufs = &nb->pbuf.pbuf;
+ bp->buffer_free++;
+}
+
+struct pbuf *net_buf_get_by_region(struct net_buf_pool *bp,
+ uint32_t regionid, size_t offset)
+{
+ NETDEBUG("bp=%p, rid=%u, offset=%zu\n", bp, regionid, offset);
+
+ struct net_buf_region *reg = bp->regions;
+ while(reg) {
+ if (reg->regionid == regionid) {
+ /* found */
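+            /* buffers are laid out back-to-back at buffer_size strides, so the
+             * offset within the region directly yields the buffer index */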
+            if (offset >= reg->frame.bytes) {
+ return NULL;
+ }
+            return &reg->netbufs[offset / reg->buffer_size].pbuf.pbuf;
+ }
+ reg = reg->next;
+ }
+ return NULL;
+}
--- /dev/null
+/*
+ * Copyright (c) 2017, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Universitaetsstrasse 6, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <barrelfish/barrelfish.h>
+#include <devif/queue_interface.h>
+#include <net_interfaces/flags.h>
+
+
+#include <lwip/opt.h>
+#include <lwip/netif.h>
+#include "include/net/netif.h"
+
+#include <netif/etharp.h>
+
+#include "networking_internal.h"
+
+
+#define NETDEBUG_SUBSYSTEM "net_if"
+
+
+///< the default MTU for the net interfaces
+#define NET_IF__MTU 1500
+
+///< the networking interface flags
+#define NETWORKING_NETIF_FLAGS \
+    (NETIF_FLAG_BROADCAST | NETIF_FLAG_ETHARP | NETIF_FLAG_ETHERNET)
+
+///< the network interface name
+#define NET_IF__NAME0 'e'
+#define NET_IF__NAME1 'n'
+
+
+
+static err_t net_if_linkoutput(struct netif *netif, struct pbuf *p)
+{
+ errval_t err;
+ err = net_if_add_tx_buf(netif, p);
+ if (err_is_fail(err)) {
+ return ERR_IF;
+ }
+
+ return ERR_OK;
+}
+
+
+
+static void net_if_status_cb(struct netif *netif)
+{
+ debug_printf("netif status changed %s\n", ip4addr_ntoa(netif_ip4_addr(netif)));
+}
+
+
+static err_t netif_init_cb(struct netif *netif)
+{
+
+    netif->flags = NETWORKING_NETIF_FLAGS;
+ netif->mtu = NET_IF__MTU;
+ netif_set_status_callback(netif, net_if_status_cb);
+ netif_set_up(netif);
+ netif_set_link_up(netif);
+
+ return ERR_OK;
+}
+
+
+/*
+ * ===============================================================================
+ * Network Interface Management
+ * ===============================================================================
+ */
+
+
+/**
+ * @brief initializes a netif structure for LWIP with a device queue
+ *
+ * @param netif the netif to be initialized
+ * @param devq the device queue to be used
+ *
+ * @return SYS_ERR_OK on success, errval on failure
+ */
+errval_t net_if_init_devq(struct netif *netif, struct devq *devq)
+{
+ errval_t err;
+
+ NETDEBUG("netif=%p, devq=%p\n", netif, devq);
+
+ netif->hwaddr_len = ETHARP_HWADDR_LEN;
+
+ // obtain the mac address
+ err = networking_get_mac(devq, netif->hwaddr, netif->hwaddr_len);
+ if (err_is_fail(err)) {
+ return err;
+ }
+
+ /* set the output functions */
+ netif->output = etharp_output;
+ netif->linkoutput = net_if_linkoutput;
+
+ /* set the interface name */
+ netif->name[0] = NET_IF__NAME0;
+ netif->name[1] = NET_IF__NAME1;
+
+ return SYS_ERR_OK;
+}
+
+
+/**
+ * @brief adds the netif to LWIP and assigns a (static) IP configuration
+ *
+ * @param netif the netif to be added
+ * @param st    opaque state pointer passed to LWIP
+ *
+ * @return SYS_ERR_OK on success, errval on failure
+ */
+errval_t net_if_add(struct netif *netif, void *st)
+{
+ NETDEBUG("netif=%p, state=%p\n", netif, st);
+
+ /* TODO: use something sensible here ?? -> RUN DHCP*/
+ ip4_addr_t ipaddr, netmask, gw;
+ IP4_ADDR(&gw, 192,168,0,1);
+ IP4_ADDR(&ipaddr, 192,168,0,2);
+ IP4_ADDR(&netmask, 255,255,255,0);
+
+ netif_add(netif, &ipaddr, &netmask, &gw, st,
+ netif_init_cb, netif_input);
+
+ return SYS_ERR_OK;
+}
+
+
+/**
+ * @brief removes a network interface
+ *
+ * @param netif the LWIP netif
+ *
+ * @return SYS_ERR_OK on success, errval on failure
+ */
+errval_t net_if_remove(struct netif *netif)
+{
+ NETDEBUG("netif=%p\n", netif);
+
+ /* TODO: need other things to do here ? */
+ netif_remove(netif);
+
+ return SYS_ERR_OK;
+}
+
+
+/*
+ * ===============================================================================
+ * Buffer Management
+ * ===============================================================================
+ */
+
+
+/**
+ * @brief adds a new receive buffer to the interface
+ *
+ * @param netif the LWIP netif
+ * @param pbuf packet buffer to be added
+ *
+ * @return SYS_ERR_OK on success, errval on failure
+ */
+errval_t net_if_add_rx_buf(struct netif *netif, struct pbuf *pbuf)
+{
+ struct net_state *st = netif->state;
+ struct net_buf_p *nb = (struct net_buf_p *)pbuf;
+
+ NETDEBUG("netif=%p <- pbuf=%p (reg=%u, offset=%" PRIxLPADDR ")\n", netif,
+ pbuf, nb->region->regionid, nb->offset);
+
+ return devq_enqueue(st->queue, nb->region->regionid, nb->offset,
+ nb->region->buffer_size, 0, nb->region->buffer_size,
+ NETIF_RXFLAG);
+}
+
+/**
+ * @brief adds a new transmit buffer to the interface
+ *
+ * @param netif the LWIP netif
+ * @param pbuf packet buffer to be transmitted
+ *
+ * @return SYS_ERR_OK on success, errval on failure
+ */
+errval_t net_if_add_tx_buf(struct netif *netif, struct pbuf *pbuf)
+{
+ errval_t err;
+
+ struct net_state *st = netif->state;
+ struct net_buf_p *nb = (struct net_buf_p *)pbuf;
+
+ NETDEBUG("netif=%p <- pbuf=%p (reg=%u, offset=%" PRIxLPADDR ")\n", netif,
+ pbuf, nb->region->regionid, nb->offset);
+
+ LINK_STATS_INC(link.xmit);
+
+    uint64_t flags = NETIF_TXFLAG;
+    for (struct pbuf *tmpp = pbuf; tmpp != NULL; tmpp = tmpp->next) {
+        pbuf_ref(tmpp);
+
+        if (tmpp->next == NULL) {
+            flags |= NETIF_TXFLAG_LAST;
+        }
+
+        /* each pbuf in the chain is backed by its own net buffer */
+        nb = (struct net_buf_p *)tmpp;
+        err = devq_enqueue(st->queue, nb->region->regionid, nb->offset, tmpp->len, 0,
+                           tmpp->len, flags);
+        if (err_is_fail(err)) {
+            return err;
+        }
+    }
+
+ return SYS_ERR_OK;
+}
+
+
+/*
+ * ===============================================================================
+ * Polling the interfaces
+ * ===============================================================================
+ */
+
+
+#define NET_IF_POLL_MAX 100
+
+/**
+ * @brief polls the network interface for new incoming packets
+ *
+ * @param netif the LWIP netif to be polled
+ *
+ * @return SYS_ERR_OK on success, errval on failure
+ */
+errval_t net_if_poll(struct netif *netif)
+{
+ NETDEBUG("netif=%p\n", netif);
+
+ errval_t err;
+
+ struct net_state *st = netif->state;
+ if (st == NULL) {
+ /* XXX: return an error code ?? */
+ return SYS_ERR_OK;
+ }
+
+ for (int i = 0; i < NET_IF_POLL_MAX; i++) {
+ struct devq_buf buf;
+ err = devq_dequeue(st->queue, &buf.rid, &buf.offset, &buf.length,
+ &buf.valid_data, &buf.valid_length, &buf.flags);
+ if (err_is_fail(err)) {
+ NETDEBUG("netif=%p, polling %u/%u: %s\n", netif, i, NET_IF_POLL_MAX,
+ err_getstring(err));
+ if (err_no(err) == DEVQ_ERR_QUEUE_EMPTY) {
+ return SYS_ERR_OK;
+ }
+ return err;
+ }
+
+ struct pbuf *p = net_buf_get_by_region(st->pool, buf.rid, buf.offset);
+ if (p == NULL) {
+ NETDEBUG("netif=%p, polling %u/%u. ERROR. No PBUF found for rid=%u, "
+ "offset=%"PRIxLPADDR "\n", netif, i, NET_IF_POLL_MAX,
+ buf.rid, buf.offset);
+ continue;
+ }
+
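+        /* a buffer dequeued with NETIF_TXFLAG is a transmit completion and is
+         * returned to the pool; NETIF_RXFLAG marks a received frame */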
+ if (buf.flags & NETIF_TXFLAG) {
+ NETDEBUG("netif=%p, polling %u/%u. TX done of pbuf=%p (rid=%u, "
+ "offset=%"PRIxLPADDR ")\n", netif, i, NET_IF_POLL_MAX,
+ p, buf.rid, buf.offset);
+ net_buf_free(p);
+ continue;
+ }
+
+ if (buf.flags & NETIF_RXFLAG) {
+ NETDEBUG("netif=%p, polling %u/%u. RX done of pbuf=%p (rid=%u, "
+ "offset=%"PRIxLPADDR ")\n", netif, i, NET_IF_POLL_MAX,
+ p, buf.rid, buf.offset);
+
+#if 1
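+            /* XXX: test scaffolding: the headers of the received buffer are
+             * overwritten with a synthetic Ethernet/IPv4/UDP header (dest port
+             * 7, fixed 64-byte payload) before the frame is handed to LWIP */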
+#include <lwip/pbuf.h>
+#include <lwip/prot/ethernet.h>
+#include <lwip/prot/ip.h>
+ #include <lwip/prot/ip4.h>
+#include <lwip/prot/udp.h>
+ p->len = 64 + SIZEOF_ETH_HDR + IP_HLEN + UDP_HLEN; //buf.valid_length;
+ p->tot_len = p->len;
+
+ struct eth_hdr* ethhdr = p->payload;
+ struct ip_hdr *iphdr = p->payload + SIZEOF_ETH_HDR;
+ struct udp_hdr *udphdr = p->payload + SIZEOF_ETH_HDR + IP_HLEN;
+
+ memset(ethhdr->dest.addr, 0xaa, sizeof(ethhdr->dest.addr));
+ memset(ethhdr->src.addr, 0xbb, sizeof(ethhdr->src.addr));
+ ethhdr->type = PP_HTONS(ETHTYPE_IP);
+
+ iphdr->_len = lwip_htons(64 + UDP_HLEN + IP_HLEN);
+ IPH_VHL_SET(iphdr, 4, 5);
+
+ IP4_ADDR(&(iphdr->dest), 192,168,0,2);
+ IP4_ADDR(&(iphdr->src), 192,168,0,3);
+
+ iphdr->_proto = IP_PROTO_UDP;
+
+ udphdr->dest = PP_HTONS(7);
+ udphdr->src = PP_HTONS(11);
+ udphdr->len = PP_HTONS(64 + UDP_HLEN);
+
+ netif_input(p, &st->netif);
+
+#endif
+ /* XXX: do this at another time ? */
+            p = net_buf_alloc(st->pool);
+            if (p != NULL) {
+                net_if_add_rx_buf(&st->netif, p);
+            }
+ }
+ }
+
+
+
+ return SYS_ERR_OK;
+}
+
+/**
+ * @brief polls all added network interfaces
+ *
+ * @return SYS_ERR_OK on success, errval on failure
+ */
+errval_t net_if_poll_all(void)
+{
+ NETDEBUG("polling all interfaces\n");
+
+ errval_t err;
+ struct netif *netif = netif_list;
+    while (netif) {
+        err = net_if_poll(netif);
+        if (err_is_fail(err)) {
+            DEBUG_ERR(err, "failed to poll network interface");
+        }
+        netif = netif->next;
+    }
+ return SYS_ERR_OK;
+}
+
+
--- /dev/null
+/**
+ * @brief internal definitions of the networking library (networking_internal.h)
+ */
+
+/*
+ * Copyright (c) 2017, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Universitaetsstrasse 6, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+
+#ifndef LIB_NET_INCLUDE_NETWORKING_INTERNAL_H_
+#define LIB_NET_INCLUDE_NETWORKING_INTERNAL_H_
+
+
+#include <barrelfish/barrelfish.h>
+
+#include <devif/queue_interface.h>
+#include <devif/backends/loopback_devif.h>
+
+#include <lwip/netif.h>
+
+#include <net/net.h>
+#include <net/netbufs.h>
+#include <net/netif.h>
+
+#include "debug.h"
+
+/**
+ * @brief encapsulates the state of the networking library
+ */
+struct net_state {
+ uint64_t queueid;
+ char *cardname;
+ bool initialized;
+ struct devq *queue;
+ struct net_buf_pool *pool;
+ struct netif netif;
+
+ // ip4_addr_t ipaddr, netmask, gw;
+};
+
+extern struct net_state state;
+
+
+struct net_buf_pool;
+
+struct net_buf_p
+{
+ struct pbuf_custom pbuf;
+ lpaddr_t offset;
+ void *vbase;
+ struct net_buf_region *region;
+};
+
+struct net_buf_region
+{
+ void *vbase;
+ struct frame_identity frame;
+ struct capref framecap;
+ struct net_buf_region *next;
+ size_t buffer_size;
+ regionid_t regionid;
+ struct net_buf_pool *pool;
+ struct net_buf_p *netbufs; /// array of netbufs
+};
+
+struct net_buf_pool
+{
+ struct net_buf_region *regions;
+ struct devq *dev_q;
+ struct pbuf *pbufs;
+ // stats
+ size_t buffer_count;
+ size_t buffer_free;
+
+};
+
+#endif /* LIB_NET_INCLUDE_NETWORKING_INTERNAL_H_ */
--- /dev/null
+/**
+ * @file
+ * Packet buffer management
+ */
+
+
+/*
+ * Copyright (c) 2017, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Universitaetsstrasse 6, CH-8092 Zurich. Attn: Systems Group.
+ *
+ *
+ * This file contains modified pbuf functions:
+ *
+ * pbuf_alloced_custom
+ * pbuf_alloc
+ * pbuf_realloc
+ * pbuf_free
+ *
+ */
+
+
+
+
+/**
+ * @defgroup pbuf Packet buffers (PBUF)
+ * @ingroup infrastructure
+ *
+ * Packets are built from the pbuf data structure. It supports dynamic
+ * memory allocation for packet contents or can reference externally
+ * managed packet contents both in RAM and ROM. Quick allocation for
+ * incoming packets is provided through pools with fixed sized pbufs.
+ *
+ * A packet may span over multiple pbufs, chained as a singly linked
+ * list. This is called a "pbuf chain".
+ *
+ * Multiple packets may be queued, also using this singly linked list.
+ * This is called a "packet queue".
+ *
+ * So, a packet queue consists of one or more pbuf chains, each of
+ * which consist of one or more pbufs. CURRENTLY, PACKET QUEUES ARE
+ * NOT SUPPORTED!!! Use helper structs to queue multiple packets.
+ *
+ * The differences between a pbuf chain and a packet queue are very
+ * precise but subtle.
+ *
+ * The last pbuf of a packet has a ->tot_len field that equals the
+ * ->len field. It can be found by traversing the list. If the last
+ * pbuf of a packet has a ->next field other than NULL, more packets
+ * are on the queue.
+ *
+ * Therefore, looping through a pbuf of a single packet, has an
+ * loop end condition (tot_len == p->len), NOT (next == NULL).
+ *
+ * Example of custom pbuf usage for zero-copy RX:
+ @code{.c}
+typedef struct my_custom_pbuf
+{
+ struct pbuf_custom p;
+ void* dma_descriptor;
+} my_custom_pbuf_t;
+
+LWIP_MEMPOOL_DECLARE(RX_POOL, 10, sizeof(my_custom_pbuf_t), "Zero-copy RX PBUF pool");
+
+void my_pbuf_free_custom(void* p)
+{
+  my_custom_pbuf_t* my_pbuf = (my_custom_pbuf_t*)p;
+
+ LOCK_INTERRUPTS();
+ free_rx_dma_descriptor(my_pbuf->dma_descriptor);
+ LWIP_MEMPOOL_FREE(RX_POOL, my_pbuf);
+ UNLOCK_INTERRUPTS();
+}
+
+void eth_rx_irq()
+{
+ dma_descriptor* dma_desc = get_RX_DMA_descriptor_from_ethernet();
+ my_custom_pbuf_t* my_pbuf = (my_custom_pbuf_t*)LWIP_MEMPOOL_ALLOC(RX_POOL);
+
+ my_pbuf->p.custom_free_function = my_pbuf_free_custom;
+ my_pbuf->dma_descriptor = dma_desc;
+
+ invalidate_cpu_cache(dma_desc->rx_data, dma_desc->rx_length);
+
+ struct pbuf* p = pbuf_alloced_custom(PBUF_RAW,
+ dma_desc->rx_length,
+ PBUF_REF,
+ &my_pbuf->p,
+ dma_desc->rx_data,
+ dma_desc->max_buffer_size);
+
+ if(netif->input(p, netif) != ERR_OK) {
+ pbuf_free(p);
+ }
+}
+ @endcode
+ */
+
+/*
+ * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Adam Dunkels <adam@sics.se>
+ *
+ *
+ *
+ *
+ */
+
+#include "lwip/opt.h"
+
+#include "lwip/stats.h"
+#include "lwip/def.h"
+#include "lwip/mem.h"
+#include "lwip/memp.h"
+#include "lwip/pbuf.h"
+#include "lwip/sys.h"
+#if LWIP_TCP && TCP_QUEUE_OOSEQ
+#include "lwip/priv/tcp_priv.h"
+#endif
+#if LWIP_CHECKSUM_ON_COPY
+#include "lwip/inet_chksum.h"
+#endif
+
+#include "networking_internal.h"
+
+#include <string.h>
+
+#define SIZEOF_STRUCT_PBUF LWIP_MEM_ALIGN_SIZE(sizeof(struct pbuf))
+/* Since the pool is created in memp, PBUF_POOL_BUFSIZE will be automatically
+ aligned there. Therefore, PBUF_POOL_BUFSIZE_ALIGNED can be used here. */
+#define PBUF_POOL_BUFSIZE_ALIGNED LWIP_MEM_ALIGN_SIZE(PBUF_POOL_BUFSIZE)
+
+#if !LWIP_TCP || !TCP_QUEUE_OOSEQ || !PBUF_POOL_FREE_OOSEQ
+#define PBUF_POOL_IS_EMPTY()
+#else /* !LWIP_TCP || !TCP_QUEUE_OOSEQ || !PBUF_POOL_FREE_OOSEQ */
+
+#if !NO_SYS
+#ifndef PBUF_POOL_FREE_OOSEQ_QUEUE_CALL
+#include "lwip/tcpip.h"
+#define PBUF_POOL_FREE_OOSEQ_QUEUE_CALL() do { \
+ if (tcpip_callback_with_block(pbuf_free_ooseq_callback, NULL, 0) != ERR_OK) { \
+ SYS_ARCH_PROTECT(old_level); \
+ pbuf_free_ooseq_pending = 0; \
+ SYS_ARCH_UNPROTECT(old_level); \
+ } } while(0)
+#endif /* PBUF_POOL_FREE_OOSEQ_QUEUE_CALL */
+#endif /* !NO_SYS */
+
+volatile u8_t pbuf_free_ooseq_pending;
+#define PBUF_POOL_IS_EMPTY() pbuf_pool_is_empty()
+
+/**
+ * Attempt to reclaim some memory from queued out-of-sequence TCP segments
+ * if we run out of pool pbufs. It's better to give priority to new packets
+ * if we're running out.
+ *
+ * This must be done in the correct thread context therefore this function
+ * can only be used with NO_SYS=0 and through tcpip_callback.
+ */
+#if !NO_SYS
+static
+#endif /* !NO_SYS */
+void
+pbuf_free_ooseq(void)
+{
+ struct tcp_pcb* pcb;
+ SYS_ARCH_SET(pbuf_free_ooseq_pending, 0);
+
+ for (pcb = tcp_active_pcbs; NULL != pcb; pcb = pcb->next) {
+ if (NULL != pcb->ooseq) {
+ /** Free the ooseq pbufs of one PCB only */
+ LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free_ooseq: freeing out-of-sequence pbufs\n"));
+ tcp_segs_free(pcb->ooseq);
+ pcb->ooseq = NULL;
+ return;
+ }
+ }
+}
+
+#if !NO_SYS
+/**
+ * Just a callback function for tcpip_callback() that calls pbuf_free_ooseq().
+ */
+static void
+pbuf_free_ooseq_callback(void *arg)
+{
+ LWIP_UNUSED_ARG(arg);
+ pbuf_free_ooseq();
+}
+#endif /* !NO_SYS */
+
+/** Queue a call to pbuf_free_ooseq if not already queued. */
+static void
+pbuf_pool_is_empty(void)
+{
+#ifndef PBUF_POOL_FREE_OOSEQ_QUEUE_CALL
+ SYS_ARCH_SET(pbuf_free_ooseq_pending, 1);
+#else /* PBUF_POOL_FREE_OOSEQ_QUEUE_CALL */
+ u8_t queued;
+ SYS_ARCH_DECL_PROTECT(old_level);
+ SYS_ARCH_PROTECT(old_level);
+ queued = pbuf_free_ooseq_pending;
+ pbuf_free_ooseq_pending = 1;
+ SYS_ARCH_UNPROTECT(old_level);
+
+ if (!queued) {
+ /* queue a call to pbuf_free_ooseq if not already queued */
+ PBUF_POOL_FREE_OOSEQ_QUEUE_CALL();
+ }
+#endif /* PBUF_POOL_FREE_OOSEQ_QUEUE_CALL */
+}
+#endif /* !LWIP_TCP || !TCP_QUEUE_OOSEQ || !PBUF_POOL_FREE_OOSEQ */
+
+
+/**
+ * @ingroup pbuf
+ * Allocates a pbuf of the given type (possibly a chain for PBUF_POOL type).
+ *
+ * The actual memory allocated for the pbuf is determined by the
+ * layer at which the pbuf is allocated and the requested size
+ * (from the size parameter).
+ *
+ * @param layer flag to define header size
+ * @param length size of the pbuf's payload
+ * @param type this parameter decides how and where the pbuf
+ * should be allocated as follows:
+ *
+ * - PBUF_RAM: buffer memory for pbuf is allocated as one large
+ * chunk. This includes protocol headers as well.
+ * - PBUF_ROM: no buffer memory is allocated for the pbuf, even for
+ * protocol headers. Additional headers must be prepended
+ * by allocating another pbuf and chain in to the front of
+ * the ROM pbuf. It is assumed that the memory used is really
+ * similar to ROM in that it is immutable and will not be
+ * changed. Memory which is dynamic should generally not
+ * be attached to PBUF_ROM pbufs. Use PBUF_REF instead.
+ * - PBUF_REF: no buffer memory is allocated for the pbuf, even for
+ * protocol headers. It is assumed that the pbuf is only
+ * being used in a single thread. If the pbuf gets queued,
+ * then pbuf_take should be called to copy the buffer.
+ * - PBUF_POOL: the pbuf is allocated as a pbuf chain, with pbufs from
+ * the pbuf pool that is allocated during pbuf_init().
+ *
+ * @return the allocated pbuf. If multiple pbufs where allocated, this
+ * is the first pbuf of a pbuf chain.
+ */
+struct pbuf *
+pbuf_alloc(pbuf_layer layer, u16_t length, pbuf_type type)
+{
+ struct pbuf *p, *q, *r;
+ u16_t offset;
+ s32_t rem_len; /* remaining length */
+ LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloc(length=%"U16_F")\n", length));
+
+ /* determine header offset */
+ switch (layer) {
+ case PBUF_TRANSPORT:
+ /* add room for transport (often TCP) layer header */
+ offset = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN + PBUF_TRANSPORT_HLEN;
+ break;
+ case PBUF_IP:
+ /* add room for IP layer header */
+ offset = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN;
+ break;
+ case PBUF_LINK:
+ /* add room for link layer header */
+ offset = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN;
+ break;
+ case PBUF_RAW_TX:
+ /* add room for encapsulating link layer headers (e.g. 802.11) */
+ offset = PBUF_LINK_ENCAPSULATION_HLEN;
+ break;
+ case PBUF_RAW:
+ /* no offset (e.g. RX buffers or chain successors) */
+ offset = 0;
+ break;
+ default:
+ LWIP_ASSERT("pbuf_alloc: bad pbuf layer", 0);
+ return NULL;
+ }
+
+ switch (type) {
+ case PBUF_POOL:
+ /* allocate head of pbuf chain into p */
+
+ p = net_buf_alloc(state.pool);
+ LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloc: allocated pbuf %p\n", (void *)p));
+ if (p == NULL) {
+ PBUF_POOL_IS_EMPTY();
+ return NULL;
+ }
+ p->type = type;
+ p->next = NULL;
+
+ /* make the payload pointer point 'offset' bytes into pbuf data memory */
+ p->payload = LWIP_MEM_ALIGN((void *)((u8_t *)p->payload + offset));
+ LWIP_ASSERT("pbuf_alloc: pbuf p->payload properly aligned",
+ ((mem_ptr_t)p->payload % MEM_ALIGNMENT) == 0);
+ /* the total length of the pbuf chain is the requested size */
+ p->tot_len = length;
+ /* set the length of the first pbuf in the chain */
+ p->len = LWIP_MIN(length, PBUF_POOL_BUFSIZE_ALIGNED - LWIP_MEM_ALIGN_SIZE(offset));
+ LWIP_ASSERT("check p->payload + p->len does not overflow pbuf",
+ ((u8_t*)p->payload + p->len <=
+ (u8_t*)p + SIZEOF_STRUCT_PBUF + PBUF_POOL_BUFSIZE_ALIGNED));
+ LWIP_ASSERT("PBUF_POOL_BUFSIZE must be bigger than MEM_ALIGNMENT",
+ (PBUF_POOL_BUFSIZE_ALIGNED - LWIP_MEM_ALIGN_SIZE(offset)) > 0 );
+ /* set reference count (needed here in case we fail) */
+ p->ref = 1;
+
+ /* now allocate the tail of the pbuf chain */
+
+ /* remember first pbuf for linkage in next iteration */
+ r = p;
+ /* remaining length to be allocated */
+ rem_len = length - p->len;
+ /* any remaining pbufs to be allocated? */
+ while (rem_len > 0) {
+ q = net_buf_alloc(state.pool);
+ if (q == NULL) {
+ PBUF_POOL_IS_EMPTY();
+ /* free chain so far allocated */
+ pbuf_free(p);
+ /* bail out unsuccessfully */
+ return NULL;
+ }
+ q->type = type;
+ q->flags = 0;
+ q->next = NULL;
+ /* make previous pbuf point to this pbuf */
+ r->next = q;
+ /* set total length of this pbuf and next in chain */
+ LWIP_ASSERT("rem_len < max_u16_t", rem_len < 0xffff);
+ q->tot_len = (u16_t)rem_len;
+ /* this pbuf length is pool size, unless smaller sized tail */
+ q->len = LWIP_MIN((u16_t)rem_len, PBUF_POOL_BUFSIZE_ALIGNED);
+ LWIP_ASSERT("pbuf_alloc: pbuf q->payload properly aligned",
+ ((mem_ptr_t)q->payload % MEM_ALIGNMENT) == 0);
+ LWIP_ASSERT("check p->payload + p->len does not overflow pbuf",
+ ((u8_t*)p->payload + p->len <=
+ (u8_t*)p + SIZEOF_STRUCT_PBUF + PBUF_POOL_BUFSIZE_ALIGNED));
+ q->ref = 1;
+ /* calculate remaining length to be allocated */
+ rem_len -= q->len;
+ /* remember this pbuf for linkage in next iteration */
+ r = q;
+ }
+ /* end of chain */
+ /*r->next = NULL;*/
+
+ break;
+ case PBUF_RAM:
+ {
+ mem_size_t alloc_len = LWIP_MEM_ALIGN_SIZE(SIZEOF_STRUCT_PBUF + offset) + LWIP_MEM_ALIGN_SIZE(length);
+
+ /* bug #50040: Check for integer overflow when calculating alloc_len */
+ if (alloc_len < LWIP_MEM_ALIGN_SIZE(length)) {
+ return NULL;
+ }
+
+ /* If pbuf is to be allocated in RAM, allocate memory for it. */
+ p = net_buf_alloc(state.pool);
+ }
+
+ if (p == NULL) {
+ return NULL;
+ }
+ /* Set up internal structure of the pbuf. */
+ p->payload = LWIP_MEM_ALIGN((void *)((u8_t *)p->payload + offset));
+ p->len = p->tot_len = length;
+ p->next = NULL;
+ p->type = type;
+
+ LWIP_ASSERT("pbuf_alloc: pbuf->payload properly aligned",
+ ((mem_ptr_t)p->payload % MEM_ALIGNMENT) == 0);
+ break;
+ /* pbuf references existing (non-volatile static constant) ROM payload? */
+ case PBUF_ROM:
+ /* pbuf references existing (externally allocated) RAM payload? */
+ case PBUF_REF:
+ /* only allocate memory for the pbuf structure */
+ p = (struct pbuf *)memp_malloc(MEMP_PBUF);
+ if (p == NULL) {
+ LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_LEVEL_SERIOUS,
+ ("pbuf_alloc: Could not allocate MEMP_PBUF for PBUF_%s.\n",
+ (type == PBUF_ROM) ? "ROM" : "REF"));
+ return NULL;
+ }
+ /* caller must set this field properly, afterwards */
+ p->payload = NULL;
+ p->len = p->tot_len = length;
+ p->next = NULL;
+ p->type = type;
+ break;
+ default:
+ LWIP_ASSERT("pbuf_alloc: erroneous type", 0);
+ return NULL;
+ }
+ /* set reference count */
+ p->ref = 1;
+ /* set flags */
+ LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloc(length=%"U16_F") == %p\n", length, (void *)p));
+ return p;
+}
+
+#if LWIP_SUPPORT_CUSTOM_PBUF
+/**
+ * @ingroup pbuf
+ * Initialize a custom pbuf (already allocated).
+ *
+ * @param l flag to define header size
+ * @param length size of the pbuf's payload
+ * @param type type of the pbuf (only used to treat the pbuf accordingly, as
+ * this function allocates no memory)
+ * @param p pointer to the custom pbuf to initialize (already allocated)
+ * @param payload_mem pointer to the buffer that is used for payload and headers,
+ * must be at least big enough to hold 'length' plus the header size,
+ * may be NULL if set later.
+ * ATTENTION: The caller is responsible for correct alignment of this buffer!!
+ * @param payload_mem_len the size of the 'payload_mem' buffer, must be at least
+ * big enough to hold 'length' plus the header size
+ */
+struct pbuf*
+pbuf_alloced_custom(pbuf_layer l, u16_t length, pbuf_type type, struct pbuf_custom *p,
+ void *payload_mem, u16_t payload_mem_len)
+{
+ u16_t offset;
+ LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloced_custom(length=%"U16_F")\n", length));
+
+ /* determine header offset */
+ switch (l) {
+ case PBUF_TRANSPORT:
+ /* add room for transport (often TCP) layer header */
+ offset = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN + PBUF_TRANSPORT_HLEN;
+ break;
+ case PBUF_IP:
+ /* add room for IP layer header */
+ offset = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN;
+ break;
+ case PBUF_LINK:
+ /* add room for link layer header */
+ offset = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN;
+ break;
+ case PBUF_RAW_TX:
+ /* add room for encapsulating link layer headers (e.g. 802.11) */
+ offset = PBUF_LINK_ENCAPSULATION_HLEN;
+ break;
+ case PBUF_RAW:
+ offset = 0;
+ break;
+ default:
+ LWIP_ASSERT("pbuf_alloced_custom: bad pbuf layer", 0);
+ return NULL;
+ }
+
+ if (LWIP_MEM_ALIGN_SIZE(offset) + length > payload_mem_len) {
+ LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_LEVEL_WARNING, ("pbuf_alloced_custom(length=%"U16_F") buffer too short\n", length));
+ return NULL;
+ }
+
+ p->pbuf.next = NULL;
+ if (payload_mem != NULL) {
+ p->pbuf.payload = (u8_t *)payload_mem + LWIP_MEM_ALIGN_SIZE(offset);
+ } else {
+ p->pbuf.payload = NULL;
+ }
+ p->pbuf.flags = PBUF_FLAG_IS_CUSTOM;
+ p->pbuf.len = p->pbuf.tot_len = length;
+ p->pbuf.type = type;
+ p->pbuf.ref = 1;
+ return &p->pbuf;
+}
+#endif /* LWIP_SUPPORT_CUSTOM_PBUF */
+
+/**
+ * @ingroup pbuf
+ * Shrink a pbuf chain to a desired length.
+ *
+ * @param p pbuf to shrink.
+ * @param new_len desired new length of pbuf chain
+ *
+ * Depending on the desired length, the first few pbufs in a chain might
+ * be skipped and left unchanged. The new last pbuf in the chain will be
+ * resized, and any remaining pbufs will be freed.
+ *
+ * @note If the pbuf is ROM/REF, only the ->tot_len and ->len fields are adjusted.
+ * @note May not be called on a packet queue.
+ *
+ * @note Despite its name, pbuf_realloc cannot grow the size of a pbuf (chain).
+ */
+void
+pbuf_realloc(struct pbuf *p, u16_t new_len)
+{
+ struct pbuf *q;
+ u16_t rem_len; /* remaining length */
+ s32_t grow;
+
+ LWIP_ASSERT("pbuf_realloc: p != NULL", p != NULL);
+ LWIP_ASSERT("pbuf_realloc: sane p->type", p->type == PBUF_POOL ||
+ p->type == PBUF_ROM ||
+ p->type == PBUF_RAM ||
+ p->type == PBUF_REF);
+
+ /* desired length larger than current length? */
+ if (new_len >= p->tot_len) {
+ /* enlarging not yet supported */
+ return;
+ }
+
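+  /* XXX: buffers come from the fixed-size net_buf pool, so in-place shrinking
+   * is skipped entirely; the length fields stay unchanged and the original
+   * lwIP trimming code below is never reached */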
+ return;
+
+ /* the pbuf chain grows by (new_len - p->tot_len) bytes
+ * (which may be negative in case of shrinking) */
+ grow = new_len - p->tot_len;
+
+ /* first, step over any pbufs that should remain in the chain */
+ rem_len = new_len;
+ q = p;
+ /* should this pbuf be kept? */
+ while (rem_len > q->len) {
+ /* decrease remaining length by pbuf length */
+ rem_len -= q->len;
+ /* decrease total length indicator */
+ LWIP_ASSERT("grow < max_u16_t", grow < 0xffff);
+ q->tot_len += (u16_t)grow;
+ /* proceed to next pbuf in chain */
+ q = q->next;
+ LWIP_ASSERT("pbuf_realloc: q != NULL", q != NULL);
+ }
+ /* we have now reached the new last pbuf (in q) */
+ /* rem_len == desired length for pbuf q */
+
+ /* shrink allocated memory for PBUF_RAM */
+ /* (other types merely adjust their length fields */
+ if ((q->type == PBUF_RAM) && (rem_len != q->len)
+#if LWIP_SUPPORT_CUSTOM_PBUF
+ && ((q->flags & PBUF_FLAG_IS_CUSTOM) == 0)
+#endif /* LWIP_SUPPORT_CUSTOM_PBUF */
+ ) {
+ USER_PANIC("SHOULD Not trim memory\n");
+ /* reallocate and adjust the length of the pbuf that will be split */
+ q = (struct pbuf *)mem_trim(q, (u16_t)((u8_t *)q->payload - (u8_t *)q) + rem_len);
+ LWIP_ASSERT("mem_trim returned q == NULL", q != NULL);
+ }
+ /* adjust length fields for new last pbuf */
+ q->len = rem_len;
+ q->tot_len = q->len;
+
+ /* any remaining pbufs in chain? */
+ if (q->next != NULL) {
+ /* free remaining pbufs in chain */
+ pbuf_free(q->next);
+ }
+ /* q is last packet in chain */
+ q->next = NULL;
+}
+
+
+/**
+ * @ingroup pbuf
+ * Dereference a pbuf chain or queue and deallocate any no-longer-used
+ * pbufs at the head of this chain or queue.
+ *
+ * Decrements the pbuf reference count. If it reaches zero, the pbuf is
+ * deallocated.
+ *
+ * For a pbuf chain, this is repeated for each pbuf in the chain,
+ * up to the first pbuf which has a non-zero reference count after
+ * decrementing. So, when all reference counts are one, the whole
+ * chain is free'd.
+ *
+ * @param p The pbuf (chain) to be dereferenced.
+ *
+ * @return the number of pbufs that were de-allocated
+ * from the head of the chain.
+ *
+ * @note MUST NOT be called on a packet queue (Not verified to work yet).
+ * @note the reference counter of a pbuf equals the number of pointers
+ * that refer to the pbuf (or into the pbuf).
+ *
+ * @internal examples:
+ *
+ * Assuming existing chains a->b->c with the following reference
+ * counts, calling pbuf_free(a) results in:
+ *
+ * 1->2->3 becomes ...1->3
+ * 3->3->3 becomes 2->3->3
+ * 1->1->2 becomes ......1
+ * 2->1->1 becomes 1->1->1
+ * 1->1->1 becomes .......
+ *
+ */
+u8_t
+pbuf_free(struct pbuf *p)
+{
+ u16_t type;
+ struct pbuf *q;
+ u8_t count;
+
+ if (p == NULL) {
+ LWIP_ASSERT("p != NULL", p != NULL);
+ /* if assertions are disabled, proceed with debug output */
+ LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_LEVEL_SERIOUS,
+ ("pbuf_free(p == NULL) was called.\n"));
+ return 0;
+ }
+ LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free(%p)\n", (void *)p));
+
+ PERF_START;
+
+ LWIP_ASSERT("pbuf_free: sane type",
+ p->type == PBUF_RAM || p->type == PBUF_ROM ||
+ p->type == PBUF_REF || p->type == PBUF_POOL);
+
+ count = 0;
+ /* de-allocate all consecutive pbufs from the head of the chain that
+ * obtain a zero reference count after decrementing*/
+ while (p != NULL) {
+ u16_t ref;
+ SYS_ARCH_DECL_PROTECT(old_level);
+ /* Since decrementing ref cannot be guaranteed to be a single machine operation
+ * we must protect it. We put the new ref into a local variable to prevent
+ * further protection. */
+ SYS_ARCH_PROTECT(old_level);
+ /* all pbufs in a chain are referenced at least once */
+ LWIP_ASSERT("pbuf_free: p->ref > 0", p->ref > 0);
+ /* decrease reference count (number of pointers to pbuf) */
+ ref = --(p->ref);
+ SYS_ARCH_UNPROTECT(old_level);
+ /* this pbuf is no longer referenced to? */
+ if (ref == 0) {
+ /* remember next pbuf in chain for next iteration */
+ q = p->next;
+ LWIP_DEBUGF( PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free: deallocating %p\n", (void *)p));
+ type = p->type;
+#if LWIP_SUPPORT_CUSTOM_PBUF
+ /* is this a custom pbuf? */
+ if ((p->flags & PBUF_FLAG_IS_CUSTOM) != 0) {
+ struct pbuf_custom *pc = (struct pbuf_custom*)p;
+ LWIP_ASSERT("pc->custom_free_function != NULL", pc->custom_free_function != NULL);
+ pc->custom_free_function(p);
+ } else
+#endif /* LWIP_SUPPORT_CUSTOM_PBUF */
+ {
+ USER_PANIC("SHOULD NOT REACH HERE...\n");
+ /* is this a pbuf from the pool? */
+ if (type == PBUF_POOL) {
+ memp_free(MEMP_PBUF_POOL, p);
+ /* is this a ROM or RAM referencing pbuf? */
+ } else if (type == PBUF_ROM || type == PBUF_REF) {
+ memp_free(MEMP_PBUF, p);
+ /* type == PBUF_RAM */
+ } else {
+ mem_free(p);
+ }
+ }
+ count++;
+ /* proceed to next pbuf */
+ p = q;
+ /* p->ref > 0, this pbuf is still referenced to */
+ /* (and so the remaining pbufs in chain as well) */
+ } else {
+ LWIP_DEBUGF( PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free: %p has ref %"U16_F", ending here.\n", (void *)p, ref));
+ /* stop walking through the chain */
+ p = NULL;
+ }
+ }
+ PERF_STOP("pbuf_free");
+ /* return number of de-allocated pbufs */
+ return count;
+}
+
+
+
+
+
--- /dev/null
+/**
+ * @brief UDP echo server test application (udp_echo.c)
+ */
+
+/*
+ * Copyright (c) 2017, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Universitaetsstrasse 6, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#include <barrelfish/barrelfish.h>
+
+#include <lwip/ip.h>
+#include <lwip/udp.h>
+#include <lwip/pbuf.h>
+#include <net/net.h>
+
+#define UDP_ECHOSERVER_PORT 7
+
+static void echo_recv_handler(void *arg, struct udp_pcb *upcb, struct pbuf *p,
+ const ip_addr_t *addr, uint16_t port)
+{
+ debug_printf("UDP ECHO received packet\n");
+ udp_sendto(upcb, p, addr, port);
+ pbuf_free(p);
+}
+
+int main(int argc, char *argv[])
+{
+ errval_t err;
+
+ debug_printf("UDP ECHO started.\n");
+
+ /* connect to the network */
+ err = networking_init_default();
+ if (err_is_fail(err)) {
+ USER_PANIC_ERR(err, "Failed to initialize the network");
+ }
+
+ debug_printf("UDP ECHO network initialized.\n");
+
+ //create a new UDP PCB
+ struct udp_pcb *pcb = udp_new(); //UDP connection data
+ if (pcb == NULL) {
+ return ERR_MEM;
+ }
+
+ debug_printf("UDP ECHO pcb created.\n");
+
+ err_t r = udp_bind(pcb, IP_ADDR_ANY, UDP_ECHOSERVER_PORT);
+ if(r != ERR_OK) {
+ udp_remove(pcb);
+ return(r);
+ }
+
+ debug_printf("UDP ECHO bound to UDP port %u.\n", UDP_ECHOSERVER_PORT);
+
+ udp_recv(pcb, echo_recv_handler, 0);
+
+
+ debug_printf("UDP ECHO start receiving messages\n");
+
+
+ for (int i = 0; i < 100; i++) {
+ networking_poll();
+ }
+
+ debug_printf("UDP ECHO termiated.\n");
+
+ return 0;
+}
+
+