Added code from two Bulk Transfer Distributed Systems Lab projects:
author    Stefan Kaestle <stefan.kaestle@inf.ethz.ch>
          Wed, 20 Aug 2014 11:33:02 +0000 (13:33 +0200)
committer Stefan Kaestle <stefan.kaestle@inf.ethz.ch>
          Wed, 20 Aug 2014 11:33:02 +0000 (13:33 +0200)
1) Reto Achermann, Antoine Kaufmann. Bulk Transfer over Network
2) Added code from two Bulk Transfer Distributed Systems Lab projects

The reports are available from the Barrelfish website.

57 files changed:
.gitignore [new file with mode: 0644]
errors/errno.fugu
hake/menu.lst.x86_64
hake/symbolic_targets.mk
if/Hakefile
if/block_service.if [new file with mode: 0644]
if/bulk_ctrl.if [new file with mode: 0644]
include/barrelfish/memobj.h
include/barrelfish/waitset.h
include/bulk_transfer/bulk_allocator.h [new file with mode: 0644]
include/bulk_transfer/bulk_local.h [new file with mode: 0644]
include/bulk_transfer/bulk_net.h [new file with mode: 0644]
include/bulk_transfer/bulk_sm.h [new file with mode: 0644]
include/bulk_transfer/bulk_transfer.h [new file with mode: 0644]
lib/barrelfish/Hakefile
lib/barrelfish/capabilities.c
lib/barrelfish/vspace/memobj_fixed.c [new file with mode: 0644]
lib/barrelfish/vspace/utils.c
lib/bulk_transfer/Hakefile [new file with mode: 0644]
lib/bulk_transfer/backends/backend.c [new file with mode: 0644]
lib/bulk_transfer/backends/backend.h [new file with mode: 0644]
lib/bulk_transfer/backends/local/control_channel.c [new file with mode: 0644]
lib/bulk_transfer/backends/net/bulk_net_backend.h [new file with mode: 0644]
lib/bulk_transfer/backends/net/bulk_net_buffer.c [new file with mode: 0644]
lib/bulk_transfer/backends/net/bulk_net_channel.c [new file with mode: 0644]
lib/bulk_transfer/backends/net/bulk_net_control.c [new file with mode: 0644]
lib/bulk_transfer/backends/net/bulk_net_endpoint.c [new file with mode: 0644]
lib/bulk_transfer/backends/net/bulk_net_pool.c [new file with mode: 0644]
lib/bulk_transfer/backends/sm/control_channel.c [new file with mode: 0644]
lib/bulk_transfer/bulk_allocator.c [new file with mode: 0644]
lib/bulk_transfer/bulk_buffer.c [new file with mode: 0644]
lib/bulk_transfer/bulk_buffer.h [new file with mode: 0644]
lib/bulk_transfer/bulk_channel.c [new file with mode: 0644]
lib/bulk_transfer/bulk_endpoint.c [new file with mode: 0644]
lib/bulk_transfer/bulk_pool.c [new file with mode: 0644]
lib/bulk_transfer/bulk_pool.h [new file with mode: 0644]
lib/bulk_transfer/bulk_transfer.c [new file with mode: 0644]
lib/bulk_transfer/control_channel.c [new file with mode: 0644]
lib/bulk_transfer/error_codes [new file with mode: 0644]
lib/bulk_transfer/helpers.h [new file with mode: 0644]
usr/block_server/Hakefile [new file with mode: 0644]
usr/block_server/block_server.c [new file with mode: 0644]
usr/block_server/block_server.h [new file with mode: 0644]
usr/block_server/block_server_client.c [new file with mode: 0644]
usr/block_server/block_storage.c [new file with mode: 0644]
usr/block_server/block_storage.h [new file with mode: 0644]
usr/block_server/block_storage_cache.c [new file with mode: 0644]
usr/block_server/block_storage_cache.h [new file with mode: 0644]
usr/block_server/local_server.c [new file with mode: 0644]
usr/block_server/local_server.h [new file with mode: 0644]
usr/block_server/network_client.c [new file with mode: 0644]
usr/block_server/network_client.h [new file with mode: 0644]
usr/block_server/network_common.h [new file with mode: 0644]
usr/block_server/network_server.c [new file with mode: 0644]
usr/block_server/network_server.h [new file with mode: 0644]
usr/tests/bulk_transfer/Hakefile [new file with mode: 0644]
usr/tests/bulk_transfer/bulk_mini.c [new file with mode: 0644]

diff --git a/.gitignore b/.gitignore
new file mode 100644 (file)
index 0000000..9448368
--- /dev/null
@@ -0,0 +1,5 @@
+# vim swap files
+*.swp
+
+# ctags
+tags
diff --git a/errors/errno.fugu b/errors/errno.fugu
index 3b84d7b..a945d38 100755 (executable)
@@ -963,3 +963,32 @@ errors cache FS_CACHE_ {
 errors common ERR_ {
     failure NOTIMP              "Not implemented",
 };
+
+errors bulk_transfer BULK_TRANSFER_ {
+    failure MEM                 "Internal out-of-memory error.",
+    failure CHAN_CREATE         "Channel create operation failed.",
+    failure CHAN_BIND           "Channel bind operation failed.",
+    failure CHAN_ASSIGN_POOL    "Channel assign pool operation failed.",
+    failure CHAN_STATE          "Channel has a wrong state.",
+    failure CHAN_TRUST          "Channel has an invalid trust level.",
+    failure CHAN_INVALID_EP     "Channel has an invalid endpoint.",
+    failure POOL_INVALD         "The pool does not match.",
+    failure POOL_NOT_ASSIGNED   "The pool has not yet been assigned to a channel.",
+    failure POOL_MAP            "Mapping of the pool failed.",
+    failure POOL_UNMAP          "Unmapping of the pool failed.",
+    failure POOL_ALREADY_ASSIGNED "The pool has already been assigned to this channel.",
+    failure POOL_ALREADY_REMAPPED "The pool has already been remapped.",
+    failure BUFFER_NOT_OWNED    "The supplied buffer is not owned by this domain.",
+    failure BUFFER_INVALID      "The buffer is not valid.",
+    failure BUFFER_ALREADY_MAPPED "The buffer is already mapped.",
+    failure BUFFER_STATE        "The buffer has a wrong state.",
+    failure BUFFER_REFCOUNT     "The buffer has a wrong reference count.",
+    failure BUFFER_NOT_A_COPY   "The released buffer is not a copy.",
+    failure BUFFER_MAP          "Mapping of the buffer failed.",
+    failure BUFFER_UNMAP        "Unmapping of the buffer failed.",
+    failure ALLOC_BUFFER_SIZE   "The supplied buffer size is not valid.",
+    failure ALLOC_BUFFER_COUNT  "The supplied buffer count is not valid.",
+};
+
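
For illustration, a hypothetical sketch (not part of this commit) of how a caller
might test for one of the new error codes; err_is_fail(), err_no() and DEBUG_ERR()
are the standard Barrelfish error helpers, and the helper function itself is an
assumption:

    #include <barrelfish/barrelfish.h>
    #include <bulk_transfer/bulk_transfer.h>

    /* hypothetical helper: tolerate double assignment, report anything else */
    static void assign_or_warn(struct bulk_channel *channel,
                               struct bulk_pool *pool)
    {
        errval_t err = bulk_channel_assign_pool(channel, pool);
        if (err_is_fail(err) &&
            err_no(err) != BULK_TRANSFER_POOL_ALREADY_ASSIGNED) {
            DEBUG_ERR(err, "assigning pool to channel failed");
        }
    }
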
diff --git a/hake/menu.lst.x86_64 b/hake/menu.lst.x86_64
index 3cbdc01..666d318 100644 (file)
@@ -46,6 +46,10 @@ module /x86_64/sbin/e1000n auto
 module /x86_64/sbin/NGD_mng auto
 module /x86_64/sbin/netd auto
 
+## Block Service
+module /x86_64/sbin/block_server
+module /x86_64/sbin/block_server_client
+
 # General user domains
 module /x86_64/sbin/serial
 module  /x86_64/sbin/fish nospawn
diff --git a/hake/symbolic_targets.mk b/hake/symbolic_targets.mk
index 7120851..c06b31c 100644 (file)
@@ -171,6 +171,8 @@ MODULES_x86_64= \
        sbin/angler \
        sbin/sshd \
        sbin/lshw \
+       sbin/block_server \
+       sbin/block_server_client \
 
 # the following are broken in the newidc system
 MODULES_x86_64_broken= \
diff --git a/if/Hakefile b/if/Hakefile
index a67bd93..48d45c8 100644 (file)
                "xmplrpc",
                "xmplthc",
                "unixsock",
-              "bcache",
-              "replay",
-              "empty"],
+                  "bcache",
+                  "replay",
+                  "block_service", 
+                  "bulk_ctrl",
+                  "empty"
+              ],
              arch <- allArchitectures
 ] ++
 
diff --git a/if/block_service.if b/if/block_service.if
new file mode 100644 (file)
index 0000000..68d9478
--- /dev/null
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2007, 2008, 2009, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+
+/*
+       TODO: asynchronous or rpc style?
+       TODO: add bulk transfer sequence number?
+*/
+
+interface block_service "block service interface" {
+    message write(uint32 start_block, uint32 count);
+    message read(uint32 start_block, uint32 count);
+};
diff --git a/if/bulk_ctrl.if b/if/bulk_ctrl.if
new file mode 100644 (file)
index 0000000..942a687
--- /dev/null
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2007, 2008, 2009, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+
+/*
+ * TODO: Decide if we send the aggregate object over the control channel
+ *       or if we split it up and send raw buffers
+ *
+ */
+
+interface bulk_ctrl "bulk control channel interface" {
+    message copy_start(uint32 count);
+    message copy(uint32 poolid, uint32 offset, uint32 length);
+    
+    message move_start(uint32 count);
+    message move(uint32 poolid, uint32 offset, uint32 length);
+    
+    message free_start(uint32 count);
+    message free(uint32 poolid, uint32 offset, uint32 length);
+    
+    message assign_pool(uint32 bufsize, uint32 count);
+    message transfer_cap();
+};
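
A hypothetical usage sketch (not part of this commit): sending one copy descriptor
over the generated binding, assuming the usual Flounder conventions of a
bulk_ctrl_binding struct with a tx_vtbl and NOP_CONT for fire-and-forget sends:

    #include <assert.h>
    #include <barrelfish/barrelfish.h>
    #include <if/bulk_ctrl_defs.h>

    /* hypothetical helper: announce one copy, then send its descriptor */
    static void send_one_copy(struct bulk_ctrl_binding *b, uint32_t poolid,
                              uint32_t offset, uint32_t length)
    {
        errval_t err = b->tx_vtbl.copy_start(b, NOP_CONT, 1);
        assert(err_is_ok(err));
        err = b->tx_vtbl.copy(b, NOP_CONT, poolid, offset, length);
        assert(err_is_ok(err));
    }
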
diff --git a/include/barrelfish/memobj.h b/include/barrelfish/memobj.h
index fac10c5..4ae483c 100644 (file)
@@ -27,6 +27,7 @@ enum memobj_type {
     PINNED,
     ONE_FRAME_ONE_MAP,
     MEMOBJ_VFS, // see lib/vfs/mmap.c
+    MEMOBJ_FIXED,
 };
 
 typedef uint32_t memobj_flags_t;
@@ -107,6 +108,18 @@ struct memobj_anon {
     struct slab_alloc frame_slab;         ///< Slab to back the frame list
 };
 
+/**
+ * this memobj can be mapped into a single vregion and backed by a fixed number
+ * of equal sized frames
+ */
+struct memobj_fixed {
+    struct memobj    m;          ///< public memobj interface
+    size_t           count;      ///< the number of frames
+    size_t           chunk_size; ///< the size of the frames
+    struct vregion  *vregion;    ///< the associated vregion
+    struct capref   *frames;     ///< the tracked frames
+};
+
 errval_t memobj_create_pinned(struct memobj_pinned *memobj, size_t size,
                               memobj_flags_t flags);
 
@@ -124,6 +137,12 @@ errval_t memobj_create_one_frame_lazy(struct memobj_one_frame_lazy *memobj,
 errval_t memobj_create_one_frame_one_map(struct memobj_one_frame_one_map *memobj,
                                          size_t size, memobj_flags_t flags);
 
+errval_t memobj_create_fixed(struct memobj_fixed *memobj, size_t size,
+                             memobj_flags_t flags, size_t count,
+                             size_t chunk_size);
+
+errval_t memobj_destroy_fixed(struct memobj *memobj);
+
 __END_DECLS
 
 #endif // LIBBARRELFISH_MEMOBJ_H
diff --git a/include/barrelfish/waitset.h b/include/barrelfish/waitset.h
index 1974618..70cf137 100644 (file)
@@ -23,6 +23,7 @@ __BEGIN_DECLS
 #include <errors/errno.h>
 
 #include <stdbool.h>
+#include <barrelfish/types.h>
 
 struct waitset;
 struct thread;
diff --git a/include/bulk_transfer/bulk_allocator.h b/include/bulk_transfer/bulk_allocator.h
new file mode 100644 (file)
index 0000000..bccbd5f
--- /dev/null
@@ -0,0 +1,110 @@
+/**
+ * \file
+ * \brief Allocator for managing buffers in a bulk transfer pool.
+ *
+ * Note that using this allocator is optional; an application can do its own
+ * buffer management.
+ */
+
+/*
+ * Copyright (c) 2013, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef BULK_ALLOCATOR_H
+#define BULK_ALLOCATOR_H
+
+#include <bulk_transfer/bulk_transfer.h>
+
+/**
+ * node in the list of free buffers of an allocator
+ */
+struct bulk_buffer_mng {
+    struct bulk_buffer     *buffer;
+    struct bulk_buffer_mng *next;
+};
+
+/**
+ * a buffer allocator that manages the free buffers of a pool
+ */
+struct bulk_allocator {
+    struct bulk_pool       *pool;
+    size_t                  num_free;
+    struct bulk_buffer_mng *free_buffers;
+};
+
+
+/**
+ * constraints on the memory backing a newly allocated pool
+ */
+struct bulk_pool_constraints {
+    uintptr_t range_min;
+    uintptr_t range_max;
+    uintptr_t alignment;
+};
+
+
+
+/**
+ * initializes a new bulk allocator with a pool and allocates memory for it.
+ *
+ * @param alloc         pointer to an unused allocator handle
+ * @param buffer_count  the number of buffers to allocate
+ * @param buffer_size   the size of a single buffer
+ * @param constraints   memory requirements for this pool or NULL if none
+ */
+errval_t bulk_alloc_init(struct bulk_allocator         *alloc,
+                         size_t                         buffer_count,
+                         size_t                         buffer_size,
+                         struct bulk_pool_constraints  *constraints);
+
+/**
+ * creates a new allocator based on the supplied capability. It creates as many
+ * buffers as possible of size buffer_size that fit into the capability.
+ *
+ * @param alloc         an unused allocator handle
+ * @param buffer_size   the size of a single buffer
+ * @param frame         capability for backing the bulk pool
+ */
+errval_t bulk_alloc_init_from_cap(struct bulk_allocator   *alloc,
+                                  size_t                   buffer_size,
+                                  struct capref            *frame);
+
+
+/**
+ * Frees up the bulk allocator and its pool.
+ *
+ * @param alloc handle to a bulk allocator to be freed
+ */
+errval_t bulk_alloc_free(struct bulk_allocator *alloc);
+
+
+
+/**
+ * Gets a new bulk buffer from the allocator.
+ *
+ * @param   alloc   the allocator handle to allocate the buffer from
+ *
+ * @return  pointer to a bulk_buffer on success
+ *          NULL if there are no buffers left
+ *
+ */
+struct bulk_buffer *bulk_alloc_new_buffer(struct bulk_allocator *alloc);
+
+
+/**
+ * returns a buffer back to the allocator. The pools must match.
+ *
+ * @param alloc     the allocator to hand the buffer back
+ * @param buffer    the buffer to hand back to the allocator
+ */
+errval_t bulk_alloc_return_buffer(struct bulk_allocator *alloc,
+                                  struct bulk_buffer    *buffer);
+
+
+#endif /* BULK_ALLOCATOR_H */
+
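A hypothetical usage sketch of the allocator API above (not part of this commit);
the buffer count and size are arbitrary example values:

    #include <bulk_transfer/bulk_allocator.h>

    static errval_t allocator_example(void)
    {
        struct bulk_allocator alloc;

        /* back a new pool with 128 buffers of 4 KiB each, no constraints */
        errval_t err = bulk_alloc_init(&alloc, 128, 4096, NULL);
        if (err_is_fail(err)) {
            return err;
        }

        struct bulk_buffer *buf = bulk_alloc_new_buffer(&alloc);
        if (buf == NULL) {
            bulk_alloc_free(&alloc);
            return BULK_TRANSFER_MEM; /* assumption: report pool exhaustion */
        }

        /* ... fill buf->address with payload data ... */

        err = bulk_alloc_return_buffer(&alloc, buf);
        if (err_is_fail(err)) {
            return err;
        }
        return bulk_alloc_free(&alloc);
    }
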
diff --git a/include/bulk_transfer/bulk_local.h b/include/bulk_transfer/bulk_local.h
new file mode 100644 (file)
index 0000000..e0bf30a
--- /dev/null
@@ -0,0 +1,37 @@
+/**
+ * \file
+ * \brief Unidirectional bulk data transfer via shared memory
+ */
+
+/*
+ * Copyright (c) 2009, 2010, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef BULK_LOCAL_H
+#define BULK_LOCAL_H
+
+#include <bulk_transfer/bulk_transfer.h>
+
+
+
+struct bulk_local_endpoint {
+    struct bulk_endpoint_descriptor   generic;
+    struct bulk_channel              *other_channel;
+};
+
+/**
+ * @param endpoint      Pointer to the endpoint that is to be initialized
+ * @param other_channel NULL for a create channel endpoint, the unbound channel
+ *                      of the other endpoint.
+ */
+void bulk_local_init_endpoint(struct bulk_local_endpoint *endpoint,
+                              struct bulk_channel        *other_channel);
+
+
+#endif /* BULK_LOCAL_H */
+
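A hypothetical sketch (not part of this commit) of wiring up a loopback channel
pair with this backend; the setup, bind parameters and callbacks are assumed to
be filled in by the application:

    #include <bulk_transfer/bulk_local.h>

    static struct bulk_local_endpoint ep_a, ep_b;
    static struct bulk_channel chan_a, chan_b;

    static errval_t local_pair_example(struct bulk_channel_setup *setup,
                                       struct bulk_channel_bind_params *params,
                                       struct bulk_channel_callbacks *cb)
    {
        /* creating side: no peer channel exists yet */
        bulk_local_init_endpoint(&ep_a, NULL);
        errval_t err = bulk_channel_create(&chan_a, &ep_a.generic, cb, setup);
        if (err_is_fail(err)) {
            return err;
        }

        /* binding side: point the endpoint at the unbound channel */
        bulk_local_init_endpoint(&ep_b, &chan_a);
        return bulk_channel_bind(&chan_b, &ep_b.generic, cb, params);
    }
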
diff --git a/include/bulk_transfer/bulk_net.h b/include/bulk_transfer/bulk_net.h
new file mode 100644 (file)
index 0000000..92720cc
--- /dev/null
@@ -0,0 +1,170 @@
+/**
+ * \file
+ * \brief Unidirectional bulk data transfer over the network
+ */
+
+/*
+ * Copyright (c) 2009, 2010, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef BULK_NET_H
+#define BULK_NET_H
+
+#include <bulk_transfer/bulk_transfer.h>
+
+
+
+/* TODO: correct include */
+struct ip_addr {
+    int foo;
+};
+
+/**
+ * endpoint descriptor for the bulk network interface.
+ *
+ * XXX: the queue identifiers are for the control channel.
+ *      we may have to distinguish the different pools with different queues
+ *      because the buffer sizes may vary among pools.
+ *      - RA
+ */
+struct bulk_net_endpoint_descriptor {
+    /* generic part */
+    struct bulk_endpoint_descriptor ep_generic; ///< generic endpoint part
+    /* start of implementation specific part */
+    struct ip_addr                  ip;          ///< ip address
+    uint16_t                        port;        ///< port
+    uint64_t                        rx_queue_id; ///< rx queue (control channel)
+    uint64_t                        tx_queue_id; ///< tx queue (control channel)
+    /*
+     * XXX: do we want to add the connection information here as well?
+     *      e.g. tcp_pcb ?
+     */
+};
+
+/**
+ * setup parameters for creating a network endpoint
+ */
+struct bulk_net_ep_setup {
+    struct ip_addr  ip;         ///< the ip address of the endpoint
+    uint16_t        port;       ///< the port of the endpoint
+    /* possibly queue id */
+    uint64_t        rx_queue_id; ///< rx queue (control channel)
+    uint64_t        tx_queue_id; ///< tx queue (control channel)
+};
+
+
+/**
+ * enumeration of possible message types over the control channel
+ */
+enum bulk_net_msg_type {
+    BULK_NET_MSG_BIND,          ///< binding to the channel
+    BULK_NET_MSG_POOL_ASSIGN,   ///< pool has been assigned to the channel
+    BULK_NET_MSG_POOL_REMOVE,   ///< pool has been removed from the channel
+    BULK_NET_MSG_DATA           ///< data arrives over the channel
+};
+
+struct bulk_net_header {
+    enum bulk_net_msg_type  type;
+    size_t                  size;
+};
+
+struct bulk_net_msg {
+    struct bulk_net_header header;
+    /* meta data of the bulk data */
+    union {
+        struct {
+            struct bulk_pool_id id;
+            size_t buffer_size;
+            size_t num_buffers;
+        } pool_assign;
+
+        struct {
+            struct bulk_pool_id id;
+        } pool_remove;
+
+        struct {
+            struct bulk_net_ep_setup ep_setup;
+            struct bulk_channel_setup channel_setup;
+        } bind;
+
+        struct {
+        } data;
+
+    } msg;
+};
+
+
+/*
+ * ---------------------------------------------------------------------------
+ * Implementation Specific Interface Functions >>>
+ *
+ * TODO: make asynchronous
+ */
+errval_t bulk_net_channel_pass(struct bulk_channel *channel,
+                               struct bulk_buffer  *buffer,
+                               void                *meta,
+                               struct bulk_continuation cont);
+
+errval_t bulk_net_channel_copy(struct bulk_channel *channel,
+                               struct bulk_buffer  *buffer,
+                               void                *meta,
+                               struct bulk_continuation cont);
+
+errval_t bulk_net_channel_release(struct bulk_channel *channel,
+                                  struct bulk_buffer  *buffer,
+                                  struct bulk_continuation cont);
+
+errval_t bulk_net_channel_move(struct bulk_channel *channel,
+                               struct bulk_buffer   *buffer,
+                               void                 *meta,
+                               struct bulk_continuation cont);
+
+errval_t bulk_net_channel_assign_pool(struct bulk_channel *channel,
+                                      struct bulk_pool    *pool);
+
+errval_t bulk_net_channel_bind(struct bulk_channel *channel);
+
+errval_t bulk_net_channel_create(struct bulk_channel *channel);
+
+struct bulk_implementation *bulk_net_get_implementation(void);
+
+/*
+ * <<< Implementation Specific Interface Functions
+ * ---------------------------------------------------------------------------
+ */
+
+/**
+ * Creates a new bulk endpoint which uses the network backend
+ *
+ * @param ep_desc   memory location to create the endpoint in
+ * @param setup     the setup parameters for the endpoint
+ *
+ * This function is intended to be used by the creator.
+ */
+errval_t bulk_net_ep_create(struct bulk_net_endpoint_descriptor *ep_desc,
+                            struct bulk_net_ep_setup            *setup);
+
+/**
+ * Destroys the given endpoint
+ *
+ * @param   ep_desc the endpoint to be destroyed
+ */
+errval_t bulk_net_ep_destroy(struct bulk_net_endpoint_descriptor *ep_desc);
+
+/**
+ * Explicitly creates a specific remote endpoint
+ *
+ * @param ep_desc   memory location to create the endpoint in
+ * @param ip        the ip of the server machine
+ * @param port      the port on which the other side listens
+ */
+errval_t bulk_net_ep_create_remote(struct bulk_net_endpoint_descriptor *ep_desc,
+                                   struct ip_addr ip, uint16_t port);
+#endif /* BULK_NET_H */
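
A hypothetical sketch (not part of this commit) of creating a network endpoint;
the port and queue ids are placeholder values, and struct ip_addr is still a
stub in this commit, so the address is left zero-initialized:

    #include <bulk_transfer/bulk_net.h>

    static errval_t net_ep_example(struct bulk_net_endpoint_descriptor *ep)
    {
        struct bulk_net_ep_setup setup = {
            .port        = 6000, /* example port */
            .rx_queue_id = 0,    /* control-channel rx queue */
            .tx_queue_id = 0,    /* control-channel tx queue */
        };
        return bulk_net_ep_create(ep, &setup);
    }
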
diff --git a/include/bulk_transfer/bulk_sm.h b/include/bulk_transfer/bulk_sm.h
new file mode 100644 (file)
index 0000000..9f15fe6
--- /dev/null
@@ -0,0 +1,26 @@
+/**
+ * \file
+ * \brief Unidirectional bulk data transfer via shared memory
+ */
+
+/*
+ * Copyright (c) 2009, 2010, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef BULK_SM_H
+#define BULK_SM_H
+
+struct bulk_sm_endpoint_descriptor {
+    /* generic part */
+    struct bulk_endpoint_descriptor ep_generic; ///< generic endpoint part
+    /* start of implementation specific part */
+    /* TODO: references to iref / flounder data structure */
+};
+
+
+#endif /* BULK_SM_H */
diff --git a/include/bulk_transfer/bulk_transfer.h b/include/bulk_transfer/bulk_transfer.h
new file mode 100644 (file)
index 0000000..a18a1fb
--- /dev/null
@@ -0,0 +1,486 @@
+/**
+ * \file
+ * \brief Generic bulk data transfer mechanism
+ */
+
+/*
+ * Copyright (c) 2013, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef BULK_TRANSFER_H
+#define BULK_TRANSFER_H
+
+#include <barrelfish/barrelfish.h>
+#include <barrelfish/waitset.h>
+
+/**
+ * Specifies the direction of data flow over a channel.
+ */
+enum bulk_channel_direction {
+    BULK_DIRECTION_TX,  ///< This side of the channel is the data source
+    BULK_DIRECTION_RX   ///< This side of the channel is the data sink
+};
+
+/**
+ * The role of the domain with respect to the channel.
+ *
+ * 1) Creation: upon channel creation the role can either be fixed or generic
+ * 2) Binding: the two endpoints take complementary roles (master/slave)
+ */
+enum bulk_channel_role {
+    BULK_ROLE_GENERIC,  ///< the role of this endpoint depends on the binding side
+    BULK_ROLE_MASTER,   ///< this endpoint is the channel master
+    BULK_ROLE_SLAVE     ///< this endpoint is the channel slave
+};
+
+/**
+ * the trust levels of the channel
+ */
+enum bulk_trust_level {
+    BULK_TRUST_UNINITIALIZED, ///< trust level is not initialized
+    BULK_TRUST_NONE,          ///< untrusted case, policies are enforced
+    BULK_TRUST_HALF,          ///< same as untrusted, but no revocation of caps
+    BULK_TRUST_FULL           ///< complete trust, no unmapping
+};
+
+/**
+ * the states of a bulk channel
+ */
+enum bulk_channel_state {
+    BULK_STATE_UNINITIALIZED,  ///< channel not initialized, no endpoint assigned
+    BULK_STATE_INITIALIZED,    ///< local endpoint assigned, ready for binding
+    BULK_STATE_BINDING,        ///< binding is in progress
+    BULK_STATE_CONNECTED,      ///< binding is completed and ready for use
+    BULK_STATE_TEARDOWN,       ///< teardown is initiated
+    BULK_STATE_CLOSED          ///< the channel has been closed
+};
+
+/* forward declarations */
+struct bulk_channel;
+struct bulk_channel_constraints;
+struct bulk_pool;
+struct bulk_pool_list;
+struct bulk_buffer;
+
+
+/**
+ * continuation to make the interface asynchronous
+ */
+struct bulk_continuation {
+    void (*handler)(void *arg, errval_t err, struct bulk_channel *channel);
+    void *arg;
+};
+
+/**
+ * Function pointers provided by an implementation of the bulk transfer
+ * mechanism over a specific backend. Functions correspond closely to the
+ * public interface.
+ *
+ * XXX: do we want to give a pointer to the closure or the closure itself?
+ *      the event_closure just has two fields, so it may be reasonable to do so.
+ *      - RA
+ */
+struct bulk_implementation {
+    errval_t (*channel_create)(struct bulk_channel  *channel);
+
+    errval_t (*channel_bind)(struct bulk_channel  *channel);
+
+    errval_t (*channel_destroy)(struct bulk_channel  *channel);
+
+    errval_t (*assign_pool)(struct bulk_channel *channel,
+                            struct bulk_pool    *pool);
+
+    errval_t (*remove_pool)(struct bulk_channel *channel,
+                            struct bulk_pool    *pool,
+                            struct bulk_continuation cont);
+
+    errval_t (*move)(struct bulk_channel  *channel,
+                     struct bulk_buffer   *buffer,
+                     void                 *meta,
+                     struct bulk_continuation cont);
+
+    errval_t (*copy)(struct bulk_channel  *channel,
+                     struct bulk_buffer   *buffer,
+                     void                 *meta,
+                     struct bulk_continuation cont);
+
+    errval_t (*release)(struct bulk_channel  *channel,
+                        struct bulk_buffer   *buffer,
+                        struct bulk_continuation cont);
+
+    errval_t (*pass)(struct bulk_channel  *channel,
+                     struct bulk_buffer   *buffer,
+                     void                 *meta,
+                     struct bulk_continuation cont);
+    /* XXX: remove ? */
+    errval_t (*request)(struct bulk_channel  *channel,
+                        size_t                count,
+                        struct bulk_continuation cont);
+};
+
+/**
+ * specifies constraints on the channel. This involves limiting the supported
+ * memory range or alignment requirements.
+ */
+struct bulk_channel_constraints {
+    uintptr_t mem_range_min;    ///< minimum physical address supported
+    uintptr_t mem_range_max;    ///< maximum physical address supported
+    uintptr_t mem_align;        ///< minimum memory alignment constraint
+};
+
+/** Callbacks for events */
+struct bulk_channel_callbacks {
+    /**
+     * For exporting side: other endpoint successfully bound
+     * For binding side: binding succeeded
+     */
+    void (*bind_done)(struct bulk_channel *channel, errval_t err);
+
+    /**
+     * the other side wants to teardown the channel
+     * For initiating side: teardown completed
+     * For other side: teardown initiated
+     */
+    void (*teardown_received)(struct bulk_channel *channel);
+
+    /**
+     * The other endpoint requests to assign a new pool to this channel.
+     * @return If an error value is returned, the pool is not assigned and the
+     *         error code is sent to the other side (veto).
+     */
+    errval_t (*pool_assigned)(struct bulk_channel *channel,
+                              struct bulk_pool *pool);
+
+    /**
+     * The other endpoint wants to remove a pool from this channel
+     */
+    errval_t (*pool_removed)(struct bulk_channel *channel,
+                             struct bulk_pool *pool);
+
+    /** Incoming moved buffer (sink) */
+    void (*move_received)(struct bulk_channel *channel,
+                          struct bulk_buffer  *buffer,
+                          void                *meta);
+
+    /** Incoming passed buffer (source) */
+    void (*buffer_received)(struct bulk_channel *channel,
+                            struct bulk_buffer  *buffer,
+                            void                *meta);
+
+    /** Incoming copied buffer (sink) */
+    void (*copy_received)(struct bulk_channel *channel,
+                          struct bulk_buffer  *buffer,
+                          void                *meta);
+
+    /** Released copied buffer (source) */
+    void (*copy_released)(struct bulk_channel *channel,
+                          struct bulk_buffer  *buffer);
+
+    /** the other endpoint ran out of buffers and requests more buffers */
+     /*
+      * XXX: It's a point of argument whether to have this on the bulk
+      *      interface or to push it to the service level. Also: maybe need
+      *      to specify the pool id here.
+      *      - RA
+      */
+    void (*request_received)(struct bulk_channel *channel,
+                             size_t               count);
+};
+
+
+/** Handle/Representation for one end of a bulk transfer channel */
+struct bulk_channel {
+    /** callbacks for the channel events */
+    struct bulk_channel_callbacks    *callbacks;
+    /** the local endpoint for this channel */
+    struct bulk_endpoint_descriptor *ep;
+    /** the current channel state */
+    enum bulk_channel_state          state;
+    /** pool allocators */
+    // struct bulk_pool_allocator      *pool_allocators;
+    /** ordered list of pools assigned to this channel */
+    struct bulk_pool_list           *pools;
+    /** the direction of data flow */
+    enum bulk_channel_direction      direction;
+    /** role of this side of the channel */
+    enum bulk_channel_role           role;
+    /** the trust level of this channel */
+    enum bulk_trust_level            trust;
+    /** constraints of this channel */
+    struct bulk_channel_constraints  constraints;
+    /** the size of the transmitted meta information per bulk transfer */
+    size_t                           meta_size;
+    /** the waitset for this channel */
+    struct waitset                  *waitset;
+    /** pointer to user specific state for this channel */
+    void                            *user_state;
+    /** implementation specific data */
+    /*
+     * XXX: maybe we want to have an abstract channel and specific channel
+     *      as with the endpoints here as well?
+     *      - RA
+     */
+    void                            *impl_data;
+};
+
+/**
+ * generic bulk endpoint
+ *
+ * This serves as an abstract representation of an endpoint. This data structure
+ * must be part of the implementation specific endpoint struct.
+ */
+struct bulk_endpoint_descriptor {
+    /** Pointer to backend-function pointers for this endpoint */
+    struct bulk_implementation *f;
+    /** TODO: is there more generic endpoint information? */
+};
+
+
+/**
+    this struct represents the pool id, which consists of the domain id of the
+    allocator and a domain-local allocation counter
+    TODO: ensure system-wide uniqueness, also between different machines
+ */
+struct bulk_pool_id {
+    uint32_t    machine;
+    domainid_t  dom;
+    uint32_t    local;
+};
+
+
+/**
+ * represents the state of a buffer
+ */
+enum bulk_buffer_state {
+    BULK_BUFFER_INVALID,    ///< the buffer is not present XXX: name?
+    BULK_BUFFER_READ_ONLY,  ///< the buffer is mapped read only
+    BULK_BUFFER_RO_OWNED,   ///< the buffer is copied first
+    BULK_BUFFER_READ_WRITE  ///< the buffer is mapped read write
+};
+
+/**
+ * The bulk pool is a contiguous region in (virtual) memory that consists of
+ * equally sized buffers.
+ */
+struct bulk_pool {
+    /** TODO: find a unique id*/
+    struct bulk_pool_id     id;
+    /** the base address of the pool */
+    lvaddr_t                 base_address;
+    /** the size of a single buffer in bytes */
+    size_t                   buffer_size;
+    /**  pool trust level depending on first assignment */
+    enum bulk_trust_level    trust;
+    /** pointer to an implementation dependent data structure */
+    void                    *impl_data;
+    /** capability for the entire pool */
+    struct capref            pool_cap;
+    /** the maximum number of buffers in this pool */
+    size_t                   num_buffers;
+    /** array of the buffers for this pool (pre allocated) */
+    struct bulk_buffer     **buffers;
+};
+
+/**
+ * a list of bulk pools assigned to a channel, keep the list ordered by the id
+ */
+struct bulk_pool_list {
+    struct bulk_pool_list *next;    ///< pointer to the next element
+    struct bulk_pool      *pool;    ///< the pool
+};
+
+/**
+ * a bulk buffer is the base unit for bulk data transfer in the system
+ */
+struct bulk_buffer {
+    /** the virtual address of the buffer */
+    void                     *address;
+    /** XXX: maybe we have to use the pool_id here */
+    struct bulk_pool         *pool;
+    /** capability for this buffer */
+    struct capref             cap;
+    /** offset in the capability  */
+    lpaddr_t                  cap_offset;
+    /** state of the buffer */
+    enum bulk_buffer_state    state;
+    /** local reference counting */
+    uint32_t                  local_ref_count;
+};
+
+
+/*
+ * ---------------------------------------------------------------------------
+ * Channel Management >>>
+ */
+
+/**
+ * setup parameters for creating a new bulk channel
+ */
+struct bulk_channel_setup {
+    /** Channel direction (RX/TX) */
+    enum bulk_channel_direction       direction;
+    /** Endpoint role (master/slave) */
+    enum bulk_channel_role            role;
+    /** trust level for this channel */
+    enum bulk_trust_level             trust;
+    /** constraints for this channel */
+    struct bulk_channel_constraints   constraints;
+    /** Size of metadata to be passed along with transfers and passed buffers. */
+    size_t                            meta_size;
+    /** Waitset on which events for this channel will be dispatched */
+    struct waitset                   *waitset;
+    /** pointer to user specific state for this channel */
+    void                             *user_state;
+};
+
+/**
+ * parameters used on binding to a channel
+ */
+struct bulk_channel_bind_params {
+    /** Endpoint role (master/slave) */
+    enum bulk_channel_role            role;
+    /** trust level for this channel */
+    enum bulk_trust_level             trust;
+    /** the channel constraints */
+    struct bulk_channel_constraints   constraints;
+    /** Waitset on which events for this channel will be dispatched */
+    struct waitset                   *waitset;
+    /** user state for the channel */
+    void                             *user_state;
+};
+
+
+/**
+ * Create a new channel.
+ *
+ * @param channel   Pointer to unused channel handle
+ * @param ep_desc   Description of endpoint to bind to
+ * @param callbacks Callbacks for events on this channel
+ * @param setup     struct containing the setup information
+ */
+errval_t bulk_channel_create(struct bulk_channel              *channel,
+                             struct bulk_endpoint_descriptor  *ep_desc,
+                             struct bulk_channel_callbacks    *callbacks,
+                             struct bulk_channel_setup        *setup);
+
+/**
+ * Bind to an existing unbound channel.
+ *
+ * @param channel   Pointer to unused channel handle
+ * @param remote_ep_desc  Description of the remote endpoint to bind to
+ * @param callbacks Callbacks for events on this channel
+ * @param params    parameters for the binding process
+ *
+ * The bind_done callback serves as the continuation for this call.
+ */
+errval_t bulk_channel_bind(struct bulk_channel              *channel,
+                           struct bulk_endpoint_descriptor  *remote_ep_desc,
+                           struct bulk_channel_callbacks    *callbacks,
+                           struct bulk_channel_bind_params  *params);
+
+
+/**
+ * Assign a pool to a channel.
+ *
+ * @param channel Channel
+ * @param pool    Pool to assign (must not be assigned to this channel yet)
+ *
+ * The pool_assigned callback serves as the continuation for this call.
+ */
+errval_t bulk_channel_assign_pool(struct bulk_channel *channel,
+                                  struct bulk_pool    *pool);
+
+/**
+ * Remove a pool from a channel
+ *
+ * @param channel Channel
+ * @param pool    Pool to remove (must be previously assigned to the channel)
+ *
+ */
+errval_t bulk_channel_remove_pool(struct bulk_channel       *channel,
+                                  struct bulk_pool          *pool,
+                                  struct bulk_continuation   cont);
+
+/**
+ * Free a channel
+ *
+ * @param channel        Channel to be freed
+ */
+errval_t bulk_channel_destroy(struct bulk_channel      *channel);
+
+/*
+ * ---------------------------------------------------------------------------
+ * <<< Channel Management
+ */
+
+
+
+/**
+ * Move buffer on the channel. Data and ownership are passed to the other
+ * endpoint. After the other endpoint is done with the respective buffer, it can
+ * pass it back.
+ *
+ * @param channel Channel, this endpoint must be source
+ * @param buffer  Buffer, must hold ownership and belong to a pool on this
+ *                channel
+ * @param meta    Pointer to metadata to be passed along with the data
+ *                (channel-wide meta_size is used).
+ * @param cont    event continuation
+ */
+errval_t bulk_channel_move(struct bulk_channel      *channel,
+                           struct bulk_buffer       *buffer,
+                           void                     *meta,
+                           struct bulk_continuation  cont);
+
+/**
+ * Pass buffer ownership to the other endpoint, the buffer contents are not
+ * guaranteed to be transported.
+ *
+ * @param channel Channel
+ * @param buffer  Buffer, must hold ownership and belong to a pool on this
+ *                channel
+ * @param meta    Pointer to metadata to be passed along with the buffer
+ *                (channel-wide meta_size is used).
+ * @param cont    event continuation
+ */
+errval_t bulk_channel_pass(struct bulk_channel      *channel,
+                           struct bulk_buffer       *buffer,
+                           void                     *meta,
+                           struct bulk_continuation  cont);
+
+/**
+ * Copy buffer to other endpoint.
+ *
+ * @param channel Channel, this endpoint must be source
+ * @param buffer  Buffer, must belong to a pool on this channel. Must hold
+ *                ownership, or hold a copy of this buffer.
+ * @param meta    Pointer to metadata to be passed along with the buffer
+ *                (channel-wide meta_size is used).
+ * @param cont    event continuation
+ */
+errval_t bulk_channel_copy(struct bulk_channel      *channel,
+                           struct bulk_buffer       *buffer,
+                           void                     *meta,
+                           struct bulk_continuation  cont);
+/**
+ * Release copy received over channel. Must only be called after all outgoing
+ * copies from this domain of the same buffer have been released.
+ *
+ * @param channel Channel, this endpoint must be sink
+ * @param buffer  Buffer, must have received it as a copy over this channel, all
+ *                outgoing copies must have been released.
+ * @param cont    event continuation
+ */
+errval_t bulk_channel_release(struct bulk_channel       *channel,
+                              struct bulk_buffer        *buffer,
+                              struct bulk_continuation   cont);
+
+
+
+#endif /* BULK_TRANSFER_H */
+
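A hypothetical sketch (not part of this commit) of the asynchronous pattern this
header defines: a bulk_continuation is handed to bulk_channel_move() and its
handler fires once the move has completed:

    #include <bulk_transfer/bulk_transfer.h>

    static void move_done(void *arg, errval_t err, struct bulk_channel *channel)
    {
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "bulk move failed");
        }
    }

    static errval_t send_buffer(struct bulk_channel *channel,
                                struct bulk_buffer *buffer, void *meta)
    {
        struct bulk_continuation cont = {
            .handler = move_done,
            .arg     = NULL,
        };
        /* ownership of the buffer passes to the sink on completion */
        return bulk_channel_move(channel, buffer, meta, cont);
    }
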
diff --git a/lib/barrelfish/Hakefile b/lib/barrelfish/Hakefile
index 930a833..847122f 100644 (file)
@@ -21,7 +21,7 @@
                       "terminal.c", "spawn_client.c", "vspace/vspace.c", 
                       "vspace/vregion.c", "vspace/memobj_one_frame.c",
                       "vspace/memobj_one_frame_lazy.c",
-                      "vspace/utils.c",
+                      "vspace/utils.c", "vspace/memobj_fixed.c",
                       "vspace/memobj_one_frame_one_map.c", "vspace/mmu_aware.c",
                       "slot_alloc/single_slot_alloc.c", "slot_alloc/multi_slot_alloc.c",
                       "slot_alloc/slot_alloc.c", "slot_alloc/range_slot_alloc.c",
diff --git a/lib/barrelfish/capabilities.c b/lib/barrelfish/capabilities.c
index 4243a2c..a1162b4 100644 (file)
@@ -237,7 +237,7 @@ static errval_t cap_revoke_remote(capaddr_t src, uint8_t vbits)
 /**
  * \brief Retype a capability into one or more new capabilities
  *
- * \param dest_start    Location of first desination slot, which must be empty
+ * \param dest_start    Location of first destination slot, which must be empty
  * \param src           Source capability to retype
  * \param new_type      Kernel object type to retype to.
  * \param size_bits     Size of created objects as a power of two
diff --git a/lib/barrelfish/vspace/memobj_fixed.c b/lib/barrelfish/vspace/memobj_fixed.c
new file mode 100644 (file)
index 0000000..a8adb90
--- /dev/null
@@ -0,0 +1,367 @@
+/**
+ * \file
+ * \brief memory object of fixed type.
+ *
+ * The object is backed by a fixed number of equally sized frames and can be
+ * mapped into at most one vregion. The frames are tracked in a malloc'ed
+ * array and are mapped in lazily when a page fault occurs.
+ */
+
+/*
+ * Copyright (c) 2009, 2010, 2011, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#include <string.h>
+
+#include <barrelfish/barrelfish.h>
+#include "vspace_internal.h"
+
+/**
+ * \brief Map the memory object into a region
+ *
+ * \param memobj   The memory object
+ * \param region  The region to add
+ */
+static errval_t map_region(struct memobj *memobj, struct vregion *vregion)
+{
+    struct memobj_fixed *fixed = (struct memobj_fixed*) memobj;
+
+    /* make sure we are not overshooting the end */
+    assert(memobj->size >= (vregion->offset + vregion->size));
+
+    /* the vregion must start at one of the backed frames */
+    if (vregion->offset % fixed->chunk_size) {
+        return LIB_ERR_MEMOBJ_MAP_REGION;
+    }
+
+    if (fixed->vregion) {
+        return LIB_ERR_MEMOBJ_VREGION_ALREADY_MAPPED;
+    }
+
+    fixed->vregion = vregion;
+
+    return SYS_ERR_OK;
+}
+
+/**
+ * \brief Unmap the memory object from a region
+ *
+ * \param memobj   The memory object
+ * \param region  The region to remove
+ */
+static errval_t unmap_region(struct memobj *memobj, struct vregion *vregion)
+{
+    struct memobj_fixed *fixed = (struct memobj_fixed*) memobj;
+    errval_t err;
+
+    if (fixed->vregion != vregion) {
+        return LIB_ERR_VSPACE_VREGION_NOT_FOUND;
+    }
+
+    struct vspace *vspace = vregion_get_vspace(vregion);
+    struct pmap *pmap = vspace_get_pmap(vspace);
+
+    genvaddr_t vregion_base = vregion_get_base_addr(vregion);
+    genvaddr_t vregion_offset = vregion_get_offset(vregion);
+
+    err = pmap->f.unmap(pmap, vregion_base + vregion_offset, vregion->size,
+                        NULL);
+    if (err_is_fail(err)) {
+        return err_push(err, LIB_ERR_PMAP_UNMAP);
+    }
+
+    fixed->vregion = NULL;
+
+    return SYS_ERR_OK;
+}
+
+/**
+ * \brief Set the protection on a range
+ *
+ * \param memobj  The memory object
+ * \param region  The vregion to modify the mappings on
+ * \param offset  Offset into the memory object
+ * \param range   The range of space to set the protection for
+ * \param flags   The protection flags
+ */
+static errval_t protect(struct memobj *memobj,
+                        struct vregion *vregion,
+                        genvaddr_t offset,
+                        size_t range,
+                        vs_prot_flags_t flags)
+{
+    struct vspace *vspace = vregion_get_vspace(vregion);
+    struct pmap *pmap = vspace_get_pmap(vspace);
+    genvaddr_t base = vregion_get_base_addr(vregion);
+    genvaddr_t vregion_offset = vregion_get_offset(vregion);
+    errval_t err;
+
+    err = pmap->f.modify_flags(pmap, base + offset + vregion_offset, range,
+                    flags, &range);
+    if (err_is_fail(err)) {
+        return err_push(err, LIB_ERR_PMAP_MODIFY_FLAGS);
+    }
+
+    return SYS_ERR_OK;
+}
+
+/**
+ * \brief Pin a range
+ *
+ * \param memobj  The memory object
+ * \param region  The vregion to modify the state on
+ * \param offset  Offset into the memory object
+ * \param range   The range of space to pin
+ */
+static errval_t pin(struct memobj *memobj,
+                    struct vregion *vregion,
+                    genvaddr_t offset,
+                    size_t range)
+{
+    USER_PANIC("NYI");
+}
+
+/**
+ * \brief Unpin a range
+ *
+ * \param memobj  The memory object
+ * \param region  The vregion to modify the state on
+ * \param offset  Offset into the memory object
+ * \param range   The range of space to unpin
+ */
+static errval_t unpin(struct memobj *memobj,
+                      struct vregion *vregion,
+                      genvaddr_t offset,
+                      size_t range)
+{
+    USER_PANIC("NYI");
+}
+
+/**
+ * \brief Set a frame for an offset into the memobj
+ *
+ * \param memobj  The memory object
+ * \param offset  Offset into the memory object
+ * \param frame   The frame cap for the offset
+ * \param size    The size of frame cap
+ *
+ * Pagefault relies on frames inserted in order
+ */
+static errval_t fill(struct memobj *memobj,
+                     genvaddr_t offset,
+                     struct capref frame,
+                     size_t size)
+{
+    struct memobj_fixed *fixed = (struct memobj_fixed*) memobj;
+
+    if (offset % fixed->chunk_size || size != fixed->chunk_size) {
+        return LIB_ERR_MEMOBJ_FILL;
+    }
+
+    size_t slot = offset / fixed->chunk_size;
+    if (slot >= fixed->count) {
+        return LIB_ERR_MEMOBJ_WRONG_OFFSET;
+    }
+
+    if (!capref_is_null((fixed->frames[slot]))) {
+        return LIB_ERR_MEMOBJ_DUPLICATE_FILL;
+    }
+
+    fixed->frames[slot] = frame;
+
+    return SYS_ERR_OK;
+}
+
+/**
+ * \brief Unmap/remove one frame from the memobj
+ *
+ * \param memobj      The memory object
+ * \param offset      The offset of the frame to be removed
+ * \param ret_frame   Pointer to return the removed frame
+ * \param ret_offset  Pointer to return the vregion offset of the removed frame
+ */
+static errval_t unfill(struct memobj *memobj,
+                       genvaddr_t offset,
+                       struct capref *ret_frame,
+                       genvaddr_t *ret_offset)
+{
+    errval_t err;
+    struct memobj_fixed *fixed = (struct memobj_fixed*) memobj;
+
+    size_t slot = offset / fixed->chunk_size;
+    if (slot >= fixed->count || capref_is_null(fixed->frames[slot])) {
+        return LIB_ERR_MEMOBJ_UNFILL_TOO_HIGH_OFFSET;
+    }
+
+    if (fixed->vregion) {
+        struct vregion *vregion = fixed->vregion;
+        size_t retsize;
+        struct vspace *vspace = vregion_get_vspace(vregion);
+        struct pmap *pmap = vspace_get_pmap(vspace);
+        genvaddr_t vregion_base = vregion_get_base_addr(vregion);
+        genvaddr_t vregion_offset = vregion_get_offset(vregion);
+
+        err = pmap->f.unmap(pmap, vregion_base + vregion_offset + offset,
+                        fixed->chunk_size, &retsize);
+        if (err_is_fail(err)) {
+            return err_push(err, LIB_ERR_PMAP_UNMAP);
+        }
+
+        assert(retsize == fixed->chunk_size);
+        // Return the frame
+        if (ret_offset) {
+            *ret_offset = vregion_offset + offset;
+        }
+    }
+
+    if (ret_frame) {
+        *ret_frame = fixed->frames[slot];
+    }
+
+    fixed->frames[slot] = NULL_CAP;
+
+    return SYS_ERR_OK;
+}
+
+/**
+ * \brief Page fault handler
+ *
+ * \param memobj  The memory object
+ * \param region  The associated vregion
+ * \param offset  Offset into memory object of the page fault
+ * \param type    The fault type
+ *
+ * Locates the frame for the offset and maps it in.
+ * Relies on fill inserting frames in order.
+ */
+static errval_t pagefault(struct memobj *memobj,
+                          struct vregion *vregion,
+                          genvaddr_t offset,
+                          vm_fault_type_t type)
+{
+    errval_t err;
+    struct memobj_fixed *fixed = (struct memobj_fixed*) memobj;
+
+    assert(!(offset % fixed->chunk_size));
+
+    size_t slot = (vregion->offset + offset) / fixed->chunk_size;
+
+    if (slot >= fixed->count) {
+        return LIB_ERR_MEMOBJ_WRONG_OFFSET;
+    }
+
+    if (capref_is_null(fixed->frames[slot])) {
+        return LIB_ERR_MEMOBJ_PAGEFAULT_HANDLER;
+    }
+
+    struct vspace *vspace = vregion_get_vspace(vregion);
+    struct pmap *pmap = vspace_get_pmap(vspace);
+
+    genvaddr_t base = vregion_get_base_addr(vregion);
+    genvaddr_t vregion_offset = vregion_get_offset(vregion);
+    vregion_flags_t flags = vregion_get_flags(vregion);
+
+    err = pmap->f.map(pmap, base + vregion_offset + offset, fixed->frames[slot],
+                    0, fixed->chunk_size, flags, NULL, NULL);
+    if (err_is_fail(err)) {
+        return err_push(err, LIB_ERR_PMAP_MAP);
+    }
+
+    return SYS_ERR_OK;
+}
+
+/**
+ * \brief Free up some pages by placing them in the backing storage
+ *
+ * \param memobj      The memory object
+ * \param size        The amount of space to free up
+ * \param frames      An array of capref frames to return the freed pages
+ * \param num_frames  The number of frames returned
+ *
+ * This will affect all the vregions that are associated with the object
+ */
+static errval_t pager_free(struct memobj *memobj,
+                           size_t size,
+                           struct capref *frames,
+                           size_t num_frames)
+{
+    USER_PANIC("NYI");
+}
+
+/**
+ * \brief Initialize
+ *
+ * \param memobj  The memory object
+ * \param size    Size of the memory region
+ * \param flags   Memory object specific flags
+ *
+ * This object handles multiple frames.
+ * The frames are mapped in on demand.
+ */
+errval_t memobj_create_fixed(struct memobj_fixed *fixed,
+                             size_t size,
+                             memobj_flags_t flags,
+                             size_t count,
+                             size_t chunk_size)
+{
+    struct memobj *memobj = &fixed->m;
+
+    /* Generic portion */
+    memobj->f.map_region = map_region;
+    memobj->f.unmap_region = unmap_region;
+    memobj->f.protect = protect;
+    memobj->f.pin = pin;
+    memobj->f.unpin = unpin;
+    memobj->f.fill = fill;
+    memobj->f.unfill = unfill;
+    memobj->f.pagefault = pagefault;
+    memobj->f.pager_free = pager_free;
+
+
+    assert(size == count * chunk_size);
+    assert((chunk_size % BASE_PAGE_SIZE)==0);
+
+    memobj->size = size;
+    memobj->flags = flags;
+
+    memobj->type = MEMOBJ_FIXED;
+
+    /* specific portion */
+    fixed->count = count;
+    fixed->chunk_size = chunk_size;
+
+    fixed->frames = malloc(count * sizeof(struct capref));
+    if (!fixed->frames) {
+        return LIB_ERR_MALLOC_FAIL;
+    }
+    memset(fixed->frames, 0, count * sizeof(struct capref));
+
+    return SYS_ERR_OK;
+}
+
+/**
+ * \brief Destroy the object
+ *
+ */
+errval_t memobj_destroy_fixed(struct memobj *memobj)
+{
+    struct memobj_fixed *m = (struct memobj_fixed *) memobj;
+
+    errval_t err = SYS_ERR_OK;
+
+    /* destroy the associated vregion, if this memobj is still mapped */
+    if (m->vregion) {
+        err = vregion_destroy(m->vregion);
+    }
+    free(m->frames);
+    return err;
+}
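
A hypothetical usage sketch (not part of this commit): create a fixed memobj
tracking four page-sized frames and back slot 0 with a frame; frame_alloc() is
the standard libbarrelfish frame allocator, and the frame is only mapped on the
first page fault:

    #include <barrelfish/barrelfish.h>

    static errval_t fixed_example(struct memobj_fixed *fixed)
    {
        const size_t chunk = BASE_PAGE_SIZE;
        errval_t err = memobj_create_fixed(fixed, 4 * chunk, 0, 4, chunk);
        if (err_is_fail(err)) {
            return err;
        }

        struct capref frame;
        size_t retbytes;
        err = frame_alloc(&frame, chunk, &retbytes);
        if (err_is_fail(err)) {
            return err;
        }

        /* back slot 0 of the memobj with the freshly allocated frame */
        return fixed->m.f.fill(&fixed->m, 0, frame, chunk);
    }
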
diff --git a/lib/barrelfish/vspace/utils.c b/lib/barrelfish/vspace/utils.c
index 6e68bd3..08db89f 100644 (file)
@@ -101,7 +101,7 @@ errval_t vspace_map_anon_aligned(void **retaddr, struct memobj **ret_memobj,
     errval_t err;
     struct memobj_anon *memobj = NULL;
     struct vregion *vregion = NULL;
-    
+
     // Allocate space
     memobj = malloc(sizeof(struct memobj_anon));
     assert(memobj != NULL);
@@ -111,15 +111,15 @@ errval_t vspace_map_anon_aligned(void **retaddr, struct memobj **ret_memobj,
 
     err = vspace_map_anon_nomalloc(retaddr, memobj, vregion, size,
                                    retsize, flags, alignment);
-    
+
     if (err_is_fail(err)) {
         free(memobj);
         free(vregion);
     }
-    
+
     *ret_memobj = (struct memobj *)memobj;
     *ret_vregion = vregion;
-    
+
     return err;
 }
 
@@ -134,9 +134,9 @@ errval_t vspace_map_anon_attr(void **retaddr, struct memobj **ret_memobj,
                               size_t *retsize, vregion_flags_t flags)
 {
     errval_t err;
-    struct memobj_anon *memobj = NULL;
+    struct memobj_anon *memobj = NULL;
     struct vregion *vregion = NULL;
-    
+
     // Allocate space
     memobj = malloc(sizeof(struct memobj_anon));
     assert(memobj != NULL);
@@ -146,16 +146,16 @@ errval_t vspace_map_anon_attr(void **retaddr, struct memobj **ret_memobj,
 
     err = vspace_map_anon_nomalloc(retaddr, memobj, vregion, size,
                                    retsize, flags, 0);
-    
+
     if (err_is_fail(err))
     {
       free(memobj);
       free(vregion);
     }
-    
+
     *ret_memobj = (struct memobj *)memobj;
     *ret_vregion = vregion;
-    
+
     return err;
 }
 
diff --git a/lib/bulk_transfer/Hakefile b/lib/bulk_transfer/Hakefile
new file mode 100644 (file)
index 0000000..05430b5
--- /dev/null
@@ -0,0 +1,31 @@
+--------------------------------------------------------------------------
+-- Copyright (c) 2013, ETH Zurich.
+-- All rights reserved.
+--
+-- This file is distributed under the terms in the attached LICENSE file.
+-- If you do not find this file, copies can be found by writing to:
+-- ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
+--
+-- Hakefile for lib/bulk_transfer
+-- 
+--
+--------------------------------------------------------------------------
+
+[
+    build library { 
+        target = "bulk_transfer",
+        cFiles = [ "bulk_allocator.c",
+                   "bulk_transfer.c", 
+                   "bulk_buffer.c", 
+                   "bulk_channel.c",
+                   "bulk_endpoint.c", 
+                   "control_channel.c", 
+                   "bulk_pool.c", 
+                   "backends/net/bulk_net_endpoint.c",
+                   "backends/net/bulk_net_channel.c",
+                   "backends/local/control_channel.c"
+                 ],
+        flounderDefs = [ "bulk_ctrl" ],
+        flounderBindings = [ "bulk_ctrl" ]
+    }
+]
diff --git a/lib/bulk_transfer/backends/backend.c b/lib/bulk_transfer/backends/backend.c
new file mode 100644 (file)
index 0000000..aa094f8
--- /dev/null
@@ -0,0 +1,18 @@
+/**
+ * \file
+ * \brief Generic backend support for bulk data transfer
+ */
+
+/*
+ * Copyright (c) 2009, 2010, 2011, 2012, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#include <barrelfish/barrelfish.h>
+
+#include <bulk_transfer/bulk_transfer.h>
+
diff --git a/lib/bulk_transfer/backends/backend.h b/lib/bulk_transfer/backends/backend.h
new file mode 100644 (file)
index 0000000..716f468
--- /dev/null
@@ -0,0 +1,22 @@
+/**
+ * \file
+ * \brief Generic backend support for bulk data transfer
+ */
+
+/*
+ * Copyright (c) 2009, 2010, 2011, 2012, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+struct bulk_backend_vtbl {
+
+
+
+};
+
+
+
diff --git a/lib/bulk_transfer/backends/local/control_channel.c b/lib/bulk_transfer/backends/local/control_channel.c
new file mode 100644 (file)
index 0000000..50c4909
--- /dev/null
@@ -0,0 +1,341 @@
+/**
+ * \file
+ * \brief Unidirectional bulk data transfer via shared memory
+ */
+
+/*
+ * Copyright (c) 2013, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#include <barrelfish/event_queue.h>
+#include <string.h>
+
+#include <bulk_transfer/bulk_local.h>
+
+#include "../../helpers.h"
+
+
+struct local_channel {
+    struct bulk_channel *other;
+    struct event_queue   events;
+};
+
+struct local_event {
+    struct event_queue_node eqn;
+    struct bulk_channel    *channel;
+    void                  (*handler)(struct local_event *);
+
+    union {
+        struct {
+            errval_t err;
+        } bind_done;
+
+        struct {
+            struct bulk_pool *pool;
+        } pool_assigned;
+
+        struct {
+            struct bulk_buffer *buffer;
+            void               *meta;
+        } move_received;
+
+        struct {
+            struct bulk_buffer *buffer;
+            void               *meta;
+        } buffer_received;
+
+        struct {
+            struct bulk_buffer *buffer;
+            void               *meta;
+        } copy_received;
+
+        struct {
+            struct bulk_buffer *buffer;
+        } copy_released;
+    } params;
+};
+
+static errval_t impl_create(struct bulk_channel *channel);
+static errval_t impl_bind(struct bulk_channel *channel);
+static errval_t impl_assign_pool(struct bulk_channel *channel,
+                                 struct bulk_pool    *pool);
+static errval_t impl_move(struct bulk_channel *channel,
+                          struct bulk_buffer  *buffer,
+                          void                *meta,
+                          struct bulk_continuation cont);
+static errval_t impl_pass(struct bulk_channel *channel,
+                          struct bulk_buffer  *buffer,
+                          void                *meta,
+                          struct bulk_continuation cont);
+static errval_t impl_copy(struct bulk_channel *channel,
+                          struct bulk_buffer  *buffer,
+                          void                *meta,
+                          struct bulk_continuation cont);
+static errval_t impl_release(struct bulk_channel *channel,
+                             struct bulk_buffer  *buffer,
+                             struct bulk_continuation cont);
+
+static struct bulk_implementation implementation = {
+    .channel_create = impl_create,
+    .channel_bind =   impl_bind,
+    .assign_pool =    impl_assign_pool,
+    .move =           impl_move,
+    .pass =           impl_pass,
+    .copy =           impl_copy,
+    .release =        impl_release,
+};
+
+static errval_t init_channel(struct bulk_channel *channel)
+{
+    struct local_channel *l = malloc(sizeof(*l));
+    if (l == NULL) {
+        return BULK_TRANSFER_MEM;
+    }
+
+    channel->impl_data = l;
+    event_queue_init(&l->events, channel->waitset, EVENT_QUEUE_CONTINUOUS);
+    return SYS_ERR_OK;
+}
+
+static errval_t event_alloc(struct bulk_channel *channel,
+                            struct local_event **ev,
+                            void               (*handler)(struct local_event *),
+                            size_t               extra)
+{
+    *ev = malloc(sizeof(**ev) + extra);
+    if (*ev == NULL) {
+        return BULK_TRANSFER_MEM;
+    }
+
+    (*ev)->channel = channel;
+    (*ev)->handler = handler;
+    return SYS_ERR_OK;
+}
+
+static void event_handler(void *arg)
+{
+    struct local_event *lev = arg;
+    lev->handler(lev);
+    free(lev);
+}
+
+static void event_enqueue(struct local_event *lev)
+{
+    struct local_channel *local = lev->channel->impl_data;
+    event_queue_add(&local->events, &lev->eqn,
+            MKCLOSURE(event_handler, lev));
+}
+
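+/*
+ * Event lifecycle: the impl_* functions below allocate an event with
+ * event_alloc(), fill in its parameters and queue it on the peer's event
+ * queue; once the waitset dispatches it, event_handler() runs the callback
+ * and frees the allocation again.
+ */
+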
+static void event_bind_done(struct local_event *lev)
+{
+    lev->channel->callbacks->bind_done(
+            lev->channel,
+            lev->params.bind_done.err);
+}
+
+static void event_pool_assigned(struct local_event *lev)
+{
+    lev->channel->callbacks->pool_assigned(
+            lev->channel,
+            lev->params.pool_assigned.pool);
+}
+
+static void event_move_received(struct local_event *lev)
+{
+    lev->channel->callbacks->move_received(
+            lev->channel,
+            lev->params.move_received.buffer,
+            lev->params.move_received.meta);
+}
+
+static void event_buffer_received(struct local_event *lev)
+{
+    lev->channel->callbacks->buffer_received(
+            lev->channel,
+            lev->params.buffer_received.buffer,
+            lev->params.buffer_received.meta);
+}
+
+static void event_copy_received(struct local_event *lev)
+{
+    lev->channel->callbacks->copy_received(
+            lev->channel,
+            lev->params.copy_received.buffer,
+            lev->params.copy_received.meta);
+}
+
+static void event_copy_released(struct local_event *lev)
+{
+    lev->channel->callbacks->copy_released(
+            lev->channel,
+            lev->params.copy_released.buffer);
+}
+
+
+static errval_t impl_create(struct bulk_channel *channel)
+{
+    return init_channel(channel);
+}
+
+static errval_t impl_bind(struct bulk_channel *channel)
+{
+    errval_t err;
+    struct local_channel *l, *o_l;
+    struct bulk_local_endpoint *ep;
+    struct local_event *ev, *o_ev;
+
+    err = init_channel(channel);
+    if (err_is_fail(err)) {
+        return err;
+    }
+
+    ep = (struct bulk_local_endpoint *) channel->ep;
+    l = channel->impl_data;
+    l->other = ep->other_channel;
+    o_l = l->other->impl_data;
+    o_l->other = channel;
+
+    // Set channel parameters from other side
+    channel->role = bulk_role_other(l->other->role);
+    channel->direction = bulk_direction_other(l->other->direction);
+    channel->meta_size = l->other->meta_size;
+
+    // Allocate events
+    err = event_alloc(channel, &ev, event_bind_done, 0);
+    if (err_is_fail(err)) {
+        goto error;
+    }
+    err = event_alloc(l->other, &o_ev, event_bind_done, 0);
+    if (err_is_fail(err)) {
+        free(ev);
+        goto error;
+    }
+
+    // Now we're sure that we can succeed
+    channel->state = BULK_STATE_CONNECTED;
+    l->other->state = BULK_STATE_CONNECTED;
+
+    // Trigger first event
+    ev->params.bind_done.err = SYS_ERR_OK;
+    event_enqueue(ev);
+
+    // Trigger second event
+    o_ev->params.bind_done.err = SYS_ERR_OK;
+    event_enqueue(o_ev);
+    return SYS_ERR_OK;
+
+error:
+    free(l);
+    channel->impl_data = NULL;
+    return err;
+}
+
+static errval_t impl_assign_pool(struct bulk_channel *channel,
+                                 struct bulk_pool    *pool)
+{
+    struct local_channel *l = channel->impl_data;
+    struct local_event *ev;
+    errval_t err;
+
+    err = event_alloc(l->other, &ev, event_pool_assigned, 0);
+    if (!err_is_fail(err)) {
+        ev->params.pool_assigned.pool = pool;
+        event_enqueue(ev);
+    }
+    return err;
+}
+
+static errval_t impl_move(struct bulk_channel *channel,
+                          struct bulk_buffer  *buffer,
+                          void                *meta,
+                          struct bulk_continuation cont)
+{
+    struct local_channel *l = channel->impl_data;
+    struct local_event *ev;
+    void *m;
+    errval_t err;
+
+    err = event_alloc(l->other, &ev, event_move_received, channel->meta_size);
+    if (!err_is_fail(err)) {
+        m = ev + 1;
+        memcpy(m, meta, channel->meta_size);
+        ev->params.move_received.buffer = buffer;
+        ev->params.move_received.meta = m;
+        event_enqueue(ev);
+    }
+    return err;
+}
+
+static errval_t impl_pass(struct bulk_channel *channel,
+                          struct bulk_buffer  *buffer,
+                          void                *meta,
+                          struct bulk_continuation cont)
+{
+    struct local_channel *l = channel->impl_data;
+    struct local_event *ev;
+    void *m;
+    errval_t err;
+
+    err = event_alloc(l->other, &ev, event_buffer_received, channel->meta_size);
+    if (!err_is_fail(err)) {
+        m = ev + 1;
+        memcpy(m, meta, channel->meta_size);
+        ev->params.buffer_received.buffer = buffer;
+        ev->params.buffer_received.meta = m;
+        event_enqueue(ev);
+    }
+    return err;
+}
+
+static errval_t impl_copy(struct bulk_channel *channel,
+                          struct bulk_buffer  *buffer,
+                          void                *meta,
+                          struct bulk_continuation cont)
+{
+    struct local_channel *l = channel->impl_data;
+    struct local_event *ev;
+    void *m;
+    errval_t err;
+
+    err = event_alloc(l->other, &ev, event_copy_received, channel->meta_size);
+    if (!err_is_fail(err)) {
+        m = ev + 1;
+        memcpy(m, meta, channel->meta_size);
+        ev->params.copy_received.buffer = buffer;
+        ev->params.copy_received.meta = m;
+        event_enqueue(ev);
+    }
+    return err;
+
+}
+
+static errval_t impl_release(struct bulk_channel *channel,
+                             struct bulk_buffer  *buffer,
+                             struct bulk_continuation cont)
+{
+    struct local_channel *l = channel->impl_data;
+    struct local_event *ev;
+    errval_t err;
+
+    err = event_alloc(l->other, &ev, event_copy_released, 0);
+    if (!err_is_fail(err)) {
+        ev->params.copy_released.buffer = buffer;
+        event_enqueue(ev);
+    }
+    return err;
+}
+
+
+void bulk_local_init_endpoint(struct bulk_local_endpoint *endpoint,
+                              struct bulk_channel        *other_channel)
+{
+    endpoint->generic.f = &implementation;
+    endpoint->other_channel = other_channel;
+}
+
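+/*
+ * Usage sketch (assumptions: `cbs`, `setup` and `params` are filled in as
+ * for any other backend; the binding side points its endpoint at the
+ * creator's channel):
+ *
+ *   struct bulk_local_endpoint ep, other_ep;
+ *   struct bulk_channel channel, other_channel;
+ *
+ *   bulk_local_init_endpoint(&ep, NULL);
+ *   bulk_channel_create(&channel, &ep.generic, &cbs, &setup);
+ *
+ *   bulk_local_init_endpoint(&other_ep, &channel);
+ *   bulk_channel_bind(&other_channel, &other_ep.generic, &cbs, &params);
+ */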
diff --git a/lib/bulk_transfer/backends/net/bulk_net_backend.h b/lib/bulk_transfer/backends/net/bulk_net_backend.h
new file mode 100644 (file)
index 0000000..716f468
--- /dev/null
@@ -0,0 +1,22 @@
+/**
+ * \file
+ * \brief Internal declarations of the bulk transfer network backend
+ */
+
+/*
+ * Copyright (c) 2009, 2010, 2011, 2012, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+struct bulk_backend_vtbl {
+    /* TODO: function pointers for the backend operations
+     * (channel_create, channel_bind, assign_pool, move, pass, copy,
+     * release) */
+};
+
diff --git a/lib/bulk_transfer/backends/net/bulk_net_buffer.c b/lib/bulk_transfer/backends/net/bulk_net_buffer.c
new file mode 100644 (file)
index 0000000..cb4884e
--- /dev/null
@@ -0,0 +1,20 @@
+/**
+ * \file
+ * \brief Buffer handling for the bulk transfer network backend
+ */
+
+/*
+ * Copyright (c) 2009, 2010, 2011, 2012, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#include <barrelfish/barrelfish.h>
+
+#include <bulk_transfer/bulk_transfer.h>
+#include <bulk_transfer/bulk_net.h>
+
+
diff --git a/lib/bulk_transfer/backends/net/bulk_net_channel.c b/lib/bulk_transfer/backends/net/bulk_net_channel.c
new file mode 100644 (file)
index 0000000..b09f9df
--- /dev/null
@@ -0,0 +1,150 @@
+/**
+ * \file
+ * \brief Channel operations of the bulk transfer network backend
+ */
+
+/*
+ * Copyright (c) 2009, 2010, 2011, 2012, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#include <barrelfish/barrelfish.h>
+
+#include <bulk_transfer/bulk_transfer.h>
+#include <bulk_transfer/bulk_net.h>
+
+
+errval_t bulk_net_channel_create(struct bulk_channel *channel)
+{
+    assert(!"NYI: bulk_net_channel_create");
+
+    /*
+     * TODO:
+     * - initialize the channel struct
+     * - initialize the network queues
+     *
+     * bool lwip_init(const char *card_name, uint64_t queueid)
+     * -> does this have to be done once per domain or for every channel?
+     *
+     *     struct tcp_pcb *pcb = tcp_new();
+     *     // err_t e = tcp_bind(pcb, IP_ADDR_ANY, (HTTP_PORT + disp_get_core_id()));
+     *     err_t e = tcp_bind(pcb, IP_ADDR_ANY, HTTP_PORT);
+     *     assert(e == ERR_OK);
+     *     pcb = tcp_listen(pcb);
+     *     assert(pcb != NULL);
+     *     tcp_arg(pcb, pcb);
+     *     tcp_accept(pcb, http_server_accept);
+     * - start listening on the queue
+     */
+    return SYS_ERR_OK;
+}
+
+errval_t bulk_net_channel_bind(struct bulk_channel *channel)
+{
+    assert(!"NYI: bulk_net_channel_bind");
+
+    /*
+     * TODO:
+     * - initialize the channel struct
+     * - initialize local queues
+     * struct tcp_pcb *pcb = tcp_new();
+     * tcp_connect();
+     * tcp_write...
+     * - connect to the network server
+     */
+
+    return SYS_ERR_OK;
+}
+
+errval_t bulk_net_channel_assign_pool(struct bulk_channel *channel,
+                                      struct bulk_pool    *pool)
+{
+    assert(!"NYI: bulk_net_channel_assign_pool");
+
+    /*
+     * TODO:
+     *  - send control message with pool information to the other side
+     */
+
+    return SYS_ERR_OK;
+}
+
+errval_t bulk_net_channel_move(struct bulk_channel      *channel,
+                               struct bulk_buffer       *buffer,
+                               void                     *meta,
+                               struct bulk_continuation  cont)
+{
+    assert(!"NYI: bulk_net_channel_move");
+
+    /*
+     * TODO:
+     *  - prepend meta data / header
+     *  - enqueue buffer on send queue
+     *  - register sent callback
+     *  - if we are the owner, set the buffer to read/write
+     *  - free up buffer and hand it back to the pool
+     */
+
+    return SYS_ERR_OK;
+}
+
+/**
+ * Passing buffer ownership is a no-op over a network hop; the buffer is
+ * simply released locally.
+ */
+errval_t bulk_net_channel_pass(struct bulk_channel *channel,
+                               struct bulk_buffer  *buffer,
+                               void                *meta,
+                               struct bulk_continuation cont)
+{
+    /* this is a no-op over the network hop,
+     * just do a buffer_release at this point */
+    return bulk_net_channel_release(channel, buffer, cont);
+}
+
+
+errval_t bulk_net_channel_copy(struct bulk_channel *channel,
+                               struct bulk_buffer  *buffer,
+                               void                *meta,
+                               struct bulk_continuation cont)
+{
+    assert(!"NYI: bulk_net_channel_copy");
+
+    /*
+     * TODO:
+     *  - prepend meta data / header
+     *  - enqueue buffer on send queue
+     *  - register sent callback
+     *  - if we are the owner, set the buffer to read/write
+     */
+
+    return SYS_ERR_OK;
+}
+
+errval_t bulk_net_channel_release(struct bulk_channel *channel,
+                                  struct bulk_buffer  *buffer,
+                                  struct bulk_continuation cont)
+{
+    /* this is a no-op over the network hop */
+    return SYS_ERR_OK;
+}
+
+struct bulk_implementation bulk_net_implementation = {
+    .channel_create = bulk_net_channel_create,
+    .channel_bind   = bulk_net_channel_bind,
+    .assign_pool    = bulk_net_channel_assign_pool,
+    .move           = bulk_net_channel_move,
+    .pass           = bulk_net_channel_pass,
+    .copy           = bulk_net_channel_copy,
+    .release        = bulk_net_channel_release
+};
+
+struct bulk_implementation *bulk_net_get_implementation(void)
+{
+    return &bulk_net_implementation;
+}
diff --git a/lib/bulk_transfer/backends/net/bulk_net_control.c b/lib/bulk_transfer/backends/net/bulk_net_control.c
new file mode 100644 (file)
index 0000000..cb4884e
--- /dev/null
@@ -0,0 +1,20 @@
+/**
+ * \file
+ * \brief Control channel handling for the bulk transfer network backend
+ */
+
+/*
+ * Copyright (c) 2009, 2010, 2011, 2012, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#include <barrelfish/barrelfish.h>
+
+#include <bulk_transfer/bulk_transfer.h>
+#include <bulk_transfer/bulk_net.h>
+
+
diff --git a/lib/bulk_transfer/backends/net/bulk_net_endpoint.c b/lib/bulk_transfer/backends/net/bulk_net_endpoint.c
new file mode 100644 (file)
index 0000000..d83563c
--- /dev/null
@@ -0,0 +1,98 @@
+/**
+ * \file
+ * \brief Endpoint creation and destruction for the bulk transfer network backend
+ */
+
+/*
+ * Copyright (c) 2009, 2010, 2011, 2012, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#include <barrelfish/barrelfish.h>
+
+#include <bulk_transfer/bulk_transfer.h>
+#include <bulk_transfer/bulk_net.h>
+
+
+/**
+ * Creates a new bulk endpoint which uses the network backend
+ *
+ * @param ep_desc   memory location to create the endpoint in
+ * @param setup     the setup parameters for the endpoint
+ *
+ * This function is intended to be used by the creator.
+ */
+errval_t bulk_net_ep_create(struct bulk_net_endpoint_descriptor *ep_desc,
+                            struct bulk_net_ep_setup            *setup)
+{
+    assert(ep_desc);
+
+    ep_desc->ip = setup->ip;
+    ep_desc->port = setup->port;
+
+    ep_desc->ep_generic.f = bulk_net_get_implementation();
+
+
+    /*
+     * XXX: Do we want to initialize the network queues and the
+     *      tcp connection at this point ?
+     *      Alternatively just prepare it and finalize it when the channel
+     *      gets created,
+     *      - RA
+     */
+    return SYS_ERR_OK;
+}
+
+/**
+ * Destroys the given endpoint
+ *
+ * @param   ep_desc the endpoint to be destroyed
+ */
+errval_t bulk_net_ep_destroy(struct bulk_net_endpoint_descriptor *ep_desc)
+{
+    assert(ep_desc);
+
+    /*
+     * TODO: free up potential resources e.g. tcp, rx/tx queues etc
+     *       only if resources are created upon endpoint create
+     *       otherwise this is a no-op
+     */
+
+
+    return SYS_ERR_OK;
+}
+
+/**
+ * Explicitly creates a specific remote endpoint
+ *
+ * @param ep_desc   memory location to create the endpoint in
+ * @param ip        the ip of the server machine
+ * @param port      the port the other side listens on
+ *
+ * This is used to explicitly specify an endpoint to connect to. Eventually,
+ * a nameservice-like lookup should return the correct endpoint descriptor.
+ */
+errval_t bulk_net_ep_create_remote(struct bulk_net_endpoint_descriptor *ep_desc,
+                                   struct ip_addr ip, uint16_t port)
+{
+    assert(ep_desc);
+
+
+    ep_desc->ip = ip;
+    ep_desc->port = port;
+
+    ep_desc->ep_generic.f = bulk_net_get_implementation();
+
+    /*
+     * this endpoint is used to specify the remote endpoint to bind to.
+     * no need to create a listener for new connections
+     *
+     * potential receiving queues are created upon channel bind
+     */
+
+    return SYS_ERR_OK;
+}
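+
+/*
+ * Usage sketch (a minimal example; `server_ip` and `port` are assumed to
+ * identify a listening peer, `cbs` and `params` as for any other backend):
+ *
+ *   struct bulk_net_endpoint_descriptor remote_ep;
+ *   struct bulk_channel channel;
+ *
+ *   bulk_net_ep_create_remote(&remote_ep, server_ip, port);
+ *   bulk_channel_bind(&channel, &remote_ep.ep_generic, &cbs, &params);
+ */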
diff --git a/lib/bulk_transfer/backends/net/bulk_net_pool.c b/lib/bulk_transfer/backends/net/bulk_net_pool.c
new file mode 100644 (file)
index 0000000..cb4884e
--- /dev/null
@@ -0,0 +1,20 @@
+/**
+ * \file
+ * \brief Pool handling for the bulk transfer network backend
+ */
+
+/*
+ * Copyright (c) 2009, 2010, 2011, 2012, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#include <barrelfish/barrelfish.h>
+
+#include <bulk_transfer/bulk_transfer.h>
+#include <bulk_transfer/bulk_net.h>
+
+
diff --git a/lib/bulk_transfer/backends/sm/control_channel.c b/lib/bulk_transfer/backends/sm/control_channel.c
new file mode 100644 (file)
index 0000000..aa094f8
--- /dev/null
@@ -0,0 +1,18 @@
+/**
+ * \file
+ * \brief Unidirectional bulk data transfer via shared memory
+ */
+
+/*
+ * Copyright (c) 2009, 2010, 2011, 2012, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#include <barrelfish/barrelfish.h>
+
+#include <bulk_transfer/bulk_transfer.h>
+
diff --git a/lib/bulk_transfer/bulk_allocator.c b/lib/bulk_transfer/bulk_allocator.c
new file mode 100644 (file)
index 0000000..6f890f0
--- /dev/null
@@ -0,0 +1,300 @@
+/**
+ * \file
+ * \brief Allocator for bulk transfer pools and buffers
+ */
+
+/*
+ * Copyright (c) 2009, 2010, 2011, 2012, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+#include <string.h>
+
+#include <barrelfish/barrelfish.h>
+
+#include <bulk_transfer/bulk_transfer.h>
+#include <bulk_transfer/bulk_allocator.h>
+
+#include "bulk_pool.h"
+#include "bulk_buffer.h"
+
+static uint32_t local_id = 0;
+
+static errval_t bulk_alloc_set_pool_id(struct bulk_pool* pool)
+{
+    /*
+     * todo: get a system wide unique domain identifier
+     *       get a domain local sequence id
+     */
+    pool->id.machine = 0;
+    pool->id.dom = disp_get_domain_id();
+    pool->id.local = local_id++;
+
+    return SYS_ERR_OK;
+}
+
+/**
+ * initializes a new bulk allocator with a pool and allocates memory for it.
+ *
+ * @param alloc         pointer to an unused allocator handle
+ * @param buffer_count  the number of buffers to allocate
+ * @param buffer_size   the size of a single buffer
+ * @param constraints   memory requirements for this pool or NULL if none
+ */
+errval_t bulk_alloc_init(struct bulk_allocator *alloc,
+                         size_t buffer_count,
+                         size_t buffer_size,
+                         struct bulk_pool_constraints *constraints)
+{
+    assert(alloc);
+
+    if (!bulk_buffer_check_size(buffer_size)) {
+        return BULK_TRANSFER_ALLOC_BUFFER_SIZE;
+    }
+
+    if (buffer_count == 0) {
+        return BULK_TRANSFER_ALLOC_BUFFER_COUNT;
+    }
+
+    errval_t err;
+
+    size_t pool_size = buffer_size * buffer_count;
+
+    /* allocate memory for the pool struct */
+    alloc->pool = malloc(sizeof(struct bulk_pool_internal));
+    if (alloc->pool == NULL) {
+        return BULK_TRANSFER_MEM;
+    }
+    memset(alloc->pool, 0, sizeof(struct bulk_pool_internal));
+
+    bulk_alloc_set_pool_id(alloc->pool);
+
+    alloc->pool->buffer_size = buffer_size;
+    alloc->pool->num_buffers = buffer_count;
+    alloc->pool->trust = BULK_TRUST_UNINITIALIZED;
+
+    err = bulk_pool_init_bufs(alloc->pool);
+    if (err_is_fail(err)) {
+        return err;
+    }
+
+    /* reserve a virtual memory range for the pool */
+
+    /* TODO: how to set the physical alignment constraints ? */
+
+    /* set ram affinity */
+    uint64_t minbase, maxlimit;
+    if ((constraints != NULL) && (constraints->range_min != 0)
+                    && (constraints->range_max + 1) != 0) {
+        ram_get_affinity(&minbase, &maxlimit);
+        ram_set_affinity(constraints->range_min, constraints->range_max);
+    }
+    size_t ret_size;
+    err = frame_alloc(&alloc->pool->pool_cap, pool_size, &ret_size);
+    if (err_is_fail(err)) {
+        return err_push(err, LIB_ERR_FRAME_ALLOC);
+    }
+
+    /* restore ram affinity */
+    if ((constraints != NULL) && (constraints->range_min != 0)
+                    && (constraints->range_max + 1) != 0) {
+        ram_set_affinity(minbase, maxlimit);
+    }
+
+    /*
+     * XXX: notice that at this point the buffer is mapped read write with
+     *      the pool capability. If a backend needs one unique cap per buffer,
+     *      it has to do this while assigning the buffer to the channel.
+     */
+    err = bulk_pool_map(alloc->pool);
+    if (err_is_fail(err)) {
+        /* TODO: error handling */
+        return err;
+    }
+
+    alloc->free_buffers = NULL;
+    for (size_t i = 0; i < buffer_count; ++i) {
+        struct bulk_buffer *buf = alloc->pool->buffers[i];
+        /* setup the management structure for the free list */
+        struct bulk_buffer_mng *le = malloc(sizeof(struct bulk_buffer_mng));
+        if (le == NULL) {
+            return BULK_TRANSFER_MEM;
+        }
+        le->buffer = buf;
+        le->next = alloc->free_buffers;
+        alloc->free_buffers = le;
+    }
+    alloc->num_free = buffer_count;
+
+    return SYS_ERR_OK;
+}
+
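+/*
+ * Usage sketch (a minimal example, error handling elided): create a pool of
+ * 32 buffers of 4 KiB each and grab a buffer:
+ *
+ *   struct bulk_allocator alloc;
+ *   errval_t err = bulk_alloc_init(&alloc, 32, 4096, NULL);
+ *   assert(err_is_ok(err));
+ *
+ *   struct bulk_buffer *buf = bulk_alloc_new_buffer(&alloc);
+ *   // ... use buf, e.g. hand it over a channel ...
+ *   err = bulk_alloc_return_buffer(&alloc, buf);
+ */
+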
+/**
+ * creates a new allocator based on the supplied capability. It creates as many
+ * buffers as possible of size buffer_size that fit into the capability.
+ *
+ * @param alloc         an unused allocator handle
+ * @param buffer_size   the size of a single buffer
+ * @param frame         capability for backing the bulk pool
+ */
+errval_t bulk_alloc_init_from_cap(struct bulk_allocator *alloc,
+                                  size_t buffer_size,
+                                  struct capref *frame)
+{
+    assert(!"NYI: bulk_alloc_init_from_cap");
+    return SYS_ERR_OK;
+}
+
+/**
+ * Frees up the bulk allocator and its pool.
+ *
+ * @param alloc handle to a bulk allocator to be freed
+ */
+errval_t bulk_alloc_free(struct bulk_allocator *alloc)
+{
+
+    /*
+     * XXX: this function is not yet implemented; the code below only runs
+     *      internal mapping tests and ends in an NYI assert.
+     */
+    debug_printf("PERFORMING INTERNAL TESTS...\n");
+    assert(alloc->num_free == 1);
+
+    struct bulk_buffer *buf = bulk_alloc_new_buffer(alloc);
+
+    buf = alloc->pool->buffers[4];
+    assert(buf);
+
+    errval_t err;
+    volatile char tmp;
+    char *e;
+
+    debug_printf("ABOUT TO UNMAP\n");
+    err = bulk_buffer_unmap(buf);
+    if (err_is_fail(err)) {
+        debug_printf("BUFFER UNMAP FAILED!\n");
+    }
+    debug_printf("ABOUT TO MAP AGAIN\n");
+    err = bulk_buffer_map(buf);
+    if (err_is_fail(err)) {
+        debug_printf("BUFFER MAP FAILED\n");
+    }
+    debug_printf("ABOUT CHECKING....\n");
+    e = buf->address;
+    for (int i = 0; i < 4096; ++i) {
+        e[i] = 1;
+    }
+
+
+#if 0
+    debug_printf("ABOUT TO UNMAP\n");
+    err = bulk_buffer_unmap(buf);
+    if (err_is_fail(err)) {
+        debug_printf("BUFFER UNMAP FAILED!\n");
+    }
+
+    debug_printf("ABOUT TO CRASH....\n");
+    e = buf->address;
+    for (int i = 0; i < 4096; ++i) {
+        e[i] = 1;
+    }
+#endif
+    debug_printf("ABOUT TO CHANGE STATE:\n");
+    err = bulk_buffer_change_state(buf, BULK_BUFFER_READ_ONLY);
+    if (err_is_fail(err)) {
+        debug_printf("change state failed");
+    }
+    debug_printf("ABOUT TO READ\n");
+
+    for (int i = 0; i < 4096; ++i) {
+        tmp = e[i];
+    }
+    debug_printf("ABOUT TO WRITE: \n");
+    for (int i = 0; i < 4096; ++i) {
+        e[i] = 1;
+    }
+
+    debug_printf("ABOUT TO CHANGE STATE:\n");
+    err = bulk_buffer_change_state(buf, BULK_BUFFER_READ_ONLY);
+    if (err_is_fail(err)) {
+        debug_printf("change state failed");
+    }
+    debug_printf("ABOUT TO READ\n");
+
+    for (int i = 0; i < 4096; ++i) {
+        tmp = e[i];
+    }
+    debug_printf("ABOUT TO WRITE: \n");
+    for (int i = 0; i < 4096; ++i) {
+        e[i] = 1;
+    }
+    assert(!"NYI: bulk_alloc_init");
+    return SYS_ERR_OK;
+
+}
+
+/**
+ * Gets a new bulk buffer from the allocator.
+ *
+ * @param   alloc   the allocator handle to allocate the buffer from
+ *
+ * @return  pointer to a bulk_buffer on success
+ *          NULL if there are no buffers left
+ *
+ */
+struct bulk_buffer *bulk_alloc_new_buffer(struct bulk_allocator *alloc)
+{
+    assert(alloc);
+
+    if (alloc->num_free == 0) {
+        return NULL;
+    }
+
+    struct bulk_buffer *buf = alloc->free_buffers->buffer;
+    struct bulk_buffer_mng *bm = alloc->free_buffers;
+
+    alloc->free_buffers = alloc->free_buffers->next;
+    alloc->num_free--;
+    free(bm);
+
+    /*
+     * XXX: do we want to have a special state for being "not allocated"
+     *      i.e. maybe set the state to invalid, even though mapped r/w?
+     */
+    assert(buf->state == BULK_BUFFER_READ_WRITE);
+
+    return buf;
+}
+
+/**
+ * returns a buffer back to the allocator. The pools must match.
+ *
+ * @param alloc     the allocator to hand the buffer back
+ * @param buffer    the buffer to hand back to the allocator
+ */
+errval_t bulk_alloc_return_buffer(struct bulk_allocator *alloc,
+                                  struct bulk_buffer *buffer)
+{
+    assert(alloc);
+
+    if (buffer == NULL || buffer->state != BULK_BUFFER_READ_WRITE) {
+        /* only read-write i.e. owned buffers can be added back */
+        return BULK_TRANSFER_BUFFER_STATE;
+    }
+
+    if (buffer->pool != alloc->pool) {
+        /* buffers can only be returned to the allocator of their own pool */
+        return BULK_TRANSFER_POOL_INVALD;
+    }
+
+    struct bulk_buffer_mng *bm = malloc(sizeof(struct bulk_buffer_mng));
+    if (!bm) {
+        return BULK_TRANSFER_MEM;
+    }
+
+    bm->buffer = buffer;
+    bm->next = alloc->free_buffers;
+    alloc->free_buffers = bm;
+    alloc->num_free++;
+
+    return SYS_ERR_OK;
+}
diff --git a/lib/bulk_transfer/bulk_buffer.c b/lib/bulk_transfer/bulk_buffer.c
new file mode 100644 (file)
index 0000000..bec7876
--- /dev/null
@@ -0,0 +1,260 @@
+/**
+ * \file
+ * \brief Buffer mapping and state management for bulk data transfer
+ */
+
+/*
+ * Copyright (c) 2014 ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#include <barrelfish/barrelfish.h>
+
+#include <bulk_transfer/bulk_transfer.h>
+
+#include "bulk_pool.h"
+#include "bulk_buffer.h"
+
+/**
+ * does the mapping of the buffer by filling the backing memobj with the frame
+ * and faulting on it. This is a no-op in fully trusted mode.
+ *
+ * @param buf   the buffer to map
+ *
+ * Note: The new state of the buffer as well as the backing capability must be
+ *       set in the buffer struct.
+ */
+errval_t bulk_buffer_map(struct bulk_buffer *buffer)
+{
+    assert(buffer);
+
+    errval_t err;
+    struct bulk_pool_internal *pool_int;
+
+    if (buffer->pool->trust == BULK_TRUST_FULL) {
+        /* mapping in trusted case is a no-op */
+        return SYS_ERR_OK;
+    }
+
+    /* sanity check */
+    if (buffer->state == BULK_BUFFER_INVALID || capref_is_null(buffer->cap)) {
+        return BULK_TRANSFER_BUFFER_INVALID;
+    }
+
+    pool_int = (struct bulk_pool_internal *) buffer->pool;
+
+    struct vregion *vregion = pool_int->vregion;
+    struct memobj *memobj = vregion_get_memobj(vregion);
+
+    size_t offset = (lvaddr_t) buffer->address - buffer->pool->base_address;
+    size_t size = buffer->pool->buffer_size;
+
+    /* the capability was revoked thus we have to insert it again */
+    err = memobj->f.fill(memobj, offset, buffer->cap, size);
+    if (err_is_fail(err)) {
+        /* TODO: error handling */
+        return err_push(err, LIB_ERR_MEMOBJ_FILL);
+    }
+
+    /* there is a frame cap that backs the buffer, we can do the mapping */
+    err = memobj->f.pagefault(memobj, vregion, offset, 0);
+    if (err_is_fail(err)) {
+        return err_push(err, LIB_ERR_MEMOBJ_PAGEFAULT_HANDLER);
+    }
+
+    if (buffer->state != BULK_BUFFER_READ_WRITE) {
+        err = memobj->f.protect(memobj, vregion, offset, size,
+                        VREGION_FLAGS_READ);
+        if (err_is_fail(err)) {
+            return err_push(err, LIB_ERR_MEMOBJ_PROTECT);
+        }
+    }
+
+    return SYS_ERR_OK;
+}
+
+/**
+ * does the unmapping of a single buffer according to the trust level,
+ * - if the channel is fully trusted, this results in a no-op.
+ * - otherwise, the mapping is removed
+ *
+ * This function does not revoke or delete any capabilities
+ *
+ * @param buf   the buffer to unmap
+ */
+errval_t bulk_buffer_unmap(struct bulk_buffer *buffer)
+{
+    assert(buffer);
+
+    errval_t err;
+    struct bulk_pool_internal *pool_int;
+
+    /* if there is a full trusted channel, then this is a no-op */
+    if (buffer->pool->trust == BULK_TRUST_FULL) {
+        return SYS_ERR_OK;
+    }
+
+    pool_int = (struct bulk_pool_internal *) buffer->pool;
+
+    struct vregion *vregion = pool_int->vregion;
+    struct memobj *memobj = vregion_get_memobj(vregion);
+
+    size_t offset = (lvaddr_t) buffer->address - buffer->pool->base_address;
+
+    struct capref buf_cap;
+    genvaddr_t ret_offset;
+    /*
+     * we have to remove the capability from the memory object; it will be
+     * revoked by the other side anyway. This also unmaps the frame.
+     */
+    err = memobj->f.unfill(memobj, offset, &buf_cap, &ret_offset);
+    if (err_is_fail(err)) {
+        /* TODO: ERROR handling */
+        return err;
+    }
+    if (ret_offset != offset || buf_cap.slot != buffer->cap.slot) {
+        /* there is something wrong... */
+        /* TODO: error handling */
+    }
+
+    /* TODO: do we want to update the state of the buffer to INVALID here ? */
+
+    return SYS_ERR_OK;
+}
+
+/**
+ * changes the protection bits of an already mapped buffer according to the
+ * current buffer state.
+ *
+ * @param buffer    the buffer to modify the protection
+ */
+static errval_t bulk_buffer_remap(struct bulk_buffer *buffer)
+{
+    assert(buffer);
+
+    errval_t err;
+
+    struct vspace *vspace = get_current_vspace();
+    struct vregion *vregion = vspace_get_region(vspace,
+                    (void *) buffer->address);
+    struct memobj *memobj = vregion_get_memobj(vregion);
+
+    size_t offset = (lvaddr_t) buffer->address - buffer->pool->base_address;
+    size_t size = buffer->pool->buffer_size;
+
+    vs_prot_flags_t flags = VREGION_FLAGS_READ;
+    if (buffer->state == BULK_BUFFER_READ_WRITE) {
+        flags = VREGION_FLAGS_READ_WRITE;
+    }
+    err = memobj->f.protect(memobj, vregion, offset, size, flags);
+    if (err_is_fail(err)) {
+        return err_push(err, LIB_ERR_MEMOBJ_PROTECT);
+    }
+
+    return SYS_ERR_OK;
+}
+
+/**
+ * checks if the buffer is owned by the calling domain
+ *
+ * @param buf   buffer to check for ownership
+ */
+uint8_t bulk_buffer_is_owner(struct bulk_buffer *buffer)
+{
+    return ((buffer->state == BULK_BUFFER_RO_OWNED)
+                    || buffer->state == BULK_BUFFER_READ_WRITE);
+}
+
+/**
+ * checks if the buffer is a read only copy
+ *
+ * @param buffer    the buffer to check
+ *
+ * @return true     if the buffer is a read only copy
+ *         false    if the buffer is not a copy
+ */
+uint8_t bulk_buffer_is_copy(struct bulk_buffer *buffer)
+{
+    return ((buffer->state == BULK_BUFFER_RO_OWNED)
+                    || buffer->state == BULK_BUFFER_READ_ONLY);
+}
+
+/**
+ * checks if the buffer is valid
+ *
+ * @param buffer    the buffer to check
+ *
+ * @return true     if the buffer is valid
+ *         false    if the buffer is not valid
+ */
+uint8_t bulk_buffer_is_valid(struct bulk_buffer *buffer)
+{
+    return !(buffer->state == BULK_BUFFER_INVALID);
+}
+
+/**
+ * changes the state of the buffer and adjusts the mappings accordingly
+ *
+ * @param buffer    the buffer to change the state
+ * @param state     new state to transition the buffer to
+ */
+errval_t bulk_buffer_change_state(struct bulk_buffer *buffer,
+                                  enum bulk_buffer_state new_state)
+{
+    assert(buffer);
+
+    errval_t err = SYS_ERR_OK;
+
+    enum bulk_buffer_state st = buffer->state;
+
+    if (st == new_state) {
+        /* no change in state */
+        return SYS_ERR_OK;
+    }
+
+    buffer->state = new_state;
+
+    if (st == BULK_BUFFER_READ_WRITE) {
+        switch (new_state) {
+            case BULK_BUFFER_RO_OWNED:
+            case BULK_BUFFER_READ_ONLY:
+                err = bulk_buffer_remap(buffer);
+                break;
+            case BULK_BUFFER_INVALID:
+                err = bulk_buffer_unmap(buffer);
+                break;
+            default:
+                /* NO-OP */
+                break;
+        }
+    } else if (st == BULK_BUFFER_READ_ONLY || st == BULK_BUFFER_RO_OWNED) {
+        switch (new_state) {
+            case BULK_BUFFER_READ_WRITE:
+                err = bulk_buffer_remap(buffer);
+                break;
+            case BULK_BUFFER_INVALID:
+                err = bulk_buffer_unmap(buffer);
+                break;
+            default:
+                /* NO-OP */
+                break;
+        }
+    } else if (st == BULK_BUFFER_INVALID) {
+        err = bulk_buffer_map(buffer);
+    }
+
+    if (err_is_fail(err)) {
+        /* TODO: Error handling */
+        return err;
+    }
+
+    return SYS_ERR_OK;
+}
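+
+/*
+ * Summary of the state transitions implemented above:
+ *   READ_WRITE           -> RO_OWNED / READ_ONLY : remap read-only
+ *   READ_WRITE           -> INVALID              : unmap
+ *   RO_OWNED / READ_ONLY -> READ_WRITE           : remap read-write
+ *   RO_OWNED / READ_ONLY -> INVALID              : unmap
+ *   INVALID              -> any other state      : map
+ */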
diff --git a/lib/bulk_transfer/bulk_buffer.h b/lib/bulk_transfer/bulk_buffer.h
new file mode 100644 (file)
index 0000000..678c390
--- /dev/null
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2013, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef BULK_TRANSFER_BUFFER_H
+#define BULK_TRANSFER_BUFFER_H
+
+#include <bulk_transfer/bulk_transfer.h>
+
+/**
+ * checks if a given buffer is read only
+ *
+ * @param buffer    the buffer to check the read only access
+ *
+ * @return  true    if the buffer is read only
+ *          false   otherwise (rw or invalid)
+ */
+static inline uint8_t bulk_buffer_is_read_only(struct bulk_buffer *buffer) {
+    return ((buffer->state == BULK_BUFFER_READ_ONLY)
+                    || (buffer->state == BULK_BUFFER_RO_OWNED));
+}
+
+/**
+ * checks if the given buffer copy can be released
+ *
+ * @param buffer    the buffer to check the read only access
+ *
+ * @return  true    if the buffer copy can be released
+ *          false   otherwise (there are still references out there)
+ */
+static inline uint8_t bulk_buffer_can_release(struct bulk_buffer *buffer) {
+    return (buffer->local_ref_count == 0);
+}
+
+/**
+ * checks if the supplied size is valid for a buffer, i.e. that it is:
+ * - at least of size BASE_PAGE_SIZE
+ * - a power of two
+ */
+static inline uint8_t bulk_buffer_check_size (size_t x)
+{
+  return ((x != 0) && !(x & (x - 1)) && (x >= BASE_PAGE_SIZE));
+}
+
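+/* examples, assuming BASE_PAGE_SIZE == 4096: 4096 and 8192 pass the check,
+ * while 6144 (not a power of two) and 2048 (smaller than a page) do not. */
+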
+/**
+ * does the mapping of the buffer according to the base address, capability
+ * and offset specified in the buffer struct.
+ *
+ * @param buf   the buffer to map
+ */
+errval_t bulk_buffer_map(struct bulk_buffer *buf);
+
+
+/**
+ * does the unmapping of a single buffer according to the trust level,
+ * - if the channel is fully trusted, this results in a no-op.
+ * - otherwise, the mapping is removed
+ *
+ * This function does not revoke or delete any capabilities
+ *
+ * @param buf   the buffer to unmap
+ */
+errval_t bulk_buffer_unmap(struct bulk_buffer *buf);
+
+
+/**
+ * changes the state of the buffer
+ *
+ * @param buffer    the buffer to change the state
+ * @param state     new state to transition the buffer to
+ */
+errval_t bulk_buffer_change_state(struct bulk_buffer       *buffer,
+                                  enum bulk_buffer_state    state);
+
+/**
+ * checks if the buffer is owned by the calling domain
+ *
+ * @param buffer   buffer to check for ownership
+ */
+uint8_t bulk_buffer_is_owner(struct bulk_buffer *buf);
+
+
+/**
+ * checks if the buffer is a read only copy
+ *
+ * @param buffer    the buffer to check
+ *
+ * @return true     if the buffer is a read only copy
+ *         false    if the buffer is not a copy
+ */
+uint8_t bulk_buffer_is_copy(struct bulk_buffer *buffer);
+
+
+/**
+ * checks if the buffer is valid
+ *
+ * @param buffer    the buffer to check
+ *
+ * @return true     if the buffer is valid
+ *         false    if the buffer is not valid
+ */
+uint8_t bulk_buffer_is_valid(struct bulk_buffer *buffer);
+
+
+
+
+#endif // ndef BULK_TRANSFER_BUFFER_H
+
diff --git a/lib/bulk_transfer/bulk_channel.c b/lib/bulk_transfer/bulk_channel.c
new file mode 100644 (file)
index 0000000..27f76bb
--- /dev/null
@@ -0,0 +1,368 @@
+/**
+ * \file
+ * \brief Backend-independent channel operations for bulk data transfer
+ */
+
+/*
+ * Copyright (c) 2013, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#include <barrelfish/barrelfish.h>
+
+#include <bulk_transfer/bulk_transfer.h>
+
+#include "bulk_pool.h"
+#include "bulk_buffer.h"
+
+/**
+ * Create a new channel.
+ *
+ * @param channel       Pointer to unused channel handle
+ * @param local_ep_desc Description of the local endpoint to create the
+ *                      channel on
+ * @param callbacks     Callbacks for events on this channel
+ * @param setup         Struct containing the setup information
+ */
+errval_t bulk_channel_create(struct bulk_channel *channel,
+                             struct bulk_endpoint_descriptor *local_ep_desc,
+                             struct bulk_channel_callbacks *callbacks,
+                             struct bulk_channel_setup *setup)
+{
+    channel->state = BULK_STATE_UNINITIALIZED;
+    /*
+     * XXX: do we somehow track that this endpoint has not been assigned to
+     *      a channel before?
+     */
+    channel->ep = local_ep_desc;
+    channel->callbacks = callbacks;
+
+    channel->direction = setup->direction;
+    channel->role = setup->role;
+    channel->trust = setup->trust;
+    channel->constraints = setup->constraints;
+    channel->meta_size = setup->meta_size;
+    channel->waitset = setup->waitset;
+    channel->user_state = setup->user_state;
+
+    return local_ep_desc->f->channel_create(channel);
+}
+
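+/*
+ * Usage sketch (assumptions: `ep` was initialized by one of the backends,
+ * e.g. bulk_local_init_endpoint(), `cbs` holds the callbacks and
+ * `struct my_meta` stands in for the application's per-buffer metadata):
+ *
+ *   struct bulk_channel_setup setup = {
+ *       // direction, role, trust and constraints as the application needs
+ *       .meta_size  = sizeof(struct my_meta),
+ *       .waitset    = get_default_waitset(),
+ *       .user_state = NULL,
+ *   };
+ *   errval_t err = bulk_channel_create(&channel, &ep.generic, &cbs, &setup);
+ */
+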
+/**
+ * Bind to an existing unbound channel.
+ *
+ * @param channel        Pointer to unused channel handle
+ * @param remote_ep_desc Description of the remote endpoint to bind to
+ * @param callbacks      Callbacks for events on this channel
+ * @param params         Bind parameters
+ */
+errval_t bulk_channel_bind(struct bulk_channel *channel,
+                           struct bulk_endpoint_descriptor *remote_ep_desc,
+                           struct bulk_channel_callbacks *callbacks,
+                           struct bulk_channel_bind_params *params)
+{
+    if (channel->state != BULK_STATE_UNINITIALIZED) {
+        return BULK_TRANSFER_CHAN_STATE;
+    }
+    channel->state = BULK_STATE_BINDING;
+    channel->ep = remote_ep_desc;
+    channel->callbacks = callbacks;
+
+    channel->role = params->role;
+    channel->trust = params->trust;
+    channel->constraints = params->constraints;
+    channel->waitset = params->waitset;
+    channel->user_state = params->user_state;
+
+    return remote_ep_desc->f->channel_bind(channel);
+}
+
+/**
+ * Free a channel
+ *
+ * @param channel        Channel to be freed
+ * @param free_resources Flag if the resources i.e. pools also should be freed
+ */
+errval_t bulk_channel_destroy(struct bulk_channel *channel)
+{
+    assert(!"NYI: bulk_channel_destroy");
+    switch (channel->state) {
+        case BULK_STATE_UNINITIALIZED:
+            return SYS_ERR_OK;
+            break;
+        case BULK_STATE_INITIALIZED:
+
+            break;
+        case BULK_STATE_BINDING:
+
+            break;
+
+        case BULK_STATE_CONNECTED:
+            break;
+
+        case BULK_STATE_TEARDOWN:
+            break;
+
+        case BULK_STATE_CLOSED:
+
+            break;
+        default:
+            return BULK_TRANSFER_CHAN_STATE;
+            break;
+    }
+    return SYS_ERR_OK;
+}
+
+/**
+ * Assign a pool to a channel.
+ *
+ * @param channel Channel
+ * @param pool    Pool to assign (must not be assigned to this channel yet)
+ */
+errval_t bulk_channel_assign_pool(struct bulk_channel *channel,
+                                  struct bulk_pool *pool)
+{
+    assert(channel);
+    assert(pool);
+
+    if (channel->state != BULK_STATE_CONNECTED || !(channel->ep)) {
+        return BULK_TRANSFER_CHAN_STATE;
+    }
+
+    if (pool->trust == BULK_TRUST_UNINITIALIZED) {
+        /* this pool has never been assigned to a channel */
+        pool->trust = channel->trust;
+    }
+
+    /* the trust levels of the pool and the channel must match */
+    if (channel->trust != pool->trust) {
+        return BULK_TRANSFER_CHAN_TRUST;
+    }
+
+    if (bulk_pool_is_assigned(pool, channel)) {
+        /*
+         * XXX: do we treat this as a no-op or should we return an
+         *      BULK_TRANSFER_POOL_ALREADY_ASSIGNED ?
+         *      - RA
+         */
+        return SYS_ERR_OK;
+    }
+
+    struct bulk_pool_list *new_pool = malloc(sizeof(struct bulk_pool_list));
+    if (new_pool == NULL) {
+        return BULK_TRANSFER_MEM;
+    }
+    new_pool->pool = pool;
+    new_pool->next = NULL;
+
+    /* insert into the list of pools, which is kept ordered by pool id */
+    struct bulk_pool_list *list = channel->pools;
+    struct bulk_pool_list *prev = NULL;
+    while (list) {
+        if (bulk_pool_cmp_id(&pool->id, &list->pool->id) == 1) {
+            /* insert before the first entry with a smaller id */
+            break;
+        }
+        prev = list;
+        list = list->next;
+    }
+    /* link in between prev and list; this also covers insertion into an
+     * empty list and appending at the tail (list == NULL) */
+    new_pool->next = list;
+    if (prev == NULL) {
+        channel->pools = new_pool;
+    } else {
+        prev->next = new_pool;
+    }
+
+    return channel->ep->f->assign_pool(channel, pool);
+}
+
+/**
+ * Remove a pool from a channel
+ *
+ * @param channel Channel
+ * @param pool    Pool to remove (must be previously assigned to the channel)
+ *
+ */
+errval_t bulk_channel_remove_pool(struct bulk_channel *channel,
+                                  struct bulk_pool *pool,
+                                  struct bulk_continuation cont)
+{
+    assert(!"NYI: removing a pool from a channel");
+
+    assert(channel);
+    assert(pool);
+
+    if (channel->state != BULK_STATE_CONNECTED) {
+        return BULK_TRANSFER_CHAN_STATE;
+    }
+
+    if (!bulk_pool_is_assigned(pool, channel)) {
+        /*
+         * XXX: if there is no such pool on this channel simply return
+         *      or do we want to indicate an error here?
+         *      BULK_TRANSFER_POOL_NOT_ASSIGNED
+         *      - RA
+         */
+        return SYS_ERR_OK;
+    }
+
+    struct bulk_pool_list *list = channel->pools;
+    struct bulk_pool_list *prev = NULL;
+    while (list) {
+        if (bulk_pool_cmp_id(&list->pool->id, &pool->id) == 0) {
+            break;
+        }
+        prev = list;
+        list = list->next;
+    }
+    if (prev == NULL) {
+        channel->pools = list->next;
+    } else {
+        prev->next = list->next;
+    }
+
+    free(list);
+
+    /*
+     * TODO: we may want to track the channels on which this pool was used,
+     *       so if the last reference is removed, we can unmap the pool
+     */
+
+    return channel->ep->f->remove_pool(channel, pool, cont);
+}
+
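+/**
+ * Moves a buffer, including its ownership, to the other endpoint of the
+ * channel.
+ */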
+errval_t bulk_channel_move(struct bulk_channel *channel,
+                           struct bulk_buffer *buffer,
+                           void *meta,
+                           struct bulk_continuation cont)
+{
+    assert(channel);
+    assert(buffer);
+
+    errval_t err;
+
+    if (channel->state != BULK_STATE_CONNECTED) {
+        return BULK_TRANSFER_CHAN_STATE;
+    }
+
+    if (!bulk_pool_is_assigned(buffer->pool, channel)) {
+        return BULK_TRANSFER_POOL_NOT_ASSIGNED;
+    }
+
+    if (!bulk_buffer_is_owner(buffer)) {
+        return BULK_TRANSFER_BUFFER_NOT_OWNED;
+    }
+
+    err = bulk_buffer_unmap(buffer);
+    if (err_is_fail(err)) {
+        /*
+         * XXX: what do we do if the unmap fails?
+         */
+    }
+
+    return channel->ep->f->move(channel, buffer, meta, cont);
+}
+
+/**
+ * Passes ownership of a buffer to the other endpoint of the channel.
+ */
+errval_t bulk_channel_pass(struct bulk_channel *channel,
+                           struct bulk_buffer *buffer,
+                           void *meta,
+                           struct bulk_continuation cont)
+{
+    assert(channel);
+    assert(buffer);
+
+    errval_t err;
+
+    if (channel->state != BULK_STATE_CONNECTED) {
+        return BULK_TRANSFER_CHAN_STATE;
+    }
+
+    if (!bulk_pool_is_assigned(buffer->pool, channel)) {
+        return BULK_TRANSFER_POOL_NOT_ASSIGNED;
+    }
+
+    if (!bulk_buffer_is_owner(buffer)) {
+        return BULK_TRANSFER_BUFFER_NOT_OWNED;
+    }
+
+    err = bulk_buffer_unmap(buffer);
+    if (err_is_fail(err)) {
+        /*
+         * XXX: what do we do if the unmap fails?
+         */
+    }
+
+    return channel->ep->f->pass(channel, buffer, meta, cont);
+}
+
+/**
+ * Sends a read-only copy of the buffer to the other endpoint of the channel.
+ */
+errval_t bulk_channel_copy(struct bulk_channel *channel,
+                           struct bulk_buffer *buffer,
+                           void *meta,
+                           struct bulk_continuation cont)
+{
+    assert(channel);
+    assert(buffer);
+
+    errval_t err;
+
+    if (channel->state != BULK_STATE_CONNECTED) {
+        return BULK_TRANSFER_CHAN_STATE;
+    }
+
+    if (!bulk_pool_is_assigned(buffer->pool, channel)) {
+        return BULK_TRANSFER_POOL_NOT_ASSIGNED;
+    }
+
+    if (!bulk_buffer_is_valid(buffer)) {
+        return BULK_TRANSFER_BUFFER_INVALID;
+    }
+
+    enum bulk_buffer_state new_state = BULK_BUFFER_READ_ONLY;
+    if (bulk_buffer_is_owner(buffer)) {
+        new_state = BULK_BUFFER_RO_OWNED;
+    }
+    err = bulk_buffer_change_state(buffer, new_state);
+
+    if (err_is_fail(err)) {
+        return BULK_TRANSFER_BUFFER_STATE;
+    }
+
+    return channel->ep->f->copy(channel, buffer, meta, cont);
+}
+
+/**
+ * Releases a buffer copy that was received over this channel.
+ */
+errval_t bulk_channel_release(struct bulk_channel *channel,
+                              struct bulk_buffer *buffer,
+                              struct bulk_continuation cont)
+{
+    assert(channel);
+    assert(buffer);
+
+    if (channel->state != BULK_STATE_CONNECTED) {
+        return BULK_TRANSFER_CHAN_STATE;
+    }
+
+    if (!bulk_buffer_is_copy(buffer)) {
+        return BULK_TRANSFER_BUFFER_NOT_A_COPY;
+    }
+
+    if (!bulk_buffer_is_owner(buffer) && !bulk_buffer_can_release(buffer)) {
+        return BULK_TRANSFER_BUFFER_REFCOUNT;
+    }
+
+    return channel->ep->f->release(channel, buffer, cont);
+}
+
diff --git a/lib/bulk_transfer/bulk_endpoint.c b/lib/bulk_transfer/bulk_endpoint.c
new file mode 100644 (file)
index 0000000..aa094f8
--- /dev/null
@@ -0,0 +1,18 @@
+/**
+ * \file
+ * \brief Generic endpoint handling for bulk data transfer
+ */
+
+/*
+ * Copyright (c) 2009, 2010, 2011, 2012, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#include <barrelfish/barrelfish.h>
+
+#include <bulk_transfer/bulk_transfer.h>
+
diff --git a/lib/bulk_transfer/bulk_pool.c b/lib/bulk_transfer/bulk_pool.c
new file mode 100644 (file)
index 0000000..b50eb7b
--- /dev/null
@@ -0,0 +1,446 @@
+/**
+ * \file
+ * \brief Pool management for bulk data transfer
+ */
+
+/*
+ * Copyright (c) 2009, 2010, 2011, 2012, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#include <string.h>
+
+#include <barrelfish/barrelfish.h>
+
+#include <bulk_transfer/bulk_transfer.h>
+#include "bulk_pool.h"
+#include "bulk_buffer.h"
+
+/**
+ * compares two bulk pool ids
+ *
+ * @return  -1  if id1 is less than id2
+ *           0  if both ids are equal
+ *           1  if id1 is bigger than id2
+ */
+int8_t bulk_pool_cmp_id(struct bulk_pool_id *id1, struct bulk_pool_id *id2)
+{
+
+    if (id1->machine < id2->machine) {
+        return -1;
+    }
+
+    if (id1->machine > id2->machine) {
+        return 1;
+    }
+
+    if (id1->dom < id2->dom) {
+        return -1;
+    }
+
+    if (id1->dom > id2->dom) {
+        return 1;
+    }
+
+    if (id1->local < id2->local) {
+        return -1;
+    }
+
+    if (id1->local > id2->local) {
+        return 1;
+    }
+
+    assert(id1->machine == id2->machine);
+    assert(id1->dom == id2->dom);
+    assert(id1->local == id2->local);
+
+    return 0;
+}
+
+/**
+ * checks if a pool already has been assigned to that channel
+ *
+ * @param pool      the bulk pool to check for assignment
+ * @param channel   the channel to check for assignment
+ *
+ * @return true:    the pool is assigned to this channel
+ *         false:   the pools has not been assigned to the channel
+ */
+uint8_t bulk_pool_is_assigned(struct bulk_pool *pool,
+                              struct bulk_channel *channel)
+{
+    assert(channel);
+
+    struct bulk_pool_list *list = channel->pools;
+
+    /* note: a simple linear scan, so the check does not depend on the
+     * ordering of the pool list */
+    while (list) {
+        if (bulk_pool_cmp_id(&list->pool->id, &pool->id) == 0) {
+            /* we have a match */
+            return 1;
+        }
+        list = list->next;
+    }
+
+    return 0;
+}
+
+/**
+ * checks if the pool is already remapped i.e. the per-buffer caps have been
+ * created
+ *
+ * @return false    if there are no caps per buffer
+ *         true     if the caps have been created
+ */
+static uint8_t bulk_pool_is_remapped(struct bulk_pool *pool)
+{
+    struct capref *pool_cap = &pool->pool_cap;
+    struct capref *buf_cap = &pool->buffers[0]->cap;
+
+    if (pool_cap->slot == buf_cap->slot) {
+        /*
+         * if the pool is remapped, a new cnode is created for the buffer
+         * caps; the first buffer capability then has a different cnode
+         * address than the pool cap, which indicates that the pool has
+         * been remapped.
+         */
+        if (pool_cap->cnode.address == buf_cap->cnode.address) {
+            return 0;
+        }
+        return 1;
+    }
+    return 1;
+}
+
+/**
+ * does the remapping of the pool if the trust level changes from fully trusted
+ * to a lower one. This function is called by the backend.
+ *
+ * @param pool  the pool to remap
+ */
+errval_t bulk_pool_remap(struct bulk_pool *pool)
+{
+
+    /* TODO: REMOVE this function */
+    assert(!"DEPRECATED: should not be used anymore");
+
+    errval_t err;
+
+    /* check if the pool is already remapped  */
+    if (bulk_pool_is_remapped(pool)) {
+        return BULK_TRANSFER_POOL_ALREADY_REMAPPED;
+    }
+
+    /* get the vspace / vregions / memobj pointers */
+    struct vspace *vspace = get_current_vspace();
+    struct vregion *vregion = vspace_get_region(vspace,
+                    (void *) pool->base_address);
+    struct memobj *memobj = vregion_get_memobj(vregion);
+
+    /*
+     * remove the pool capability from the memobj by unfilling it,
+     * this also does the unmap of the vregions
+     */
+    struct capref pool_cap;
+    genvaddr_t ret_offset;
+
+    err = memobj->f.unfill(memobj, 0, &pool_cap, &ret_offset);
+    if (err_is_fail(err)) {
+        /*
+         * XXX: this error should basically not happen here...
+         */
+        assert(err != LIB_ERR_MEMOBJ_UNFILL_TOO_HIGH_OFFSET);
+        return err_push(err, LIB_ERR_MEMOBJ_UNFILL_TOO_HIGH_OFFSET);
+    }
+
+    /* sanity check that the removed offset is in fact zero */
+    assert(ret_offset == 0);
+
+    err = memobj->f.unfill(memobj, 0, &pool_cap, &ret_offset);
+    if (err != LIB_ERR_MEMOBJ_UNFILL_TOO_HIGH_OFFSET) {
+        /*
+         * Note that a pool that has not been remapped only contains one
+         * frame. Thus this call to unfill has to return with the error
+         * code. Otherwise there is something wrong with the pool's memobj...
+         */
+        return BULK_TRANSFER_POOL_INVALD;
+    }
+
+    /*
+     * the pool cap has been successfully removed from the pool's memobj
+     * and we can start creating the buffer caps in a new cnode.
+     */
+    struct capref cnode_cap;
+    struct capref buf_cap = { .slot = 0 };
+
+    err = cnode_create(&cnode_cap, &buf_cap.cnode, pool->num_buffers, NULL);
+    if (err_is_fail(err)) {
+        return err_push(err, LIB_ERR_CNODE_CREATE);
+    }
+    /* determine the size bits for the buffer */
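+    /* note: pool->buffer_size is a power of two of at least BASE_PAGE_SIZE
+     * (see bulk_buffer_check_size()), so the loop computes its log2,
+     * e.g. 4096 -> 12 and 8192 -> 13, as required by cap_retype() */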
+    size_t size = 12;
+    size_t buf_size = pool->buffer_size >> 12;
+    while (buf_size >>= 1) {
+        ++size;
+    }
+    /* retype the pool cap into the buffer caps of the new cnode */
+    err = cap_retype(buf_cap, pool->pool_cap, ObjType_Frame, size);
+    if (err_is_fail(err)) {
+        return err_push(err, LIB_ERR_CAP_RETYPE);
+    }
+
+    for (int i = 0; i < pool->num_buffers; ++i) {
+        struct bulk_buffer *buf = pool->buffers[i];
+
+        size_t offset = i * pool->buffer_size;
+
+        /* update capability information of the buffer */
+        buf_cap.slot = i;
+        buf->cap_offset = 0;
+        buf->cap = buf_cap;
+
+        err = memobj->f.fill(memobj, offset, buf->cap, pool->buffer_size);
+        if (err_is_fail(err)) {
+            return err_push(err, LIB_ERR_MEMOBJ_FILL);
+        }
+
+        vregion = vspace_get_region(vspace, buf->address);
+        err = memobj->f.map_region(memobj, vregion);
+        if (err_is_fail(err)) {
+            return err_push(err, LIB_ERR_MEMOBJ_MAP_REGION);
+        }
+
+        /* create the actual mapping by faulting on it */
+        err = memobj->f.pagefault(memobj, vregion, offset, 0);
+        if (err_is_fail(err)) {
+            return err_push(err, LIB_ERR_MEMOBJ_PAGEFAULT_HANDLER);
+        }
+        /*
+         * TODO: do we want to abort the processing if one of the operations
+         *       fails for one of the buffers or just mark the buffer as
+         *       invalid and go on?
+         */
+    }
+
+    return SYS_ERR_OK;
+}
+
+/**
+ * unmaps the entire pool and frees up the entire memory region of the pool.
+ *
+ * @param pool  the pool to unmap
+ */
+errval_t bulk_pool_unmap(struct bulk_pool *pool)
+{
+    assert(pool);
+    struct bulk_pool_internal *pool_int = (struct bulk_pool_internal *) pool;
+
+    if (!pool_int->vregion) {
+        /* there is no vregion associated with the pool, so it's not mapped */
+        return SYS_ERR_OK;
+    }
+
+    errval_t err = SYS_ERR_OK;
+
+    /* get the vspace / vregions / memobj pointers */
+    struct vregion *vregion = pool_int->vregion;
+    struct memobj *memobj = vregion_get_memobj(vregion);
+
+    struct capref ret_cap;
+    genvaddr_t ret_addr;
+
+    /* unfill and unmap the frames */
+    for (int i=0; i < pool->num_buffers; ++i) {
+        genvaddr_t offset = i * pool->buffer_size;
+
+        err = memobj->f.unfill(memobj, offset, &ret_cap, &ret_addr);
+        if (err_is_fail(err)) {
+            if (err == LIB_ERR_MEMOBJ_UNFILL_TOO_HIGH_OFFSET) {
+                break;
+            }
+            /* TODO: Error handling */
+            return err;
+        }
+    }
+
+    err = vregion_destroy(vregion);
+    if (err_is_fail(err)) {
+        return err_push(err, LIB_ERR_VREGION_DESTROY);
+    }
+
+    return SYS_ERR_OK;
+}
+
+/**
+ * Does the mapping of a pool depending on the trust level. If it is not
+ * trusted, only the memory range is allocated and the vregions for the
+ * buffers are created. In the trusted case, the pool is backed with the pool
+ * cap and mapped.
+ *
+ * @param pool  the pool to map
+ */
+errval_t bulk_pool_map(struct bulk_pool *pool)
+{
+    assert(pool);
+
+    errval_t err;
+
+    if (pool->base_address != 0) {
+        /* the pool already has a base address and thus is already mapped */
+
+        /* XXX: maybe return an already-mapped error instead? */
+        return SYS_ERR_OK;
+    }
+
+    if (!bulk_buffer_check_size(pool->buffer_size)) {
+        return BULK_TRANSFER_ALLOC_BUFFER_SIZE;
+    }
+
+    size_t pool_size = pool->buffer_size * pool->num_buffers;
+
+    struct vspace *vspace = get_current_vspace();
+
+    struct memobj_fixed *memobj_fixed = malloc(sizeof(struct memobj_fixed));
+    if (!memobj_fixed) {
+        return BULK_TRANSFER_MEM;
+    }
+    struct memobj *memobj = &(memobj_fixed->m);
+
+    // Create a memobj
+    err = memobj_create_fixed(memobj_fixed, pool_size, 0, pool->num_buffers,
+                    pool->buffer_size);
+
+    if (err_is_fail(err)) {
+        free(memobj_fixed);
+        return err_push(err, LIB_ERR_MEMOBJ_CREATE_ANON);
+    }
+
+    struct pmap *pmap = vspace_get_pmap(vspace);
+
+    /* allocate some virtual address space */
+    genvaddr_t address;
+    err = pmap->f.determine_addr(pmap, memobj, 4096, &address);
+    if (err_is_fail(err)) {
+        return err_push(err, LIB_ERR_PMAP_DETERMINE_ADDR);
+    }
+    pool->base_address = vspace_genvaddr_to_lvaddr(address);
+
+    if (pool->trust == BULK_TRUST_FULL
+                    || pool->trust == BULK_TRUST_UNINITIALIZED) {
+        if (capref_is_null(pool->pool_cap)) {
+            return SYS_ERR_CAP_NOT_FOUND;
+        }
+
+        /*
+         * split the pool cap into the individual buffer caps, which are
+         * placed in a newly created cnode
+         */
+        struct capref cnode_cap;
+        struct capref buf_cap = { .slot = 0 };
+
+        err = cnode_create(&cnode_cap, &buf_cap.cnode, pool->num_buffers, NULL);
+        if (err_is_fail(err)) {
+            return err_push(err, LIB_ERR_CNODE_CREATE);
+        }
+        /* determine the size bits for the buffer */
+        size_t size = 12;
+        size_t buf_size = pool->buffer_size >> 12;
+        while (buf_size >>= 1) {
+            ++size;
+        }
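+        /* same log2 computation as in bulk_pool_remap();
+         * e.g. buffer_size == 0x4000 yields size == 14 */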
+        /* retype the pool cap into the buffer caps of the new cnode */
+        err = cap_retype(buf_cap, pool->pool_cap, ObjType_Frame, size);
+        if (err_is_fail(err)) {
+            return err_push(err, LIB_ERR_CAP_RETYPE);
+        }
+
+        for (int i = 0; i < pool->num_buffers; ++i) {
+            struct bulk_buffer *buf = pool->buffers[i];
+
+            /* update capability information of the buffer */
+            buf_cap.slot = i;
+            buf->cap_offset = 0;
+            buf->cap = buf_cap;
+
+            err = memobj->f.fill(memobj, i * pool->buffer_size, buf->cap,
+                            pool->buffer_size);
+            if (err_is_fail(err)) {
+                /* TODO: error handling */
+                return err_push(err, LIB_ERR_MEMOBJ_FILL);
+            }
+        }
+    }
+
+    /* we have the address range; now map the memobj into a new vregion */
+    struct vregion *vregion = malloc(sizeof(struct vregion));
+    if (!vregion) {
+        return BULK_TRANSFER_MEM;
+    }
+
+    err = vregion_map_fixed(vregion, get_current_vspace(), memobj, 0, pool_size,
+                    address, VREGION_FLAGS_READ_WRITE);
+    if (err_is_fail(err)) {
+        return err_push(err, LIB_ERR_MEMOBJ_MAP_REGION);
+    }
+
+    struct bulk_pool_internal *pool_int = (struct bulk_pool_internal *) pool;
+    pool_int->vregion = vregion;
+
+    for (int i = 0; i < pool->num_buffers; ++i) {
+
+        size_t offset = (i * pool->buffer_size);
+
+        if (pool->trust == BULK_TRUST_FULL
+                        || pool->trust == BULK_TRUST_UNINITIALIZED) {
+            err = memobj->f.pagefault(memobj, vregion, offset, 0);
+            if (err_is_fail(err)) {
+                return err_push(err, LIB_ERR_MEMOBJ_PAGEFAULT_HANDLER);
+            }
+            struct bulk_buffer *buf = pool->buffers[i];
+            buf->state = BULK_BUFFER_READ_WRITE;
+            buf->address = (void *) vspace_genvaddr_to_lvaddr(address + offset);
+        }
+    }
+    return SYS_ERR_OK;
+}
+
+/**
+ * initializes the buffers for a pool given the struct pool is allocated and
+ * filled with the num bufs
+ *
+ * @param pool  pointer to a pool with the information
+ */
+errval_t bulk_pool_init_bufs(struct bulk_pool *pool)
+{
+    size_t buffer_count = pool->num_buffers;
+
+    /* allocate memory for buffers */
+    struct bulk_buffer *bufs = malloc(
+                    buffer_count * sizeof(struct bulk_buffer));
+    if (!bufs) {
+        return BULK_TRANSFER_MEM;
+    }
+    memset(bufs, 0, buffer_count * sizeof(struct bulk_buffer));
+
+    pool->buffers = malloc(buffer_count * sizeof(void *));
+    if (!pool->buffers) {
+        free(bufs);
+        return BULK_TRANSFER_MEM;
+    }
+
+    for (int i = 0; i < buffer_count; ++i) {
+        (bufs + i)->state = BULK_BUFFER_INVALID;
+        (bufs + i)->pool = pool;
+        pool->buffers[i] = bufs + i;
+    }
+
+    return SYS_ERR_OK;
+}
diff --git a/lib/bulk_transfer/bulk_pool.h b/lib/bulk_transfer/bulk_pool.h
new file mode 100644 (file)
index 0000000..d53cf7f
--- /dev/null
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2013, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef BULK_TRANSFER_POOL_H
+#define BULK_TRANSFER_POOL_H
+
+#include <bulk_transfer/bulk_transfer.h>
+
+/**
+ * internal representation of a bulk pool
+ *
+ */
+struct bulk_pool_internal {
+    struct bulk_pool pool;      ///< the public interface (must be first member)
+    /* internal fields */
+    struct vregion *vregion;    ///< pointer to the vregion of the pool
+};
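+
+/*
+ * Since `pool` is the first member, a public struct bulk_pool pointer can be
+ * cast to the internal representation, as done throughout bulk_pool.c. A
+ * minimal helper capturing that pattern (sketch only; this helper name is
+ * not part of the commit):
+ */
+static inline struct bulk_pool_internal *bulk_pool_to_internal(
+        struct bulk_pool *pool)
+{
+    /* valid because `pool` is the first member of struct bulk_pool_internal */
+    return (struct bulk_pool_internal *) pool;
+}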
+
+/**
+ * compares two bulk pool ids
+ *
+ * @return  -1  if id1 is less than id2
+ *           0  if both ids are equal
+ *           1  if id1 is bigger than id2
+ */
+int8_t bulk_pool_cmp_id(struct bulk_pool_id *id1,
+                        struct bulk_pool_id *id2);
+
+/**
+ * checks if a pool already has been assigned to that channel
+ *
+ * @param pool      the bulk pool to check for assignment
+ * @param channel   the channel to check for assignment
+ *
+ * @return true:    the pool is assigned to this channel
+ *         false:   the pool has not been assigned to the channel
+ */
+uint8_t bulk_pool_is_assigned(struct bulk_pool      *pool,
+                              struct bulk_channel   *channel);
+
+
+/**
+ * Does the mapping of a pool depending on the trust level. If it is not
+ * trusted, only the memory range is allocated and the vregions for the
+ * buffers are created. In the trusted case, the pool is backed with the pool
+ * cap and mapped.
+ *
+ * @param pool  the pool to map
+ */
+errval_t bulk_pool_map(struct bulk_pool *pool);
+
+
+/**
+ * does the remapping of the pool if the trust level changes from fully trusted
+ * to a lower one. This function is called by the backend.
+ *
+ * @param pool  the pool to remap
+ */
+errval_t bulk_pool_remap(struct bulk_pool *pool);
+
+
+/**
+ * unmaps the entire pool and frees up the entire memory region of the pool.
+ *
+ * @param pool  the pool to unmap
+ */
+errval_t bulk_pool_unmap(struct bulk_pool *pool);
+
+
+/**
+ * initializes the buffers for a pool given the struct pool is allocated and
+ * filled with the basic information about the number of buffers in the pool.
+ *
+ * @param pool  pointer to a pool with the information
+ */
+errval_t bulk_pool_init_bufs(struct bulk_pool *pool);
+
+#endif // BULK_TRANSFER_POOL_H
+
diff --git a/lib/bulk_transfer/bulk_transfer.c b/lib/bulk_transfer/bulk_transfer.c
new file mode 100644 (file)
index 0000000..aa094f8
--- /dev/null
@@ -0,0 +1,18 @@
+/**
+ * \file
+ * \brief Unidirectional bulk data transfer via shared memory
+ */
+
+/*
+ * Copyright (c) 2009, 2010, 2011, 2012, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#include <barrelfish/barrelfish.h>
+
+#include <bulk_transfer/bulk_transfer.h>
+
diff --git a/lib/bulk_transfer/control_channel.c b/lib/bulk_transfer/control_channel.c
new file mode 100644 (file)
index 0000000..aa094f8
--- /dev/null
@@ -0,0 +1,18 @@
+/**
+ * \file
+ * \brief Unidirectional bulk data transfer via shared memory
+ */
+
+/*
+ * Copyright (c) 2009, 2010, 2011, 2012, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#include <barrelfish/barrelfish.h>
+
+#include <bulk_transfer/bulk_transfer.h>
+
diff --git a/lib/bulk_transfer/error_codes b/lib/bulk_transfer/error_codes
new file mode 100644 (file)
index 0000000..0c5d714
--- /dev/null
@@ -0,0 +1,15 @@
+BULK_POOL_NOT_ASSIGNED
+BULK_POOL_ALREADY_ASSIGNED
+BULK_POOL_ALREADY_REMAPPED
+BULK_POOL_INVALID
+
+BULK_BUFFER_NOT_OWNED
+BULK_BUFFER_INVALID
+BULK_BUFFER_ALREADY_MAPPED
+BULK_BUFFER_STATE
+BULK_BUFFER_NOT_A_COPY
+BULK_BUFFER_REFCOUNT
+
+BULK_CHANNEL_STATE
+BULK_CHANNEL_TRUST
+BULK_CHANNEL_INVALID_EP
\ No newline at end of file
diff --git a/lib/bulk_transfer/helpers.h b/lib/bulk_transfer/helpers.h
new file mode 100644 (file)
index 0000000..eb20684
--- /dev/null
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2013, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef BULK_TRANSFER_HELPERS_H
+#define BULK_TRANSFER_HELPERS_H
+
+#include <bulk_transfer/bulk_transfer.h>
+
+static inline enum bulk_channel_role bulk_role_other(
+        enum bulk_channel_role role)
+{
+    if (role == BULK_ROLE_MASTER) {
+        return BULK_ROLE_SLAVE;
+    } else if (role == BULK_ROLE_SLAVE){
+        return BULK_ROLE_MASTER;
+    } else {
+        /* XXX: a generic role has no opposite; return it unchanged */
+        return BULK_ROLE_GENERIC;
+    }
+}
+
+static inline enum bulk_channel_direction bulk_direction_other(
+        enum bulk_channel_direction direction)
+{
+    if (direction == BULK_DIRECTION_TX) {
+        return BULK_DIRECTION_RX;
+    } else {
+        return BULK_DIRECTION_TX;
+    }
+}
+
+
+#endif // ndef BULK_TRANSFER_HELPERS_H
+
diff --git a/usr/block_server/Hakefile b/usr/block_server/Hakefile
new file mode 100644 (file)
index 0000000..635392c
--- /dev/null
@@ -0,0 +1,31 @@
+--------------------------------------------------------------------------
+-- Copyright (c) 2007-2010, ETH Zurich.
+-- All rights reserved.
+--
+-- This file is distributed under the terms in the attached LICENSE file.
+-- If you do not find this file, copies can be found by writing to:
+-- ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+--
+-- Hakefile for /usr/block_server
+--
+--------------------------------------------------------------------------
+
+[ build application { 
+    target = "block_server",
+    cFiles = [ "block_server.c",    "network_server.c", 
+               "block_storage.c",   "local_server.c" ],
+    addLibraries = libDeps [ "bulk_transfer", "lwip", "net_if_raw"],
+    flounderDefs = [ "block_service" ],
+    flounderBindings = [ "block_service" ],
+    architectures = [ "x86_64", "x86_32" ]
+  },
+    
+  build application { 
+    target = "block_server_client",
+    cFiles = [ "block_server_client.c", "network_client.c", "local_server.c", "block_storage_cache.c" ],
+    addLibraries = libDeps [ "bulk_transfer" ],
+    flounderDefs = [ "block_service" ],
+    flounderBindings = [ "block_service" ],
+    architectures = [ "x86_64", "x86_32" ]
+  }
+]
\ No newline at end of file
diff --git a/usr/block_server/block_server.c b/usr/block_server/block_server.c
new file mode 100644 (file)
index 0000000..e96e639
--- /dev/null
@@ -0,0 +1,111 @@
+/**
+ * \file
+ * \brief block_server process.
+ */
+
+/*
+ * Copyright (c) 2007, 2008, 2009, 2010, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <barrelfish/barrelfish.h>
+
+#include <bulk_transfer/bulk_transfer.h>
+#include <bulk_transfer/bulk_allocator.h>
+
+#include "block_server.h"
+#include "local_server.h"
+#include "network_server.h"
+#include "block_storage.h"
+
+
+static void run_test(void)
+{
+    struct bulk_allocator alloc;
+#define NUM_BUFS 10
+#define BUF_SIZE 4096
+    errval_t err;
+    err = bulk_alloc_init(&alloc, NUM_BUFS, BUF_SIZE, NULL);
+    if (err_is_fail(err)) {
+        USER_PANIC_ERR(err, "Bulk alloc failed");
+    }
+
+    printf("Pool allocated @ %p\n", (void *)alloc.pool->base_address);
+
+    struct bulk_buffer *buf;
+    for (int i = 0; i < NUM_BUFS; ++i) {
+        buf = bulk_alloc_new_buffer(&alloc);
+        if (buf) {
+            printf("Buffer %i @ %p\n", i, buf->address);
+            memset(buf->address, i, BUF_SIZE);
+            char *e = buf->address;
+            for (int j = 0; j < BUF_SIZE; ++j) {
+                if (e[j] != i) {
+                    printf("ERROR: buffer does not have the intended content\n");
+                }
+            }
+        } else {
+            printf("No new buffer %i\n", i);
+        }
+    }
+
+    bulk_alloc_return_buffer(&alloc, buf);
+    bulk_alloc_free(&alloc);
+
+    printf("\n\n=============================================\n");
+    abort();
+}
+
+/**
+ *
+ *
+ */
+int main(int argc, char *argv[])
+{
+    printf("\n\n==============================================\n");
+    debug_printf("block server started.\n");
+
+    errval_t err;
+
+    run_test();
+
+    /* XXX: run_test() never returns (it ends in abort()); the server
+     * startup below is currently unreachable test scaffolding */
+    return SYS_ERR_OK;
+
+    /* initialize the block store */
+    err = block_storage_init(BLOCK_COUNT, BLOCK_SIZE);
+    if (err_is_fail(err)) {
+        printf("ERROR: could not initialize the block store\n");
+        return 1;
+    }
+
+    /* Initialize the core local flounder interface  */
+    /* XXX: Do we need this? */
+    err = block_local_init(SERVICE_FLAG_DEFAULT);
+    if (err_is_fail(err)) {
+        block_storage_dealloc();
+        printf("ERROR: could not initialize the flounder service.\n");
+        return 1;
+    }
+
+    /* initialize the network service */
+    err = block_net_init(BLOCK_NET_PORT);
+    if (err_is_fail(err)) {
+        block_storage_dealloc();
+        printf("ERROR: could not initialize the network service.\n");
+        return 1;
+    }
+
+    /* start the network service */
+    err = block_net_start();
+
+    /* start the flounder service */
+    err = block_local_start();
+
+    return err_is_fail(err) ? 1 : 0;
+}
+
diff --git a/usr/block_server/block_server.h b/usr/block_server/block_server.h
new file mode 100644 (file)
index 0000000..004b5cc
--- /dev/null
@@ -0,0 +1,24 @@
+/**
+ * \file
+ * \brief common definitions for the block server.
+ */
+
+/*
+ * Copyright (c) 2007, 2008, 2009, 2010, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef BLOCK_SERVER_H
+#define BLOCK_SERVER_H
+
+#define BLOCK_NET_PORT 10000
+
+#define BLOCK_COUNT 4096
+
+#define BLOCK_SIZE 4096
+
+#endif /* BLOCK_SERVER_H */
diff --git a/usr/block_server/block_server_client.c b/usr/block_server/block_server_client.c
new file mode 100644 (file)
index 0000000..9439806
--- /dev/null
@@ -0,0 +1,78 @@
+/**
+ * \file
+ * \brief block_server client process.
+ */
+
+/*
+ * Copyright (c) 2007, 2008, 2009, 2010, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#include <stdio.h>
+
+#include <barrelfish/barrelfish.h>
+
+#include "block_server.h"
+#include "local_server.h"
+#include "network_client.h"
+
+static char *ip = "127.0.0.1";
+
+/**
+ * \brief Contains the connection parameters to the network block server
+ */
+static struct block_net_server block_server;
+
+/**
+ * \brief Main function for the network block server client.
+ *
+ */
+int main(int argc, char *argv[])
+{
+    debug_printf("Block service network client started.\n");
+
+    errval_t err;
+
+    /*
+     * Connect to the network block server
+     */
+
+    block_server.ip = ip;
+    block_server.port = BLOCK_NET_PORT;
+
+    /* XXX: early return for testing; the connection setup below is
+     * currently unreachable */
+    return SYS_ERR_OK;
+
+    err = block_net_connect(&block_server);
+    if (err_is_fail(err)) {
+        printf("ERROR: Could not connect to block server: %s, (%i)\n",
+                        block_server.ip, block_server.port);
+        return 1;
+    }
+
+    /*
+     * TODO: Setup block caching
+     */
+
+    /*
+     * Initialize the core local server (flounder interface)
+     */
+    err = block_local_init(SERVICE_FLAG_CLIENT);
+    if (err_is_fail(err)) {
+        /* disconnect from the network server */
+        block_net_disconnect(&block_server);
+        printf("ERROR: Could not initialize local server.\n");
+        return 1;
+    }
+
+    err = block_local_start();
+    if (err_is_fail(err)) {
+        block_net_disconnect(&block_server);
+        printf("ERROR: Could not start the local server.\n");
+        return 1;
+    }
+
+    return 0;
+}
+
diff --git a/usr/block_server/block_storage.c b/usr/block_server/block_storage.c
new file mode 100644 (file)
index 0000000..e2d415e
--- /dev/null
@@ -0,0 +1,80 @@
+/**
+ * \file
+ * \brief in-RAM block storage backend of the block server.
+ */
+
+/*
+ * Copyright (c) 2007, 2008, 2009, 2010, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#include <barrelfish/barrelfish.h>
+
+#include "block_storage.h"
+
+static struct block_storage blocks;
+
+/**
+ * \brief this function allocates an in memory block storage
+ *
+ * The intended use of this function is to use bulk buffers as a backend
+ * for the block store. The used bulk buffers can then be used directly
+ * to be sent over the bulk channel.
+ */
+static errval_t block_storage_alloc(void)
+{
+    assert(!"NYI: block_storage_alloc");
+    return SYS_ERR_OK;
+}
+
+/**
+ * \brief this function populates the previously allocated block storage
+ *        with sample data
+ */
+static errval_t block_storage_fill(void)
+{
+    assert(!"NYI: block_storage_fill");
+    return SYS_ERR_OK;
+}
+
+/**
+ * \brief this function initializes the in-RAM block storage of the block
+ *        server.
+ *
+ * \param num_blocks    the number of blocks to initialize
+ * \param block_size    the size of a single block in bytes
+ *
+ * \return SYS_ERR_OK on success
+ *
+ */
+errval_t block_storage_init(size_t num_blocks, size_t block_size)
+{
+    blocks.block_size = block_size;
+    blocks.num_blocks = num_blocks;
+
+    errval_t err;
+
+    // allocate the block store
+    err = block_storage_alloc();
+    if (err_is_fail(err)) {
+        return err;
+    }
+
+    // initialize the block store with sample data
+    err = block_storage_fill();
+    if (err_is_fail(err)) {
+        return err;
+    }
+
+    return SYS_ERR_OK;
+}
+
+
+/**
+ * \brief frees up the in-RAM block storage
+ */
+errval_t block_storage_dealloc(void)
+{
+    assert(!"NYI: block_storage_dealloc");
+    return SYS_ERR_OK;
+}
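+
+/*
+ * block_storage_get/set are declared in block_storage.h but not implemented
+ * in this commit. A bounds-checked lookup is the natural shape for the
+ * getter; a sketch under that assumption (the error code is a placeholder):
+ */
+#if 0 /* hypothetical sketch, not part of this commit */
+errval_t block_storage_get(size_t bid, void **ret_block)
+{
+    if (bid >= blocks.num_blocks) {
+        return BULK_TRANSFER_BUFFER_INVALID; /* placeholder error code */
+    }
+    *ret_block = &blocks.blocks[bid];
+    return SYS_ERR_OK;
+}
+#endif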
+
diff --git a/usr/block_server/block_storage.h b/usr/block_server/block_storage.h
new file mode 100644 (file)
index 0000000..91975c4
--- /dev/null
@@ -0,0 +1,44 @@
+/**
+ * \file
+ * \brief in-RAM block storage backend of the block server.
+ */
+
+/*
+ * Copyright (c) 2007, 2008, 2009, 2010, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef BLOCK_STORAGE_H
+#define BLOCK_STORAGE_H
+
+/// The default size of a single block
+#define DEFAULT_BLOCK_SIZE 4096
+
+/// The default number of blocks
+#define DEFAULT_BLOCK_COUNT 1024
+
+struct block
+{
+/* todo: reference to a bulk block */
+};
+
+struct block_storage
+{
+    size_t num_blocks;
+    size_t block_size;
+    struct block *blocks;
+};
+
+errval_t block_storage_init(size_t num_blocks, size_t block_size);
+
+errval_t block_storage_dealloc(void);
+
+errval_t block_storage_get(size_t bid, void **ret_block);
+
+errval_t block_storage_set(size_t bid, struct block *blk);
+
+#endif /* BLOCK_STORAGE_H */
diff --git a/usr/block_server/block_storage_cache.c b/usr/block_server/block_storage_cache.c
new file mode 100644 (file)
index 0000000..64f7ef5
--- /dev/null
@@ -0,0 +1,52 @@
+/**
+ * \file
+ * \brief block storage cache of the block server client.
+ */
+
+/*
+ * Copyright (c) 2007, 2008, 2009, 2010, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#include <barrelfish/barrelfish.h>
+
+#include "block_storage_cache.h"
+
+/**
+ * \brief inserts a new block into the cache
+ *
+ * \param blockid   the id of the block to insert
+ * \param data      the data of the block XXX: This should be bulk_buf?
+ */
+errval_t block_cache_insert(size_t blockid, void *data)
+{
+    assert(!"NYI: block_cache_insert");
+    return SYS_ERR_OK;
+}
+
+/**
+ * \brief invalidates a block in the cache
+ *
+ * \param blockid   the id of the block to invalidate
+ */
+errval_t block_cache_invalidate(size_t blockid)
+{
+    assert(!"NYI: block_cache_invalidate");
+    return SYS_ERR_OK;
+}
+
+/**
+ * \brief looks up a block in the cache and returns it if present
+ *
+ * \param blockid   the ID of the block to lookup
+ * \param ret_data  pointer to the returned data XXX: bulk_buf?
+ */
+errval_t block_cache_lookup(size_t blockid, void **ret_data)
+{
+    assert(!"NYI: block_cache_lookup");
+    return SYS_ERR_OK;
+}
diff --git a/usr/block_server/block_storage_cache.h b/usr/block_server/block_storage_cache.h
new file mode 100644 (file)
index 0000000..8e7a46b
--- /dev/null
@@ -0,0 +1,24 @@
+/**
+ * \file
+ * \brief block storage cache of the block server client.
+ */
+
+/*
+ * Copyright (c) 2007, 2008, 2009, 2010, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef BLOCK_STORAGE_CACHE_H
+#define BLOCK_STORAGE_CACHE_H
+
+errval_t block_cache_insert(size_t blockid, void *data);
+
+errval_t block_cache_invalidate(size_t blockid);
+
+errval_t block_cache_lookup(size_t blockid, void **ret_data);
+
+#endif /* BLOCK_STORAGE_CACHE_H */
diff --git a/usr/block_server/local_server.c b/usr/block_server/local_server.c
new file mode 100644 (file)
index 0000000..b344079
--- /dev/null
@@ -0,0 +1,162 @@
+/**
+ * \file
+ * \brief Machine-local (flounder) server of the block service
+ */
+
+/*
+ * Copyright (c) 2007, 2008, 2009, 2010, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#include <barrelfish/barrelfish.h>
+#include <barrelfish/nameservice_client.h>
+
+#include <if/block_service_defs.h>
+
+#include "local_server.h"
+
+/* ---------------------------  Server State  ---------------------------- */
+static uint8_t server_state = SERVICE_STATE_UNINITIALIZED;
+
+static iref_t server_iref = 0;
+
+static uint8_t is_client = 0;
+
+/* -------------------------  Request Callbacks  ------------------------- */
+
+/**
+ * \brief callback for read requests on the flounder channel
+ */
+static void rx_read_request(struct block_service_binding *binding,
+                            uint32_t start_block, uint32_t count)
+{
+    /*
+     * if this is the server, serve the request locally,
+     * else forward it to the network server (see the sketch after
+     * rx_write_request below)
+     */
+    if (is_client) {
+
+    }
+}
+
+static void rx_write_request(struct block_service_binding *binding,
+                             uint32_t start_block, uint32_t count)
+{
+    /*
+     * if this is the server, serve the request locally,
+     * else forward it to the network server (see the sketch below)
+     */
+    if (is_client) {
+
+    }
+}
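+
+/*
+ * Sketch of the client-side forwarding that the two handlers above leave
+ * open, assuming the target server is found by block id (both helpers are
+ * declared in network_client.h; this wiring and the ret_data reply buffer
+ * are not part of the commit):
+ *
+ *   struct block_net_server *srv = block_net_server_lookup(start_block);
+ *   errval_t err = block_net_read(srv, start_block, count, ret_data);
+ */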
+
+/* ---------------------  Connection Initialization  --------------------- */
+
+/**
+ * \brief accepts new connections to the local server via the flounder interface
+ */
+static errval_t block_local_accept_cb(void *st, struct block_service_binding *b)
+{
+    // do the channel initialization
+    b->rx_vtbl.read = rx_read_request;
+    b->rx_vtbl.write = rx_write_request;
+
+    /* TODO: Do we need some state to setup here? */
+
+    assert(!"NYI: block_local_accept_cb");
+    return SYS_ERR_OK;
+}
+
+/* ----------------------  Channel Initialization  ----------------------- */
+
+/**
+ * \brief callback for the interface export
+ */
+static void block_local_export_cb(void *st, errval_t err, iref_t iref)
+{
+    if (err_is_fail(err)) {
+        /* TODO: Error handling */
+        server_state = SERVICE_STATE_FAILURE;
+        return;
+    }
+
+    server_iref = iref;
+
+    server_state = SERVICE_STATE_EXPORTED;
+}
+
+/**
+ * \brief initializes the machine local block server
+ */
+errval_t block_local_init(uint32_t flags)
+{
+    errval_t err;
+
+    is_client = (flags & SERVICE_FLAG_CLIENT) != 0;
+
+    // initialize worker thread pool
+
+    // export the interface
+    err = block_service_export(NULL, block_local_export_cb,
+                    block_local_accept_cb, get_default_waitset(),
+                    IDC_EXPORT_FLAGS_DEFAULT);
+    if (err_is_fail(err)) {
+        /* TODO: Error handling */
+    }
+    // start new thread for accepting flounder connections
+
+    assert(!"NYI: block_local_init");
+    return SYS_ERR_OK;
+}
+
+/* -------------------------  Server Management  ------------------------- */
+
+/**
+ * \brief starts the machine local server of block service to accept requests
+ *
+ * This function should not return until stopped.
+ */
+errval_t block_local_start(void)
+{
+    // start listen to connections on the flounder channel
+
+    if (server_state != SERVICE_STATE_EXPORTED) {
+        /* TODO: ERROR */
+    }
+
+    /* TODO: give the service a name
+     *  -> Must it have a system wide unique name i.e. two services
+     *     with the same name running on two different hosts?
+     */
+    errval_t err = nameservice_register("TODO: NAME", server_iref);
+    if (err_is_fail(err)) {
+        /* TODO: Error handling */
+    }
+
+    /*
+     * TODO: message main loop
+     */
+    assert(!"NYI: block_local_start");
+    return SYS_ERR_OK;
+}
+/**
+ * \brief stops the request handling of the machine local block service requests
+ */
+errval_t block_local_stop(void)
+{
+    // set the stop flag.
+
+    // tear down all bulk channels
+
+    // stop the flounder service
+
+    // free up resources
+
+    assert(!"NYI: block_local_stop");
+    return SYS_ERR_OK;
+}
+
diff --git a/usr/block_server/local_server.h b/usr/block_server/local_server.h
new file mode 100644 (file)
index 0000000..61113da
--- /dev/null
@@ -0,0 +1,41 @@
+/**
+ * \file
+ * \brief Machine-local (flounder) server of the block service
+ */
+
+/*
+ * Copyright (c) 2007, 2008, 2009, 2010, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+
+
+#ifndef BLOCK_LOCAL_SERVER_H
+#define BLOCK_LOCAL_SERVER_H
+
+/* ---------------------------  Server State  ---------------------------- */
+enum local_server_state {
+    SERVICE_STATE_UNINITIALIZED = 0,
+    SERVICE_STATE_EXPORTED = 1,
+    SERVICE_STATE_RUNNING = 2,
+    SERVICE_STATE_STOPPED = 3,
+    SERVICE_STATE_FAILURE = 4,
+};
+
+#define SERVICE_FLAG_DEFAULT 0x0
+#define SERVICE_FLAG_CLIENT  0x1
+
+
+
+errval_t block_local_init(uint32_t flags);
+
+errval_t block_local_start(void);
+
+errval_t block_local_stop(void);
+
+
+#endif /* BLOCK_LOCAL_SERVER_H */
diff --git a/usr/block_server/network_client.c b/usr/block_server/network_client.c
new file mode 100644 (file)
index 0000000..a642130
--- /dev/null
@@ -0,0 +1,94 @@
+/**
+ * \file
+ * \brief Network client of the block service
+ */
+
+/*
+ * Copyright (c) 2007, 2008, 2009, 2010, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#include <barrelfish/barrelfish.h>
+
+#include "network_common.h"
+#include "network_client.h"
+
+/**
+ * \brief connects to a network block service
+ *
+ * TODO: Specification which block service to connect to
+ */
+errval_t block_net_connect(struct block_net_server *server)
+{
+    // initialize network connection to the block server
+
+    // obtain basic information about the block server
+
+    // potential initialize local cache
+
+    // initialize the bulk channel to the block server
+    assert( !"NYI: block_net_connect");
+    return SYS_ERR_OK;
+}
+
+/**
+ * \brief disconnects form the network block service
+ */
+errval_t block_net_disconnect(struct block_net_server *server)
+{
+    // tear down the bulk channel
+
+    // tear down the network connection
+
+    // free allocated cache
+    assert( !"NYI: block_net_connect");
+    return SYS_ERR_OK;
+}
+
+/**
+ * \brief issues a new retrieval request to the network block server
+ *
+ * \param block_start   the id of the first block
+ * \param count         the number of blocks to read
+ * \param ret_data      the data returned
+ *
+ * TODO: appropriate data type for ret_data
+ */
+errval_t block_net_read(struct block_net_server *server, size_t block_start,
+                        size_t count, void *ret_data)
+{
+    // issue a new read request
+
+    // wait until the data arrives
+
+    // return data and status code
+    assert( !"NYI: block_net_connect");
+    return SYS_ERR_OK;
+}
+
+/**
+ * \brief forwards the write request to the network block server
+ *
+ * \param block_start   the id of the first block
+ * \param count         the numbers of blocks to write
+ * \param data          the data to write
+ *
+ * TODO: appropriate data type for ret_data
+ */
+errval_t block_net_write(struct block_net_server *server, size_t block_start,
+                         size_t count, void *data)
+{
+    // issue a new write request
+
+    // send data over the bulk channel
+
+    // wait until completed
+
+    // return status code
+    assert( !"NYI: block_net_connect");
+    return SYS_ERR_OK;
+}
diff --git a/usr/block_server/network_client.h b/usr/block_server/network_client.h
new file mode 100644 (file)
index 0000000..4092df1
--- /dev/null
@@ -0,0 +1,37 @@
+/**
+ * \file
+ * \brief Network client of the block service
+ */
+
+/*
+ * Copyright (c) 2007, 2008, 2009, 2010, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef BLOCK_NETWORK_CLIENT_H
+#define BLOCK_NETWORK_CLIENT_H
+
+struct block_net_server
+{
+    char *ip;
+    uint16_t port;
+/* other fields needed to represent the connection */
+};
+
+errval_t block_net_connect(struct block_net_server *server);
+
+errval_t block_net_disconnect(struct block_net_server *server);
+
+errval_t block_net_read(struct block_net_server *server, size_t block_start,
+                        size_t count, void *ret_data);
+
+errval_t block_net_write(struct block_net_server *server, size_t block_start,
+                         size_t count, void *data);
+
+struct block_net_server *block_net_server_lookup(size_t block_start);
+
+#endif /* BLOCK_NETWORK_CLIENT_H */
diff --git a/usr/block_server/network_common.h b/usr/block_server/network_common.h
new file mode 100644 (file)
index 0000000..c49f8fb
--- /dev/null
@@ -0,0 +1,35 @@
+/**
+ * \file
+ * \brief Network client of the block service
+ */
+
+/*
+ * Copyright (c) 2007, 2008, 2009, 2010, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef BLOCK_NETWORK_COMMON_H
+#define BLOCK_NETWORK_COMMON_H
+
+enum block_net_msg_type {
+    BLOCK_SETUP,    ///< transfer setup information of the bulk channel
+    BLOCK_READ,     ///< issue a block read request
+    BLOCK_WRITE     ///< issue a block write request
+};
+
+/**
+ * data structure representing the data of the messages transferred over the
+ * service channel
+ */
+struct block_net_msg {
+    enum block_net_msg_type op;
+    uintptr_t blockid;
+};
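+
+/*
+ * A read or write request additionally needs a block range; a hypothetical
+ * extension of the message (not part of this commit) could carry it in a
+ * per-operation payload, e.g.:
+ *
+ *   struct block_net_msg {
+ *       enum block_net_msg_type op;
+ *       union {
+ *           struct {
+ *               uint32_t start_block;
+ *               uint32_t count;
+ *           } rw;
+ *       } payload;
+ *   };
+ */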
+
+
+
+#endif /* BLOCK_NETWORK_COMMON_H */
diff --git a/usr/block_server/network_server.c b/usr/block_server/network_server.c
new file mode 100644 (file)
index 0000000..6db032b
--- /dev/null
@@ -0,0 +1,223 @@
+/**
+ * \file
+ * \brief Network server thread of the block server
+ */
+
+/*
+ * Copyright (c) 2007, 2008, 2009, 2010, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#include <barrelfish/barrelfish.h>
+#include <barrelfish/waitset.h>
+
+#include <lwip/tcp.h>
+#include <lwip/init.h>
+
+#include "network_common.h"
+#include "network_server.h"
+
+#if IMPLEMENTED
+
+/**
+ * \brief handles block read request of a connected client
+ */
+static errval_t handle_block_read(size_t start_block, size_t count)
+{
+    assert(!"NYI: block_net_init");
+    return SYS_ERR_OK;
+}
+
+/**
+ * \brief handles block write requests of a connected client
+ */
+static errval_t handle_block_write(size_t start_block, size_t count)
+{
+    assert(!"NYI: block_net_init");
+    return SYS_ERR_OK;
+}
+
+/**
+ * \brief handler for disconnect requests
+ */
+static errval_t handle_disconnect(void)
+{
+    // free up resources
+
+    // close the network connection
+    assert(!"NYI: block_net_init");
+    return SYS_ERR_OK;
+}
+
+/**
+ * \brief handles a generic request from a client and checks the request type
+ */
+static errval_t handle_request_common(void)
+{
+    // check request type
+
+    // dispatch to the specialized handler
+
+    assert(!"NYI: handle_request_common");
+    return SYS_ERR_OK;
+}
+
+/**
+ * \brief handles the connection event of a new network block server client
+ */
+static errval_t client_connect_cb(void)
+{
+    // set up data structures for the newly connected client
+
+    assert(!"NYI: client_connect_cb");
+    return SYS_ERR_OK;
+}
+
+#endif
+
+static struct tcp_pcb *server_pcb;
+
+static err_t bs_net_recv(void *arg, struct tcp_pcb *tpcb, struct pbuf *p,
+                              err_t err)
+{
+    assert(!"NYI: bs_net_recv");
+    return ERR_OK;
+}
+
+/*
+ * This function is called periodically from TCP and is also responsible
+ * for taking care of stale connections.
+ */
+static err_t bs_net_poll(void *arg, struct tcp_pcb *tpcb)
+{
+    assert(!"NYI: bs_net_poll");
+    return ERR_OK;
+}
+
+static void bs_net_err(void *arg, err_t err)
+{
+    assert(!"NYI: bs_net_err");
+}
+
+static err_t bs_net_accept(void *arg, struct tcp_pcb *tpcb, err_t err)
+{
+#if TCP_LISTEN_BACKLOG
+    /* Decrease the listen backlog counter */
+    struct tcp_pcb_listen *lpcb = (struct tcp_pcb_listen*)arg;
+    tcp_accepted(lpcb);
+#endif
+    tcp_setprio(tpcb, TCP_PRIO_NORMAL);
+
+    /*
+     * TODO:  allocate a new connection control structure for this
+     */
+    void *bs_conn = NULL;
+
+    tcp_arg(tpcb, bs_conn);
+
+    tcp_recv(tpcb, bs_net_recv);
+    tcp_err(tpcb, bs_net_err);
+    tcp_poll(tpcb, bs_net_poll, 4);
+
+
+
+    return ERR_OK;
+}
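+
+/*
+ * A minimal version of the connection setup that the TODO in bs_net_accept
+ * refers to could look as follows (hypothetical sketch; struct bs_conn is
+ * still empty in network_server.h):
+ *
+ *   struct bs_conn *conn = malloc(sizeof(*conn));
+ *   if (conn == NULL) {
+ *       tcp_abort(tpcb);
+ *       return ERR_MEM;
+ *   }
+ *   tcp_arg(tpcb, conn);
+ */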
+
+/**
+ * \brief initializes the network server of the block service
+ */
+errval_t block_net_init(uint16_t port)
+{
+    if (lwip_init_auto() == false) {
+        printf("ERROR: lwip_init_auto failed!\n");
+        /* XXX: 1 is not a proper errval_t error code */
+        return 1;
+    }
+
+    server_pcb = tcp_new();
+    if (server_pcb == NULL) {
+        printf("ERROR: tcp_new failed!\n");
+        return 2;
+    }
+
+    err_t e = tcp_bind(server_pcb, IP_ADDR_ANY, port);
+    if (e != ERR_OK) {
+        printf("ERROR: tcp_bind failed!\n");
+        return 2;
+    }
+
+
+    // set up the network sockets / queues
+
+    // setup the worker thread pool
+
+    assert(!"NYI: block_net_init");
+    return SYS_ERR_OK;
+}
+
+static bool server_running = false;
+
+/**
+ * \brief starts the network server of block service to accept requests
+ */
+errval_t block_net_start(void)
+{
+    errval_t err;
+
+    server_pcb = tcp_listen(server_pcb);
+    if (server_pcb == NULL) {
+        printf("ERROR: tcp_listen failed!\n");
+        return 1;
+    }
+
+    tcp_arg(server_pcb, server_pcb);
+    tcp_accept(server_pcb, bs_net_accept);
+
+    server_running = true;
+
+    struct waitset *ws = get_default_waitset();
+    while (server_running) {
+        err = event_dispatch_non_block(ws);
+        if (err != LIB_ERR_NO_EVENT) {
+            if (err_is_fail(err)) {
+                DEBUG_ERR(err, "in event_dispatch");
+                break;
+            }
+        }
+
+        wrapper_perform_lwip_work();
+        err = event_dispatch(ws);
+        if (err_is_fail(err)) {
+            DEBUG_ERR(err, "in event_dispatch");
+            break;
+        }
+    }
+
+    assert(!"NYI: block_net_start");
+    return SYS_ERR_OK;
+}
+
+/**
+ * \brief stops the request handling of the network block service
+ */
+errval_t block_net_stop(void)
+{
+    server_running = false;
+    return SYS_ERR_OK;
+}
+
+/**
+ * \brief lookup of the block server connection based on the requested block
+ *
+ * The client may be connected to multiple network block servers. The request
+ * needs to be forwarded to the correct block server based in the requested
+ * block id.
+ *
+ * XXX: Supply the block server ID instead? or just say there is one block server?
+ */
+struct block_net_server *block_net_server_lookup(size_t block_start)
+{
+    assert(!"NYI: block_net_server_lookup");
+    return NULL;
+}
+
diff --git a/usr/block_server/network_server.h b/usr/block_server/network_server.h
new file mode 100644 (file)
index 0000000..414c5a8
--- /dev/null
@@ -0,0 +1,39 @@
+/**
+ * \file
+ * \brief Network server thread of the block server
+ */
+
+/*
+ * Copyright (c) 2007, 2008, 2009, 2010, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+
+
+#ifndef BLOCK_NETWORK_SERVER_H
+#define BLOCK_NETWORK_SERVER_H
+
+#include "network_common.h"
+
+/**
+ * struct representing a connection from the block server client to the
+ * block server.
+ */
+struct bs_conn {
+
+
+};
+
+errval_t block_net_init(uint16_t port);
+
+errval_t block_net_start(void);
+
+errval_t block_net_stop(void);
+
+struct block_net_server;
+
+struct block_net_server *block_net_server_lookup(size_t block_start);
+
+#endif /* BLOCK_NETWORK_SERVER_H */
diff --git a/usr/tests/bulk_transfer/Hakefile b/usr/tests/bulk_transfer/Hakefile
new file mode 100644 (file)
index 0000000..6913eee
--- /dev/null
@@ -0,0 +1,17 @@
+--------------------------------------------------------------------------
+-- Copyright (c) 2007-2011, ETH Zurich.
+-- All rights reserved.
+--
+-- This file is distributed under the terms in the attached LICENSE file.
+-- If you do not find this file, copies can be found by writing to:
+-- ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+--
+-- Hakefile for /usr/tests/bulk_transfer
+-- 
+--------------------------------------------------------------------------
+
+[ build application { target = "bulk_mini",
+                      cFiles = [ "bulk_mini.c" ],
+                      addLibraries = libDeps [ "bulk_transfer" ]
+                 }
+]
diff --git a/usr/tests/bulk_transfer/bulk_mini.c b/usr/tests/bulk_transfer/bulk_mini.c
new file mode 100644 (file)
index 0000000..f4eb47e
--- /dev/null
@@ -0,0 +1,196 @@
+#include <stdio.h>
+
+#include <barrelfish/barrelfish.h>
+
+#include <bulk_transfer/bulk_transfer.h>
+#include <bulk_transfer/bulk_allocator.h>
+#include <bulk_transfer/bulk_local.h>
+
+#define BUFSZ 0x1000
+
+/******************************************************************************/
+/* Receiver */
+
+static void cb_rx_bind_done(struct bulk_channel *channel,
+                            errval_t err);
+static void cb_rx_move_received(struct bulk_channel *channel,
+                                struct bulk_buffer  *buffer,
+                                void                *meta);
+static void cb_rx_copy_received(struct bulk_channel *channel,
+                                struct bulk_buffer  *buffer,
+                                void                *meta);
+
+static struct bulk_channel rx_channel;
+static struct bulk_channel_callbacks rx_callbacks = {
+    .bind_done = cb_rx_bind_done,
+    .move_received = cb_rx_move_received,
+    .copy_received = cb_rx_copy_received,
+};
+
+
+static void cb_rx_bind_done(struct bulk_channel *channel,
+                            errval_t err)
+{
+    printf("cb_rx_bind_done\n");
+}
+
+static void cb_rx_move_received(struct bulk_channel *channel,
+                                struct bulk_buffer  *buffer,
+                                void                *meta)
+{
+    uint32_t meta_val = *((uint32_t*) meta);
+    printf("cb_rx_move_received: meta=%x\n", meta_val);
+    meta_val++;
+    errval_t err = bulk_channel_pass(channel, buffer, &meta_val);
+    assert(!err_is_fail(err));
+}
+
+static void cb_rx_copy_received(struct bulk_channel *channel,
+                                struct bulk_buffer  *buffer,
+                                void                *meta)
+{
+    printf("cb_rx_copy_received: meta=%x\n", *((uint32_t*) meta));
+    errval_t err = bulk_channel_release(channel, buffer);
+    assert(!err_is_fail(err));
+}
+
+
+
+static void init_receiver(struct waitset *waitset)
+{
+    static struct bulk_local_endpoint ep;
+    errval_t err;
+    struct bulk_channel_setup setup = {
+        .direction = BULK_DIRECTION_RX,
+        .role = BULK_ROLE_SLAVE,
+        .meta_size = sizeof(uint32_t),
+        .waitset = waitset,
+        .trust = BULK_TRUST_FULL,
+    };
+
+    bulk_local_init_endpoint(&ep, NULL);
+    err = bulk_channel_create(&rx_channel, &ep.generic, &rx_callbacks, &setup);
+    assert(!err_is_fail(err));
+}
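+
+/*
+ * Note: the local backend pairs endpoints through the channel pointer.
+ * The receiver creates its channel on an unbound endpoint (NULL above),
+ * and the sender later binds via an endpoint that points at rx_channel
+ * (see init_sender below).
+ */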
+
+
+/******************************************************************************/
+/* Sender */
+
+static void cb_tx_bind_done(struct bulk_channel *channel,
+                            errval_t err);
+static void cb_tx_buffer_received(struct bulk_channel *channel,
+                                  struct bulk_buffer  *buffer,
+                                  void                *meta);
+static void cb_tx_copy_released(struct bulk_channel *channel,
+                                struct bulk_buffer  *buffer);
+
+static struct bulk_channel tx_channel;
+static int tx_phase = 0;
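+/* tx_phase drives the sender's test sequence:
+ *   0: waiting for bind_done        1: bound, issue move
+ *   2: move in flight               3: buffer returned, issue copy
+ *   4: copy in flight               5: copy released, done
+ */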
+static struct bulk_pool_allocator tx_allocator;
+static struct bulk_channel_callbacks tx_callbacks = {
+    .bind_done = cb_tx_bind_done,
+    .buffer_received = cb_tx_buffer_received,
+    .copy_released = cb_tx_copy_released,
+};
+
+
+static void cb_tx_bind_done(struct bulk_channel *channel,
+                            errval_t err)
+{
+    printf("cb_tx_bind_done\n");
+    tx_phase = 1;
+
+    err = bulk_channel_assign_pool(channel, tx_allocator.pool);
+    assert(!err_is_fail(err));
+}
+
+static void cb_tx_buffer_received(struct bulk_channel *channel,
+                                  struct bulk_buffer  *buffer,
+                                  void                *meta)
+{
+    printf("cp_tx_buffer_received: meta=%x\n", *((uint32_t*) meta));
+    errval_t err = bulk_buffer_free(&tx_allocator, buffer);
+    assert(!err_is_fail(err));
+
+    tx_phase = 3;
+}
+
+static void cb_tx_copy_released(struct bulk_channel *channel,
+                                struct bulk_buffer  *buffer)
+{
+    printf("cp_tx_copy_released\n");
+    tx_phase = 5;
+    errval_t err = bulk_buffer_free(&tx_allocator, buffer);
+    assert(!err_is_fail(err));
+
+}
+
+
+static void init_sender(struct waitset *waitset)
+{
+    static struct bulk_local_endpoint ep;
+    errval_t err;
+    struct bulk_channel_bind_params params = {
+        .waitset = waitset,
+        .trust = BULK_TRUST_FULL,
+    };
+
+    err = bulk_pool_alloc_init_new(&tx_allocator, 4, BUFSZ, NULL);
+    assert(!err_is_fail(err));
+
+    bulk_local_init_endpoint(&ep, &rx_channel);
+    err = bulk_channel_bind(&tx_channel, &ep.generic, &tx_callbacks, &params);
+    assert(!err_is_fail(err));
+
+}
+
+static void tx_process(void)
+{
+    errval_t err;
+    uint32_t meta;
+    struct bulk_buffer *buffer;
+
+    if (tx_phase == 1) {
+        meta = 42;
+        printf("Allocating buffer...\n");
+        err = bulk_buffer_alloc(&tx_allocator, &buffer);
+        assert(!err_is_fail(err));
+        printf("Starting move... meta=%x\n", meta);
+        err = bulk_channel_move(&tx_channel, buffer, &meta);
+        assert(!err_is_fail(err));
+
+        tx_phase++;
+    } else if (tx_phase == 3) {
+        meta = 44;
+        printf("Allocating buffer...\n");
+        err = bulk_buffer_alloc(&tx_allocator, &buffer);
+        assert(!err_is_fail(err));
+        printf("Starting copy... meta=%x\n", meta);
+        err = bulk_channel_copy(&tx_channel, buffer, &meta);
+        assert(!err_is_fail(err));
+
+        tx_phase++;
+    }
+}
+
+
+
+/******************************************************************************/
+/* Test control */
+
+
+int main(int argc, char *argv[])
+{
+    struct waitset *ws = get_default_waitset();
+    printf("bulk_mini: enter\n");
+    init_receiver(ws);
+    printf("bulk_mini: rx_init done\n");
+    init_sender(ws);
+    printf("bulk_mini: tx_init done\n");
+    while (true) {
+        tx_process();
+        event_dispatch(ws);
+    }
+    return 0;
+}
+
+