failure DEVICE_STATUS "VirtIO device has the wrong status",
failure QUEUE_ACTIVE "The selected queue is already activated",
failure QUEUE_INVALID "The selected queue does not exist",
+ failure BUF_SIZE "The buffer size is invalid",
+ failure ARG_INVALID "The given argument is invalid",
+ failure NO_BUFFER "No buffer given, number of buffers is 0",
+ failure ALLOC_FULL "The allocator is already full",
+ failure BUF_USED "The buffer is already used",
};
#modulenounzip /skb_ramfs.cpio.gz nospawn
#module /k1om/sbin/kaluga boot
#module /k1om/sbin/acpi boot
-module /k1om/sbin/spawnd boot bootk1om=0-3
+module /k1om/sbin/spawnd boot bootk1om=0,4,8,12,16
module /k1om/sbin/startd boot
#module /k1om/sbin/routing_setup boot
module /k1om/sbin/xeon_phi_test
/**
- * \brief this struct represents a virtio driver
- *
- * This can be seen as on the guest side of the virtio channel
+ * VirtIO buffer allocator (forward declaration) and buffer states
*/
-struct virtio_driver
-{
- void *device_config;
+struct virtio_buffer_allocator;
+enum virtio_buffer_state {
+    VIRTIO_BUFFER_S_INVALID,    ///< buffer is in an invalid state
+    VIRTIO_BUFFER_S_FREE,       ///< buffer is free to be allocated
+    VIRTIO_BUFFER_S_ALLOCED,    ///< buffer has been handed out by the allocator
+    VIRTIO_BUFFER_S_QUEUED      ///< buffer is enqueued on a virtqueue
};
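+
+/*
+ * Buffer life cycle (sketch, derived from the state assertions below):
+ * FREE -> ALLOCED (virtio_buffer_alloc) -> QUEUED (enqueued on a virtqueue)
+ * -> ALLOCED (dequeued again) -> FREE (virtio_buffer_free)
+ */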
-
/**
- * VirtIO Memory segment
+ * represents a VirtIO buffer
*/
-
-enum virtio_buffer_state {
-
-};
-
struct virtio_buffer
{
- enum virtio_buffer_state state;
-
- struct virtio_buffer_head *head;
- struct virtio_buffer *next;
+    struct virtio_buffer_allocator *a; ///< allocator this buffer belongs to
+ enum virtio_buffer_state state; ///< state of this buffer
+ lpaddr_t paddr; ///< physical address of the buffer
+ void *buf; ///< mapped virtual address of the buffer
+ size_t length; ///< size of this buffer
+ struct virtio_buffer_list *lhead; ///< pointer to the buffer list head
+ struct virtio_buffer *next; ///< pointer to the next buffer in the list
};
-struct virtio_buffer_head
+/**
+ * represents a list of buffers
+ */
+struct virtio_buffer_list
{
struct virtio_buffer *head;
    struct virtio_buffer *tail;
+    size_t length;                     ///< number of buffers in this list
};
-errval_t virtio_buffer_alloc_init(struct virtio_buffer_allocator *alloc,
- uint16_t nbufs);
-errval_t virtio_buffer_alloc(void);
-errval_t virtio_buffer_free(void);
+/**
+ * \brief initializes the buffer allocator and allocates memory for the
+ * buffers
+ *
+ * \param alloc the allocator struct to initialize
+ * \param nbufs number of buffers to allocate
+ * \param bufsz size of each buffer to allocate
+ *
+ * \return SYS_ERR_OK on success
+ */
+errval_t virtio_buffer_alloc_init(struct virtio_buffer_allocator **alloc,
+ size_t nbufs,
+ size_t bufsz);
+
+/**
+ * \brief destroys a buffer allocator by freeing up all the resources used
+ * by the buffers
+ *
+ * \param alloc the allocator to destroy
+ *
+ * \returns SYS_ERR_OK on success
+ */
+errval_t virtio_buffer_alloc_destroy(struct virtio_buffer_allocator *alloc);
+
+/**
+ * \brief allocates a new virtio_buffer from the buffer allocator
+ *
+ * \param alloc the allocator to take the buffer from
+ *
+ * \returns pointer to the allocated virtio_buffer on success
+ *          NULL if there are no free buffers left
+ */
+struct virtio_buffer *virtio_buffer_alloc(struct virtio_buffer_allocator *alloc);
+
+/**
+ * \brief frees up an unused buffer by returning it to the allocator
+ *
+ * \param buf the buffer to be freed
+ */
+errval_t virtio_buffer_free(struct virtio_buffer *buf);
+
+/**
+ * \brief returns the backing frame capability of a buffer allocator
+ */
+errval_t virtio_buffer_alloc_get_cap(struct virtio_buffer_allocator *alloc,
+ struct capref *ret_cap);
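+
+/*
+ * Example usage (sketch; the buffer count and size as well as `data`/`len`
+ * are illustrative):
+ *
+ *   struct virtio_buffer_allocator *alloc;
+ *   err = virtio_buffer_alloc_init(&alloc, 32, BASE_PAGE_SIZE);
+ *   struct virtio_buffer *buf = virtio_buffer_alloc(alloc);
+ *   if (buf) {
+ *       memcpy(buf->buf, data, len);   // len <= buf->length
+ *       ...
+ *       err = virtio_buffer_free(buf);
+ *   }
+ */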
+/**
+ * \brief initializes a new VirtIO buffer list to be used for chaining buffers
+ *
+ * \param bl buffer list to initialize
+ *
+ * \return SYS_ERR_OK on success
+ */
+errval_t virtio_blist_init(struct virtio_buffer_list *bl);
-errval_t virtio_buffer_list_reset(void);
-errval_t virtio_buffer_list_append(void);
+/**
+ * \brief frees up the buffer list by returning the buffers to the allocator
+ *
+ * \param bl buffer list to be freed
+ *
+ * \returns SYS_ERR_OK on success
+ */
+errval_t virtio_blist_free(struct virtio_buffer_list *bl);
+/**
+ * \brief appends a buffer to the tail of the buffer list
+ *
+ * \param bl the list to append the buffer to
+ * \param buf the buffer to be appended
+ */
+errval_t virtio_blist_append(struct virtio_buffer_list *bl,
+ struct virtio_buffer *buf);
+/**
+ * \brief returns and removes the head of the list
+ *
+ * \param bl buffer list
+ *
+ * \returns pointer to virtio_buffer on success
+ *          NULL on failure
+ */
+struct virtio_buffer *virtio_blist_get(struct virtio_buffer_list *bl);
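+
+/*
+ * Example: chaining two buffers into a list (sketch; assumes an initialized
+ * allocator `alloc`):
+ *
+ *   struct virtio_buffer_list bl;
+ *   virtio_blist_init(&bl);
+ *   virtio_blist_append(&bl, virtio_buffer_alloc(alloc));
+ *   virtio_blist_append(&bl, virtio_buffer_alloc(alloc));
+ *   ...
+ *   struct virtio_buffer *buf = virtio_blist_get(&bl);  // removes the head
+ */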
errval_t virtio_device_set_status(struct virtio_device *dev,
uint8_t status);
-errval_t virtio_device_get_status(struct virtio_device *dev,
- uint8_t *ret_status);
errval_t virtio_device_feature_negotiate(struct virtio_device *dev);
errval_t virtio_device_specific_setup(struct virtio_device *dev);
+bool virtio_device_has_feature(struct virtio_device *dev,
+ uint8_t feature);
+
#endif // VIRTIO_VIRTIO_DEVICE_H
#define VIRTQUEUE_CHAIN_END VIRTQUEUE_SIZE_MAX
/// Feature flag indicating that the ring supports indirect descriptors
-#define VIRTIO_RING_F_INDIRECT_DESC (1 << 28)
+#define VIRTIO_RING_F_INDIRECT_DESC 28
/// Feature flag indicating that the ring supports interrupt suppression
-#define VIRTIO_RING_F_EVENT_IDX (1 << 29)
+#define VIRTIO_RING_F_EVENT_IDX 29
/**
uint64_t mask;
mask = (1 << VIRTIO_TRANSPORT_F_START) - 1;
- mask |= VIRTIO_RING_F_INDIRECT_DESC;
- mask |= VIRTIO_RING_F_EVENT_IDX;
+    mask |= (1 << VIRTIO_RING_F_INDIRECT_DESC);
+    mask |= (1 << VIRTIO_RING_F_EVENT_IDX);
return (features & mask);
}
*/
errval_t virtio_virtqueue_desc_enqueue(struct virtqueue *vq,
- struct virtio_buffer *buf,
+ struct virtio_buffer_list *bl,
void *vaddr,
uint16_t writeable,
uint16_t readable);
+
+
#if 0
-int virtqueue_enqueue(struct virtqueue *vq, void *cookie,
- struct sglist *sg, int readable, int writable);
void *virtqueue_dequeue(struct virtqueue *vq, uint32_t *len);
void *virtqueue_poll(struct virtqueue *vq, uint32_t *len);
-
-uint64_t virtqueue_filter_features(uint64_t features);
-
void virtqueue_dump(struct virtqueue *vq);
#endif
id.base,
(uintptr_t )init->dev_reg);
- err = virtio_device_init(dev, init);
+ err = virtio_device_open(dev, init);
if (err_is_fail(err)) {
vspace_unmap(init->dev_reg);
}
return err;
}
+/**
+ * \brief checks if the device supports a certain feature
+ *
+ * \param dev the device to query for the feature
+ * \param feature the featurebit to check
+ *
+ * \returns true if the device supports that feature
+ * false if the device does not support that feature
+ */
+bool virtio_device_has_feature(struct virtio_device *dev,
+ uint8_t feature)
+{
+ /*
+ * if the device is not configured yet, we don't know the features
+ */
+    if (!(dev->status & VIRTIO_DEVICE_STATUS_FEATURES_OK)) {
+        return false;
+    }
+
+    return (dev->features & (1UL << feature)) != 0;
+}
+
* ETH Zurich D-INFK, Universitaetsstrasse 6, CH-8092 Zurich. Attn: Systems Group.
*/
+#include <string.h>
+
#include <barrelfish/barrelfish.h>
#include <virtio/virtio.h>
-#if 0
+#include "vbuffer.h"
-void stack_alloc_init(struct stack_allocator *alloc, size_t size)
+/**
+ * \brief initializes the buffer allocator and allocates memory for the
+ * buffers
+ *
+ * \param alloc the allocator struct to initialize
+ * \param nbufs number of buffers to allocate
+ * \param bufsz size of each buffer to allocate
+ *
+ * \return SYS_ERR_OK on success
+ */
+errval_t virtio_buffer_alloc_init(struct virtio_buffer_allocator **alloc,
+ size_t nbufs,
+ size_t bufsz)
{
- alloc->size = size;
- alloc->top = 0;
- alloc->stack = calloc(size, sizeof(void *));
+ errval_t err;
+
+ if (!alloc) {
+ return VIRTIO_ERR_ARG_INVALID;
+ }
+
+ if (nbufs == 0) {
+ return VIRTIO_ERR_NO_BUFFER;
+ }
+
+ size_t size = ROUND_UP(bufsz, BASE_PAGE_SIZE);
+ if (size != bufsz) {
+ debug_printf("WARNING: buffer size rounded up to multiple of page size:"
+ "[%lx] -> [%lx]\n",
+ (uint64_t) bufsz, (uint64_t) size);
+ }
+
+ struct capref frame;
+ err = frame_alloc(&frame, size * nbufs, NULL);
+ if (err_is_fail(err)) {
+ return err;
+ }
+
+ struct frame_identity id;
+ err = invoke_frame_identify(frame, &id);
+ if (err_is_fail(err)) {
+ cap_destroy(frame);
+ return err;
+ }
+
+ void *buf;
+ err = vspace_map_one_frame_attr(&buf,
+ size * nbufs,
+ frame,
+ VIRTIO_VREGION_FLAGS_RING,
+ NULL,
+ NULL);
+ if (err_is_fail(err)) {
+ cap_destroy(frame);
+ return err;
+ }
+
+ struct virtio_buffer_allocator *vbuf_alloc;
+
+ vbuf_alloc = malloc(sizeof(*vbuf_alloc));
+ if (vbuf_alloc == NULL) {
+ vspace_unmap(buf);
+ return LIB_ERR_MALLOC_FAIL;
+ }
+
+ vbuf_alloc->buffers = calloc(nbufs, sizeof(void *));
+ if (vbuf_alloc->buffers == NULL) {
+ vspace_unmap(buf);
+ free(vbuf_alloc);
+ return LIB_ERR_MALLOC_FAIL;
+ }
+
+    struct virtio_buffer *buffers = malloc(nbufs * sizeof(struct virtio_buffer));
+ if (buffers == NULL) {
+ vspace_unmap(buf);
+ free(vbuf_alloc->buffers);
+ free(vbuf_alloc);
+ return LIB_ERR_MALLOC_FAIL;
+ }
+
+ vbuf_alloc->cap = frame;
+ vbuf_alloc->size = nbufs;
+    vbuf_alloc->top = nbufs;
+
+ struct virtio_buffer *vbuf;
+ for (uint32_t i = 0; i < nbufs; ++i) {
+        vbuf = buffers + i;
+        vbuf->buf = ((uint8_t *) buf) + i * size;
+        vbuf->length = size;
+        vbuf->paddr = id.base + i * size;
+ vbuf->state = VIRTIO_BUFFER_S_FREE;
+ vbuf->a = vbuf_alloc;
+ vbuf_alloc->buffers[i] = vbuf;
+ }
+
+ *alloc = vbuf_alloc;
+
+ return SYS_ERR_OK;
+}
+
+/**
+ * \brief destroys a buffer allocator by freeing up all the resources used
+ * by the buffers
+ *
+ * \param alloc the allocator to destroy
+ *
+ * \returns SYS_ERR_OK on success
+ */
+errval_t virtio_buffer_alloc_destroy(struct virtio_buffer_allocator *alloc)
+{
+ assert(!"NYI: virtio_buffer_alloc_destroy");
+ return SYS_ERR_OK;
+}
+
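+/**
+ * \brief allocates a new virtio_buffer from the buffer allocator
+ *
+ * \param alloc the allocator to take the buffer from
+ *
+ * \returns pointer to the allocated virtio_buffer on success
+ *          NULL if there are no free buffers left
+ */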
+struct virtio_buffer *virtio_buffer_alloc(struct virtio_buffer_allocator *alloc)
+{
+ if (alloc->top == 0) {
+ return NULL;
+ }
+ struct virtio_buffer *buf = alloc->buffers[--alloc->top];
+
+ assert(buf->state == VIRTIO_BUFFER_S_FREE);
+ buf->state = VIRTIO_BUFFER_S_ALLOCED;
+ return buf;
}
-bool stack_alloc_free(struct stack_allocator *alloc, void *el)
+/**
+ * \brief frees up an unused buffer by returning it to the allocator
+ *
+ * \param buf the buffer to be freed
+ */
+errval_t virtio_buffer_free(struct virtio_buffer *buf)
{
+ struct virtio_buffer_allocator *alloc = buf->a;
if (alloc->top >= alloc->size) {
- return false;
+ /* this should actually not happen */
+ return VIRTIO_ERR_ALLOC_FULL;
}
- alloc->stack[alloc->top++] = el;
- return true;
+ assert(buf->state == VIRTIO_BUFFER_S_ALLOCED);
+ buf->state = VIRTIO_BUFFER_S_FREE;
+
+ alloc->buffers[alloc->top++] = buf;
+
+ return SYS_ERR_OK;
}
-void *stack_alloc_alloc(struct stack_allocator *alloc)
+/**
+ * \brief returns the backing frame capability of a buffer allocator
+ */
+errval_t virtio_buffer_alloc_get_cap(struct virtio_buffer_allocator *alloc,
+ struct capref *ret_cap)
{
- if (alloc->top == 0) {
- return NULL;
+ *ret_cap = alloc->cap;
+ return SYS_ERR_OK;
+}
+
+/**
+ * \brief initializes a new VirtIO buffer list to be used for chaining buffers
+ *
+ * \param bl buffer list to initialize
+ *
+ * \return SYS_ERR_OK on success
+ */
+errval_t virtio_blist_init(struct virtio_buffer_list *bl)
+{
+ memset(bl, 0, sizeof(*bl));
+ return SYS_ERR_OK;
+}
+
+/**
+ * \brief frees up the buffer list by returning the buffers to the allocator
+ *
+ * \param bl buffer list to be freed
+ *
+ * \returns SYS_ERR_OK on success
+ */
+errval_t virtio_blist_free(struct virtio_buffer_list *bl)
+{
+ errval_t err;
+ struct virtio_buffer *buf = virtio_blist_get(bl);
+    while (buf) {
+ err = virtio_buffer_free(buf);
+ assert(err_is_ok(err));
+ buf = virtio_blist_get(bl);
}
- return alloc->stack[--alloc->top];
+ return SYS_ERR_OK;
}
+/**
+ * \brief appends a buffer to the tail of the buffer list
+ *
+ * \param bl the list to append the buffer to
+ * \param buf the buffer to be appended
+ */
+errval_t virtio_blist_append(struct virtio_buffer_list *bl,
+ struct virtio_buffer *buf)
+{
+ if (buf->lhead) {
+ return VIRTIO_ERR_BUF_USED;
+ }
+ if (bl->length == 0) {
+ bl->head = buf;
+ bl->tail = buf;
+ } else {
+ bl->tail->next = buf;
+ bl->tail = buf;
+ }
+ buf->lhead = bl;
+ buf->next = NULL;
+ bl->length++;
+ return SYS_ERR_OK;
+}
-#endif
+/**
+ * \brief returns and removes the head of the list
+ *
+ * \param bl buffer list
+ *
+ * \returns pointer to virtio_buffer on success
+ *          NULL on failure
+ */
+struct virtio_buffer *virtio_blist_get(struct virtio_buffer_list *bl)
+{
+ if (bl->length == 0) {
+ return NULL;
+ }
+
+ struct virtio_buffer *buf;
+ buf = bl->head;
+ if (bl->length == 1) {
+ bl->head = NULL;
+ bl->tail = NULL;
+ } else {
+ bl->head = buf->next;
+ }
+
+ bl->length--;
+
+ buf->next = NULL;
+ buf->lhead = NULL;
+
+ return buf;
+}
*/
struct virtio_buffer_allocator
{
- struct virtio_buffer *buffers; ///< array of virtio_buffers
+ struct virtio_buffer **buffers; ///< array of virtio_buffers
uint16_t size; ///< number of buffers in this allocator
uint16_t top; ///< pointer to the top slot
struct capref cap; ///< frame capability backing this allocator
- struct virtqueue *queue; ///< the virtqueue this allocator belongs to
+    struct virtio_device *vdev;        ///< the VirtIO device this allocator belongs to
};
/**
- * \brief initializes the buffer allocator and allocates memory for the
- * buffers
+ * \brief assigns a buffer allocator to a virtqueue so that its buffers can
+ *        be used over the VirtIO channel.
*
- * \param alloc the allocator struct to initialize
- * \param nbufs number of buffers to allocate
- * \param bufsz size of each buffer to allocate
- *
- * \return SYS_ERR_OK on success
- */
-errval_t virtio_buffer_alloc_init(struct virtio_buffer_allocator *alloc,
- size_t nbufs,
- size_t bufsz);
-
-/**
- * \brief destroys a buffer allocator by freeing up all the resources used
- * by the buffers
- *
- * \param alloc the allocator to destroy
- *
- * \returns SYS_ERR_OK on success
- */
-errval_t virtio_buffer_alloc_destroy(struct virtio_buffer_allocator *alloc);
-
-
-struct virtio_buffer *virtio_buffer_alloc(struct virtio_buffer_allocator *alloc);
-
-/**
- * \brief frees up a unused buffer by returning it to the allocator
- *
- * \param buf the buffer to be freed
+ * \param bf   buffer allocator
+ * \param vdev VirtIO device the buffer allocator gets assigned to
*/
-errval_t virtio_buffer_free(struct virtio_buffer_allocator *alloc,
- struct virtio_buffer *buf);
+errval_t virtio_buffer_alloc_assign(struct virtio_buffer_allocator *bf,
+                                    struct virtio_device *vdev);
-/**
- * \brief returns the backing frame capability of a buffer allocator
- */
-errval_t virtio_buffer_alloc_get_cap(struct virtio_buffer_allocator *alloc,
- struct capref *ret_cap);
#endif // VIRTIO_VIRTIO_BUFFER_H
#include <virtio/virtio.h>
#include <virtio/virtio_ring.h>
#include <virtio/virtqueue.h>
+#include <virtio/virtio_device.h>
#include "debug.h"
#define VIRTQUEUE_FLAG_INDIRECT 0x0001
#define VIRTQUEUE_FLAG_EVENT_IDX 0x0002
+#define VIRTQUEUE_FLAG_FREE_CAP 0x8000
/**
* this data structure stores additional information to the descriptors
*/
struct vring_desc_info
{
- void *buf; ///< virtual address of this descriptor
- struct capref cap; ///< capability of this descriptor
- size_t size; ///< the size of the capability in bytes
- lpaddr_t paddr; ///< physical address of this descriptor
- lpaddr_t offset; ///< offset into the capability for mapping
+    struct virtio_buffer *buf;  ///< the virtio_buffer backing this descriptor
+    void *st;                   ///< pointer to user-supplied state
+    uint8_t is_head;            ///< flag marking the head of a descriptor chain
};
/**
uint16_t free_head; ///< head of the free descriptor chain
uint16_t free_count; ///< number of available free descriptors
uint16_t used_tail; ///< last consumed descriptor used table
- uint16_t used_count; ///< number of queued used descriptors
+ uint16_t queued_count; ///< number of queued used descriptors
/* vring memory information */
struct capref vring_cap; ///< capability of the vring data structure
}
+/**
+ * \brief initializes the indirect descriptors
+ *
+ * \param vq   the virtqueue to initialize the indirect descriptors for
+ * \param size the number of indirect descriptors
+ *
+ * \returns SYS_ERR_OK on success
+ */
+static errval_t virtqueue_init_indirect(struct virtqueue *vq,
+ uint16_t size)
+{
+ struct virtio_device *vdev = vq->device;
+
+ /*
+ * check if the device supports indirect descriptors first
+ */
+    if (!virtio_device_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC)) {
+ VIRTIO_DEBUG_VQ("Device does not support indirect descriptors\n");
+ return SYS_ERR_OK;
+ }
+
+ assert(!"NYI: virtqueue_init_indirect");
+
+ return SYS_ERR_OK;
+}
+
+
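+/**
+ * \brief checks if the host has to be notified about newly available buffers
+ *
+ * \param vq the virtqueue to check
+ *
+ * \returns true if the host should be notified
+ *          false if the notification can be suppressed
+ */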
+static bool virtqueue_should_notify_host(struct virtqueue *vq)
+{
+ uint16_t new, prev, *event_idx;
+
+ if (vq->flags & VIRTQUEUE_FLAG_EVENT_IDX) {
+ new = vq->vring.avail->idx;
+ prev = new - vq->queued_count;
+ event_idx = vring_get_avail_event(&vq->vring);
+
+ return (vring_need_event(*event_idx, new, prev) != 0);
+ }
+
+ return ((vq->vring.used->flags & VIRTIO_RING_USED_F_NO_NOTIFY) == 0);
+}
+
/*
* ============================================================================
return err;
}
+ /* set the flag that we have allocated the cap, so that it gets free'd */
+ (*vq)->flags |= VIRTQUEUE_FLAG_FREE_CAP;
+
return SYS_ERR_OK;
}
vq->intr_handler = setup->intr_handler;
vq->intr_arg = setup->intr_arg;
- if (setup->max_indirect > 0) {
+ if (0 && setup->max_indirect > 0) {
/*
* TODO: initialize indirect descriptors
*/
+ virtqueue_init_indirect(vq, setup->max_indirect);
}
-#if 0
- if (VIRTIO_BUS_WITH_FEATURE(dev, VIRTIO_RING_F_EVENT_IDX) != 0)
- vq->vq_flags |= VIRTQUEUE_FLAG_EVENT_IDX;
-#endif
+    if (virtio_device_has_feature(setup->device, VIRTIO_RING_F_EVENT_IDX)) {
+        vq->flags |= VIRTQUEUE_FLAG_EVENT_IDX;
+    }
virtqueue_init_vring(vq);
virtio_virtqueue_intr_disable(vq);
*/
void virtio_virtqueue_notify_host(struct virtqueue *vq)
{
- assert(!"NYI: host notify");
+ /* TODO: memory barrier */
+ if (virtqueue_should_notify_host(vq)) {
+ assert(!"NYI: host notify");
+ }
+ vq->queued_count = 0;
+
}
* Queue Management
*/
+/**
+ * \brief updates the available ring of the virtqueue by placing the descriptor
+ *        into the available ring.
+ *
+ * \param vq the virtqueue to update
+ * \param idx index of the new descriptor chain head
+ */
+static void virtqueue_update_available(struct virtqueue *vq,
+ uint16_t idx)
+{
+ uint16_t avail_idx = vq->vring.avail->idx & (vq->vring_ndesc - 1);
+ vq->vring.avail->ring[avail_idx] = idx;
+
+ /*
+ * wmb();
+ */
+
+ vq->vring.avail->idx++;
+ vq->queued_count++;
+}
+
+
+static errval_t virtqueue_enqueue_bufs(struct virtqueue *vq,
+ struct vring_desc *desc,
+ uint16_t head,
+ struct virtio_buffer_list *bl,
+ uint16_t readable,
+ uint16_t writable,
+ uint16_t *ret_idx)
+{
+ struct virtio_buffer *buf = bl->head;
+ struct vring_desc *cd;
+
+ uint16_t needed = readable + writable;
+ uint16_t idx = head;
+
+ for (uint16_t i = 0; i < needed; ++i) {
+ vq->vring_di[idx].buf = buf;
+ cd = &desc[idx];
+
+ cd->addr = buf->paddr;
+ cd->length = buf->length;
+ cd->flags = 0;
+
+ if (i < needed - 1) {
+ cd->flags |= VIRTIO_RING_DESC_F_NEXT;
+ }
+ if (i >= readable) {
+ cd->flags |= VIRTIO_RING_DESC_F_WRITE;
+ }
+ idx = cd->next;
+ buf = buf->next;
+ }
+
+ if (ret_idx) {
+ *ret_idx = idx;
+ }
+
+ return SYS_ERR_OK;
+}
+
+
+errval_t virtio_virtqueue_desc_enqueue(struct virtqueue *vq,
+ struct virtio_buffer_list *bl,
+ void *vaddr,
+ uint16_t writeable,
+ uint16_t readable)
+{
+ uint16_t needed = readable + writeable;
+
+    if (needed == 0 || needed != bl->length) {
+        return VIRTIO_ERR_BUF_SIZE;
+ }
+
+ if (vq->free_count < needed) {
+ return VIRTIO_ERR_QUEUE_EMPTY;
+ }
+
+ /*
+ * TODO: check if we should use indirect descriptors or not
+ */
+
+ uint16_t free_head = vq->free_head;
+ struct vring_desc_info *info = &vq->vring_di[free_head];
+
+ info->is_head = 0x1;
+ info->st = NULL;
+
+ uint16_t idx;
+ virtqueue_enqueue_bufs(vq, vq->vring.desc, free_head,
+ bl, readable, writeable, &idx);
+
+
+ /* update free values */
+ vq->free_head = idx;
+ vq->free_count -= needed;
+
+ virtqueue_update_available(vq, free_head);
+
+ return SYS_ERR_OK;
+}
+
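+/*
+ * Example: enqueueing a chain of two buffers, the first readable and the
+ * second writeable by the host (sketch; `vq` and `bl` set up as above):
+ *
+ *   err = virtio_virtqueue_desc_enqueue(vq, bl, NULL, 1, 1);
+ *   if (err_is_ok(err)) {
+ *       virtio_virtqueue_notify_host(vq);
+ *   }
+ */
+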
#if 0
/**
*
*/
errval_t virtio_virtqueue_desc_alloc(struct virtqueue *vq,
- size_t )
-
-errval_t virtio_virtqueue_desc_enq(struct virtqueue *vq,
- )
+ struct virtio_buffer_list *bl,
+ uint16_t readable,
+ uint16_t writeable)
{
- assert(!"NYI: virtio_virtqueue_enq");
- return SYS_ERR_OK;
+
}
+
+
void *virtio_virtqueue_desc_deq(struct virtqueue *vq)
{
return NULL;
}
char buf[50];
+#if !defined(__k1om__)
snprintf(buf, 50, "%s.%u", XEON_PHI_MESSAGING_NAME, xeon_phi_id);
-
+#else
+ snprintf(buf, 50, "%s", XEON_PHI_MESSAGING_NAME);
+#endif
DEBUG_XPMC("Nameservice lookup: %s\n", buf);
err = nameservice_blocking_lookup(buf, &xpm_iref[xeon_phi_id]);
errval_t err;
lvaddr_t offset;
- if (bi == NULL) {
- return SYS_ERR_ILLEGAL_INVOCATION;
- }
-
xeon_phi_boot_initialize(&boot_registers,
XEON_PHI_MMIO_TO_SBOX(phi),
XEON_PHI_MMIO_TO_DBOX(phi));
#include <string.h>
#include <barrelfish/barrelfish.h>
+#include <vfs/vfs.h>
#include <pci/pci.h>
#include <xeon_phi/xeon_phi_manager_client.h>
vendor_id, (device_id & 0xFF00), 0x8086, 0x2500);
return -1;
}
- debug_printf("WARNING: Initializing Xeon Phi with PCI address "
+ debug_printf("Initializing Xeon Phi with PCI address "
"[%u,%u,%u]\n", bus, dev, fun);
}
} else {
messages_wait_and_handle_next();
}
#endif
+
+ vfs_init();
+
err = service_init(&xphi);
if (err_is_fail(err)) {
USER_PANIC_ERR(err, "could not start the driver service\n");
*/
/// the name of the Xeon Phi bootloader image
-#define XEON_PHI_BOOTLOADER "weever"
+#define XEON_PHI_BOOTLOADER "/weever"
/// the name of the Xeon Phi multiboot image containint the modules
-#define XEON_PHI_MULTIBOOT "xeon_phi_multiboot"
+#define XEON_PHI_MULTIBOOT "/xeon_phi_multiboot"
/// if we use MSIX interrupts or legacy interrupts
#define XEON_PHI_MSIX_ENABLED 1
#ifdef __x86__
errval_t default_start_function(coreid_t where, struct module_info* mi,
- char* record)
+ char* record)
{
assert(mi != NULL);
errval_t err = SYS_ERR_OK;
-/*
- * XXX: there may be more device using this driver, so starting it a second time
- * may be needed.
- */
- if (is_started(mi) && strcmp("xeon_phi ",mi->binary)) {
+ /*
+     * XXX: there may be more devices using this driver, so starting it a second time
+ * may be needed.
+ */
+    if (is_started(mi) && strcmp("xeon_phi", mi->binary)) {
return KALUGA_ERR_DRIVER_ALREADY_STARTED;
}
bool cleanup = false;
err = oct_read(record,
- "_ { bus: %d, device: %d, function: %d, vendor: %d, device_id: %d }",
- &bus, &dev, &fun, &vendor_id, &device_id);
+ "_ { bus: %d, device: %d, function: %d, vendor: %d, device_id: %d }",
+ &bus, &dev, &fun, &vendor_id, &device_id);
if (err_is_ok(err)) {
// We assume that we're starting a device if the query above succeeds
// list.
    argv = malloc((mi->argc+2) * sizeof(char *));
memcpy(argv, mi->argv, mi->argc * sizeof(char *));
- char *pci_id = malloc(26);
+ char *pci_id = malloc(26);
// Make sure pci vendor and device id fit into our argument
assert(vendor_id < 0x9999 && device_id < 0x9999);
snprintf(pci_id, 26, "%04"PRIx64":%04"PRIx64":%04"PRIx64":%04"
- PRIx64":%04"PRIx64, vendor_id, device_id, bus, dev, fun);
+ PRIx64":%04"PRIx64, vendor_id, device_id, bus, dev, fun);
argv[mi->argc] = pci_id;
argv[mi->argc+1] = NULL;
cleanup = true;
}
+
+    if (is_started(mi) && !strcmp("xeon_phi", mi->binary)) {
+        /* XXX: driver instance already running; spawn this one on another core */
+        where = 20;
+    }
+
err = spawn_program(where, mi->path, argv,
- environ, 0, &mi->did);
+ environ, 0, &mi->did);
if (err_is_fail(err)) {
DEBUG_ERR(err, "Spawning %s failed.", mi->path);
}
#endif
-errval_t start_networking(coreid_t core, struct module_info* driver,
- char* record)
+errval_t start_networking(coreid_t core,
+ struct module_info* driver,
+ char* record)
{
assert(driver != NULL);
errval_t err = SYS_ERR_OK;
return KALUGA_ERR_DRIVER_NOT_AUTO;
}
- err = spawn_program(core, driver->path, driver->argv + 1, environ, 0,
- &driver->did);
+ err = spawn_program(core,
+ driver->path,
+ driver->argv + 1,
+ environ,
+ 0,
+ &driver->did);
if (err_is_fail(err)) {
DEBUG_ERR(err, "Spawning %s failed.", driver->path);
return err;
// XXX: Manually add cardname (overwrite first (auto) argument)
// +Weird convention, e1000n binary but cardname=e1000
char* cardname =
- strcmp(driver->binary, "e1000n") == 0 ? "e1000" : driver->binary;
+ strcmp(driver->binary, "e1000n") == 0 ? "e1000" : driver->binary;
size_t name_len = strlen("cardname=") + strlen(cardname) + 1;
char* card_argument = malloc(name_len);
err = spawn_program(core, netd->path, netd->argv, environ, 0, &netd->did);
ngd_mng->argv[0] = card_argument;
- err = spawn_program(core, ngd_mng->path, ngd_mng->argv, environ, 0,
- &ngd_mng->did);
+ err = spawn_program(core,
+ ngd_mng->path,
+ ngd_mng->argv,
+ environ,
+ 0,
+ &ngd_mng->did);
free(card_argument);
return err;
/* return KALUGA_ERR_DRIVER_ALREADY_STARTED; */
/* } */
-
/* err = spawn_program_with_caps(core, driver->path, driver->argv, environ, */
/* NULL_CAP, l4_CFG_domain_cap, 0, &driver->did); */
/* if (err_is_fail(err)) { */