unsupp = 0x2 "The request is not supported";
};
+
+
+ /*
+ * ------------------------------------------------------------------------
+ * Reading / Writing the configuration space
+ * ------------------------------------------------------------------------
+ */
+
+
+
register capacity addr(base, 0x00) "The capacity in 512byte sectors" {
sectors 64 "Number of 512byte sectors";
};
- register size_max addr(base, 0x08) "The maximum segment size" {
- size 32 "Maximum segment size";
+ register seg_size addr(base, 0x08) "The maximum segment size" {
+ max 32 "Maximum segment size";
};
- register seg_max addr(base, 0x0C) "The maximum number of segments" {
- num 32 "Number of segments";
+ register seg_num addr(base, 0x0C) "The maximum number of segments" {
+ max 32 "Maximum number of segments";
};
- register geo_cylinders addr(base, 0x10) "Geometry Information: Cylinders" {
- num 16 "Geometry: Number of cylinders";
+ register geometry addr(base, 0x10) "Geometry Information" {
+ cylinders 16 "Number of cylinders";
+ heads 8 "Number of heads";
+ sectors 8 "Number of sectors";
};
- register geo_heads addr(base, 0x12) "Geometry Information: Heads" {
- num 8 "Geometry: Number of heads";
- };
-
- register geo_sectors addr(base, 0x13) "Geometry Information: Sectors" {
- num 8 "Geometry: Number of sectors";
- };
register block_size addr(base, 0x14) "Block Size" {
size 32 "The size of a block";
};
- register phys_block_exp addr(base, 0x18) "Toplogy: Logical Blocks" {
- exp 8 "Number of logical blocks per physical block";
+ register topo_blocks addr(base, 0x18) "Toplogy: Logical Blocks" {
+ logic_per_phys 8 "Number of logical blocks per physical block";
+ offset_aligned 8 "Offset of first aligned logical block";
};
- register align_offset addr(base, 0x19) "Topology: Align Offset" {
- off 8 "Offset of first aligned logical block";
+ register topo_io_size addr(base, 0x1A) "Topology: minimum IO Size" {
+ min 16 "Minimum number of IO size in blocks";
+ opt 16 "Maximum number of IO size in blocks";
};
- register min_io addr(base, 0x1A) "Topology: minimum IO Size" {
- size 16 "Minimum number of IO size in blocks";
- };
-
- register opt_io addr(base, 0x1C) "Topology: maximum IO Size" {
- size 32 "Maximum number of IO size in blocks";
- };
- register writeback addr(base, 0x20) "Legacy Writeback Register" {
+ register writeback addr(base, 0x1E) "Legacy Writeback Register" {
wb 8 "Write back";
};
};
failure DEVICE_STATUS "VirtIO device has the wrong status",
failure QUEUE_ACTIVE "The selected qeueue is already activated",
failure QUEUE_INVALID "The selected queue does not exist",
- failure BUF_SIZE "The buffer size is invalid.",
+ failure BUFFER_SIZE "The buffer size is invalid.",
+ failure BUFFER_STATE "The state of the buffer / buffer list is invalid",
failure ARG_INVALID "The given argument is invalid.",
failure NO_BUFFER "No buffer given, number of buffers is 0",
failure ALLOC_FULL "The allocator is already full",
- failure BUF_USED "The buffer is already used",
+ failure BUFFER_USED "The buffer is already enqueued and used",
+ failure NO_DESC_AVAIL "There is no descriptor availabe",
+ failure DEQ_CHAIN "Not the entire chain could be dequeued",
+ failure INVALID_RING_INDEX "The supplied index is not valid",
};
--- /dev/null
+/*
+ * Copyright (c) 2014 ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Universitaetsstrasse 6, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef VIRTIO_DEVICES_VIRTIO_BLOCK_H
+#define VIRTIO_DEVICES_VIRTIO_BLOCK_H
+
+#include <dev/virtio/virtio_blk_dev.h>
+
+
+/*
+ * 5.2 Block Device
+ * The virtio block device is a simple virtual block device (ie. disk). Read
+ * and write requests (and other exotic requests) are placed in the queue, and
+ * serviced (probably out of order) by the device except where noted.
+ *
+ * Device ID = 2
+ *
+ * There is only a single virtio IO queue for this device
+ */
+
+/// the size of the VirtIO block device configuration spaces
+#define VIRTIO_BLOCK_CONFIG_SIZE 0x20
+
+/*
+ * --------------------------------------------------------------------------
+ * 5.2.3 Feature Bits
+ * --------------------------------------------------------------------------
+ */
+
+/// Request barriers are supported
+#define VIRTIO_BLOCK_F_BARRIER 0
+
+/// Maximum segment size is indicated in configuration structure
+#define VIRTIO_BLOCK_F_SIZE_MAX 1
+
+/// Maximum number of segments is indicated in configuration structure
+#define VIRTIO_BLOCK_F_SEG_MAX 2
+
+/// There is support for legacy geometry
+#define VIRTIO_BLOCK_F_GEOMETRY 4
+
+/// The disk is read only
+#define VIRTIO_BLOCK_F_RO 5
+
+/// The block size is indicated in configuration structure
+#define VIRTIO_BLOCK_F_BLK_SIZE 6
+
+/// SCSI command passthrough is supported
+#define VIRTIO_BLOCK_F_SCSI 7
+
+/// The cache flush command is supported
+#define VIRTIO_BLOCK_F_FLUSH 9
+
+/// the topology information is available
+#define VIRTIO_BLOCK_F_TOPOLOGY 10
+
+/// the length of the ID string
+#define VIRTIO_BLOCK_ID_BYTES 20
+
+/**
+ * \brief VirtIO block device topology information
+ */
+struct virtio_block_topology
+{
+ uint8_t num_logic_per_phys; ///< number of logical blocks per physical
+ uint8_t alignment_offset; ///< alignment offset
+ uint16_t min_io_size; ///< suggested minimum IO size
+ uint32_t opt_io_size; ///< suggested maximum IO size
+};
+
+/**
+ * \brief VirtIO block device geometry information
+ */
+struct virtio_block_geometry
+{
+ uint16_t cylinders; ///< number of cylinders
+ uint8_t heads; ///< number of heads
+ uint8_t sectors; ///< number of sectors
+};
+
+/*
+ * --------------------------------------------------------------------------
+ * Device Configuration Layout
+ * --------------------------------------------------------------------------
+ */
+
+/**
+ * The device configuration layout as specified by 5.2.3.2
+ */
+struct virtio_block_config
+{
+ uint64_t capacity; ///< the capacity in 512 byte sectors
+ uint32_t size_max; ///< the maximum segment size
+ uint32_t seg_max; ///< the maximum number of segments
+ struct virtio_block_geometry geometry; ///< device geometry
+ uint32_t blk_size;
+ struct virtio_block_topology topology; ///< topology information
+ uint8_t reserved; ///< reserved field originally write back
+}__attribute__((packed));
+
+/*
+ * --------------------------------------------------------------------------
+ * Device Operations
+ * --------------------------------------------------------------------------
+ *
+ * The driver queues requests to the virtqueue, and they are used by the device
+ * (not necessarily in order).
+ */
+
+/*
+ * VirtIO Block Command types
+ */
+
+/// Virtio Block Device Request Type IN
+#define VIRTIO_BLOCK_T_IN 0x00000000
+
+/// Virtio Block Device Request Type OUT
+#define VIRTIO_BLOCK_T_OUT 0x00000001
+
+/// Virtio Block Device Request Type Flush
+#define VIRTIO_BLOCK_T_FLUSH 0x00000004
+
+/// Get device ID command
+#define VIRTIO_BLOCK_T_GET_ID 0x00000008
+
+/// Memory barrier
+#define VIRTIO_BLOCK_T_BARRIER 0x80000000
+
+/*
+ * VirtIO Status Field values
+ */
+
+/// Virtio Block Device Request Status OK
+#define VIRTIO_BLOCK_S_OK 0x0
+
+/// Virtio Block Device Request Status IO Error
+#define VIRTIO_BLOCK_S_IOERR 0x1
+
+/// Virtio Block Device Request Status Unsupported
+#define VIRTIO_BLOCK_S_UNSUPP 0x2
+
+/**
+ * 5.2.7.1 Legacy Interface: Device Operation
+ */
+struct virtio_scsi_reqhdr
+{
+ uint32_t errors;
+ uint32_t data_len; ///< SHOULD be ignored by the driver.
+ uint32_t sense_len; ///< number of bytes actually written to the sense buffer.
+ uint32_t residual; ///< residual size, length - bytes actually transferred.
+};
+
+/**
+ * Block device request as defined in 5.2.5 Device Operation
+ */
+struct virtio_block_reqhdr
+{
+ uint32_t type; ///< Type of the request. One of VIRTIO_BLOCK_T_*
+ uint32_t ioprio; ///< Legacy devices only
+ uint64_t sector; ///< Offset (multiplied by 512) where to read/write
+ /* the data and status follow */
+};
+
+struct virtio_block_request
+{
+ void *foo;
+};
+
+struct virtio_device_blk
+{
+ struct virtio_device *vdev;
+ virtio_blk_t config_space;
+ void *config_addr;
+ struct virtqueue *vq;
+};
+
+/**
+ * \brief reads the device configuration and copies it into the local memory
+ *
+ * \param dev the block device to read the configuration space.
+ *
+ * \returns SYS_ERR_OK on success
+ */
+errval_t virtio_block_config_read(struct virtio_device_blk *dev);
+
+/**
+ * \brief returns the block size of the device
+ *
+ * \param dev the virtio block device
+ *
+ * \returns block size in bytes
+ */
+static inline uint32_t virtio_block_get_block_size(struct virtio_device_blk *dev)
+{
+ if (!virtio_device_has_feature(dev->vdev, VIRTIO_BLOCK_F_BLK_SIZE)) {
+ return 0;
+ }
+
+ return virtio_blk_block_size_size_rdf(&dev->config_space);
+}
+
+/**
+ * \brief returns the capacity of the VirtIO block device in 512 segments
+ *
+ * \param dev the virtio block device
+ *
+ * \returns capacity in 512-byte sectors
+ */
+static inline uint64_t virtio_block_get_capacity(struct virtio_device_blk *dev)
+{
+ return virtio_blk_capacity_sectors_rdf(&dev->config_space);
+}
+
+/**
+ * \brief returns the maximum number of segments
+ *
+ * \param dev the virtio block device
+ *
+ * \returns maximum number of segments if VIRTIO_BLOCK_F_SEG_MAX
+ * 0 otherwise
+ */
+static inline uint32_t virtio_block_get_segment_num(struct virtio_device_blk *dev)
+{
+ if (!virtio_device_has_feature(dev->vdev, VIRTIO_BLOCK_F_SEG_MAX)){
+ return 0;
+ }
+ return virtio_blk_seg_num_max_rdf(&dev->config_space);
+}
+
+/**
+ * \brief returns the maximum segment size
+ *
+ * \param dev the virtio block device
+ *
+ * \returns maximum segment size if VIRTIO_BLOCK_F_SIZE_MAX is negotiated
+ * 0 otherwise
+ */
+static inline uint32_t virtio_block_get_segment_size(struct virtio_device_blk *dev)
+{
+ if (!virtio_device_has_feature(dev->vdev, VIRTIO_BLOCK_F_SIZE_MAX)) {
+ return 0;
+ }
+ return virtio_blk_seg_size_max_rdf(&dev->config_space);
+}
+
+/**
+ * \brief returns the topology information
+ *
+ * \param dev the virtio block device
+ * \param topo memory region to fill the topology information in
+ * (only valid if VIRTIO_BLOCK_F_TOPOLOGY)
+ *
+ * \returns true if VIRTIO_BLOCK_F_TOPOLOGY
+ * false otherwise
+ */
+bool virtio_block_get_topology(struct virtio_device_blk *dev,
+ struct virtio_block_topology *topo);
+
+/**
+ * \brief returns the legacy geometry information of the device
+ *
+ * \param dev the virtio block device
+ * \param geo memory region to fill the geometry information in
+ * (only valid if VIRTIO_BLOCK_F_GEOMETRY)
+ *
+ * \returns true if VIRTIO_BLOCK_F_GEOMETRY
+ * false otherwise
+ */
+bool virtio_block_get_geometry(struct virtio_device_blk *dev,
+ struct virtio_block_geometry *geo);
+
+#endif // VIRTIO_DEVICES_VIRTIO_BLOCK_H
--- /dev/null
+/*
+ * Copyright (c) 2014 ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Universitaetsstrasse 6, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef VIRTIO_DEVICES_VIRTIO_NET_H
+#define VIRTIO_DEVICES_VIRTIO_NET_H
+
+
+#endif // VIRTIO_DEVICES_VIRTIO_NET_H
/// defines how we map the memory frames
#define VIRTIO_VREGION_FLAGS_DEVICE VREGION_FLAGS_READ_WRITE
-#define VIRTIO_VREGION_FLAGS_RING VREGION_FLAGS_READ_WRITE
+#define VIRTIO_VREGION_FLAGS_RING VREGION_FLAGS_READ_WRITE
/*
/**
- * \brief initializes the library for host side operation
- *
- * \param guest_base base address of the guest physical memory
- * \param guest_size size of the guest physical memory
- */
-errval_t virtio_host_init(lpaddr_t guest_base,
- lpaddr_t guest_size);
-
-
-/**
* VirtIO Memory segment
*/
VIRTIO_BUFFER_S_QUEUED
};
+enum virtio_bl_state {
+ VIRTIO_BUFFER_LIST_S_INVALID, ///< invalid state
+ VIRTIO_BUFFER_LIST_S_EMTPY, ///< list is empty can be used to insert bufs
+ VIRTIO_BUFFER_LIST_S_FILLED, ///< list contains buffers, more can be appended
+ VIRTIO_BUFFER_LIST_S_ENQUEUED ///< buffer list is enqueued, appending not possible
+};
+
/**
* represents a VirtIO buffer to be used
*/
struct virtio_buffer
{
- struct virtio_buffer_allocator *a;
+ struct virtio_buffer_allocator *a; ///< pointer to the allocator
enum virtio_buffer_state state; ///< state of this buffer
lpaddr_t paddr; ///< physical address of the buffer
void *buf; ///< mapped virtual address of the buffer
*/
struct virtio_buffer_list
{
+ enum virtio_bl_state state;
struct virtio_buffer *head;
struct virtio_buffer *tail;
size_t length;
+++ /dev/null
-/*
- * Copyright (c) 2014 ETH Zurich.
- * All rights reserved.
- *
- * This file is distributed under the terms in the attached LICENSE file.
- * If you do not find this file, copies can be found by writing to:
- * ETH Zurich D-INFK, Universitaetsstrasse 6, CH-8092 Zurich. Attn: Systems Group.
- */
-
-#ifndef VIRTIO_VIRTIO_BLOCK_H
-#define VIRTIO_VIRTIO_BLOCK_H
-
-#include <barrelfish/barrelfish.h>
-
-/*
- * 5.2 Block Device
- * The virtio block device is a simple virtual block device (ie. disk). Read
- * and write requests (and other exotic requests) are placed in the queue, and
- * serviced (probably out of order) by the device except where noted.
- *
- * Device ID = 2
- *
- * There is only a single virtio IO queue for this device
- */
-
-/*
- * --------------------------------------------------------------------------
- * 5.2.3 Feature Bits
- * --------------------------------------------------------------------------
- */
-
-/// Request barriers are supported
-#define VIRTIO_BLK_F_BARRIER (1<<0)
-
-/// Maximum segment size is indicated in configuration structure
-#define VIRTIO_BLK_F_SIZE_MAX (1<<1)
-
-/// Maximum number of segments is indicated in configuration structure
-#define VIRTIO_BLK_F_SEG_MAX (1<<2)
-
-/// There is support for legacy geometry
-#define VIRTIO_BLK_F_GEOMETRY (1<<4)
-
-/// The disk is read only
-#define VIRTIO_BLK_F_RO (1<<5)
-
-/// The block size is indicated in configuration structure
-#define VIRTIO_BLK_F_BLK_SIZE (1<<6)
-
-/// SCSI command passthrough is supported
-#define VIRTIO_BLK_F_SCSI (1<<7)
-
-/// The cache flush command is supported
-#define VIRTIO_BLK_F_FLUSH (1<<9)
-
-/// the topology information is available
-#define VIRTIO_BLK_F_TOPOLOGY (1<<10)
-
-/// the length of the ID string
-#define VIRTIO_BLK_ID_BYTES 20
-
-/*
- * --------------------------------------------------------------------------
- * Device Configuration Layout
- * --------------------------------------------------------------------------
- */
-
-/**
- * The device configuration layout as specified by 5.2.3.2
- */
-struct virtio_blk_config {
- uint64_t capacity; ///< the capacity in 512 byte sectors
- uint32_t size_max; ///< the maximum segment size
- uint32_t seg_max; ///< the maximum number of segments
- struct virtio_blk_geometry {
- uint16_t cylinders; ///< number of cylinders
- uint8_t heads; ///< number of heads
- uint8_t sectors; ///< number of sectors
- } geometry; ///< device geometry
- uint32_t blk_size;
- struct virtio_blk_topology {
- uint8_t physical_block_exp; ///< num logical blocks per physical
- uint8_t alignment_offset; ///< the alginment offset
- uint16_t min_io_size; ///< suggested minimum IO size
- uint32_t opt_io_size; ///< suggested maximum IO size
- } topology; ///< topology information
- uint8_t reserved; ///< reserved field originally write back
-} __attribute__((packed));
-
-
-
-/*
- * --------------------------------------------------------------------------
- * Device Operations
- * --------------------------------------------------------------------------
- *
- * The driver queues requests to the virtqueue, and they are used by the device
- * (not necessarily in order).
- */
-
-/**
- * Block device request as defined in 5.2.5 Device Operation
- */
-struct virtio_blk_reqhdr {
- uint32_t type; ///< Type of the request. One of VIRTIO_BLK_T_*
- uint32_t ioprio; ///< Priviously called ioprio (legacy)
- uint64_t sector; ///< Offset (multiplied by 512) where to read/write
- /* the data and status follow */
-};
-
-/// Virtio Block Device Request Type IN
-#define VIRTIO_BLK_T_IN 0
-
-/// Virtio Block Device Request Type OUT
-#define VIRTIO_BLK_T_OUT 1
-
-/// Virtio Block Device Request Type Flush
-#define VIRTIO_BLK_T_FLUSH 4
-
-/// Get device ID command
-#define VIRTIO_BLK_T_GET_ID 8
-
-
-/// Virtio Block Device Request Status OK
-#define VIRTIO_BLK_S_OK 0
-
-/// Virtio Block Device Request Status IO Error
-#define VIRTIO_BLK_S_IOERR 1
-
-/// Virtio Block Device Request Status Unsupported
-#define VIRTIO_BLK_S_UNSUPP 2
-
-
-/**
- * 5.2.7.1 Legacy Interface: Device Operation
- */
-struct virtio_scsi_reqhdr {
- uint32_t errors;
- uint32_t data_len; ///< SHOULD be ignored by the driver.
- uint32_t sense_len; ///< number of bytes actually written to the sense buffer.
- uint32_t residual; ///<residual size, length - bytes actually transferred.
- uint8_t status;
-};
-
-
-#endif // VIRTIO_VIRTIO_BLOCK_H
bool virtio_device_has_feature(struct virtio_device *dev,
uint8_t feature);
+
+/**
+ * \brief reads the device configuration space and copies it into a local buffer
+ *
+ * \param vdev virtio device
+ * \param buf pointer to the buffer to store the data
+ * \param len the length of the buffer
+ *
+ * \returns SYS_ERR_OK on success
+ */
+errval_t virtio_device_config_read(struct virtio_device *vdev,
+ void *buf,
+ size_t len);
+/**
+ * \brief writes to the configuration space of a device
+ *
+ * \param dev virtio device
+ * \param config pointer to the buffer with data to update
+ * \param length the length of the buffer
+ *
+ * \returns SYS_ERR_OK on success
+ *
+ * XXX: writes may need to be restricted to certain offset/length
+ *      combinations; the backend's rules are not fixed yet — confirm
+ */
+errval_t virtio_device_config_write(struct virtio_device *dev,
+ void *config,
+ size_t length);
+
#endif // VIRTIO_VIRTIO_DEVICE_H
* Virtqueue Queue Management
*/
+/**
+ * \brief Enqueues a new descriptor chain into the virtqueue
+ *
+ * \param vq the virtqueue the descriptor chain gets enqueued in
+ * \param bl list of buffers to enqueue into the virtqueue
+ * \param st state associated with this descriptor chain
+ * \param num_wr number of writable descriptors
+ * \param num_rd number of readable descriptors
+ *
+ * \returns SYS_ERR_OK on success
+ * VIRTIO_ERR_* on failure
+ */
errval_t virtio_virtqueue_desc_enqueue(struct virtqueue *vq,
struct virtio_buffer_list *bl,
- void *vaddr,
+ void *st,
uint16_t writeable,
uint16_t readable);
+/**
+ * \brief dequeues a descriptor chain form the virtqueue
+ *
+ * \param vq the virtqueue to dequeue descriptors from
+ * \param ret_bl returns the associated buffer list structure
+ * \param ret_st returns the associated state of the queue list
+ *
+ * \returns SYS_ERR_OK when the dequeue is successful
+ * VIRTIO_ERR_NO_DESC_AVAIL when there was no descriptor to dequeue
+ * VIRTIO_ERR_* if there was an error
+ */
+errval_t virtio_virtqueue_desc_dequeue(struct virtqueue *vq,
+ struct virtio_buffer_list **ret_bl,
+ void **ret_st);
-#if 0
-void *virtqueue_dequeue(struct virtqueue *vq, uint32_t *len);
-void *virtqueue_poll(struct virtqueue *vq, uint32_t *len);
-
-
-void virtqueue_dump(struct virtqueue *vq);
-#endif
+/**
+ * \brief polls the virtqueue
+ *
+ * \param vq the virtqueue to dequeue descriptors from
+ * \param ret_bl returns the associated buffer list structure
+ * \param ret_st returns the associated state of the queue list
+ * \param handle_msg flag to have messages handled
+ *
+ * \returns SYS_ERR_OK when the dequeue is successful
+ * VIRTIO_ERR_* if there was an error
+ */
+errval_t virtio_virtqueue_poll(struct virtqueue *vq,
+ struct virtio_buffer_list **ret_bl,
+ void **ret_st,
+ uint8_t handle_msg);
#endif // VIRTIO_VIRTQUEUE_H
[ build library { target = "virtio",
cFiles = [ "virtqueue.c",
- "device.c",
- "vbuffer.c",
- "backends/virtio_device_mmio.c",
- "backends/virtio_device_pci.c" ],
+ "device.c",
+ "vbuffer.c",
+ "backends/virtio_device_mmio.c",
+ "backends/virtio_device_pci.c",
+ "devices/virtio_block.c" ],
mackerelDevices = [ "virtio/virtio_mmio",
"virtio/virtio_pci" ]
- }
+ },
+ build library { target = "virtio_host",
+ addCFlags = [ "-DVIRTIO_HOST" ],
+ cFiles = [ "virtqueue.c",
+ "device.c",
+ "vbuffer.c",
+ "backends/virtio_device_mmio.c",
+ "backends/virtio_device_pci.c",
+ "devices/virtio_block.c" ],
+ mackerelDevices = [ "virtio/virtio_mmio",
+ "virtio/virtio_pci" ]
+ }
]
#include <virtio/virtio_device.h>
+// forward declaration
+struct virtqueue;
/**
uint64_t features;
enum virtio_device_backend backend;
struct virtio_device_fn *f;
- uint16_t num_queues;
- struct virtqueue **queues;
};
/**
--- /dev/null
+/*
+ * Copyright (c) 2014 ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Universitaetsstrasse 6, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#include <barrelfish/barrelfish.h>
+
+#include <virtio/virtio.h>
+#include <virtio/virtio_device.h>
+#include <virtio/devices/virtio_block.h>
+
+#include <dev/virtio/virtio_blk_dev.h>
+
+/**
+ * \brief returns the topology information
+ *
+ * \param dev the virtio block device
+ * \param topo memory region to fill the topology information in
+ * (only valid if VIRTIO_BLOCK_F_TOPOLOGY)
+ *
+ * \returns true if VIRTIO_BLOCK_F_TOPOLOGY
+ * false otherwise
+ */
+bool virtio_block_get_topology(struct virtio_device_blk *dev,
+ struct virtio_block_topology *topo)
+{
+ /* no memory supplied, can't return any data */
+ if (!topo) {
+ return 0;
+ }
+
+ /* the device does not support topology */
+ if (!virtio_device_has_feature(dev->vdev, VIRTIO_BLOCK_F_TOPOLOGY)) {
+ return 0;
+ }
+
+ /* read the topology fields out of the local copy of the config space */
+ topo->alignment_offset = virtio_blk_topo_blocks_offset_aligned_rdf(&dev
+ ->config_space);
+ topo->min_io_size = virtio_blk_topo_io_size_min_rdf(&dev->config_space);
+ topo->opt_io_size = virtio_blk_topo_io_size_opt_rdf(&dev->config_space);
+ topo->num_logic_per_phys = virtio_blk_topo_blocks_logic_per_phys_rdf(&dev
+ ->config_space);
+
+ return 1;
+}
+
+/**
+ * \brief returns the legacy geometry information of the device
+ *
+ * \param dev the virtio block device
+ * \param geo memory region to fill the geometry information in
+ * (only valid if VIRTIO_BLOCK_F_GEOMETRY)
+ *
+ * \returns true if VIRTIO_BLOCK_F_GEOMETRY is supported and geo was filled
+ * false otherwise
+ */
+bool virtio_block_get_geometry(struct virtio_device_blk *dev,
+                               struct virtio_block_geometry *geo)
+{
+    /* no memory supplied, can't return any data */
+    if (!geo) {
+        return 0;
+    }
+
+    /* the device does not offer the legacy geometry information */
+    if (!virtio_device_has_feature(dev->vdev, VIRTIO_BLOCK_F_GEOMETRY)) {
+        return 0;
+    }
+
+    geo->cylinders = virtio_blk_geometry_cylinders_rdf(&dev->config_space);
+    geo->heads = virtio_blk_geometry_heads_rdf(&dev->config_space);
+    geo->sectors = virtio_blk_geometry_sectors_rdf(&dev->config_space);
+
+    /* data is valid: report success, mirroring virtio_block_get_topology */
+    return 1;
+}
+
+/**
+ * \brief reads the device configuration and copies it into the local memory
+ *
+ * \param dev the block device to read the configuration space.
+ *
+ * \returns SYS_ERR_OK on success
+ * LIB_ERR_MALLOC_FAIL if the local buffer could not be allocated
+ */
+errval_t virtio_block_config_read(struct virtio_device_blk *dev)
+{
+ /* lazily allocate the local copy of the config space on first read;
+ * it stays owned by dev->config_addr for subsequent reads */
+ if (dev->config_addr == NULL) {
+ dev->config_addr = malloc(VIRTIO_BLOCK_CONFIG_SIZE);
+ if (dev->config_addr == NULL) {
+ return LIB_ERR_MALLOC_FAIL;
+ }
+ }
+
+ return virtio_device_config_read(dev->vdev,
+ dev->config_addr,
+ VIRTIO_BLOCK_CONFIG_SIZE);
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Universitaetsstrasse 6, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#include <barrelfish/barrelfish.h>
+
+#include <virtio/virtio.h>
+#include <virtio/virtio_device.h>
--- /dev/null
+/*
+ * Copyright (c) 2014 ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Universitaetsstrasse 6, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#include <barrelfish/barrelfish.h>
+
+#include <virtio/virtio.h>
+#include <virtio/virtio_device.h>
struct virtio_buffer *buf)
{
if (buf->lhead) {
- return VIRTIO_ERR_BUF_USED;
+ return VIRTIO_ERR_BUFFER_USED;
}
if (bl->length == 0) {
bl->head = buf;
#include <string.h>
#include <barrelfish/barrelfish.h>
+#include <barrelfish/waitset.h>
#include <virtio/virtio.h>
#include <virtio/virtio_ring.h>
#include "debug.h"
-
#define IS_POW2(num) (((num) != 0) && (((num) & (~(num) + 1)) == (num)))
-
#define VIRTQUEUE_FLAG_INDIRECT 0x0001
#define VIRTQUEUE_FLAG_EVENT_IDX 0x0002
#define VIRTQUEUE_FLAG_FREE_CAP 0x8000
{
struct virtio_buffer *buf;
void *st;
+ struct virtio_buffer_list *bl;
uint8_t is_head;
};
{
/* device information */
struct virtio_device *device; ///< pointer to to the virtio device
- uint16_t queue_index; ///< index of this queue in the device
+ uint16_t queue_index; ///< index of this queue in the device
char name[VIRTQUEUE_NAME_SIZE]; ///< name of the queue for debugging
/* vring information */
/* interrupt handling */
virtq_intr_hander_t intr_handler; ///< interrupt handler
- void *intr_arg; ///< user argument for the handler
+ void *intr_arg; ///< user argument for the handler
- struct vring_desc_info vring_di[0]; ///< array of additional desc information
+ struct vring_desc_info vring_di[0]; ///< array of additional desc information
#if 0
/* indirect descriptors */
uint16_t max_indirect;
- size_t indirect_size;
+ size_t indirect_size;
struct vq_desc_extra {
- void *cookie; << virtual address?
- struct vring_desc *indirect;
- vm_paddr_t indirect_paddr;
- uint16_t ndescs;
- } vq_descx[0];
+ void *cookie; << virtual address?
+ struct vring_desc *indirect;
+ vm_paddr_t indirect_paddr;
+ uint16_t ndescs;
+ }vq_descx[0];
#endif
};
return 0;
}
-
/**
* \brief initializes the vring structure of the virtqueue
*
/*
* initialize the vring structure in memory
*/
- vring_init(vr, vq->vring_ndesc, vq->vring_align, (void *)vq->vring_vaddr);
+ vring_init(vr, vq->vring_ndesc, vq->vring_align, (void *) vq->vring_vaddr);
/*
* initialize the descriptor chains
*/
uint32_t i;
for (i = 0; i < vq->vring_ndesc; ++i) {
- vr->desc[i].next = i+1;
+ vr->desc[i].next = i + 1;
}
vr->desc[i].next = VIRTQUEUE_CHAIN_END;
return SYS_ERR_OK;
}
-
static bool virtqueue_should_notify_host(struct virtqueue *vq)
{
uint16_t new, prev, *event_idx;
return ((vq->vring.used->flags & VIRTIO_RING_USED_F_NO_NOTIFY) == 0);
}
-
/*
* ============================================================================
* Public Interface
* ============================================================================
*/
-
/*
* ----------------------------------------------------------------------------
* Virtqueue Allocation / Deallocation
}
VIRTIO_DEBUG_VQ("Allocated memory for vring: [%lx & %lx]",
- (uint64_t)size, (uint64_t)framesize);
+ (uint64_t )size,
+ (uint64_t )framesize);
err = virtio_virtqueue_alloc_with_caps(setup, vring_cap, vq);
if (err_is_fail(err)) {
}
if (setup->max_indirect > VIRTIO_RING_MAX_INDIRECT) {
- VIRTIO_DEBUG_VQ("ERROR: too many indirect descriptors requested: [%u / %u]\n", setup->vring_ndesc, VIRTIO_RING_MAX_INDIRECT);
+ VIRTIO_DEBUG_VQ("ERROR: too many indirect descriptors requested: [%u / %u]\n",
+ setup->vring_ndesc,
+ VIRTIO_RING_MAX_INDIRECT);
return VIRTIO_ERR_MAX_INDIRECT;
}
if (vring_mem_size > (1UL << id.bits)) {
VIRTIO_DEBUG_VQ("ERROR: supplied cap was too small %lx, needed %lx\n",
- ((1UL << id.bits)), (uint64_t)vring_mem_size);
+ ((1UL << id.bits)),
+ (uint64_t )vring_mem_size);
return VIRTIO_ERR_CAP_SIZE;
}
return err;
}
- struct virtqueue *vq = calloc(1, sizeof(struct virtqueue)
- + (setup->vring_ndesc * sizeof(struct vring_desc_info)));
+ struct virtqueue *vq = calloc(1,
+ sizeof(struct virtqueue) + (setup->vring_ndesc
+ * sizeof(struct vring_desc_info)));
if (vq == NULL) {
vspace_unmap(vring_addr);
return LIB_ERR_MALLOC_FAIL;
vq->vring_size = vring_mem_size;
vq->vring_cap = vring_cap;
vq->vring_paddr = id.base;
- vq->vring_vaddr = (lvaddr_t)vring_addr;
+ vq->vring_vaddr = (lvaddr_t) vring_addr;
vq->free_count = setup->vring_ndesc;
-
vq->intr_handler = setup->intr_handler;
vq->intr_arg = setup->intr_arg;
virtqueue_init_indirect(vq, setup->max_indirect);
}
- if(virtio_device_has_feature(setup->device, VIRTIO_RING_F_EVENT_IDX)) {
- vq->flags |= (1<<VIRTQUEUE_FLAG_EVENT_IDX);
+ if (virtio_device_has_feature(setup->device, VIRTIO_RING_F_EVENT_IDX)) {
+ vq->flags |= (1 << VIRTQUEUE_FLAG_EVENT_IDX);
}
virtqueue_init_vring(vq);
return SYS_ERR_OK;
}
-
/**
* \brief frees the resources of previously allocated virtqueues
*
return vq->queue_index;
}
-
/**
* \brief Checks if the virtqueue is empty
*
vq->intr_handler(vq, vq->intr_arg);
}
-
/**
* \brief enables the interrupts on the next descriptor processed
*
return virtqueue_interrupt_enable(vq, ndesc);
}
-
/**
* \brief disables the interrupts for the given virtqueue
*
}
-
/*
* We layout the vring structure in memory as follows:
*
vq->queued_count++;
}
-
+/**
+ * \brief Performs the actual insertion and queue setup of the given buffer list
+ *
+ * \param vq virtqueue to insert in
+ * \param head index of the head of the free queue
+ * \param bl buffer list to be enqueued
+ * \param num_read number of readable buffers
+ * \param num_write number of writeable buffers
+ * \param ret_idx the returned new free head index
+ *
+ * \return SYS_ERR_OK on success
+ * VIRTIO_ERR_* on failure
+ */
static errval_t virtqueue_enqueue_bufs(struct virtqueue *vq,
- struct vring_desc *desc,
uint16_t head,
struct virtio_buffer_list *bl,
- uint16_t readable,
- uint16_t writable,
+ uint16_t num_read,
+ uint16_t num_write,
uint16_t *ret_idx)
{
+ struct vring_desc *desc = vq->vring.desc;
struct virtio_buffer *buf = bl->head;
struct vring_desc *cd;
- uint16_t needed = readable + writable;
+ if (bl->state != VIRTIO_BUFFER_LIST_S_FILLED) {
+ return VIRTIO_ERR_BUFFER_STATE;
+ }
+
+ uint16_t needed = num_read + num_write;
uint16_t idx = head;
for (uint16_t i = 0; i < needed; ++i) {
+ if (buf->state == VIRTIO_BUFFER_S_QUEUED) {
+ /*
+ * XXX: assume here that read only descriptors can be queued multiple
+ * times, having the same buffer writable enqueued twices, this
+ * is clearly an error
+ */
+ if (i >= num_read) {
+ /*
+ * do a clean up, reverse pointers and revert the fields
+ */
+ idx = head;
+ buf = bl->head;
+ for (uint16_t j = 0; j < i; ++j) {
+ /* reset the buffer state */
+ buf->state = VIRTIO_BUFFER_S_ALLOCED;
+ vq->vring_di[idx].buf = NULL;
+ cd = &desc[idx];
+ cd->addr = buf->paddr;
+ cd->length = buf->length;
+ cd->flags = 0;
+
+ idx = cd->next;
+ buf = buf->next;
+ }
+ return VIRTIO_ERR_BUFFER_USED;
+ }
+ }
+
+ buf->state = VIRTIO_BUFFER_S_QUEUED;
+
vq->vring_di[idx].buf = buf;
cd = &desc[idx];
if (i < needed - 1) {
cd->flags |= VIRTIO_RING_DESC_F_NEXT;
}
- if (i >= readable) {
+ if (i >= num_read) {
cd->flags |= VIRTIO_RING_DESC_F_WRITE;
}
idx = cd->next;
buf = buf->next;
}
+ bl->state = VIRTIO_BUFFER_LIST_S_ENQUEUED;
+
if (ret_idx) {
*ret_idx = idx;
}
return SYS_ERR_OK;
}
-
+/**
+ * \brief Enqueues a new descriptor chain into the virtqueue
+ *
+ * \param vq the virtqueue the descriptor chain gets enqueued in
+ * \param bl list of buffers to enqueue into the virtqueue
+ * \param st state associated with this descriptor chain
+ * \param num_wr number of writable descriptors
+ * \param num_rd number of readable descriptors
+ *
+ * \returns SYS_ERR_OK on success
+ * VIRTIO_ERR_* on failure
+ */
errval_t virtio_virtqueue_desc_enqueue(struct virtqueue *vq,
struct virtio_buffer_list *bl,
- void *vaddr,
- uint16_t writeable,
- uint16_t readable)
+ void *st,
+ uint16_t num_wr,
+ uint16_t num_rd)
{
- uint16_t needed = readable + writeable;
+ errval_t err;
+
+ uint16_t needed = num_rd + num_wr;
if (needed != bl->length || needed < 0) {
return VIRTIO_ERR_SIZE_INVALID;
struct vring_desc_info *info = &vq->vring_di[free_head];
info->is_head = 0x1;
- info->st = NULL;
+ info->st = st;
+ info->bl = bl;
uint16_t idx;
- virtqueue_enqueue_bufs(vq, vq->vring.desc, free_head,
- bl, readable, writeable, &idx);
-
+ err = virtqueue_enqueue_bufs(vq, free_head, bl, num_rd, num_wr, &idx);
+ if (err_is_fail(err)) {
+ return err;
+ }
/* update free values */
vq->free_head = idx;
return SYS_ERR_OK;
}
-#if 0
+static errval_t virtqueue_free_desc_chain(struct virtqueue *vq,
+ uint16_t desc_idx)
+{
+ struct vring_desc *desc;
+ struct vring_desc_info *info;
+
+ desc = &vq->vring.desc[desc_idx];
+ info = &vq->vring_di[desc_idx];
+
+ uint16_t ndesc = info->bl->length;
+
+ vq->free_count += ndesc;
+ ndesc--;
+
+ if ((desc->flags & VIRTIO_RING_DESC_F_INDIRECT) == 0) {
+ while (desc->flags & VIRTIO_RING_DESC_F_NEXT) {
+ desc = &vq->vring.desc[desc->next];
+ ndesc--;
+ }
+ }
+
+ if (ndesc) {
+ return VIRTIO_ERR_DEQ_CHAIN;
+ }
+
+ /* append it to the free list of descriptors */
+ desc->next = vq->free_head;
+ vq->free_head = desc_idx;
+
+ return SYS_ERR_OK;
+}
/**
+ * \brief dequeues a descriptor chain form the virtqueue
+ *
+ * \param vq the virtqueue to dequeue descriptors from
+ * \param ret_bl returns the associated buffer list structure
+ * \param ret_st returns the associated state of the queue list
*
+ * \returns SYS_ERR_OK when the dequeue is successful
+ * VIRTIO_ERR_NO_DESC_AVAIL when there was no descriptor to dequeue
+ * VIRTIO_ERR_* if there was an error
*/
-errval_t virtio_virtqueue_desc_alloc(struct virtqueue *vq,
- struct virtio_buffer_list *bl,
- uint16_t readable,
- uint16_t writeable)
+errval_t virtio_virtqueue_desc_dequeue(struct virtqueue *vq,
+ struct virtio_buffer_list **ret_bl,
+ void **ret_st)
{
+ errval_t err;
-}
+ struct vring_used_elem *elem;
+
+ uint16_t used_idx, desc_idx;
+
+ /*
+ * check if there is a descriptor available
+ */
+ if (vq->used_tail == vq->vring.used->idx) {
+ return VIRTIO_ERR_NO_DESC_AVAIL;
+ }
+
+ used_idx = vq->used_tail++ & (vq->vring_ndesc - 1);
+ elem = &vq->vring.used->ring[used_idx];
+
+ /*
+ * TODO: read memory barrier
+ * rmb();
+ * */
+ desc_idx = (uint16_t) elem->id;
+
+ /* get the descritpor information */
+ struct vring_desc_info *info = &vq->vring_di[desc_idx];
+
+ assert(info->is_head);
+ assert(info->bl);
+ struct virtio_buffer_list *bl = info->bl;
+ err = virtqueue_free_desc_chain(vq, desc_idx);
+ if (err_is_fail(err)) {
+ used_idx = vq->used_tail-- & (vq->vring_ndesc - 1);
+ return err;
+ }
-void *virtio_virtqueue_desc_deq(struct virtqueue *vq)
+ if (ret_bl) {
+ *ret_bl = bl;
+ }
+
+ if (ret_st) {
+ *ret_st = info->st;
+ }
+
+ return SYS_ERR_OK;
+}
+
+/**
+ * \brief polls the virtqueue
+ *
+ * \param vq the virtqueue to dequeue descriptors from
+ * \param ret_bl returns the associated buffer list structure
+ * \param ret_st returns the associated state of the queue list
+ * \param handle_msg flag to have messages handled
+ *
+ * \returns SYS_ERR_OK when the dequeue is successful
+ * VIRTIO_ERR_* if there was an error
+ */
+errval_t virtio_virtqueue_poll(struct virtqueue *vq,
+ struct virtio_buffer_list **ret_bl,
+ void **ret_st,
+ uint8_t handle_msg)
{
- return NULL;
+ errval_t err;
+
+ err = virtio_virtqueue_desc_dequeue(vq, ret_bl, ret_st);
+
+ while(err_no(err) == VIRTIO_ERR_NO_DESC_AVAIL) {
+ if (handle_msg) {
+ err = event_dispatch_non_block(get_default_waitset());
+ if (err_is_fail(err)) {
+ if (err_no(err) == LIB_ERR_NO_EVENT) {
+ thread_yield();
+ }
+ }
+ } else {
+ thread_yield();
+ }
+ err = virtio_virtqueue_desc_dequeue(vq, ret_bl, ret_st);
+ }
+
+ return err;
}
+#if 0
void *virtio_virtqueue_poll(struct virtqueue *vq)
build application { target = "virtio_blk_host",
cFiles = [ "main_host.c"
],
- addLibraries = libDeps ["virtio"],
+ addLibraries = libDeps ["virtio_host"],
--flounderExtraDefs = [ ("monitor_blocking",["rpcclient"]) ],
--flounderDefs = ["monitor", "xeon_phi_manager", "xeon_phi", "xeon_phi_messaging"],
--flounderBindings = ["xeon_phi", "xeon_phi_messaging"],
* ETH Zurich D-INFK, Universitaetsstrasse 6, CH-8092 Zurich. Attn: Systems Group.
*/
+#include <virtio/virtio.h>
+#include <virtio/virtio_device.h>
+#include <virtio/devices/virtio_block.h>
+
+#include <dev/virtio/virtio_blk_dev.h>
int main(int argc, char *argv[])
{
}
if (is_started(mi) && !strcmp("xeon_phi",mi->binary)) {
- where !=20;
+ where += 20;
}
err = spawn_program(where, mi->path, argv,