#module /armv7/sbin/chips boot
module /armv7/sbin/ramfsd boot
module /armv7/sbin/skb boot
-module /armv7/sbin/kaluga boot
module /armv7/sbin/spawnd boot bootarm=0
module /armv7/sbin/startd boot
module /armv7/sbin/memtest
module /armv7/sbin/usb_manager
+module /armv7/sbin/kaluga
# For the PandaBoard, use the following values.
mmap map 0x80000000 0x40000000 1
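+# 1 GB of RAM starting at physical address 0x80000000; the last field is the memory-region type (1 = available RAM).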
* Extra Fields:\r
* - next: virtual pointer to the next ED of the list\r
* - prev: virtual pointer to the previous ED of the list\r
- * - obj_next: TODO: ??\r
+ * - obj_next: virtual pointer to the next ED\r
* - ed_self: physical address of this endpoint descriptor\r
*/\r
#define USB_OHCI_ED_ALIGN 16\r
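+/*\r
+ * The OHCI specification requires endpoint descriptors to be aligned to\r
+ * 16-byte boundaries: the low bits of the physical ED pointers are reserved,\r
+ * and in ed_headP bit 0 and bit 1 carry the Halted and toggleCarry flags.\r
+ */\r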
* - td_buffer_end: physical pointer to the last byte in the buffer\r
*\r
* Extra Fields:\r
- * - obj_next: virtual pointer to the next td TODO: ??\r
- * - alt_next: alternative virtual next pointer TODO: ??\r
+ * - obj_next: virtual pointer to the next td\r
+ * - alt_next: alternative virtual next pointer\r
 * - td_self: physical address of this transfer descriptor\r
* - len: length of the data block of this transfer\r
*/\r
 * - size: length of the isochronous packet\r
*\r
* Extra Fields:\r
- * - obj_next: TODO: ??\r
+ * - obj_next: virtual pointer to the next itd\r
 * - itd_self: physical address of this transfer descriptor\r
* - frames: number of frames\r
*/\r
static void\r
usb_ohci_xfer_isoc_enter(struct usb_xfer *xfer)\r
{\r
+ // TODO: Implement\r
assert(!"NYI: cannot create isochronus transfers at this time")\r
}\r
\r
* ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
*/
-#include "usb_ohci_xfer.h"
#include "usb_ohci_descriptors.h"
+#include "usb_ohci_xfer.h"
#include "../usb_endpoint.h"
static void usb_ohci_xfer_short_frames(struct usb_xfer *xfer)
{
- assert(!"NYI: checking for short frames");
+ usb_ohci_td_t *td;
+ usb_ohci_ed_t *ed;
+
+ usb_ohci_td_ctrl_t *td_ctrl;
+ uint16_t cc;
+ usb_paddr_t td_next;
+ usb_paddr_t current_buffer;
+
+ td = xfer->hcd_td_cache;
+
+ /*
+ * loop over the frame; a frame may contain more than one short
+ * packet, so we have to make sure that we have reached the last one
+ */
+ while(1) {
+ /* TODO: invalidate cache? */
+ current_buffer = td->td_current_buffer;
+ td_ctrl = &td->td_control;
+ td_next = td->td_nextTD;
+
+ /*
+ * check if we have reached the last transfer descriptor
+ * if so we are done
+ */
+
+ if (((void *) td) == xfer->hcd_td_last) {
+ td = NULL;
+ break;
+ }
+
+ /*
+ * check the condition code: if it is not USB_OHCI_STATUS_OK, the
+ * transfer is finished
+ */
+ cc = td_ctrl->condition_code;
+ if (cc) {
+ td = NULL;
+ break;
+ }
+
+ /*
+ * check if we have reached the last packet, i.e. td_nextTD is
+ * NULL; we have to mask out the lower four bits, since these may
+ * be used for other purposes.
+ * If there is still a current buffer pointer set, the packet was
+ * short, so we follow the alternative next pointer and stop processing.
+ */
+ if (((td_next & (~0xF)) == 0) || current_buffer) {
+ td = td->alt_next;
+ break;
+ }
+
+ // go to next transfer descriptor
+ td = td->obj_next;
+ }
+
+ // update the cached TD pointer of the transfer
+ xfer->hcd_td_cache = td;
+
+ /*
+ * we have found a non-completed short transfer for this endpoint;
+ * this means we have to update the head pointer of the endpoint
+ * descriptor to point to this TD
+ */
+ if (td) {
+ // get the associated endpoint
+ ed = xfer->hcd_qh_start[xfer->flags_internal.curr_dma_set];
+
+ ed->ed_headP = td->td_self;
+
+ // TODO: invalidate cache?
+
+ /*
+ * we need to make sure that the OHCI controller picks up this
+ * remaining transfer descriptor for processing.
+ */
+ if (xfer->type == USB_XFER_TYPE_BULK) {
+ /* TODO: write register BLF
+ * OWRITE4(sc, OHCI_COMMAND_STATUS, OHCI_BLF);
+ */
+ }
+
+ if (xfer->type == USB_XFER_TYPE_CTRL) {
+ /* TODO: write register CLF
+ * OWRITE4(sc, OHCI_COMMAND_STATUS, OHCI_CLF);
+ */
+ }
+ }
}
/**
// getting the endpoint from the queue head list
ed = xfer->hcd_qh_start[xfer->flags_internal.curr_dma_set];
+ /* TODO: invalidate cache ? */
+
// get the transfer descriptor pointers
ed_headP = ed->ed_headP;
ed_tailP = ed->ed_tailP;
if (xfer->flags_internal.short_frames_ok) {
usb_ohci_xfer_short_frames(xfer);
+
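+ /* a remaining short-frame TD means the transfer is not finished yet */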
+ if (xfer->hcd_td_cache) {
+ return 0;
+ }
}
// handle the data toggle flag
return 0;
}
-
+/**
+ * \brief This function updates the frame_lengths of the usb transfer
+ *
+ * \param xfer the current USB transfer
+ *
+ * \return USB_ERR_OK on success
+ * USB_ERR_IO
+ * USB_ERR_STALLED
+ */
static usb_error_t
usb_ohci_xfer_update_frame_lengths(struct usb_xfer *xfer)
{
// get the endpoint associated with the usb transfer
ed = xfer->hcd_qh_start[xfer->flags_internal.curr_dma_set];
- // todo: invalideate page cache of endpoint
+ // todo: invalidate page cache of endpoint
switch (xfer->type) {
case USB_XFER_TYPE_ISOC:
void usb_ohci_xfer_enqueue(struct usb_xfer *xfer)
{
/* check for early completion */
- if (usb_ohci_xfer_finished(xfer)) {
+ if (usb_ohci_xfer_is_finished(xfer)) {
return;
}
/* put transfer on interrupt queue */
* ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.\r
*/\r
\r
+#include "usb_memory.h"\r
\r
+static struct usb_page *free_pages = NULL;\r
\r
+static struct usb_dma_page *free_dma_buffers = NULL;\r
\r
+#define USB_PAGE_SIZE 0x1000 // 4k\r
\r
\r
-static struct usb_page map_new_frame(int num, uint8_t flag)\r
+/**\r
+ * \brief allocates a chunk of memory from a given usb_page with the given\r
+ * size and alignment constraints.\r
+ *\r
+ * \param size the minimum size of the block\r
+ * \param align the alignment requirement of the block (physical memory)\r
+ * \param page the given usb_page to allocate from\r
+ * \param ret_mem the usb_memory_block structure that gets filled in with\r
+ * the block's addresses and size\r
+ *\r
+ * \return size of the allocated block, or 0 if the page does not have\r
+ * enough free space\r
+ */\r
+uint32_t usb_mem_next_block(uint32_t size, uint32_t align,\r
+ struct usb_page *page, struct usb_memory_block *ret_mem)\r
{\r
- int r = 0;\r
- struct capref frame;\r
- struct frame_identity frame_id = { .base = 0, .bits = 0 };\r
- struct usb_page map;\r
-\r
- if (flag == USB_NEAR_EHCI && ehci_core_id != -1)\r
- set_range_to_ehci();\r
- else if (flag == USB_NEAR_SELF && self_core_id != -1)\r
- set_range(self_core_id);\r
-\r
- // In case DONOT_CARE...we just fall through\r
-\r
- // XXX: IMPORTANT - Assuming that num pages will be contigious\r
- // otherwise ...:-( !!\r
- int total_size = BASE_PAGE_SIZE * num;\r
- r = frame_alloc(&frame, total_size, NULL);\r
- assert(r == 0);\r
- r = invoke_frame_identify(frame, &frame_id);\r
- assert(r == 0);\r
- void *va;\r
- r = vspace_map_one_frame_attr(&va, total_size, frame,\r
- VREGION_FLAGS_READ_WRITE_NOCACHE,\r
- NULL, NULL);\r
- assert(r == 0);\r
- map.va = va;\r
- map.frame = frame;\r
- map.frame_id = frame_id;\r
- map.valid = 1;\r
- map.pa = (void *)frame_id.base;\r
-\r
- insert_page(map, num);\r
- usb_pages += num;\r
-\r
- dprintf("\n EHCI: A new frame is allocated PADDR %lx VADDR %lx ",\r
- (uint64_t) map.frame_id.base, (uint64_t) map.va);\r
-\r
- return map;\r
-}\r
+ // check if there is enough free space on this usb page\r
+ struct usb_memory_block *free = &page->free;\r
+\r
+ uint32_t size_req = size;\r
+\r
+ // padding needed to align the physical address of the block\r
+ uint32_t offset = free->phys_addr % align;\r
+\r
+ // add the alignment padding to the required size\r
+ if (offset) {\r
+ offset = align - offset;\r
+ size_req += offset;\r
+ }\r
+\r
+ // check if we have enough free space, otherwise return\r
+ if (free->size < size_req) {\r
+ ret_mem->buffer = 0;\r
+ ret_mem->phys_addr = 0;\r
+ ret_mem->size = 0;\r
+ return 0;\r
+ }\r
\r
+ ret_mem->buffer = free->buffer + offset;\r
+ ret_mem->phys_addr = free->phys_addr + offset;\r
+ ret_mem->size = size;\r
\r
+ // update free memory in page\r
+ free->buffer += size_req;\r
+ free->phys_addr += size_req;\r
+ free->size -= size_req;\r
\r
+ assert(free->size >= 0);\r
+\r
+ return size;\r
+}\r
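+\r
+/*\r
+ * Example usage (illustrative sketch): carve an aligned block for a\r
+ * hardware descriptor out of a page, e.g. an OHCI endpoint descriptor:\r
+ *\r
+ * struct usb_page *page = usb_mem_page_alloc();\r
+ * struct usb_memory_block blk;\r
+ * if (usb_mem_next_block(sizeof(usb_ohci_ed_t), USB_OHCI_ED_ALIGN,\r
+ * page, &blk) == 0) {\r
+ * // the page is full: allocate a new page and retry\r
+ * }\r
+ */\r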
\r
/*\r
- * \brief Maps a given capability into caller's domain.\r
+ * \brief allocates a fresh usb_page for hardware descriptors\r
+ *\r
+ * \return pointer to struct usb_page or NULL\r
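+ *\r
+ * The page is mapped uncached (VREGION_FLAGS_READ_WRITE_NOCACHE) so that\r
+ * descriptor updates become visible to the host controller without explicit\r
+ * cache maintenance.\r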
*/\r
-\r
-void *map_cap(struct capref cap, uint32_t sz)\r
+struct usb_page *usb_mem_page_alloc()\r
{\r
- void *retval;\r
- errval_t err = vspace_map_one_frame(&retval, sz, cap, NULL, NULL);\r
- if (err_is_fail(err)) {\r
- DEBUG_ERR(err, "vspace_map_one_frame failed");\r
+ struct usb_page *ret;\r
+\r
+ // check if we have a free page left\r
+ if (free_pages != NULL) {\r
+ ret = free_pages;\r
+ free_pages = free_pages->next;\r
+ ret->next = NULL;\r
+ return ret;\r
+ }\r
+\r
+ ret = (struct usb_page *) malloc(sizeof(struct usb_page));\r
+ memset(ret, 0, sizeof(struct usb_page));\r
+\r
+ uint32_t ret_size;\r
+ errval_t err = frame_alloc(&ret->cap, USB_PAGE_SIZE, &ret_size);\r
+\r
+ if (err_is_fail(err)) {\r
+ return NULL;\r
+ }\r
+\r
+ err = invoke_frame_identify(ret->cap, &ret->frame_id);\r
+\r
+ if (err_is_fail(err)) {\r
return NULL;\r
}\r
- return retval;\r
+\r
+ err = vspace_map_one_frame_attr(&ret->page.buffer, USB_PAGE_SIZE, ret->cap,\r
+ VREGION_FLAGS_READ_WRITE_NOCACHE, NULL, NULL);\r
+\r
+ if (err_is_fail(err)) {\r
+ return NULL;\r
+ }\r
+ ret->page.phys_addr = ret->frame_id.base;\r
+ ret->page.size = USB_PAGE_SIZE;\r
+ ret->free.size = USB_PAGE_SIZE;\r
+ ret->free.buffer = ret->page.buffer;\r
+ ret->free.phys_addr = ret->page.phys_addr;\r
+\r
+ return ret;\r
}\r
\r
-/*\r
- * \brief Allocates an I/O buffer of size sz.\r
- *\r
- * \param sz size of the I/O buffer.\r
- * \param flag for NUMA aware allocation\r
- */\r
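+/**\r
+ * \brief returns a usb_page to the allocator\r
+ *\r
+ * The page is not unmapped; it is put on a free list and handed out again\r
+ * by the next call to usb_mem_page_alloc().\r
+ */\r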
+void usb_mem_page_free(struct usb_page *mem)\r
+{\r
+ if (free_pages != NULL) {\r
+ mem->next = free_pages;\r
+ } else {\r
+ mem->next = NULL;\r
+ }\r
+ free_pages = mem;\r
+}\r
\r
-usb_mem malloc_iobuff(uint32_t sz, uint8_t flag)\r
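+/**\r
+ * \brief allocates an uncached 4k DMA buffer for data transfers\r
+ *\r
+ * \param size requested buffer size (currently unused; every buffer is a\r
+ * full USB_PAGE_SIZE page)\r
+ * \param align requested alignment (currently unused; buffers are page\r
+ * aligned by construction)\r
+ *\r
+ * \return pointer to struct usb_dma_page or NULL on failure\r
+ */\r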
+struct usb_dma_page *usb_mem_dma_alloc(uint32_t size, uint32_t align)\r
{\r
- // FIXME: This is very poor memory managment\r
- // code for EHCI. Even for a small io buffer request\r
- // it allocates a whole new frame.\r
- int no_frames = (sz / BASE_PAGE_SIZE);\r
- if (sz % BASE_PAGE_SIZE != 0) //spilled data into next frame\r
- no_frames++;\r
-\r
- usb_page map = map_new_frame(no_frames, flag);\r
- //reset the range for neq requests\r
- set_range(self_core_id);\r
-\r
- usb_mem mem;\r
- mem.va = map.va;\r
- mem.pa = map.pa;\r
- mem.type = EHCI_MEM_TYPE_IO;\r
- mem.free = 1;\r
- mem.size = sz;\r
- mem.cap = map.frame;\r
-\r
- return mem;\r
+ struct usb_dma_page *ret;\r
+\r
+ // check if we have a free page left\r
+ if (free_dma_buffers != NULL) {\r
+ ret = free_dma_buffers;\r
+ free_dma_buffers = free_dma_buffers->next;\r
+ ret->next = NULL;\r
+ return ret;\r
+ }\r
+\r
+ ret = (struct usb_dma_page *) malloc(sizeof(struct usb_dma_page));\r
+ memset(ret, 0, sizeof(struct usb_dma_page));\r
+\r
+ uint32_t ret_size;\r
+ errval_t err = frame_alloc(&ret->cap, USB_PAGE_SIZE, &ret_size);\r
+\r
+ if (err_is_fail(err)) {\r
+ return NULL;\r
+ }\r
+\r
+ err = invoke_frame_identify(ret->cap, &ret->frame_id);\r
+\r
+ if (err_is_fail(err)) {\r
+ return NULL;\r
+ }\r
+\r
+ err = vspace_map_one_frame_attr(&ret->buffer, USB_PAGE_SIZE, ret->cap,\r
+ VREGION_FLAGS_READ_WRITE_NOCACHE, NULL, NULL);\r
+\r
+ if (err_is_fail(err)) {\r
+ return NULL;\r
+ }\r
+ ret->phys_addr = ret->frame_id.base;\r
+ ret->size = USB_PAGE_SIZE;\r
+\r
+ return ret;\r
}\r
\r
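+/**\r
+ * \brief returns a DMA buffer to the allocator's free list for later reuse\r
+ */\r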
+void usb_mem_dma_free(struct usb_dma_page *page)\r
+{\r
+ if (free_dma_buffers != NULL) {\r
+ page->next = free_dma_buffers;\r
+ } else {\r
+ page->next = NULL;\r
+ }\r
+ free_dma_buffers = page;\r
+}\r
+\r
+\r
+\r
+\r
uint32_t size;\r
void *buffer;\r
struct usb_xfer *xfer;\r
+ struct usb_dma_page *next;\r
};\r
\r
\r
\r
\r
\r
-static struct usb_page *usb_page_alloc();\r
+struct usb_page *usb_mem_page_alloc();\r
+void usb_mem_page_free(struct usb_page *ret_page);\r
\r
-usb_mem_alloc(uint32_t size, uint32_t align, struct usb_memory_block *ret_mem);\r
+uint32_t usb_mem_next_block(uint32_t size, uint32_t align, struct usb_page *page,\r
+ struct usb_memory_block *ret_mem);\r
\r
-usb_mem_free\r
+struct usb_dma_page *usb_mem_dma_alloc(uint32_t size, uint32_t align);\r
+void usb_mem_dma_free(struct usb_dma_page *page);\r
\r
\r
#endif /* _USB_MEMORY_H_ */\r
assert(my_core_id == BSP_CORE_ID);
printf("Kaluga running.\n");
+ printf("Kaluga: Connecting to SKB.\n");
+
err = skb_client_connect();
if (err_is_fail(err)) {
USER_PANIC_ERR(err, "Connect to SKB.");
}
+
+ printf("Kaluga: SKB client connected.\n");
+
// Make sure the driver db is loaded
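+ // ("[device_db]." is executed as a Prolog goal in the SKB's ECLiPSe
+ // engine and consults the device database file)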
err = skb_execute("[device_db].");
if (err_is_fail(err)) {
USER_PANIC_ERR(err, "Device DB not loaded.");
}
+ printf("Kaluga: intializing octopus\n");
+
err = oct_init();
if (err_is_fail(err)) {
USER_PANIC_ERR(err, "Initialize octopus service.");
}
+ printf("Kaluga: parse boot modules...\n");
+
err = init_boot_modules();
if (err_is_fail(err)) {
USER_PANIC_ERR(err, "Parse boot modules.");
}
add_start_function_overrides();
+ printf("Kaluga: barrier.acpi\n");
+
// The current boot protocol needs us to have
// knowledge about how many CPUs are available at boot
// time in order to start-up properly.
char* record = NULL;
err = oct_barrier_enter("barrier.acpi", &record, 2);
+
+ printf("Kaluga: cores\n");
+
err = watch_for_cores();
if (err_is_fail(err)) {
USER_PANIC_ERR(err, "Watching cores.");
}
+ printf("Kaluga: pci_root_bridge\n");
+
err = watch_for_pci_root_bridge();
if (err_is_fail(err)) {
USER_PANIC_ERR(err, "Watching PCI root bridges.");
}
+ printf("Kaluga: pci_devices\n");
+
err = watch_for_pci_devices();
if (err_is_fail(err)) {
USER_PANIC_ERR(err, "Watching PCI devices.");
err = oct_set("all_spawnds_up { iref: 0 }");
assert(err_is_ok(err));
+ printf("Kaluga: THC_Finish()\n");
+
THCFinish();
return EXIT_SUCCESS;
}