"-imacros" ] ]
++ [ NoDep SrcTree "src" "/include/deputy/nodeputy.h" ]
- commonCFlags = [ Str s | s <- [ "-std=gnu99",
+ commonCFlags = [ Str s | s <- [ "-std=c99",
++ "-U__STRICT_ANSI__", -- for newlib headers
"-Wstrict-prototypes",
"-Wold-style-definition",
"-Wmissing-prototypes" ] ]
stdLibs arch =
[ In InstallTree arch "/lib/libbarrelfish.a",
In InstallTree arch "/errors/errno.o",
- In InstallTree arch "/lib/libc.a",
+ In InstallTree arch ("/lib/lib" ++ Config.libc ++ ".a"),
+ In InstallTree arch "/lib/libposixcompat.a",
+ In InstallTree arch "/lib/libvfs.a",
+ In InstallTree arch "/lib/libnfs.a",
+ In InstallTree arch "/lib/liblwip.a",
+ In InstallTree arch "/lib/libbarrelfish.a",
+ In InstallTree arch "/lib/libcontmng.a",
++ In InstallTree arch "/lib/libprocon.a",
+ In InstallTree arch ("/lib/lib" ++ Config.libc ++ ".a"),
In InstallTree arch "/lib/crtend.o" ,
- In InstallTree arch "/lib/libcollections.a"]
+ In InstallTree arch "/lib/libcollections.a" ]
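+-- Note: libbarrelfish and the libc archive are listed twice in stdLibs;
+-- static archives are searched once, in link order, so repeating them is
+-- presumably how circular symbol dependencies between these libraries
+-- get resolved.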
stdCxxLibs arch =
[ In InstallTree arch "/lib/libcxx.a",
if pse_paging then "CONFIG_PSE" else "",
if nxe_paging then "CONFIG_NXE" else "",
if rck_emu then "RCK_EMU" else "",
+ if libc == "freec" then "FREEC" else "NEWLIB",
- if oneshot_timer then "CONFIG_ONESHOT_TIMER" else "",
- "MAX_CPUS=" ++ show max_cpus
+ if oneshot_timer then "CONFIG_ONESHOT_TIMER" else ""
], d /= "" ]
+-- Sets the include path for the libc
+libcInc :: String
+libcInc = if libc == "freec" then "/include/freec"
+ else "/lib/newlib/newlib/libc/include"
+
-- some defines depend on the architecture/compile options
arch_defines :: Options -> [RuleToken]
arch_defines opts
rpc new_client(out cap bulk);
-- rpc get_start(in char key[key_len], out uint64 index, out bool haveit, out uint64 transid, out uint64 size);
-- rpc get_stop(in uint64 transid, in uint64 index, in uint64 length);
++ rpc get_start(in char key[key_len], out uint64 idx, out bool haveit, out uint64 transid, out uint64 size);
++ rpc get_stop(in uint64 transid, in uint64 idx, in uint64 length);
rpc print_stats();
};
*/
interface ether "Generic Ethernet Driver" {
- call register_buffer(cap buf);
- response new_buffer_id(errval err, uint64 idx);
-
- call transmit_packet(uint64 nr_pbufs,
- uint64 buffer_id,
- uint64 len,
- uint64 offset,
- uint64 client_data);
- response tx_done(uint64 client_data);
+ call register_buffer(cap buf,
+ cap sp,
+ uint64 slots,
+ uint8 role);
+ response new_buffer_id(errval err,
- uint64 index);
++ uint64 idx);
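+
+    /* Note on register_buffer above: sp is a shared-pool (procon) region
+       with the given number of slots; role presumably selects the
+       producer or consumer side, cf. sp_create_shared_pool(slot_no, role)
+       in lib/procon. */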
call get_mac_address();
response get_mac_address_response(uint64 hwaddr);
#define EDEADLOCK EDEADLK
- #define EBFONT 59 /* Bad font file format */
- #define ENOSTR 60 /* Device not a stream */
- #define ENODATA 61 /* No data available */
- #define ETIME 62 /* Timer expired */
- #define ENOSR 63 /* Out of streams resources */
- #define ENONET 64 /* Machine is not on the network */
- #define ENOPKG 65 /* Package not installed */
- #define EREMOTE 66 /* Object is remote */
- #define ENOLINK 67 /* Link has been severed */
- #define EADV 68 /* Advertise error */
- #define ESRMNT 69 /* Srmount error */
- #define ECOMM 70 /* Communication error on send */
- #define EPROTO 71 /* Protocol error */
- #define EMULTIHOP 72 /* Multihop attempted */
- #define EDOTDOT 73 /* RFS specific error */
- #define EBADMSG 74 /* Not a data message */
- #define EOVERFLOW 75 /* Value too large for defined data type */
- #define ENOTUNIQ 76 /* Name not unique on network */
- #define EBADFD 77 /* File descriptor in bad state */
- #define EREMCHG 78 /* Remote address changed */
- #define ELIBACC 79 /* Can not access a needed shared library */
- #define ELIBBAD 80 /* Accessing a corrupted shared library */
- #define ELIBSCN 81 /* .lib section in a.out corrupted */
- #define ELIBMAX 82 /* Attempting to link in too many shared libraries */
- #define ELIBEXEC 83 /* Cannot exec a shared library directly */
- #define EILSEQ 84 /* Illegal byte sequence */
- #define ERESTART 85 /* Interrupted system call should be restarted */
- #define ESTRPIPE 86 /* Streams pipe error */
- #define EUSERS 87 /* Too many users */
- #define ENOTSOCK 88 /* Socket operation on non-socket */
- #define EDESTADDRREQ 89 /* Destination address required */
- #define EMSGSIZE 90 /* Message too long */
- #define EPROTOTYPE 91 /* Protocol wrong type for socket */
- #define ENOPROTOOPT 92 /* Protocol not available */
- #define EPROTONOSUPPORT 93 /* Protocol not supported */
- #define ESOCKTNOSUPPORT 94 /* Socket type not supported */
- #define EOPNOTSUPP 95 /* Operation not supported on transport endpoint */
- #define EPFNOSUPPORT 96 /* Protocol family not supported */
- #define EAFNOSUPPORT 97 /* Address family not supported by protocol */
- #define EADDRINUSE 98 /* Address already in use */
- #define EADDRNOTAVAIL 99 /* Cannot assign requested address */
- #define ENETDOWN 100 /* Network is down */
- #define ENETUNREACH 101 /* Network is unreachable */
- #define ENETRESET 102 /* Network dropped connection because of reset */
- #define ECONNABORTED 103 /* Software caused connection abort */
- #define ECONNRESET 104 /* Connection reset by peer */
- #define ENOBUFS 105 /* No buffer space available */
- #define EISCONN 106 /* Transport endpoint is already connected */
- #define ENOTCONN 107 /* Transport endpoint is not connected */
-
- #define ETOOMANYREFS 109 /* Too many references: cannot splice */
- #define ETIMEDOUT 110 /* Connection timed out */
- #define ECONNREFUSED 111 /* Connection refused */
- #define EHOSTDOWN 112 /* Host is down */
- #define EHOSTUNREACH 113 /* No route to host */
- #define EALREADY 114 /* Operation already in progress */
- #define EINPROGRESS 115 /* Operation now in progress */
- #define ESTALE 116 /* Stale NFS file handle */
- #define EUCLEAN 117 /* Structure needs cleaning */
- #define ENOTNAM 118 /* Not a XENIX named type file */
- #define ENAVAIL 119 /* No XENIX semaphores available */
- #define EISNAM 120 /* Is a named type file */
- #define EREMOTEIO 121 /* Remote I/O error */
- #define EDQUOT 122 /* Quota exceeded */
-
- #define ENOMEDIUM 123 /* No medium found */
- #define EMEDIUMTYPE 124 /* Wrong medium type */
+ #define EBFONT 59 /* Bad font file format */
+ #define ENOSTR 60 /* Device not a stream */
+ #define ENODATA 61 /* No data available */
+ #define ETIME 62 /* Timer expired */
+ #define ENOSR 63 /* Out of streams resources */
+ #define ENONET 64 /* Machine is not on the network */
+ #define ENOPKG 65 /* Package not installed */
+ #define EREMOTE 66 /* Object is remote */
+ #define ENOLINK 67 /* Link has been severed */
+ #define EADV 68 /* Advertise error */
+ #define ESRMNT 69 /* Srmount error */
+ #define ECOMM 70 /* Communication error on send */
+ #define EPROTO 71 /* Protocol error */
+ #define EMULTIHOP 72 /* Multihop attempted */
+ #define EDOTDOT 73 /* RFS specific error */
+ #define EBADMSG 74 /* Not a data message */
+ #define EOVERFLOW 75 /* Value too large for defined data type */
+ #define ENOTUNIQ 76 /* Name not unique on network */
+ #define EBADFD 77 /* File descriptor in bad state */
+ #define EREMCHG 78 /* Remote address changed */
+ #define ELIBACC 79 /* Can not access a needed shared library */
+ #define ELIBBAD 80 /* Accessing a corrupted shared library */
+ #define ELIBSCN 81 /* .lib section in a.out corrupted */
+ #define ELIBMAX 82 /* Attempting to link in too many shared libraries */
+ #define ELIBEXEC 83 /* Cannot exec a shared library directly */
+ #define EILSEQ 84 /* Illegal byte sequence */
+ #define ERESTART 85 /* Interrupted system call should be restarted */
+ #define ESTRPIPE 86 /* Streams pipe error */
+ #define EUSERS 87 /* Too many users */
+ #define ENOTSOCK 88 /* Socket operation on non-socket */
+ #define EDESTADDRREQ 89 /* Destination address required */
+ #define EMSGSIZE 90 /* Message too long */
+ #define EPROTOTYPE 91 /* Protocol wrong type for socket */
+ #define ENOPROTOOPT 92 /* Protocol not available */
+ #define EPROTONOSUPPORT 93 /* Protocol not supported */
+ #define ESOCKTNOSUPPORT 94 /* Socket type not supported */
+ #define EOPNOTSUPP 95 /* Operation not supported on transport endpoint */
+ #define EPFNOSUPPORT 96 /* Protocol family not supported */
+ #define EAFNOSUPPORT 97 /* Address family not supported by protocol */
+ #define EADDRINUSE 98 /* Address already in use */
+ #define EADDRNOTAVAIL 99 /* Cannot assign requested address */
+ #define ENETDOWN 100 /* Network is down */
+ #define ENETUNREACH 101 /* Network is unreachable */
+ #define ENETRESET 102 /* Network dropped connection because of reset */
+ #define ECONNABORTED 103 /* Software caused connection abort */
+ #define ECONNRESET 104 /* Connection reset by peer */
+ #define ENOBUFS 105 /* No buffer space available */
+ #define EISCONN 106 /* Transport endpoint is already connected */
+ #define ENOTCONN 107 /* Transport endpoint is not connected */
-#define ESHUTDOWN 108 /* Cannot send after transport endpoint shutdown */
++/* XXX: ESHUTDOWN (108) has been moved down for newlib compatibility -AKK */
+ #define ETOOMANYREFS 109 /* Too many references: cannot splice */
+ #define ETIMEDOUT 110 /* Connection timed out */
+ #define ECONNREFUSED 111 /* Connection refused */
+ #define EHOSTDOWN 112 /* Host is down */
+ #define EHOSTUNREACH 113 /* No route to host */
+ #define EALREADY 114 /* Operation already in progress */
+ #define EINPROGRESS 115 /* Operation now in progress */
+ #define ESTALE 116 /* Stale NFS file handle */
+ #define EUCLEAN 117 /* Structure needs cleaning */
+ #define ENOTNAM 118 /* Not a XENIX named type file */
+ #define ENAVAIL 119 /* No XENIX semaphores available */
+ #define EISNAM 120 /* Is a named type file */
+ #define EREMOTEIO 121 /* Remote I/O error */
+ #define EDQUOT 122 /* Quota exceeded */
+
+ #define ENOMEDIUM 123 /* No medium found */
+ #define EMEDIUMTYPE 124 /* Wrong medium type */
+#ifndef errno
+extern int errno;
+#endif
+
+#endif /* LWIP_PROVIDE_ERRNO */
+
-
- #define ENSROK 0 /* DNS server returned answer with no data */
- #define ESHUTDOWN 108 /* Cannot send after transport endpoint shutdown */
- #define ENSRNODATA 160 /* DNS server returned answer with no data */
- #define ENSRFORMERR 161 /* DNS server claims query was misformatted */
- #define ENSRSERVFAIL 162 /* DNS server returned general failure */
- #define ENSRNOTFOUND 163 /* Domain name not found */
- #define ENSRNOTIMP 164 /* DNS server does not implement requested operation */
- #define ENSRREFUSED 165 /* DNS server refused query */
- #define ENSRBADQUERY 166 /* Misformatted DNS query */
- #define ENSRBADNAME 167 /* Misformatted domain name */
- #define ENSRBADFAMILY 168 /* Unsupported address family */
- #define ENSRBADRESP 169 /* Misformatted DNS reply */
- #define ENSRCONNREFUSED 170 /* Could not contact DNS servers */
- #define ENSRTIMEOUT 171 /* Timeout while contacting DNS servers */
- #define ENSROF 172 /* End of file */
- #define ENSRFILE 173 /* Error reading file */
- #define ENSRNOMEM 174 /* Out of memory */
- #define ENSRDESTRUCTION 175 /* Application terminated lookup */
- #define ENSRQUERYDOMAINTOOLONG 176 /* Domain name is too long */
- #define ENSRCNAMELOOP 177 /* Domain name is too long */
-
++/* These are not protected by LWIP_PROVIDE_ERRNO for newlib compatibility -AKK */
+
+ #define ENSROK 0 /* DNS server returned answer with no data */
++#define ESHUTDOWN 108 /* Cannot send after transport endpoint shutdown */
+ #define ENSRNODATA 160 /* DNS server returned answer with no data */
+ #define ENSRFORMERR 161 /* DNS server claims query was misformatted */
+ #define ENSRSERVFAIL 162 /* DNS server returned general failure */
+ #define ENSRNOTFOUND 163 /* Domain name not found */
+ #define ENSRNOTIMP 164 /* DNS server does not implement requested operation */
+ #define ENSRREFUSED 165 /* DNS server refused query */
+ #define ENSRBADQUERY 166 /* Misformatted DNS query */
+ #define ENSRBADNAME 167 /* Misformatted domain name */
+ #define ENSRBADFAMILY 168 /* Unsupported address family */
+ #define ENSRBADRESP 169 /* Misformatted DNS reply */
+ #define ENSRCONNREFUSED 170 /* Could not contact DNS servers */
+ #define ENSRTIMEOUT 171 /* Timeout while contacting DNS servers */
+ #define ENSROF 172 /* End of file */
+ #define ENSRFILE 173 /* Error reading file */
+ #define ENSRNOMEM 174 /* Out of memory */
+ #define ENSRDESTRUCTION 175 /* Application terminated lookup */
+ #define ENSRQUERYDOMAINTOOLONG 176 /* Domain name is too long */
+ #define ENSRCNAMELOOP 177 /* Domain name is too long */
-#ifndef errno
- extern int errno;
-#endif
-
-#endif /* LWIP_PROVIDE_ERRNO */
-
#ifdef __cplusplus
}
#endif
}
} /* end function: cont_queue_send_next_message */
- // Function to show the content of the queue.
- // Note: It is to be used only for debug purposes.
void cont_queue_show_queue(struct cont_queue *q)
{
+ /*
int i = 0;
- int index = 0;
+ int idx = 0;
int len = 0;
len = q->head - q->tail;
printf("Showing the cont queue status for queue[%s]\n", q->name);
}
printf("Showing elements which are already sent!!\n");
- index = q->tail;
+ idx = q->tail;
for (i = 0; i < 10; ++i){
- index = (index - 1);
- if (index < 0) {
- index = MAX_QUEUE_SIZE - 1;
+ idx = (idx - 1);
+ if (idx < 0) {
+ idx = MAX_QUEUE_SIZE - 1;
}
- printf("elem %d: [%s], state %d\n", index, q->qelist[index].fname,
- q->qelist[index].state);
-
+ printf("elem %d: [%s], state %d\n", idx, q->qelist[idx].fname,
+ q->qelist[idx].history);
}
+ */
} // end function: cont_queue_show_queue
--------------------------------------------------------------------------
[ build library { target = "ethersrv",
- cFiles = [ "ethersrv.c", "frag.c"],
- flounderBindings = [ "ether", "ether_control" ]
+ cFiles = [ "ethersrv.c", "frag.c", "ethersrv_ctl.c",
+ "ethersrv_support.c" ],
- flounderBindings = [ "ether", "ether_control"],
- addLibraries = [ "contmng", "procon", "bfdmuxvm", "lwip" ]
++ flounderBindings = [ "ether", "ether_control"]
}
]
--- /dev/null
+
+/*
- * K&R Malloc
++ * K&R Malloc
+ *
+ * System-specific code should implement `morecore'
+ */
+#include "k_r_malloc.h"
+#include <stddef.h> /* For NULL */
+#include <stdlib.h>
+#include <string.h> /* For memcpy */
+
+#include <barrelfish/barrelfish.h>
+#include <barrelfish/core_state.h> /* XXX */
+
+typedef void *(*alt_malloc_t)(size_t bytes);
+alt_malloc_t alt_malloc = NULL;
+
+typedef void (*alt_free_t)(void *p);
+alt_free_t alt_free = NULL;
+
+#define MALLOC_LOCK thread_mutex_lock(&state->mutex)
+#define MALLOC_UNLOCK thread_mutex_unlock(&state->mutex)
+
+#ifdef CONFIG_MALLOC_INSTRUMENT
+size_t __malloc_instrumented_allocated;
+#endif
+
+#ifdef CONFIG_MALLOC_DEBUG_INTERNAL
+#include <stdio.h>
+#include <assert.h>
+int __malloc_check(void);
+void __malloc_dump(void);
+#endif
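+
+/*
+ * Illustrative sketch only (not part of this file): a minimal `morecore'
+ * in the K&R style, built on sbrk().  Barrelfish's real morecore/lesscore
+ * manage a vregion elsewhere; this merely shows the contract malloc()
+ * below relies on: return a free-list-linked block of at least nu Header
+ * units, or NULL.
+ */
+#if 0
+static Header *morecore_sketch(unsigned nu)
+{
+    if (nu < 1024) {                    /* grow in reasonable chunks */
+        nu = 1024;
+    }
+    char *cp = sbrk(nu * sizeof(Header));
+    if (cp == (char *) -1) {            /* no space at all */
+        return NULL;
+    }
+    Header *up = (Header *) cp;
+    up->s.size = nu;
+    __free_locked(up + 1);              /* link new block into free list */
+    return get_morecore_state()->header_freep;
+}
+#endif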
+
+/*
- * malloc: general-purpose storage allocator
++ * malloc: general-purpose storage allocator
+ */
+void *
+malloc(size_t nbytes)
+{
+ if (alt_malloc != NULL) {
+ return alt_malloc(nbytes);
+ }
+
+ struct morecore_state *state = get_morecore_state();
+ Header *p, *prevp;
+ unsigned nunits;
+ nunits = (nbytes + sizeof(Header) - 1) / sizeof(Header) + 1;
+
+ MALLOC_LOCK;
+ if ((prevp = state->header_freep) == NULL) { /* no free list yet */
+ state->header_base.s.ptr = state->header_freep = prevp = &state->header_base;
+ state->header_base.s.size = 0;
+ }
+ for (p = prevp->s.ptr;; prevp = p, p = p->s.ptr) {
+ if (p->s.size >= nunits) { /* big enough */
+ if (p->s.size == nunits) /* exactly */
+ prevp->s.ptr = p->s.ptr;
+ else { /* allocate tail end */
+ p->s.size -= nunits;
+ p += p->s.size;
+ p->s.size = nunits;
+ }
+ state->header_freep = prevp;
+#ifdef CONFIG_MALLOC_DEBUG
+ {
+ /* Write bit pattern over data */
+ char *x = (char *) (p + 1);
+ int i;
+ for (i = 0; i < nbytes; i++)
+ x[i] = 0xd0;
+ }
+#endif
+
+#ifdef CONFIG_MALLOC_INSTRUMENT
+ __malloc_instrumented_allocated += nunits;
+#endif
+#ifdef CONFIG_MALLOC_DEBUG_INTERNAL
+ if (__malloc_check() != 0) {
+ printf("malloc %lu %p\n", nbytes, (void *) (p + 1));
+ __malloc_dump();
+ assert(__malloc_check() == 0);
+ }
+#endif
+ MALLOC_UNLOCK;
+ return (void *) (p + 1);
+ }
+ if (p == state->header_freep) { /* wrapped around free list */
+ if ((p = (Header *) morecore(nunits)) == NULL) {
+ MALLOC_UNLOCK;
+ return NULL; /* none left */
+            }
+ }
+ }
+ MALLOC_UNLOCK;
+}
+
+/*
- * free: put block ap in free list
++ * free: put block ap in free list
+ */
+void
+__free_locked(void *ap)
+{
+ struct morecore_state *state = get_morecore_state();
+ Header *bp, *p;
+
+ if (ap == NULL)
+ return;
+
+ bp = (Header *) ap - 1; /* point to block header */
+ for (p = state->header_freep; !(bp > p && bp < p->s.ptr); p = p->s.ptr)
+ if (p >= p->s.ptr && (bp > p || bp < p->s.ptr))
+ break; /* freed block at start or end of arena */
+
+#ifdef CONFIG_MALLOC_INSTRUMENT
+ __malloc_instrumented_allocated -= bp->s.size;
+#endif
+
+ if (bp + bp->s.size == p->s.ptr) { /* join to upper nbr */
+ bp->s.size += p->s.ptr->s.size;
+ bp->s.ptr = p->s.ptr->s.ptr;
+ } else {
+ bp->s.ptr = p->s.ptr;
+ }
+
+ if (p + p->s.size == bp) { /* join to lower nbr */
+ p->s.size += bp->s.size;
+ p->s.ptr = bp->s.ptr;
+ } else {
+ p->s.ptr = bp;
+ }
+
+ state->header_freep = p;
+
+#ifdef CONFIG_MALLOC_DEBUG_INTERNAL
+ if (__malloc_check() != 0) {
+ printf("free %p\n", ap);
+ __malloc_dump();
+ assert(__malloc_check() == 0);
+ }
+#endif
+}
+
+void free(void *ap)
+{
+ if (ap == NULL) {
+ return;
+ }
+
+    if (alt_free != NULL) {
+        alt_free(ap);
+        return;
+ }
+
+ struct morecore_state *state = get_morecore_state();
+
+#ifdef __x86_64__
+    /* XXX: Dispatchers on different cores maintain different malloc arenas,
+     * so we detect when one dispatcher tries to free memory that is not in
+     * its own arena, and leak it instead
+ */
+ lvaddr_t base = vregion_get_base_addr(&state->mmu_state.vregion);
+ lvaddr_t limit = base + vregion_get_size(&state->mmu_state.vregion);
+
+ if ((lvaddr_t)ap < base || (lvaddr_t)ap >= limit) {
+ if (X86_64_PML4_BASE(ap) != X86_64_PML4_BASE(base)) {
+ return;
+ }
+ }
+
+ assert((lvaddr_t)ap >= base && (lvaddr_t)ap < limit);
+#endif
+
+ MALLOC_LOCK;
+ __free_locked(ap);
+ lesscore();
+ MALLOC_UNLOCK;
+}
+
+#ifdef CONFIG_MALLOC_DEBUG_INTERNAL
+
+int
+__malloc_check(void)
+{
+ struct morecore_state *state = get_morecore_state();
+ Header *p, *prevp;
+ if ((prevp = state->header_freep) == NULL) { /* no free list yet */
+ return 0;
+ }
+ for (p = prevp->s.ptr;; prevp = p, p = p->s.ptr) {
+ if ((void*) p == NULL) {
+ return 1;
+ }
+ /* Free bits should be in order */
+ if (p > p->s.ptr && p->s.ptr != &state->header_base) {
+ return 1;
+ }
+ if ((uintptr_t) p + (p->s.size * sizeof(Header)) > (uintptr_t) p->s.ptr && p->s.ptr != &state->header_base) {
+ return 1;
+ }
+ /* shouldn't have zero sized free bits */
+ if (p->s.size == 0 && p != &state->header_base) {
+ return 1;
+ }
+ if (p == state->header_freep) { /* wrapped around free list */
+ break;
+ }
+ }
+ return 0;
+}
+
+void
+__malloc_dump(void)
+{
+ struct morecore_state *state = get_morecore_state();
+ Header *p, *prevp;
+ if ((prevp = state->header_freep) == NULL) { /* no free list yet */
+ return;
+ }
+ printf("Malloc dump\n"
+ "We expect the free list to be sorted from low to high addresses\n"
+ "with no item overlapping another item and no empty items.\n"
+ "Legend:\n"
+ "* Successor in list is at lower address than current item\n"
+ "# Item has size 0\n"
+ "$ This item overlaps (base + size) the next item's base\n");
+ printf("List base at %p, freep at %p\n", &state->header_base,
+ state->header_freep);
+ for (p = prevp->s.ptr;; prevp = p, p = p->s.ptr) {
+ if (p > p->s.ptr && p->s.ptr != &state->header_base) {
+ printf("* ");
+ }
+ if (p->s.size == 0 && p != &state->header_base) {
+ printf("# ");
+ }
+ if ((uintptr_t) p + (p->s.size * sizeof(Header)) > (uintptr_t) p->s.ptr && p->s.ptr != &state->header_base) {
+ printf("$ ");
+ }
+ if (p == &state->header_base) {
+ printf(" p: <base>\n");
+ } else {
+ printf(" p: %p (%d) -> %p\n", p, p->s.size, p->s.ptr);
+ }
+ assert(p != NULL);
+ if (p == state->header_freep) { /* wrapped around free list */
+ return;
+ }
+ }
+}
+#endif
void *sbrk(intptr_t increment)
{
errval_t err;
+ size_t orig_offset;
-
+
static void *base;
- static size_t offset;
+ static size_t offset = 0;
+ static size_t goffset = 0;
+ static struct memobj_anon memobj_;
static struct memobj *memobj = NULL;
+ static struct vregion vregion_;
static struct vregion *vregion = NULL;
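+
+    /* offset is the current break within the region; goffset is the
+     * high-water mark of bytes already backed by frames, so shrinking
+     * (negative increment) and re-growing below goffset need no new
+     * frame allocation. */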
if (!memobj) { // Initialize
}
if (increment < 0) {
- USER_PANIC("sbrk() called with negative increment - NYI");
+ if (-increment > offset) {
+ USER_PANIC("sbrk() called with negative increment beyond offset");
+ } else {
+ orig_offset = offset;
+ offset += increment;
-
++
+ void *ret = base + orig_offset;
+ return ret;
+ }
} else if (increment == 0) {
return base + offset;
} else if (offset + increment > SBRK_REGION_BYTES) {
- debug_printf("sbrk() exceeded static region limit of %lu bytes, offset: %lu\n",
- debug_printf("sbrk() exceeded static region limit of %zu bytes\n",
- (size_t)SBRK_REGION_BYTES);
++ debug_printf("sbrk() exceeded static region limit of %zu bytes, offset: %zu\n",
+ (size_t)SBRK_REGION_BYTES, offset);
return (void *)-1;
+ } else if (offset + increment <= goffset) {
+ orig_offset = offset;
+ offset += increment;
-
++
+ void *ret = base + orig_offset;
+ return ret;
}
- size_t inc_bytes = increment;
- size_t orig_offset = offset;
+ size_t inc_bytes = offset + increment - goffset;
+ orig_offset = offset;
struct capref frame;
err = frame_alloc(&frame, inc_bytes, &inc_bytes);
--- /dev/null
+ /**
+ * \file
+ * \brief producer consumer library
+ *
+ * This file provides a producer consumer protocol
+ */
+
+ /*
+ * Copyright (c) 2007-11 ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+ #include <stdio.h>
+ #include <string.h>
+ #include <barrelfish/barrelfish.h>
+ #include <barrelfish/bulk_transfer.h>
+ #include <procon/procon.h>
+
+
+ // ******************* cache coherency specific code
+ #define MAX_CACHE_READ_TRIES 3
+
+ static uint64_t sp_atomic_read_reg(union vreg *reg)
+ {
+
+ #if 0
+ volatile uint64_t v1 = 0;
+ volatile uint64_t v2 = 0;
+ uint8_t tries = 0;
+
+ for (tries = 0; tries < MAX_CACHE_READ_TRIES; ++tries) {
+ /*
+ #if !defined(__scc__) && !defined(__i386__)
+ cache_flush_range(reg, CACHESIZE);
+ #endif // !defined(__scc__) && !defined(__i386__)
+ */
+ v1 = reg->value;
+ /*
+ #if !defined(__scc__) && !defined(__i386__)
+ cache_flush_range(reg, CACHESIZE);
+ #endif // !defined(__scc__) && !defined(__i386__)
+ */
+ v2 = reg->value;
+
+ if (v1 == v2) {
+ return v1;
+ }
+ } // end for : retrying
+ assert (!"atomic read of read index failed");
+ return v1;
+ #endif // 0
+     mfence();
+     return reg->value;
+ } // end function: sp_atomic_read_reg
+
+ static void sp_atomic_set_reg(union vreg *reg, uint64_t value)
+ {
+ reg->value = value;
+ mfence();
+ /*
+ #if !defined(__scc__) && !defined(__i386__)
+ cache_flush_range(reg, CACHESIZE);
+ #endif // !defined(__scc__) && !defined(__i386__)
+ */
+ }
+
+ void sp_reload_regs(struct shared_pool_private *spp)
+ {
+ assert(spp != NULL);
+ struct shared_pool *sp = spp->sp;
+ assert(sp != NULL);
+ spp->c_read_id = sp_atomic_read_reg(&spp->sp->read_reg);
+ spp->c_write_id = sp_atomic_read_reg(&spp->sp->write_reg);
+ spp->c_size = sp_atomic_read_reg(&spp->sp->size_reg);
+ }
+
+
+
+ // **************************** generic queue based code
+ bool sp_gen_queue_empty(uint64_t read, uint64_t write)
+ {
+ return (read == write);
+ }
+
+ bool sp_gen_queue_full(uint64_t read, uint64_t write, uint64_t size)
+ {
+ return (((write + 1) % size ) == read);
+ }
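+
+ // Note: one slot is deliberately left unused, so that a full queue
+ // ((write + 1) % size == read) is distinguishable from an empty one
+ // (read == write); a queue of size N thus holds at most N - 1 elements.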
+
+ uint64_t sp_c_range_size(uint64_t start, uint64_t end, uint64_t size)
+ {
+
+ // simple, non-wrapped space
+ if (start <= end) {
+ return (end - start);
+ }
+
+ // wrapped queue, so more complicated!
+ return ((size - start) + end);
+ }
+
+
+ // checks for (start <= value < end) in a circular queue of size "size"
+ bool sp_c_between(uint64_t start, uint64_t value, uint64_t end, uint64_t size)
+ {
+
+ // sanity check: value must be less than size
+ if (value >= size) {
+ return false;
+ }
+
+ // Logical queue empty state
+ if (start == end) {
+ if (start == value) {
+ return true;
+ }
+ return false;
+ }
+
+ // simple, non-wrapped space
+ if (start < end) {
+ if ((start <= value) && (value < end)) {
+ return true;
+ }
+ return false;
+ }
+
+ // wrapped space, more complicated
+ if ((value < end) || (start <= value)) {
+ return true;
+ }
+ return false;
+ }
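+
+ // Worked example (illustrative): with size 8, start 6 and end 2, the
+ // live range wraps and covers {6, 7, 0, 1}; so sp_c_between(6, 7, 2, 8)
+ // and sp_c_between(6, 0, 2, 8) hold, while sp_c_between(6, 3, 2, 8)
+ // does not.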
+
+ // ******************* spp queue code for condition checking
+
+ uint64_t sp_get_read_index(struct shared_pool_private *spp)
+ {
+ sp_reload_regs(spp);
+ return spp->c_read_id;
+ }
+
+ uint64_t sp_get_write_index(struct shared_pool_private *spp)
+ {
+ sp_reload_regs(spp);
+ return spp->c_write_id;
+ }
+
+ uint64_t sp_get_queue_size(struct shared_pool_private *spp)
+ {
+ sp_reload_regs(spp);
+ return spp->c_size;
+ }
+
+
+ // Checks for queue empty condition
+ bool sp_queue_empty(struct shared_pool_private *spp)
+ {
+ // sp_reload_regs(spp);
+ return sp_gen_queue_empty(spp->c_read_id, spp->c_write_id);
+ }
+
+
+ // Check for queue full condition
+ bool sp_queue_full(struct shared_pool_private *spp)
+ {
+ return sp_gen_queue_full(spp->c_read_id, spp->c_write_id,
+ spp->c_size);
+ }
+
+
+ // Checks if given index is peekable or not
-bool sp_read_peekable_index(struct shared_pool_private *spp, uint64_t index)
++bool sp_read_peekable_index(struct shared_pool_private *spp, uint64_t idx)
+ {
+ sp_reload_regs(spp);
- return sp_c_between(spp->c_read_id, index, spp->c_write_id, spp->c_size);
++ return sp_c_between(spp->c_read_id, idx, spp->c_write_id, spp->c_size);
+ } // end function: sp_read_peekable_index
+
+
+ // Checks whether the given index may be set as the new read_reg value
-bool sp_validate_read_index(struct shared_pool_private *spp, uint64_t index)
++bool sp_validate_read_index(struct shared_pool_private *spp, uint64_t idx)
+ {
+ sp_reload_regs(spp);
+ uint64_t upper_limit = (spp->c_write_id + 1) % spp->c_size;
- return sp_c_between(spp->c_read_id, index, upper_limit, spp->c_size);
++ return sp_c_between(spp->c_read_id, idx, upper_limit, spp->c_size);
+ }
+
+
+ // Returns no. of elements available for consumption
+ uint64_t sp_queue_elements_count(struct shared_pool_private *spp)
+ {
+ sp_reload_regs(spp);
+ return sp_c_range_size(spp->c_read_id, spp->c_write_id, spp->c_size);
+ } // end function: sp_queue_elements_count
+
+ // Checks if given index is write peekable or not
-bool sp_write_peekable_index(struct shared_pool_private *spp, uint64_t index)
++bool sp_write_peekable_index(struct shared_pool_private *spp, uint64_t idx)
+ {
+ sp_reload_regs(spp);
+
+ // Trivial case: index bigger than queue size
- if (index >= spp->c_size){
++ if (idx >= spp->c_size){
+ return false;
+ }
+
+ // Trivial case: queue empty
+ if (sp_queue_empty(spp)) {
+ return true;
+ }
+
- return sp_c_between(spp->c_write_id, index, spp->c_read_id, spp->c_size);
++ return sp_c_between(spp->c_write_id, idx, spp->c_read_id, spp->c_size);
+ } // end function: sp_write_peekable_index
+
+
+ // Checks if given index is valid for write or not
-bool sp_validate_write_index(struct shared_pool_private *spp, uint64_t index)
++bool sp_validate_write_index(struct shared_pool_private *spp, uint64_t idx)
+ {
- return sp_write_peekable_index(spp, index);
++ return sp_write_peekable_index(spp, idx);
+ } // end function: sp_validate_write_index
+
+
+ // Returns no. of free slots available for production
+ uint64_t sp_queue_free_slots_count(struct shared_pool_private *spp)
+ {
+ sp_reload_regs(spp);
+ if (sp_queue_empty(spp)) {
+ return spp->c_size;
+ }
+ return sp_c_range_size(spp->c_write_id, spp->c_read_id, spp->c_size);
+ } // end function: sp_queue_free_slots_count
+
+
+ // ************* Initialization functions ***********************
+
+ static size_t calculate_shared_pool_size(uint64_t slot_no)
+ {
+ return (sizeof(struct shared_pool) +
+ ((sizeof(union slot)) * (slot_no - TMP_SLOTS)));
+ }
+
+ static void sp_reset_pool(struct shared_pool_private *spp, uint64_t slot_count)
+ {
+ assert(spp != NULL);
+ struct shared_pool *sp = spp->sp;
+ assert(sp != NULL);
+ assert(slot_count > TMP_SLOTS);
+
+ int i = 0;
+
+    // Ensure that slot_count is <= alloted_slots
+ assert(slot_count <= spp->alloted_slots);
+
+ sp_atomic_set_reg(&sp->read_reg, 0);
+ sp_atomic_set_reg(&sp->write_reg, 0);
+ sp_atomic_set_reg(&sp->size_reg, slot_count);
+ for(i = 0; i < slot_count; ++i) {
+ memset(&sp->slot_list[i], 0, sizeof(union slot));
+ } // end for:
+
+ sp_reload_regs(spp);
+ spp->notify_other_side = 0;
+ spp->ghost_read_id = spp->c_read_id;
+ spp->ghost_write_id = spp->c_write_id;
+ spp->pre_write_id = spp->c_read_id;
+ spp->produce_counter = 0;
+ spp->consume_counter = 0;
+ spp->clear_counter = 0;
+ mfence();
+ } // sp_reset_pool
+
+
+ // Creates a new shared_pool area and initializes it as creator
+ struct shared_pool_private *sp_create_shared_pool(uint64_t slot_no,
+ uint8_t role)
+ {
+
+ struct shared_pool_private *spp = (struct shared_pool_private *)
+ malloc(sizeof(struct shared_pool_private));
+ assert(spp != NULL);
+
+ errval_t err;
+ assert(slot_no > 2);
+
+    // adding 1 more slot for safety (NB: the +1 is not applied in the code below)
+ size_t mem_size = calculate_shared_pool_size((slot_no));
+
+    // NOTE: using bulk_create here because its code has been modified
+    // to suit the shared-buffer allocation
+    // FIXME: code repetition with mem_barrelfish_alloc_and_register
+ struct bulk_transfer bt_sp;
+ #if defined(__scc__) && !defined(RCK_EMU)
+ err = bulk_create(mem_size, sizeof(union slot), &(spp->cap), &bt_sp, true);
+ #else
+ err = bulk_create(mem_size, sizeof(union slot), &(spp->cap), &bt_sp, false);
+ #endif // defined(__scc__) && !defined(RCK_EMU)
+ if (err_is_fail(err)) {
+ DEBUG_ERR(err, "bulk_create failed.");
+ return NULL;
+ }
+ spp->va = bt_sp.mem;
+ spp->sp = (struct shared_pool *)spp->va;
+
+ struct frame_identity f;
+
+ err = invoke_frame_identify(spp->cap, &f);
+ if (err_is_fail(err)) {
+ DEBUG_ERR(err, "frame_identify failed");
+ return NULL;
+ }
+ spp->pa = f.base;
+ spp->mem_size = (1 << f.bits);
+ spp->alloted_slots = slot_no;
+ spp->is_creator = true;
+ spp->role = role;
+
+ sp_reset_pool(spp, slot_no);
+ printf("Created shared_pool of size(Req %"PRIu64", Actual %"PRIu64") "
+ "with role [%"PRIu8"] and slots [%"PRIu64"]\n",
+ (uint64_t)mem_size, spp->mem_size, spp->role,
+ spp->alloted_slots);
+
+ /* printf("##### procon sizeof spp[%lu], sizeof sp[%lu]\n",
+ sizeof(struct shared_pool_private),
+ sizeof(struct shared_pool) );
+ */
+ mfence();
+ return spp;
+ } // end function: sp_create_shared_pool
+
+
+ // Loads shared_pool area which is already created by some other creator
+ errval_t sp_map_shared_pool(struct shared_pool_private *spp, struct capref cap,
+ uint64_t slot_no, uint8_t role)
+ {
+ errval_t err = SYS_ERR_OK;
+ assert(spp != NULL);
+ assert(spp->sp == NULL);
+ assert(slot_no > 2);
+ spp->cap = cap;
+ spp->alloted_slots = slot_no;
+ spp->role = role;
+ spp->is_creator = 0;
+
+ struct frame_identity f;
+
+ err = invoke_frame_identify(cap, &f);
+ if (err_is_fail(err)) {
+ DEBUG_ERR(err, "invoke_frame_identify failed");
+ return err;
+ }
+ spp->pa = f.base;
+ spp->mem_size = (1 << f.bits);
+ size_t mem_size = calculate_shared_pool_size(slot_no);
+
+ assert(spp->mem_size >= mem_size);
+
+ err = vspace_map_one_frame_attr(&spp->va, (1L << f.bits), cap,
+ VREGION_FLAGS_READ_WRITE_NOCACHE, NULL, NULL);
+
+ if (err_is_fail(err)) {
+ DEBUG_ERR(err, "vspace_map_one_frame failed");
+ return err;
+ }
+
+ spp->sp = (struct shared_pool *)spp->va;
+
+ sp_reload_regs(spp);
+ assert(spp->c_size == spp->alloted_slots);
+
+ spp->ghost_read_id = spp->c_read_id;
+ spp->ghost_write_id = spp->c_write_id;
+ spp->pre_write_id = spp->c_read_id;
+ spp->notify_other_side = 0;
+ spp->produce_counter = 0;
+ spp->consume_counter = 0;
+ spp->clear_counter = 0;
+
+ printf("Mapped shared_pool of size(R %"PRIu64", A %"PRIu64") "
+ "with role [%"PRIu8"], slots[%"PRIu64"] and pool len[%"PRIu64"]\n",
+ (uint64_t)mem_size, spp->mem_size, spp->role, spp->alloted_slots,
+ spp->c_size);
+ mfence();
+ return SYS_ERR_OK;
+
+ } // end function: sp_map_shared_pool
+
+
+ // *************************** State modifying functions *************
+ static bool validate_slot(struct slot_data *d)
+ {
+ if (d == NULL) {
+ return false;
+ }
+
+    // FIXME: check that buffer_id, pbuf_id, len, etc. are sensible!
+ return true;
+ } // end function: validate_slot
+
+ void copy_data_into_slot(struct shared_pool_private *spp, uint64_t buf_id,
+ uint64_t id, uint64_t offset, uint64_t len, uint64_t no_pbufs,
+ uint64_t client_data, uint64_t ts)
+ {
+ assert(id < spp->c_size);
+ spp->sp->slot_list[id].d.buffer_id = buf_id;
+ spp->sp->slot_list[id].d.no_pbufs = no_pbufs;
+ spp->sp->slot_list[id].d.pbuf_id = id;
+ spp->sp->slot_list[id].d.offset = offset;
+ spp->sp->slot_list[id].d.len = len;
+ spp->sp->slot_list[id].d.client_data = client_data;
+ spp->sp->slot_list[id].d.ts = ts;
+ mfence();
+    // flush the updated slot out to the shared pool (flush currently disabled)
+ #if 0
+ #if !defined(__scc__) && !defined(__i386__)
+ cache_flush_range(&spp->sp->slot_list[id], SLOT_SIZE);
+ #endif // !defined(__scc__) && !defined(__i386__)
+ #endif // 0
+ }
+
+ void sp_copy_slot_data(struct slot_data *d, struct slot_data *s)
+ {
+ assert(d != NULL);
+ assert(s != NULL);
+ d->buffer_id = s->buffer_id;
+ d->pbuf_id = s->pbuf_id;
+ d->offset = s->offset;
+ d->len = s->len;
+ d->no_pbufs = s->no_pbufs;
+ d->client_data = s->client_data;
+ d->ts = s->ts;
+ mfence();
+ }
+
+ void sp_copy_slot_data_from_index(struct shared_pool_private *spp,
- uint64_t index, struct slot_data *d)
++ uint64_t idx, struct slot_data *d)
+ {
- sp_copy_slot_data(d, &spp->sp->slot_list[index].d);
++ sp_copy_slot_data(d, &spp->sp->slot_list[idx].d);
+ } // end function: sp_copy_slot_data_index
+
+
+ // Set the value of read index
+ // To be used with sp_read_peek_slot
-bool sp_set_read_index(struct shared_pool_private *spp, uint64_t index)
++bool sp_set_read_index(struct shared_pool_private *spp, uint64_t idx)
+ {
+
+ sp_reload_regs(spp);
+ // Trivial case:
- if (spp->c_read_id == index) {
++ if (spp->c_read_id == idx) {
+ return true;
+ }
+
- if (!sp_validate_read_index(spp, index)) {
++ if (!sp_validate_read_index(spp, idx)) {
+ // The value in index is invalid!
+ return false;
+ }
+
+ if (sp_queue_full(spp)) {
+        // The producer is assuming that there is no free space in this pool.
+        // As we have just created some free space by reading, we should
+        // inform the producer to produce more!
+        // Typically this means: I (the consumer) am slow!
+ ++spp->notify_other_side;
+ }
+
- sp_atomic_set_reg(&spp->sp->read_reg, index);
++ sp_atomic_set_reg(&spp->sp->read_reg, idx);
+ sp_reload_regs(spp);
+
+ // spp->ghost_read_id = spp->c_read_id;
+ // printf("changing read_index!\n");
+ if (sp_queue_empty(spp)) {
+        // There is nothing more to consume;
+        // we should inform the producer to produce quickly.
+        // Typically this means: the producer is slow!
+ ++spp->notify_other_side;
+ }
+
+ ++spp->consume_counter;
+ return true;
+ } // end function: sp_set_read_index
+
+
+ // Set the value of write index
+ // To be used with sp_ghost_produce_slot
-bool sp_set_write_index(struct shared_pool_private *spp, uint64_t index)
++bool sp_set_write_index(struct shared_pool_private *spp, uint64_t idx)
+ {
+ sp_reload_regs(spp);
+
+ // Trivial case:
- if (spp->c_write_id == index) {
++ if (spp->c_write_id == idx) {
+ return true;
+ }
+
- if (!sp_validate_write_index(spp, index)) {
++ if (!sp_validate_write_index(spp, idx)) {
+ // The value in index is invalid!
+ return false;
+ }
+
+ if (sp_queue_empty(spp)) {
+        // The consumer is assuming that there is no data in the pool.
+        // As we have just created new data, we should inform
+        // the consumer to consume more!
+        // Typically this means: I (the producer) am slow!
+ ++spp->notify_other_side;
+ }
+
- sp_atomic_set_reg(&spp->sp->write_reg, index);
++ sp_atomic_set_reg(&spp->sp->write_reg, idx);
+ sp_reload_regs(spp);
+ // spp->ghost_write_id = spp->c_write_id;
+
+ if (sp_queue_full(spp)) {
+        // There is no free space left to create new items;
+        // we should inform the consumer that it is slow!
+        // Typically this means: the consumer is slow!
+ ++spp->notify_other_side;
+ }
+
+ ++spp->produce_counter;
+ return true;
+ } // end function: sp_set_write_index
+
+
+
+ uint64_t sp_is_slot_clear(struct shared_pool_private *spp, uint64_t id)
+ {
+ sp_reload_regs(spp);
+ if (!sp_queue_empty(spp)) {
+ if (!sp_c_between(spp->c_write_id, id, spp->c_read_id, spp->c_size)) {
+ sp_print_metadata(spp);
+ printf("failed for id %"PRIu64"\n", id);
+ /*
+ printf("callstack: %p %p %p %p\n",
+ __builtin_return_address(0),
+ __builtin_return_address(1),
+ __builtin_return_address(2),
+ __builtin_return_address(3));
+ */
+ }
+ if (!sp_c_between(spp->c_write_id, id, spp->c_read_id, spp->c_size)) {
+ printf("sp_c_between failed in sp_is_slot_clear\n");
+ abort();
+ }
+
+ }
+ /*
+ else {
+ // queue empty!
+ if (id == spp->c_write_id) {
+ sp_print_metadata(spp);
+ printf("failed for id %"PRIu64"\n", id);
+ printf("callstack: %p %p %p %p\n",
+ __builtin_return_address(0),
+ __builtin_return_address(1),
+ __builtin_return_address(2),
+ __builtin_return_address(3));
+ }
+ assert(id != spp->c_write_id);
+ }
+ */
+ return spp->sp->slot_list[id].d.client_data;
+ }
+
+ bool sp_clear_slot(struct shared_pool_private *spp, struct slot_data *d,
+ uint64_t id)
+ {
+ sp_reload_regs(spp);
+
+ if (sp_queue_full(spp)) {
+ return false;
+ }
+
+ if (sp_queue_empty(spp) ||
+ sp_c_between(spp->c_write_id, id, spp->c_read_id, spp->c_size)) {
+
+ sp_copy_slot_data(d, &spp->sp->slot_list[id].d);
+ spp->pre_write_id = id;
+ // printf("%s Slot %p with id %"PRIu64" is cleared and had "
+ // "%"PRIu64", %"PRIu64"\n",
+ // disp_name(), &spp->sp->slot_list[id].d,
+ // id, spp->sp->slot_list[id].d.client_data, d->client_data);
+
+ spp->sp->slot_list[id].d.client_data = 0;
+ ++spp->clear_counter;
+ return true;
+ }
+
+ return false;
+ } // end function: sp_clear_slot
+
+ bool validate_and_empty_produce_slot(struct shared_pool_private *spp,
+ uint64_t produced_slot_id)
+ {
+ sp_reload_regs(spp);
+
+ if (sp_queue_full(spp)) {
+ return false;
+ }
+
+ uint64_t wi = spp->c_write_id;
+ assert(spp->c_write_id == produced_slot_id);
+ // If needed, mark the slot as produced
+ if(!sp_set_write_index(spp, ((wi + 1) % spp->c_size))) {
+ printf("ERROR: validate_and_empty_produce_slot: sp_set_write_index "
+ "failed\n");
+ abort();
+ }
+ return true;
+ } // end function: validate_and_empty_produce_slot
+
+
+ // Adds the data from parameter d into appropriate slot of shared pool queue
+ bool sp_produce_slot(struct shared_pool_private *spp, struct slot_data *d)
+ {
+
+ sp_reload_regs(spp);
+
+ if (sp_queue_full(spp)) {
+ return false;
+ }
+
+ uint64_t wi = spp->c_write_id;
+ sp_copy_slot_data(&spp->sp->slot_list[wi].d, d);
+
+ #if 0
+ #if !defined(__scc__) && !defined(__i386__)
+ cache_flush_range(&spp->sp->slot_list[wi], SLOT_SIZE);
+ #endif // !defined(__scc__) && !defined(__i386__)
+ #endif // 0
+
+ // Incrementing write pointer
+ if(!sp_set_write_index(spp, ((wi + 1) % spp->c_size))) {
+ printf("ERROR: sp_produce_slot: sp_set_write_index failed\n");
+ abort();
+ }
+ return true;
+ } // end function: sp_produce_slot
+
+
+ // Ghost-add data into shared_pool:
+ // add data into free slots, but don't increment the write index.
+ // This allows adding multiple slots and then atomically incrementing the
+ // write index.
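+ //
+ // Usage sketch (illustrative; the loop is pseudocode):
+ //     uint64_t w = sp_get_write_index(spp);
+ //     for each staged slot d:
+ //         sp_ghost_produce_slot(spp, &d, w);  w = (w + 1) % size;
+ //     sp_set_write_index(spp, w);   // publish the whole batch at once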
+ bool sp_ghost_produce_slot(struct shared_pool_private *spp,
- struct slot_data *d, uint64_t index)
++ struct slot_data *d, uint64_t idx)
+ {
+ sp_reload_regs(spp);
+
+ // Make sure that slot provided is proper
+ assert(d != NULL);
+
+ if (sp_queue_full(spp)) {
+ // printf("sp_ghost_produce_slot: queue full\n");
+ return false;
+ }
+
+    // Check if the requested peek is valid or not
- if (!sp_write_peekable_index(spp, index))
++ if (!sp_write_peekable_index(spp, idx))
+ {
+ return false;
+ }
+
- sp_copy_slot_data(&spp->sp->slot_list[index].d, d);
++ sp_copy_slot_data(&spp->sp->slot_list[idx].d, d);
+ #if 0
+ #if !defined(__scc__) && !defined(__i386__)
- cache_flush_range(&spp->sp->slot_list[index], SLOT_SIZE);
++ cache_flush_range(&spp->sp->slot_list[idx], SLOT_SIZE);
+ #endif // !defined(__scc__) && !defined(__i386__)
+ #endif // 0
+ // Incrementing write pointer
- spp->ghost_write_id = (index + 1) % spp->c_size;
++ spp->ghost_write_id = (idx + 1) % spp->c_size;
+ /*
+ printf("ghost produce slot, producing for %"PRIu64", val %"PRIu64"\n",
- index, d->client_data);
- sp_print_slot(&spp->sp->slot_list[index].d);
++ idx, d->client_data);
++ sp_print_slot(&spp->sp->slot_list[idx].d);
+ */
+ return true;
+ } // end function: sp_ghost_produce_slot
+
+ // Reads a slot without changing the read pointer; instead it advances the
+ // local ghost_read_id to track how much has been read.
+ // To be used by the driver when it adds a packet to the hardware queue for
+ // sending but the packet is not yet sent.
+ // Once the packet is actually done, the read pointer can be advanced.
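+ //
+ // Usage sketch (illustrative; stage_in_hw_queue is a placeholder):
+ //     struct slot_data s;
+ //     while (sp_ghost_read_slot(spp, &s)) stage_in_hw_queue(&s);
+ //     ... later, once the hardware reports completion:
+ //     sp_set_read_index(spp, spp->ghost_read_id);  // cf. sp_ghost_read_confirm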
+ bool sp_ghost_read_slot(struct shared_pool_private *spp, struct slot_data *dst)
+ {
+ sp_reload_regs(spp);
+
+ // Make sure that slot provided is proper
+ assert(dst != NULL);
+
+ // Make sure that there is slot available for consumption
+ if (sp_queue_empty(spp)) {
+ return false;
+ }
+
+    // Check if the requested peek is valid or not
+ if (!sp_read_peekable_index(spp, spp->ghost_read_id))
+ {
+ return false;
+ }
+
+ // Copying the slot data contents into provided slot
+ /*
+ #if !defined(__scc__) && !defined(__i386__)
+ cache_flush_range(&spp->sp->slot_list[spp->ghost_read_id], SLOT_SIZE);
+ #endif // !defined(__scc__) && !defined(__i386__)
+ */
+ sp_copy_slot_data(dst, &spp->sp->slot_list[spp->ghost_read_id].d);
+ /* printf("After copying data from id %"PRIu64"\n", spp->ghost_read_id);
+ sp_print_slot(&spp->sp->slot_list[spp->ghost_read_id].d);
+ */
+ spp->ghost_read_id = (spp->ghost_read_id + 1) % spp->c_size;
+ return true;
+ } // end function: sp_ghost_read_slot
+
+
+
+ // FIXME: not used; maybe it should be removed
+ bool sp_ghost_read_confirm(struct shared_pool_private *spp)
+ {
+ return (sp_set_read_index(spp, spp->ghost_read_id));
+ }
+
+ // Swaps the slot provided in new_slot with the next slot available for
+ // consumption.
+ // To be used by the application to receive a packet and register a new pbuf
+ // at the same time.
+ bool sp_replace_slot(struct shared_pool_private *spp, struct slot_data *new_slot)
+ {
+ sp_reload_regs(spp);
+
+ // Make sure that slot provided is proper
+ if (!validate_slot(new_slot)) {
+ return false;
+ }
+
+ // Make sure that there is slot available for consumption
+ if (sp_queue_empty(spp)) {
+ return false;
+ }
+
+ uint64_t ri = spp->c_read_id;
+ // swapping the slot_data contents between ri and new_slot
+ struct slot_data tmp;
+ #if 0
+ #if !defined(__scc__) && !defined(__i386__)
+ cache_flush_range(&spp->sp->slot_list[ri], SLOT_SIZE);
+ #endif // !defined(__scc__) && !defined(__i386__)
+ #endif // 0
+ sp_copy_slot_data(&tmp, &spp->sp->slot_list[ri].d);
+ sp_copy_slot_data(&spp->sp->slot_list[ri].d, new_slot);
+ sp_copy_slot_data(new_slot, &tmp);
+ #if 0
+ #if !defined(__scc__) && !defined(__i386__)
+ cache_flush_range(&spp->sp->slot_list[ri], SLOT_SIZE);
+ #endif // !defined(__scc__) && !defined(__i386__)
+ #endif // 0
+ // Incrementing read index
+ if(!sp_set_read_index(spp, ((ri + 1) % spp->c_size))) {
+ printf("sp_set_read_index failed\n");
+ abort();
+ }
+ return true;
+ } // end function: sp_replace_slot
+
+
+ // ****************** For debugging purposes **************
+ void sp_print_metadata(struct shared_pool_private *spp)
+ {
+ assert(spp != NULL);
+ // sp_reload_regs(spp);
+ /* printf("SPP Q C[%"PRIu8"], R[%"PRIu8"], GRI[%"PRIu64"], GWI[%"PRIu64"] "
+ "pre_write_id[%"PRIu64"]\n",
+ spp->is_creator?1:0, spp->role,
+ spp->ghost_read_id, spp->ghost_write_id, spp->pre_write_id);
+ */
+ printf("SPP S PRO[%"PRIu64"], CON[%"PRIu64"], CLEAR[%"PRIu64"]\n",
+ spp->produce_counter, spp->consume_counter, spp->clear_counter);
+ printf("SPP S C C-R[%"PRIu64"], C-W[%"PRIu64"]\n",
+ spp->c_read_id, spp->c_write_id);
+
+ struct shared_pool *sp = spp->sp;
+ assert(sp != NULL);
+ /*
+ printf("SP Q len[%"PRIu64"], RI[%"PRIu64"], WI[%"PRIu64"], elem[%"PRIu64"]"
+ " free[%"PRIu64"]\n",
+ sp->size_reg.value, sp->read_reg.value, sp->write_reg.value,
+ sp_queue_elements_count(spp),
+ sp_queue_free_slots_count(spp));
+ */
+ }
+
+
+ void sp_print_slot(struct slot_data *d)
+ {
+ printf("@%p, buf[%"PRIu64"], pbuf_id[%"PRIu64"], offset[%"PRIu64"], "
+ "len[%"PRIu64"], n_p[%"PRIu64"], CL[%"PRIu64"], ts[%"PRIu64"]\n",
+ d, d->buffer_id, d->pbuf_id, d->offset, d->len,
+ d->no_pbufs, d->client_data, d->ts);
+ }
+
+ // Code for testing and debugging the library
+ void sp_print_pool(struct shared_pool_private *spp)
+ {
+ sp_reload_regs(spp);
+ assert(spp != NULL);
+ struct shared_pool *sp = spp->sp;
+ assert(sp != NULL);
+
+ uint64_t queue_size = sp->size_reg.value;
+ sp_print_metadata(spp);
+ int i = 0;
+ for(i = 0; i < queue_size; ++i) {
+ sp_print_slot(&sp->slot_list[i].d);
+ }
+ }
+
errval_t err = bcc->rpc.vtbl.print_stats(&bcc->rpc);
assert(err_is_ok(err));
- #if 0
- printf("\n\n");
- printf("open = %lu\n"
- "create = %lu\n"
- "truncate = %lu\n"
- "stat = %lu\n"
- "close = %lu\n"
- "opendir = %lu\n"
- "dir_read_next= %lu\n"
- "closedir = %lu\n"
- "mkdir = %lu\n"
- "rmdir = %lu\n"
- "remove = %lu\n",
- nopen, ncreate, ntruncate, nstat, nclose, nopendir, ndir_read_next,
- nclosedir, nmkdir, nrmdir, nremove);
- printf("\n\n");
-
+ printf("cache[%d] stats\n", disp_get_core_id());
+
+ double total_misses_time, total_hits_time;
+ total_misses_time = total_hits_time = 0;
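+    // Assumption: Stats[op][0] holds the miss counters/ticks and
+    // Stats[op][1] the hits (per the variable names below); dividing
+    // ticks by tscperms converts TSC ticks to milliseconds.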
+ for (int i=0; i<cacheTotalOps; i++) {
+ size_t misses_cnt = Stats[i][0].cnt;
+ size_t hits_cnt = Stats[i][1].cnt;
+ double misses_time = (double)Stats[i][0].ticks/(double)tscperms;
+ double hits_time = (double)Stats[i][1].ticks/(double)tscperms;
+ double total_time = misses_time + hits_time;
+ total_hits_time += hits_time;
+ total_misses_time += misses_time;
+ printf(" %-12s: MISSES[cnt:%6zu time:%12.3lf avg:%12.3lf] | HITS[cnt:%6zu time:%12.3lf avg:%12.3lf] TOTAL:%12.3lf\n",
+ cacheOpsName[i],
+ misses_cnt, misses_time, misses_cnt != 0 ? misses_time/(double)misses_cnt : 0,
+ hits_cnt, hits_time, hits_cnt != 0 ? hits_time /(double)hits_cnt : 0,
+ total_time);
+ }
+ printf("==== TOTAL: MISSES[time:%12.3lf] | HITS[time:%12.3lf ] TOTAL:%12.3lf\n",
+ total_misses_time, total_hits_time, total_misses_time + total_hits_time);
+
+ #ifdef CACHE_META_DATA
printf("meta data\n"
"=========\n"
- "hits = %zu\n"
- "misses = %zu\n"
- "overwrites = %zu\n"
+ "hits = %lu\n"
+ "misses = %lu\n"
+ "overwrites = %lu\n"
"allocations = %u\n",
meta_hits, meta_misses, meta_overwrites, alloc_ptr);
- printf("\n\n");
- #endif
+ #endif
}
static errval_t read(void *st, vfs_handle_t handle, void *buffer, size_t bytes,
// Fake via a setting seek
if((off_t)nh->cached_filesize < 0) {
- printf("filesize = %zu\n", nh->cached_filesize);
+ printf("filesize = %lu\n", nh->cached_filesize);
}
assert((off_t)nh->cached_filesize + offset >= 0);
- return bst->orig_ops->seek(bst->orig_st, handle, VFS_SEEK_SET,
- nh->cached_filesize + offset);
+ offset = nh->cached_filesize + offset;
}
#endif
return SYS_ERR_OK;
}
--#include <dmalloc/dmalloc.h>
++/* alt_malloc is provided only by FREEC -AKK */
++
++#ifdef FREEC
++#include <dmalloc/dmalloc.h>
typedef void *(*alt_malloc_t)(size_t bytes);
extern alt_malloc_t alt_malloc;
alt_free = &dlfree;
alt_realloc = &dlrealloc;
}
++#endif
int main(int argc, char *argv[])
{
errval_t err;
++ #ifdef FREEC
init_dmalloc();
++ #endif
err = create_cache_mem(CACHE_SIZE);
if(err_is_fail(err)) {
static void get_start_handler(struct bcache_binding *b, char *key, size_t key_len)
{
errval_t err;
- bool haveit;
- uintptr_t index, length = 0;
+ key_state_t ks;
- uintptr_t index, length = 0;
++ uintptr_t idx, length = 0;
assert(key > (char *)BASE_PAGE_SIZE);
- haveit = cache_lookup(key, key_len, &index, &length);
- ks = cache_lookup(key, key_len, &index, &length);
++ ks = cache_lookup(key, key_len, &idx, &length);
- if(!haveit) {
- index = cache_allocate(key, key_len);
- } else {
+ if (ks == KEY_INTRANSIT) { // key is in transit: wait for it!
free(key);
- cache_register_wait(index, b);
++ cache_register_wait(idx, b);
+ return; // get_start_response() will be called when key arrives
+ } else if (ks == KEY_MISSING) {
- index = cache_allocate(key, key_len);
++ idx = cache_allocate(key, key_len);
+ } else if (ks == KEY_EXISTS) {
+ free(key);
+ } else {
+ assert(0);
+ }
+
+ #if 0
+ // Block everyone if we have a write on this block
- if(inwrite[index]) {
++ if(inwrite[idx]) {
+ struct wait_list *w = malloc(sizeof(struct wait_list));
+ w->b = b;
- w->next = waiting[index];
- waiting[index] = w;
++ w->next = waiting[idx];
++ waiting[idx] = w;
+ }
+
+ if(write) {
- inwrite[index] = true;
++ inwrite[idx] = true;
}
+ #endif
- err = b->tx_vtbl.get_start_response(b, NOP_CONT, index, haveit,
+ bool haveit = (ks == KEY_EXISTS);
- err = b->tx_vtbl.get_start_response(b, NOP_CONT, index, haveit,
++ err = b->tx_vtbl.get_start_response(b, NOP_CONT, idx, haveit,
haveit ? 1 : 0, length);
if(err_is_fail(err)) {
USER_PANIC_ERR(err, "get_start_response");
}
static void get_stop_handler(struct bcache_binding *b, uint64_t transid,
-- uint64_t index, uint64_t length)
++ uint64_t idx, uint64_t length)
{
errval_t err;
if(transid == 0) {
-- cache_update(index, length);
++ cache_update(idx, length);
}
+ #if 0
- if(inwrite[index]) {
++ if(inwrite[idx]) {
+ // Wake up all waiters
+
- inwrite[index] = false;
++ inwrite[idx] = false;
+ }
+ #endif
+
+ /* notify issuer */
err = b->tx_vtbl.get_stop_response(b, NOP_CONT);
if(err_is_fail(err)) {
USER_PANIC_ERR(err, "get_stop_response");
}
+
+ /* notify waiters */
+ if (transid == 0) {
+ struct bcache_binding *wb;
- while ((wb = cache_get_next_waiter(index)) != NULL) {
- uint64_t l = cache_get_block_length(index);
- err = b->tx_vtbl.get_start_response(wb, NOP_CONT, index, true, 1, l);
++ while ((wb = cache_get_next_waiter(idx)) != NULL) {
++ uint64_t l = cache_get_block_length(idx);
++ err = b->tx_vtbl.get_start_response(wb, NOP_CONT, idx, true, 1, l);
+ if(err_is_fail(err)) {
+ USER_PANIC_ERR(err, "get_start_response");
+ }
+ }
+ }
}
static void new_client_handler(struct bcache_binding *b)
rxd = &receive_ring[receive_bufptr];
if ((rxd->rx_read_format.info.status.dd) &&
- (rxd->rx_read_format.info.status.eop) &&
- (!local_pbuf[receive_bufptr].event_sent)) { /* valid packet received */
-
- new_packet = true;
-
- /* FIXME: following two conditions might be repeating, hence
- * extra. Check it out. */
- if(internal_memory_pa == NULL || internal_memory_va == NULL) {
- // E1000N_DEBUG("no internal memory yet#####.\n");
- buffer = NULL;
- /* FIXME: control should go out of parent if block */
- goto end;
- }
-
- /* Ensures that netd is up and running */
- if(waiting_for_netd()){
- E1000N_DEBUG("still waiting for netd to register buffers\n");
- buffer = NULL;
- goto end;
- }
+ (rxd->rx_read_format.info.status.eop) &&
+ (!local_pbuf[receive_bufptr].event_sent)) {
+ // valid packet received
+
+ new_packet = true;
+
+ // FIXME: following two conditions might be repeating, hence
+ // extra. Check it out.
+ if(internal_memory_pa == NULL || internal_memory_va == NULL) {
+ // E1000N_DEBUG("no internal memory yet#####.\n");
+ buffer = NULL;
+ // FIXME: control should go out of parent if block
+ goto end;
+ }
+ // Ensures that netd is up and running
+ if(waiting_for_netd()){
+ E1000N_DEBUG("still waiting for netd to register buffers\n");
+ buffer = NULL;
+ goto end;
+ }
- len = rxd->rx_read_format.info.length;
- if (len < 0 || len > 1522) {
- E1000N_DEBUG("ERROR: pkt with len %lu\n", len);
- goto end;
- }
+ len = rxd->rx_read_format.info.length;
+ if (len < 0 || len > 1522) {
+ E1000N_DEBUG("ERROR: pkt with len %zu\n", len);
+ goto end;
+ }
+ total_rx_datasize += len;
- // E1000N_DEBUG("packet received of size %zu..\n", len);
+ // E1000N_DEBUG("packet received of size %lu..\n", len);
- buffer_address = (void*)rxd->rx_read_format.buffer_address;
- data = (buffer_address - internal_memory_pa)
- + internal_memory_va;
+ buffer_address = (void*)rxd->rx_read_format.buffer_address;
+ data = (buffer_address - internal_memory_pa) + internal_memory_va;
- if (data == NULL || len == 0){
- printf("ERROR: Incorrect packet\n");
- // abort();
- /* FIXME: What should I do when such errors occur. */
- buffer = NULL;
- goto end;
- }
- process_received_packet(data, len);
+ if (data == NULL || len == 0){
+ printf("ERROR: Incorrect packet\n");
+ // abort();
+            // FIXME: What should I do when such errors occur?
+ buffer = NULL;
+ goto end;
+ }
+
+ #if !defined(__scc__) && !defined(__i386__)
+ cache_flush_range(data, len);
+ #endif // !defined(__scc__) && !defined(__i386__)
- } /* end if: valid packet received */
+ process_received_packet(data, len);
+
+ #if 0
+ // This code is useful for RX micro-benchmark
+ // only to measures performance of accepting incoming packets
+ if (g_cl != NULL) {
+ if (g_cl->debug_state == 4) {
+
+ uint64_t ts = rdtsc();
+
+ // memcpy_fast(tmp_buf, data, len);
+ process_received_packet(data, len);
+ total_processing_time = total_processing_time +
+ (rdtsc() - ts);
+
+ } else {
+ process_received_packet(data, len);
+ }
+ } else {
+ process_received_packet(data, len);
+ }
+ #endif // 0
+
+ } // end if: valid packet received
else {
- /* false alarm. Something else happened, not packet arrival */
+ // false alarm. Something else happened, not packet arrival
return false;
}
RTL8029_DEBUG("write page\n");
rtl8029as_rbcr_wr(&rtl, cl->len);// Number of bytes to transfer
rtl8029as_rsar_wr(&rtl, dst); // Destination in NIC mem
- rtl8029as_cr_wr(&rtl, cr); // Start write
+ // Start write
+ rtl8029as_cr_t cr = rtl8029as_cr_default;
+ cr = rtl8029as_cr_sta_insert(cr, 1);
+ cr = rtl8029as_cr_rd_insert(cr, rtl8029as_rwr);
+ rtl8029as_cr_wr(&rtl, cr);
-
- for (int index = 0; index < cl->rtpbuf; index++) {
+ for (int idx = 0; idx < cl->rtpbuf; idx++) {
- /*
- RTL8029_DEBUG("sending %dth rx_pbuf\n", idx);
+ /*
- RTL8029_DEBUG("sending %dth rx_pbuf\n", index);
++ RTL8029_DEBUG("sending %dth rx_pbuf\n", idx);
- RTL8029_DEBUG("pa %p va %p offset %lu\n",
- (void *)cl->buffer_ptr->pa, cl->buffer_ptr->va, cl->pbuf[idx].offset);
- */
+ RTL8029_DEBUG("pa %p va %p offset %lu\n",
- (void *)cl->buffer_ptr->pa, cl->buffer_ptr->va, cl->pbuf[index].offset);
++ (void *)cl->buffer_ptr->pa, cl->buffer_ptr->va, cl->pbuf[idx].offset);
+ */
#if defined(__i386__)
- uint8_t *src = (uint8_t *) ((uintptr_t)(cl->buffer_ptr->va + cl->pbuf[idx].offset));
- uint8_t *src = (uint8_t *) ((uintptr_t)(cl->buffer_ptr->va + cl->pbuf[index].offset));
++ uint8_t *src = (uint8_t *) ((uintptr_t)(cl->buffer_ptr->va + cl->pbuf[idx].offset));
#else
- uint8_t *src = (uint8_t *) ((uint64_t)cl->buffer_ptr->va + cl->pbuf[index].offset);
+ uint8_t *src = (uint8_t *) ((uint64_t)cl->buffer_ptr->va + cl->pbuf[idx].offset);
#endif
- pbuf_len = cl->pbuf[index].len;
+ pbuf_len = cl->pbuf[idx].len;
uint32_t i = 0;
--------------------------------------------------------------------------
[ build application { target = "fish",
-- cFiles = [ "fish.c", "font.c" ],
- addLibraries = [ "vfs", "nfs", "lwip", "contmng",
- "pci", "trace", "skb" ],
- flounderBindings = [ "pixels" ],
- flounderExtraBindings = [ ("spawn", ["rpcclient"]),
- ("mem", ["rpcclient"]) ],
- omitCFlags = [ "-Wredundant-decls" ]
++ cFiles = [ "fish.c", "font.c" ],
+ addLibraries = [ "vfs", "nfs", "lwip",
+ "contmng", "procon", "pci", "trace", "skb" ],
+ flounderBindings = [ "pixels" ]
++ -- omitCFlags = [ "-Wredundant-decls" ]
}
]
errval_t monitor_cap_create(struct capref dest, struct capability *cap,
coreid_t core_id);
errval_t monitor_identify_cnode_get_cap(struct capability *cnode_raw,
- caddr_t slot, struct capability *ret);
+ capaddr_t slot, struct capability *ret);
errval_t monitor_nullify_cap(struct capref cap);
errval_t monitor_retype_remote_cap(struct capref croot,
- caddr_t src, enum objtype newtype,
- int objbits, caddr_t to, caddr_t slot,
+ capaddr_t src, enum objtype newtype,
+ int objbits, capaddr_t to, capaddr_t slot,
int bits);
-errval_t monitor_delete_remote_cap(struct capref croot, caddr_t src, int bits);
-errval_t monitor_revoke_remote_cap(struct capref croot, caddr_t src, int bits);
+errval_t monitor_delete_remote_cap(struct capref croot, capaddr_t src, int bits);
+errval_t monitor_revoke_remote_cap(struct capref croot, capaddr_t src, int bits);
-
- /* route.c */
- void route_done_join(struct intermon_binding *st, routeid_t id,
- coreid_t core, errval_t err, iref_t iref);
- void route_done_connect(struct intermon_binding *st, routeid_t id,
- errval_t err);
- errval_t route_initialize_bsp(bool monitors_up[MAX_CPUS]);
- errval_t route_join_app_core(struct intermon_binding *st,
- intermon_ROUTE_TYPE_t route_type,
- routeid_t routeid);
- errval_t route_connect_app_core(intermon_ROUTE_TYPE_t route_type,
- routeid_t routeid, iref_t * irefs, size_t size);
- errval_t route_rcap_lock_req(struct capability * cap, coremask_t send_to,
- coreid_t from_core, struct rcap_st * st,
- bool recirsive);
- errval_t route_rcap_unlock(struct capability * cap, coremask_t send_to,
- coreid_t from_core, bool recirsive);
- errval_t route_rcap_new_core(struct capability * cap, coremask_t send_to,
- coreid_t send_core, coreid_t recv_core);
- errval_t route_rcap_send_details(struct capability * cap, coremask_t send_to,
- bool has_desc);
- errval_t route_rcap_request_details(struct capability * cap, coremask_t send_to);
- errval_t route_rcap_lock_reply(errval_t reply_err, coremask_t locked_cores,
- bool has_desc, recordid_t ccast_recordid);
- errval_t route_rcap_retype(struct capability * cap, bool has_descendents,
- coremask_t send_to);
- errval_t route_rcap_delete(struct capability * cap, coremask_t send_to);
- errval_t route_rcap_revoke(struct capability * cap);
-
/* monitor_server.c */
errval_t monitor_server_arch_init(struct monitor_binding *b);
void set_monitor_rpc_iref(iref_t iref);
static void remote_cap_delete_phase_2(void * st_arg);
static void remote_cap_revoke_phase_2(void * st_arg);
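+
+ // SAFEINLINE presumably expands to __attribute__((noinline)) on the gcc
+ // 4.4.x/i386 toolchains hit by the inlining bug that the removed #if
+ // blocks below used to open-code, and to nothing otherwise.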
- // workaround inlining bug with gcc 4.4.1 shipped with ubuntu 9.10 and 4.4.3 in Debian
- #if defined(__i386__) && defined(__GNUC__) \
- && __GNUC__ == 4 && __GNUC_MINOR__ == 4 && __GNUC_PATCHLEVEL__ <= 3
- static __attribute__((noinline)) struct retype_st *
+ static SAFEINLINE struct retype_st *
alloc_retype_st(struct monitor_blocking_binding *b, struct capref croot,
- caddr_t src, uint64_t new_type, uint8_t size_bits,
- caddr_t to, caddr_t slot, int dcn_vbits)
+ capaddr_t src, uint64_t new_type, uint8_t size_bits,
+ capaddr_t to, capaddr_t slot, int dcn_vbits)
- #else
- static struct retype_st *alloc_retype_st(struct monitor_blocking_binding *b,
- struct capref croot, capaddr_t src,
- uint64_t new_type, uint8_t size_bits,
- capaddr_t to, capaddr_t slot, int dcn_vbits)
- #endif
{
struct retype_st * st;
if (static_retype_state_used) {
}
}
- // workaround inlining bug with gcc 4.4.1 shipped with ubuntu 9.10 and 4.4.3 in Debian
- #if defined(__i386__) && defined(__GNUC__) \
- && __GNUC__ == 4 && __GNUC_MINOR__ == 4 && __GNUC_PATCHLEVEL__ <= 3
- static __attribute__((noinline)) struct delete_st *
+ static SAFEINLINE struct delete_st *
alloc_delete_st(struct monitor_blocking_binding *b, struct capref croot,
- caddr_t src, uint8_t vbits)
+ capaddr_t src, uint8_t vbits)
- #else
- static struct delete_st* alloc_delete_st(struct monitor_blocking_binding *b,
- struct capref croot, capaddr_t src,
- uint8_t vbits)
- #endif
{
struct delete_st * st;
if (static_delete_state_used) {
}
// workaround inlining bug with gcc 4.4.1 shipped with ubuntu 9.10 and 4.4.3 in Debian
- #if defined(__i386__) && defined(__GNUC__) \
- && __GNUC__ == 4 && __GNUC_MINOR__ == 4 && __GNUC_PATCHLEVEL__ <= 3
- static __attribute__((noinline)) struct revoke_st *
+ static SAFEINLINE struct revoke_st *
alloc_revoke_st(struct monitor_blocking_binding *b, struct capref croot,
- caddr_t src, uint8_t vbits)
+ capaddr_t src, uint8_t vbits)
- #else
- static struct revoke_st *alloc_revoke_st(struct monitor_blocking_binding *b,
- struct capref croot, capaddr_t src,
- uint8_t vbits)
- #endif
{
struct revoke_st * st;
if (static_revoke_state_used) {
--
--------------------------------------------------------------------------
+--
+-- This application is not very portable across different libc implementations
+--
+
[ build application { target = "net-test",
cFiles = [ "net-test.c" ],
- addLibraries = [ "posixcompat", "vfs", "nfs", "lwip",
- "contmng", "procon", "timer", "hashtable" ],
+ addLibraries = [ "vfs", "nfs", "lwip", "contmng", "timer" ],
- architectures = [ "x86_64", "scc" ],
- omitCFlags = [ "-Wredundant-decls" ]
+ architectures = [ "x86_64", "scc" ]
++ -- omitCFlags = [ "-Wredundant-decls" ]
}
]
"rhs.c", "set_constants.c", "setup_mpi.c",
"solve_subs.c", "timers.c", "verify.c", "x_solve.c",
"y_solve.c", "z_solve.c" ],
- addLibraries = [ "rcce", "libroute" ],
- addLibraries = [ "rcce", "msun" ],
++                        addLibraries = [ "rcce" ],
addIncludes = [ "/include/rcce" ]
}
"print_results.c", "proc_grid.c", "read_input.c",
"rhs.c", "setbv.c", "setcoeff.c", "setiv.c",
"ssor.c", "subdomain.c", "timers.c", "verify.c" ],
- addLibraries = [ "rcce", "libroute" ],
- addLibraries = [ "rcce", "msun" ],
++                        addLibraries = [ "rcce" ],
addIncludes = [ "/include/rcce" ]
}
flounderBindings = [ "skb" ],
addIncludes = [ "eclipse_kernel/src"],
addLibraries = [ "eclipse", "shm", "dummies",
- "icsolver", "vfs", "nfs", "lwip", "contmng"],
- "posixcompat", "msun",
+ "icsolver", "vfs", "nfs", "lwip",
+ "contmng", "procon", "hashtable"],
architectures = [ arch ]
}
in
[ build application { target = "testdesc",
cFiles = [ "testdesc.c" ],
- addLibraries = [ "vfs" , "nfs", "lwip", "contmng", "timer" ],
- addLibraries = [ "posixcompat", "vfs" , "nfs", "lwip",
- "contmng", "procon", "timer" ],
++ addLibraries = [ "vfs" , "nfs", "lwip", "contmng", "procon", "timer" ],
flounderBindings = [ "unixsock" ]
-- flounderExtraBindings = [ ("unixsock",["rpcclient"]) ]
},