Posted to commits@mynewt.apache.org by st...@apache.org on 2016/09/29 01:34:42 UTC

[35/49] incubator-mynewt-core git commit: directory re-org

http://git-wip-us.apache.org/repos/asf/incubator-mynewt-core/blob/6a7432f4/kernel/os/src/os_dev.c
----------------------------------------------------------------------
diff --git a/kernel/os/src/os_dev.c b/kernel/os/src/os_dev.c
new file mode 100644
index 0000000..fa8709b
--- /dev/null
+++ b/kernel/os/src/os_dev.c
@@ -0,0 +1,309 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#include "os/os.h"
+#include "os/queue.h"
+#include "os/os_dev.h"
+
+#include <string.h>
+
+static STAILQ_HEAD(, os_dev) g_os_dev_list;
+
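+/**
+ * Initialize a device's fields; helper for os_dev_create().
+ */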
+static int
+os_dev_init(struct os_dev *dev, char *name, uint8_t stage,
+        uint8_t priority, os_dev_init_func_t od_init, void *arg)
+{
+    dev->od_name = name;
+    dev->od_stage = stage;
+    dev->od_priority = priority;
+    /* assume these are set after the fact. */
+    dev->od_flags = 0;
+    dev->od_open_ref = 0;
+    dev->od_init = od_init;
+    dev->od_init_arg = arg;
+    memset(&dev->od_handlers, 0, sizeof(dev->od_handlers));
+
+    return (0);
+}
+
+/**
+ * Add the device to the device list.  This is a private function.
+ *
+ * @param dev The device to add.
+ *
+ * @return 0 on success, non-zero on failure.
+ */
+static int
+os_dev_add(struct os_dev *dev)
+{
+    struct os_dev *cur_dev;
+
+    /* If no devices present, insert into head */
+    if (STAILQ_FIRST(&g_os_dev_list) == NULL) {
+        STAILQ_INSERT_HEAD(&g_os_dev_list, dev, od_next);
+        return (0);
+    }
+
+    /* Add the device to the list, sorted first by stage and then by
+     * priority.  The list is kept in this order so that devices are
+     * initialized in the proper sequence.
+     */
+    cur_dev = NULL;
+    STAILQ_FOREACH(cur_dev, &g_os_dev_list, od_next) {
+        if (cur_dev->od_stage > dev->od_stage) {
+            continue;
+        }
+
+        if (dev->od_priority >= cur_dev->od_priority) {
+            break;
+        }
+    }
+
+    if (cur_dev) {
+        STAILQ_INSERT_AFTER(&g_os_dev_list, cur_dev, dev, od_next);
+    } else {
+        STAILQ_INSERT_TAIL(&g_os_dev_list, dev, od_next);
+    }
+
+    return (0);
+}
+
+/**
+ * Create a new device in the kernel.
+ *
+ * @param dev The device to create.
+ * @param name The name of the device to create.
+ * @param stage The stage to initialize that device to.
+ * @param priority The priority of initializing that device
+ * @param od_init The initialization function to call for this
+ *                device.
+ * @param arg The argument to provide this device initialization
+ *            function.
+ *
+ * @return 0 on success, non-zero on failure.
+ */
+int
+os_dev_create(struct os_dev *dev, char *name, uint8_t stage,
+        uint8_t priority, os_dev_init_func_t od_init, void *arg)
+{
+    int rc;
+
+    rc = os_dev_init(dev, name, stage, priority, od_init, arg);
+    if (rc != 0) {
+        goto err;
+    }
+
+    rc = os_dev_add(dev);
+    if (rc != 0) {
+        goto err;
+    }
+
+    return (0);
+err:
+    return (rc);
+}
+
+/**
+ * Initialize all devices for a given stage.
+ *
+ * @param stage The stage to initialize.
+ *
+ * @return 0 on success, non-zero on failure.
+ */
+int
+os_dev_initialize_all(uint8_t stage)
+{
+    struct os_dev *dev;
+    int rc;
+
+    STAILQ_FOREACH(dev, &g_os_dev_list, od_next) {
+        if (dev->od_stage == stage) {
+            rc = dev->od_init(dev, dev->od_init_arg);
+            if (rc != 0) {
+                if (dev->od_flags & OS_DEV_F_INIT_CRITICAL) {
+                    goto err;
+                }
+            } else {
+                dev->od_flags |= OS_DEV_F_STATUS_READY;
+            }
+        }
+    }
+
+    return (0);
+err:
+    return (rc);
+}
+
+/**
+ * Suspend all devices.
+ *
+ * @param suspend_t The number of ticks to suspend each device for
+ * @param force Whether or not to force the suspension of each device
+ *
+ * @return 0 on success, or OS_ERROR if any device failed to suspend.
+ */
+int
+os_dev_suspend_all(os_time_t suspend_t, uint8_t force)
+{
+    struct os_dev *dev;
+    int suspend_failure;
+    int rc;
+
+    suspend_failure = 0;
+    STAILQ_FOREACH(dev, &g_os_dev_list, od_next) {
+        rc = os_dev_suspend(dev, suspend_t, force);
+        if (rc != 0) {
+            suspend_failure = OS_ERROR;
+        }
+    }
+
+    return (suspend_failure);
+}
+
+/**
+ * Resume all the devices that were suspended.
+ *
+ * @return 0 on success, -1 if any of the devices have failed to resume.
+ */
+int
+os_dev_resume_all(void)
+{
+    struct os_dev *dev;
+    int rc;
+
+    STAILQ_FOREACH(dev, &g_os_dev_list, od_next) {
+        rc = os_dev_resume(dev);
+        if (rc != 0) {
+            goto err;
+        }
+    }
+
+    return (0);
+err:
+    return (rc);
+}
+
+/**
+ * Look up a device by name.  This is an internal function.
+ *
+ * @param name The name of the device to look up.
+ *
+ * @return A pointer to the device corresponding to name, or NULL if not found.
+ */
+static struct os_dev *
+os_dev_lookup(char *name)
+{
+    struct os_dev *dev;
+
+    dev = NULL;
+    STAILQ_FOREACH(dev, &g_os_dev_list, od_next) {
+        if (!strcmp(dev->od_name, name)) {
+            break;
+        }
+    }
+    return (dev);
+}
+
+/**
+ * Open a device.
+ *
+ * @param devname The name of the device to open
+ * @param timo The timeout to pass to the device's open handler
+ * @param arg The argument to the device open() call.
+ *
+ * @return A pointer to the opened device on success; NULL on failure.
+ */
+struct os_dev *
+os_dev_open(char *devname, uint32_t timo, void *arg)
+{
+    struct os_dev *dev;
+    os_sr_t sr;
+    int rc;
+
+    dev = os_dev_lookup(devname);
+    if (dev == NULL) {
+        return (NULL);
+    }
+
+    /* Device is not ready to be opened. */
+    if ((dev->od_flags & OS_DEV_F_STATUS_READY) == 0) {
+        return (NULL);
+    }
+
+    if (dev->od_handlers.od_open) {
+        rc = dev->od_handlers.od_open(dev, timo, arg);
+        if (rc != 0) {
+            goto err;
+        }
+    }
+
+    OS_ENTER_CRITICAL(sr);
+    ++dev->od_open_ref;
+    dev->od_flags |= OS_DEV_F_STATUS_OPEN;
+    OS_EXIT_CRITICAL(sr);
+
+    return (dev);
+err:
+    return (NULL);
+}
+
+/**
+ * Close a device.
+ *
+ * @param dev The device to close
+ *
+ * @return 0 on success, non-zero on failure.
+ */
+int
+os_dev_close(struct os_dev *dev)
+{
+    int rc;
+    os_sr_t sr;
+
+    if (dev->od_handlers.od_close) {
+        rc = dev->od_handlers.od_close(dev);
+        if (rc != 0) {
+            goto err;
+        }
+    }
+
+    OS_ENTER_CRITICAL(sr);
+    if (--dev->od_open_ref == 0) {
+        dev->od_flags &= ~OS_DEV_F_STATUS_OPEN;
+    }
+    OS_EXIT_CRITICAL(sr);
+
+    return (0);
+err:
+    return (rc);
+}
+
+/**
+ * Clears the device list.  This function does not close any devices or free
+ * any resources; its purpose is to allow a full system reset between unit
+ * tests.
+ */
+void
+os_dev_reset(void)
+{
+    STAILQ_INIT(&g_os_dev_list);
+}
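
[Editor's note: a minimal usage sketch for the device API above, not part
of the commit.  The device name, the init callback, and the use of the
OS_DEV_INIT_PRIMARY stage constant (from os_dev.h) are illustrative
assumptions.]

    #include <assert.h>
    #include "os/os.h"
    #include "os/os_dev.h"

    static struct os_dev ex_dev;

    /* Hypothetical init callback, invoked by os_dev_initialize_all(). */
    static int
    ex_dev_init(struct os_dev *dev, void *arg)
    {
        /* Configure the underlying hardware here. */
        return 0;
    }

    void
    dev_example(void)
    {
        struct os_dev *dev;
        int rc;

        /* Register the device; stage and priority control init order. */
        rc = os_dev_create(&ex_dev, "ex_dev", OS_DEV_INIT_PRIMARY, 0,
                           ex_dev_init, NULL);
        assert(rc == 0);

        /* Normally done by the kernel during boot, one call per stage. */
        rc = os_dev_initialize_all(OS_DEV_INIT_PRIMARY);
        assert(rc == 0);

        /* Open by name; close when done. */
        dev = os_dev_open("ex_dev", OS_TIMEOUT_NEVER, NULL);
        assert(dev != NULL);
        rc = os_dev_close(dev);
        assert(rc == 0);
    }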

http://git-wip-us.apache.org/repos/asf/incubator-mynewt-core/blob/6a7432f4/kernel/os/src/os_eventq.c
----------------------------------------------------------------------
diff --git a/kernel/os/src/os_eventq.c b/kernel/os/src/os_eventq.c
new file mode 100644
index 0000000..f9cc283
--- /dev/null
+++ b/kernel/os/src/os_eventq.c
@@ -0,0 +1,235 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+
+#include "os/os.h"
+
+#include <string.h>
+
+/**
+ * Initialize the event queue
+ *
+ * @param evq The event queue to initialize
+ */
+void
+os_eventq_init(struct os_eventq *evq)
+{
+    memset(evq, 0, sizeof(*evq));
+    STAILQ_INIT(&evq->evq_list);
+}
+
+/**
+ * Put an event on the event queue.
+ *
+ * @param evq The event queue to put an event on 
+ * @param ev The event to put on the queue
+ */
+void
+os_eventq_put(struct os_eventq *evq, struct os_event *ev)
+{
+    int resched;
+    os_sr_t sr;
+
+    OS_ENTER_CRITICAL(sr);
+
+    /* Do not queue if already queued */
+    if (OS_EVENT_QUEUED(ev)) {
+        OS_EXIT_CRITICAL(sr);
+        return;
+    }
+
+    /* Queue the event */
+    ev->ev_queued = 1;
+    STAILQ_INSERT_TAIL(&evq->evq_list, ev, ev_next);
+
+    resched = 0;
+    if (evq->evq_task) {
+        /* If task waiting on event, wake it up.
+         * Check if task is sleeping, because another event 
+         * queue may have woken this task up beforehand.
+         */
+        if (evq->evq_task->t_state == OS_TASK_SLEEP) {
+            os_sched_wakeup(evq->evq_task);
+            resched = 1;
+        }
+        /* Either way, NULL out the task, because the task will
+         * be awake upon exit of this function.
+         */
+        evq->evq_task = NULL;
+    }
+
+    OS_EXIT_CRITICAL(sr);
+
+    if (resched) {
+        os_sched(NULL);
+    }
+}
+
+/**
+ * Pull a single item from an event queue.  This function blocks until there 
+ * is an item on the event queue to read.
+ *
+ * @param evq The event queue to pull an event from
+ *
+ * @return The event from the queue
+ */
+struct os_event *
+os_eventq_get(struct os_eventq *evq)
+{
+    struct os_event *ev;
+    os_sr_t sr;
+
+    OS_ENTER_CRITICAL(sr);
+pull_one:
+    ev = STAILQ_FIRST(&evq->evq_list);
+    if (ev) {
+        STAILQ_REMOVE(&evq->evq_list, ev, os_event, ev_next);
+        ev->ev_queued = 0;
+    } else {
+        evq->evq_task = os_sched_get_current_task();
+        os_sched_sleep(evq->evq_task, OS_TIMEOUT_NEVER);
+        OS_EXIT_CRITICAL(sr);
+
+        os_sched(NULL);
+
+        OS_ENTER_CRITICAL(sr);
+        evq->evq_task = NULL;
+        goto pull_one;
+    }
+    OS_EXIT_CRITICAL(sr);
+
+    return (ev);
+}
+
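+/* Poll each event queue once without blocking; helper for the timo == 0
+ * case of os_eventq_poll().
+ */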
+static struct os_event *
+os_eventq_poll_0timo(struct os_eventq **evq, int nevqs)
+{
+    struct os_event *ev;
+    os_sr_t sr;
+    int i;
+
+    ev = NULL;
+
+    OS_ENTER_CRITICAL(sr);
+    for (i = 0; i < nevqs; i++) {
+        ev = STAILQ_FIRST(&evq[i]->evq_list);
+        if (ev) {
+            STAILQ_REMOVE(&evq[i]->evq_list, ev, os_event, ev_next);
+            ev->ev_queued = 0;
+            break;
+        }
+    }
+    OS_EXIT_CRITICAL(sr);
+
+    return ev;
+}
+
+/**
+ * Poll the list of event queues specified by the evq parameter 
+ * (size nevqs), and return the "first" event available on any of 
+ * the queues.  Event queues are searched in the order that they 
+ * are passed in the array.
+ *
+ * @param evq Array of event queues
+ * @param nevqs Number of event queues in evq
+ * @param timo Timeout in OS ticks; wait forever if OS_WAIT_FOREVER is
+ *             passed.
+ *
+ * @return An event, or NULL if no events available
+ */
+struct os_event *
+os_eventq_poll(struct os_eventq **evq, int nevqs, os_time_t timo)
+{
+    struct os_event *ev;
+    struct os_task *cur_t;
+    int i, j;
+    os_sr_t sr;
+
+    /* If the timeout is 0, don't involve the scheduler at all.  Grab an event
+     * if one is available, else return immediately.
+     */
+    if (timo == 0) {
+        return os_eventq_poll_0timo(evq, nevqs);
+    }
+
+    ev = NULL;
+
+    OS_ENTER_CRITICAL(sr);
+    cur_t = os_sched_get_current_task();
+
+    for (i = 0; i < nevqs; i++) {
+        ev = STAILQ_FIRST(&evq[i]->evq_list);
+        if (ev) {
+            STAILQ_REMOVE(&evq[i]->evq_list, ev, os_event, ev_next);
+            ev->ev_queued = 0;
+            /* Reset the items that already have an evq task set. */
+            for (j = 0; j < i; j++) {
+                evq[j]->evq_task = NULL;
+            }
+
+            OS_EXIT_CRITICAL(sr);
+            goto has_event;
+        }
+        evq[i]->evq_task = cur_t;
+    }
+
+    os_sched_sleep(cur_t, timo);
+    OS_EXIT_CRITICAL(sr);
+
+    os_sched(NULL);
+
+    OS_ENTER_CRITICAL(sr);
+    for (i = 0; i < nevqs; i++) {
+        /* Go through the entire list to clear the evq_task pointers,
+         * since this task is no longer sleeping on the event queues.
+         * The first event found is returned, so only grab an event if
+         * we haven't found one yet.
+         */
+        if (!ev) {
+            ev = STAILQ_FIRST(&evq[i]->evq_list);
+            if (ev) {
+                STAILQ_REMOVE(&evq[i]->evq_list, ev, os_event, ev_next);
+                ev->ev_queued = 0;
+            }
+        }
+        evq[i]->evq_task = NULL;
+    }
+    OS_EXIT_CRITICAL(sr);
+
+has_event:
+    return (ev);
+}
+
+/**
+ * Remove an event from the queue.
+ *
+ * @param evq The event queue to remove the event from
+ * @param ev  The event to remove from the queue
+ */
+void
+os_eventq_remove(struct os_eventq *evq, struct os_event *ev)
+{
+    os_sr_t sr;
+
+    OS_ENTER_CRITICAL(sr);
+    if (OS_EVENT_QUEUED(ev)) {
+        STAILQ_REMOVE(&evq->evq_list, ev, os_event, ev_next);
+    }
+    ev->ev_queued = 0;
+    OS_EXIT_CRITICAL(sr);
+}
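
[Editor's note: a minimal usage sketch for the event queue API above, not
part of the commit.  OS_EVENT_T_PERUSER is assumed to be the base type for
user-defined events, per os_eventq.h.]

    #include "os/os.h"

    static struct os_eventq ex_evq;     /* os_eventq_init() before use */
    static struct os_event ex_ev = {
        .ev_type = OS_EVENT_T_PERUSER,
        .ev_arg = NULL,
    };

    /* Producer: posting an already-queued event is a harmless no-op. */
    static void
    producer(void)
    {
        os_eventq_put(&ex_evq, &ex_ev);
    }

    /* Consumer task: blocks in os_eventq_get() until an event arrives. */
    static void
    consumer_task(void *arg)
    {
        struct os_event *ev;

        while (1) {
            ev = os_eventq_get(&ex_evq);
            /* Dispatch on ev->ev_type here. */
        }
    }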

http://git-wip-us.apache.org/repos/asf/incubator-mynewt-core/blob/6a7432f4/kernel/os/src/os_heap.c
----------------------------------------------------------------------
diff --git a/kernel/os/src/os_heap.c b/kernel/os/src/os_heap.c
new file mode 100644
index 0000000..5f5af95
--- /dev/null
+++ b/kernel/os/src/os_heap.c
@@ -0,0 +1,106 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+
+#include <assert.h>
+#include "os/os_mutex.h"
+#include "os/os_heap.h"
+
+static struct os_mutex os_malloc_mutex;
+
+static void
+os_malloc_lock(void)
+{
+    int rc;
+
+    if (g_os_started) {
+        rc = os_mutex_pend(&os_malloc_mutex, 0xffffffff);
+        assert(rc == 0);
+    }
+}
+
+static void
+os_malloc_unlock(void)
+{
+    int rc;
+
+    if (g_os_started) {
+        rc = os_mutex_release(&os_malloc_mutex);
+        assert(rc == 0);
+    }
+}
+
+/**
+ * Operating system level malloc().   This ensures that a safe malloc occurs
+ * within the context of the OS.  Depending on platform, the OS may rely on
+ * libc's malloc() implementation, which is not guaranteed to be thread-safe.
+ * This malloc() will always be thread-safe.
+ *
+ * @param size The number of bytes to allocate
+ *
+ * @return A pointer to the allocated memory region, or NULL on failure.
+ */
+void *
+os_malloc(size_t size)
+{
+    void *ptr;
+
+    os_malloc_lock();
+    ptr = malloc(size);
+    os_malloc_unlock();
+
+    return ptr;
+}
+
+/**
+ * Operating system level free().  See description of os_malloc() for reasoning.
+ *
+ * Frees memory allocated by os_malloc().
+ *
+ * @param mem The memory to free.
+ */
+void
+os_free(void *mem)
+{
+    os_malloc_lock();
+    free(mem);
+    os_malloc_unlock();
+}
+
+/**
+ * Operating system level realloc(). See description of os_malloc() for reasoning.
+ *
+ * Resizes the allocation at ptr to size contiguous bytes.
+ *
+ * @param ptr A pointer to the memory to resize
+ * @param size The number of contiguous bytes to allocate
+ *
+ * @return A pointer to memory of size, or NULL on failure to allocate
+ */
+void *
+os_realloc(void *ptr, size_t size)
+{
+    void *new_ptr;
+
+    os_malloc_lock();
+    new_ptr = realloc(ptr, size);
+    os_malloc_unlock();
+
+    return new_ptr;
+}
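
[Editor's note: a short illustration, not part of the commit.  The wrappers
above are drop-in replacements for malloc()/free()/realloc() that serialize
heap access with a mutex once the scheduler has started.]

    uint8_t *buf;

    buf = os_malloc(128);
    if (buf != NULL) {
        /* ... use the 128-byte buffer ... */
        os_free(buf);
    }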

http://git-wip-us.apache.org/repos/asf/incubator-mynewt-core/blob/6a7432f4/kernel/os/src/os_mbuf.c
----------------------------------------------------------------------
diff --git a/kernel/os/src/os_mbuf.c b/kernel/os/src/os_mbuf.c
new file mode 100644
index 0000000..c7183f9
--- /dev/null
+++ b/kernel/os/src/os_mbuf.c
@@ -0,0 +1,1270 @@
+/*
+ * Software in this file is based heavily on code written in the FreeBSD source
+ * code repository.  While the code was written from scratch, it contains
+ * many of the ideas and much of the logic flow of the original source.  As
+ * such, it is a derivative work, and the following license applies as well:
+ *
+ * Copyright (c) 1982, 1986, 1988, 1991, 1993
+ *  The Regents of the University of California.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include "os/os.h"
+
+#include <assert.h>
+#include <string.h>
+#include <limits.h>
+
+STAILQ_HEAD(, os_mbuf_pool) g_msys_pool_list =
+    STAILQ_HEAD_INITIALIZER(g_msys_pool_list);
+
+/**
+ * Initialize an mbuf queue.  An mbuf queue is a queue of mbufs that is
+ * tied to a specific task's event queue.  Mbuf queues are a helper API
+ * around a common paradigm: wait on an event queue until at least one
+ * packet is available, then process a queue of packets.
+ *
+ * When mbufs are available on the queue, an OS_EVENT_T_MQUEUE_DATA event
+ * is posted to the task's event queue.
+ *
+ * @param mq The mbuf queue to initialize
+ * @param arg The argument to provide to the event posted on this mbuf queue
+ *
+ * @return 0 on success, non-zero on failure.
+ *
+ */
+int
+os_mqueue_init(struct os_mqueue *mq, void *arg)
+{
+    struct os_event *ev;
+
+    STAILQ_INIT(&mq->mq_head);
+
+    ev = &mq->mq_ev;
+    memset(ev, 0, sizeof(*ev));
+    ev->ev_arg = arg;
+    ev->ev_type = OS_EVENT_T_MQUEUE_DATA;
+
+    return (0);
+}
+
+/**
+ * Remove and return a single mbuf from the mbuf queue.  Does not block.
+ *
+ * @param mq The mbuf queue to pull an element off of.
+ *
+ * @return The next mbuf in the queue, or NULL if queue has no mbufs.
+ */
+struct os_mbuf *
+os_mqueue_get(struct os_mqueue *mq)
+{
+    struct os_mbuf_pkthdr *mp;
+    struct os_mbuf *m;
+    os_sr_t sr;
+
+    OS_ENTER_CRITICAL(sr);
+    mp = STAILQ_FIRST(&mq->mq_head);
+    if (mp) {
+        STAILQ_REMOVE_HEAD(&mq->mq_head, omp_next);
+    }
+    OS_EXIT_CRITICAL(sr);
+
+    if (mp) {
+        m = OS_MBUF_PKTHDR_TO_MBUF(mp);
+    } else {
+        m = NULL;
+    }
+
+    return (m);
+}
+
+/**
+ * Put a new mbuf in the mbuf queue.  Appends an mbuf to the end of the
+ * mbuf queue, and posts an event to the event queue passed in.
+ *
+ * @param mq The mbuf queue to append the mbuf to
+ * @param evq The event queue to post an OS_EVENT_T_MQUEUE_DATA event to
+ * @param m The mbuf to append to the mbuf queue
+ *
+ * @return 0 on success, non-zero on failure.
+ */
+int
+os_mqueue_put(struct os_mqueue *mq, struct os_eventq *evq, struct os_mbuf *m)
+{
+    struct os_mbuf_pkthdr *mp;
+    os_sr_t sr;
+    int rc;
+
+    /* Can only place the head of a chained mbuf on the queue. */
+    if (!OS_MBUF_IS_PKTHDR(m)) {
+        rc = OS_EINVAL;
+        goto err;
+    }
+
+    mp = OS_MBUF_PKTHDR(m);
+
+    OS_ENTER_CRITICAL(sr);
+    STAILQ_INSERT_TAIL(&mq->mq_head, mp, omp_next);
+    OS_EXIT_CRITICAL(sr);
+
+    /* Only post an event to the event queue if one was specified */
+    if (evq) {
+        os_eventq_put(evq, &mq->mq_ev);
+    }
+
+    return (0);
+err:
+    return (rc);
+}
+
+/**
+ * MSYS is a system level mbuf registry.  It allows the system to share
+ * packet buffers amongst the various networking stacks that can be running
+ * simultaneously.
+ *
+ * Mbuf pools are created in the system initialization code.  When an mbuf
+ * is allocated out of msys, it will try to find the best fit based upon
+ * the estimated mbuf size.
+ *
+ * os_msys_register() registers an mbuf pool with MSYS, and allows MSYS to
+ * allocate mbufs out of it.
+ *
+ * @param new_pool The pool to register with MSYS
+ *
+ * @return 0 on success, non-zero on failure
+ */
+int
+os_msys_register(struct os_mbuf_pool *new_pool)
+{
+    struct os_mbuf_pool *pool;
+
+    pool = NULL;
+    STAILQ_FOREACH(pool, &g_msys_pool_list, omp_next) {
+        if (new_pool->omp_databuf_len > pool->omp_databuf_len) {
+            break;
+        }
+    }
+
+    if (pool) {
+        STAILQ_INSERT_AFTER(&g_msys_pool_list, pool, new_pool, omp_next);
+    } else {
+        STAILQ_INSERT_TAIL(&g_msys_pool_list, new_pool, omp_next);
+    }
+
+    return (0);
+}
+
+/**
+ * De-registers all mbuf pools from msys.
+ */
+void
+os_msys_reset(void)
+{
+    STAILQ_INIT(&g_msys_pool_list);
+}
+
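+/* Locate the most suitable registered pool for a dsize-byte allocation;
+ * if no pool's buffers are large enough, the last pool in the list is
+ * used.
+ */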
+static struct os_mbuf_pool *
+_os_msys_find_pool(uint16_t dsize)
+{
+    struct os_mbuf_pool *pool;
+
+    pool = NULL;
+    STAILQ_FOREACH(pool, &g_msys_pool_list, omp_next) {
+        if (dsize <= pool->omp_databuf_len) {
+            break;
+        }
+    }
+
+    if (!pool) {
+        pool = STAILQ_LAST(&g_msys_pool_list, os_mbuf_pool, omp_next);
+    }
+
+    return (pool);
+}
+
+/**
+ * Allocate an mbuf from msys.  Based upon the data size requested,
+ * os_msys_get() will choose the mbuf pool that has the best fit.
+ *
+ * @param dsize The estimated size of the data being stored in the mbuf
+ * @param leadingspace The amount of leadingspace to allocate in the mbuf
+ *
+ * @return A freshly allocated mbuf on success, NULL on failure.
+ *
+ */
+struct os_mbuf *
+os_msys_get(uint16_t dsize, uint16_t leadingspace)
+{
+    struct os_mbuf *m;
+    struct os_mbuf_pool *pool;
+
+    pool = _os_msys_find_pool(dsize);
+    if (!pool) {
+        goto err;
+    }
+
+    m = os_mbuf_get(pool, leadingspace);
+    return (m);
+err:
+    return (NULL);
+}
+
+/**
+ * Allocate a packet header structure from the MSYS pool.  See
+ * os_msys_register() for a description of MSYS.
+ *
+ * @param dsize The estimated size of the data being stored in the mbuf
+ * @param user_hdr_len The length to allocate for the packet header structure
+ *
+ * @return A freshly allocated mbuf on success, NULL on failure.
+ */
+struct os_mbuf *
+os_msys_get_pkthdr(uint16_t dsize, uint16_t user_hdr_len)
+{
+    uint16_t total_pkthdr_len;
+    struct os_mbuf *m;
+    struct os_mbuf_pool *pool;
+
+    total_pkthdr_len =  user_hdr_len + sizeof(struct os_mbuf_pkthdr);
+    pool = _os_msys_find_pool(dsize + total_pkthdr_len);
+    if (!pool) {
+        goto err;
+    }
+
+    m = os_mbuf_get_pkthdr(pool, user_hdr_len);
+    return (m);
+err:
+    return (NULL);
+}
+
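+/**
+ * Returns the total number of blocks across all pools registered with
+ * msys.
+ */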
+int
+os_msys_count(void)
+{
+    struct os_mbuf_pool *omp;
+    int total;
+
+    total = 0;
+    STAILQ_FOREACH(omp, &g_msys_pool_list, omp_next) {
+        total += omp->omp_pool->mp_num_blocks;
+    }
+
+    return total;
+}
+
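+/**
+ * Returns the total number of free blocks across all pools registered
+ * with msys.
+ */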
+int
+os_msys_num_free(void)
+{
+    struct os_mbuf_pool *omp;
+    int total;
+
+    total = 0;
+    STAILQ_FOREACH(omp, &g_msys_pool_list, omp_next) {
+        total += omp->omp_pool->mp_num_free;
+    }
+
+    return total;
+}
+
+/**
+ * Initialize a pool of mbufs.
+ *
+ * @param omp     The mbuf pool to initialize
+ * @param mp      The memory pool that will hold this mbuf pool
+ * @param buf_len The size of each buffer, in bytes, including the os_mbuf
+ *                structure itself.
+ * @param nbufs   The number of buffers in the pool
+ *
+ * @return 0 on success, error code on failure.
+ */
+int
+os_mbuf_pool_init(struct os_mbuf_pool *omp, struct os_mempool *mp,
+                  uint16_t buf_len, uint16_t nbufs)
+{
+    omp->omp_databuf_len = buf_len - sizeof(struct os_mbuf);
+    omp->omp_mbuf_count = nbufs;
+    omp->omp_pool = mp;
+
+    return (0);
+}
+
+/**
+ * Get an mbuf from the mbuf pool.  The mbuf is allocated and initialized
+ * prior to being returned.
+ *
+ * @param omp The mbuf pool to allocate the mbuf from
+ * @param leadingspace The amount of leadingspace to put before the data
+ *     section by default.
+ *
+ * @return An initialized mbuf on success, and NULL on failure.
+ */
+struct os_mbuf *
+os_mbuf_get(struct os_mbuf_pool *omp, uint16_t leadingspace)
+{
+    struct os_mbuf *om;
+
+    if (leadingspace > omp->omp_databuf_len) {
+        goto err;
+    }
+
+    om = os_memblock_get(omp->omp_pool);
+    if (!om) {
+        goto err;
+    }
+
+    SLIST_NEXT(om, om_next) = NULL;
+    om->om_flags = 0;
+    om->om_pkthdr_len = 0;
+    om->om_len = 0;
+    om->om_data = (&om->om_databuf[0] + leadingspace);
+    om->om_omp = omp;
+
+    return (om);
+err:
+    return (NULL);
+}
+
+/**
+ * Allocate a new packet header mbuf out of the os_mbuf_pool.
+ *
+ * @param omp The mbuf pool to allocate out of
+ * @param user_pkthdr_len The packet header length to reserve for the caller.
+ *
+ * @return A freshly allocated mbuf on success, NULL on failure.
+ */
+struct os_mbuf *
+os_mbuf_get_pkthdr(struct os_mbuf_pool *omp, uint8_t user_pkthdr_len)
+{
+    uint16_t pkthdr_len;
+    struct os_mbuf_pkthdr *pkthdr;
+    struct os_mbuf *om;
+
+    /* User packet header must fit inside mbuf */
+    pkthdr_len = user_pkthdr_len + sizeof(struct os_mbuf_pkthdr);
+    if ((pkthdr_len > omp->omp_databuf_len) || (pkthdr_len > 255)) {
+        return NULL;
+    }
+
+    om = os_mbuf_get(omp, 0);
+    if (om) {
+        om->om_pkthdr_len = pkthdr_len;
+        om->om_data += pkthdr_len;
+
+        pkthdr = OS_MBUF_PKTHDR(om);
+        pkthdr->omp_len = 0;
+        pkthdr->omp_flags = 0;
+        STAILQ_NEXT(pkthdr, omp_next) = NULL;
+    }
+
+    return om;
+}
+
+/**
+ * Release an mbuf back to its pool
+ *
+ * @param om The mbuf to release back to its pool
+ *
+ * @return 0 on success, non-zero on failure
+ */
+int
+os_mbuf_free(struct os_mbuf *om)
+{
+    int rc;
+
+    if (om->om_omp != NULL) {
+        rc = os_memblock_put(om->om_omp->omp_pool, om);
+        if (rc != 0) {
+            goto err;
+        }
+    }
+
+    return (0);
+err:
+    return (rc);
+}
+
+/**
+ * Free a chain of mbufs
+ *
+ * @param om The starting mbuf of the chain to free back into its pool
+ *
+ * @return 0 on success, non-zero on failure
+ */
+int
+os_mbuf_free_chain(struct os_mbuf *om)
+{
+    struct os_mbuf *next;
+    int rc;
+
+    while (om != NULL) {
+        next = SLIST_NEXT(om, om_next);
+
+        rc = os_mbuf_free(om);
+        if (rc != 0) {
+            goto err;
+        }
+
+        om = next;
+    }
+
+    return (0);
+err:
+    return (rc);
+}
+
+/**
+ * Copy a packet header from one mbuf to another.
+ *
+ * @param new_buf The new buffer to copy the packet header into
+ * @param old_buf The old buffer to copy the packet header from
+ */
+static inline void
+_os_mbuf_copypkthdr(struct os_mbuf *new_buf, struct os_mbuf *old_buf)
+{
+    assert(new_buf->om_len == 0);
+
+    memcpy(&new_buf->om_databuf[0], &old_buf->om_databuf[0],
+           old_buf->om_pkthdr_len);
+    new_buf->om_pkthdr_len = old_buf->om_pkthdr_len;
+    new_buf->om_data = new_buf->om_databuf + old_buf->om_pkthdr_len;
+}
+
+/**
+ * Append data onto a mbuf
+ *
+ * @param om   The mbuf to append the data onto
+ * @param data The data to append onto the mbuf
+ * @param len  The length of the data to append
+ *
+ * @return 0 on success, and an error code on failure
+ */
+int
+os_mbuf_append(struct os_mbuf *om, const void *data,  uint16_t len)
+{
+    struct os_mbuf_pool *omp;
+    struct os_mbuf *last;
+    struct os_mbuf *new;
+    int remainder;
+    int space;
+    int rc;
+
+    if (om == NULL) {
+        rc = OS_EINVAL;
+        goto err;
+    }
+
+    omp = om->om_omp;
+
+    /* Scroll to last mbuf in the chain */
+    last = om;
+    while (SLIST_NEXT(last, om_next) != NULL) {
+        last = SLIST_NEXT(last, om_next);
+    }
+
+    remainder = len;
+    space = OS_MBUF_TRAILINGSPACE(last);
+
+    /* If room in current mbuf, copy the first part of the data into the
+     * remaining space in that mbuf.
+     */
+    if (space > 0) {
+        if (space > remainder) {
+            space = remainder;
+        }
+
+        memcpy(OS_MBUF_DATA(last, uint8_t *) + last->om_len , data, space);
+
+        last->om_len += space;
+        data += space;
+        remainder -= space;
+    }
+
+    /* Take the remaining data, and keep allocating new mbufs and copying
+     * data into it, until data is exhausted.
+     */
+    while (remainder > 0) {
+        new = os_mbuf_get(omp, 0);
+        if (!new) {
+            break;
+        }
+
+        new->om_len = min(omp->omp_databuf_len, remainder);
+        memcpy(OS_MBUF_DATA(new, void *), data, new->om_len);
+        data += new->om_len;
+        remainder -= new->om_len;
+        SLIST_NEXT(last, om_next) = new;
+        last = new;
+    }
+
+    /* Adjust the packet header length in the buffer */
+    if (OS_MBUF_IS_PKTHDR(om)) {
+        OS_MBUF_PKTHDR(om)->omp_len += len - remainder;
+    }
+
+    if (remainder != 0) {
+        rc = OS_ENOMEM;
+        goto err;
+    }
+
+    return (0);
+err:
+    return (rc);
+}
+
+/**
+ * Reads data from one mbuf and appends it to another.  On error, the specified
+ * data range may be partially appended.  Neither mbuf is required to contain
+ * an mbuf packet header.
+ *
+ * @param dst                   The mbuf to append to.
+ * @param src                   The mbuf to copy data from.
+ * @param src_off               The absolute offset within the source mbuf
+ *                                  chain to read from.
+ * @param len                   The number of bytes to append.
+ *
+ * @return                      0 on success;
+ *                              OS_EINVAL if the specified range extends beyond
+ *                                  the end of the source mbuf chain.
+ */
+int
+os_mbuf_appendfrom(struct os_mbuf *dst, const struct os_mbuf *src,
+                   uint16_t src_off, uint16_t len)
+{
+    const struct os_mbuf *src_cur_om;
+    uint16_t src_cur_off;
+    uint16_t chunk_sz;
+    int rc;
+
+    src_cur_om = os_mbuf_off(src, src_off, &src_cur_off);
+    while (len > 0) {
+        if (src_cur_om == NULL) {
+            return OS_EINVAL;
+        }
+
+        chunk_sz = min(len, src_cur_om->om_len - src_cur_off);
+        rc = os_mbuf_append(dst, src_cur_om->om_data + src_cur_off, chunk_sz);
+        if (rc != 0) {
+            return rc;
+        }
+
+        len -= chunk_sz;
+        src_cur_om = SLIST_NEXT(src_cur_om, om_next);
+        src_cur_off = 0;
+    }
+
+    return 0;
+}
+
+/**
+ * Duplicate a chain of mbufs.  Return the start of the duplicated chain.
+ *
+ * @param om The mbuf chain to duplicate
+ *
+ * @return A pointer to the new chain of mbufs, or NULL on failure
+ */
+struct os_mbuf *
+os_mbuf_dup(struct os_mbuf *om)
+{
+    struct os_mbuf_pool *omp;
+    struct os_mbuf *head;
+    struct os_mbuf *copy;
+
+    omp = om->om_omp;
+
+    head = NULL;
+    copy = NULL;
+
+    for (; om != NULL; om = SLIST_NEXT(om, om_next)) {
+        if (head) {
+            SLIST_NEXT(copy, om_next) = os_mbuf_get(omp,
+                    OS_MBUF_LEADINGSPACE(om));
+            if (!SLIST_NEXT(copy, om_next)) {
+                os_mbuf_free_chain(head);
+                goto err;
+            }
+
+            copy = SLIST_NEXT(copy, om_next);
+        } else {
+            head = os_mbuf_get(omp, OS_MBUF_LEADINGSPACE(om));
+            if (!head) {
+                goto err;
+            }
+
+            if (OS_MBUF_IS_PKTHDR(om)) {
+                _os_mbuf_copypkthdr(head, om);
+            }
+            copy = head;
+        }
+        copy->om_flags = om->om_flags;
+        copy->om_len = om->om_len;
+        memcpy(OS_MBUF_DATA(copy, uint8_t *), OS_MBUF_DATA(om, uint8_t *),
+                om->om_len);
+    }
+
+    return (head);
+err:
+    return (NULL);
+}
+
+/**
+ * Locates the specified absolute offset within an mbuf chain.  The offset
+ * can be equal to the total length of the chain (one past the last byte),
+ * but no greater.
+ *
+ * @param om                    The start of the mbuf chain to seek within.
+ * @param off                   The absolute address to find.
+ * @param out_off               On success, this points to the relative offset
+ *                                  within the returned mbuf.
+ *
+ * @return                      The mbuf containing the specified offset on
+ *                                  success.
+ *                              NULL if the specified offset is out of bounds.
+ */
+struct os_mbuf *
+os_mbuf_off(const struct os_mbuf *om, int off, uint16_t *out_off)
+{
+    struct os_mbuf *next;
+    struct os_mbuf *cur;
+
+    /* Cast away const. */
+    cur = (struct os_mbuf *)om;
+
+    while (1) {
+        if (cur == NULL) {
+            return NULL;
+        }
+
+        next = SLIST_NEXT(cur, om_next);
+
+        if (cur->om_len > off ||
+            (cur->om_len == off && next == NULL)) {
+
+            *out_off = off;
+            return cur;
+        }
+
+        off -= cur->om_len;
+        cur = next;
+    }
+}
+
+/*
+ * Copy data from an mbuf chain starting "off" bytes from the beginning,
+ * continuing for "len" bytes, into the indicated buffer.
+ *
+ * @param m The mbuf chain to copy from
+ * @param off The offset into the mbuf chain to begin copying from
+ * @param len The length of the data to copy
+ * @param dst The destination buffer to copy into
+ *
+ * @return                      0 on success;
+ *                              -1 if the mbuf does not contain enough data.
+ */
+int
+os_mbuf_copydata(const struct os_mbuf *m, int off, int len, void *dst)
+{
+    unsigned int count;
+    uint8_t *udst;
+
+    if (!len) {
+        return 0;
+    }
+
+    udst = dst;
+
+    while (off > 0) {
+        if (!m) {
+            return (-1);
+        }
+
+        if (off < m->om_len)
+            break;
+        off -= m->om_len;
+        m = SLIST_NEXT(m, om_next);
+    }
+    while (len > 0 && m != NULL) {
+        count = min(m->om_len - off, len);
+        memcpy(udst, m->om_data + off, count);
+        len -= count;
+        udst += count;
+        off = 0;
+        m = SLIST_NEXT(m, om_next);
+    }
+
+    return (len > 0 ? -1 : 0);
+}
+
+/**
+ * Adjust the length of a mbuf, trimming either from the head or the tail
+ * of the mbuf.
+ *
+ * @param mp The mbuf chain to adjust
+ * @param req_len The length to trim from the mbuf.  If positive, trims
+ *                from the head of the chain; if negative, trims from the
+ *                tail of the chain.
+ */
+void
+os_mbuf_adj(struct os_mbuf *mp, int req_len)
+{
+    int len = req_len;
+    struct os_mbuf *m;
+    int count;
+
+    if ((m = mp) == NULL)
+        return;
+    if (len >= 0) {
+        /*
+         * Trim from head.
+         */
+        while (m != NULL && len > 0) {
+            if (m->om_len <= len) {
+                len -= m->om_len;
+                m->om_len = 0;
+                m = SLIST_NEXT(m, om_next);
+            } else {
+                m->om_len -= len;
+                m->om_data += len;
+                len = 0;
+            }
+        }
+        if (OS_MBUF_IS_PKTHDR(mp))
+            OS_MBUF_PKTHDR(mp)->omp_len -= (req_len - len);
+    } else {
+        /*
+         * Trim from tail.  Scan the mbuf chain,
+         * calculating its length and finding the last mbuf.
+         * If the adjustment only affects this mbuf, then just
+         * adjust and return.  Otherwise, rescan and truncate
+         * after the remaining size.
+         */
+        len = -len;
+        count = 0;
+        for (;;) {
+            count += m->om_len;
+            if (SLIST_NEXT(m, om_next) == (struct os_mbuf *)0)
+                break;
+            m = SLIST_NEXT(m, om_next);
+        }
+        if (m->om_len >= len) {
+            m->om_len -= len;
+            if (OS_MBUF_IS_PKTHDR(mp))
+                OS_MBUF_PKTHDR(mp)->omp_len -= len;
+            return;
+        }
+        count -= len;
+        if (count < 0)
+            count = 0;
+        /*
+         * Correct length for chain is "count".
+         * Find the mbuf with last data, adjust its length,
+         * and toss data from remaining mbufs on chain.
+         */
+        m = mp;
+        if (OS_MBUF_IS_PKTHDR(m))
+            OS_MBUF_PKTHDR(m)->omp_len = count;
+        for (; m; m = SLIST_NEXT(m, om_next)) {
+            if (m->om_len >= count) {
+                m->om_len = count;
+                if (SLIST_NEXT(m, om_next) != NULL) {
+                    os_mbuf_free_chain(SLIST_NEXT(m, om_next));
+                    SLIST_NEXT(m, om_next) = NULL;
+                }
+                break;
+            }
+            count -= m->om_len;
+        }
+    }
+}
+
+/**
+ * Performs a memory compare of the specified region of an mbuf chain against a
+ * flat buffer.
+ *
+ * @param om                    The start of the mbuf chain to compare.
+ * @param off                   The offset within the mbuf chain to start the
+ *                                  comparison.
+ * @param data                  The flat buffer to compare.
+ * @param len                   The length of the flat buffer.
+ *
+ * @return                      0 if both memory regions are identical;
+ *                              A memcmp return code if there is a mismatch;
+ *                              INT_MAX if the mbuf is too short.
+ */
+int
+os_mbuf_cmpf(const struct os_mbuf *om, int off, const void *data, int len)
+{
+    uint16_t chunk_sz;
+    uint16_t data_off;
+    uint16_t om_off;
+    int rc;
+
+    if (len <= 0) {
+        return 0;
+    }
+
+    data_off = 0;
+    om = os_mbuf_off(om, off, &om_off);
+    while (1) {
+        if (om == NULL) {
+            return INT_MAX;
+        }
+
+        chunk_sz = min(om->om_len - om_off, len - data_off);
+        if (chunk_sz > 0) {
+            rc = memcmp(om->om_data + om_off, data + data_off, chunk_sz);
+            if (rc != 0) {
+                return rc;
+            }
+        }
+
+        data_off += chunk_sz;
+        if (data_off == len) {
+            return 0;
+        }
+
+        om = SLIST_NEXT(om, om_next);
+        om_off = 0;
+
+        if (om == NULL) {
+            return INT_MAX;
+        }
+    }
+}
+
+/**
+ * Compares the contents of two mbuf chains.  The ranges of the two chains to
+ * be compared are specified via the two offset parameters and the len
+ * parameter.  Neither mbuf chain is required to contain a packet header.
+ *
+ * @param om1                   The first mbuf chain to compare.
+ * @param offset1               The absolute offset within om1 at which to
+ *                                  start the comparison.
+ * @param om2                   The second mbuf chain to compare.
+ * @param offset2               The absolute offset within om2 at which to
+ *                                  start the comparison.
+ * @param len                   The number of bytes to compare.
+ *
+ * @return                      0 if both mbuf segments are identical;
+ *                              A memcmp() return code if the segment contents
+ *                                  differ;
+ *                              INT_MAX if a specified range extends beyond the
+ *                                  end of its corresponding mbuf chain.
+ */
+int
+os_mbuf_cmpm(const struct os_mbuf *om1, uint16_t offset1,
+             const struct os_mbuf *om2, uint16_t offset2,
+             uint16_t len)
+{
+    const struct os_mbuf *cur1;
+    const struct os_mbuf *cur2;
+    uint16_t bytes_remaining;
+    uint16_t chunk_sz;
+    uint16_t om1_left;
+    uint16_t om2_left;
+    uint16_t om1_off;
+    uint16_t om2_off;
+    int rc;
+
+    cur1 = os_mbuf_off(om1, offset1, &om1_off);
+    cur2 = os_mbuf_off(om2, offset2, &om2_off);
+
+    bytes_remaining = len;
+    while (1) {
+        if (bytes_remaining == 0) {
+            return 0;
+        }
+
+        while (cur1 != NULL && om1_off >= cur1->om_len) {
+            cur1 = SLIST_NEXT(cur1, om_next);
+            om1_off = 0;
+        }
+        while (cur2 != NULL && om2_off >= cur2->om_len) {
+            cur2 = SLIST_NEXT(cur2, om_next);
+            om2_off = 0;
+        }
+
+        if (cur1 == NULL || cur2 == NULL) {
+            return INT_MAX;
+        }
+
+        om1_left = cur1->om_len - om1_off;
+        om2_left = cur2->om_len - om2_off;
+        chunk_sz = min(min(om1_left, om2_left), bytes_remaining);
+
+        rc = memcmp(cur1->om_data + om1_off, cur2->om_data + om2_off,
+                    chunk_sz);
+        if (rc != 0) {
+            return rc;
+        }
+
+        om1_off += chunk_sz;
+        om2_off += chunk_sz;
+        bytes_remaining -= chunk_sz;
+    }
+}
+
+/**
+ * Increases the length of an mbuf chain by adding data to the front.  If there
+ * is insufficient room in the leading mbuf, additional mbufs are allocated and
+ * prepended as necessary.  If this function fails to allocate an mbuf, the
+ * entire chain is freed.
+ *
+ * The specified mbuf chain does not need to contain a packet header.
+ *
+ * @param om                    The head of the mbuf chain.
+ * @param len                   The number of bytes to prepend.
+ *
+ * @return                      The new head of the chain on success;
+ *                              NULL on failure.
+ */
+struct os_mbuf *
+os_mbuf_prepend(struct os_mbuf *om, int len)
+{
+    struct os_mbuf *p;
+    int leading;
+
+    while (1) {
+        /* Fill the available space at the front of the head of the chain, as
+         * needed.
+         */
+        leading = min(len, OS_MBUF_LEADINGSPACE(om));
+
+        om->om_data -= leading;
+        om->om_len += leading;
+        if (OS_MBUF_IS_PKTHDR(om)) {
+            OS_MBUF_PKTHDR(om)->omp_len += leading;
+        }
+
+        len -= leading;
+        if (len == 0) {
+            break;
+        }
+
+        /* The current head didn't have enough space; allocate a new head. */
+        if (OS_MBUF_IS_PKTHDR(om)) {
+            p = os_mbuf_get_pkthdr(om->om_omp,
+                om->om_pkthdr_len - sizeof (struct os_mbuf_pkthdr));
+        } else {
+            p = os_mbuf_get(om->om_omp, 0);
+        }
+        if (p == NULL) {
+            os_mbuf_free_chain(om);
+            om = NULL;
+            break;
+        }
+
+        if (OS_MBUF_IS_PKTHDR(om)) {
+            _os_mbuf_copypkthdr(p, om);
+            om->om_pkthdr_len = 0;
+        }
+
+        /* Move the new head's data pointer to the end so that data can be
+         * prepended.
+         */
+        p->om_data += OS_MBUF_TRAILINGSPACE(p);
+
+        SLIST_NEXT(p, om_next) = om;
+        om = p;
+    }
+
+    return om;
+}
+
+/**
+ * Prepends a chunk of empty data to the specified mbuf chain and ensures the
+ * chunk is contiguous.  If either operation fails, the specified mbuf chain is
+ * freed and NULL is returned.
+ *
+ * @param om                    The mbuf chain to prepend to.
+ * @param len                   The number of bytes to prepend and pullup.
+ *
+ * @return                      The modified mbuf on success;
+ *                              NULL on failure (and the mbuf chain is freed).
+ */
+struct os_mbuf *
+os_mbuf_prepend_pullup(struct os_mbuf *om, uint16_t len)
+{
+    om = os_mbuf_prepend(om, len);
+    if (om == NULL) {
+        return NULL;
+    }
+
+    om = os_mbuf_pullup(om, len);
+    if (om == NULL) {
+        return NULL;
+    }
+
+    return om;
+}
+
+/**
+ * Copies the contents of a flat buffer into an mbuf chain, starting at the
+ * specified destination offset.  If the mbuf is too small for the source data,
+ * it is extended as necessary.  If the destination mbuf contains a packet
+ * header, the header length is updated.
+ *
+ * @param om                    The mbuf chain to copy into.
+ * @param off                   The offset within the chain to copy to.
+ * @param src                   The source buffer to copy from.
+ * @param len                   The number of bytes to copy.
+ *
+ * @return                      0 on success; nonzero on failure.
+ */
+int
+os_mbuf_copyinto(struct os_mbuf *om, int off, const void *src, int len)
+{
+    struct os_mbuf *next;
+    struct os_mbuf *cur;
+    const uint8_t *sptr;
+    uint16_t cur_off;
+    int copylen;
+    int rc;
+
+    /* Find the mbuf,offset pair for the start of the destination. */
+    cur = os_mbuf_off(om, off, &cur_off);
+    if (cur == NULL) {
+        return -1;
+    }
+
+    /* Overwrite existing data until we reach the end of the chain. */
+    sptr = src;
+    while (1) {
+        copylen = min(cur->om_len - cur_off, len);
+        if (copylen > 0) {
+            memcpy(cur->om_data + cur_off, sptr, copylen);
+            sptr += copylen;
+            len -= copylen;
+
+            copylen = 0;
+        }
+
+        if (len == 0) {
+            /* All the source data fit in the existing mbuf chain. */
+            return 0;
+        }
+
+        next = SLIST_NEXT(cur, om_next);
+        if (next == NULL) {
+            break;
+        }
+
+        cur = next;
+    }
+
+    /* Append the remaining data to the end of the chain. */
+    rc = os_mbuf_append(cur, sptr, len);
+    if (rc != 0) {
+        return rc;
+    }
+
+    /* Fix up the packet header, if one is present. */
+    if (OS_MBUF_IS_PKTHDR(om)) {
+        OS_MBUF_PKTHDR(om)->omp_len =
+            max(OS_MBUF_PKTHDR(om)->omp_len, off + len);
+    }
+
+    return 0;
+}
+
+/**
+ * Attaches a second mbuf chain onto the end of the first.  If the first chain
+ * contains a packet header, the header's length is updated.  If the second
+ * chain has a packet header, its header is cleared.
+ *
+ * @param first                 The mbuf chain being attached to.
+ * @param second                The mbuf chain that gets attached.
+ */
+void
+os_mbuf_concat(struct os_mbuf *first, struct os_mbuf *second)
+{
+    struct os_mbuf *next;
+    struct os_mbuf *cur;
+
+    /* Point 'cur' to the last buffer in the first chain. */
+    cur = first;
+    while (1) {
+        next = SLIST_NEXT(cur, om_next);
+        if (next == NULL) {
+            break;
+        }
+
+        cur = next;
+    }
+
+    /* Attach the second chain to the end of the first. */
+    SLIST_NEXT(cur, om_next) = second;
+
+    /* If the first chain has a packet header, calculate the length of the
+     * second chain and add it to the header length.
+     */
+    if (OS_MBUF_IS_PKTHDR(first)) {
+        if (OS_MBUF_IS_PKTHDR(second)) {
+            OS_MBUF_PKTHDR(first)->omp_len += OS_MBUF_PKTHDR(second)->omp_len;
+        } else {
+            for (cur = second; cur != NULL; cur = SLIST_NEXT(cur, om_next)) {
+                OS_MBUF_PKTHDR(first)->omp_len += cur->om_len;
+            }
+        }
+    }
+
+    second->om_pkthdr_len = 0;
+}
+
+/**
+ * Increases the length of an mbuf chain by the specified amount.  If there is
+ * not sufficient room in the last buffer, a new buffer is allocated and
+ * appended to the chain.  It is an error to request more data than can fit in
+ * a single buffer.
+ *
+ * @param om                    The head of the chain to extend.
+ * @param len                   The number of bytes to extend by.
+ *
+ * @return                      A pointer to the new data on success;
+ *                              NULL on failure.
+ */
+void *
+os_mbuf_extend(struct os_mbuf *om, uint16_t len)
+{
+    struct os_mbuf *newm;
+    struct os_mbuf *last;
+    void *data;
+
+    if (len > om->om_omp->omp_databuf_len) {
+        return NULL;
+    }
+
+    /* Scroll to last mbuf in the chain */
+    last = om;
+    while (SLIST_NEXT(last, om_next) != NULL) {
+        last = SLIST_NEXT(last, om_next);
+    }
+
+    if (OS_MBUF_TRAILINGSPACE(last) < len) {
+        newm = os_mbuf_get(om->om_omp, 0);
+        if (newm == NULL) {
+            return NULL;
+        }
+
+        SLIST_NEXT(last, om_next) = newm;
+        last = newm;
+    }
+
+    data = last->om_data + last->om_len;
+    last->om_len += len;
+
+    if (OS_MBUF_IS_PKTHDR(om)) {
+        OS_MBUF_PKTHDR(om)->omp_len += len;
+    }
+
+    return data;
+}
+
+/**
+ * Rearrange an mbuf chain so that len bytes are contiguous
+ * and in the data area of an mbuf (so that OS_MBUF_DATA() will
+ * work on a structure of size len).  Returns the resulting
+ * mbuf chain on success; frees it and returns NULL on failure.
+ *
+ * If there is room, it will add up to "max_protohdr - len"
+ * extra bytes to the contiguous region, in an attempt to avoid being
+ * called next time.
+ *
+ * @param om The mbuf chain to make contiguous
+ * @param len The number of bytes in the chain to make contiguous
+ *
+ * @return The contiguous mbuf chain on success, NULL on failure.
+ */
+struct os_mbuf *
+os_mbuf_pullup(struct os_mbuf *om, uint16_t len)
+{
+    struct os_mbuf_pool *omp;
+    struct os_mbuf *next;
+    struct os_mbuf *om2;
+    int count;
+    int space;
+
+    omp = om->om_omp;
+
+    /*
+     * If first mbuf has no cluster, and has room for len bytes
+     * without shifting current data, pullup into it,
+     * otherwise allocate a new mbuf to prepend to the chain.
+     */
+    if (om->om_len >= len) {
+        return (om);
+    }
+    if (om->om_len + OS_MBUF_TRAILINGSPACE(om) >= len &&
+        SLIST_NEXT(om, om_next)) {
+        om2 = om;
+        om = SLIST_NEXT(om, om_next);
+        len -= om2->om_len;
+    } else {
+        if (len > omp->omp_databuf_len - om->om_pkthdr_len) {
+            goto bad;
+        }
+
+        om2 = os_mbuf_get(omp, 0);
+        if (om2 == NULL) {
+            goto bad;
+        }
+
+        if (OS_MBUF_IS_PKTHDR(om)) {
+            _os_mbuf_copypkthdr(om2, om);
+        }
+    }
+    space = OS_MBUF_TRAILINGSPACE(om2);
+    do {
+        count = min(min(len, space), om->om_len);
+        memcpy(om2->om_data + om2->om_len, om->om_data, count);
+        len -= count;
+        om2->om_len += count;
+        om->om_len -= count;
+        space -= count;
+        if (om->om_len) {
+            om->om_data += count;
+        } else {
+            next = SLIST_NEXT(om, om_next);
+            os_mbuf_free(om);
+            om = next;
+        }
+    } while (len > 0 && om);
+    if (len > 0) {
+        os_mbuf_free(om2);
+        goto bad;
+    }
+    SLIST_NEXT(om2, om_next) = om;
+    return (om2);
+bad:
+    os_mbuf_free_chain(om);
+    return (NULL);
+}
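
[Editor's note: a usage sketch for the mbuf, msys, and mqueue APIs above,
not part of the commit.  The pool sizes are arbitrary, and os_membuf_t and
OS_MEMPOOL_SIZE() are assumed from os_mempool.h.]

    #include <assert.h>
    #include "os/os.h"

    #define MBUF_NUM_MBUFS      (8)
    #define MBUF_PAYLOAD_SIZE   (128)
    #define MBUF_MEMBLOCK_SIZE  (MBUF_PAYLOAD_SIZE + sizeof(struct os_mbuf))

    static struct os_mempool ex_mempool;
    static struct os_mbuf_pool ex_mbuf_pool;
    static os_membuf_t ex_mbuf_mem[
        OS_MEMPOOL_SIZE(MBUF_NUM_MBUFS, MBUF_MEMBLOCK_SIZE)];
    static struct os_mqueue ex_mqueue;

    void
    mbuf_example(struct os_eventq *evq)
    {
        uint8_t payload[4] = { 0xde, 0xad, 0xbe, 0xef };
        uint8_t readback[4];
        struct os_mbuf *om;
        int rc;

        /* Back an mbuf pool with a memory pool; register it with msys. */
        rc = os_mempool_init(&ex_mempool, MBUF_NUM_MBUFS,
                             MBUF_MEMBLOCK_SIZE, ex_mbuf_mem, "ex_mbufs");
        assert(rc == 0);
        rc = os_mbuf_pool_init(&ex_mbuf_pool, &ex_mempool,
                               MBUF_MEMBLOCK_SIZE, MBUF_NUM_MBUFS);
        assert(rc == 0);
        rc = os_msys_register(&ex_mbuf_pool);
        assert(rc == 0);

        /* Build a packet: allocate a pkthdr mbuf and append the payload.
         * os_mbuf_append() chains additional mbufs if the data won't fit.
         */
        om = os_msys_get_pkthdr(sizeof payload, 0);
        assert(om != NULL);
        rc = os_mbuf_append(om, payload, sizeof payload);
        assert(rc == 0);

        /* Read the data back out of the (possibly chained) packet. */
        rc = os_mbuf_copydata(om, 0, sizeof readback, readback);
        assert(rc == 0);

        /* Hand the packet to a consumer via an mqueue; the consumer is
         * woken through the supplied event queue.
         */
        rc = os_mqueue_init(&ex_mqueue, NULL);
        assert(rc == 0);
        rc = os_mqueue_put(&ex_mqueue, evq, om);
        assert(rc == 0);

        /* The consumer would later drain it with:
         *     om = os_mqueue_get(&ex_mqueue);
         *     ...
         *     os_mbuf_free_chain(om);
         */
    }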

http://git-wip-us.apache.org/repos/asf/incubator-mynewt-core/blob/6a7432f4/kernel/os/src/os_mempool.c
----------------------------------------------------------------------
diff --git a/kernel/os/src/os_mempool.c b/kernel/os/src/os_mempool.c
new file mode 100644
index 0000000..e940ba4
--- /dev/null
+++ b/kernel/os/src/os_mempool.c
@@ -0,0 +1,233 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#include "os/os.h"
+
+#include <string.h>
+#include <assert.h>
+
+#define OS_MEMPOOL_TRUE_BLOCK_SIZE(bsize)   OS_ALIGN(bsize, OS_ALIGNMENT)
+
+STAILQ_HEAD(, os_mempool) g_os_mempool_list = 
+    STAILQ_HEAD_INITIALIZER(g_os_mempool_list);
+
+/**
+ * os mempool init
+ *  
+ * Initialize a memory pool. 
+ * 
+ * @param mp            Pointer to the mempool to initialize
+ * @param blocks        The number of blocks in the pool
+ * @param block_size    The size of each block, in bytes.
+ * @param membuf        Pointer to memory to contain blocks.
+ * @param name          Name of the pool.
+ *
+ * @return              OS_OK on success; an os_error_t code on failure.
+ */
+os_error_t
+os_mempool_init(struct os_mempool *mp, int blocks, int block_size,
+                void *membuf, char *name)
+{
+    int true_block_size;
+    uint8_t *block_addr;
+    struct os_memblock *block_ptr;
+
+    /* Check for valid parameters */
+    if ((!mp) || (blocks < 0) || (block_size <= 0)) {
+        return OS_INVALID_PARM;
+    }
+
+    if ((!membuf) && (blocks != 0)) {
+        return OS_INVALID_PARM;
+    }
+
+    if (membuf != NULL) {
+        /* Blocks need to be sized properly and memory buffer should be
+         * aligned
+         */
+        if (((uint32_t)membuf & (OS_ALIGNMENT - 1)) != 0) {
+            return OS_MEM_NOT_ALIGNED;
+        }
+    }
+    true_block_size = OS_MEMPOOL_TRUE_BLOCK_SIZE(block_size);
+
+    /* Initialize the memory pool structure */
+    mp->mp_block_size = block_size;
+    mp->mp_num_free = blocks;
+    mp->mp_num_blocks = blocks;
+    mp->mp_membuf_addr = (uint32_t)membuf;
+    mp->name = name;
+    SLIST_FIRST(mp) = membuf;
+
+    /* Chain the memory blocks to the free list */
+    block_addr = (uint8_t *)membuf;
+    block_ptr = (struct os_memblock *)block_addr;
+    while (blocks > 1) {
+        block_addr += true_block_size;
+        SLIST_NEXT(block_ptr, mb_next) = (struct os_memblock *)block_addr;
+        block_ptr = (struct os_memblock *)block_addr;
+        --blocks;
+    }
+
+    /* Last one in the list should be NULL */
+    SLIST_NEXT(block_ptr, mb_next) = NULL;
+
+    STAILQ_INSERT_TAIL(&g_os_mempool_list, mp, mp_list);
+
+    return OS_OK;
+}
+
+/**
+ * Checks if a memory block was allocated from the specified mempool.
+ *
+ * @param mp                    The mempool to check as parent.
+ * @param block_addr            The memory block to check as child.
+ *
+ * @return                      0 if the block does not belong to the mempool;
+ *                              1 if the block does belong to the mempool.
+ */
+int
+os_memblock_from(struct os_mempool *mp, void *block_addr)
+{
+    uint32_t true_block_size;
+    uint32_t baddr32;
+    uint32_t end;
+
+    _Static_assert(sizeof block_addr == sizeof baddr32,
+                   "Pointer to void must be 32-bits.");
+
+    baddr32 = (uint32_t)block_addr;
+    true_block_size = OS_MEMPOOL_TRUE_BLOCK_SIZE(mp->mp_block_size);
+    end = mp->mp_membuf_addr + (mp->mp_num_blocks * true_block_size);
+
+    /* Check that the block is in the memory buffer range. */
+    if ((baddr32 < mp->mp_membuf_addr) || (baddr32 >= end)) {
+        return 0;
+    }
+
+    /* All freed blocks should be on true block size boundaries! */
+    if (((baddr32 - mp->mp_membuf_addr) % true_block_size) != 0) {
+        return 0;
+    }
+
+    return 1;
+}
+
+/**
+ * os memblock get 
+ *  
+ * Get a memory block from a memory pool 
+ * 
+ * @param mp Pointer to the memory pool
+ * 
+ * @return void* Pointer to block if available; NULL otherwise
+ */
+void *
+os_memblock_get(struct os_mempool *mp)
+{
+    os_sr_t sr;
+    struct os_memblock *block;
+
+    /* Make sure a valid memory pool was passed in */
+    block = NULL;
+    if (mp) {
+        OS_ENTER_CRITICAL(sr);
+        /* Check for any free */
+        if (mp->mp_num_free) {
+            /* Get a free block */
+            block = SLIST_FIRST(mp);
+
+            /* Set new free list head */
+            SLIST_FIRST(mp) = SLIST_NEXT(block, mb_next);
+
+            /* Decrement number free by 1 */
+            mp->mp_num_free--;
+        }
+        OS_EXIT_CRITICAL(sr);
+    }
+
+    return (void *)block;
+}
+
+/**
+ * os memblock put 
+ *  
+ * Puts the memory block back into the pool 
+ * 
+ * @param mp Pointer to memory pool
+ * @param block_addr Pointer to memory block
+ * 
+ * @return os_error_t 
+ */
+os_error_t
+os_memblock_put(struct os_mempool *mp, void *block_addr)
+{
+    os_sr_t sr;
+    struct os_memblock *block;
+
+    /* Make sure parameters are valid */
+    if ((mp == NULL) || (block_addr == NULL)) {
+        return OS_INVALID_PARM;
+    }
+
+    /* Check that the block we are freeing is a valid block! */
+    if (!os_memblock_from(mp, block_addr)) {
+        return OS_INVALID_PARM;
+    }
+
+    block = (struct os_memblock *)block_addr;
+    OS_ENTER_CRITICAL(sr);
+    
+    /* Chain current free list pointer to this block; make this block head */
+    SLIST_NEXT(block, mb_next) = SLIST_FIRST(mp);
+    SLIST_FIRST(mp) = block;
+
+    /* XXX: Should we check that the number free <= number blocks? */
+    /* Increment number free */
+    mp->mp_num_free++;
+
+    OS_EXIT_CRITICAL(sr);
+
+    return OS_OK;
+}
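
Continuing the hypothetical my_pool sketch from os_mempool_init() above, a
get/put round trip: os_memblock_get() never blocks, so a NULL return simply
means the pool is exhausted, and os_memblock_put() rejects any pointer that
fails the os_memblock_from() check.

    void
    my_pool_use(void)
    {
        os_error_t rc;
        void *blk;

        blk = os_memblock_get(&my_pool);
        if (blk == NULL) {
            return;             /* pool exhausted; get does not block */
        }

        /* ... use the fixed-size block ... */

        rc = os_memblock_put(&my_pool, blk);
        assert(rc == OS_OK);    /* OS_INVALID_PARM for a foreign pointer */
    }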
+
+
+struct os_mempool *
+os_mempool_info_get_next(struct os_mempool *mp, struct os_mempool_info *omi)
+{
+    struct os_mempool *cur;
+
+    if (mp == NULL) {
+        cur = STAILQ_FIRST(&g_os_mempool_list);
+    } else {
+        cur = STAILQ_NEXT(mp, mp_list);
+    }
+
+    if (cur == NULL) {
+        return (NULL);
+    }
+
+    omi->omi_block_size = cur->mp_block_size;
+    omi->omi_num_blocks = cur->mp_num_blocks;
+    omi->omi_num_free = cur->mp_num_free;
+    /* strncpy() does not NUL-terminate on truncation; terminate explicitly */
+    strncpy(omi->omi_name, cur->name, sizeof(omi->omi_name) - 1);
+    omi->omi_name[sizeof(omi->omi_name) - 1] = '\0';
+
+    return (cur);
+}
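
A sketch of walking the global pool list with this iterator: passing NULL
starts at the head, and each successful call fills the caller-supplied
os_mempool_info. The printf format assumes the omi_* counters are plain ints.

    #include <stdio.h>

    void
    my_pool_dump(void)
    {
        struct os_mempool_info omi;
        struct os_mempool *mp;

        mp = NULL;
        while ((mp = os_mempool_info_get_next(mp, &omi)) != NULL) {
            printf("%s: %d blocks of %d bytes, %d free\n",
                   omi.omi_name, omi.omi_num_blocks, omi.omi_block_size,
                   omi.omi_num_free);
        }
    }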
+

http://git-wip-us.apache.org/repos/asf/incubator-mynewt-core/blob/6a7432f4/kernel/os/src/os_msys_init.c
----------------------------------------------------------------------
diff --git a/kernel/os/src/os_msys_init.c b/kernel/os/src/os_msys_init.c
new file mode 100644
index 0000000..fe0d569
--- /dev/null
+++ b/kernel/os/src/os_msys_init.c
@@ -0,0 +1,147 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#include <assert.h>
+#include "sysinit/sysinit.h"
+#include "syscfg/syscfg.h"
+#include "os/os_mempool.h"
+#include "util/mem.h"
+#include "os_priv.h"
+
+#if MYNEWT_VAL(MSYS_1_BLOCK_COUNT) > 0
+#define SYSINIT_MSYS_1_MEMBLOCK_SIZE                \
+    OS_ALIGN(MYNEWT_VAL(MSYS_1_BLOCK_SIZE), 4)
+#define SYSINIT_MSYS_1_MEMPOOL_SIZE                 \
+    OS_MEMPOOL_SIZE(MYNEWT_VAL(MSYS_1_BLOCK_COUNT),  \
+                    SYSINIT_MSYS_1_MEMBLOCK_SIZE)
+static os_membuf_t os_msys_init_1_data[SYSINIT_MSYS_1_MEMPOOL_SIZE];
+static struct os_mbuf_pool os_msys_init_1_mbuf_pool;
+static struct os_mempool os_msys_init_1_mempool;
+#endif
+
+#if MYNEWT_VAL(MSYS_2_BLOCK_COUNT) > 0
+#define SYSINIT_MSYS_2_MEMBLOCK_SIZE                \
+    OS_ALIGN(MYNEWT_VAL(MSYS_2_BLOCK_SIZE), 4)
+#define SYSINIT_MSYS_2_MEMPOOL_SIZE                 \
+    OS_MEMPOOL_SIZE(MYNEWT_VAL(MSYS_2_BLOCK_COUNT),  \
+                    SYSINIT_MSYS_2_MEMBLOCK_SIZE)
+static os_membuf_t os_msys_init_2_data[SYSINIT_MSYS_2_MEMPOOL_SIZE];
+static struct os_mbuf_pool os_msys_init_2_mbuf_pool;
+static struct os_mempool os_msys_init_2_mempool;
+#endif
+
+#if MYNEWT_VAL(MSYS_3_BLOCK_COUNT) > 0
+#define SYSINIT_MSYS_3_MEMBLOCK_SIZE                \
+    OS_ALIGN(MYNEWT_VAL(MSYS_3_BLOCK_SIZE), 4)
+#define SYSINIT_MSYS_3_MEMPOOL_SIZE                 \
+    OS_MEMPOOL_SIZE(MYNEWT_VAL(MSYS_3_BLOCK_COUNT),  \
+                    SYSINIT_MSYS_3_MEMBLOCK_SIZE)
+static os_membuf_t os_msys_init_3_data[SYSINIT_MSYS_3_MEMPOOL_SIZE];
+static struct os_mbuf_pool os_msys_init_3_mbuf_pool;
+static struct os_mempool os_msys_init_3_mempool;
+#endif
+
+#if MYNEWT_VAL(MSYS_4_BLOCK_COUNT) > 0
+#define SYSINIT_MSYS_4_MEMBLOCK_SIZE                \
+    OS_ALIGN(MYNEWT_VAL(MSYS_4_BLOCK_SIZE), 4)
+#define SYSINIT_MSYS_4_MEMPOOL_SIZE                 \
+    OS_MEMPOOL_SIZE(MYNEWT_VAL(MSYS_4_BLOCK_COUNT),  \
+                    SYSINIT_MSYS_4_MEMBLOCK_SIZE)
+static os_membuf_t os_msys_init_4_data[SYSINIT_MSYS_4_MEMPOOL_SIZE];
+static struct os_mbuf_pool os_msys_init_4_mbuf_pool;
+static struct os_mempool os_msys_init_4_mempool;
+#endif
+
+#if MYNEWT_VAL(MSYS_5_BLOCK_COUNT) > 0
+#define SYSINIT_MSYS_5_MEMBLOCK_SIZE                \
+    OS_ALIGN(MYNEWT_VAL(MSYS_5_BLOCK_SIZE), 4)
+#define SYSINIT_MSYS_5_MEMPOOL_SIZE                 \
+    OS_MEMPOOL_SIZE(MYNEWT_VAL(MSYS_5_BLOCK_COUNT),  \
+                    SYSINIT_MSYS_5_MEMBLOCK_SIZE)
+
+static os_membuf_t os_msys_init_5_data[SYSINIT_MSYS_5_MEMPOOL_SIZE];
+static struct os_mbuf_pool os_msys_init_5_mbuf_pool;
+static struct os_mempool os_msys_init_5_mempool;
+#endif
+
+static void
+os_msys_init_once(void *data, struct os_mempool *mempool,
+                  struct os_mbuf_pool *mbuf_pool,
+                  int block_count, int block_size, char *name)
+{
+    int rc;
+
+    rc = mem_init_mbuf_pool(data, mempool, mbuf_pool, block_count, block_size,
+                            name);
+    SYSINIT_PANIC_ASSERT(rc == 0);
+
+    rc = os_msys_register(mbuf_pool);
+    SYSINIT_PANIC_ASSERT(rc == 0);
+}
+
+void
+os_msys_init(void)
+{
+    os_msys_reset();
+
+#if MYNEWT_VAL(MSYS_1_BLOCK_COUNT) > 0
+    os_msys_init_once(os_msys_init_1_data,
+                      &os_msys_init_1_mempool,
+                      &os_msys_init_1_mbuf_pool,
+                      MYNEWT_VAL(MSYS_1_BLOCK_COUNT),
+                      SYSINIT_MSYS_1_MEMBLOCK_SIZE,
+                      "msys_1");
+#endif
+
+#if MYNEWT_VAL(MSYS_2_BLOCK_COUNT) > 0
+    os_msys_init_once(os_msys_init_2_data,
+                      &os_msys_init_2_mempool,
+                      &os_msys_init_2_mbuf_pool,
+                      MYNEWT_VAL(MSYS_2_BLOCK_COUNT),
+                      SYSINIT_MSYS_2_MEMBLOCK_SIZE,
+                      "msys_2");
+#endif
+
+#if MYNEWT_VAL(MSYS_3_BLOCK_COUNT) > 0
+    os_msys_init_once(os_msys_init_3_data,
+                      &os_msys_init_3_mempool,
+                      &os_msys_init_3_mbuf_pool,
+                      MYNEWT_VAL(MSYS_3_BLOCK_COUNT),
+                      SYSINIT_MSYS_3_MEMBLOCK_SIZE,
+                      "msys_3");
+#endif
+
+#if MYNEWT_VAL(MSYS_4_BLOCK_COUNT) > 0
+    os_msys_init_once(os_msys_init_4_data,
+                      &os_msys_init_4_mempool,
+                      &os_msys_init_4_mbuf_pool,
+                      MYNEWT_VAL(MSYS_4_BLOCK_COUNT),
+                      SYSINIT_MSYS_4_MEMBLOCK_SIZE,
+                      "msys_4");
+#endif
+
+#if MYNEWT_VAL(MSYS_5_BLOCK_COUNT) > 0
+    os_msys_init_once(os_msys_init_5_data,
+                      &os_msys_init_5_mempool,
+                      &os_msys_init_5_mbuf_pool,
+                      MYNEWT_VAL(MSYS_5_BLOCK_COUNT),
+                      SYSINIT_MSYS_5_MEMBLOCK_SIZE,
+                      "msys_5");
+#endif
+}
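
The same two calls used by os_msys_init_once() can register an additional,
application-defined pool with msys; a hedged sketch follows (the app_* names
and sizes are hypothetical, and os_msys_register() is assumed to be declared
in the mbuf header):

    #include <assert.h>
    #include "os/os_mbuf.h"
    #include "os/os_mempool.h"
    #include "util/mem.h"

    #define APP_BLOCK_COUNT  16
    #define APP_BLOCK_SIZE   OS_ALIGN(128, 4)

    static os_membuf_t app_msys_data[
        OS_MEMPOOL_SIZE(APP_BLOCK_COUNT, APP_BLOCK_SIZE)];
    static struct os_mempool app_msys_mempool;
    static struct os_mbuf_pool app_msys_mbuf_pool;

    void
    app_msys_setup(void)
    {
        int rc;

        rc = mem_init_mbuf_pool(app_msys_data, &app_msys_mempool,
                                &app_msys_mbuf_pool, APP_BLOCK_COUNT,
                                APP_BLOCK_SIZE, "app_msys");
        assert(rc == 0);

        rc = os_msys_register(&app_msys_mbuf_pool);
        assert(rc == 0);
    }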

http://git-wip-us.apache.org/repos/asf/incubator-mynewt-core/blob/6a7432f4/kernel/os/src/os_mutex.c
----------------------------------------------------------------------
diff --git a/kernel/os/src/os_mutex.c b/kernel/os/src/os_mutex.c
new file mode 100644
index 0000000..6519292
--- /dev/null
+++ b/kernel/os/src/os_mutex.c
@@ -0,0 +1,236 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#include "os/os.h"
+#include <assert.h>
+
+/**
+ * os mutex init
+ *
+ * Initialize a mutex.
+ * 
+ * @param mu Pointer to mutex
+ * 
+ * @return os_error_t 
+ *      OS_INVALID_PARM     Mutex passed in was NULL.
+ *      OS_OK               no error.
+ */
+os_error_t
+os_mutex_init(struct os_mutex *mu)
+{
+    if (!mu) {
+        return OS_INVALID_PARM;
+    }
+
+    /* Initialize to 0 */
+    mu->mu_prio = 0;
+    mu->mu_level = 0;
+    mu->mu_owner = NULL;
+    SLIST_FIRST(&mu->mu_head) = NULL;
+
+    return OS_OK;
+}
+
+/**
+ * os mutex release
+ *  
+ * Release a mutex. 
+ * 
+ * @param mu Pointer to the mutex to be released
+ * 
+ * @return os_error_t 
+ *      OS_INVALID_PARM Mutex passed in was NULL.
+ *      OS_BAD_MUTEX    Mutex was not granted to current task (not owner).
+ *      OS_OK           No error
+ */
+os_error_t
+os_mutex_release(struct os_mutex *mu)
+{
+    int resched;
+    os_sr_t sr;
+    struct os_task *current;
+    struct os_task *rdy;
+
+    /* Check if OS is started */
+    if (!g_os_started) {
+        return (OS_NOT_STARTED);
+    }
+
+    /* Check for valid mutex */
+    if (!mu) {
+        return OS_INVALID_PARM;
+    }
+
+    /* We better own this mutex! */
+    current = os_sched_get_current_task();
+    if ((mu->mu_level == 0) || (mu->mu_owner != current)) {
+        return (OS_BAD_MUTEX);
+    }
+
+    /* Decrement nesting level by 1. If not zero, still nested (don't release!) */
+    --mu->mu_level;
+    if (mu->mu_level != 0) {
+        return (OS_OK);
+    }
+
+    OS_ENTER_CRITICAL(sr);
+
+    /* Restore owner task's priority; resort list if different  */
+    if (current->t_prio != mu->mu_prio) {
+        current->t_prio = mu->mu_prio;
+        os_sched_resort(current);
+    }
+
+    /* Check if tasks are waiting for the mutex */
+    rdy = SLIST_FIRST(&mu->mu_head);
+    if (rdy) {
+        /* There is one waiting. Wake it up */
+        assert(rdy->t_obj);
+        os_sched_wakeup(rdy);
+
+        /* Set mutex internals */
+        mu->mu_level = 1;
+        mu->mu_prio = rdy->t_prio;
+    }
+
+    /* Set new owner of mutex (or NULL if not owned) */
+    mu->mu_owner = rdy;
+
+    /* Do we need to re-schedule? */
+    resched = 0;
+    rdy = os_sched_next_task();
+    if (rdy != current) {
+        resched = 1;
+    }
+    OS_EXIT_CRITICAL(sr);
+
+    /* Re-schedule if needed */
+    if (resched) {
+        os_sched(rdy);
+    }
+
+    return OS_OK;
+}
+
+/**
+ * os mutex pend 
+ *  
+ * Pend (wait) for a mutex. 
+ * 
+ * @param mu Pointer to mutex.
+ * @param timeout Timeout, in os ticks. A timeout of 0 means do
+ *                not wait if the mutex is unavailable. A timeout of
+ *                0xFFFFFFFF (OS_TIMEOUT_NEVER) means wait forever.
+ *
+ * @return os_error_t
+ *      OS_INVALID_PARM     Mutex passed in was NULL.
+ *      OS_TIMEOUT          Mutex was owned by another task and the
+ *                          wait timed out (or timeout was 0).
+ *      OS_OK               no error.
+ */ 
+os_error_t
+os_mutex_pend(struct os_mutex *mu, uint32_t timeout)
+{
+    os_sr_t sr;
+    os_error_t rc;
+    struct os_task *current;
+    struct os_task *entry;
+    struct os_task *last;
+
+    /* OS must be started when calling this function */
+    if (!g_os_started) {
+        return (OS_NOT_STARTED);
+    }
+
+    /* Check for valid mutex */
+    if (!mu) {
+        return OS_INVALID_PARM;
+    }
+
+    OS_ENTER_CRITICAL(sr);
+
+    /* Is this owned? */
+    current = os_sched_get_current_task();
+    if (mu->mu_level == 0) {
+        mu->mu_owner = current;
+        mu->mu_prio  = current->t_prio;
+        mu->mu_level = 1;
+        OS_EXIT_CRITICAL(sr);
+        return OS_OK;
+    }
+
+    /* Are we owner? */
+    if (mu->mu_owner == current) {
+        ++mu->mu_level;
+        OS_EXIT_CRITICAL(sr);
+        return OS_OK;
+    }
+
+    /* Mutex is not owned by us. If timeout is 0, return immediately */
+    if (timeout == 0) {
+        OS_EXIT_CRITICAL(sr);
+        return OS_TIMEOUT;
+    }
+
+    /* Change priority of owner if needed */
+    if (mu->mu_owner->t_prio > current->t_prio) {
+        mu->mu_owner->t_prio = current->t_prio;
+        os_sched_resort(mu->mu_owner);
+    }
+
+    /* Link current task to tasks waiting for mutex */
+    last = NULL;
+    if (!SLIST_EMPTY(&mu->mu_head)) {
+        /* Insert in priority order */
+        SLIST_FOREACH(entry, &mu->mu_head, t_obj_list) {
+            if (current->t_prio < entry->t_prio) { 
+                break;
+            }
+            last = entry;
+        }
+    }
+
+    if (last) {
+        SLIST_INSERT_AFTER(last, current, t_obj_list);
+    } else {
+        SLIST_INSERT_HEAD(&mu->mu_head, current, t_obj_list);
+    }
+
+    /* Set mutex pointer in task */
+    current->t_obj = mu;
+    current->t_flags |= OS_TASK_FLAG_MUTEX_WAIT;
+    os_sched_sleep(current, timeout);
+    OS_EXIT_CRITICAL(sr);
+
+    os_sched(NULL);
+
+    OS_ENTER_CRITICAL(sr);
+    current->t_flags &= ~OS_TASK_FLAG_MUTEX_WAIT;
+    OS_EXIT_CRITICAL(sr);
+
+    /* If we are owner we did not time out. */
+    if (mu->mu_owner == current) {
+        rc = OS_OK; 
+    } else {
+        rc = OS_TIMEOUT;
+    }
+
+    return rc;
+}
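
A hedged usage sketch (the my_* names are hypothetical): initialize the mutex
once, then bracket each critical section with pend/release. OS_WAIT_FOREVER
is the same wait-forever timeout value os_sanity.c uses below.

    #include <assert.h>
    #include "os/os.h"

    static struct os_mutex my_lock;
    static int my_shared_count;

    void
    my_pkg_init(void)
    {
        assert(os_mutex_init(&my_lock) == OS_OK);
    }

    void
    my_task_work(void)
    {
        os_error_t rc;

        rc = os_mutex_pend(&my_lock, OS_WAIT_FOREVER);
        assert(rc == OS_OK);

        my_shared_count++;      /* critical section */

        rc = os_mutex_release(&my_lock);
        assert(rc == OS_OK);    /* OS_BAD_MUTEX if we were not the owner */
    }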
+

http://git-wip-us.apache.org/repos/asf/incubator-mynewt-core/blob/6a7432f4/kernel/os/src/os_priv.h
----------------------------------------------------------------------
diff --git a/kernel/os/src/os_priv.h b/kernel/os/src/os_priv.h
new file mode 100644
index 0000000..7745a0a
--- /dev/null
+++ b/kernel/os/src/os_priv.h
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#ifndef H_OS_PRIV_
+#define H_OS_PRIV_
+
+#include "os/queue.h"
+
+TAILQ_HEAD(os_task_list, os_task);
+TAILQ_HEAD(os_callout_list, os_callout);
+STAILQ_HEAD(os_task_stailq, os_task);
+
+extern struct os_task g_idle_task;
+extern struct os_task_list g_os_run_list;
+extern struct os_task_list g_os_sleep_list;
+extern struct os_task_stailq g_os_task_list;
+extern struct os_task *g_current_task;
+extern struct os_callout_list g_callout_list;
+
+void os_msys_init(void);
+
+#endif

http://git-wip-us.apache.org/repos/asf/incubator-mynewt-core/blob/6a7432f4/kernel/os/src/os_sanity.c
----------------------------------------------------------------------
diff --git a/kernel/os/src/os_sanity.c b/kernel/os/src/os_sanity.c
new file mode 100644
index 0000000..f0aa5d2
--- /dev/null
+++ b/kernel/os/src/os_sanity.c
@@ -0,0 +1,238 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#include <assert.h>
+#include <string.h>
+
+#include "os/os.h"
+
+SLIST_HEAD(, os_sanity_check) g_os_sanity_check_list =
+    SLIST_HEAD_INITIALIZER(g_os_sanity_check_list);
+
+struct os_mutex g_os_sanity_check_mu;
+
+/**
+ * Initialize a sanity check
+ *
+ * @param sc The sanity check to initialize
+ *
+ * @return 0 on success, error code on failure.
+ */
+int
+os_sanity_check_init(struct os_sanity_check *sc)
+{
+    memset(sc, 0, sizeof(*sc));
+
+    return (0);
+}
+
+/**
+ * Lock the sanity check list
+ *
+ * @return 0 on success, error code on failure.
+ */
+static int
+os_sanity_check_list_lock(void)
+{
+    int rc;
+
+    if (!g_os_started) {
+        return (0);
+    }
+
+    rc = os_mutex_pend(&g_os_sanity_check_mu, OS_WAIT_FOREVER);
+    if (rc != OS_OK) {
+        goto err;
+    }
+
+    return (0);
+err:
+    return (rc);
+}
+
+/**
+ * Unlock the sanity check list
+ *
+ * @return 0 on success, error code on failure
+ */
+static int
+os_sanity_check_list_unlock(void)
+{
+    int rc;
+
+    if (!g_os_started) {
+        return (0);
+    }
+
+    rc = os_mutex_release(&g_os_sanity_check_mu);
+    if (rc != 0) {
+        goto err;
+    }
+
+    return (0);
+err:
+    return (rc);
+}
+
+/**
+ * Provide a "task checkin" for the sanity task.
+ *
+ * @param t The task to check in
+ *
+ * @return 0 on success, error code on failure
+ */
+int
+os_sanity_task_checkin(struct os_task *t)
+{
+    int rc;
+
+    if (t == NULL) {
+        t = os_sched_get_current_task();
+    }
+
+    rc = os_sanity_check_reset(&t->t_sanity_check);
+    if (rc != OS_OK) {
+        goto err;
+    }
+
+    return (0);
+err:
+    return (rc);
+}
+
+
+/**
+ * Register a sanity check
+ *
+ * @param sc The sanity check to register
+ *
+ * @return 0 on success, error code on failure
+ */
+int
+os_sanity_check_register(struct os_sanity_check *sc)
+{
+    int rc;
+
+    rc = os_sanity_check_list_lock();
+    if (rc != OS_OK) {
+        goto err;
+    }
+
+    SLIST_INSERT_HEAD(&g_os_sanity_check_list, sc, sc_next);
+
+    rc = os_sanity_check_list_unlock();
+    if (rc != OS_OK) {
+        goto err;
+    }
+
+    return (0);
+err:
+    return (rc);
+}
+
+
+/**
+ * Reset the os sanity check, so that it doesn't trip up the
+ * sanity timer.
+ *
+ * @param sc The sanity check to reset
+ *
+ * @return 0 on success, error code on failure
+ */
+int
+os_sanity_check_reset(struct os_sanity_check *sc)
+{
+    int rc;
+
+    rc = os_sanity_check_list_lock();
+    if (rc != OS_OK) {
+        goto err;
+    }
+
+    sc->sc_checkin_last = os_time_get();
+
+    rc = os_sanity_check_list_unlock();
+    if (rc != OS_OK) {
+        goto err;
+    }
+
+    return (0);
+err:
+    return (rc);
+}
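
A hedged sketch of registering a custom check. The direct field assignments
are an assumption based on the fields os_sanity_run() reads below, and
my_subsystem_ok() stands in for a real health probe.

    #include <assert.h>
    #include "os/os.h"

    static struct os_sanity_check my_sc;

    /* Hypothetical health probe; replace with a real check. */
    static int
    my_subsystem_ok(void)
    {
        return 1;
    }

    static int
    my_sc_func(struct os_sanity_check *sc, void *arg)
    {
        /* OS_OK refreshes the check-in time; any other return value makes
         * os_sanity_run() fall through to the check-in interval test. */
        return my_subsystem_ok() ? OS_OK : OS_ERROR;
    }

    void
    my_sanity_setup(void)
    {
        int rc;

        rc = os_sanity_check_init(&my_sc);
        assert(rc == 0);

        my_sc.sc_func = my_sc_func;
        my_sc.sc_arg = NULL;
        my_sc.sc_checkin_itvl = 10 * OS_TICKS_PER_SEC;

        rc = os_sanity_check_register(&my_sc);
        assert(rc == 0);
    }

Tasks that only need a watchdog-style check-in can leave sc_func NULL and
periodically call os_sanity_task_checkin(NULL), which resets the sanity
check embedded in the current task.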
+
+/*
+ * Called from the IDLE task context, every MYNEWT_VAL(SANITY_INTERVAL) msecs.
+ *
+ * Goes through the sanity check list and performs the sanity checks.  If any
+ * of these checks fails, or a task has not checked in within its check-in
+ * interval, the system asserts (resetting the processor).
+ */
+void
+os_sanity_run(void)
+{
+    struct os_sanity_check *sc;
+    int rc;
+
+    rc = os_sanity_check_list_lock();
+    if (rc != 0) {
+        assert(0);
+    }
+
+    SLIST_FOREACH(sc, &g_os_sanity_check_list, sc_next) {
+        rc = OS_OK;
+
+        if (sc->sc_func) {
+            rc = sc->sc_func(sc, sc->sc_arg);
+            if (rc == OS_OK) {
+                sc->sc_checkin_last = os_time_get();
+                continue;
+            }
+        }
+
+        if (OS_TIME_TICK_GT(os_time_get(),
+                    sc->sc_checkin_last + sc->sc_checkin_itvl)) {
+            assert(0);
+        }
+    }
+
+    rc = os_sanity_check_list_unlock();
+    if (rc != 0) {
+        assert(0);
+    }
+}
+
+/**
+ * Initialize the sanity task and mutex.
+ *
+ * @return 0 on success, error code on failure
+ */
+int
+os_sanity_init(void)
+{
+    int rc;
+
+    rc = os_mutex_init(&g_os_sanity_check_mu);
+    if (rc != 0) {
+        goto err;
+    }
+
+    return (0);
+err:
+    return (rc);
+}

http://git-wip-us.apache.org/repos/asf/incubator-mynewt-core/blob/6a7432f4/kernel/os/src/os_sched.c
----------------------------------------------------------------------
diff --git a/kernel/os/src/os_sched.c b/kernel/os/src/os_sched.c
new file mode 100644
index 0000000..3d81dba
--- /dev/null
+++ b/kernel/os/src/os_sched.c
@@ -0,0 +1,330 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#include "os/os.h"
+#include "os/queue.h"
+
+#include <assert.h>
+
+TAILQ_HEAD(, os_task) g_os_run_list = TAILQ_HEAD_INITIALIZER(g_os_run_list); 
+
+TAILQ_HEAD(, os_task) g_os_sleep_list = TAILQ_HEAD_INITIALIZER(g_os_sleep_list); 
+
+struct os_task *g_current_task; 
+
+extern os_time_t g_os_time;
+os_time_t g_os_last_ctx_sw_time;
+
+/**
+ * os sched insert
+ *  
+ * Insert a task into the scheduler list. This causes the task to be evaluated
+ * for running when os_sched is called. 
+ * 
+ * @param t     Pointer to task to insert in run list
+ * 
+ * @return int  OS_OK: task was inserted into run list 
+ *              OS_EINVAL: Task was not in ready state. 
+ */
+os_error_t
+os_sched_insert(struct os_task *t) 
+{
+    struct os_task *entry; 
+    os_sr_t sr; 
+    os_error_t rc;
+
+    if (t->t_state != OS_TASK_READY) {
+        rc = OS_EINVAL;
+        goto err;
+    }
+
+    entry = NULL;
+    OS_ENTER_CRITICAL(sr); 
+    TAILQ_FOREACH(entry, &g_os_run_list, t_os_list) {
+        if (t->t_prio < entry->t_prio) { 
+            break;
+        }
+    }
+    if (entry) {
+        TAILQ_INSERT_BEFORE(entry, (struct os_task *) t, t_os_list);
+    } else {
+        TAILQ_INSERT_TAIL(&g_os_run_list, (struct os_task *) t, t_os_list);
+    }
+    OS_EXIT_CRITICAL(sr);
+
+    return (0);
+err:
+    return (rc);
+}
+
+void
+os_sched_ctx_sw_hook(struct os_task *next_t)
+{
+    if (g_current_task == next_t) {
+        return;
+    }
+
+    next_t->t_ctx_sw_cnt++;
+    g_current_task->t_run_time += g_os_time - g_os_last_ctx_sw_time;
+    g_os_last_ctx_sw_time = g_os_time;
+}
+
+
+/**
+ * os sched get current task 
+ *  
+ * Returns the currently running task. Note that this task may or may not be 
+ * the highest priority task ready to run. 
+ * 
+ * 
+ * @return struct os_task* 
+ */
+struct os_task * 
+os_sched_get_current_task(void)
+{
+    return (g_current_task);
+}
+
+/**
+ * os sched set current task 
+ *  
+ * Sets the currently running task to 't'. Note that this function simply sets 
+ * the global variable holding the currently running task. It does not perform 
+ * a context switch or change the os run or sleep list. 
+ * 
+ * @param t Pointer to currently running task.
+ */
+void 
+os_sched_set_current_task(struct os_task *t) 
+{
+    g_current_task = t;
+}
+
+/**
+ * os sched 
+ *  
+ * Performs a context switch. When called, it will either find the highest 
+ * priority task ready to run if next_t is NULL (i.e. the head of the os run 
+ * list) or will schedule next_t as the task to run.
+ * 
+ * @param next_t Task to run
+ */
+void
+os_sched(struct os_task *next_t)
+{
+    os_sr_t sr;
+
+    OS_ENTER_CRITICAL(sr);
+
+    if (!next_t) {
+        next_t = os_sched_next_task();
+    }
+
+    if (next_t != g_current_task) {
+        os_arch_ctx_sw(next_t);
+    }
+
+    OS_EXIT_CRITICAL(sr);
+}
+
+/**
+ * os sched sleep 
+ *  
+ * Removes the task from the run list and puts it on the sleep list. 
+ * 
+ * @param t Task to put to sleep
+ * @param nticks Number of ticks to put task to sleep
+ * 
+ * @return int
+ *  
+ * NOTE: must be called with interrupts disabled! This function does not call 
+ * the scheduler 
+ */
+int 
+os_sched_sleep(struct os_task *t, os_time_t nticks) 
+{
+    struct os_task *entry;
+
+    entry = NULL; 
+
+    TAILQ_REMOVE(&g_os_run_list, t, t_os_list);
+    t->t_state = OS_TASK_SLEEP;
+    t->t_next_wakeup = os_time_get() + nticks;
+    if (nticks == OS_TIMEOUT_NEVER) {
+        t->t_flags |= OS_TASK_FLAG_NO_TIMEOUT;
+        TAILQ_INSERT_TAIL(&g_os_sleep_list, t, t_os_list); 
+    } else {
+        TAILQ_FOREACH(entry, &g_os_sleep_list, t_os_list) {
+            if ((entry->t_flags & OS_TASK_FLAG_NO_TIMEOUT) ||
+                    OS_TIME_TICK_GT(entry->t_next_wakeup, t->t_next_wakeup)) {
+                break;
+            }
+        }
+        if (entry) {
+            TAILQ_INSERT_BEFORE(entry, t, t_os_list); 
+        } else {
+            TAILQ_INSERT_TAIL(&g_os_sleep_list, t, t_os_list); 
+        }
+    }
+
+    return (0);
+}
+
+/**
+ * os sched wakeup 
+ *  
+ * Called to wake up a task. Waking up a task consists of setting the task state
+ * to READY and moving it from the sleep list to the run list. 
+ * 
+ * @param t     Pointer to task to wake up. 
+ * 
+ * @return int 
+ *  
+ * NOTE: This function must be called with interrupts disabled. 
+ */
+int 
+os_sched_wakeup(struct os_task *t) 
+{
+    struct os_task_obj *os_obj;
+
+    assert(t->t_state == OS_TASK_SLEEP);
+
+    /* Remove self from object list if waiting on one */
+    if (t->t_obj) {
+        os_obj = (struct os_task_obj *)t->t_obj;
+        assert(!SLIST_EMPTY(&os_obj->obj_head));
+        SLIST_REMOVE(&os_obj->obj_head, t, os_task, t_obj_list);
+        SLIST_NEXT(t, t_obj_list) = NULL;
+        t->t_obj = NULL; 
+    }
+
+    /* Remove task from sleep list */
+    t->t_state = OS_TASK_READY;
+    t->t_next_wakeup = 0;
+    t->t_flags &= ~OS_TASK_FLAG_NO_TIMEOUT;
+    TAILQ_REMOVE(&g_os_sleep_list, t, t_os_list);
+    os_sched_insert(t);
+
+    return (0);
+}
+
+/**
+ * os sched os timer exp 
+ *  
+ * Called when the OS tick timer expires. Search the sleep list for any tasks 
+ * that need waking up. This occurs when the current OS time exceeds the next 
+ * wakeup time stored in the task. Any tasks that need waking up will be 
+ * removed from the sleep list and added to the run list. 
+ * 
+ */
+void
+os_sched_os_timer_exp(void)
+{
+    struct os_task *t;
+    struct os_task *next;
+    os_time_t now; 
+    os_sr_t sr;
+
+    now = os_time_get();
+
+    OS_ENTER_CRITICAL(sr);
+
+    /*
+     * Wakeup any tasks that have their sleep timer expired
+     */
+    t = TAILQ_FIRST(&g_os_sleep_list);
+    while (t) {
+        /* If task waiting forever, do not check next wakeup time */
+        if (t->t_flags & OS_TASK_FLAG_NO_TIMEOUT) {
+            break;
+        }
+        next = TAILQ_NEXT(t, t_os_list);
+        if (OS_TIME_TICK_GEQ(now, t->t_next_wakeup)) {
+            os_sched_wakeup(t);
+        } else {
+            break;
+        }
+        t = next;
+    }
+
+    OS_EXIT_CRITICAL(sr); 
+}
+
+/*
+ * Return the number of ticks until the first sleep timer expires. If there
+ * are no such tasks, return OS_TIMEOUT_NEVER instead.
+ */
+os_time_t
+os_sched_wakeup_ticks(os_time_t now)
+{
+    os_time_t rt;
+    struct os_task *t;
+
+    OS_ASSERT_CRITICAL();
+
+    t = TAILQ_FIRST(&g_os_sleep_list);
+    if (t == NULL || (t->t_flags & OS_TASK_FLAG_NO_TIMEOUT)) {
+        rt = OS_TIMEOUT_NEVER;
+    } else if (OS_TIME_TICK_GEQ(t->t_next_wakeup, now)) {
+        rt = t->t_next_wakeup - now;   
+    } else {
+        rt = 0;     /* wakeup time was in the past */
+    }
+    return (rt);
+}
+
+/**
+ * os sched next task 
+ *  
+ * Returns the task that we should be running. This is the task at the head 
+ * of the run list. 
+ *  
+ * NOTE: if you want to guarantee that the os run list does not change after 
+ * calling this function you have to call it with interrupts disabled. 
+ * 
+ * @return struct os_task* 
+ */
+struct os_task *  
+os_sched_next_task(void) 
+{
+    return (TAILQ_FIRST(&g_os_run_list));
+}
+
+/**
+ * os sched resort 
+ *  
+ * Resort a task that is in the ready list as its priority has 
+ * changed. If the task is not in the ready state, there is 
+ * nothing to do. 
+ * 
+ * @param t Pointer to task to insert back into ready to run 
+ *          list.
+ *  
+ * NOTE: this function expects interrupts to be disabled so they 
+ * are not disabled here. 
+ */
+void 
+os_sched_resort(struct os_task *t) 
+{
+    if (t->t_state == OS_TASK_READY) {
+        TAILQ_REMOVE(&g_os_run_list, t, t_os_list);
+        os_sched_insert(t);
+    }
+}
+