Posted to commits@mynewt.apache.org by GitBox <gi...@apache.org> on 2018/01/15 07:44:31 UTC

[GitHub] ccollins476ad closed pull request #686: BLE Host flow control

ccollins476ad closed pull request #686: BLE Host flow control
URL: https://github.com/apache/mynewt-core/pull/686

This is a PR merged from a forked repository.
As GitHub hides the original diff on merge, it is displayed below for
the sake of provenance:

diff --git a/kernel/os/include/os/os_mempool.h b/kernel/os/include/os/os_mempool.h
index 51b9f7b3e..2de871d3e 100644
--- a/kernel/os/include/os/os_mempool.h
+++ b/kernel/os/include/os/os_mempool.h
@@ -44,16 +44,54 @@ struct os_memblock {
 
 /* Memory pool */
 struct os_mempool {
-    int mp_block_size;          /* Size of the memory blocks, in bytes. */
-    int mp_num_blocks;          /* The number of memory blocks. */
-    int mp_num_free;            /* The number of free blocks left */
-    int mp_min_free;            /* The lowest number of free blocks seen */
+    uint32_t mp_block_size;     /* Size of the memory blocks, in bytes. */
+    uint16_t mp_num_blocks;     /* The number of memory blocks. */
+    uint16_t mp_num_free;       /* The number of free blocks left */
+    uint16_t mp_min_free;       /* The lowest number of free blocks seen */
+    uint8_t mp_flags;           /* Bitmap of OS_MEMPOOL_F_[...] values. */
     uint32_t mp_membuf_addr;    /* Address of memory buffer used by pool */
     STAILQ_ENTRY(os_mempool) mp_list;
     SLIST_HEAD(,os_memblock);   /* Pointer to list of free blocks */
     char *name;                 /* Name for memory block */
 };
 
+/**
+ * Indicates an extended mempool.  Address can be safely cast to
+ * (struct os_mempool_ext *).
+ */
+#define OS_MEMPOOL_F_EXT        0x01
+
+struct os_mempool_ext;
+
+/**
+ * Block put callback function.  If configured, this callback gets executed
+ * whenever a block is freed to the corresponding extended mempool.  Note: The
+ * os_memblock_put() function calls this callback instead of freeing the block
+ * itself.  Therefore, it is the callback's responsibility to free the block
+ * via a call to os_memblock_put_from_cb().
+ *
+ * @param ome                   The extended mempool that a block is being
+ *                                  freed back to.
+ * @param data                  The block being freed.
+ * @param arg                   Optional argument configured along with the
+ *                                  callback.
+ *
+ * @return                      Indicates whether the block was successfully
+ *                                  freed.  A non-zero value should only be
+ *                                  returned if the block was not successfully
+ *                                  released back to its pool.
+ */
+typedef os_error_t os_mempool_put_fn(struct os_mempool_ext *ome, void *data,
+                                     void *arg);
+
+struct os_mempool_ext {
+    struct os_mempool mpe_mp;
+
+    /* Callback that is executed immediately when a block is freed. */
+    os_mempool_put_fn *mpe_put_cb;
+    void *mpe_put_arg;
+};
+
 #define OS_MEMPOOL_INFO_NAME_LEN (32)
 
 struct os_mempool_info {
@@ -85,8 +123,10 @@ typedef uint64_t os_membuf_t;
     (sizeof (os_membuf_t) * OS_MEMPOOL_SIZE((n), (blksize)))
 
 /* Initialize a memory pool */
-os_error_t os_mempool_init(struct os_mempool *mp, int blocks, int block_size,
-                           void *membuf, char *name);
+os_error_t os_mempool_init(struct os_mempool *mp, uint16_t blocks,
+                           uint32_t block_size, void *membuf, char *name);
+os_error_t os_mempool_ext_init(struct os_mempool_ext *mpe, uint16_t blocks,
+                               uint32_t block_size, void *membuf, char *name);
 
 /* Performs an integrity check of the specified mempool. */
 bool os_mempool_is_sane(const struct os_mempool *mp);
@@ -97,6 +137,8 @@ int os_memblock_from(const struct os_mempool *mp, const void *block_addr);
 /* Get a memory block from the pool */
 void *os_memblock_get(struct os_mempool *mp);
 
+os_error_t os_memblock_put_from_cb(struct os_mempool *mp, void *block_addr);
+
 /* Put the memory block back into the pool */
 os_error_t os_memblock_put(struct os_mempool *mp, void *block_addr);
 
diff --git a/kernel/os/src/os_mempool.c b/kernel/os/src/os_mempool.c
index 6ce3770c6..214482b34 100644
--- a/kernel/os/src/os_mempool.c
+++ b/kernel/os/src/os_mempool.c
@@ -82,7 +82,7 @@ os_mempool_poison_check(void *start, int sz)
  * @return os_error_t
  */
 os_error_t
-os_mempool_init(struct os_mempool *mp, int blocks, int block_size,
+os_mempool_init(struct os_mempool *mp, uint16_t blocks, uint32_t block_size,
                 void *membuf, char *name)
 {
     int true_block_size;
@@ -112,6 +112,7 @@ os_mempool_init(struct os_mempool *mp, int blocks, int block_size,
     mp->mp_block_size = block_size;
     mp->mp_num_free = blocks;
     mp->mp_min_free = blocks;
+    mp->mp_flags = 0;
     mp->mp_num_blocks = blocks;
     mp->mp_membuf_addr = (uint32_t)membuf;
     mp->name = name;
@@ -137,6 +138,37 @@ os_mempool_init(struct os_mempool *mp, int blocks, int block_size,
     return OS_OK;
 }
 
+/**
+ * Initializes an extended memory pool.  Extended attributes (e.g., callbacks)
+ * are not specified when this function is called; they are assigned manually
+ * after initialization.
+ *
+ * @param mpe           The extended memory pool to initialize.
+ * @param blocks        The number of blocks in the pool.
+ * @param block_size    The size of each block, in bytes.
+ * @param membuf        Pointer to memory to contain blocks.
+ * @param name          Name of the pool.
+ *
+ * @return os_error_t
+ */
+os_error_t
+os_mempool_ext_init(struct os_mempool_ext *mpe, uint16_t blocks,
+                    uint32_t block_size, void *membuf, char *name)
+{
+    int rc;
+
+    rc = os_mempool_init(&mpe->mpe_mp, blocks, block_size, membuf, name);
+    if (rc != 0) {
+        return rc;
+    }
+
+    mpe->mpe_mp.mp_flags = OS_MEMPOOL_F_EXT;
+    mpe->mpe_put_cb = NULL;
+    mpe->mpe_put_arg = NULL;
+
+    return 0;
+}
+
 /**
  * Performs an integrity check of the specified mempool.  This function
  * attempts to detect memory corruption in the specified memory pool.
@@ -240,6 +272,42 @@ os_memblock_get(struct os_mempool *mp)
     return (void *)block;
 }
 
+/**
+ * os memblock put from cb
+ *
+ * Puts the memory block back into the pool, ignoring the put callback, if any.
+ * This function should only be called from a put callback to free a block
+ * without causing infinite recursion.
+ *
+ * @param mp Pointer to memory pool
+ * @param block_addr Pointer to memory block
+ *
+ * @return os_error_t
+ */
+os_error_t
+os_memblock_put_from_cb(struct os_mempool *mp, void *block_addr)
+{
+    os_sr_t sr;
+    struct os_memblock *block;
+
+    os_mempool_poison(block_addr, OS_MEMPOOL_TRUE_BLOCK_SIZE(mp));
+
+    block = (struct os_memblock *)block_addr;
+    OS_ENTER_CRITICAL(sr);
+
+    /* Chain current free list pointer to this block; make this block head */
+    SLIST_NEXT(block, mb_next) = SLIST_FIRST(mp);
+    SLIST_FIRST(mp) = block;
+
+    /* XXX: Should we check that the number free <= number blocks? */
+    /* Increment number free */
+    mp->mp_num_free++;
+
+    OS_EXIT_CRITICAL(sr);
+
+    return OS_OK;
+}
+
 /**
  * os memblock put
  *
@@ -253,8 +321,8 @@ os_memblock_get(struct os_mempool *mp)
 os_error_t
 os_memblock_put(struct os_mempool *mp, void *block_addr)
 {
-    os_sr_t sr;
-    struct os_memblock *block;
+    struct os_mempool_ext *mpe;
+    int rc;
 
     /* Make sure parameters are valid */
     if ((mp == NULL) || (block_addr == NULL)) {
@@ -272,24 +340,22 @@ os_memblock_put(struct os_mempool *mp, void *block_addr)
         assert(block != (struct os_memblock *)block_addr);
     }
 #endif
-    os_mempool_poison(block_addr, OS_MEMPOOL_TRUE_BLOCK_SIZE(mp));
-    block = (struct os_memblock *)block_addr;
-    OS_ENTER_CRITICAL(sr);
-
-    /* Chain current free list pointer to this block; make this block head */
-    SLIST_NEXT(block, mb_next) = SLIST_FIRST(mp);
-    SLIST_FIRST(mp) = block;
-
-    /* XXX: Should we check that the number free <= number blocks? */
-    /* Increment number free */
-    mp->mp_num_free++;
 
-    OS_EXIT_CRITICAL(sr);
+    /* If this is an extended mempool with a put callback, call the callback
+     * instead of freeing the block directly.
+     */
+    if (mp->mp_flags & OS_MEMPOOL_F_EXT) {
+        mpe = (struct os_mempool_ext *)mp;
+        if (mpe->mpe_put_cb != NULL) {
+            rc = mpe->mpe_put_cb(mpe, block_addr, mpe->mpe_put_arg);
+            return rc;
+        }
+    }
 
-    return OS_OK;
+    /* No callback; free the block. */
+    return os_memblock_put_from_cb(mp, block_addr);
 }
 
-
 struct os_mempool *
 os_mempool_info_get_next(struct os_mempool *mp, struct os_mempool_info *omi)
 {
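
For context, here is a minimal usage sketch of the extended mempool API added
above.  The pool, buffer, and callback names (my_pool, my_pool_mem, my_put_cb)
are hypothetical and not part of this patch:

    #include <assert.h>
    #include "os/os_mempool.h"

    #define MY_BLOCK_COUNT      8
    #define MY_BLOCK_SIZE       64

    static struct os_mempool_ext my_pool;
    static uint8_t my_pool_mem[OS_MEMPOOL_BYTES(MY_BLOCK_COUNT, MY_BLOCK_SIZE)];

    /* Runs in place of the normal free; it must release the block itself via
     * os_memblock_put_from_cb() to avoid re-entering this callback.
     */
    static os_error_t
    my_put_cb(struct os_mempool_ext *mpe, void *block, void *arg)
    {
        /* ... application bookkeeping goes here ... */

        return os_memblock_put_from_cb(&mpe->mpe_mp, block);
    }

    static void
    my_pool_example(void)
    {
        void *block;
        int rc;

        rc = os_mempool_ext_init(&my_pool, MY_BLOCK_COUNT, MY_BLOCK_SIZE,
                                 my_pool_mem, "my_pool");
        assert(rc == 0);

        /* Extended attributes are assigned after initialization. */
        my_pool.mpe_put_cb = my_put_cb;
        my_pool.mpe_put_arg = NULL;

        block = os_memblock_get(&my_pool.mpe_mp);
        assert(block != NULL);

        /* Dispatches to my_put_cb, which performs the actual free. */
        rc = os_memblock_put(&my_pool.mpe_mp, block);
        assert(rc == 0);
    }

The new unit tests below (os_mempool_test_ext_basic and
os_mempool_test_ext_nested) exercise the same pattern.
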
diff --git a/kernel/os/test/src/mempool_test.c b/kernel/os/test/src/mempool_test.c
index 10f4a0ace..af17e88c1 100644
--- a/kernel/os/test/src/mempool_test.c
+++ b/kernel/os/test/src/mempool_test.c
@@ -82,8 +82,12 @@ os_mempool_test_init(void *arg)
 }
 
 TEST_CASE_DECL(os_mempool_test_case)
+TEST_CASE_DECL(os_mempool_test_ext_basic)
+TEST_CASE_DECL(os_mempool_test_ext_nested)
 
 TEST_SUITE(os_mempool_test_suite)
 {
     os_mempool_test_case();
+    os_mempool_test_ext_basic();
+    os_mempool_test_ext_nested();
 }
diff --git a/kernel/os/test/src/mempool_test.h b/kernel/os/test/src/mempool_test.h
index 8a4d50a8e..50010144b 100644
--- a/kernel/os/test/src/mempool_test.h
+++ b/kernel/os/test/src/mempool_test.h
@@ -26,7 +26,7 @@
 #include "os_test_priv.h"
 
 #ifdef __cplusplus
-#extern "C" {
+extern "C" {
 #endif
 
 /* Limit max blocks for testing */
diff --git a/kernel/os/test/src/testcases/os_mempool_test_ext_basic.c b/kernel/os/test/src/testcases/os_mempool_test_ext_basic.c
new file mode 100644
index 000000000..939f6e338
--- /dev/null
+++ b/kernel/os/test/src/testcases/os_mempool_test_ext_basic.c
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#include "os_test_priv.h"
+
+static struct os_mempool_ext *freed_pool;
+static void *freed_block;
+
+static os_error_t
+put_cb(struct os_mempool_ext *mpe, void *block, void *arg)
+{
+    /* Verify block was not freed before this callback gets called. */
+    TEST_ASSERT(mpe->mpe_mp.mp_num_free == mpe->mpe_mp.mp_num_blocks - 1);
+
+    /* Remember pool that block got freed to. */
+    freed_pool = mpe;
+    freed_block = block;
+
+    /* Actually free block. */
+    return os_memblock_put_from_cb(&mpe->mpe_mp, block);
+}
+
+TEST_CASE(os_mempool_test_ext_basic)
+{
+    uint8_t buf[OS_MEMPOOL_BYTES(10, 32)];
+    struct os_mempool_ext pool;
+    int *ip;
+    int rc;
+
+    rc = os_mempool_ext_init(&pool, 10, 32, buf, "test_ext_basic");
+    TEST_ASSERT_FATAL(rc == 0);
+
+    /*** No callback. */
+    ip = os_memblock_get(&pool.mpe_mp);
+    TEST_ASSERT_FATAL(ip != NULL, "Error allocating block");
+
+    rc = os_memblock_put(&pool.mpe_mp, ip);
+    TEST_ASSERT_FATAL(rc == 0, "Error freeing block %d", rc);
+
+    TEST_ASSERT(freed_pool == NULL);
+
+    /*** With callback. */
+    pool.mpe_put_cb = put_cb;
+
+    ip = os_memblock_get(&pool.mpe_mp);
+    TEST_ASSERT_FATAL(ip != NULL, "Error allocating block");
+
+    rc = os_memblock_put(&pool.mpe_mp, ip);
+    TEST_ASSERT_FATAL(rc == 0, "Error freeing block %d", rc);
+
+    /*** No callback; ensure old callback doesn't get called. */
+    freed_pool = NULL;
+    freed_block = NULL;
+    pool.mpe_put_cb = NULL;
+
+    ip = os_memblock_get(&pool.mpe_mp);
+    TEST_ASSERT_FATAL(ip != NULL, "Error allocating block");
+
+    rc = os_memblock_put(&pool.mpe_mp, ip);
+    TEST_ASSERT_FATAL(rc == 0, "Error freeing block %d", rc);
+
+    TEST_ASSERT(freed_pool == NULL);
+    TEST_ASSERT(freed_block == NULL);
+}
diff --git a/kernel/os/test/src/testcases/os_mempool_test_ext_nested.c b/kernel/os/test/src/testcases/os_mempool_test_ext_nested.c
new file mode 100644
index 000000000..7dbf4d652
--- /dev/null
+++ b/kernel/os/test/src/testcases/os_mempool_test_ext_nested.c
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+#include "os_test_priv.h"
+
+static int num_frees;
+
+static os_error_t
+put_cb(struct os_mempool_ext *mpe, void *block, void *arg)
+{
+    int *elem;
+    int rc;
+
+    num_frees++;
+
+    /* Only do work on the first free to avoid infinite recursion. */
+    if (num_frees == 1) {
+        /* Try to allocate and free within callback. */
+        elem = os_memblock_get(&mpe->mpe_mp);
+        TEST_ASSERT(elem != NULL);
+
+        rc = os_memblock_put(&mpe->mpe_mp, elem);
+        TEST_ASSERT(rc == 0);
+    }
+
+    /* Actually free block. */
+    return os_memblock_put_from_cb(&mpe->mpe_mp, block);
+}
+
+TEST_CASE(os_mempool_test_ext_nested)
+{
+    uint8_t buf[OS_MEMPOOL_BYTES(10, 32)];
+    struct os_mempool_ext pool;
+    int *elem;
+    int rc;
+
+    rc = os_mempool_ext_init(&pool, 10, 32, buf, "test_ext_nested");
+    TEST_ASSERT_FATAL(rc == 0);
+
+    pool.mpe_put_cb = put_cb;
+
+    elem = os_memblock_get(&pool.mpe_mp);
+    TEST_ASSERT_FATAL(elem != NULL, "Error allocating block");
+
+    rc = os_memblock_put(&pool.mpe_mp, elem);
+    TEST_ASSERT_FATAL(rc == 0, "Error freeing block %d", rc);
+
+    /* Verify callback was called within callback. */
+    TEST_ASSERT(num_frees == 2);
+}
diff --git a/net/nimble/host/src/ble_gap.c b/net/nimble/host/src/ble_gap.c
index be8f11382..05957f6be 100644
--- a/net/nimble/host/src/ble_gap.c
+++ b/net/nimble/host/src/ble_gap.c
@@ -1021,6 +1021,7 @@ ble_gap_conn_broken(uint16_t conn_handle, int reason)
     ble_sm_connection_broken(conn_handle);
     ble_gatts_connection_broken(conn_handle);
     ble_gattc_connection_broken(conn_handle);
+    ble_hs_flow_connection_broken(conn_handle);
 
     ble_hs_atomic_conn_delete(conn_handle);
 
diff --git a/net/nimble/host/src/ble_hs.c b/net/nimble/host/src/ble_hs.c
index bcbb0382e..306a7be89 100644
--- a/net/nimble/host/src/ble_hs.c
+++ b/net/nimble/host/src/ble_hs.c
@@ -149,13 +149,14 @@ ble_hs_is_parent_task(void)
            os_sched_get_current_task() == ble_hs_parent_task;
 }
 
+/**
+ * Locks the BLE host mutex.  Nested locks allowed.
+ */
 void
-ble_hs_lock(void)
+ble_hs_lock_nested(void)
 {
     int rc;
 
-    BLE_HS_DBG_ASSERT(!ble_hs_locked_by_cur_task());
-
 #if MYNEWT_VAL(BLE_HS_DEBUG)
     if (!os_started()) {
         ble_hs_dbg_mutex_locked = 1;
@@ -167,14 +168,16 @@ ble_hs_lock(void)
     BLE_HS_DBG_ASSERT_EVAL(rc == 0 || rc == OS_NOT_STARTED);
 }
 
+/**
+ * Unlocks the BLE host mutex.  Nested locks allowed.
+ */
 void
-ble_hs_unlock(void)
+ble_hs_unlock_nested(void)
 {
     int rc;
 
 #if MYNEWT_VAL(BLE_HS_DEBUG)
     if (!os_started()) {
-        BLE_HS_DBG_ASSERT(ble_hs_dbg_mutex_locked);
         ble_hs_dbg_mutex_locked = 0;
         return;
     }
@@ -184,6 +187,37 @@ ble_hs_unlock(void)
     BLE_HS_DBG_ASSERT_EVAL(rc == 0 || rc == OS_NOT_STARTED);
 }
 
+/**
+ * Locks the BLE host mutex.  Nested locks not allowed.
+ */
+void
+ble_hs_lock(void)
+{
+    BLE_HS_DBG_ASSERT(!ble_hs_locked_by_cur_task());
+#if MYNEWT_VAL(BLE_HS_DEBUG)
+    if (!os_started()) {
+        BLE_HS_DBG_ASSERT(!ble_hs_dbg_mutex_locked);
+    }
+#endif
+
+    ble_hs_lock_nested();
+}
+
+/**
+ * Unlocks the BLE host mutex.  Nested locks not allowed.
+ */
+void
+ble_hs_unlock(void)
+{
+#if MYNEWT_VAL(BLE_HS_DEBUG)
+    if (!os_started()) {
+        BLE_HS_DBG_ASSERT(ble_hs_dbg_mutex_locked);
+    }
+#endif
+
+    ble_hs_unlock_nested();
+}
+
 void
 ble_hs_process_rx_data_queue(void)
 {
@@ -214,8 +248,7 @@ ble_hs_wakeup_tx_conn(struct ble_hs_conn *conn)
             /* Controller is at capacity.  This packet will be the first to
              * get transmitted next time around.
              */
-            STAILQ_INSERT_HEAD(&conn->bhc_tx_q, OS_MBUF_PKTHDR(om),
-                               omp_next);
+            STAILQ_INSERT_HEAD(&conn->bhc_tx_q, OS_MBUF_PKTHDR(om), omp_next);
             return BLE_HS_EAGAIN;
         }
     }
@@ -579,6 +612,11 @@ ble_hs_rx_data(struct os_mbuf *om, void *arg)
 {
     int rc;
 
+    /* If flow control is enabled, mark this packet with its corresponding
+     * connection handle.
+     */
+    ble_hs_flow_fill_acl_usrhdr(om);
+
     rc = os_mqueue_put(&ble_hs_rx_q, ble_hs_evq, om);
     if (rc != 0) {
         os_mbuf_free_chain(om);
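
As a brief illustration of the new locking variants above (the function name
my_nested_lock_example is hypothetical): the host mutex may be re-acquired by
the task that already owns it, but only through the _nested calls, since
ble_hs_lock() still asserts that the current task does not hold the mutex.

    #include "ble_hs_priv.h"

    static void
    my_nested_lock_example(void)
    {
        ble_hs_lock();          /* First acquisition by this task. */

        /* Code that may run with or without the mutex already held (e.g., the
         * ACL free path in ble_hs_flow.c) uses the nested variants.
         */
        ble_hs_lock_nested();
        /* ... */
        ble_hs_unlock_nested();

        ble_hs_unlock();
    }
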
diff --git a/net/nimble/host/src/ble_hs_conn_priv.h b/net/nimble/host/src/ble_hs_conn_priv.h
index d4da0ffab..f92c5ec60 100644
--- a/net/nimble/host/src/ble_hs_conn_priv.h
+++ b/net/nimble/host/src/ble_hs_conn_priv.h
@@ -65,6 +65,14 @@ struct ble_hs_conn {
      */
     uint16_t bhc_outstanding_pkts;
 
+#if MYNEWT_VAL(BLE_HS_FLOW_CTRL)
+    /**
+     * Count of packets received over this connection that have been processed
+     * and freed.
+     */
+    uint16_t bhc_completed_pkts;
+#endif
+
     /** Queue of outgoing packets that could not be sent. */
     STAILQ_HEAD(, os_mbuf_pkthdr) bhc_tx_q;
 
diff --git a/net/nimble/host/src/ble_hs_flow.c b/net/nimble/host/src/ble_hs_flow.c
new file mode 100644
index 000000000..a571df324
--- /dev/null
+++ b/net/nimble/host/src/ble_hs_flow.c
@@ -0,0 +1,240 @@
+#include "syscfg/syscfg.h"
+#include "nimble/ble_hci_trans.h"
+#include "ble_hs_priv.h"
+
+#if MYNEWT_VAL(BLE_HS_FLOW_CTRL)
+
+#define BLE_HS_FLOW_ITVL_TICKS  \
+    (MYNEWT_VAL(BLE_HS_FLOW_CTRL_ITVL) * OS_TICKS_PER_SEC / 1000)
+
+/**
+ * The number of freed buffers since the most-recent
+ * number-of-completed-packets event was sent.  This is used to determine if an
+ * immediate event transmission is required.
+ */
+static uint16_t ble_hs_flow_num_completed_pkts;
+
+/** Periodically sends number-of-completed-packets events.  */
+static struct os_callout ble_hs_flow_timer;
+
+static os_event_fn ble_hs_flow_event_cb;
+
+static struct os_event ble_hs_flow_ev = {
+    .ev_cb = ble_hs_flow_event_cb,
+};
+
+static int
+ble_hs_flow_tx_num_comp_pkts(void)
+{
+    uint8_t buf[
+        BLE_HCI_HOST_NUM_COMP_PKTS_HDR_LEN + 
+        BLE_HCI_HOST_NUM_COMP_PKTS_ENT_LEN
+    ];
+    struct hci_host_num_comp_pkts_entry entry;
+    struct ble_hs_conn *conn;
+    int rc;
+
+    BLE_HS_DBG_ASSERT(ble_hs_locked_by_cur_task());
+
+    /* For each connection with completed packets, send a separate
+     * host-number-of-completed-packets command.
+     */
+    for (conn = ble_hs_conn_first();
+         conn != NULL;
+         conn = SLIST_NEXT(conn, bhc_next)) {
+
+        if (conn->bhc_completed_pkts > 0) {
+            /* Only specify one connection per command. */
+            buf[0] = 1;
+
+            /* Append entry for this connection. */
+            entry.conn_handle = conn->bhc_handle;
+            entry.num_pkts = conn->bhc_completed_pkts;
+            rc = ble_hs_hci_cmd_build_host_num_comp_pkts_entry(
+                &entry,
+                buf + BLE_HCI_HOST_NUM_COMP_PKTS_HDR_LEN,
+                sizeof buf - BLE_HCI_HOST_NUM_COMP_PKTS_HDR_LEN);
+            BLE_HS_DBG_ASSERT(rc == 0);
+
+            conn->bhc_completed_pkts = 0;
+
+            /* The host-number-of-completed-packets command does not elicit a
+             * response from the controller, so don't use the normal blocking
+             * HCI API when sending it.
+             */
+            rc = ble_hs_hci_cmd_send_buf(
+                BLE_HCI_OP(BLE_HCI_OGF_CTLR_BASEBAND,
+                           BLE_HCI_OCF_CB_HOST_NUM_COMP_PKTS),
+                buf, sizeof buf);
+            if (rc != 0) {
+                return rc;
+            }
+        }
+    }
+
+    return 0;
+}
+
+static void
+ble_hs_flow_event_cb(struct os_event *ev)
+{
+    int rc;
+
+    ble_hs_lock();
+
+    if (ble_hs_flow_num_completed_pkts > 0) {
+        rc = ble_hs_flow_tx_num_comp_pkts();
+        if (rc != 0) {
+            ble_hs_sched_reset(rc);
+        }
+
+        ble_hs_flow_num_completed_pkts = 0;
+    }
+
+    ble_hs_unlock();
+}
+
+static void
+ble_hs_flow_inc_completed_pkts(struct ble_hs_conn *conn)
+{
+    uint16_t num_free;
+
+    int rc;
+
+    BLE_HS_DBG_ASSERT(ble_hs_locked_by_cur_task());
+
+    conn->bhc_completed_pkts++;
+    ble_hs_flow_num_completed_pkts++;
+
+    if (ble_hs_flow_num_completed_pkts > MYNEWT_VAL(BLE_ACL_BUF_COUNT)) {
+        ble_hs_sched_reset(BLE_HS_ECONTROLLER);
+        return;
+    }
+
+    /* If the number of free buffers is at or below the configured threshold,
+     * send an immediate number-of-completed-packets event.
+     */
+    num_free = MYNEWT_VAL(BLE_ACL_BUF_COUNT) - ble_hs_flow_num_completed_pkts;
+    if (num_free <= MYNEWT_VAL(BLE_HS_FLOW_CTRL_THRESH)) {
+        os_eventq_put(ble_hs_evq_get(), &ble_hs_flow_ev);
+        os_callout_stop(&ble_hs_flow_timer);
+    } else if (ble_hs_flow_num_completed_pkts == 1) {
+        rc = os_callout_reset(&ble_hs_flow_timer, BLE_HS_FLOW_ITVL_TICKS);
+        BLE_HS_DBG_ASSERT_EVAL(rc == 0);
+    }
+}
+
+static os_error_t
+ble_hs_flow_acl_free(struct os_mempool_ext *mpe, void *data, void *arg)
+{
+    struct ble_hs_conn *conn;
+    const struct os_mbuf *om;
+    uint16_t conn_handle;
+    int rc;
+
+    om = data;
+
+    /* An ACL data packet must be a single mbuf, and it must contain the
+     * corresponding connection handle in its user header.
+     */
+    assert(OS_MBUF_IS_PKTHDR(om));
+    assert(OS_MBUF_USRHDR_LEN(om) >= sizeof conn_handle);
+
+    /* Copy the connection handle out of the mbuf. */
+    memcpy(&conn_handle, OS_MBUF_USRHDR(om), sizeof conn_handle);
+
+    /* Free the mbuf back to its pool. */
+    rc = os_memblock_put_from_cb(&mpe->mpe_mp, data);
+    if (rc != 0) {
+        return rc;
+    }
+
+    /* Use the nested lock variant; ACL buffers can be freed from many
+     * contexts, some of which may already hold the host mutex.
+     */
+    ble_hs_lock_nested();
+    
+    conn = ble_hs_conn_find(conn_handle);
+    if (conn != NULL) {
+        ble_hs_flow_inc_completed_pkts(conn);
+    }
+
+    ble_hs_unlock_nested();
+
+    return 0;
+}
+#endif /* MYNEWT_VAL(BLE_HS_FLOW_CTRL) */
+
+void
+ble_hs_flow_connection_broken(uint16_t conn_handle)
+{
+#if MYNEWT_VAL(BLE_HS_FLOW_CTRL) &&                 \
+    MYNEWT_VAL(BLE_HS_FLOW_CTRL_TX_ON_DISCONNECT)
+    ble_hs_lock();
+    ble_hs_flow_tx_num_comp_pkts(); 
+    ble_hs_unlock();
+#endif
+}
+
+/**
+ * Fills the user header of an incoming data packet.  On function return, the
+ * header contains the connection handle associated with the sender.
+ *
+ * If flow control is disabled, this function is a no-op.
+ */
+void
+ble_hs_flow_fill_acl_usrhdr(struct os_mbuf *om)
+{
+#if MYNEWT_VAL(BLE_HS_FLOW_CTRL)
+    const struct hci_data_hdr *hdr;
+    uint16_t *conn_handle;
+
+    BLE_HS_DBG_ASSERT(OS_MBUF_USRHDR_LEN(om) >= sizeof *conn_handle);
+    conn_handle = OS_MBUF_USRHDR(om);
+
+    hdr = (void *)om->om_data;
+    *conn_handle = BLE_HCI_DATA_HANDLE(hdr->hdh_handle_pb_bc);
+#endif
+}
+
+/**
+ * Sends the HCI commands to the controller required for enabling host flow
+ * control.
+ *
+ * If flow control is disabled, this function is a no-op.
+ */
+int
+ble_hs_flow_startup(void)
+{
+#if MYNEWT_VAL(BLE_HS_FLOW_CTRL)
+    struct hci_host_buf_size buf_size_cmd;
+    int rc;
+
+    /* Assume failure. */
+    ble_hci_trans_set_acl_free_cb(NULL, NULL);
+    os_callout_stop(&ble_hs_flow_timer);
+
+    rc = ble_hs_hci_cmd_tx_set_ctlr_to_host_fc(BLE_HCI_CTLR_TO_HOST_FC_ACL);
+    if (rc != 0) {
+        return rc;
+    }
+
+    buf_size_cmd = (struct hci_host_buf_size) {
+        .acl_pkt_len = MYNEWT_VAL(BLE_ACL_BUF_SIZE),
+        .num_acl_pkts = MYNEWT_VAL(BLE_ACL_BUF_COUNT),
+    };
+    rc = ble_hs_hci_cmd_tx_host_buf_size(&buf_size_cmd);
+    if (rc != 0) {
+        ble_hs_hci_cmd_tx_set_ctlr_to_host_fc(BLE_HCI_CTLR_TO_HOST_FC_OFF);
+        return rc;
+    }
+
+    /* Flow control successfully enabled. */
+    ble_hs_flow_num_completed_pkts = 0;
+    ble_hci_trans_set_acl_free_cb(ble_hs_flow_acl_free, NULL);
+    os_callout_init(&ble_hs_flow_timer, ble_hs_evq_get(),
+                    ble_hs_flow_event_cb, NULL);
+#endif
+
+    return 0;
+}
diff --git a/net/nimble/host/src/ble_hs_flow_priv.h b/net/nimble/host/src/ble_hs_flow_priv.h
new file mode 100644
index 000000000..52dc48172
--- /dev/null
+++ b/net/nimble/host/src/ble_hs_flow_priv.h
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#ifndef H_BLE_HS_FLOW_PRIV_
+#define H_BLE_HS_FLOW_PRIV_
+
+#include <inttypes.h>
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void ble_hs_flow_connection_broken(uint16_t conn_handle);
+void ble_hs_flow_fill_acl_usrhdr(struct os_mbuf *om);
+int ble_hs_flow_startup(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/net/nimble/host/src/ble_hs_hci_cmd.c b/net/nimble/host/src/ble_hs_hci_cmd.c
index 9cd732659..7f062c48c 100644
--- a/net/nimble/host/src/ble_hs_hci_cmd.c
+++ b/net/nimble/host/src/ble_hs_hci_cmd.c
@@ -66,7 +66,7 @@ ble_hs_hci_cmd_write_hdr(uint8_t ogf, uint16_t ocf, uint8_t len, void *buf)
     u8ptr[2] = len;
 }
 
-int
+static int
 ble_hs_hci_cmd_send(uint16_t opcode, uint8_t len, const void *cmddata)
 {
     uint8_t *buf;
@@ -532,6 +532,51 @@ ble_hs_hci_cmd_reset(void)
                                0, NULL);
 }
 
+/** Set controller to host flow control (OGF 0x03, OCF 0x0031). */
+int
+ble_hs_hci_cmd_tx_set_ctlr_to_host_fc(uint8_t fc_enable)
+{
+    if (fc_enable > BLE_HCI_CTLR_TO_HOST_FC_BOTH) {
+        return BLE_HS_EINVAL;
+    }
+
+    return ble_hs_hci_cmd_tx_empty_ack(
+        BLE_HCI_OP(BLE_HCI_OGF_CTLR_BASEBAND,
+                   BLE_HCI_OCF_CB_SET_CTLR_TO_HOST_FC),
+        &fc_enable, 1);
+}
+
+/* Host buffer size (OGF 0x03, OCF 0x0033). */
+int
+ble_hs_hci_cmd_tx_host_buf_size(const struct hci_host_buf_size *cmd)
+{
+    uint8_t buf[BLE_HCI_HOST_BUF_SIZE_LEN];
+
+    put_le16(buf + 0, cmd->acl_pkt_len);
+    buf[2] = cmd->sync_pkt_len;
+    put_le16(buf + 3, cmd->num_acl_pkts);
+    put_le16(buf + 5, cmd->num_sync_pkts);
+
+    return ble_hs_hci_cmd_tx_empty_ack(
+        BLE_HCI_OP(BLE_HCI_OGF_CTLR_BASEBAND, BLE_HCI_OCF_CB_HOST_BUF_SIZE),
+        buf, sizeof buf);
+}
+
+/* Host number of completed packets (OGF 0x03, OCF 0x0035). */
+int
+ble_hs_hci_cmd_build_host_num_comp_pkts_entry(
+    const struct hci_host_num_comp_pkts_entry *entry,
+    uint8_t *dst, int dst_len)
+{
+    if (dst_len < BLE_HCI_HOST_NUM_COMP_PKTS_ENT_LEN) {
+        return BLE_HS_EMSGSIZE;
+    }
+
+    put_le16(dst + 0, entry->conn_handle);
+    put_le16(dst + 2, entry->num_pkts);
+
+    return 0;
+}
 
 /**
  * Read the transmit power level used for LE advertising channel packets.
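
For reference, a sketch of building and sending a number-of-completed-packets
command with the new helpers, assuming a single connection with handle 0x0005
and two completed packets (the function name is hypothetical).  The resulting
five command bytes are 01 05 00 02 00:

    #include <assert.h>
    #include "ble_hs_priv.h"

    static void
    my_num_comp_pkts_example(void)
    {
        uint8_t buf[BLE_HCI_HOST_NUM_COMP_PKTS_HDR_LEN +
                    BLE_HCI_HOST_NUM_COMP_PKTS_ENT_LEN];
        struct hci_host_num_comp_pkts_entry entry;
        int rc;

        /* Header: number of connection handles in this command. */
        buf[0] = 1;

        /* One 4-byte entry: connection handle and count, little endian. */
        entry.conn_handle = 0x0005;
        entry.num_pkts = 2;
        rc = ble_hs_hci_cmd_build_host_num_comp_pkts_entry(
            &entry,
            buf + BLE_HCI_HOST_NUM_COMP_PKTS_HDR_LEN,
            sizeof buf - BLE_HCI_HOST_NUM_COMP_PKTS_HDR_LEN);
        assert(rc == 0);

        /* The command elicits no response, so send it without waiting for an
         * acknowledgment, as ble_hs_flow_tx_num_comp_pkts() does.
         */
        rc = ble_hs_hci_cmd_send_buf(
            BLE_HCI_OP(BLE_HCI_OGF_CTLR_BASEBAND,
                       BLE_HCI_OCF_CB_HOST_NUM_COMP_PKTS),
            buf, sizeof buf);
        assert(rc == 0);
    }
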
diff --git a/net/nimble/host/src/ble_hs_hci_priv.h b/net/nimble/host/src/ble_hs_hci_priv.h
index 45f95ac54..57ab99458 100644
--- a/net/nimble/host/src/ble_hs_hci_priv.h
+++ b/net/nimble/host/src/ble_hs_hci_priv.h
@@ -99,8 +99,6 @@ int ble_hs_hci_evt_process(uint8_t *data);
 uint16_t ble_hs_hci_util_opcode_join(uint8_t ogf, uint16_t ocf);
 void ble_hs_hci_cmd_write_hdr(uint8_t ogf, uint16_t ocf, uint8_t len,
                               void *buf);
-int ble_hs_hci_cmd_send(uint16_t opcode, uint8_t len,
-                        const void *cmddata);
 int ble_hs_hci_cmd_send_buf(uint16_t opcode, void *buf, uint8_t buf_len);
 void ble_hs_hci_cmd_build_set_event_mask(uint64_t event_mask,
                                          uint8_t *dst, int dst_len);
@@ -142,6 +140,11 @@ int ble_hs_hci_cmd_build_le_add_to_whitelist(const uint8_t *addr,
                                              uint8_t addr_type,
                                              uint8_t *dst, int dst_len);
 int ble_hs_hci_cmd_reset(void);
+int ble_hs_hci_cmd_tx_set_ctlr_to_host_fc(uint8_t fc_enable);
+int ble_hs_hci_cmd_tx_host_buf_size(const struct hci_host_buf_size *cmd);
+int ble_hs_hci_cmd_build_host_num_comp_pkts_entry(
+    const struct hci_host_num_comp_pkts_entry *entry,
+    uint8_t *dst, int dst_len);
 int ble_hs_hci_cmd_read_adv_pwr(void);
 int ble_hs_hci_cmd_le_create_conn_cancel(void);
 int ble_hs_hci_cmd_build_le_conn_update(const struct hci_conn_update *hcu,
diff --git a/net/nimble/host/src/ble_hs_priv.h b/net/nimble/host/src/ble_hs_priv.h
index 0636aafb0..d855ccd8f 100644
--- a/net/nimble/host/src/ble_hs_priv.h
+++ b/net/nimble/host/src/ble_hs_priv.h
@@ -39,6 +39,7 @@
 #include "ble_l2cap_coc_priv.h"
 #include "ble_sm_priv.h"
 #include "ble_hs_adv_priv.h"
+#include "ble_hs_flow_priv.h"
 #include "ble_hs_pvcy_priv.h"
 #include "ble_hs_id_priv.h"
 #include "ble_uuid_priv.h"
@@ -117,6 +118,8 @@ int ble_hs_misc_restore_irks(void);
 
 int ble_hs_locked_by_cur_task(void);
 int ble_hs_is_parent_task(void);
+void ble_hs_lock_nested(void);
+void ble_hs_unlock_nested(void);
 void ble_hs_lock(void);
 void ble_hs_unlock(void);
 void ble_hs_hw_error(uint8_t hw_code);
diff --git a/net/nimble/host/src/ble_hs_startup.c b/net/nimble/host/src/ble_hs_startup.c
index 5de4256ca..2b144cc50 100644
--- a/net/nimble/host/src/ble_hs_startup.c
+++ b/net/nimble/host/src/ble_hs_startup.c
@@ -280,5 +280,8 @@ ble_hs_startup_go(void)
 
     ble_hs_pvcy_set_our_irk(NULL);
 
+    /* If flow control is enabled, configure the controller to use it. */
+    ble_hs_flow_startup();
+
     return 0;
 }
diff --git a/net/nimble/host/syscfg.yml b/net/nimble/host/syscfg.yml
index 5559dc930..56ad2d368 100644
--- a/net/nimble/host/syscfg.yml
+++ b/net/nimble/host/syscfg.yml
@@ -374,5 +374,34 @@ syscfg.defs:
             that have been enabled in the stack, such as GATT support.
         value: 0
 
+    # Flow control settings.
+    BLE_HS_FLOW_CTRL:
+        description: >
+            Whether to enable host-side flow control.  This should only be
+            enabled in host-only setups (i.e., not combined-host-controller).
+        value: 0
+
+    BLE_HS_FLOW_CTRL_ITVL:
+        description: >
+            The interval, in milliseconds, that the host should provide
+            number-of-completed-packets updates to the controller.
+        value: 1000
+
+    BLE_HS_FLOW_CTRL_THRESH:
+        description: >
+            If the number of data buffers available to the controller falls to
+            this number, immediately send a number-of-completed-packets event.
+            The free buffer count is calculated as follows:
+            (total-acl-bufs - bufs-freed-since-last-num-completed-event).
+        value: 2
+
+    BLE_HS_FLOW_CTRL_TX_ON_DISCONNECT:
+        description: >
+            If enabled, the host will immediately transmit a
+            host-number-of-completed-packets command whenever a connection
+            terminates.  This behavior is not required by the standard, but is
+            a necessary workaround when interfacing with some controllers.
+        value: 0
+
 syscfg.vals.BLE_MESH:
     BLE_SM_SC: 1
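
For example, a host-only application or target could enable flow control with
an override like the following in its own syscfg.yml (the values shown are
illustrative only):

    syscfg.vals:
        BLE_HS_FLOW_CTRL: 1
        BLE_HS_FLOW_CTRL_ITVL: 1000
        BLE_HS_FLOW_CTRL_THRESH: 2
        BLE_HS_FLOW_CTRL_TX_ON_DISCONNECT: 1

Assuming BLE_ACL_BUF_COUNT is 4, this threshold triggers an immediate
number-of-completed-packets update as soon as two buffers have been freed
since the last report (4 - 2 <= 2); otherwise updates go out on the 1000 ms
interval.
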
diff --git a/net/nimble/include/nimble/ble.h b/net/nimble/include/nimble/ble.h
index 71d884312..53fef515e 100644
--- a/net/nimble/include/nimble/ble.h
+++ b/net/nimble/include/nimble/ble.h
@@ -150,6 +150,9 @@ struct ble_mbuf_hdr
 #define BLE_MBUF_MEMBLOCK_OVERHEAD      \
     (sizeof(struct os_mbuf) + BLE_MBUF_PKTHDR_OVERHEAD)
 
+/* Length of host user header.  Only contains the peer's connection handle. */
+#define BLE_MBUF_HS_HDR_LEN     (2)
+
 #define BLE_DEV_ADDR_LEN        (6)
 extern uint8_t g_dev_addr[BLE_DEV_ADDR_LEN];
 extern uint8_t g_random_addr[BLE_DEV_ADDR_LEN];
diff --git a/net/nimble/include/nimble/ble_hci_trans.h b/net/nimble/include/nimble/ble_hci_trans.h
index 3df77aa48..7e221e609 100644
--- a/net/nimble/include/nimble/ble_hci_trans.h
+++ b/net/nimble/include/nimble/ble_hci_trans.h
@@ -21,6 +21,8 @@
 #define H_HCI_TRANSPORT_
 
 #include <inttypes.h>
+#include "os/os_mempool.h"
+
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -125,6 +127,19 @@ uint8_t *ble_hci_trans_buf_alloc(int type);
  */
 void ble_hci_trans_buf_free(uint8_t *buf);
 
+/**
+ * Configures a callback to get executed whenever an ACL data packet is freed.
+ * The callback runs in lieu of the free and must release the packet itself.
+ *
+ * @param cb                    The callback to configure.
+ * @param arg                   An optional argument to pass to the callback.
+ *
+ * @return                      0 on success;
+ *                              BLE_ERR_UNSUPPORTED if the transport does not
+ *                                  support this operation.
+ */
+int ble_hci_trans_set_acl_free_cb(os_mempool_put_fn *cb, void *arg);
+
 /**
  * Configures the HCI transport to operate with a controller.  The transport
  * will execute specified callbacks upon receiving HCI packets from the host.
diff --git a/net/nimble/include/nimble/hci_common.h b/net/nimble/include/nimble/hci_common.h
index e14137d14..1907bf0d5 100644
--- a/net/nimble/include/nimble/hci_common.h
+++ b/net/nimble/include/nimble/hci_common.h
@@ -70,6 +70,9 @@ extern "C" {
 #define BLE_HCI_OCF_CB_SET_EVENT_MASK       (0x0001)
 #define BLE_HCI_OCF_CB_RESET                (0x0003)
 #define BLE_HCI_OCF_CB_READ_TX_PWR          (0x002D)
+#define BLE_HCI_OCF_CB_SET_CTLR_TO_HOST_FC  (0x0031)
+#define BLE_HCI_OCF_CB_HOST_BUF_SIZE        (0x0033)
+#define BLE_HCI_OCF_CB_HOST_NUM_COMP_PKTS   (0x0035)
 #define BLE_HCI_OCF_CB_SET_EVENT_MASK2      (0x0063)
 #define BLE_HCI_OCF_CB_RD_AUTH_PYLD_TMO     (0x007B)
 #define BLE_HCI_OCF_CB_WR_AUTH_PYLD_TMO     (0x007C)
@@ -173,6 +176,21 @@ extern "C" {
 /* --- Set event mask (OGF 0x03, OCF 0x0001 --- */
 #define BLE_HCI_SET_EVENT_MASK_LEN          (8)
 
+/* --- Set controller to host flow control (OGF 0x03, OCF 0x0031) --- */
+#define BLE_HCI_CTLR_TO_HOST_FC_LEN         (1)
+
+#define BLE_HCI_CTLR_TO_HOST_FC_OFF         (0)
+#define BLE_HCI_CTLR_TO_HOST_FC_ACL         (1)
+#define BLE_HCI_CTLR_TO_HOST_FC_SYNC        (2)
+#define BLE_HCI_CTLR_TO_HOST_FC_BOTH        (3)
+
+/* --- Host buffer size (OGF 0x03, OCF 0x0033) --- */
+#define BLE_HCI_HOST_BUF_SIZE_LEN           (7)
+
+/* --- Host number of completed packets (OGF 0x03, OCF 0x0035) --- */
+#define BLE_HCI_HOST_NUM_COMP_PKTS_HDR_LEN  (1)
+#define BLE_HCI_HOST_NUM_COMP_PKTS_ENT_LEN  (4)
+
 /* --- Read BD_ADDR (OGF 0x04, OCF 0x0009 --- */
 #define BLE_HCI_IP_RD_BD_ADDR_ACK_PARAM_LEN (6)
 
@@ -785,6 +803,23 @@ extern "C" {
 #define BLE_HCI_LE_ENH_CONN_COMPLETE_LEN    (31)
 
 /*--- Shared data structures ---*/
+
+/* Host buffer size (OGF=0x03, OCF=0x0033) */
+struct hci_host_buf_size
+{
+    uint16_t acl_pkt_len;
+    uint8_t sync_pkt_len;
+    uint16_t num_acl_pkts;
+    uint16_t num_sync_pkts;
+};
+
+/* Host number of completed packets (OGF=0x03, OCF=0x0035) */
+struct hci_host_num_comp_pkts_entry
+{
+    uint16_t conn_handle;
+    uint16_t num_pkts;
+};
+
 /* Read local version information (OGF=0x0004, OCF=0x0001) */
 struct hci_loc_ver_info
 {
diff --git a/net/nimble/transport/emspi/src/ble_hci_emspi.c b/net/nimble/transport/emspi/src/ble_hci_emspi.c
index 96107855d..1487984b3 100644
--- a/net/nimble/transport/emspi/src/ble_hci_emspi.c
+++ b/net/nimble/transport/emspi/src/ble_hci_emspi.c
@@ -100,7 +100,7 @@ static struct os_mempool ble_hci_emspi_pkt_pool;
 static void *ble_hci_emspi_pkt_buf;
 
 static struct os_mbuf_pool ble_hci_emspi_acl_mbuf_pool;
-static struct os_mempool ble_hci_emspi_acl_pool;
+static struct os_mempool_ext ble_hci_emspi_acl_pool;
 static void *ble_hci_emspi_acl_buf;
 
 static void
@@ -269,15 +269,15 @@ ble_hci_emspi_rx(uint8_t *data, int max_len)
 static struct os_mbuf *
 ble_hci_trans_acl_buf_alloc(void)
 {
-    struct os_mbuf *m;
+    uint8_t usrhdr_len;
 
-    /*
-     * XXX: note that for host only there would be no need to allocate
-     * a user header. Address this later.
-     */
-    m = os_mbuf_get_pkthdr(&ble_hci_emspi_acl_mbuf_pool,
-                           sizeof(struct ble_mbuf_hdr));
-    return m;
+#if MYNEWT_VAL(BLE_HS_FLOW_CTRL)
+    usrhdr_len = BLE_MBUF_HS_HDR_LEN;
+#else
+    usrhdr_len = 0;
+#endif
+
+    return os_mbuf_get_pkthdr(&ble_hci_emspi_acl_mbuf_pool, usrhdr_len);
 }
 
 /**
@@ -603,13 +603,7 @@ ble_hci_emspi_free_pkt(uint8_t type, uint8_t *cmdevt, struct os_mbuf *acl)
 }
 
 /**
- * Sends an HCI event from the controller to the host.
- *
- * @param cmd                   The HCI event to send.  This buffer must be
- *                                  allocated via ble_hci_trans_buf_alloc().
- *
- * @return                      0 on success;
- *                              A BLE_ERR_[...] error code on failure.
+ * Unsupported.  This is a host-only transport.
  */
 int
 ble_hci_trans_ll_evt_tx(uint8_t *cmd)
@@ -618,12 +612,7 @@ ble_hci_trans_ll_evt_tx(uint8_t *cmd)
 }
 
 /**
- * Sends ACL data from controller to host.
- *
- * @param om                    The ACL data packet to send.
- *
- * @return                      0 on success;
- *                              A BLE_ERR_[...] error code on failure.
+ * Unsupported.  This is a host-only transport.
  */
 int
 ble_hci_trans_ll_acl_tx(struct os_mbuf *om)
@@ -708,7 +697,7 @@ ble_hci_trans_cfg_ll(ble_hci_trans_rx_cmd_fn *cmd_cb,
                      ble_hci_trans_rx_acl_fn *acl_cb,
                      void *acl_arg)
 {
-    /* XXX: Unimplemented. */
+    /* Unsupported. */
     assert(0);
 }
 
@@ -778,6 +767,22 @@ ble_hci_trans_buf_free(uint8_t *buf)
     }
 }
 
+/**
+ * Configures a callback to get executed whenever an ACL data packet is freed.
+ * The function is called in lieu of actually freeing the packet.
+ *
+ * @param cb                    The callback to configure.
+ *
+ * @return                      0 on success.
+ */
+int
+ble_hci_trans_set_acl_free_cb(os_mempool_put_fn *cb, void *arg)
+{
+    ble_hci_emspi_acl_pool.mpe_put_cb = cb;
+    ble_hci_emspi_acl_pool.mpe_put_arg = arg;
+    return 0;
+}
+
 /**
  * Resets the HCI UART transport to a clean state.  Frees all buffers and
  * reconfigures the UART.
@@ -876,15 +881,16 @@ ble_hci_emspi_init(void)
                      BLE_MBUF_MEMBLOCK_OVERHEAD +
                      BLE_HCI_DATA_HDR_SZ;
     acl_block_size = OS_ALIGN(acl_block_size, OS_ALIGNMENT);
-    rc = mem_malloc_mempool(&ble_hci_emspi_acl_pool,
-                            MYNEWT_VAL(BLE_ACL_BUF_COUNT),
-                            acl_block_size,
-                            "ble_hci_emspi_acl_pool",
-                            &ble_hci_emspi_acl_buf);
+    rc = mem_malloc_mempool_ext(&ble_hci_emspi_acl_pool,
+                                MYNEWT_VAL(BLE_ACL_BUF_COUNT),
+                                acl_block_size,
+                                "ble_hci_emspi_acl_pool",
+                                &ble_hci_emspi_acl_buf);
     SYSINIT_PANIC_ASSERT(rc == 0);
 
     rc = os_mbuf_pool_init(&ble_hci_emspi_acl_mbuf_pool,
-                           &ble_hci_emspi_acl_pool, acl_block_size,
+                           &ble_hci_emspi_acl_pool.mpe_mp,
+                           acl_block_size,
                            MYNEWT_VAL(BLE_ACL_BUF_COUNT));
     SYSINIT_PANIC_ASSERT(rc == 0);
 
diff --git a/net/nimble/transport/emspi/syscfg.yml b/net/nimble/transport/emspi/syscfg.yml
index c8c20980f..066deb7f1 100644
--- a/net/nimble/transport/emspi/syscfg.yml
+++ b/net/nimble/transport/emspi/syscfg.yml
@@ -20,13 +20,16 @@
 
 syscfg.defs:
     BLE_HCI_EMSPI:
-        description: 'Indicates that the emspi HCI transport is present.'
+        description: 'Indicates that the emspi host HCI transport is present.'
         value: 1
         restrictions:
             # XXX: This package only builds with the apollo2 MCU.
             # MCU-dependencies need to be removed.
             - MCU_APOLLO2
 
+            # This is a host-only transport.
+            - BLE_HOST
+
     BLE_HCI_EVT_HI_BUF_COUNT:
         description: 'Number of high-priority event buffers.'
         value:  2
diff --git a/net/nimble/transport/ram/src/ble_hci_ram.c b/net/nimble/transport/ram/src/ble_hci_ram.c
index 1c0cc6a01..34406d44c 100644
--- a/net/nimble/transport/ram/src/ble_hci_ram.c
+++ b/net/nimble/transport/ram/src/ble_hci_ram.c
@@ -168,6 +168,16 @@ ble_hci_trans_buf_free(uint8_t *buf)
     }
 }
 
+/**
+ * Unsupported; the RAM transport does not have a dedicated ACL data packet
+ * pool.
+ */
+int
+ble_hci_trans_set_acl_free_cb(os_mempool_put_fn *cb, void *arg)
+{
+    return BLE_ERR_UNSUPPORTED;
+}
+
 static void
 ble_hci_ram_free_mem(void)
 {
diff --git a/net/nimble/transport/uart/src/ble_hci_uart.c b/net/nimble/transport/uart/src/ble_hci_uart.c
index 19b89aee2..55e275309 100755
--- a/net/nimble/transport/uart/src/ble_hci_uart.c
+++ b/net/nimble/transport/uart/src/ble_hci_uart.c
@@ -93,7 +93,7 @@ static struct os_mempool ble_hci_uart_pkt_pool;
 static void *ble_hci_uart_pkt_buf;
 
 static struct os_mbuf_pool ble_hci_uart_acl_mbuf_pool;
-static struct os_mempool ble_hci_uart_acl_pool;
+static struct os_mempool_ext ble_hci_uart_acl_pool;
 static void *ble_hci_uart_acl_buf;
 
 /**
@@ -165,13 +165,17 @@ static struct os_mbuf *
 ble_hci_trans_acl_buf_alloc(void)
 {
     struct os_mbuf *m;
+    uint8_t usrhdr_len;
 
-    /*
-     * XXX: note that for host only there would be no need to allocate
-     * a user header. Address this later.
-     */
-    m = os_mbuf_get_pkthdr(&ble_hci_uart_acl_mbuf_pool,
-                           sizeof(struct ble_mbuf_hdr));
+#if MYNEWT_VAL(BLE_DEVICE)
+    usrhdr_len = sizeof(struct ble_mbuf_hdr);
+#elif MYNEWT_VAL(BLE_HS_FLOW_CTRL)
+    usrhdr_len = BLE_MBUF_HS_HDR_LEN;
+#else
+    usrhdr_len = 0;
+#endif
+
+    m = os_mbuf_get_pkthdr(&ble_hci_uart_acl_mbuf_pool, usrhdr_len);
     return m;
 }
 
@@ -949,6 +953,22 @@ ble_hci_trans_buf_free(uint8_t *buf)
     }
 }
 
+/**
+ * Configures a callback to get executed whenever an ACL data packet is freed.
+ * The function is called in lieu of actually freeing the packet.
+ *
+ * @param cb                    The callback to configure.
+ *
+ * @return                      0 on success.
+ */
+int
+ble_hci_trans_set_acl_free_cb(os_mempool_put_fn *cb, void *arg)
+{
+    ble_hci_uart_acl_pool.mpe_put_cb = cb;
+    ble_hci_uart_acl_pool.mpe_put_arg = arg;
+    return 0;
+}
+
 /**
  * Resets the HCI UART transport to a clean state.  Frees all buffers and
  * reconfigures the UART.
@@ -1020,15 +1040,17 @@ ble_hci_uart_init(void)
     acl_block_size = acl_data_length + BLE_MBUF_MEMBLOCK_OVERHEAD +
         BLE_HCI_DATA_HDR_SZ;
     acl_block_size = OS_ALIGN(acl_block_size, OS_ALIGNMENT);
-    rc = mem_malloc_mempool(&ble_hci_uart_acl_pool,
-                            MYNEWT_VAL(BLE_ACL_BUF_COUNT),
-                            acl_block_size,
-                            "ble_hci_uart_acl_pool",
-                            &ble_hci_uart_acl_buf);
+    rc = mem_malloc_mempool_ext(&ble_hci_uart_acl_pool,
+                                MYNEWT_VAL(BLE_ACL_BUF_COUNT),
+                                acl_block_size,
+                                "ble_hci_uart_acl_pool",
+                                &ble_hci_uart_acl_buf);
     SYSINIT_PANIC_ASSERT(rc == 0);
 
-    rc = os_mbuf_pool_init(&ble_hci_uart_acl_mbuf_pool, &ble_hci_uart_acl_pool,
-                           acl_block_size, MYNEWT_VAL(BLE_ACL_BUF_COUNT));
+    rc = os_mbuf_pool_init(&ble_hci_uart_acl_mbuf_pool,
+                           &ble_hci_uart_acl_pool.mpe_mp,
+                           acl_block_size,
+                           MYNEWT_VAL(BLE_ACL_BUF_COUNT));
     SYSINIT_PANIC_ASSERT(rc == 0);
 
     /*
diff --git a/util/mem/include/mem/mem.h b/util/mem/include/mem/mem.h
index 18461b6e4..a97e148fa 100644
--- a/util/mem/include/mem/mem.h
+++ b/util/mem/include/mem/mem.h
@@ -27,12 +27,14 @@ extern "C" {
 struct os_mempool;
 struct os_mbuf_pool;
 
-int mem_malloc_mempool(struct os_mempool *mempool, int num_blocks,
-                       int block_size, char *name, void **out_buf);
+int mem_malloc_mempool(struct os_mempool *mempool, uint16_t num_blocks,
+                       uint32_t block_size, char *name, void **out_buf);
+int mem_malloc_mempool_ext(struct os_mempool_ext *mempool, uint16_t num_blocks,
+                           uint32_t block_size, char *name, void **out_buf);
 
 int mem_malloc_mbuf_pool(struct os_mempool *mempool,
-                         struct os_mbuf_pool *mbuf_pool, int num_blocks,
-                         int block_size, char *name,
+                         struct os_mbuf_pool *mbuf_pool, uint16_t num_blocks,
+                         uint32_t block_size, char *name,
                          void **out_buf);
 int mem_malloc_mbufpkt_pool(struct os_mempool *mempool,
                             struct os_mbuf_pool *mbuf_pool, int num_blocks,
diff --git a/util/mem/src/mem.c b/util/mem/src/mem.c
index 340e60e7c..6b5739aae 100644
--- a/util/mem/src/mem.c
+++ b/util/mem/src/mem.c
@@ -20,6 +20,27 @@
 #include "os/os.h"
 #include "mem/mem.h"
 
+/**
+ * Generic mempool allocation function.  Used with basic and extended mempools.
+ */
+static int
+mem_malloc_mempool_gen(uint16_t num_blocks, uint32_t block_size,
+                       void **out_buf)
+{
+    block_size = OS_ALIGN(block_size, OS_ALIGNMENT);
+
+    if (num_blocks > 0) {
+        *out_buf = malloc(OS_MEMPOOL_BYTES(num_blocks, block_size));
+        if (*out_buf == NULL) {
+            return OS_ENOMEM;
+        }
+    } else {
+        *out_buf = NULL;
+    }
+
+    return 0;
+}
+
 /**
  * Mallocs a block of memory and initializes a mempool to use it.
  *
@@ -37,21 +58,15 @@
  *                              Other OS code on unexpected error.
  */
 int
-mem_malloc_mempool(struct os_mempool *mempool, int num_blocks, int block_size,
-                   char *name, void **out_buf)
+mem_malloc_mempool(struct os_mempool *mempool, uint16_t num_blocks,
+                   uint32_t block_size, char *name, void **out_buf)
 {
     void *buf;
     int rc;
 
-    block_size = OS_ALIGN(block_size, OS_ALIGNMENT);
-
-    if (num_blocks > 0) {
-        buf = malloc(OS_MEMPOOL_BYTES(num_blocks, block_size));
-        if (buf == NULL) {
-            return OS_ENOMEM;
-        }
-    } else {
-        buf = NULL;
+    rc = mem_malloc_mempool_gen(num_blocks, block_size, &buf);
+    if (rc != 0) {
+        return rc;
     }
 
     rc = os_mempool_init(mempool, num_blocks, block_size, buf, name);
@@ -67,6 +82,47 @@ mem_malloc_mempool(struct os_mempool *mempool, int num_blocks, int block_size,
     return 0;
 }
 
+/**
+ * Mallocs a block of memory and initializes an extended mempool to use it.
+ *
+ * @param mpe                   The extended mempool to initialize.
+ * @param num_blocks            The total number of memory blocks in the
+ *                                  mempool.
+ * @param block_size            The size of each mempool entry.
+ * @param name                  The name to give the mempool.
+ * @param out_buf               On success, this points to the malloced memory.
+ *                                  Pass NULL if you don't need this
+ *                                  information.
+ *
+ * @return                      0 on success;
+ *                              OS_ENOMEM on malloc failure;
+ *                              Other OS code on unexpected error.
+ */
+int
+mem_malloc_mempool_ext(struct os_mempool_ext *mpe, uint16_t num_blocks,
+                       uint32_t block_size, char *name, void **out_buf)
+{
+    void *buf;
+    int rc;
+
+    rc = mem_malloc_mempool_gen(num_blocks, block_size, &buf);
+    if (rc != 0) {
+        return rc;
+    }
+
+    rc = os_mempool_ext_init(mpe, num_blocks, block_size, buf, name);
+    if (rc != 0) {
+        free(buf);
+        return rc;
+    }
+
+    if (out_buf != NULL) {
+        *out_buf = buf;
+    }
+
+    return 0;
+}
+
 /**
  * Mallocs a block of memory and initializes an mbuf pool to use it.  The
  * specified block_size indicates the size of an mbuf acquired from the pool if
@@ -87,8 +143,8 @@ mem_malloc_mempool(struct os_mempool *mempool, int num_blocks, int block_size,
  */
 int
 mem_malloc_mbuf_pool(struct os_mempool *mempool,
-                     struct os_mbuf_pool *mbuf_pool, int num_blocks,
-                     int block_size, char *name,
+                     struct os_mbuf_pool *mbuf_pool, uint16_t num_blocks,
+                     uint32_t block_size, char *name,
                      void **out_buf)
 {
     void *buf;


 

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
users@infra.apache.org


With regards,
Apache Git Services