Posted to commits@mynewt.apache.org by an...@apache.org on 2021/11/04 10:36:04 UTC

[mynewt-nimble] 03/04: nimble/ll: Add fast path for aux chain scheduling

This is an automated email from the ASF dual-hosted git repository.

andk pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/mynewt-nimble.git

commit ed4563741cf75353105fa6ae42a997343b1152ad
Author: Andrzej Kaczmarek <an...@codecoup.pl>
AuthorDate: Wed Nov 3 13:36:34 2021 +0100

    nimble/ll: Add fast path for aux chain scheduling
    
    For short aux offsets we can do the usecs to ticks conversion using
    only 32-bit arithmetic, without integer division (sketched below).
    This is much faster than the generic routine, especially on M0
    (e.g. CMAC), which has no hardware support for integer division. The
    fast calculation is also more accurate than the generic one, since
    it is never off by 1 tick.
    
    This is a temporary solution until we have more generic timer
    routines that apply the same kind of optimization.
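
The magic numbers in usecs_to_ticks_fast() imply a 32768 Hz os_cputime
clock: 137439 / 4194304 ~= 32768 / 1000000, i.e. the multiplier is the
ticks-per-usec rate scaled by 2^22 = 4194304 and rounded, and 31249 is
the largest input for which usecs * 137439 still fits in 32 bits. The
standalone C sketch below is not part of the commit; it reproduces both
constants and shows one fast-path conversion (the 2500 us sample offset
is arbitrary):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        /* 137439 is the ticks-per-usec rate (32768 / 1000000) expressed
         * in 1/2^22 units:
         * round(32768 * 4194304 / 1000000) = round(137438.95...) = 137439
         */
        uint32_t mult = (uint32_t)(((uint64_t)32768 * 4194304 + 500000) /
                                   1000000);

        /* 31249 is the largest value whose product with 137439 still
         * fits in 32 bits, hence the cut-off before falling back to
         * os_cputime_usecs_to_ticks().
         */
        uint32_t max_us = UINT32_MAX / 137439;

        printf("multiplier: %" PRIu32 "\n", mult);   /* 137439 */
        printf("max usecs:  %" PRIu32 "\n", max_us); /* 31249 */

        /* One fast-path conversion, e.g. a 2500 us aux offset. */
        uint32_t usecs = 2500;
        printf("%" PRIu32 " us -> %" PRIu32 " ticks\n",
               usecs, (usecs * 137439) / 4194304);   /* 81 ticks */

        return 0;
    }

Since 4194304 is a power of two, the division compiles to a shift, so the
fast path costs one 32-bit multiply and one shift instead of a call to
os_cputime_usecs_to_ticks().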
---
 nimble/controller/src/ble_ll_sched.c | 24 ++++++++++++++++++++----
 1 file changed, 20 insertions(+), 4 deletions(-)

diff --git a/nimble/controller/src/ble_ll_sched.c b/nimble/controller/src/ble_ll_sched.c
index 7a2e6f3..5217557 100644
--- a/nimble/controller/src/ble_ll_sched.c
+++ b/nimble/controller/src/ble_ll_sched.c
@@ -786,6 +786,22 @@ ble_ll_sched_slave_new(struct ble_ll_conn_sm *connsm)
     return rc;
 }
 
+#if MYNEWT_VAL(BLE_LL_CFG_FEAT_LL_EXT_ADV)
+static inline uint32_t
+usecs_to_ticks_fast(uint32_t usecs)
+{
+    uint32_t ticks;
+
+    if (usecs <= 31249) {
+        ticks = (usecs * 137439) / 4194304;
+    } else {
+        ticks = os_cputime_usecs_to_ticks(usecs);
+    }
+
+    return ticks;
+}
+#endif
+
 #if MYNEWT_VAL(BLE_LL_CFG_FEAT_LL_PERIODIC_ADV)
 /*
  * Determines if the schedule item overlaps the currently running schedule
@@ -887,7 +903,7 @@ ble_ll_sched_sync(struct ble_ll_sched_item *sch,
     os_sr_t sr;
     int rc = 0;
 
-    off_ticks = os_cputime_usecs_to_ticks(offset);
+    off_ticks = usecs_to_ticks_fast(offset);
     off_rem_usecs = offset - os_cputime_ticks_to_usecs(off_ticks);
 
     start_time = beg_cputime + off_ticks;
@@ -899,7 +915,7 @@ ble_ll_sched_sync(struct ble_ll_sched_item *sch,
 
     dur = ble_ll_pdu_tx_time_get(MYNEWT_VAL(BLE_LL_SCHED_SCAN_SYNC_PDU_LEN),
                                   phy_mode);
-    end_time = start_time + os_cputime_usecs_to_ticks(dur);
+    end_time = start_time + usecs_to_ticks_fast(dur);
 
     start_time -= g_ble_ll_sched_offset_ticks;
 
@@ -1257,12 +1273,12 @@ ble_ll_sched_scan_aux(struct ble_ll_sched_item *sch, uint32_t pdu_time,
     int rc;
 
     offset_us += pdu_time_rem;
-    offset_ticks = os_cputime_usecs_to_ticks(offset_us);
+    offset_ticks = usecs_to_ticks_fast(offset_us);
 
     sch->start_time = pdu_time + offset_ticks - g_ble_ll_sched_offset_ticks;
     sch->remainder = offset_us - os_cputime_ticks_to_usecs(offset_ticks);
     /* TODO: make some sane slot reservation */
-    sch->end_time = sch->start_time + os_cputime_usecs_to_ticks(5000);
+    sch->end_time = sch->start_time + usecs_to_ticks_fast(5000);
 
     OS_ENTER_CRITICAL(sr);