Posted to commits@nuttx.apache.org by gn...@apache.org on 2020/05/14 23:31:40 UTC

[incubator-nuttx] branch master updated: arch/arm/src/stm32h7/stm32_sdmmc.c: Fixes for IDMA transfer and cache usage

This is an automated email from the ASF dual-hosted git repository.

gnutt pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-nuttx.git


The following commit(s) were added to refs/heads/master by this push:
     new 1071934  arch/arm/src/stm32h7/stm32_sdmmc.c: Fixes for IDMA transfer and cache usage
1071934 is described below

commit 107193435006e192ac03b7d6ead784753762f4e7
Author: Jukka Laitinen <ju...@intel.com>
AuthorDate: Tue Oct 22 12:51:42 2019 +0300

    arch/arm/src/stm32h7/stm32_sdmmc.c: Fixes for IDMA transfer and cache usage
    
    This simplifies the sdmmc driver when the IDMA is in use. There is no need
    to mix IDMA and interrupt-based transfers; instead, when making unaligned
    data transfers, just do the IDMA into an internal aligned buffer and then
    copy the data to the client buffer.
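
    Roughly, the receive path then behaves like the sketch below. This is an
    illustrative sketch only, not the driver code: the helper names dma_read,
    dma_read_block and invalidate_dcache, and the 32-byte cache-line size, are
    made-up stand-ins for the real IDMA and cache-maintenance calls.

        #include <stdint.h>
        #include <string.h>

        #define CACHE_LINE  32    /* Assumed D-Cache line size */
        #define BLOCK_SIZE  512   /* SD/MMC block size */

        /* Hypothetical stand-ins for the real IDMA and cache routines */

        extern void dma_read(uint8_t *dest, size_t nbytes);
        extern void dma_read_block(uint8_t *dest);
        extern void invalidate_dcache(uint8_t *start, uint8_t *end);

        /* Internal, cache-line aligned bounce buffer for one block */

        static uint8_t g_bounce[BLOCK_SIZE]
          __attribute__((aligned(CACHE_LINE)));

        static void receive(uint8_t *buffer, size_t nbytes)
        {
          if (((uintptr_t)buffer % CACHE_LINE) == 0 &&
              (nbytes % CACHE_LINE) == 0)
            {
              /* Aligned client buffer: IDMA the whole transfer directly
               * into it, then invalidate so the CPU re-reads RAM instead
               * of stale cache lines.
               */

              dma_read(buffer, nbytes);
              invalidate_dcache(buffer, buffer + nbytes);
            }
          else
            {
              /* Unaligned client buffer: IDMA one block at a time into the
               * aligned bounce buffer, invalidate it, then copy it out.
               */

              while (nbytes >= BLOCK_SIZE)
                {
                  dma_read_block(g_bounce);
                  invalidate_dcache(g_bounce, g_bounce + BLOCK_SIZE);
                  memcpy(buffer, g_bounce, BLOCK_SIZE);
                  buffer += BLOCK_SIZE;
                  nbytes -= BLOCK_SIZE;
                }
            }
        }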
    
    Signed-off-by: Jukka Laitinen <ju...@intel.com>
---
 arch/arm/src/stm32h7/stm32_sdmmc.c | 313 ++++++++++++++++++++++++++-----------
 1 file changed, 224 insertions(+), 89 deletions(-)

diff --git a/arch/arm/src/stm32h7/stm32_sdmmc.c b/arch/arm/src/stm32h7/stm32_sdmmc.c
index 26f16d7..99a98d9 100644
--- a/arch/arm/src/stm32h7/stm32_sdmmc.c
+++ b/arch/arm/src/stm32h7/stm32_sdmmc.c
@@ -217,6 +217,10 @@
 
 #define SDMMC_DTIMER_DATATIMEOUT (6250000) /* 250 ms @ 25 MHz */
 
+/* Maximum block size for multi-block transfers */
+
+#define SDMMC_MAX_BLOCK_SIZE          (512)
+
 /* Data transfer interrupt mask bits */
 
 /* DMA interrupts */
@@ -360,6 +364,10 @@ struct stm32_dev_s
 #endif
   uint8_t            rxfifo[FIFO_SIZE_IN_BYTES] /* To offload with IDMA */
                      __attribute__((aligned(ARMV7M_DCACHE_LINESIZE)));
+#if defined(CONFIG_ARMV7M_DCACHE) && defined(CONFIG_STM32H7_SDMMC_IDMA) && \
+    !defined(CONFIG_ARCH_HAVE_SDIO_DELAYED_INVLDT)
+  bool               unaligned_rx; /* Read buffer is not cache-line aligned */
+#endif
 };
 
 /* Register logging support */
@@ -424,11 +432,14 @@ static void stm32_dumpsamples(struct stm32_dev_s *priv);
 
 static uint8_t stm32_log2(uint16_t value);
 static void stm32_dataconfig(struct stm32_dev_s *priv, uint32_t timeout,
-                             uint32_t dlen, uint32_t dctrl);
+                             uint32_t dlen, bool receive);
 static void stm32_datadisable(struct stm32_dev_s *priv);
 #ifndef CONFIG_STM32H7_SDMMC_IDMA
 static void stm32_sendfifo(struct stm32_dev_s *priv);
 static void stm32_recvfifo(struct stm32_dev_s *priv);
+#elif defined(CONFIG_ARMV7M_DCACHE) && \
+      !defined(CONFIG_ARCH_HAVE_SDIO_DELAYED_INVLDT)
+static void stm32_recvdma(struct stm32_dev_s *priv);
 #endif
 static void stm32_eventtimeout(int argc, uint32_t arg, ...);
 static void stm32_endwait(struct stm32_dev_s *priv,
@@ -467,10 +478,12 @@ static int  stm32_sendcmd(FAR struct sdio_dev_s *dev, uint32_t cmd,
                           uint32_t arg);
 static void stm32_blocksetup(FAR struct sdio_dev_s *dev,
               unsigned int blocksize, unsigned int nblocks);
+#ifndef CONFIG_STM32H7_SDMMC_IDMA
 static int  stm32_recvsetup(FAR struct sdio_dev_s *dev, FAR uint8_t *buffer,
                             size_t nbytes);
 static int  stm32_sendsetup(FAR struct sdio_dev_s *dev,
                             FAR const uint8_t *buffer, uint32_t nbytes);
+#endif
 static int  stm32_cancel(FAR struct sdio_dev_s *dev);
 
 static int  stm32_waitresponse(FAR struct sdio_dev_s *dev, uint32_t cmd);
@@ -533,8 +546,13 @@ struct stm32_dev_s g_sdmmcdev1 =
     .attach           = stm32_attach,
     .sendcmd          = stm32_sendcmd,
     .blocksetup       = stm32_blocksetup,
+#if defined(CONFIG_STM32H7_SDMMC_IDMA)
+    .recvsetup        = stm32_dmarecvsetup,
+    .sendsetup        = stm32_dmasendsetup,
+#else
     .recvsetup        = stm32_recvsetup,
     .sendsetup        = stm32_sendsetup,
+#endif
     .cancel           = stm32_cancel,
     .waitresponse     = stm32_waitresponse,
     .recv_r1          = stm32_recvshortcrc,
@@ -627,6 +645,13 @@ struct stm32_dev_s g_sdmmcdev2 =
 static struct stm32_sampleregs_s g_sampleregs[DEBUG_NSAMPLES];
 #endif
 
+/* Input DMA buffer for unaligned transfers */
+#if defined(CONFIG_ARMV7M_DCACHE) && defined(CONFIG_STM32H7_SDMMC_IDMA) && \
+    !defined(CONFIG_ARCH_HAVE_SDIO_DELAYED_INVLDT)
+static uint8_t sdmmc_rxbuffer[SDMMC_MAX_BLOCK_SIZE]
+__attribute__((aligned(ARMV7M_DCACHE_LINESIZE)));
+#endif
+
 /****************************************************************************
  * Private Functions
  ****************************************************************************/
@@ -1059,42 +1084,80 @@ static uint8_t stm32_log2(uint16_t value)
  ****************************************************************************/
 
 static void stm32_dataconfig(struct stm32_dev_s *priv, uint32_t timeout,
-                             uint32_t dlen, uint32_t dctrl)
+                             uint32_t dlen, bool receive)
 {
-  uint32_t regval = 0;
-
-  /* Enable data path */
+  uint32_t dctrl = 0;
 
-  /* Set DTIMER */
-
-  sdmmc_putreg32(priv, timeout, STM32_SDMMC_DTIMER_OFFSET);
-
-  /* Set DLEN */
-
-  sdmmc_putreg32(priv, dlen, STM32_SDMMC_DLEN_OFFSET);
+  DEBUGASSERT((sdmmc_getreg32(priv, STM32_SDMMC_IDMACTRLR_OFFSET) &
+               STM32_SDMMC_IDMACTRLR_IDMAEN) == 0);
+  DEBUGASSERT((sdmmc_getreg32(priv, STM32_SDMMC_STA_OFFSET) &
+               STM32_SDMMC_STA_DPSMACT) == 0);
 
   /* Configure DCTRL DTDIR, DTMODE, and DBLOCKSIZE fields.
    * Note: the DTEN is not used, DPSM, and CPSM are used.
    */
 
-  regval  =  sdmmc_getreg32(priv, STM32_SDMMC_DCTRL_OFFSET);
-  regval &= ~(STM32_SDMMC_DCTRL_DTDIR | STM32_SDMMC_DCTRL_DTMODE_MASK |
+  dctrl  =  sdmmc_getreg32(priv, STM32_SDMMC_DCTRL_OFFSET);
+  dctrl  &= ~(STM32_SDMMC_DCTRL_DTDIR | STM32_SDMMC_DCTRL_DTMODE_MASK |
               STM32_SDMMC_DCTRL_DBLOCKSIZE_MASK | STM32_SDMMC_DCTRL_DTEN |
               STM32_SDMMC_DCTRL_BOOTACKEN);
 
   dctrl  &= (STM32_SDMMC_DCTRL_DTDIR | STM32_SDMMC_DCTRL_DTMODE_MASK |
              STM32_SDMMC_DCTRL_DBLOCKSIZE_MASK);
 
-  regval |= dctrl;
+  /* Configure the data direction */
+
+  if (receive)
+    {
+      dctrl |= STM32_SDMMC_DCTRL_DTDIR;
+    }
+
+  /* Set SDIO_MODE */
 
 #if defined(HAVE_SDMMC_SDIO_MODE)
   if (priv->sdiomode == true)
     {
-      regval |= STM32_SDMMC_DCTRL_SDIOEN | STM32_SDMMC_DCTRL_DTMODE_SDIO;
+      dctrl |= STM32_SDMMC_DCTRL_SDIOEN | STM32_SDMMC_DCTRL_DTMODE_SDIO;
     }
 #endif
 
-  sdmmc_putreg32(priv, regval, STM32_SDMMC_DCTRL_OFFSET);
+  /* If dlen > priv->blocksize, we assume that this is a multi-block transfer
+   * and that the length is a multiple of priv->blocksize.
+   */
+
+  if (dlen > priv->blocksize)
+    {
+      DEBUGASSERT((dlen % priv->blocksize) == 0);
+
+#if defined(CONFIG_STM32H7_SDMMC_IDMA) && defined(CONFIG_ARMV7M_DCACHE) && \
+    !defined(CONFIG_ARCH_HAVE_SDIO_DELAYED_INVLDT)
+      /* If the cache is enabled and this is an unaligned receive,
+       * receive one block at a time into the internal buffer.
+       */
+
+      if (priv->unaligned_rx)
+        {
+          DEBUGASSERT(priv->blocksize <= sizeof(sdmmc_rxbuffer));
+          dlen = priv->blocksize;
+        }
+#endif
+    }
+
+  dctrl |= stm32_log2(priv->blocksize) << STM32_SDMMC_DCTRL_DBLOCKSIZE_SHIFT;
+
+  /* Enable data path */
+
+  /* Set DTIMER */
+
+  sdmmc_putreg32(priv, timeout, STM32_SDMMC_DTIMER_OFFSET);
+
+  /* Set DLEN */
+
+  sdmmc_putreg32(priv, dlen, STM32_SDMMC_DLEN_OFFSET);
+
+  /* Set DCTRL */
+
+  sdmmc_putreg32(priv, dctrl, STM32_SDMMC_DCTRL_OFFSET);
 }
 
 /****************************************************************************
@@ -1256,6 +1319,81 @@ static void stm32_recvfifo(struct stm32_dev_s *priv)
 #endif
 
 /****************************************************************************
+ * Name: stm32_recvdma
+ *
+ * Description:
+ *   Receive SDIO data in DMA mode
+ *
+ * Input Parameters:
+ *   priv  - Instance of the SDMMC private state structure.
+ *
+ * Returned Value:
+ *   None
+ *
+ ****************************************************************************/
+
+#if defined(CONFIG_STM32H7_SDMMC_IDMA) && defined(CONFIG_ARMV7M_DCACHE) && \
+    !defined(CONFIG_ARCH_HAVE_SDIO_DELAYED_INVLDT)
+static void stm32_recvdma(struct stm32_dev_s *priv)
+{
+  uint32_t dctrl;
+
+  if (priv->unaligned_rx)
+    {
+      /* If we are receiving multiple blocks into an unaligned buffer,
+       * we receive them one by one.
+       */
+
+      /* Copy the received data to the client buffer */
+
+      memcpy(priv->buffer, sdmmc_rxbuffer, priv->blocksize);
+
+      /* Invalidate the cache before receiving the next block */
+
+      up_invalidate_dcache((uintptr_t)sdmmc_rxbuffer,
+                           (uintptr_t)sdmmc_rxbuffer + priv->blocksize);
+
+      /* Update how much there is left to receive */
+
+      priv->remaining -= priv->blocksize;
+    }
+  else
+    {
+      /* In the aligned case, all blocks have already been received */
+
+      priv->remaining = 0;
+    }
+
+  if (priv->remaining == 0)
+    {
+      /* No data remaining; end the transfer */
+
+      stm32_endtransfer(priv, SDIOWAIT_TRANSFERDONE);
+    }
+  else
+    {
+      /* We end up here only in the unaligned RX buffer case, receiving
+       * the data one block at a time.
+       */
+
+      /* Update where to receive the next block */
+
+      priv->buffer = (uint32_t *)((uintptr_t)priv->buffer + priv->blocksize);
+
+      /* Clear pending interrupt status */
+
+      sdmmc_putreg32(priv, STM32_SDMMC_XFRDONE_ICR, STM32_SDMMC_ICR_OFFSET);
+
+      /* Re-enable the data path and wait for the next block */
+
+      dctrl = sdmmc_getreg32(priv, STM32_SDMMC_DCTRL_OFFSET);
+      dctrl |= STM32_SDMMC_DCTRL_DTEN;
+      sdmmc_putreg32(priv, dctrl, STM32_SDMMC_DCTRL_OFFSET);
+    }
+}
+#endif
+
+/****************************************************************************
  * Name: stm32_eventtimeout
  *
  * Description:
@@ -1377,17 +1515,6 @@ static void stm32_endtransfer(struct stm32_dev_s *priv,
 
   sdmmc_putreg32(priv, STM32_SDMMC_XFRDONE_ICR, STM32_SDMMC_ICR_OFFSET);
 
-#if defined(CONFIG_STM32H7_SDMMC_IDMA) && \
-    !defined(CONFIG_ARCH_HAVE_SDIO_DELAYED_INVLDT)
-  /* invalidate dcache in case of DMA receive. */
-
-  if (priv->receivecnt)
-    {
-      up_invalidate_dcache((uintptr_t)priv->buffer,
-                           (uintptr_t)priv->buffer + priv->remaining);
-    }
-#endif
-
   /* Mark the transfer finished */
 
   priv->remaining = 0;
@@ -1516,10 +1643,8 @@ static int stm32_sdmmc_interrupt(int irq, void *context, void *arg)
               /* Receive data from the RX FIFO */
 
               stm32_recvfifo(priv);
-#if !defined(CONFIG_STM32H7_SDMMC_IDMA)
               work_queue(HPWORK, &priv->cbfifo,
                          (worker_t)stm32_sdmmc_fifo_monitor, arg, 1);
-#endif
             }
 
           /* Otherwise, Is the transmit FIFO half empty or less?  If so
@@ -1554,6 +1679,8 @@ static int stm32_sdmmc_interrupt(int irq, void *context, void *arg)
                * half-full interrupt will be received.
                */
 
+#ifndef CONFIG_STM32H7_SDMMC_IDMA
+
               /* If the transfer would not trigger fifo half full
                * we used IDMA to manage the lame fifo
                */
@@ -1563,10 +1690,27 @@ static int stm32_sdmmc_interrupt(int irq, void *context, void *arg)
                 {
                   memcpy(priv->buffer, priv->rxfifo, priv->remaining);
                 }
+#else
+#  if defined(CONFIG_ARMV7M_DCACHE) && \
+      !defined(CONFIG_ARCH_HAVE_SDIO_DELAYED_INVLDT)
+              if (priv->receivecnt)
+                {
+                  /* Invalidate the dcache and, in the unaligned case,
+                   * copy the received data into the client buffer
+                   */
 
-              /* Then terminate the transfer Sets STM32_SDMMC_ICR_DATAENDC */
+                  stm32_recvdma(priv);
+                }
+              else
+#  endif
+#endif
+                {
+                  /* Then terminate the transfer.
+                   * Sets STM32_SDMMC_ICR_DATAENDC
+                   */
 
-              stm32_endtransfer(priv, SDIOWAIT_TRANSFERDONE);
+                  stm32_endtransfer(priv, SDIOWAIT_TRANSFERDONE);
+                }
             }
 
           /* Handle data block send/receive CRC failure */
@@ -2132,11 +2276,11 @@ static void stm32_blocksetup(FAR struct sdio_dev_s *dev,
  *
  ****************************************************************************/
 
+#ifndef CONFIG_STM32H7_SDMMC_IDMA
 static int stm32_recvsetup(FAR struct sdio_dev_s *dev, FAR uint8_t *buffer,
                            size_t nbytes)
 {
   struct stm32_dev_s *priv = (struct stm32_dev_s *)dev;
-  uint32_t dblksize;
 
   DEBUGASSERT(priv != NULL && buffer != NULL && nbytes > 0);
   DEBUGASSERT(((uint32_t)buffer & 3) == 0);
@@ -2160,10 +2304,8 @@ static int stm32_recvsetup(FAR struct sdio_dev_s *dev, FAR uint8_t *buffer,
 
   /* Then set up the SDIO data path */
 
-  dblksize = stm32_log2(priv->blocksize) <<
-             STM32_SDMMC_DCTRL_DBLOCKSIZE_SHIFT;
   stm32_dataconfig(priv, SDMMC_DTIMER_DATATIMEOUT * ((nbytes + 511) >> 9),
-                   nbytes, dblksize | STM32_SDMMC_DCTRL_DTDIR);
+                   nbytes, true);
 
   /* Workaround the FIFO data available issue */
 
@@ -2190,6 +2332,7 @@ static int stm32_recvsetup(FAR struct sdio_dev_s *dev, FAR uint8_t *buffer,
 
   return OK;
 }
+#endif
 
 /****************************************************************************
  * Name: stm32_sendsetup
@@ -2210,11 +2353,11 @@ static int stm32_recvsetup(FAR struct sdio_dev_s *dev, FAR uint8_t *buffer,
  *
  ****************************************************************************/
 
+#ifndef CONFIG_STM32H7_SDMMC_IDMA
 static int stm32_sendsetup(FAR struct sdio_dev_s *dev, FAR const
                            uint8_t *buffer, size_t nbytes)
 {
   struct stm32_dev_s *priv = (struct stm32_dev_s *)dev;
-  uint32_t dblksize;
 
   DEBUGASSERT(priv != NULL && buffer != NULL && nbytes > 0);
   DEBUGASSERT(((uint32_t)buffer & 3) == 0);
@@ -2236,10 +2379,8 @@ static int stm32_sendsetup(FAR struct sdio_dev_s *dev, FAR const
 
   /* Then set up the SDIO data path */
 
-  dblksize = stm32_log2(priv->blocksize) <<
-             STM32_SDMMC_DCTRL_DBLOCKSIZE_SHIFT;
   stm32_dataconfig(priv, SDMMC_DTIMER_DATATIMEOUT * ((nbytes + 511) >> 9),
-                   nbytes, dblksize);
+                   nbytes, false);
 
   /* Enable TX interrupts */
 
@@ -2247,6 +2388,7 @@ static int stm32_sendsetup(FAR struct sdio_dev_s *dev, FAR const
   stm32_sample(priv, SAMPLENDX_AFTER_SETUP);
   return OK;
 }
+#endif
 
 /****************************************************************************
  * Name: stm32_cancel
@@ -2949,27 +3091,34 @@ static int stm32_dmarecvsetup(FAR struct sdio_dev_s *dev,
                               FAR uint8_t *buffer, size_t buflen)
 {
   struct stm32_dev_s *priv = (struct stm32_dev_s *)dev;
-  uint32_t dblksize;
 
   DEBUGASSERT(priv != NULL && buffer != NULL && buflen > 0);
-#if defined(CONFIG_ARCH_HAVE_SDIO_PREFLIGHT)
+#  if defined(CONFIG_ARCH_HAVE_SDIO_PREFLIGHT)
   DEBUGASSERT(stm32_dmapreflight(dev, buffer, buflen) == 0);
-#else
-#  if defined(CONFIG_ARMV7M_DCACHE) && !defined(CONFIG_ARMV7M_DCACHE_WRITETHROUGH)
-  /* buffer alignment is required for DMA transfers with dcache in buffered
-   * mode (not write-through) because the up_invalidate_dcache could lose
-   * buffered buffered writes if the buffer alignment and sizes are not on
-   * ARMV7M_DCACHE_LINESIZE boundaries.
-   */
-
+#  else
+#    if defined(CONFIG_ARMV7M_DCACHE) && \
+        !defined(CONFIG_ARCH_HAVE_SDIO_DELAYED_INVLDT)
   if (((uintptr_t)buffer & (ARMV7M_DCACHE_LINESIZE - 1)) != 0 ||
-      (buflen & (ARMV7M_DCACHE_LINESIZE - 1)) != 0)
+       (buflen & (ARMV7M_DCACHE_LINESIZE - 1)) != 0)
     {
-      return -EFAULT;
+      /* The read buffer is not cache-line aligned. Read into an internal
+       * buffer instead.
+       */
+
+      up_invalidate_dcache((uintptr_t)sdmmc_rxbuffer,
+                           (uintptr_t)sdmmc_rxbuffer + priv->blocksize);
+
+      priv->unaligned_rx = true;
     }
-#  endif
+  else
+    {
+      up_invalidate_dcache((uintptr_t)buffer,
+                           (uintptr_t)buffer + buflen);
 
-#endif
+      priv->unaligned_rx = false;
+    }
+#    endif
+#  endif
 
   /* Reset the DPSM configuration */
 
@@ -2990,28 +3139,27 @@ static int stm32_dmarecvsetup(FAR struct sdio_dev_s *dev,
 
   /* Then set up the SDIO data path */
 
-  dblksize = stm32_log2(priv->blocksize) <<
-             STM32_SDMMC_DCTRL_DBLOCKSIZE_SHIFT;
   stm32_dataconfig(priv, SDMMC_DTIMER_DATATIMEOUT * ((buflen + 511) >> 9),
-                   buflen, dblksize | STM32_SDMMC_DCTRL_DTDIR);
+                   buflen, true);
 
   /* Configure the RX DMA */
 
-  sdmmc_putreg32(priv, (uintptr_t)priv->buffer,
-                 STM32_SDMMC_IDMABASE0R_OFFSET);
-
-  sdmmc_putreg32(priv, STM32_SDMMC_IDMACTRLR_IDMAEN,
-                 STM32_SDMMC_IDMACTRLR_OFFSET);
-
-  /* Force RAM reread */
-
-#  if !defined(CONFIG_ARCH_HAVE_SDIO_DELAYED_INVLDT)
-  if ((uintptr_t)buffer < DTCM_START ||
-      (uintptr_t)buffer + buflen > DTCM_END)
+#  if defined(CONFIG_ARMV7M_DCACHE) && \
+      !defined(CONFIG_ARCH_HAVE_SDIO_DELAYED_INVLDT)
+  if (priv->unaligned_rx)
     {
-      up_invalidate_dcache((uintptr_t)buffer, (uintptr_t)buffer + buflen);
+      sdmmc_putreg32(priv, (uintptr_t)sdmmc_rxbuffer,
+                     STM32_SDMMC_IDMABASE0R_OFFSET);
     }
+  else
 #  endif
+    {
+      sdmmc_putreg32(priv, (uintptr_t)priv->buffer,
+                     STM32_SDMMC_IDMABASE0R_OFFSET);
+    }
+
+  sdmmc_putreg32(priv, STM32_SDMMC_IDMACTRLR_IDMAEN,
+                 STM32_SDMMC_IDMACTRLR_OFFSET);
 
   /* And enable interrupts */
 
@@ -3046,27 +3194,16 @@ static int stm32_dmasendsetup(FAR struct sdio_dev_s *dev,
                               FAR const uint8_t *buffer, size_t buflen)
 {
   struct stm32_dev_s *priv = (struct stm32_dev_s *)dev;
-  uint32_t dblksize;
 
   DEBUGASSERT(priv != NULL && buffer != NULL && buflen > 0);
-#if defined(CONFIG_ARCH_HAVE_SDIO_PREFLIGHT)
+#  if defined(CONFIG_ARCH_HAVE_SDIO_PREFLIGHT)
   DEBUGASSERT(stm32_dmapreflight(dev, buffer, buflen) == 0);
-#else
-#  if defined(CONFIG_ARMV7M_DCACHE) && \
-      !defined(CONFIG_ARMV7M_DCACHE_WRITETHROUGH)
-  /* buffer alignment is required for DMA transfers with dcache in buffered
-   * mode (not write-through) because the up_flush_dcache would corrupt
-   * adjacent memory if the buffer alignment and sizes are not on
-   * ARMV7M_DCACHE_LINESIZE boundaries.
-   */
+#  endif
 
-  if (((uintptr_t)buffer & (ARMV7M_DCACHE_LINESIZE - 1)) != 0 ||
-      (buflen & (ARMV7M_DCACHE_LINESIZE - 1)) != 0)
-    {
-      return -EFAULT;
-    }
+#  if defined(CONFIG_ARMV7M_DCACHE) && \
+      !defined(CONFIG_ARCH_HAVE_SDIO_DELAYED_INVLDT)
+  priv->unaligned_rx = false;
 #  endif
-#endif
 
   /* Reset the DPSM configuration */
 
@@ -3086,7 +3223,7 @@ static int stm32_dmasendsetup(FAR struct sdio_dev_s *dev,
     {
       up_clean_dcache((uintptr_t)buffer, (uintptr_t)buffer + buflen);
     }
-#endif
+#  endif
 
   /* Save the source buffer information for use by the interrupt handler */
 
@@ -3096,10 +3233,8 @@ static int stm32_dmasendsetup(FAR struct sdio_dev_s *dev,
 
   /* Then set up the SDIO data path */
 
-  dblksize = stm32_log2(priv->blocksize) <<
-             STM32_SDMMC_DCTRL_DBLOCKSIZE_SHIFT;
   stm32_dataconfig(priv, SDMMC_DTIMER_DATATIMEOUT * ((buflen + 511) >> 9),
-                   buflen, dblksize);
+                   buflen, false);
 
   /* Configure the TX DMA */