Posted to commits@mynewt.apache.org by ma...@apache.org on 2017/03/09 22:51:39 UTC
[12/17] incubator-mynewt-core git commit: Update CMSIS
http://git-wip-us.apache.org/repos/asf/incubator-mynewt-core/blob/c8b6596e/hw/cmsis-core/src/ext/cmsis_gcc.h
----------------------------------------------------------------------
diff --git a/hw/cmsis-core/src/ext/cmsis_gcc.h b/hw/cmsis-core/src/ext/cmsis_gcc.h
new file mode 100644
index 0000000..d868f2e
--- /dev/null
+++ b/hw/cmsis-core/src/ext/cmsis_gcc.h
@@ -0,0 +1,1373 @@
+/**************************************************************************//**
+ * @file cmsis_gcc.h
+ * @brief CMSIS Cortex-M Core Function/Instruction Header File
+ * @version V4.30
+ * @date 20. October 2015
+ ******************************************************************************/
+/* Copyright (c) 2009 - 2015 ARM LIMITED
+
+ All rights reserved.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+ - Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ - Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ - Neither the name of ARM nor the names of its contributors may be used
+ to endorse or promote products derived from this software without
+ specific prior written permission.
+ *
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+ ---------------------------------------------------------------------------*/
+
+
+#ifndef __CMSIS_GCC_H
+#define __CMSIS_GCC_H
+
+/* ignore some GCC warnings */
+#if defined ( __GNUC__ )
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wsign-conversion"
+#pragma GCC diagnostic ignored "-Wconversion"
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+
+/* ########################### Core Function Access ########################### */
+/** \ingroup CMSIS_Core_FunctionInterface
+ \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions
+ @{
+ */
+
+/**
+ \brief Enable IRQ Interrupts
+ \details Enables IRQ interrupts by clearing the I-bit in the CPSR.
+ Can only be executed in Privileged modes.
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE void __enable_irq(void)
+{
+ __ASM volatile ("cpsie i" : : : "memory");
+}
+
+
+/**
+ \brief Disable IRQ Interrupts
+ \details Disables IRQ interrupts by setting the I-bit in the CPSR.
+ Can only be executed in Privileged modes.
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE void __disable_irq(void)
+{
+ __ASM volatile ("cpsid i" : : : "memory");
+}
+
+
+/**
+ \brief Get Control Register
+ \details Returns the content of the Control Register.
+ \return Control Register value
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_CONTROL(void)
+{
+ uint32_t result;
+
+ __ASM volatile ("MRS %0, control" : "=r" (result) );
+ return(result);
+}
+
+
+/**
+ \brief Set Control Register
+ \details Writes the given value to the Control Register.
+ \param [in] control Control Register value to set
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_CONTROL(uint32_t control)
+{
+ __ASM volatile ("MSR control, %0" : : "r" (control) : "memory");
+}
+
+
+/**
+ \brief Get IPSR Register
+ \details Returns the content of the IPSR Register.
+ \return IPSR Register value
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_IPSR(void)
+{
+ uint32_t result;
+
+ __ASM volatile ("MRS %0, ipsr" : "=r" (result) );
+ return(result);
+}
+
+
+/**
+ \brief Get APSR Register
+ \details Returns the content of the APSR Register.
+ \return APSR Register value
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_APSR(void)
+{
+ uint32_t result;
+
+ __ASM volatile ("MRS %0, apsr" : "=r" (result) );
+ return(result);
+}
+
+
+/**
+ \brief Get xPSR Register
+ \details Returns the content of the xPSR Register.
+ \return xPSR Register value
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_xPSR(void)
+{
+ uint32_t result;
+
+ __ASM volatile ("MRS %0, xpsr" : "=r" (result) );
+ return(result);
+}
+
+
+/**
+ \brief Get Process Stack Pointer
+ \details Returns the current value of the Process Stack Pointer (PSP).
+ \return PSP Register value
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_PSP(void)
+{
+ register uint32_t result;
+
+ __ASM volatile ("MRS %0, psp\n" : "=r" (result) );
+ return(result);
+}
+
+
+/**
+ \brief Set Process Stack Pointer
+ \details Assigns the given value to the Process Stack Pointer (PSP).
+ \param [in] topOfProcStack Process Stack Pointer value to set
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_PSP(uint32_t topOfProcStack)
+{
+ __ASM volatile ("MSR psp, %0\n" : : "r" (topOfProcStack) : "sp");
+}
+
+
+/**
+ \brief Get Main Stack Pointer
+ \details Returns the current value of the Main Stack Pointer (MSP).
+ \return MSP Register value
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_MSP(void)
+{
+ register uint32_t result;
+
+ __ASM volatile ("MRS %0, msp\n" : "=r" (result) );
+ return(result);
+}
+
+
+/**
+ \brief Set Main Stack Pointer
+ \details Assigns the given value to the Main Stack Pointer (MSP).
+ \param [in] topOfMainStack Main Stack Pointer value to set
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_MSP(uint32_t topOfMainStack)
+{
+ __ASM volatile ("MSR msp, %0\n" : : "r" (topOfMainStack) : "sp");
+}
+
+
+/**
+ \brief Get Priority Mask
+ \details Returns the current state of the priority mask bit from the Priority Mask Register.
+ \return Priority Mask value
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_PRIMASK(void)
+{
+ uint32_t result;
+
+ __ASM volatile ("MRS %0, primask" : "=r" (result) );
+ return(result);
+}
+
+
+/**
+ \brief Set Priority Mask
+ \details Assigns the given value to the Priority Mask Register.
+ \param [in] priMask Priority Mask
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_PRIMASK(uint32_t priMask)
+{
+ __ASM volatile ("MSR primask, %0" : : "r" (priMask) : "memory");
+}
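
A minimal sketch of the usual pattern built on these PRIMASK accessors: save the current mask, disable IRQs, and restore the saved state on exit (the crit_enter/crit_exit names are illustrative, not part of CMSIS):

    static inline uint32_t crit_enter(void)
    {
        uint32_t primask = __get_PRIMASK();  /* remember current IRQ state */
        __disable_irq();
        return primask;
    }

    static inline void crit_exit(uint32_t primask)
    {
        __set_PRIMASK(primask);              /* restore; may re-enable IRQs */
    }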
+
+
+#if (__CORTEX_M >= 0x03U)
+
+/**
+ \brief Enable FIQ
+ \details Enables FIQ interrupts by clearing the F-bit in the CPSR.
+ Can only be executed in Privileged modes.
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE void __enable_fault_irq(void)
+{
+ __ASM volatile ("cpsie f" : : : "memory");
+}
+
+
+/**
+ \brief Disable FIQ
+ \details Disables FIQ interrupts by setting the F-bit in the CPSR.
+ Can only be executed in Privileged modes.
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE void __disable_fault_irq(void)
+{
+ __ASM volatile ("cpsid f" : : : "memory");
+}
+
+
+/**
+ \brief Get Base Priority
+ \details Returns the current value of the Base Priority register.
+ \return Base Priority register value
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_BASEPRI(void)
+{
+ uint32_t result;
+
+ __ASM volatile ("MRS %0, basepri" : "=r" (result) );
+ return(result);
+}
+
+
+/**
+ \brief Set Base Priority
+ \details Assigns the given value to the Base Priority register.
+ \param [in]    value  Base Priority value to set
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_BASEPRI(uint32_t value)
+{
+ __ASM volatile ("MSR basepri, %0" : : "r" (value) : "memory");
+}
+
+
+/**
+ \brief Set Base Priority with condition
+ \details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled,
+ or the new value increases the BASEPRI priority level.
+ \param [in]    value  Base Priority value to set
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_BASEPRI_MAX(uint32_t value)
+{
+ __ASM volatile ("MSR basepri_max, %0" : : "r" (value) : "memory");
+}
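
A sketch of the classic BASEPRI use, assuming a hypothetical OS ceiling priority of 0x40: unlike PRIMASK, BASEPRI masks only interrupts of the given priority or lower urgency, so more urgent IRQs keep running inside the locked region:

    #define OS_CEILING_PRIO  (0x40U)         /* assumed value, for illustration */

    static inline void os_lock(void)
    {
        __set_BASEPRI_MAX(OS_CEILING_PRIO);  /* only ever tightens the mask */
    }

    static inline void os_unlock(void)
    {
        __set_BASEPRI(0U);                   /* 0 disables BASEPRI masking */
    }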
+
+
+/**
+ \brief Get Fault Mask
+ \details Returns the current value of the Fault Mask register.
+ \return Fault Mask register value
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_FAULTMASK(void)
+{
+ uint32_t result;
+
+ __ASM volatile ("MRS %0, faultmask" : "=r" (result) );
+ return(result);
+}
+
+
+/**
+ \brief Set Fault Mask
+ \details Assigns the given value to the Fault Mask register.
+ \param [in] faultMask Fault Mask value to set
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_FAULTMASK(uint32_t faultMask)
+{
+ __ASM volatile ("MSR faultmask, %0" : : "r" (faultMask) : "memory");
+}
+
+#endif /* (__CORTEX_M >= 0x03U) */
+
+
+#if (__CORTEX_M == 0x04U) || (__CORTEX_M == 0x07U)
+
+/**
+ \brief Get FPSCR
+ \details Returns the current value of the Floating Point Status/Control register.
+ \return Floating Point Status/Control register value
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_FPSCR(void)
+{
+#if (__FPU_PRESENT == 1U) && (__FPU_USED == 1U)
+ uint32_t result;
+
+ /* Empty asm statement works as a scheduling barrier */
+ __ASM volatile ("");
+ __ASM volatile ("VMRS %0, fpscr" : "=r" (result) );
+ __ASM volatile ("");
+ return(result);
+#else
+ return(0);
+#endif
+}
+
+
+/**
+ \brief Set FPSCR
+ \details Assigns the given value to the Floating Point Status/Control register.
+ \param [in] fpscr Floating Point Status/Control value to set
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_FPSCR(uint32_t fpscr)
+{
+#if (__FPU_PRESENT == 1U) && (__FPU_USED == 1U)
+ /* Empty asm statement works as a scheduling barrier */
+ __ASM volatile ("");
+ __ASM volatile ("VMSR fpscr, %0" : : "r" (fpscr) : "vfpcc");
+ __ASM volatile ("");
+#endif
+}
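
A sketch of a read-modify-write on FPSCR: bits [23:22] select the rounding mode in the ARMv7-M FPU (the mask constants and helper name below are the editor's, not CMSIS definitions):

    #define FPSCR_RMODE_Pos  22U
    #define FPSCR_RMODE_Msk  (0x3U << FPSCR_RMODE_Pos)

    static inline void fpu_set_round_to_nearest(void)
    {
        uint32_t fpscr = __get_FPSCR();
        fpscr &= ~FPSCR_RMODE_Msk;   /* 0b00 selects round-to-nearest */
        __set_FPSCR(fpscr);
    }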
+
+#endif /* (__CORTEX_M == 0x04U) || (__CORTEX_M == 0x07U) */
+
+
+
+/*@} end of CMSIS_Core_RegAccFunctions */
+
+
+/* ########################## Core Instruction Access ######################### */
+/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface
+ Access to dedicated instructions
+ @{
+*/
+
+/* Define macros for porting to both thumb1 and thumb2.
+ * For thumb1, use low register (r0-r7), specified by constraint "l"
+ * Otherwise, use general registers, specified by constraint "r" */
+#if defined (__thumb__) && !defined (__thumb2__)
+#define __CMSIS_GCC_OUT_REG(r) "=l" (r)
+#define __CMSIS_GCC_USE_REG(r) "l" (r)
+#else
+#define __CMSIS_GCC_OUT_REG(r) "=r" (r)
+#define __CMSIS_GCC_USE_REG(r) "r" (r)
+#endif
+
+/**
+ \brief No Operation
+ \details No Operation does nothing. This instruction can be used for code alignment purposes.
+ */
+__attribute__((always_inline)) __STATIC_INLINE void __NOP(void)
+{
+ __ASM volatile ("nop");
+}
+
+
+/**
+ \brief Wait For Interrupt
+ \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs.
+ */
+__attribute__((always_inline)) __STATIC_INLINE void __WFI(void)
+{
+ __ASM volatile ("wfi");
+}
+
+
+/**
+ \brief Wait For Event
+ \details Wait For Event is a hint instruction that permits the processor to enter
+ a low-power state until one of a number of events occurs.
+ */
+__attribute__((always_inline)) __STATIC_INLINE void __WFE(void)
+{
+ __ASM volatile ("wfe");
+}
+
+
+/**
+ \brief Send Event
+ \details Send Event is a hint instruction. It causes an event to be signaled to the CPU.
+ */
+__attribute__((always_inline)) __STATIC_INLINE void __SEV(void)
+{
+ __ASM volatile ("sev");
+}
+
+
+/**
+ \brief Instruction Synchronization Barrier
+ \details Instruction Synchronization Barrier flushes the pipeline in the processor,
+ so that all instructions following the ISB are fetched from cache or memory
+ after the ISB has completed.
+ */
+__attribute__((always_inline)) __STATIC_INLINE void __ISB(void)
+{
+ __ASM volatile ("isb 0xF":::"memory");
+}
+
+
+/**
+ \brief Data Synchronization Barrier
+ \details Acts as a special kind of Data Memory Barrier.
+ It completes when all explicit memory accesses before this instruction complete.
+ */
+__attribute__((always_inline)) __STATIC_INLINE void __DSB(void)
+{
+ __ASM volatile ("dsb 0xF":::"memory");
+}
+
+
+/**
+ \brief Data Memory Barrier
+ \details Ensures the apparent order of the explicit memory operations before
+ and after the instruction, without ensuring their completion.
+ */
+__attribute__((always_inline)) __STATIC_INLINE void __DMB(void)
+{
+ __ASM volatile ("dmb 0xF":::"memory");
+}
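
A sketch of the typical __DMB() use, ordering a payload write before the flag that publishes it (the variable names are illustrative):

    volatile uint32_t shared_data;
    volatile uint32_t data_ready;

    static void publish(uint32_t v)
    {
        shared_data = v;
        __DMB();           /* data write is observed before the flag write */
        data_ready = 1U;
    }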
+
+
+/**
+ \brief Reverse byte order (32 bit)
+ \details Reverses the byte order in an integer value.
+ \param [in] value Value to reverse
+ \return Reversed value
+ */
+__attribute__((always_inline)) __STATIC_INLINE uint32_t __REV(uint32_t value)
+{
+#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)
+ return __builtin_bswap32(value);
+#else
+ uint32_t result;
+
+ __ASM volatile ("rev %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
+ return(result);
+#endif
+}
+
+
+/**
+ \brief Reverse byte order (16 bit)
+ \details Reverses the byte order in each of the two halfwords of a 32-bit value.
+ \param [in] value Value to reverse
+ \return Reversed value
+ */
+__attribute__((always_inline)) __STATIC_INLINE uint32_t __REV16(uint32_t value)
+{
+ uint32_t result;
+
+ __ASM volatile ("rev16 %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
+ return(result);
+}
+
+
+/**
+ \brief Reverse byte order in signed short value
+ \details Reverses the byte order in a signed short value with sign extension to integer.
+ \param [in] value Value to reverse
+ \return Reversed value
+ */
+__attribute__((always_inline)) __STATIC_INLINE int32_t __REVSH(int32_t value)
+{
+#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
+ return (short)__builtin_bswap16(value);
+#else
+ int32_t result;
+
+ __ASM volatile ("revsh %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
+ return(result);
+#endif
+}
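
A sketch of the common use of the REV family, converting a big-endian wire value to host order on a little-endian Cortex-M (helper name is illustrative):

    static inline uint32_t be32_to_host(uint32_t wire)
    {
        return __REV(wire);   /* 0x11223344 becomes 0x44332211 */
    }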
+
+
+/**
+ \brief Rotate Right in unsigned value (32 bit)
+ \details Rotate Right provides the value of the contents of a register rotated by a variable number of bits.
+ \param [in]    op1  Value to rotate
+ \param [in]    op2  Number of bits to rotate
+ \return Rotated value
+ */
+__attribute__((always_inline)) __STATIC_INLINE uint32_t __ROR(uint32_t op1, uint32_t op2)
+{
+ return (op1 >> op2) | (op1 << (32U - op2));
+}
+
+
+/**
+ \brief Breakpoint
+ \details Causes the processor to enter Debug state.
+ Debug tools can use this to investigate system state when the instruction at a particular address is reached.
+ \param [in] value is ignored by the processor.
+ If required, a debugger can use it to store additional information about the breakpoint.
+ */
+#define __BKPT(value) __ASM volatile ("bkpt "#value)
+
+
+/**
+ \brief Reverse bit order of value
+ \details Reverses the bit order of the given value.
+ \param [in] value Value to reverse
+ \return Reversed value
+ */
+__attribute__((always_inline)) __STATIC_INLINE uint32_t __RBIT(uint32_t value)
+{
+ uint32_t result;
+
+#if (__CORTEX_M >= 0x03U) || (__CORTEX_SC >= 300U)
+ __ASM volatile ("rbit %0, %1" : "=r" (result) : "r" (value) );
+#else
+ int32_t s = 4 /*sizeof(v)*/ * 8 - 1; /* extra shift needed at end */
+
+ result = value; /* r will be reversed bits of v; first get LSB of v */
+ for (value >>= 1U; value; value >>= 1U)
+ {
+ result <<= 1U;
+ result |= value & 1U;
+ s--;
+ }
+ result <<= s; /* shift when v's highest bits are zero */
+#endif
+ return(result);
+}
+
+
+/**
+ \brief Count leading zeros
+ \details Counts the number of leading zeros of a data value.
+ \param [in] value Value to count the leading zeros
+ \return number of leading zeros in value
+ */
+#define __CLZ __builtin_clz
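
Note that __builtin_clz is undefined for an input of 0, while the CLZ instruction returns 32, so portable callers should guard the zero case. Combined with __RBIT above, __CLZ also gives a count-trailing-zeros sketch (helper name is illustrative):

    static inline uint32_t ctz32(uint32_t value)
    {
        return (value == 0U) ? 32U : __CLZ(__RBIT(value));   /* guard 0 */
    }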
+
+
+#if (__CORTEX_M >= 0x03U) || (__CORTEX_SC >= 300U)
+
+/**
+ \brief LDR Exclusive (8 bit)
+ \details Executes an exclusive LDR instruction for an 8 bit value.
+ \param [in]    addr  Pointer to data
+ \return             value of type uint8_t at (*addr)
+ */
+__attribute__((always_inline)) __STATIC_INLINE uint8_t __LDREXB(volatile uint8_t *addr)
+{
+ uint32_t result;
+
+#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
+ __ASM volatile ("ldrexb %0, %1" : "=r" (result) : "Q" (*addr) );
+#else
+    /* Prior to GCC 4.8, "Q" is expanded to [rx, #0], which is not
+       accepted by the assembler, so the following less efficient
+       pattern has to be used.
+    */
+ __ASM volatile ("ldrexb %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
+#endif
+ return ((uint8_t) result); /* Add explicit type cast here */
+}
+
+
+/**
+ \brief LDR Exclusive (16 bit)
+ \details Executes an exclusive LDR instruction for 16 bit values.
+ \param [in]    addr  Pointer to data
+ \return             value of type uint16_t at (*addr)
+ */
+__attribute__((always_inline)) __STATIC_INLINE uint16_t __LDREXH(volatile uint16_t *addr)
+{
+ uint32_t result;
+
+#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
+ __ASM volatile ("ldrexh %0, %1" : "=r" (result) : "Q" (*addr) );
+#else
+    /* Prior to GCC 4.8, "Q" is expanded to [rx, #0], which is not
+       accepted by the assembler, so the following less efficient
+       pattern has to be used.
+    */
+ __ASM volatile ("ldrexh %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
+#endif
+ return ((uint16_t) result); /* Add explicit type cast here */
+}
+
+
+/**
+ \brief LDR Exclusive (32 bit)
+ \details Executes an exclusive LDR instruction for 32 bit values.
+ \param [in]    addr  Pointer to data
+ \return             value of type uint32_t at (*addr)
+ */
+__attribute__((always_inline)) __STATIC_INLINE uint32_t __LDREXW(volatile uint32_t *addr)
+{
+ uint32_t result;
+
+ __ASM volatile ("ldrex %0, %1" : "=r" (result) : "Q" (*addr) );
+ return(result);
+}
+
+
+/**
+ \brief STR Exclusive (8 bit)
+ \details Executes an exclusive STR instruction for 8 bit values.
+ \param [in]  value  Value to store
+ \param [in]   addr  Pointer to location
+ \return 0 Function succeeded
+ \return 1 Function failed
+ */
+__attribute__((always_inline)) __STATIC_INLINE uint32_t __STREXB(uint8_t value, volatile uint8_t *addr)
+{
+ uint32_t result;
+
+ __ASM volatile ("strexb %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) );
+ return(result);
+}
+
+
+/**
+ \brief STR Exclusive (16 bit)
+ \details Executes an exclusive STR instruction for 16 bit values.
+ \param [in]  value  Value to store
+ \param [in]   addr  Pointer to location
+ \return 0 Function succeeded
+ \return 1 Function failed
+ */
+__attribute__((always_inline)) __STATIC_INLINE uint32_t __STREXH(uint16_t value, volatile uint16_t *addr)
+{
+ uint32_t result;
+
+ __ASM volatile ("strexh %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) );
+ return(result);
+}
+
+
+/**
+ \brief STR Exclusive (32 bit)
+ \details Executes an exclusive STR instruction for 32 bit values.
+ \param [in]  value  Value to store
+ \param [in]   addr  Pointer to location
+ \return 0 Function succeeded
+ \return 1 Function failed
+ */
+__attribute__((always_inline)) __STATIC_INLINE uint32_t __STREXW(uint32_t value, volatile uint32_t *addr)
+{
+ uint32_t result;
+
+ __ASM volatile ("strex %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" (value) );
+ return(result);
+}
+
+
+/**
+ \brief Remove the exclusive lock
+ \details Removes the exclusive lock which is created by LDREX.
+ */
+__attribute__((always_inline)) __STATIC_INLINE void __CLREX(void)
+{
+ __ASM volatile ("clrex" ::: "memory");
+}
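
A sketch of the standard LDREX/STREX retry loop, here a lock-free 32-bit add (function name is the editor's, not part of CMSIS):

    static inline void atomic_add32(volatile uint32_t *p, uint32_t delta)
    {
        uint32_t tmp;
        do {
            tmp = __LDREXW(p) + delta;       /* load with exclusive monitor */
        } while (__STREXW(tmp, p) != 0U);    /* non-zero: lost exclusivity */
        __DMB();                             /* order the completed update */
    }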
+
+
+/**
+ \brief Signed Saturate
+ \details Saturates a signed value.
+ \param [in] value Value to be saturated
+ \param [in] sat Bit position to saturate to (1..32)
+ \return Saturated value
+ */
+#define __SSAT(ARG1,ARG2) \
+({ \
+ uint32_t __RES, __ARG1 = (ARG1); \
+ __ASM ("ssat %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \
+ __RES; \
+ })
+
+
+/**
+ \brief Unsigned Saturate
+ \details Saturates an unsigned value.
+ \param [in] value Value to be saturated
+ \param [in] sat Bit position to saturate to (0..31)
+ \return Saturated value
+ */
+#define __USAT(ARG1,ARG2) \
+({ \
+ uint32_t __RES, __ARG1 = (ARG1); \
+ __ASM ("usat %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \
+ __RES; \
+ })
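
A sketch of a typical saturation use, clipping a 32-bit accumulator to the signed 16-bit sample range (helper name is illustrative):

    static inline int16_t clip_q15(int32_t acc)
    {
        return (int16_t)__SSAT(acc, 16);   /* clamps to [-32768, 32767] */
    }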
+
+
+/**
+ \brief Rotate Right with Extend (32 bit)
+ \details Moves each bit of a bitstring right by one bit.
+ The carry input is shifted in at the left end of the bitstring.
+ \param [in] value Value to rotate
+ \return Rotated value
+ */
+__attribute__((always_inline)) __STATIC_INLINE uint32_t __RRX(uint32_t value)
+{
+ uint32_t result;
+
+ __ASM volatile ("rrx %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
+ return(result);
+}
+
+
+/**
+ \brief LDRT Unprivileged (8 bit)
+ \details Executes an unprivileged LDRT instruction for an 8 bit value.
+ \param [in]    addr  Pointer to data
+ \return             value of type uint8_t at (*addr)
+ */
+__attribute__((always_inline)) __STATIC_INLINE uint8_t __LDRBT(volatile uint8_t *addr)
+{
+ uint32_t result;
+
+#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
+ __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*addr) );
+#else
+    /* Prior to GCC 4.8, "Q" is expanded to [rx, #0], which is not
+       accepted by the assembler, so the following less efficient
+       pattern has to be used.
+    */
+ __ASM volatile ("ldrbt %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
+#endif
+ return ((uint8_t) result); /* Add explicit type cast here */
+}
+
+
+/**
+ \brief LDRT Unprivileged (16 bit)
+ \details Executes an unprivileged LDRT instruction for 16 bit values.
+ \param [in]    addr  Pointer to data
+ \return             value of type uint16_t at (*addr)
+ */
+__attribute__((always_inline)) __STATIC_INLINE uint16_t __LDRHT(volatile uint16_t *addr)
+{
+ uint32_t result;
+
+#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
+ __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*addr) );
+#else
+    /* Prior to GCC 4.8, "Q" is expanded to [rx, #0], which is not
+       accepted by the assembler, so the following less efficient
+       pattern has to be used.
+    */
+ __ASM volatile ("ldrht %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
+#endif
+ return ((uint16_t) result); /* Add explicit type cast here */
+}
+
+
+/**
+ \brief LDRT Unprivileged (32 bit)
+ \details Executes an unprivileged LDRT instruction for 32 bit values.
+ \param [in]    addr  Pointer to data
+ \return             value of type uint32_t at (*addr)
+ */
+__attribute__((always_inline)) __STATIC_INLINE uint32_t __LDRT(volatile uint32_t *addr)
+{
+ uint32_t result;
+
+ __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*addr) );
+ return(result);
+}
+
+
+/**
+ \brief STRT Unprivileged (8 bit)
+ \details Executes an unprivileged STRT instruction for 8 bit values.
+ \param [in]  value  Value to store
+ \param [in]   addr  Pointer to location
+ */
+__attribute__((always_inline)) __STATIC_INLINE void __STRBT(uint8_t value, volatile uint8_t *addr)
+{
+ __ASM volatile ("strbt %1, %0" : "=Q" (*addr) : "r" ((uint32_t)value) );
+}
+
+
+/**
+ \brief STRT Unprivileged (16 bit)
+ \details Executes an unprivileged STRT instruction for 16 bit values.
+ \param [in]  value  Value to store
+ \param [in]   addr  Pointer to location
+ */
+__attribute__((always_inline)) __STATIC_INLINE void __STRHT(uint16_t value, volatile uint16_t *addr)
+{
+ __ASM volatile ("strht %1, %0" : "=Q" (*addr) : "r" ((uint32_t)value) );
+}
+
+
+/**
+ \brief STRT Unprivileged (32 bit)
+ \details Executes an unprivileged STRT instruction for 32 bit values.
+ \param [in]  value  Value to store
+ \param [in]   addr  Pointer to location
+ */
+__attribute__((always_inline)) __STATIC_INLINE void __STRT(uint32_t value, volatile uint32_t *addr)
+{
+ __ASM volatile ("strt %1, %0" : "=Q" (*addr) : "r" (value) );
+}
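
A sketch of how a privileged OS might use the *T variants to access a user buffer with the caller's own permissions (pointer name is illustrative):

    static inline uint8_t read_user_byte(volatile uint8_t *user_ptr)
    {
        return __LDRBT(user_ptr);   /* faults if user code may not read it */
    }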
+
+#endif /* (__CORTEX_M >= 0x03U) || (__CORTEX_SC >= 300U) */
+
+/*@}*/ /* end of group CMSIS_Core_InstructionInterface */
+
+
+/* ################### Compiler specific Intrinsics ########################### */
+/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics
+ Access to dedicated SIMD instructions
+ @{
+*/
+
+#if (__CORTEX_M >= 0x04U) /* only for Cortex-M4 and above */
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SADD8(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("sadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD8(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("qadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHADD8(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("shadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UADD8(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("uadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQADD8(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("uqadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHADD8(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("uhadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
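
A sketch of what the packed 8-bit intrinsics buy: four byte lanes processed per instruction, e.g. a per-byte average for blending pixels (names are illustrative):

    static inline uint32_t blend4(uint32_t px_a, uint32_t px_b)
    {
        return __UHADD8(px_a, px_b);   /* (a[i] + b[i]) / 2 per byte lane */
    }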
+
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSUB8(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("ssub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB8(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("qsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSUB8(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("shsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USUB8(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("usub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSUB8(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("uqsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSUB8(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("uhsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SADD16(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("sadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD16(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("qadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHADD16(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("shadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UADD16(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("uadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQADD16(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("uqadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHADD16(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("uhadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSUB16(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("ssub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB16(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("qsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSUB16(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("shsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USUB16(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("usub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSUB16(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("uqsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSUB16(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("uhsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SASX(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("sasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QASX(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("qasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHASX(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("shasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UASX(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("uasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQASX(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("uqasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHASX(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("uhasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSAX(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("ssax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSAX(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("qsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSAX(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("shsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USAX(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("usax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSAX(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("uqsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSAX(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("uhsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USAD8(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("usad8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USADA8(uint32_t op1, uint32_t op2, uint32_t op3)
+{
+ uint32_t result;
+
+ __ASM volatile ("usada8 %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
+ return(result);
+}
+
+#define __SSAT16(ARG1,ARG2) \
+({ \
+ int32_t __RES, __ARG1 = (ARG1); \
+ __ASM ("ssat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \
+ __RES; \
+ })
+
+#define __USAT16(ARG1,ARG2) \
+({ \
+ uint32_t __RES, __ARG1 = (ARG1); \
+ __ASM ("usat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \
+ __RES; \
+ })
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UXTB16(uint32_t op1)
+{
+ uint32_t result;
+
+ __ASM volatile ("uxtb16 %0, %1" : "=r" (result) : "r" (op1));
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UXTAB16(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("uxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SXTB16(uint32_t op1)
+{
+ uint32_t result;
+
+ __ASM volatile ("sxtb16 %0, %1" : "=r" (result) : "r" (op1));
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SXTAB16(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("sxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUAD (uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("smuad %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUADX (uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("smuadx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLAD (uint32_t op1, uint32_t op2, uint32_t op3)
+{
+ uint32_t result;
+
+ __ASM volatile ("smlad %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
+ return(result);
+}
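
A sketch of the classic __SMLAD use, a Q15 dot product over samples packed two per word (names are illustrative):

    static uint32_t dot_q15(const uint32_t *x, const uint32_t *y, uint32_t pairs)
    {
        uint32_t acc = 0U;
        while (pairs-- != 0U) {
            acc = __SMLAD(*x++, *y++, acc);  /* acc += x.lo*y.lo + x.hi*y.hi */
        }
        return acc;
    }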
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLADX (uint32_t op1, uint32_t op2, uint32_t op3)
+{
+ uint32_t result;
+
+ __ASM volatile ("smladx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLALD (uint32_t op1, uint32_t op2, uint64_t acc)
+{
+ union llreg_u{
+ uint32_t w32[2];
+ uint64_t w64;
+ } llr;
+ llr.w64 = acc;
+
+#ifndef __ARMEB__ /* Little endian */
+ __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
+#else /* Big endian */
+ __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
+#endif
+
+ return(llr.w64);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLALDX (uint32_t op1, uint32_t op2, uint64_t acc)
+{
+ union llreg_u{
+ uint32_t w32[2];
+ uint64_t w64;
+ } llr;
+ llr.w64 = acc;
+
+#ifndef __ARMEB__ /* Little endian */
+ __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
+#else /* Big endian */
+ __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
+#endif
+
+ return(llr.w64);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUSD (uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("smusd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUSDX (uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("smusdx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLSD (uint32_t op1, uint32_t op2, uint32_t op3)
+{
+ uint32_t result;
+
+ __ASM volatile ("smlsd %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLSDX (uint32_t op1, uint32_t op2, uint32_t op3)
+{
+ uint32_t result;
+
+ __ASM volatile ("smlsdx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLSLD (uint32_t op1, uint32_t op2, uint64_t acc)
+{
+ union llreg_u{
+ uint32_t w32[2];
+ uint64_t w64;
+ } llr;
+ llr.w64 = acc;
+
+#ifndef __ARMEB__ /* Little endian */
+ __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
+#else /* Big endian */
+ __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
+#endif
+
+ return(llr.w64);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLSLDX (uint32_t op1, uint32_t op2, uint64_t acc)
+{
+ union llreg_u{
+ uint32_t w32[2];
+ uint64_t w64;
+ } llr;
+ llr.w64 = acc;
+
+#ifndef __ARMEB__ /* Little endian */
+ __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
+#else /* Big endian */
+ __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
+#endif
+
+ return(llr.w64);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SEL (uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("sel %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
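
A sketch pairing __SEL with __USUB8: the subtraction sets the GE flags per byte, and __SEL then picks the larger byte from each lane (helper name is illustrative):

    static inline uint32_t max4_u8(uint32_t a, uint32_t b)
    {
        (void)__USUB8(a, b);   /* sets GE[i] when a[i] >= b[i] */
        return __SEL(a, b);    /* GE[i] ? byte from a : byte from b */
    }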
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE int32_t __QADD( int32_t op1, int32_t op2)
+{
+ int32_t result;
+
+ __ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE int32_t __QSUB( int32_t op1, int32_t op2)
+{
+ int32_t result;
+
+ __ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+#define __PKHBT(ARG1,ARG2,ARG3) \
+({ \
+ uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
+ __ASM ("pkhbt %0, %1, %2, lsl %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \
+ __RES; \
+ })
+
+#define __PKHTB(ARG1,ARG2,ARG3) \
+({ \
+ uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
+ if (ARG3 == 0) \
+ __ASM ("pkhtb %0, %1, %2" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2) ); \
+ else \
+ __ASM ("pkhtb %0, %1, %2, asr %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \
+ __RES; \
+ })
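
A sketch of __PKHBT packing two Q15 samples into one word, the layout expected by the dual-MAC intrinsics above (helper name is illustrative):

    static inline uint32_t pack_q15(int16_t lo, int16_t hi)
    {
        return __PKHBT((uint32_t)(uint16_t)lo, (uint32_t)(uint16_t)hi, 16);
    }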
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3)
+{
+ int32_t result;
+
+ __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) );
+ return(result);
+}
+
+#endif /* (__CORTEX_M >= 0x04) */
+/*@} end of group CMSIS_SIMD_intrinsics */
+
+
+#if defined ( __GNUC__ )
+#pragma GCC diagnostic pop
+#endif
+
+#endif /* __CMSIS_GCC_H */
http://git-wip-us.apache.org/repos/asf/incubator-mynewt-core/blob/c8b6596e/hw/cmsis-core/src/ext/core_ca9.h
----------------------------------------------------------------------
diff --git a/hw/cmsis-core/src/ext/core_ca9.h b/hw/cmsis-core/src/ext/core_ca9.h
deleted file mode 100644
index bae5f65..0000000
--- a/hw/cmsis-core/src/ext/core_ca9.h
+++ /dev/null
@@ -1,271 +0,0 @@
-/**************************************************************************//**
- * @file core_ca9.h
- * @brief CMSIS Cortex-A9 Core Peripheral Access Layer Header File
- * @version
- * @date 25 March 2013
- *
- * @note
- *
- ******************************************************************************/
-/* Copyright (c) 2009 - 2012 ARM LIMITED
-
- All rights reserved.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are met:
- - Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- - Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
- - Neither the name of ARM nor the names of its contributors may be used
- to endorse or promote products derived from this software without
- specific prior written permission.
- *
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE
- LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- POSSIBILITY OF SUCH DAMAGE.
- ---------------------------------------------------------------------------*/
-
-
-#if defined ( __ICCARM__ )
- #pragma system_include /* treat file as system include file for MISRA check */
-#endif
-
-#ifdef __cplusplus
- extern "C" {
-#endif
-
-#ifndef __CORE_CA9_H_GENERIC
-#define __CORE_CA9_H_GENERIC
-
-
-/** \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions
- CMSIS violates the following MISRA-C:2004 rules:
-
- \li Required Rule 8.5, object/function definition in header file.<br>
- Function definitions in header files are used to allow 'inlining'.
-
- \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.<br>
- Unions are used for effective representation of core registers.
-
- \li Advisory Rule 19.7, Function-like macro defined.<br>
- Function-like macros are used to allow more efficient code.
- */
-
-
-/*******************************************************************************
- * CMSIS definitions
- ******************************************************************************/
-/** \ingroup Cortex_A9
- @{
- */
-
-/* CMSIS CA9 definitions */
-#define __CA9_CMSIS_VERSION_MAIN (0x03) /*!< [31:16] CMSIS HAL main version */
-#define __CA9_CMSIS_VERSION_SUB (0x10) /*!< [15:0] CMSIS HAL sub version */
-#define __CA9_CMSIS_VERSION ((__CA9_CMSIS_VERSION_MAIN << 16) | \
- __CA9_CMSIS_VERSION_SUB ) /*!< CMSIS HAL version number */
-
-#define __CORTEX_A (0x09) /*!< Cortex-A Core */
-
-
-#if defined ( __CC_ARM )
- #define __ASM __asm /*!< asm keyword for ARM Compiler */
- #define __INLINE __inline /*!< inline keyword for ARM Compiler */
- #define __STATIC_INLINE static __inline
- #define __STATIC_ASM static __asm
-
-#elif defined ( __ICCARM__ )
- #define __ASM __asm /*!< asm keyword for IAR Compiler */
- #define __INLINE inline /*!< inline keyword for IAR Compiler. Only available in High optimization mode! */
- #define __STATIC_INLINE static inline
- #define __STATIC_ASM static __asm
-
-#elif defined ( __TMS470__ )
- #define __ASM __asm /*!< asm keyword for TI CCS Compiler */
- #define __STATIC_INLINE static inline
- #define __STATIC_ASM static __asm
-
-#elif defined ( __GNUC__ )
- #define __ASM __asm /*!< asm keyword for GNU Compiler */
- #define __INLINE inline /*!< inline keyword for GNU Compiler */
- #define __STATIC_INLINE static inline
- #define __STATIC_ASM static __asm
-
-#elif defined ( __TASKING__ )
- #define __ASM __asm /*!< asm keyword for TASKING Compiler */
- #define __INLINE inline /*!< inline keyword for TASKING Compiler */
- #define __STATIC_INLINE static inline
- #define __STATIC_ASM static __asm
-
-#endif
-
-/** __FPU_USED indicates whether an FPU is used or not. For this, __FPU_PRESENT has to be checked prior to making use of FPU specific registers and functions.
-*/
-#if defined ( __CC_ARM )
- #if defined __TARGET_FPU_VFP
- #if (__FPU_PRESENT == 1)
- #define __FPU_USED 1
- #else
- #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
- #define __FPU_USED 0
- #endif
- #else
- #define __FPU_USED 0
- #endif
-
-#elif defined ( __ICCARM__ )
- #if defined __ARMVFP__
- #if (__FPU_PRESENT == 1)
- #define __FPU_USED 1
- #else
- #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
- #define __FPU_USED 0
- #endif
- #else
- #define __FPU_USED 0
- #endif
-
-#elif defined ( __TMS470__ )
- #if defined __TI_VFP_SUPPORT__
- #if (__FPU_PRESENT == 1)
- #define __FPU_USED 1
- #else
- #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
- #define __FPU_USED 0
- #endif
- #else
- #define __FPU_USED 0
- #endif
-
-#elif defined ( __GNUC__ )
- #if defined (__VFP_FP__) && !defined(__SOFTFP__)
- #if (__FPU_PRESENT == 1)
- #define __FPU_USED 1
- #else
- #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
- #define __FPU_USED 0
- #endif
- #else
- #define __FPU_USED 0
- #endif
-
-#elif defined ( __TASKING__ )
- #if defined __FPU_VFP__
- #if (__FPU_PRESENT == 1)
- #define __FPU_USED 1
- #else
- #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
- #define __FPU_USED 0
- #endif
- #else
- #define __FPU_USED 0
- #endif
-#endif
-
-#include <stdint.h> /*!< standard types definitions */
-#include "core_caInstr.h" /*!< Core Instruction Access */
-#include "core_caFunc.h" /*!< Core Function Access */
-#include "core_cm4_simd.h" /*!< Compiler specific SIMD Intrinsics */
-
-#endif /* __CORE_CA9_H_GENERIC */
-
-#ifndef __CMSIS_GENERIC
-
-#ifndef __CORE_CA9_H_DEPENDANT
-#define __CORE_CA9_H_DEPENDANT
-
-/* check device defines and use defaults */
-#if defined __CHECK_DEVICE_DEFINES
- #ifndef __CA9_REV
- #define __CA9_REV 0x0000
- #warning "__CA9_REV not defined in device header file; using default!"
- #endif
-
- #ifndef __FPU_PRESENT
- #define __FPU_PRESENT 1
- #warning "__FPU_PRESENT not defined in device header file; using default!"
- #endif
-
- #ifndef __Vendor_SysTickConfig
- #define __Vendor_SysTickConfig 1
- #endif
-
- #if __Vendor_SysTickConfig == 0
- #error "__Vendor_SysTickConfig set to 0, but vendor systick timer must be supplied for Cortex-A9"
- #endif
-#endif
-
-/* IO definitions (access restrictions to peripheral registers) */
-/**
- \defgroup CMSIS_glob_defs CMSIS Global Defines
-
- <strong>IO Type Qualifiers</strong> are used
- \li to specify the access to peripheral variables.
- \li for automatic generation of peripheral register debug information.
-*/
-#ifdef __cplusplus
- #define __I volatile /*!< Defines 'read only' permissions */
-#else
- #define __I volatile const /*!< Defines 'read only' permissions */
-#endif
-#define __O volatile /*!< Defines 'write only' permissions */
-#define __IO volatile /*!< Defines 'read / write' permissions */
-
-/*@} end of group Cortex_A9 */
-
-
-/*******************************************************************************
- * Register Abstraction
- ******************************************************************************/
-/** \defgroup CMSIS_core_register Defines and Type Definitions
- \brief Type definitions and defines for Cortex-A processor based devices.
-*/
-
-/** \ingroup CMSIS_core_register
- \defgroup CMSIS_CORE Status and Control Registers
- \brief Core Register type definitions.
- @{
- */
-
-/** \brief Union type to access the Application Program Status Register (APSR).
- */
-typedef union
-{
- struct
- {
- uint32_t _reserved0:16; /*!< bit: 0..15 Reserved */
- uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */
- uint32_t reserved1:7; /*!< bit: 20..23 Reserved */
- uint32_t Q:1; /*!< bit: 27 Saturation condition flag */
- uint32_t V:1; /*!< bit: 28 Overflow condition code flag */
- uint32_t C:1; /*!< bit: 29 Carry condition code flag */
- uint32_t Z:1; /*!< bit: 30 Zero condition code flag */
- uint32_t N:1; /*!< bit: 31 Negative condition code flag */
- } b; /*!< Structure used for bit access */
- uint32_t w; /*!< Type used for word access */
-} APSR_Type;
-
-
-/*@} end of group CMSIS_CORE */
-
-/*@} end of CMSIS_Core_FPUFunctions */
-
-
-#endif /* __CORE_CA9_H_GENERIC */
-
-#endif /* __CMSIS_GENERIC */
-
-#ifdef __cplusplus
-}
-
-
-#endif
http://git-wip-us.apache.org/repos/asf/incubator-mynewt-core/blob/c8b6596e/hw/cmsis-core/src/ext/core_caFunc.h
----------------------------------------------------------------------
diff --git a/hw/cmsis-core/src/ext/core_caFunc.h b/hw/cmsis-core/src/ext/core_caFunc.h
deleted file mode 100644
index 255c683..0000000
--- a/hw/cmsis-core/src/ext/core_caFunc.h
+++ /dev/null
@@ -1,1169 +0,0 @@
-/**************************************************************************//**
- * @file core_caFunc.h
- * @brief CMSIS Cortex-A Core Function Access Header File
- * @version V3.10
- * @date 9 May 2013
- *
- * @note
- *
- ******************************************************************************/
-/* Copyright (c) 2009 - 2012 ARM LIMITED
-
- All rights reserved.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are met:
- - Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- - Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
- - Neither the name of ARM nor the names of its contributors may be used
- to endorse or promote products derived from this software without
- specific prior written permission.
- *
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE
- LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- POSSIBILITY OF SUCH DAMAGE.
- ---------------------------------------------------------------------------*/
-
-
-#ifndef __CORE_CAFUNC_H__
-#define __CORE_CAFUNC_H__
-
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* ########################### Core Function Access ########################### */
-/** \ingroup CMSIS_Core_FunctionInterface
- \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions
- @{
- */
-
-#if defined ( __CC_ARM ) /*------------------RealView Compiler -----------------*/
-/* ARM armcc specific functions */
-
-#if (__ARMCC_VERSION < 400677)
- #error "Please use ARM Compiler Toolchain V4.0.677 or later!"
-#endif
-
-#define MODE_USR 0x10
-#define MODE_FIQ 0x11
-#define MODE_IRQ 0x12
-#define MODE_SVC 0x13
-#define MODE_MON 0x16
-#define MODE_ABT 0x17
-#define MODE_HYP 0x1A
-#define MODE_UND 0x1B
-#define MODE_SYS 0x1F
-
-/** \brief Get APSR Register
-
- This function returns the content of the APSR Register.
-
- \return APSR Register value
- */
-__STATIC_INLINE uint32_t __get_APSR(void)
-{
- register uint32_t __regAPSR __ASM("apsr");
- return(__regAPSR);
-}
-
-
-/** \brief Get CPSR Register
-
- This function returns the content of the CPSR Register.
-
- \return CPSR Register value
- */
-__STATIC_INLINE uint32_t __get_CPSR(void)
-{
- register uint32_t __regCPSR __ASM("cpsr");
- return(__regCPSR);
-}
-
-/** \brief Set Stack Pointer
-
- This function assigns the given value to the current stack pointer.
-
- \param [in] topOfStack Stack Pointer value to set
- */
-register uint32_t __regSP __ASM("sp");
-__STATIC_INLINE void __set_SP(uint32_t topOfStack)
-{
- __regSP = topOfStack;
-}
-
-
-/** \brief Get link register
-
- This function returns the value of the link register
-
- \return Value of link register
- */
-register uint32_t __reglr __ASM("lr");
-__STATIC_INLINE uint32_t __get_LR(void)
-{
- return(__reglr);
-}
-
-/** \brief Set link register
-
- This function sets the value of the link register
-
- \param [in] lr LR value to set
- */
-__STATIC_INLINE void __set_LR(uint32_t lr)
-{
- __reglr = lr;
-}
-
-/** \brief Set Process Stack Pointer
-
- This function assigns the given value to the USR/SYS Stack Pointer (PSP).
-
- \param [in] topOfProcStack USR/SYS Stack Pointer value to set
- */
-__STATIC_ASM void __set_PSP(uint32_t topOfProcStack)
-{
- ARM
- PRESERVE8
-
- BIC R0, R0, #7 ;ensure stack is 8-byte aligned
- MRS R1, CPSR
- CPS #MODE_SYS ;no effect in USR mode
- MOV SP, R0
- MSR CPSR_c, R1 ;no effect in USR mode
- ISB
- BX LR
-
-}
-
-/** \brief Set User Mode
-
- This function changes the processor state to User Mode
-
- \param [in] topOfProcStack USR/SYS Stack Pointer value to set
- */
-__STATIC_ASM void __set_CPS_USR(void)
-{
- ARM
-
- CPS #MODE_USR
- BX LR
-}
-
-
-/** \brief Enable FIQ
-
- This function enables FIQ interrupts by clearing the F-bit in the CPSR.
- Can only be executed in Privileged modes.
- */
-#define __enable_fault_irq __enable_fiq
-
-
-/** \brief Disable FIQ
-
- This function disables FIQ interrupts by setting the F-bit in the CPSR.
- Can only be executed in Privileged modes.
- */
-#define __disable_fault_irq __disable_fiq
-
-
-/** \brief Get FPSCR
-
- This function returns the current value of the Floating Point Status/Control register.
-
- \return Floating Point Status/Control register value
- */
-__STATIC_INLINE uint32_t __get_FPSCR(void)
-{
-#if (__FPU_PRESENT == 1) && (__FPU_USED == 1)
- register uint32_t __regfpscr __ASM("fpscr");
- return(__regfpscr);
-#else
- return(0);
-#endif
-}
-
-
-/** \brief Set FPSCR
-
- This function assigns the given value to the Floating Point Status/Control register.
-
- \param [in] fpscr Floating Point Status/Control value to set
- */
-__STATIC_INLINE void __set_FPSCR(uint32_t fpscr)
-{
-#if (__FPU_PRESENT == 1) && (__FPU_USED == 1)
- register uint32_t __regfpscr __ASM("fpscr");
- __regfpscr = (fpscr);
-#endif
-}
-
-/** \brief Get FPEXC
-
- This function returns the current value of the Floating Point Exception Control register.
-
- \return Floating Point Exception Control register value
- */
-__STATIC_INLINE uint32_t __get_FPEXC(void)
-{
-#if (__FPU_PRESENT == 1)
- register uint32_t __regfpexc __ASM("fpexc");
- return(__regfpexc);
-#else
- return(0);
-#endif
-}
-
-
-/** \brief Set FPEXC
-
- This function assigns the given value to the Floating Point Exception Control register.
-
- \param [in] fpscr Floating Point Exception Control value to set
- */
-__STATIC_INLINE void __set_FPEXC(uint32_t fpexc)
-{
-#if (__FPU_PRESENT == 1)
- register uint32_t __regfpexc __ASM("fpexc");
- __regfpexc = (fpexc);
-#endif
-}
-
-/** \brief Get CPACR
-
- This function returns the current value of the Coprocessor Access Control register.
-
- \return Coprocessor Access Control register value
- */
-__STATIC_INLINE uint32_t __get_CPACR(void)
-{
- register uint32_t __regCPACR __ASM("cp15:0:c1:c0:2");
- return __regCPACR;
-}
-
-/** \brief Set CPACR
-
- This function assigns the given value to the Coprocessor Access Control register.
-
- \param [in] cpacr Coporcessor Acccess Control value to set
- */
-__STATIC_INLINE void __set_CPACR(uint32_t cpacr)
-{
- register uint32_t __regCPACR __ASM("cp15:0:c1:c0:2");
- __regCPACR = cpacr;
- __ISB();
-}
-
-/** \brief Get CBAR
-
- This function returns the value of the Configuration Base Address register.
-
- \return Configuration Base Address register value
- */
-__STATIC_INLINE uint32_t __get_CBAR() {
- register uint32_t __regCBAR __ASM("cp15:4:c15:c0:0");
- return(__regCBAR);
-}
-
-/** \brief Get TTBR0
-
- This function returns the value of Translation Table Base Register 0.
-
- \return Translation Table Base Register 0 value
- */
-__STATIC_INLINE uint32_t __get_TTBR0() {
- register uint32_t __regTTBR0 __ASM("cp15:0:c2:c0:0");
- return(__regTTBR0);
-}
-
-/** \brief Set TTBR0
-
- This function assigns the given value to Translation Table Base Register 0.
-
- \param [in] ttbr0 Translation Table Base Register 0 value to set
- */
-__STATIC_INLINE void __set_TTBR0(uint32_t ttbr0) {
- register uint32_t __regTTBR0 __ASM("cp15:0:c2:c0:0");
- __regTTBR0 = ttbr0;
- __ISB();
-}
-
-/** \brief Get DACR
-
- This function returns the value of the Domain Access Control Register.
-
- \return Domain Access Control Register value
- */
-__STATIC_INLINE uint32_t __get_DACR() {
- register uint32_t __regDACR __ASM("cp15:0:c3:c0:0");
- return(__regDACR);
-}
-
-/** \brief Set DACR
-
- This function assigns the given value to the Domain Access Control Register.
-
- \param [in] dacr Domain Access Control Register value to set
- */
-__STATIC_INLINE void __set_DACR(uint32_t dacr) {
- register uint32_t __regDACR __ASM("cp15:0:c3:c0:0");
- __regDACR = dacr;
- __ISB();
-}
-
-/******************************** Cache and BTAC enable ****************************************************/
-
-/** \brief Set SCTLR
-
- This function assigns the given value to the System Control Register.
-
- \param [in] sctlr System Control Register, value to set
- */
-__STATIC_INLINE void __set_SCTLR(uint32_t sctlr)
-{
- register uint32_t __regSCTLR __ASM("cp15:0:c1:c0:0");
- __regSCTLR = sctlr;
-}
-
-/** \brief Get SCTLR
-
- This function returns the value of the System Control Register.
-
- \return System Control Register value
- */
-__STATIC_INLINE uint32_t __get_SCTLR() {
- register uint32_t __regSCTLR __ASM("cp15:0:c1:c0:0");
- return(__regSCTLR);
-}
-
-/** \brief Enable Caches
-
- Enable Caches
- */
-__STATIC_INLINE void __enable_caches(void) {
- // Set I bit 12 to enable I Cache
- // Set C bit 2 to enable D Cache
- __set_SCTLR( __get_SCTLR() | (1 << 12) | (1 << 2));
-}
-
-/** \brief Disable Caches
-
- Disable Caches
- */
-__STATIC_INLINE void __disable_caches(void) {
- // Clear I bit 12 to disable I Cache
- // Clear C bit 2 to disable D Cache
- __set_SCTLR( __get_SCTLR() & ~(1 << 12) & ~(1 << 2));
- __ISB();
-}
-
-/** \brief Enable BTAC
-
- Enable BTAC
- */
-__STATIC_INLINE void __enable_btac(void) {
- // Set Z bit 11 to enable branch prediction
- __set_SCTLR( __get_SCTLR() | (1 << 11));
- __ISB();
-}
-
-/** \brief Disable BTAC
-
- Disable BTAC
- */
-__STATIC_INLINE void __disable_btac(void) {
- // Clear Z bit 11 to disable branch prediction
- __set_SCTLR( __get_SCTLR() & ~(1 << 11));
-}
-
-
-/** \brief Enable MMU
-
- Enable MMU
- */
-__STATIC_INLINE void __enable_mmu(void) {
- // Set M bit 0 to enable the MMU
- // Set AFE bit to enable simplified access permissions model
- // Clear TRE bit to disable TEX remap and A bit to disable strict alignment fault checking
- __set_SCTLR( (__get_SCTLR() & ~(1 << 28) & ~(1 << 1)) | 1 | (1 << 29));
- __ISB();
-}
-
-/** \brief Disable MMU
-
- Disable MMU
- */
-__STATIC_INLINE void __disable_mmu(void) {
- // Clear M bit 0 to disable the MMU
- __set_SCTLR( __get_SCTLR() & ~1);
- __ISB();
-}
-
-/******************************** TLB maintenance operations ************************************************/
-/** \brief Invalidate the whole TLB
-
- TLBIALL. Invalidate the whole TLB
- */
-
-__STATIC_INLINE void __ca9u_inv_tlb_all(void) {
- register uint32_t __TLBIALL __ASM("cp15:0:c8:c7:0");
- __TLBIALL = 0;
- __DSB();
- __ISB();
-}
-
-/******************************** BTB maintenance operations ************************************************/
-/** \brief Invalidate entire branch predictor array
-
- BPIALL. Branch Predictor Invalidate All.
- */
-
-__STATIC_INLINE void __v7_inv_btac(void) {
- register uint32_t __BPIALL __ASM("cp15:0:c7:c5:6");
- __BPIALL = 0;
- __DSB(); //ensure completion of the invalidation
- __ISB(); //ensure instruction fetch path sees new state
-}
-
-
-/******************************** L1 cache operations ******************************************************/
-
-/** \brief Invalidate the whole I$
-
- ICIALLU. Instruction Cache Invalidate All to PoU
- */
-__STATIC_INLINE void __v7_inv_icache_all(void) {
- register uint32_t __ICIALLU __ASM("cp15:0:c7:c5:0");
- __ICIALLU = 0;
- __DSB(); //ensure completion of the invalidation
- __ISB(); //ensure instruction fetch path sees new I cache state
-}
-
-/** \brief Clean D$ by MVA
-
- DCCMVAC. Data cache clean by MVA to PoC
- */
-__STATIC_INLINE void __v7_clean_dcache_mva(void *va) {
- register uint32_t __DCCMVAC __ASM("cp15:0:c7:c10:1");
- __DCCMVAC = (uint32_t)va;
- __DMB(); //ensure the ordering of data cache maintenance operations and their effects
-}
-
-/** \brief Invalidate D$ by MVA
-
- DCIMVAC. Data cache invalidate by MVA to PoC
- */
-__STATIC_INLINE void __v7_inv_dcache_mva(void *va) {
- register uint32_t __DCIMVAC __ASM("cp15:0:c7:c6:1");
- __DCIMVAC = (uint32_t)va;
- __DMB(); //ensure the ordering of data cache maintenance operations and their effects
-}
-
-/** \brief Clean and Invalidate D$ by MVA
-
- DCCIMVAC. Data cache clean and invalidate by MVA to PoC
- */
-__STATIC_INLINE void __v7_clean_inv_dcache_mva(void *va) {
- register uint32_t __DCCIMVAC __ASM("cp15:0:c7:c14:1");
- __DCCIMVAC = (uint32_t)va;
- __DMB(); //ensure the ordering of data cache maintenance operations and their effects
-}
-
-/** \brief
- * Generic mechanism for cleaning/invalidating the entire data or unified cache to the point of coherency.
- */
-#pragma push
-#pragma arm
-__STATIC_ASM void __v7_all_cache(uint32_t op) {
- ARM
-
- PUSH {R4-R11}
-
- MRC p15, 1, R6, c0, c0, 1 // Read CLIDR
- ANDS R3, R6, #0x07000000 // Extract coherency level
- MOV R3, R3, LSR #23 // Total cache levels << 1
- BEQ Finished // If 0, no need to clean
-
- MOV R10, #0 // R10 holds current cache level << 1
-Loop1 ADD R2, R10, R10, LSR #1 // R2 holds cache "Set" position
- MOV R1, R6, LSR R2 // Bottom 3 bits are the Cache-type for this level
- AND R1, R1, #7 // Isolate those lower 3 bits
- CMP R1, #2
- BLT Skip // No cache or only instruction cache at this level
-
- MCR p15, 2, R10, c0, c0, 0 // Write the Cache Size selection register
- ISB // ISB to sync the change to the CacheSizeID reg
- MRC p15, 1, R1, c0, c0, 0 // Reads current Cache Size ID register
- AND R2, R1, #7 // Extract the line length field
- ADD R2, R2, #4 // Add 4 for the line length offset (log2 16 bytes)
- LDR R4, =0x3FF
- ANDS R4, R4, R1, LSR #3 // R4 is the max number of the way size (right aligned)
- CLZ R5, R4 // R5 is the bit position of the way size increment
- LDR R7, =0x7FFF
- ANDS R7, R7, R1, LSR #13 // R7 is the max number of the index size (right aligned)
-
-Loop2 MOV R9, R4 // R9 working copy of the max way size (right aligned)
-
-Loop3 ORR R11, R10, R9, LSL R5 // Factor in the Way number and cache number into R11
- ORR R11, R11, R7, LSL R2 // Factor in the Set number
- CMP R0, #0
- BNE Dccsw
- MCR p15, 0, R11, c7, c6, 2 // DCISW. Invalidate by Set/Way
- B cont
-Dccsw CMP R0, #1
- BNE Dccisw
- MCR p15, 0, R11, c7, c10, 2 // DCCSW. Clean by Set/Way
- B cont
-Dccisw MCR p15, 0, R11, c7, c14, 2 // DCCISW. Clean and Invalidate by Set/Way
-cont SUBS R9, R9, #1 // Decrement the Way number
- BGE Loop3
- SUBS R7, R7, #1 // Decrement the Set number
- BGE Loop2
-Skip ADD R10, R10, #2 // increment the cache number
- CMP R3, R10
- BGT Loop1
-
-Finished
- DSB
- POP {R4-R11}
- BX lr
-
-}
-#pragma pop
-
-
-/** \brief Invalidate the whole D$
-
- DCISW. Invalidate by Set/Way
- */
-
-__STATIC_INLINE void __v7_inv_dcache_all(void) {
- __v7_all_cache(0);
-}
-
-/** \brief Clean the whole D$
-
- DCCSW. Clean by Set/Way
- */
-
-__STATIC_INLINE void __v7_clean_dcache_all(void) {
- __v7_all_cache(1);
-}
-
-/** \brief Clean and invalidate the whole D$
-
- DCCISW. Clean and Invalidate by Set/Way
- */
-
-__STATIC_INLINE void __v7_clean_inv_dcache_all(void) {
- __v7_all_cache(2);
-}
-
-#include "core_ca_mmu.h"
-
-#elif (defined (__ICCARM__)) /*---------------- ICC Compiler ---------------------*/
-
-#error IAR Compiler support not implemented for Cortex-A
-
-#elif (defined (__GNUC__)) /*------------------ GNU Compiler ---------------------*/
-
-/* GNU gcc specific functions */
-
-#define MODE_USR 0x10
-#define MODE_FIQ 0x11
-#define MODE_IRQ 0x12
-#define MODE_SVC 0x13
-#define MODE_MON 0x16
-#define MODE_ABT 0x17
-#define MODE_HYP 0x1A
-#define MODE_UND 0x1B
-#define MODE_SYS 0x1F
-
-
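-/** \brief Enable IRQ Interrupts
-
- This function enables IRQ interrupts by clearing the I-bit in the CPSR.
- Can only be executed in Privileged modes.
- */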
-__attribute__( ( always_inline ) ) __STATIC_INLINE void __enable_irq(void)
-{
- __ASM volatile ("cpsie i");
-}
-
-/** \brief Disable IRQ Interrupts
-
- This function disables IRQ interrupts by setting the I-bit in the CPSR.
- Can only be executed in Privileged modes.
-
- \return Previous state of the CPSR I-bit (non-zero if IRQs were already masked)
- */
-__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __disable_irq(void)
-{
- uint32_t result;
-
- __ASM volatile ("mrs %0, cpsr" : "=r" (result));
- __ASM volatile ("cpsid i");
- return(result & 0x80);
-}
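-
-/* Usage sketch (illustrative, not from the original header): the return value
-   lets a critical section restore the previous mask state instead of
-   unconditionally re-enabling IRQs:
-
-     uint32_t irq_masked = __disable_irq();
-     // ... touch shared state ...
-     if (!irq_masked) {
-         __enable_irq();
-     }
- */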
-
-
-/** \brief Get APSR Register
-
- This function returns the content of the APSR Register.
-
- \return APSR Register value
- */
-__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_APSR(void)
-{
-#if 1
- uint32_t result;
-
- __ASM volatile ("mrs %0, apsr" : "=r" (result) );
- return (result);
-#else
- register uint32_t __regAPSR __ASM("apsr");
- return(__regAPSR);
-#endif
-}
-
-
-/** \brief Get CPSR Register
-
- This function returns the content of the CPSR Register.
-
- \return CPSR Register value
- */
-__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_CPSR(void)
-{
-#if 1
- register uint32_t __regCPSR;
- __ASM volatile ("mrs %0, cpsr" : "=r" (__regCPSR));
-#else
- register uint32_t __regCPSR __ASM("cpsr");
-#endif
- return(__regCPSR);
-}
-
-#if 0
-/** \brief Set Stack Pointer
-
- This function assigns the given value to the current stack pointer.
-
- \param [in] topOfStack Stack Pointer value to set
- */
-__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_SP(uint32_t topOfStack)
-{
- register uint32_t __regSP __ASM("sp");
- __regSP = topOfStack;
-}
-#endif
-
-/** \brief Get link register
-
- This function returns the value of the link register
-
- \return Value of link register
- */
-__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_LR(void)
-{
- register uint32_t __reglr __ASM("lr");
- return(__reglr);
-}
-
-#if 0
-/** \brief Set link register
-
- This function sets the value of the link register
-
- \param [in] lr LR value to set
- */
-__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_LR(uint32_t lr)
-{
- register uint32_t __reglr __ASM("lr");
- __reglr = lr;
-}
-#endif
-
-/** \brief Set Process Stack Pointer
-
- This function assigns the given value to the USR/SYS Stack Pointer (PSP).
-
- \param [in] topOfProcStack USR/SYS Stack Pointer value to set
- */
-extern void __set_PSP(uint32_t topOfProcStack);
-
-/** \brief Set User Mode
-
- This function changes the processor state to User Mode
-
- */
-extern void __set_CPS_USR(void);
-
-/** \brief Enable FIQ
-
- This function enables FIQ interrupts by clearing the F-bit in the CPSR.
- Can only be executed in Privileged modes.
- */
-#define __enable_fault_irq __enable_fiq
-
-
-/** \brief Disable FIQ
-
- This function disables FIQ interrupts by setting the F-bit in the CPSR.
- Can only be executed in Privileged modes.
- */
-#define __disable_fault_irq __disable_fiq
-
-
-/** \brief Get FPSCR
-
- This function returns the current value of the Floating Point Status/Control register.
-
- \return Floating Point Status/Control register value
- */
-__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_FPSCR(void)
-{
-#if (__FPU_PRESENT == 1) && (__FPU_USED == 1)
-#if 1
- uint32_t result;
-
- __ASM volatile ("vmrs %0, fpscr" : "=r" (result) );
- return (result);
-#else
- register uint32_t __regfpscr __ASM("fpscr");
- return(__regfpscr);
-#endif
-#else
- return(0);
-#endif
-}
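-
-/* Illustrative sketch (not part of the original header): FPSCR bits 0-4 are
-   the cumulative IEEE exception flags (IOC, DZC, OFC, UFC, IXC), so code can
-   poll for a fault and clear the flags afterwards:
-
-     uint32_t fpscr = __get_FPSCR();
-     if (fpscr & 0x1U) {
-         // an invalid floating point operation occurred since the last clear
-     }
-     __set_FPSCR(fpscr & ~0x1FU);   // clear the cumulative flag bits
- */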
-
-
-/** \brief Set FPSCR
-
- This function assigns the given value to the Floating Point Status/Control register.
-
- \param [in] fpscr Floating Point Status/Control value to set
- */
-__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_FPSCR(uint32_t fpscr)
-{
-#if (__FPU_PRESENT == 1) && (__FPU_USED == 1)
-#if 1
- __ASM volatile ("vmsr fpscr, %0" : : "r" (fpscr) );
-#else
- register uint32_t __regfpscr __ASM("fpscr");
- __regfpscr = (fpscr);
-#endif
-#endif
-}
-
-/** \brief Get FPEXC
-
- This function returns the current value of the Floating Point Exception Control register.
-
- \return Floating Point Exception Control register value
- */
-__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_FPEXC(void)
-{
-#if (__FPU_PRESENT == 1)
-#if 1
- uint32_t result;
-
- __ASM volatile ("vmrs %0, fpexc" : "=r" (result));
- return (result);
-#else
- register uint32_t __regfpexc __ASM("fpexc");
- return(__regfpexc);
-#endif
-#else
- return(0);
-#endif
-}
-
-
-/** \brief Set FPEXC
-
- This function assigns the given value to the Floating Point Exception Control register.
-
- \param [in] fpexc Floating Point Exception Control value to set
- */
-__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_FPEXC(uint32_t fpexc)
-{
-#if (__FPU_PRESENT == 1)
-#if 1
- __ASM volatile ("vmsr fpexc, %0" : : "r" (fpexc));
-#else
- register uint32_t __regfpexc __ASM("fpexc");
- __regfpexc = (fpexc);
-#endif
-#endif
-}
-
-/** \brief Get CPACR
-
- This function returns the current value of the Coprocessor Access Control register.
-
- \return Coprocessor Access Control register value
- */
-__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_CPACR(void)
-{
-#if 1
- register uint32_t __regCPACR;
- __ASM volatile ("mrc p15, 0, %0, c1, c0, 2" : "=r" (__regCPACR));
-#else
- register uint32_t __regCPACR __ASM("cp15:0:c1:c0:2");
-#endif
- return __regCPACR;
-}
-
-/** \brief Set CPACR
-
- This function assigns the given value to the Coprocessor Access Control register.
-
- \param [in] cpacr Coprocessor Access Control value to set
- */
-__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_CPACR(uint32_t cpacr)
-{
-#if 1
- __ASM volatile ("mcr p15, 0, %0, c1, c0, 2" : : "r" (cpacr));
-#else
- register uint32_t __regCPACR __ASM("cp15:0:c1:c0:2");
- __regCPACR = cpacr;
-#endif
- __ISB();
-}
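-
-/* Illustrative bring-up sketch: on a Cortex-A core with VFP/NEON, CP10/CP11
-   must be granted full access in CPACR (two bits per coprocessor, 0b11 = full
-   access) and the FPU enabled through FPEXC bit 30 before any floating point
-   instruction executes:
-
-     __set_CPACR(__get_CPACR() | (0xFU << 20));   // CP10 + CP11: full access
-     __set_FPEXC(__get_FPEXC() | 0x40000000U);    // FPEXC.EN: turn the FPU on
- */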
-
-/** \brief Get CBAR
-
- This function returns the value of the Configuration Base Address register.
-
- \return Configuration Base Address register value
- */
-__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_CBAR() {
-#if 1
- register uint32_t __regCBAR;
- __ASM volatile ("mrc p15, 4, %0, c15, c0, 0" : "=r" (__regCBAR));
-#else
- register uint32_t __regCBAR __ASM("cp15:4:c15:c0:0");
-#endif
- return(__regCBAR);
-}
-
-/** \brief Get TTBR0
-
- This function returns the value of Translation Table Base Register 0.
-
- \return Translation Table Base Register 0 value
- */
-__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_TTBR0() {
-#if 1
- register uint32_t __regTTBR0;
- __ASM volatile ("mrc p15, 0, %0, c2, c0, 0" : "=r" (__regTTBR0));
-#else
- register uint32_t __regTTBR0 __ASM("cp15:0:c2:c0:0");
-#endif
- return(__regTTBR0);
-}
-
-/** \brief Set TTBR0
-
- This function assigns the given value to Translation Table Base Register 0.
-
- \param [in] ttbr0 Translation Table Base Register 0 value to set
- */
-__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_TTBR0(uint32_t ttbr0) {
-#if 1
- __ASM volatile ("mcr p15, 0, %0, c2, c0, 0" : : "r" (ttbr0));
-#else
- register uint32_t __regTTBR0 __ASM("cp15:0:c2:c0:0");
- __regTTBR0 = ttbr0;
-#endif
- __ISB();
-}
-
-/** \brief Get DACR
-
- This function returns the value of the Domain Access Control Register.
-
- \return Domain Access Control Register value
- */
-__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_DACR() {
-#if 1
- register uint32_t __regDACR;
- __ASM volatile ("mrc p15, 0, %0, c3, c0, 0" : "=r" (__regDACR));
-#else
- register uint32_t __regDACR __ASM("cp15:0:c3:c0:0");
-#endif
- return(__regDACR);
-}
-
-/** \brief Set DACR
-
- This function assigns the given value to the Domain Access Control Register.
-
- \param [in] dacr Domain Access Control Register value to set
- */
-__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_DACR(uint32_t dacr) {
-#if 1
- __ASM volatile ("mcr p15, 0, %0, c3, c0, 0" : : "r" (dacr));
-#else
- register uint32_t __regDACR __ASM("cp15:0:c3:c0:0");
- __regDACR = dacr;
-#endif
- __ISB();
-}
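-
-/* Illustrative sketch: DACR holds a 2-bit access field for each of the 16
-   memory domains; writing 0b01 ("client") to every field makes the MMU
-   actually check the page table access permissions:
-
-     __set_DACR(0x55555555U);   // all 16 domains set to client
- */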
-
-/******************************** Cache and BTAC enable ****************************************************/
-
-/** \brief Set SCTLR
-
- This function assigns the given value to the System Control Register.
-
- \param [in] sctlr System Control Register, value to set
- */
-__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_SCTLR(uint32_t sctlr)
-{
-#if 1
- __ASM volatile ("mcr p15, 0, %0, c1, c0, 0" : : "r" (sctlr));
-#else
- register uint32_t __regSCTLR __ASM("cp15:0:c1:c0:0");
- __regSCTLR = sctlr;
-#endif
-}
-
-/** \brief Get SCTLR
-
- This function returns the value of the System Control Register.
-
- \return System Control Register value
- */
-__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_SCTLR() {
-#if 1
- register uint32_t __regSCTLR;
- __ASM volatile ("mrc p15, 0, %0, c1, c0, 0" : "=r" (__regSCTLR));
-#else
- register uint32_t __regSCTLR __ASM("cp15:0:c1:c0:0");
-#endif
- return(__regSCTLR);
-}
-
-/** \brief Enable Caches
-
- Enable Caches
- */
-__attribute__( ( always_inline ) ) __STATIC_INLINE void __enable_caches(void) {
- // Set I bit 12 to enable I Cache
- // Set C bit 2 to enable D Cache
- __set_SCTLR( __get_SCTLR() | (1 << 12) | (1 << 2));
-}
-
-/** \brief Disable Caches
-
- Disable Caches
- */
-__attribute__( ( always_inline ) ) __STATIC_INLINE void __disable_caches(void) {
- // Clear I bit 12 to disable I Cache
- // Clear C bit 2 to disable D Cache
- __set_SCTLR( __get_SCTLR() & ~(1 << 12) & ~(1 << 2));
- __ISB();
-}
-
-/** \brief Enable BTAC
-
- Enable BTAC
- */
-__attribute__( ( always_inline ) ) __STATIC_INLINE void __enable_btac(void) {
- // Set Z bit 11 to enable branch prediction
- __set_SCTLR( __get_SCTLR() | (1 << 11));
- __ISB();
-}
-
-/** \brief Disable BTAC
-
- Disable BTAC
- */
-__attribute__( ( always_inline ) ) __STATIC_INLINE void __disable_btac(void) {
- // Clear Z bit 11 to disable branch prediction
- __set_SCTLR( __get_SCTLR() & ~(1 << 11));
-}
-
-
-/** \brief Enable MMU
-
- Enable MMU
- */
-__attribute__( ( always_inline ) ) __STATIC_INLINE void __enable_mmu(void) {
- // Set M bit 0 to enable the MMU
- // Set AFE bit to enable simplified access permissions model
- // Clear TRE bit to disable TEX remap and A bit to disable strict alignment fault checking
- __set_SCTLR( (__get_SCTLR() & ~(1 << 28) & ~(1 << 1)) | 1 | (1 << 29));
- __ISB();
-}
-
-/** \brief Disable MMU
-
- Disable MMU
- */
-__attribute__( ( always_inline ) ) __STATIC_INLINE void __disable_mmu(void) {
- // Clear M bit 0 to disable the MMU
- __set_SCTLR( __get_SCTLR() & ~1);
- __ISB();
-}
-
-/******************************** TLB maintenance operations ************************************************/
-/** \brief Invalidate the whole TLB
-
- TLBIALL. Invalidate the whole TLB
- */
-
-__attribute__( ( always_inline ) ) __STATIC_INLINE void __ca9u_inv_tlb_all(void) {
-#if 1
- __ASM volatile ("mcr p15, 0, %0, c8, c7, 0" : : "r" (0));
-#else
- register uint32_t __TLBIALL __ASM("cp15:0:c8:c7:0");
- __TLBIALL = 0;
-#endif
- __DSB();
- __ISB();
-}
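-
-/* Illustrative bring-up order (a sketch, not from the original header): with
-   the translation tables already written to memory and TTBCR.N left at zero
-   (so TTBR0 covers the whole address space), point TTBR0 at the tables,
-   configure the domains, discard stale TLB entries, then turn the MMU and
-   caches on. "ttb_base" is an assumed, suitably aligned table pointer:
-
-     __set_TTBR0((uint32_t)ttb_base);  // low bits (walk attributes) left 0 here
-     __set_DACR(0x55555555U);          // all domains: client
-     __ca9u_inv_tlb_all();
-     __enable_mmu();
-     __enable_caches();
- */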
-
-/******************************** BTB maintenance operations ************************************************/
-/** \brief Invalidate entire branch predictor array
-
- BPIALL. Branch Predictor Invalidate All.
- */
-
-__attribute__( ( always_inline ) ) __STATIC_INLINE void __v7_inv_btac(void) {
-#if 1
- __ASM volatile ("mcr p15, 0, %0, c7, c5, 6" : : "r" (0));
-#else
- register uint32_t __BPIALL __ASM("cp15:0:c7:c5:6");
- __BPIALL = 0;
-#endif
- __DSB(); //ensure completion of the invalidation
- __ISB(); //ensure instruction fetch path sees new state
-}
-
-
-/******************************** L1 cache operations ******************************************************/
-
-/** \brief Invalidate the whole I$
-
- ICIALLU. Instruction Cache Invalidate All to PoU
- */
-__attribute__( ( always_inline ) ) __STATIC_INLINE void __v7_inv_icache_all(void) {
-#if 1
- __ASM volatile ("mcr p15, 0, %0, c7, c5, 0" : : "r" (0));
-#else
- register uint32_t __ICIALLU __ASM("cp15:0:c7:c5:0");
- __ICIALLU = 0;
-#endif
- __DSB(); //ensure completion of the invalidation
- __ISB(); //ensure instruction fetch path sees new I cache state
-}
-
-/** \brief Clean D$ by MVA
-
- DCCMVAC. Data cache clean by MVA to PoC
- */
-__attribute__( ( always_inline ) ) __STATIC_INLINE void __v7_clean_dcache_mva(void *va) {
-#if 1
- __ASM volatile ("mcr p15, 0, %0, c7, c10, 1" : : "r" ((uint32_t)va));
-#else
- register uint32_t __DCCMVAC __ASM("cp15:0:c7:c10:1");
- __DCCMVAC = (uint32_t)va;
-#endif
- __DMB(); //ensure the ordering of data cache maintenance operations and their effects
-}
-
-/** \brief Invalidate D$ by MVA
-
- DCIMVAC. Data cache invalidate by MVA to PoC
- */
-__attribute__( ( always_inline ) ) __STATIC_INLINE void __v7_inv_dcache_mva(void *va) {
-#if 1
- __ASM volatile ("mcr p15, 0, %0, c7, c6, 1" : : "r" ((uint32_t)va));
-#else
- register uint32_t __DCIMVAC __ASM("cp15:0:c7:c6:1");
- __DCIMVAC = (uint32_t)va;
-#endif
- __DMB(); //ensure the ordering of data cache maintenance operations and their effects
-}
-
-/** \brief Clean and Invalidate D$ by MVA
-
- DCCIMVAC. Data cache clean and invalidate by MVA to PoC
- */
-__attribute__( ( always_inline ) ) __STATIC_INLINE void __v7_clean_inv_dcache_mva(void *va) {
-#if 1
- __ASM volatile ("mcr p15, 0, %0, c7, c14, 1" : : "r" ((uint32_t)va));
-#else
- register uint32_t __DCCIMVAC __ASM("cp15:0:c7:c14:1");
- __DCCIMVAC = (uint32_t)va;
-#endif
- __DMB(); //ensure the ordering of data cache maintenance operations and their effects
-}
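-
-/* Illustrative sketch (an assumption: 32-byte L1 cache lines, as on
-   Cortex-A9): the MVA operations act on a single line, so cleaning a buffer
-   before a DMA device reads it means stepping through it line by line:
-
-     static void dma_clean_range(void *buf, uint32_t len) {
-         uint8_t *p   = (uint8_t *)((uint32_t)buf & ~31U);   // align down
-         uint8_t *end = (uint8_t *)buf + len;
-         for (; p < end; p += 32) {
-             __v7_clean_dcache_mva(p);
-         }
-         __DSB();   // cleans must complete before the DMA is started
-     }
- */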
-
-/** \brief
- * Generic mechanism for cleaning/invalidating the entire data or unified cache to the point of coherency.
- */
-
-/** \brief __v7_all_cache - helper function
-
- */
-
-extern void __v7_all_cache(uint32_t op);
-
-
-/** \brief Invalidate the whole D$
-
- DCISW. Invalidate by Set/Way
- */
-
-__attribute__( ( always_inline ) ) __STATIC_INLINE void __v7_inv_dcache_all(void) {
- __v7_all_cache(0);
-}
-
-/** \brief Clean the whole D$
-
- DCCSW. Clean by Set/Way
- */
-
-__attribute__( ( always_inline ) ) __STATIC_INLINE void __v7_clean_dcache_all(void) {
- __v7_all_cache(1);
-}
-
-/** \brief Clean and invalidate the whole D$
-
- DCCISW. Clean and Invalidate by Set/Way
- */
-
-__attribute__( ( always_inline ) ) __STATIC_INLINE void __v7_clean_inv_dcache_all(void) {
- __v7_all_cache(2);
-}
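-
-/* Illustrative ordering note (a sketch): dirty lines are lost if the data
-   cache is simply turned off, so the usual teardown stops new allocations
-   first and then pushes everything out by set/way:
-
-     __disable_caches();            // clear SCTLR.C/SCTLR.I: no new allocations
-     __v7_clean_inv_dcache_all();   // write back and discard what is left
- */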
-
-#include "core_ca_mmu.h"
-
-#elif (defined (__TASKING__)) /*--------------- TASKING Compiler -----------------*/
-
-#error TASKING Compiler support not implemented for Cortex-A
-
-#endif
-
-/*@} end of CMSIS_Core_RegAccFunctions */
-
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* __CORE_CAFUNC_H__ */