You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@nuttx.apache.org by xi...@apache.org on 2023/01/14 19:43:44 UTC

[nuttx] branch master updated: mm: Integrate TLSF manager

This is an automated email from the ASF dual-hosted git repository.

xiaoxiang pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/nuttx.git


The following commit(s) were added to refs/heads/master by this push:
     new a6428f4c27 mm: Integrate TLSF manager
a6428f4c27 is described below

commit a6428f4c2759a944d3ba995879e1976c7ee008f7
Author: Xiang Xiao <xi...@xiaomi.com>
AuthorDate: Mon Mar 8 17:21:23 2021 +0800

    mm: Integrate TLSF manager
    
    can be enabled by CONFIG_MM_TLSF_MANAGER=y
    
    Signed-off-by: Xiang Xiao <xi...@xiaomi.com>
    Signed-off-by: anjiahao <an...@xiaomi.com>
---
 mm/Kconfig                                         |    5 +
 mm/Makefile                                        |    7 +-
 mm/tlsf/.gitignore                                 |    1 +
 mm/tlsf/0001-Add-TLSF_API-and-tlsf_printf.patch    |  316 ++++++
 .../0002-Define-_DEBUG-to-0-if-not-done-yet.patch  |   31 +
 ...tomize-FL_INDEX_MAX-to-reduce-the-memory-.patch |   95 ++
 mm/tlsf/0004-Add-tlsf_extend_pool-function.patch   |  127 +++
 ...-warnining-on-implicit-pointer-conversion.patch |   26 +
 mm/tlsf/Make.defs                                  |   47 +
 mm/tlsf/mm_tlsf.c                                  | 1111 ++++++++++++++++++++
 tools/Directories.mk                               |    7 +
 11 files changed, 1771 insertions(+), 2 deletions(-)

diff --git a/mm/Kconfig b/mm/Kconfig
index 389aa32362..d9da188d56 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -12,6 +12,11 @@ config MM_DEFAULT_MANAGER
 	---help---
 		NuttX original memory manager strategy.
 
+config MM_TLSF_MANAGER
+	bool "TLSF heap manager"
+	---help---
+		TLSF memory manager strategy.
+
 config MM_CUSTOMIZE_MANAGER
 	bool "Customized heap manager"
 	---help---
diff --git a/mm/Makefile b/mm/Makefile
index a9d0c2272b..5234948cd7 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -32,6 +32,7 @@ include circbuf/Make.defs
 include mempool/Make.defs
 include kasan/Make.defs
 include ubsan/Make.defs
+include tlsf/Make.defs
 include map/Make.defs
 
 BINDIR ?= bin
@@ -48,7 +49,7 @@ KBIN = libkmm$(LIBEXT)
 BIN ?= libmm$(LIBEXT)
 
 all: $(BIN)
-.PHONY: clean distclean
+.PHONY: context depend clean distclean
 
 $(AOBJS): $(BINDIR)$(DELIM)$(DELIM)%$(OBJEXT): %.S
 	$(call ASSEMBLE, $<, $@)
@@ -73,6 +74,8 @@ endif
 
 # Dependencies
 
+context::
+
 makedepfile: $(CSRCS:.c=.ddc) $(ASRCS:.S=.dds)
 	$(call CATFILE, bin/Make.dep, $^)
 	$(call DELFILE, $^)
@@ -101,7 +104,7 @@ clean:
 
 # Deep clean -- removes all traces of the configuration
 
-distclean: clean
+distclean:: clean
 	$(Q) $(MAKE) -C bin  distclean
 	$(Q) $(MAKE) -C kbin distclean
 	$(call DELFILE, bin$(DELIM)Make.dep)
diff --git a/mm/tlsf/.gitignore b/mm/tlsf/.gitignore
new file mode 100644
index 0000000000..a20a71985d
--- /dev/null
+++ b/mm/tlsf/.gitignore
@@ -0,0 +1 @@
+/tlsf
diff --git a/mm/tlsf/0001-Add-TLSF_API-and-tlsf_printf.patch b/mm/tlsf/0001-Add-TLSF_API-and-tlsf_printf.patch
new file mode 100644
index 0000000000..6155942412
--- /dev/null
+++ b/mm/tlsf/0001-Add-TLSF_API-and-tlsf_printf.patch
@@ -0,0 +1,316 @@
+From f413f7d60212a925078748e800f381ced51b9e9a Mon Sep 17 00:00:00 2001
+From: Nodir Temirkhodjaev <no...@gmail.com>
+Date: Fri, 3 Jan 2020 14:41:23 +0500
+Subject: [PATCH 1/8] Add TLSF_API and tlsf_printf.
+
+Needed for static building.
+---
+ tlsf.c | 58 +++++++++++++++++++++++++++++++---------------------------
+ tlsf.h | 46 ++++++++++++++++++++++++++--------------------
+ 2 files changed, 57 insertions(+), 47 deletions(-)
+
+diff --git a/tlsf.c tlsf/tlsf/tlsf.c
+index af57573..e344dd5 100644
+--- a/tlsf.c
++++ tlsf/tlsf/tlsf.c
+@@ -13,6 +13,10 @@
+ #define tlsf_decl static
+ #endif
+ 
++#if !defined(tlsf_printf)
++#define tlsf_printf printf
++#endif
++
+ /*
+ ** Architecture-specific bit manipulation routines.
+ **
+@@ -841,7 +845,7 @@ static void integrity_walker(void* ptr, size_t size, int used, void* user)
+ 	integ->status += status;
+ }
+ 
+-int tlsf_check(tlsf_t tlsf)
++TLSF_API int tlsf_check(tlsf_t tlsf)
+ {
+ 	int i, j;
+ 
+@@ -898,10 +902,10 @@ int tlsf_check(tlsf_t tlsf)
+ static void default_walker(void* ptr, size_t size, int used, void* user)
+ {
+ 	(void)user;
+-	printf("\t%p %s size: %x (%p)\n", ptr, used ? "used" : "free", (unsigned int)size, block_from_ptr(ptr));
++	tlsf_printf("\t%p %s size: %x (%p)\n", ptr, used ? "used" : "free", (unsigned int)size, block_from_ptr(ptr));
+ }
+ 
+-void tlsf_walk_pool(pool_t pool, tlsf_walker walker, void* user)
++TLSF_API void tlsf_walk_pool(pool_t pool, tlsf_walker walker, void* user)
+ {
+ 	tlsf_walker pool_walker = walker ? walker : default_walker;
+ 	block_header_t* block =
+@@ -918,7 +922,7 @@ void tlsf_walk_pool(pool_t pool, tlsf_walker walker, void* user)
+ 	}
+ }
+ 
+-size_t tlsf_block_size(void* ptr)
++TLSF_API size_t tlsf_block_size(void* ptr)
+ {
+ 	size_t size = 0;
+ 	if (ptr)
+@@ -929,7 +933,7 @@ size_t tlsf_block_size(void* ptr)
+ 	return size;
+ }
+ 
+-int tlsf_check_pool(pool_t pool)
++TLSF_API int tlsf_check_pool(pool_t pool)
+ {
+ 	/* Check that the blocks are physically correct. */
+ 	integrity_t integ = { 0, 0 };
+@@ -942,22 +946,22 @@ int tlsf_check_pool(pool_t pool)
+ ** Size of the TLSF structures in a given memory block passed to
+ ** tlsf_create, equal to the size of a control_t
+ */
+-size_t tlsf_size(void)
++TLSF_API size_t tlsf_size(void)
+ {
+ 	return sizeof(control_t);
+ }
+ 
+-size_t tlsf_align_size(void)
++TLSF_API size_t tlsf_align_size(void)
+ {
+ 	return ALIGN_SIZE;
+ }
+ 
+-size_t tlsf_block_size_min(void)
++TLSF_API size_t tlsf_block_size_min(void)
+ {
+ 	return block_size_min;
+ }
+ 
+-size_t tlsf_block_size_max(void)
++TLSF_API size_t tlsf_block_size_max(void)
+ {
+ 	return block_size_max;
+ }
+@@ -967,17 +971,17 @@ size_t tlsf_block_size_max(void)
+ ** tlsf_add_pool, equal to the overhead of a free block and the
+ ** sentinel block.
+ */
+-size_t tlsf_pool_overhead(void)
++TLSF_API size_t tlsf_pool_overhead(void)
+ {
+ 	return 2 * block_header_overhead;
+ }
+ 
+-size_t tlsf_alloc_overhead(void)
++TLSF_API size_t tlsf_alloc_overhead(void)
+ {
+ 	return block_header_overhead;
+ }
+ 
+-pool_t tlsf_add_pool(tlsf_t tlsf, void* mem, size_t bytes)
++TLSF_API pool_t tlsf_add_pool(tlsf_t tlsf, void* mem, size_t bytes)
+ {
+ 	block_header_t* block;
+ 	block_header_t* next;
+@@ -987,7 +991,7 @@ pool_t tlsf_add_pool(tlsf_t tlsf, void* mem, size_t bytes)
+ 
+ 	if (((ptrdiff_t)mem % ALIGN_SIZE) != 0)
+ 	{
+-		printf("tlsf_add_pool: Memory must be aligned by %u bytes.\n",
++		tlsf_printf("tlsf_add_pool: Memory must be aligned by %u bytes.\n",
+ 			(unsigned int)ALIGN_SIZE);
+ 		return 0;
+ 	}
+@@ -995,11 +999,11 @@ pool_t tlsf_add_pool(tlsf_t tlsf, void* mem, size_t bytes)
+ 	if (pool_bytes < block_size_min || pool_bytes > block_size_max)
+ 	{
+ #if defined (TLSF_64BIT)
+-		printf("tlsf_add_pool: Memory size must be between 0x%x and 0x%x00 bytes.\n", 
++		tlsf_printf("tlsf_add_pool: Memory size must be between 0x%x and 0x%x00 bytes.\n", 
+ 			(unsigned int)(pool_overhead + block_size_min),
+ 			(unsigned int)((pool_overhead + block_size_max) / 256));
+ #else
+-		printf("tlsf_add_pool: Memory size must be between %u and %u bytes.\n", 
++		tlsf_printf("tlsf_add_pool: Memory size must be between %u and %u bytes.\n", 
+ 			(unsigned int)(pool_overhead + block_size_min),
+ 			(unsigned int)(pool_overhead + block_size_max));
+ #endif
+@@ -1026,7 +1030,7 @@ pool_t tlsf_add_pool(tlsf_t tlsf, void* mem, size_t bytes)
+ 	return mem;
+ }
+ 
+-void tlsf_remove_pool(tlsf_t tlsf, pool_t pool)
++TLSF_API void tlsf_remove_pool(tlsf_t tlsf, pool_t pool)
+ {
+ 	control_t* control = tlsf_cast(control_t*, tlsf);
+ 	block_header_t* block = offset_to_block(pool, -(int)block_header_overhead);
+@@ -1046,7 +1050,7 @@ void tlsf_remove_pool(tlsf_t tlsf, pool_t pool)
+ */
+ 
+ #if _DEBUG
+-int test_ffs_fls()
++static int test_ffs_fls()
+ {
+ 	/* Verify ffs/fls work properly. */
+ 	int rv = 0;
+@@ -1067,13 +1071,13 @@ int test_ffs_fls()
+ 
+ 	if (rv)
+ 	{
+-		printf("test_ffs_fls: %x ffs/fls tests failed.\n", rv);
++		tlsf_printf("test_ffs_fls: %x ffs/fls tests failed.\n", rv);
+ 	}
+ 	return rv;
+ }
+ #endif
+ 
+-tlsf_t tlsf_create(void* mem)
++TLSF_API tlsf_t tlsf_create(void* mem)
+ {
+ #if _DEBUG
+ 	if (test_ffs_fls())
+@@ -1084,7 +1088,7 @@ tlsf_t tlsf_create(void* mem)
+ 
+ 	if (((tlsfptr_t)mem % ALIGN_SIZE) != 0)
+ 	{
+-		printf("tlsf_create: Memory must be aligned to %u bytes.\n",
++		tlsf_printf("tlsf_create: Memory must be aligned to %u bytes.\n",
+ 			(unsigned int)ALIGN_SIZE);
+ 		return 0;
+ 	}
+@@ -1094,25 +1098,25 @@ tlsf_t tlsf_create(void* mem)
+ 	return tlsf_cast(tlsf_t, mem);
+ }
+ 
+-tlsf_t tlsf_create_with_pool(void* mem, size_t bytes)
++TLSF_API tlsf_t tlsf_create_with_pool(void* mem, size_t bytes)
+ {
+ 	tlsf_t tlsf = tlsf_create(mem);
+ 	tlsf_add_pool(tlsf, (char*)mem + tlsf_size(), bytes - tlsf_size());
+ 	return tlsf;
+ }
+ 
+-void tlsf_destroy(tlsf_t tlsf)
++TLSF_API void tlsf_destroy(tlsf_t tlsf)
+ {
+ 	/* Nothing to do. */
+ 	(void)tlsf;
+ }
+ 
+-pool_t tlsf_get_pool(tlsf_t tlsf)
++TLSF_API pool_t tlsf_get_pool(tlsf_t tlsf)
+ {
+ 	return tlsf_cast(pool_t, (char*)tlsf + tlsf_size());
+ }
+ 
+-void* tlsf_malloc(tlsf_t tlsf, size_t size)
++TLSF_API void* tlsf_malloc(tlsf_t tlsf, size_t size)
+ {
+ 	control_t* control = tlsf_cast(control_t*, tlsf);
+ 	const size_t adjust = adjust_request_size(size, ALIGN_SIZE);
+@@ -1120,7 +1124,7 @@ void* tlsf_malloc(tlsf_t tlsf, size_t size)
+ 	return block_prepare_used(control, block, adjust);
+ }
+ 
+-void* tlsf_memalign(tlsf_t tlsf, size_t align, size_t size)
++TLSF_API void* tlsf_memalign(tlsf_t tlsf, size_t align, size_t size)
+ {
+ 	control_t* control = tlsf_cast(control_t*, tlsf);
+ 	const size_t adjust = adjust_request_size(size, ALIGN_SIZE);
+@@ -1177,7 +1181,7 @@ void* tlsf_memalign(tlsf_t tlsf, size_t align, size_t size)
+ 	return block_prepare_used(control, block, adjust);
+ }
+ 
+-void tlsf_free(tlsf_t tlsf, void* ptr)
++TLSF_API void tlsf_free(tlsf_t tlsf, void* ptr)
+ {
+ 	/* Don't attempt to free a NULL pointer. */
+ 	if (ptr)
+@@ -1205,7 +1209,7 @@ void tlsf_free(tlsf_t tlsf, void* ptr)
+ ** - an extended buffer size will leave the newly-allocated area with
+ **   contents undefined
+ */
+-void* tlsf_realloc(tlsf_t tlsf, void* ptr, size_t size)
++TLSF_API void* tlsf_realloc(tlsf_t tlsf, void* ptr, size_t size)
+ {
+ 	control_t* control = tlsf_cast(control_t*, tlsf);
+ 	void* p = 0;
+diff --git a/tlsf.h tlsf/tlsf/tlsf.h
+index e9b5a91..c2c4161 100644
+--- a/tlsf.h
++++ tlsf/tlsf/tlsf.h
+@@ -40,6 +40,12 @@
+ 
+ #include <stddef.h>
+ 
++/* Definition of the TLSF_API. */
++/* Provide the ability to override linkage features of the interface. */
++#if !defined(TLSF_API)
++#define TLSF_API
++#endif
++
+ #if defined(__cplusplus)
+ extern "C" {
+ #endif
+@@ -50,38 +56,38 @@ typedef void* tlsf_t;
+ typedef void* pool_t;
+ 
+ /* Create/destroy a memory pool. */
+-tlsf_t tlsf_create(void* mem);
+-tlsf_t tlsf_create_with_pool(void* mem, size_t bytes);
+-void tlsf_destroy(tlsf_t tlsf);
+-pool_t tlsf_get_pool(tlsf_t tlsf);
++TLSF_API tlsf_t tlsf_create(void* mem);
++TLSF_API tlsf_t tlsf_create_with_pool(void* mem, size_t bytes);
++TLSF_API void tlsf_destroy(tlsf_t tlsf);
++TLSF_API pool_t tlsf_get_pool(tlsf_t tlsf);
+ 
+ /* Add/remove memory pools. */
+-pool_t tlsf_add_pool(tlsf_t tlsf, void* mem, size_t bytes);
+-void tlsf_remove_pool(tlsf_t tlsf, pool_t pool);
++TLSF_API pool_t tlsf_add_pool(tlsf_t tlsf, void* mem, size_t bytes);
++TLSF_API void tlsf_remove_pool(tlsf_t tlsf, pool_t pool);
+ 
+ /* malloc/memalign/realloc/free replacements. */
+-void* tlsf_malloc(tlsf_t tlsf, size_t bytes);
+-void* tlsf_memalign(tlsf_t tlsf, size_t align, size_t bytes);
+-void* tlsf_realloc(tlsf_t tlsf, void* ptr, size_t size);
+-void tlsf_free(tlsf_t tlsf, void* ptr);
++TLSF_API void* tlsf_malloc(tlsf_t tlsf, size_t bytes);
++TLSF_API void* tlsf_memalign(tlsf_t tlsf, size_t align, size_t bytes);
++TLSF_API void* tlsf_realloc(tlsf_t tlsf, void* ptr, size_t size);
++TLSF_API void tlsf_free(tlsf_t tlsf, void* ptr);
+ 
+ /* Returns internal block size, not original request size */
+-size_t tlsf_block_size(void* ptr);
++TLSF_API size_t tlsf_block_size(void* ptr);
+ 
+ /* Overheads/limits of internal structures. */
+-size_t tlsf_size(void);
+-size_t tlsf_align_size(void);
+-size_t tlsf_block_size_min(void);
+-size_t tlsf_block_size_max(void);
+-size_t tlsf_pool_overhead(void);
+-size_t tlsf_alloc_overhead(void);
++TLSF_API size_t tlsf_size(void);
++TLSF_API size_t tlsf_align_size(void);
++TLSF_API size_t tlsf_block_size_min(void);
++TLSF_API size_t tlsf_block_size_max(void);
++TLSF_API size_t tlsf_pool_overhead(void);
++TLSF_API size_t tlsf_alloc_overhead(void);
+ 
+ /* Debugging. */
+ typedef void (*tlsf_walker)(void* ptr, size_t size, int used, void* user);
+-void tlsf_walk_pool(pool_t pool, tlsf_walker walker, void* user);
++TLSF_API void tlsf_walk_pool(pool_t pool, tlsf_walker walker, void* user);
+ /* Returns nonzero if any internal consistency check fails. */
+-int tlsf_check(tlsf_t tlsf);
+-int tlsf_check_pool(pool_t pool);
++TLSF_API int tlsf_check(tlsf_t tlsf);
++TLSF_API int tlsf_check_pool(pool_t pool);
+ 
+ #if defined(__cplusplus)
+ };
+-- 
+2.34.1
+
diff --git a/mm/tlsf/0002-Define-_DEBUG-to-0-if-not-done-yet.patch b/mm/tlsf/0002-Define-_DEBUG-to-0-if-not-done-yet.patch
new file mode 100644
index 0000000000..7b30c8ab9a
--- /dev/null
+++ b/mm/tlsf/0002-Define-_DEBUG-to-0-if-not-done-yet.patch
@@ -0,0 +1,31 @@
+From 9d731cb125205ead7b80ab6ddb89c250978a86eb Mon Sep 17 00:00:00 2001
+From: Xiang Xiao <xi...@xiaomi.com>
+Date: Wed, 10 Mar 2021 01:05:42 +0800
+Subject: [PATCH 2/8] Define _DEBUG to 0 if not done yet
+
+to avoid the preprocess warning
+
+Signed-off-by: Xiang Xiao <xi...@xiaomi.com>
+Change-Id: I4ae1eb8533563d377ec8614f0c9428c8734e1f2c
+---
+ tlsf.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/tlsf.c tlsf/tlsf/tlsf.c
+index e344dd5..ea8d640 100644
+--- a/tlsf.c
++++ tlsf/tlsf/tlsf.c
+@@ -7,6 +7,10 @@
+ 
+ #include "tlsf.h"
+ 
++#if !defined(_DEBUG)
++#define _DEBUG 0
++#endif
++
+ #if defined(__cplusplus)
+ #define tlsf_decl inline
+ #else
+-- 
+2.34.1
+
diff --git a/mm/tlsf/0003-Support-customize-FL_INDEX_MAX-to-reduce-the-memory-.patch b/mm/tlsf/0003-Support-customize-FL_INDEX_MAX-to-reduce-the-memory-.patch
new file mode 100644
index 0000000000..d806730f4f
--- /dev/null
+++ b/mm/tlsf/0003-Support-customize-FL_INDEX_MAX-to-reduce-the-memory-.patch
@@ -0,0 +1,95 @@
+From d2d18a9ed8836dac252f7c4c61541c2a9e4ebbf0 Mon Sep 17 00:00:00 2001
+From: Xiang Xiao <xi...@xiaomi.com>
+Date: Wed, 10 Mar 2021 02:30:33 +0800
+Subject: [PATCH 3/8] Support customize FL_INDEX_MAX to reduce the memory
+ overhead
+
+user can define the max pool size through TLSF_MAX_POOL_SIZE
+
+Signed-off-by: Xiang Xiao <xi...@xiaomi.com>
+Change-Id: I021b816f65c1bc5c1025969bc6cc458029f3bc88
+---
+ tlsf.c | 46 +++++++++++++++++++++++++++++-----------------
+ 1 file changed, 29 insertions(+), 17 deletions(-)
+
+diff --git a/tlsf.c tlsf/tlsf/tlsf.c
+index ea8d640..66daf33 100644
+--- a/tlsf.c
++++ tlsf/tlsf/tlsf.c
+@@ -48,6 +48,29 @@
+ #define TLSF_64BIT
+ #endif
+ 
++/*
++** Returns one plus the index of the most significant 1-bit of n,
++** or if n is zero, returns zero.
++*/
++#ifdef TLSF_64BIT
++#define TLSF_FLS(n) ((n) & 0xffffffff00000000ull ? 32 + TLSF_FLS32((size_t)(n) >> 32) : TLSF_FLS32(n))
++#else
++#define TLSF_FLS(n) TLSF_FLS32(n)
++#endif
++
++#define TLSF_FLS32(n) ((n) & 0xffff0000 ? 16 + TLSF_FLS16((n) >> 16) : TLSF_FLS16(n))
++#define TLSF_FLS16(n) ((n) & 0xff00     ?  8 + TLSF_FLS8 ((n) >>  8) : TLSF_FLS8 (n))
++#define TLSF_FLS8(n)  ((n) & 0xf0       ?  4 + TLSF_FLS4 ((n) >>  4) : TLSF_FLS4 (n))
++#define TLSF_FLS4(n)  ((n) & 0xc        ?  2 + TLSF_FLS2 ((n) >>  2) : TLSF_FLS2 (n))
++#define TLSF_FLS2(n)  ((n) & 0x2        ?  1 + TLSF_FLS1 ((n) >>  1) : TLSF_FLS1 (n))
++#define TLSF_FLS1(n)  ((n) & 0x1        ?  1 : 0)
++
++/*
++** Returns round up value of log2(n).
++** Note: it is used at compile time.
++*/
++#define TLSF_LOG2_CEIL(n) ((n) & (n - 1) ? TLSF_FLS(n) : TLSF_FLS(n) - 1)
++
+ /*
+ ** gcc 3.4 and above have builtin support, specialized for architecture.
+ ** Some compilers masquerade as gcc; patchlevel test filters them out.
+@@ -155,29 +178,16 @@ tlsf_decl int tlsf_fls(unsigned int word)
+ #else
+ /* Fall back to generic implementation. */
+ 
+-tlsf_decl int tlsf_fls_generic(unsigned int word)
+-{
+-	int bit = 32;
+-
+-	if (!word) bit -= 1;
+-	if (!(word & 0xffff0000)) { word <<= 16; bit -= 16; }
+-	if (!(word & 0xff000000)) { word <<= 8; bit -= 8; }
+-	if (!(word & 0xf0000000)) { word <<= 4; bit -= 4; }
+-	if (!(word & 0xc0000000)) { word <<= 2; bit -= 2; }
+-	if (!(word & 0x80000000)) { word <<= 1; bit -= 1; }
+-
+-	return bit;
+-}
+-
+ /* Implement ffs in terms of fls. */
+ tlsf_decl int tlsf_ffs(unsigned int word)
+ {
+-	return tlsf_fls_generic(word & (~word + 1)) - 1;
++	const unsigned int reverse = word & (~word + 1);
++	return TLSF_FLS32(reverse) - 1;
+ }
+ 
+ tlsf_decl int tlsf_fls(unsigned int word)
+ {
+-	return tlsf_fls_generic(word) - 1;
++	return TLSF_FLS32(word) - 1;
+ }
+ 
+ #endif
+@@ -242,7 +252,9 @@ enum tlsf_private
+ 	** blocks below that size into the 0th first-level list.
+ 	*/
+ 
+-#if defined (TLSF_64BIT)
++#if defined (TLSF_MAX_POOL_SIZE)
++	FL_INDEX_MAX = TLSF_LOG2_CEIL(TLSF_MAX_POOL_SIZE),
++#elif defined (TLSF_64BIT)
+ 	/*
+ 	** TODO: We can increase this to support larger sizes, at the expense
+ 	** of more overhead in the TLSF structure.
+-- 
+2.34.1
+
diff --git a/mm/tlsf/0004-Add-tlsf_extend_pool-function.patch b/mm/tlsf/0004-Add-tlsf_extend_pool-function.patch
new file mode 100644
index 0000000000..2c646b3131
--- /dev/null
+++ b/mm/tlsf/0004-Add-tlsf_extend_pool-function.patch
@@ -0,0 +1,127 @@
+From 76f37069b107ebc4dab50aa2ed553a3f751dc671 Mon Sep 17 00:00:00 2001
+From: Xiang Xiao <xi...@xiaomi.com>
+Date: Tue, 9 Mar 2021 21:54:16 +0800
+Subject: [PATCH 4/8] Add tlsf_extend_pool function
+
+could work with sbrk to extend the pool size dynamically
+
+Change-Id: I4f2cda419f88a31bc478e74813ebcd0d1275617c
+---
+ tlsf.c | 56 ++++++++++++++++++++++++++++++++++++++++++--------------
+ tlsf.h |  1 +
+ 2 files changed, 43 insertions(+), 14 deletions(-)
+
+diff --git a/tlsf.c tlsf/tlsf/tlsf.c
+index 66daf33..6fd281a 100644
+--- a/tlsf.c
++++ tlsf/tlsf/tlsf.c
+@@ -997,17 +997,18 @@ TLSF_API size_t tlsf_alloc_overhead(void)
+ 	return block_header_overhead;
+ }
+ 
+-TLSF_API pool_t tlsf_add_pool(tlsf_t tlsf, void* mem, size_t bytes)
++TLSF_API pool_t tlsf_extend_pool(tlsf_t tlsf, void* mem, size_t bytes, size_t incr)
+ {
+ 	block_header_t* block;
+ 	block_header_t* next;
+ 
++	control_t* control = tlsf_cast(control_t*, tlsf);
+ 	const size_t pool_overhead = tlsf_pool_overhead();
+ 	const size_t pool_bytes = align_down(bytes - pool_overhead, ALIGN_SIZE);
+ 
+ 	if (((ptrdiff_t)mem % ALIGN_SIZE) != 0)
+ 	{
+-		tlsf_printf("tlsf_add_pool: Memory must be aligned by %u bytes.\n",
++		tlsf_printf("tlsf_extend_pool: Memory must be aligned by %u bytes.\n",
+ 			(unsigned int)ALIGN_SIZE);
+ 		return 0;
+ 	}
+@@ -1015,27 +1016,49 @@ TLSF_API pool_t tlsf_add_pool(tlsf_t tlsf, void* mem, size_t bytes)
+ 	if (pool_bytes < block_size_min || pool_bytes > block_size_max)
+ 	{
+ #if defined (TLSF_64BIT)
+-		tlsf_printf("tlsf_add_pool: Memory size must be between 0x%x and 0x%x00 bytes.\n", 
++		tlsf_printf("tlsf_extend_pool: Memory size must be between 0x%x and 0x%x00 bytes.\n", 
+ 			(unsigned int)(pool_overhead + block_size_min),
+ 			(unsigned int)((pool_overhead + block_size_max) / 256));
+ #else
+-		tlsf_printf("tlsf_add_pool: Memory size must be between %u and %u bytes.\n", 
++		tlsf_printf("tlsf_extend_pool: Memory size must be between %u and %u bytes.\n", 
+ 			(unsigned int)(pool_overhead + block_size_min),
+ 			(unsigned int)(pool_overhead + block_size_max));
+ #endif
+ 		return 0;
+ 	}
+ 
+-	/*
+-	** Create the main free block. Offset the start of the block slightly
+-	** so that the prev_phys_block field falls outside of the pool -
+-	** it will never be used.
+-	*/
+-	block = offset_to_block(mem, -(tlsfptr_t)block_header_overhead);
+-	block_set_size(block, pool_bytes);
+-	block_set_free(block);
+-	block_set_prev_used(block);
+-	block_insert(tlsf_cast(control_t*, tlsf), block);
++	if (incr > 0 && incr < tlsf_block_size_min())
++	{
++		tlsf_printf("tlsf_extend_pool: Increased size must be at least %u bytes.\n",
++			(unsigned int)tlsf_block_size_min());
++		return 0;
++	}
++
++	if (incr == 0) /* Initialize the pool */
++	{
++		/*
++		** Create the main free block. Offset the start of the block slightly
++		** so that the prev_phys_block field falls outside of the pool -
++		** it will never be used.
++		*/
++		block = offset_to_block(mem, -(tlsfptr_t)block_header_overhead);
++		block_set_size(block, pool_bytes);
++		block_set_free(block);
++		block_set_prev_used(block);
++		block_insert(control, block);
++	}
++	else /* Extend the pool */
++	{
++		/* Extend the sentinel block */
++		const size_t new_bytes = align_down((bytes + incr) -
++			(pool_overhead + pool_bytes) - block_header_overhead, ALIGN_SIZE);
++
++		block = offset_to_block(mem, pool_bytes);
++		block_set_size(block, new_bytes);
++		block_set_free(block);
++		block = block_merge_prev(control, block);
++		block_insert(control, block);
++	}
+ 
+ 	/* Split the block to create a zero-size sentinel block. */
+ 	next = block_link_next(block);
+@@ -1046,6 +1069,11 @@ TLSF_API pool_t tlsf_add_pool(tlsf_t tlsf, void* mem, size_t bytes)
+ 	return mem;
+ }
+ 
++TLSF_API pool_t tlsf_add_pool(tlsf_t tlsf, void* mem, size_t bytes)
++{
++	return tlsf_extend_pool(tlsf, mem, bytes, 0);
++}
++
+ TLSF_API void tlsf_remove_pool(tlsf_t tlsf, pool_t pool)
+ {
+ 	control_t* control = tlsf_cast(control_t*, tlsf);
+diff --git a/tlsf.h tlsf/tlsf/tlsf.h
+index c2c4161..085e053 100644
+--- a/tlsf.h
++++ tlsf/tlsf/tlsf.h
+@@ -63,6 +63,7 @@ TLSF_API pool_t tlsf_get_pool(tlsf_t tlsf);
+ 
+ /* Add/remove memory pools. */
+ TLSF_API pool_t tlsf_add_pool(tlsf_t tlsf, void* mem, size_t bytes);
++TLSF_API pool_t tlsf_extend_pool(tlsf_t tlsf, void* mem, size_t bytes, size_t incr);
+ TLSF_API void tlsf_remove_pool(tlsf_t tlsf, pool_t pool);
+ 
+ /* malloc/memalign/realloc/free replacements. */
+-- 
+2.34.1
+
diff --git a/mm/tlsf/0005-Fix-warnining-on-implicit-pointer-conversion.patch b/mm/tlsf/0005-Fix-warnining-on-implicit-pointer-conversion.patch
new file mode 100644
index 0000000000..5cce68e832
--- /dev/null
+++ b/mm/tlsf/0005-Fix-warnining-on-implicit-pointer-conversion.patch
@@ -0,0 +1,26 @@
+From be043f1f50a0b30c3817c262d516083e409283d7 Mon Sep 17 00:00:00 2001
+From: Juan Carrano <j....@fu-berlin.de>
+Date: Mon, 23 Apr 2018 13:55:42 +0200
+Subject: [PATCH 5/8] Fix warnining on implicit pointer conversion.
+
+Change-Id: I2a208a0a4c835e752fe827acd3d5adb1aa2be626
+---
+ tlsf.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/tlsf.c tlsf/tlsf/tlsf.c
+index 6fd281a..536bdff 100644
+--- a/tlsf.c
++++ tlsf/tlsf/tlsf.c
+@@ -918,7 +918,7 @@ TLSF_API int tlsf_check(tlsf_t tlsf)
+ static void default_walker(void* ptr, size_t size, int used, void* user)
+ {
+ 	(void)user;
+-	tlsf_printf("\t%p %s size: %x (%p)\n", ptr, used ? "used" : "free", (unsigned int)size, block_from_ptr(ptr));
++	tlsf_printf("\t%p %s size: %x (%p)\n", ptr, used ? "used" : "free", (unsigned int)size, (void *)block_from_ptr(ptr));
+ }
+ 
+ TLSF_API void tlsf_walk_pool(pool_t pool, tlsf_walker walker, void* user)
+-- 
+2.34.1
+
diff --git a/mm/tlsf/Make.defs b/mm/tlsf/Make.defs
new file mode 100644
index 0000000000..318cf76450
--- /dev/null
+++ b/mm/tlsf/Make.defs
@@ -0,0 +1,47 @@
+############################################################################
+# mm/tlsf/Make.defs
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.  The
+# ASF licenses this file to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance with the
+# License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+############################################################################
+
+# tlfs memory allocator
+
+ifeq ($(CONFIG_MM_TLSF_MANAGER),y)
+
+TLSF = tlsf/tlsf/.git
+$(TLSF):
+	$(Q) echo "Downloading: tlsf"
+	$(Q) git clone git@github.com:mattconte/tlsf.git tlsf/tlsf
+	$(Q) patch -p0 < tlsf/0001-Add-TLSF_API-and-tlsf_printf.patch
+	$(Q) patch -p0 < tlsf/0002-Define-_DEBUG-to-0-if-not-done-yet.patch
+	$(Q) patch -p0 < tlsf/0003-Support-customize-FL_INDEX_MAX-to-reduce-the-memory-.patch
+	$(Q) patch -p0 < tlsf/0004-Add-tlsf_extend_pool-function.patch
+	$(Q) patch -p0 < tlsf/0005-Fix-warnining-on-implicit-pointer-conversion.patch
+context::$(TLSF)
+
+distclean::
+	$(Q) rm -rf tlsf/tlsf
+
+CSRCS += mm_tlsf.c tlsf.c
+
+CFLAGS += ${shell $(DEFINE) "$(CC)" tlsf_printf=if(0)}
+
+# Add the tlsf directory to the build
+
+DEPPATH += --dep-path tlsf --dep-path tlsf/tlsf
+VPATH += :tlsf:tlsf/tlsf
+endif
diff --git a/mm/tlsf/mm_tlsf.c b/mm/tlsf/mm_tlsf.c
new file mode 100644
index 0000000000..a4a41b130f
--- /dev/null
+++ b/mm/tlsf/mm_tlsf.c
@@ -0,0 +1,1111 @@
+/****************************************************************************
+ * mm/tlsf/mm_tlsf.c
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.  The
+ * ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ ****************************************************************************/
+
+/****************************************************************************
+ * Included Files
+ ****************************************************************************/
+
+#include <nuttx/config.h>
+
+#include <unistd.h>
+#include <errno.h>
+#include <assert.h>
+#include <debug.h>
+#include <execinfo.h>
+#include <malloc.h>
+#include <sched.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <nuttx/arch.h>
+#include <nuttx/fs/procfs.h>
+#include <nuttx/mutex.h>
+#include <nuttx/mm/mm.h>
+
+#include "tlsf/tlsf.h"
+#include "kasan/kasan.h"
+
+/****************************************************************************
+ * Pre-processor Definitions
+ ****************************************************************************/
+
+#if UINTPTR_MAX <= UINT32_MAX
+#  define MM_PTR_FMT_WIDTH 11
+#elif UINTPTR_MAX <= UINT64_MAX
+#  define MM_PTR_FMT_WIDTH 19
+#endif
+
+/****************************************************************************
+ * Private Types
+ ****************************************************************************/
+
+/* Node of the per-CPU deferred-free list (see add_delaylist() and
+ * free_delaylist()).  The node is overlaid on the freed block itself.
+ */
+
+struct mm_delaynode_s
+{
+  FAR struct mm_delaynode_s *flink; /* Next block awaiting deferred free */
+};
+
+/* State of one TLSF-managed heap instance */
+
+struct mm_heap_s
+{
+  /* Mutually exclusive access to this data set is enforced with
+   * the following un-named mutex.
+   */
+
+  mutex_t mm_lock;
+
+  /* This is the size of the heap provided to mm */
+
+  size_t mm_heapsize;
+
+  /* This is the first and last of the heap */
+
+  FAR void *mm_heapstart[CONFIG_MM_REGIONS];
+  FAR void *mm_heapend[CONFIG_MM_REGIONS];
+
+#if CONFIG_MM_REGIONS > 1
+  int mm_nregions;
+#endif
+
+  tlsf_t mm_tlsf; /* The tlsf context */
+
+  /* Free delay list, for some situation can't do free immediately */
+
+#ifdef CONFIG_SMP
+  struct mm_delaynode_s *mm_delaylist[CONFIG_SMP_NCPUS];
+#else
+  struct mm_delaynode_s *mm_delaylist[1];
+#endif
+
+#if defined(CONFIG_FS_PROCFS) && !defined(CONFIG_FS_PROCFS_EXCLUDE_MEMINFO)
+  struct procfs_meminfo_entry_s mm_procfs;
+#endif
+};
+
+/* Accumulator passed to memdump_handler() while walking the pool */
+
+struct memdump_info_s
+{
+  pid_t pid;  /* Filter: pid to dump (-1 = all used, <= -2 = free nodes) */
+  int   blks; /* Running count of matching blocks */
+  int   size; /* Running sum of matching block sizes */
+};
+
+#if CONFIG_MM_BACKTRACE >= 0
+/* Per-allocation bookkeeping appended at the very end of each allocated
+ * block (mm_malloc() reserves sizeof(struct memdump_backtrace_s) extra
+ * bytes and places this record at ptr + mm_malloc_size(ptr)).
+ */
+
+struct memdump_backtrace_s
+{
+  pid_t pid;                                /* The pid for caller */
+#if CONFIG_MM_BACKTRACE > 0
+  FAR void *backtrace[CONFIG_MM_BACKTRACE]; /* The backtrace buffer for caller */
+#endif
+};
+#endif
+
+/****************************************************************************
+ * Private Functions
+ ****************************************************************************/
+
+#if CONFIG_MM_BACKTRACE >= 0
+
+/****************************************************************************
+ * Name: memdump_backtrace
+ *
+ * Description:
+ *   Record the caller's pid (and, when enabled via procfs, its backtrace)
+ *   into the bookkeeping record appended to an allocation.
+ *
+ ****************************************************************************/
+
+static void memdump_backtrace(FAR struct mm_heap_s *heap,
+                              FAR struct memdump_backtrace_s *dump)
+{
+  dump->pid = getpid();
+#  if CONFIG_MM_BACKTRACE > 0
+  if (heap->mm_procfs.backtrace)
+    {
+      int ret = backtrace(dump->backtrace, CONFIG_MM_BACKTRACE);
+
+      /* NULL-terminate the unused tail of the buffer */
+
+      while (ret < CONFIG_MM_BACKTRACE)
+        {
+          dump->backtrace[ret++] = NULL;
+        }
+    }
+#  endif
+}
+#endif
+
+/****************************************************************************
+ * Name: add_delaylist
+ *
+ * Description:
+ *   Queue a block for deferred freeing on the current CPU's delay list.
+ *   Used when mm_lock() cannot be taken (see mm_free()).
+ *
+ ****************************************************************************/
+
+static void add_delaylist(FAR struct mm_heap_s *heap, FAR void *mem)
+{
+#if defined(CONFIG_BUILD_FLAT) || defined(__KERNEL__)
+  FAR struct mm_delaynode_s *tmp = mem;
+  irqstate_t flags;
+
+  /* Delay the deallocation until a more appropriate time. */
+
+  flags = enter_critical_section();
+
+  tmp->flink = heap->mm_delaylist[up_cpu_index()];
+  heap->mm_delaylist[up_cpu_index()] = tmp;
+
+  leave_critical_section(flags);
+#endif
+}
+
+/****************************************************************************
+ * Name: free_delaylist
+ *
+ * Description:
+ *   Detach the current CPU's deferred-free list and return every queued
+ *   block to the heap via mm_free().
+ *
+ ****************************************************************************/
+
+static void free_delaylist(FAR struct mm_heap_s *heap)
+{
+#if defined(CONFIG_BUILD_FLAT) || defined(__KERNEL__)
+  FAR struct mm_delaynode_s *tmp;
+  irqstate_t flags;
+
+  /* Move the delay list to local */
+
+  flags = enter_critical_section();
+
+  tmp = heap->mm_delaylist[up_cpu_index()];
+  heap->mm_delaylist[up_cpu_index()] = NULL;
+
+  leave_critical_section(flags);
+
+  /* Walk the detached list and free each queued node */
+
+  while (tmp)
+    {
+      FAR void *address;
+
+      /* Get the first delayed deallocation */
+
+      address = tmp;
+      tmp = tmp->flink;
+
+      /* The address should always be non-NULL since that was checked in the
+       * 'while' condition above.
+       */
+
+      mm_free(heap, address);
+    }
+#endif
+}
+
+/****************************************************************************
+ * Name: mallinfo_handler
+ *
+ * Description:
+ *   tlsf_walk_pool() callback: accumulate free/used block statistics into
+ *   a struct mallinfo passed via 'user'.
+ *
+ ****************************************************************************/
+
+static void mallinfo_handler(FAR void *ptr, size_t size, int used,
+                             FAR void *user)
+{
+  FAR struct mallinfo *info = user;
+
+  if (!used)
+    {
+      info->ordblks++;
+      info->fordblks += size;
+
+      /* Track the largest free block seen so far */
+
+      if (size > info->mxordblk)
+        {
+          info->mxordblk = size;
+        }
+    }
+  else
+    {
+      info->aordblks++;
+    }
+}
+
+#if CONFIG_MM_BACKTRACE >= 0
+
+/****************************************************************************
+ * Name: mallinfo_task_handler
+ *
+ * Description:
+ *   tlsf_walk_pool() callback: accumulate per-task statistics for blocks
+ *   whose appended bookkeeping record matches info->pid.
+ *
+ ****************************************************************************/
+
+static void mallinfo_task_handler(FAR void *ptr, size_t size, int used,
+                                  FAR void *user)
+{
+  FAR struct memdump_backtrace_s *dump;
+  FAR struct mallinfo_task *info = user;
+
+  /* The bookkeeping record lives in the last bytes of every block */
+
+  size -= sizeof(struct memdump_backtrace_s);
+  dump = ptr + size;
+  if (used && dump->pid == info->pid)
+    {
+      info->aordblks++;
+      info->uordblks += size;
+    }
+}
+#endif
+
+/****************************************************************************
+ * Name: mm_lock
+ *
+ * Description:
+ *   Take the MM mutex. This may be called from the OS in certain conditions
+ *   when it is impossible to wait on a mutex:
+ *     1.The idle process performs the memory corruption check.
+ *     2.The task/thread free the memory in the exiting process.
+ *
+ * Input Parameters:
+ *   heap  - heap instance want to take mutex
+ *
+ * Returned Value:
+ *   0 if the lock can be taken, otherwise negative errno.
+ *
+ ****************************************************************************/
+
+static int mm_lock(FAR struct mm_heap_s *heap)
+{
+#if defined(CONFIG_BUILD_FLAT) || defined(__KERNEL__)
+  /* Check current environment */
+
+  if (up_interrupt_context())
+    {
+#if !defined(CONFIG_SMP)
+      /* Check the mutex value, if held by someone, then return false.
+       * Or, touch the heap internal data directly.
+       */
+
+      return nxmutex_is_locked(&heap->mm_lock) ? -EAGAIN : 0;
+#else
+      /* Can't take mutex in SMP interrupt handler */
+
+      return -EAGAIN;
+#endif
+    }
+  else
+#endif
+
+  /* gettid() returns the task ID of the task at the head of the ready-to-
+   * run task list.  mm_lock() may be called during context
+   * switches.  There are certain situations during context switching when
+   * the OS data structures are in flux and then can't be freed immediately
+   * (e.g. the running thread stack).
+   *
+   * gettid() returns a negative value (-ESRCH) to flag this special
+   * situation; the caller then defers the operation (see add_delaylist).
+   */
+
+  if (gettid() < 0)
+    {
+      return -ESRCH;
+    }
+  else
+    {
+      return nxmutex_lock(&heap->mm_lock);
+    }
+}
+
+/****************************************************************************
+ * Name: mm_unlock
+ *
+ * Description:
+ *   Release the MM mutex when it is no longer needed.
+ *
+ ****************************************************************************/
+
+static void mm_unlock(FAR struct mm_heap_s *heap)
+{
+#if defined(CONFIG_BUILD_FLAT) || defined(__KERNEL__)
+  /* mm_lock() never took the mutex in interrupt context, so there is
+   * nothing to release here.
+   */
+
+  if (up_interrupt_context())
+    {
+      return;
+    }
+#endif
+
+  DEBUGVERIFY(nxmutex_unlock(&heap->mm_lock));
+}
+
+/****************************************************************************
+ * Name: memdump_handler
+ *
+ * Description:
+ *   tlsf_walk_pool() callback for mm_memdump(): print every block that
+ *   matches the pid filter in 'user' (-1 = all used blocks, pid >= 0 =
+ *   used blocks of that task, <= -2 = free blocks) and accumulate totals.
+ *
+ ****************************************************************************/
+
+static void memdump_handler(FAR void *ptr, size_t size, int used,
+                            FAR void *user)
+{
+  FAR struct memdump_info_s *info = user;
+#if CONFIG_MM_BACKTRACE >= 0
+  FAR struct memdump_backtrace_s *dump;
+
+  /* The bookkeeping record lives in the last bytes of every block */
+
+  size -= sizeof(struct memdump_backtrace_s);
+  dump = ptr + size;
+#endif
+
+  if (used)
+    {
+#if CONFIG_MM_BACKTRACE < 0
+      if (info->pid == -1)
+#else
+      if (info->pid == -1 || dump->pid == info->pid)
+#endif
+        {
+#if CONFIG_MM_BACKTRACE < 0
+          syslog(LOG_INFO, "%12zu%*p\n", size, MM_PTR_FMT_WIDTH, ptr);
+#else
+#  if CONFIG_MM_BACKTRACE > 0
+          int i;
+          FAR const char *format = " %0*p";
+#  endif
+          char buf[CONFIG_MM_BACKTRACE * MM_PTR_FMT_WIDTH + 1];
+
+          buf[0] = '\0';
+#  if CONFIG_MM_BACKTRACE > 0
+          /* Render the recorded backtrace as fixed-width pointer columns */
+
+          for (i = 0; i < CONFIG_MM_BACKTRACE && dump->backtrace[i]; i++)
+            {
+              sprintf(buf + i * MM_PTR_FMT_WIDTH, format,
+                      MM_PTR_FMT_WIDTH - 1, dump->backtrace[i]);
+            }
+#  endif
+
+         syslog(LOG_INFO, "%6d%12zu%*p%s\n",
+                (int)dump->pid, size, MM_PTR_FMT_WIDTH,
+                ptr, buf);
+#endif
+          info->blks++;
+          info->size += size;
+        }
+    }
+  else if (info->pid <= -2)
+    {
+      /* Free-node dump mode */
+
+      info->blks++;
+      info->size += size;
+      syslog(LOG_INFO, "%12zu%*p\n", size, MM_PTR_FMT_WIDTH, ptr);
+    }
+}
+
+/****************************************************************************
+ * Public Functions
+ ****************************************************************************/
+
+/****************************************************************************
+ * Name: mm_addregion
+ *
+ * Description:
+ *   This function adds a region of contiguous memory to the selected heap.
+ *
+ * Input Parameters:
+ *   heap      - The selected heap
+ *   heapstart - Start of the heap region
+ *   heapsize  - Size of the heap region
+ *
+ * Returned Value:
+ *   None
+ *
+ * Assumptions:
+ *
+ ****************************************************************************/
+
+void mm_addregion(FAR struct mm_heap_s *heap, FAR void *heapstart,
+                  size_t heapsize)
+{
+#if CONFIG_MM_REGIONS > 1
+  int idx;
+
+  idx = heap->mm_nregions;
+
+  /* Writing past CONFIG_MM_REGIONS would have catastrophic consequences */
+
+  DEBUGASSERT(idx < CONFIG_MM_REGIONS);
+  if (idx >= CONFIG_MM_REGIONS)
+    {
+      return;
+    }
+
+#else
+# define idx 0
+#endif
+
+  /* Register to KASan for access check.  heapsize is passed by reference;
+   * presumably KASan may shrink the usable size for its own bookkeeping --
+   * TODO confirm against kasan_register().
+   */
+
+  kasan_register(heapstart, &heapsize);
+
+  DEBUGVERIFY(mm_lock(heap));
+
+  minfo("Region %d: base=%p size=%zu\n", idx + 1, heapstart, heapsize);
+
+  /* Add the size of this region to the total size of the heap */
+
+  heap->mm_heapsize += heapsize;
+
+  /* Save the start and end of the heap */
+
+  heap->mm_heapstart[idx] = heapstart;
+  heap->mm_heapend[idx]   = heapstart + heapsize;
+
+#undef idx
+
+#if CONFIG_MM_REGIONS > 1
+  heap->mm_nregions++;
+#endif
+
+  /* Add memory to the tlsf pool */
+
+  tlsf_add_pool(heap->mm_tlsf, heapstart, heapsize);
+  mm_unlock(heap);
+}
+
+/****************************************************************************
+ * Name: mm_brkaddr
+ *
+ * Description:
+ *   Return the break address of a heap region.  Zero is returned if the
+ *   memory region is not initialized.
+ *
+ ****************************************************************************/
+
+FAR void *mm_brkaddr(FAR struct mm_heap_s *heap, int region)
+{
+#if CONFIG_MM_REGIONS > 1
+  DEBUGASSERT(region >= 0 && region < heap->mm_nregions);
+#else
+  DEBUGASSERT(region == 0);
+#endif
+
+  /* The break address is simply the recorded end of the region */
+
+  return heap->mm_heapend[region];
+}
+
+/****************************************************************************
+ * Name: mm_calloc
+ *
+ * Description:
+ *   mm_calloc() calculates the size of the allocation and calls mm_zalloc()
+ *
+ ****************************************************************************/
+
+FAR void *mm_calloc(FAR struct mm_heap_s *heap, size_t n, size_t elem_size)
+{
+  FAR void *ret = NULL;
+
+  /* Verify input parameters */
+
+  if (n > 0 && elem_size > 0)
+    {
+      /* Assure that the following multiplication cannot overflow the size_t
+       * type, i.e., that:  SIZE_MAX >= n * elem_size
+       *
+       * Refer to SEI CERT C Coding Standard.
+       */
+
+      if (n <= (SIZE_MAX / elem_size))
+        {
+          ret = mm_zalloc(heap, n * elem_size);
+        }
+    }
+
+  return ret;
+}
+
+#ifdef CONFIG_DEBUG_MM
+/****************************************************************************
+ * Name: mm_checkcorruption
+ *
+ * Description:
+ *   mm_checkcorruption is used to check whether memory heap is normal.
+ *   Silently returns if the lock cannot be taken (e.g. from the idle task).
+ *
+ ****************************************************************************/
+
+void mm_checkcorruption(FAR struct mm_heap_s *heap)
+{
+#if CONFIG_MM_REGIONS > 1
+  int region;
+#else
+# define region 0
+#endif
+
+  /* Visit each region */
+
+#if CONFIG_MM_REGIONS > 1
+  for (region = 0; region < heap->mm_nregions; region++)
+#endif
+    {
+      /* Retake the mutex for each region to reduce latencies */
+
+      if (mm_lock(heap) < 0)
+        {
+          return;
+        }
+
+      /* Check tlsf control block in the first pass */
+
+      if (region == 0)
+        {
+          tlsf_check(heap->mm_tlsf);
+        }
+
+      /* Check tlsf pool in each iteration temporarily */
+
+      tlsf_check_pool(heap->mm_heapstart[region]);
+
+      /* Release the mutex */
+
+      mm_unlock(heap);
+    }
+#undef region
+}
+#endif
+
+/****************************************************************************
+ * Name: mm_extend
+ *
+ * Description:
+ *   Extend a heap region by add a block of (virtually) contiguous memory
+ *   to the end of the heap.  'mem' must be exactly the current end of the
+ *   selected region (asserted below).
+ *
+ ****************************************************************************/
+
+void mm_extend(FAR struct mm_heap_s *heap, FAR void *mem, size_t size,
+               int region)
+{
+  size_t oldsize;
+
+  /* Make sure that we were passed valid parameters */
+
+#if CONFIG_MM_REGIONS > 1
+  DEBUGASSERT(region >= 0 && region < heap->mm_nregions);
+#else
+  DEBUGASSERT(region == 0);
+#endif
+  DEBUGASSERT(mem == heap->mm_heapend[region]);
+
+  /* Take the memory manager mutex */
+
+  DEBUGVERIFY(mm_lock(heap));
+
+  /* Extend the tlsf pool */
+
+  oldsize = heap->mm_heapend[region] - heap->mm_heapstart[region];
+  tlsf_extend_pool(heap->mm_tlsf, heap->mm_heapstart[region], oldsize, size);
+
+  /* Save the new size */
+
+  heap->mm_heapsize += size;
+  heap->mm_heapend[region] += size;
+
+  mm_unlock(heap);
+}
+
+/****************************************************************************
+ * Name: mm_free
+ *
+ * Description:
+ *   Returns a chunk of memory to the list of free nodes,  merging with
+ *   adjacent free chunks if possible.  If the heap lock cannot be taken
+ *   (interrupt context, context switch), the free is deferred via the
+ *   per-CPU delay list instead.
+ *
+ ****************************************************************************/
+
+void mm_free(FAR struct mm_heap_s *heap, FAR void *mem)
+{
+  int ret;
+
+  UNUSED(ret);
+  minfo("Freeing %p\n", mem);
+
+  /* Protect against attempts to free a NULL reference */
+
+  if (!mem)
+    {
+      return;
+    }
+
+  if (mm_lock(heap) == 0)
+    {
+      /* Poison before release so stale accesses are caught by KASan */
+
+      kasan_poison(mem, mm_malloc_size(mem));
+
+      /* Pass, return to the tlsf pool */
+
+      tlsf_free(heap->mm_tlsf, mem);
+      mm_unlock(heap);
+    }
+  else
+    {
+      /* Add to the delay list(see the comment in mm_lock) */
+
+      add_delaylist(heap, mem);
+    }
+}
+
+/****************************************************************************
+ * Name: mm_heapmember
+ *
+ * Description:
+ *   Check if an address lies in the heap.
+ *
+ * Parameters:
+ *   heap - The heap to check
+ *   mem  - The address to check
+ *
+ * Return Value:
+ *   true if the address is a member of the heap; false if not.
+ *   If the address is not a member of the heap, then it
+ *   must be a member of the user-space heap (unchecked)
+ *
+ ****************************************************************************/
+
+bool mm_heapmember(FAR struct mm_heap_s *heap, FAR void *mem)
+{
+#if CONFIG_MM_REGIONS > 1
+  int i;
+
+  /* A valid address from the heap for this region would have to lie
+   * between the region's two guard nodes.
+   */
+
+  for (i = 0; i < heap->mm_nregions; i++)
+    {
+      if (mem >= heap->mm_heapstart[i] &&
+          mem < heap->mm_heapend[i])
+        {
+          return true;
+        }
+    }
+
+  /* The address does not lie within any region assigned to the heap */
+
+  return false;
+
+#else
+  /* A valid address from the heap would have to lie between the
+   * two guard nodes.
+   */
+
+  if (mem >= heap->mm_heapstart[0] &&
+      mem < heap->mm_heapend[0])
+    {
+      return true;
+    }
+
+  /* Otherwise, the address does not lie in the heap */
+
+  return false;
+
+#endif
+}
+
+/****************************************************************************
+ * Name: mm_initialize
+ *
+ * Description:
+ *   Initialize the selected heap data structures, providing the initial
+ *   heap region.  The mm_heap_s context and the TLSF control block are
+ *   carved out of the start of the provided region.
+ *
+ * Input Parameters:
+ *   name      - Name of the heap (used for the procfs meminfo entry)
+ *   heapstart - Start of the initial heap region
+ *   heapsize  - Size of the initial heap region
+ *
+ * Returned Value:
+ *   The created heap instance (placed at the start of the region)
+ *
+ * Assumptions:
+ *
+ ****************************************************************************/
+
+FAR struct mm_heap_s *mm_initialize(FAR const char *name,
+                                    FAR void *heapstart, size_t heapsize)
+{
+  FAR struct mm_heap_s *heap;
+
+  minfo("Heap: name=%s start=%p size=%zu\n", name, heapstart, heapsize);
+
+  /* Reserve a block space for mm_heap_s context */
+
+  DEBUGASSERT(heapsize > sizeof(struct mm_heap_s));
+  heap = (FAR struct mm_heap_s *)heapstart;
+  memset(heap, 0, sizeof(struct mm_heap_s));
+  heapstart += sizeof(struct mm_heap_s);
+  heapsize -= sizeof(struct mm_heap_s);
+
+  /* Allocate and create TLSF context */
+
+  DEBUGASSERT(heapsize > tlsf_size());
+  heap->mm_tlsf = tlsf_create(heapstart);
+  heapstart += tlsf_size();
+  heapsize -= tlsf_size();
+
+  /* Initialize the malloc mutex (to support one-at-
+   * a-time access to private data sets).
+   */
+
+  nxmutex_init(&heap->mm_lock);
+
+  /* Add the initial region of memory to the heap */
+
+  mm_addregion(heap, heapstart, heapsize);
+
+#if defined(CONFIG_FS_PROCFS) && !defined(CONFIG_FS_PROCFS_EXCLUDE_MEMINFO)
+#if defined(CONFIG_BUILD_FLAT) || defined(__KERNEL__)
+  heap->mm_procfs.name = name;
+  heap->mm_procfs.heap = heap;
+#  ifdef CONFIG_MM_BACKTRACE_DEFAULT
+  heap->mm_procfs.backtrace = true;
+#  endif
+  procfs_register_meminfo(&heap->mm_procfs);
+#endif
+#endif
+
+  return heap;
+}
+
+/****************************************************************************
+ * Name: mm_mallinfo
+ *
+ * Description:
+ *   mallinfo returns a copy of updated current heap information.
+ *
+ ****************************************************************************/
+
+int mm_mallinfo(FAR struct mm_heap_s *heap, FAR struct mallinfo *info)
+{
+#if CONFIG_MM_REGIONS > 1
+  int region;
+#else
+# define region 0
+#endif
+
+  DEBUGASSERT(info);
+
+  memset(info, 0, sizeof(struct mallinfo));
+
+  /* Visit each region */
+
+#if CONFIG_MM_REGIONS > 1
+  for (region = 0; region < heap->mm_nregions; region++)
+#endif
+    {
+      /* Retake the mutex for each region to reduce latencies */
+
+      DEBUGVERIFY(mm_lock(heap));
+      tlsf_walk_pool(heap->mm_heapstart[region],
+                     mallinfo_handler, info);
+      mm_unlock(heap);
+    }
+#undef region
+
+  /* Used bytes are derived from the total minus free bytes, so they also
+   * account for allocator overhead.
+   */
+
+  info->arena    = heap->mm_heapsize;
+  info->uordblks = info->arena - info->fordblks;
+
+  return OK;
+}
+
+#if CONFIG_MM_BACKTRACE >= 0
+/* Collect allocation statistics for a single task (identified by
+ * info->pid) by walking every pool and matching the bookkeeping record
+ * appended to each block.
+ */
+
+int mm_mallinfo_task(FAR struct mm_heap_s *heap,
+                     FAR struct mallinfo_task *info)
+{
+#if CONFIG_MM_REGIONS > 1
+  int region;
+#else
+#define region 0
+#endif
+
+  DEBUGASSERT(info);
+  info->uordblks = 0;
+  info->aordblks = 0;
+#if CONFIG_MM_REGIONS > 1
+  for (region = 0; region < heap->mm_nregions; region++)
+#endif
+    {
+      /* Retake the mutex for each region to reduce latencies */
+
+      DEBUGVERIFY(mm_lock(heap));
+      tlsf_walk_pool(heap->mm_heapstart[region],
+                     mallinfo_task_handler, info);
+      mm_unlock(heap);
+    }
+#undef region
+
+  return OK;
+}
+#endif
+
+/****************************************************************************
+ * Name: mm_memdump
+ *
+ * Description:
+ *   mm_memdump returns a memory info about specified pid of task/thread.
+ *   if pid equals -1, this function will dump all allocated node and output
+ *   backtrace for every allocated node for this heap, if pid equals -2, this
+ *   function will dump all free node for this heap, and if pid is greater
+ *   than or equal to 0, will dump pid allocated node and output backtrace.
+ ****************************************************************************/
+
+void mm_memdump(FAR struct mm_heap_s *heap, pid_t pid)
+{
+#if CONFIG_MM_REGIONS > 1
+  int region;
+#else
+# define region 0
+#endif
+  struct memdump_info_s info;
+
+  /* Print the table header matching the mode (used vs. free nodes) */
+
+  if (pid >= -1)
+    {
+      syslog(LOG_INFO, "Dump all used memory node info:\n");
+#if CONFIG_MM_BACKTRACE < 0
+      syslog(LOG_INFO, "%12s%*s\n", "Size", MM_PTR_FMT_WIDTH, "Address");
+#else
+      syslog(LOG_INFO, "%6s%12s%*s %s\n", "PID", "Size", MM_PTR_FMT_WIDTH,
+            "Address", "Backtrace");
+#endif
+    }
+  else
+    {
+      syslog(LOG_INFO, "Dump all free memory node info:\n");
+      syslog(LOG_INFO, "%12s%*s\n", "Size", MM_PTR_FMT_WIDTH, "Address");
+    }
+
+  info.blks = 0;
+  info.size = 0;
+  info.pid  = pid;
+#if CONFIG_MM_REGIONS > 1
+  for (region = 0; region < heap->mm_nregions; region++)
+#endif
+    {
+      DEBUGVERIFY(mm_lock(heap));
+      tlsf_walk_pool(heap->mm_heapstart[region],
+                     memdump_handler, &info);
+      mm_unlock(heap);
+    }
+#undef region
+
+  syslog(LOG_INFO, "%12s%12s\n", "Total Blks", "Total Size");
+  syslog(LOG_INFO, "%12d%12d\n", info.blks, info.size);
+}
+
+/****************************************************************************
+ * Name: mm_malloc_size
+ *
+ * Description:
+ *   Return the usable payload size of an allocation.  When backtrace
+ *   bookkeeping is enabled the hidden record appended by mm_malloc() is
+ *   excluded from the reported size.
+ *
+ ****************************************************************************/
+
+size_t mm_malloc_size(FAR void *mem)
+{
+#if CONFIG_MM_BACKTRACE >= 0
+  return tlsf_block_size(mem) - sizeof(struct memdump_backtrace_s);
+#else
+  return tlsf_block_size(mem);
+#endif
+}
+
+/****************************************************************************
+ * Name: mm_malloc
+ *
+ * Description:
+ *  Find the smallest chunk that satisfies the request. Take the memory from
+ *  that chunk, save the remaining, smaller chunk (if any).
+ *
+ *  8-byte alignment of the allocated data is assured.
+ *
+ ****************************************************************************/
+
+FAR void *mm_malloc(FAR struct mm_heap_s *heap, size_t size)
+{
+  FAR void *ret;
+
+  /* Free the delay list first */
+
+  free_delaylist(heap);
+
+  /* Allocate from the tlsf pool.  When backtrace bookkeeping is enabled,
+   * over-allocate to make room for the hidden record at the tail.
+   */
+
+  DEBUGVERIFY(mm_lock(heap));
+#if CONFIG_MM_BACKTRACE >= 0
+  ret = tlsf_malloc(heap->mm_tlsf, size +
+                    sizeof(struct memdump_backtrace_s));
+#else
+  ret = tlsf_malloc(heap->mm_tlsf, size);
+#endif
+
+  mm_unlock(heap);
+
+  if (ret)
+    {
+#if CONFIG_MM_BACKTRACE >= 0
+      FAR struct memdump_backtrace_s *dump = ret + mm_malloc_size(ret);
+
+      memdump_backtrace(heap, dump);
+#endif
+      kasan_unpoison(ret, mm_malloc_size(ret));
+    }
+
+  return ret;
+}
+
+/****************************************************************************
+ * Name: mm_memalign
+ *
+ * Description:
+ *   memalign requests more than enough space from malloc, finds a region
+ *   within that chunk that meets the alignment request and then frees any
+ *   leading or trailing space.
+ *
+ *   The alignment argument must be a power of two (not checked).  8-byte
+ *   alignment is guaranteed by normal malloc calls.
+ *
+ ****************************************************************************/
+
+FAR void *mm_memalign(FAR struct mm_heap_s *heap, size_t alignment,
+                      size_t size)
+{
+  FAR void *ret;
+
+  /* Free the delay list first */
+
+  free_delaylist(heap);
+
+  /* Allocate from the tlsf pool, reserving room for the hidden backtrace
+   * record when that bookkeeping is enabled.
+   */
+
+  DEBUGVERIFY(mm_lock(heap));
+#if CONFIG_MM_BACKTRACE >= 0
+  ret = tlsf_memalign(heap->mm_tlsf, alignment, size +
+                      sizeof(struct memdump_backtrace_s));
+#else
+  ret = tlsf_memalign(heap->mm_tlsf, alignment, size);
+#endif
+  mm_unlock(heap);
+
+  if (ret)
+    {
+#if CONFIG_MM_BACKTRACE >= 0
+      FAR struct memdump_backtrace_s *dump = ret + mm_malloc_size(ret);
+
+      memdump_backtrace(heap, dump);
+#endif
+      kasan_unpoison(ret, mm_malloc_size(ret));
+    }
+
+  return ret;
+}
+
+/****************************************************************************
+ * Name: mm_realloc
+ *
+ * Description:
+ *   If the reallocation is for less space, then:
+ *
+ *     (1) the current allocation is reduced in size
+ *     (2) the remainder at the end of the allocation is returned to the
+ *         free list.
+ *
+ *  If the request is for more space and the current allocation can be
+ *  extended, it will be extended by:
+ *
+ *     (1) Taking the additional space from the following free chunk, or
+ *     (2) Taking the additional space from the preceding free chunk.
+ *     (3) Or both
+ *
+ *  If the request is for more space but the current chunk cannot be
+ *  extended, then malloc a new buffer, copy the data into the new buffer,
+ *  and free the old buffer.
+ *
+ ****************************************************************************/
+
+FAR void *mm_realloc(FAR struct mm_heap_s *heap, FAR void *oldmem,
+                     size_t size)
+{
+  FAR void *newmem;
+
+#ifdef CONFIG_MM_KASAN
+  /* With KASan enabled, emulate realloc as malloc + copy + free —
+   * presumably because tlsf_realloc() would bypass the kasan
+   * poison/unpoison bookkeeping done in mm_malloc()/mm_free();
+   * NOTE(review): confirm this rationale.
+   */
+
+  if (oldmem == NULL)
+    {
+      return mm_malloc(heap, size);
+    }
+
+  if (size == 0)
+    {
+      mm_free(heap, oldmem);
+      return NULL;
+    }
+
+  newmem = mm_malloc(heap, size);
+  if (newmem)
+    {
+      /* Copy only as much as the old block actually held */
+
+      if (size > mm_malloc_size(oldmem))
+        {
+          size = mm_malloc_size(oldmem);
+        }
+
+      memcpy(newmem, oldmem, size);
+      mm_free(heap, oldmem);
+    }
+#else
+  /* Free the delay list first */
+
+  free_delaylist(heap);
+
+  /* Allocate from the tlsf pool */
+
+  DEBUGVERIFY(mm_lock(heap));
+#if CONFIG_MM_BACKTRACE >= 0
+  newmem = tlsf_realloc(heap->mm_tlsf, oldmem, size +
+                        sizeof(struct memdump_backtrace_s));
+#else
+  newmem = tlsf_realloc(heap->mm_tlsf, oldmem, size);
+#endif
+  mm_unlock(heap);
+
+#if CONFIG_MM_BACKTRACE >= 0
+  if (newmem)
+    {
+      /* Refresh the bookkeeping record at the (possibly moved) tail */
+
+      FAR struct memdump_backtrace_s *dump = newmem + mm_malloc_size(newmem);
+
+      memdump_backtrace(heap, dump);
+    }
+#endif
+
+#endif
+
+  return newmem;
+}
+
+/****************************************************************************
+ * Name: mm_uninitialize
+ *
+ * Description:
+ *   Uninitialize the selected heap data structures: unregister the procfs
+ *   meminfo entry (if registered), destroy the heap mutex, and tear down
+ *   the TLSF context.
+ *
+ * Input Parameters:
+ *   heap - The heap to uninitialize
+ *
+ * Returned Value:
+ *   None
+ *
+ ****************************************************************************/
+
+void mm_uninitialize(FAR struct mm_heap_s *heap)
+{
+#if defined(CONFIG_FS_PROCFS) && !defined(CONFIG_FS_PROCFS_EXCLUDE_MEMINFO)
+#  if defined(CONFIG_BUILD_FLAT) || defined(__KERNEL__)
+  procfs_unregister_meminfo(&heap->mm_procfs);
+#  endif
+#endif
+  nxmutex_destroy(&heap->mm_lock);
+
+  /* tlsf_destroy() takes the tlsf_t handle itself; the original code
+   * passed &heap->mm_tlsf (a tlsf_t *), handing the API the address of
+   * the handle rather than the handle.
+   */
+
+  tlsf_destroy(heap->mm_tlsf);
+}
+
+/****************************************************************************
+ * Name: mm_zalloc
+ *
+ * Description:
+ *   mm_zalloc calls mm_malloc, then zeroes out the allocated chunk.
+ *   Returns NULL if the underlying allocation fails.
+ *
+ ****************************************************************************/
+
+FAR void *mm_zalloc(FAR struct mm_heap_s *heap, size_t size)
+{
+  FAR void *alloc = mm_malloc(heap, size);
+
+  if (alloc)
+    {
+       memset(alloc, 0, size);
+    }
+
+  return alloc;
+}
diff --git a/tools/Directories.mk b/tools/Directories.mk
index 4192f107c7..4adc8d01a1 100644
--- a/tools/Directories.mk
+++ b/tools/Directories.mk
@@ -164,4 +164,11 @@ else
 CLEANDIRS += openamp
 endif
 
+# mm must participate in the context phase when the TLSF manager is
+# selected so the third-party tlsf sources are downloaded and patched
+# before compilation (see mm/tlsf/Make.defs).
+
+ifeq ($(CONFIG_MM_TLSF_MANAGER),y)
+KERNDEPDIRS += mm
+CONTEXTDIRS += mm
+else
+CLEANDIRS += mm
+endif
+
 CLEANDIRS += $(KERNDEPDIRS) $(USERDEPDIRS)