
furi memory management (#177)

* memory management calls now forwarded to the FreeRTOS heap

* memory management tests

* local target test compatibility

* rename heap.c to heap_4.c for the local target and explicitly init the heap in a single-threaded context

* rebase BlockLink_t struct

* check mutex in local heap

Co-authored-by: aanper <mail@s3f.ru>
DrZlo13 5 years ago
parent
commit
59740349fa
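
In short, this change reroutes the standard C allocator through the FreeRTOS heap: core/api-basic/memmgr.c defines malloc/free/realloc/calloc on top of pvPortMalloc/vPortFree, and the local (host) target gets its own heap_4.c plus an explicit single-threaded init. A minimal usage sketch (not part of this diff) of what application code sees once memmgr.c is linked in:

#include <stdlib.h>

// hypothetical application snippet: the standard allocator names are kept,
// but with core/api-basic/memmgr.c linked in they resolve to the FreeRTOS heap
void example_allocation(void) {
    char* buf = malloc(64); // forwarded to pvPortMalloc(64)
    if(buf != NULL) {
        free(buf); // forwarded to vPortFree(buf)
    }
}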

+ 1 - 0
applications/applications.mk

@@ -26,6 +26,7 @@ C_SOURCES	+= $(APP_DIR)/tests/furi_record_test.c
 C_SOURCES	+= $(APP_DIR)/tests/test_index.c
 C_SOURCES	+= $(APP_DIR)/tests/minunit_test.c
 C_SOURCES	+= $(APP_DIR)/tests/furi_valuemutex_test.c
+C_SOURCES	+= $(APP_DIR)/tests/furi_memmgr_test.c
 endif
 
 APP_EXAMPLE_BLINK ?= 0

+ 99 - 0
applications/tests/furi_memmgr_test.c

@@ -0,0 +1,99 @@
+#include "minunit.h"
+#include <stdlib.h>
+#include <string.h>
+#include <stdbool.h>
+// this test is not accurate, but gives a basic understanding
+// that memory management is working fine
+
+// do not include memmgr.h here
+// we also test that we are linking against stdlib
+extern size_t memmgr_get_free_heap(void);
+extern size_t memmgr_get_minimum_free_heap(void);
+
+// the current heap management implementation consumes:
+// X bytes of overhead after an allocation and 0 bytes after allocate-and-free,
+// where X = sizeof(void*) + sizeof(size_t); see BlockLink_t
+const size_t heap_overhead_max_size = sizeof(void*) + sizeof(size_t);
+
+bool heap_equal(size_t heap_size, size_t heap_size_old) {
+    // heap borders with overhead
+    const size_t heap_low = heap_size_old - heap_overhead_max_size;
+    const size_t heap_high = heap_size_old + heap_overhead_max_size;
+
+    // not exact, so we test against bounds widened by the overhead size
+    const bool result = ((heap_size >= heap_low) && (heap_size <= heap_high));
+
+    // debug allocation info
+    if(!result) {
+        printf("\n(hl: %zu) <= (p: %zu) <= (hh: %zu)\n", heap_low, heap_size, heap_high);
+    }
+
+    return result;
+}
+
+void test_furi_memmgr() {
+    size_t heap_size = 0;
+    size_t heap_size_old = 0;
+    const int alloc_size = 128;
+
+    void* ptr = NULL;
+    void* original_ptr = NULL;
+
+    // do not include furi memmgr.h case
+#ifdef FURI_MEMMGR_GUARD
+    mu_fail("do not link against furi memmgr.h");
+#endif
+
+    // allocate memory case
+    heap_size_old = memmgr_get_free_heap();
+    ptr = malloc(alloc_size);
+    heap_size = memmgr_get_free_heap();
+    mu_assert_pointers_not_eq(ptr, NULL);
+    mu_assert(heap_equal(heap_size, heap_size_old - alloc_size), "allocate failed");
+
+    // free memory case
+    heap_size_old = memmgr_get_free_heap();
+    free(ptr);
+    ptr = NULL;
+    heap_size = memmgr_get_free_heap();
+    mu_assert(heap_equal(heap_size, heap_size_old + alloc_size), "free failed");
+
+    // reallocate memory case
+
+    // get filled array with some data
+    original_ptr = malloc(alloc_size);
+    mu_assert_pointers_not_eq(original_ptr, NULL);
+    for(int i = 0; i < alloc_size; i++) {
+        ((unsigned char*)original_ptr)[i] = i;
+    }
+
+    // malloc array and copy data
+    ptr = malloc(alloc_size);
+    mu_assert_pointers_not_eq(ptr, NULL);
+    memcpy(ptr, original_ptr, alloc_size);
+
+    // reallocate array
+    heap_size_old = memmgr_get_free_heap();
+    ptr = realloc(ptr, alloc_size * 2);
+    heap_size = memmgr_get_free_heap();
+    mu_assert(heap_equal(heap_size, heap_size_old - alloc_size), "reallocate failed");
+    mu_assert_int_eq(memcmp(original_ptr, ptr, alloc_size), 0);
+    free(original_ptr);
+    free(ptr);
+
+    // allocate and zero-initialize array (calloc)
+    original_ptr = malloc(alloc_size);
+    mu_assert_pointers_not_eq(original_ptr, NULL);
+
+    for(int i = 0; i < alloc_size; i++) {
+        ((unsigned char*)original_ptr)[i] = 0;
+    }
+    heap_size_old = memmgr_get_free_heap();
+    ptr = calloc(1, alloc_size);
+    heap_size = memmgr_get_free_heap();
+    mu_assert(heap_equal(heap_size, heap_size_old - alloc_size), "callocate failed");
+    mu_assert_int_eq(memcmp(original_ptr, ptr, alloc_size), 0);
+
+    free(original_ptr);
+    free(ptr);
+}
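
The test above exercises malloc, free, realloc and calloc, but never calls memmgr_get_minimum_free_heap() even though it is declared. A sketch (not part of this commit) of how the low-water mark could be checked as well; heap_4 only ever lowers the minimum, so it can never exceed an earlier reading:

// sketch only: exercising the low-water-mark metric declared above
void test_furi_memmgr_minimum_heap_sketch(void) {
    const size_t min_before = memmgr_get_minimum_free_heap();

    void* p = malloc(1024); // may push the low-water mark further down
    mu_assert_pointers_not_eq(p, NULL);

    const size_t min_after = memmgr_get_minimum_free_heap();
    free(p);

    // the minimum never recovers after free(); it can only stay or decrease
    mu_assert(min_after <= min_before, "minimum free heap should never grow");
}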

+ 9 - 0
applications/tests/minunit_test.c

@@ -14,6 +14,7 @@ bool test_furi_mute_algorithm();
 void test_furi_create_open();
 void test_furi_valuemutex();
 void test_furi_concurrent_access();
+void test_furi_memmgr();
 
 static int foo = 0;
 
@@ -54,6 +55,12 @@ MU_TEST(mu_test_furi_concurrent_access) {
     test_furi_concurrent_access();
 }
 
+MU_TEST(mu_test_furi_memmgr) {
+    // this test is not accurate, but gives a basic understanding
+    // that memory management is working fine
+    test_furi_memmgr();
+}
+
 MU_TEST_SUITE(test_suite) {
     MU_SUITE_CONFIGURE(&test_setup, &test_teardown);
 
@@ -67,6 +74,8 @@ MU_TEST_SUITE(test_suite) {
     MU_RUN_TEST(mu_test_furi_create_open);
     MU_RUN_TEST(mu_test_furi_valuemutex);
     MU_RUN_TEST(mu_test_furi_concurrent_access);
+
+    MU_RUN_TEST(mu_test_furi_memmgr);
 }
 
 int run_minunit() {

+ 51 - 0
core/api-basic/memmgr.c

@@ -0,0 +1,51 @@
+#include "memmgr.h"
+#include <string.h>
+
+extern void* pvPortMalloc(size_t xSize);
+extern void vPortFree(void* pv);
+extern size_t xPortGetFreeHeapSize(void);
+extern size_t xPortGetMinimumEverFreeHeapSize(void);
+
+void* malloc(size_t size) {
+    return pvPortMalloc(size);
+}
+
+void free(void* ptr) {
+    vPortFree(ptr);
+}
+
+void* realloc(void* ptr, size_t size) {
+    if(size == 0) {
+        vPortFree(ptr);
+        return NULL;
+    }
+
+    void* p;
+    p = pvPortMalloc(size);
+    if(p) {
+        // TODO: implement a proper realloc; copying `size` bytes can read past the old block,
+        // but it does the job in our case
+        if(ptr != NULL) {
+            memcpy(p, ptr, size);
+            vPortFree(ptr);
+        }
+    }
+    return p;
+}
+
+void* calloc(size_t count, size_t size) {
+    void* ptr = pvPortMalloc(count * size);
+    if(ptr) {
+        // zero the memory
+        memset(ptr, 0, count * size);
+    }
+    return ptr;
+}
+
+size_t memmgr_get_free_heap(void) {
+    return xPortGetFreeHeapSize();
+}
+
+size_t memmgr_get_minimum_free_heap(void) {
+    return xPortGetMinimumEverFreeHeapSize();
+}
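
Note that realloc above copies `size` bytes out of the old block, which over-reads whenever an allocation grows, and calloc multiplies count * size without an overflow check. A sketch of a safer realloc, assuming a hypothetical helper xPortGetAllocatedBlockSize() that would expose the old block's usable size (heap_4 keeps that size in the BlockLink_t header just before the payload, but does not export it in this commit):

#include <string.h>

extern void* pvPortMalloc(size_t xSize);
extern void vPortFree(void* pv);
// hypothetical helper, not present in this commit
extern size_t xPortGetAllocatedBlockSize(void* pv);

void* realloc_sketch(void* ptr, size_t size) {
    if(size == 0) {
        vPortFree(ptr);
        return NULL;
    }

    void* p = pvPortMalloc(size);
    if(p != NULL && ptr != NULL) {
        // copy only as many bytes as the old block actually holds
        const size_t old_size = xPortGetAllocatedBlockSize(ptr);
        memcpy(p, ptr, old_size < size ? old_size : size);
        vPortFree(ptr);
    }
    return p;
}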

+ 13 - 0
core/api-basic/memmgr.h

@@ -0,0 +1,13 @@
+#pragma once
+#include <stddef.h>
+
+// define for test case "link against furi memmgr"
+#define FURI_MEMMGR_GUARD 1
+
+void* malloc(size_t size);
+void free(void* ptr);
+void* realloc(void* ptr, size_t size);
+void* calloc(size_t count, size_t size);
+
+size_t memmgr_get_free_heap(void);
+size_t memmgr_get_minimum_free_heap(void);

+ 3 - 1
core/flipper_v2.h

@@ -4,4 +4,6 @@
 //#include "api-basic/flapp.h"
 #include "cmsis_os2.h"
 #include "api-basic/valuemutex.h"
-//#include "api-basic/pubsub.h"
+//#include "api-basic/pubsub.h"
+
+#include "api-basic/memmgr.h"

+ 389 - 0
firmware/targets/local/Src/heap_4.c

@@ -0,0 +1,389 @@
+/*
+ * FreeRTOS Kernel V10.2.1
+ * Copyright (C) 2019 Amazon.com, Inc. or its affiliates.  All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * http://www.FreeRTOS.org
+ * http://aws.amazon.com/freertos
+ *
+ * 1 tab == 4 spaces!
+ */
+
+/*
+ * A sample implementation of pvPortMalloc() and vPortFree() that combines
+ * (coalescences) adjacent memory blocks as they are freed, and in so doing
+ * limits memory fragmentation.
+ *
+ * See heap_1.c, heap_2.c and heap_3.c for alternative implementations, and the
+ * memory management pages of http://www.FreeRTOS.org for more information.
+ */
+#include "heap.h"
+
+osMutexId_t heap_managment_mutex = NULL;
+
+/* Block sizes must not get too small. */
+#define heapMINIMUM_BLOCK_SIZE ((size_t)(xHeapStructSize << 1))
+
+/* Assumes 8bit bytes! */
+#define heapBITS_PER_BYTE ((size_t)8)
+
+/* Allocate the memory for the heap. */
+#if(configAPPLICATION_ALLOCATED_HEAP == 1)
+/* The application writer has already defined the array used for the RTOS
+	heap - probably so it can be placed in a special segment or address. */
+extern uint8_t ucHeap[configTOTAL_HEAP_SIZE];
+#else
+static uint8_t ucHeap[configTOTAL_HEAP_SIZE];
+#endif /* configAPPLICATION_ALLOCATED_HEAP */
+
+/* Define the linked list structure.  This is used to link free blocks in order
+of their memory address. */
+typedef struct A_BLOCK_LINK {
+    struct A_BLOCK_LINK* pxNextFreeBlock; /*<< The next free block in the list. */
+    size_t xBlockSize; /*<< The size of the free block. */
+} BlockLink_t;
+/*-----------------------------------------------------------*/
+
+/*
+ * Inserts a block of memory that is being freed into the correct position in
+ * the list of free memory blocks.  The block being freed will be merged with
+ * the block in front it and/or the block behind it if the memory blocks are
+ * adjacent to each other.
+ */
+static void prvInsertBlockIntoFreeList(BlockLink_t* pxBlockToInsert);
+
+// this function is not thread-safe, so it must be called in a single-threaded context
+bool prvHeapInit(void);
+
+/*-----------------------------------------------------------*/
+
+/* The size of the structure placed at the beginning of each allocated memory
+block must be correctly byte aligned. */
+static const size_t xHeapStructSize = (sizeof(BlockLink_t) + ((size_t)(portBYTE_ALIGNMENT - 1))) &
+                                      ~((size_t)portBYTE_ALIGNMENT_MASK);
+
+/* Create a couple of list links to mark the start and end of the list. */
+static BlockLink_t xStart, *pxEnd = NULL;
+
+/* Keeps track of the number of free bytes remaining, but says nothing about
+fragmentation. */
+static size_t xFreeBytesRemaining = 0U;
+static size_t xMinimumEverFreeBytesRemaining = 0U;
+
+/* Gets set to the top bit of a size_t type.  When this bit in the xBlockSize
+member of a BlockLink_t structure is set then the block belongs to the
+application.  When the bit is free the block is still part of the free heap
+space. */
+static size_t xBlockAllocatedBit = 0;
+
+/*-----------------------------------------------------------*/
+
+void* pvPortMalloc(size_t xWantedSize) {
+    BlockLink_t *pxBlock, *pxPreviousBlock, *pxNewBlockLink;
+    void* pvReturn = NULL;
+
+    acquire_memalloc_mutex();
+    {
+        /* If this is the first call to malloc then the heap will require
+		initialisation to setup the list of free blocks. */
+        if(pxEnd == NULL) {
+            prvHeapInit();
+        } else {
+            mtCOVERAGE_TEST_MARKER();
+        }
+
+        /* Check the requested block size is not so large that the top bit is
+		set.  The top bit of the block size member of the BlockLink_t structure
+		is used to determine who owns the block - the application or the
+		kernel, so it must be free. */
+        if((xWantedSize & xBlockAllocatedBit) == 0) {
+            /* The wanted size is increased so it can contain a BlockLink_t
+			structure in addition to the requested amount of bytes. */
+            if(xWantedSize > 0) {
+                xWantedSize += xHeapStructSize;
+
+                /* Ensure that blocks are always aligned to the required number
+				of bytes. */
+                if((xWantedSize & portBYTE_ALIGNMENT_MASK) != 0x00) {
+                    /* Byte alignment required. */
+                    xWantedSize += (portBYTE_ALIGNMENT - (xWantedSize & portBYTE_ALIGNMENT_MASK));
+                    configASSERT((xWantedSize & portBYTE_ALIGNMENT_MASK) == 0);
+                } else {
+                    mtCOVERAGE_TEST_MARKER();
+                }
+            } else {
+                mtCOVERAGE_TEST_MARKER();
+            }
+
+            if((xWantedSize > 0) && (xWantedSize <= xFreeBytesRemaining)) {
+                /* Traverse the list from the start	(lowest address) block until
+				one	of adequate size is found. */
+                pxPreviousBlock = &xStart;
+                pxBlock = xStart.pxNextFreeBlock;
+                while((pxBlock->xBlockSize < xWantedSize) && (pxBlock->pxNextFreeBlock != NULL)) {
+                    pxPreviousBlock = pxBlock;
+                    pxBlock = pxBlock->pxNextFreeBlock;
+                }
+
+                /* If the end marker was reached then a block of adequate size
+				was	not found. */
+                if(pxBlock != pxEnd) {
+                    /* Return the memory space pointed to - jumping over the
+					BlockLink_t structure at its start. */
+                    pvReturn =
+                        (void*)(((uint8_t*)pxPreviousBlock->pxNextFreeBlock) + xHeapStructSize);
+
+                    /* This block is being returned for use so must be taken out
+					of the list of free blocks. */
+                    pxPreviousBlock->pxNextFreeBlock = pxBlock->pxNextFreeBlock;
+
+                    /* If the block is larger than required it can be split into
+					two. */
+                    if((pxBlock->xBlockSize - xWantedSize) > heapMINIMUM_BLOCK_SIZE) {
+                        /* This block is to be split into two.  Create a new
+						block following the number of bytes requested. The void
+						cast is used to prevent byte alignment warnings from the
+						compiler. */
+                        pxNewBlockLink = (void*)(((uint8_t*)pxBlock) + xWantedSize);
+                        configASSERT((((size_t)pxNewBlockLink) & portBYTE_ALIGNMENT_MASK) == 0);
+
+                        /* Calculate the sizes of two blocks split from the
+						single block. */
+                        pxNewBlockLink->xBlockSize = pxBlock->xBlockSize - xWantedSize;
+                        pxBlock->xBlockSize = xWantedSize;
+
+                        /* Insert the new block into the list of free blocks. */
+                        prvInsertBlockIntoFreeList(pxNewBlockLink);
+                    } else {
+                        mtCOVERAGE_TEST_MARKER();
+                    }
+
+                    xFreeBytesRemaining -= pxBlock->xBlockSize;
+
+                    if(xFreeBytesRemaining < xMinimumEverFreeBytesRemaining) {
+                        xMinimumEverFreeBytesRemaining = xFreeBytesRemaining;
+                    } else {
+                        mtCOVERAGE_TEST_MARKER();
+                    }
+
+                    /* The block is being returned - it is allocated and owned
+					by the application and has no "next" block. */
+                    pxBlock->xBlockSize |= xBlockAllocatedBit;
+                    pxBlock->pxNextFreeBlock = NULL;
+                } else {
+                    mtCOVERAGE_TEST_MARKER();
+                }
+            } else {
+                mtCOVERAGE_TEST_MARKER();
+            }
+        } else {
+            mtCOVERAGE_TEST_MARKER();
+        }
+
+        traceMALLOC(pvReturn, xWantedSize);
+    }
+    release_memalloc_mutex();
+
+#if(configUSE_MALLOC_FAILED_HOOK == 1)
+    {
+        if(pvReturn == NULL) {
+            extern void vApplicationMallocFailedHook(void);
+            vApplicationMallocFailedHook();
+        } else {
+            mtCOVERAGE_TEST_MARKER();
+        }
+    }
+#endif
+
+    configASSERT((((size_t)pvReturn) & (size_t)portBYTE_ALIGNMENT_MASK) == 0);
+    return pvReturn;
+}
+/*-----------------------------------------------------------*/
+
+void vPortFree(void* pv) {
+    uint8_t* puc = (uint8_t*)pv;
+    BlockLink_t* pxLink;
+
+    if(pv != NULL) {
+        /* The memory being freed will have a BlockLink_t structure immediately
+		before it. */
+        puc -= xHeapStructSize;
+
+        /* This casting is to keep the compiler from issuing warnings. */
+        pxLink = (void*)puc;
+
+        /* Check the block is actually allocated. */
+        configASSERT((pxLink->xBlockSize & xBlockAllocatedBit) != 0);
+        configASSERT(pxLink->pxNextFreeBlock == NULL);
+
+        if((pxLink->xBlockSize & xBlockAllocatedBit) != 0) {
+            if(pxLink->pxNextFreeBlock == NULL) {
+                /* The block is being returned to the heap - it is no longer
+				allocated. */
+                pxLink->xBlockSize &= ~xBlockAllocatedBit;
+
+                acquire_memalloc_mutex();
+                {
+                    /* Add this block to the list of free blocks. */
+                    xFreeBytesRemaining += pxLink->xBlockSize;
+                    traceFREE(pv, pxLink->xBlockSize);
+                    prvInsertBlockIntoFreeList(((BlockLink_t*)pxLink));
+                }
+                release_memalloc_mutex();
+            } else {
+                mtCOVERAGE_TEST_MARKER();
+            }
+        } else {
+            mtCOVERAGE_TEST_MARKER();
+        }
+    }
+}
+/*-----------------------------------------------------------*/
+
+size_t xPortGetFreeHeapSize(void) {
+    return xFreeBytesRemaining;
+}
+/*-----------------------------------------------------------*/
+
+size_t xPortGetMinimumEverFreeHeapSize(void) {
+    return xMinimumEverFreeBytesRemaining;
+}
+/*-----------------------------------------------------------*/
+
+void vPortInitialiseBlocks(void) {
+    /* This just exists to keep the linker quiet. */
+}
+/*-----------------------------------------------------------*/
+
+bool prvHeapInit(void) {
+    BlockLink_t* pxFirstFreeBlock;
+    uint8_t* pucAlignedHeap;
+    size_t uxAddress;
+    size_t xTotalHeapSize = configTOTAL_HEAP_SIZE;
+
+    /* Ensure the heap starts on a correctly aligned boundary. */
+    uxAddress = (size_t)ucHeap;
+
+    if((uxAddress & portBYTE_ALIGNMENT_MASK) != 0) {
+        uxAddress += (portBYTE_ALIGNMENT - 1);
+        uxAddress &= ~((size_t)portBYTE_ALIGNMENT_MASK);
+        xTotalHeapSize -= uxAddress - (size_t)ucHeap;
+    }
+
+    pucAlignedHeap = (uint8_t*)uxAddress;
+
+    /* xStart is used to hold a pointer to the first item in the list of free
+	blocks.  The void cast is used to prevent compiler warnings. */
+    xStart.pxNextFreeBlock = (void*)pucAlignedHeap;
+    xStart.xBlockSize = (size_t)0;
+
+    /* pxEnd is used to mark the end of the list of free blocks and is inserted
+	at the end of the heap space. */
+    uxAddress = ((size_t)pucAlignedHeap) + xTotalHeapSize;
+    uxAddress -= xHeapStructSize;
+    uxAddress &= ~((size_t)portBYTE_ALIGNMENT_MASK);
+    pxEnd = (void*)uxAddress;
+    pxEnd->xBlockSize = 0;
+    pxEnd->pxNextFreeBlock = NULL;
+
+    /* To start with there is a single free block that is sized to take up the
+	entire heap space, minus the space taken by pxEnd. */
+    pxFirstFreeBlock = (void*)pucAlignedHeap;
+    pxFirstFreeBlock->xBlockSize = uxAddress - (size_t)pxFirstFreeBlock;
+    pxFirstFreeBlock->pxNextFreeBlock = pxEnd;
+
+    /* Only one block exists - and it covers the entire usable heap space. */
+    xMinimumEverFreeBytesRemaining = pxFirstFreeBlock->xBlockSize;
+    xFreeBytesRemaining = pxFirstFreeBlock->xBlockSize;
+
+    /* Work out the position of the top bit in a size_t variable. */
+    xBlockAllocatedBit = ((size_t)1) << ((sizeof(size_t) * heapBITS_PER_BYTE) - 1);
+
+    // the heap is now usable, so we can create the heap management mutex
+    const osMutexAttr_t heap_managment_mutext_attr = {
+        .name = NULL, .attr_bits = 0, .cb_mem = NULL, .cb_size = 0U};
+
+    heap_managment_mutex = osMutexNew(&heap_managment_mutext_attr);
+
+    return heap_managment_mutex != NULL;
+}
+/*-----------------------------------------------------------*/
+
+static void prvInsertBlockIntoFreeList(BlockLink_t* pxBlockToInsert) {
+    BlockLink_t* pxIterator;
+    uint8_t* puc;
+
+    /* Iterate through the list until a block is found that has a higher address
+	than the block being inserted. */
+    for(pxIterator = &xStart; pxIterator->pxNextFreeBlock < pxBlockToInsert;
+        pxIterator = pxIterator->pxNextFreeBlock) {
+        /* Nothing to do here, just iterate to the right position. */
+    }
+
+    /* Do the block being inserted, and the block it is being inserted after
+	make a contiguous block of memory? */
+    puc = (uint8_t*)pxIterator;
+    if((puc + pxIterator->xBlockSize) == (uint8_t*)pxBlockToInsert) {
+        pxIterator->xBlockSize += pxBlockToInsert->xBlockSize;
+        pxBlockToInsert = pxIterator;
+    } else {
+        mtCOVERAGE_TEST_MARKER();
+    }
+
+    /* Do the block being inserted, and the block it is being inserted before
+	make a contiguous block of memory? */
+    puc = (uint8_t*)pxBlockToInsert;
+    if((puc + pxBlockToInsert->xBlockSize) == (uint8_t*)pxIterator->pxNextFreeBlock) {
+        if(pxIterator->pxNextFreeBlock != pxEnd) {
+            /* Form one big block from the two blocks. */
+            pxBlockToInsert->xBlockSize += pxIterator->pxNextFreeBlock->xBlockSize;
+            pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock->pxNextFreeBlock;
+        } else {
+            pxBlockToInsert->pxNextFreeBlock = pxEnd;
+        }
+    } else {
+        pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock;
+    }
+
+    /* If the block being inserted plugged a gap, so was merged with the block
+	before and the block after, then its pxNextFreeBlock pointer will have
+	already been set, and should not be set here as that would make it point
+	to itself. */
+    if(pxIterator != pxBlockToInsert) {
+        pxIterator->pxNextFreeBlock = pxBlockToInsert;
+    } else {
+        mtCOVERAGE_TEST_MARKER();
+    }
+}
+
+/*
+During the first run (heap init) the mutex does not yet exist and prvHeapInit
+itself is not thread-safe; once init completes the mutex is created, or init fails.
+*/
+void acquire_memalloc_mutex() {
+    if(heap_managment_mutex != NULL) {
+        osMutexAcquire(heap_managment_mutex, osWaitForever);
+    }
+}
+
+void release_memalloc_mutex() {
+    if(heap_managment_mutex != NULL) {
+        osMutexRelease(heap_managment_mutex);
+    }
+}

+ 8 - 0
firmware/targets/local/Src/main.c

@@ -1,3 +1,6 @@
+#include "heap.h"
+#include "errno.h"
+
 /*
 Flipper devices inc.
 
@@ -7,5 +10,10 @@ Local fw build entry point.
 int app();
 
 int main() {
+    // this function is not thread-safe, so it must be called in a single-threaded context
+    if(!prvHeapInit()) {
+        return ENOMEM;
+    }
+
     return app();
 }

+ 37 - 0
firmware/targets/local/fatfs/heap.h

@@ -0,0 +1,37 @@
+#include <stdbool.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <cmsis_os.h>
+
+#define configTOTAL_HEAP_SIZE ((size_t)(8192 * 16))
+#define configAPPLICATION_ALLOCATED_HEAP 0
+#define portBYTE_ALIGNMENT 8
+
+#if portBYTE_ALIGNMENT == 8
+#define portBYTE_ALIGNMENT_MASK (0x0007)
+#endif
+
+/* No test marker by default. */
+#ifndef mtCOVERAGE_TEST_MARKER
+#define mtCOVERAGE_TEST_MARKER()
+#endif
+
+/* No tracing by default. */
+#ifndef traceMALLOC
+#define traceMALLOC(pvReturn, xWantedSize)
+#endif
+
+/* No tracing by default. */
+#ifndef traceFREE
+#define traceFREE(pvReturn, xBlockSize)
+#endif
+
+/* No assert by default. */
+#ifndef configASSERT
+#define configASSERT(var)
+#endif
+
+bool prvHeapInit(void);
+
+void acquire_memalloc_mutex();
+void release_memalloc_mutex();
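
For reference, the heap configured above is 8192 * 16 = 131072 bytes (128 KiB) with 8-byte alignment. A small compile-time sanity check (sketch only, not part of the commit) could pin these values down:

#include "heap.h"

// sketch: verify the local-target heap configuration at compile time
_Static_assert(configTOTAL_HEAP_SIZE == 131072, "local heap is 8192 * 16 = 128 KiB");
_Static_assert(portBYTE_ALIGNMENT_MASK == portBYTE_ALIGNMENT - 1, "mask must match the alignment");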

+ 3 - 0
firmware/targets/local/target.mk

@@ -15,5 +15,8 @@ LDFLAGS += -pthread
 CFLAGS += -I$(TARGET_DIR)/fatfs
 C_SOURCES += $(TARGET_DIR)/fatfs/syscall.c
 
+# memory manager
+C_SOURCES += $(TARGET_DIR)/Src/heap_4.c
+
 run: all
 	$(OBJ_DIR)/$(PROJECT).elf