// gc.c — MicroPython garbage collector (extraction/pagination residue removed).
  1. /*
  2. * This file is part of the MicroPython project, http://micropython.org/
  3. *
  4. * The MIT License (MIT)
  5. *
  6. * Copyright (c) 2013, 2014 Damien P. George
  7. * Copyright (c) 2014 Paul Sokolovsky
  8. *
  9. * Permission is hereby granted, free of charge, to any person obtaining a copy
  10. * of this software and associated documentation files (the "Software"), to deal
  11. * in the Software without restriction, including without limitation the rights
  12. * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  13. * copies of the Software, and to permit persons to whom the Software is
  14. * furnished to do so, subject to the following conditions:
  15. *
  16. * The above copyright notice and this permission notice shall be included in
  17. * all copies or substantial portions of the Software.
  18. *
  19. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  20. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  21. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  22. * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  23. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  24. * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  25. * THE SOFTWARE.
  26. */
  27. #include <assert.h>
  28. #include <stdio.h>
  29. #include <string.h>
  30. #include "py/gc.h"
  31. #include "py/runtime.h"
  32. #if MICROPY_DEBUG_VALGRIND
  33. #include <valgrind/memcheck.h>
  34. #endif
#if MICROPY_ENABLE_GC

#if MICROPY_DEBUG_VERBOSE // print debugging info
#define DEBUG_PRINT (1)
#define DEBUG_printf DEBUG_printf
#else // don't print debugging info
#define DEBUG_PRINT (0)
#define DEBUG_printf(...) (void)0
#endif

// make this 1 to dump the heap each time it changes
#define EXTENSIVE_HEAP_PROFILING (0)

// make this 1 to zero out swept memory to more eagerly
// detect untraced object still in use
#define CLEAR_ON_SWEEP (0)

#define WORDS_PER_BLOCK ((MICROPY_BYTES_PER_GC_BLOCK) / MP_BYTES_PER_OBJ_WORD)
#define BYTES_PER_BLOCK (MICROPY_BYTES_PER_GC_BLOCK)

// ATB = allocation table byte
// 0b00 = FREE -- free block
// 0b01 = HEAD -- head of a chain of blocks
// 0b10 = TAIL -- in the tail of a chain of blocks
// 0b11 = MARK -- marked head block
#define AT_FREE (0)
#define AT_HEAD (1)
#define AT_TAIL (2)
#define AT_MARK (3)

// Each allocation table byte packs the 2-bit state of 4 consecutive blocks.
#define BLOCKS_PER_ATB (4)
#define ATB_MASK_0 (0x03)
#define ATB_MASK_1 (0x0c)
#define ATB_MASK_2 (0x30)
#define ATB_MASK_3 (0xc0)
#define ATB_0_IS_FREE(a) (((a) & ATB_MASK_0) == 0)
#define ATB_1_IS_FREE(a) (((a) & ATB_MASK_1) == 0)
#define ATB_2_IS_FREE(a) (((a) & ATB_MASK_2) == 0)
#define ATB_3_IS_FREE(a) (((a) & ATB_MASK_3) == 0)

#if MICROPY_GC_SPLIT_HEAP
#define NEXT_AREA(area) ((area)->next)
#else
#define NEXT_AREA(area) (NULL)
#endif

// Bit offset of a block's 2-bit entry within its allocation table byte.
#define BLOCK_SHIFT(block) (2 * ((block) & (BLOCKS_PER_ATB - 1)))
#define ATB_GET_KIND(area, block) (((area)->gc_alloc_table_start[(block) / BLOCKS_PER_ATB] >> BLOCK_SHIFT(block)) & 3)
#define ATB_ANY_TO_FREE(area, block) do { area->gc_alloc_table_start[(block) / BLOCKS_PER_ATB] &= (~(AT_MARK << BLOCK_SHIFT(block))); } while (0)
#define ATB_FREE_TO_HEAD(area, block) do { area->gc_alloc_table_start[(block) / BLOCKS_PER_ATB] |= (AT_HEAD << BLOCK_SHIFT(block)); } while (0)
#define ATB_FREE_TO_TAIL(area, block) do { area->gc_alloc_table_start[(block) / BLOCKS_PER_ATB] |= (AT_TAIL << BLOCK_SHIFT(block)); } while (0)
#define ATB_HEAD_TO_MARK(area, block) do { area->gc_alloc_table_start[(block) / BLOCKS_PER_ATB] |= (AT_MARK << BLOCK_SHIFT(block)); } while (0)
#define ATB_MARK_TO_HEAD(area, block) do { area->gc_alloc_table_start[(block) / BLOCKS_PER_ATB] &= (~(AT_TAIL << BLOCK_SHIFT(block))); } while (0)

// Map between a pointer into an area's pool and its block index.
#define BLOCK_FROM_PTR(area, ptr) (((byte *)(ptr) - area->gc_pool_start) / BYTES_PER_BLOCK)
#define PTR_FROM_BLOCK(area, block) (((block) * BYTES_PER_BLOCK + (uintptr_t)area->gc_pool_start))

// After the ATB, there must be a byte filled with AT_FREE so that gc_mark_tree
// cannot erroneously conclude that a block extends past the end of the GC heap
// due to bit patterns in the FTB (or first block, if finalizers are disabled)
// being interpreted as AT_TAIL.
#define ALLOC_TABLE_GAP_BYTE (1)

#if MICROPY_ENABLE_FINALISER
// FTB = finaliser table byte
// if set, then the corresponding block may have a finaliser
#define BLOCKS_PER_FTB (8)
#define FTB_GET(area, block) ((area->gc_finaliser_table_start[(block) / BLOCKS_PER_FTB] >> ((block) & 7)) & 1)
#define FTB_SET(area, block) do { area->gc_finaliser_table_start[(block) / BLOCKS_PER_FTB] |= (1 << ((block) & 7)); } while (0)
#define FTB_CLEAR(area, block) do { area->gc_finaliser_table_start[(block) / BLOCKS_PER_FTB] &= (~(1 << ((block) & 7))); } while (0)
#endif

#if MICROPY_PY_THREAD && !MICROPY_PY_THREAD_GIL
// Without the GIL, GC state must be protected by an explicit mutex.
#define GC_ENTER() mp_thread_mutex_lock(&MP_STATE_MEM(gc_mutex), 1)
#define GC_EXIT() mp_thread_mutex_unlock(&MP_STATE_MEM(gc_mutex))
#else
#define GC_ENTER()
#define GC_EXIT()
#endif
// TODO waste less memory; currently requires that all entries in alloc_table have a corresponding block in pool
static void gc_setup_area(mp_state_mem_area_t *area, void *start, void *end) {
    // Carve the region [start, end) into: allocation table, gap byte,
    // (optional) finaliser table, then the pool of blocks itself.
    // calculate parameters for GC (T=total, A=alloc table, F=finaliser table, P=pool; all in bytes):
    // T = A + F + P
    // F = A * BLOCKS_PER_ATB / BLOCKS_PER_FTB
    // P = A * BLOCKS_PER_ATB * BYTES_PER_BLOCK
    // => T = A * (1 + BLOCKS_PER_ATB / BLOCKS_PER_FTB + BLOCKS_PER_ATB * BYTES_PER_BLOCK)
    size_t total_byte_len = (byte *)end - (byte *)start;
    #if MICROPY_ENABLE_FINALISER
    // Work in bits so the FTB fraction (BLOCKS_PER_ATB / BLOCKS_PER_FTB)
    // doesn't truncate to zero in integer division.
    area->gc_alloc_table_byte_len = (total_byte_len - ALLOC_TABLE_GAP_BYTE)
        * MP_BITS_PER_BYTE
        / (
            MP_BITS_PER_BYTE
            + MP_BITS_PER_BYTE * BLOCKS_PER_ATB / BLOCKS_PER_FTB
            + MP_BITS_PER_BYTE * BLOCKS_PER_ATB * BYTES_PER_BLOCK
        );
    #else
    area->gc_alloc_table_byte_len = (total_byte_len - ALLOC_TABLE_GAP_BYTE) / (1 + MP_BITS_PER_BYTE / 2 * BYTES_PER_BLOCK);
    #endif
    area->gc_alloc_table_start = (byte *)start;
    #if MICROPY_ENABLE_FINALISER
    size_t gc_finaliser_table_byte_len = (area->gc_alloc_table_byte_len * BLOCKS_PER_ATB + BLOCKS_PER_FTB - 1) / BLOCKS_PER_FTB;
    // The finaliser table starts right after the ATB plus the AT_FREE gap byte.
    area->gc_finaliser_table_start = area->gc_alloc_table_start + area->gc_alloc_table_byte_len + ALLOC_TABLE_GAP_BYTE;
    #endif
    size_t gc_pool_block_len = area->gc_alloc_table_byte_len * BLOCKS_PER_ATB;
    // The pool is placed flush against 'end' so it stays block-aligned.
    area->gc_pool_start = (byte *)end - gc_pool_block_len * BYTES_PER_BLOCK;
    area->gc_pool_end = end;
    #if MICROPY_ENABLE_FINALISER
    assert(area->gc_pool_start >= area->gc_finaliser_table_start + gc_finaliser_table_byte_len);
    #endif
    #if MICROPY_ENABLE_FINALISER
    // clear ATB's and FTB's
    memset(area->gc_alloc_table_start, 0, gc_finaliser_table_byte_len + area->gc_alloc_table_byte_len + ALLOC_TABLE_GAP_BYTE);
    #else
    // clear ATB's
    memset(area->gc_alloc_table_start, 0, area->gc_alloc_table_byte_len + ALLOC_TABLE_GAP_BYTE);
    #endif
    area->gc_last_free_atb_index = 0;
    area->gc_last_used_block = 0;
    #if MICROPY_GC_SPLIT_HEAP
    area->next = NULL;
    #endif
    DEBUG_printf("GC layout:\n");
    DEBUG_printf(" alloc table at %p, length " UINT_FMT " bytes, "
        UINT_FMT " blocks\n",
        area->gc_alloc_table_start, area->gc_alloc_table_byte_len,
        area->gc_alloc_table_byte_len * BLOCKS_PER_ATB);
    #if MICROPY_ENABLE_FINALISER
    DEBUG_printf(" finaliser table at %p, length " UINT_FMT " bytes, "
        UINT_FMT " blocks\n", area->gc_finaliser_table_start,
        gc_finaliser_table_byte_len,
        gc_finaliser_table_byte_len * BLOCKS_PER_FTB);
    #endif
    DEBUG_printf(" pool at %p, length " UINT_FMT " bytes, "
        UINT_FMT " blocks\n", area->gc_pool_start,
        gc_pool_block_len * BYTES_PER_BLOCK, gc_pool_block_len);
}
// Initialise the GC with a single heap area spanning [start, end).
void gc_init(void *start, void *end) {
    // align end pointer on block boundary
    end = (void *)((uintptr_t)end & (~(BYTES_PER_BLOCK - 1)));
    DEBUG_printf("Initializing GC heap: %p..%p = " UINT_FMT " bytes\n", start, end, (byte *)end - (byte *)start);
    gc_setup_area(&MP_STATE_MEM(area), start, end);
    // set last free ATB index to start of heap
    #if MICROPY_GC_SPLIT_HEAP
    MP_STATE_MEM(gc_last_free_area) = &MP_STATE_MEM(area);
    #endif
    // unlock the GC
    MP_STATE_THREAD(gc_lock_depth) = 0;
    // allow auto collection
    MP_STATE_MEM(gc_auto_collect_enabled) = 1;
    #if MICROPY_GC_ALLOC_THRESHOLD
    // by default, maxuint for gc threshold, effectively turning gc-by-threshold off
    MP_STATE_MEM(gc_alloc_threshold) = (size_t)-1;
    MP_STATE_MEM(gc_alloc_amount) = 0;
    #endif
    #if MICROPY_PY_THREAD && !MICROPY_PY_THREAD_GIL
    // mutex protecting the GC_ENTER/GC_EXIT critical sections
    mp_thread_mutex_init(&MP_STATE_MEM(gc_mutex));
    #endif
}
  181. #if MICROPY_GC_SPLIT_HEAP
// Register an additional heap area [start, end) with the GC.
void gc_add(void *start, void *end) {
    // Place the area struct at the start of the area.
    mp_state_mem_area_t *area = (mp_state_mem_area_t *)start;
    start = (void *)((uintptr_t)start + sizeof(mp_state_mem_area_t));
    // Align the end pointer on a block boundary.
    end = (void *)((uintptr_t)end & (~(BYTES_PER_BLOCK - 1)));
    DEBUG_printf("Adding GC heap: %p..%p = " UINT_FMT " bytes\n", start, end, (byte *)end - (byte *)start);
    // Init this area
    gc_setup_area(area, start, end);
    // Find the last registered area in the linked list
    mp_state_mem_area_t *prev_area = &MP_STATE_MEM(area);
    while (prev_area->next != NULL) {
        prev_area = prev_area->next;
    }
    // Add this area to the linked list
    prev_area->next = area;
}
#if MICROPY_GC_SPLIT_HEAP_AUTO
// Try to automatically add a heap area large enough to fulfill 'failed_alloc'.
// Returns true if a new area was added, false if the system heap cannot
// provide enough space.
static bool gc_try_add_heap(size_t failed_alloc) {
    // 'needed' is the size of a heap large enough to hold failed_alloc, with
    // the additional metadata overheads as calculated in gc_setup_area().
    //
    // Rather than reproduce all of that logic here, we approximate that adding
    // (13/512) is enough overhead for sufficiently large heap areas (the
    // overhead converges to 3/128, but there's some fixed overhead and some
    // rounding up of partial block sizes).
    size_t needed = failed_alloc + MAX(2048, failed_alloc * 13 / 512);
    size_t avail = gc_get_max_new_split();
    DEBUG_printf("gc_try_add_heap failed_alloc " UINT_FMT ", "
        "needed " UINT_FMT ", avail " UINT_FMT " bytes \n",
        failed_alloc,
        needed,
        avail);
    if (avail < needed) {
        // Can't fit this allocation, or system heap has nearly run out anyway
        return false;
    }

    // Deciding how much to grow the total heap by each time is tricky:
    //
    // - Grow by too small amounts, leads to heap fragmentation issues.
    //
    // - Grow by too large amounts, may lead to system heap running out of
    //   space.
    //
    // Currently, this implementation is:
    //
    // - At minimum, aim to double the total heap size each time we add a new
    //   heap. i.e. without any large single allocations, total size will be
    //   64KB -> 128KB -> 256KB -> 512KB -> 1MB, etc
    //
    // - If the failed allocation is too large to fit in that size, the new
    //   heap is made exactly large enough for that allocation. Future growth
    //   will double the total heap size again.
    //
    // - If the new heap won't fit in the available free space, add the largest
    //   new heap that will fit (this may lead to failed system heap allocations
    //   elsewhere, but some allocation will likely fail in this circumstance!)

    // Compute total number of blocks in the current heap.
    size_t total_blocks = 0;
    for (mp_state_mem_area_t *area = &MP_STATE_MEM(area);
         area != NULL;
         area = NEXT_AREA(area)) {
        total_blocks += area->gc_alloc_table_byte_len * BLOCKS_PER_ATB;
    }

    // Compute bytes needed to build a heap with total_blocks blocks.
    size_t total_heap =
        total_blocks / BLOCKS_PER_ATB
        #if MICROPY_ENABLE_FINALISER
        + total_blocks / BLOCKS_PER_FTB
        #endif
        + total_blocks * BYTES_PER_BLOCK
        + ALLOC_TABLE_GAP_BYTE
        + sizeof(mp_state_mem_area_t);

    // Round up size to the nearest multiple of BYTES_PER_BLOCK.
    total_heap = (total_heap + BYTES_PER_BLOCK - 1) & (~(BYTES_PER_BLOCK - 1));
    DEBUG_printf("total_heap " UINT_FMT " bytes\n", total_heap);

    size_t to_alloc = MIN(avail, MAX(total_heap, needed));
    mp_state_mem_area_t *new_heap = MP_PLAT_ALLOC_HEAP(to_alloc);
    DEBUG_printf("MP_PLAT_ALLOC_HEAP " UINT_FMT " = %p\n",
        to_alloc, new_heap);
    if (new_heap == NULL) {
        // This should only fail:
        // - In a threaded environment if another thread has
        //   allocated while this function ran.
        // - If there is a bug in gc_get_max_new_split().
        return false;
    }

    gc_add(new_heap, (void *)new_heap + to_alloc);
    return true;
}
#endif
#endif
  274. void gc_lock(void) {
  275. // This does not need to be atomic or have the GC mutex because:
  276. // - each thread has its own gc_lock_depth so there are no races between threads;
  277. // - a hard interrupt will only change gc_lock_depth during its execution, and
  278. // upon return will restore the value of gc_lock_depth.
  279. MP_STATE_THREAD(gc_lock_depth)++;
  280. }
  281. void gc_unlock(void) {
  282. // This does not need to be atomic, See comment above in gc_lock.
  283. MP_STATE_THREAD(gc_lock_depth)--;
  284. }
  285. bool gc_is_locked(void) {
  286. return MP_STATE_THREAD(gc_lock_depth) != 0;
  287. }
  288. #if MICROPY_GC_SPLIT_HEAP
  289. // Returns the area to which this pointer belongs, or NULL if it isn't
  290. // allocated on the GC-managed heap.
  291. static inline mp_state_mem_area_t *gc_get_ptr_area(const void *ptr) {
  292. if (((uintptr_t)(ptr) & (BYTES_PER_BLOCK - 1)) != 0) { // must be aligned on a block
  293. return NULL;
  294. }
  295. for (mp_state_mem_area_t *area = &MP_STATE_MEM(area); area != NULL; area = NEXT_AREA(area)) {
  296. if (ptr >= (void *)area->gc_pool_start // must be above start of pool
  297. && ptr < (void *)area->gc_pool_end) { // must be below end of pool
  298. return area;
  299. }
  300. }
  301. return NULL;
  302. }
  303. #endif
// ptr should be of type void*
#define VERIFY_PTR(ptr) ( \
        ((uintptr_t)(ptr) & (BYTES_PER_BLOCK - 1)) == 0 /* must be aligned on a block */ \
        && ptr >= (void *)MP_STATE_MEM(area).gc_pool_start /* must be above start of pool */ \
        && ptr < (void *)MP_STATE_MEM(area).gc_pool_end /* must be below end of pool */ \
    )

// Hook invoked whenever a block is marked; a port can override it by
// defining TRACE_MARK before this point (hence the #ifndef guard).
#ifndef TRACE_MARK
#if DEBUG_PRINT
#define TRACE_MARK(block, ptr) DEBUG_printf("gc_mark(%p)\n", ptr)
#else
#define TRACE_MARK(block, ptr)
#endif
#endif
// Take the given block as the topmost block on the stack. Check all it's
// children: mark the unmarked child blocks and put those newly marked
// blocks on the stack. When all children have been checked, pop off the
// topmost block on the stack and repeat with that one.
#if MICROPY_GC_SPLIT_HEAP
static void gc_mark_subtree(mp_state_mem_area_t *area, size_t block)
#else
static void gc_mark_subtree(size_t block)
#endif
{
    // Start with the block passed in the argument.
    size_t sp = 0; // depth of the explicit mark stack
    for (;;) {
        #if !MICROPY_GC_SPLIT_HEAP
        mp_state_mem_area_t *area = &MP_STATE_MEM(area);
        #endif
        // work out number of consecutive blocks in the chain starting with this one
        size_t n_blocks = 0;
        do {
            n_blocks += 1;
        } while (ATB_GET_KIND(area, block + n_blocks) == AT_TAIL);
        // check that the consecutive blocks didn't overflow past the end of the area
        assert(area->gc_pool_start + (block + n_blocks) * BYTES_PER_BLOCK <= area->gc_pool_end);
        // check this block's children: scan every word of the allocation,
        // treating each one as a potential (conservative) pointer
        void **ptrs = (void **)PTR_FROM_BLOCK(area, block);
        for (size_t i = n_blocks * BYTES_PER_BLOCK / sizeof(void *); i > 0; i--, ptrs++) {
            MICROPY_GC_HOOK_LOOP(i);
            void *ptr = *ptrs;
            // If this is a heap pointer that hasn't been marked, mark it and push
            // it's children to the stack.
            #if MICROPY_GC_SPLIT_HEAP
            mp_state_mem_area_t *ptr_area = gc_get_ptr_area(ptr);
            if (!ptr_area) {
                // Not a heap-allocated pointer (might even be random data).
                continue;
            }
            #else
            if (!VERIFY_PTR(ptr)) {
                continue;
            }
            mp_state_mem_area_t *ptr_area = area;
            #endif
            size_t ptr_block = BLOCK_FROM_PTR(ptr_area, ptr);
            if (ATB_GET_KIND(ptr_area, ptr_block) != AT_HEAD) {
                // This block is already marked.
                continue;
            }
            // An unmarked head. Mark it, and push it on gc stack.
            TRACE_MARK(ptr_block, ptr);
            ATB_HEAD_TO_MARK(ptr_area, ptr_block);
            if (sp < MICROPY_ALLOC_GC_STACK_SIZE) {
                MP_STATE_MEM(gc_block_stack)[sp] = ptr_block;
                #if MICROPY_GC_SPLIT_HEAP
                MP_STATE_MEM(gc_area_stack)[sp] = ptr_area;
                #endif
                sp += 1;
            } else {
                // Stack full: record the overflow so that
                // gc_deal_with_stack_overflow() can rescan the heap later.
                MP_STATE_MEM(gc_stack_overflow) = 1;
            }
        }
        // Are there any blocks on the stack?
        if (sp == 0) {
            break; // No, stack is empty, we're done.
        }
        // pop the next block off the stack
        sp -= 1;
        block = MP_STATE_MEM(gc_block_stack)[sp];
        #if MICROPY_GC_SPLIT_HEAP
        area = MP_STATE_MEM(gc_area_stack)[sp];
        #endif
    }
}
// If the mark stack overflowed, some marked blocks never had their children
// traced.  Rescan the whole heap for AT_MARK blocks and trace them again,
// repeating until a full pass completes without another overflow.
static void gc_deal_with_stack_overflow(void) {
    while (MP_STATE_MEM(gc_stack_overflow)) {
        MP_STATE_MEM(gc_stack_overflow) = 0;
        // scan entire memory looking for blocks which have been marked but not their children
        for (mp_state_mem_area_t *area = &MP_STATE_MEM(area); area != NULL; area = NEXT_AREA(area)) {
            for (size_t block = 0; block < area->gc_alloc_table_byte_len * BLOCKS_PER_ATB; block++) {
                MICROPY_GC_HOOK_LOOP(block);
                // trace (again) if mark bit set
                if (ATB_GET_KIND(area, block) == AT_MARK) {
                    #if MICROPY_GC_SPLIT_HEAP
                    gc_mark_subtree(area, block);
                    #else
                    gc_mark_subtree(block);
                    #endif
                }
            }
        }
    }
}
// Sweep phase: free every unmarked head (running its finaliser if it has
// one) together with its tail blocks, and reset marked heads back to HEAD
// ready for the next collection.
static void gc_sweep(void) {
    #if MICROPY_PY_GC_COLLECT_RETVAL
    MP_STATE_MEM(gc_collected) = 0;
    #endif
    // free unmarked heads and their tails
    int free_tail = 0;
    #if MICROPY_GC_SPLIT_HEAP_AUTO
    mp_state_mem_area_t *prev_area = NULL;
    #endif
    for (mp_state_mem_area_t *area = &MP_STATE_MEM(area); area != NULL; area = NEXT_AREA(area)) {
        size_t end_block = area->gc_alloc_table_byte_len * BLOCKS_PER_ATB;
        // only need to scan up to the last block known to be in use
        if (area->gc_last_used_block < end_block) {
            end_block = area->gc_last_used_block + 1;
        }

        size_t last_used_block = 0;

        for (size_t block = 0; block < end_block; block++) {
            MICROPY_GC_HOOK_LOOP(block);
            switch (ATB_GET_KIND(area, block)) {
                case AT_HEAD:
                    // An unmarked head at sweep time is garbage.
                    #if MICROPY_ENABLE_FINALISER
                    if (FTB_GET(area, block)) {
                        mp_obj_base_t *obj = (mp_obj_base_t *)PTR_FROM_BLOCK(area, block);
                        if (obj->type != NULL) {
                            // if the object has a type then see if it has a __del__ method
                            mp_obj_t dest[2];
                            mp_load_method_maybe(MP_OBJ_FROM_PTR(obj), MP_QSTR___del__, dest);
                            if (dest[0] != MP_OBJ_NULL) {
                                // load_method returned a method, execute it in a protected environment
                                #if MICROPY_ENABLE_SCHEDULER
                                mp_sched_lock();
                                #endif
                                mp_call_function_1_protected(dest[0], dest[1]);
                                #if MICROPY_ENABLE_SCHEDULER
                                mp_sched_unlock();
                                #endif
                            }
                        }
                        // clear finaliser flag
                        FTB_CLEAR(area, block);
                    }
                    #endif
                    free_tail = 1;
                    DEBUG_printf("gc_sweep(%p)\n", (void *)PTR_FROM_BLOCK(area, block));
                    #if MICROPY_PY_GC_COLLECT_RETVAL
                    MP_STATE_MEM(gc_collected)++;
                    #endif
                    // fall through to free the head
                    MP_FALLTHROUGH

                case AT_TAIL:
                    if (free_tail) {
                        // tail of a freed allocation (or the freed head itself)
                        ATB_ANY_TO_FREE(area, block);
                        #if CLEAR_ON_SWEEP
                        memset((void *)PTR_FROM_BLOCK(area, block), 0, BYTES_PER_BLOCK);
                        #endif
                    } else {
                        last_used_block = block;
                    }
                    break;

                case AT_MARK:
                    // live allocation: restore the mark bit to plain HEAD
                    ATB_MARK_TO_HEAD(area, block);
                    free_tail = 0;
                    last_used_block = block;
                    break;
            }
        }

        area->gc_last_used_block = last_used_block;

        #if MICROPY_GC_SPLIT_HEAP_AUTO
        // Free any empty area, aside from the first one
        if (last_used_block == 0 && prev_area != NULL) {
            DEBUG_printf("gc_sweep free empty area %p\n", area);
            NEXT_AREA(prev_area) = NEXT_AREA(area);
            MP_PLAT_FREE_HEAP(area);
            area = prev_area;
        }
        prev_area = area;
        #endif
    }
}
// Begin a collection: lock the GC and mark everything reachable from the
// registered root pointer sections.
void gc_collect_start(void) {
    GC_ENTER();
    // Take the GC lock so that gc_alloc() returns NULL (rather than
    // allocating) while a collection is in progress.
    MP_STATE_THREAD(gc_lock_depth)++;
    #if MICROPY_GC_ALLOC_THRESHOLD
    MP_STATE_MEM(gc_alloc_amount) = 0;
    #endif
    MP_STATE_MEM(gc_stack_overflow) = 0;

    // Trace root pointers. This relies on the root pointers being organised
    // correctly in the mp_state_ctx structure. We scan nlr_top, dict_locals,
    // dict_globals, then the root pointer section of mp_state_vm.
    void **ptrs = (void **)(void *)&mp_state_ctx;
    size_t root_start = offsetof(mp_state_ctx_t, thread.dict_locals);
    size_t root_end = offsetof(mp_state_ctx_t, vm.qstr_last_chunk);
    gc_collect_root(ptrs + root_start / sizeof(void *), (root_end - root_start) / sizeof(void *));

    #if MICROPY_ENABLE_PYSTACK
    // Trace root pointers from the Python stack.
    ptrs = (void **)(void *)MP_STATE_THREAD(pystack_start);
    gc_collect_root(ptrs, (MP_STATE_THREAD(pystack_cur) - MP_STATE_THREAD(pystack_start)) / sizeof(void *));
    #endif
}
// Address sanitizer needs to know that the access to ptrs[i] must always be
// considered OK, even if it's a load from an address that would normally be
// prohibited (due to being undefined, in a red zone, etc).
#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8))
__attribute__((no_sanitize_address))
#endif
static void *gc_get_ptr(void **ptrs, int i) {
    #if MICROPY_DEBUG_VALGRIND
    // Under valgrind, treat unaddressable words as NULL rather than
    // triggering a memcheck error when conservatively scanning roots.
    if (!VALGRIND_CHECK_MEM_IS_ADDRESSABLE(&ptrs[i], sizeof(*ptrs))) {
        return NULL;
    }
    #endif
    return ptrs[i];
}
// Conservatively scan an array of 'len' words, marking (and tracing the
// children of) every word that looks like a pointer into the GC pool.
void gc_collect_root(void **ptrs, size_t len) {
    #if !MICROPY_GC_SPLIT_HEAP
    mp_state_mem_area_t *area = &MP_STATE_MEM(area);
    #endif
    for (size_t i = 0; i < len; i++) {
        MICROPY_GC_HOOK_LOOP(i);
        void *ptr = gc_get_ptr(ptrs, i);
        #if MICROPY_GC_SPLIT_HEAP
        mp_state_mem_area_t *area = gc_get_ptr_area(ptr);
        if (!area) {
            // not a pointer into any GC area
            continue;
        }
        #else
        if (!VERIFY_PTR(ptr)) {
            continue;
        }
        #endif
        size_t block = BLOCK_FROM_PTR(area, ptr);
        if (ATB_GET_KIND(area, block) == AT_HEAD) {
            // An unmarked head: mark it, and mark all its children
            ATB_HEAD_TO_MARK(area, block);
            #if MICROPY_GC_SPLIT_HEAP
            gc_mark_subtree(area, block);
            #else
            gc_mark_subtree(block);
            #endif
        }
    }
}
  549. void gc_collect_end(void) {
  550. gc_deal_with_stack_overflow();
  551. gc_sweep();
  552. #if MICROPY_GC_SPLIT_HEAP
  553. MP_STATE_MEM(gc_last_free_area) = &MP_STATE_MEM(area);
  554. #endif
  555. for (mp_state_mem_area_t *area = &MP_STATE_MEM(area); area != NULL; area = NEXT_AREA(area)) {
  556. area->gc_last_free_atb_index = 0;
  557. }
  558. MP_STATE_THREAD(gc_lock_depth)--;
  559. GC_EXIT();
  560. }
// Free all allocated blocks without marking anything: since no block is
// marked, the subsequent gc_collect_end() sweep reclaims every allocation
// (running finalisers via gc_sweep).
void gc_sweep_all(void) {
    GC_ENTER();
    MP_STATE_THREAD(gc_lock_depth)++;
    MP_STATE_MEM(gc_stack_overflow) = 0;
    gc_collect_end();
}
// Gather heap statistics: totals, free space, largest free/used runs and
// counts of 1- and 2-block allocations.  Counts are accumulated in blocks
// and 'used'/'free' are scaled to bytes at the end.
void gc_info(gc_info_t *info) {
    GC_ENTER();
    info->total = 0;
    info->used = 0;
    info->free = 0;
    info->max_free = 0;
    info->num_1block = 0;
    info->num_2block = 0;
    info->max_block = 0;
    for (mp_state_mem_area_t *area = &MP_STATE_MEM(area); area != NULL; area = NEXT_AREA(area)) {
        bool finish = false;
        info->total += area->gc_pool_end - area->gc_pool_start;
        // len tracks the current allocation run length, len_free the
        // current free run length (both in blocks).
        for (size_t block = 0, len = 0, len_free = 0; !finish;) {
            MICROPY_GC_HOOK_LOOP(block);
            size_t kind = ATB_GET_KIND(area, block);
            switch (kind) {
                case AT_FREE:
                    info->free += 1;
                    len_free += 1;
                    len = 0;
                    break;

                case AT_HEAD:
                    info->used += 1;
                    len = 1;
                    break;

                case AT_TAIL:
                    info->used += 1;
                    len += 1;
                    break;

                case AT_MARK:
                    // shouldn't happen
                    break;
            }

            block++;
            finish = (block == area->gc_alloc_table_byte_len * BLOCKS_PER_ATB);
            // Get next block type if possible
            if (!finish) {
                kind = ATB_GET_KIND(area, block);
            }

            // A run ends here (end of table, or the next block starts a new
            // free/used run): record its length in the statistics.
            if (finish || kind == AT_FREE || kind == AT_HEAD) {
                if (len == 1) {
                    info->num_1block += 1;
                } else if (len == 2) {
                    info->num_2block += 1;
                }
                if (len > info->max_block) {
                    info->max_block = len;
                }
                if (finish || kind == AT_HEAD) {
                    if (len_free > info->max_free) {
                        info->max_free = len_free;
                    }
                    len_free = 0;
                }
            }
        }
    }

    info->used *= BYTES_PER_BLOCK;
    info->free *= BYTES_PER_BLOCK;

    #if MICROPY_GC_SPLIT_HEAP_AUTO
    info->max_new_split = gc_get_max_new_split();
    #endif

    GC_EXIT();
}
// Allocate n_bytes from the GC heap, rounded up to a whole number of blocks.
// alloc_flags may contain GC_ALLOC_FLAG_HAS_FINALISER, in which case the
// allocated block is recorded in the finaliser table (FTB).
// Returns NULL on a zero-sized request, if the GC is locked, or if no free
// run of blocks can be found even after collecting (and, with
// MICROPY_GC_SPLIT_HEAP_AUTO, after attempting to add a new heap area).
void *gc_alloc(size_t n_bytes, unsigned int alloc_flags) {
    bool has_finaliser = alloc_flags & GC_ALLOC_FLAG_HAS_FINALISER;
    // round the request up to an integral number of blocks
    size_t n_blocks = ((n_bytes + BYTES_PER_BLOCK - 1) & (~(BYTES_PER_BLOCK - 1))) / BYTES_PER_BLOCK;
    DEBUG_printf("gc_alloc(" UINT_FMT " bytes -> " UINT_FMT " blocks)\n", n_bytes, n_blocks);
    // check for 0 allocation
    if (n_blocks == 0) {
        return NULL;
    }
    // check if GC is locked
    if (MP_STATE_THREAD(gc_lock_depth) > 0) {
        return NULL;
    }
    GC_ENTER();
    mp_state_mem_area_t *area;
    size_t i;
    size_t end_block;
    size_t start_block;
    size_t n_free;
    // If auto-collection is disabled, pretend a collection has already run so
    // the retry loop below fails outright instead of triggering gc_collect.
    int collected = !MP_STATE_MEM(gc_auto_collect_enabled);
    #if MICROPY_GC_SPLIT_HEAP_AUTO
    bool added = false; // whether a new heap area was added during this call
    #endif
    #if MICROPY_GC_ALLOC_THRESHOLD
    // pre-emptively collect once the configured allocation threshold is hit
    if (!collected && MP_STATE_MEM(gc_alloc_amount) >= MP_STATE_MEM(gc_alloc_threshold)) {
        GC_EXIT();
        gc_collect();
        collected = 1;
        GC_ENTER();
    }
    #endif
    for (;;) {
        #if MICROPY_GC_SPLIT_HEAP
        area = MP_STATE_MEM(gc_last_free_area);
        #else
        area = &MP_STATE_MEM(area);
        #endif
        // look for a run of n_blocks available blocks
        for (; area != NULL; area = NEXT_AREA(area), i = 0) {
            n_free = 0;
            // scan the allocation table one byte (BLOCKS_PER_ATB blocks) at a time
            for (i = area->gc_last_free_atb_index; i < area->gc_alloc_table_byte_len; i++) {
                MICROPY_GC_HOOK_LOOP(i);
                byte a = area->gc_alloc_table_start[i];
                // *FORMAT-OFF*
                if (ATB_0_IS_FREE(a)) { if (++n_free >= n_blocks) { i = i * BLOCKS_PER_ATB + 0; goto found; } } else { n_free = 0; }
                if (ATB_1_IS_FREE(a)) { if (++n_free >= n_blocks) { i = i * BLOCKS_PER_ATB + 1; goto found; } } else { n_free = 0; }
                if (ATB_2_IS_FREE(a)) { if (++n_free >= n_blocks) { i = i * BLOCKS_PER_ATB + 2; goto found; } } else { n_free = 0; }
                if (ATB_3_IS_FREE(a)) { if (++n_free >= n_blocks) { i = i * BLOCKS_PER_ATB + 3; goto found; } } else { n_free = 0; }
                // *FORMAT-ON*
            }
            // No free blocks found on this heap. Mark this heap as
            // filled, so we won't try to find free space here again until
            // space is freed.
            #if MICROPY_GC_SPLIT_HEAP
            if (n_blocks == 1) {
                area->gc_last_free_atb_index = (i + 1) / BLOCKS_PER_ATB; // or (size_t)-1
            }
            #endif
        }
        GC_EXIT();
        // nothing found!
        if (collected) {
            #if MICROPY_GC_SPLIT_HEAP_AUTO
            // a collection didn't help; try to grow the heap once before giving up
            if (!added && gc_try_add_heap(n_bytes)) {
                added = true;
                continue;
            }
            #endif
            return NULL;
        }
        DEBUG_printf("gc_alloc(" UINT_FMT "): no free mem, triggering GC\n", n_bytes);
        gc_collect();
        collected = 1;
        GC_ENTER();
    }

    // found, ending at block i inclusive
found:
    // get starting and end blocks, both inclusive
    end_block = i;
    start_block = i - n_free + 1;

    // Set last free ATB index to block after last block we found, for start of
    // next scan. To reduce fragmentation, we only do this if we were looking
    // for a single free block, which guarantees that there are no free blocks
    // before this one. Also, whenever we free or shrink a block we must check
    // if this index needs adjusting (see gc_realloc and gc_free).
    if (n_free == 1) {
        #if MICROPY_GC_SPLIT_HEAP
        MP_STATE_MEM(gc_last_free_area) = area;
        #endif
        area->gc_last_free_atb_index = (i + 1) / BLOCKS_PER_ATB;
    }

    area->gc_last_used_block = MAX(area->gc_last_used_block, end_block);

    // mark first block as used head
    ATB_FREE_TO_HEAD(area, start_block);

    // mark rest of blocks as used tail
    // TODO for a run of many blocks can make this more efficient
    for (size_t bl = start_block + 1; bl <= end_block; bl++) {
        ATB_FREE_TO_TAIL(area, bl);
    }

    // get pointer to first block
    // we must create this pointer before unlocking the GC so a collection can find it
    void *ret_ptr = (void *)(area->gc_pool_start + start_block * BYTES_PER_BLOCK);
    DEBUG_printf("gc_alloc(%p)\n", ret_ptr);

    #if MICROPY_GC_ALLOC_THRESHOLD
    MP_STATE_MEM(gc_alloc_amount) += n_blocks;
    #endif

    GC_EXIT();

    #if MICROPY_GC_CONSERVATIVE_CLEAR
    // be conservative and zero out all the newly allocated blocks
    memset((byte *)ret_ptr, 0, (end_block - start_block + 1) * BYTES_PER_BLOCK);
    #else
    // zero out the additional bytes of the newly allocated blocks
    // This is needed because the blocks may have previously held pointers
    // to the heap and will not be set to something else if the caller
    // doesn't actually use the entire block. As such they will continue
    // to point to the heap and may prevent other blocks from being reclaimed.
    memset((byte *)ret_ptr + n_bytes, 0, (end_block - start_block + 1) * BYTES_PER_BLOCK - n_bytes);
    #endif

    #if MICROPY_ENABLE_FINALISER
    if (has_finaliser) {
        // clear type pointer in case it is never set
        ((mp_obj_base_t *)ret_ptr)->type = NULL;
        // set mp_obj flag only if it has a finaliser
        GC_ENTER();
        FTB_SET(area, start_block);
        GC_EXIT();
    }
    #else
    (void)has_finaliser;
    #endif

    #if EXTENSIVE_HEAP_PROFILING
    gc_dump_alloc_table(&mp_plat_print);
    #endif

    return ret_ptr;
}
  765. /*
  766. void *gc_alloc(mp_uint_t n_bytes) {
  767. return _gc_alloc(n_bytes, false);
  768. }
  769. void *gc_alloc_with_finaliser(mp_uint_t n_bytes) {
  770. return _gc_alloc(n_bytes, true);
  771. }
  772. */
// Force the freeing of a piece of memory that was allocated with gc_alloc.
// ptr must be NULL (no-op) or point at the head block of an allocation.
// TODO: freeing here does not call finaliser
void gc_free(void *ptr) {
    if (MP_STATE_THREAD(gc_lock_depth) > 0) {
        // Cannot free while the GC is locked. However free is an optimisation
        // to reclaim the memory immediately, this means it will now be left
        // until the next collection.
        return;
    }

    GC_ENTER();

    DEBUG_printf("gc_free(%p)\n", ptr);

    if (ptr == NULL) {
        // free(NULL) is a no-op
        GC_EXIT();
        return;
    }

    // get the GC block number corresponding to this pointer
    mp_state_mem_area_t *area;
    #if MICROPY_GC_SPLIT_HEAP
    area = gc_get_ptr_area(ptr);
    assert(area);
    #else
    assert(VERIFY_PTR(ptr));
    area = &MP_STATE_MEM(area);
    #endif

    size_t block = BLOCK_FROM_PTR(area, ptr);
    assert(ATB_GET_KIND(area, block) == AT_HEAD);

    #if MICROPY_ENABLE_FINALISER
    // this block no longer needs its finaliser to be called
    FTB_CLEAR(area, block);
    #endif

    #if MICROPY_GC_SPLIT_HEAP
    if (MP_STATE_MEM(gc_last_free_area) != area) {
        // We freed something but it isn't the current area. Reset the
        // last free area to the start for a rescan. Note that this won't
        // give much of a performance hit, since areas that are completely
        // filled will likely be skipped (the gc_last_free_atb_index
        // points to the last block).
        // The reason why this is necessary is because it is not possible
        // to see which area came first (like it is possible to adjust
        // gc_last_free_atb_index based on whether the freed block is
        // before the last free block).
        MP_STATE_MEM(gc_last_free_area) = &MP_STATE_MEM(area);
    }
    #endif

    // set the last_free pointer to this block if it's earlier in the heap
    if (block / BLOCKS_PER_ATB < area->gc_last_free_atb_index) {
        area->gc_last_free_atb_index = block / BLOCKS_PER_ATB;
    }

    // free head and all of its tail blocks
    do {
        ATB_ANY_TO_FREE(area, block);
        block += 1;
    } while (ATB_GET_KIND(area, block) == AT_TAIL);

    GC_EXIT();

    #if EXTENSIVE_HEAP_PROFILING
    gc_dump_alloc_table(&mp_plat_print);
    #endif
}
  831. size_t gc_nbytes(const void *ptr) {
  832. GC_ENTER();
  833. mp_state_mem_area_t *area;
  834. #if MICROPY_GC_SPLIT_HEAP
  835. area = gc_get_ptr_area(ptr);
  836. #else
  837. if (VERIFY_PTR(ptr)) {
  838. area = &MP_STATE_MEM(area);
  839. } else {
  840. area = NULL;
  841. }
  842. #endif
  843. if (area) {
  844. size_t block = BLOCK_FROM_PTR(area, ptr);
  845. if (ATB_GET_KIND(area, block) == AT_HEAD) {
  846. // work out number of consecutive blocks in the chain starting with this on
  847. size_t n_blocks = 0;
  848. do {
  849. n_blocks += 1;
  850. } while (ATB_GET_KIND(area, block + n_blocks) == AT_TAIL);
  851. GC_EXIT();
  852. return n_blocks * BYTES_PER_BLOCK;
  853. }
  854. }
  855. // invalid pointer
  856. GC_EXIT();
  857. return 0;
  858. }
#if 0
// old, simple realloc that didn't expand memory in place
// (disabled legacy implementation kept for reference; the active
// gc_realloc lives in the #else branch below)
void *gc_realloc(void *ptr, mp_uint_t n_bytes) {
    mp_uint_t n_existing = gc_nbytes(ptr);
    if (n_bytes <= n_existing) {
        return ptr;
    } else {
        bool has_finaliser;
        if (ptr == NULL) {
            has_finaliser = false;
        } else {
            #if MICROPY_ENABLE_FINALISER
            has_finaliser = FTB_GET(BLOCK_FROM_PTR((mp_uint_t)ptr));
            #else
            has_finaliser = false;
            #endif
        }
        void *ptr2 = gc_alloc(n_bytes, has_finaliser);
        if (ptr2 == NULL) {
            return ptr2;
        }
        memcpy(ptr2, ptr, n_existing);
        gc_free(ptr);
        return ptr2;
    }
}

#else // Alternative gc_realloc impl
// Resize a chunk of GC-allocated memory.
// - ptr_in == NULL: behaves as a plain gc_alloc (without finaliser).
// - n_bytes == 0: behaves as gc_free and returns NULL.
// Otherwise tries to shrink or grow the chunk in place; if that is not
// possible and allow_move is true, allocates a new chunk, copies the data
// and frees the old one. Returns NULL on failure (including when the GC
// is locked, or when allow_move is false and in-place resize fails).
void *gc_realloc(void *ptr_in, size_t n_bytes, bool allow_move) {
    // check for pure allocation
    if (ptr_in == NULL) {
        return gc_alloc(n_bytes, false);
    }

    // check for pure free
    if (n_bytes == 0) {
        gc_free(ptr_in);
        return NULL;
    }

    if (MP_STATE_THREAD(gc_lock_depth) > 0) {
        return NULL;
    }

    void *ptr = ptr_in;

    GC_ENTER();

    // get the GC block number corresponding to this pointer
    mp_state_mem_area_t *area;
    #if MICROPY_GC_SPLIT_HEAP
    area = gc_get_ptr_area(ptr);
    assert(area);
    #else
    assert(VERIFY_PTR(ptr));
    area = &MP_STATE_MEM(area);
    #endif

    size_t block = BLOCK_FROM_PTR(area, ptr);
    assert(ATB_GET_KIND(area, block) == AT_HEAD);

    // compute number of new blocks that are requested
    size_t new_blocks = (n_bytes + BYTES_PER_BLOCK - 1) / BYTES_PER_BLOCK;

    // Get the total number of consecutive blocks that are already allocated to
    // this chunk of memory, and then count the number of free blocks following
    // it. Stop if we reach the end of the heap, or if we find enough extra
    // free blocks to satisfy the realloc. Note that we need to compute the
    // total size of the existing memory chunk so we can correctly and
    // efficiently shrink it (see below for shrinking code).
    size_t n_free = 0;
    size_t n_blocks = 1; // counting HEAD block
    size_t max_block = area->gc_alloc_table_byte_len * BLOCKS_PER_ATB;
    for (size_t bl = block + n_blocks; bl < max_block; bl++) {
        byte block_type = ATB_GET_KIND(area, bl);
        if (block_type == AT_TAIL) {
            n_blocks++;
            continue;
        }
        if (block_type == AT_FREE) {
            n_free++;
            if (n_blocks + n_free >= new_blocks) {
                // stop as soon as we find enough blocks for n_bytes
                break;
            }
            continue;
        }
        break;
    }

    // return original ptr if it already has the requested number of blocks
    if (new_blocks == n_blocks) {
        GC_EXIT();
        return ptr_in;
    }

    // check if we can shrink the allocated area
    if (new_blocks < n_blocks) {
        // free unneeded tail blocks
        for (size_t bl = block + new_blocks, count = n_blocks - new_blocks; count > 0; bl++, count--) {
            ATB_ANY_TO_FREE(area, bl);
        }

        #if MICROPY_GC_SPLIT_HEAP
        if (MP_STATE_MEM(gc_last_free_area) != area) {
            // See comment in gc_free.
            MP_STATE_MEM(gc_last_free_area) = &MP_STATE_MEM(area);
        }
        #endif

        // set the last_free pointer to end of this block if it's earlier in the heap
        if ((block + new_blocks) / BLOCKS_PER_ATB < area->gc_last_free_atb_index) {
            area->gc_last_free_atb_index = (block + new_blocks) / BLOCKS_PER_ATB;
        }

        GC_EXIT();

        #if EXTENSIVE_HEAP_PROFILING
        gc_dump_alloc_table(&mp_plat_print);
        #endif

        return ptr_in;
    }

    // check if we can expand in place
    if (new_blocks <= n_blocks + n_free) {
        // mark few more blocks as used tail
        size_t end_block = block + new_blocks;
        for (size_t bl = block + n_blocks; bl < end_block; bl++) {
            assert(ATB_GET_KIND(area, bl) == AT_FREE);
            ATB_FREE_TO_TAIL(area, bl);
        }

        area->gc_last_used_block = MAX(area->gc_last_used_block, end_block);

        GC_EXIT();

        #if MICROPY_GC_CONSERVATIVE_CLEAR
        // be conservative and zero out all the newly allocated blocks
        memset((byte *)ptr_in + n_blocks * BYTES_PER_BLOCK, 0, (new_blocks - n_blocks) * BYTES_PER_BLOCK);
        #else
        // zero out the additional bytes of the newly allocated blocks (see comment above in gc_alloc)
        memset((byte *)ptr_in + n_bytes, 0, new_blocks * BYTES_PER_BLOCK - n_bytes);
        #endif

        #if EXTENSIVE_HEAP_PROFILING
        gc_dump_alloc_table(&mp_plat_print);
        #endif

        return ptr_in;
    }

    // remember the finaliser flag so a moved allocation keeps it
    #if MICROPY_ENABLE_FINALISER
    bool ftb_state = FTB_GET(area, block);
    #else
    bool ftb_state = false;
    #endif

    GC_EXIT();

    if (!allow_move) {
        // not allowed to move memory block so return failure
        return NULL;
    }

    // can't resize inplace; try to find a new contiguous chain
    void *ptr_out = gc_alloc(n_bytes, ftb_state);

    // check that the alloc succeeded
    if (ptr_out == NULL) {
        return NULL;
    }

    DEBUG_printf("gc_realloc(%p -> %p)\n", ptr_in, ptr_out);
    // copy the old data (whole old chunk; new chunk is at least as large)
    memcpy(ptr_out, ptr_in, n_blocks * BYTES_PER_BLOCK);
    gc_free(ptr_in);
    return ptr_out;
}
  1009. #endif // Alternative gc_realloc impl
  1010. void gc_dump_info(const mp_print_t *print) {
  1011. gc_info_t info;
  1012. gc_info(&info);
  1013. mp_printf(print, "GC: total: %u, used: %u, free: %u",
  1014. (uint)info.total, (uint)info.used, (uint)info.free);
  1015. #if MICROPY_GC_SPLIT_HEAP_AUTO
  1016. mp_printf(print, ", max new split: %u", (uint)info.max_new_split);
  1017. #endif
  1018. mp_printf(print, "\n No. of 1-blocks: %u, 2-blocks: %u, max blk sz: %u, max free sz: %u\n",
  1019. (uint)info.num_1block, (uint)info.num_2block, (uint)info.max_block, (uint)info.max_free);
  1020. }
// Print an ASCII map of the allocation table: one character per block
// ('.' free, 'h'/type-letter head, '=' tail, 'm' marked), 64 blocks per
// line, with runs of 2+ all-free lines abbreviated.
void gc_dump_alloc_table(const mp_print_t *print) {
    GC_ENTER();
    static const size_t DUMP_BYTES_PER_LINE = 64;
    for (mp_state_mem_area_t *area = &MP_STATE_MEM(area); area != NULL; area = NEXT_AREA(area)) {
        #if !EXTENSIVE_HEAP_PROFILING
        // When comparing heap output we don't want to print the starting
        // pointer of the heap because it changes from run to run.
        mp_printf(print, "GC memory layout; from %p:", area->gc_pool_start);
        #endif
        for (size_t bl = 0; bl < area->gc_alloc_table_byte_len * BLOCKS_PER_ATB; bl++) {
            if (bl % DUMP_BYTES_PER_LINE == 0) {
                // a new line of blocks
                {
                    // check if this line contains only free blocks
                    size_t bl2 = bl;
                    while (bl2 < area->gc_alloc_table_byte_len * BLOCKS_PER_ATB && ATB_GET_KIND(area, bl2) == AT_FREE) {
                        bl2++;
                    }
                    if (bl2 - bl >= 2 * DUMP_BYTES_PER_LINE) {
                        // there are at least 2 lines containing only free blocks, so abbreviate their printing
                        mp_printf(print, "\n        (%u lines all free)", (uint)(bl2 - bl) / DUMP_BYTES_PER_LINE);
                        bl = bl2 & (~(DUMP_BYTES_PER_LINE - 1));
                        if (bl >= area->gc_alloc_table_byte_len * BLOCKS_PER_ATB) {
                            // got to end of heap
                            break;
                        }
                    }
                }
                // print header for new line of blocks
                // (the cast to uint32_t is for 16-bit ports)
                mp_printf(print, "\n%08x: ", (uint)(bl * BYTES_PER_BLOCK));
            }
            int c = ' ';
            switch (ATB_GET_KIND(area, bl)) {
                case AT_FREE:
                    c = '.';
                    break;
                /* this prints out if the object is reachable from BSS or STACK (for unix only)
                case AT_HEAD: {
                    c = 'h';
                    void **ptrs = (void**)(void*)&mp_state_ctx;
                    mp_uint_t len = offsetof(mp_state_ctx_t, vm.stack_top) / sizeof(mp_uint_t);
                    for (mp_uint_t i = 0; i < len; i++) {
                        mp_uint_t ptr = (mp_uint_t)ptrs[i];
                        if (gc_get_ptr_area(ptr) && BLOCK_FROM_PTR(ptr) == bl) {
                            c = 'B';
                            break;
                        }
                    }
                    if (c == 'h') {
                        ptrs = (void**)&c;
                        len = ((mp_uint_t)MP_STATE_THREAD(stack_top) - (mp_uint_t)&c) / sizeof(mp_uint_t);
                        for (mp_uint_t i = 0; i < len; i++) {
                            mp_uint_t ptr = (mp_uint_t)ptrs[i];
                            if (gc_get_ptr_area(ptr) && BLOCK_FROM_PTR(ptr) == bl) {
                                c = 'S';
                                break;
                            }
                        }
                    }
                    break;
                }
                */
                /* this prints the uPy object type of the head block */
                case AT_HEAD: {
                    // the first word of a head block is the object's type
                    // pointer (for mp_obj_base_t objects), used to pick a letter
                    void **ptr = (void **)(area->gc_pool_start + bl * BYTES_PER_BLOCK);
                    if (*ptr == &mp_type_tuple) {
                        c = 'T';
                    } else if (*ptr == &mp_type_list) {
                        c = 'L';
                    } else if (*ptr == &mp_type_dict) {
                        c = 'D';
                    } else if (*ptr == &mp_type_str || *ptr == &mp_type_bytes) {
                        c = 'S';
                    }
                    #if MICROPY_PY_BUILTINS_BYTEARRAY
                    else if (*ptr == &mp_type_bytearray) {
                        c = 'A';
                    }
                    #endif
                    #if MICROPY_PY_ARRAY
                    else if (*ptr == &mp_type_array) {
                        c = 'A';
                    }
                    #endif
                    #if MICROPY_PY_BUILTINS_FLOAT
                    else if (*ptr == &mp_type_float) {
                        c = 'F';
                    }
                    #endif
                    else if (*ptr == &mp_type_fun_bc) {
                        c = 'B';
                    } else if (*ptr == &mp_type_module) {
                        c = 'M';
                    } else {
                        c = 'h';
                        #if 0
                        // This code prints "Q" for qstr-pool data, and "q" for qstr-str
                        // data. It can be useful to see how qstrs are being allocated,
                        // but is disabled by default because it is very slow.
                        for (qstr_pool_t *pool = MP_STATE_VM(last_pool); c == 'h' && pool != NULL; pool = pool->prev) {
                            if ((qstr_pool_t *)ptr == pool) {
                                c = 'Q';
                                break;
                            }
                            for (const byte **q = pool->qstrs, **q_top = pool->qstrs + pool->len; q < q_top; q++) {
                                if ((const byte *)ptr == *q) {
                                    c = 'q';
                                    break;
                                }
                            }
                        }
                        #endif
                    }
                    break;
                }
                case AT_TAIL:
                    c = '=';
                    break;
                case AT_MARK:
                    c = 'm';
                    break;
            }
            mp_printf(print, "%c", c);
        }
        mp_print_str(print, "\n");
    }
    GC_EXIT();
}
#if 0
// For testing the GC functions
// (disabled) simple self-test: allocates a small heap with some reachable
// and unreachable objects, then runs a collection and dumps the table
// before and after.
void gc_test(void) {
    mp_uint_t len = 500;
    mp_uint_t *heap = malloc(len);
    gc_init(heap, heap + len / sizeof(mp_uint_t));
    void *ptrs[100];
    {
        mp_uint_t **p = gc_alloc(16, false);
        p[0] = gc_alloc(64, false);
        p[1] = gc_alloc(1, false);
        p[2] = gc_alloc(1, false);
        p[3] = gc_alloc(1, false);
        mp_uint_t ***p2 = gc_alloc(16, false);
        p2[0] = p;
        p2[1] = p;
        ptrs[0] = p2;
    }
    for (int i = 0; i < 25; i += 2) {
        mp_uint_t *p = gc_alloc(i, false);
        printf("p=%p\n", p);
        if (i & 3) {
            // ptrs[i] = p;
        }
    }

    printf("Before GC:\n");
    gc_dump_alloc_table(&mp_plat_print);
    printf("Starting GC...\n");
    gc_collect_start();
    gc_collect_root(ptrs, sizeof(ptrs) / sizeof(void *));
    gc_collect_end();
    printf("After GC:\n");
    gc_dump_alloc_table(&mp_plat_print);
}
#endif
  1185. #endif // MICROPY_ENABLE_GC