thread.c 19 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644
  1. #include "thread.h"
  2. #include "kernel.h"
  3. #include "memmgr.h"
  4. #include "memmgr_heap.h"
  5. #include "check.h"
  6. #include "common_defines.h"
  7. #include "mutex.h"
  8. #include "string.h"
  9. #include <task.h>
  10. #include "log.h"
  11. #include <furi_hal_rtc.h>
  12. #include <furi_hal_console.h>
  13. #define TAG "FuriThread"
  14. #define THREAD_NOTIFY_INDEX 1 // Index 0 is used for stream buffers
typedef struct FuriThreadStdout FuriThreadStdout;

/** Per-thread stdout state: an optional sink callback plus a line buffer. */
struct FuriThreadStdout {
    FuriThreadStdoutWriteCallback write_callback; // NULL -> falls back to HAL console
    FuriString* buffer; // bytes buffered until a newline or an explicit flush
};
/** Thread control block wrapping a FreeRTOS task. */
struct FuriThread {
    FuriThreadState state; // current lifecycle state (Stopped/Starting/Running)
    int32_t ret; // return code of the thread callback, valid once Stopped
    FuriThreadCallback callback; // thread entry point
    void* context; // user context passed to callback
    FuriThreadStateCallback state_callback; // notified on every state transition
    void* state_context; // user context for state_callback
    char* name; // owned strdup'd copy, may be NULL
    char* appid; // owned strdup'd copy, may be NULL
    FuriThreadPriority priority; // 0 means "use FuriThreadPriorityNormal"
    TaskHandle_t task_handle; // underlying FreeRTOS task, NULL when not started
    size_t heap_size; // heap balance sampled at thread exit (heap trace only)
    FuriThreadStdout output; // per-thread stdout buffering
    // Keep all non-alignable byte types in one place,
    // this ensures that the size of this structure is minimal
    bool is_service; // service threads use pool-allocated static TCB/stack
    bool heap_trace_enabled; // track allocations made by this thread
    configSTACK_DEPTH_TYPE stack_size; // stack size in bytes
};
  39. static size_t __furi_thread_stdout_write(FuriThread* thread, const char* data, size_t size);
  40. static int32_t __furi_thread_stdout_flush(FuriThread* thread);
/** Catch threads that are trying to exit wrong way (returning from the task
 * function instead of going through vTaskDelete). Never returns: crashes. */
__attribute__((__noreturn__)) void furi_thread_catch() { //-V1082
    asm volatile("nop"); // extra magic
    furi_crash("You are doing it wrong"); //-V779
    __builtin_unreachable();
}
  47. static void furi_thread_set_state(FuriThread* thread, FuriThreadState state) {
  48. furi_assert(thread);
  49. thread->state = state;
  50. if(thread->state_callback) {
  51. thread->state_callback(state, thread->state_context);
  52. }
  53. }
/**
 * Common FreeRTOS task body used by every FuriThread.
 * Publishes the FuriThread in TLS slot 0, runs the user callback, reports the
 * heap balance when tracing is enabled, flushes stdout and deletes the task.
 */
static void furi_thread_body(void* context) {
    furi_assert(context);
    FuriThread* thread = context;

    // store thread instance to thread local storage (slot 0 must be empty)
    furi_assert(pvTaskGetThreadLocalStoragePointer(NULL, 0) == NULL);
    vTaskSetThreadLocalStoragePointer(NULL, 0, thread);

    furi_assert(thread->state == FuriThreadStateStarting);
    furi_thread_set_state(thread, FuriThreadStateRunning);

    TaskHandle_t task_handle = xTaskGetCurrentTaskHandle();
    if(thread->heap_trace_enabled == true) {
        memmgr_heap_enable_thread_trace((FuriThreadId)task_handle);
    }

    // Run user code; its return value becomes the thread return code
    thread->ret = thread->callback(thread->context);

    if(thread->heap_trace_enabled == true) {
        // Short delay before sampling the balance — presumably lets pending
        // deferred frees land first; TODO confirm the exact reason for 33 ms
        furi_delay_ms(33);
        thread->heap_size = memmgr_heap_get_thread_memory((FuriThreadId)task_handle);
        // Non-zero balance means this thread leaked allocations: log as error
        furi_log_print_format( //-V576
            thread->heap_size ? FuriLogLevelError : FuriLogLevelInfo,
            TAG,
            "%s allocation balance: %u",
            thread->name ? thread->name : "Thread",
            thread->heap_size);
        memmgr_heap_disable_thread_trace((FuriThreadId)task_handle);
    }

    furi_assert(thread->state == FuriThreadStateRunning);

    if(thread->is_service) {
        // Service threads run on pool-allocated static TCB/stack (see
        // furi_thread_start), which is never returned to the pool
        FURI_LOG_W(
            TAG,
            "%s service thread TCB memory will not be reclaimed",
            thread->name ? thread->name : "<unknown service>");
    }

    // flush stdout
    __furi_thread_stdout_flush(thread);

    furi_thread_set_state(thread, FuriThreadStateStopped);

    vTaskDelete(NULL);
    furi_thread_catch(); // unreachable unless vTaskDelete misbehaves
}
  91. FuriThread* furi_thread_alloc() {
  92. FuriThread* thread = malloc(sizeof(FuriThread));
  93. thread->output.buffer = furi_string_alloc();
  94. thread->is_service = false;
  95. FuriThread* parent = NULL;
  96. if(xTaskGetSchedulerState() != taskSCHEDULER_NOT_STARTED) {
  97. // TLS is not available, if we called not from thread context
  98. parent = pvTaskGetThreadLocalStoragePointer(NULL, 0);
  99. if(parent && parent->appid) {
  100. furi_thread_set_appid(thread, parent->appid);
  101. } else {
  102. furi_thread_set_appid(thread, "unknown");
  103. }
  104. } else {
  105. // if scheduler is not started, we are starting driver thread
  106. furi_thread_set_appid(thread, "driver");
  107. }
  108. FuriHalRtcHeapTrackMode mode = furi_hal_rtc_get_heap_track_mode();
  109. if(mode == FuriHalRtcHeapTrackModeAll) {
  110. thread->heap_trace_enabled = true;
  111. } else if(mode == FuriHalRtcHeapTrackModeTree && furi_thread_get_current_id()) {
  112. if(parent) thread->heap_trace_enabled = parent->heap_trace_enabled;
  113. } else {
  114. thread->heap_trace_enabled = false;
  115. }
  116. return thread;
  117. }
  118. FuriThread* furi_thread_alloc_ex(
  119. const char* name,
  120. uint32_t stack_size,
  121. FuriThreadCallback callback,
  122. void* context) {
  123. FuriThread* thread = furi_thread_alloc();
  124. furi_thread_set_name(thread, name);
  125. furi_thread_set_stack_size(thread, stack_size);
  126. furi_thread_set_callback(thread, callback);
  127. furi_thread_set_context(thread, context);
  128. return thread;
  129. }
  130. void furi_thread_free(FuriThread* thread) {
  131. furi_assert(thread);
  132. // Ensure that use join before free
  133. furi_assert(thread->state == FuriThreadStateStopped);
  134. furi_assert(thread->task_handle == NULL);
  135. if(thread->name) free(thread->name);
  136. if(thread->appid) free(thread->appid);
  137. furi_string_free(thread->output.buffer);
  138. free(thread);
  139. }
  140. void furi_thread_set_name(FuriThread* thread, const char* name) {
  141. furi_assert(thread);
  142. furi_assert(thread->state == FuriThreadStateStopped);
  143. if(thread->name) free(thread->name);
  144. thread->name = name ? strdup(name) : NULL;
  145. }
  146. void furi_thread_set_appid(FuriThread* thread, const char* appid) {
  147. furi_assert(thread);
  148. furi_assert(thread->state == FuriThreadStateStopped);
  149. if(thread->appid) free(thread->appid);
  150. thread->appid = appid ? strdup(appid) : NULL;
  151. }
  152. void furi_thread_mark_as_service(FuriThread* thread) {
  153. thread->is_service = true;
  154. }
  155. void furi_thread_set_stack_size(FuriThread* thread, size_t stack_size) {
  156. furi_assert(thread);
  157. furi_assert(thread->state == FuriThreadStateStopped);
  158. furi_assert(stack_size % 4 == 0);
  159. thread->stack_size = stack_size;
  160. }
/** Set the thread entry point callback. Thread must not be running. */
void furi_thread_set_callback(FuriThread* thread, FuriThreadCallback callback) {
    furi_assert(thread);
    furi_assert(thread->state == FuriThreadStateStopped);
    thread->callback = callback;
}
/** Set the user context passed to the callback. Thread must not be running. */
void furi_thread_set_context(FuriThread* thread, void* context) {
    furi_assert(thread);
    furi_assert(thread->state == FuriThreadStateStopped);
    thread->context = context;
}
/** Set the thread priority (must be within the valid FuriThreadPriority
 * range). Thread must not be running. */
void furi_thread_set_priority(FuriThread* thread, FuriThreadPriority priority) {
    furi_assert(thread);
    furi_assert(thread->state == FuriThreadStateStopped);
    furi_assert(priority >= FuriThreadPriorityIdle && priority <= FuriThreadPriorityIsr);
    thread->priority = priority;
}
  177. void furi_thread_set_current_priority(FuriThreadPriority priority) {
  178. UBaseType_t new_priority = priority ? priority : FuriThreadPriorityNormal;
  179. vTaskPrioritySet(NULL, new_priority);
  180. }
/** Return the calling thread's current FreeRTOS priority. */
FuriThreadPriority furi_thread_get_current_priority() {
    return (FuriThreadPriority)uxTaskPriorityGet(NULL);
}
/** Register a callback invoked on every state transition.
 * Thread must not be running. */
void furi_thread_set_state_callback(FuriThread* thread, FuriThreadStateCallback callback) {
    furi_assert(thread);
    furi_assert(thread->state == FuriThreadStateStopped);
    thread->state_callback = callback;
}
/** Set the user context for the state callback. Thread must not be running. */
void furi_thread_set_state_context(FuriThread* thread, void* context) {
    furi_assert(thread);
    furi_assert(thread->state == FuriThreadStateStopped);
    thread->state_context = context;
}
/** Return the current lifecycle state of the thread. */
FuriThreadState furi_thread_get_state(FuriThread* thread) {
    furi_assert(thread);
    return thread->state;
}
/**
 * Start the thread by creating the underlying FreeRTOS task.
 * Service threads get a statically allocated TCB and stack from the memory
 * pool (never reclaimed); regular threads use dynamic task creation.
 */
void furi_thread_start(FuriThread* thread) {
    furi_assert(thread);
    furi_assert(thread->callback);
    furi_assert(thread->state == FuriThreadStateStopped);
    // Stack depth is passed to FreeRTOS as a 16-bit word count: bound it
    furi_assert(thread->stack_size > 0 && thread->stack_size < (UINT16_MAX * sizeof(StackType_t)));

    furi_thread_set_state(thread, FuriThreadStateStarting);

    // FreeRTOS expects the stack depth in words, not bytes
    uint32_t stack = thread->stack_size / sizeof(StackType_t);
    UBaseType_t priority = thread->priority ? thread->priority : FuriThreadPriorityNormal;
    if(thread->is_service) {
        thread->task_handle = xTaskCreateStatic(
            furi_thread_body,
            thread->name,
            stack,
            thread,
            priority,
            memmgr_alloc_from_pool(sizeof(StackType_t) * stack),
            memmgr_alloc_from_pool(sizeof(StaticTask_t)));
    } else {
        BaseType_t ret = xTaskCreate(
            furi_thread_body, thread->name, stack, thread, priority, &thread->task_handle);
        furi_check(ret == pdPASS);
    }

    furi_check(thread->task_handle);
}
  222. void furi_thread_cleanup_tcb_event(TaskHandle_t task) {
  223. FuriThread* thread = pvTaskGetThreadLocalStoragePointer(task, 0);
  224. if(thread) {
  225. // clear thread local storage
  226. vTaskSetThreadLocalStoragePointer(task, 0, NULL);
  227. furi_assert(thread->task_handle == task);
  228. thread->task_handle = NULL;
  229. }
  230. }
/**
 * Block until the thread's task handle is released by the kernel cleanup
 * hook. Must not be called from the thread itself. Always returns true.
 */
bool furi_thread_join(FuriThread* thread) {
    furi_assert(thread);
    // A thread cannot join itself
    furi_check(furi_thread_get_current() != thread);

    // !!! IMPORTANT NOTICE !!!
    //
    // If your thread exited, but your app stuck here: some other thread uses
    // all cpu time, which delays kernel from releasing task handle
    while(thread->task_handle) {
        furi_delay_ms(10);
    }

    return true;
}
/** Return the thread id (the FreeRTOS task handle; NULL when not started). */
FuriThreadId furi_thread_get_id(FuriThread* thread) {
    furi_assert(thread);
    return thread->task_handle;
}
/** Enable per-thread heap allocation tracing. Thread must not be running. */
void furi_thread_enable_heap_trace(FuriThread* thread) {
    furi_assert(thread);
    furi_assert(thread->state == FuriThreadStateStopped);
    thread->heap_trace_enabled = true;
}
/** Disable per-thread heap allocation tracing. Thread must not be running. */
void furi_thread_disable_heap_trace(FuriThread* thread) {
    furi_assert(thread);
    furi_assert(thread->state == FuriThreadStateStopped);
    thread->heap_trace_enabled = false;
}
/** Return the heap balance sampled when the thread exited.
 * Only valid when heap tracing was enabled. */
size_t furi_thread_get_heap_size(FuriThread* thread) {
    furi_assert(thread);
    furi_assert(thread->heap_trace_enabled == true);
    return thread->heap_size;
}
/** Return the callback's return code. Thread must be stopped. */
int32_t furi_thread_get_return_code(FuriThread* thread) {
    furi_assert(thread);
    furi_assert(thread->state == FuriThreadStateStopped);
    return thread->ret;
}
/** Return the id (FreeRTOS task handle) of the calling thread. */
FuriThreadId furi_thread_get_current_id() {
    return xTaskGetCurrentTaskHandle();
}
  270. FuriThread* furi_thread_get_current() {
  271. FuriThread* thread = pvTaskGetThreadLocalStoragePointer(NULL, 0);
  272. return thread;
  273. }
/** Yield the processor to another ready task. Must not be called from an ISR. */
void furi_thread_yield() {
    furi_assert(!FURI_IS_IRQ_MODE());
    taskYIELD();
}
  278. /* Limits */
  279. #define MAX_BITS_TASK_NOTIFY 31U
  280. #define MAX_BITS_EVENT_GROUPS 24U
  281. #define THREAD_FLAGS_INVALID_BITS (~((1UL << MAX_BITS_TASK_NOTIFY) - 1U))
  282. #define EVENT_FLAGS_INVALID_BITS (~((1UL << MAX_BITS_EVENT_GROUPS) - 1U))
/**
 * Set notification flags on a thread (CMSIS-RTOS2 style, ISR-safe).
 * Returns the notification value after setting, or a FuriStatus error code
 * cast to uint32_t on invalid parameters.
 */
uint32_t furi_thread_flags_set(FuriThreadId thread_id, uint32_t flags) {
    TaskHandle_t hTask = (TaskHandle_t)thread_id;
    uint32_t rflags;
    BaseType_t yield;

    if((hTask == NULL) || ((flags & THREAD_FLAGS_INVALID_BITS) != 0U)) {
        rflags = (uint32_t)FuriStatusErrorParameter;
    } else {
        rflags = (uint32_t)FuriStatusError;

        if(FURI_IS_IRQ_MODE()) {
            yield = pdFALSE;
            // Set the bits, then read back the resulting value with a
            // second eNoAction query
            (void)xTaskNotifyIndexedFromISR(hTask, THREAD_NOTIFY_INDEX, flags, eSetBits, &yield);
            (void)xTaskNotifyAndQueryIndexedFromISR(
                hTask, THREAD_NOTIFY_INDEX, 0, eNoAction, &rflags, NULL);

            portYIELD_FROM_ISR(yield);
        } else {
            (void)xTaskNotifyIndexed(hTask, THREAD_NOTIFY_INDEX, flags, eSetBits);
            (void)xTaskNotifyAndQueryIndexed(hTask, THREAD_NOTIFY_INDEX, 0, eNoAction, &rflags);
        }
    }

    /* Return flags after setting */
    return (rflags);
}
/**
 * Clear notification flags on the calling thread.
 * Returns the flags as they were before clearing, or a FuriStatus error code
 * cast to uint32_t (not callable from an ISR).
 */
uint32_t furi_thread_flags_clear(uint32_t flags) {
    TaskHandle_t hTask;
    uint32_t rflags, cflags;

    if(FURI_IS_IRQ_MODE()) {
        rflags = (uint32_t)FuriStatusErrorISR;
    } else if((flags & THREAD_FLAGS_INVALID_BITS) != 0U) {
        rflags = (uint32_t)FuriStatusErrorParameter;
    } else {
        hTask = xTaskGetCurrentTaskHandle();
        // Read the current value without modifying it...
        if(xTaskNotifyAndQueryIndexed(hTask, THREAD_NOTIFY_INDEX, 0, eNoAction, &cflags) ==
           pdPASS) {
            rflags = cflags;
            cflags &= ~flags;
            // ...then overwrite it with the requested bits cleared
            if(xTaskNotifyIndexed(hTask, THREAD_NOTIFY_INDEX, cflags, eSetValueWithOverwrite) !=
               pdPASS) {
                rflags = (uint32_t)FuriStatusError;
            }
        } else {
            rflags = (uint32_t)FuriStatusError;
        }
    }

    /* Return flags before clearing */
    return (rflags);
}
  329. uint32_t furi_thread_flags_get(void) {
  330. TaskHandle_t hTask;
  331. uint32_t rflags;
  332. if(FURI_IS_IRQ_MODE()) {
  333. rflags = (uint32_t)FuriStatusErrorISR;
  334. } else {
  335. hTask = xTaskGetCurrentTaskHandle();
  336. if(xTaskNotifyAndQueryIndexed(hTask, THREAD_NOTIFY_INDEX, 0, eNoAction, &rflags) !=
  337. pdPASS) {
  338. rflags = (uint32_t)FuriStatusError;
  339. }
  340. }
  341. return (rflags);
  342. }
/**
 * Wait for notification flags on the calling thread (CMSIS-RTOS2 semantics).
 * options: FuriFlagWaitAll (all requested flags must be set) and/or
 * FuriFlagNoClear (leave flags set on return). timeout is in ticks.
 * Returns the matched flags, or a FuriStatus error code cast to uint32_t.
 */
uint32_t furi_thread_flags_wait(uint32_t flags, uint32_t options, uint32_t timeout) {
    uint32_t rflags, nval;
    uint32_t clear;
    TickType_t t0, td, tout;
    BaseType_t rval;

    if(FURI_IS_IRQ_MODE()) {
        rflags = (uint32_t)FuriStatusErrorISR;
    } else if((flags & THREAD_FLAGS_INVALID_BITS) != 0U) {
        rflags = (uint32_t)FuriStatusErrorParameter;
    } else {
        // FuriFlagNoClear: do not consume the flags on exit from the wait
        if((options & FuriFlagNoClear) == FuriFlagNoClear) {
            clear = 0U;
        } else {
            clear = flags;
        }

        rflags = 0U;
        tout = timeout;
        t0 = xTaskGetTickCount();
        do {
            // nval receives the notification value before `clear` is applied
            rval = xTaskNotifyWaitIndexed(THREAD_NOTIFY_INDEX, 0, clear, &nval, tout);
            if(rval == pdPASS) {
                // Accumulate the flags observed across wait iterations
                rflags &= flags;
                rflags |= nval;

                if((options & FuriFlagWaitAll) == FuriFlagWaitAll) {
                    if((flags & rflags) == flags) {
                        break;
                    } else {
                        if(timeout == 0U) {
                            rflags = (uint32_t)FuriStatusErrorResource;
                            break;
                        }
                    }
                } else {
                    // Wait-any: any one requested flag satisfies the wait
                    if((flags & rflags) != 0) {
                        break;
                    } else {
                        if(timeout == 0U) {
                            rflags = (uint32_t)FuriStatusErrorResource;
                            break;
                        }
                    }
                }

                /* Update timeout */
                td = xTaskGetTickCount() - t0;
                if(td > tout) {
                    tout = 0;
                } else {
                    tout -= td;
                }
            } else {
                // Wait itself failed: zero timeout means "would block",
                // otherwise the wait timed out
                if(timeout == 0) {
                    rflags = (uint32_t)FuriStatusErrorResource;
                } else {
                    rflags = (uint32_t)FuriStatusErrorTimeout;
                }
            }
        } while(rval != pdFAIL);
    }

    /* Return flags before clearing */
    return (rflags);
}
  404. uint32_t furi_thread_enumerate(FuriThreadId* thread_array, uint32_t array_items) {
  405. uint32_t i, count;
  406. TaskStatus_t* task;
  407. if(FURI_IS_IRQ_MODE() || (thread_array == NULL) || (array_items == 0U)) {
  408. count = 0U;
  409. } else {
  410. vTaskSuspendAll();
  411. count = uxTaskGetNumberOfTasks();
  412. task = pvPortMalloc(count * sizeof(TaskStatus_t));
  413. if(task != NULL) {
  414. count = uxTaskGetSystemState(task, count, NULL);
  415. for(i = 0U; (i < count) && (i < array_items); i++) {
  416. thread_array[i] = (FuriThreadId)task[i].xHandle;
  417. }
  418. count = i;
  419. }
  420. (void)xTaskResumeAll();
  421. vPortFree(task);
  422. }
  423. return (count);
  424. }
  425. const char* furi_thread_get_name(FuriThreadId thread_id) {
  426. TaskHandle_t hTask = (TaskHandle_t)thread_id;
  427. const char* name;
  428. if(FURI_IS_IRQ_MODE() || (hTask == NULL)) {
  429. name = NULL;
  430. } else {
  431. name = pcTaskGetName(hTask);
  432. }
  433. return (name);
  434. }
  435. const char* furi_thread_get_appid(FuriThreadId thread_id) {
  436. TaskHandle_t hTask = (TaskHandle_t)thread_id;
  437. const char* appid = "system";
  438. if(!FURI_IS_IRQ_MODE() && (hTask != NULL)) {
  439. FuriThread* thread = (FuriThread*)pvTaskGetThreadLocalStoragePointer(hTask, 0);
  440. if(thread) {
  441. appid = thread->appid;
  442. }
  443. }
  444. return (appid);
  445. }
  446. uint32_t furi_thread_get_stack_space(FuriThreadId thread_id) {
  447. TaskHandle_t hTask = (TaskHandle_t)thread_id;
  448. uint32_t sz;
  449. if(FURI_IS_IRQ_MODE() || (hTask == NULL)) {
  450. sz = 0U;
  451. } else {
  452. sz = (uint32_t)(uxTaskGetStackHighWaterMark(hTask) * sizeof(StackType_t));
  453. }
  454. return (sz);
  455. }
  456. static size_t __furi_thread_stdout_write(FuriThread* thread, const char* data, size_t size) {
  457. if(thread->output.write_callback != NULL) {
  458. thread->output.write_callback(data, size);
  459. } else {
  460. furi_hal_console_tx((const uint8_t*)data, size);
  461. }
  462. return size;
  463. }
  464. static int32_t __furi_thread_stdout_flush(FuriThread* thread) {
  465. FuriString* buffer = thread->output.buffer;
  466. size_t size = furi_string_size(buffer);
  467. if(size > 0) {
  468. __furi_thread_stdout_write(thread, furi_string_get_cstr(buffer), size);
  469. furi_string_reset(buffer);
  470. }
  471. return 0;
  472. }
/** Install a stdout sink for the calling thread.
 * Buffered data is flushed through the OLD sink before switching. */
void furi_thread_set_stdout_callback(FuriThreadStdoutWriteCallback callback) {
    FuriThread* thread = furi_thread_get_current();
    furi_assert(thread);
    __furi_thread_stdout_flush(thread);
    thread->output.write_callback = callback;
}
  479. FuriThreadStdoutWriteCallback furi_thread_get_stdout_callback() {
  480. FuriThread* thread = furi_thread_get_current();
  481. furi_assert(thread);
  482. return thread->output.write_callback;
  483. }
/**
 * Write to the calling thread's stdout, line-buffered.
 * NULL data or zero size triggers a flush instead (returns 0).
 * Otherwise returns size: all bytes are either written or buffered.
 */
size_t furi_thread_stdout_write(const char* data, size_t size) {
    FuriThread* thread = furi_thread_get_current();
    furi_assert(thread);
    if(size == 0 || data == NULL) {
        // Flush request; __furi_thread_stdout_flush always returns 0
        return __furi_thread_stdout_flush(thread);
    } else {
        if(data[size - 1] == '\n') {
            // if the last character is a newline, we can flush buffer and write data as is, wo buffers
            __furi_thread_stdout_flush(thread);
            __furi_thread_stdout_write(thread, data, size);
        } else {
            // string_cat doesn't work here because we need to write the exact size data
            for(size_t i = 0; i < size; i++) {
                furi_string_push_back(thread->output.buffer, data[i]);
                if(data[i] == '\n') {
                    // Flush each completed line as soon as it is buffered
                    __furi_thread_stdout_flush(thread);
                }
            }
        }
    }
    return size;
}
  506. int32_t furi_thread_stdout_flush() {
  507. FuriThread* thread = furi_thread_get_current();
  508. furi_assert(thread);
  509. return __furi_thread_stdout_flush(thread);
  510. }
  511. void furi_thread_suspend(FuriThreadId thread_id) {
  512. TaskHandle_t hTask = (TaskHandle_t)thread_id;
  513. vTaskSuspend(hTask);
  514. }
  515. void furi_thread_resume(FuriThreadId thread_id) {
  516. TaskHandle_t hTask = (TaskHandle_t)thread_id;
  517. if(FURI_IS_IRQ_MODE()) {
  518. xTaskResumeFromISR(hTask);
  519. } else {
  520. vTaskResume(hTask);
  521. }
  522. }
  523. bool furi_thread_is_suspended(FuriThreadId thread_id) {
  524. TaskHandle_t hTask = (TaskHandle_t)thread_id;
  525. return eTaskGetState(hTask) == eSuspended;
  526. }