/* thread.c */
  1. #include "thread.h"
  2. #include "kernel.h"
  3. #include "memmgr.h"
  4. #include "memmgr_heap.h"
  5. #include "check.h"
  6. #include "common_defines.h"
  7. #include "mutex.h"
  8. #include "string.h"
  9. #include <task.h>
  10. #include "log.h"
  11. #include <furi_hal_rtc.h>
  12. #include <furi_hal_console.h>
#define TAG "FuriThread" // log tag for this module
#define THREAD_NOTIFY_INDEX 1 // Index 0 is used for stream buffers
typedef struct FuriThreadStdout FuriThreadStdout;

/** Per-thread buffered stdout state. */
struct FuriThreadStdout {
    FuriThreadStdoutWriteCallback write_callback; // output sink; NULL falls back to HAL console
    FuriString* buffer; // pending output, flushed on newline or explicit flush
};

/** Thread control block wrapping a FreeRTOS task. */
struct FuriThread {
    bool is_service; // created statically from the memory pool; memory is never reclaimed
    FuriThreadState state; // Stopped -> Starting -> Running -> Stopped
    int32_t ret; // callback return value, read via furi_thread_get_return_code()
    FuriThreadCallback callback; // thread entry point
    void* context; // opaque argument passed to callback
    FuriThreadStateCallback state_callback; // invoked on every state transition
    void* state_context; // opaque argument for state_callback
    char* name; // owned copy (strdup) or NULL
    configSTACK_DEPTH_TYPE stack_size; // in bytes; converted to 32-bit words at start
    FuriThreadPriority priority; // 0 means "use FuriThreadPriorityNormal"
    TaskHandle_t task_handle; // FreeRTOS handle; cleared to NULL when the thread exits
    bool heap_trace_enabled; // trace allocations made by this thread
    size_t heap_size; // allocation balance sampled at thread exit
    FuriThreadStdout output; // buffered stdout state
};
/* Internal stdout helpers, defined at the bottom of this file */
static size_t __furi_thread_stdout_write(FuriThread* thread, const char* data, size_t size);
static int32_t __furi_thread_stdout_flush(FuriThread* thread);
/** Catch threads that are trying to exit wrong way.
 *
 * NOTE(review): called from furi_thread_body after vTaskDelete(NULL);
 * presumably also usable as a return-address trap — confirm before reuse.
 * Never returns: crashes the system with a diagnostic message.
 */
__attribute__((__noreturn__)) void furi_thread_catch() {
    asm volatile("nop"); // extra magic
    furi_crash("You are doing it wrong");
    __builtin_unreachable();
}
  44. static void furi_thread_set_state(FuriThread* thread, FuriThreadState state) {
  45. furi_assert(thread);
  46. thread->state = state;
  47. if(thread->state_callback) {
  48. thread->state_callback(state, thread->state_context);
  49. }
  50. }
/** FreeRTOS task entry point that wraps the user callback.
 *
 * Runs inside the newly created task: transitions the thread
 * Starting -> Running -> Stopped, optionally traces heap usage,
 * flushes buffered stdout, and finally deletes the task (never returns).
 */
static void furi_thread_body(void* context) {
    furi_assert(context);
    FuriThread* thread = context;
    // store thread instance to thread local storage
    // (slot 0, read back by furi_thread_get_current())
    furi_assert(pvTaskGetThreadLocalStoragePointer(NULL, 0) == NULL);
    vTaskSetThreadLocalStoragePointer(NULL, 0, thread);
    furi_assert(thread->state == FuriThreadStateStarting);
    furi_thread_set_state(thread, FuriThreadStateRunning);
    TaskHandle_t task_handle = xTaskGetCurrentTaskHandle();
    if(thread->heap_trace_enabled == true) {
        memmgr_heap_enable_thread_trace((FuriThreadId)task_handle);
    }
    // Run the user routine; return value kept for furi_thread_get_return_code()
    thread->ret = thread->callback(thread->context);
    if(thread->heap_trace_enabled == true) {
        // presumably lets in-flight frees settle before sampling — TODO confirm
        furi_delay_ms(33);
        thread->heap_size = memmgr_heap_get_thread_memory((FuriThreadId)task_handle);
        // Non-zero balance indicates a leak, so log it as an error
        furi_log_print_format(
            thread->heap_size ? FuriLogLevelError : FuriLogLevelInfo,
            TAG,
            "%s allocation balance: %d",
            thread->name ? thread->name : "Thread",
            thread->heap_size);
        memmgr_heap_disable_thread_trace((FuriThreadId)task_handle);
    }
    furi_assert(thread->state == FuriThreadStateRunning);
    if(thread->is_service) {
        FURI_LOG_E(
            TAG,
            "%s service thread exited. Thread memory cannot be reclaimed.",
            thread->name ? thread->name : "<unknown service>");
    }
    // flush stdout
    __furi_thread_stdout_flush(thread);
    // from here we can't use thread pointer
    furi_thread_set_state(thread, FuriThreadStateStopped);
    // clear thread local storage
    furi_assert(pvTaskGetThreadLocalStoragePointer(NULL, 0) != NULL);
    vTaskSetThreadLocalStoragePointer(NULL, 0, NULL);
    // NOTE(review): thread IS still dereferenced here despite the comment
    // above; furi_thread_join() polls this field, so the owner must not free
    // the thread from the Stopped state callback — verify ownership contract.
    thread->task_handle = NULL;
    vTaskDelete(NULL);
    furi_thread_catch(); // unreachable: vTaskDelete(NULL) does not return
}
/** Allocate a new thread instance.
 *
 * NOTE(review): state, callback, name, etc. are never set explicitly —
 * this relies on malloc() zero-initializing the allocation (and on
 * FuriThreadStateStopped presumably being 0); confirm against the furi
 * memory manager before reusing this pattern.
 */
FuriThread* furi_thread_alloc() {
    FuriThread* thread = malloc(sizeof(FuriThread));
    thread->output.buffer = furi_string_alloc();
    thread->is_service = false;
    // Inherit heap tracing from the creating thread, when called from one
    if(furi_thread_get_current_id()) {
        FuriThread* parent = pvTaskGetThreadLocalStoragePointer(NULL, 0);
        if(parent) thread->heap_trace_enabled = parent->heap_trace_enabled;
    }
    return thread;
}
  103. void furi_thread_free(FuriThread* thread) {
  104. furi_assert(thread);
  105. furi_assert(thread->state == FuriThreadStateStopped);
  106. if(thread->name) free((void*)thread->name);
  107. furi_string_free(thread->output.buffer);
  108. free(thread);
  109. }
  110. void furi_thread_set_name(FuriThread* thread, const char* name) {
  111. furi_assert(thread);
  112. furi_assert(thread->state == FuriThreadStateStopped);
  113. if(thread->name) free((void*)thread->name);
  114. thread->name = name ? strdup(name) : NULL;
  115. }
  116. void furi_thread_mark_as_service(FuriThread* thread) {
  117. thread->is_service = true;
  118. }
  119. void furi_thread_set_stack_size(FuriThread* thread, size_t stack_size) {
  120. furi_assert(thread);
  121. furi_assert(thread->state == FuriThreadStateStopped);
  122. furi_assert(stack_size % 4 == 0);
  123. thread->stack_size = stack_size;
  124. }
  125. void furi_thread_set_callback(FuriThread* thread, FuriThreadCallback callback) {
  126. furi_assert(thread);
  127. furi_assert(thread->state == FuriThreadStateStopped);
  128. thread->callback = callback;
  129. }
  130. void furi_thread_set_context(FuriThread* thread, void* context) {
  131. furi_assert(thread);
  132. furi_assert(thread->state == FuriThreadStateStopped);
  133. thread->context = context;
  134. }
  135. void furi_thread_set_priority(FuriThread* thread, FuriThreadPriority priority) {
  136. furi_assert(thread);
  137. furi_assert(thread->state == FuriThreadStateStopped);
  138. furi_assert(priority >= FuriThreadPriorityIdle && priority <= FuriThreadPriorityIsr);
  139. thread->priority = priority;
  140. }
  141. void furi_thread_set_state_callback(FuriThread* thread, FuriThreadStateCallback callback) {
  142. furi_assert(thread);
  143. furi_assert(thread->state == FuriThreadStateStopped);
  144. thread->state_callback = callback;
  145. }
  146. void furi_thread_set_state_context(FuriThread* thread, void* context) {
  147. furi_assert(thread);
  148. furi_assert(thread->state == FuriThreadStateStopped);
  149. thread->state_context = context;
  150. }
  151. FuriThreadState furi_thread_get_state(FuriThread* thread) {
  152. furi_assert(thread);
  153. return thread->state;
  154. }
/** Start the thread.
 *
 * Requires a configured callback and stack size; the thread must be
 * stopped. Service threads are created statically from the memory pool,
 * regular threads from the FreeRTOS heap.
 */
void furi_thread_start(FuriThread* thread) {
    furi_assert(thread);
    furi_assert(thread->callback);
    furi_assert(thread->state == FuriThreadStateStopped);
    // stack_size is in bytes; FreeRTOS depth fields are 16-bit words counts
    furi_assert(thread->stack_size > 0 && thread->stack_size < 0xFFFF * 4);
    furi_thread_set_state(thread, FuriThreadStateStarting);
    // Convert bytes to 32-bit stack words
    uint32_t stack = thread->stack_size / 4;
    // Priority 0 (unset) falls back to Normal
    UBaseType_t priority = thread->priority ? thread->priority : FuriThreadPriorityNormal;
    if(thread->is_service) {
        // Service thread: TCB and stack come from the non-reclaimable pool
        thread->task_handle = xTaskCreateStatic(
            furi_thread_body,
            thread->name,
            stack,
            thread,
            priority,
            memmgr_alloc_from_pool(sizeof(StackType_t) * stack),
            memmgr_alloc_from_pool(sizeof(StaticTask_t)));
    } else {
        // Regular thread: task memory comes from the FreeRTOS heap
        BaseType_t ret = xTaskCreate(
            furi_thread_body, thread->name, stack, thread, priority, &thread->task_handle);
        furi_check(ret == pdPASS);
    }
    furi_check(thread->task_handle);
}
  179. bool furi_thread_join(FuriThread* thread) {
  180. furi_assert(thread);
  181. furi_check(furi_thread_get_current() != thread);
  182. // Wait for thread to stop
  183. while(thread->task_handle) {
  184. furi_delay_ms(10);
  185. }
  186. return true;
  187. }
  188. FuriThreadId furi_thread_get_id(FuriThread* thread) {
  189. furi_assert(thread);
  190. return thread->task_handle;
  191. }
  192. void furi_thread_enable_heap_trace(FuriThread* thread) {
  193. furi_assert(thread);
  194. furi_assert(thread->state == FuriThreadStateStopped);
  195. furi_assert(thread->heap_trace_enabled == false);
  196. thread->heap_trace_enabled = true;
  197. }
  198. void furi_thread_disable_heap_trace(FuriThread* thread) {
  199. furi_assert(thread);
  200. furi_assert(thread->state == FuriThreadStateStopped);
  201. furi_assert(thread->heap_trace_enabled == true);
  202. thread->heap_trace_enabled = false;
  203. }
  204. size_t furi_thread_get_heap_size(FuriThread* thread) {
  205. furi_assert(thread);
  206. furi_assert(thread->heap_trace_enabled == true);
  207. return thread->heap_size;
  208. }
  209. int32_t furi_thread_get_return_code(FuriThread* thread) {
  210. furi_assert(thread);
  211. furi_assert(thread->state == FuriThreadStateStopped);
  212. return thread->ret;
  213. }
  214. FuriThreadId furi_thread_get_current_id() {
  215. return xTaskGetCurrentTaskHandle();
  216. }
  217. FuriThread* furi_thread_get_current() {
  218. FuriThread* thread = pvTaskGetThreadLocalStoragePointer(NULL, 0);
  219. furi_assert(thread != NULL);
  220. return thread;
  221. }
/** Yield the processor to another ready task. Not callable from an ISR. */
void furi_thread_yield() {
    furi_assert(!FURI_IS_IRQ_MODE());
    taskYIELD();
}
/* Limits */
#define MAX_BITS_TASK_NOTIFY 31U // usable flag bits in a task notification value
#define MAX_BITS_EVENT_GROUPS 24U // usable flag bits in a FreeRTOS event group
#define THREAD_FLAGS_INVALID_BITS (~((1UL << MAX_BITS_TASK_NOTIFY) - 1U))
#define EVENT_FLAGS_INVALID_BITS (~((1UL << MAX_BITS_EVENT_GROUPS) - 1U))
/** Set notification flags on a target thread (CMSIS-RTOS2 style). ISR-safe.
 *
 * @param thread_id target thread; NULL is rejected
 * @param flags     bits to set; must fit in MAX_BITS_TASK_NOTIFY
 * @return the thread's flags after setting, or FuriStatusErrorParameter
 *         cast to uint32_t on bad arguments
 */
uint32_t furi_thread_flags_set(FuriThreadId thread_id, uint32_t flags) {
    TaskHandle_t hTask = (TaskHandle_t)thread_id;
    uint32_t rflags;
    BaseType_t yield;
    if((hTask == NULL) || ((flags & THREAD_FLAGS_INVALID_BITS) != 0U)) {
        rflags = (uint32_t)FuriStatusErrorParameter;
    } else {
        rflags = (uint32_t)FuriStatusError;
        if(FURI_IS_IRQ_MODE()) {
            yield = pdFALSE;
            // Set the bits, then read the resulting value without altering it
            (void)xTaskNotifyIndexedFromISR(hTask, THREAD_NOTIFY_INDEX, flags, eSetBits, &yield);
            (void)xTaskNotifyAndQueryIndexedFromISR(
                hTask, THREAD_NOTIFY_INDEX, 0, eNoAction, &rflags, NULL);
            portYIELD_FROM_ISR(yield);
        } else {
            (void)xTaskNotifyIndexed(hTask, THREAD_NOTIFY_INDEX, flags, eSetBits);
            (void)xTaskNotifyAndQueryIndexed(hTask, THREAD_NOTIFY_INDEX, 0, eNoAction, &rflags);
        }
    }
    /* Return flags after setting */
    return (rflags);
}
/** Clear notification flags of the CALLING thread. Not ISR-callable.
 *
 * @param flags bits to clear; must fit in MAX_BITS_TASK_NOTIFY
 * @return flags before clearing, or a FuriStatus error cast to uint32_t
 */
uint32_t furi_thread_flags_clear(uint32_t flags) {
    TaskHandle_t hTask;
    uint32_t rflags, cflags;
    if(FURI_IS_IRQ_MODE()) {
        rflags = (uint32_t)FuriStatusErrorISR;
    } else if((flags & THREAD_FLAGS_INVALID_BITS) != 0U) {
        rflags = (uint32_t)FuriStatusErrorParameter;
    } else {
        hTask = xTaskGetCurrentTaskHandle();
        // Read current flags without consuming the notification
        if(xTaskNotifyAndQueryIndexed(hTask, THREAD_NOTIFY_INDEX, 0, eNoAction, &cflags) ==
           pdPASS) {
            rflags = cflags;
            cflags &= ~flags;
            // Write back the remaining flags
            if(xTaskNotifyIndexed(hTask, THREAD_NOTIFY_INDEX, cflags, eSetValueWithOverwrite) !=
               pdPASS) {
                rflags = (uint32_t)FuriStatusError;
            }
        } else {
            rflags = (uint32_t)FuriStatusError;
        }
    }
    /* Return flags before clearing */
    return (rflags);
}
  277. uint32_t furi_thread_flags_get(void) {
  278. TaskHandle_t hTask;
  279. uint32_t rflags;
  280. if(FURI_IS_IRQ_MODE()) {
  281. rflags = (uint32_t)FuriStatusErrorISR;
  282. } else {
  283. hTask = xTaskGetCurrentTaskHandle();
  284. if(xTaskNotifyAndQueryIndexed(hTask, THREAD_NOTIFY_INDEX, 0, eNoAction, &rflags) !=
  285. pdPASS) {
  286. rflags = (uint32_t)FuriStatusError;
  287. }
  288. }
  289. return (rflags);
  290. }
/** Wait for notification flags on the calling thread (CMSIS-RTOS2 style).
 *
 * @param flags   flags to wait for; must fit in MAX_BITS_TASK_NOTIFY
 * @param options FuriFlagWaitAll and/or FuriFlagNoClear
 * @param timeout ticks to wait; 0 polls once
 * @return matched flags, or a FuriStatus error cast to uint32_t
 *         (ErrorISR / ErrorParameter / ErrorResource / ErrorTimeout)
 */
uint32_t furi_thread_flags_wait(uint32_t flags, uint32_t options, uint32_t timeout) {
    uint32_t rflags, nval;
    uint32_t clear;
    TickType_t t0, td, tout;
    BaseType_t rval;
    if(FURI_IS_IRQ_MODE()) {
        rflags = (uint32_t)FuriStatusErrorISR;
    } else if((flags & THREAD_FLAGS_INVALID_BITS) != 0U) {
        rflags = (uint32_t)FuriStatusErrorParameter;
    } else {
        // With NoClear the notification bits are left in place on exit
        if((options & FuriFlagNoClear) == FuriFlagNoClear) {
            clear = 0U;
        } else {
            clear = flags;
        }
        rflags = 0U;
        tout = timeout;
        t0 = xTaskGetTickCount();
        do {
            // Wait for any notification; 'clear' bits are consumed on exit
            rval = xTaskNotifyWaitIndexed(THREAD_NOTIFY_INDEX, 0, clear, &nval, tout);
            if(rval == pdPASS) {
                // Accumulate requested flags seen across iterations
                rflags &= flags;
                rflags |= nval;
                if((options & FuriFlagWaitAll) == FuriFlagWaitAll) {
                    // Wait-all: done only when every requested flag is present
                    if((flags & rflags) == flags) {
                        break;
                    } else {
                        if(timeout == 0U) {
                            rflags = (uint32_t)FuriStatusErrorResource;
                            break;
                        }
                    }
                } else {
                    // Wait-any: done as soon as one requested flag is present
                    if((flags & rflags) != 0) {
                        break;
                    } else {
                        if(timeout == 0U) {
                            rflags = (uint32_t)FuriStatusErrorResource;
                            break;
                        }
                    }
                }
                /* Update timeout */
                td = xTaskGetTickCount() - t0;
                if(td > tout) {
                    tout = 0;
                } else {
                    tout -= td;
                }
            } else {
                // Wait timed out (or a zero-timeout poll found nothing)
                if(timeout == 0) {
                    rflags = (uint32_t)FuriStatusErrorResource;
                } else {
                    rflags = (uint32_t)FuriStatusErrorTimeout;
                }
            }
        } while(rval != pdFAIL);
    }
    /* Return flags before clearing */
    return (rflags);
}
  352. uint32_t furi_thread_enumerate(FuriThreadId* thread_array, uint32_t array_items) {
  353. uint32_t i, count;
  354. TaskStatus_t* task;
  355. if(FURI_IS_IRQ_MODE() || (thread_array == NULL) || (array_items == 0U)) {
  356. count = 0U;
  357. } else {
  358. vTaskSuspendAll();
  359. count = uxTaskGetNumberOfTasks();
  360. task = pvPortMalloc(count * sizeof(TaskStatus_t));
  361. if(task != NULL) {
  362. count = uxTaskGetSystemState(task, count, NULL);
  363. for(i = 0U; (i < count) && (i < array_items); i++) {
  364. thread_array[i] = (FuriThreadId)task[i].xHandle;
  365. }
  366. count = i;
  367. }
  368. (void)xTaskResumeAll();
  369. vPortFree(task);
  370. }
  371. return (count);
  372. }
  373. const char* furi_thread_get_name(FuriThreadId thread_id) {
  374. TaskHandle_t hTask = (TaskHandle_t)thread_id;
  375. const char* name;
  376. if(FURI_IS_IRQ_MODE() || (hTask == NULL)) {
  377. name = NULL;
  378. } else {
  379. name = pcTaskGetName(hTask);
  380. }
  381. return (name);
  382. }
  383. uint32_t furi_thread_get_stack_space(FuriThreadId thread_id) {
  384. TaskHandle_t hTask = (TaskHandle_t)thread_id;
  385. uint32_t sz;
  386. if(FURI_IS_IRQ_MODE() || (hTask == NULL)) {
  387. sz = 0U;
  388. } else {
  389. sz = (uint32_t)(uxTaskGetStackHighWaterMark(hTask) * sizeof(StackType_t));
  390. }
  391. return (sz);
  392. }
  393. static size_t __furi_thread_stdout_write(FuriThread* thread, const char* data, size_t size) {
  394. if(thread->output.write_callback != NULL) {
  395. thread->output.write_callback(data, size);
  396. } else {
  397. furi_hal_console_tx((const uint8_t*)data, size);
  398. }
  399. return size;
  400. }
  401. static int32_t __furi_thread_stdout_flush(FuriThread* thread) {
  402. FuriString* buffer = thread->output.buffer;
  403. size_t size = furi_string_size(buffer);
  404. if(size > 0) {
  405. __furi_thread_stdout_write(thread, furi_string_get_cstr(buffer), size);
  406. furi_string_reset(buffer);
  407. }
  408. return 0;
  409. }
  410. bool furi_thread_set_stdout_callback(FuriThreadStdoutWriteCallback callback) {
  411. FuriThread* thread = furi_thread_get_current();
  412. __furi_thread_stdout_flush(thread);
  413. thread->output.write_callback = callback;
  414. return true;
  415. }
  416. size_t furi_thread_stdout_write(const char* data, size_t size) {
  417. FuriThread* thread = furi_thread_get_current();
  418. if(size == 0 || data == NULL) {
  419. return __furi_thread_stdout_flush(thread);
  420. } else {
  421. if(data[size - 1] == '\n') {
  422. // if the last character is a newline, we can flush buffer and write data as is, wo buffers
  423. __furi_thread_stdout_flush(thread);
  424. __furi_thread_stdout_write(thread, data, size);
  425. } else {
  426. // string_cat doesn't work here because we need to write the exact size data
  427. for(size_t i = 0; i < size; i++) {
  428. furi_string_push_back(thread->output.buffer, data[i]);
  429. if(data[i] == '\n') {
  430. __furi_thread_stdout_flush(thread);
  431. }
  432. }
  433. }
  434. }
  435. return size;
  436. }
  437. int32_t furi_thread_stdout_flush() {
  438. return __furi_thread_stdout_flush(furi_thread_get_current());
  439. }
  440. void furi_thread_suspend(FuriThreadId thread_id) {
  441. TaskHandle_t hTask = (TaskHandle_t)thread_id;
  442. vTaskSuspend(hTask);
  443. }
  444. void furi_thread_resume(FuriThreadId thread_id) {
  445. TaskHandle_t hTask = (TaskHandle_t)thread_id;
  446. if(FURI_IS_IRQ_MODE()) {
  447. xTaskResumeFromISR(hTask);
  448. } else {
  449. vTaskResume(hTask);
  450. }
  451. }
  452. bool furi_thread_is_suspended(FuriThreadId thread_id) {
  453. TaskHandle_t hTask = (TaskHandle_t)thread_id;
  454. return eTaskGetState(hTask) == eSuspended;
  455. }