thread.c

#include "thread.h"
#include "kernel.h"
#include "memmgr.h"
#include "memmgr_heap.h"
#include "check.h"
#include "common_defines.h"
#include "mutex.h"

#include <task.h>
#include "log.h"
#include <m-string.h>

#include <furi_hal_rtc.h>
#include <furi_hal_console.h>

#define THREAD_NOTIFY_INDEX 1 // Index 0 is used for stream buffers

typedef struct FuriThreadStdout FuriThreadStdout;

struct FuriThreadStdout {
    FuriThreadStdoutWriteCallback write_callback;
    string_t buffer;
};

struct FuriThread {
    bool is_service;
    FuriThreadState state;
    int32_t ret;

    FuriThreadCallback callback;
    void* context;

    FuriThreadStateCallback state_callback;
    void* state_context;

    char* name;
    configSTACK_DEPTH_TYPE stack_size;
    FuriThreadPriority priority;

    TaskHandle_t task_handle;
    bool heap_trace_enabled;
    size_t heap_size;

    FuriThreadStdout output;
};

static size_t __furi_thread_stdout_write(FuriThread* thread, const char* data, size_t size);
static int32_t __furi_thread_stdout_flush(FuriThread* thread);

/** Catch threads that are trying to exit the wrong way */
__attribute__((__noreturn__)) void furi_thread_catch() {
    asm volatile("nop"); // extra magic
    furi_crash("You are doing it wrong");
}

static void furi_thread_set_state(FuriThread* thread, FuriThreadState state) {
    furi_assert(thread);
    thread->state = state;
    if(thread->state_callback) {
        thread->state_callback(state, thread->state_context);
    }
}
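
/* Entry point wrapper for every FuriThread: it stores the FuriThread pointer in
 * FreeRTOS thread-local storage slot 0, moves the thread through the
 * Starting -> Running -> Stopped states, runs the user callback, collects a heap
 * usage snapshot when heap tracing is enabled, flushes stdout and finally deletes
 * the underlying FreeRTOS task. */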
static void furi_thread_body(void* context) {
    furi_assert(context);
    FuriThread* thread = context;

    // store thread instance to thread local storage
    furi_assert(pvTaskGetThreadLocalStoragePointer(NULL, 0) == NULL);
    vTaskSetThreadLocalStoragePointer(NULL, 0, thread);

    furi_assert(thread->state == FuriThreadStateStarting);
    furi_thread_set_state(thread, FuriThreadStateRunning);

    TaskHandle_t task_handle = xTaskGetCurrentTaskHandle();
    if(thread->heap_trace_enabled == true) {
        memmgr_heap_enable_thread_trace((FuriThreadId)task_handle);
    }

    thread->ret = thread->callback(thread->context);

    if(thread->heap_trace_enabled == true) {
        furi_delay_ms(33);
        thread->heap_size = memmgr_heap_get_thread_memory((FuriThreadId)task_handle);
        memmgr_heap_disable_thread_trace((FuriThreadId)task_handle);
    }

    furi_assert(thread->state == FuriThreadStateRunning);

    if(thread->is_service) {
        FURI_LOG_E(
            "Service",
            "%s thread exited. Thread memory cannot be reclaimed.",
            thread->name ? thread->name : "<unknown service>");
    }

    // flush stdout
    __furi_thread_stdout_flush(thread);

    // from here we can't use thread pointer
    furi_thread_set_state(thread, FuriThreadStateStopped);

    // clear thread local storage
    furi_assert(pvTaskGetThreadLocalStoragePointer(NULL, 0) != NULL);
    vTaskSetThreadLocalStoragePointer(NULL, 0, NULL);

    vTaskDelete(NULL);
    furi_thread_catch();
}
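
/* Typical thread life cycle with this API (illustrative sketch only; the callback
 * name `worker_main` is a placeholder, not part of this module):
 *
 *     int32_t worker_main(void* context) {
 *         // ... do work ...
 *         return 0;
 *     }
 *
 *     FuriThread* thread = furi_thread_alloc();
 *     furi_thread_set_name(thread, "Worker");
 *     furi_thread_set_stack_size(thread, 1024);
 *     furi_thread_set_callback(thread, worker_main);
 *     furi_thread_start(thread);
 *     furi_thread_join(thread);
 *     furi_thread_free(thread);
 */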
FuriThread* furi_thread_alloc() {
    FuriThread* thread = malloc(sizeof(FuriThread));
    string_init(thread->output.buffer);
    thread->is_service = false;
    return thread;
}

void furi_thread_free(FuriThread* thread) {
    furi_assert(thread);
    furi_assert(thread->state == FuriThreadStateStopped);

    if(thread->name) free((void*)thread->name);
    string_clear(thread->output.buffer);

    free(thread);
}

void furi_thread_set_name(FuriThread* thread, const char* name) {
    furi_assert(thread);
    furi_assert(thread->state == FuriThreadStateStopped);
    if(thread->name) free((void*)thread->name);
    thread->name = name ? strdup(name) : NULL;
}

void furi_thread_mark_as_service(FuriThread* thread) {
    thread->is_service = true;
}

void furi_thread_set_stack_size(FuriThread* thread, size_t stack_size) {
    furi_assert(thread);
    furi_assert(thread->state == FuriThreadStateStopped);
    furi_assert(stack_size % 4 == 0);
    thread->stack_size = stack_size;
}

void furi_thread_set_callback(FuriThread* thread, FuriThreadCallback callback) {
    furi_assert(thread);
    furi_assert(thread->state == FuriThreadStateStopped);
    thread->callback = callback;
}

void furi_thread_set_context(FuriThread* thread, void* context) {
    furi_assert(thread);
    furi_assert(thread->state == FuriThreadStateStopped);
    thread->context = context;
}

void furi_thread_set_priority(FuriThread* thread, FuriThreadPriority priority) {
    furi_assert(thread);
    furi_assert(thread->state == FuriThreadStateStopped);
    furi_assert(priority >= FuriThreadPriorityIdle && priority <= FuriThreadPriorityIsr);
    thread->priority = priority;
}

void furi_thread_set_state_callback(FuriThread* thread, FuriThreadStateCallback callback) {
    furi_assert(thread);
    furi_assert(thread->state == FuriThreadStateStopped);
    thread->state_callback = callback;
}

void furi_thread_set_state_context(FuriThread* thread, void* context) {
    furi_assert(thread);
    furi_assert(thread->state == FuriThreadStateStopped);
    thread->state_context = context;
}

FuriThreadState furi_thread_get_state(FuriThread* thread) {
    furi_assert(thread);
    return thread->state;
}
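
/* Starts the thread. The stack size is stored in bytes but FreeRTOS expects a
 * depth in 32-bit words, hence the division by 4 below. Service threads get their
 * stack and task control block from the static memory pool via xTaskCreateStatic(),
 * so that memory is never returned to the heap; regular threads are created on the
 * heap with xTaskCreate(). */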
void furi_thread_start(FuriThread* thread) {
    furi_assert(thread);
    furi_assert(thread->callback);
    furi_assert(thread->state == FuriThreadStateStopped);
    furi_assert(thread->stack_size > 0 && thread->stack_size < 0xFFFF * 4);

    furi_thread_set_state(thread, FuriThreadStateStarting);

    uint32_t stack = thread->stack_size / 4;
    UBaseType_t priority = thread->priority ? thread->priority : FuriThreadPriorityNormal;
    if(thread->is_service) {
        thread->task_handle = xTaskCreateStatic(
            furi_thread_body,
            thread->name,
            stack,
            thread,
            priority,
            memmgr_alloc_from_pool(sizeof(StackType_t) * stack),
            memmgr_alloc_from_pool(sizeof(StaticTask_t)));
    } else {
        BaseType_t ret = xTaskCreate(
            furi_thread_body, thread->name, stack, thread, priority, &thread->task_handle);
        furi_check(ret == pdPASS);
    }

    furi_check(thread->task_handle);
}

bool furi_thread_join(FuriThread* thread) {
    furi_assert(thread);
    furi_check(furi_thread_get_current() != thread);

    // Check if thread was started
    if(thread->task_handle == NULL) {
        return false;
    }

    // Wait for thread to stop
    while(eTaskGetState(thread->task_handle) != eDeleted) {
        furi_delay_ms(10);
    }

    return true;
}

FuriThreadId furi_thread_get_id(FuriThread* thread) {
    furi_assert(thread);
    return thread->task_handle;
}

void furi_thread_enable_heap_trace(FuriThread* thread) {
    furi_assert(thread);
    furi_assert(thread->state == FuriThreadStateStopped);
    furi_assert(thread->heap_trace_enabled == false);
    thread->heap_trace_enabled = true;
}

void furi_thread_disable_heap_trace(FuriThread* thread) {
    furi_assert(thread);
    furi_assert(thread->state == FuriThreadStateStopped);
    furi_assert(thread->heap_trace_enabled == true);
    thread->heap_trace_enabled = false;
}

size_t furi_thread_get_heap_size(FuriThread* thread) {
    furi_assert(thread);
    furi_assert(thread->heap_trace_enabled == true);
    return thread->heap_size;
}

int32_t furi_thread_get_return_code(FuriThread* thread) {
    furi_assert(thread);
    furi_assert(thread->state == FuriThreadStateStopped);
    return thread->ret;
}

FuriThreadId furi_thread_get_current_id() {
    return xTaskGetCurrentTaskHandle();
}

FuriThread* furi_thread_get_current() {
    FuriThread* thread = pvTaskGetThreadLocalStoragePointer(NULL, 0);
    furi_assert(thread != NULL);
    return thread;
}

void furi_thread_yield() {
    furi_assert(!FURI_IS_IRQ_MODE());
    taskYIELD();
}

/* Limits */
#define MAX_BITS_TASK_NOTIFY 31U
#define MAX_BITS_EVENT_GROUPS 24U

#define THREAD_FLAGS_INVALID_BITS (~((1UL << MAX_BITS_TASK_NOTIFY) - 1U))
#define EVENT_FLAGS_INVALID_BITS (~((1UL << MAX_BITS_EVENT_GROUPS) - 1U))
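
/* Thread flags are implemented on top of FreeRTOS task notifications at
 * THREAD_NOTIFY_INDEX, mirroring CMSIS-RTOS2-style thread-flags semantics:
 * only the lower 31 bits are usable, and values with the top bit set are
 * reserved for the FuriStatus error codes returned by these functions. */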
uint32_t furi_thread_flags_set(FuriThreadId thread_id, uint32_t flags) {
    TaskHandle_t hTask = (TaskHandle_t)thread_id;
    uint32_t rflags;
    BaseType_t yield;

    if((hTask == NULL) || ((flags & THREAD_FLAGS_INVALID_BITS) != 0U)) {
        rflags = (uint32_t)FuriStatusErrorParameter;
    } else {
        rflags = (uint32_t)FuriStatusError;
        if(FURI_IS_IRQ_MODE()) {
            yield = pdFALSE;

            (void)xTaskNotifyIndexedFromISR(hTask, THREAD_NOTIFY_INDEX, flags, eSetBits, &yield);
            (void)xTaskNotifyAndQueryIndexedFromISR(
                hTask, THREAD_NOTIFY_INDEX, 0, eNoAction, &rflags, NULL);

            portYIELD_FROM_ISR(yield);
        } else {
            (void)xTaskNotifyIndexed(hTask, THREAD_NOTIFY_INDEX, flags, eSetBits);
            (void)xTaskNotifyAndQueryIndexed(hTask, THREAD_NOTIFY_INDEX, 0, eNoAction, &rflags);
        }
    }
    /* Return flags after setting */
    return (rflags);
}

uint32_t furi_thread_flags_clear(uint32_t flags) {
    TaskHandle_t hTask;
    uint32_t rflags, cflags;

    if(FURI_IS_IRQ_MODE()) {
        rflags = (uint32_t)FuriStatusErrorISR;
    } else if((flags & THREAD_FLAGS_INVALID_BITS) != 0U) {
        rflags = (uint32_t)FuriStatusErrorParameter;
    } else {
        hTask = xTaskGetCurrentTaskHandle();

        if(xTaskNotifyAndQueryIndexed(hTask, THREAD_NOTIFY_INDEX, 0, eNoAction, &cflags) ==
           pdPASS) {
            rflags = cflags;
            cflags &= ~flags;

            if(xTaskNotifyIndexed(hTask, THREAD_NOTIFY_INDEX, cflags, eSetValueWithOverwrite) !=
               pdPASS) {
                rflags = (uint32_t)FuriStatusError;
            }
        } else {
            rflags = (uint32_t)FuriStatusError;
        }
    }

    /* Return flags before clearing */
    return (rflags);
}

uint32_t furi_thread_flags_get(void) {
    TaskHandle_t hTask;
    uint32_t rflags;

    if(FURI_IS_IRQ_MODE()) {
        rflags = (uint32_t)FuriStatusErrorISR;
    } else {
        hTask = xTaskGetCurrentTaskHandle();

        if(xTaskNotifyAndQueryIndexed(hTask, THREAD_NOTIFY_INDEX, 0, eNoAction, &rflags) !=
           pdPASS) {
            rflags = (uint32_t)FuriStatusError;
        }
    }

    return (rflags);
}
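
/* Waits for thread flags on the current task. With FuriFlagWaitAll the call
 * returns only once every requested flag is set; otherwise any single requested
 * flag satisfies the wait. Unless FuriFlagNoClear is passed, the requested flags
 * are cleared from the notification value when the wait returns. The remaining
 * timeout is recomputed after every wake-up so spurious notifications do not
 * extend the total waiting time. */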
uint32_t furi_thread_flags_wait(uint32_t flags, uint32_t options, uint32_t timeout) {
    uint32_t rflags, nval;
    uint32_t clear;
    TickType_t t0, td, tout;
    BaseType_t rval;

    if(FURI_IS_IRQ_MODE()) {
        rflags = (uint32_t)FuriStatusErrorISR;
    } else if((flags & THREAD_FLAGS_INVALID_BITS) != 0U) {
        rflags = (uint32_t)FuriStatusErrorParameter;
    } else {
        if((options & FuriFlagNoClear) == FuriFlagNoClear) {
            clear = 0U;
        } else {
            clear = flags;
        }

        rflags = 0U;
        tout = timeout;

        t0 = xTaskGetTickCount();
        do {
            rval = xTaskNotifyWaitIndexed(THREAD_NOTIFY_INDEX, 0, clear, &nval, tout);
            if(rval == pdPASS) {
                rflags &= flags;
                rflags |= nval;

                if((options & FuriFlagWaitAll) == FuriFlagWaitAll) {
                    if((flags & rflags) == flags) {
                        break;
                    } else {
                        if(timeout == 0U) {
                            rflags = (uint32_t)FuriStatusErrorResource;
                            break;
                        }
                    }
                } else {
                    if((flags & rflags) != 0) {
                        break;
                    } else {
                        if(timeout == 0U) {
                            rflags = (uint32_t)FuriStatusErrorResource;
                            break;
                        }
                    }
                }

                /* Update timeout */
                td = xTaskGetTickCount() - t0;
                if(td > tout) {
                    tout = 0;
                } else {
                    tout -= td;
                }
            } else {
                if(timeout == 0) {
                    rflags = (uint32_t)FuriStatusErrorResource;
                } else {
                    rflags = (uint32_t)FuriStatusErrorTimeout;
                }
            }
        } while(rval != pdFAIL);
    }

    /* Return flags before clearing */
    return (rflags);
}
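
/* Fills `thread_array` with up to `array_items` task handles. The scheduler is
 * suspended while the task list is snapshotted with uxTaskGetSystemState() so the
 * enumeration is consistent; the function returns 0 when called from an interrupt
 * or with invalid arguments. */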
uint32_t furi_thread_enumerate(FuriThreadId* thread_array, uint32_t array_items) {
    uint32_t i, count;
    TaskStatus_t* task;

    if(FURI_IS_IRQ_MODE() || (thread_array == NULL) || (array_items == 0U)) {
        count = 0U;
    } else {
        vTaskSuspendAll();

        count = uxTaskGetNumberOfTasks();
        task = pvPortMalloc(count * sizeof(TaskStatus_t));

        if(task != NULL) {
            count = uxTaskGetSystemState(task, count, NULL);

            for(i = 0U; (i < count) && (i < array_items); i++) {
                thread_array[i] = (FuriThreadId)task[i].xHandle;
            }
            count = i;
        }
        (void)xTaskResumeAll();

        vPortFree(task);
    }

    return (count);
}

const char* furi_thread_get_name(FuriThreadId thread_id) {
    TaskHandle_t hTask = (TaskHandle_t)thread_id;
    const char* name;

    if(FURI_IS_IRQ_MODE() || (hTask == NULL)) {
        name = NULL;
    } else {
        name = pcTaskGetName(hTask);
    }

    return (name);
}

uint32_t furi_thread_get_stack_space(FuriThreadId thread_id) {
    TaskHandle_t hTask = (TaskHandle_t)thread_id;
    uint32_t sz;

    if(FURI_IS_IRQ_MODE() || (hTask == NULL)) {
        sz = 0U;
    } else {
        sz = (uint32_t)(uxTaskGetStackHighWaterMark(hTask) * sizeof(StackType_t));
    }

    return (sz);
}
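
/* Per-thread stdout: output is routed to the thread's write callback when one is
 * set, otherwise it falls back to the HAL console. Data is accumulated in the
 * thread's buffer and flushed on every newline, on an explicit flush, and
 * automatically when the thread exits. */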
static size_t __furi_thread_stdout_write(FuriThread* thread, const char* data, size_t size) {
    if(thread->output.write_callback != NULL) {
        thread->output.write_callback(data, size);
    } else {
        furi_hal_console_tx((const uint8_t*)data, size);
    }
    return size;
}

static int32_t __furi_thread_stdout_flush(FuriThread* thread) {
    string_ptr buffer = thread->output.buffer;
    size_t size = string_size(buffer);
    if(size > 0) {
        __furi_thread_stdout_write(thread, string_get_cstr(buffer), size);
        string_reset(buffer);
    }
    return 0;
}

bool furi_thread_set_stdout_callback(FuriThreadStdoutWriteCallback callback) {
    FuriThread* thread = furi_thread_get_current();
    __furi_thread_stdout_flush(thread);
    thread->output.write_callback = callback;
    return true;
}

size_t furi_thread_stdout_write(const char* data, size_t size) {
    FuriThread* thread = furi_thread_get_current();

    if(size == 0 || data == NULL) {
        return __furi_thread_stdout_flush(thread);
    } else {
        if(data[size - 1] == '\n') {
            // if the last character is a newline, flush the buffer and write the data directly, without buffering
            __furi_thread_stdout_flush(thread);
            __furi_thread_stdout_write(thread, data, size);
        } else {
            // string_cat doesn't work here because we need to write exactly `size` bytes
            for(size_t i = 0; i < size; i++) {
                string_push_back(thread->output.buffer, data[i]);
                if(data[i] == '\n') {
                    __furi_thread_stdout_flush(thread);
                }
            }
        }
    }

    return size;
}

int32_t furi_thread_stdout_flush() {
    return __furi_thread_stdout_flush(furi_thread_get_current());
}
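
/* Suspend/resume pass straight through to FreeRTOS. furi_thread_resume() picks
 * the ISR-safe variant when called from interrupt context; note that the return
 * value of xTaskResumeFromISR() (whether a context switch is required) is
 * ignored here. */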
void furi_thread_suspend(FuriThreadId thread_id) {
    TaskHandle_t hTask = (TaskHandle_t)thread_id;
    vTaskSuspend(hTask);
}

void furi_thread_resume(FuriThreadId thread_id) {
    TaskHandle_t hTask = (TaskHandle_t)thread_id;
    if(FURI_IS_IRQ_MODE()) {
        xTaskResumeFromISR(hTask);
    } else {
        vTaskResume(hTask);
    }
}

bool furi_thread_is_suspended(FuriThreadId thread_id) {
    TaskHandle_t hTask = (TaskHandle_t)thread_id;
    return eTaskGetState(hTask) == eSuspended;
}