thread.c

#include "thread.h"
#include "kernel.h"
#include "memmgr.h"
#include "memmgr_heap.h"
#include "check.h"
#include "common_defines.h"
#include "mutex.h"

#include <task.h>
#include <m-string.h>
#include <furi_hal_console.h>

#define THREAD_NOTIFY_INDEX 1 // Index 0 is used for stream buffers

typedef struct FuriThreadStdout FuriThreadStdout;

struct FuriThreadStdout {
    FuriThreadStdoutWriteCallback write_callback;
    string_t buffer;
};

struct FuriThread {
    FuriThreadState state;
    int32_t ret;
    FuriThreadCallback callback;
    void* context;
    FuriThreadStateCallback state_callback;
    void* state_context;
    char* name;
    configSTACK_DEPTH_TYPE stack_size;
    FuriThreadPriority priority;
    TaskHandle_t task_handle;
    bool heap_trace_enabled;
    size_t heap_size;
    FuriThreadStdout output;
};

static size_t __furi_thread_stdout_write(FuriThread* thread, const char* data, size_t size);
static int32_t __furi_thread_stdout_flush(FuriThread* thread);

/** Catch threads that try to exit the wrong way */
__attribute__((__noreturn__)) void furi_thread_catch() {
    asm volatile("nop"); // extra magic
    furi_crash("You are doing it wrong");
}

static void furi_thread_set_state(FuriThread* thread, FuriThreadState state) {
    furi_assert(thread);
    thread->state = state;
    if(thread->state_callback) {
        thread->state_callback(state, thread->state_context);
    }
}

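/* FreeRTOS task body shared by all FuriThreads: stores the FuriThread pointer
 * in thread local storage slot 0, runs the user callback, collects heap usage
 * when tracing is enabled, marks the thread Stopped and deletes the task. */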
static void furi_thread_body(void* context) {
    furi_assert(context);
    FuriThread* thread = context;

    // store thread instance to thread local storage
    furi_assert(pvTaskGetThreadLocalStoragePointer(NULL, 0) == NULL);
    vTaskSetThreadLocalStoragePointer(NULL, 0, thread);

    furi_assert(thread->state == FuriThreadStateStarting);
    furi_thread_set_state(thread, FuriThreadStateRunning);

    TaskHandle_t task_handle = xTaskGetCurrentTaskHandle();
    if(thread->heap_trace_enabled == true) {
        memmgr_heap_enable_thread_trace((FuriThreadId)task_handle);
    }

    thread->ret = thread->callback(thread->context);

    if(thread->heap_trace_enabled == true) {
        furi_delay_ms(33);
        thread->heap_size = memmgr_heap_get_thread_memory((FuriThreadId)task_handle);
        memmgr_heap_disable_thread_trace((FuriThreadId)task_handle);
    }

    furi_assert(thread->state == FuriThreadStateRunning);
    furi_thread_set_state(thread, FuriThreadStateStopped);

    __furi_thread_stdout_flush(thread);

    // clear thread local storage
    furi_assert(pvTaskGetThreadLocalStoragePointer(NULL, 0) != NULL);
    vTaskSetThreadLocalStoragePointer(NULL, 0, NULL);

    vTaskDelete(thread->task_handle);
    furi_thread_catch();
}

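/** Allocate a FuriThread control block and initialize its stdout buffer. */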
FuriThread* furi_thread_alloc() {
    FuriThread* thread = malloc(sizeof(FuriThread));
    string_init(thread->output.buffer);
    return thread;
}

void furi_thread_free(FuriThread* thread) {
    furi_assert(thread);
    furi_assert(thread->state == FuriThreadStateStopped);

    if(thread->name) free((void*)thread->name);
    string_clear(thread->output.buffer);
    free(thread);
}

void furi_thread_set_name(FuriThread* thread, const char* name) {
    furi_assert(thread);
    furi_assert(thread->state == FuriThreadStateStopped);
    if(thread->name) free((void*)thread->name);
    thread->name = name ? strdup(name) : NULL;
}

void furi_thread_set_stack_size(FuriThread* thread, size_t stack_size) {
    furi_assert(thread);
    furi_assert(thread->state == FuriThreadStateStopped);
    furi_assert(stack_size % 4 == 0);
    thread->stack_size = stack_size;
}

void furi_thread_set_callback(FuriThread* thread, FuriThreadCallback callback) {
    furi_assert(thread);
    furi_assert(thread->state == FuriThreadStateStopped);
    thread->callback = callback;
}

void furi_thread_set_context(FuriThread* thread, void* context) {
    furi_assert(thread);
    furi_assert(thread->state == FuriThreadStateStopped);
    thread->context = context;
}

void furi_thread_set_priority(FuriThread* thread, FuriThreadPriority priority) {
    furi_assert(thread);
    furi_assert(thread->state == FuriThreadStateStopped);
    furi_assert(priority >= FuriThreadPriorityIdle && priority <= FuriThreadPriorityIsr);
    thread->priority = priority;
}

void furi_thread_set_state_callback(FuriThread* thread, FuriThreadStateCallback callback) {
    furi_assert(thread);
    furi_assert(thread->state == FuriThreadStateStopped);
    thread->state_callback = callback;
}

void furi_thread_set_state_context(FuriThread* thread, void* context) {
    furi_assert(thread);
    furi_assert(thread->state == FuriThreadStateStopped);
    thread->state_context = context;
}

FuriThreadState furi_thread_get_state(FuriThread* thread) {
    furi_assert(thread);
    return thread->state;
}

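/* Create the underlying FreeRTOS task. The stack size is stored in bytes and
 * converted to words (stack_size / 4) for xTaskCreate; a zero priority falls
 * back to FuriThreadPriorityNormal. */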
void furi_thread_start(FuriThread* thread) {
    furi_assert(thread);
    furi_assert(thread->callback);
    furi_assert(thread->state == FuriThreadStateStopped);
    furi_assert(thread->stack_size > 0 && thread->stack_size < 0xFFFF * 4);

    furi_thread_set_state(thread, FuriThreadStateStarting);

    BaseType_t ret = xTaskCreate(
        furi_thread_body,
        thread->name,
        thread->stack_size / 4,
        thread,
        thread->priority ? thread->priority : FuriThreadPriorityNormal,
        &thread->task_handle);

    furi_check(ret == pdPASS);
    furi_check(thread->task_handle);
}

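/* Wait for a thread to reach the Stopped state by polling every 10 ms.
 * Must not be called from the thread being joined. */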
bool furi_thread_join(FuriThread* thread) {
    furi_assert(thread);

    while(thread->state != FuriThreadStateStopped) {
        furi_delay_ms(10);
    }

    return true;
}

FuriThreadId furi_thread_get_id(FuriThread* thread) {
    furi_assert(thread);
    return thread->task_handle;
}

void furi_thread_enable_heap_trace(FuriThread* thread) {
    furi_assert(thread);
    furi_assert(thread->state == FuriThreadStateStopped);
    furi_assert(thread->heap_trace_enabled == false);
    thread->heap_trace_enabled = true;
}

void furi_thread_disable_heap_trace(FuriThread* thread) {
    furi_assert(thread);
    furi_assert(thread->state == FuriThreadStateStopped);
    furi_assert(thread->heap_trace_enabled == true);
    thread->heap_trace_enabled = false;
}

size_t furi_thread_get_heap_size(FuriThread* thread) {
    furi_assert(thread);
    furi_assert(thread->heap_trace_enabled == true);
    return thread->heap_size;
}

int32_t furi_thread_get_return_code(FuriThread* thread) {
    furi_assert(thread);
    furi_assert(thread->state == FuriThreadStateStopped);
    return thread->ret;
}

FuriThreadId furi_thread_get_current_id() {
    return xTaskGetCurrentTaskHandle();
}

FuriThread* furi_thread_get_current() {
    FuriThread* thread = pvTaskGetThreadLocalStoragePointer(NULL, 0);
    furi_assert(thread != NULL);
    return thread;
}

void furi_thread_yield() {
    furi_assert(!FURI_IS_IRQ_MODE());
    taskYIELD();
}

/* Limits */
#define MAX_BITS_TASK_NOTIFY 31U
#define MAX_BITS_EVENT_GROUPS 24U
#define THREAD_FLAGS_INVALID_BITS (~((1UL << MAX_BITS_TASK_NOTIFY) - 1U))
#define EVENT_FLAGS_INVALID_BITS (~((1UL << MAX_BITS_EVENT_GROUPS) - 1U))

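/* Set notification flags on the target thread. Uses FreeRTOS task notification
 * index THREAD_NOTIFY_INDEX; safe to call from ISR context, where a context
 * switch is requested via portYIELD_FROM_ISR. Returns the flags after setting,
 * or a FuriStatus error code cast to uint32_t on invalid parameters. */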
uint32_t furi_thread_flags_set(FuriThreadId thread_id, uint32_t flags) {
    TaskHandle_t hTask = (TaskHandle_t)thread_id;
    uint32_t rflags;
    BaseType_t yield;

    if((hTask == NULL) || ((flags & THREAD_FLAGS_INVALID_BITS) != 0U)) {
        rflags = (uint32_t)FuriStatusErrorParameter;
    } else {
        rflags = (uint32_t)FuriStatusError;
        if(FURI_IS_IRQ_MODE()) {
            yield = pdFALSE;
            (void)xTaskNotifyIndexedFromISR(hTask, THREAD_NOTIFY_INDEX, flags, eSetBits, &yield);
            (void)xTaskNotifyAndQueryIndexedFromISR(
                hTask, THREAD_NOTIFY_INDEX, 0, eNoAction, &rflags, NULL);
            portYIELD_FROM_ISR(yield);
        } else {
            (void)xTaskNotifyIndexed(hTask, THREAD_NOTIFY_INDEX, flags, eSetBits);
            (void)xTaskNotifyAndQueryIndexed(hTask, THREAD_NOTIFY_INDEX, 0, eNoAction, &rflags);
        }
    }
    /* Return flags after setting */
    return (rflags);
}

uint32_t furi_thread_flags_clear(uint32_t flags) {
    TaskHandle_t hTask;
    uint32_t rflags, cflags;

    if(FURI_IS_IRQ_MODE()) {
        rflags = (uint32_t)FuriStatusErrorISR;
    } else if((flags & THREAD_FLAGS_INVALID_BITS) != 0U) {
        rflags = (uint32_t)FuriStatusErrorParameter;
    } else {
        hTask = xTaskGetCurrentTaskHandle();
        if(xTaskNotifyAndQueryIndexed(hTask, THREAD_NOTIFY_INDEX, 0, eNoAction, &cflags) ==
           pdPASS) {
            rflags = cflags;
            cflags &= ~flags;
            if(xTaskNotifyIndexed(hTask, THREAD_NOTIFY_INDEX, cflags, eSetValueWithOverwrite) !=
               pdPASS) {
                rflags = (uint32_t)FuriStatusError;
            }
        } else {
            rflags = (uint32_t)FuriStatusError;
        }
    }
    /* Return flags before clearing */
    return (rflags);
}

uint32_t furi_thread_flags_get(void) {
    TaskHandle_t hTask;
    uint32_t rflags;

    if(FURI_IS_IRQ_MODE()) {
        rflags = (uint32_t)FuriStatusErrorISR;
    } else {
        hTask = xTaskGetCurrentTaskHandle();
        if(xTaskNotifyAndQueryIndexed(hTask, THREAD_NOTIFY_INDEX, 0, eNoAction, &rflags) !=
           pdPASS) {
            rflags = (uint32_t)FuriStatusError;
        }
    }
    return (rflags);
}

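/* Wait for notification flags on the current thread. FuriFlagWaitAll requires
 * every requested flag to be set, otherwise any one is enough; FuriFlagNoClear
 * leaves the flags set after the wait. The remaining timeout is recomputed
 * across repeated xTaskNotifyWaitIndexed() calls. Returns the accumulated
 * flags or a FuriStatus error code cast to uint32_t. */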
uint32_t furi_thread_flags_wait(uint32_t flags, uint32_t options, uint32_t timeout) {
    uint32_t rflags, nval;
    uint32_t clear;
    TickType_t t0, td, tout;
    BaseType_t rval;

    if(FURI_IS_IRQ_MODE()) {
        rflags = (uint32_t)FuriStatusErrorISR;
    } else if((flags & THREAD_FLAGS_INVALID_BITS) != 0U) {
        rflags = (uint32_t)FuriStatusErrorParameter;
    } else {
        if((options & FuriFlagNoClear) == FuriFlagNoClear) {
            clear = 0U;
        } else {
            clear = flags;
        }

        rflags = 0U;
        tout = timeout;
        t0 = xTaskGetTickCount();

        do {
            rval = xTaskNotifyWaitIndexed(THREAD_NOTIFY_INDEX, 0, clear, &nval, tout);
            if(rval == pdPASS) {
                rflags &= flags;
                rflags |= nval;

                if((options & FuriFlagWaitAll) == FuriFlagWaitAll) {
                    if((flags & rflags) == flags) {
                        break;
                    } else {
                        if(timeout == 0U) {
                            rflags = (uint32_t)FuriStatusErrorResource;
                            break;
                        }
                    }
                } else {
                    if((flags & rflags) != 0) {
                        break;
                    } else {
                        if(timeout == 0U) {
                            rflags = (uint32_t)FuriStatusErrorResource;
                            break;
                        }
                    }
                }

                /* Update timeout */
                td = xTaskGetTickCount() - t0;
                if(td > tout) {
                    tout = 0;
                } else {
                    tout -= td;
                }
            } else {
                if(timeout == 0) {
                    rflags = (uint32_t)FuriStatusErrorResource;
                } else {
                    rflags = (uint32_t)FuriStatusErrorTimeout;
                }
            }
        } while(rval != pdFAIL);
    }
    /* Return flags before clearing */
    return (rflags);
}

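/* Fill thread_array with the handles of all running tasks, up to array_items.
 * The scheduler is suspended while the snapshot is taken. Returns the number
 * of entries written, or 0 when called from an ISR or with bad arguments. */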
uint32_t furi_thread_enumerate(FuriThreadId* thread_array, uint32_t array_items) {
    uint32_t i, count;
    TaskStatus_t* task;

    if(FURI_IS_IRQ_MODE() || (thread_array == NULL) || (array_items == 0U)) {
        count = 0U;
    } else {
        vTaskSuspendAll();

        count = uxTaskGetNumberOfTasks();
        task = pvPortMalloc(count * sizeof(TaskStatus_t));
        if(task != NULL) {
            count = uxTaskGetSystemState(task, count, NULL);
            for(i = 0U; (i < count) && (i < array_items); i++) {
                thread_array[i] = (FuriThreadId)task[i].xHandle;
            }
            count = i;
        }

        (void)xTaskResumeAll();
        vPortFree(task);
    }
    return (count);
}

const char* furi_thread_get_name(FuriThreadId thread_id) {
    TaskHandle_t hTask = (TaskHandle_t)thread_id;
    const char* name;

    if(FURI_IS_IRQ_MODE() || (hTask == NULL)) {
        name = NULL;
    } else {
        name = pcTaskGetName(hTask);
    }
    return (name);
}

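/* Return the minimum amount of stack, in bytes, that has ever been left unused
 * by the given task (FreeRTOS stack high water mark). */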
uint32_t furi_thread_get_stack_space(FuriThreadId thread_id) {
    TaskHandle_t hTask = (TaskHandle_t)thread_id;
    uint32_t sz;

    if(FURI_IS_IRQ_MODE() || (hTask == NULL)) {
        sz = 0U;
    } else {
        sz = (uint32_t)(uxTaskGetStackHighWaterMark(hTask) * sizeof(StackType_t));
    }
    return (sz);
}

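/* Write data through the thread's stdout callback when one is set,
 * otherwise fall back to the HAL console. */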
static size_t __furi_thread_stdout_write(FuriThread* thread, const char* data, size_t size) {
    if(thread->output.write_callback != NULL) {
        thread->output.write_callback(data, size);
    } else {
        furi_hal_console_tx((const uint8_t*)data, size);
    }
    return size;
}

static int32_t __furi_thread_stdout_flush(FuriThread* thread) {
    string_ptr buffer = thread->output.buffer;
    size_t size = string_size(buffer);
    if(size > 0) {
        __furi_thread_stdout_write(thread, string_get_cstr(buffer), size);
        string_reset(buffer);
    }
    return 0;
}

bool furi_thread_set_stdout_callback(FuriThreadStdoutWriteCallback callback) {
    FuriThread* thread = furi_thread_get_current();
    __furi_thread_stdout_flush(thread);
    thread->output.write_callback = callback;
    return true;
}

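/* Buffer stdout for the current thread. A NULL or empty write just flushes the
 * buffer; data ending in '\n' is flushed and written directly; everything else
 * is accumulated byte by byte and flushed whenever a newline is seen.
 * Returns the number of bytes accepted. */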
size_t furi_thread_stdout_write(const char* data, size_t size) {
    FuriThread* thread = furi_thread_get_current();
    if(size == 0 || data == NULL) {
        return __furi_thread_stdout_flush(thread);
    } else {
        if(data[size - 1] == '\n') {
            // if the last character is a newline, flush the buffer and write the data as is, without buffering
            __furi_thread_stdout_flush(thread);
            __furi_thread_stdout_write(thread, data, size);
        } else {
            // string_cat doesn't work here because we need to write exactly `size` bytes
            for(size_t i = 0; i < size; i++) {
                string_push_back(thread->output.buffer, data[i]);
                if(data[i] == '\n') {
                    __furi_thread_stdout_flush(thread);
                }
            }
        }
    }
    return size;
}

int32_t furi_thread_stdout_flush() {
    return __furi_thread_stdout_flush(furi_thread_get_current());
}