thread.c

#include "thread.h"
#include "memmgr.h"
#include "memmgr_heap.h"
#include "check.h"
#include "common_defines.h"

#include <task.h>
#include <m-string.h>

struct FuriThread {
    FuriThreadState state;
    int32_t ret;

    FuriThreadCallback callback;
    void* context;

    FuriThreadStateCallback state_callback;
    void* state_context;

    char* name;
    configSTACK_DEPTH_TYPE stack_size;
    FuriThreadPriority priority;

    TaskHandle_t task_handle;
    bool heap_trace_enabled;
    size_t heap_size;
};
/** Catch threads that are trying to exit the wrong way */
__attribute__((__noreturn__)) void furi_thread_catch() {
    asm volatile("nop"); // extra magic
    furi_crash("You are doing it wrong");
}
static void furi_thread_set_state(FuriThread* thread, FuriThreadState state) {
    furi_assert(thread);
    thread->state = state;
    if(thread->state_callback) {
        thread->state_callback(state, thread->state_context);
    }
}
static void furi_thread_body(void* context) {
    furi_assert(context);
    FuriThread* thread = context;

    furi_assert(thread->state == FuriThreadStateStarting);
    furi_thread_set_state(thread, FuriThreadStateRunning);

    TaskHandle_t task_handle = xTaskGetCurrentTaskHandle();
    if(thread->heap_trace_enabled == true) {
        memmgr_heap_enable_thread_trace((FuriThreadId)task_handle);
    }

    thread->ret = thread->callback(thread->context);

    if(thread->heap_trace_enabled == true) {
        // Brief delay before sampling how much heap the thread still holds
        osDelay(33);
        thread->heap_size = memmgr_heap_get_thread_memory((FuriThreadId)task_handle);
        memmgr_heap_disable_thread_trace((FuriThreadId)task_handle);
    }

    furi_assert(thread->state == FuriThreadStateRunning);
    furi_thread_set_state(thread, FuriThreadStateStopped);

    // Deleting the current task does not return; furi_thread_catch() is the safety net
    vTaskDelete(thread->task_handle);
    furi_thread_catch();
}
FuriThread* furi_thread_alloc() {
    FuriThread* thread = malloc(sizeof(FuriThread));
    return thread;
}
void furi_thread_free(FuriThread* thread) {
    furi_assert(thread);
    furi_assert(thread->state == FuriThreadStateStopped);

    if(thread->name) free((void*)thread->name);
    free(thread);
}
void furi_thread_set_name(FuriThread* thread, const char* name) {
    furi_assert(thread);
    furi_assert(thread->state == FuriThreadStateStopped);
    if(thread->name) free((void*)thread->name);
    thread->name = strdup(name);
}

void furi_thread_set_stack_size(FuriThread* thread, size_t stack_size) {
    furi_assert(thread);
    furi_assert(thread->state == FuriThreadStateStopped);
    furi_assert(stack_size % 4 == 0);
    thread->stack_size = stack_size;
}
void furi_thread_set_callback(FuriThread* thread, FuriThreadCallback callback) {
    furi_assert(thread);
    furi_assert(thread->state == FuriThreadStateStopped);
    thread->callback = callback;
}

void furi_thread_set_context(FuriThread* thread, void* context) {
    furi_assert(thread);
    furi_assert(thread->state == FuriThreadStateStopped);
    thread->context = context;
}

void furi_thread_set_priority(FuriThread* thread, FuriThreadPriority priority) {
    furi_assert(thread);
    furi_assert(thread->state == FuriThreadStateStopped);
    furi_assert(priority >= FuriThreadPriorityIdle && priority <= FuriThreadPriorityIsr);
    thread->priority = priority;
}

void furi_thread_set_state_callback(FuriThread* thread, FuriThreadStateCallback callback) {
    furi_assert(thread);
    furi_assert(thread->state == FuriThreadStateStopped);
    thread->state_callback = callback;
}

void furi_thread_set_state_context(FuriThread* thread, void* context) {
    furi_assert(thread);
    furi_assert(thread->state == FuriThreadStateStopped);
    thread->state_context = context;
}

FuriThreadState furi_thread_get_state(FuriThread* thread) {
    furi_assert(thread);
    return thread->state;
}
void furi_thread_start(FuriThread* thread) {
    furi_assert(thread);
    furi_assert(thread->callback);
    furi_assert(thread->state == FuriThreadStateStopped);
    furi_assert(thread->stack_size > 0 && thread->stack_size < 0xFFFF * 4);

    furi_thread_set_state(thread, FuriThreadStateStarting);

    BaseType_t ret = xTaskCreate(
        furi_thread_body,
        thread->name,
        thread->stack_size / 4, // xTaskCreate expects the stack depth in words, not bytes
        thread,
        thread->priority ? thread->priority : FuriThreadPriorityNormal,
        &thread->task_handle);

    furi_check(ret == pdPASS);
    furi_check(thread->task_handle);
}
bool furi_thread_join(FuriThread* thread) {
    furi_assert(thread);

    // Poll until the thread body reports FuriThreadStateStopped
    while(thread->state != FuriThreadStateStopped) {
        osDelay(10);
    }

    return true;
}
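
/* Illustrative usage sketch, not part of the original file: the usual
 * alloc -> configure -> start -> join -> free lifecycle of a FuriThread.
 * The worker name, stack size, and context type are hypothetical choices. */
static int32_t example_worker(void* context) {
    uint32_t* counter = context;
    for(uint32_t i = 0; i < 10; i++) {
        (*counter)++;
        osDelay(1);
    }
    return 0;
}

static void example_run_worker(void) {
    uint32_t counter = 0;

    FuriThread* thread = furi_thread_alloc();
    furi_thread_set_name(thread, "ExampleWorker");
    furi_thread_set_stack_size(thread, 1024);
    furi_thread_set_callback(thread, example_worker);
    furi_thread_set_context(thread, &counter);

    furi_thread_start(thread);
    furi_thread_join(thread);

    int32_t ret = furi_thread_get_return_code(thread);
    (void)ret; // worker's return value, available once the thread has stopped

    furi_thread_free(thread);
}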
FuriThreadId furi_thread_get_id(FuriThread* thread) {
    furi_assert(thread);
    return thread->task_handle;
}
void furi_thread_enable_heap_trace(FuriThread* thread) {
    furi_assert(thread);
    furi_assert(thread->state == FuriThreadStateStopped);
    furi_assert(thread->heap_trace_enabled == false);
    thread->heap_trace_enabled = true;
}

void furi_thread_disable_heap_trace(FuriThread* thread) {
    furi_assert(thread);
    furi_assert(thread->state == FuriThreadStateStopped);
    furi_assert(thread->heap_trace_enabled == true);
    thread->heap_trace_enabled = false;
}

size_t furi_thread_get_heap_size(FuriThread* thread) {
    furi_assert(thread);
    furi_assert(thread->heap_trace_enabled == true);
    return thread->heap_size;
}
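
/* Illustrative sketch, not part of the original file: heap tracing has to be
 * enabled while the thread is still stopped; the value read afterwards is the
 * heap memory still attributed to the thread when its callback returned (see
 * furi_thread_body above). Assumes `thread` was configured as in the
 * lifecycle sketch. */
static size_t example_measure_thread_heap(FuriThread* thread) {
    furi_thread_enable_heap_trace(thread);
    furi_thread_start(thread);
    furi_thread_join(thread);
    return furi_thread_get_heap_size(thread);
}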
int32_t furi_thread_get_return_code(FuriThread* thread) {
    furi_assert(thread);
    furi_assert(thread->state == FuriThreadStateStopped);
    return thread->ret;
}

FuriThreadId furi_thread_get_current_id() {
    return xTaskGetCurrentTaskHandle();
}

void furi_thread_yield() {
    furi_assert(!FURI_IS_IRQ_MODE());
    taskYIELD();
}
/* Limits */
#define MAX_BITS_TASK_NOTIFY 31U
#define MAX_BITS_EVENT_GROUPS 24U
#define THREAD_FLAGS_INVALID_BITS (~((1UL << MAX_BITS_TASK_NOTIFY) - 1U))
#define EVENT_FLAGS_INVALID_BITS (~((1UL << MAX_BITS_EVENT_GROUPS) - 1U))
uint32_t furi_thread_flags_set(FuriThreadId thread_id, uint32_t flags) {
    TaskHandle_t hTask = (TaskHandle_t)thread_id;
    uint32_t rflags;
    BaseType_t yield;

    if((hTask == NULL) || ((flags & THREAD_FLAGS_INVALID_BITS) != 0U)) {
        rflags = (uint32_t)osErrorParameter;
    } else {
        rflags = (uint32_t)osError;

        if(FURI_IS_IRQ_MODE()) {
            yield = pdFALSE;

            (void)xTaskNotifyFromISR(hTask, flags, eSetBits, &yield);
            (void)xTaskNotifyAndQueryFromISR(hTask, 0, eNoAction, &rflags, NULL);

            portYIELD_FROM_ISR(yield);
        } else {
            (void)xTaskNotify(hTask, flags, eSetBits);
            (void)xTaskNotifyAndQuery(hTask, 0, eNoAction, &rflags);
        }
    }

    /* Return flags after setting */
    return (rflags);
}
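
/* Illustrative sketch, not part of the original file: waking a worker thread
 * from a hypothetical interrupt callback. As the branches above show,
 * furi_thread_flags_set() selects the ISR-safe notification path on its own,
 * so the same call works from both thread and interrupt context. The flag
 * value and the callback name are made up for this example. */
#define EXAMPLE_FLAG_IRQ (1UL << 0)

static FuriThreadId example_worker_id = NULL;

static void example_irq_callback(void* context) {
    (void)context;
    if(example_worker_id != NULL) {
        furi_thread_flags_set(example_worker_id, EXAMPLE_FLAG_IRQ);
    }
}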
uint32_t furi_thread_flags_clear(uint32_t flags) {
    TaskHandle_t hTask;
    uint32_t rflags, cflags;

    if(FURI_IS_IRQ_MODE()) {
        rflags = (uint32_t)osErrorISR;
    } else if((flags & THREAD_FLAGS_INVALID_BITS) != 0U) {
        rflags = (uint32_t)osErrorParameter;
    } else {
        hTask = xTaskGetCurrentTaskHandle();

        if(xTaskNotifyAndQuery(hTask, 0, eNoAction, &cflags) == pdPASS) {
            rflags = cflags;
            cflags &= ~flags;

            if(xTaskNotify(hTask, cflags, eSetValueWithOverwrite) != pdPASS) {
                rflags = (uint32_t)osError;
            }
        } else {
            rflags = (uint32_t)osError;
        }
    }

    /* Return flags before clearing */
    return (rflags);
}
uint32_t furi_thread_flags_get(void) {
    TaskHandle_t hTask;
    uint32_t rflags;

    if(FURI_IS_IRQ_MODE()) {
        rflags = (uint32_t)osErrorISR;
    } else {
        hTask = xTaskGetCurrentTaskHandle();

        if(xTaskNotifyAndQuery(hTask, 0, eNoAction, &rflags) != pdPASS) {
            rflags = (uint32_t)osError;
        }
    }

    return (rflags);
}
uint32_t furi_thread_flags_wait(uint32_t flags, uint32_t options, uint32_t timeout) {
    uint32_t rflags, nval;
    uint32_t clear;
    TickType_t t0, td, tout;
    BaseType_t rval;

    if(FURI_IS_IRQ_MODE()) {
        rflags = (uint32_t)osErrorISR;
    } else if((flags & THREAD_FLAGS_INVALID_BITS) != 0U) {
        rflags = (uint32_t)osErrorParameter;
    } else {
        if((options & osFlagsNoClear) == osFlagsNoClear) {
            clear = 0U;
        } else {
            clear = flags;
        }

        rflags = 0U;
        tout = timeout;
        t0 = xTaskGetTickCount();

        do {
            rval = xTaskNotifyWait(0, clear, &nval, tout);
            if(rval == pdPASS) {
                rflags &= flags;
                rflags |= nval;

                if((options & osFlagsWaitAll) == osFlagsWaitAll) {
                    if((flags & rflags) == flags) {
                        break;
                    } else {
                        if(timeout == 0U) {
                            rflags = (uint32_t)osErrorResource;
                            break;
                        }
                    }
                } else {
                    if((flags & rflags) != 0) {
                        break;
                    } else {
                        if(timeout == 0U) {
                            rflags = (uint32_t)osErrorResource;
                            break;
                        }
                    }
                }

                /* Update timeout */
                td = xTaskGetTickCount() - t0;
                if(td > tout) {
                    tout = 0;
                } else {
                    tout -= td;
                }
            } else {
                if(timeout == 0) {
                    rflags = (uint32_t)osErrorResource;
                } else {
                    rflags = (uint32_t)osErrorTimeout;
                }
            }
        } while(rval != pdFAIL);
    }

    /* Return flags before clearing */
    return (rflags);
}
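
/* Illustrative sketch, not part of the original file: a consumer loop built on
 * furi_thread_flags_wait(). The flag values are hypothetical; the CMSIS
 * constants osFlagsWaitAny and osWaitForever are assumed to be available in
 * the same way as the other os* symbols used in this file. Error codes are
 * returned in-band, so they are filtered out with THREAD_FLAGS_INVALID_BITS
 * before individual flags are tested. */
#define EXAMPLE_FLAG_DATA (1UL << 0)
#define EXAMPLE_FLAG_STOP (1UL << 1)

static int32_t example_flag_consumer(void* context) {
    (void)context;
    for(;;) {
        uint32_t flags = furi_thread_flags_wait(
            EXAMPLE_FLAG_DATA | EXAMPLE_FLAG_STOP, osFlagsWaitAny, osWaitForever);

        if(flags & THREAD_FLAGS_INVALID_BITS) continue; // error code, not a flag set
        if(flags & EXAMPLE_FLAG_STOP) break;
        if(flags & EXAMPLE_FLAG_DATA) {
            /* process pending data here */
        }
    }
    return 0;
}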
uint32_t furi_thread_enumerate(FuriThreadId* thread_array, uint32_t array_items) {
    uint32_t i, count;
    TaskStatus_t* task;

    if(FURI_IS_IRQ_MODE() || (thread_array == NULL) || (array_items == 0U)) {
        count = 0U;
    } else {
        vTaskSuspendAll();

        count = uxTaskGetNumberOfTasks();
        task = pvPortMalloc(count * sizeof(TaskStatus_t));

        if(task != NULL) {
            count = uxTaskGetSystemState(task, count, NULL);

            for(i = 0U; (i < count) && (i < array_items); i++) {
                thread_array[i] = (FuriThreadId)task[i].xHandle;
            }
            count = i;
        }
        (void)xTaskResumeAll();

        vPortFree(task);
    }

    return (count);
}
const char* furi_thread_get_name(FuriThreadId thread_id) {
    TaskHandle_t hTask = (TaskHandle_t)thread_id;
    const char* name;

    if(FURI_IS_IRQ_MODE() || (hTask == NULL)) {
        name = NULL;
    } else {
        name = pcTaskGetName(hTask);
    }

    return (name);
}

uint32_t furi_thread_get_stack_space(FuriThreadId thread_id) {
    TaskHandle_t hTask = (TaskHandle_t)thread_id;
    uint32_t sz;

    if(FURI_IS_IRQ_MODE() || (hTask == NULL)) {
        sz = 0U;
    } else {
        sz = (uint32_t)(uxTaskGetStackHighWaterMark(hTask) * sizeof(StackType_t));
    }

    return (sz);
}
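
/* Illustrative sketch, not part of the original file: enumerating live threads
 * and collecting their names and remaining stack space, e.g. for a system
 * monitor view. The array size is an arbitrary choice. */
#define EXAMPLE_MAX_THREADS 32U

static void example_dump_threads(void) {
    FuriThreadId ids[EXAMPLE_MAX_THREADS];
    uint32_t count = furi_thread_enumerate(ids, EXAMPLE_MAX_THREADS);

    for(uint32_t i = 0; i < count; i++) {
        const char* name = furi_thread_get_name(ids[i]);
        uint32_t stack_free = furi_thread_get_stack_space(ids[i]);
        (void)name;
        (void)stack_free; /* feed these into logging or a UI here */
    }
}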