thread.c

#include "thread.h"
#include "kernel.h"
#include "memmgr.h"
#include "memmgr_heap.h"
#include "check.h"
#include "common_defines.h"

#include <task.h>
#include <m-string.h>

#define THREAD_NOTIFY_INDEX 1 // Index 0 is used for stream buffers

struct FuriThread {
    FuriThreadState state;
    int32_t ret;

    FuriThreadCallback callback;
    void* context;

    FuriThreadStateCallback state_callback;
    void* state_context;

    char* name;
    configSTACK_DEPTH_TYPE stack_size;
    FuriThreadPriority priority;

    TaskHandle_t task_handle;
    bool heap_trace_enabled;
    size_t heap_size;
};

/** Catch threads that are trying to exit the wrong way */
__attribute__((__noreturn__)) void furi_thread_catch() {
    asm volatile("nop"); // extra magic
    furi_crash("You are doing it wrong");
}

static void furi_thread_set_state(FuriThread* thread, FuriThreadState state) {
    furi_assert(thread);
    thread->state = state;
    if(thread->state_callback) {
        thread->state_callback(state, thread->state_context);
    }
}

static void furi_thread_body(void* context) {
    furi_assert(context);
    FuriThread* thread = context;

    furi_assert(thread->state == FuriThreadStateStarting);
    furi_thread_set_state(thread, FuriThreadStateRunning);

    TaskHandle_t task_handle = xTaskGetCurrentTaskHandle();
    if(thread->heap_trace_enabled == true) {
        memmgr_heap_enable_thread_trace((FuriThreadId)task_handle);
    }

    thread->ret = thread->callback(thread->context);

    if(thread->heap_trace_enabled == true) {
        furi_delay_ms(33);
        thread->heap_size = memmgr_heap_get_thread_memory((FuriThreadId)task_handle);
        memmgr_heap_disable_thread_trace((FuriThreadId)task_handle);
    }

    furi_assert(thread->state == FuriThreadStateRunning);
    furi_thread_set_state(thread, FuriThreadStateStopped);

    vTaskDelete(thread->task_handle);
    furi_thread_catch();
}

FuriThread* furi_thread_alloc() {
    FuriThread* thread = malloc(sizeof(FuriThread));
    return thread;
}

void furi_thread_free(FuriThread* thread) {
    furi_assert(thread);
    furi_assert(thread->state == FuriThreadStateStopped);

    if(thread->name) free((void*)thread->name);
    free(thread);
}

void furi_thread_set_name(FuriThread* thread, const char* name) {
    furi_assert(thread);
    furi_assert(thread->state == FuriThreadStateStopped);
    if(thread->name) free((void*)thread->name);
    thread->name = name ? strdup(name) : NULL;
}

void furi_thread_set_stack_size(FuriThread* thread, size_t stack_size) {
    furi_assert(thread);
    furi_assert(thread->state == FuriThreadStateStopped);
    furi_assert(stack_size % 4 == 0);
    thread->stack_size = stack_size;
}

void furi_thread_set_callback(FuriThread* thread, FuriThreadCallback callback) {
    furi_assert(thread);
    furi_assert(thread->state == FuriThreadStateStopped);
    thread->callback = callback;
}

void furi_thread_set_context(FuriThread* thread, void* context) {
    furi_assert(thread);
    furi_assert(thread->state == FuriThreadStateStopped);
    thread->context = context;
}

void furi_thread_set_priority(FuriThread* thread, FuriThreadPriority priority) {
    furi_assert(thread);
    furi_assert(thread->state == FuriThreadStateStopped);
    furi_assert(priority >= FuriThreadPriorityIdle && priority <= FuriThreadPriorityIsr);
    thread->priority = priority;
}

void furi_thread_set_state_callback(FuriThread* thread, FuriThreadStateCallback callback) {
    furi_assert(thread);
    furi_assert(thread->state == FuriThreadStateStopped);
    thread->state_callback = callback;
}

void furi_thread_set_state_context(FuriThread* thread, void* context) {
    furi_assert(thread);
    furi_assert(thread->state == FuriThreadStateStopped);
    thread->state_context = context;
}

FuriThreadState furi_thread_get_state(FuriThread* thread) {
    furi_assert(thread);
    return thread->state;
}

void furi_thread_start(FuriThread* thread) {
    furi_assert(thread);
    furi_assert(thread->callback);
    furi_assert(thread->state == FuriThreadStateStopped);
    furi_assert(thread->stack_size > 0 && thread->stack_size < 0xFFFF * 4);

    furi_thread_set_state(thread, FuriThreadStateStarting);

    BaseType_t ret = xTaskCreate(
        furi_thread_body,
        thread->name,
        thread->stack_size / 4, // xTaskCreate expects stack depth in words, not bytes
        thread,
        thread->priority ? thread->priority : FuriThreadPriorityNormal,
        &thread->task_handle);

    furi_check(ret == pdPASS);
    furi_check(thread->task_handle);
}

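/* Illustrative lifecycle sketch (not part of this file's API; callback name and
 * stack size are made up for the example): a worker thread is allocated,
 * configured while stopped, started, joined and freed.
 *
 *     static int32_t example_worker(void* context) {
 *         (void)context;
 *         return 0;
 *     }
 *
 *     FuriThread* thread = furi_thread_alloc();
 *     furi_thread_set_name(thread, "ExampleWorker");
 *     furi_thread_set_stack_size(thread, 1024);
 *     furi_thread_set_callback(thread, example_worker);
 *     furi_thread_set_context(thread, NULL);
 *     furi_thread_start(thread);
 *     furi_thread_join(thread);
 *     int32_t ret = furi_thread_get_return_code(thread);
 *     furi_thread_free(thread);
 */
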
bool furi_thread_join(FuriThread* thread) {
    furi_assert(thread);

    // Poll until the thread body has reported FuriThreadStateStopped
    while(thread->state != FuriThreadStateStopped) {
        furi_delay_ms(10);
    }

    return true;
}

FuriThreadId furi_thread_get_id(FuriThread* thread) {
    furi_assert(thread);
    return thread->task_handle;
}

void furi_thread_enable_heap_trace(FuriThread* thread) {
    furi_assert(thread);
    furi_assert(thread->state == FuriThreadStateStopped);
    furi_assert(thread->heap_trace_enabled == false);
    thread->heap_trace_enabled = true;
}

void furi_thread_disable_heap_trace(FuriThread* thread) {
    furi_assert(thread);
    furi_assert(thread->state == FuriThreadStateStopped);
    furi_assert(thread->heap_trace_enabled == true);
    thread->heap_trace_enabled = false;
}

size_t furi_thread_get_heap_size(FuriThread* thread) {
    furi_assert(thread);
    furi_assert(thread->heap_trace_enabled == true);
    return thread->heap_size;
}

int32_t furi_thread_get_return_code(FuriThread* thread) {
    furi_assert(thread);
    furi_assert(thread->state == FuriThreadStateStopped);
    return thread->ret;
}

FuriThreadId furi_thread_get_current_id() {
    return xTaskGetCurrentTaskHandle();
}

void furi_thread_yield() {
    furi_assert(!FURI_IS_IRQ_MODE());
    taskYIELD();
}

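/* Thread flags.
 *
 * The functions below mirror CMSIS-RTOS2 osThreadFlags semantics and are backed by
 * FreeRTOS task notifications at THREAD_NOTIFY_INDEX. Errors are reported by
 * returning the (negative) FuriStatus code cast to uint32_t, so callers can tell an
 * error apart from a valid flag mask by inspecting the most significant bit.
 */
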
/* Limits */
#define MAX_BITS_TASK_NOTIFY 31U
#define MAX_BITS_EVENT_GROUPS 24U

#define THREAD_FLAGS_INVALID_BITS (~((1UL << MAX_BITS_TASK_NOTIFY) - 1U))
#define EVENT_FLAGS_INVALID_BITS (~((1UL << MAX_BITS_EVENT_GROUPS) - 1U))

uint32_t furi_thread_flags_set(FuriThreadId thread_id, uint32_t flags) {
    TaskHandle_t hTask = (TaskHandle_t)thread_id;
    uint32_t rflags;
    BaseType_t yield;

    if((hTask == NULL) || ((flags & THREAD_FLAGS_INVALID_BITS) != 0U)) {
        rflags = (uint32_t)FuriStatusErrorParameter;
    } else {
        rflags = (uint32_t)FuriStatusError;

        if(FURI_IS_IRQ_MODE()) {
            yield = pdFALSE;

            (void)xTaskNotifyIndexedFromISR(hTask, THREAD_NOTIFY_INDEX, flags, eSetBits, &yield);
            (void)xTaskNotifyAndQueryIndexedFromISR(
                hTask, THREAD_NOTIFY_INDEX, 0, eNoAction, &rflags, NULL);

            portYIELD_FROM_ISR(yield);
        } else {
            (void)xTaskNotifyIndexed(hTask, THREAD_NOTIFY_INDEX, flags, eSetBits);
            (void)xTaskNotifyAndQueryIndexed(hTask, THREAD_NOTIFY_INDEX, 0, eNoAction, &rflags);
        }
    }

    /* Return flags after setting */
    return (rflags);
}

uint32_t furi_thread_flags_clear(uint32_t flags) {
    TaskHandle_t hTask;
    uint32_t rflags, cflags;

    if(FURI_IS_IRQ_MODE()) {
        rflags = (uint32_t)FuriStatusErrorISR;
    } else if((flags & THREAD_FLAGS_INVALID_BITS) != 0U) {
        rflags = (uint32_t)FuriStatusErrorParameter;
    } else {
        hTask = xTaskGetCurrentTaskHandle();

        if(xTaskNotifyAndQueryIndexed(hTask, THREAD_NOTIFY_INDEX, 0, eNoAction, &cflags) ==
           pdPASS) {
            rflags = cflags;
            cflags &= ~flags;

            if(xTaskNotifyIndexed(hTask, THREAD_NOTIFY_INDEX, cflags, eSetValueWithOverwrite) !=
               pdPASS) {
                rflags = (uint32_t)FuriStatusError;
            }
        } else {
            rflags = (uint32_t)FuriStatusError;
        }
    }

    /* Return flags before clearing */
    return (rflags);
}

uint32_t furi_thread_flags_get(void) {
    TaskHandle_t hTask;
    uint32_t rflags;

    if(FURI_IS_IRQ_MODE()) {
        rflags = (uint32_t)FuriStatusErrorISR;
    } else {
        hTask = xTaskGetCurrentTaskHandle();

        if(xTaskNotifyAndQueryIndexed(hTask, THREAD_NOTIFY_INDEX, 0, eNoAction, &rflags) !=
           pdPASS) {
            rflags = (uint32_t)FuriStatusError;
        }
    }

    return (rflags);
}

uint32_t furi_thread_flags_wait(uint32_t flags, uint32_t options, uint32_t timeout) {
    uint32_t rflags, nval;
    uint32_t clear;
    TickType_t t0, td, tout;
    BaseType_t rval;

    if(FURI_IS_IRQ_MODE()) {
        rflags = (uint32_t)FuriStatusErrorISR;
    } else if((flags & THREAD_FLAGS_INVALID_BITS) != 0U) {
        rflags = (uint32_t)FuriStatusErrorParameter;
    } else {
        if((options & FuriFlagNoClear) == FuriFlagNoClear) {
            clear = 0U;
        } else {
            clear = flags;
        }

        rflags = 0U;
        tout = timeout;

        t0 = xTaskGetTickCount();
        do {
            rval = xTaskNotifyWaitIndexed(THREAD_NOTIFY_INDEX, 0, clear, &nval, tout);
            if(rval == pdPASS) {
                rflags &= flags;
                rflags |= nval;

                if((options & FuriFlagWaitAll) == FuriFlagWaitAll) {
                    if((flags & rflags) == flags) {
                        break;
                    } else {
                        if(timeout == 0U) {
                            rflags = (uint32_t)FuriStatusErrorResource;
                            break;
                        }
                    }
                } else {
                    if((flags & rflags) != 0) {
                        break;
                    } else {
                        if(timeout == 0U) {
                            rflags = (uint32_t)FuriStatusErrorResource;
                            break;
                        }
                    }
                }

                /* Update timeout */
                td = xTaskGetTickCount() - t0;
                if(td > tout) {
                    tout = 0;
                } else {
                    tout -= td;
                }
            } else {
                if(timeout == 0) {
                    rflags = (uint32_t)FuriStatusErrorResource;
                } else {
                    rflags = (uint32_t)FuriStatusErrorTimeout;
                }
            }
        } while(rval != pdFAIL);
    }

    /* Return flags before clearing */
    return (rflags);
}

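/* Illustrative signalling sketch (flag names and the worker are made up): one
 * thread waits on a pair of flags while another thread, or an ISR, sets them
 * with furi_thread_flags_set(). FuriFlagWaitAny and FuriWaitForever are assumed
 * to come from the firmware's base definitions (CMSIS-style "any flag" option
 * and infinite timeout).
 *
 *     #define EXAMPLE_FLAG_DATA (1UL << 0)
 *     #define EXAMPLE_FLAG_EXIT (1UL << 1)
 *
 *     static int32_t example_flag_worker(void* context) {
 *         (void)context;
 *         for(;;) {
 *             uint32_t flags = furi_thread_flags_wait(
 *                 EXAMPLE_FLAG_DATA | EXAMPLE_FLAG_EXIT, FuriFlagWaitAny, FuriWaitForever);
 *             if(flags & EXAMPLE_FLAG_EXIT) break;
 *             // handle EXAMPLE_FLAG_DATA here
 *         }
 *         return 0;
 *     }
 *
 *     // From another thread (or an ISR):
 *     furi_thread_flags_set(furi_thread_get_id(thread), EXAMPLE_FLAG_DATA);
 */
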
uint32_t furi_thread_enumerate(FuriThreadId* thread_array, uint32_t array_items) {
    uint32_t i, count;
    TaskStatus_t* task;

    if(FURI_IS_IRQ_MODE() || (thread_array == NULL) || (array_items == 0U)) {
        count = 0U;
    } else {
        vTaskSuspendAll();

        count = uxTaskGetNumberOfTasks();
        task = pvPortMalloc(count * sizeof(TaskStatus_t));

        if(task != NULL) {
            count = uxTaskGetSystemState(task, count, NULL);

            for(i = 0U; (i < count) && (i < array_items); i++) {
                thread_array[i] = (FuriThreadId)task[i].xHandle;
            }
            count = i;
        } else {
            // Allocation failed: report zero threads instead of an unfilled array
            count = 0U;
        }
        (void)xTaskResumeAll();

        vPortFree(task);
    }

    return (count);
}

const char* furi_thread_get_name(FuriThreadId thread_id) {
    TaskHandle_t hTask = (TaskHandle_t)thread_id;
    const char* name;

    if(FURI_IS_IRQ_MODE() || (hTask == NULL)) {
        name = NULL;
    } else {
        name = pcTaskGetName(hTask);
    }

    return (name);
}

uint32_t furi_thread_get_stack_space(FuriThreadId thread_id) {
    TaskHandle_t hTask = (TaskHandle_t)thread_id;
    uint32_t sz;

    if(FURI_IS_IRQ_MODE() || (hTask == NULL)) {
        sz = 0U;
    } else {
        sz = (uint32_t)(uxTaskGetStackHighWaterMark(hTask) * sizeof(StackType_t));
    }

    return (sz);
}
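
/* Illustrative introspection sketch (buffer size is made up): list the running
 * threads together with the remaining stack space reported for each.
 *
 *     FuriThreadId ids[16];
 *     uint32_t count = furi_thread_enumerate(ids, 16);
 *     for(uint32_t i = 0; i < count; i++) {
 *         const char* name = furi_thread_get_name(ids[i]);
 *         uint32_t free_stack = furi_thread_get_stack_space(ids[i]);
 *         // e.g. print name (may be NULL) and free_stack in bytes
 *     }
 */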