kernel.c 4.5 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198
  1. #include "kernel.h"
  2. #include "base.h"
  3. #include "check.h"
  4. #include "common_defines.h"
  5. #include <furi_hal.h>
  6. #include CMSIS_device_header
  7. bool furi_kernel_is_irq_or_masked() {
  8. bool irq = false;
  9. BaseType_t state;
  10. if(FURI_IS_IRQ_MODE()) {
  11. /* Called from interrupt context */
  12. irq = true;
  13. } else {
  14. /* Get FreeRTOS scheduler state */
  15. state = xTaskGetSchedulerState();
  16. if(state != taskSCHEDULER_NOT_STARTED) {
  17. /* Scheduler was started */
  18. if(FURI_IS_IRQ_MASKED()) {
  19. /* Interrupts are masked */
  20. irq = true;
  21. }
  22. }
  23. }
  24. /* Return context, 0: thread context, 1: IRQ context */
  25. return (irq);
  26. }
  27. int32_t furi_kernel_lock() {
  28. furi_assert(!furi_kernel_is_irq_or_masked());
  29. int32_t lock;
  30. switch(xTaskGetSchedulerState()) {
  31. case taskSCHEDULER_SUSPENDED:
  32. lock = 1;
  33. break;
  34. case taskSCHEDULER_RUNNING:
  35. vTaskSuspendAll();
  36. lock = 0;
  37. break;
  38. case taskSCHEDULER_NOT_STARTED:
  39. default:
  40. lock = (int32_t)FuriStatusError;
  41. break;
  42. }
  43. /* Return previous lock state */
  44. return (lock);
  45. }
  46. int32_t furi_kernel_unlock() {
  47. furi_assert(!furi_kernel_is_irq_or_masked());
  48. int32_t lock;
  49. switch(xTaskGetSchedulerState()) {
  50. case taskSCHEDULER_SUSPENDED:
  51. lock = 1;
  52. if(xTaskResumeAll() != pdTRUE) {
  53. if(xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED) {
  54. lock = (int32_t)FuriStatusError;
  55. }
  56. }
  57. break;
  58. case taskSCHEDULER_RUNNING:
  59. lock = 0;
  60. break;
  61. case taskSCHEDULER_NOT_STARTED:
  62. default:
  63. lock = (int32_t)FuriStatusError;
  64. break;
  65. }
  66. /* Return previous lock state */
  67. return (lock);
  68. }
  69. int32_t furi_kernel_restore_lock(int32_t lock) {
  70. furi_assert(!furi_kernel_is_irq_or_masked());
  71. switch(xTaskGetSchedulerState()) {
  72. case taskSCHEDULER_SUSPENDED:
  73. case taskSCHEDULER_RUNNING:
  74. if(lock == 1) {
  75. vTaskSuspendAll();
  76. } else {
  77. if(lock != 0) {
  78. lock = (int32_t)FuriStatusError;
  79. } else {
  80. if(xTaskResumeAll() != pdTRUE) {
  81. if(xTaskGetSchedulerState() != taskSCHEDULER_RUNNING) {
  82. lock = (int32_t)FuriStatusError;
  83. }
  84. }
  85. }
  86. }
  87. break;
  88. case taskSCHEDULER_NOT_STARTED:
  89. default:
  90. lock = (int32_t)FuriStatusError;
  91. break;
  92. }
  93. /* Return new lock state */
  94. return (lock);
  95. }
  96. uint32_t furi_kernel_get_tick_frequency() {
  97. /* Return frequency in hertz */
  98. return (configTICK_RATE_HZ_RAW);
  99. }
  100. void furi_delay_tick(uint32_t ticks) {
  101. furi_assert(!furi_kernel_is_irq_or_masked());
  102. if(ticks == 0U) {
  103. taskYIELD();
  104. } else {
  105. vTaskDelay(ticks);
  106. }
  107. }
  108. FuriStatus furi_delay_until_tick(uint32_t tick) {
  109. furi_assert(!furi_kernel_is_irq_or_masked());
  110. TickType_t tcnt, delay;
  111. FuriStatus stat;
  112. stat = FuriStatusOk;
  113. tcnt = xTaskGetTickCount();
  114. /* Determine remaining number of tick to delay */
  115. delay = (TickType_t)tick - tcnt;
  116. /* Check if target tick has not expired */
  117. if((delay != 0U) && (0 == (delay >> (8 * sizeof(TickType_t) - 1)))) {
  118. if(xTaskDelayUntil(&tcnt, delay) == pdFALSE) {
  119. /* Did not delay */
  120. stat = FuriStatusError;
  121. }
  122. } else {
  123. /* No delay or already expired */
  124. stat = FuriStatusErrorParameter;
  125. }
  126. /* Return execution status */
  127. return (stat);
  128. }
  129. uint32_t furi_get_tick() {
  130. TickType_t ticks;
  131. if(furi_kernel_is_irq_or_masked() != 0U) {
  132. ticks = xTaskGetTickCountFromISR();
  133. } else {
  134. ticks = xTaskGetTickCount();
  135. }
  136. return ticks;
  137. }
  138. uint32_t furi_ms_to_ticks(uint32_t milliseconds) {
  139. #if configTICK_RATE_HZ_RAW == 1000
  140. return milliseconds;
  141. #else
  142. return (uint32_t)((float)configTICK_RATE_HZ_RAW) / 1000.0f * (float)milliseconds;
  143. #endif
  144. }
  145. void furi_delay_ms(uint32_t milliseconds) {
  146. if(!FURI_IS_ISR() && xTaskGetSchedulerState() == taskSCHEDULER_RUNNING) {
  147. if(milliseconds > 0 && milliseconds < portMAX_DELAY - 1) {
  148. milliseconds += 1;
  149. }
  150. #if configTICK_RATE_HZ_RAW == 1000
  151. furi_delay_tick(milliseconds);
  152. #else
  153. furi_delay_tick(furi_ms_to_ticks(milliseconds));
  154. #endif
  155. } else if(milliseconds > 0) {
  156. furi_delay_us(milliseconds * 1000);
  157. }
  158. }
/**
 * Busy-wait for the given number of microseconds.
 * Thin wrapper over the HAL delay; safe in any context as far as this
 * code shows — NOTE(review): confirm against furi_hal_cortex_delay_us.
 *
 * @param microseconds time to busy-wait
 */
void furi_delay_us(uint32_t microseconds) {
    furi_hal_cortex_delay_us(microseconds);
}