scheduler.c 9.8 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279
  1. /*
  2. * This file is part of the MicroPython project, http://micropython.org/
  3. *
  4. * The MIT License (MIT)
  5. *
  6. * Copyright (c) 2017 Damien P. George
  7. *
  8. * Permission is hereby granted, free of charge, to any person obtaining a copy
  9. * of this software and associated documentation files (the "Software"), to deal
  10. * in the Software without restriction, including without limitation the rights
  11. * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  12. * copies of the Software, and to permit persons to whom the Software is
  13. * furnished to do so, subject to the following conditions:
  14. *
  15. * The above copyright notice and this permission notice shall be included in
  16. * all copies or substantial portions of the Software.
  17. *
  18. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  21. * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  22. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  23. * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  24. * THE SOFTWARE.
  25. */
  26. #include <stdio.h>
  27. #include "py/mphal.h"
  28. #include "py/runtime.h"
// Schedules an exception on the main thread (for exceptions "thrown" by async
// sources such as interrupts and UNIX signal handlers).
// May be called asynchronously; it only performs a single pointer store plus,
// optionally, a state-flag update, so it is safe to call from IRQ context.
void MICROPY_WRAP_MP_SCHED_EXCEPTION(mp_sched_exception)(mp_obj_t exc) {
    // The VM polls this per-thread slot and raises the stored object on the
    // main thread (see mp_handle_pending below).
    MP_STATE_MAIN_THREAD(mp_pending_exception) = exc;

    #if MICROPY_ENABLE_SCHEDULER && !MICROPY_PY_THREAD
    // Optimisation for the case where we have scheduler but no threading.
    // Allows the VM to do a single check to exclude both pending exception
    // and queued tasks.
    if (MP_STATE_VM(sched_state) == MP_SCHED_IDLE) {
        MP_STATE_VM(sched_state) = MP_SCHED_PENDING;
    }
    #endif
}
#if MICROPY_KBD_EXCEPTION
// This function may be called asynchronously at any time so only do the bare minimum.
// It schedules the pre-allocated, shared KeyboardInterrupt instance; no heap
// allocation happens here.
void MICROPY_WRAP_MP_SCHED_KEYBOARD_INTERRUPT(mp_sched_keyboard_interrupt)(void) {
    // Drop any traceback left over from a previous raise of this shared
    // exception object.
    MP_STATE_VM(mp_kbd_exception).traceback_data = NULL;
    mp_sched_exception(MP_OBJ_FROM_PTR(&MP_STATE_VM(mp_kbd_exception)));
}
#endif
#if MICROPY_ENABLE_VM_ABORT
// Request that the VM abort execution. May be called asynchronously; it only
// sets a flag, which is polled and acted upon by mp_handle_pending below.
void MICROPY_WRAP_MP_SCHED_VM_ABORT(mp_sched_vm_abort)(void) {
    MP_STATE_VM(vm_abort) = true;
}
#endif
#if MICROPY_ENABLE_SCHEDULER

// Wrap a queue index into the ring buffer; relies on MICROPY_SCHEDULER_DEPTH
// being a power of 2 (asserted below).
#define IDX_MASK(i) ((i) & (MICROPY_SCHEDULER_DEPTH - 1))

// This is a macro so it is guaranteed to be inlined in functions like
// mp_sched_schedule that may be located in a special memory region.
#define mp_sched_full() (mp_sched_num_pending() == MICROPY_SCHEDULER_DEPTH)

// True when no Python callbacks are queued.
static inline bool mp_sched_empty(void) {
    MP_STATIC_ASSERT(MICROPY_SCHEDULER_DEPTH <= 255); // MICROPY_SCHEDULER_DEPTH must fit in 8 bits
    MP_STATIC_ASSERT((IDX_MASK(MICROPY_SCHEDULER_DEPTH) == 0)); // MICROPY_SCHEDULER_DEPTH must be a power of 2
    return mp_sched_num_pending() == 0;
}
// Drain pending work: all queued C callback nodes (if enabled) and at most
// one queued Python callback. Takes the scheduler lock for the duration so
// callbacks cannot recursively re-enter the scheduler.
static inline void mp_sched_run_pending(void) {
    mp_uint_t atomic_state = MICROPY_BEGIN_ATOMIC_SECTION();
    if (MP_STATE_VM(sched_state) != MP_SCHED_PENDING) {
        // Something else (e.g. hard IRQ) locked the scheduler while we
        // acquired the lock.
        MICROPY_END_ATOMIC_SECTION(atomic_state);
        return;
    }

    // Equivalent to mp_sched_lock(), but we're already in the atomic
    // section and know that we're pending.
    MP_STATE_VM(sched_state) = MP_SCHED_LOCKED;

    #if MICROPY_SCHEDULER_STATIC_NODES
    // Run all pending C callbacks.
    while (MP_STATE_VM(sched_head) != NULL) {
        // Pop the head node off the linked list (within the atomic section).
        mp_sched_node_t *node = MP_STATE_VM(sched_head);
        MP_STATE_VM(sched_head) = node->next;
        if (MP_STATE_VM(sched_head) == NULL) {
            MP_STATE_VM(sched_tail) = NULL;
        }
        // Clear the node's callback before invoking it, so the node can be
        // re-scheduled from within the callback itself.
        mp_sched_callback_t callback = node->callback;
        node->callback = NULL;
        // Invoke the callback outside the atomic section, then re-enter it
        // before inspecting the list again.
        MICROPY_END_ATOMIC_SECTION(atomic_state);
        callback(node);
        atomic_state = MICROPY_BEGIN_ATOMIC_SECTION();
    }
    #endif

    // Run at most one pending Python callback.
    if (!mp_sched_empty()) {
        // Dequeue from the ring buffer while still inside the atomic section.
        mp_sched_item_t item = MP_STATE_VM(sched_queue)[MP_STATE_VM(sched_idx)];
        MP_STATE_VM(sched_idx) = IDX_MASK(MP_STATE_VM(sched_idx) + 1);
        --MP_STATE_VM(sched_len);
        MICROPY_END_ATOMIC_SECTION(atomic_state);
        // "Protected" call: presumably any exception is handled inside this
        // helper rather than propagating into the scheduler — see
        // mp_call_function_1_protected for the exact semantics.
        mp_call_function_1_protected(item.func, item.arg);
    } else {
        MICROPY_END_ATOMIC_SECTION(atomic_state);
    }

    // Restore MP_STATE_VM(sched_state) to idle (or pending if there are still
    // tasks in the queue).
    mp_sched_unlock();
}
  104. // Locking the scheduler prevents tasks from executing (does not prevent new
  105. // tasks from being added). We lock the scheduler while executing scheduled
  106. // tasks and also in hard interrupts or GC finalisers.
  107. void mp_sched_lock(void) {
  108. mp_uint_t atomic_state = MICROPY_BEGIN_ATOMIC_SECTION();
  109. if (MP_STATE_VM(sched_state) < 0) {
  110. // Already locked, increment lock (recursive lock).
  111. --MP_STATE_VM(sched_state);
  112. } else {
  113. // Pending or idle.
  114. MP_STATE_VM(sched_state) = MP_SCHED_LOCKED;
  115. }
  116. MICROPY_END_ATOMIC_SECTION(atomic_state);
  117. }
// Undo one level of mp_sched_lock(). When the outermost lock is released the
// state is recomputed: pending if any work is outstanding, otherwise idle.
void mp_sched_unlock(void) {
    mp_uint_t atomic_state = MICROPY_BEGIN_ATOMIC_SECTION();
    assert(MP_STATE_VM(sched_state) < 0); // must currently be locked
    if (++MP_STATE_VM(sched_state) == 0) {
        // Scheduler became unlocked. Check if there are still tasks in the
        // queue and set sched_state accordingly.
        if (
            #if !MICROPY_PY_THREAD
            // See optimisation in mp_sched_exception.
            MP_STATE_THREAD(mp_pending_exception) != MP_OBJ_NULL ||
            #endif
            #if MICROPY_SCHEDULER_STATIC_NODES
            MP_STATE_VM(sched_head) != NULL ||
            #endif
            mp_sched_num_pending()) {
            MP_STATE_VM(sched_state) = MP_SCHED_PENDING;
        } else {
            MP_STATE_VM(sched_state) = MP_SCHED_IDLE;
        }
    }
    MICROPY_END_ATOMIC_SECTION(atomic_state);
}
// Queue a Python callable `function` to be called later with `arg`.
// Returns true on success, false if the queue is full. Runs entirely inside
// an atomic section so it is safe to call from interrupt context.
bool MICROPY_WRAP_MP_SCHED_SCHEDULE(mp_sched_schedule)(mp_obj_t function, mp_obj_t arg) {
    mp_uint_t atomic_state = MICROPY_BEGIN_ATOMIC_SECTION();
    bool ret;
    if (!mp_sched_full()) {
        if (MP_STATE_VM(sched_state) == MP_SCHED_IDLE) {
            MP_STATE_VM(sched_state) = MP_SCHED_PENDING;
        }
        // Append at the tail of the ring buffer: sched_idx is the head and
        // sched_len the count; the post-increment extends the queue by one.
        uint8_t iput = IDX_MASK(MP_STATE_VM(sched_idx) + MP_STATE_VM(sched_len)++);
        MP_STATE_VM(sched_queue)[iput].func = function;
        MP_STATE_VM(sched_queue)[iput].arg = arg;
        MICROPY_SCHED_HOOK_SCHEDULED;
        ret = true;
    } else {
        // schedule queue is full
        ret = false;
    }
    MICROPY_END_ATOMIC_SECTION(atomic_state);
    return ret;
}
#if MICROPY_SCHEDULER_STATIC_NODES
// Queue a statically-allocated C callback node. Returns true on success,
// false if the node is already scheduled (its callback slot is non-NULL).
// Runs inside an atomic section so it is safe to call from interrupt context.
bool mp_sched_schedule_node(mp_sched_node_t *node, mp_sched_callback_t callback) {
    mp_uint_t atomic_state = MICROPY_BEGIN_ATOMIC_SECTION();
    bool ret;
    if (node->callback == NULL) {
        if (MP_STATE_VM(sched_state) == MP_SCHED_IDLE) {
            MP_STATE_VM(sched_state) = MP_SCHED_PENDING;
        }
        node->callback = callback;
        node->next = NULL;
        // Append the node at the tail of the singly-linked list.
        if (MP_STATE_VM(sched_tail) == NULL) {
            MP_STATE_VM(sched_head) = node;
        } else {
            MP_STATE_VM(sched_tail)->next = node;
        }
        MP_STATE_VM(sched_tail) = node;
        MICROPY_SCHED_HOOK_SCHEDULED;
        ret = true;
    } else {
        // already scheduled
        ret = false;
    }
    MICROPY_END_ATOMIC_SECTION(atomic_state);
    return ret;
}
#endif

// Register the Python callback queue as a GC root so queued func/arg objects
// are not collected while waiting to run.
MP_REGISTER_ROOT_POINTER(mp_sched_item_t sched_queue[MICROPY_SCHEDULER_DEPTH]);

#endif // MICROPY_ENABLE_SCHEDULER
// Called periodically from the VM or from "waiting" code (e.g. sleep) to
// process background tasks and pending exceptions (e.g. KeyboardInterrupt).
// If raise_exc is false, a pending abort is consumed without jumping and a
// pending exception is cleared without being raised.
void mp_handle_pending(bool raise_exc) {
    // Handle pending VM abort.
    #if MICROPY_ENABLE_VM_ABORT
    if (MP_STATE_VM(vm_abort) && mp_thread_is_main_thread()) {
        MP_STATE_VM(vm_abort) = false;
        if (raise_exc && nlr_get_abort() != NULL) {
            nlr_jump_abort();
        }
    }
    #endif

    // Handle any pending exception.
    if (MP_STATE_THREAD(mp_pending_exception) != MP_OBJ_NULL) {
        mp_uint_t atomic_state = MICROPY_BEGIN_ATOMIC_SECTION();
        // Re-read under the atomic section: an async source may have changed
        // the slot between the unlocked check above and here.
        mp_obj_t obj = MP_STATE_THREAD(mp_pending_exception);
        if (obj != MP_OBJ_NULL) {
            MP_STATE_THREAD(mp_pending_exception) = MP_OBJ_NULL;
            if (raise_exc) {
                MICROPY_END_ATOMIC_SECTION(atomic_state);
                nlr_raise(obj); // does not return
            }
        }
        MICROPY_END_ATOMIC_SECTION(atomic_state);
    }

    // Handle any pending callbacks.
    #if MICROPY_ENABLE_SCHEDULER
    if (MP_STATE_VM(sched_state) == MP_SCHED_PENDING) {
        mp_sched_run_pending();
    }
    #endif
}
// Handles any pending MicroPython events without waiting for an interrupt or event.
void mp_event_handle_nowait(void) {
    #if defined(MICROPY_EVENT_POLL_HOOK_FAST) && !MICROPY_PREVIEW_VERSION_2
    // For ports still using the old macros.
    MICROPY_EVENT_POLL_HOOK_FAST
    #else
    // Process any port layer (non-blocking) events.
    MICROPY_INTERNAL_EVENT_HOOK;
    // Raise pending exceptions and run scheduled callbacks.
    mp_handle_pending(true);
    #endif
}
// Handles any pending MicroPython events and then suspends execution until the
// next interrupt or event.
void mp_event_wait_indefinite(void) {
    #if defined(MICROPY_EVENT_POLL_HOOK) && !MICROPY_PREVIEW_VERSION_2
    // For ports still using the old macros.
    MICROPY_EVENT_POLL_HOOK
    #else
    mp_event_handle_nowait();
    // Port-defined wait-for-event; -1 means no timeout.
    MICROPY_INTERNAL_WFE(-1);
    #endif
}
// Handle any pending MicroPython events and then suspends execution until the
// next interrupt or event, or until timeout_ms milliseconds have elapsed.
void mp_event_wait_ms(mp_uint_t timeout_ms) {
    #if defined(MICROPY_EVENT_POLL_HOOK) && !MICROPY_PREVIEW_VERSION_2
    // For ports still using the old macros.
    MICROPY_EVENT_POLL_HOOK
    #else
    mp_event_handle_nowait();
    // Port-defined wait-for-event with a millisecond timeout.
    MICROPY_INTERNAL_WFE(timeout_ms);
    #endif
}