vm.c

/*
 * This file is part of the MicroPython project, http://micropython.org/
 *
 * The MIT License (MIT)
 *
 * Copyright (c) 2013-2019 Damien P. George
 * Copyright (c) 2014-2015 Paul Sokolovsky
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include <stdio.h>
#include <string.h>
#include <assert.h>

#include "py/emitglue.h"
#include "py/objtype.h"
#include "py/objfun.h"
#include "py/runtime.h"
#include "py/bc0.h"
#include "py/profile.h"

// *FORMAT-OFF*

#if 0
#if MICROPY_PY_THREAD
#define TRACE_PREFIX mp_printf(&mp_plat_print, "ts=%p sp=%d ", mp_thread_get_state(), (int)(sp - &code_state->state[0] + 1))
#else
#define TRACE_PREFIX mp_printf(&mp_plat_print, "sp=%d ", (int)(sp - &code_state->state[0] + 1))
#endif
#define TRACE(ip) TRACE_PREFIX; mp_bytecode_print2(&mp_plat_print, ip, 1, code_state->fun_bc->child_table, &code_state->fun_bc->context->constants);
#else
#define TRACE(ip)
#endif

// The value stack grows up (this makes it incompatible with the native C stack,
// but makes sure that arguments to functions are in natural order arg1..argN;
// Python semantics mandate left-to-right evaluation order, including for
// function arguments). The stack pointer is pre-incremented and points at the
// top element.
// The exception stack also grows up, and its top element is likewise pointed at.
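// For example (derived from the call opcodes below): by the time a call
// opcode runs for f(a, b), the value stack holds (..., f, a, b) with sp
// pointing at b, so the callee and its arguments are already contiguous
// and in natural left-to-right order for the call helpers.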
#define DECODE_UINT \
    mp_uint_t unum = 0; \
    do { \
        unum = (unum << 7) + (*ip & 0x7f); \
    } while ((*ip++ & 0x80) != 0)
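// Worked example of the encoding decoded above: each byte carries 7 bits of
// payload, most-significant group first, with the 0x80 bit as a continuation
// flag. The value 300 (0b10'0101100) is thus the two bytes 0x82 0x2c:
//   0x82 -> unum = 0x02, continuation bit set, keep going
//   0x2c -> unum = (0x02 << 7) + 0x2c = 300, continuation bit clear, stop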
#define DECODE_ULABEL \
    size_t ulab; \
    do { \
        if (ip[0] & 0x80) { \
            ulab = ((ip[0] & 0x7f) | (ip[1] << 7)); \
            ip += 2; \
        } else { \
            ulab = ip[0]; \
            ip += 1; \
        } \
    } while (0)

#define DECODE_SLABEL \
    size_t slab; \
    do { \
        if (ip[0] & 0x80) { \
            slab = ((ip[0] & 0x7f) | (ip[1] << 7)) - 0x4000; \
            ip += 2; \
        } else { \
            slab = ip[0] - 0x40; \
            ip += 1; \
        } \
    } while (0)
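// Worked example of the signed label decoding above: the one-byte form is
// biased by 0x40 and the two-byte form by 0x4000, so small backward jumps
// still fit in one byte. The byte 0x3e decodes as 0x3e - 0x40 = -2, while
// 0x42 decodes as +2. Note slab is a size_t, so a negative offset relies on
// unsigned wrap-around when it is later added to ip.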
#if MICROPY_EMIT_BYTECODE_USES_QSTR_TABLE
#define DECODE_QSTR \
    DECODE_UINT; \
    qstr qst = qstr_table[unum]
#else
#define DECODE_QSTR \
    DECODE_UINT; \
    qstr qst = unum
#endif

#define DECODE_PTR \
    DECODE_UINT; \
    void *ptr = (void *)(uintptr_t)code_state->fun_bc->child_table[unum]

#define DECODE_OBJ \
    DECODE_UINT; \
    mp_obj_t obj = (mp_obj_t)code_state->fun_bc->context->constants.obj_table[unum]

#define PUSH(val) *++sp = (val)
#define POP() (*sp--)
#define TOP() (*sp)
#define SET_TOP(val) *sp = (val)

#if MICROPY_PY_SYS_EXC_INFO
#define CLEAR_SYS_EXC_INFO() MP_STATE_VM(cur_exception) = NULL;
#else
#define CLEAR_SYS_EXC_INFO()
#endif

#define PUSH_EXC_BLOCK(with_or_finally) do { \
    DECODE_ULABEL; /* except labels are always forward */ \
    ++exc_sp; \
    exc_sp->handler = ip + ulab; \
    exc_sp->val_sp = MP_TAGPTR_MAKE(sp, ((with_or_finally) << 1)); \
    exc_sp->prev_exc = NULL; \
} while (0)

#define POP_EXC_BLOCK() \
    exc_sp--; /* pop back to previous exception handler */ \
    CLEAR_SYS_EXC_INFO() /* just clear sys.exc_info(); not compliant, but it shouldn't be used in the first place */

#define CANCEL_ACTIVE_FINALLY(sp) do { \
    if (mp_obj_is_small_int(sp[-1])) { \
        /* Stack: (..., prev_dest_ip, prev_cause, dest_ip) */ \
        /* Cancel the unwind through the previous finally, replace with current one */ \
        sp[-2] = sp[0]; \
        sp -= 2; \
    } else { \
        assert(sp[-1] == mp_const_none || mp_obj_is_exception_instance(sp[-1])); \
        /* Stack: (..., None/exception, dest_ip) */ \
        /* Silence the finally's exception value (may be None or an exception) */ \
        sp[-1] = sp[0]; \
        --sp; \
    } \
} while (0)

#if MICROPY_PY_SYS_SETTRACE

#define FRAME_SETUP() do { \
    assert(code_state != code_state->prev_state); \
    MP_STATE_THREAD(current_code_state) = code_state; \
    assert(code_state != code_state->prev_state); \
} while (0)

#define FRAME_ENTER() do { \
    assert(code_state != code_state->prev_state); \
    code_state->prev_state = MP_STATE_THREAD(current_code_state); \
    assert(code_state != code_state->prev_state); \
    if (!mp_prof_is_executing) { \
        mp_prof_frame_enter(code_state); \
    } \
} while (0)

#define FRAME_LEAVE() do { \
    assert(code_state != code_state->prev_state); \
    MP_STATE_THREAD(current_code_state) = code_state->prev_state; \
    assert(code_state != code_state->prev_state); \
} while (0)

#define FRAME_UPDATE() do { \
    assert(MP_STATE_THREAD(current_code_state) == code_state); \
    if (!mp_prof_is_executing) { \
        code_state->frame = MP_OBJ_TO_PTR(mp_prof_frame_update(code_state)); \
    } \
} while (0)

#define TRACE_TICK(current_ip, current_sp, is_exception) do { \
    assert(code_state != code_state->prev_state); \
    assert(MP_STATE_THREAD(current_code_state) == code_state); \
    if (!mp_prof_is_executing && code_state->frame && MP_STATE_THREAD(prof_trace_callback)) { \
        MP_PROF_INSTR_DEBUG_PRINT(code_state->ip); \
    } \
    if (!mp_prof_is_executing && code_state->frame && code_state->frame->callback) { \
        mp_prof_instr_tick(code_state, is_exception); \
    } \
} while (0)

#else // MICROPY_PY_SYS_SETTRACE
#define FRAME_SETUP()
#define FRAME_ENTER()
#define FRAME_LEAVE()
#define FRAME_UPDATE()
#define TRACE_TICK(current_ip, current_sp, is_exception)
#endif // MICROPY_PY_SYS_SETTRACE

// fastn has items in reverse order (fastn[0] is local[0], fastn[-1] is local[1], etc)
// sp points to the bottom of the stack, which grows up
// returns:
//   MP_VM_RETURN_NORMAL, sp valid, return value in *sp
//   MP_VM_RETURN_YIELD, ip, sp valid, yielded value in *sp
//   MP_VM_RETURN_EXCEPTION, exception in state[0]
mp_vm_return_kind_t MICROPY_WRAP_MP_EXECUTE_BYTECODE(mp_execute_bytecode)(mp_code_state_t *code_state, volatile mp_obj_t inject_exc) {

    #define SELECTIVE_EXC_IP (0)
    // When disabled, code_state->ip is updated unconditionally during opcode
    // dispatch, and this is subsequently used in the exception handler
    // (either NLR jump or direct RAISE). This is good for code size because it
    // happens in a single place, but is more work than necessary, as many
    // opcodes cannot raise. Enabling SELECTIVE_EXC_IP means that code_state->ip
    // is "selectively" updated only during handling of opcodes that might raise.
    // This costs about 360 bytes on PYBV11 for a 1-3% performance gain (e.g. 3%
    // in bm_fft.py). On rp2040 there is zero code size difference, for a 0-1%
    // gain. (Both with computed goto enabled.)
    #if SELECTIVE_EXC_IP
    // Note: because ip has already been advanced by one byte in the dispatch,
    // the value of ip here is one byte past the last opcode.
    #define MARK_EXC_IP_SELECTIVE() { code_state->ip = ip; }
    // No need to update in dispatch.
    #define MARK_EXC_IP_GLOBAL()
    #else
    #define MARK_EXC_IP_SELECTIVE()
    // Immediately before dispatch, save the current ip, which will be the
    // opcode about to be dispatched.
    #define MARK_EXC_IP_GLOBAL() { code_state->ip = ip; }
    #endif

    #if MICROPY_OPT_COMPUTED_GOTO
    #include "py/vmentrytable.h"
    #define DISPATCH() do { \
        TRACE(ip); \
        MARK_EXC_IP_GLOBAL(); \
        TRACE_TICK(ip, sp, false); \
        goto *entry_table[*ip++]; \
    } while (0)
    #define DISPATCH_WITH_PEND_EXC_CHECK() goto pending_exception_check
    #define ENTRY(op) entry_##op
    #define ENTRY_DEFAULT entry_default
    #else
    #define DISPATCH() goto dispatch_loop
    #define DISPATCH_WITH_PEND_EXC_CHECK() goto pending_exception_check
    #define ENTRY(op) case op
    #define ENTRY_DEFAULT default
    #endif
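    #if 0
    // Minimal standalone sketch (not part of this VM) of the computed-goto
    // dispatch pattern selected above, using the GCC labels-as-values
    // extension: each handler ends by jumping through the table on the next
    // opcode byte, so there is no central switch statement and the indirect
    // branch is predicted per-opcode rather than at one shared point.
    {
        static const void *sketch_table[2] = { &&sketch_op_inc, &&sketch_op_halt };
        const byte sketch_code[] = { 0, 0, 1 }; // inc, inc, halt
        const byte *sketch_ip = sketch_code;
        int sketch_acc = 0;
        goto *sketch_table[*sketch_ip++];
    sketch_op_inc:
        sketch_acc += 1;
        goto *sketch_table[*sketch_ip++];
    sketch_op_halt:
        ; // sketch_acc == 2 here
    }
    #endif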
    // nlr_raise needs to be implemented as a goto, so that the C compiler's flow analyser
    // sees that it's possible for us to jump from the dispatch loop to the exception
    // handler. Without this, the code may have a different stack layout in the dispatch
    // loop and the exception handler, leading to very obscure bugs.
    #define RAISE(o) do { nlr_pop(); nlr.ret_val = MP_OBJ_TO_PTR(o); goto exception_handler; } while (0)
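    #if 0
    // Sketch (not this VM's code, just the pattern it relies on) of how the
    // nlr machinery is used: nlr_push() returns 0 when the jump buffer is
    // armed, and returns non-zero when an nlr_jump() from deeper code lands
    // back here, with nlr.ret_val carrying the raised object.
    {
        nlr_buf_t nlr;
        if (nlr_push(&nlr) == 0) {
            // code that may raise goes here
            nlr_pop(); // completed without raising; disarm the buffer
        } else {
            mp_obj_t exc = MP_OBJ_FROM_PTR(nlr.ret_val);
            (void)exc; // handle the exception here
        }
    }
    #endif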
    #if MICROPY_STACKLESS
run_code_state: ;
    #endif
    FRAME_ENTER();

    #if MICROPY_STACKLESS
run_code_state_from_return: ;
    #endif
    FRAME_SETUP();

    // Pointers which are constant for a particular invocation of mp_execute_bytecode()
    mp_obj_t * /*const*/ fastn;
    mp_exc_stack_t * /*const*/ exc_stack;
    {
        size_t n_state = code_state->n_state;
        fastn = &code_state->state[n_state - 1];
        exc_stack = (mp_exc_stack_t *)(code_state->state + n_state);
    }
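    // Illustration (derived from the two assignments above) of how the
    // variable-length state area of a code_state is laid out in memory:
    //   state[0] .. state[n_state-1]   value stack + locals; fastn points at
    //                                  state[n_state-1], so local n is fastn[-n]
    //   exc_stack[0] ..                exception handler records, placed
    //                                  directly after the state array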
    // variables that are visible to the exception handler (declared volatile)
    mp_exc_stack_t *volatile exc_sp = MP_CODE_STATE_EXC_SP_IDX_TO_PTR(exc_stack, code_state->exc_sp_idx); // stack grows up, exc_sp points to top of stack

    #if MICROPY_PY_THREAD_GIL && MICROPY_PY_THREAD_GIL_VM_DIVISOR
    // This needs to be volatile and outside the VM loop so it persists across handling
    // of any exceptions. Otherwise it's possible that the VM never gives up the GIL.
    volatile int gil_divisor = MICROPY_PY_THREAD_GIL_VM_DIVISOR;
    #endif

    // outer exception handling loop
    for (;;) {
        nlr_buf_t nlr;
    outer_dispatch_loop:
        if (nlr_push(&nlr) == 0) {
            // local variables that are not visible to the exception handler
            const byte *ip = code_state->ip;
            mp_obj_t *sp = code_state->sp;
            #if MICROPY_EMIT_BYTECODE_USES_QSTR_TABLE
            const qstr_short_t *qstr_table = code_state->fun_bc->context->constants.qstr_table;
            #endif
            mp_obj_t obj_shared;
            MICROPY_VM_HOOK_INIT

            // If we have an exception to inject, raise it now that the
            // execution context is fully set up. This works as if an
            // MP_BC_RAISE_OBJ bytecode had been executed.
            // Injecting an exc into a "yield from" generator is a special
            // case, handled by MP_BC_YIELD_FROM itself.
            if (inject_exc != MP_OBJ_NULL && *ip != MP_BC_YIELD_FROM) {
                mp_obj_t exc = inject_exc;
                inject_exc = MP_OBJ_NULL;
                exc = mp_make_raise_obj(exc);
                RAISE(exc);
            }

            // loop to execute byte code
            for (;;) {
            dispatch_loop:
                #if MICROPY_OPT_COMPUTED_GOTO
                DISPATCH();
                #else
                TRACE(ip);
                MARK_EXC_IP_GLOBAL();
                TRACE_TICK(ip, sp, false);
                switch (*ip++) {
                #endif

                ENTRY(MP_BC_LOAD_CONST_FALSE):
                    PUSH(mp_const_false);
                    DISPATCH();

                ENTRY(MP_BC_LOAD_CONST_NONE):
                    PUSH(mp_const_none);
                    DISPATCH();

                ENTRY(MP_BC_LOAD_CONST_TRUE):
                    PUSH(mp_const_true);
                    DISPATCH();

                ENTRY(MP_BC_LOAD_CONST_SMALL_INT): {
                    mp_uint_t num = 0;
                    if ((ip[0] & 0x40) != 0) {
                        // Number is negative
                        num--;
                    }
                    do {
                        num = (num << 7) | (*ip & 0x7f);
                    } while ((*ip++ & 0x80) != 0);
                    PUSH(MP_OBJ_NEW_SMALL_INT(num));
                    DISPATCH();
                }
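                // Worked example of the sign extension above: for -3 the
                // emitter produces the single byte 0x7d. Its 0x40 bit is set,
                // so num starts at (mp_uint_t)-1 (all ones); shifting in the
                // low 7 bits gives (-1 << 7) | 0x7d, i.e. ...1111101 = -3,
                // and the clear 0x80 bit ends the loop.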
                ENTRY(MP_BC_LOAD_CONST_STRING): {
                    DECODE_QSTR;
                    PUSH(MP_OBJ_NEW_QSTR(qst));
                    DISPATCH();
                }

                ENTRY(MP_BC_LOAD_CONST_OBJ): {
                    DECODE_OBJ;
                    PUSH(obj);
                    DISPATCH();
                }

                ENTRY(MP_BC_LOAD_NULL):
                    PUSH(MP_OBJ_NULL);
                    DISPATCH();

                ENTRY(MP_BC_LOAD_FAST_N): {
                    DECODE_UINT;
                    obj_shared = fastn[-unum];
                load_check:
                    if (obj_shared == MP_OBJ_NULL) {
                    local_name_error: {
                            MARK_EXC_IP_SELECTIVE();
                            mp_obj_t obj = mp_obj_new_exception_msg(&mp_type_NameError, MP_ERROR_TEXT("local variable referenced before assignment"));
                            RAISE(obj);
                        }
                    }
                    PUSH(obj_shared);
                    DISPATCH();
                }

                ENTRY(MP_BC_LOAD_DEREF): {
                    DECODE_UINT;
                    obj_shared = mp_obj_cell_get(fastn[-unum]);
                    goto load_check;
                }

                ENTRY(MP_BC_LOAD_NAME): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_QSTR;
                    PUSH(mp_load_name(qst));
                    DISPATCH();
                }

                ENTRY(MP_BC_LOAD_GLOBAL): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_QSTR;
                    PUSH(mp_load_global(qst));
                    DISPATCH();
                }

                ENTRY(MP_BC_LOAD_ATTR): {
                    FRAME_UPDATE();
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_QSTR;
                    mp_obj_t top = TOP();
                    mp_obj_t obj;
                    #if MICROPY_OPT_LOAD_ATTR_FAST_PATH
                    // For the specific case of an instance type, it implements .attr
                    // and forwards to its members map. Attribute lookups on instance
                    // types are extremely common, so avoid all the other checks and
                    // calls that normally happen first.
                    mp_map_elem_t *elem = NULL;
                    if (mp_obj_is_instance_type(mp_obj_get_type(top))) {
                        mp_obj_instance_t *self = MP_OBJ_TO_PTR(top);
                        elem = mp_map_lookup(&self->members, MP_OBJ_NEW_QSTR(qst), MP_MAP_LOOKUP);
                    }
                    if (elem) {
                        obj = elem->value;
                    } else
                    #endif
                    {
                        obj = mp_load_attr(top, qst);
                    }
                    SET_TOP(obj);
                    DISPATCH();
                }

                ENTRY(MP_BC_LOAD_METHOD): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_QSTR;
                    mp_load_method(*sp, qst, sp);
                    sp += 1;
                    DISPATCH();
                }

                ENTRY(MP_BC_LOAD_SUPER_METHOD): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_QSTR;
                    sp -= 1;
                    mp_load_super_method(qst, sp - 1);
                    DISPATCH();
                }

                ENTRY(MP_BC_LOAD_BUILD_CLASS):
                    MARK_EXC_IP_SELECTIVE();
                    PUSH(mp_load_build_class());
                    DISPATCH();

                ENTRY(MP_BC_LOAD_SUBSCR): {
                    MARK_EXC_IP_SELECTIVE();
                    mp_obj_t index = POP();
                    SET_TOP(mp_obj_subscr(TOP(), index, MP_OBJ_SENTINEL));
                    DISPATCH();
                }

                ENTRY(MP_BC_STORE_FAST_N): {
                    DECODE_UINT;
                    fastn[-unum] = POP();
                    DISPATCH();
                }

                ENTRY(MP_BC_STORE_DEREF): {
                    DECODE_UINT;
                    mp_obj_cell_set(fastn[-unum], POP());
                    DISPATCH();
                }

                ENTRY(MP_BC_STORE_NAME): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_QSTR;
                    mp_store_name(qst, POP());
                    DISPATCH();
                }

                ENTRY(MP_BC_STORE_GLOBAL): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_QSTR;
                    mp_store_global(qst, POP());
                    DISPATCH();
                }

                ENTRY(MP_BC_STORE_ATTR): {
                    FRAME_UPDATE();
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_QSTR;
                    mp_store_attr(sp[0], qst, sp[-1]);
                    sp -= 2;
                    DISPATCH();
                }

                ENTRY(MP_BC_STORE_SUBSCR):
                    MARK_EXC_IP_SELECTIVE();
                    mp_obj_subscr(sp[-1], sp[0], sp[-2]);
                    sp -= 3;
                    DISPATCH();

                ENTRY(MP_BC_DELETE_FAST): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_UINT;
                    if (fastn[-unum] == MP_OBJ_NULL) {
                        goto local_name_error;
                    }
                    fastn[-unum] = MP_OBJ_NULL;
                    DISPATCH();
                }

                ENTRY(MP_BC_DELETE_DEREF): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_UINT;
                    if (mp_obj_cell_get(fastn[-unum]) == MP_OBJ_NULL) {
                        goto local_name_error;
                    }
                    mp_obj_cell_set(fastn[-unum], MP_OBJ_NULL);
                    DISPATCH();
                }

                ENTRY(MP_BC_DELETE_NAME): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_QSTR;
                    mp_delete_name(qst);
                    DISPATCH();
                }

                ENTRY(MP_BC_DELETE_GLOBAL): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_QSTR;
                    mp_delete_global(qst);
                    DISPATCH();
                }

                ENTRY(MP_BC_DUP_TOP): {
                    mp_obj_t top = TOP();
                    PUSH(top);
                    DISPATCH();
                }

                ENTRY(MP_BC_DUP_TOP_TWO):
                    sp += 2;
                    sp[0] = sp[-2];
                    sp[-1] = sp[-3];
                    DISPATCH();

                ENTRY(MP_BC_POP_TOP):
                    sp -= 1;
                    DISPATCH();

                ENTRY(MP_BC_ROT_TWO): {
                    mp_obj_t top = sp[0];
                    sp[0] = sp[-1];
                    sp[-1] = top;
                    DISPATCH();
                }

                ENTRY(MP_BC_ROT_THREE): {
                    mp_obj_t top = sp[0];
                    sp[0] = sp[-1];
                    sp[-1] = sp[-2];
                    sp[-2] = top;
                    DISPATCH();
                }

                ENTRY(MP_BC_JUMP): {
                    DECODE_SLABEL;
                    ip += slab;
                    DISPATCH_WITH_PEND_EXC_CHECK();
                }

                ENTRY(MP_BC_POP_JUMP_IF_TRUE): {
                    DECODE_SLABEL;
                    if (mp_obj_is_true(POP())) {
                        ip += slab;
                    }
                    DISPATCH_WITH_PEND_EXC_CHECK();
                }

                ENTRY(MP_BC_POP_JUMP_IF_FALSE): {
                    DECODE_SLABEL;
                    if (!mp_obj_is_true(POP())) {
                        ip += slab;
                    }
                    DISPATCH_WITH_PEND_EXC_CHECK();
                }

                ENTRY(MP_BC_JUMP_IF_TRUE_OR_POP): {
                    DECODE_ULABEL;
                    if (mp_obj_is_true(TOP())) {
                        ip += ulab;
                    } else {
                        sp--;
                    }
                    DISPATCH_WITH_PEND_EXC_CHECK();
                }

                ENTRY(MP_BC_JUMP_IF_FALSE_OR_POP): {
                    DECODE_ULABEL;
                    if (mp_obj_is_true(TOP())) {
                        sp--;
                    } else {
                        ip += ulab;
                    }
                    DISPATCH_WITH_PEND_EXC_CHECK();
                }

                ENTRY(MP_BC_SETUP_WITH): {
                    MARK_EXC_IP_SELECTIVE();
                    // stack: (..., ctx_mgr)
                    mp_obj_t obj = TOP();
                    mp_load_method(obj, MP_QSTR___exit__, sp);
                    mp_load_method(obj, MP_QSTR___enter__, sp + 2);
                    mp_obj_t ret = mp_call_method_n_kw(0, 0, sp + 2);
                    sp += 1;
                    PUSH_EXC_BLOCK(1);
                    PUSH(ret);
                    // stack: (..., __exit__, ctx_mgr, as_value)
                    DISPATCH();
                }

                ENTRY(MP_BC_WITH_CLEANUP): {
                    MARK_EXC_IP_SELECTIVE();
                    // Arriving here there is an "exception control block" on top of the
                    // stack, with the __exit__ method (and its self) underneath it. This
                    // bytecode calls __exit__ and then "deletes" it off the stack,
                    // shifting the "exception control block" down into its place.
                    // The bytecode emitter ensures that there is enough space on the
                    // Python value stack to hold the __exit__ method plus an additional
                    // 4 entries.
                    if (TOP() == mp_const_none) {
                        // stack: (..., __exit__, ctx_mgr, None)
                        sp[1] = mp_const_none;
                        sp[2] = mp_const_none;
                        sp -= 2;
                        mp_call_method_n_kw(3, 0, sp);
                        SET_TOP(mp_const_none);
                    } else if (mp_obj_is_small_int(TOP())) {
                        // Getting here there are two distinct cases:
                        //  - unwind return, stack: (..., __exit__, ctx_mgr, ret_val, SMALL_INT(-1))
                        //  - unwind jump, stack:   (..., __exit__, ctx_mgr, dest_ip, SMALL_INT(num_exc))
                        // For both cases we do exactly the same thing.
                        mp_obj_t data = sp[-1];
                        mp_obj_t cause = sp[0];
                        sp[-1] = mp_const_none;
                        sp[0] = mp_const_none;
                        sp[1] = mp_const_none;
                        mp_call_method_n_kw(3, 0, sp - 3);
                        sp[-3] = data;
                        sp[-2] = cause;
                        sp -= 2; // we removed (__exit__, ctx_mgr)
                    } else {
                        assert(mp_obj_is_exception_instance(TOP()));
                        // stack: (..., __exit__, ctx_mgr, exc_instance)
                        // Need to pass (exc_type, exc_instance, None) as arguments to __exit__.
                        sp[1] = sp[0];
                        sp[0] = MP_OBJ_FROM_PTR(mp_obj_get_type(sp[0]));
                        sp[2] = mp_const_none;
                        sp -= 2;
                        mp_obj_t ret_value = mp_call_method_n_kw(3, 0, sp);
                        if (mp_obj_is_true(ret_value)) {
                            // We need to silence/swallow the exception. This is done
                            // by popping the exception and the __exit__ handler and
                            // replacing it with None, which signals END_FINALLY to just
                            // execute the finally handler normally.
                            SET_TOP(mp_const_none);
                        } else {
                            // We need to re-raise the exception. We pop the __exit__
                            // handler by copying the exception instance down to the new
                            // top-of-stack.
                            sp[0] = sp[3];
                        }
                    }
                    DISPATCH();
                }

                ENTRY(MP_BC_UNWIND_JUMP): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_SLABEL;
                    PUSH((mp_obj_t)(mp_uint_t)(uintptr_t)(ip + slab)); // push destination ip for jump
                    PUSH((mp_obj_t)(mp_uint_t)(*ip)); // push number of exception handlers to unwind (0x80 bit set if we also need to pop stack)
                unwind_jump:;
                    mp_uint_t unum = (mp_uint_t)POP(); // get number of exception handlers to unwind
                    while ((unum & 0x7f) > 0) {
                        unum -= 1;
                        assert(exc_sp >= exc_stack);

                        if (MP_TAGPTR_TAG1(exc_sp->val_sp)) {
                            if (exc_sp->handler >= ip) {
                                // Found a finally handler that isn't active; run it.
                                // Getting here the stack looks like:
                                //     (..., X, dest_ip)
                                // where X is pointed to by exc_sp->val_sp and in the case
                                // of a "with" block contains the context manager info.
                                assert(&sp[-1] == MP_TAGPTR_PTR(exc_sp->val_sp));
                                // We're going to run the "finally" code as a coroutine
                                // (not calling it recursively). Set up a sentinel
                                // on the stack so it can return back to us when it is
                                // done (when WITH_CLEANUP or END_FINALLY is reached).
                                // The sentinel is the number of exception handlers left
                                // to unwind, which is a non-negative integer.
                                PUSH(MP_OBJ_NEW_SMALL_INT(unum));
                                ip = exc_sp->handler;
                                goto dispatch_loop;
                            } else {
                                // Found a finally handler that is already active; cancel it.
                                CANCEL_ACTIVE_FINALLY(sp);
                            }
                        }
                        POP_EXC_BLOCK();
                    }
                    ip = (const byte *)MP_OBJ_TO_PTR(POP()); // pop destination ip for jump
                    if (unum != 0) {
                        // pop the exhausted iterator
                        sp -= MP_OBJ_ITER_BUF_NSLOTS;
                    }
                    DISPATCH_WITH_PEND_EXC_CHECK();
                }

                ENTRY(MP_BC_SETUP_EXCEPT):
                ENTRY(MP_BC_SETUP_FINALLY): {
                    MARK_EXC_IP_SELECTIVE();
                    #if SELECTIVE_EXC_IP
                    PUSH_EXC_BLOCK((code_state->ip[-1] == MP_BC_SETUP_FINALLY) ? 1 : 0);
                    #else
                    PUSH_EXC_BLOCK((code_state->ip[0] == MP_BC_SETUP_FINALLY) ? 1 : 0);
                    #endif
                    DISPATCH();
                }

                ENTRY(MP_BC_END_FINALLY):
                    MARK_EXC_IP_SELECTIVE();
                    // if TOS is None, just pop it and continue
                    // if TOS is an integer, finish the coroutine and return control to the caller
                    // if TOS is an exception, re-raise the exception
                    assert(exc_sp >= exc_stack);
                    POP_EXC_BLOCK();
                    if (TOP() == mp_const_none) {
                        sp--;
                    } else if (mp_obj_is_small_int(TOP())) {
                        // We finished the "finally" coroutine and now dispatch back
                        // to our caller, based on the TOS value.
                        mp_int_t cause = MP_OBJ_SMALL_INT_VALUE(POP());
                        if (cause < 0) {
                            // A negative cause indicates unwind return
                            goto unwind_return;
                        } else {
                            // Otherwise it's an unwind jump and we must push, as a raw
                            // number, the number of exception handlers to unwind
                            PUSH((mp_obj_t)cause);
                            goto unwind_jump;
                        }
                    } else {
                        assert(mp_obj_is_exception_instance(TOP()));
                        RAISE(TOP());
                    }
                    DISPATCH();

                ENTRY(MP_BC_GET_ITER):
                    MARK_EXC_IP_SELECTIVE();
                    SET_TOP(mp_getiter(TOP(), NULL));
                    DISPATCH();

                // An iterator for a for-loop takes MP_OBJ_ITER_BUF_NSLOTS slots on
                // the Python value stack. These slots are either used to store the
                // iterator object itself, or the first slot is MP_OBJ_NULL and
                // the second slot holds a reference to the iterator object.
                ENTRY(MP_BC_GET_ITER_STACK): {
                    MARK_EXC_IP_SELECTIVE();
                    mp_obj_t obj = TOP();
                    mp_obj_iter_buf_t *iter_buf = (mp_obj_iter_buf_t *)sp;
                    sp += MP_OBJ_ITER_BUF_NSLOTS - 1;
                    obj = mp_getiter(obj, iter_buf);
                    if (obj != MP_OBJ_FROM_PTR(iter_buf)) {
                        // Iterator didn't use the stack so indicate that with MP_OBJ_NULL.
                        *(sp - MP_OBJ_ITER_BUF_NSLOTS + 1) = MP_OBJ_NULL;
                        *(sp - MP_OBJ_ITER_BUF_NSLOTS + 2) = obj;
                    }
                    DISPATCH();
                }
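                // Resulting layouts for the MP_OBJ_ITER_BUF_NSLOTS slots used
                // above (sp points at the last slot):
                //   iterator fits in the buffer:  (..., <iter_buf stored in-place>)
                //   iterator allocated elsewhere: (..., MP_OBJ_NULL, iter_obj,
                //                                  remaining slots unused)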
                ENTRY(MP_BC_FOR_ITER): {
                    FRAME_UPDATE();
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_ULABEL; // the jump offset if iteration finishes; for labels are always forward
                    code_state->sp = sp;
                    mp_obj_t obj;
                    if (*(sp - MP_OBJ_ITER_BUF_NSLOTS + 1) == MP_OBJ_NULL) {
                        obj = *(sp - MP_OBJ_ITER_BUF_NSLOTS + 2);
                    } else {
                        obj = MP_OBJ_FROM_PTR(&sp[-MP_OBJ_ITER_BUF_NSLOTS + 1]);
                    }
                    mp_obj_t value = mp_iternext_allow_raise(obj);
                    if (value == MP_OBJ_STOP_ITERATION) {
                        sp -= MP_OBJ_ITER_BUF_NSLOTS; // pop the exhausted iterator
                        ip += ulab; // jump to after for-block
                    } else {
                        PUSH(value); // push the next iteration value
                        #if MICROPY_PY_SYS_SETTRACE
                        // A LINE event should trigger for every iteration, so invalidate the last trigger
                        if (code_state->frame) {
                            code_state->frame->lineno = 0;
                        }
                        #endif
                    }
                    DISPATCH();
                }

                ENTRY(MP_BC_POP_EXCEPT_JUMP): {
                    assert(exc_sp >= exc_stack);
                    POP_EXC_BLOCK();
                    DECODE_ULABEL;
                    ip += ulab;
                    DISPATCH_WITH_PEND_EXC_CHECK();
                }

                ENTRY(MP_BC_BUILD_TUPLE): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_UINT;
                    sp -= unum - 1;
                    SET_TOP(mp_obj_new_tuple(unum, sp));
                    DISPATCH();
                }

                ENTRY(MP_BC_BUILD_LIST): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_UINT;
                    sp -= unum - 1;
                    SET_TOP(mp_obj_new_list(unum, sp));
                    DISPATCH();
                }

                ENTRY(MP_BC_BUILD_MAP): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_UINT;
                    PUSH(mp_obj_new_dict(unum));
                    DISPATCH();
                }

                ENTRY(MP_BC_STORE_MAP):
                    MARK_EXC_IP_SELECTIVE();
                    sp -= 2;
                    mp_obj_dict_store(sp[0], sp[2], sp[1]);
                    DISPATCH();

                #if MICROPY_PY_BUILTINS_SET
                ENTRY(MP_BC_BUILD_SET): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_UINT;
                    sp -= unum - 1;
                    SET_TOP(mp_obj_new_set(unum, sp));
                    DISPATCH();
                }
                #endif

                #if MICROPY_PY_BUILTINS_SLICE
                ENTRY(MP_BC_BUILD_SLICE): {
                    MARK_EXC_IP_SELECTIVE();
                    mp_obj_t step = mp_const_none;
                    if (*ip++ == 3) {
                        // 3-argument slice includes step
                        step = POP();
                    }
                    mp_obj_t stop = POP();
                    mp_obj_t start = TOP();
                    SET_TOP(mp_obj_new_slice(start, stop, step));
                    DISPATCH();
                }
                #endif

                ENTRY(MP_BC_STORE_COMP): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_UINT;
                    mp_obj_t obj = sp[-(unum >> 2)];
                    if ((unum & 3) == 0) {
                        mp_obj_list_append(obj, sp[0]);
                        sp--;
                    } else if (!MICROPY_PY_BUILTINS_SET || (unum & 3) == 1) {
                        mp_obj_dict_store(obj, sp[0], sp[-1]);
                        sp -= 2;
                    #if MICROPY_PY_BUILTINS_SET
                    } else {
                        mp_obj_set_store(obj, sp[0]);
                        sp--;
                    #endif
                    }
                    DISPATCH();
                }

                ENTRY(MP_BC_UNPACK_SEQUENCE): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_UINT;
                    mp_unpack_sequence(sp[0], unum, sp);
                    sp += unum - 1;
                    DISPATCH();
                }

                ENTRY(MP_BC_UNPACK_EX): {
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_UINT;
                    mp_unpack_ex(sp[0], unum, sp);
                    sp += (unum & 0xff) + ((unum >> 8) & 0xff);
                    DISPATCH();
                }

                ENTRY(MP_BC_MAKE_FUNCTION): {
                    DECODE_PTR;
                    PUSH(mp_make_function_from_proto_fun(ptr, code_state->fun_bc->context, NULL));
                    DISPATCH();
                }

                ENTRY(MP_BC_MAKE_FUNCTION_DEFARGS): {
                    DECODE_PTR;
                    // Stack layout: def_tuple def_dict <- TOS
                    sp -= 1;
                    SET_TOP(mp_make_function_from_proto_fun(ptr, code_state->fun_bc->context, sp));
                    DISPATCH();
                }

                ENTRY(MP_BC_MAKE_CLOSURE): {
                    DECODE_PTR;
                    size_t n_closed_over = *ip++;
                    // Stack layout: closed_overs <- TOS
                    sp -= n_closed_over - 1;
                    SET_TOP(mp_make_closure_from_proto_fun(ptr, code_state->fun_bc->context, n_closed_over, sp));
                    DISPATCH();
                }

                ENTRY(MP_BC_MAKE_CLOSURE_DEFARGS): {
                    DECODE_PTR;
                    size_t n_closed_over = *ip++;
                    // Stack layout: def_tuple def_dict closed_overs <- TOS
                    sp -= 2 + n_closed_over - 1;
                    SET_TOP(mp_make_closure_from_proto_fun(ptr, code_state->fun_bc->context, 0x100 | n_closed_over, sp));
                    DISPATCH();
                }

                ENTRY(MP_BC_CALL_FUNCTION): {
                    FRAME_UPDATE();
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_UINT;
                    // unum & 0xff == n_positional
                    // (unum >> 8) & 0xff == n_keyword
                    sp -= (unum & 0xff) + ((unum >> 7) & 0x1fe);
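                    // Note (derived from the encoding comment above):
                    // ((unum >> 7) & 0x1fe) == 2 * n_keyword, since each keyword
                    // argument occupies two stack slots (name, value); after the
                    // adjustment sp points at the callee itself.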
                    #if MICROPY_STACKLESS
                    if (mp_obj_get_type(*sp) == &mp_type_fun_bc) {
                        code_state->ip = ip;
                        code_state->sp = sp;
                        code_state->exc_sp_idx = MP_CODE_STATE_EXC_SP_IDX_FROM_PTR(exc_stack, exc_sp);
                        mp_code_state_t *new_state = mp_obj_fun_bc_prepare_codestate(*sp, unum & 0xff, (unum >> 8) & 0xff, sp + 1);
                        #if !MICROPY_ENABLE_PYSTACK
                        if (new_state == NULL) {
                            // Couldn't allocate codestate on heap: in the strict case raise
                            // an exception, otherwise just fall through to stack allocation.
                            #if MICROPY_STACKLESS_STRICT
                        deep_recursion_error:
                            mp_raise_recursion_depth();
                            #endif
                        } else
                        #endif
                        {
                            new_state->prev = code_state;
                            code_state = new_state;
                            nlr_pop();
                            goto run_code_state;
                        }
                    }
                    #endif
                    SET_TOP(mp_call_function_n_kw(*sp, unum & 0xff, (unum >> 8) & 0xff, sp + 1));
                    DISPATCH();
                }

                ENTRY(MP_BC_CALL_FUNCTION_VAR_KW): {
                    FRAME_UPDATE();
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_UINT;
                    // unum & 0xff == n_positional
                    // (unum >> 8) & 0xff == n_keyword
                    // We have the following stack layout here:
                    //     fun arg0 arg1 ... kw0 val0 kw1 val1 ... bitmap <- TOS
                    sp -= (unum & 0xff) + ((unum >> 7) & 0x1fe) + 1;
                    #if MICROPY_STACKLESS
                    if (mp_obj_get_type(*sp) == &mp_type_fun_bc) {
                        code_state->ip = ip;
                        code_state->sp = sp;
                        code_state->exc_sp_idx = MP_CODE_STATE_EXC_SP_IDX_FROM_PTR(exc_stack, exc_sp);

                        mp_call_args_t out_args;
                        mp_call_prepare_args_n_kw_var(false, unum, sp, &out_args);

                        mp_code_state_t *new_state = mp_obj_fun_bc_prepare_codestate(out_args.fun,
                            out_args.n_args, out_args.n_kw, out_args.args);
                        #if !MICROPY_ENABLE_PYSTACK
                        // Freeing args at this point does not follow a LIFO order so only do it if
                        // pystack is not enabled. For pystack, they are freed when code_state is.
                        mp_nonlocal_free(out_args.args, out_args.n_alloc * sizeof(mp_obj_t));
                        #endif
                        #if !MICROPY_ENABLE_PYSTACK
                        if (new_state == NULL) {
                            // Couldn't allocate codestate on heap: in the strict case raise
                            // an exception, otherwise just fall through to stack allocation.
                            #if MICROPY_STACKLESS_STRICT
                            goto deep_recursion_error;
                            #endif
                        } else
                        #endif
                        {
                            new_state->prev = code_state;
                            code_state = new_state;
                            nlr_pop();
                            goto run_code_state;
                        }
                    }
                    #endif
                    SET_TOP(mp_call_method_n_kw_var(false, unum, sp));
                    DISPATCH();
                }

                ENTRY(MP_BC_CALL_METHOD): {
                    FRAME_UPDATE();
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_UINT;
                    // unum & 0xff == n_positional
                    // (unum >> 8) & 0xff == n_keyword
                    sp -= (unum & 0xff) + ((unum >> 7) & 0x1fe) + 1;
                    #if MICROPY_STACKLESS
                    if (mp_obj_get_type(*sp) == &mp_type_fun_bc) {
                        code_state->ip = ip;
                        code_state->sp = sp;
                        code_state->exc_sp_idx = MP_CODE_STATE_EXC_SP_IDX_FROM_PTR(exc_stack, exc_sp);

                        size_t n_args = unum & 0xff;
                        size_t n_kw = (unum >> 8) & 0xff;
                        int adjust = (sp[1] == MP_OBJ_NULL) ? 0 : 1;

                        mp_code_state_t *new_state = mp_obj_fun_bc_prepare_codestate(*sp, n_args + adjust, n_kw, sp + 2 - adjust);
                        #if !MICROPY_ENABLE_PYSTACK
                        if (new_state == NULL) {
                            // Couldn't allocate codestate on heap: in the strict case raise
                            // an exception, otherwise just fall through to stack allocation.
                            #if MICROPY_STACKLESS_STRICT
                            goto deep_recursion_error;
                            #endif
                        } else
                        #endif
                        {
                            new_state->prev = code_state;
                            code_state = new_state;
                            nlr_pop();
                            goto run_code_state;
                        }
                    }
                    #endif
                    SET_TOP(mp_call_method_n_kw(unum & 0xff, (unum >> 8) & 0xff, sp));
                    DISPATCH();
                }

                ENTRY(MP_BC_CALL_METHOD_VAR_KW): {
                    FRAME_UPDATE();
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_UINT;
                    // unum & 0xff == n_positional
                    // (unum >> 8) & 0xff == n_keyword
                    // We have the following stack layout here:
                    //     fun self arg0 arg1 ... kw0 val0 kw1 val1 ... bitmap <- TOS
                    sp -= (unum & 0xff) + ((unum >> 7) & 0x1fe) + 2;
                    #if MICROPY_STACKLESS
                    if (mp_obj_get_type(*sp) == &mp_type_fun_bc) {
                        code_state->ip = ip;
                        code_state->sp = sp;
                        code_state->exc_sp_idx = MP_CODE_STATE_EXC_SP_IDX_FROM_PTR(exc_stack, exc_sp);

                        mp_call_args_t out_args;
                        mp_call_prepare_args_n_kw_var(true, unum, sp, &out_args);

                        mp_code_state_t *new_state = mp_obj_fun_bc_prepare_codestate(out_args.fun,
                            out_args.n_args, out_args.n_kw, out_args.args);
                        #if !MICROPY_ENABLE_PYSTACK
                        // Freeing args at this point does not follow a LIFO order so only do it if
                        // pystack is not enabled. For pystack, they are freed when code_state is.
                        mp_nonlocal_free(out_args.args, out_args.n_alloc * sizeof(mp_obj_t));
                        #endif
                        #if !MICROPY_ENABLE_PYSTACK
                        if (new_state == NULL) {
                            // Couldn't allocate codestate on heap: in the strict case raise
                            // an exception, otherwise just fall through to stack allocation.
                            #if MICROPY_STACKLESS_STRICT
                            goto deep_recursion_error;
                            #endif
                        } else
                        #endif
                        {
                            new_state->prev = code_state;
                            code_state = new_state;
                            nlr_pop();
                            goto run_code_state;
                        }
                    }
                    #endif
                    SET_TOP(mp_call_method_n_kw_var(true, unum, sp));
                    DISPATCH();
                }

                ENTRY(MP_BC_RETURN_VALUE):
                    MARK_EXC_IP_SELECTIVE();
                unwind_return:
                    // Search for and execute finally handlers that aren't already active
                    while (exc_sp >= exc_stack) {
                        if (MP_TAGPTR_TAG1(exc_sp->val_sp)) {
                            if (exc_sp->handler >= ip) {
                                // Found a finally handler that isn't active; run it.
                                // Getting here the stack looks like:
                                //     (..., X, [iter0, iter1, ...,] ret_val)
                                // where X is pointed to by exc_sp->val_sp and in the case
                                // of a "with" block contains the context manager info.
                                // There may be 0 or more for-iterators between X and the
                                // return value, and these must be removed before control
                                // can pass to the finally code. We simply copy ret_val
                                // down over these iterators, if they exist (if they don't
                                // then the copy is a no-op).
                                mp_obj_t *finally_sp = MP_TAGPTR_PTR(exc_sp->val_sp);
                                finally_sp[1] = sp[0];
                                sp = &finally_sp[1];
                                // We're going to run the "finally" code as a coroutine
                                // (not calling it recursively). Set up a sentinel
                                // on the stack so it can return back to us when it is
                                // done (when WITH_CLEANUP or END_FINALLY is reached).
                                PUSH(MP_OBJ_NEW_SMALL_INT(-1));
                                ip = exc_sp->handler;
                                goto dispatch_loop;
                            } else {
                                // Found a finally handler that is already active; cancel it.
                                CANCEL_ACTIVE_FINALLY(sp);
                            }
                        }
                        POP_EXC_BLOCK();
                    }
                    nlr_pop();
                    code_state->sp = sp;
                    assert(exc_sp == exc_stack - 1);
                    MICROPY_VM_HOOK_RETURN
                    #if MICROPY_STACKLESS
                    if (code_state->prev != NULL) {
                        mp_obj_t res = *sp;
                        mp_globals_set(code_state->old_globals);
                        mp_code_state_t *new_code_state = code_state->prev;
                        #if MICROPY_ENABLE_PYSTACK
                        // Free code_state, and args allocated by mp_call_prepare_args_n_kw_var
                        // (the latter is implicitly freed when using pystack due to its LIFO nature).
                        // The sizeof in the following statement does not include the size of the
                        // variable part of the struct. This arg is anyway not used if pystack is enabled.
                        mp_nonlocal_free(code_state, sizeof(mp_code_state_t));
                        #endif
                        code_state = new_code_state;
                        *code_state->sp = res;
                        goto run_code_state_from_return;
                    }
                    #endif
                    FRAME_LEAVE();
                    return MP_VM_RETURN_NORMAL;

                ENTRY(MP_BC_RAISE_LAST): {
                    MARK_EXC_IP_SELECTIVE();
                    // search for the inner-most previous exception, to re-raise it
                    mp_obj_t obj = MP_OBJ_NULL;
                    for (mp_exc_stack_t *e = exc_sp; e >= exc_stack; --e) {
                        if (e->prev_exc != NULL) {
                            obj = MP_OBJ_FROM_PTR(e->prev_exc);
                            break;
                        }
                    }
                    if (obj == MP_OBJ_NULL) {
                        obj = mp_obj_new_exception_msg(&mp_type_RuntimeError, MP_ERROR_TEXT("no active exception to reraise"));
                    }
                    RAISE(obj);
                }

                ENTRY(MP_BC_RAISE_OBJ): {
                    MARK_EXC_IP_SELECTIVE();
                    mp_obj_t obj = mp_make_raise_obj(TOP());
                    RAISE(obj);
                }

                ENTRY(MP_BC_RAISE_FROM): {
                    MARK_EXC_IP_SELECTIVE();
                    mp_obj_t from_value = POP();
                    if (from_value != mp_const_none) {
                        mp_warning(NULL, "exception chaining not supported");
                    }
                    mp_obj_t obj = mp_make_raise_obj(TOP());
                    RAISE(obj);
                }

                ENTRY(MP_BC_YIELD_VALUE):
                yield:
                    nlr_pop();
                    code_state->ip = ip;
                    code_state->sp = sp;
                    code_state->exc_sp_idx = MP_CODE_STATE_EXC_SP_IDX_FROM_PTR(exc_stack, exc_sp);
                    FRAME_LEAVE();
                    return MP_VM_RETURN_YIELD;

                ENTRY(MP_BC_YIELD_FROM): {
                    MARK_EXC_IP_SELECTIVE();
                    mp_vm_return_kind_t ret_kind;
                    mp_obj_t send_value = POP();
                    mp_obj_t t_exc = MP_OBJ_NULL;
                    mp_obj_t ret_value;
                    code_state->sp = sp; // Save sp because it's needed if mp_resume raises StopIteration
                    if (inject_exc != MP_OBJ_NULL) {
                        t_exc = inject_exc;
                        inject_exc = MP_OBJ_NULL;
                        ret_kind = mp_resume(TOP(), MP_OBJ_NULL, t_exc, &ret_value);
                    } else {
                        ret_kind = mp_resume(TOP(), send_value, MP_OBJ_NULL, &ret_value);
                    }

                    if (ret_kind == MP_VM_RETURN_YIELD) {
                        ip--;
                        PUSH(ret_value);
                        goto yield;
                    } else if (ret_kind == MP_VM_RETURN_NORMAL) {
                        // The generator has finished and returned a value via StopIteration,
                        // so replace the exhausted generator with the returned value.
                        SET_TOP(ret_value);
                        // If we injected GeneratorExit downstream, then even
                        // if it was swallowed, we re-raise GeneratorExit
                        if (t_exc != MP_OBJ_NULL && mp_obj_exception_match(t_exc, MP_OBJ_FROM_PTR(&mp_type_GeneratorExit))) {
                            mp_obj_t raise_t = mp_make_raise_obj(t_exc);
                            RAISE(raise_t);
                        }
                        DISPATCH();
                    } else {
                        assert(ret_kind == MP_VM_RETURN_EXCEPTION);
                        assert(!mp_obj_exception_match(ret_value, MP_OBJ_FROM_PTR(&mp_type_StopIteration)));
                        // Pop the exhausted generator
                        sp--;
                        RAISE(ret_value);
                    }
                }

                ENTRY(MP_BC_IMPORT_NAME): {
                    FRAME_UPDATE();
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_QSTR;
                    mp_obj_t obj = POP();
                    SET_TOP(mp_import_name(qst, obj, TOP()));
                    DISPATCH();
                }

                ENTRY(MP_BC_IMPORT_FROM): {
                    FRAME_UPDATE();
                    MARK_EXC_IP_SELECTIVE();
                    DECODE_QSTR;
                    mp_obj_t obj = mp_import_from(TOP(), qst);
                    PUSH(obj);
                    DISPATCH();
                }

                ENTRY(MP_BC_IMPORT_STAR):
                    MARK_EXC_IP_SELECTIVE();
                    mp_import_all(POP());
                    DISPATCH();

                #if MICROPY_OPT_COMPUTED_GOTO
                ENTRY(MP_BC_LOAD_CONST_SMALL_INT_MULTI):
                    PUSH(MP_OBJ_NEW_SMALL_INT((mp_int_t)ip[-1] - MP_BC_LOAD_CONST_SMALL_INT_MULTI - MP_BC_LOAD_CONST_SMALL_INT_MULTI_EXCESS));
                    DISPATCH();
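                // Note on the MULTI opcodes here: the operand is encoded in the
                // opcode byte itself (ip[-1], which dispatch has already advanced
                // past). For example, per the expression above, the opcode byte
                // MP_BC_LOAD_CONST_SMALL_INT_MULTI + MP_BC_LOAD_CONST_SMALL_INT_MULTI_EXCESS
                // pushes the small int 0, and adjacent byte values push adjacent
                // integers, with no separate operand bytes needed.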
                ENTRY(MP_BC_LOAD_FAST_MULTI):
                    obj_shared = fastn[MP_BC_LOAD_FAST_MULTI - (mp_int_t)ip[-1]];
                    goto load_check;

                ENTRY(MP_BC_STORE_FAST_MULTI):
                    fastn[MP_BC_STORE_FAST_MULTI - (mp_int_t)ip[-1]] = POP();
                    DISPATCH();

                ENTRY(MP_BC_UNARY_OP_MULTI):
                    MARK_EXC_IP_SELECTIVE();
                    SET_TOP(mp_unary_op(ip[-1] - MP_BC_UNARY_OP_MULTI, TOP()));
                    DISPATCH();

                ENTRY(MP_BC_BINARY_OP_MULTI): {
                    MARK_EXC_IP_SELECTIVE();
                    mp_obj_t rhs = POP();
                    mp_obj_t lhs = TOP();
                    SET_TOP(mp_binary_op(ip[-1] - MP_BC_BINARY_OP_MULTI, lhs, rhs));
                    DISPATCH();
                }

                ENTRY_DEFAULT:
                    MARK_EXC_IP_SELECTIVE();
                #else
                ENTRY_DEFAULT:
                    if (ip[-1] < MP_BC_LOAD_CONST_SMALL_INT_MULTI + MP_BC_LOAD_CONST_SMALL_INT_MULTI_NUM) {
                        PUSH(MP_OBJ_NEW_SMALL_INT((mp_int_t)ip[-1] - MP_BC_LOAD_CONST_SMALL_INT_MULTI - MP_BC_LOAD_CONST_SMALL_INT_MULTI_EXCESS));
                        DISPATCH();
                    } else if (ip[-1] < MP_BC_LOAD_FAST_MULTI + MP_BC_LOAD_FAST_MULTI_NUM) {
                        obj_shared = fastn[MP_BC_LOAD_FAST_MULTI - (mp_int_t)ip[-1]];
                        goto load_check;
                    } else if (ip[-1] < MP_BC_STORE_FAST_MULTI + MP_BC_STORE_FAST_MULTI_NUM) {
                        fastn[MP_BC_STORE_FAST_MULTI - (mp_int_t)ip[-1]] = POP();
                        DISPATCH();
                    } else if (ip[-1] < MP_BC_UNARY_OP_MULTI + MP_BC_UNARY_OP_MULTI_NUM) {
                        SET_TOP(mp_unary_op(ip[-1] - MP_BC_UNARY_OP_MULTI, TOP()));
                        DISPATCH();
                    } else if (ip[-1] < MP_BC_BINARY_OP_MULTI + MP_BC_BINARY_OP_MULTI_NUM) {
                        mp_obj_t rhs = POP();
                        mp_obj_t lhs = TOP();
                        SET_TOP(mp_binary_op(ip[-1] - MP_BC_BINARY_OP_MULTI, lhs, rhs));
                        DISPATCH();
                    } else
                #endif // MICROPY_OPT_COMPUTED_GOTO
                    {
                        mp_obj_t obj = mp_obj_new_exception_msg(&mp_type_NotImplementedError, MP_ERROR_TEXT("opcode"));
                        nlr_pop();
                        code_state->state[0] = obj;
                        FRAME_LEAVE();
                        return MP_VM_RETURN_EXCEPTION;
                    }

                #if !MICROPY_OPT_COMPUTED_GOTO
                } // switch
                #endif

            pending_exception_check:
                // We've just done a branch, so use this as a convenient point to
                // run periodic code/checks and/or bounce the GIL; i.e. not on
                // _every_ instruction, but on average a branch should occur
                // every few instructions.
                MICROPY_VM_HOOK_LOOP

                // Check for pending exceptions or scheduled tasks to run.
                // Note: it's safe to just call mp_handle_pending(true), but
                // we can inline the check for the common case where there is
                // neither.
                if (
                    #if MICROPY_ENABLE_SCHEDULER
                    #if MICROPY_PY_THREAD
                    // Scheduler + threading: the scheduler and pending exceptions are independent, check both.
                    MP_STATE_VM(sched_state) == MP_SCHED_PENDING || MP_STATE_THREAD(mp_pending_exception) != MP_OBJ_NULL
                    #else
                    // Scheduler + non-threading: optimisation: a pending exception sets sched_state, so only check sched_state.
                    MP_STATE_VM(sched_state) == MP_SCHED_PENDING
                    #endif
                    #else
                    // No scheduler: just check the pending exception.
                    MP_STATE_THREAD(mp_pending_exception) != MP_OBJ_NULL
                    #endif
                    #if MICROPY_ENABLE_VM_ABORT
                    // Check if the VM should abort execution.
                    || MP_STATE_VM(vm_abort)
                    #endif
                    ) {
                    MARK_EXC_IP_SELECTIVE();
                    mp_handle_pending(true);
                }

                #if MICROPY_PY_THREAD_GIL
                #if MICROPY_PY_THREAD_GIL_VM_DIVISOR
                // Don't bounce the GIL too frequently (default every 32 branches).
                if (--gil_divisor == 0)
                #endif
                {
                    #if MICROPY_PY_THREAD_GIL_VM_DIVISOR
                    gil_divisor = MICROPY_PY_THREAD_GIL_VM_DIVISOR;
                    #endif
                    #if MICROPY_ENABLE_SCHEDULER
                    // can only switch threads if the scheduler is unlocked
                    if (MP_STATE_VM(sched_state) == MP_SCHED_IDLE)
                    #endif
                    {
                        MP_THREAD_GIL_EXIT();
                        MP_THREAD_GIL_ENTER();
                    }
                }
                #endif

            } // for loop

        } else {
        exception_handler:
            // exception occurred

            #if MICROPY_PY_SYS_EXC_INFO
            MP_STATE_VM(cur_exception) = nlr.ret_val;
            #endif

            #if SELECTIVE_EXC_IP
            // with selective ip, we store the ip 1 byte past the opcode, so move the pointer back
            code_state->ip -= 1;
            #endif

            if (mp_obj_is_subclass_fast(MP_OBJ_FROM_PTR(((mp_obj_base_t *)nlr.ret_val)->type), MP_OBJ_FROM_PTR(&mp_type_StopIteration))) {
                // check if it's a StopIteration within a for block
                if (*code_state->ip == MP_BC_FOR_ITER) {
                    const byte *ip = code_state->ip + 1;
                    DECODE_ULABEL; // the jump offset if iteration finishes; for labels are always forward
                    code_state->ip = ip + ulab; // jump to after for-block
                    code_state->sp -= MP_OBJ_ITER_BUF_NSLOTS; // pop the exhausted iterator
                    goto outer_dispatch_loop; // continue with dispatch loop
                } else if (*code_state->ip == MP_BC_YIELD_FROM) {
                    // StopIteration inside a "yield from" call means the subgenerator
                    // returned a value, so inject the exception's value as the result
                    // of the yield from (instead of a stack pop then push we just
                    // replace the exhausted generator with the value).
                    *code_state->sp = mp_obj_exception_get_value(MP_OBJ_FROM_PTR(nlr.ret_val));
                    code_state->ip++; // yield from is over, move to next instruction
                    goto outer_dispatch_loop; // continue with dispatch loop
                }
            }

            #if MICROPY_PY_SYS_SETTRACE
            // Exceptions are traced here
            if (mp_obj_is_subclass_fast(MP_OBJ_FROM_PTR(((mp_obj_base_t *)nlr.ret_val)->type), MP_OBJ_FROM_PTR(&mp_type_Exception))) {
                TRACE_TICK(code_state->ip, code_state->sp, true /* yes, it's an exception */);
            }
            #endif

            #if MICROPY_STACKLESS
        unwind_loop:
            #endif
            // Set traceback info (file and line number) where the exception occurred, but not for:
            // - the constant GeneratorExit object, because it's const
            // - exceptions re-raised by END_FINALLY
            // - exceptions re-raised explicitly by "raise"
            if (nlr.ret_val != &mp_const_GeneratorExit_obj
                && *code_state->ip != MP_BC_END_FINALLY
                && *code_state->ip != MP_BC_RAISE_LAST) {
                const byte *ip = code_state->fun_bc->bytecode;
                MP_BC_PRELUDE_SIG_DECODE(ip);
                MP_BC_PRELUDE_SIZE_DECODE(ip);
                const byte *line_info_top = ip + n_info;
                const byte *bytecode_start = ip + n_info + n_cell;
                size_t bc = code_state->ip - bytecode_start;
                qstr block_name = mp_decode_uint_value(ip);
                for (size_t i = 0; i < 1 + n_pos_args + n_kwonly_args; ++i) {
                    ip = mp_decode_uint_skip(ip);
                }
                #if MICROPY_EMIT_BYTECODE_USES_QSTR_TABLE
                block_name = code_state->fun_bc->context->constants.qstr_table[block_name];
                qstr source_file = code_state->fun_bc->context->constants.qstr_table[0];
                #else
                qstr source_file = code_state->fun_bc->context->constants.source_file;
                #endif
                size_t source_line = mp_bytecode_get_source_line(ip, line_info_top, bc);
                mp_obj_exception_add_traceback(MP_OBJ_FROM_PTR(nlr.ret_val), source_file, source_line, block_name);
            }

            while (exc_sp >= exc_stack && exc_sp->handler <= code_state->ip) {
                // nested exception
                assert(exc_sp >= exc_stack);
                // TODO make a proper message for nested exceptions;
                // at the moment we are just raising the very last exception (the one
                // that caused the nested exception)
                // move up to the previous exception handler
                POP_EXC_BLOCK();
            }

            if (exc_sp >= exc_stack) {
                // catch the exception and pass it to the bytecode
                code_state->ip = exc_sp->handler;
                mp_obj_t *sp = MP_TAGPTR_PTR(exc_sp->val_sp);
                // save this exception in the stack so it can be used in a reraise, if needed
                exc_sp->prev_exc = nlr.ret_val;
                // push exception object so it can be handled by bytecode
                PUSH(MP_OBJ_FROM_PTR(nlr.ret_val));
                code_state->sp = sp;

            #if MICROPY_STACKLESS
            } else if (code_state->prev != NULL) {
                mp_globals_set(code_state->old_globals);
                mp_code_state_t *new_code_state = code_state->prev;
                #if MICROPY_ENABLE_PYSTACK
                // Free code_state, and args allocated by mp_call_prepare_args_n_kw_var
                // (the latter is implicitly freed when using pystack due to its LIFO nature).
                // The sizeof in the following statement does not include the size of the
                // variable part of the struct. This arg is anyway not used if pystack is enabled.
                mp_nonlocal_free(code_state, sizeof(mp_code_state_t));
                #endif
                code_state = new_code_state;
                size_t n_state = code_state->n_state;
                fastn = &code_state->state[n_state - 1];
                exc_stack = (mp_exc_stack_t *)(code_state->state + n_state);
                // variables that are visible to the exception handler (declared volatile)
                exc_sp = MP_CODE_STATE_EXC_SP_IDX_TO_PTR(exc_stack, code_state->exc_sp_idx); // stack grows up, exc_sp points to top of stack
                goto unwind_loop;

            #endif
            } else {
                // propagate the exception to a higher level
                // Note: ip and sp don't have usable values at this point
                code_state->state[0] = MP_OBJ_FROM_PTR(nlr.ret_val); // put the exception here because sp is invalid
                FRAME_LEAVE();
                return MP_VM_RETURN_EXCEPTION;
            }
        }
    }
}