Przeglądaj źródła

Add mp_flipper/lib/micropython from https://github.com/ofabel/mp-flipper

git-subtree-dir: mp_flipper/lib/micropython
git-subtree-mainline: b3db3f075a0aa74ef655c99b44027d594bd6b765
git-subtree-split: 29091048ae9a613070da21b373062a450f7ecd08
Willy-JL 1 rok temu
rodzic
commit
801946db06
100 zmienionych plików z 32000 dodań i 0 usunięć
  1. 1 0
      mp_flipper/lib/micropython/.gitsubtree
  2. 386 0
      mp_flipper/lib/micropython/extmod/modjson.c
  3. 107 0
      mp_flipper/lib/micropython/extmod/modplatform.h
  4. 261 0
      mp_flipper/lib/micropython/extmod/modrandom.c
  5. 236 0
      mp_flipper/lib/micropython/extmod/modtime.c
  6. 43 0
      mp_flipper/lib/micropython/extmod/modtime.h
  7. 43 0
      mp_flipper/lib/micropython/genhdr/moduledefs.h
  8. 4 0
      mp_flipper/lib/micropython/genhdr/mpversion.h
  9. 481 0
      mp_flipper/lib/micropython/genhdr/qstrdefs.generated.h
  10. 3 0
      mp_flipper/lib/micropython/genhdr/root_pointers.h
  11. 57 0
      mp_flipper/lib/micropython/mp_flipper_compiler.c
  12. 9 0
      mp_flipper/lib/micropython/mp_flipper_compiler.h
  13. 16 0
      mp_flipper/lib/micropython/mp_flipper_file_reader.c
  14. 12 0
      mp_flipper/lib/micropython/mp_flipper_file_reader.h
  15. 284 0
      mp_flipper/lib/micropython/mp_flipper_fileio.c
  16. 25 0
      mp_flipper/lib/micropython/mp_flipper_fileio.h
  17. 36 0
      mp_flipper/lib/micropython/mp_flipper_halport.c
  18. 19 0
      mp_flipper/lib/micropython/mp_flipper_halport.h
  19. 110 0
      mp_flipper/lib/micropython/mp_flipper_logging.c
  20. 14 0
      mp_flipper/lib/micropython/mp_flipper_logging.h
  21. 1006 0
      mp_flipper/lib/micropython/mp_flipper_modflipperzero.c
  22. 269 0
      mp_flipper/lib/micropython/mp_flipper_modflipperzero.h
  23. 5 0
      mp_flipper/lib/micropython/mp_flipper_modrandom.h
  24. 34 0
      mp_flipper/lib/micropython/mp_flipper_modtime.c
  25. 13 0
      mp_flipper/lib/micropython/mp_flipper_modtime.h
  26. 12 0
      mp_flipper/lib/micropython/mp_flipper_repl.c
  27. 12 0
      mp_flipper/lib/micropython/mp_flipper_repl.h
  28. 73 0
      mp_flipper/lib/micropython/mp_flipper_runtime.c
  29. 27 0
      mp_flipper/lib/micropython/mp_flipper_runtime.h
  30. 164 0
      mp_flipper/lib/micropython/mpconfigport.h
  31. 148 0
      mp_flipper/lib/micropython/py/argcheck.c
  32. 399 0
      mp_flipper/lib/micropython/py/asmarm.c
  33. 220 0
      mp_flipper/lib/micropython/py/asmarm.h
  34. 113 0
      mp_flipper/lib/micropython/py/asmbase.c
  35. 78 0
      mp_flipper/lib/micropython/py/asmbase.h
  36. 592 0
      mp_flipper/lib/micropython/py/asmthumb.c
  37. 436 0
      mp_flipper/lib/micropython/py/asmthumb.h
  38. 642 0
      mp_flipper/lib/micropython/py/asmx64.c
  39. 223 0
      mp_flipper/lib/micropython/py/asmx64.h
  40. 545 0
      mp_flipper/lib/micropython/py/asmx86.c
  41. 218 0
      mp_flipper/lib/micropython/py/asmx86.h
  42. 267 0
      mp_flipper/lib/micropython/py/asmxtensa.c
  43. 415 0
      mp_flipper/lib/micropython/py/asmxtensa.h
  44. 345 0
      mp_flipper/lib/micropython/py/bc.c
  45. 338 0
      mp_flipper/lib/micropython/py/bc.h
  46. 162 0
      mp_flipper/lib/micropython/py/bc0.h
  47. 542 0
      mp_flipper/lib/micropython/py/binary.c
  48. 46 0
      mp_flipper/lib/micropython/py/binary.h
  49. 142 0
      mp_flipper/lib/micropython/py/builtin.h
  50. 179 0
      mp_flipper/lib/micropython/py/builtinevex.c
  51. 174 0
      mp_flipper/lib/micropython/py/builtinhelp.c
  52. 665 0
      mp_flipper/lib/micropython/py/builtinimport.c
  53. 3680 0
      mp_flipper/lib/micropython/py/compile.c
  54. 51 0
      mp_flipper/lib/micropython/py/compile.h
  55. 324 0
      mp_flipper/lib/micropython/py/dynruntime.h
  56. 313 0
      mp_flipper/lib/micropython/py/emit.h
  57. 905 0
      mp_flipper/lib/micropython/py/emitbc.c
  58. 123 0
      mp_flipper/lib/micropython/py/emitcommon.c
  59. 250 0
      mp_flipper/lib/micropython/py/emitglue.c
  60. 144 0
      mp_flipper/lib/micropython/py/emitglue.h
  61. 865 0
      mp_flipper/lib/micropython/py/emitinlinethumb.c
  62. 352 0
      mp_flipper/lib/micropython/py/emitinlinextensa.c
  63. 18 0
      mp_flipper/lib/micropython/py/emitnarm.c
  64. 3009 0
      mp_flipper/lib/micropython/py/emitnative.c
  65. 18 0
      mp_flipper/lib/micropython/py/emitnthumb.c
  66. 18 0
      mp_flipper/lib/micropython/py/emitnx64.c
  67. 70 0
      mp_flipper/lib/micropython/py/emitnx86.c
  68. 18 0
      mp_flipper/lib/micropython/py/emitnxtensa.c
  69. 20 0
      mp_flipper/lib/micropython/py/emitnxtensawin.c
  70. 424 0
      mp_flipper/lib/micropython/py/formatfloat.c
  71. 35 0
      mp_flipper/lib/micropython/py/formatfloat.h
  72. 135 0
      mp_flipper/lib/micropython/py/frozenmod.c
  73. 40 0
      mp_flipper/lib/micropython/py/frozenmod.h
  74. 1354 0
      mp_flipper/lib/micropython/py/gc.c
  75. 87 0
      mp_flipper/lib/micropython/py/gc.h
  76. 372 0
      mp_flipper/lib/micropython/py/grammar.h
  77. 944 0
      mp_flipper/lib/micropython/py/lexer.c
  78. 203 0
      mp_flipper/lib/micropython/py/lexer.h
  79. 315 0
      mp_flipper/lib/micropython/py/malloc.c
  80. 461 0
      mp_flipper/lib/micropython/py/map.c
  81. 337 0
      mp_flipper/lib/micropython/py/misc.h
  82. 45 0
      mp_flipper/lib/micropython/py/modarray.c
  83. 773 0
      mp_flipper/lib/micropython/py/modbuiltins.c
  84. 154 0
      mp_flipper/lib/micropython/py/modcmath.c
  85. 51 0
      mp_flipper/lib/micropython/py/modcollections.c
  86. 124 0
      mp_flipper/lib/micropython/py/moderrno.c
  87. 125 0
      mp_flipper/lib/micropython/py/modgc.c
  88. 231 0
      mp_flipper/lib/micropython/py/modio.c
  89. 440 0
      mp_flipper/lib/micropython/py/modmath.c
  90. 217 0
      mp_flipper/lib/micropython/py/modmicropython.c
  91. 278 0
      mp_flipper/lib/micropython/py/modstruct.c
  92. 376 0
      mp_flipper/lib/micropython/py/modsys.c
  93. 292 0
      mp_flipper/lib/micropython/py/modthread.c
  94. 2081 0
      mp_flipper/lib/micropython/py/mpconfig.h
  95. 152 0
      mp_flipper/lib/micropython/py/mperrno.h
  96. 114 0
      mp_flipper/lib/micropython/py/mphal.h
  97. 576 0
      mp_flipper/lib/micropython/py/mpprint.c
  98. 82 0
      mp_flipper/lib/micropython/py/mpprint.h
  99. 33 0
      mp_flipper/lib/micropython/py/mpstate.c
  100. 320 0
      mp_flipper/lib/micropython/py/mpstate.h

+ 1 - 0
mp_flipper/lib/micropython/.gitsubtree

@@ -0,0 +1 @@
+https://github.com/ofabel/mp-flipper 29091048ae9a613070da21b373062a450f7ecd08 /

+ 386 - 0
mp_flipper/lib/micropython/extmod/modjson.c

@@ -0,0 +1,386 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2014-2019 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdio.h>
+
+#include "py/objlist.h"
+#include "py/objstringio.h"
+#include "py/parsenum.h"
+#include "py/runtime.h"
+#include "py/stream.h"
+
+#if MICROPY_PY_JSON
+
+#if MICROPY_PY_JSON_SEPARATORS
+
+enum {
+    DUMP_MODE_TO_STRING = 1,
+    DUMP_MODE_TO_STREAM = 2,
+};
+
+static mp_obj_t mod_json_dump_helper(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args, unsigned int mode) {
+    enum { ARG_separators };
+    static const mp_arg_t allowed_args[] = {
+        { MP_QSTR_separators, MP_ARG_KW_ONLY | MP_ARG_OBJ, {.u_rom_obj = MP_ROM_NONE} },
+    };
+
+    mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+    mp_arg_parse_all(n_args - mode, pos_args + mode, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+    mp_print_ext_t print_ext;
+
+    if (args[ARG_separators].u_obj == mp_const_none) {
+        print_ext.item_separator = ", ";
+        print_ext.key_separator = ": ";
+    } else {
+        mp_obj_t *items;
+        mp_obj_get_array_fixed_n(args[ARG_separators].u_obj, 2, &items);
+        print_ext.item_separator = mp_obj_str_get_str(items[0]);
+        print_ext.key_separator = mp_obj_str_get_str(items[1]);
+    }
+
+    if (mode == DUMP_MODE_TO_STRING) {
+        // dumps(obj)
+        vstr_t vstr;
+        vstr_init_print(&vstr, 8, &print_ext.base);
+        mp_obj_print_helper(&print_ext.base, pos_args[0], PRINT_JSON);
+        return mp_obj_new_str_from_utf8_vstr(&vstr);
+    } else {
+        // dump(obj, stream)
+        print_ext.base.data = MP_OBJ_TO_PTR(pos_args[1]);
+        print_ext.base.print_strn = mp_stream_write_adaptor;
+        mp_get_stream_raise(pos_args[1], MP_STREAM_OP_WRITE);
+        mp_obj_print_helper(&print_ext.base, pos_args[0], PRINT_JSON);
+        return mp_const_none;
+    }
+}
+
+static mp_obj_t mod_json_dump(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+    return mod_json_dump_helper(n_args, pos_args, kw_args, DUMP_MODE_TO_STREAM);
+}
+static MP_DEFINE_CONST_FUN_OBJ_KW(mod_json_dump_obj, 2, mod_json_dump);
+
+static mp_obj_t mod_json_dumps(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+    return mod_json_dump_helper(n_args, pos_args, kw_args, DUMP_MODE_TO_STRING);
+}
+static MP_DEFINE_CONST_FUN_OBJ_KW(mod_json_dumps_obj, 1, mod_json_dumps);
+
+#else
+
+static mp_obj_t mod_json_dump(mp_obj_t obj, mp_obj_t stream) {
+    mp_get_stream_raise(stream, MP_STREAM_OP_WRITE);
+    mp_print_t print = {MP_OBJ_TO_PTR(stream), mp_stream_write_adaptor};
+    mp_obj_print_helper(&print, obj, PRINT_JSON);
+    return mp_const_none;
+}
+static MP_DEFINE_CONST_FUN_OBJ_2(mod_json_dump_obj, mod_json_dump);
+
+static mp_obj_t mod_json_dumps(mp_obj_t obj) {
+    vstr_t vstr;
+    mp_print_t print;
+    vstr_init_print(&vstr, 8, &print);
+    mp_obj_print_helper(&print, obj, PRINT_JSON);
+    return mp_obj_new_str_from_utf8_vstr(&vstr);
+}
+static MP_DEFINE_CONST_FUN_OBJ_1(mod_json_dumps_obj, mod_json_dumps);
+
+#endif
+
+// The function below implements a simple non-recursive JSON parser.
+//
+// The JSON specification is at http://www.ietf.org/rfc/rfc4627.txt
+// The parser here will parse any valid JSON and return the correct
+// corresponding Python object.  It allows through a superset of JSON, since
+// it treats commas and colons as "whitespace", and doesn't care if
+// brackets/braces are correctly paired.  It will raise a ValueError if the
+// input is outside it's specs.
+//
+// Most of the work is parsing the primitives (null, false, true, numbers,
+// strings).  It does 1 pass over the input stream.  It tries to be fast and
+// small in code size, while not using more RAM than necessary.
+
+typedef struct _json_stream_t {
+    mp_obj_t stream_obj;
+    mp_uint_t (*read)(mp_obj_t obj, void *buf, mp_uint_t size, int *errcode);
+    int errcode;
+    byte cur;
+} json_stream_t;
+
+#define S_EOF (0) // null is not allowed in json stream so is ok as EOF marker
+#define S_END(s) ((s).cur == S_EOF)
+#define S_CUR(s) ((s).cur)
+#define S_NEXT(s) (json_stream_next(&(s)))
+
+static byte json_stream_next(json_stream_t *s) {
+    mp_uint_t ret = s->read(s->stream_obj, &s->cur, 1, &s->errcode);
+    if (s->errcode != 0) {
+        mp_raise_OSError(s->errcode);
+    }
+    if (ret == 0) {
+        s->cur = S_EOF;
+    }
+    return s->cur;
+}
+
+static mp_obj_t mod_json_load(mp_obj_t stream_obj) {
+    const mp_stream_p_t *stream_p = mp_get_stream_raise(stream_obj, MP_STREAM_OP_READ);
+    json_stream_t s = {stream_obj, stream_p->read, 0, 0};
+    vstr_t vstr;
+    vstr_init(&vstr, 8);
+    mp_obj_list_t stack; // we use a list as a simple stack for nested JSON
+    stack.len = 0;
+    stack.items = NULL;
+    mp_obj_t stack_top = MP_OBJ_NULL;
+    const mp_obj_type_t *stack_top_type = NULL;
+    mp_obj_t stack_key = MP_OBJ_NULL;
+    S_NEXT(s);
+    for (;;) {
+    cont:
+        if (S_END(s)) {
+            break;
+        }
+        mp_obj_t next = MP_OBJ_NULL;
+        bool enter = false;
+        byte cur = S_CUR(s);
+        S_NEXT(s);
+        switch (cur) {
+            case ',':
+            case ':':
+            case ' ':
+            case '\t':
+            case '\n':
+            case '\r':
+                goto cont;
+            case 'n':
+                if (S_CUR(s) == 'u' && S_NEXT(s) == 'l' && S_NEXT(s) == 'l') {
+                    S_NEXT(s);
+                    next = mp_const_none;
+                } else {
+                    goto fail;
+                }
+                break;
+            case 'f':
+                if (S_CUR(s) == 'a' && S_NEXT(s) == 'l' && S_NEXT(s) == 's' && S_NEXT(s) == 'e') {
+                    S_NEXT(s);
+                    next = mp_const_false;
+                } else {
+                    goto fail;
+                }
+                break;
+            case 't':
+                if (S_CUR(s) == 'r' && S_NEXT(s) == 'u' && S_NEXT(s) == 'e') {
+                    S_NEXT(s);
+                    next = mp_const_true;
+                } else {
+                    goto fail;
+                }
+                break;
+            case '"':
+                vstr_reset(&vstr);
+                for (; !S_END(s) && S_CUR(s) != '"';) {
+                    byte c = S_CUR(s);
+                    if (c == '\\') {
+                        c = S_NEXT(s);
+                        switch (c) {
+                            case 'b':
+                                c = 0x08;
+                                break;
+                            case 'f':
+                                c = 0x0c;
+                                break;
+                            case 'n':
+                                c = 0x0a;
+                                break;
+                            case 'r':
+                                c = 0x0d;
+                                break;
+                            case 't':
+                                c = 0x09;
+                                break;
+                            case 'u': {
+                                mp_uint_t num = 0;
+                                for (int i = 0; i < 4; i++) {
+                                    c = (S_NEXT(s) | 0x20) - '0';
+                                    if (c > 9) {
+                                        c -= ('a' - ('9' + 1));
+                                    }
+                                    num = (num << 4) | c;
+                                }
+                                vstr_add_char(&vstr, num);
+                                goto str_cont;
+                            }
+                        }
+                    }
+                    vstr_add_byte(&vstr, c);
+                str_cont:
+                    S_NEXT(s);
+                }
+                if (S_END(s)) {
+                    goto fail;
+                }
+                S_NEXT(s);
+                next = mp_obj_new_str(vstr.buf, vstr.len);
+                break;
+            case '-':
+            case '0':
+            case '1':
+            case '2':
+            case '3':
+            case '4':
+            case '5':
+            case '6':
+            case '7':
+            case '8':
+            case '9': {
+                bool flt = false;
+                vstr_reset(&vstr);
+                for (;;) {
+                    vstr_add_byte(&vstr, cur);
+                    cur = S_CUR(s);
+                    if (cur == '.' || cur == 'E' || cur == 'e') {
+                        flt = true;
+                    } else if (cur == '+' || cur == '-' || unichar_isdigit(cur)) {
+                        // pass
+                    } else {
+                        break;
+                    }
+                    S_NEXT(s);
+                }
+                if (flt) {
+                    next = mp_parse_num_float(vstr.buf, vstr.len, false, NULL);
+                } else {
+                    next = mp_parse_num_integer(vstr.buf, vstr.len, 10, NULL);
+                }
+                break;
+            }
+            case '[':
+                next = mp_obj_new_list(0, NULL);
+                enter = true;
+                break;
+            case '{':
+                next = mp_obj_new_dict(0);
+                enter = true;
+                break;
+            case '}':
+            case ']': {
+                if (stack_top == MP_OBJ_NULL) {
+                    // no object at all
+                    goto fail;
+                }
+                if (stack.len == 0) {
+                    // finished; compound object
+                    goto success;
+                }
+                stack.len -= 1;
+                stack_top = stack.items[stack.len];
+                stack_top_type = mp_obj_get_type(stack_top);
+                goto cont;
+            }
+            default:
+                goto fail;
+        }
+        if (stack_top == MP_OBJ_NULL) {
+            stack_top = next;
+            stack_top_type = mp_obj_get_type(stack_top);
+            if (!enter) {
+                // finished; single primitive only
+                goto success;
+            }
+        } else {
+            // append to list or dict
+            if (stack_top_type == &mp_type_list) {
+                mp_obj_list_append(stack_top, next);
+            } else {
+                if (stack_key == MP_OBJ_NULL) {
+                    stack_key = next;
+                    if (enter) {
+                        goto fail;
+                    }
+                } else {
+                    mp_obj_dict_store(stack_top, stack_key, next);
+                    stack_key = MP_OBJ_NULL;
+                }
+            }
+            if (enter) {
+                if (stack.items == NULL) {
+                    mp_obj_list_init(&stack, 1);
+                    stack.items[0] = stack_top;
+                } else {
+                    mp_obj_list_append(MP_OBJ_FROM_PTR(&stack), stack_top);
+                }
+                stack_top = next;
+                stack_top_type = mp_obj_get_type(stack_top);
+            }
+        }
+    }
+success:
+    // eat trailing whitespace
+    while (unichar_isspace(S_CUR(s))) {
+        S_NEXT(s);
+    }
+    if (!S_END(s)) {
+        // unexpected chars
+        goto fail;
+    }
+    if (stack_top == MP_OBJ_NULL || stack.len != 0) {
+        // not exactly 1 object
+        goto fail;
+    }
+    vstr_clear(&vstr);
+    return stack_top;
+
+fail:
+    mp_raise_ValueError(MP_ERROR_TEXT("syntax error in JSON"));
+}
+static MP_DEFINE_CONST_FUN_OBJ_1(mod_json_load_obj, mod_json_load);
+
+static mp_obj_t mod_json_loads(mp_obj_t obj) {
+    mp_buffer_info_t bufinfo;
+    mp_get_buffer_raise(obj, &bufinfo, MP_BUFFER_READ);
+    vstr_t vstr = {bufinfo.len, bufinfo.len, (char *)bufinfo.buf, true};
+    mp_obj_stringio_t sio = {{&mp_type_stringio}, &vstr, 0, MP_OBJ_NULL};
+    return mod_json_load(MP_OBJ_FROM_PTR(&sio));
+}
+static MP_DEFINE_CONST_FUN_OBJ_1(mod_json_loads_obj, mod_json_loads);
+
+static const mp_rom_map_elem_t mp_module_json_globals_table[] = {
+    { MP_ROM_QSTR(MP_QSTR___name__), MP_ROM_QSTR(MP_QSTR_json) },
+    { MP_ROM_QSTR(MP_QSTR_dump), MP_ROM_PTR(&mod_json_dump_obj) },
+    { MP_ROM_QSTR(MP_QSTR_dumps), MP_ROM_PTR(&mod_json_dumps_obj) },
+    { MP_ROM_QSTR(MP_QSTR_load), MP_ROM_PTR(&mod_json_load_obj) },
+    { MP_ROM_QSTR(MP_QSTR_loads), MP_ROM_PTR(&mod_json_loads_obj) },
+};
+
+static MP_DEFINE_CONST_DICT(mp_module_json_globals, mp_module_json_globals_table);
+
+const mp_obj_module_t mp_module_json = {
+    .base = { &mp_type_module },
+    .globals = (mp_obj_dict_t *)&mp_module_json_globals,
+};
+
+MP_REGISTER_EXTENSIBLE_MODULE(MP_QSTR_json, mp_module_json);
+
+#endif // MICROPY_PY_JSON

+ 107 - 0
mp_flipper/lib/micropython/extmod/modplatform.h

@@ -0,0 +1,107 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013-2021 Ibrahim Abdelkader <iabdalkader@openmv.io>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_MODPLATFORM_H
+#define MICROPY_INCLUDED_MODPLATFORM_H
+
+#include "py/misc.h"  // For MP_STRINGIFY.
+#include "py/mpconfig.h"
+
+// Preprocessor directives identifying the platform.
+// The platform module itself is guarded by MICROPY_PY_PLATFORM, see the
+// .c file, but these are made available because they're generally usable.
+// TODO: Add more architectures, compilers and libraries.
+// See: https://sourceforge.net/p/predef/wiki/Home/
+
+#if defined(__ARM_ARCH)
+#define MICROPY_PLATFORM_ARCH   "arm"
+#elif defined(__x86_64__) || defined(_M_X64)
+#define MICROPY_PLATFORM_ARCH   "x86_64"
+#elif defined(__i386__) || defined(_M_IX86)
+#define MICROPY_PLATFORM_ARCH   "x86"
+#elif defined(__xtensa__)
+#define MICROPY_PLATFORM_ARCH   "xtensa"
+#elif defined(__riscv)
+#define MICROPY_PLATFORM_ARCH   "riscv"
+#else
+#define MICROPY_PLATFORM_ARCH   ""
+#endif
+
+#if defined(__GNUC__)
+#define MICROPY_PLATFORM_COMPILER \
+    "GCC " \
+    MP_STRINGIFY(__GNUC__) "." \
+    MP_STRINGIFY(__GNUC_MINOR__) "." \
+    MP_STRINGIFY(__GNUC_PATCHLEVEL__)
+#elif defined(__ARMCC_VERSION)
+#define MICROPY_PLATFORM_COMPILER \
+    "ARMCC " \
+    MP_STRINGIFY((__ARMCC_VERSION / 1000000)) "." \
+    MP_STRINGIFY((__ARMCC_VERSION / 10000 % 100)) "." \
+    MP_STRINGIFY((__ARMCC_VERSION % 10000))
+#elif defined(_MSC_VER)
+#if defined(_WIN64)
+#define MICROPY_PLATFORM_COMPILER_BITS  "64 bit"
+#elif defined(_M_IX86)
+#define MICROPY_PLATFORM_COMPILER_BITS  "32 bit"
+#else
+#define MICROPY_PLATFORM_COMPILER_BITS  ""
+#endif
+#define MICROPY_PLATFORM_COMPILER \
+    "MSC v." MP_STRINGIFY(_MSC_VER) " " MICROPY_PLATFORM_COMPILER_BITS
+#else
+#define MICROPY_PLATFORM_COMPILER       ""
+#endif
+
+#if defined(__GLIBC__)
+#define MICROPY_PLATFORM_LIBC_LIB       "glibc"
+#define MICROPY_PLATFORM_LIBC_VER \
+    MP_STRINGIFY(__GLIBC__) "." \
+    MP_STRINGIFY(__GLIBC_MINOR__)
+#elif defined(__NEWLIB__)
+#define MICROPY_PLATFORM_LIBC_LIB       "newlib"
+#define MICROPY_PLATFORM_LIBC_VER       _NEWLIB_VERSION
+#else
+#define MICROPY_PLATFORM_LIBC_LIB       ""
+#define MICROPY_PLATFORM_LIBC_VER       ""
+#endif
+
+#if defined(__linux)
+#define MICROPY_PLATFORM_SYSTEM         "Linux"
+#elif defined(__unix__)
+#define MICROPY_PLATFORM_SYSTEM         "Unix"
+#elif defined(__CYGWIN__)
+#define MICROPY_PLATFORM_SYSTEM         "Cygwin"
+#elif defined(_WIN32)
+#define MICROPY_PLATFORM_SYSTEM         "Windows"
+#else
+#define MICROPY_PLATFORM_SYSTEM         "MicroPython"
+#endif
+
+#ifndef MICROPY_PLATFORM_VERSION
+#define MICROPY_PLATFORM_VERSION ""
+#endif
+
+#endif // MICROPY_INCLUDED_MODPLATFORM_H

+ 261 - 0
mp_flipper/lib/micropython/extmod/modrandom.c

@@ -0,0 +1,261 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2016 Paul Sokolovsky
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <string.h>
+
+#include "py/runtime.h"
+
+#if MICROPY_PY_RANDOM
+
+// Work out if the seed will be set on import or not.
+#if MICROPY_MODULE_BUILTIN_INIT && defined(MICROPY_PY_RANDOM_SEED_INIT_FUNC)
+#define SEED_ON_IMPORT (1)
+#else
+#define SEED_ON_IMPORT (0)
+#endif
+
+// Yasmarang random number generator
+// by Ilya Levin
+// http://www.literatecode.com/yasmarang
+// Public Domain
+
+#if !MICROPY_ENABLE_DYNRUNTIME
+#if SEED_ON_IMPORT
+// If the state is seeded on import then keep these variables in the BSS.
+static uint32_t yasmarang_pad, yasmarang_n, yasmarang_d;
+static uint8_t yasmarang_dat;
+#else
+// Without seed-on-import these variables must be initialised via the data section.
+static uint32_t yasmarang_pad = 0xeda4baba, yasmarang_n = 69, yasmarang_d = 233;
+static uint8_t yasmarang_dat = 0;
+#endif
+#endif
+
+static uint32_t yasmarang(void) {
+    yasmarang_pad += yasmarang_dat + yasmarang_d * yasmarang_n;
+    yasmarang_pad = (yasmarang_pad << 3) + (yasmarang_pad >> 29);
+    yasmarang_n = yasmarang_pad | 2;
+    yasmarang_d ^= (yasmarang_pad << 31) + (yasmarang_pad >> 1);
+    yasmarang_dat ^= (char)yasmarang_pad ^ (yasmarang_d >> 8) ^ 1;
+
+    return yasmarang_pad ^ (yasmarang_d << 5) ^ (yasmarang_pad >> 18) ^ (yasmarang_dat << 1);
+}  /* yasmarang */
+
+// End of Yasmarang
+
+#if MICROPY_PY_RANDOM_EXTRA_FUNCS
+
+// returns an unsigned integer below the given argument
+// n must not be zero
+static uint32_t yasmarang_randbelow(uint32_t n) {
+    uint32_t mask = 1;
+    while ((n & mask) < n) {
+        mask = (mask << 1) | 1;
+    }
+    uint32_t r;
+    do {
+        r = yasmarang() & mask;
+    } while (r >= n);
+    return r;
+}
+
+#endif
+
+static mp_obj_t mod_random_getrandbits(mp_obj_t num_in) {
+    mp_int_t n = mp_obj_get_int(num_in);
+    if (n > 32 || n < 0) {
+        mp_raise_ValueError(MP_ERROR_TEXT("bits must be 32 or less"));
+    }
+    if (n == 0) {
+        return MP_OBJ_NEW_SMALL_INT(0);
+    }
+    uint32_t mask = ~0;
+    // Beware of C undefined behavior when shifting by >= than bit size
+    mask >>= (32 - n);
+    return mp_obj_new_int_from_uint(yasmarang() & mask);
+}
+static MP_DEFINE_CONST_FUN_OBJ_1(mod_random_getrandbits_obj, mod_random_getrandbits);
+
+static mp_obj_t mod_random_seed(size_t n_args, const mp_obj_t *args) {
+    mp_uint_t seed;
+    if (n_args == 0 || args[0] == mp_const_none) {
+        #ifdef MICROPY_PY_RANDOM_SEED_INIT_FUNC
+        seed = MICROPY_PY_RANDOM_SEED_INIT_FUNC;
+        #else
+        mp_raise_ValueError(MP_ERROR_TEXT("no default seed"));
+        #endif
+    } else {
+        seed = mp_obj_get_int_truncated(args[0]);
+    }
+    yasmarang_pad = (uint32_t)seed;
+    yasmarang_n = 69;
+    yasmarang_d = 233;
+    yasmarang_dat = 0;
+    return mp_const_none;
+}
+static MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mod_random_seed_obj, 0, 1, mod_random_seed);
+
+#if MICROPY_PY_RANDOM_EXTRA_FUNCS
+
+static mp_obj_t mod_random_randrange(size_t n_args, const mp_obj_t *args) {
+    mp_int_t start = mp_obj_get_int(args[0]);
+    if (n_args == 1) {
+        // range(stop)
+        if (start > 0) {
+            return mp_obj_new_int(yasmarang_randbelow((uint32_t)start));
+        } else {
+            goto error;
+        }
+    } else {
+        mp_int_t stop = mp_obj_get_int(args[1]);
+        if (n_args == 2) {
+            // range(start, stop)
+            if (start < stop) {
+                return mp_obj_new_int(start + yasmarang_randbelow((uint32_t)(stop - start)));
+            } else {
+                goto error;
+            }
+        } else {
+            // range(start, stop, step)
+            mp_int_t step = mp_obj_get_int(args[2]);
+            mp_int_t n;
+            if (step > 0) {
+                n = (stop - start + step - 1) / step;
+            } else if (step < 0) {
+                n = (stop - start + step + 1) / step;
+            } else {
+                goto error;
+            }
+            if (n > 0) {
+                return mp_obj_new_int(start + step * yasmarang_randbelow((uint32_t)n));
+            } else {
+                goto error;
+            }
+        }
+    }
+
+error:
+    mp_raise_ValueError(NULL);
+}
+static MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mod_random_randrange_obj, 1, 3, mod_random_randrange);
+
+static mp_obj_t mod_random_randint(mp_obj_t a_in, mp_obj_t b_in) {
+    mp_int_t a = mp_obj_get_int(a_in);
+    mp_int_t b = mp_obj_get_int(b_in);
+    if (a <= b) {
+        return mp_obj_new_int(a + yasmarang_randbelow((uint32_t)(b - a + 1)));
+    } else {
+        mp_raise_ValueError(NULL);
+    }
+}
+static MP_DEFINE_CONST_FUN_OBJ_2(mod_random_randint_obj, mod_random_randint);
+
+static mp_obj_t mod_random_choice(mp_obj_t seq) {
+    mp_int_t len = mp_obj_get_int(mp_obj_len(seq));
+    if (len > 0) {
+        return mp_obj_subscr(seq, mp_obj_new_int(yasmarang_randbelow((uint32_t)len)), MP_OBJ_SENTINEL);
+    } else {
+        mp_raise_type(&mp_type_IndexError);
+    }
+}
+static MP_DEFINE_CONST_FUN_OBJ_1(mod_random_choice_obj, mod_random_choice);
+
+#if MICROPY_PY_BUILTINS_FLOAT
+
+// returns a number in the range [0..1) using Yasmarang to fill in the fraction bits
+static mp_float_t yasmarang_float(void) {
+    mp_float_union_t u;
+    u.p.sgn = 0;
+    u.p.exp = (1 << (MP_FLOAT_EXP_BITS - 1)) - 1;
+    if (MP_FLOAT_FRAC_BITS <= 32) {
+        u.p.frc = yasmarang();
+    } else {
+        u.p.frc = ((uint64_t)yasmarang() << 32) | (uint64_t)yasmarang();
+    }
+    return u.f - 1;
+}
+
+static mp_obj_t mod_random_random(void) {
+    return mp_obj_new_float(yasmarang_float());
+}
+static MP_DEFINE_CONST_FUN_OBJ_0(mod_random_random_obj, mod_random_random);
+
+static mp_obj_t mod_random_uniform(mp_obj_t a_in, mp_obj_t b_in) {
+    mp_float_t a = mp_obj_get_float(a_in);
+    mp_float_t b = mp_obj_get_float(b_in);
+    return mp_obj_new_float(a + (b - a) * yasmarang_float());
+}
+static MP_DEFINE_CONST_FUN_OBJ_2(mod_random_uniform_obj, mod_random_uniform);
+
+#endif
+
+#endif // MICROPY_PY_RANDOM_EXTRA_FUNCS
+
+#if SEED_ON_IMPORT
+static mp_obj_t mod_random___init__(void) {
+    // This module may be imported by more than one name so need to ensure
+    // that it's only ever seeded once.
+    static bool seeded = false;
+    if (!seeded) {
+        seeded = true;
+        mod_random_seed(0, NULL);
+    }
+    return mp_const_none;
+}
+static MP_DEFINE_CONST_FUN_OBJ_0(mod_random___init___obj, mod_random___init__);
+#endif
+
+#if !MICROPY_ENABLE_DYNRUNTIME
+static const mp_rom_map_elem_t mp_module_random_globals_table[] = {
+    { MP_ROM_QSTR(MP_QSTR___name__), MP_ROM_QSTR(MP_QSTR_random) },
+    #if SEED_ON_IMPORT
+    { MP_ROM_QSTR(MP_QSTR___init__), MP_ROM_PTR(&mod_random___init___obj) },
+    #endif
+    { MP_ROM_QSTR(MP_QSTR_getrandbits), MP_ROM_PTR(&mod_random_getrandbits_obj) },
+    { MP_ROM_QSTR(MP_QSTR_seed), MP_ROM_PTR(&mod_random_seed_obj) },
+    #if MICROPY_PY_RANDOM_EXTRA_FUNCS
+    { MP_ROM_QSTR(MP_QSTR_randrange), MP_ROM_PTR(&mod_random_randrange_obj) },
+    { MP_ROM_QSTR(MP_QSTR_randint), MP_ROM_PTR(&mod_random_randint_obj) },
+    { MP_ROM_QSTR(MP_QSTR_choice), MP_ROM_PTR(&mod_random_choice_obj) },
+    #if MICROPY_PY_BUILTINS_FLOAT
+    { MP_ROM_QSTR(MP_QSTR_random), MP_ROM_PTR(&mod_random_random_obj) },
+    { MP_ROM_QSTR(MP_QSTR_uniform), MP_ROM_PTR(&mod_random_uniform_obj) },
+    #endif
+    #endif
+};
+
+static MP_DEFINE_CONST_DICT(mp_module_random_globals, mp_module_random_globals_table);
+
+const mp_obj_module_t mp_module_random = {
+    .base = { &mp_type_module },
+    .globals = (mp_obj_dict_t *)&mp_module_random_globals,
+};
+
+MP_REGISTER_EXTENSIBLE_MODULE(MP_QSTR_random, mp_module_random);
+#endif
+
+#endif // MICROPY_PY_RANDOM

+ 236 - 0
mp_flipper/lib/micropython/extmod/modtime.c

@@ -0,0 +1,236 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013-2023 Damien P. George
+ * Copyright (c) 2016 Paul Sokolovsky
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "py/mphal.h"
+#include "py/runtime.h"
+#include "py/smallint.h"
+#include "extmod/modtime.h"
+
+#if MICROPY_PY_TIME
+
+#ifdef MICROPY_PY_TIME_INCLUDEFILE
+#include MICROPY_PY_TIME_INCLUDEFILE
+#endif
+
+#if MICROPY_PY_TIME_GMTIME_LOCALTIME_MKTIME
+
+#include "shared/timeutils/timeutils.h"
+
+// localtime([secs])
+// Convert a time expressed in seconds since the Epoch into an 8-tuple which
+// contains: (year, month, mday, hour, minute, second, weekday, yearday)
+// If secs is not provided or None, then the current time is used.
+// - year    is the full year, eg 2000
+// - month   is 1-12
+// - mday    is 1-31
+// - hour    is 0-23
+// - minute  is 0-59
+// - second  is 0-59
+// - weekday is 0-6 for Mon-Sun
+// - yearday is 1-366
+static mp_obj_t time_localtime(size_t n_args, const mp_obj_t *args) {
+    if (n_args == 0 || args[0] == mp_const_none) {
+        // Get current date and time.
+        return mp_time_localtime_get();
+    } else {
+        // Convert given seconds to tuple.
+        mp_int_t seconds = mp_obj_get_int(args[0]);
+        timeutils_struct_time_t tm;
+        timeutils_seconds_since_epoch_to_struct_time(seconds, &tm);
+        // Plain positional initializers.  The previous form wrote
+        // "tuple[0] = mp_obj_new_int(...)" inside the initializer list, which
+        // assigns the very element being initialized via expressions whose
+        // evaluation order is indeterminately sequenced (C11 6.7.9) — legal in
+        // practice here, but needlessly fragile; the assignments are redundant.
+        mp_obj_t tuple[8] = {
+            mp_obj_new_int(tm.tm_year),
+            mp_obj_new_int(tm.tm_mon),
+            mp_obj_new_int(tm.tm_mday),
+            mp_obj_new_int(tm.tm_hour),
+            mp_obj_new_int(tm.tm_min),
+            mp_obj_new_int(tm.tm_sec),
+            mp_obj_new_int(tm.tm_wday),
+            mp_obj_new_int(tm.tm_yday),
+        };
+        return mp_obj_new_tuple(8, tuple);
+    }
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_time_localtime_obj, 0, 1, time_localtime);
+
+// mktime()
+// This is the inverse function of localtime. Its argument is a full 8-tuple
+// which expresses a time as per localtime. It returns an integer which is
+// the number of seconds since the Epoch (eg 1st Jan 1970, or 1st Jan 2000).
+static mp_obj_t time_mktime(mp_obj_t tuple) {
+    size_t len;
+    mp_obj_t *elem;
+    mp_obj_get_array(tuple, &len, &elem);
+
+    // localtime generates a tuple of len 8. CPython uses 9, so we accept both.
+    if (len < 8 || len > 9) {
+        mp_raise_TypeError(MP_ERROR_TEXT("mktime needs a tuple of length 8 or 9"));
+    }
+
+    // Only elements 0-5 (year, month, mday, hour, minute, second) are used;
+    // the trailing weekday/yearday (and CPython's 9th dst field) are ignored.
+    return mp_obj_new_int_from_uint(timeutils_mktime(mp_obj_get_int(elem[0]),
+        mp_obj_get_int(elem[1]), mp_obj_get_int(elem[2]), mp_obj_get_int(elem[3]),
+        mp_obj_get_int(elem[4]), mp_obj_get_int(elem[5])));
+}
+MP_DEFINE_CONST_FUN_OBJ_1(mp_time_mktime_obj, time_mktime);
+
+#endif // MICROPY_PY_TIME_GMTIME_LOCALTIME_MKTIME
+
+#if MICROPY_PY_TIME_TIME_TIME_NS
+
+// time()
+// Return the number of seconds since the Epoch.
+static mp_obj_t time_time(void) {
+    // Delegate to the port's clock implementation (mp_time_time_get comes
+    // from MICROPY_PY_TIME_INCLUDEFILE).
+    return mp_time_time_get();
+}
+static MP_DEFINE_CONST_FUN_OBJ_0(mp_time_time_obj, time_time);
+
+// time_ns()
+// Returns the number of nanoseconds since the Epoch, as an integer.
+static mp_obj_t time_time_ns(void) {
+    // 64-bit value can exceed the small-int range, so build a big int.
+    return mp_obj_new_int_from_ull(mp_hal_time_ns());
+}
+MP_DEFINE_CONST_FUN_OBJ_0(mp_time_time_ns_obj, time_time_ns);
+
+#endif // MICROPY_PY_TIME_TIME_TIME_NS
+
+// sleep(seconds): delay for the given number of seconds (fractional if
+// floats are enabled).  Ports may override via MICROPY_PY_TIME_CUSTOM_SLEEP.
+static mp_obj_t time_sleep(mp_obj_t seconds_o) {
+    #ifdef MICROPY_PY_TIME_CUSTOM_SLEEP
+    // Port-provided sleep implementation.
+    mp_time_sleep(seconds_o);
+    #else
+    #if MICROPY_PY_BUILTINS_FLOAT
+    // Fractional seconds supported: convert to whole milliseconds.
+    mp_hal_delay_ms((mp_uint_t)(1000 * mp_obj_get_float(seconds_o)))__;
+    #else
+    mp_hal_delay_ms(1000 * mp_obj_get_int(seconds_o));
+    #endif
+    #endif
+    return mp_const_none;
+}
+MP_DEFINE_CONST_FUN_OBJ_1(mp_time_sleep_obj, time_sleep);
+
+// sleep_ms(ms): delay for the given number of milliseconds; negative values
+// are silently treated as a zero delay.
+static mp_obj_t time_sleep_ms(mp_obj_t arg) {
+    mp_int_t ms = mp_obj_get_int(arg);
+    if (ms >= 0) {
+        mp_hal_delay_ms(ms);
+    }
+    return mp_const_none;
+}
+MP_DEFINE_CONST_FUN_OBJ_1(mp_time_sleep_ms_obj, time_sleep_ms);
+
+// sleep_us(us): delay for the given number of microseconds; zero and
+// negative values are silently treated as no delay (note the strict > 0,
+// unlike sleep_ms which allows 0 through).
+static mp_obj_t time_sleep_us(mp_obj_t arg) {
+    mp_int_t us = mp_obj_get_int(arg);
+    if (us > 0) {
+        mp_hal_delay_us(us);
+    }
+    return mp_const_none;
+}
+MP_DEFINE_CONST_FUN_OBJ_1(mp_time_sleep_us_obj, time_sleep_us);
+
+// ticks_ms/ticks_us/ticks_cpu: monotonic counters wrapped into
+// [0, MICROPY_PY_TIME_TICKS_PERIOD).  The "& (PERIOD - 1)" mask implies the
+// period is a power of two, so the result always fits in a small int.
+static mp_obj_t time_ticks_ms(void) {
+    return MP_OBJ_NEW_SMALL_INT(mp_hal_ticks_ms() & (MICROPY_PY_TIME_TICKS_PERIOD - 1));
+}
+MP_DEFINE_CONST_FUN_OBJ_0(mp_time_ticks_ms_obj, time_ticks_ms);
+
+static mp_obj_t time_ticks_us(void) {
+    return MP_OBJ_NEW_SMALL_INT(mp_hal_ticks_us() & (MICROPY_PY_TIME_TICKS_PERIOD - 1));
+}
+MP_DEFINE_CONST_FUN_OBJ_0(mp_time_ticks_us_obj, time_ticks_us);
+
+static mp_obj_t time_ticks_cpu(void) {
+    return MP_OBJ_NEW_SMALL_INT(mp_hal_ticks_cpu() & (MICROPY_PY_TIME_TICKS_PERIOD - 1));
+}
+MP_DEFINE_CONST_FUN_OBJ_0(mp_time_ticks_cpu_obj, time_ticks_cpu);
+
+// ticks_diff(end, start): signed difference between two wrapped tick values,
+// yielding a result in [-PERIOD/2, PERIOD/2).
+static mp_obj_t time_ticks_diff(mp_obj_t end_in, mp_obj_t start_in) {
+    // we assume that the arguments come from ticks_xx so are small ints
+    mp_uint_t start = MP_OBJ_SMALL_INT_VALUE(start_in);
+    mp_uint_t end = MP_OBJ_SMALL_INT_VALUE(end_in);
+    // Optimized formula avoiding if conditions. We adjust difference "forward",
+    // wrap it around and adjust back.
+    mp_int_t diff = ((end - start + MICROPY_PY_TIME_TICKS_PERIOD / 2) & (MICROPY_PY_TIME_TICKS_PERIOD - 1))
+        - MICROPY_PY_TIME_TICKS_PERIOD / 2;
+    return MP_OBJ_NEW_SMALL_INT(diff);
+}
+MP_DEFINE_CONST_FUN_OBJ_2(mp_time_ticks_diff_obj, time_ticks_diff);
+
+// ticks_add(ticks, delta): advance a wrapped tick value by a signed delta,
+// keeping the result inside [0, PERIOD).  Raises OverflowError if |delta|
+// is too large for ticks_diff to round-trip.
+static mp_obj_t time_ticks_add(mp_obj_t ticks_in, mp_obj_t delta_in) {
+    // we assume that first argument come from ticks_xx so is small int
+    mp_uint_t ticks = MP_OBJ_SMALL_INT_VALUE(ticks_in);
+    mp_uint_t delta = mp_obj_get_int(delta_in);
+
+    // Check that delta does not overflow the range that ticks_diff can handle.
+    // This ensures the following:
+    //  - ticks_diff(ticks_add(T, delta), T) == delta
+    //  - ticks_diff(T, ticks_add(T, delta)) == -delta
+    // The latter requires excluding delta=-TICKS_PERIOD/2.
+    //
+    // This unsigned comparison is equivalent to a signed comparison of:
+    //   delta <= -TICKS_PERIOD/2 || delta >= TICKS_PERIOD/2
+    if (delta + MICROPY_PY_TIME_TICKS_PERIOD / 2 - 1 >= MICROPY_PY_TIME_TICKS_PERIOD - 1) {
+        mp_raise_msg(&mp_type_OverflowError, MP_ERROR_TEXT("ticks interval overflow"));
+    }
+
+    return MP_OBJ_NEW_SMALL_INT((ticks + delta) & (MICROPY_PY_TIME_TICKS_PERIOD - 1));
+}
+MP_DEFINE_CONST_FUN_OBJ_2(mp_time_ticks_add_obj, time_ticks_add);
+
+// Globals table for the built-in "time" module.  Note gmtime and localtime
+// intentionally share the same implementation object (no timezone support).
+static const mp_rom_map_elem_t mp_module_time_globals_table[] = {
+    { MP_ROM_QSTR(MP_QSTR___name__), MP_ROM_QSTR(MP_QSTR_time) },
+
+    #if MICROPY_PY_TIME_GMTIME_LOCALTIME_MKTIME
+    { MP_ROM_QSTR(MP_QSTR_gmtime), MP_ROM_PTR(&mp_time_localtime_obj) },
+    { MP_ROM_QSTR(MP_QSTR_localtime), MP_ROM_PTR(&mp_time_localtime_obj) },
+    { MP_ROM_QSTR(MP_QSTR_mktime), MP_ROM_PTR(&mp_time_mktime_obj) },
+    #endif
+
+    #if MICROPY_PY_TIME_TIME_TIME_NS
+    { MP_ROM_QSTR(MP_QSTR_time), MP_ROM_PTR(&mp_time_time_obj) },
+    { MP_ROM_QSTR(MP_QSTR_time_ns), MP_ROM_PTR(&mp_time_time_ns_obj) },
+    #endif
+
+    { MP_ROM_QSTR(MP_QSTR_sleep), MP_ROM_PTR(&mp_time_sleep_obj) },
+    { MP_ROM_QSTR(MP_QSTR_sleep_ms), MP_ROM_PTR(&mp_time_sleep_ms_obj) },
+    { MP_ROM_QSTR(MP_QSTR_sleep_us), MP_ROM_PTR(&mp_time_sleep_us_obj) },
+
+    { MP_ROM_QSTR(MP_QSTR_ticks_ms), MP_ROM_PTR(&mp_time_ticks_ms_obj) },
+    { MP_ROM_QSTR(MP_QSTR_ticks_us), MP_ROM_PTR(&mp_time_ticks_us_obj) },
+    { MP_ROM_QSTR(MP_QSTR_ticks_cpu), MP_ROM_PTR(&mp_time_ticks_cpu_obj) },
+    { MP_ROM_QSTR(MP_QSTR_ticks_add), MP_ROM_PTR(&mp_time_ticks_add_obj) },
+    { MP_ROM_QSTR(MP_QSTR_ticks_diff), MP_ROM_PTR(&mp_time_ticks_diff_obj) },
+
+    // Ports can append extra entries via this macro.
+    #ifdef MICROPY_PY_TIME_EXTRA_GLOBALS
+    MICROPY_PY_TIME_EXTRA_GLOBALS
+    #endif
+};
+static MP_DEFINE_CONST_DICT(mp_module_time_globals, mp_module_time_globals_table);
+
+// Module object; registered as extensible so a user "time.py" can extend it.
+const mp_obj_module_t mp_module_time = {
+    .base = { &mp_type_module },
+    .globals = (mp_obj_dict_t *)&mp_module_time_globals,
+};
+
+MP_REGISTER_EXTENSIBLE_MODULE(MP_QSTR_time, mp_module_time);
+#endif // MICROPY_PY_TIME

+ 43 - 0
mp_flipper/lib/micropython/extmod/modtime.h

@@ -0,0 +1,43 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013-2016 Damien P. George
+ * Copyright (c) 2016 Paul Sokolovsky
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_EXTMOD_MODUTIME_H
+#define MICROPY_INCLUDED_EXTMOD_MODUTIME_H
+// NOTE(review): the guard says MODUTIME (presumably a holdover from the old
+// "utime" module name) while the file is modtime.h — confirm against upstream.
+
+#include "py/obj.h"
+
+// Function objects exported for use by ports (e.g. via
+// MICROPY_PY_TIME_EXTRA_GLOBALS); definitions live in extmod/modtime.c.
+MP_DECLARE_CONST_FUN_OBJ_1(mp_time_mktime_obj);
+MP_DECLARE_CONST_FUN_OBJ_1(mp_time_sleep_obj);
+MP_DECLARE_CONST_FUN_OBJ_1(mp_time_sleep_ms_obj);
+MP_DECLARE_CONST_FUN_OBJ_1(mp_time_sleep_us_obj);
+MP_DECLARE_CONST_FUN_OBJ_0(mp_time_ticks_ms_obj);
+MP_DECLARE_CONST_FUN_OBJ_0(mp_time_ticks_us_obj);
+MP_DECLARE_CONST_FUN_OBJ_0(mp_time_ticks_cpu_obj);
+MP_DECLARE_CONST_FUN_OBJ_2(mp_time_ticks_diff_obj);
+MP_DECLARE_CONST_FUN_OBJ_2(mp_time_ticks_add_obj);
+MP_DECLARE_CONST_FUN_OBJ_0(mp_time_time_ns_obj);
+
+#endif // MICROPY_INCLUDED_EXTMOD_MODUTIME_H

+ 43 - 0
mp_flipper/lib/micropython/genhdr/moduledefs.h

@@ -0,0 +1,43 @@
+// Automatically generated by makemoduledefs.py.
+
+extern const struct _mp_obj_module_t mp_module_io;
+#undef MODULE_DEF_IO
+#define MODULE_DEF_IO { MP_ROM_QSTR(MP_QSTR_io), MP_ROM_PTR(&mp_module_io) },
+
+extern const struct _mp_obj_module_t mp_module_logging;
+#undef MODULE_DEF_LOGGING
+#define MODULE_DEF_LOGGING { MP_ROM_QSTR(MP_QSTR_logging), MP_ROM_PTR(&mp_module_logging) },
+
+extern const struct _mp_obj_module_t mp_module_random;
+#undef MODULE_DEF_RANDOM
+#define MODULE_DEF_RANDOM { MP_ROM_QSTR(MP_QSTR_random), MP_ROM_PTR(&mp_module_random) },
+
+extern const struct _mp_obj_module_t mp_module_time;
+#undef MODULE_DEF_TIME
+#define MODULE_DEF_TIME { MP_ROM_QSTR(MP_QSTR_time), MP_ROM_PTR(&mp_module_time) },
+
+extern const struct _mp_obj_module_t mp_module___main__;
+#undef MODULE_DEF___MAIN__
+#define MODULE_DEF___MAIN__ { MP_ROM_QSTR(MP_QSTR___main__), MP_ROM_PTR(&mp_module___main__) },
+
+extern const struct _mp_obj_module_t mp_module_builtins;
+#undef MODULE_DEF_BUILTINS
+#define MODULE_DEF_BUILTINS { MP_ROM_QSTR(MP_QSTR_builtins), MP_ROM_PTR(&mp_module_builtins) },
+
+extern const struct _mp_obj_module_t flipperzero_module;
+#undef MODULE_DEF_FLIPPERZERO
+#define MODULE_DEF_FLIPPERZERO { MP_ROM_QSTR(MP_QSTR_flipperzero), MP_ROM_PTR(&flipperzero_module) },
+
+
+#define MICROPY_REGISTERED_MODULES \
+    MODULE_DEF_BUILTINS \
+    MODULE_DEF_FLIPPERZERO \
+    MODULE_DEF___MAIN__ \
+// MICROPY_REGISTERED_MODULES
+
+#define MICROPY_REGISTERED_EXTENSIBLE_MODULES \
+    MODULE_DEF_IO \
+    MODULE_DEF_LOGGING \
+    MODULE_DEF_RANDOM \
+    MODULE_DEF_TIME \
+// MICROPY_REGISTERED_EXTENSIBLE_MODULES

+ 4 - 0
mp_flipper/lib/micropython/genhdr/mpversion.h

@@ -0,0 +1,4 @@
+// This file was generated by py/makeversionhdr.py
+#define MICROPY_GIT_TAG "v1.23.0"
+#define MICROPY_GIT_HASH "a61c446c0"
+#define MICROPY_BUILD_DATE "2024-10-06"

+ 481 - 0
mp_flipper/lib/micropython/genhdr/qstrdefs.generated.h

@@ -0,0 +1,481 @@
+// This file was automatically generated by makeqstrdata.py
+
+QDEF0(MP_QSTRnull, 0, 0, "")
+QDEF0(MP_QSTR_, 5, 0, "")
+QDEF0(MP_QSTR___dir__, 122, 7, "__dir__")
+QDEF0(MP_QSTR__0x0a_, 175, 1, "\x0a")
+QDEF0(MP_QSTR__space_, 133, 1, " ")
+QDEF0(MP_QSTR__star_, 143, 1, "*")
+QDEF0(MP_QSTR__slash_, 138, 1, "/")
+QDEF0(MP_QSTR__lt_module_gt_, 189, 8, "<module>")
+QDEF0(MP_QSTR__, 250, 1, "_")
+QDEF0(MP_QSTR___call__, 167, 8, "__call__")
+QDEF0(MP_QSTR___class__, 43, 9, "__class__")
+QDEF0(MP_QSTR___delitem__, 253, 11, "__delitem__")
+QDEF0(MP_QSTR___enter__, 109, 9, "__enter__")
+QDEF0(MP_QSTR___exit__, 69, 8, "__exit__")
+QDEF0(MP_QSTR___getattr__, 64, 11, "__getattr__")
+QDEF0(MP_QSTR___getitem__, 38, 11, "__getitem__")
+QDEF0(MP_QSTR___hash__, 247, 8, "__hash__")
+QDEF0(MP_QSTR___init__, 95, 8, "__init__")
+QDEF0(MP_QSTR___int__, 22, 7, "__int__")
+QDEF0(MP_QSTR___iter__, 207, 8, "__iter__")
+QDEF0(MP_QSTR___len__, 226, 7, "__len__")
+QDEF0(MP_QSTR___main__, 142, 8, "__main__")
+QDEF0(MP_QSTR___module__, 255, 10, "__module__")
+QDEF0(MP_QSTR___name__, 226, 8, "__name__")
+QDEF0(MP_QSTR___new__, 121, 7, "__new__")
+QDEF0(MP_QSTR___next__, 2, 8, "__next__")
+QDEF0(MP_QSTR___qualname__, 107, 12, "__qualname__")
+QDEF0(MP_QSTR___repr__, 16, 8, "__repr__")
+QDEF0(MP_QSTR___setitem__, 50, 11, "__setitem__")
+QDEF0(MP_QSTR___str__, 208, 7, "__str__")
+QDEF0(MP_QSTR_ArithmeticError, 45, 15, "ArithmeticError")
+QDEF0(MP_QSTR_AssertionError, 151, 14, "AssertionError")
+QDEF0(MP_QSTR_AttributeError, 33, 14, "AttributeError")
+QDEF0(MP_QSTR_BaseException, 7, 13, "BaseException")
+QDEF0(MP_QSTR_EOFError, 145, 8, "EOFError")
+QDEF0(MP_QSTR_Ellipsis, 240, 8, "Ellipsis")
+QDEF0(MP_QSTR_Exception, 242, 9, "Exception")
+QDEF0(MP_QSTR_GeneratorExit, 22, 13, "GeneratorExit")
+QDEF0(MP_QSTR_ImportError, 32, 11, "ImportError")
+QDEF0(MP_QSTR_IndentationError, 92, 16, "IndentationError")
+QDEF0(MP_QSTR_IndexError, 131, 10, "IndexError")
+QDEF0(MP_QSTR_KeyError, 234, 8, "KeyError")
+QDEF0(MP_QSTR_KeyboardInterrupt, 175, 17, "KeyboardInterrupt")
+QDEF0(MP_QSTR_LookupError, 255, 11, "LookupError")
+QDEF0(MP_QSTR_MemoryError, 220, 11, "MemoryError")
+QDEF0(MP_QSTR_NameError, 186, 9, "NameError")
+QDEF0(MP_QSTR_NoneType, 23, 8, "NoneType")
+QDEF0(MP_QSTR_NotImplementedError, 198, 19, "NotImplementedError")
+QDEF0(MP_QSTR_OSError, 161, 7, "OSError")
+QDEF0(MP_QSTR_OverflowError, 129, 13, "OverflowError")
+QDEF0(MP_QSTR_RuntimeError, 97, 12, "RuntimeError")
+QDEF0(MP_QSTR_StopIteration, 234, 13, "StopIteration")
+QDEF0(MP_QSTR_SyntaxError, 148, 11, "SyntaxError")
+QDEF0(MP_QSTR_SystemExit, 32, 10, "SystemExit")
+QDEF0(MP_QSTR_TypeError, 37, 9, "TypeError")
+QDEF0(MP_QSTR_ValueError, 150, 10, "ValueError")
+QDEF0(MP_QSTR_ZeroDivisionError, 182, 17, "ZeroDivisionError")
+QDEF0(MP_QSTR_abs, 149, 3, "abs")
+QDEF0(MP_QSTR_all, 68, 3, "all")
+QDEF0(MP_QSTR_any, 19, 3, "any")
+QDEF0(MP_QSTR_append, 107, 6, "append")
+QDEF0(MP_QSTR_args, 194, 4, "args")
+QDEF0(MP_QSTR_bool, 235, 4, "bool")
+QDEF0(MP_QSTR_builtins, 247, 8, "builtins")
+QDEF0(MP_QSTR_bytearray, 118, 9, "bytearray")
+QDEF0(MP_QSTR_bytecode, 34, 8, "bytecode")
+QDEF0(MP_QSTR_bytes, 92, 5, "bytes")
+QDEF0(MP_QSTR_callable, 13, 8, "callable")
+QDEF0(MP_QSTR_chr, 220, 3, "chr")
+QDEF0(MP_QSTR_classmethod, 180, 11, "classmethod")
+QDEF0(MP_QSTR_clear, 124, 5, "clear")
+QDEF0(MP_QSTR_close, 51, 5, "close")
+QDEF0(MP_QSTR_const, 192, 5, "const")
+QDEF0(MP_QSTR_copy, 224, 4, "copy")
+QDEF0(MP_QSTR_count, 166, 5, "count")
+QDEF0(MP_QSTR_dict, 63, 4, "dict")
+QDEF0(MP_QSTR_dir, 250, 3, "dir")
+QDEF0(MP_QSTR_divmod, 184, 6, "divmod")
+QDEF0(MP_QSTR_end, 10, 3, "end")
+QDEF0(MP_QSTR_endswith, 27, 8, "endswith")
+QDEF0(MP_QSTR_eval, 155, 4, "eval")
+QDEF0(MP_QSTR_exec, 30, 4, "exec")
+QDEF0(MP_QSTR_extend, 99, 6, "extend")
+QDEF0(MP_QSTR_find, 1, 4, "find")
+QDEF0(MP_QSTR_format, 38, 6, "format")
+QDEF0(MP_QSTR_from_bytes, 53, 10, "from_bytes")
+QDEF0(MP_QSTR_get, 51, 3, "get")
+QDEF0(MP_QSTR_getattr, 192, 7, "getattr")
+QDEF0(MP_QSTR_globals, 157, 7, "globals")
+QDEF0(MP_QSTR_hasattr, 140, 7, "hasattr")
+QDEF0(MP_QSTR_hash, 183, 4, "hash")
+QDEF0(MP_QSTR_id, 40, 2, "id")
+QDEF0(MP_QSTR_index, 123, 5, "index")
+QDEF0(MP_QSTR_insert, 18, 6, "insert")
+QDEF0(MP_QSTR_int, 22, 3, "int")
+QDEF0(MP_QSTR_isalpha, 235, 7, "isalpha")
+QDEF0(MP_QSTR_isdigit, 168, 7, "isdigit")
+QDEF0(MP_QSTR_isinstance, 182, 10, "isinstance")
+QDEF0(MP_QSTR_islower, 252, 7, "islower")
+QDEF0(MP_QSTR_isspace, 91, 7, "isspace")
+QDEF0(MP_QSTR_issubclass, 181, 10, "issubclass")
+QDEF0(MP_QSTR_isupper, 221, 7, "isupper")
+QDEF0(MP_QSTR_items, 227, 5, "items")
+QDEF0(MP_QSTR_iter, 143, 4, "iter")
+QDEF0(MP_QSTR_join, 167, 4, "join")
+QDEF0(MP_QSTR_key, 50, 3, "key")
+QDEF0(MP_QSTR_keys, 1, 4, "keys")
+QDEF0(MP_QSTR_len, 98, 3, "len")
+QDEF0(MP_QSTR_list, 39, 4, "list")
+QDEF0(MP_QSTR_little, 137, 6, "little")
+QDEF0(MP_QSTR_locals, 59, 6, "locals")
+QDEF0(MP_QSTR_lower, 198, 5, "lower")
+QDEF0(MP_QSTR_lstrip, 229, 6, "lstrip")
+QDEF0(MP_QSTR_main, 206, 4, "main")
+QDEF0(MP_QSTR_map, 185, 3, "map")
+QDEF0(MP_QSTR_micropython, 11, 11, "micropython")
+QDEF0(MP_QSTR_next, 66, 4, "next")
+QDEF0(MP_QSTR_object, 144, 6, "object")
+QDEF0(MP_QSTR_open, 209, 4, "open")
+QDEF0(MP_QSTR_ord, 28, 3, "ord")
+QDEF0(MP_QSTR_pop, 42, 3, "pop")
+QDEF0(MP_QSTR_popitem, 191, 7, "popitem")
+QDEF0(MP_QSTR_pow, 45, 3, "pow")
+QDEF0(MP_QSTR_print, 84, 5, "print")
+QDEF0(MP_QSTR_range, 26, 5, "range")
+QDEF0(MP_QSTR_read, 183, 4, "read")
+QDEF0(MP_QSTR_readinto, 75, 8, "readinto")
+QDEF0(MP_QSTR_readline, 249, 8, "readline")
+QDEF0(MP_QSTR_remove, 99, 6, "remove")
+QDEF0(MP_QSTR_replace, 73, 7, "replace")
+QDEF0(MP_QSTR_repr, 208, 4, "repr")
+QDEF0(MP_QSTR_reverse, 37, 7, "reverse")
+QDEF0(MP_QSTR_rfind, 210, 5, "rfind")
+QDEF0(MP_QSTR_rindex, 233, 6, "rindex")
+QDEF0(MP_QSTR_round, 231, 5, "round")
+QDEF0(MP_QSTR_rsplit, 165, 6, "rsplit")
+QDEF0(MP_QSTR_rstrip, 59, 6, "rstrip")
+QDEF0(MP_QSTR_self, 121, 4, "self")
+QDEF0(MP_QSTR_send, 185, 4, "send")
+QDEF0(MP_QSTR_sep, 35, 3, "sep")
+QDEF0(MP_QSTR_set, 39, 3, "set")
+QDEF0(MP_QSTR_setattr, 212, 7, "setattr")
+QDEF0(MP_QSTR_setdefault, 108, 10, "setdefault")
+QDEF0(MP_QSTR_sort, 191, 4, "sort")
+QDEF0(MP_QSTR_sorted, 94, 6, "sorted")
+QDEF0(MP_QSTR_split, 183, 5, "split")
+QDEF0(MP_QSTR_start, 133, 5, "start")
+QDEF0(MP_QSTR_startswith, 116, 10, "startswith")
+QDEF0(MP_QSTR_staticmethod, 98, 12, "staticmethod")
+QDEF0(MP_QSTR_step, 87, 4, "step")
+QDEF0(MP_QSTR_stop, 157, 4, "stop")
+QDEF0(MP_QSTR_str, 80, 3, "str")
+QDEF0(MP_QSTR_strip, 41, 5, "strip")
+QDEF0(MP_QSTR_sum, 46, 3, "sum")
+QDEF0(MP_QSTR_super, 196, 5, "super")
+QDEF0(MP_QSTR_throw, 179, 5, "throw")
+QDEF0(MP_QSTR_to_bytes, 216, 8, "to_bytes")
+QDEF0(MP_QSTR_tuple, 253, 5, "tuple")
+QDEF0(MP_QSTR_type, 157, 4, "type")
+QDEF0(MP_QSTR_update, 180, 6, "update")
+QDEF0(MP_QSTR_upper, 39, 5, "upper")
+QDEF0(MP_QSTR_utf_hyphen_8, 183, 5, "utf-8")
+QDEF0(MP_QSTR_value, 78, 5, "value")
+QDEF0(MP_QSTR_values, 125, 6, "values")
+QDEF0(MP_QSTR_write, 152, 5, "write")
+QDEF0(MP_QSTR_zip, 230, 3, "zip")
+QDEF1(MP_QSTR__percent__hash_o, 108, 3, "%#o")
+QDEF1(MP_QSTR__percent__hash_x, 123, 3, "%#x")
+QDEF0(MP_QSTR__lt_dictcomp_gt_, 204, 10, "<dictcomp>")
+QDEF0(MP_QSTR__lt_genexpr_gt_, 52, 9, "<genexpr>")
+QDEF0(MP_QSTR__lt_lambda_gt_, 128, 8, "<lambda>")
+QDEF0(MP_QSTR__lt_listcomp_gt_, 212, 10, "<listcomp>")
+QDEF0(MP_QSTR__lt_setcomp_gt_, 84, 9, "<setcomp>")
+QDEF1(MP_QSTR__lt_stdin_gt_, 227, 7, "<stdin>")
+QDEF1(MP_QSTR__lt_string_gt_, 82, 8, "<string>")
+QDEF1(MP_QSTR_ALIGN_BEGIN, 240, 11, "ALIGN_BEGIN")
+QDEF1(MP_QSTR_ALIGN_CENTER, 28, 12, "ALIGN_CENTER")
+QDEF1(MP_QSTR_ALIGN_END, 248, 9, "ALIGN_END")
+QDEF1(MP_QSTR_BinaryFileIO, 106, 12, "BinaryFileIO")
+QDEF1(MP_QSTR_CANVAS_BLACK, 213, 12, "CANVAS_BLACK")
+QDEF1(MP_QSTR_CANVAS_WHITE, 53, 12, "CANVAS_WHITE")
+QDEF1(MP_QSTR_DEBUG, 52, 5, "DEBUG")
+QDEF1(MP_QSTR_ERROR, 157, 5, "ERROR")
+QDEF1(MP_QSTR_FONT_PRIMARY, 133, 12, "FONT_PRIMARY")
+QDEF1(MP_QSTR_FONT_SECONDARY, 51, 14, "FONT_SECONDARY")
+QDEF1(MP_QSTR_GPIO_MODE_ANALOG, 29, 16, "GPIO_MODE_ANALOG")
+QDEF1(MP_QSTR_GPIO_MODE_INPUT, 97, 15, "GPIO_MODE_INPUT")
+QDEF1(MP_QSTR_GPIO_MODE_INTERRUPT_FALL, 40, 24, "GPIO_MODE_INTERRUPT_FALL")
+QDEF1(MP_QSTR_GPIO_MODE_INTERRUPT_RISE, 130, 24, "GPIO_MODE_INTERRUPT_RISE")
+QDEF1(MP_QSTR_GPIO_MODE_OUTPUT_OPEN_DRAIN, 172, 27, "GPIO_MODE_OUTPUT_OPEN_DRAIN")
+QDEF1(MP_QSTR_GPIO_MODE_OUTPUT_PUSH_PULL, 179, 26, "GPIO_MODE_OUTPUT_PUSH_PULL")
+QDEF1(MP_QSTR_GPIO_PIN_PA4, 102, 12, "GPIO_PIN_PA4")
+QDEF1(MP_QSTR_GPIO_PIN_PA6, 100, 12, "GPIO_PIN_PA6")
+QDEF1(MP_QSTR_GPIO_PIN_PA7, 101, 12, "GPIO_PIN_PA7")
+QDEF1(MP_QSTR_GPIO_PIN_PB2, 3, 12, "GPIO_PIN_PB2")
+QDEF1(MP_QSTR_GPIO_PIN_PB3, 2, 12, "GPIO_PIN_PB3")
+QDEF1(MP_QSTR_GPIO_PIN_PC0, 32, 12, "GPIO_PIN_PC0")
+QDEF1(MP_QSTR_GPIO_PIN_PC1, 33, 12, "GPIO_PIN_PC1")
+QDEF1(MP_QSTR_GPIO_PIN_PC3, 35, 12, "GPIO_PIN_PC3")
+QDEF1(MP_QSTR_GPIO_PULL_DOWN, 163, 14, "GPIO_PULL_DOWN")
+QDEF1(MP_QSTR_GPIO_PULL_NO, 144, 12, "GPIO_PULL_NO")
+QDEF1(MP_QSTR_GPIO_PULL_UP, 52, 12, "GPIO_PULL_UP")
+QDEF1(MP_QSTR_GPIO_SPEED_HIGH, 125, 15, "GPIO_SPEED_HIGH")
+QDEF1(MP_QSTR_GPIO_SPEED_LOW, 71, 14, "GPIO_SPEED_LOW")
+QDEF1(MP_QSTR_GPIO_SPEED_MEDIUM, 78, 17, "GPIO_SPEED_MEDIUM")
+QDEF1(MP_QSTR_GPIO_SPEED_VERY_HIGH, 154, 20, "GPIO_SPEED_VERY_HIGH")
+QDEF1(MP_QSTR_INFO, 235, 4, "INFO")
+QDEF1(MP_QSTR_INPUT_BUTTON_BACK, 174, 17, "INPUT_BUTTON_BACK")
+QDEF1(MP_QSTR_INPUT_BUTTON_DOWN, 247, 17, "INPUT_BUTTON_DOWN")
+QDEF1(MP_QSTR_INPUT_BUTTON_LEFT, 94, 17, "INPUT_BUTTON_LEFT")
+QDEF1(MP_QSTR_INPUT_BUTTON_OK, 33, 15, "INPUT_BUTTON_OK")
+QDEF1(MP_QSTR_INPUT_BUTTON_RIGHT, 5, 18, "INPUT_BUTTON_RIGHT")
+QDEF1(MP_QSTR_INPUT_BUTTON_UP, 96, 15, "INPUT_BUTTON_UP")
+QDEF1(MP_QSTR_INPUT_TYPE_LONG, 97, 15, "INPUT_TYPE_LONG")
+QDEF1(MP_QSTR_INPUT_TYPE_PRESS, 108, 16, "INPUT_TYPE_PRESS")
+QDEF1(MP_QSTR_INPUT_TYPE_RELEASE, 34, 18, "INPUT_TYPE_RELEASE")
+QDEF1(MP_QSTR_INPUT_TYPE_REPEAT, 28, 17, "INPUT_TYPE_REPEAT")
+QDEF1(MP_QSTR_INPUT_TYPE_SHORT, 153, 16, "INPUT_TYPE_SHORT")
+QDEF1(MP_QSTR_LIGHT_BACKLIGHT, 17, 15, "LIGHT_BACKLIGHT")
+QDEF1(MP_QSTR_LIGHT_BLUE, 90, 10, "LIGHT_BLUE")
+QDEF1(MP_QSTR_LIGHT_GREEN, 95, 11, "LIGHT_GREEN")
+QDEF1(MP_QSTR_LIGHT_RED, 215, 9, "LIGHT_RED")
+QDEF1(MP_QSTR_NONE, 79, 4, "NONE")
+QDEF1(MP_QSTR_SEEK_CUR, 134, 8, "SEEK_CUR")
+QDEF1(MP_QSTR_SEEK_END, 237, 8, "SEEK_END")
+QDEF1(MP_QSTR_SEEK_SET, 128, 8, "SEEK_SET")
+QDEF1(MP_QSTR_SPEAKER_NOTE_A0, 95, 15, "SPEAKER_NOTE_A0")
+QDEF1(MP_QSTR_SPEAKER_NOTE_A1, 94, 15, "SPEAKER_NOTE_A1")
+QDEF1(MP_QSTR_SPEAKER_NOTE_A2, 93, 15, "SPEAKER_NOTE_A2")
+QDEF1(MP_QSTR_SPEAKER_NOTE_A3, 92, 15, "SPEAKER_NOTE_A3")
+QDEF1(MP_QSTR_SPEAKER_NOTE_A4, 91, 15, "SPEAKER_NOTE_A4")
+QDEF1(MP_QSTR_SPEAKER_NOTE_A5, 90, 15, "SPEAKER_NOTE_A5")
+QDEF1(MP_QSTR_SPEAKER_NOTE_A6, 89, 15, "SPEAKER_NOTE_A6")
+QDEF1(MP_QSTR_SPEAKER_NOTE_A7, 88, 15, "SPEAKER_NOTE_A7")
+QDEF1(MP_QSTR_SPEAKER_NOTE_A8, 87, 15, "SPEAKER_NOTE_A8")
+QDEF1(MP_QSTR_SPEAKER_NOTE_AS0, 140, 16, "SPEAKER_NOTE_AS0")
+QDEF1(MP_QSTR_SPEAKER_NOTE_AS1, 141, 16, "SPEAKER_NOTE_AS1")
+QDEF1(MP_QSTR_SPEAKER_NOTE_AS2, 142, 16, "SPEAKER_NOTE_AS2")
+QDEF1(MP_QSTR_SPEAKER_NOTE_AS3, 143, 16, "SPEAKER_NOTE_AS3")
+QDEF1(MP_QSTR_SPEAKER_NOTE_AS4, 136, 16, "SPEAKER_NOTE_AS4")
+QDEF1(MP_QSTR_SPEAKER_NOTE_AS5, 137, 16, "SPEAKER_NOTE_AS5")
+QDEF1(MP_QSTR_SPEAKER_NOTE_AS6, 138, 16, "SPEAKER_NOTE_AS6")
+QDEF1(MP_QSTR_SPEAKER_NOTE_AS7, 139, 16, "SPEAKER_NOTE_AS7")
+QDEF1(MP_QSTR_SPEAKER_NOTE_AS8, 132, 16, "SPEAKER_NOTE_AS8")
+QDEF1(MP_QSTR_SPEAKER_NOTE_B0, 60, 15, "SPEAKER_NOTE_B0")
+QDEF1(MP_QSTR_SPEAKER_NOTE_B1, 61, 15, "SPEAKER_NOTE_B1")
+QDEF1(MP_QSTR_SPEAKER_NOTE_B2, 62, 15, "SPEAKER_NOTE_B2")
+QDEF1(MP_QSTR_SPEAKER_NOTE_B3, 63, 15, "SPEAKER_NOTE_B3")
+QDEF1(MP_QSTR_SPEAKER_NOTE_B4, 56, 15, "SPEAKER_NOTE_B4")
+QDEF1(MP_QSTR_SPEAKER_NOTE_B5, 57, 15, "SPEAKER_NOTE_B5")
+QDEF1(MP_QSTR_SPEAKER_NOTE_B6, 58, 15, "SPEAKER_NOTE_B6")
+QDEF1(MP_QSTR_SPEAKER_NOTE_B7, 59, 15, "SPEAKER_NOTE_B7")
+QDEF1(MP_QSTR_SPEAKER_NOTE_B8, 52, 15, "SPEAKER_NOTE_B8")
+QDEF1(MP_QSTR_SPEAKER_NOTE_C0, 29, 15, "SPEAKER_NOTE_C0")
+QDEF1(MP_QSTR_SPEAKER_NOTE_C1, 28, 15, "SPEAKER_NOTE_C1")
+QDEF1(MP_QSTR_SPEAKER_NOTE_C2, 31, 15, "SPEAKER_NOTE_C2")
+QDEF1(MP_QSTR_SPEAKER_NOTE_C3, 30, 15, "SPEAKER_NOTE_C3")
+QDEF1(MP_QSTR_SPEAKER_NOTE_C4, 25, 15, "SPEAKER_NOTE_C4")
+QDEF1(MP_QSTR_SPEAKER_NOTE_C5, 24, 15, "SPEAKER_NOTE_C5")
+QDEF1(MP_QSTR_SPEAKER_NOTE_C6, 27, 15, "SPEAKER_NOTE_C6")
+QDEF1(MP_QSTR_SPEAKER_NOTE_C7, 26, 15, "SPEAKER_NOTE_C7")
+QDEF1(MP_QSTR_SPEAKER_NOTE_C8, 21, 15, "SPEAKER_NOTE_C8")
+QDEF1(MP_QSTR_SPEAKER_NOTE_CS0, 14, 16, "SPEAKER_NOTE_CS0")
+QDEF1(MP_QSTR_SPEAKER_NOTE_CS1, 15, 16, "SPEAKER_NOTE_CS1")
+QDEF1(MP_QSTR_SPEAKER_NOTE_CS2, 12, 16, "SPEAKER_NOTE_CS2")
+QDEF1(MP_QSTR_SPEAKER_NOTE_CS3, 13, 16, "SPEAKER_NOTE_CS3")
+QDEF1(MP_QSTR_SPEAKER_NOTE_CS4, 10, 16, "SPEAKER_NOTE_CS4")
+QDEF1(MP_QSTR_SPEAKER_NOTE_CS5, 11, 16, "SPEAKER_NOTE_CS5")
+QDEF1(MP_QSTR_SPEAKER_NOTE_CS6, 8, 16, "SPEAKER_NOTE_CS6")
+QDEF1(MP_QSTR_SPEAKER_NOTE_CS7, 9, 16, "SPEAKER_NOTE_CS7")
+QDEF1(MP_QSTR_SPEAKER_NOTE_CS8, 6, 16, "SPEAKER_NOTE_CS8")
+QDEF1(MP_QSTR_SPEAKER_NOTE_D0, 250, 15, "SPEAKER_NOTE_D0")
+QDEF1(MP_QSTR_SPEAKER_NOTE_D1, 251, 15, "SPEAKER_NOTE_D1")
+QDEF1(MP_QSTR_SPEAKER_NOTE_D2, 248, 15, "SPEAKER_NOTE_D2")
+QDEF1(MP_QSTR_SPEAKER_NOTE_D3, 249, 15, "SPEAKER_NOTE_D3")
+QDEF1(MP_QSTR_SPEAKER_NOTE_D4, 254, 15, "SPEAKER_NOTE_D4")
+QDEF1(MP_QSTR_SPEAKER_NOTE_D5, 255, 15, "SPEAKER_NOTE_D5")
+QDEF1(MP_QSTR_SPEAKER_NOTE_D6, 252, 15, "SPEAKER_NOTE_D6")
+QDEF1(MP_QSTR_SPEAKER_NOTE_D7, 253, 15, "SPEAKER_NOTE_D7")
+QDEF1(MP_QSTR_SPEAKER_NOTE_D8, 242, 15, "SPEAKER_NOTE_D8")
+QDEF1(MP_QSTR_SPEAKER_NOTE_DS0, 137, 16, "SPEAKER_NOTE_DS0")
+QDEF1(MP_QSTR_SPEAKER_NOTE_DS1, 136, 16, "SPEAKER_NOTE_DS1")
+QDEF1(MP_QSTR_SPEAKER_NOTE_DS2, 139, 16, "SPEAKER_NOTE_DS2")
+QDEF1(MP_QSTR_SPEAKER_NOTE_DS3, 138, 16, "SPEAKER_NOTE_DS3")
+QDEF1(MP_QSTR_SPEAKER_NOTE_DS4, 141, 16, "SPEAKER_NOTE_DS4")
+QDEF1(MP_QSTR_SPEAKER_NOTE_DS5, 140, 16, "SPEAKER_NOTE_DS5")
+QDEF1(MP_QSTR_SPEAKER_NOTE_DS6, 143, 16, "SPEAKER_NOTE_DS6")
+QDEF1(MP_QSTR_SPEAKER_NOTE_DS7, 142, 16, "SPEAKER_NOTE_DS7")
+QDEF1(MP_QSTR_SPEAKER_NOTE_DS8, 129, 16, "SPEAKER_NOTE_DS8")
+QDEF1(MP_QSTR_SPEAKER_NOTE_E0, 219, 15, "SPEAKER_NOTE_E0")
+QDEF1(MP_QSTR_SPEAKER_NOTE_E1, 218, 15, "SPEAKER_NOTE_E1")
+QDEF1(MP_QSTR_SPEAKER_NOTE_E2, 217, 15, "SPEAKER_NOTE_E2")
+QDEF1(MP_QSTR_SPEAKER_NOTE_E3, 216, 15, "SPEAKER_NOTE_E3")
+QDEF1(MP_QSTR_SPEAKER_NOTE_E4, 223, 15, "SPEAKER_NOTE_E4")
+QDEF1(MP_QSTR_SPEAKER_NOTE_E5, 222, 15, "SPEAKER_NOTE_E5")
+QDEF1(MP_QSTR_SPEAKER_NOTE_E6, 221, 15, "SPEAKER_NOTE_E6")
+QDEF1(MP_QSTR_SPEAKER_NOTE_E7, 220, 15, "SPEAKER_NOTE_E7")
+QDEF1(MP_QSTR_SPEAKER_NOTE_E8, 211, 15, "SPEAKER_NOTE_E8")
+QDEF1(MP_QSTR_SPEAKER_NOTE_F0, 184, 15, "SPEAKER_NOTE_F0")
+QDEF1(MP_QSTR_SPEAKER_NOTE_F1, 185, 15, "SPEAKER_NOTE_F1")
+QDEF1(MP_QSTR_SPEAKER_NOTE_F2, 186, 15, "SPEAKER_NOTE_F2")
+QDEF1(MP_QSTR_SPEAKER_NOTE_F3, 187, 15, "SPEAKER_NOTE_F3")
+QDEF1(MP_QSTR_SPEAKER_NOTE_F4, 188, 15, "SPEAKER_NOTE_F4")
+QDEF1(MP_QSTR_SPEAKER_NOTE_F5, 189, 15, "SPEAKER_NOTE_F5")
+QDEF1(MP_QSTR_SPEAKER_NOTE_F6, 190, 15, "SPEAKER_NOTE_F6")
+QDEF1(MP_QSTR_SPEAKER_NOTE_F7, 191, 15, "SPEAKER_NOTE_F7")
+QDEF1(MP_QSTR_SPEAKER_NOTE_F8, 176, 15, "SPEAKER_NOTE_F8")
+QDEF1(MP_QSTR_SPEAKER_NOTE_FS0, 11, 16, "SPEAKER_NOTE_FS0")
+QDEF1(MP_QSTR_SPEAKER_NOTE_FS1, 10, 16, "SPEAKER_NOTE_FS1")
+QDEF1(MP_QSTR_SPEAKER_NOTE_FS2, 9, 16, "SPEAKER_NOTE_FS2")
+QDEF1(MP_QSTR_SPEAKER_NOTE_FS3, 8, 16, "SPEAKER_NOTE_FS3")
+QDEF1(MP_QSTR_SPEAKER_NOTE_FS4, 15, 16, "SPEAKER_NOTE_FS4")
+QDEF1(MP_QSTR_SPEAKER_NOTE_FS5, 14, 16, "SPEAKER_NOTE_FS5")
+QDEF1(MP_QSTR_SPEAKER_NOTE_FS6, 13, 16, "SPEAKER_NOTE_FS6")
+QDEF1(MP_QSTR_SPEAKER_NOTE_FS7, 12, 16, "SPEAKER_NOTE_FS7")
+QDEF1(MP_QSTR_SPEAKER_NOTE_FS8, 3, 16, "SPEAKER_NOTE_FS8")
+QDEF1(MP_QSTR_SPEAKER_NOTE_G0, 153, 15, "SPEAKER_NOTE_G0")
+QDEF1(MP_QSTR_SPEAKER_NOTE_G1, 152, 15, "SPEAKER_NOTE_G1")
+QDEF1(MP_QSTR_SPEAKER_NOTE_G2, 155, 15, "SPEAKER_NOTE_G2")
+QDEF1(MP_QSTR_SPEAKER_NOTE_G3, 154, 15, "SPEAKER_NOTE_G3")
+QDEF1(MP_QSTR_SPEAKER_NOTE_G4, 157, 15, "SPEAKER_NOTE_G4")
+QDEF1(MP_QSTR_SPEAKER_NOTE_G5, 156, 15, "SPEAKER_NOTE_G5")
+QDEF1(MP_QSTR_SPEAKER_NOTE_G6, 159, 15, "SPEAKER_NOTE_G6")
+QDEF1(MP_QSTR_SPEAKER_NOTE_G7, 158, 15, "SPEAKER_NOTE_G7")
+QDEF1(MP_QSTR_SPEAKER_NOTE_G8, 145, 15, "SPEAKER_NOTE_G8")
+QDEF1(MP_QSTR_SPEAKER_NOTE_GS0, 10, 16, "SPEAKER_NOTE_GS0")
+QDEF1(MP_QSTR_SPEAKER_NOTE_GS1, 11, 16, "SPEAKER_NOTE_GS1")
+QDEF1(MP_QSTR_SPEAKER_NOTE_GS2, 8, 16, "SPEAKER_NOTE_GS2")
+QDEF1(MP_QSTR_SPEAKER_NOTE_GS3, 9, 16, "SPEAKER_NOTE_GS3")
+QDEF1(MP_QSTR_SPEAKER_NOTE_GS4, 14, 16, "SPEAKER_NOTE_GS4")
+QDEF1(MP_QSTR_SPEAKER_NOTE_GS5, 15, 16, "SPEAKER_NOTE_GS5")
+QDEF1(MP_QSTR_SPEAKER_NOTE_GS6, 12, 16, "SPEAKER_NOTE_GS6")
+QDEF1(MP_QSTR_SPEAKER_NOTE_GS7, 13, 16, "SPEAKER_NOTE_GS7")
+QDEF1(MP_QSTR_SPEAKER_NOTE_GS8, 2, 16, "SPEAKER_NOTE_GS8")
+QDEF1(MP_QSTR_SPEAKER_VOLUME_MAX, 66, 18, "SPEAKER_VOLUME_MAX")
+QDEF1(MP_QSTR_SPEAKER_VOLUME_MIN, 92, 18, "SPEAKER_VOLUME_MIN")
+QDEF1(MP_QSTR_TRACE, 196, 5, "TRACE")
+QDEF1(MP_QSTR_TextFileIO, 56, 10, "TextFileIO")
+QDEF1(MP_QSTR_UART, 183, 4, "UART")
+QDEF1(MP_QSTR_UART_MODE_LPUART, 250, 16, "UART_MODE_LPUART")
+QDEF1(MP_QSTR_UART_MODE_USART, 53, 15, "UART_MODE_USART")
+QDEF1(MP_QSTR_WARN, 239, 4, "WARN")
+QDEF0(MP_QSTR___add__, 196, 7, "__add__")
+QDEF1(MP_QSTR___bases__, 3, 9, "__bases__")
+QDEF0(MP_QSTR___bool__, 43, 8, "__bool__")
+QDEF1(MP_QSTR___build_class__, 66, 15, "__build_class__")
+QDEF0(MP_QSTR___contains__, 198, 12, "__contains__")
+QDEF1(MP_QSTR___del__, 104, 7, "__del__")
+QDEF1(MP_QSTR___dict__, 127, 8, "__dict__")
+QDEF0(MP_QSTR___eq__, 113, 6, "__eq__")
+QDEF1(MP_QSTR___file__, 3, 8, "__file__")
+QDEF0(MP_QSTR___float__, 53, 9, "__float__")
+QDEF0(MP_QSTR___ge__, 167, 6, "__ge__")
+QDEF1(MP_QSTR___globals__, 157, 11, "__globals__")
+QDEF0(MP_QSTR___gt__, 182, 6, "__gt__")
+QDEF0(MP_QSTR___iadd__, 109, 8, "__iadd__")
+QDEF1(MP_QSTR___import__, 56, 10, "__import__")
+QDEF0(MP_QSTR___isub__, 8, 8, "__isub__")
+QDEF0(MP_QSTR___le__, 204, 6, "__le__")
+QDEF0(MP_QSTR___lt__, 93, 6, "__lt__")
+QDEF0(MP_QSTR___ne__, 14, 6, "__ne__")
+QDEF1(MP_QSTR___path__, 200, 8, "__path__")
+QDEF1(MP_QSTR___repl_print__, 1, 14, "__repl_print__")
+QDEF1(MP_QSTR___reversed__, 97, 12, "__reversed__")
+QDEF0(MP_QSTR___sub__, 33, 7, "__sub__")
+QDEF1(MP_QSTR___traceback__, 79, 13, "__traceback__")
+QDEF1(MP_QSTR__gpio_trigger_handler, 235, 21, "_gpio_trigger_handler")
+QDEF1(MP_QSTR__input_trigger_handler, 108, 22, "_input_trigger_handler")
+QDEF1(MP_QSTR_adc_read_pin_value, 178, 18, "adc_read_pin_value")
+QDEF1(MP_QSTR_adc_read_pin_voltage, 251, 20, "adc_read_pin_voltage")
+QDEF1(MP_QSTR_add, 68, 3, "add")
+QDEF1(MP_QSTR_bin, 224, 3, "bin")
+QDEF1(MP_QSTR_bound_method, 151, 12, "bound_method")
+QDEF1(MP_QSTR_canvas_clear, 107, 12, "canvas_clear")
+QDEF1(MP_QSTR_canvas_draw_box, 56, 15, "canvas_draw_box")
+QDEF1(MP_QSTR_canvas_draw_circle, 63, 18, "canvas_draw_circle")
+QDEF1(MP_QSTR_canvas_draw_disc, 176, 16, "canvas_draw_disc")
+QDEF1(MP_QSTR_canvas_draw_dot, 178, 15, "canvas_draw_dot")
+QDEF1(MP_QSTR_canvas_draw_frame, 240, 17, "canvas_draw_frame")
+QDEF1(MP_QSTR_canvas_draw_line, 67, 16, "canvas_draw_line")
+QDEF1(MP_QSTR_canvas_height, 77, 13, "canvas_height")
+QDEF1(MP_QSTR_canvas_set_color, 178, 16, "canvas_set_color")
+QDEF1(MP_QSTR_canvas_set_font, 124, 15, "canvas_set_font")
+QDEF1(MP_QSTR_canvas_set_text, 114, 15, "canvas_set_text")
+QDEF1(MP_QSTR_canvas_set_text_align, 192, 21, "canvas_set_text_align")
+QDEF1(MP_QSTR_canvas_text_height, 239, 18, "canvas_text_height")
+QDEF1(MP_QSTR_canvas_text_width, 86, 17, "canvas_text_width")
+QDEF1(MP_QSTR_canvas_update, 131, 13, "canvas_update")
+QDEF1(MP_QSTR_canvas_width, 180, 12, "canvas_width")
+QDEF1(MP_QSTR_closure, 116, 7, "closure")
+QDEF1(MP_QSTR_debug, 212, 5, "debug")
+QDEF1(MP_QSTR_decode, 169, 6, "decode")
+QDEF1(MP_QSTR_default, 206, 7, "default")
+QDEF1(MP_QSTR_delattr, 219, 7, "delattr")
+QDEF1(MP_QSTR_dialog_message_clear, 95, 20, "dialog_message_clear")
+QDEF1(MP_QSTR_dialog_message_set_button, 45, 25, "dialog_message_set_button")
+QDEF1(MP_QSTR_dialog_message_set_header, 196, 25, "dialog_message_set_header")
+QDEF1(MP_QSTR_dialog_message_set_text, 198, 23, "dialog_message_set_text")
+QDEF1(MP_QSTR_dialog_message_show, 69, 19, "dialog_message_show")
+QDEF1(MP_QSTR_dict_view, 45, 9, "dict_view")
+QDEF1(MP_QSTR_difference, 114, 10, "difference")
+QDEF1(MP_QSTR_difference_update, 156, 17, "difference_update")
+QDEF1(MP_QSTR_discard, 15, 7, "discard")
+QDEF1(MP_QSTR_encode, 67, 6, "encode")
+QDEF1(MP_QSTR_errno, 193, 5, "errno")
+QDEF1(MP_QSTR_error, 189, 5, "error")
+QDEF1(MP_QSTR_filter, 37, 6, "filter")
+QDEF1(MP_QSTR_flipperzero, 179, 11, "flipperzero")
+QDEF1(MP_QSTR_float, 53, 5, "float")
+QDEF1(MP_QSTR_flush, 97, 5, "flush")
+QDEF1(MP_QSTR_function, 39, 8, "function")
+QDEF1(MP_QSTR_generator, 150, 9, "generator")
+QDEF1(MP_QSTR_getEffectiveLevel, 40, 17, "getEffectiveLevel")
+QDEF1(MP_QSTR_getrandbits, 102, 11, "getrandbits")
+QDEF1(MP_QSTR_gpio_deinit_pin, 120, 15, "gpio_deinit_pin")
+QDEF1(MP_QSTR_gpio_get_pin, 85, 12, "gpio_get_pin")
+QDEF1(MP_QSTR_gpio_init_pin, 185, 13, "gpio_init_pin")
+QDEF1(MP_QSTR_gpio_set_pin, 65, 12, "gpio_set_pin")
+QDEF1(MP_QSTR_hex, 112, 3, "hex")
+QDEF1(MP_QSTR_info, 235, 4, "info")
+QDEF1(MP_QSTR_infrared_is_busy, 195, 16, "infrared_is_busy")
+QDEF1(MP_QSTR_infrared_receive, 112, 16, "infrared_receive")
+QDEF1(MP_QSTR_infrared_transmit, 81, 17, "infrared_transmit")
+QDEF1(MP_QSTR_intersection, 40, 12, "intersection")
+QDEF1(MP_QSTR_intersection_update, 6, 19, "intersection_update")
+QDEF1(MP_QSTR_io, 35, 2, "io")
+QDEF1(MP_QSTR_isdisjoint, 247, 10, "isdisjoint")
+QDEF1(MP_QSTR_issubset, 185, 8, "issubset")
+QDEF1(MP_QSTR_issuperset, 252, 10, "issuperset")
+QDEF1(MP_QSTR_iterator, 71, 8, "iterator")
+QDEF1(MP_QSTR_level, 211, 5, "level")
+QDEF1(MP_QSTR_light_blink_set_color, 217, 21, "light_blink_set_color")
+QDEF1(MP_QSTR_light_blink_start, 121, 17, "light_blink_start")
+QDEF1(MP_QSTR_light_blink_stop, 33, 16, "light_blink_stop")
+QDEF1(MP_QSTR_light_set, 134, 9, "light_set")
+QDEF1(MP_QSTR_log, 33, 3, "log")
+QDEF1(MP_QSTR_logging, 70, 7, "logging")
+QDEF1(MP_QSTR_max, 177, 3, "max")
+QDEF1(MP_QSTR_maximum_space_recursion_space_depth_space_exceeded, 115, 32, "maximum recursion depth exceeded")
+QDEF1(MP_QSTR_min, 175, 3, "min")
+QDEF1(MP_QSTR_module, 191, 6, "module")
+QDEF1(MP_QSTR_name, 162, 4, "name")
+QDEF1(MP_QSTR_oct, 253, 3, "oct")
+QDEF1(MP_QSTR_on_gpio, 106, 7, "on_gpio")
+QDEF1(MP_QSTR_on_input, 141, 8, "on_input")
+QDEF1(MP_QSTR_pwm_is_running, 82, 14, "pwm_is_running")
+QDEF1(MP_QSTR_pwm_start, 240, 9, "pwm_start")
+QDEF1(MP_QSTR_pwm_stop, 200, 8, "pwm_stop")
+QDEF1(MP_QSTR_random, 190, 6, "random")
+QDEF1(MP_QSTR_rb, 213, 2, "rb")
+QDEF1(MP_QSTR_readable, 93, 8, "readable")
+QDEF1(MP_QSTR_readlines, 106, 9, "readlines")
+QDEF1(MP_QSTR_reversed, 161, 8, "reversed")
+QDEF1(MP_QSTR_seed, 146, 4, "seed")
+QDEF1(MP_QSTR_seek, 157, 4, "seek")
+QDEF1(MP_QSTR_setLevel, 81, 8, "setLevel")
+QDEF1(MP_QSTR_sleep, 234, 5, "sleep")
+QDEF1(MP_QSTR_sleep_ms, 11, 8, "sleep_ms")
+QDEF1(MP_QSTR_sleep_us, 19, 8, "sleep_us")
+QDEF1(MP_QSTR_speaker_set_volume, 116, 18, "speaker_set_volume")
+QDEF1(MP_QSTR_speaker_start, 1, 13, "speaker_start")
+QDEF1(MP_QSTR_speaker_stop, 153, 12, "speaker_stop")
+QDEF1(MP_QSTR_symmetric_difference, 206, 20, "symmetric_difference")
+QDEF1(MP_QSTR_symmetric_difference_update, 96, 27, "symmetric_difference_update")
+QDEF1(MP_QSTR_tell, 20, 4, "tell")
+QDEF1(MP_QSTR_ticks_add, 157, 9, "ticks_add")
+QDEF1(MP_QSTR_ticks_cpu, 26, 9, "ticks_cpu")
+QDEF1(MP_QSTR_ticks_diff, 177, 10, "ticks_diff")
+QDEF1(MP_QSTR_ticks_ms, 66, 8, "ticks_ms")
+QDEF1(MP_QSTR_ticks_us, 90, 8, "ticks_us")
+QDEF1(MP_QSTR_time, 240, 4, "time")
+QDEF1(MP_QSTR_time_ns, 114, 7, "time_ns")
+QDEF1(MP_QSTR_trace, 164, 5, "trace")
+QDEF1(MP_QSTR_uart_open, 220, 9, "uart_open")
+QDEF1(MP_QSTR_union, 246, 5, "union")
+QDEF1(MP_QSTR_vibro_set, 216, 9, "vibro_set")
+QDEF1(MP_QSTR_warn, 175, 4, "warn")
+QDEF1(MP_QSTR_writable, 247, 8, "writable")
+QDEF1(MP_QSTR__brace_open__colon__hash_b_brace_close_, 88, 5, "{:#b}")

+ 3 - 0
mp_flipper/lib/micropython/genhdr/root_pointers.h

@@ -0,0 +1,3 @@
+// Automatically generated by make_root_pointers.py.
+
+mp_sched_item_t sched_queue[(4)];

+ 57 - 0
mp_flipper/lib/micropython/mp_flipper_compiler.c

@@ -0,0 +1,57 @@
+#include <string.h>
+
+#include "py/mperrno.h"
+#include "py/compile.h"
+#include "py/runtime.h"
+#include "py/persistentcode.h"
+#include "py/gc.h"
+#include "py/stackctrl.h"
+#include "shared/runtime/gchelper.h"
+
+#include "mp_flipper_runtime.h"
+#include "mp_flipper_compiler.h"
+#include "mp_flipper_halport.h"
+
+// Compile and execute a Python source string in the global context.
+// Uncaught exceptions are printed to mp_plat_print instead of propagating
+// to the caller.
+void mp_flipper_exec_str(const char* code) {
+    nlr_buf_t nlr;
+
+    if(nlr_push(&nlr) == 0) {
+        // Compile, parse and execute the given string
+        mp_lexer_t* lex = mp_lexer_new_from_str_len(MP_QSTR__lt_stdin_gt_, code, strlen(code), 0);
+        mp_store_global(MP_QSTR___file__, MP_OBJ_NEW_QSTR(lex->source_name));
+        mp_parse_tree_t parse_tree = mp_parse(lex, MP_PARSE_FILE_INPUT);
+        // third argument is presumably the REPL flag (true here, unlike the
+        // file variant below) — TODO confirm against py/compile.h
+        mp_obj_t module_fun = mp_compile(&parse_tree, lex->source_name, true);
+        mp_call_function_0(module_fun);
+        nlr_pop();
+    } else {
+        // Uncaught exception: print it out.
+        mp_obj_print_exception(&mp_plat_print, (mp_obj_t)nlr.ret_val);
+    }
+}
+
+// Compile and execute the Python file at file_path in the global context.
+// A missing file raises OSError(ENOENT); uncaught exceptions are printed
+// to mp_plat_print instead of propagating to the caller.
+void mp_flipper_exec_py_file(const char* file_path) {
+    nlr_buf_t nlr;
+
+    if(nlr_push(&nlr) == 0) {
+        do {
+            // check if file exists
+            if(mp_flipper_import_stat(file_path) == MP_FLIPPER_IMPORT_STAT_NO_EXIST) {
+                mp_raise_OSError_with_filename(MP_ENOENT, file_path);
+
+                // NOTE(review): mp_raise_* performs a non-local jump into the
+                // nlr handler, so this break looks unreachable — confirm.
+                break;
+            }
+
+            // Compile, parse and execute the given file
+            mp_lexer_t* lex = mp_lexer_new_from_file(qstr_from_str(file_path));
+            mp_store_global(MP_QSTR___file__, MP_OBJ_NEW_QSTR(lex->source_name));
+            mp_parse_tree_t parse_tree = mp_parse(lex, MP_PARSE_FILE_INPUT);
+            mp_obj_t module_fun = mp_compile(&parse_tree, lex->source_name, false);
+            mp_call_function_0(module_fun);
+        } while(false);
+
+        nlr_pop();
+    } else {
+        // Uncaught exception: print it out.
+        mp_obj_print_exception(&mp_plat_print, (mp_obj_t)nlr.ret_val);
+    }
+}

+ 9 - 0
mp_flipper/lib/micropython/mp_flipper_compiler.h

@@ -0,0 +1,9 @@
+#pragma once
+
+#include <stddef.h>
+
+#include "mp_flipper_runtime.h"
+
+// Execute a Python source string (see mp_flipper_compiler.c).
+void mp_flipper_exec_str(const char* str);
+// Execute a .py file from the file system.
+void mp_flipper_exec_py_file(const char* file_path);
+// Compile a .py file to .mpy bytecode (implementation not in this file).
+void mp_flipper_compile_and_save_file(const char* py_file_path, const char* mpy_file_path);

+ 16 - 0
mp_flipper/lib/micropython/mp_flipper_file_reader.c

@@ -0,0 +1,16 @@
+#include "py/reader.h"
+#include "py/qstr.h"
+
+#include "mp_flipper_file_reader.h"
+
+// Adapter between the platform file reader and MicroPython's mp_reader_t:
+// translates the platform EOF sentinel into MP_READER_EOF, otherwise
+// passes the byte through unchanged.
+static mp_uint_t mp_flipper_file_reader_read_internal(void* data) {
+    uint32_t character = mp_flipper_file_reader_read(data);
+
+    return character == MP_FLIPPER_FILE_READER_EOF ? MP_READER_EOF : character;
+}
+
+// MicroPython port hook: create a byte reader over a file, backed by the
+// platform-supplied reader context (allocated by the embedding application).
+void mp_reader_new_file(mp_reader_t* reader, qstr filename) {
+    reader->data = mp_flipper_file_reader_context_alloc(qstr_str(filename));
+    reader->readbyte = mp_flipper_file_reader_read_internal;
+    reader->close = mp_flipper_file_reader_close;
+}

+ 12 - 0
mp_flipper/lib/micropython/mp_flipper_file_reader.h

@@ -0,0 +1,12 @@
+#pragma once
+
+#include <stddef.h>
+#include <stdint.h>
+
+// Sentinel returned by mp_flipper_file_reader_read at end of file.
+#define MP_FLIPPER_FILE_READER_EOF ((uint32_t)(-1))
+
+// Platform hooks implemented by the embedding application: allocate a
+// reader context for a file, read one byte (or EOF), and release it.
+void* mp_flipper_file_reader_context_alloc(const char* filename);
+
+uint32_t mp_flipper_file_reader_read(void* data);
+
+void mp_flipper_file_reader_close(void* data);

+ 284 - 0
mp_flipper/lib/micropython/mp_flipper_fileio.c

@@ -0,0 +1,284 @@
+#include <stdio.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+
+#include "py/obj.h"
+#include "py/stream.h"
+#include "py/runtime.h"
+#include "py/mperrno.h"
+
+#include "mp_flipper_fileio.h"
+
+extern const mp_obj_type_t mp_flipper_binary_fileio_type;
+extern const mp_obj_type_t mp_flipper_text_fileio_type;
+
+// Backing object for io file instances: wraps an opaque platform file
+// handle plus the metadata exposed via attribute access below.
+typedef struct _mp_flipper_fileio_file_descriptor_t {
+    mp_obj_base_t base;
+    void* handle; // opaque platform handle; NULL once closed
+    mp_obj_t name; // file name as a str object
+    uint8_t access_mode; // MP_FLIPPER_FILE_ACCESS_MODE_* bitmask
+    uint8_t open_mode; // MP_FLIPPER_FILE_OPEN_MODE_* value
+} mp_flipper_fileio_file_descriptor_t;
+
+// Wrap an already-open platform handle in a file object of the text or
+// binary type. Allocated with a finaliser so the GC closes the file via
+// __del__ if the script forgets to.
+void* mp_flipper_file_new_file_descriptor(void* handle, const char* name, uint8_t access_mode, uint8_t open_mode, bool is_text) {
+    mp_flipper_fileio_file_descriptor_t* fd = mp_obj_malloc_with_finaliser(
+        mp_flipper_fileio_file_descriptor_t, is_text ? &mp_flipper_text_fileio_type : &mp_flipper_binary_fileio_type);
+
+    fd->handle = handle;
+    fd->name = mp_obj_new_str(name, strlen(name));
+    fd->access_mode = access_mode;
+    fd->open_mode = open_mode;
+
+    return fd;
+}
+
+// Stream read handler: delegates to the platform layer; reading from a
+// closed file reports EIO as a stream error.
+static mp_uint_t mp_flipper_fileio_read(mp_obj_t self, void* buf, mp_uint_t size, int* errcode) {
+    mp_flipper_fileio_file_descriptor_t* fd = MP_OBJ_TO_PTR(self);
+
+    if(fd->handle == NULL) {
+        *errcode = MP_EIO;
+
+        return MP_STREAM_ERROR;
+    }
+
+    return mp_flipper_file_read(fd->handle, buf, size, errcode);
+}
+
+// Stream write handler: delegates to the platform layer; writing to a
+// closed file reports EIO as a stream error.
+static mp_uint_t mp_flipper_fileio_write(mp_obj_t self, const void* buf, mp_uint_t size, int* errcode) {
+    mp_flipper_fileio_file_descriptor_t* fd = MP_OBJ_TO_PTR(self);
+
+    if(fd->handle == NULL) {
+        *errcode = MP_EIO;
+
+        return MP_STREAM_ERROR;
+    }
+
+    return mp_flipper_file_write(fd->handle, buf, size, errcode);
+}
+
+// Stream ioctl handler shared by both file types: implements SEEK, FLUSH
+// and CLOSE. Any other request reports EINVAL.
+static mp_uint_t mp_flipper_fileio_ioctl(mp_obj_t self, mp_uint_t request, uintptr_t arg, int* errcode) {
+    mp_flipper_fileio_file_descriptor_t* fd = MP_OBJ_TO_PTR(self);
+
+    // already closed: every request is a silent no-op reporting success
+    if(fd->handle == NULL) {
+        return 0;
+    }
+
+    if(request == MP_STREAM_SEEK) {
+        struct mp_stream_seek_t* seek = (struct mp_stream_seek_t*)(uintptr_t)arg;
+        size_t position;
+        // NOTE(review): `success` is declared but never used — drop it?
+        bool success;
+
+        // NOTE(review): no default case — an unknown whence value silently
+        // leaves the position unchanged instead of reporting EINVAL.
+        switch(seek->whence) {
+        case MP_SEEK_SET:
+            mp_flipper_file_seek(fd->handle, seek->offset);
+
+            break;
+
+        case MP_SEEK_CUR:
+            // relative seek: current position plus (possibly negative) offset
+            position = mp_flipper_file_tell(fd->handle);
+
+            mp_flipper_file_seek(fd->handle, position + seek->offset);
+
+            break;
+
+        case MP_SEEK_END:
+            // seek relative to end of file
+            position = mp_flipper_file_size(fd->handle);
+
+            mp_flipper_file_seek(fd->handle, position + seek->offset);
+
+            break;
+        }
+
+        // report the resulting absolute position back to the caller
+        seek->offset = mp_flipper_file_tell(fd->handle);
+
+        return 0;
+    }
+
+    if(request == MP_STREAM_FLUSH) {
+        if(!mp_flipper_file_sync(fd->handle)) {
+            *errcode = MP_EIO;
+
+            return MP_STREAM_ERROR;
+        }
+
+        return 0;
+    }
+
+    if(request == MP_STREAM_CLOSE) {
+        if(!mp_flipper_file_close(fd->handle)) {
+            *errcode = MP_EIO;
+
+            // mark closed even on failure so later calls become no-ops
+            fd->handle = NULL;
+
+            return MP_STREAM_ERROR;
+        }
+
+        fd->handle = NULL;
+
+        return 0;
+    }
+
+    *errcode = MP_EINVAL;
+
+    return MP_STREAM_ERROR;
+}
+
+// Attribute handler exposing read-only `name`, `readable` and `writable`
+// attributes; all other loads fall through to the locals dict via
+// MP_OBJ_SENTINEL. Stores (dest[0] != MP_OBJ_NULL) are left unhandled,
+// which presumably raises AttributeError upstream — confirm.
+static void fileio_attr(mp_obj_t self_in, qstr attr, mp_obj_t* dest) {
+    mp_flipper_fileio_file_descriptor_t* fd = MP_OBJ_TO_PTR(self_in);
+
+    if(dest[0] == MP_OBJ_NULL) {
+        if(attr == MP_QSTR_name) {
+            dest[0] = fd->name;
+
+            return;
+        }
+
+        if(attr == MP_QSTR_readable) {
+            dest[0] = (fd->access_mode & MP_FLIPPER_FILE_ACCESS_MODE_READ) ? mp_const_true : mp_const_false;
+
+            return;
+        }
+
+        if(attr == MP_QSTR_writable) {
+            dest[0] = (fd->access_mode & MP_FLIPPER_FILE_ACCESS_MODE_WRITE) ? mp_const_true : mp_const_false;
+
+            return;
+        }
+
+        // not a special attribute: continue lookup in locals_dict
+        dest[1] = MP_OBJ_SENTINEL;
+    } else {
+        return;
+    }
+}
+
+// Method table shared by both file types; every entry delegates to the
+// generic MicroPython stream helpers (__del__ aliases close()).
+static const mp_map_elem_t mp_flipper_file_locals_dict_table[] = {
+    {MP_ROM_QSTR(MP_QSTR_read), MP_ROM_PTR(&mp_stream_read_obj)},
+    {MP_ROM_QSTR(MP_QSTR_readline), MP_ROM_PTR(&mp_stream_unbuffered_readline_obj)},
+    {MP_ROM_QSTR(MP_QSTR_readlines), MP_ROM_PTR(&mp_stream_unbuffered_readlines_obj)},
+    {MP_ROM_QSTR(MP_QSTR_write), MP_ROM_PTR(&mp_stream_write_obj)},
+    {MP_ROM_QSTR(MP_QSTR_flush), MP_ROM_PTR(&mp_stream_flush_obj)},
+    {MP_ROM_QSTR(MP_QSTR_close), MP_ROM_PTR(&mp_stream_close_obj)},
+    {MP_ROM_QSTR(MP_QSTR_seek), MP_ROM_PTR(&mp_stream_seek_obj)},
+    {MP_ROM_QSTR(MP_QSTR_tell), MP_ROM_PTR(&mp_stream_tell_obj)},
+    {MP_ROM_QSTR(MP_QSTR___del__), MP_ROM_PTR(&mp_stream_close_obj)},
+    {MP_ROM_QSTR(MP_QSTR___enter__), MP_ROM_PTR(&mp_identity_obj)},
+    {MP_ROM_QSTR(MP_QSTR___exit__), MP_ROM_PTR(&mp_stream___exit___obj)},
+};
+static MP_DEFINE_CONST_DICT(mp_flipper_file_locals_dict, mp_flipper_file_locals_dict_table);
+
+// Stream protocol for files opened in binary mode ('b').
+static const mp_stream_p_t mp_flipper_binary_fileio_stream_p = {
+    .read = mp_flipper_fileio_read,
+    .write = mp_flipper_fileio_write,
+    .ioctl = mp_flipper_fileio_ioctl,
+    .is_text = false,
+};
+
+MP_DEFINE_CONST_OBJ_TYPE(
+    mp_flipper_binary_fileio_type,
+    MP_QSTR_BinaryFileIO,
+    MP_TYPE_FLAG_ITER_IS_STREAM,
+    protocol,
+    &mp_flipper_binary_fileio_stream_p,
+    attr,
+    fileio_attr,
+    locals_dict,
+    &mp_flipper_file_locals_dict);
+
+// Stream protocol for files opened in text mode ('t', the default).
+static const mp_stream_p_t mp_flipper_text_fileio_stream_p = {
+    .read = mp_flipper_fileio_read,
+    .write = mp_flipper_fileio_write,
+    .ioctl = mp_flipper_fileio_ioctl,
+    .is_text = true,
+};
+
+MP_DEFINE_CONST_OBJ_TYPE(
+    mp_flipper_text_fileio_type,
+    MP_QSTR_TextFileIO,
+    MP_TYPE_FLAG_ITER_IS_STREAM,
+    protocol,
+    &mp_flipper_text_fileio_stream_p,
+    attr,
+    fileio_attr,
+    locals_dict,
+    &mp_flipper_file_locals_dict);
+
+// Implementation of the builtin open(file, mode="rt"). Supported modes:
+// 'r' / 'w' with optional 'b' / 't' and '+'. Any other character raises
+// OSError(EINVAL); a file that cannot be opened raises OSError(ENOENT).
+// NOTE(review): 'a' (append) and 'x' (exclusive) are not supported.
+mp_obj_t mp_flipper_builtin_open(size_t n_args, const mp_obj_t* args, mp_map_t* kwargs) {
+    const char* file_name = mp_obj_str_get_str(args[0]);
+
+    // defaults correspond to mode "rt"
+    uint8_t access_mode = MP_FLIPPER_FILE_ACCESS_MODE_READ;
+    uint8_t open_mode = MP_FLIPPER_FILE_OPEN_MODE_OPEN_EXIST;
+    bool is_text = true;
+
+    if(n_args > 1) {
+        size_t len;
+
+        const char* mode = mp_obj_str_get_data(args[1], &len);
+
+        for(size_t i = 0; i < len; i++) {
+            if(i == 0 && mode[i] == 'r') {
+                access_mode = MP_FLIPPER_FILE_ACCESS_MODE_READ;
+                open_mode = MP_FLIPPER_FILE_OPEN_MODE_OPEN_EXIST;
+
+                continue;
+            }
+
+            if(i == 0 && mode[i] == 'w') {
+                access_mode = MP_FLIPPER_FILE_ACCESS_MODE_WRITE;
+                open_mode = MP_FLIPPER_FILE_OPEN_MODE_CREATE_ALWAYS;
+
+                continue;
+            }
+
+            if(i == 1 && mode[i] == 'b') {
+                is_text = false;
+
+                continue;
+            }
+
+            if(i == 1 && mode[i] == 't') {
+                is_text = true;
+
+                continue;
+            }
+
+            // '+' upgrades to read/write; NOTE(review): it also switches the
+            // open mode to OPEN_APPEND for both "r+" and "w+" — confirm that
+            // this matches the intended CPython semantics.
+            if(i >= 1 && mode[i] == '+') {
+                access_mode = MP_FLIPPER_FILE_ACCESS_MODE_READ | MP_FLIPPER_FILE_ACCESS_MODE_WRITE;
+                open_mode = MP_FLIPPER_FILE_OPEN_MODE_OPEN_APPEND;
+
+                continue;
+            }
+
+            mp_raise_OSError(MP_EINVAL);
+        }
+    }
+
+    // NOTE(review): the descriptor is allocated before the NULL-handle check;
+    // on failure a wrapper with a NULL handle is left for the GC (harmless,
+    // since close on a NULL handle is a no-op, but checking first would avoid
+    // the dead allocation).
+    void* handle = mp_flipper_file_open(file_name, access_mode, open_mode);
+    void* fd = mp_flipper_file_new_file_descriptor(handle, file_name, access_mode, open_mode, is_text);
+
+    if(handle == NULL) {
+        mp_raise_OSError(MP_ENOENT);
+    }
+
+    return MP_OBJ_FROM_PTR(fd);
+}
+MP_DEFINE_CONST_FUN_OBJ_KW(mp_flipper_builtin_open_obj, 1, mp_flipper_builtin_open);
+
+// Globals of the `io` module: open(), the two file types, and the
+// SEEK_* constants for seek()'s whence argument.
+static const mp_rom_map_elem_t mp_module_io_globals_table[] = {
+    {MP_ROM_QSTR(MP_QSTR___name__), MP_ROM_QSTR(MP_QSTR_io)},
+    {MP_ROM_QSTR(MP_QSTR_open), MP_ROM_PTR(&mp_flipper_builtin_open_obj)},
+    {MP_ROM_QSTR(MP_QSTR_BinaryFileIO), MP_ROM_PTR(&mp_flipper_binary_fileio_type)},
+    {MP_ROM_QSTR(MP_QSTR_TextFileIO), MP_ROM_PTR(&mp_flipper_text_fileio_type)},
+    {MP_ROM_QSTR(MP_QSTR_SEEK_SET), MP_ROM_INT(MP_SEEK_SET)},
+    {MP_ROM_QSTR(MP_QSTR_SEEK_CUR), MP_ROM_INT(MP_SEEK_CUR)},
+    {MP_ROM_QSTR(MP_QSTR_SEEK_END), MP_ROM_INT(MP_SEEK_END)},
+};
+
+static MP_DEFINE_CONST_DICT(mp_module_io_globals, mp_module_io_globals_table);
+
+const mp_obj_module_t mp_module_io = {
+    .base = {&mp_type_module},
+    .globals = (mp_obj_dict_t*)&mp_module_io_globals,
+};
+
+MP_REGISTER_EXTENSIBLE_MODULE(MP_QSTR_io, mp_module_io);

+ 25 - 0
mp_flipper/lib/micropython/mp_flipper_fileio.h

@@ -0,0 +1,25 @@
+#pragma once
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdbool.h>
+
+// Access/open mode values are supplied by the embedding application so
+// this header carries no dependency on the platform's storage API.
+extern uint8_t MP_FLIPPER_FILE_ACCESS_MODE_READ;
+extern uint8_t MP_FLIPPER_FILE_ACCESS_MODE_WRITE;
+
+extern uint8_t MP_FLIPPER_FILE_OPEN_MODE_OPEN_EXIST;
+extern uint8_t MP_FLIPPER_FILE_OPEN_MODE_OPEN_ALWAYS;
+extern uint8_t MP_FLIPPER_FILE_OPEN_MODE_OPEN_APPEND;
+extern uint8_t MP_FLIPPER_FILE_OPEN_MODE_CREATE_NEW;
+extern uint8_t MP_FLIPPER_FILE_OPEN_MODE_CREATE_ALWAYS;
+
+// Platform file hooks implemented by the embedding application. `handle`
+// is the opaque value returned by mp_flipper_file_open.
+void* mp_flipper_file_open(const char* name, uint8_t access_mode, uint8_t open_mode);
+void* mp_flipper_file_new_file_descriptor(void* handle, const char* name, uint8_t access_mode, uint8_t open_mode, bool is_text);
+bool mp_flipper_file_close(void* handle);
+size_t mp_flipper_file_seek(void* handle, uint32_t offset);
+size_t mp_flipper_file_tell(void* handle);
+size_t mp_flipper_file_size(void* handle);
+bool mp_flipper_file_sync(void* handle);
+bool mp_flipper_file_eof(void* handle);
+size_t mp_flipper_file_read(void* handle, void* buffer, size_t size, int* errcode);
+size_t mp_flipper_file_write(void* handle, const void* buffer, size_t size, int* errcode);

+ 36 - 0
mp_flipper/lib/micropython/mp_flipper_halport.c

@@ -0,0 +1,36 @@
+#include "py/mperrno.h"
+#include "py/obj.h"
+#include "py/runtime.h"
+#include <stdio.h>
+
+#include "py/mphal.h"
+#include "py/builtin.h"
+
+#include "mp_flipper_halport.h"
+#include "mp_flipper_fileio.h"
+
+// MicroPython HAL hook: forward stdout strings to the platform layer.
+void mp_hal_stdout_tx_str(const char* str) {
+    mp_flipper_stdout_tx_str(str);
+}
+
+// MicroPython HAL hook: forward length-delimited (cooked) stdout output.
+void mp_hal_stdout_tx_strn_cooked(const char* str, size_t len) {
+    mp_flipper_stdout_tx_strn_cooked(str, len);
+}
+
+// MicroPython import hook: map the platform stat result onto the
+// corresponding mp_import_stat_t value.
+mp_import_stat_t mp_import_stat(const char* path) {
+    mp_flipper_import_stat_t stat = mp_flipper_import_stat(path);
+
+    if(stat == MP_FLIPPER_IMPORT_STAT_FILE) {
+        return MP_IMPORT_STAT_FILE;
+    }
+
+    if(stat == MP_FLIPPER_IMPORT_STAT_DIR) {
+        return MP_IMPORT_STAT_DIR;
+    }
+
+    return MP_IMPORT_STAT_NO_EXIST;
+}
+
+// GC hook: largest additional heap split the platform can provide.
+size_t gc_get_max_new_split(void) {
+    return mp_flipper_gc_get_max_new_split();
+}

+ 19 - 0
mp_flipper/lib/micropython/mp_flipper_halport.h

@@ -0,0 +1,19 @@
+#pragma once
+
+#include "mpconfigport.h"
+
+// Define so there's no dependency on extmod/virtpin.h
+#define mp_hal_pin_obj_t
+
+// File system stat result, mirroring mp_import_stat_t without pulling
+// MicroPython headers into the platform side.
+typedef enum {
+    MP_FLIPPER_IMPORT_STAT_NO_EXIST,
+    MP_FLIPPER_IMPORT_STAT_FILE,
+    MP_FLIPPER_IMPORT_STAT_DIR,
+} mp_flipper_import_stat_t;
+
+// Platform hooks implemented by the embedding application.
+void mp_flipper_stdout_tx_str(const char* str);
+void mp_flipper_stdout_tx_strn_cooked(const char* str, size_t len);
+
+mp_flipper_import_stat_t mp_flipper_import_stat(const char* path);
+
+// NOTE(review): declared with empty parentheses — in C this is an
+// unprototyped declaration; consider (void) for a proper prototype.
+size_t mp_flipper_gc_get_max_new_split();

+ 110 - 0
mp_flipper/lib/micropython/mp_flipper_logging.c

@@ -0,0 +1,110 @@
+#include "py/objint.h"
+#include <stdio.h>
+#include <stdbool.h>
+#include <stdint.h>
+
+#include "py/obj.h"
+
+#include "mp_flipper_logging.h"
+
+// Current log level as a mutable int object; it is also exposed directly
+// as `logging.level` in the module globals, so setLevel() updates what
+// scripts read from that attribute.
+static struct _mp_obj_int_t mp_flipper_log_level_obj = {&mp_type_int, MP_FLIPPER_LOG_LEVEL_INFO};
+
+// Shared implementation of all logging functions: drops messages above
+// the effective level (higher value = more verbose), applies printf-style
+// `%` formatting when extra arguments are given, then hands the final
+// string to the platform logger.
+static mp_obj_t mp_flipper_logging_log_internal(uint8_t level, size_t n_args, const mp_obj_t* args) {
+    if(n_args < 1 || level > mp_flipper_log_get_effective_level()) {
+        return mp_const_none;
+    }
+
+    mp_obj_t message = args[0];
+
+    if(n_args > 1) {
+        // format like `args[0] % (args[1:],)`
+        mp_obj_t values = mp_obj_new_tuple(n_args - 1, &args[1]);
+
+        message = mp_obj_str_binary_op(MP_BINARY_OP_MODULO, args[0], values);
+    }
+
+    mp_flipper_log(level, mp_obj_str_get_str(message));
+
+    return mp_const_none;
+}
+
+// logging.setLevel(level): update the module level; values outside the
+// NONE..TRACE range are silently ignored.
+static mp_obj_t mp_flipper_logging_set_level(mp_obj_t raw_level) {
+    uint8_t level = mp_obj_get_int(raw_level);
+
+    if(level >= MP_FLIPPER_LOG_LEVEL_NONE && level <= MP_FLIPPER_LOG_LEVEL_TRACE) {
+        mp_flipper_log_level_obj.val = level;
+    }
+
+    return mp_const_none;
+}
+static MP_DEFINE_CONST_FUN_OBJ_1(mp_flipper_logging_set_level_obj, mp_flipper_logging_set_level);
+
+// logging.getEffectiveLevel(): query the platform-side effective level
+// (may presumably differ from the module-local `level` — confirm).
+// NOTE(review): empty parentheses — prefer (void) for a C prototype.
+static mp_obj_t mp_flipper_logging_get_effective_level() {
+    uint8_t level = mp_flipper_log_get_effective_level();
+
+    return mp_obj_new_int_from_uint(level);
+}
+static MP_DEFINE_CONST_FUN_OBJ_0(mp_flipper_logging_get_effective_level_obj, mp_flipper_logging_get_effective_level);
+
+// logging.log(level, msg, *args): explicit-level entry point; args[0] is
+// the level, the rest is forwarded to the shared implementation.
+static mp_obj_t mp_flipper_logging_log(size_t n_args, const mp_obj_t* args) {
+    if(n_args < 2) {
+        return mp_const_none;
+    }
+
+    uint8_t level = mp_obj_get_int(args[0]);
+
+    return mp_flipper_logging_log_internal(level, n_args - 1, &args[1]);
+}
+static MP_DEFINE_CONST_FUN_OBJ_VAR(mp_flipper_logging_log_obj, 2, mp_flipper_logging_log);
+
+// Fixed-level convenience wrappers: trace/debug/info/warn/error all
+// forward to the shared implementation with their level baked in.
+static mp_obj_t mp_flipper_logging_trace(size_t n_args, const mp_obj_t* args) {
+    return mp_flipper_logging_log_internal(MP_FLIPPER_LOG_LEVEL_TRACE, n_args, args);
+}
+static MP_DEFINE_CONST_FUN_OBJ_VAR(mp_flipper_logging_trace_obj, 1, mp_flipper_logging_trace);
+
+static mp_obj_t mp_flipper_logging_debug(size_t n_args, const mp_obj_t* args) {
+    return mp_flipper_logging_log_internal(MP_FLIPPER_LOG_LEVEL_DEBUG, n_args, args);
+}
+static MP_DEFINE_CONST_FUN_OBJ_VAR(mp_flipper_logging_debug_obj, 1, mp_flipper_logging_debug);
+
+static mp_obj_t mp_flipper_logging_info(size_t n_args, const mp_obj_t* args) {
+    return mp_flipper_logging_log_internal(MP_FLIPPER_LOG_LEVEL_INFO, n_args, args);
+}
+static MP_DEFINE_CONST_FUN_OBJ_VAR(mp_flipper_logging_info_obj, 1, mp_flipper_logging_info);
+
+static mp_obj_t mp_flipper_logging_warn(size_t n_args, const mp_obj_t* args) {
+    return mp_flipper_logging_log_internal(MP_FLIPPER_LOG_LEVEL_WARN, n_args, args);
+}
+static MP_DEFINE_CONST_FUN_OBJ_VAR(mp_flipper_logging_warn_obj, 1, mp_flipper_logging_warn);
+
+static mp_obj_t mp_flipper_logging_error(size_t n_args, const mp_obj_t* args) {
+    return mp_flipper_logging_log_internal(MP_FLIPPER_LOG_LEVEL_ERROR, n_args, args);
+}
+static MP_DEFINE_CONST_FUN_OBJ_VAR(mp_flipper_logging_error_obj, 1, mp_flipper_logging_error);
+
+// Globals of the `logging` module: functions, the mutable `level` object,
+// and one constant per log level.
+static const mp_rom_map_elem_t mp_module_logging_globals_table[] = {
+    {MP_ROM_QSTR(MP_QSTR___name__), MP_ROM_QSTR(MP_QSTR_logging)},
+    {MP_ROM_QSTR(MP_QSTR_level), MP_ROM_PTR(&mp_flipper_log_level_obj)},
+    {MP_ROM_QSTR(MP_QSTR_setLevel), MP_ROM_PTR(&mp_flipper_logging_set_level_obj)},
+    {MP_ROM_QSTR(MP_QSTR_getEffectiveLevel), MP_ROM_PTR(&mp_flipper_logging_get_effective_level_obj)},
+    {MP_ROM_QSTR(MP_QSTR_trace), MP_ROM_PTR(&mp_flipper_logging_trace_obj)},
+    {MP_ROM_QSTR(MP_QSTR_debug), MP_ROM_PTR(&mp_flipper_logging_debug_obj)},
+    {MP_ROM_QSTR(MP_QSTR_info), MP_ROM_PTR(&mp_flipper_logging_info_obj)},
+    {MP_ROM_QSTR(MP_QSTR_warn), MP_ROM_PTR(&mp_flipper_logging_warn_obj)},
+    {MP_ROM_QSTR(MP_QSTR_error), MP_ROM_PTR(&mp_flipper_logging_error_obj)},
+    {MP_ROM_QSTR(MP_QSTR_log), MP_ROM_PTR(&mp_flipper_logging_log_obj)},
+    {MP_ROM_QSTR(MP_QSTR_TRACE), MP_ROM_INT(MP_FLIPPER_LOG_LEVEL_TRACE)},
+    {MP_ROM_QSTR(MP_QSTR_DEBUG), MP_ROM_INT(MP_FLIPPER_LOG_LEVEL_DEBUG)},
+    {MP_ROM_QSTR(MP_QSTR_INFO), MP_ROM_INT(MP_FLIPPER_LOG_LEVEL_INFO)},
+    {MP_ROM_QSTR(MP_QSTR_WARN), MP_ROM_INT(MP_FLIPPER_LOG_LEVEL_WARN)},
+    {MP_ROM_QSTR(MP_QSTR_ERROR), MP_ROM_INT(MP_FLIPPER_LOG_LEVEL_ERROR)},
+    {MP_ROM_QSTR(MP_QSTR_NONE), MP_ROM_INT(MP_FLIPPER_LOG_LEVEL_NONE)},
+};
+
+static MP_DEFINE_CONST_DICT(mp_module_logging_globals, mp_module_logging_globals_table);
+
+const mp_obj_module_t mp_module_logging = {
+    .base = {&mp_type_module},
+    .globals = (mp_obj_dict_t*)&mp_module_logging_globals,
+};
+
+MP_REGISTER_EXTENSIBLE_MODULE(MP_QSTR_logging, mp_module_logging);

+ 14 - 0
mp_flipper/lib/micropython/mp_flipper_logging.h

@@ -0,0 +1,14 @@
+#pragma once
+
+#include <stdint.h>
+#include <stdbool.h>
+
+// Log levels: NOTE the ordering is inverted relative to CPython's logging
+// module — a HIGHER value means MORE verbose (TRACE=6 ... NONE=1).
+#define MP_FLIPPER_LOG_LEVEL_TRACE (6)
+#define MP_FLIPPER_LOG_LEVEL_DEBUG (5)
+#define MP_FLIPPER_LOG_LEVEL_INFO (4)
+#define MP_FLIPPER_LOG_LEVEL_WARN (3)
+#define MP_FLIPPER_LOG_LEVEL_ERROR (2)
+#define MP_FLIPPER_LOG_LEVEL_NONE (1)
+
+// Platform hooks implemented by the embedding application.
+// NOTE(review): empty parentheses — prefer (void) for a C prototype.
+uint8_t mp_flipper_log_get_effective_level();
+void mp_flipper_log(uint8_t raw_level, const char* message);

+ 1006 - 0
mp_flipper/lib/micropython/mp_flipper_modflipperzero.c

@@ -0,0 +1,1006 @@
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "py/mperrno.h"
+#include "py/objint.h"
+#include "py/objfun.h"
+#include "py/obj.h"
+#include "py/stream.h"
+#include "py/runtime.h"
+
+#include "mp_flipper_modflipperzero.h"
+
+// Set a LED channel to the given brightness.
+static mp_obj_t flipperzero_light_set(mp_obj_t light_obj, mp_obj_t brightness_obj) {
+    const mp_int_t channel = mp_obj_get_int(light_obj);
+    const mp_int_t level = mp_obj_get_int(brightness_obj);
+
+    mp_flipper_light_set(channel, level);
+
+    return mp_const_none;
+}
+static MP_DEFINE_CONST_FUN_OBJ_2(flipperzero_light_set_obj, flipperzero_light_set);
+
+// Start a LED blink sequence: (light, brightness, on_time, period).
+static mp_obj_t flipperzero_light_blink_start(size_t n_args, const mp_obj_t* args) {
+    if(n_args != 4) {
+        return mp_const_none;
+    }
+
+    const mp_int_t channel = mp_obj_get_int(args[0]);
+    const mp_int_t level = mp_obj_get_int(args[1]);
+    const mp_int_t on_time = mp_obj_get_int(args[2]);
+    const mp_int_t period = mp_obj_get_int(args[3]);
+
+    mp_flipper_light_blink_start(channel, level, on_time, period);
+
+    return mp_const_none;
+}
+static MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(flipperzero_light_blink_start_obj, 4, 4, flipperzero_light_blink_start);
+
+// Stop an ongoing LED blink sequence.
+static mp_obj_t flipperzero_light_blink_stop() {
+    mp_flipper_light_blink_stop();
+    return mp_const_none;
+}
+static MP_DEFINE_CONST_FUN_OBJ_0(flipperzero_light_blink_stop_obj, flipperzero_light_blink_stop);
+
+// Change the color of an ongoing blink sequence.
+static mp_obj_t flipperzero_light_blink_set_color(mp_obj_t light_obj) {
+    mp_flipper_light_blink_set_color(mp_obj_get_int(light_obj));
+
+    return mp_const_none;
+}
+static MP_DEFINE_CONST_FUN_OBJ_1(flipperzero_light_blink_set_color_obj, flipperzero_light_blink_set_color);
+
+// Switch the vibration motor on or off; returns the applied state.
+static mp_obj_t flipperzero_vibro_set(mp_obj_t state) {
+    const bool enabled = mp_obj_is_true(state);
+
+    mp_flipper_vibro(enabled);
+
+    return mp_obj_new_bool(enabled);
+}
+static MP_DEFINE_CONST_FUN_OBJ_1(flipperzero_vibro_set_obj, flipperzero_vibro_set);
+
+// Local mirror of MicroPython's float object layout, used to build the
+// compile-time constant float objects (note frequencies, volumes) below.
+// NOTE(review): assumes this matches the layout in py/objfloat.c — confirm.
+typedef struct _mp_obj_float_t {
+    mp_obj_base_t base;
+    mp_float_t value;
+} mp_obj_float_t;
+
+/*
+Python script for notes generation
+
+# coding: utf-8
+# Python script for notes generation
+
+from typing import List
+
+note_names: List = ['C', 'CS', 'D', 'DS', 'E', 'F', 'FS', 'G', 'GS', 'A', 'AS', 'B']
+
+for octave in range(9):
+    for name in note_names:
+        print("static const struct _mp_obj_float_t flipperzero_speaker_note_%s%s_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_%s%s};" % (name.lower(),octave,name,octave))
+*/
+
+// Generated table of ROM float objects, one per musical note C0..B8
+// (values come from MP_FLIPPER_SPEAKER_NOTE_*); see generator script above.
+static const struct _mp_obj_float_t flipperzero_speaker_note_c0_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_C0};
+static const struct _mp_obj_float_t flipperzero_speaker_note_cs0_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_CS0};
+static const struct _mp_obj_float_t flipperzero_speaker_note_d0_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_D0};
+static const struct _mp_obj_float_t flipperzero_speaker_note_ds0_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_DS0};
+static const struct _mp_obj_float_t flipperzero_speaker_note_e0_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_E0};
+static const struct _mp_obj_float_t flipperzero_speaker_note_f0_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_F0};
+static const struct _mp_obj_float_t flipperzero_speaker_note_fs0_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_FS0};
+static const struct _mp_obj_float_t flipperzero_speaker_note_g0_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_G0};
+static const struct _mp_obj_float_t flipperzero_speaker_note_gs0_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_GS0};
+static const struct _mp_obj_float_t flipperzero_speaker_note_a0_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_A0};
+static const struct _mp_obj_float_t flipperzero_speaker_note_as0_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_AS0};
+static const struct _mp_obj_float_t flipperzero_speaker_note_b0_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_B0};
+static const struct _mp_obj_float_t flipperzero_speaker_note_c1_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_C1};
+static const struct _mp_obj_float_t flipperzero_speaker_note_cs1_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_CS1};
+static const struct _mp_obj_float_t flipperzero_speaker_note_d1_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_D1};
+static const struct _mp_obj_float_t flipperzero_speaker_note_ds1_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_DS1};
+static const struct _mp_obj_float_t flipperzero_speaker_note_e1_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_E1};
+static const struct _mp_obj_float_t flipperzero_speaker_note_f1_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_F1};
+static const struct _mp_obj_float_t flipperzero_speaker_note_fs1_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_FS1};
+static const struct _mp_obj_float_t flipperzero_speaker_note_g1_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_G1};
+static const struct _mp_obj_float_t flipperzero_speaker_note_gs1_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_GS1};
+static const struct _mp_obj_float_t flipperzero_speaker_note_a1_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_A1};
+static const struct _mp_obj_float_t flipperzero_speaker_note_as1_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_AS1};
+static const struct _mp_obj_float_t flipperzero_speaker_note_b1_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_B1};
+static const struct _mp_obj_float_t flipperzero_speaker_note_c2_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_C2};
+static const struct _mp_obj_float_t flipperzero_speaker_note_cs2_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_CS2};
+static const struct _mp_obj_float_t flipperzero_speaker_note_d2_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_D2};
+static const struct _mp_obj_float_t flipperzero_speaker_note_ds2_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_DS2};
+static const struct _mp_obj_float_t flipperzero_speaker_note_e2_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_E2};
+static const struct _mp_obj_float_t flipperzero_speaker_note_f2_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_F2};
+static const struct _mp_obj_float_t flipperzero_speaker_note_fs2_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_FS2};
+static const struct _mp_obj_float_t flipperzero_speaker_note_g2_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_G2};
+static const struct _mp_obj_float_t flipperzero_speaker_note_gs2_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_GS2};
+static const struct _mp_obj_float_t flipperzero_speaker_note_a2_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_A2};
+static const struct _mp_obj_float_t flipperzero_speaker_note_as2_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_AS2};
+static const struct _mp_obj_float_t flipperzero_speaker_note_b2_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_B2};
+static const struct _mp_obj_float_t flipperzero_speaker_note_c3_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_C3};
+static const struct _mp_obj_float_t flipperzero_speaker_note_cs3_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_CS3};
+static const struct _mp_obj_float_t flipperzero_speaker_note_d3_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_D3};
+static const struct _mp_obj_float_t flipperzero_speaker_note_ds3_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_DS3};
+static const struct _mp_obj_float_t flipperzero_speaker_note_e3_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_E3};
+static const struct _mp_obj_float_t flipperzero_speaker_note_f3_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_F3};
+static const struct _mp_obj_float_t flipperzero_speaker_note_fs3_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_FS3};
+static const struct _mp_obj_float_t flipperzero_speaker_note_g3_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_G3};
+static const struct _mp_obj_float_t flipperzero_speaker_note_gs3_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_GS3};
+static const struct _mp_obj_float_t flipperzero_speaker_note_a3_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_A3};
+static const struct _mp_obj_float_t flipperzero_speaker_note_as3_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_AS3};
+static const struct _mp_obj_float_t flipperzero_speaker_note_b3_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_B3};
+static const struct _mp_obj_float_t flipperzero_speaker_note_c4_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_C4};
+static const struct _mp_obj_float_t flipperzero_speaker_note_cs4_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_CS4};
+static const struct _mp_obj_float_t flipperzero_speaker_note_d4_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_D4};
+static const struct _mp_obj_float_t flipperzero_speaker_note_ds4_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_DS4};
+static const struct _mp_obj_float_t flipperzero_speaker_note_e4_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_E4};
+static const struct _mp_obj_float_t flipperzero_speaker_note_f4_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_F4};
+static const struct _mp_obj_float_t flipperzero_speaker_note_fs4_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_FS4};
+static const struct _mp_obj_float_t flipperzero_speaker_note_g4_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_G4};
+static const struct _mp_obj_float_t flipperzero_speaker_note_gs4_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_GS4};
+static const struct _mp_obj_float_t flipperzero_speaker_note_a4_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_A4};
+static const struct _mp_obj_float_t flipperzero_speaker_note_as4_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_AS4};
+static const struct _mp_obj_float_t flipperzero_speaker_note_b4_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_B4};
+static const struct _mp_obj_float_t flipperzero_speaker_note_c5_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_C5};
+static const struct _mp_obj_float_t flipperzero_speaker_note_cs5_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_CS5};
+static const struct _mp_obj_float_t flipperzero_speaker_note_d5_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_D5};
+static const struct _mp_obj_float_t flipperzero_speaker_note_ds5_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_DS5};
+static const struct _mp_obj_float_t flipperzero_speaker_note_e5_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_E5};
+static const struct _mp_obj_float_t flipperzero_speaker_note_f5_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_F5};
+static const struct _mp_obj_float_t flipperzero_speaker_note_fs5_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_FS5};
+static const struct _mp_obj_float_t flipperzero_speaker_note_g5_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_G5};
+static const struct _mp_obj_float_t flipperzero_speaker_note_gs5_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_GS5};
+static const struct _mp_obj_float_t flipperzero_speaker_note_a5_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_A5};
+static const struct _mp_obj_float_t flipperzero_speaker_note_as5_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_AS5};
+static const struct _mp_obj_float_t flipperzero_speaker_note_b5_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_B5};
+static const struct _mp_obj_float_t flipperzero_speaker_note_c6_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_C6};
+static const struct _mp_obj_float_t flipperzero_speaker_note_cs6_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_CS6};
+static const struct _mp_obj_float_t flipperzero_speaker_note_d6_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_D6};
+static const struct _mp_obj_float_t flipperzero_speaker_note_ds6_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_DS6};
+static const struct _mp_obj_float_t flipperzero_speaker_note_e6_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_E6};
+static const struct _mp_obj_float_t flipperzero_speaker_note_f6_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_F6};
+static const struct _mp_obj_float_t flipperzero_speaker_note_fs6_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_FS6};
+static const struct _mp_obj_float_t flipperzero_speaker_note_g6_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_G6};
+static const struct _mp_obj_float_t flipperzero_speaker_note_gs6_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_GS6};
+static const struct _mp_obj_float_t flipperzero_speaker_note_a6_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_A6};
+static const struct _mp_obj_float_t flipperzero_speaker_note_as6_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_AS6};
+static const struct _mp_obj_float_t flipperzero_speaker_note_b6_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_B6};
+static const struct _mp_obj_float_t flipperzero_speaker_note_c7_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_C7};
+static const struct _mp_obj_float_t flipperzero_speaker_note_cs7_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_CS7};
+static const struct _mp_obj_float_t flipperzero_speaker_note_d7_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_D7};
+static const struct _mp_obj_float_t flipperzero_speaker_note_ds7_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_DS7};
+static const struct _mp_obj_float_t flipperzero_speaker_note_e7_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_E7};
+static const struct _mp_obj_float_t flipperzero_speaker_note_f7_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_F7};
+static const struct _mp_obj_float_t flipperzero_speaker_note_fs7_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_FS7};
+static const struct _mp_obj_float_t flipperzero_speaker_note_g7_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_G7};
+static const struct _mp_obj_float_t flipperzero_speaker_note_gs7_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_GS7};
+static const struct _mp_obj_float_t flipperzero_speaker_note_a7_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_A7};
+static const struct _mp_obj_float_t flipperzero_speaker_note_as7_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_AS7};
+static const struct _mp_obj_float_t flipperzero_speaker_note_b7_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_B7};
+static const struct _mp_obj_float_t flipperzero_speaker_note_c8_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_C8};
+static const struct _mp_obj_float_t flipperzero_speaker_note_cs8_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_CS8};
+static const struct _mp_obj_float_t flipperzero_speaker_note_d8_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_D8};
+static const struct _mp_obj_float_t flipperzero_speaker_note_ds8_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_DS8};
+static const struct _mp_obj_float_t flipperzero_speaker_note_e8_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_E8};
+static const struct _mp_obj_float_t flipperzero_speaker_note_f8_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_F8};
+static const struct _mp_obj_float_t flipperzero_speaker_note_fs8_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_FS8};
+static const struct _mp_obj_float_t flipperzero_speaker_note_g8_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_G8};
+static const struct _mp_obj_float_t flipperzero_speaker_note_gs8_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_GS8};
+static const struct _mp_obj_float_t flipperzero_speaker_note_a8_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_A8};
+static const struct _mp_obj_float_t flipperzero_speaker_note_as8_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_AS8};
+static const struct _mp_obj_float_t flipperzero_speaker_note_b8_obj = {{&mp_type_float}, (mp_float_t)MP_FLIPPER_SPEAKER_NOTE_B8};
+
+// Speaker volume bounds exposed to Python as ROM float objects.
+static const struct _mp_obj_float_t flipperzero_speaker_volume_min_obj = {
+    {&mp_type_float},
+    (mp_float_t)MP_FLIPPER_SPEAKER_VOLUME_MIN};
+static const struct _mp_obj_float_t flipperzero_speaker_volume_max_obj = {
+    {&mp_type_float},
+    (mp_float_t)MP_FLIPPER_SPEAKER_VOLUME_MAX};
+
+// Start speaker output at the given frequency and volume; True on success.
+static mp_obj_t flipperzero_speaker_start(mp_obj_t frequency_obj, mp_obj_t volume_obj) {
+    const mp_float_t freq_hz = mp_obj_get_float(frequency_obj);
+    const mp_float_t level = mp_obj_get_float(volume_obj);
+
+    return mp_obj_new_bool(mp_flipper_speaker_start(freq_hz, level));
+}
+static MP_DEFINE_CONST_FUN_OBJ_2(flipperzero_speaker_start_obj, flipperzero_speaker_start);
+
+// Adjust the volume of the running speaker output; True on success.
+static mp_obj_t flipperzero_speaker_set_volume(mp_obj_t volume_obj) {
+    return mp_obj_new_bool(mp_flipper_speaker_set_volume(mp_obj_get_float(volume_obj)));
+}
+static MP_DEFINE_CONST_FUN_OBJ_1(flipperzero_speaker_set_volume_obj, flipperzero_speaker_set_volume);
+
+// Stop speaker output; returns True on success.
+// Bug fix: the original called mp_flipper_speaker_stop() twice per invocation
+// (first result discarded, second used as the return value) — stop once and
+// report that call's result.
+static mp_obj_t flipperzero_speaker_stop() {
+    return mp_flipper_speaker_stop() ? mp_const_true : mp_const_false;
+}
+static MP_DEFINE_CONST_FUN_OBJ_0(flipperzero_speaker_stop_obj, flipperzero_speaker_stop);
+
+// Canvas width in pixels.
+static mp_obj_t flipperzero_canvas_width() {
+    return mp_obj_new_int(mp_flipper_canvas_width());
+}
+static MP_DEFINE_CONST_FUN_OBJ_0(flipperzero_canvas_width_obj, flipperzero_canvas_width);
+
+// Canvas height in pixels.
+static mp_obj_t flipperzero_canvas_height() {
+    return mp_obj_new_int(mp_flipper_canvas_height());
+}
+static MP_DEFINE_CONST_FUN_OBJ_0(flipperzero_canvas_height_obj, flipperzero_canvas_height);
+
+// Width in pixels of `text` when rendered with the current font.
+static mp_obj_t flipperzero_canvas_text_width(mp_obj_t text_obj) {
+    return mp_obj_new_int(mp_flipper_canvas_text_width(mp_obj_str_get_str(text_obj)));
+}
+static MP_DEFINE_CONST_FUN_OBJ_1(flipperzero_canvas_text_width_obj, flipperzero_canvas_text_width);
+
+// Line height in pixels of the current font.
+static mp_obj_t flipperzero_canvas_text_height() {
+    return mp_obj_new_int(mp_flipper_canvas_text_height());
+}
+static MP_DEFINE_CONST_FUN_OBJ_0(flipperzero_canvas_text_height_obj, flipperzero_canvas_text_height);
+
+// Draw a single pixel at (x, y).
+static mp_obj_t flipperzero_canvas_draw_dot(mp_obj_t x_obj, mp_obj_t y_obj) {
+    const mp_int_t pos_x = mp_obj_get_int(x_obj);
+    const mp_int_t pos_y = mp_obj_get_int(y_obj);
+
+    mp_flipper_canvas_draw_dot(pos_x, pos_y);
+
+    return mp_const_none;
+}
+static MP_DEFINE_CONST_FUN_OBJ_2(flipperzero_canvas_draw_dot_obj, flipperzero_canvas_draw_dot);
+
+// Draw a filled box; the optional fifth argument is the corner radius
+// (defaults to 0, i.e. square corners).
+static mp_obj_t flipperzero_canvas_draw_box(size_t n_args, const mp_obj_t* args) {
+    if(n_args < 4) {
+        return mp_const_none;
+    }
+
+    const mp_int_t pos_x = mp_obj_get_int(args[0]);
+    const mp_int_t pos_y = mp_obj_get_int(args[1]);
+    const mp_int_t box_w = mp_obj_get_int(args[2]);
+    const mp_int_t box_h = mp_obj_get_int(args[3]);
+    const mp_int_t corner_radius = n_args == 5 ? mp_obj_get_int(args[4]) : 0;
+
+    mp_flipper_canvas_draw_box(pos_x, pos_y, box_w, box_h, corner_radius);
+
+    return mp_const_none;
+}
+static MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(flipperzero_canvas_draw_box_obj, 4, 5, flipperzero_canvas_draw_box);
+
+// Draw a box outline; the optional fifth argument is the corner radius
+// (defaults to 0, i.e. square corners).
+static mp_obj_t flipperzero_canvas_draw_frame(size_t n_args, const mp_obj_t* args) {
+    if(n_args < 4) {
+        return mp_const_none;
+    }
+
+    const mp_int_t pos_x = mp_obj_get_int(args[0]);
+    const mp_int_t pos_y = mp_obj_get_int(args[1]);
+    const mp_int_t frame_w = mp_obj_get_int(args[2]);
+    const mp_int_t frame_h = mp_obj_get_int(args[3]);
+    const mp_int_t corner_radius = n_args == 5 ? mp_obj_get_int(args[4]) : 0;
+
+    mp_flipper_canvas_draw_frame(pos_x, pos_y, frame_w, frame_h, corner_radius);
+
+    return mp_const_none;
+}
+static MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(flipperzero_canvas_draw_frame_obj, 4, 5, flipperzero_canvas_draw_frame);
+
+// Draw a line from (x0, y0) to (x1, y1).
+static mp_obj_t flipperzero_canvas_draw_line(size_t n_args, const mp_obj_t* args) {
+    if(n_args != 4) {
+        return mp_const_none;
+    }
+
+    const mp_int_t start_x = mp_obj_get_int(args[0]);
+    const mp_int_t start_y = mp_obj_get_int(args[1]);
+    const mp_int_t end_x = mp_obj_get_int(args[2]);
+    const mp_int_t end_y = mp_obj_get_int(args[3]);
+
+    mp_flipper_canvas_draw_line(start_x, start_y, end_x, end_y);
+
+    return mp_const_none;
+}
+static MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(flipperzero_canvas_draw_line_obj, 4, 4, flipperzero_canvas_draw_line);
+
+// Draw a circle outline centered at (x, y) with radius r.
+static mp_obj_t flipperzero_canvas_draw_circle(mp_obj_t x_obj, mp_obj_t y_obj, mp_obj_t r_obj) {
+    const mp_int_t center_x = mp_obj_get_int(x_obj);
+    const mp_int_t center_y = mp_obj_get_int(y_obj);
+    const mp_int_t radius = mp_obj_get_int(r_obj);
+
+    mp_flipper_canvas_draw_circle(center_x, center_y, radius);
+
+    return mp_const_none;
+}
+static MP_DEFINE_CONST_FUN_OBJ_3(flipperzero_canvas_draw_circle_obj, flipperzero_canvas_draw_circle);
+
+// Draw a filled circle (disc) centered at (x, y) with radius r.
+static mp_obj_t flipperzero_canvas_draw_disc(mp_obj_t x_obj, mp_obj_t y_obj, mp_obj_t r_obj) {
+    const mp_int_t center_x = mp_obj_get_int(x_obj);
+    const mp_int_t center_y = mp_obj_get_int(y_obj);
+    const mp_int_t radius = mp_obj_get_int(r_obj);
+
+    mp_flipper_canvas_draw_disc(center_x, center_y, radius);
+
+    return mp_const_none;
+}
+static MP_DEFINE_CONST_FUN_OBJ_3(flipperzero_canvas_draw_disc_obj, flipperzero_canvas_draw_disc);
+
+// Select the font used by subsequent text operations.
+static mp_obj_t flipperzero_canvas_set_font(mp_obj_t font_obj) {
+    mp_flipper_canvas_set_font(mp_obj_get_int(font_obj));
+
+    return mp_const_none;
+}
+static MP_DEFINE_CONST_FUN_OBJ_1(flipperzero_canvas_set_font_obj, flipperzero_canvas_set_font);
+
+// Select the draw color used by subsequent canvas operations.
+static mp_obj_t flipperzero_canvas_set_color(mp_obj_t color_obj) {
+    mp_flipper_canvas_set_color(mp_obj_get_int(color_obj));
+
+    return mp_const_none;
+}
+static MP_DEFINE_CONST_FUN_OBJ_1(flipperzero_canvas_set_color_obj, flipperzero_canvas_set_color);
+
+// Place `str` on the canvas at (x, y) with the current font and color.
+static mp_obj_t flipperzero_canvas_set_text(mp_obj_t x_obj, mp_obj_t y_obj, mp_obj_t str_obj) {
+    const mp_int_t pos_x = mp_obj_get_int(x_obj);
+    const mp_int_t pos_y = mp_obj_get_int(y_obj);
+    const char* text = mp_obj_str_get_str(str_obj);
+
+    mp_flipper_canvas_set_text(pos_x, pos_y, text);
+
+    return mp_const_none;
+}
+static MP_DEFINE_CONST_FUN_OBJ_3(flipperzero_canvas_set_text_obj, flipperzero_canvas_set_text);
+
+// Set horizontal/vertical alignment for subsequent text placement.
+static mp_obj_t flipperzero_canvas_set_text_align(mp_obj_t x_obj, mp_obj_t y_obj) {
+    const mp_int_t align_h = mp_obj_get_int(x_obj);
+    const mp_int_t align_v = mp_obj_get_int(y_obj);
+
+    mp_flipper_canvas_set_text_align(align_h, align_v);
+
+    return mp_const_none;
+}
+static MP_DEFINE_CONST_FUN_OBJ_2(flipperzero_canvas_set_text_align_obj, flipperzero_canvas_set_text_align);
+
+// Trigger a canvas update (commit pending drawing operations).
+static mp_obj_t flipperzero_canvas_update() {
+    mp_flipper_canvas_update();
+    return mp_const_none;
+}
+static MP_DEFINE_CONST_FUN_OBJ_0(flipperzero_canvas_update_obj, flipperzero_canvas_update);
+
+// Clear the canvas.
+static mp_obj_t flipperzero_canvas_clear() {
+    mp_flipper_canvas_clear();
+    return mp_const_none;
+}
+static MP_DEFINE_CONST_FUN_OBJ_0(flipperzero_canvas_clear_obj, flipperzero_canvas_clear);
+
+// Currently registered Python input callback (NULL when unset).
+// NOTE(review): stored in a plain static void* — this presumably needs to be
+// registered as a GC root pointer so the callback object is not collected;
+// confirm against the port's root-pointer setup.
+static void* mp_flipper_on_input_callback = NULL;
+
+// Register `callback_obj` to be invoked on input events; returns the
+// callback unchanged (so it can also be used as a decorator).
+static mp_obj_t flipperzero_on_input(mp_obj_t callback_obj) {
+    mp_flipper_on_input_callback = callback_obj;
+
+    return callback_obj;
+}
+static MP_DEFINE_CONST_FUN_OBJ_1(flipperzero_on_input_obj, flipperzero_on_input);
+
+// Internal trampoline: decodes the packed input flags into (button, type)
+// and invokes the Python callback registered via on_input(), if any.
+static mp_obj_t flipperzero_input_trigger_handler(mp_obj_t flags_obj) {
+    if(mp_flipper_on_input_callback == NULL) {
+        return mp_const_none;
+    }
+
+    const mp_int_t flags = mp_obj_get_int(flags_obj);
+    mp_obj_t button_obj = mp_obj_new_int(flags & MP_FLIPPER_INPUT_BUTTON);
+    mp_obj_t type_obj = mp_obj_new_int(flags & MP_FLIPPER_INPUT_TYPE);
+
+    mp_call_function_2_protected(mp_flipper_on_input_callback, button_obj, type_obj);
+
+    return mp_const_none;
+}
+static MP_DEFINE_CONST_FUN_OBJ_1(flipperzero_input_trigger_handler_obj, flipperzero_input_trigger_handler);
+
+// Set the dialog body text at (x, y); optional 4th/5th arguments are the
+// horizontal/vertical alignment, defaulting to MP_FLIPPER_ALIGN_BEGIN.
+static mp_obj_t flipperzero_dialog_message_set_text(size_t n_args, const mp_obj_t* args) {
+    if(n_args < 3) {
+        return mp_const_none;
+    }
+
+    const char* body = mp_obj_str_get_str(args[0]);
+    const mp_int_t pos_x = mp_obj_get_int(args[1]);
+    const mp_int_t pos_y = mp_obj_get_int(args[2]);
+    const mp_int_t align_h = n_args > 3 ? mp_obj_get_int(args[3]) : MP_FLIPPER_ALIGN_BEGIN;
+    const mp_int_t align_v = n_args > 4 ? mp_obj_get_int(args[4]) : MP_FLIPPER_ALIGN_BEGIN;
+
+    mp_flipper_dialog_message_set_text(body, pos_x, pos_y, align_h, align_v);
+
+    return mp_const_none;
+}
+static MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(flipperzero_dialog_message_set_text_obj, 3, 5, flipperzero_dialog_message_set_text);
+
+// Set the dialog header text at (x, y); optional 4th/5th arguments are the
+// horizontal/vertical alignment, defaulting to MP_FLIPPER_ALIGN_BEGIN.
+static mp_obj_t flipperzero_dialog_message_set_header(size_t n_args, const mp_obj_t* args) {
+    if(n_args < 3) {
+        return mp_const_none;
+    }
+
+    const char* header = mp_obj_str_get_str(args[0]);
+    const mp_int_t pos_x = mp_obj_get_int(args[1]);
+    const mp_int_t pos_y = mp_obj_get_int(args[2]);
+    const mp_int_t align_h = n_args > 3 ? mp_obj_get_int(args[3]) : MP_FLIPPER_ALIGN_BEGIN;
+    const mp_int_t align_v = n_args > 4 ? mp_obj_get_int(args[4]) : MP_FLIPPER_ALIGN_BEGIN;
+
+    mp_flipper_dialog_message_set_header(header, pos_x, pos_y, align_h, align_v);
+
+    return mp_const_none;
+}
+static MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(flipperzero_dialog_message_set_header_obj, 3, 5, flipperzero_dialog_message_set_header);
+
+// Assign `text` as the label of the given dialog button.
+static mp_obj_t flipperzero_dialog_message_set_button(mp_obj_t text_obj, mp_obj_t button_obj) {
+    const char* label = mp_obj_str_get_str(text_obj);
+    const mp_int_t button_id = mp_obj_get_int(button_obj);
+
+    mp_flipper_dialog_message_set_button(label, button_id);
+
+    return mp_const_none;
+}
+static MP_DEFINE_CONST_FUN_OBJ_2(flipperzero_dialog_message_set_button_obj, flipperzero_dialog_message_set_button);
+
+// Show the configured dialog message and return the resulting button value.
+static mp_obj_t flipperzero_dialog_message_show() {
+    return mp_obj_new_int(mp_flipper_dialog_message_show());
+}
+static MP_DEFINE_CONST_FUN_OBJ_0(flipperzero_dialog_message_show_obj, flipperzero_dialog_message_show);
+
+// Reset the dialog message configuration.
+static mp_obj_t flipperzero_dialog_message_clear() {
+    mp_flipper_dialog_message_clear();
+    return mp_const_none;
+}
+static MP_DEFINE_CONST_FUN_OBJ_0(flipperzero_dialog_message_clear_obj, flipperzero_dialog_message_clear);
+
+// Currently registered Python GPIO callback (NULL when unset).
+// NOTE(review): plain static void* — likely needs GC-root registration so the
+// callback object is not collected; confirm.
+static void* mp_flipper_on_gpio_callback = NULL;
+
+// Initialize a GPIO pin: (pin, mode[, pull=PULL_NO[, speed=SPEED_LOW]]).
+// Returns True on success.
+static mp_obj_t flipperzero_gpio_init_pin(size_t n_args, const mp_obj_t* args) {
+    if(n_args < 2) {
+        return mp_const_false;
+    }
+
+    const mp_int_t pin_id = mp_obj_get_int(args[0]);
+    const mp_int_t pin_mode = mp_obj_get_int(args[1]);
+    const mp_int_t pin_pull = n_args > 2 ? mp_obj_get_int(args[2]) : MP_FLIPPER_GPIO_PULL_NO;
+    const mp_int_t pin_speed = n_args > 3 ? mp_obj_get_int(args[3]) : MP_FLIPPER_GPIO_SPEED_LOW;
+
+    return mp_obj_new_bool(mp_flipper_gpio_init_pin(pin_id, pin_mode, pin_pull, pin_speed));
+}
+static MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(flipperzero_gpio_init_pin_obj, 2, 4, flipperzero_gpio_init_pin);
+
+// Release a previously initialized GPIO pin.
+static mp_obj_t flipperzero_gpio_deinit_pin(mp_obj_t pin_obj) {
+    mp_flipper_gpio_deinit_pin(mp_obj_get_int(pin_obj));
+
+    return mp_const_none;
+}
+static MP_DEFINE_CONST_FUN_OBJ_1(flipperzero_gpio_deinit_pin_obj, flipperzero_gpio_deinit_pin);
+
+// Drive a GPIO pin high (truthy state) or low (falsy state).
+static mp_obj_t flipperzero_gpio_set_pin(mp_obj_t pin_obj, mp_obj_t state_obj) {
+    const mp_int_t pin_id = mp_obj_get_int(pin_obj);
+    const bool level = mp_obj_is_true(state_obj);
+
+    mp_flipper_gpio_set_pin(pin_id, level);
+
+    return mp_const_none;
+}
+static MP_DEFINE_CONST_FUN_OBJ_2(flipperzero_gpio_set_pin_obj, flipperzero_gpio_set_pin);
+
+// Read the current level of a GPIO pin.
+static mp_obj_t flipperzero_gpio_get_pin(mp_obj_t pin_obj) {
+    return mp_obj_new_bool(mp_flipper_gpio_get_pin(mp_obj_get_int(pin_obj)));
+}
+static MP_DEFINE_CONST_FUN_OBJ_1(flipperzero_gpio_get_pin_obj, flipperzero_gpio_get_pin);
+
+// Register `callback_obj` for GPIO interrupt events; returns it unchanged.
+static mp_obj_t flipperzero_on_gpio(mp_obj_t callback_obj) {
+    mp_flipper_on_gpio_callback = callback_obj;
+    return callback_obj;
+}
+static MP_DEFINE_CONST_FUN_OBJ_1(flipperzero_on_gpio_obj, flipperzero_on_gpio);
+
+// Internal trampoline: forwards a GPIO event to the registered callback.
+static mp_obj_t flipperzero_gpio_trigger_handler(mp_obj_t pin_obj) {
+    if(mp_flipper_on_gpio_callback == NULL) {
+        return mp_const_none;
+    }
+
+    mp_call_function_1_protected(mp_flipper_on_gpio_callback, pin_obj);
+
+    return mp_const_none;
+}
+static MP_DEFINE_CONST_FUN_OBJ_1(flipperzero_gpio_trigger_handler_obj, flipperzero_gpio_trigger_handler);
+
+// Read the raw ADC sample from a pin.
+static mp_obj_t flipperzero_adc_read_pin_value(mp_obj_t pin_obj) {
+    return mp_obj_new_int(mp_flipper_adc_read_pin(mp_obj_get_int(pin_obj)));
+}
+static MP_DEFINE_CONST_FUN_OBJ_1(flipperzero_adc_read_pin_value_obj, flipperzero_adc_read_pin_value);
+
+// Read an ADC pin and convert the raw sample to a voltage.
+static mp_obj_t flipperzero_adc_read_pin_voltage(mp_obj_t pin_obj) {
+    const uint16_t raw_sample = mp_flipper_adc_read_pin(mp_obj_get_int(pin_obj));
+    const float volts = mp_flipper_adc_convert_to_voltage(raw_sample);
+
+    return mp_obj_new_float(volts);
+}
+static MP_DEFINE_CONST_FUN_OBJ_1(flipperzero_adc_read_pin_voltage_obj, flipperzero_adc_read_pin_voltage);
+
+// Start PWM output on a pin with the given frequency and duty cycle;
+// returns True on success.
+static mp_obj_t flipperzero_pwm_start(mp_obj_t pin_obj, mp_obj_t frequency_obj, mp_obj_t duty_obj) {
+    const mp_int_t pin_id = mp_obj_get_int(pin_obj);
+    const mp_int_t freq_hz = mp_obj_get_int(frequency_obj);
+    const mp_int_t duty_cycle = mp_obj_get_int(duty_obj);
+
+    return mp_obj_new_bool(mp_flipper_pwm_start(pin_id, freq_hz, duty_cycle));
+}
+static MP_DEFINE_CONST_FUN_OBJ_3(flipperzero_pwm_start_obj, flipperzero_pwm_start);
+
+// Stop PWM output on a pin.
+static mp_obj_t flipperzero_pwm_stop(mp_obj_t pin_obj) {
+    mp_flipper_pwm_stop(mp_obj_get_int(pin_obj));
+
+    return mp_const_none;
+}
+static MP_DEFINE_CONST_FUN_OBJ_1(flipperzero_pwm_stop_obj, flipperzero_pwm_stop);
+
+// True while PWM output is active on the pin.
+static mp_obj_t flipperzero_pwm_is_running(mp_obj_t pin_obj) {
+    return mp_obj_new_bool(mp_flipper_pwm_is_running(mp_obj_get_int(pin_obj)));
+}
+static MP_DEFINE_CONST_FUN_OBJ_1(flipperzero_pwm_is_running_obj, flipperzero_pwm_is_running);
+
+// Receive a raw infrared signal; the optional argument is a timeout,
+// defaulting to MP_FLIPPER_INFRARED_RX_DEFAULT_TIMEOUT. Returns a list of
+// timing values (empty when nothing was received).
+// Fixes: the temporary mp_obj_t array was leaked (mp_obj_new_list copies its
+// items) and a failed malloc would have been dereferenced when length > 0.
+// NOTE(review): ownership of `buffer` is unclear — if the caller is expected
+// to free it, it leaks here as in the original; confirm against the port.
+// NOTE(review): the GC cannot scan the malloc heap, so m_new/m_del would be
+// safer for holding mp_obj_t values — confirm.
+static mp_obj_t flipperzero_infrared_receive(size_t n_args, const mp_obj_t* args) {
+    mp_int_t timeout = n_args > 0 ? mp_obj_get_int(args[0]) : MP_FLIPPER_INFRARED_RX_DEFAULT_TIMEOUT;
+
+    size_t length = 0;
+    uint32_t* buffer = mp_flipper_infrared_receive(timeout, &length);
+
+    if(length == 0) {
+        return mp_obj_new_list(0, NULL);
+    }
+
+    mp_obj_t* signal = malloc(length * sizeof(mp_obj_t));
+
+    if(signal == NULL) {
+        mp_flipper_raise_os_error(MP_ENOMEM);
+    }
+
+    for(size_t i = 0; i < length; i++) {
+        signal[i] = mp_obj_new_int(buffer[i]);
+    }
+
+    mp_obj_t list = mp_obj_new_list(length, signal);
+
+    // mp_obj_new_list copied the items; release the temporary array.
+    free(signal);
+
+    return list;
+}
+static MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(flipperzero_infrared_receive_obj, 0, 1, flipperzero_infrared_receive);
+
+// Adapter passed to mp_flipper_infrared_transmit: yields the signal entry at
+// `index` as an unsigned timing value.
+inline static uint32_t flipperzero_infrared_tx_signal_provider(mp_obj_t* signal, const size_t index) {
+    return mp_obj_get_int(signal[index]);
+}
+
+// Transmit a raw infrared signal.
+// Python args: (signal, repeat=1, use_external_pin=False,
+//               frequency=MP_FLIPPER_INFRARED_TX_DEFAULT_FREQUENCY,
+//               duty_cycle=MP_FLIPPER_INFRARED_TX_DEFAULT_DUTY_CYCLE).
+// Note the argument-order difference: Python takes use_external_pin before
+// frequency, while the C transmit call takes it last. Returns True on success.
+static mp_obj_t flipperzero_infrared_transmit(size_t n_args, const mp_obj_t* args) {
+    size_t length = 0;
+    mp_obj_t* signal;
+
+    // Raises if args[0] is not a list/tuple.
+    mp_obj_get_array(args[0], &length, &signal);
+
+    mp_int_t repeat = n_args > 1 ? mp_obj_get_int(args[1]) : 1;
+    bool use_external_pin = n_args > 2 ? mp_obj_is_true(args[2]) : false;
+    mp_int_t frequency = n_args > 3 ? mp_obj_get_int(args[3]) : MP_FLIPPER_INFRARED_TX_DEFAULT_FREQUENCY;
+    mp_float_t duty_cycle = n_args > 4 ? mp_obj_get_float(args[4]) : MP_FLIPPER_INFRARED_TX_DEFAULT_DUTY_CYCLE;
+
+    return mp_flipper_infrared_transmit(
+               signal, length, flipperzero_infrared_tx_signal_provider, repeat, frequency, duty_cycle, use_external_pin) ?
+               mp_const_true :
+               mp_const_false;
+}
+static MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(flipperzero_infrared_transmit_obj, 1, 5, flipperzero_infrared_transmit);
+
+static mp_obj_t flipperzero_infrared_is_busy() {
+    return mp_flipper_infrared_is_busy() ? mp_const_true : mp_const_false;
+}
+static MP_DEFINE_CONST_FUN_OBJ_0(flipperzero_infrared_is_busy_obj, flipperzero_infrared_is_busy);
+
+extern const mp_obj_type_t flipperzero_uart_connection_type;
+
+typedef struct _flipperzero_uart_connection_t {
+    mp_obj_base_t base;
+    void* handle;
+    mp_obj_t mode;
+    mp_obj_t baud_rate;
+} flipperzero_uart_connection_t;
+
+static mp_obj_t flipperzero_uart_open(mp_obj_t raw_mode, mp_obj_t raw_baud_rate) {
+    uint8_t mode = mp_obj_get_int(raw_mode);
+    uint32_t baud_rate = mp_obj_get_int(raw_baud_rate);
+
+    void* handle = mp_flipper_uart_open(mode, baud_rate);
+
+    if(handle == NULL) {
+        mp_flipper_raise_os_error(MP_EBUSY);
+
+        return mp_const_none;
+    }
+
+    flipperzero_uart_connection_t* connection =
+        mp_obj_malloc_with_finaliser(flipperzero_uart_connection_t, &flipperzero_uart_connection_type);
+
+    connection->handle = handle;
+    connection->mode = raw_mode;
+    connection->baud_rate = raw_baud_rate;
+
+    return connection;
+}
+static MP_DEFINE_CONST_FUN_OBJ_2(flipperzero_uart_open_obj, flipperzero_uart_open);
+
+static mp_uint_t flipperzero_uart_read(mp_obj_t self, void* buf, mp_uint_t size, int* errcode) {
+    flipperzero_uart_connection_t* connection = MP_OBJ_TO_PTR(self);
+
+    if(connection->handle == NULL) {
+        *errcode = MP_EIO;
+
+        return MP_STREAM_ERROR;
+    }
+
+    return mp_flipper_uart_read(connection->handle, buf, size, errcode);
+}
+
+static mp_uint_t flipperzero_uart_write(mp_obj_t self, const void* buf, mp_uint_t size, int* errcode) {
+    flipperzero_uart_connection_t* connection = MP_OBJ_TO_PTR(self);
+
+    if(connection->handle == NULL) {
+        *errcode = MP_EIO;
+
+        return MP_STREAM_ERROR;
+    }
+
+    return mp_flipper_uart_write(connection->handle, buf, size, errcode);
+}
+
+static mp_uint_t flipperzero_uart_ioctl(mp_obj_t self, mp_uint_t request, uintptr_t arg, int* errcode) {
+    flipperzero_uart_connection_t* connection = MP_OBJ_TO_PTR(self);
+
+    if(connection->handle == NULL) {
+        return 0;
+    }
+
+    if(request == MP_STREAM_SEEK) {
+        return 0;
+    }
+
+    if(request == MP_STREAM_FLUSH) {
+        if(!mp_flipper_uart_sync(connection->handle)) {
+            *errcode = MP_EIO;
+
+            return MP_STREAM_ERROR;
+        }
+
+        return 0;
+    }
+
+    if(request == MP_STREAM_CLOSE) {
+        if(!mp_flipper_uart_close(connection->handle)) {
+            *errcode = MP_EIO;
+
+            connection->handle = NULL;
+
+            return MP_STREAM_ERROR;
+        }
+
+        connection->handle = NULL;
+
+        return 0;
+    }
+
+    *errcode = MP_EINVAL;
+
+    return MP_STREAM_ERROR;
+}
+
+static const mp_map_elem_t flipperzero_uart_connection_locals_dict_table[] = {
+    {MP_ROM_QSTR(MP_QSTR_read), MP_ROM_PTR(&mp_stream_read_obj)},
+    {MP_ROM_QSTR(MP_QSTR_readline), MP_ROM_PTR(&mp_stream_unbuffered_readline_obj)},
+    {MP_ROM_QSTR(MP_QSTR_readlines), MP_ROM_PTR(&mp_stream_unbuffered_readlines_obj)},
+    {MP_ROM_QSTR(MP_QSTR_write), MP_ROM_PTR(&mp_stream_write_obj)},
+    {MP_ROM_QSTR(MP_QSTR_flush), MP_ROM_PTR(&mp_stream_flush_obj)},
+    {MP_ROM_QSTR(MP_QSTR_close), MP_ROM_PTR(&mp_stream_close_obj)},
+    {MP_ROM_QSTR(MP_QSTR___del__), MP_ROM_PTR(&mp_stream_close_obj)},
+    {MP_ROM_QSTR(MP_QSTR___enter__), MP_ROM_PTR(&mp_identity_obj)},
+    {MP_ROM_QSTR(MP_QSTR___exit__), MP_ROM_PTR(&mp_stream___exit___obj)},
+};
+static MP_DEFINE_CONST_DICT(flipperzero_uart_connection_locals_dict, flipperzero_uart_connection_locals_dict_table);
+
+static const mp_stream_p_t flipperzero_uart_connection_stream_p = {
+    .read = flipperzero_uart_read,
+    .write = flipperzero_uart_write,
+    .ioctl = flipperzero_uart_ioctl,
+    .is_text = false,
+};
+
+MP_DEFINE_CONST_OBJ_TYPE(
+    flipperzero_uart_connection_type,
+    MP_QSTR_UART,
+    MP_TYPE_FLAG_ITER_IS_STREAM,
+    protocol,
+    &flipperzero_uart_connection_stream_p,
+    locals_dict,
+    &flipperzero_uart_connection_locals_dict);
+
+static const mp_rom_map_elem_t flipperzero_module_globals_table[] = {
+    // light
+    {MP_OBJ_NEW_QSTR(MP_QSTR___name__), MP_OBJ_NEW_QSTR(MP_QSTR_flipperzero)},
+    {MP_ROM_QSTR(MP_QSTR_LIGHT_RED), MP_ROM_INT(MP_FLIPPER_LED_RED)},
+    {MP_ROM_QSTR(MP_QSTR_LIGHT_GREEN), MP_ROM_INT(MP_FLIPPER_LED_GREEN)},
+    {MP_ROM_QSTR(MP_QSTR_LIGHT_BLUE), MP_ROM_INT(MP_FLIPPER_LED_BLUE)},
+    {MP_ROM_QSTR(MP_QSTR_LIGHT_BACKLIGHT), MP_ROM_INT(MP_FLIPPER_LED_BACKLIGHT)},
+    {MP_ROM_QSTR(MP_QSTR_light_set), MP_ROM_PTR(&flipperzero_light_set_obj)},
+    {MP_ROM_QSTR(MP_QSTR_light_blink_start), MP_ROM_PTR(&flipperzero_light_blink_start_obj)},
+    {MP_ROM_QSTR(MP_QSTR_light_blink_set_color), MP_ROM_PTR(&flipperzero_light_blink_set_color_obj)},
+    {MP_ROM_QSTR(MP_QSTR_light_blink_stop), MP_ROM_PTR(&flipperzero_light_blink_stop_obj)},
+    // vibro
+    {MP_ROM_QSTR(MP_QSTR_vibro_set), MP_ROM_PTR(&flipperzero_vibro_set_obj)},
+    /*
+Python script for notes generation
+
+# coding: utf-8
+# Python script for notes generation
+
+from typing import List
+
+note_names: List = ['C', 'CS', 'D', 'DS', 'E', 'F', 'FS', 'G', 'GS', 'A', 'AS', 'B']
+
+for octave in range(9):
+    for name in note_names:
+        print("{MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_%s%s), MP_ROM_PTR(&flipperzero_speaker_note_%s%s_obj)}," % (name,octave,name.lower(),octave))
+*/
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_C0), MP_ROM_PTR(&flipperzero_speaker_note_c0_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_CS0), MP_ROM_PTR(&flipperzero_speaker_note_cs0_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_D0), MP_ROM_PTR(&flipperzero_speaker_note_d0_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_DS0), MP_ROM_PTR(&flipperzero_speaker_note_ds0_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_E0), MP_ROM_PTR(&flipperzero_speaker_note_e0_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_F0), MP_ROM_PTR(&flipperzero_speaker_note_f0_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_FS0), MP_ROM_PTR(&flipperzero_speaker_note_fs0_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_G0), MP_ROM_PTR(&flipperzero_speaker_note_g0_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_GS0), MP_ROM_PTR(&flipperzero_speaker_note_gs0_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_A0), MP_ROM_PTR(&flipperzero_speaker_note_a0_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_AS0), MP_ROM_PTR(&flipperzero_speaker_note_as0_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_B0), MP_ROM_PTR(&flipperzero_speaker_note_b0_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_C1), MP_ROM_PTR(&flipperzero_speaker_note_c1_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_CS1), MP_ROM_PTR(&flipperzero_speaker_note_cs1_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_D1), MP_ROM_PTR(&flipperzero_speaker_note_d1_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_DS1), MP_ROM_PTR(&flipperzero_speaker_note_ds1_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_E1), MP_ROM_PTR(&flipperzero_speaker_note_e1_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_F1), MP_ROM_PTR(&flipperzero_speaker_note_f1_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_FS1), MP_ROM_PTR(&flipperzero_speaker_note_fs1_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_G1), MP_ROM_PTR(&flipperzero_speaker_note_g1_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_GS1), MP_ROM_PTR(&flipperzero_speaker_note_gs1_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_A1), MP_ROM_PTR(&flipperzero_speaker_note_a1_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_AS1), MP_ROM_PTR(&flipperzero_speaker_note_as1_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_B1), MP_ROM_PTR(&flipperzero_speaker_note_b1_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_C2), MP_ROM_PTR(&flipperzero_speaker_note_c2_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_CS2), MP_ROM_PTR(&flipperzero_speaker_note_cs2_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_D2), MP_ROM_PTR(&flipperzero_speaker_note_d2_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_DS2), MP_ROM_PTR(&flipperzero_speaker_note_ds2_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_E2), MP_ROM_PTR(&flipperzero_speaker_note_e2_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_F2), MP_ROM_PTR(&flipperzero_speaker_note_f2_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_FS2), MP_ROM_PTR(&flipperzero_speaker_note_fs2_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_G2), MP_ROM_PTR(&flipperzero_speaker_note_g2_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_GS2), MP_ROM_PTR(&flipperzero_speaker_note_gs2_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_A2), MP_ROM_PTR(&flipperzero_speaker_note_a2_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_AS2), MP_ROM_PTR(&flipperzero_speaker_note_as2_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_B2), MP_ROM_PTR(&flipperzero_speaker_note_b2_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_C3), MP_ROM_PTR(&flipperzero_speaker_note_c3_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_CS3), MP_ROM_PTR(&flipperzero_speaker_note_cs3_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_D3), MP_ROM_PTR(&flipperzero_speaker_note_d3_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_DS3), MP_ROM_PTR(&flipperzero_speaker_note_ds3_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_E3), MP_ROM_PTR(&flipperzero_speaker_note_e3_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_F3), MP_ROM_PTR(&flipperzero_speaker_note_f3_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_FS3), MP_ROM_PTR(&flipperzero_speaker_note_fs3_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_G3), MP_ROM_PTR(&flipperzero_speaker_note_g3_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_GS3), MP_ROM_PTR(&flipperzero_speaker_note_gs3_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_A3), MP_ROM_PTR(&flipperzero_speaker_note_a3_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_AS3), MP_ROM_PTR(&flipperzero_speaker_note_as3_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_B3), MP_ROM_PTR(&flipperzero_speaker_note_b3_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_C4), MP_ROM_PTR(&flipperzero_speaker_note_c4_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_CS4), MP_ROM_PTR(&flipperzero_speaker_note_cs4_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_D4), MP_ROM_PTR(&flipperzero_speaker_note_d4_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_DS4), MP_ROM_PTR(&flipperzero_speaker_note_ds4_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_E4), MP_ROM_PTR(&flipperzero_speaker_note_e4_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_F4), MP_ROM_PTR(&flipperzero_speaker_note_f4_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_FS4), MP_ROM_PTR(&flipperzero_speaker_note_fs4_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_G4), MP_ROM_PTR(&flipperzero_speaker_note_g4_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_GS4), MP_ROM_PTR(&flipperzero_speaker_note_gs4_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_A4), MP_ROM_PTR(&flipperzero_speaker_note_a4_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_AS4), MP_ROM_PTR(&flipperzero_speaker_note_as4_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_B4), MP_ROM_PTR(&flipperzero_speaker_note_b4_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_C5), MP_ROM_PTR(&flipperzero_speaker_note_c5_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_CS5), MP_ROM_PTR(&flipperzero_speaker_note_cs5_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_D5), MP_ROM_PTR(&flipperzero_speaker_note_d5_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_DS5), MP_ROM_PTR(&flipperzero_speaker_note_ds5_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_E5), MP_ROM_PTR(&flipperzero_speaker_note_e5_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_F5), MP_ROM_PTR(&flipperzero_speaker_note_f5_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_FS5), MP_ROM_PTR(&flipperzero_speaker_note_fs5_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_G5), MP_ROM_PTR(&flipperzero_speaker_note_g5_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_GS5), MP_ROM_PTR(&flipperzero_speaker_note_gs5_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_A5), MP_ROM_PTR(&flipperzero_speaker_note_a5_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_AS5), MP_ROM_PTR(&flipperzero_speaker_note_as5_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_B5), MP_ROM_PTR(&flipperzero_speaker_note_b5_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_C6), MP_ROM_PTR(&flipperzero_speaker_note_c6_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_CS6), MP_ROM_PTR(&flipperzero_speaker_note_cs6_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_D6), MP_ROM_PTR(&flipperzero_speaker_note_d6_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_DS6), MP_ROM_PTR(&flipperzero_speaker_note_ds6_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_E6), MP_ROM_PTR(&flipperzero_speaker_note_e6_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_F6), MP_ROM_PTR(&flipperzero_speaker_note_f6_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_FS6), MP_ROM_PTR(&flipperzero_speaker_note_fs6_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_G6), MP_ROM_PTR(&flipperzero_speaker_note_g6_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_GS6), MP_ROM_PTR(&flipperzero_speaker_note_gs6_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_A6), MP_ROM_PTR(&flipperzero_speaker_note_a6_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_AS6), MP_ROM_PTR(&flipperzero_speaker_note_as6_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_B6), MP_ROM_PTR(&flipperzero_speaker_note_b6_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_C7), MP_ROM_PTR(&flipperzero_speaker_note_c7_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_CS7), MP_ROM_PTR(&flipperzero_speaker_note_cs7_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_D7), MP_ROM_PTR(&flipperzero_speaker_note_d7_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_DS7), MP_ROM_PTR(&flipperzero_speaker_note_ds7_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_E7), MP_ROM_PTR(&flipperzero_speaker_note_e7_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_F7), MP_ROM_PTR(&flipperzero_speaker_note_f7_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_FS7), MP_ROM_PTR(&flipperzero_speaker_note_fs7_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_G7), MP_ROM_PTR(&flipperzero_speaker_note_g7_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_GS7), MP_ROM_PTR(&flipperzero_speaker_note_gs7_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_A7), MP_ROM_PTR(&flipperzero_speaker_note_a7_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_AS7), MP_ROM_PTR(&flipperzero_speaker_note_as7_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_B7), MP_ROM_PTR(&flipperzero_speaker_note_b7_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_C8), MP_ROM_PTR(&flipperzero_speaker_note_c8_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_CS8), MP_ROM_PTR(&flipperzero_speaker_note_cs8_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_D8), MP_ROM_PTR(&flipperzero_speaker_note_d8_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_DS8), MP_ROM_PTR(&flipperzero_speaker_note_ds8_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_E8), MP_ROM_PTR(&flipperzero_speaker_note_e8_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_F8), MP_ROM_PTR(&flipperzero_speaker_note_f8_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_FS8), MP_ROM_PTR(&flipperzero_speaker_note_fs8_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_G8), MP_ROM_PTR(&flipperzero_speaker_note_g8_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_GS8), MP_ROM_PTR(&flipperzero_speaker_note_gs8_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_A8), MP_ROM_PTR(&flipperzero_speaker_note_a8_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_AS8), MP_ROM_PTR(&flipperzero_speaker_note_as8_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_NOTE_B8), MP_ROM_PTR(&flipperzero_speaker_note_b8_obj)},
+
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_VOLUME_MIN), MP_ROM_PTR(&flipperzero_speaker_volume_min_obj)},
+    {MP_ROM_QSTR(MP_QSTR_SPEAKER_VOLUME_MAX), MP_ROM_PTR(&flipperzero_speaker_volume_max_obj)},
+
+    {MP_ROM_QSTR(MP_QSTR_speaker_start), MP_ROM_PTR(&flipperzero_speaker_start_obj)},
+    {MP_ROM_QSTR(MP_QSTR_speaker_set_volume), MP_ROM_PTR(&flipperzero_speaker_set_volume_obj)},
+    {MP_ROM_QSTR(MP_QSTR_speaker_stop), MP_ROM_PTR(&flipperzero_speaker_stop_obj)},
+    // canvas
+    {MP_ROM_QSTR(MP_QSTR_CANVAS_BLACK), MP_ROM_INT(MP_FLIPPER_COLOR_BLACK)},
+    {MP_ROM_QSTR(MP_QSTR_CANVAS_WHITE), MP_ROM_INT(MP_FLIPPER_COLOR_WHITE)},
+    {MP_ROM_QSTR(MP_QSTR_canvas_width), MP_ROM_PTR(&flipperzero_canvas_width_obj)},
+    {MP_ROM_QSTR(MP_QSTR_canvas_height), MP_ROM_PTR(&flipperzero_canvas_height_obj)},
+    {MP_ROM_QSTR(MP_QSTR_canvas_text_width), MP_ROM_PTR(&flipperzero_canvas_text_width_obj)},
+    {MP_ROM_QSTR(MP_QSTR_canvas_text_height), MP_ROM_PTR(&flipperzero_canvas_text_height_obj)},
+    {MP_ROM_QSTR(MP_QSTR_canvas_draw_dot), MP_ROM_PTR(&flipperzero_canvas_draw_dot_obj)},
+    {MP_ROM_QSTR(MP_QSTR_canvas_draw_box), MP_ROM_PTR(&flipperzero_canvas_draw_box_obj)},
+    {MP_ROM_QSTR(MP_QSTR_canvas_draw_frame), MP_ROM_PTR(&flipperzero_canvas_draw_frame_obj)},
+    {MP_ROM_QSTR(MP_QSTR_canvas_draw_line), MP_ROM_PTR(&flipperzero_canvas_draw_line_obj)},
+    {MP_ROM_QSTR(MP_QSTR_canvas_draw_circle), MP_ROM_PTR(&flipperzero_canvas_draw_circle_obj)},
+    {MP_ROM_QSTR(MP_QSTR_canvas_draw_disc), MP_ROM_PTR(&flipperzero_canvas_draw_disc_obj)},
+    {MP_ROM_QSTR(MP_QSTR_FONT_PRIMARY), MP_ROM_INT(MP_FLIPPER_FONT_PRIMARY)},
+    {MP_ROM_QSTR(MP_QSTR_FONT_SECONDARY), MP_ROM_INT(MP_FLIPPER_FONT_SECONDARY)},
+    {MP_ROM_QSTR(MP_QSTR_canvas_set_font), MP_ROM_PTR(&flipperzero_canvas_set_font_obj)},
+    {MP_ROM_QSTR(MP_QSTR_canvas_set_color), MP_ROM_PTR(&flipperzero_canvas_set_color_obj)},
+    {MP_ROM_QSTR(MP_QSTR_canvas_set_text), MP_ROM_PTR(&flipperzero_canvas_set_text_obj)},
+    {MP_ROM_QSTR(MP_QSTR_ALIGN_BEGIN), MP_ROM_INT(MP_FLIPPER_ALIGN_BEGIN)},
+    {MP_ROM_QSTR(MP_QSTR_ALIGN_CENTER), MP_ROM_INT(MP_FLIPPER_ALIGN_CENTER)},
+    {MP_ROM_QSTR(MP_QSTR_ALIGN_END), MP_ROM_INT(MP_FLIPPER_ALIGN_END)},
+    {MP_ROM_QSTR(MP_QSTR_canvas_set_text_align), MP_ROM_PTR(&flipperzero_canvas_set_text_align_obj)},
+    {MP_ROM_QSTR(MP_QSTR_canvas_update), MP_ROM_PTR(&flipperzero_canvas_update_obj)},
+    {MP_ROM_QSTR(MP_QSTR_canvas_clear), MP_ROM_PTR(&flipperzero_canvas_clear_obj)},
+    // input
+    {MP_ROM_QSTR(MP_QSTR_on_input), MP_ROM_PTR(&flipperzero_on_input_obj)},
+    {MP_ROM_QSTR(MP_QSTR__input_trigger_handler), MP_ROM_PTR(&flipperzero_input_trigger_handler_obj)},
+    {MP_ROM_QSTR(MP_QSTR_INPUT_BUTTON_BACK), MP_ROM_INT(MP_FLIPPER_INPUT_BUTTON_BACK)},
+    {MP_ROM_QSTR(MP_QSTR_INPUT_BUTTON_OK), MP_ROM_INT(MP_FLIPPER_INPUT_BUTTON_OK)},
+    {MP_ROM_QSTR(MP_QSTR_INPUT_BUTTON_LEFT), MP_ROM_INT(MP_FLIPPER_INPUT_BUTTON_LEFT)},
+    {MP_ROM_QSTR(MP_QSTR_INPUT_BUTTON_RIGHT), MP_ROM_INT(MP_FLIPPER_INPUT_BUTTON_RIGHT)},
+    {MP_ROM_QSTR(MP_QSTR_INPUT_BUTTON_UP), MP_ROM_INT(MP_FLIPPER_INPUT_BUTTON_UP)},
+    {MP_ROM_QSTR(MP_QSTR_INPUT_BUTTON_DOWN), MP_ROM_INT(MP_FLIPPER_INPUT_BUTTON_DOWN)},
+    {MP_ROM_QSTR(MP_QSTR_INPUT_TYPE_PRESS), MP_ROM_INT(MP_FLIPPER_INPUT_TYPE_PRESS)},
+    {MP_ROM_QSTR(MP_QSTR_INPUT_TYPE_RELEASE), MP_ROM_INT(MP_FLIPPER_INPUT_TYPE_RELEASE)},
+    {MP_ROM_QSTR(MP_QSTR_INPUT_TYPE_SHORT), MP_ROM_INT(MP_FLIPPER_INPUT_TYPE_SHORT)},
+    {MP_ROM_QSTR(MP_QSTR_INPUT_TYPE_LONG), MP_ROM_INT(MP_FLIPPER_INPUT_TYPE_LONG)},
+    {MP_ROM_QSTR(MP_QSTR_INPUT_TYPE_REPEAT), MP_ROM_INT(MP_FLIPPER_INPUT_TYPE_REPEAT)},
+    // dialog
+    {MP_ROM_QSTR(MP_QSTR_dialog_message_set_text), MP_ROM_PTR(&flipperzero_dialog_message_set_text_obj)},
+    {MP_ROM_QSTR(MP_QSTR_dialog_message_set_header), MP_ROM_PTR(&flipperzero_dialog_message_set_header_obj)},
+    {MP_ROM_QSTR(MP_QSTR_dialog_message_set_button), MP_ROM_PTR(&flipperzero_dialog_message_set_button_obj)},
+    {MP_ROM_QSTR(MP_QSTR_dialog_message_show), MP_ROM_PTR(&flipperzero_dialog_message_show_obj)},
+    {MP_ROM_QSTR(MP_QSTR_dialog_message_clear), MP_ROM_PTR(&flipperzero_dialog_message_clear_obj)},
+    // gpio - pins
+    {MP_ROM_QSTR(MP_QSTR_GPIO_PIN_PC0), MP_ROM_INT(MP_FLIPPER_GPIO_PIN_PC0)},
+    {MP_ROM_QSTR(MP_QSTR_GPIO_PIN_PC1), MP_ROM_INT(MP_FLIPPER_GPIO_PIN_PC1)},
+    {MP_ROM_QSTR(MP_QSTR_GPIO_PIN_PC3), MP_ROM_INT(MP_FLIPPER_GPIO_PIN_PC3)},
+    {MP_ROM_QSTR(MP_QSTR_GPIO_PIN_PB2), MP_ROM_INT(MP_FLIPPER_GPIO_PIN_PB2)},
+    {MP_ROM_QSTR(MP_QSTR_GPIO_PIN_PB3), MP_ROM_INT(MP_FLIPPER_GPIO_PIN_PB3)},
+    {MP_ROM_QSTR(MP_QSTR_GPIO_PIN_PA4), MP_ROM_INT(MP_FLIPPER_GPIO_PIN_PA4)},
+    {MP_ROM_QSTR(MP_QSTR_GPIO_PIN_PA6), MP_ROM_INT(MP_FLIPPER_GPIO_PIN_PA6)},
+    {MP_ROM_QSTR(MP_QSTR_GPIO_PIN_PA7), MP_ROM_INT(MP_FLIPPER_GPIO_PIN_PA7)},
+    // gpio - modes
+    {MP_ROM_QSTR(MP_QSTR_GPIO_MODE_INPUT), MP_ROM_INT(MP_FLIPPER_GPIO_MODE_INPUT)},
+    {MP_ROM_QSTR(MP_QSTR_GPIO_MODE_OUTPUT_PUSH_PULL), MP_ROM_INT(MP_FLIPPER_GPIO_MODE_OUTPUT_PUSH_PULL)},
+    {MP_ROM_QSTR(MP_QSTR_GPIO_MODE_OUTPUT_OPEN_DRAIN), MP_ROM_INT(MP_FLIPPER_GPIO_MODE_OUTPUT_OPEN_DRAIN)},
+    {MP_ROM_QSTR(MP_QSTR_GPIO_MODE_ANALOG), MP_ROM_INT(MP_FLIPPER_GPIO_MODE_ANALOG)},
+    {MP_ROM_QSTR(MP_QSTR_GPIO_MODE_INTERRUPT_RISE), MP_ROM_INT(MP_FLIPPER_GPIO_MODE_INTERRUPT_RISE)},
+    {MP_ROM_QSTR(MP_QSTR_GPIO_MODE_INTERRUPT_FALL), MP_ROM_INT(MP_FLIPPER_GPIO_MODE_INTERRUPT_FALL)},
+    // gpio - pull
+    {MP_ROM_QSTR(MP_QSTR_GPIO_PULL_NO), MP_ROM_INT(MP_FLIPPER_GPIO_PULL_NO)},
+    {MP_ROM_QSTR(MP_QSTR_GPIO_PULL_UP), MP_ROM_INT(MP_FLIPPER_GPIO_PULL_UP)},
+    {MP_ROM_QSTR(MP_QSTR_GPIO_PULL_DOWN), MP_ROM_INT(MP_FLIPPER_GPIO_PULL_DOWN)},
+    // gpio - speed
+    {MP_ROM_QSTR(MP_QSTR_GPIO_SPEED_LOW), MP_ROM_INT(MP_FLIPPER_GPIO_SPEED_LOW)},
+    {MP_ROM_QSTR(MP_QSTR_GPIO_SPEED_MEDIUM), MP_ROM_INT(MP_FLIPPER_GPIO_SPEED_MEDIUM)},
+    {MP_ROM_QSTR(MP_QSTR_GPIO_SPEED_HIGH), MP_ROM_INT(MP_FLIPPER_GPIO_SPEED_HIGH)},
+    {MP_ROM_QSTR(MP_QSTR_GPIO_SPEED_VERY_HIGH), MP_ROM_INT(MP_FLIPPER_GPIO_SPEED_VERY_HIGH)},
+    // gpio - functions
+    {MP_ROM_QSTR(MP_QSTR_gpio_init_pin), MP_ROM_PTR(&flipperzero_gpio_init_pin_obj)},
+    {MP_ROM_QSTR(MP_QSTR_gpio_deinit_pin), MP_ROM_PTR(&flipperzero_gpio_deinit_pin_obj)},
+    {MP_ROM_QSTR(MP_QSTR_gpio_set_pin), MP_ROM_PTR(&flipperzero_gpio_set_pin_obj)},
+    {MP_ROM_QSTR(MP_QSTR_gpio_get_pin), MP_ROM_PTR(&flipperzero_gpio_get_pin_obj)},
+    {MP_ROM_QSTR(MP_QSTR_on_gpio), MP_ROM_PTR(&flipperzero_on_gpio_obj)},
+    {MP_ROM_QSTR(MP_QSTR__gpio_trigger_handler), MP_ROM_PTR(&flipperzero_gpio_trigger_handler_obj)},
+    // adc - functions
+    {MP_ROM_QSTR(MP_QSTR_adc_read_pin_value), MP_ROM_PTR(&flipperzero_adc_read_pin_value_obj)},
+    {MP_ROM_QSTR(MP_QSTR_adc_read_pin_voltage), MP_ROM_PTR(&flipperzero_adc_read_pin_voltage_obj)},
+    // pwm - functions
+    {MP_ROM_QSTR(MP_QSTR_pwm_start), MP_ROM_PTR(&flipperzero_pwm_start_obj)},
+    {MP_ROM_QSTR(MP_QSTR_pwm_stop), MP_ROM_PTR(&flipperzero_pwm_stop_obj)},
+    {MP_ROM_QSTR(MP_QSTR_pwm_is_running), MP_ROM_PTR(&flipperzero_pwm_is_running_obj)},
+    // infrared - functions
+    {MP_ROM_QSTR(MP_QSTR_infrared_receive), MP_ROM_PTR(&flipperzero_infrared_receive_obj)},
+    {MP_ROM_QSTR(MP_QSTR_infrared_transmit), MP_ROM_PTR(&flipperzero_infrared_transmit_obj)},
+    {MP_ROM_QSTR(MP_QSTR_infrared_is_busy), MP_ROM_PTR(&flipperzero_infrared_is_busy_obj)},
+    // UART
+    {MP_ROM_QSTR(MP_QSTR_UART), MP_ROM_PTR(&flipperzero_uart_connection_type)},
+    {MP_ROM_QSTR(MP_QSTR_UART_MODE_LPUART), MP_ROM_INT(MP_FLIPPER_UART_MODE_LPUART)},
+    {MP_ROM_QSTR(MP_QSTR_UART_MODE_USART), MP_ROM_INT(MP_FLIPPER_UART_MODE_USART)},
+    {MP_ROM_QSTR(MP_QSTR_uart_open), MP_ROM_PTR(&flipperzero_uart_open_obj)},
+};
+static MP_DEFINE_CONST_DICT(flipperzero_module_globals, flipperzero_module_globals_table);
+
+const mp_obj_module_t flipperzero_module = {
+    .base = {&mp_type_module},
+    .globals = (mp_obj_dict_t*)&flipperzero_module_globals,
+};
+
+MP_REGISTER_MODULE(MP_QSTR_flipperzero, flipperzero_module);
+
+void mp_flipper_on_input(uint16_t button, uint16_t type) {
+    if(mp_flipper_on_input_callback != NULL) {
+        uint16_t flags = button | type;
+        mp_obj_t flags_obj = mp_obj_new_int_from_uint(flags);
+
+        mp_sched_schedule(&flipperzero_input_trigger_handler_obj, flags_obj);
+    }
+}
+
+void mp_flipper_on_gpio(void* ctx) {
+    if(mp_flipper_on_gpio_callback != NULL) {
+        mp_obj_t pin_obj = mp_obj_new_int_from_uint((uint8_t)ctx);
+
+        mp_sched_schedule(&flipperzero_gpio_trigger_handler_obj, pin_obj);
+    }
+}

+ 269 - 0
mp_flipper/lib/micropython/mp_flipper_modflipperzero.h

@@ -0,0 +1,269 @@
+#pragma once
+
+#include <stddef.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <stdio.h>
+
+#define MP_FLIPPER_LED_RED (1 << 0)
+#define MP_FLIPPER_LED_GREEN (1 << 1)
+#define MP_FLIPPER_LED_BLUE (1 << 2)
+#define MP_FLIPPER_LED_BACKLIGHT (1 << 3)
+
+#define MP_FLIPPER_COLOR_BLACK (1 << 0)
+#define MP_FLIPPER_COLOR_WHITE (1 << 1)
+
+#define MP_FLIPPER_INPUT_BUTTON_UP (1 << 0)
+#define MP_FLIPPER_INPUT_BUTTON_DOWN (1 << 1)
+#define MP_FLIPPER_INPUT_BUTTON_RIGHT (1 << 2)
+#define MP_FLIPPER_INPUT_BUTTON_LEFT (1 << 3)
+#define MP_FLIPPER_INPUT_BUTTON_OK (1 << 4)
+#define MP_FLIPPER_INPUT_BUTTON_BACK (1 << 5)
+#define MP_FLIPPER_INPUT_BUTTON ((1 << 6) - 1)
+
+#define MP_FLIPPER_INPUT_TYPE_PRESS (1 << 6)
+#define MP_FLIPPER_INPUT_TYPE_RELEASE (1 << 7)
+#define MP_FLIPPER_INPUT_TYPE_SHORT (1 << 8)
+#define MP_FLIPPER_INPUT_TYPE_LONG (1 << 9)
+#define MP_FLIPPER_INPUT_TYPE_REPEAT (1 << 10)
+#define MP_FLIPPER_INPUT_TYPE ((1 << 11) - 1 - MP_FLIPPER_INPUT_BUTTON)
+
+#define MP_FLIPPER_ALIGN_BEGIN (1 << 0)
+#define MP_FLIPPER_ALIGN_CENTER (1 << 1)
+#define MP_FLIPPER_ALIGN_END (1 << 2)
+
+#define MP_FLIPPER_FONT_PRIMARY (1 << 0)
+#define MP_FLIPPER_FONT_SECONDARY (1 << 1)
+
+void mp_flipper_light_set(uint8_t raw_light, uint8_t brightness);
+void mp_flipper_light_blink_start(uint8_t raw_light, uint8_t brightness, uint16_t on_time, uint16_t period);
+void mp_flipper_light_blink_set_color(uint8_t raw_light);
+void mp_flipper_light_blink_stop();
+
+void mp_flipper_vibro(bool state);
+
+/*
+Python script for notes generation
+
+# coding: utf-8
+# Python script for notes generation
+
+from typing import List
+
+note_names: List = ['C', 'CS', 'D', 'DS', 'E', 'F', 'FS', 'G', 'GS', 'A', 'AS', 'B']
+base_note: float = 16.3515979
+cf: float = 2 ** (1.0 / 12)
+
+note: float = base_note
+for octave in range(9):
+    for name in note_names:
+        print(f"#define MP_FLIPPER_SPEAKER_NOTE_{name}{octave} MICROPY_FLOAT_CONST({round(note, 2)})")
+        note = note * cf
+*/
+
+#define MP_FLIPPER_SPEAKER_NOTE_C0 MICROPY_FLOAT_CONST(16.35)
+#define MP_FLIPPER_SPEAKER_NOTE_CS0 MICROPY_FLOAT_CONST(17.32)
+#define MP_FLIPPER_SPEAKER_NOTE_D0 MICROPY_FLOAT_CONST(18.35)
+#define MP_FLIPPER_SPEAKER_NOTE_DS0 MICROPY_FLOAT_CONST(19.45)
+#define MP_FLIPPER_SPEAKER_NOTE_E0 MICROPY_FLOAT_CONST(20.6)
+#define MP_FLIPPER_SPEAKER_NOTE_F0 MICROPY_FLOAT_CONST(21.83)
+#define MP_FLIPPER_SPEAKER_NOTE_FS0 MICROPY_FLOAT_CONST(23.12)
+#define MP_FLIPPER_SPEAKER_NOTE_G0 MICROPY_FLOAT_CONST(24.5)
+#define MP_FLIPPER_SPEAKER_NOTE_GS0 MICROPY_FLOAT_CONST(25.96)
+#define MP_FLIPPER_SPEAKER_NOTE_A0 MICROPY_FLOAT_CONST(27.5)
+#define MP_FLIPPER_SPEAKER_NOTE_AS0 MICROPY_FLOAT_CONST(29.14)
+#define MP_FLIPPER_SPEAKER_NOTE_B0 MICROPY_FLOAT_CONST(30.87)
+#define MP_FLIPPER_SPEAKER_NOTE_C1 MICROPY_FLOAT_CONST(32.7)
+#define MP_FLIPPER_SPEAKER_NOTE_CS1 MICROPY_FLOAT_CONST(34.65)
+#define MP_FLIPPER_SPEAKER_NOTE_D1 MICROPY_FLOAT_CONST(36.71)
+#define MP_FLIPPER_SPEAKER_NOTE_DS1 MICROPY_FLOAT_CONST(38.89)
+#define MP_FLIPPER_SPEAKER_NOTE_E1 MICROPY_FLOAT_CONST(41.2)
+#define MP_FLIPPER_SPEAKER_NOTE_F1 MICROPY_FLOAT_CONST(43.65)
+#define MP_FLIPPER_SPEAKER_NOTE_FS1 MICROPY_FLOAT_CONST(46.25)
+#define MP_FLIPPER_SPEAKER_NOTE_G1 MICROPY_FLOAT_CONST(49.0)
+#define MP_FLIPPER_SPEAKER_NOTE_GS1 MICROPY_FLOAT_CONST(51.91)
+#define MP_FLIPPER_SPEAKER_NOTE_A1 MICROPY_FLOAT_CONST(55.0)
+#define MP_FLIPPER_SPEAKER_NOTE_AS1 MICROPY_FLOAT_CONST(58.27)
+#define MP_FLIPPER_SPEAKER_NOTE_B1 MICROPY_FLOAT_CONST(61.74)
+#define MP_FLIPPER_SPEAKER_NOTE_C2 MICROPY_FLOAT_CONST(65.41)
+#define MP_FLIPPER_SPEAKER_NOTE_CS2 MICROPY_FLOAT_CONST(69.3)
+#define MP_FLIPPER_SPEAKER_NOTE_D2 MICROPY_FLOAT_CONST(73.42)
+#define MP_FLIPPER_SPEAKER_NOTE_DS2 MICROPY_FLOAT_CONST(77.78)
+#define MP_FLIPPER_SPEAKER_NOTE_E2 MICROPY_FLOAT_CONST(82.41)
+#define MP_FLIPPER_SPEAKER_NOTE_F2 MICROPY_FLOAT_CONST(87.31)
+#define MP_FLIPPER_SPEAKER_NOTE_FS2 MICROPY_FLOAT_CONST(92.5)
+#define MP_FLIPPER_SPEAKER_NOTE_G2 MICROPY_FLOAT_CONST(98.0)
+#define MP_FLIPPER_SPEAKER_NOTE_GS2 MICROPY_FLOAT_CONST(103.83)
+#define MP_FLIPPER_SPEAKER_NOTE_A2 MICROPY_FLOAT_CONST(110.0)
+#define MP_FLIPPER_SPEAKER_NOTE_AS2 MICROPY_FLOAT_CONST(116.54)
+#define MP_FLIPPER_SPEAKER_NOTE_B2 MICROPY_FLOAT_CONST(123.47)
+#define MP_FLIPPER_SPEAKER_NOTE_C3 MICROPY_FLOAT_CONST(130.81)
+#define MP_FLIPPER_SPEAKER_NOTE_CS3 MICROPY_FLOAT_CONST(138.59)
+#define MP_FLIPPER_SPEAKER_NOTE_D3 MICROPY_FLOAT_CONST(146.83)
+#define MP_FLIPPER_SPEAKER_NOTE_DS3 MICROPY_FLOAT_CONST(155.56)
+#define MP_FLIPPER_SPEAKER_NOTE_E3 MICROPY_FLOAT_CONST(164.81)
+#define MP_FLIPPER_SPEAKER_NOTE_F3 MICROPY_FLOAT_CONST(174.61)
+#define MP_FLIPPER_SPEAKER_NOTE_FS3 MICROPY_FLOAT_CONST(185.0)
+#define MP_FLIPPER_SPEAKER_NOTE_G3 MICROPY_FLOAT_CONST(196.0)
+#define MP_FLIPPER_SPEAKER_NOTE_GS3 MICROPY_FLOAT_CONST(207.65)
+#define MP_FLIPPER_SPEAKER_NOTE_A3 MICROPY_FLOAT_CONST(220.0)
+#define MP_FLIPPER_SPEAKER_NOTE_AS3 MICROPY_FLOAT_CONST(233.08)
+#define MP_FLIPPER_SPEAKER_NOTE_B3 MICROPY_FLOAT_CONST(246.94)
+#define MP_FLIPPER_SPEAKER_NOTE_C4 MICROPY_FLOAT_CONST(261.63)
+#define MP_FLIPPER_SPEAKER_NOTE_CS4 MICROPY_FLOAT_CONST(277.18)
+#define MP_FLIPPER_SPEAKER_NOTE_D4 MICROPY_FLOAT_CONST(293.66)
+#define MP_FLIPPER_SPEAKER_NOTE_DS4 MICROPY_FLOAT_CONST(311.13)
+#define MP_FLIPPER_SPEAKER_NOTE_E4 MICROPY_FLOAT_CONST(329.63)
+#define MP_FLIPPER_SPEAKER_NOTE_F4 MICROPY_FLOAT_CONST(349.23)
+#define MP_FLIPPER_SPEAKER_NOTE_FS4 MICROPY_FLOAT_CONST(369.99)
+#define MP_FLIPPER_SPEAKER_NOTE_G4 MICROPY_FLOAT_CONST(392.0)
+#define MP_FLIPPER_SPEAKER_NOTE_GS4 MICROPY_FLOAT_CONST(415.3)
+#define MP_FLIPPER_SPEAKER_NOTE_A4 MICROPY_FLOAT_CONST(440.0)
+#define MP_FLIPPER_SPEAKER_NOTE_AS4 MICROPY_FLOAT_CONST(466.16)
+#define MP_FLIPPER_SPEAKER_NOTE_B4 MICROPY_FLOAT_CONST(493.88)
+#define MP_FLIPPER_SPEAKER_NOTE_C5 MICROPY_FLOAT_CONST(523.25)
+#define MP_FLIPPER_SPEAKER_NOTE_CS5 MICROPY_FLOAT_CONST(554.37)
+#define MP_FLIPPER_SPEAKER_NOTE_D5 MICROPY_FLOAT_CONST(587.33)
+#define MP_FLIPPER_SPEAKER_NOTE_DS5 MICROPY_FLOAT_CONST(622.25)
+#define MP_FLIPPER_SPEAKER_NOTE_E5 MICROPY_FLOAT_CONST(659.26)
+#define MP_FLIPPER_SPEAKER_NOTE_F5 MICROPY_FLOAT_CONST(698.46)
+#define MP_FLIPPER_SPEAKER_NOTE_FS5 MICROPY_FLOAT_CONST(739.99)
+#define MP_FLIPPER_SPEAKER_NOTE_G5 MICROPY_FLOAT_CONST(783.99)
+#define MP_FLIPPER_SPEAKER_NOTE_GS5 MICROPY_FLOAT_CONST(830.61)
+#define MP_FLIPPER_SPEAKER_NOTE_A5 MICROPY_FLOAT_CONST(880.0)
+#define MP_FLIPPER_SPEAKER_NOTE_AS5 MICROPY_FLOAT_CONST(932.33)
+#define MP_FLIPPER_SPEAKER_NOTE_B5 MICROPY_FLOAT_CONST(987.77)
+#define MP_FLIPPER_SPEAKER_NOTE_C6 MICROPY_FLOAT_CONST(1046.5)
+#define MP_FLIPPER_SPEAKER_NOTE_CS6 MICROPY_FLOAT_CONST(1108.73)
+#define MP_FLIPPER_SPEAKER_NOTE_D6 MICROPY_FLOAT_CONST(1174.66)
+#define MP_FLIPPER_SPEAKER_NOTE_DS6 MICROPY_FLOAT_CONST(1244.51)
+#define MP_FLIPPER_SPEAKER_NOTE_E6 MICROPY_FLOAT_CONST(1318.51)
+#define MP_FLIPPER_SPEAKER_NOTE_F6 MICROPY_FLOAT_CONST(1396.91)
+#define MP_FLIPPER_SPEAKER_NOTE_FS6 MICROPY_FLOAT_CONST(1479.98)
+#define MP_FLIPPER_SPEAKER_NOTE_G6 MICROPY_FLOAT_CONST(1567.98)
+#define MP_FLIPPER_SPEAKER_NOTE_GS6 MICROPY_FLOAT_CONST(1661.22)
+#define MP_FLIPPER_SPEAKER_NOTE_A6 MICROPY_FLOAT_CONST(1760.0)
+#define MP_FLIPPER_SPEAKER_NOTE_AS6 MICROPY_FLOAT_CONST(1864.66)
+#define MP_FLIPPER_SPEAKER_NOTE_B6 MICROPY_FLOAT_CONST(1975.53)
+#define MP_FLIPPER_SPEAKER_NOTE_C7 MICROPY_FLOAT_CONST(2093.0)
+#define MP_FLIPPER_SPEAKER_NOTE_CS7 MICROPY_FLOAT_CONST(2217.46)
+#define MP_FLIPPER_SPEAKER_NOTE_D7 MICROPY_FLOAT_CONST(2349.32)
+#define MP_FLIPPER_SPEAKER_NOTE_DS7 MICROPY_FLOAT_CONST(2489.02)
+#define MP_FLIPPER_SPEAKER_NOTE_E7 MICROPY_FLOAT_CONST(2637.02)
+#define MP_FLIPPER_SPEAKER_NOTE_F7 MICROPY_FLOAT_CONST(2793.83)
+#define MP_FLIPPER_SPEAKER_NOTE_FS7 MICROPY_FLOAT_CONST(2959.96)
+#define MP_FLIPPER_SPEAKER_NOTE_G7 MICROPY_FLOAT_CONST(3135.96)
+#define MP_FLIPPER_SPEAKER_NOTE_GS7 MICROPY_FLOAT_CONST(3322.44)
+#define MP_FLIPPER_SPEAKER_NOTE_A7 MICROPY_FLOAT_CONST(3520.0)
+#define MP_FLIPPER_SPEAKER_NOTE_AS7 MICROPY_FLOAT_CONST(3729.31)
+#define MP_FLIPPER_SPEAKER_NOTE_B7 MICROPY_FLOAT_CONST(3951.07)
+#define MP_FLIPPER_SPEAKER_NOTE_C8 MICROPY_FLOAT_CONST(4186.01)
+#define MP_FLIPPER_SPEAKER_NOTE_CS8 MICROPY_FLOAT_CONST(4434.92)
+#define MP_FLIPPER_SPEAKER_NOTE_D8 MICROPY_FLOAT_CONST(4698.64)
+#define MP_FLIPPER_SPEAKER_NOTE_DS8 MICROPY_FLOAT_CONST(4978.03)
+#define MP_FLIPPER_SPEAKER_NOTE_E8 MICROPY_FLOAT_CONST(5274.04)
+#define MP_FLIPPER_SPEAKER_NOTE_F8 MICROPY_FLOAT_CONST(5587.65)
+#define MP_FLIPPER_SPEAKER_NOTE_FS8 MICROPY_FLOAT_CONST(5919.91)
+#define MP_FLIPPER_SPEAKER_NOTE_G8 MICROPY_FLOAT_CONST(6271.93)
+#define MP_FLIPPER_SPEAKER_NOTE_GS8 MICROPY_FLOAT_CONST(6644.88)
+#define MP_FLIPPER_SPEAKER_NOTE_A8 MICROPY_FLOAT_CONST(7040.0)
+#define MP_FLIPPER_SPEAKER_NOTE_AS8 MICROPY_FLOAT_CONST(7458.62)
+#define MP_FLIPPER_SPEAKER_NOTE_B8 MICROPY_FLOAT_CONST(7902.13)
+
+#define MP_FLIPPER_SPEAKER_VOLUME_MIN MICROPY_FLOAT_CONST(0.0)
+#define MP_FLIPPER_SPEAKER_VOLUME_MAX MICROPY_FLOAT_CONST(1.0)
+
+bool mp_flipper_speaker_start(float frequency, float volume);
+bool mp_flipper_speaker_set_volume(float volume);
+bool mp_flipper_speaker_stop();
+
+uint8_t mp_flipper_canvas_width();
+uint8_t mp_flipper_canvas_height();
+uint8_t mp_flipper_canvas_text_width(const char* text);
+uint8_t mp_flipper_canvas_text_height();
+void mp_flipper_canvas_draw_dot(uint8_t x, uint8_t y);
+void mp_flipper_canvas_draw_box(uint8_t x, uint8_t y, uint8_t w, uint8_t h, uint8_t r);
+void mp_flipper_canvas_draw_frame(uint8_t x, uint8_t y, uint8_t w, uint8_t h, uint8_t r);
+void mp_flipper_canvas_draw_line(uint8_t x0, uint8_t y0, uint8_t x1, uint8_t y1);
+void mp_flipper_canvas_draw_circle(uint8_t x, uint8_t y, uint8_t r);
+void mp_flipper_canvas_draw_disc(uint8_t x, uint8_t y, uint8_t r);
+void mp_flipper_canvas_set_font(uint8_t font);
+void mp_flipper_canvas_set_color(uint8_t color);
+void mp_flipper_canvas_set_text(uint8_t x, uint8_t y, const char* text);
+void mp_flipper_canvas_set_text_align(uint8_t x, uint8_t y);
+void mp_flipper_canvas_update();
+void mp_flipper_canvas_clear();
+
+void mp_flipper_on_input(uint16_t button, uint16_t type);
+
+void mp_flipper_dialog_message_set_text(const char* text, uint8_t x, uint8_t y, uint8_t h, uint8_t v);
+void mp_flipper_dialog_message_set_header(const char* text, uint8_t x, uint8_t y, uint8_t h, uint8_t v);
+void mp_flipper_dialog_message_set_button(const char* text, uint8_t button);
+uint8_t mp_flipper_dialog_message_show();
+void mp_flipper_dialog_message_clear();
+
+#define MP_FLIPPER_GPIO_PIN_PC0 (0)
+#define MP_FLIPPER_GPIO_PIN_PC1 (1)
+#define MP_FLIPPER_GPIO_PIN_PC3 (2)
+#define MP_FLIPPER_GPIO_PIN_PB2 (3)
+#define MP_FLIPPER_GPIO_PIN_PB3 (4)
+#define MP_FLIPPER_GPIO_PIN_PA4 (5)
+#define MP_FLIPPER_GPIO_PIN_PA6 (6)
+#define MP_FLIPPER_GPIO_PIN_PA7 (7)
+
+#define MP_FLIPPER_GPIO_PINS (8)
+
+#define MP_FLIPPER_GPIO_MODE_INPUT (1 << 0)
+#define MP_FLIPPER_GPIO_MODE_OUTPUT_PUSH_PULL (1 << 1)
+#define MP_FLIPPER_GPIO_MODE_OUTPUT_OPEN_DRAIN (1 << 2)
+#define MP_FLIPPER_GPIO_MODE_ANALOG (1 << 3)
+#define MP_FLIPPER_GPIO_MODE_INTERRUPT_RISE (1 << 4)
+#define MP_FLIPPER_GPIO_MODE_INTERRUPT_FALL (1 << 5)
+
+#define MP_FLIPPER_GPIO_PULL_NO (0)
+#define MP_FLIPPER_GPIO_PULL_UP (1)
+#define MP_FLIPPER_GPIO_PULL_DOWN (2)
+
+#define MP_FLIPPER_GPIO_SPEED_LOW (0)
+#define MP_FLIPPER_GPIO_SPEED_MEDIUM (1)
+#define MP_FLIPPER_GPIO_SPEED_HIGH (2)
+#define MP_FLIPPER_GPIO_SPEED_VERY_HIGH (3)
+
+bool mp_flipper_gpio_init_pin(uint8_t raw_pin, uint8_t raw_mode, uint8_t raw_pull, uint8_t raw_speed);
+void mp_flipper_gpio_deinit_pin(uint8_t raw_pin);
+void mp_flipper_gpio_set_pin(uint8_t raw_pin, bool state);
+bool mp_flipper_gpio_get_pin(uint8_t raw_pin);
+void mp_flipper_on_gpio(void* ctx);
+
+uint16_t mp_flipper_adc_read_pin(uint8_t raw_pin);
+float mp_flipper_adc_convert_to_voltage(uint16_t value);
+
+bool mp_flipper_pwm_start(uint8_t raw_pin, uint32_t frequency, uint8_t duty);
+void mp_flipper_pwm_stop(uint8_t raw_pin);
+bool mp_flipper_pwm_is_running(uint8_t raw_pin);
+
+#define MP_FLIPPER_INFRARED_RX_DEFAULT_TIMEOUT (1000000)
+#define MP_FLIPPER_INFRARED_TX_DEFAULT_FREQUENCY (38000)
+#define MP_FLIPPER_INFRARED_TX_DEFAULT_DUTY_CYCLE (0.33)
+
+typedef uint32_t (*mp_flipper_infrared_signal_tx_provider)(void* signal, const size_t index);
+
+uint32_t* mp_flipper_infrared_receive(uint32_t timeout, size_t* length);
+bool mp_flipper_infrared_transmit(
+    void* signal,
+    size_t length,
+    mp_flipper_infrared_signal_tx_provider callback,
+    uint32_t repeat,
+    uint32_t frequency,
+    float duty,
+    bool use_external_pin);
+bool mp_flipper_infrared_is_busy();
+
+#define MP_FLIPPER_UART_MODE_USART (0)
+#define MP_FLIPPER_UART_MODE_LPUART (1)
+
+void* mp_flipper_uart_open(uint8_t raw_mode, uint32_t baud_rate);
+bool mp_flipper_uart_close(void* handle);
+bool mp_flipper_uart_sync(void* handle);
+size_t mp_flipper_uart_read(void* handle, void* buffer, size_t size, int* errcode);
+size_t mp_flipper_uart_write(void* handle, const void* buffer, size_t size, int* errcode);

+ 5 - 0
mp_flipper/lib/micropython/mp_flipper_modrandom.h

@@ -0,0 +1,5 @@
+#pragma once
+
+#include <stdint.h>
+
+// Port hook: return an entropy seed for MicroPython's random module; wired
+// up via MICROPY_PY_RANDOM_SEED_INIT_FUNC in mpconfigport.h.
+// Declared with (void): in C an empty parameter list means "unspecified
+// arguments", not "no arguments", so `()` would disable argument checking.
+uint32_t mp_flipper_seed_init(void);

+ 34 - 0
mp_flipper/lib/micropython/mp_flipper_modtime.c

@@ -0,0 +1,34 @@
+#include "py/mphal.h"
+#include "py/obj.h"
+
+#include "mp_flipper_modtime.h"
+
+// Backs time.time(): wraps the port-supplied timestamp in a MicroPython
+// integer object. NOTE(review): assumes mp_flipper_get_timestamp() returns
+// seconds since the epoch — confirm against the firmware implementation.
+mp_obj_t mp_time_time_get(void) {
+    uint32_t timestamp = mp_flipper_get_timestamp();
+
+    return mp_obj_new_int_from_uint(timestamp);
+}
+
+// MicroPython HAL hook: wall-clock time in NANOseconds (backs time.time_ns()).
+// BUG FIX: the previous code returned timestamp * 1000, which is the wrong
+// scale — given that mp_flipper_get_timestamp() feeds time.time() above (i.e.
+// epoch seconds), nanoseconds require a factor of 1e9. The multiplication is
+// performed in 64 bits so it cannot overflow a 32-bit intermediate.
+uint64_t mp_hal_time_ns(void) {
+    return (uint64_t)mp_flipper_get_timestamp() * 1000000000ULL;
+}
+
+// MicroPython HAL hook: monotonic millisecond tick counter (backs
+// time.ticks_ms()).
+// BUG FIX: the previous implementation returned
+// mp_flipper_get_tick_frequency() / 1000 — a CONSTANT derived from the tick
+// frequency — so ticks_ms() never advanced. Convert the current tick count
+// to milliseconds instead. The intermediate is widened to 64 bits so
+// tick * 1000 cannot overflow; the result wraps naturally in mp_uint_t,
+// which MicroPython's ticks_diff() semantics expect.
+// NOTE(review): assumes the firmware guarantees a non-zero tick frequency.
+mp_uint_t mp_hal_ticks_ms(void) {
+    return (mp_uint_t)((uint64_t)mp_flipper_get_tick() * 1000u / mp_flipper_get_tick_frequency());
+}
+
+// MicroPython HAL hook: monotonic microsecond tick counter (backs
+// time.ticks_us()).
+// BUG FIX: the previous implementation returned
+// mp_flipper_get_tick_frequency() / 1000000 — a constant (0 for a 1 kHz
+// tick) — instead of elapsed time. Convert the tick count to microseconds;
+// 64-bit intermediate avoids overflow of tick * 1000000.
+// NOTE(review): resolution is still limited by the kernel tick frequency.
+mp_uint_t mp_hal_ticks_us(void) {
+    return (mp_uint_t)((uint64_t)mp_flipper_get_tick() * 1000000u / mp_flipper_get_tick_frequency());
+}
+
+// MicroPython HAL hook: finest-grained tick counter available, in raw
+// frequency-dependent units; delegates directly to the port's tick counter.
+mp_uint_t mp_hal_ticks_cpu(void) {
+    return mp_flipper_get_tick();
+}
+
+// MicroPython HAL hook: block for `ms` milliseconds (backs time.sleep_ms()).
+void mp_hal_delay_ms(mp_uint_t ms) {
+    mp_flipper_delay_ms(ms);
+}
+
+// MicroPython HAL hook: block for `us` microseconds (backs time.sleep_us()).
+void mp_hal_delay_us(mp_uint_t us) {
+    mp_flipper_delay_us(us);
+}

+ 13 - 0
mp_flipper/lib/micropython/mp_flipper_modtime.h

@@ -0,0 +1,13 @@
+#pragma once
+
+#include <stdint.h>
+
+// Port hooks implemented by the Flipper firmware and consumed by
+// mp_flipper_modtime.c to back MicroPython's time module and HAL timing.
+// All prototypes use (void): an empty parameter list in C leaves the
+// arguments unspecified rather than declaring "no arguments".
+
+// Current wall-clock timestamp (epoch-based; backs time.time()).
+uint32_t mp_flipper_get_timestamp(void);
+
+// Frequency of the kernel tick counter, in Hz.
+uint32_t mp_flipper_get_tick_frequency(void);
+
+// Current raw kernel tick count.
+uint32_t mp_flipper_get_tick(void);
+
+// Blocking delay of `ms` milliseconds.
+void mp_flipper_delay_ms(uint32_t ms);
+
+// Blocking delay of `us` microseconds.
+void mp_flipper_delay_us(uint32_t us);

+ 12 - 0
mp_flipper/lib/micropython/mp_flipper_repl.c

@@ -0,0 +1,12 @@
+#include "py/repl.h"
+#include "py/mpprint.h"
+
+#include "mp_flipper_repl.h"
+
+// Thin wrapper exposing py/repl.h to the firmware side: returns true when
+// `input` is an incomplete statement and the REPL should keep reading lines.
+// NOTE(review): `inline` is redundant here — the non-inline declaration in
+// mp_flipper_repl.h already forces an external definition (C99 6.7.4).
+inline bool mp_flipper_repl_continue_with_input(const char* input) {
+    return mp_repl_continue_with_input(input);
+}
+
+// Wrapper around MicroPython's REPL tab-completion: completes `str` (length
+// `len`), writing candidate listings via `print`; on a unique match, stores
+// the completion suffix in *compl_str and returns its length.
+inline size_t mp_flipper_repl_autocomplete(const char* str, size_t len, const mp_print_t* print, char** compl_str) {
+    return mp_repl_autocomplete(str, len, print, compl_str);
+}

+ 12 - 0
mp_flipper/lib/micropython/mp_flipper_repl.h

@@ -0,0 +1,12 @@
+#pragma once
+
+#include <stddef.h>
+#include <stdbool.h>
+
+#include "py/mpprint.h"
+
+#include "mp_flipper_runtime.h"
+
+// Firmware-facing REPL helpers (implemented in mp_flipper_repl.c as thin
+// wrappers over py/repl.h).
+
+// True if `input` is an incomplete statement and more lines are needed.
+bool mp_flipper_repl_continue_with_input(const char* input);
+
+// Tab-completion: emits candidates via `print`; on a unique match stores the
+// completion suffix in *compl_str and returns its length.
+size_t mp_flipper_repl_autocomplete(const char* str, size_t len, const mp_print_t* print, char** compl_str);

+ 73 - 0
mp_flipper/lib/micropython/mp_flipper_runtime.c

@@ -0,0 +1,73 @@
+#include <string.h>
+
+#include "py/compile.h"
+#include "py/runtime.h"
+#include "py/persistentcode.h"
+#include "py/gc.h"
+#include "py/stackctrl.h"
+#include "shared/runtime/gchelper.h"
+
+#include "mp_flipper_runtime.h"
+#include "mp_flipper_halport.h"
+
+// Base directory used by the importer to resolve Python module lookups.
+const char* mp_flipper_root_module_path;
+
+// Opaque, port-owned context handle; allocated in mp_flipper_init() and
+// released in mp_flipper_deinit().
+void* mp_flipper_context;
+
+// Set the module-lookup base directory.
+// NOTE(review): only the pointer is stored — the caller must keep `path`
+// alive for the lifetime of the interpreter.
+void mp_flipper_set_root_module_path(const char* path) {
+    mp_flipper_root_module_path = path;
+}
+
+// Bring up the MicroPython runtime: allocate the port context, register the
+// C stack top/limit for overflow checking, hand the heap region to the
+// garbage collector (when enabled), then initialise the interpreter core.
+// `heap`/`heap_size` describe the GC arena; `stack_top` is the current top
+// of the C stack and `stack_size` the budget below it.
+void mp_flipper_init(void* heap, size_t heap_size, size_t stack_size, void* stack_top) {
+    mp_flipper_context = mp_flipper_context_alloc();
+
+    mp_stack_set_top(stack_top);
+    mp_stack_set_limit(stack_size);
+
+#if MICROPY_ENABLE_GC
+    gc_init(heap, (uint8_t*)heap + heap_size);
+#endif
+
+    mp_init();
+}
+
+// Tear down the runtime in reverse order of mp_flipper_init(): run all
+// pending finalisers via a full GC sweep, shut down the interpreter core,
+// then release the port context.
+void mp_flipper_deinit() {
+    gc_sweep_all();
+
+    mp_deinit();
+
+    mp_flipper_context_free(mp_flipper_context);
+}
+
+// Called if an exception is raised outside all C exception-catching handlers.
+// Delegates to the firmware handler, which must not return to the caller.
+void nlr_jump_fail(void* val) {
+    mp_flipper_nlr_jump_fail(val);
+}
+
+#if MICROPY_ENABLE_GC
+// Run a garbage collection cycle.
+// Roots held in CPU registers and on the C stack are scanned by the shared
+// helper between the start and end of the mark/sweep pass.
+void gc_collect(void) {
+    gc_collect_start();
+    gc_helper_collect_regs_and_stack();
+    gc_collect_end();
+}
+#endif
+
+#ifndef NDEBUG
+// Used when debugging is enabled.
+// libc assert() hook: forward failed assertions to the firmware.
+void __assert_func(const char* file, int line, const char* func, const char* expr) {
+    mp_flipper_assert(file, line, func, expr);
+}
+
+// Unrecoverable runtime error hook; delegates to the firmware and must not
+// return (NORETURN).
+void NORETURN __fatal_error(const char* msg) {
+    mp_flipper_fatal_error(msg);
+}
+#endif
+
+// Raise a MicroPython OSError carrying the given errno value (does not
+// return normally — unwinds via NLR).
+void mp_flipper_raise_os_error(int error) {
+    mp_raise_OSError(error);
+}
+
+// Raise a MicroPython OSError with both errno and the offending filename.
+void mp_flipper_raise_os_error_with_filename(int error, const char* filename) {
+    mp_raise_OSError_with_filename(error, filename);
+}

+ 27 - 0
mp_flipper/lib/micropython/mp_flipper_runtime.h

@@ -0,0 +1,27 @@
+#pragma once
+
+#include <stddef.h>
+
+#include "mpconfigport.h"
+
+// Base directory used by the importer to resolve module lookups.
+extern const char* mp_flipper_root_module_path;
+
+// Opaque, port-owned context handle (allocated/freed by the functions below).
+extern void* mp_flipper_context;
+
+void mp_flipper_set_root_module_path(const char* path);
+
+// Runtime lifecycle (implemented in mp_flipper_runtime.c) and firmware hooks
+// (implemented on the Flipper application side).
+void mp_flipper_init(void* memory, size_t memory_size, size_t stack_size, void* stack_top);
+void mp_flipper_save_file(const char* file_path, const char* data, size_t size);
+void mp_flipper_deinit();
+void mp_flipper_nlr_jump_fail(void* value);
+void mp_flipper_assert(const char* file, int line, const char* func, const char* expr);
+void mp_flipper_fatal_error(const char* msg);
+void mp_flipper_raise_os_error(int error);
+void mp_flipper_raise_os_error_with_filename(int error, const char* filename);
+// Print-capture helpers: an opaque buffer that accumulates interpreter
+// output via mp_flipper_print_strn() and exposes the collected bytes.
+const char* mp_flipper_print_get_data(void* data);
+size_t mp_flipper_print_get_data_length(void* data);
+void* mp_flipper_print_data_alloc();
+void mp_flipper_print_strn(void* data, const char* str, size_t length);
+void mp_flipper_print_data_free(void* data);
+// Port context allocation; paired with mp_flipper_init()/mp_flipper_deinit().
+void* mp_flipper_context_alloc();
+void mp_flipper_context_free(void* context);

+ 164 - 0
mp_flipper/lib/micropython/mpconfigport.h

@@ -0,0 +1,164 @@
+// Need to provide a declaration/definition of alloca()
+#if defined(__FreeBSD__) || defined(__NetBSD__)
+#include <stdlib.h>
+#else
+#include <alloca.h>
+#endif
+
+#include <stdint.h>
+
+// Type definitions for the specific machine
+typedef int32_t mp_int_t; // must be pointer size
+typedef uint32_t mp_uint_t; // must be pointer size
+typedef long mp_off_t;
+
+#define MICROPY_GC_SPLIT_HEAP (1)
+#define MICROPY_GC_SPLIT_HEAP_AUTO (1)
+
+#define MICROPY_MPHALPORT_H "mp_flipper_halport.h"
+
+#define MICROPY_MIN_USE_CORTEX_CPU (1)
+#define MICROPY_MIN_USE_STM32_MCU (1)
+
+#define MICROPY_HW_BOARD_NAME "Flipper Zero"
+#define MICROPY_HW_MCU_NAME "STM32WB55RG"
+
+#define MICROPY_CONFIG_ROM_LEVEL (MICROPY_CONFIG_ROM_LEVEL_CORE_FEATURES)
+
+#define MICROPY_EMIT_THUMB_ARMV7M (0)
+#define MICROPY_EMIT_INLINE_THUMB_FLOAT (0)
+
+#define MICROPY_PERSISTENT_CODE_LOAD (0)
+#define MICROPY_PERSISTENT_CODE_SAVE (0)
+#define MICROPY_PERSISTENT_CODE_SAVE_FILE (0)
+
+#define MICROPY_ENABLE_COMPILER (1)
+#define MICROPY_ENABLE_GC (1)
+#define MICROPY_PY_GC_COLLECT_RETVAL (0)
+#define MICROPY_ENABLE_PYSTACK (0)
+#define MICROPY_STACK_CHECK (0)
+#define MICROPY_ALLOC_PATH_MAX (128)
+
+#define MICROPY_ENABLE_FINALISER (1)
+
+#define MICROPY_ERROR_REPORTING (MICROPY_ERROR_REPORTING_NONE)
+
+#define MICROPY_GC_STACK_ENTRY_TYPE uint32_t
+
+#define MICROPY_PY___FILE__ (1)
+#define MICROPY_ENABLE_EXTERNAL_IMPORT (1)
+#define MICROPY_READER_VFS (1)
+
+#define MICROPY_ENABLE_VM_ABORT (0)
+
+#define MICROPY_PY_ERRNO (0)
+#define MICROPY_USE_INTERNAL_ERRNO (0)
+#define MICROPY_PY_ERRNO_ERRORCODE (0)
+
+#define MICROPY_PY_TIME (1)
+#define MICROPY_PY_TIME_TIME_TIME_NS (1)
+
+#define MICROPY_PY_RANDOM (1)
+#define MICROPY_PY_RANDOM_EXTRA_FUNCS (0)
+
+#define MICROPY_PY_RANDOM_SEED_INIT_FUNC (mp_flipper_seed_init())
+
+#define MICROPY_LONGINT_IMPL (MICROPY_LONGINT_IMPL_LONGLONG)
+
+#define MICROPY_FLOAT_IMPL (MICROPY_FLOAT_IMPL_FLOAT)
+
+#define MICROPY_ENABLE_SOURCE_LINE (0)
+#define MICROPY_ENABLE_DOC_STRING (0)
+
+#define MICROPY_HELPER_REPL (1)
+#define MICROPY_REPL_INFO (0)
+#define MICROPY_REPL_EMACS_KEYS (0)
+#define MICROPY_REPL_EMACS_WORDS_MOVE (0)
+#define MICROPY_REPL_EMACS_EXTRA_WORDS_MOVE (0)
+#define MICROPY_REPL_AUTO_INDENT (0)
+#define MICROPY_REPL_EVENT_DRIVEN (0)
+#define MICROPY_READLINE_HISTORY_SIZE (0)
+
+#define MICROPY_CPYTHON_COMPAT (1)
+#define MICROPY_FULL_CHECKS (0)
+#define MICROPY_BUILTIN_METHOD_CHECK_SELF_ARG (0)
+
+#define MICROPY_MODULE_FROZEN_MPY (0)
+
+#define MICROPY_PY_CMATH (0)
+#define MICROPY_PY_BUILTINS_COMPLEX (0)
+#define MICROPY_MULTIPLE_INHERITANCE (0)
+#define MICROPY_MODULE_GETATTR (0)
+#define MICROPY_PY_FUNCTION_ATTRS (1)
+#define MICROPY_PY_DESCRIPTORS (0)
+#define MICROPY_PY_ASYNC_AWAIT (0)
+#define MICROPY_PY_ASSIGN_EXPR (0)
+#define MICROPY_PY_GENERATOR_PEND_THROW (0)
+#define MICROPY_PY_BUILTINS_BYTES_HEX (0)
+#define MICROPY_PY_BUILTINS_STR_UNICODE (0)
+#define MICROPY_PY_BUILTINS_STR_CENTER (0)
+#define MICROPY_PY_BUILTINS_STR_COUNT (0)
+#define MICROPY_PY_BUILTINS_STR_OP_MODULO (1)
+#define MICROPY_PY_BUILTINS_STR_PARTITION (0)
+#define MICROPY_PY_BUILTINS_STR_SPLITLINES (0)
+#define MICROPY_PY_BUILTINS_BYTEARRAY (0)
+#define MICROPY_PY_BUILTINS_DICT_FROMKEYS (0)
+#define MICROPY_PY_BUILTINS_MEMORYVIEW (0)
+#define MICROPY_PY_BUILTINS_SET (1)
+#define MICROPY_PY_BUILTINS_SLICE (0)
+#define MICROPY_PY_BUILTINS_SLICE_ATTRS (0)
+#define MICROPY_PY_BUILTINS_SLICE_INDICES (0)
+#define MICROPY_PY_BUILTINS_FROZENSET (0)
+#define MICROPY_PY_BUILTINS_PROPERTY (0)
+#define MICROPY_PY_BUILTINS_RANGE_ATTRS (0)
+#define MICROPY_PY_BUILTINS_RANGE_BINOP (0)
+#define MICROPY_PY_BUILTINS_NEXT2 (0)
+#define MICROPY_PY_BUILTINS_ROUND_INT (0)
+#define MICROPY_PY_ALL_SPECIAL_METHODS (0)
+#define MICROPY_PY_REVERSE_SPECIAL_METHODS (0)
+#define MICROPY_PY_BUILTINS_ENUMERATE (0)
+#define MICROPY_PY_BUILTINS_COMPILE (0)
+#define MICROPY_PY_BUILTINS_EVAL_EXEC (0)
+#define MICROPY_PY_BUILTINS_EXECFILE (0)
+#define MICROPY_PY_BUILTINS_FILTER (1)
+#define MICROPY_PY_BUILTINS_REVERSED (1)
+#define MICROPY_PY_BUILTINS_NOTIMPLEMENTED (0)
+#define MICROPY_PY_BUILTINS_INPUT (0)
+#define MICROPY_PY_BUILTINS_MIN_MAX (1)
+#define MICROPY_PY_BUILTINS_POW3 (0)
+#define MICROPY_PY_BUILTINS_HELP (0)
+#define MICROPY_PY_MICROPYTHON (0)
+#define MICROPY_PY_MICROPYTHON_MEM_INFO (0)
+#define MICROPY_PY_MICROPYTHON_STACK_USE (0)
+#define MICROPY_PY_MICROPYTHON_HEAP_LOCKED (0)
+#define MICROPY_PY_ARRAY (0)
+#define MICROPY_PY_ARRAY_SLICE_ASSIGN (0)
+#define MICROPY_PY_ATTRTUPLE (0)
+#define MICROPY_PY_COLLECTIONS (0)
+#define MICROPY_PY_STRUCT (0)
+#define MICROPY_PY_GC (0)
+#define MICROPY_PY_SYS (0)
+#define MICROPY_PY_SYS_MODULES (0)
+#define MICROPY_PY_SELECT_SELECT (0)
+#define MICROPY_PY_SYS_EXIT (0)
+#define MICROPY_PY_RE (0)
+#define MICROPY_PY_CRYPTOLIB (0)
+#define MICROPY_PY_VFS (0)
+#define MICROPY_ENABLE_SCHEDULER (1)
+#define MICROPY_MODULE_BUILTIN_INIT (1)
+
+#define MICROPY_PY_MATH (0)
+
+#define MICROPY_PY_IO (0)
+#define MICROPY_PY_IO_BYTESIO (0)
+
+#define MICROPY_PY_JSON (0)
+#define MICROPY_PY_JSON_SEPARATORS (0)
+
+#define MICROPY_COMP_CONST_FOLDING (0)
+#define MICROPY_COMP_CONST_TUPLE (0)
+#define MICROPY_COMP_CONST_LITERAL (0)
+#define MICROPY_COMP_CONST (0)
+#define MICROPY_COMP_DOUBLE_TUPLE_ASSIGN (0)
+
+#define MICROPY_USE_INTERNAL_PRINTF (0)

+ 148 - 0
mp_flipper/lib/micropython/py/argcheck.c

@@ -0,0 +1,148 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdlib.h>
+#include <assert.h>
+
+#include "py/runtime.h"
+
+// Validate the number of positional/keyword args against a packed signature
+// word (see MP_OBJ_FUN_MAKE_SIG): bit 0 = accepts keywords, bits 1..16 =
+// maximum positional count, bits 17.. = minimum positional count. Raises
+// TypeError (or the terse variant) on mismatch; returns normally otherwise.
+void mp_arg_check_num_sig(size_t n_args, size_t n_kw, uint32_t sig) {
+    // TODO maybe take the function name as an argument so we can print nicer error messages
+
+    // The reverse of MP_OBJ_FUN_MAKE_SIG
+    bool takes_kw = sig & 1;
+    size_t n_args_min = sig >> 17;
+    size_t n_args_max = (sig >> 1) & 0xffff;
+
+    if (n_kw && !takes_kw) {
+        #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
+        mp_arg_error_terse_mismatch();
+        #else
+        mp_raise_TypeError(MP_ERROR_TEXT("function doesn't take keyword arguments"));
+        #endif
+    }
+
+    // Exact-arity functions get a precise message; ranged ones report which
+    // bound was violated.
+    if (n_args_min == n_args_max) {
+        if (n_args != n_args_min) {
+            #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
+            mp_arg_error_terse_mismatch();
+            #else
+            mp_raise_msg_varg(&mp_type_TypeError,
+                MP_ERROR_TEXT("function takes %d positional arguments but %d were given"),
+                n_args_min, n_args);
+            #endif
+        }
+    } else {
+        if (n_args < n_args_min) {
+            #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
+            mp_arg_error_terse_mismatch();
+            #else
+            mp_raise_msg_varg(&mp_type_TypeError,
+                MP_ERROR_TEXT("function missing %d required positional arguments"),
+                n_args_min - n_args);
+            #endif
+        } else if (n_args > n_args_max) {
+            #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
+            mp_arg_error_terse_mismatch();
+            #else
+            mp_raise_msg_varg(&mp_type_TypeError,
+                MP_ERROR_TEXT("function expected at most %d arguments, got %d"),
+                n_args_max, n_args);
+            #endif
+        }
+    }
+}
+
+// Parse positional args `pos` (count n_pos) and keyword args `kws` against
+// the `allowed` descriptor table, filling out_vals[i] for each entry:
+// positional slots are consumed in order, remaining entries are looked up by
+// keyword, and absent optional entries fall back to their declared default.
+// Raises TypeError for kw-only args passed positionally, missing required
+// args, or leftover positional/keyword arguments.
+void mp_arg_parse_all(size_t n_pos, const mp_obj_t *pos, mp_map_t *kws, size_t n_allowed, const mp_arg_t *allowed, mp_arg_val_t *out_vals) {
+    size_t pos_found = 0, kws_found = 0;
+    for (size_t i = 0; i < n_allowed; i++) {
+        mp_obj_t given_arg;
+        if (i < n_pos) {
+            if (allowed[i].flags & MP_ARG_KW_ONLY) {
+                // A keyword-only slot received a positional value: reuse the
+                // "extra positional" error path below.
+                goto extra_positional;
+            }
+            pos_found++;
+            given_arg = pos[i];
+        } else {
+            mp_map_elem_t *kw = mp_map_lookup(kws, MP_OBJ_NEW_QSTR(allowed[i].qst), MP_MAP_LOOKUP);
+            if (kw == NULL) {
+                if (allowed[i].flags & MP_ARG_REQUIRED) {
+                    #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
+                    mp_arg_error_terse_mismatch();
+                    #else
+                    mp_raise_msg_varg(&mp_type_TypeError, MP_ERROR_TEXT("'%q' argument required"), allowed[i].qst);
+                    #endif
+                }
+                out_vals[i] = allowed[i].defval;
+                continue;
+            } else {
+                kws_found++;
+                given_arg = kw->value;
+            }
+        }
+        // Coerce the raw object according to the slot's declared kind.
+        if ((allowed[i].flags & MP_ARG_KIND_MASK) == MP_ARG_BOOL) {
+            out_vals[i].u_bool = mp_obj_is_true(given_arg);
+        } else if ((allowed[i].flags & MP_ARG_KIND_MASK) == MP_ARG_INT) {
+            out_vals[i].u_int = mp_obj_get_int(given_arg);
+        } else {
+            assert((allowed[i].flags & MP_ARG_KIND_MASK) == MP_ARG_OBJ);
+            out_vals[i].u_obj = given_arg;
+        }
+    }
+    // Fewer positional slots consumed than supplied => surplus positionals.
+    if (pos_found < n_pos) {
+    extra_positional:
+        #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
+        mp_arg_error_terse_mismatch();
+        #else
+        // TODO better error message
+        mp_raise_TypeError(MP_ERROR_TEXT("extra positional arguments given"));
+        #endif
+    }
+    // Some supplied keywords were never matched by the allowed table.
+    if (kws_found < kws->used) {
+        #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
+        mp_arg_error_terse_mismatch();
+        #else
+        // TODO better error message
+        mp_raise_TypeError(MP_ERROR_TEXT("extra keyword arguments given"));
+        #endif
+    }
+}
+
+// Convenience wrapper for the flat calling convention where keyword args
+// follow the positionals in `args` as n_kw (key, value) pairs: builds a
+// fixed map view over that tail and delegates to mp_arg_parse_all().
+void mp_arg_parse_all_kw_array(size_t n_pos, size_t n_kw, const mp_obj_t *args, size_t n_allowed, const mp_arg_t *allowed, mp_arg_val_t *out_vals) {
+    mp_map_t kw_args;
+    mp_map_init_fixed_table(&kw_args, n_kw, args + n_pos);
+    mp_arg_parse_all(n_pos, args, &kw_args, n_allowed, allowed, out_vals);
+}
+
+// Terse TypeError used when MICROPY_ERROR_REPORTING is at the TERSE level;
+// never returns.
+NORETURN void mp_arg_error_terse_mismatch(void) {
+    mp_raise_TypeError(MP_ERROR_TEXT("argument num/types mismatch"));
+}
+
+#if MICROPY_CPYTHON_COMPAT
+// Raised by builtins that accept keyword syntax in CPython but have not
+// implemented it here; never returns.
+NORETURN void mp_arg_error_unimpl_kw(void) {
+    mp_raise_NotImplementedError(MP_ERROR_TEXT("keyword argument(s) not implemented - use normal args instead"));
+}
+#endif

+ 399 - 0
mp_flipper/lib/micropython/py/asmarm.c

@@ -0,0 +1,399 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2014 Fabian Vogt
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdio.h>
+#include <assert.h>
+#include <string.h>
+
+#include "py/mpconfig.h"
+
+// wrapper around everything in this file
+#if MICROPY_EMIT_ARM
+
+#include "py/asmarm.h"
+
+#define SIGNED_FIT24(x) (((x) & 0xff800000) == 0) || (((x) & 0xff000000) == 0xff000000)
+
+// Insert word into instruction flow
+static void emit(asm_arm_t *as, uint op) {
+    uint8_t *c = mp_asm_base_get_cur_to_write_bytes(&as->base, 4);
+    if (c != NULL) {
+        *(uint32_t *)c = op;
+    }
+}
+
+// Insert word into instruction flow, add "ALWAYS" condition code
+static void emit_al(asm_arm_t *as, uint op) {
+    emit(as, op | ASM_ARM_CC_AL);
+}
+
+// Basic instructions without condition code
+static uint asm_arm_op_push(uint reglist) {
+    // stmfd sp!, {reglist}
+    return 0x92d0000 | (reglist & 0xFFFF);
+}
+
+static uint asm_arm_op_pop(uint reglist) {
+    // ldmfd sp!, {reglist}
+    return 0x8bd0000 | (reglist & 0xFFFF);
+}
+
+static uint asm_arm_op_mov_reg(uint rd, uint rn) {
+    // mov rd, rn
+    return 0x1a00000 | (rd << 12) | rn;
+}
+
+static uint asm_arm_op_mov_imm(uint rd, uint imm) {
+    // mov rd, #imm
+    return 0x3a00000 | (rd << 12) | imm;
+}
+
+static uint asm_arm_op_mvn_imm(uint rd, uint imm) {
+    // mvn rd, #imm
+    return 0x3e00000 | (rd << 12) | imm;
+}
+
+static uint asm_arm_op_mvn_reg(uint rd, uint rm) {
+    // mvn rd, rm
+    return 0x1e00000 | (rd << 12) | rm;
+}
+
+static uint asm_arm_op_add_imm(uint rd, uint rn, uint imm) {
+    // add rd, rn, #imm
+    return 0x2800000 | (rn << 16) | (rd << 12) | (imm & 0xFF);
+}
+
+static uint asm_arm_op_add_reg(uint rd, uint rn, uint rm) {
+    // add rd, rn, rm
+    return 0x0800000 | (rn << 16) | (rd << 12) | rm;
+}
+
+static uint asm_arm_op_sub_imm(uint rd, uint rn, uint imm) {
+    // sub rd, rn, #imm
+    return 0x2400000 | (rn << 16) | (rd << 12) | (imm & 0xFF);
+}
+
+static uint asm_arm_op_sub_reg(uint rd, uint rn, uint rm) {
+    // sub rd, rn, rm
+    return 0x0400000 | (rn << 16) | (rd << 12) | rm;
+}
+
+static uint asm_arm_op_rsb_imm(uint rd, uint rn, uint imm) {
+    // rsb rd, rn, #imm
+    return 0x2600000 | (rn << 16) | (rd << 12) | (imm & 0xFF);
+}
+
+static uint asm_arm_op_mul_reg(uint rd, uint rm, uint rs) {
+    // mul rd, rm, rs
+    assert(rd != rm);
+    return 0x0000090 | (rd << 16) | (rs << 8) | rm;
+}
+
+static uint asm_arm_op_and_reg(uint rd, uint rn, uint rm) {
+    // and rd, rn, rm
+    return 0x0000000 | (rn << 16) | (rd << 12) | rm;
+}
+
+static uint asm_arm_op_eor_reg(uint rd, uint rn, uint rm) {
+    // eor rd, rn, rm
+    return 0x0200000 | (rn << 16) | (rd << 12) | rm;
+}
+
+static uint asm_arm_op_orr_reg(uint rd, uint rn, uint rm) {
+    // orr rd, rn, rm
+    return 0x1800000 | (rn << 16) | (rd << 12) | rm;
+}
+
+void asm_arm_bkpt(asm_arm_t *as) {
+    // bkpt #0
+    emit_al(as, 0x1200070);
+}
+
+// locals:
+//  - stored on the stack in ascending order
+//  - numbered 0 through num_locals-1
+//  - SP points to first local
+//
+//  | SP
+//  v
+//  l0  l1  l2  ...  l(n-1)
+//  ^                ^
+//  | low address    | high address in RAM
+
+void asm_arm_entry(asm_arm_t *as, int num_locals) {
+    assert(num_locals >= 0);
+
+    as->stack_adjust = 0;
+    as->push_reglist = 1 << ASM_ARM_REG_R1
+        | 1 << ASM_ARM_REG_R2
+        | 1 << ASM_ARM_REG_R3
+        | 1 << ASM_ARM_REG_R4
+        | 1 << ASM_ARM_REG_R5
+        | 1 << ASM_ARM_REG_R6
+        | 1 << ASM_ARM_REG_R7
+        | 1 << ASM_ARM_REG_R8;
+
+    // Only adjust the stack if there are more locals than usable registers
+    if (num_locals > 3) {
+        as->stack_adjust = num_locals * 4;
+        // Align stack to 8 bytes
+        if (num_locals & 1) {
+            as->stack_adjust += 4;
+        }
+    }
+
+    emit_al(as, asm_arm_op_push(as->push_reglist | 1 << ASM_ARM_REG_LR));
+    if (as->stack_adjust > 0) {
+        emit_al(as, asm_arm_op_sub_imm(ASM_ARM_REG_SP, ASM_ARM_REG_SP, as->stack_adjust));
+    }
+}
+
+void asm_arm_exit(asm_arm_t *as) {
+    if (as->stack_adjust > 0) {
+        emit_al(as, asm_arm_op_add_imm(ASM_ARM_REG_SP, ASM_ARM_REG_SP, as->stack_adjust));
+    }
+
+    emit_al(as, asm_arm_op_pop(as->push_reglist | (1 << ASM_ARM_REG_PC)));
+}
+
+void asm_arm_push(asm_arm_t *as, uint reglist) {
+    emit_al(as, asm_arm_op_push(reglist));
+}
+
+void asm_arm_pop(asm_arm_t *as, uint reglist) {
+    emit_al(as, asm_arm_op_pop(reglist));
+}
+
+void asm_arm_mov_reg_reg(asm_arm_t *as, uint reg_dest, uint reg_src) {
+    emit_al(as, asm_arm_op_mov_reg(reg_dest, reg_src));
+}
+
+size_t asm_arm_mov_reg_i32(asm_arm_t *as, uint rd, int imm) {
+    // Insert immediate into code and jump over it
+    emit_al(as, 0x59f0000 | (rd << 12)); // ldr rd, [pc]
+    emit_al(as, 0xa000000); // b pc
+    size_t loc = mp_asm_base_get_code_pos(&as->base);
+    emit(as, imm);
+    return loc;
+}
+
+void asm_arm_mov_reg_i32_optimised(asm_arm_t *as, uint rd, int imm) {
+    // TODO: There are more variants of immediate values
+    if ((imm & 0xFF) == imm) {
+        emit_al(as, asm_arm_op_mov_imm(rd, imm));
+    } else if (imm < 0 && imm >= -256) {
+        // mvn is "move not", not "move negative"
+        emit_al(as, asm_arm_op_mvn_imm(rd, ~imm));
+    } else {
+        asm_arm_mov_reg_i32(as, rd, imm);
+    }
+}
+
+void asm_arm_mov_local_reg(asm_arm_t *as, int local_num, uint rd) {
+    // str rd, [sp, #local_num*4]
+    emit_al(as, 0x58d0000 | (rd << 12) | (local_num << 2));
+}
+
+void asm_arm_mov_reg_local(asm_arm_t *as, uint rd, int local_num) {
+    // ldr rd, [sp, #local_num*4]
+    emit_al(as, 0x59d0000 | (rd << 12) | (local_num << 2));
+}
+
+void asm_arm_cmp_reg_i8(asm_arm_t *as, uint rd, int imm) {
+    // cmp rd, #imm
+    emit_al(as, 0x3500000 | (rd << 16) | (imm & 0xFF));
+}
+
+void asm_arm_cmp_reg_reg(asm_arm_t *as, uint rd, uint rn) {
+    // cmp rd, rn
+    emit_al(as, 0x1500000 | (rd << 16) | rn);
+}
+
+void asm_arm_setcc_reg(asm_arm_t *as, uint rd, uint cond) {
+    emit(as, asm_arm_op_mov_imm(rd, 1) | cond); // movCOND rd, #1
+    emit(as, asm_arm_op_mov_imm(rd, 0) | (cond ^ (1 << 28))); // mov!COND rd, #0
+}
+
+void asm_arm_mvn_reg_reg(asm_arm_t *as, uint rd, uint rm) {
+    // mvn rd, rm
+    // computes: rd := ~rm
+    emit_al(as, asm_arm_op_mvn_reg(rd, rm));
+}
+
+void asm_arm_add_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm) {
+    // add rd, rn, rm
+    emit_al(as, asm_arm_op_add_reg(rd, rn, rm));
+}
+
+void asm_arm_rsb_reg_reg_imm(asm_arm_t *as, uint rd, uint rn, uint imm) {
+    // rsb rd, rn, #imm
+    // computes: rd := #imm - rn
+    emit_al(as, asm_arm_op_rsb_imm(rd, rn, imm));
+}
+
+void asm_arm_sub_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm) {
+    // sub rd, rn, rm
+    emit_al(as, asm_arm_op_sub_reg(rd, rn, rm));
+}
+
+void asm_arm_mul_reg_reg_reg(asm_arm_t *as, uint rd, uint rs, uint rm) {
+    // rs and rm are swapped because of restriction rd!=rm
+    // mul rd, rm, rs
+    emit_al(as, asm_arm_op_mul_reg(rd, rm, rs));
+}
+
+void asm_arm_and_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm) {
+    // and rd, rn, rm
+    emit_al(as, asm_arm_op_and_reg(rd, rn, rm));
+}
+
+void asm_arm_eor_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm) {
+    // eor rd, rn, rm
+    emit_al(as, asm_arm_op_eor_reg(rd, rn, rm));
+}
+
+void asm_arm_orr_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm) {
+    // orr rd, rn, rm
+    emit_al(as, asm_arm_op_orr_reg(rd, rn, rm));
+}
+
+void asm_arm_mov_reg_local_addr(asm_arm_t *as, uint rd, int local_num) {
+    // add rd, sp, #local_num*4
+    emit_al(as, asm_arm_op_add_imm(rd, ASM_ARM_REG_SP, local_num << 2));
+}
+
+void asm_arm_mov_reg_pcrel(asm_arm_t *as, uint reg_dest, uint label) {
+    assert(label < as->base.max_num_labels);
+    mp_uint_t dest = as->base.label_offsets[label];
+    mp_int_t rel = dest - as->base.code_offset;
+    rel -= 12 + 8; // adjust for load of rel, and then PC+8 prefetch of add_reg_reg_reg
+
+    // To load rel int reg_dest, insert immediate into code and jump over it
+    emit_al(as, 0x59f0000 | (reg_dest << 12)); // ldr rd, [pc]
+    emit_al(as, 0xa000000); // b pc
+    emit(as, rel);
+
+    // Do reg_dest += PC
+    asm_arm_add_reg_reg_reg(as, reg_dest, reg_dest, ASM_ARM_REG_PC);
+}
+
+void asm_arm_lsl_reg_reg(asm_arm_t *as, uint rd, uint rs) {
+    // mov rd, rd, lsl rs
+    emit_al(as, 0x1a00010 | (rd << 12) | (rs << 8) | rd);
+}
+
+void asm_arm_lsr_reg_reg(asm_arm_t *as, uint rd, uint rs) {
+    // mov rd, rd, lsr rs
+    emit_al(as, 0x1a00030 | (rd << 12) | (rs << 8) | rd);
+}
+
+void asm_arm_asr_reg_reg(asm_arm_t *as, uint rd, uint rs) {
+    // mov rd, rd, asr rs
+    emit_al(as, 0x1a00050 | (rd << 12) | (rs << 8) | rd);
+}
+
+void asm_arm_ldr_reg_reg(asm_arm_t *as, uint rd, uint rn, uint byte_offset) {
+    // ldr rd, [rn, #off]
+    emit_al(as, 0x5900000 | (rn << 16) | (rd << 12) | byte_offset);
+}
+
+void asm_arm_ldrh_reg_reg(asm_arm_t *as, uint rd, uint rn) {
+    // ldrh rd, [rn]
+    emit_al(as, 0x1d000b0 | (rn << 16) | (rd << 12));
+}
+
+void asm_arm_ldrh_reg_reg_offset(asm_arm_t *as, uint rd, uint rn, uint byte_offset) {
+    // ldrh rd, [rn, #off]
+    emit_al(as, 0x1f000b0 | (rn << 16) | (rd << 12) | ((byte_offset & 0xf0) << 4) | (byte_offset & 0xf));
+}
+
+void asm_arm_ldrb_reg_reg(asm_arm_t *as, uint rd, uint rn) {
+    // ldrb rd, [rn]
+    emit_al(as, 0x5d00000 | (rn << 16) | (rd << 12));
+}
+
+void asm_arm_str_reg_reg(asm_arm_t *as, uint rd, uint rm, uint byte_offset) {
+    // str rd, [rm, #off]
+    emit_al(as, 0x5800000 | (rm << 16) | (rd << 12) | byte_offset);
+}
+
+void asm_arm_strh_reg_reg(asm_arm_t *as, uint rd, uint rm) {
+    // strh rd, [rm]
+    emit_al(as, 0x1c000b0 | (rm << 16) | (rd << 12));
+}
+
+void asm_arm_strb_reg_reg(asm_arm_t *as, uint rd, uint rm) {
+    // strb rd, [rm]
+    emit_al(as, 0x5c00000 | (rm << 16) | (rd << 12));
+}
+
+void asm_arm_str_reg_reg_reg(asm_arm_t *as, uint rd, uint rm, uint rn) {
+    // str rd, [rm, rn, lsl #2]
+    emit_al(as, 0x7800100 | (rm << 16) | (rd << 12) | rn);
+}
+
+void asm_arm_strh_reg_reg_reg(asm_arm_t *as, uint rd, uint rm, uint rn) {
+    // strh doesn't support scaled register index
+    emit_al(as, 0x1a00080 | (ASM_ARM_REG_R8 << 12) | rn); // mov r8, rn, lsl #1
+    emit_al(as, 0x18000b0 | (rm << 16) | (rd << 12) | ASM_ARM_REG_R8); // strh rd, [rm, r8]
+}
+
+void asm_arm_strb_reg_reg_reg(asm_arm_t *as, uint rd, uint rm, uint rn) {
+    // strb rd, [rm, rn]
+    emit_al(as, 0x7c00000 | (rm << 16) | (rd << 12) | rn);
+}
+
+void asm_arm_bcc_label(asm_arm_t *as, int cond, uint label) {
+    assert(label < as->base.max_num_labels);
+    mp_uint_t dest = as->base.label_offsets[label];
+    mp_int_t rel = dest - as->base.code_offset;
+    rel -= 8; // account for instruction prefetch, PC is 8 bytes ahead of this instruction
+    rel >>= 2; // in ARM mode the branch target is 32-bit aligned, so the 2 LSB are omitted
+
+    if (SIGNED_FIT24(rel)) {
+        emit(as, cond | 0xa000000 | (rel & 0xffffff));
+    } else {
+        printf("asm_arm_bcc: branch does not fit in 24 bits\n");
+    }
+}
+
+void asm_arm_b_label(asm_arm_t *as, uint label) {
+    asm_arm_bcc_label(as, ASM_ARM_CC_AL, label);
+}
+
+void asm_arm_bl_ind(asm_arm_t *as, uint fun_id, uint reg_temp) {
+    // The table offset should fit into the ldr instruction
+    assert(fun_id < (0x1000 / 4));
+    emit_al(as, asm_arm_op_mov_reg(ASM_ARM_REG_LR, ASM_ARM_REG_PC)); // mov lr, pc
+    emit_al(as, 0x597f000 | (fun_id << 2)); // ldr pc, [r7, #fun_id*4]
+}
+
+void asm_arm_bx_reg(asm_arm_t *as, uint reg_src) {
+    emit_al(as, 0x012fff10 | reg_src);
+}
+
+#endif // MICROPY_EMIT_ARM

+ 220 - 0
mp_flipper/lib/micropython/py/asmarm.h

@@ -0,0 +1,220 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2014 Fabian Vogt
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_ASMARM_H
+#define MICROPY_INCLUDED_PY_ASMARM_H
+
+#include "py/misc.h"
+#include "py/asmbase.h"
+
+#define ASM_ARM_REG_R0  (0)
+#define ASM_ARM_REG_R1  (1)
+#define ASM_ARM_REG_R2  (2)
+#define ASM_ARM_REG_R3  (3)
+#define ASM_ARM_REG_R4  (4)
+#define ASM_ARM_REG_R5  (5)
+#define ASM_ARM_REG_R6  (6)
+#define ASM_ARM_REG_R7  (7)
+#define ASM_ARM_REG_R8  (8)
+#define ASM_ARM_REG_R9  (9)
+#define ASM_ARM_REG_R10 (10)
+#define ASM_ARM_REG_R11 (11)
+#define ASM_ARM_REG_R12 (12)
+#define ASM_ARM_REG_R13 (13)
+#define ASM_ARM_REG_R14 (14)
+#define ASM_ARM_REG_R15 (15)
+#define ASM_ARM_REG_SP  (ASM_ARM_REG_R13)
+#define ASM_ARM_REG_LR  (ASM_ARM_REG_R14)
+#define ASM_ARM_REG_PC  (ASM_ARM_REG_R15)
+
+#define ASM_ARM_CC_EQ (0x0 << 28)
+#define ASM_ARM_CC_NE (0x1 << 28)
+#define ASM_ARM_CC_CS (0x2 << 28)
+#define ASM_ARM_CC_CC (0x3 << 28)
+#define ASM_ARM_CC_MI (0x4 << 28)
+#define ASM_ARM_CC_PL (0x5 << 28)
+#define ASM_ARM_CC_VS (0x6 << 28)
+#define ASM_ARM_CC_VC (0x7 << 28)
+#define ASM_ARM_CC_HI (0x8 << 28)
+#define ASM_ARM_CC_LS (0x9 << 28)
+#define ASM_ARM_CC_GE (0xa << 28)
+#define ASM_ARM_CC_LT (0xb << 28)
+#define ASM_ARM_CC_GT (0xc << 28)
+#define ASM_ARM_CC_LE (0xd << 28)
+#define ASM_ARM_CC_AL (0xe << 28)
+
+// Per-function state of the ARM assembler backend: extends the generic
+// assembler base with prologue information that the epilogue must mirror.
+typedef struct _asm_arm_t {
+    mp_asm_base_t base;
+    uint push_reglist;   // register list pushed on entry, popped on exit
+    uint stack_adjust;   // extra stack reserved for locals on entry (units per asm_arm_entry — not visible here)
+} asm_arm_t;
+
+// Nothing to finalise at the end of an assembler pass for this backend.
+static inline void asm_arm_end_pass(asm_arm_t *as) {
+    (void)as;
+}
+
+void asm_arm_entry(asm_arm_t *as, int num_locals);
+void asm_arm_exit(asm_arm_t *as);
+
+void asm_arm_bkpt(asm_arm_t *as);
+
+// mov
+void asm_arm_mov_reg_reg(asm_arm_t *as, uint reg_dest, uint reg_src);
+size_t asm_arm_mov_reg_i32(asm_arm_t *as, uint rd, int imm);
+void asm_arm_mov_reg_i32_optimised(asm_arm_t *as, uint rd, int imm);
+void asm_arm_mov_local_reg(asm_arm_t *as, int local_num, uint rd);
+void asm_arm_mov_reg_local(asm_arm_t *as, uint rd, int local_num);
+void asm_arm_setcc_reg(asm_arm_t *as, uint rd, uint cond);
+
+// compare
+void asm_arm_cmp_reg_i8(asm_arm_t *as, uint rd, int imm);
+void asm_arm_cmp_reg_reg(asm_arm_t *as, uint rd, uint rn);
+
+// arithmetic
+void asm_arm_mvn_reg_reg(asm_arm_t *as, uint rd, uint rm);
+void asm_arm_add_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm);
+void asm_arm_sub_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm);
+void asm_arm_rsb_reg_reg_imm(asm_arm_t *as, uint rd, uint rn, uint imm);
+void asm_arm_mul_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm);
+void asm_arm_and_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm);
+void asm_arm_eor_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm);
+void asm_arm_orr_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm);
+void asm_arm_mov_reg_local_addr(asm_arm_t *as, uint rd, int local_num);
+void asm_arm_mov_reg_pcrel(asm_arm_t *as, uint reg_dest, uint label);
+void asm_arm_lsl_reg_reg(asm_arm_t *as, uint rd, uint rs);
+void asm_arm_lsr_reg_reg(asm_arm_t *as, uint rd, uint rs);
+void asm_arm_asr_reg_reg(asm_arm_t *as, uint rd, uint rs);
+
+// memory
+void asm_arm_ldr_reg_reg(asm_arm_t *as, uint rd, uint rn, uint byte_offset);
+void asm_arm_ldrh_reg_reg(asm_arm_t *as, uint rd, uint rn);
+void asm_arm_ldrh_reg_reg_offset(asm_arm_t *as, uint rd, uint rn, uint byte_offset);
+void asm_arm_ldrb_reg_reg(asm_arm_t *as, uint rd, uint rn);
+void asm_arm_str_reg_reg(asm_arm_t *as, uint rd, uint rm, uint byte_offset);
+void asm_arm_strh_reg_reg(asm_arm_t *as, uint rd, uint rm);
+void asm_arm_strb_reg_reg(asm_arm_t *as, uint rd, uint rm);
+// store to array
+void asm_arm_str_reg_reg_reg(asm_arm_t *as, uint rd, uint rm, uint rn);
+void asm_arm_strh_reg_reg_reg(asm_arm_t *as, uint rd, uint rm, uint rn);
+void asm_arm_strb_reg_reg_reg(asm_arm_t *as, uint rd, uint rm, uint rn);
+
+// stack
+void asm_arm_push(asm_arm_t *as, uint reglist);
+void asm_arm_pop(asm_arm_t *as, uint reglist);
+
+// control flow
+void asm_arm_bcc_label(asm_arm_t *as, int cond, uint label);
+void asm_arm_b_label(asm_arm_t *as, uint label);
+void asm_arm_bl_ind(asm_arm_t *as, uint fun_id, uint reg_temp);
+void asm_arm_bx_reg(asm_arm_t *as, uint reg_src);
+
+// Holds a pointer to mp_fun_table
+#define ASM_ARM_REG_FUN_TABLE ASM_ARM_REG_R7
+
+#if GENERIC_ASM_API
+
+// The following macros provide a (mostly) arch-independent API to
+// generate native code, and are used by the native emitter.
+
+#define ASM_WORD_SIZE (4)
+
+#define REG_RET ASM_ARM_REG_R0
+#define REG_ARG_1 ASM_ARM_REG_R0
+#define REG_ARG_2 ASM_ARM_REG_R1
+#define REG_ARG_3 ASM_ARM_REG_R2
+#define REG_ARG_4 ASM_ARM_REG_R3
+
+#define REG_TEMP0 ASM_ARM_REG_R0
+#define REG_TEMP1 ASM_ARM_REG_R1
+#define REG_TEMP2 ASM_ARM_REG_R2
+
+#define REG_LOCAL_1 ASM_ARM_REG_R4
+#define REG_LOCAL_2 ASM_ARM_REG_R5
+#define REG_LOCAL_3 ASM_ARM_REG_R6
+#define REG_LOCAL_NUM (3)
+
+// Holds a pointer to mp_fun_table
+#define REG_FUN_TABLE ASM_ARM_REG_FUN_TABLE
+
+#define ASM_T               asm_arm_t
+#define ASM_END_PASS        asm_arm_end_pass
+#define ASM_ENTRY           asm_arm_entry
+#define ASM_EXIT            asm_arm_exit
+
+#define ASM_JUMP            asm_arm_b_label
+#define ASM_JUMP_IF_REG_ZERO(as, reg, label, bool_test) \
+    do { \
+        asm_arm_cmp_reg_i8(as, reg, 0); \
+        asm_arm_bcc_label(as, ASM_ARM_CC_EQ, label); \
+    } while (0)
+#define ASM_JUMP_IF_REG_NONZERO(as, reg, label, bool_test) \
+    do { \
+        asm_arm_cmp_reg_i8(as, reg, 0); \
+        asm_arm_bcc_label(as, ASM_ARM_CC_NE, label); \
+    } while (0)
+#define ASM_JUMP_IF_REG_EQ(as, reg1, reg2, label) \
+    do { \
+        asm_arm_cmp_reg_reg(as, reg1, reg2); \
+        asm_arm_bcc_label(as, ASM_ARM_CC_EQ, label); \
+    } while (0)
+#define ASM_JUMP_REG(as, reg) asm_arm_bx_reg((as), (reg))
+#define ASM_CALL_IND(as, idx) asm_arm_bl_ind(as, idx, ASM_ARM_REG_R3)
+
+#define ASM_MOV_LOCAL_REG(as, local_num, reg_src) asm_arm_mov_local_reg((as), (local_num), (reg_src))
+#define ASM_MOV_REG_IMM(as, reg_dest, imm) asm_arm_mov_reg_i32_optimised((as), (reg_dest), (imm))
+#define ASM_MOV_REG_LOCAL(as, reg_dest, local_num) asm_arm_mov_reg_local((as), (reg_dest), (local_num))
+#define ASM_MOV_REG_REG(as, reg_dest, reg_src) asm_arm_mov_reg_reg((as), (reg_dest), (reg_src))
+#define ASM_MOV_REG_LOCAL_ADDR(as, reg_dest, local_num) asm_arm_mov_reg_local_addr((as), (reg_dest), (local_num))
+#define ASM_MOV_REG_PCREL(as, reg_dest, label) asm_arm_mov_reg_pcrel((as), (reg_dest), (label))
+
+#define ASM_NOT_REG(as, reg_dest) asm_arm_mvn_reg_reg((as), (reg_dest), (reg_dest))
+#define ASM_NEG_REG(as, reg_dest) asm_arm_rsb_reg_reg_imm((as), (reg_dest), (reg_dest), 0)
+#define ASM_LSL_REG_REG(as, reg_dest, reg_shift) asm_arm_lsl_reg_reg((as), (reg_dest), (reg_shift))
+#define ASM_LSR_REG_REG(as, reg_dest, reg_shift) asm_arm_lsr_reg_reg((as), (reg_dest), (reg_shift))
+#define ASM_ASR_REG_REG(as, reg_dest, reg_shift) asm_arm_asr_reg_reg((as), (reg_dest), (reg_shift))
+#define ASM_OR_REG_REG(as, reg_dest, reg_src) asm_arm_orr_reg_reg_reg((as), (reg_dest), (reg_dest), (reg_src))
+#define ASM_XOR_REG_REG(as, reg_dest, reg_src) asm_arm_eor_reg_reg_reg((as), (reg_dest), (reg_dest), (reg_src))
+#define ASM_AND_REG_REG(as, reg_dest, reg_src) asm_arm_and_reg_reg_reg((as), (reg_dest), (reg_dest), (reg_src))
+#define ASM_ADD_REG_REG(as, reg_dest, reg_src) asm_arm_add_reg_reg_reg((as), (reg_dest), (reg_dest), (reg_src))
+#define ASM_SUB_REG_REG(as, reg_dest, reg_src) asm_arm_sub_reg_reg_reg((as), (reg_dest), (reg_dest), (reg_src))
+#define ASM_MUL_REG_REG(as, reg_dest, reg_src) asm_arm_mul_reg_reg_reg((as), (reg_dest), (reg_dest), (reg_src))
+
+#define ASM_LOAD_REG_REG(as, reg_dest, reg_base) asm_arm_ldr_reg_reg((as), (reg_dest), (reg_base), 0)
+#define ASM_LOAD_REG_REG_OFFSET(as, reg_dest, reg_base, word_offset) asm_arm_ldr_reg_reg((as), (reg_dest), (reg_base), 4 * (word_offset))
+#define ASM_LOAD8_REG_REG(as, reg_dest, reg_base) asm_arm_ldrb_reg_reg((as), (reg_dest), (reg_base))
+#define ASM_LOAD16_REG_REG(as, reg_dest, reg_base) asm_arm_ldrh_reg_reg((as), (reg_dest), (reg_base))
+#define ASM_LOAD16_REG_REG_OFFSET(as, reg_dest, reg_base, uint16_offset) asm_arm_ldrh_reg_reg_offset((as), (reg_dest), (reg_base), 2 * (uint16_offset))
+#define ASM_LOAD32_REG_REG(as, reg_dest, reg_base) asm_arm_ldr_reg_reg((as), (reg_dest), (reg_base), 0)
+
+#define ASM_STORE_REG_REG(as, reg_value, reg_base) asm_arm_str_reg_reg((as), (reg_value), (reg_base), 0)
+#define ASM_STORE_REG_REG_OFFSET(as, reg_dest, reg_base, word_offset) asm_arm_str_reg_reg((as), (reg_dest), (reg_base), 4 * (word_offset))
+#define ASM_STORE8_REG_REG(as, reg_value, reg_base) asm_arm_strb_reg_reg((as), (reg_value), (reg_base))
+#define ASM_STORE16_REG_REG(as, reg_value, reg_base) asm_arm_strh_reg_reg((as), (reg_value), (reg_base))
+#define ASM_STORE32_REG_REG(as, reg_value, reg_base) asm_arm_str_reg_reg((as), (reg_value), (reg_base), 0)
+
+#endif // GENERIC_ASM_API
+
+#endif // MICROPY_INCLUDED_PY_ASMARM_H

+ 113 - 0
mp_flipper/lib/micropython/py/asmbase.c

@@ -0,0 +1,113 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2016 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <string.h>
+
+#include "py/obj.h"
+#include "py/misc.h"
+#include "py/asmbase.h"
+
+#if MICROPY_EMIT_MACHINE_CODE
+
+// Initialise the generic assembler state with room for `max_num_labels`
+// label offsets (heap-allocated; released by mp_asm_base_deinit).
+void mp_asm_base_init(mp_asm_base_t *as, size_t max_num_labels) {
+    as->max_num_labels = max_num_labels;
+    as->label_offsets = m_new(size_t, max_num_labels);
+}
+
+// Release the label table and, when `free_code` is set, also the emitted
+// executable code buffer (ownership of the code otherwise transfers to the
+// caller via mp_asm_base_get_code).
+void mp_asm_base_deinit(mp_asm_base_t *as, bool free_code) {
+    if (free_code) {
+        MP_PLAT_FREE_EXEC(as->code_base, as->code_size);
+    }
+    m_del(size_t, as->label_offsets, as->max_num_labels);
+}
+
+// Begin an assembler pass.  Compute passes reset all label offsets to -1
+// (so unset/duplicate assignment can be detected); the emit pass allocates
+// the executable buffer sized by the preceding compute pass.
+void mp_asm_base_start_pass(mp_asm_base_t *as, int pass) {
+    if (pass < MP_ASM_PASS_EMIT) {
+        // Reset labels so we can detect backwards jumps (and verify unique assignment)
+        memset(as->label_offsets, -1, as->max_num_labels * sizeof(size_t));
+    } else {
+        // allocating executable RAM is platform specific
+        MP_PLAT_ALLOC_EXEC(as->code_offset, (void **)&as->code_base, &as->code_size);
+        assert(as->code_base != NULL);
+    }
+    as->pass = pass;
+    as->suppress = false;
+    as->code_offset = 0;
+}
+
+// all functions must go through this one to emit bytes
+// if as->pass < MP_ASM_PASS_EMIT, then this function just counts the number
+// of bytes needed and returns NULL, and callers should not store any data
+// It also returns NULL if generated code should be suppressed at this point.
+uint8_t *mp_asm_base_get_cur_to_write_bytes(void *as_in, size_t num_bytes_to_write) {
+    mp_asm_base_t *as = as_in;
+    uint8_t *c = NULL;
+    if (as->suppress) {
+        // dead code: don't even advance the offset, so suppressed code
+        // occupies no space in the final buffer
+        return c;
+    }
+    if (as->pass == MP_ASM_PASS_EMIT) {
+        assert(as->code_offset + num_bytes_to_write <= as->code_size);
+        c = as->code_base + as->code_offset;
+    }
+    as->code_offset += num_bytes_to_write;
+    return c;
+}
+
+// Bind `label` to the current code offset.  On compute passes this records
+// the offset (asserting it wasn't already assigned); on the emit pass it
+// asserts the offset matches what the compute pass recorded.
+void mp_asm_base_label_assign(mp_asm_base_t *as, size_t label) {
+    assert(label < as->max_num_labels);
+
+    // Assigning a label ends any dead-code region, and all following machine
+    // code should be emitted (until another mp_asm_base_suppress_code() call).
+    as->suppress = false;
+
+    if (as->pass < MP_ASM_PASS_EMIT) {
+        // assign label offset
+        assert(as->label_offsets[label] == (size_t)-1);
+        as->label_offsets[label] = as->code_offset;
+    } else {
+        // ensure label offset has not changed from PASS_COMPUTE to PASS_EMIT
+        assert(as->label_offsets[label] == as->code_offset);
+    }
+}
+
+// align must be a multiple of 2
+// Round the current code offset up to the next `align` boundary (the gap
+// bytes are skipped, not written).
+void mp_asm_base_align(mp_asm_base_t *as, unsigned int align) {
+    as->code_offset = (as->code_offset + align - 1) & (~(align - 1));
+}
+
+// this function assumes a little endian machine
+// Emit `val` as `bytesize` raw little-endian bytes of inline data.
+void mp_asm_base_data(mp_asm_base_t *as, unsigned int bytesize, uintptr_t val) {
+    uint8_t *c = mp_asm_base_get_cur_to_write_bytes(as, bytesize);
+    if (c != NULL) {
+        for (unsigned int i = 0; i < bytesize; i++) {
+            *c++ = val;
+            val >>= 8;
+        }
+    }
+}
+
+#endif // MICROPY_EMIT_MACHINE_CODE

+ 78 - 0
mp_flipper/lib/micropython/py/asmbase.h

@@ -0,0 +1,78 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2016 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_ASMBASE_H
+#define MICROPY_INCLUDED_PY_ASMBASE_H
+
+#include <stdint.h>
+#include <stdbool.h>
+
+#define MP_ASM_PASS_COMPUTE (1)
+#define MP_ASM_PASS_EMIT    (2)
+
+// Generic multi-pass assembler state shared by all native emitter backends.
+typedef struct _mp_asm_base_t {
+    uint8_t pass;        // current pass: MP_ASM_PASS_COMPUTE or MP_ASM_PASS_EMIT
+
+    // Set to true using mp_asm_base_suppress_code() if the code generator
+    // should suppress emitted code due to it being dead code.
+    bool suppress;
+
+    size_t code_offset;  // current write position within the code buffer
+    size_t code_size;    // size of the code buffer (set when it is allocated)
+    uint8_t *code_base;  // buffer for emitted code; NULL until the emit pass
+
+    size_t max_num_labels;  // capacity of label_offsets
+    size_t *label_offsets;  // per-label code offsets; (size_t)-1 = unassigned
+} mp_asm_base_t;
+
+void mp_asm_base_init(mp_asm_base_t *as, size_t max_num_labels);
+void mp_asm_base_deinit(mp_asm_base_t *as, bool free_code);
+void mp_asm_base_start_pass(mp_asm_base_t *as, int pass);
+uint8_t *mp_asm_base_get_cur_to_write_bytes(void *as, size_t num_bytes_to_write);
+void mp_asm_base_label_assign(mp_asm_base_t *as, size_t label);
+void mp_asm_base_align(mp_asm_base_t *as, unsigned int align);
+void mp_asm_base_data(mp_asm_base_t *as, unsigned int bytesize, uintptr_t val);
+
+// Mark the following code as dead: emission is skipped until the next
+// mp_asm_base_label_assign() clears the flag.
+static inline void mp_asm_base_suppress_code(mp_asm_base_t *as) {
+    as->suppress = true;
+}
+
+// Current emission position, in bytes from the start of the code buffer.
+static inline size_t mp_asm_base_get_code_pos(mp_asm_base_t *as) {
+    return as->code_offset;
+}
+
+// Size of the emitted code buffer.
+static inline size_t mp_asm_base_get_code_size(mp_asm_base_t *as) {
+    return as->code_size;
+}
+
+// Pointer to the finished machine code.  On platforms with split
+// write/execute mappings, MP_PLAT_COMMIT_EXEC returns the executable alias
+// of the buffer; otherwise the write buffer itself is returned.
+static inline void *mp_asm_base_get_code(mp_asm_base_t *as) {
+    #if defined(MP_PLAT_COMMIT_EXEC)
+    return MP_PLAT_COMMIT_EXEC(as->code_base, as->code_size, NULL);
+    #else
+    return as->code_base;
+    #endif
+}
+
+#endif // MICROPY_INCLUDED_PY_ASMBASE_H

+ 592 - 0
mp_flipper/lib/micropython/py/asmthumb.c

@@ -0,0 +1,592 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdio.h>
+#include <assert.h>
+#include <string.h>
+
+#include "py/mpconfig.h"
+
+// wrapper around everything in this file
+#if MICROPY_EMIT_THUMB || MICROPY_EMIT_INLINE_THUMB
+
+#include "py/mpstate.h"
+#include "py/asmthumb.h"
+
+#ifdef _MSC_VER
+#include <intrin.h>
+
+// Count leading zeros of x via the MSVC intrinsic.  Returns 0 when x == 0
+// (note: GCC's __builtin_clz is undefined for 0, so the two variants differ
+// only in that never-relied-upon case).
+static uint32_t mp_clz(uint32_t x) {
+    unsigned long lz = 0;
+    return _BitScanReverse(&lz, x) ? (sizeof(x) * 8 - 1) - lz : 0;
+}
+
+// Count trailing zeros of x; returns 0 when x == 0.
+static uint32_t mp_ctz(uint32_t x) {
+    unsigned long tz = 0;
+    return _BitScanForward(&tz, x) ? tz : 0;
+}
+#else
+#define mp_clz(x) __builtin_clz(x)
+#define mp_ctz(x) __builtin_ctz(x)
+#endif
+
+#define UNSIGNED_FIT5(x) ((uint32_t)(x) < 32)
+#define UNSIGNED_FIT7(x) ((uint32_t)(x) < 128)
+#define UNSIGNED_FIT8(x) (((x) & 0xffffff00) == 0)
+#define UNSIGNED_FIT16(x) (((x) & 0xffff0000) == 0)
+#define SIGNED_FIT8(x) (((x) & 0xffffff80) == 0) || (((x) & 0xffffff80) == 0xffffff80)
+#define SIGNED_FIT9(x) (((x) & 0xffffff00) == 0) || (((x) & 0xffffff00) == 0xffffff00)
+#define SIGNED_FIT12(x) (((x) & 0xfffff800) == 0) || (((x) & 0xfffff800) == 0xfffff800)
+#define SIGNED_FIT23(x) (((x) & 0xffc00000) == 0) || (((x) & 0xffc00000) == 0xffc00000)
+
+// Note: these actually take an imm12 but the high-bit is not encoded here
+#define OP_ADD_W_RRI_HI(reg_src) (0xf200 | (reg_src))
+#define OP_ADD_W_RRI_LO(reg_dest, imm11) ((imm11 << 4 & 0x7000) | reg_dest << 8 | (imm11 & 0xff))
+#define OP_SUB_W_RRI_HI(reg_src) (0xf2a0 | (reg_src))
+#define OP_SUB_W_RRI_LO(reg_dest, imm11) ((imm11 << 4 & 0x7000) | reg_dest << 8 | (imm11 & 0xff))
+
+#define OP_LDR_W_HI(reg_base) (0xf8d0 | (reg_base))
+#define OP_LDR_W_LO(reg_dest, imm12) ((reg_dest) << 12 | (imm12))
+
+#define OP_LDRH_W_HI(reg_base) (0xf8b0 | (reg_base))
+#define OP_LDRH_W_LO(reg_dest, imm12) ((reg_dest) << 12 | (imm12))
+
+// Reserve n bytes in the output stream; returns NULL on counting passes or
+// when code is suppressed (callers must check before storing).
+static inline byte *asm_thumb_get_cur_to_write_bytes(asm_thumb_t *as, int n) {
+    return mp_asm_base_get_cur_to_write_bytes(&as->base, n);
+}
+
+/*
+static void asm_thumb_write_byte_1(asm_thumb_t *as, byte b1) {
+    byte *c = asm_thumb_get_cur_to_write_bytes(as, 1);
+    c[0] = b1;
+}
+*/
+
+/*
+#define IMM32_L0(x) ((x) & 0xff)
+#define IMM32_L1(x) (((x) >> 8) & 0xff)
+#define IMM32_L2(x) (((x) >> 16) & 0xff)
+#define IMM32_L3(x) (((x) >> 24) & 0xff)
+
+static void asm_thumb_write_word32(asm_thumb_t *as, int w32) {
+    byte *c = asm_thumb_get_cur_to_write_bytes(as, 4);
+    c[0] = IMM32_L0(w32);
+    c[1] = IMM32_L1(w32);
+    c[2] = IMM32_L2(w32);
+    c[3] = IMM32_L3(w32);
+}
+*/
+
+// rlolist is a bit map indicating desired lo-registers
+#define OP_PUSH_RLIST(rlolist)      (0xb400 | (rlolist))
+#define OP_PUSH_RLIST_LR(rlolist)   (0xb400 | 0x0100 | (rlolist))
+#define OP_POP_RLIST(rlolist)       (0xbc00 | (rlolist))
+#define OP_POP_RLIST_PC(rlolist)    (0xbc00 | 0x0100 | (rlolist))
+
+// The number of words must fit in 7 unsigned bits
+#define OP_ADD_SP(num_words) (0xb000 | (num_words))
+#define OP_SUB_SP(num_words) (0xb080 | (num_words))
+
+// locals:
+//  - stored on the stack in ascending order
+//  - numbered 0 through num_locals-1
+//  - SP points to first local
+//
+//  | SP
+//  v
+//  l0  l1  l2  ...  l(n-1)
+//  ^                ^
+//  | low address    | high address in RAM
+
+// Emit the function prologue: an optional ARM->Thumb switching trampoline,
+// a push of working registers plus LR, and an SP adjustment so that
+// `num_locals` machine words of stack locals are available (see the locals
+// layout diagram above: SP points at local 0).  The chosen reglist and
+// stack adjustment are recorded for asm_thumb_exit to mirror.
+void asm_thumb_entry(asm_thumb_t *as, int num_locals) {
+    assert(num_locals >= 0);
+
+    // If this Thumb machine code is run from ARM state then add a prelude
+    // to switch to Thumb state for the duration of the function.
+    #if MICROPY_DYNAMIC_COMPILER || MICROPY_EMIT_ARM || (defined(__arm__) && !defined(__thumb2__) && !defined(__thumb__))
+    #if MICROPY_DYNAMIC_COMPILER
+    if (mp_dynamic_compiler.native_arch == MP_NATIVE_ARCH_ARMV6)
+    #endif
+    {
+        // Emitted as ARM (32-bit) instructions via two 16-bit halves each.
+        asm_thumb_op32(as, 0x4010, 0xe92d); // push {r4, lr}
+        asm_thumb_op32(as, 0xe009, 0xe28f); // add lr, pc, 8 + 1
+        asm_thumb_op32(as, 0xff3e, 0xe12f); // blx lr
+        asm_thumb_op32(as, 0x4010, 0xe8bd); // pop {r4, lr}
+        asm_thumb_op32(as, 0xff1e, 0xe12f); // bx lr
+    }
+    #endif
+
+    // work out what to push and how many extra spaces to reserve on stack
+    // so that we have enough for all locals and it's aligned an 8-byte boundary
+    // we push extra regs (r1, r2, r3) to help do the stack adjustment
+    // we probably should just always subtract from sp, since this would be more efficient
+    // for push rlist, lowest numbered register at the lowest address
+    uint reglist;
+    uint stack_adjust;
+    // don't pop r0 because it's used for return value
+    switch (num_locals) {
+        case 0:
+            reglist = 0xf2;
+            stack_adjust = 0;
+            break;
+
+        case 1:
+            reglist = 0xf2;
+            stack_adjust = 0;
+            break;
+
+        case 2:
+            reglist = 0xfe;
+            stack_adjust = 0;
+            break;
+
+        case 3:
+            reglist = 0xfe;
+            stack_adjust = 0;
+            break;
+
+        default:
+            reglist = 0xfe;
+            // round the extra words up to an even count to keep SP 8-byte aligned
+            stack_adjust = ((num_locals - 3) + 1) & (~1);
+            break;
+    }
+    asm_thumb_op16(as, OP_PUSH_RLIST_LR(reglist));
+    if (stack_adjust > 0) {
+        if (asm_thumb_allow_armv7m(as)) {
+            if (UNSIGNED_FIT7(stack_adjust)) {
+                asm_thumb_op16(as, OP_SUB_SP(stack_adjust));
+            } else {
+                asm_thumb_op32(as, OP_SUB_W_RRI_HI(ASM_THUMB_REG_SP), OP_SUB_W_RRI_LO(ASM_THUMB_REG_SP, stack_adjust * 4));
+            }
+        } else {
+            // ARMv6-M: only the 7-bit SUB SP form exists, so split large
+            // adjustments into repeated maximal subtractions
+            int adj = stack_adjust;
+            // we don't expect the stack_adjust to be massive
+            while (!UNSIGNED_FIT7(adj)) {
+                asm_thumb_op16(as, OP_SUB_SP(127));
+                adj -= 127;
+            }
+            asm_thumb_op16(as, OP_SUB_SP(adj));
+        }
+    }
+    as->push_reglist = reglist;
+    as->stack_adjust = stack_adjust;
+}
+
+// Emit the function epilogue: undo the SP adjustment made by
+// asm_thumb_entry and pop the saved registers with PC (which returns).
+void asm_thumb_exit(asm_thumb_t *as) {
+    if (as->stack_adjust > 0) {
+        if (asm_thumb_allow_armv7m(as)) {
+            if (UNSIGNED_FIT7(as->stack_adjust)) {
+                asm_thumb_op16(as, OP_ADD_SP(as->stack_adjust));
+            } else {
+                asm_thumb_op32(as, OP_ADD_W_RRI_HI(ASM_THUMB_REG_SP), OP_ADD_W_RRI_LO(ASM_THUMB_REG_SP, as->stack_adjust * 4));
+            }
+        } else {
+            // ARMv6-M: split the adjustment into 7-bit ADD SP chunks
+            int adj = as->stack_adjust;
+            // we don't expect the stack_adjust to be massive
+            while (!UNSIGNED_FIT7(adj)) {
+                asm_thumb_op16(as, OP_ADD_SP(127));
+                adj -= 127;
+            }
+            asm_thumb_op16(as, OP_ADD_SP(adj));
+        }
+    }
+    asm_thumb_op16(as, OP_POP_RLIST_PC(as->push_reglist));
+}
+
+// Look up the code offset previously assigned to `label`
+// ((mp_uint_t)-1 if not yet assigned on this pass).
+static mp_uint_t get_label_dest(asm_thumb_t *as, uint label) {
+    assert(label < as->base.max_num_labels);
+    return as->base.label_offsets[label];
+}
+
+// Emit one 16-bit Thumb instruction (little-endian byte order).
+void asm_thumb_op16(asm_thumb_t *as, uint op) {
+    byte *c = asm_thumb_get_cur_to_write_bytes(as, 2);
+    if (c != NULL) {
+        // little endian
+        c[0] = op;
+        c[1] = op >> 8;
+    }
+}
+
+// Emit a 32-bit (Thumb-2) instruction as two 16-bit halves, op1 first,
+// each half in little-endian byte order.
+void asm_thumb_op32(asm_thumb_t *as, uint op1, uint op2) {
+    byte *c = asm_thumb_get_cur_to_write_bytes(as, 4);
+    if (c != NULL) {
+        // little endian, op1 then op2
+        c[0] = op1;
+        c[1] = op1 >> 8;
+        c[2] = op2;
+        c[3] = op2 >> 8;
+    }
+}
+
+#define OP_FORMAT_4(op, rlo_dest, rlo_src) ((op) | ((rlo_src) << 3) | (rlo_dest))
+
+// Emit a Thumb "format 4" ALU operation (two low registers: Rd op= Rs).
+void asm_thumb_format_4(asm_thumb_t *as, uint op, uint rlo_dest, uint rlo_src) {
+    assert(rlo_dest < ASM_THUMB_REG_R8);
+    assert(rlo_src < ASM_THUMB_REG_R8);
+    asm_thumb_op16(as, OP_FORMAT_4(op, rlo_dest, rlo_src));
+}
+
+// Emit `mov reg_dest, reg_src` (0x4600 hi-register MOV form), building the
+// H-bit fields so both low (r0-r7) and high (r8-r15) registers work.
+void asm_thumb_mov_reg_reg(asm_thumb_t *as, uint reg_dest, uint reg_src) {
+    uint op_lo;
+    if (reg_src < 8) {
+        op_lo = reg_src << 3;
+    } else {
+        op_lo = 0x40 | ((reg_src - 8) << 3);
+    }
+    if (reg_dest < 8) {
+        op_lo |= reg_dest;
+    } else {
+        op_lo |= 0x80 | (reg_dest - 8);
+    }
+    // mov reg_dest, reg_src
+    asm_thumb_op16(as, 0x4600 | op_lo);
+}
+
+// if loading lo half with movw, the i16 value will be zero extended into the r32 register!
+// Emit a Thumb-2 MOVW or MOVT (selected by `mov_op`) loading the 16-bit
+// immediate into reg_dest, scattering the immediate across the i:imm4:imm3:imm8
+// encoding fields.
+void asm_thumb_mov_reg_i16(asm_thumb_t *as, uint mov_op, uint reg_dest, int i16_src) {
+    assert(reg_dest < ASM_THUMB_REG_R15);
+    // mov[wt] reg_dest, #i16_src
+    asm_thumb_op32(as, mov_op | ((i16_src >> 1) & 0x0400) | ((i16_src >> 12) & 0xf), ((i16_src << 4) & 0x7000) | (reg_dest << 8) | (i16_src & 0xff));
+}
+
+// Build a 16-bit constant in a low register without Thumb-2 MOVW
+// (ARMv6-M fallback): load the high byte, shift left 8, add the low byte.
+static void asm_thumb_mov_rlo_i16(asm_thumb_t *as, uint rlo_dest, int i16_src) {
+    asm_thumb_mov_rlo_i8(as, rlo_dest, (i16_src >> 8) & 0xff);
+    asm_thumb_lsl_rlo_rlo_i5(as, rlo_dest, rlo_dest, 8);
+    asm_thumb_add_rlo_i8(as, rlo_dest, i16_src & 0xff);
+}
+
+#define OP_B_N(byte_offset) (0xe000 | (((byte_offset) >> 1) & 0x07ff))
+
+// Emit a narrow (16-bit) unconditional branch to `label`.  Returns false
+// only on the emit pass when the byte offset doesn't fit the 12-bit signed
+// range of the encoding (on earlier passes the offset may be unknown).
+bool asm_thumb_b_n_label(asm_thumb_t *as, uint label) {
+    mp_uint_t dest = get_label_dest(as, label);
+    mp_int_t rel = dest - as->base.code_offset;
+    rel -= 4; // account for instruction prefetch, PC is 4 bytes ahead of this instruction
+    asm_thumb_op16(as, OP_B_N(rel));
+    return as->base.pass != MP_ASM_PASS_EMIT || SIGNED_FIT12(rel);
+}
+
+#define OP_BCC_N(cond, byte_offset) (0xd000 | ((cond) << 8) | (((byte_offset) >> 1) & 0x00ff))
+
+// all these bit-arithmetic operations need coverage testing!
+#define OP_BCC_W_HI(cond, byte_offset) (0xf000 | ((cond) << 6) | (((byte_offset) >> 10) & 0x0400) | (((byte_offset) >> 14) & 0x003f))
+#define OP_BCC_W_LO(byte_offset) (0x8000 | ((byte_offset) & 0x2000) | (((byte_offset) >> 1) & 0x0fff))
+
+// Emit a conditional branch to `label`: narrow (16-bit, 9-bit signed byte
+// range) when !wide, or the wide Thumb-2 encoding when `wide` and the
+// target permits ARMv7-M.  Returns false if the branch could not be
+// encoded (narrow out of range on the emit pass, or wide on ARMv6-M).
+bool asm_thumb_bcc_nw_label(asm_thumb_t *as, int cond, uint label, bool wide) {
+    mp_uint_t dest = get_label_dest(as, label);
+    mp_int_t rel = dest - as->base.code_offset;
+    rel -= 4; // account for instruction prefetch, PC is 4 bytes ahead of this instruction
+    if (!wide) {
+        asm_thumb_op16(as, OP_BCC_N(cond, rel));
+        return as->base.pass != MP_ASM_PASS_EMIT || SIGNED_FIT9(rel);
+    } else if (asm_thumb_allow_armv7m(as)) {
+        asm_thumb_op32(as, OP_BCC_W_HI(cond, rel), OP_BCC_W_LO(rel));
+        return true;
+    } else {
+        // this method should not be called for ARMV6M
+        return false;
+    }
+}
+
+#define OP_BL_HI(byte_offset) (0xf000 | (((byte_offset) >> 12) & 0x07ff))
+#define OP_BL_LO(byte_offset) (0xf800 | (((byte_offset) >> 1) & 0x07ff))
+
+// Emit a BL (branch-with-link) to `label`.  Returns false only on the emit
+// pass when the offset exceeds the 23-bit signed range of this encoding.
+bool asm_thumb_bl_label(asm_thumb_t *as, uint label) {
+    mp_uint_t dest = get_label_dest(as, label);
+    mp_int_t rel = dest - as->base.code_offset;
+    rel -= 4; // account for instruction prefetch, PC is 4 bytes ahead of this instruction
+    asm_thumb_op32(as, OP_BL_HI(rel), OP_BL_LO(rel));
+    return as->base.pass != MP_ASM_PASS_EMIT || SIGNED_FIT23(rel);
+}
+
+// Load an arbitrary 32-bit constant into reg_dest with a fixed-size
+// sequence (movw/movt on ARMv7-M; pc-relative ldr over an inline literal on
+// ARMv6-M).  Returns the code position of the start of the sequence —
+// presumably so callers can patch the constant later; confirm at call sites.
+size_t asm_thumb_mov_reg_i32(asm_thumb_t *as, uint reg_dest, mp_uint_t i32) {
+    // movw, movt does it in 8 bytes
+    // ldr [pc, #], dw does it in 6 bytes, but we might not reach to end of code for dw
+
+    size_t loc = mp_asm_base_get_code_pos(&as->base);
+
+    if (asm_thumb_allow_armv7m(as)) {
+        asm_thumb_mov_reg_i16(as, ASM_THUMB_OP_MOVW, reg_dest, i32);
+        asm_thumb_mov_reg_i16(as, ASM_THUMB_OP_MOVT, reg_dest, i32 >> 16);
+    } else {
+        // should only be called with lo reg for ARMV6M
+        assert(reg_dest < ASM_THUMB_REG_R8);
+
+        // sanity check that generated code is aligned
+        assert(!as->base.code_base || !(3u & (uintptr_t)as->base.code_base));
+
+        // basically:
+        //        (nop)
+        //        ldr reg_dest, _data
+        //        b 1f
+        // _data: .word i32
+        //  1:
+        if (as->base.code_offset & 2u) {
+            // pad so the literal word ends up 4-byte aligned for the ldr
+            asm_thumb_op16(as, ASM_THUMB_OP_NOP);
+        }
+        asm_thumb_ldr_rlo_pcrel_i8(as, reg_dest, 0);
+        asm_thumb_op16(as, OP_B_N(2));
+        asm_thumb_op16(as, i32 & 0xffff);
+        asm_thumb_op16(as, i32 >> 16);
+    }
+
+    return loc;
+}
+
+// Load `i32` into reg_dest using the shortest sequence available: a single
+// mov-imm8 for small values, movw / movw+movt on ARMv7-M, and on ARMv6-M a
+// shifted-byte, built 16-bit, or full-literal sequence, optionally negated.
+void asm_thumb_mov_reg_i32_optimised(asm_thumb_t *as, uint reg_dest, int i32) {
+    if (reg_dest < 8 && UNSIGNED_FIT8(i32)) {
+        asm_thumb_mov_rlo_i8(as, reg_dest, i32);
+    } else if (asm_thumb_allow_armv7m(as)) {
+        if (UNSIGNED_FIT16(i32)) {
+            asm_thumb_mov_reg_i16(as, ASM_THUMB_OP_MOVW, reg_dest, i32);
+        } else {
+            asm_thumb_mov_reg_i32(as, reg_dest, i32);
+        }
+    } else {
+        uint rlo_dest = reg_dest;
+        assert(rlo_dest < ASM_THUMB_REG_R8); // should never be called for ARMV6M
+
+        bool negate = i32 < 0 && ((i32 + i32) & 0xffffffffu); // don't negate 0x80000000
+        if (negate) {
+            i32 = -i32;
+        }
+
+        uint clz = mp_clz(i32);
+        uint ctz = i32 ? mp_ctz(i32) : 0;
+        assert(clz + ctz <= 32);
+        if (clz + ctz >= 24) {
+            // at most 8 significant bits: load them and shift into place
+            asm_thumb_mov_rlo_i8(as, rlo_dest, (i32 >> ctz) & 0xff);
+            asm_thumb_lsl_rlo_rlo_i5(as, rlo_dest, rlo_dest, ctz);
+        } else if (UNSIGNED_FIT16(i32)) {
+            asm_thumb_mov_rlo_i16(as, rlo_dest, i32);
+        } else {
+            if (negate) {
+                // no point in negating if we're storing in 32 bit anyway
+                negate = false;
+                i32 = -i32;
+            }
+            asm_thumb_mov_reg_i32(as, rlo_dest, i32);
+        }
+        if (negate) {
+            asm_thumb_neg_rlo_rlo(as, rlo_dest, rlo_dest);
+        }
+    }
+}
+
+#define OP_STR_TO_SP_OFFSET(rlo_dest, word_offset) (0x9000 | ((rlo_dest) << 8) | ((word_offset) & 0x00ff))
+#define OP_LDR_FROM_SP_OFFSET(rlo_dest, word_offset) (0x9800 | ((rlo_dest) << 8) | ((word_offset) & 0x00ff))
+
+static void asm_thumb_mov_local_check(asm_thumb_t *as, int word_offset) {
+    if (as->base.pass >= MP_ASM_PASS_EMIT) {
+        assert(word_offset >= 0);
+        if (!UNSIGNED_FIT8(word_offset)) {
+            mp_raise_NotImplementedError(MP_ERROR_TEXT("too many locals for native method"));
+        }
+    }
+}
+
+void asm_thumb_mov_local_reg(asm_thumb_t *as, int local_num, uint rlo_src) {
+    assert(rlo_src < ASM_THUMB_REG_R8);
+    int word_offset = local_num;
+    asm_thumb_mov_local_check(as, word_offset);
+    asm_thumb_op16(as, OP_STR_TO_SP_OFFSET(rlo_src, word_offset));
+}
+
+void asm_thumb_mov_reg_local(asm_thumb_t *as, uint rlo_dest, int local_num) {
+    assert(rlo_dest < ASM_THUMB_REG_R8);
+    int word_offset = local_num;
+    asm_thumb_mov_local_check(as, word_offset);
+    asm_thumb_op16(as, OP_LDR_FROM_SP_OFFSET(rlo_dest, word_offset));
+}
+
+#define OP_ADD_REG_SP_OFFSET(rlo_dest, word_offset) (0xa800 | ((rlo_dest) << 8) | ((word_offset) & 0x00ff))
+
+void asm_thumb_mov_reg_local_addr(asm_thumb_t *as, uint rlo_dest, int local_num) {
+    assert(rlo_dest < ASM_THUMB_REG_R8);
+    int word_offset = local_num;
+    assert(as->base.pass < MP_ASM_PASS_EMIT || word_offset >= 0);
+    asm_thumb_op16(as, OP_ADD_REG_SP_OFFSET(rlo_dest, word_offset));
+}
+
+void asm_thumb_mov_reg_pcrel(asm_thumb_t *as, uint rlo_dest, uint label) {
+    mp_uint_t dest = get_label_dest(as, label);
+    mp_int_t rel = dest - as->base.code_offset;
+    rel |= 1; // to stay in Thumb state when jumping to this address
+    if (asm_thumb_allow_armv7m(as)) {
+        rel -= 6 + 4; // adjust for mov_reg_i16, sxth_rlo_rlo and then PC+4 prefetch of add_reg_reg
+        asm_thumb_mov_reg_i16(as, ASM_THUMB_OP_MOVW, rlo_dest, rel); // 4 bytes
+        asm_thumb_sxth_rlo_rlo(as, rlo_dest, rlo_dest); // 2 bytes
+    } else {
+        rel -= 8 + 4; // adjust for four instructions and then PC+4 prefetch of add_reg_reg
+        // 6 bytes
+        asm_thumb_mov_rlo_i16(as, rlo_dest, rel);
+        // 2 bytes - not always needed, but we want to keep the size the same
+        asm_thumb_sxth_rlo_rlo(as, rlo_dest, rlo_dest);
+    }
+    asm_thumb_add_reg_reg(as, rlo_dest, ASM_THUMB_REG_R15); // 2 bytes
+}
+
+// ARMv7-M only
+static inline void asm_thumb_ldr_reg_reg_i12(asm_thumb_t *as, uint reg_dest, uint reg_base, uint word_offset) {
+    asm_thumb_op32(as, OP_LDR_W_HI(reg_base), OP_LDR_W_LO(reg_dest, word_offset * 4));
+}
+
+// emits code for: reg_dest = reg_base + offset << offset_shift
+static void asm_thumb_add_reg_reg_offset(asm_thumb_t *as, uint reg_dest, uint reg_base, uint offset, uint offset_shift) {
+    if (reg_dest < ASM_THUMB_REG_R8 && reg_base < ASM_THUMB_REG_R8) {
+        if (offset << offset_shift < 256) {
+            if (reg_dest != reg_base) {
+                asm_thumb_mov_reg_reg(as, reg_dest, reg_base);
+            }
+            asm_thumb_add_rlo_i8(as, reg_dest, offset << offset_shift);
+        } else if (UNSIGNED_FIT8(offset) && reg_dest != reg_base) {
+            asm_thumb_mov_rlo_i8(as, reg_dest, offset);
+            asm_thumb_lsl_rlo_rlo_i5(as, reg_dest, reg_dest, offset_shift);
+            asm_thumb_add_rlo_rlo_rlo(as, reg_dest, reg_dest, reg_base);
+        } else if (reg_dest != reg_base) {
+            asm_thumb_mov_rlo_i16(as, reg_dest, offset << offset_shift);
+            asm_thumb_add_rlo_rlo_rlo(as, reg_dest, reg_dest, reg_dest);
+        } else {
+            uint reg_other = reg_dest ^ 7;
+            asm_thumb_op16(as, OP_PUSH_RLIST((1 << reg_other)));
+            asm_thumb_mov_rlo_i16(as, reg_other, offset << offset_shift);
+            asm_thumb_add_rlo_rlo_rlo(as, reg_dest, reg_dest, reg_other);
+            asm_thumb_op16(as, OP_POP_RLIST((1 << reg_other)));
+        }
+    } else {
+        assert(0); // should never be called for ARMV6M
+    }
+}
+
+void asm_thumb_ldr_reg_reg_i12_optimised(asm_thumb_t *as, uint reg_dest, uint reg_base, uint word_offset) {
+    if (reg_dest < ASM_THUMB_REG_R8 && reg_base < ASM_THUMB_REG_R8 && UNSIGNED_FIT5(word_offset)) {
+        asm_thumb_ldr_rlo_rlo_i5(as, reg_dest, reg_base, word_offset);
+    } else if (asm_thumb_allow_armv7m(as)) {
+        asm_thumb_ldr_reg_reg_i12(as, reg_dest, reg_base, word_offset);
+    } else {
+        asm_thumb_add_reg_reg_offset(as, reg_dest, reg_base, word_offset - 31, 2);
+        asm_thumb_ldr_rlo_rlo_i5(as, reg_dest, reg_dest, 31);
+    }
+}
+
+// ARMv7-M only
+static inline void asm_thumb_ldrh_reg_reg_i12(asm_thumb_t *as, uint reg_dest, uint reg_base, uint uint16_offset) {
+    asm_thumb_op32(as, OP_LDRH_W_HI(reg_base), OP_LDRH_W_LO(reg_dest, uint16_offset * 2));
+}
+
+void asm_thumb_ldrh_reg_reg_i12_optimised(asm_thumb_t *as, uint reg_dest, uint reg_base, uint uint16_offset) {
+    if (reg_dest < ASM_THUMB_REG_R8 && reg_base < ASM_THUMB_REG_R8 && UNSIGNED_FIT5(uint16_offset)) {
+        asm_thumb_ldrh_rlo_rlo_i5(as, reg_dest, reg_base, uint16_offset);
+    } else if (asm_thumb_allow_armv7m(as)) {
+        asm_thumb_ldrh_reg_reg_i12(as, reg_dest, reg_base, uint16_offset);
+    } else {
+        asm_thumb_add_reg_reg_offset(as, reg_dest, reg_base, uint16_offset - 31, 1);
+        asm_thumb_ldrh_rlo_rlo_i5(as, reg_dest, reg_dest, 31);
+    }
+}
+
+// this could be wrong, because it should have a range of +/- 16MiB...
+#define OP_BW_HI(byte_offset) (0xf000 | (((byte_offset) >> 12) & 0x07ff))
+#define OP_BW_LO(byte_offset) (0xb800 | (((byte_offset) >> 1) & 0x07ff))
+
+void asm_thumb_b_label(asm_thumb_t *as, uint label) {
+    mp_uint_t dest = get_label_dest(as, label);
+    mp_int_t rel = dest - as->base.code_offset;
+    rel -= 4; // account for instruction prefetch, PC is 4 bytes ahead of this instruction
+
+    if (dest != (mp_uint_t)-1 && rel <= -4) {
+        // is a backwards jump, so we know the size of the jump on the first pass
+        // calculate rel assuming 12 bit relative jump
+        if (SIGNED_FIT12(rel)) {
+            asm_thumb_op16(as, OP_B_N(rel));
+            return;
+        }
+    }
+
+    // is a large backwards jump, or a forwards jump (that must be assumed large)
+
+    if (asm_thumb_allow_armv7m(as)) {
+        asm_thumb_op32(as, OP_BW_HI(rel), OP_BW_LO(rel));
+    } else {
+        if (SIGNED_FIT12(rel)) {
+            // this code path has to be the same number of instructions irrespective of rel
+            asm_thumb_op16(as, OP_B_N(rel));
+        } else {
+            asm_thumb_op16(as, ASM_THUMB_OP_NOP);
+            if (dest != (mp_uint_t)-1) {
+                // we have an actual branch > 12 bits; this is not handled yet
+                mp_raise_NotImplementedError(MP_ERROR_TEXT("native method too big"));
+            }
+        }
+    }
+}
+
+void asm_thumb_bcc_label(asm_thumb_t *as, int cond, uint label) {
+    mp_uint_t dest = get_label_dest(as, label);
+    mp_int_t rel = dest - as->base.code_offset;
+    rel -= 4; // account for instruction prefetch, PC is 4 bytes ahead of this instruction
+
+    if (dest != (mp_uint_t)-1 && rel <= -4) {
+        // is a backwards jump, so we know the size of the jump on the first pass
+        // calculate rel assuming 9 bit relative jump
+        if (SIGNED_FIT9(rel)) {
+            asm_thumb_op16(as, OP_BCC_N(cond, rel));
+            return;
+        }
+    }
+
+    // is a large backwards jump, or a forwards jump (that must be assumed large)
+
+    if (asm_thumb_allow_armv7m(as)) {
+        asm_thumb_op32(as, OP_BCC_W_HI(cond, rel), OP_BCC_W_LO(rel));
+    } else {
+        // reverse the sense of the branch to jump over a longer branch
+        asm_thumb_op16(as, OP_BCC_N(cond ^ 1, 0));
+        asm_thumb_b_label(as, label);
+    }
+}
+
+void asm_thumb_bcc_rel9(asm_thumb_t *as, int cond, int rel) {
+    rel -= 4; // account for instruction prefetch, PC is 4 bytes ahead of this instruction
+    assert(SIGNED_FIT9(rel));
+    asm_thumb_op16(as, OP_BCC_N(cond, rel));
+}
+
+void asm_thumb_b_rel12(asm_thumb_t *as, int rel) {
+    rel -= 4; // account for instruction prefetch, PC is 4 bytes ahead of this instruction
+    assert(SIGNED_FIT12(rel));
+    asm_thumb_op16(as, OP_B_N(rel));
+}
+
+#define OP_BLX(reg) (0x4780 | ((reg) << 3))
+#define OP_SVC(arg) (0xdf00 | (arg))
+
+void asm_thumb_bl_ind(asm_thumb_t *as, uint fun_id, uint reg_temp) {
+    // Load ptr to function from table, indexed by fun_id, then call it
+    asm_thumb_ldr_reg_reg_i12_optimised(as, reg_temp, ASM_THUMB_REG_FUN_TABLE, fun_id);
+    asm_thumb_op16(as, OP_BLX(reg_temp));
+}
+
+#endif // MICROPY_EMIT_THUMB || MICROPY_EMIT_INLINE_THUMB

+ 436 - 0
mp_flipper/lib/micropython/py/asmthumb.h

@@ -0,0 +1,436 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_ASMTHUMB_H
+#define MICROPY_INCLUDED_PY_ASMTHUMB_H
+
+#include <assert.h>
+#include "py/misc.h"
+#include "py/asmbase.h"
+#include "py/persistentcode.h"
+
+#define ASM_THUMB_REG_R0  (0)
+#define ASM_THUMB_REG_R1  (1)
+#define ASM_THUMB_REG_R2  (2)
+#define ASM_THUMB_REG_R3  (3)
+#define ASM_THUMB_REG_R4  (4)
+#define ASM_THUMB_REG_R5  (5)
+#define ASM_THUMB_REG_R6  (6)
+#define ASM_THUMB_REG_R7  (7)
+#define ASM_THUMB_REG_R8  (8)
+#define ASM_THUMB_REG_R9  (9)
+#define ASM_THUMB_REG_R10 (10)
+#define ASM_THUMB_REG_R11 (11)
+#define ASM_THUMB_REG_R12 (12)
+#define ASM_THUMB_REG_R13 (13)
+#define ASM_THUMB_REG_R14 (14)
+#define ASM_THUMB_REG_R15 (15)
+#define ASM_THUMB_REG_SP  (ASM_THUMB_REG_R13)
+#define ASM_THUMB_REG_LR  (REG_R14)
+
+#define ASM_THUMB_CC_EQ (0x0)
+#define ASM_THUMB_CC_NE (0x1)
+#define ASM_THUMB_CC_CS (0x2)
+#define ASM_THUMB_CC_CC (0x3)
+#define ASM_THUMB_CC_MI (0x4)
+#define ASM_THUMB_CC_PL (0x5)
+#define ASM_THUMB_CC_VS (0x6)
+#define ASM_THUMB_CC_VC (0x7)
+#define ASM_THUMB_CC_HI (0x8)
+#define ASM_THUMB_CC_LS (0x9)
+#define ASM_THUMB_CC_GE (0xa)
+#define ASM_THUMB_CC_LT (0xb)
+#define ASM_THUMB_CC_GT (0xc)
+#define ASM_THUMB_CC_LE (0xd)
+
+typedef struct _asm_thumb_t {
+    mp_asm_base_t base;
+    uint32_t push_reglist;
+    uint32_t stack_adjust;
+} asm_thumb_t;
+
+#if MICROPY_DYNAMIC_COMPILER
+
+static inline bool asm_thumb_allow_armv7m(asm_thumb_t *as) {
+    return MP_NATIVE_ARCH_ARMV7M <= mp_dynamic_compiler.native_arch
+           && mp_dynamic_compiler.native_arch <= MP_NATIVE_ARCH_ARMV7EMDP;
+}
+
+#else
+
+static inline bool asm_thumb_allow_armv7m(asm_thumb_t *as) {
+    return MICROPY_EMIT_THUMB_ARMV7M;
+}
+
+#endif
+
+static inline void asm_thumb_end_pass(asm_thumb_t *as) {
+    (void)as;
+}
+
+void asm_thumb_entry(asm_thumb_t *as, int num_locals);
+void asm_thumb_exit(asm_thumb_t *as);
+
+// argument order follows ARM, in general dest is first
+// note there is a difference between movw and mov.w, and many others!
+
+#define ASM_THUMB_OP_IT (0xbf00)
+#define ASM_THUMB_OP_ITE_EQ (0xbf0c)
+#define ASM_THUMB_OP_ITE_NE (0xbf14)
+#define ASM_THUMB_OP_ITE_CS (0xbf2c)
+#define ASM_THUMB_OP_ITE_CC (0xbf34)
+#define ASM_THUMB_OP_ITE_MI (0xbf4c)
+#define ASM_THUMB_OP_ITE_PL (0xbf54)
+#define ASM_THUMB_OP_ITE_VS (0xbf6c)
+#define ASM_THUMB_OP_ITE_VC (0xbf74)
+#define ASM_THUMB_OP_ITE_HI (0xbf8c)
+#define ASM_THUMB_OP_ITE_LS (0xbf94)
+#define ASM_THUMB_OP_ITE_GE (0xbfac)
+#define ASM_THUMB_OP_ITE_LT (0xbfb4)
+#define ASM_THUMB_OP_ITE_GT (0xbfcc)
+#define ASM_THUMB_OP_ITE_LE (0xbfd4)
+
+#define ASM_THUMB_OP_NOP        (0xbf00)
+#define ASM_THUMB_OP_WFI        (0xbf30)
+#define ASM_THUMB_OP_CPSID_I    (0xb672) // cpsid i, disable irq
+#define ASM_THUMB_OP_CPSIE_I    (0xb662) // cpsie i, enable irq
+
+void asm_thumb_op16(asm_thumb_t *as, uint op);
+void asm_thumb_op32(asm_thumb_t *as, uint op1, uint op2);
+
+static inline void asm_thumb_it_cc(asm_thumb_t *as, uint cc, uint mask) {
+    asm_thumb_op16(as, ASM_THUMB_OP_IT | (cc << 4) | mask);
+}
+
+// FORMAT 1: move shifted register
+
+#define ASM_THUMB_FORMAT_1_LSL (0x0000)
+#define ASM_THUMB_FORMAT_1_LSR (0x0800)
+#define ASM_THUMB_FORMAT_1_ASR (0x1000)
+
+#define ASM_THUMB_FORMAT_1_ENCODE(op, rlo_dest, rlo_src, offset) \
+    ((op) | ((offset) << 6) | ((rlo_src) << 3) | (rlo_dest))
+
+static inline void asm_thumb_format_1(asm_thumb_t *as, uint op, uint rlo_dest, uint rlo_src, uint offset) {
+    assert(rlo_dest < ASM_THUMB_REG_R8);
+    assert(rlo_src < ASM_THUMB_REG_R8);
+    asm_thumb_op16(as, ASM_THUMB_FORMAT_1_ENCODE(op, rlo_dest, rlo_src, offset));
+}
+
+// FORMAT 2: add/subtract
+
+#define ASM_THUMB_FORMAT_2_ADD (0x1800)
+#define ASM_THUMB_FORMAT_2_SUB (0x1a00)
+#define ASM_THUMB_FORMAT_2_REG_OPERAND (0x0000)
+#define ASM_THUMB_FORMAT_2_IMM_OPERAND (0x0400)
+
+#define ASM_THUMB_FORMAT_2_ENCODE(op, rlo_dest, rlo_src, src_b) \
+    ((op) | ((src_b) << 6) | ((rlo_src) << 3) | (rlo_dest))
+
+static inline void asm_thumb_format_2(asm_thumb_t *as, uint op, uint rlo_dest, uint rlo_src, int src_b) {
+    assert(rlo_dest < ASM_THUMB_REG_R8);
+    assert(rlo_src < ASM_THUMB_REG_R8);
+    asm_thumb_op16(as, ASM_THUMB_FORMAT_2_ENCODE(op, rlo_dest, rlo_src, src_b));
+}
+
+static inline void asm_thumb_add_rlo_rlo_rlo(asm_thumb_t *as, uint rlo_dest, uint rlo_src_a, uint rlo_src_b) {
+    asm_thumb_format_2(as, ASM_THUMB_FORMAT_2_ADD | ASM_THUMB_FORMAT_2_REG_OPERAND, rlo_dest, rlo_src_a, rlo_src_b);
+}
+static inline void asm_thumb_add_rlo_rlo_i3(asm_thumb_t *as, uint rlo_dest, uint rlo_src_a, int i3_src) {
+    asm_thumb_format_2(as, ASM_THUMB_FORMAT_2_ADD | ASM_THUMB_FORMAT_2_IMM_OPERAND, rlo_dest, rlo_src_a, i3_src);
+}
+static inline void asm_thumb_sub_rlo_rlo_rlo(asm_thumb_t *as, uint rlo_dest, uint rlo_src_a, uint rlo_src_b) {
+    asm_thumb_format_2(as, ASM_THUMB_FORMAT_2_SUB | ASM_THUMB_FORMAT_2_REG_OPERAND, rlo_dest, rlo_src_a, rlo_src_b);
+}
+static inline void asm_thumb_sub_rlo_rlo_i3(asm_thumb_t *as, uint rlo_dest, uint rlo_src_a, int i3_src) {
+    asm_thumb_format_2(as, ASM_THUMB_FORMAT_2_SUB | ASM_THUMB_FORMAT_2_IMM_OPERAND, rlo_dest, rlo_src_a, i3_src);
+}
+
+// FORMAT 3: move/compare/add/subtract immediate
+// These instructions all do zero extension of the i8 value
+
+#define ASM_THUMB_FORMAT_3_MOV (0x2000)
+#define ASM_THUMB_FORMAT_3_CMP (0x2800)
+#define ASM_THUMB_FORMAT_3_ADD (0x3000)
+#define ASM_THUMB_FORMAT_3_SUB (0x3800)
+#define ASM_THUMB_FORMAT_3_LDR (0x4800)
+
+#define ASM_THUMB_FORMAT_3_ENCODE(op, rlo, i8) ((op) | ((rlo) << 8) | (i8))
+
+static inline void asm_thumb_format_3(asm_thumb_t *as, uint op, uint rlo, int i8) {
+    assert(rlo < ASM_THUMB_REG_R8);
+    asm_thumb_op16(as, ASM_THUMB_FORMAT_3_ENCODE(op, rlo, i8));
+}
+
+static inline void asm_thumb_mov_rlo_i8(asm_thumb_t *as, uint rlo, int i8) {
+    asm_thumb_format_3(as, ASM_THUMB_FORMAT_3_MOV, rlo, i8);
+}
+static inline void asm_thumb_cmp_rlo_i8(asm_thumb_t *as, uint rlo, int i8) {
+    asm_thumb_format_3(as, ASM_THUMB_FORMAT_3_CMP, rlo, i8);
+}
+static inline void asm_thumb_add_rlo_i8(asm_thumb_t *as, uint rlo, int i8) {
+    asm_thumb_format_3(as, ASM_THUMB_FORMAT_3_ADD, rlo, i8);
+}
+static inline void asm_thumb_sub_rlo_i8(asm_thumb_t *as, uint rlo, int i8) {
+    asm_thumb_format_3(as, ASM_THUMB_FORMAT_3_SUB, rlo, i8);
+}
+static inline void asm_thumb_ldr_rlo_pcrel_i8(asm_thumb_t *as, uint rlo, uint i8) {
+    asm_thumb_format_3(as, ASM_THUMB_FORMAT_3_LDR, rlo, i8);
+}
+
+// FORMAT 4: ALU operations
+
+#define ASM_THUMB_FORMAT_4_AND (0x4000)
+#define ASM_THUMB_FORMAT_4_EOR (0x4040)
+#define ASM_THUMB_FORMAT_4_LSL (0x4080)
+#define ASM_THUMB_FORMAT_4_LSR (0x40c0)
+#define ASM_THUMB_FORMAT_4_ASR (0x4100)
+#define ASM_THUMB_FORMAT_4_ADC (0x4140)
+#define ASM_THUMB_FORMAT_4_SBC (0x4180)
+#define ASM_THUMB_FORMAT_4_ROR (0x41c0)
+#define ASM_THUMB_FORMAT_4_TST (0x4200)
+#define ASM_THUMB_FORMAT_4_NEG (0x4240)
+#define ASM_THUMB_FORMAT_4_CMP (0x4280)
+#define ASM_THUMB_FORMAT_4_CMN (0x42c0)
+#define ASM_THUMB_FORMAT_4_ORR (0x4300)
+#define ASM_THUMB_FORMAT_4_MUL (0x4340)
+#define ASM_THUMB_FORMAT_4_BIC (0x4380)
+#define ASM_THUMB_FORMAT_4_MVN (0x43c0)
+
+void asm_thumb_format_4(asm_thumb_t *as, uint op, uint rlo_dest, uint rlo_src);
+
+static inline void asm_thumb_cmp_rlo_rlo(asm_thumb_t *as, uint rlo_dest, uint rlo_src) {
+    asm_thumb_format_4(as, ASM_THUMB_FORMAT_4_CMP, rlo_dest, rlo_src);
+}
+static inline void asm_thumb_mvn_rlo_rlo(asm_thumb_t *as, uint rlo_dest, uint rlo_src) {
+    asm_thumb_format_4(as, ASM_THUMB_FORMAT_4_MVN, rlo_dest, rlo_src);
+}
+static inline void asm_thumb_neg_rlo_rlo(asm_thumb_t *as, uint rlo_dest, uint rlo_src) {
+    asm_thumb_format_4(as, ASM_THUMB_FORMAT_4_NEG, rlo_dest, rlo_src);
+}
+
+// FORMAT 5: hi register operations (add, cmp, mov, bx)
+// For add/cmp/mov, at least one of the args must be a high register
+
+#define ASM_THUMB_FORMAT_5_ADD (0x4400)
+#define ASM_THUMB_FORMAT_5_BX (0x4700)
+
+#define ASM_THUMB_FORMAT_5_ENCODE(op, r_dest, r_src) \
+    ((op) | ((r_dest) << 4 & 0x0080) | ((r_src) << 3) | ((r_dest) & 0x0007))
+
+static inline void asm_thumb_format_5(asm_thumb_t *as, uint op, uint r_dest, uint r_src) {
+    asm_thumb_op16(as, ASM_THUMB_FORMAT_5_ENCODE(op, r_dest, r_src));
+}
+
+static inline void asm_thumb_add_reg_reg(asm_thumb_t *as, uint r_dest, uint r_src) {
+    asm_thumb_format_5(as, ASM_THUMB_FORMAT_5_ADD, r_dest, r_src);
+}
+static inline void asm_thumb_bx_reg(asm_thumb_t *as, uint r_src) {
+    asm_thumb_format_5(as, ASM_THUMB_FORMAT_5_BX, 0, r_src);
+}
+
+// FORMAT 9: load/store with immediate offset
+// For word transfers the offset must be aligned, and >>2
+
+// FORMAT 10: load/store halfword
+// The offset must be aligned, and >>1
+// The load is zero extended into the register
+
+#define ASM_THUMB_FORMAT_9_STR (0x6000)
+#define ASM_THUMB_FORMAT_9_LDR (0x6800)
+#define ASM_THUMB_FORMAT_9_WORD_TRANSFER (0x0000)
+#define ASM_THUMB_FORMAT_9_BYTE_TRANSFER (0x1000)
+
+#define ASM_THUMB_FORMAT_10_STRH (0x8000)
+#define ASM_THUMB_FORMAT_10_LDRH (0x8800)
+
+#define ASM_THUMB_FORMAT_9_10_ENCODE(op, rlo_dest, rlo_base, offset) \
+    ((op) | (((offset) << 6) & 0x07c0) | ((rlo_base) << 3) | (rlo_dest))
+
+static inline void asm_thumb_format_9_10(asm_thumb_t *as, uint op, uint rlo_dest, uint rlo_base, uint offset) {
+    asm_thumb_op16(as, ASM_THUMB_FORMAT_9_10_ENCODE(op, rlo_dest, rlo_base, offset));
+}
+
+static inline void asm_thumb_str_rlo_rlo_i5(asm_thumb_t *as, uint rlo_src, uint rlo_base, uint word_offset) {
+    asm_thumb_format_9_10(as, ASM_THUMB_FORMAT_9_STR | ASM_THUMB_FORMAT_9_WORD_TRANSFER, rlo_src, rlo_base, word_offset);
+}
+static inline void asm_thumb_strb_rlo_rlo_i5(asm_thumb_t *as, uint rlo_src, uint rlo_base, uint byte_offset) {
+    asm_thumb_format_9_10(as, ASM_THUMB_FORMAT_9_STR | ASM_THUMB_FORMAT_9_BYTE_TRANSFER, rlo_src, rlo_base, byte_offset);
+}
+static inline void asm_thumb_strh_rlo_rlo_i5(asm_thumb_t *as, uint rlo_src, uint rlo_base, uint uint16_offset) {
+    asm_thumb_format_9_10(as, ASM_THUMB_FORMAT_10_STRH, rlo_src, rlo_base, uint16_offset);
+}
+static inline void asm_thumb_ldr_rlo_rlo_i5(asm_thumb_t *as, uint rlo_dest, uint rlo_base, uint word_offset) {
+    asm_thumb_format_9_10(as, ASM_THUMB_FORMAT_9_LDR | ASM_THUMB_FORMAT_9_WORD_TRANSFER, rlo_dest, rlo_base, word_offset);
+}
+static inline void asm_thumb_ldrb_rlo_rlo_i5(asm_thumb_t *as, uint rlo_dest, uint rlo_base, uint byte_offset) {
+    asm_thumb_format_9_10(as, ASM_THUMB_FORMAT_9_LDR | ASM_THUMB_FORMAT_9_BYTE_TRANSFER, rlo_dest, rlo_base, byte_offset);
+}
+static inline void asm_thumb_ldrh_rlo_rlo_i5(asm_thumb_t *as, uint rlo_dest, uint rlo_base, uint uint16_offset) {
+    asm_thumb_format_9_10(as, ASM_THUMB_FORMAT_10_LDRH, rlo_dest, rlo_base, uint16_offset);
+}
+static inline void asm_thumb_lsl_rlo_rlo_i5(asm_thumb_t *as, uint rlo_dest, uint rlo_src, uint shift) {
+    asm_thumb_format_1(as, ASM_THUMB_FORMAT_1_LSL, rlo_dest, rlo_src, shift);
+}
+static inline void asm_thumb_asr_rlo_rlo_i5(asm_thumb_t *as, uint rlo_dest, uint rlo_src, uint shift) {
+    asm_thumb_format_1(as, ASM_THUMB_FORMAT_1_ASR, rlo_dest, rlo_src, shift);
+}
+
+// FORMAT 11: sign/zero extend
+
+#define ASM_THUMB_FORMAT_11_ENCODE(op, rlo_dest, rlo_src) \
+    ((op) | ((rlo_src) << 3) | (rlo_dest))
+
+#define ASM_THUMB_FORMAT_11_SXTH (0xb200)
+#define ASM_THUMB_FORMAT_11_SXTB (0xb240)
+#define ASM_THUMB_FORMAT_11_UXTH (0xb280)
+#define ASM_THUMB_FORMAT_11_UXTB (0xb2c0)
+
+static inline void asm_thumb_format_11(asm_thumb_t *as, uint op, uint rlo_dest, uint rlo_src) {
+    assert(rlo_dest < ASM_THUMB_REG_R8);
+    assert(rlo_src < ASM_THUMB_REG_R8);
+    asm_thumb_op16(as, ASM_THUMB_FORMAT_11_ENCODE(op, rlo_dest, rlo_src));
+}
+
+static inline void asm_thumb_sxth_rlo_rlo(asm_thumb_t *as, uint rlo_dest, uint rlo_src) {
+    asm_thumb_format_11(as, ASM_THUMB_FORMAT_11_SXTH, rlo_dest, rlo_src);
+}
+
+// TODO convert these to above format style
+
+#define ASM_THUMB_OP_MOVW (0xf240)
+#define ASM_THUMB_OP_MOVT (0xf2c0)
+
+void asm_thumb_mov_reg_reg(asm_thumb_t *as, uint reg_dest, uint reg_src);
+void asm_thumb_mov_reg_i16(asm_thumb_t *as, uint mov_op, uint reg_dest, int i16_src);
+
+// these return true if the destination is in range, false otherwise
+bool asm_thumb_b_n_label(asm_thumb_t *as, uint label);
+bool asm_thumb_bcc_nw_label(asm_thumb_t *as, int cond, uint label, bool wide);
+bool asm_thumb_bl_label(asm_thumb_t *as, uint label);
+
+size_t asm_thumb_mov_reg_i32(asm_thumb_t *as, uint reg_dest, mp_uint_t i32_src); // convenience
+void asm_thumb_mov_reg_i32_optimised(asm_thumb_t *as, uint reg_dest, int i32_src); // convenience
+void asm_thumb_mov_local_reg(asm_thumb_t *as, int local_num_dest, uint rlo_src); // convenience
+void asm_thumb_mov_reg_local(asm_thumb_t *as, uint rlo_dest, int local_num); // convenience
+void asm_thumb_mov_reg_local_addr(asm_thumb_t *as, uint rlo_dest, int local_num); // convenience
+void asm_thumb_mov_reg_pcrel(asm_thumb_t *as, uint rlo_dest, uint label);
+
+void asm_thumb_ldr_reg_reg_i12_optimised(asm_thumb_t *as, uint reg_dest, uint reg_base, uint word_offset); // convenience
+void asm_thumb_ldrh_reg_reg_i12_optimised(asm_thumb_t *as, uint reg_dest, uint reg_base, uint uint16_offset); // convenience
+
+void asm_thumb_b_label(asm_thumb_t *as, uint label); // convenience: picks narrow or wide branch
+void asm_thumb_bcc_label(asm_thumb_t *as, int cc, uint label); // convenience: picks narrow or wide branch
+void asm_thumb_bl_ind(asm_thumb_t *as, uint fun_id, uint reg_temp); // convenience
+void asm_thumb_bcc_rel9(asm_thumb_t *as, int cc, int rel);
+void asm_thumb_b_rel12(asm_thumb_t *as, int rel);
+
+// Holds a pointer to mp_fun_table
+#define ASM_THUMB_REG_FUN_TABLE ASM_THUMB_REG_R7
+
+#if GENERIC_ASM_API
+
+// The following macros provide a (mostly) arch-independent API to
+// generate native code, and are used by the native emitter.
+
+#define ASM_WORD_SIZE (4)
+
+#define REG_RET ASM_THUMB_REG_R0
+#define REG_ARG_1 ASM_THUMB_REG_R0
+#define REG_ARG_2 ASM_THUMB_REG_R1
+#define REG_ARG_3 ASM_THUMB_REG_R2
+#define REG_ARG_4 ASM_THUMB_REG_R3
+// rest of args go on stack
+
+#define REG_TEMP0 ASM_THUMB_REG_R0
+#define REG_TEMP1 ASM_THUMB_REG_R1
+#define REG_TEMP2 ASM_THUMB_REG_R2
+
+#define REG_LOCAL_1 ASM_THUMB_REG_R4
+#define REG_LOCAL_2 ASM_THUMB_REG_R5
+#define REG_LOCAL_3 ASM_THUMB_REG_R6
+#define REG_LOCAL_NUM (3)
+
+#define REG_FUN_TABLE ASM_THUMB_REG_FUN_TABLE
+
+#define ASM_T               asm_thumb_t
+#define ASM_END_PASS        asm_thumb_end_pass
+#define ASM_ENTRY           asm_thumb_entry
+#define ASM_EXIT            asm_thumb_exit
+
+#define ASM_JUMP            asm_thumb_b_label
+#define ASM_JUMP_IF_REG_ZERO(as, reg, label, bool_test) \
+    do { \
+        asm_thumb_cmp_rlo_i8(as, reg, 0); \
+        asm_thumb_bcc_label(as, ASM_THUMB_CC_EQ, label); \
+    } while (0)
+#define ASM_JUMP_IF_REG_NONZERO(as, reg, label, bool_test) \
+    do { \
+        asm_thumb_cmp_rlo_i8(as, reg, 0); \
+        asm_thumb_bcc_label(as, ASM_THUMB_CC_NE, label); \
+    } while (0)
+#define ASM_JUMP_IF_REG_EQ(as, reg1, reg2, label) \
+    do { \
+        asm_thumb_cmp_rlo_rlo(as, reg1, reg2); \
+        asm_thumb_bcc_label(as, ASM_THUMB_CC_EQ, label); \
+    } while (0)
+#define ASM_JUMP_REG(as, reg) asm_thumb_bx_reg((as), (reg))
+#define ASM_CALL_IND(as, idx) asm_thumb_bl_ind(as, idx, ASM_THUMB_REG_R3)
+
+#define ASM_MOV_LOCAL_REG(as, local_num, reg) asm_thumb_mov_local_reg((as), (local_num), (reg))
+#define ASM_MOV_REG_IMM(as, reg_dest, imm) asm_thumb_mov_reg_i32_optimised((as), (reg_dest), (imm))
+#define ASM_MOV_REG_LOCAL(as, reg_dest, local_num) asm_thumb_mov_reg_local((as), (reg_dest), (local_num))
+#define ASM_MOV_REG_REG(as, reg_dest, reg_src) asm_thumb_mov_reg_reg((as), (reg_dest), (reg_src))
+#define ASM_MOV_REG_LOCAL_ADDR(as, reg_dest, local_num) asm_thumb_mov_reg_local_addr((as), (reg_dest), (local_num))
+#define ASM_MOV_REG_PCREL(as, rlo_dest, label) asm_thumb_mov_reg_pcrel((as), (rlo_dest), (label))
+
+#define ASM_NOT_REG(as, reg_dest) asm_thumb_mvn_rlo_rlo((as), (reg_dest), (reg_dest))
+#define ASM_NEG_REG(as, reg_dest) asm_thumb_neg_rlo_rlo((as), (reg_dest), (reg_dest))
+#define ASM_LSL_REG_REG(as, reg_dest, reg_shift) asm_thumb_format_4((as), ASM_THUMB_FORMAT_4_LSL, (reg_dest), (reg_shift))
+#define ASM_LSR_REG_REG(as, reg_dest, reg_shift) asm_thumb_format_4((as), ASM_THUMB_FORMAT_4_LSR, (reg_dest), (reg_shift))
+#define ASM_ASR_REG_REG(as, reg_dest, reg_shift) asm_thumb_format_4((as), ASM_THUMB_FORMAT_4_ASR, (reg_dest), (reg_shift))
+#define ASM_OR_REG_REG(as, reg_dest, reg_src) asm_thumb_format_4((as), ASM_THUMB_FORMAT_4_ORR, (reg_dest), (reg_src))
+#define ASM_XOR_REG_REG(as, reg_dest, reg_src) asm_thumb_format_4((as), ASM_THUMB_FORMAT_4_EOR, (reg_dest), (reg_src))
+#define ASM_AND_REG_REG(as, reg_dest, reg_src) asm_thumb_format_4((as), ASM_THUMB_FORMAT_4_AND, (reg_dest), (reg_src))
+#define ASM_ADD_REG_REG(as, reg_dest, reg_src) asm_thumb_add_rlo_rlo_rlo((as), (reg_dest), (reg_dest), (reg_src))
+#define ASM_SUB_REG_REG(as, reg_dest, reg_src) asm_thumb_sub_rlo_rlo_rlo((as), (reg_dest), (reg_dest), (reg_src))
+#define ASM_MUL_REG_REG(as, reg_dest, reg_src) asm_thumb_format_4((as), ASM_THUMB_FORMAT_4_MUL, (reg_dest), (reg_src))
+
+#define ASM_LOAD_REG_REG(as, reg_dest, reg_base) asm_thumb_ldr_rlo_rlo_i5((as), (reg_dest), (reg_base), 0)
+#define ASM_LOAD_REG_REG_OFFSET(as, reg_dest, reg_base, word_offset) asm_thumb_ldr_reg_reg_i12_optimised((as), (reg_dest), (reg_base), (word_offset))
+#define ASM_LOAD8_REG_REG(as, reg_dest, reg_base) asm_thumb_ldrb_rlo_rlo_i5((as), (reg_dest), (reg_base), 0)
+#define ASM_LOAD16_REG_REG(as, reg_dest, reg_base) asm_thumb_ldrh_rlo_rlo_i5((as), (reg_dest), (reg_base), 0)
+#define ASM_LOAD16_REG_REG_OFFSET(as, reg_dest, reg_base, uint16_offset) asm_thumb_ldrh_reg_reg_i12_optimised((as), (reg_dest), (reg_base), (uint16_offset))
+#define ASM_LOAD32_REG_REG(as, reg_dest, reg_base) asm_thumb_ldr_rlo_rlo_i5((as), (reg_dest), (reg_base), 0)
+
+#define ASM_STORE_REG_REG(as, reg_src, reg_base) asm_thumb_str_rlo_rlo_i5((as), (reg_src), (reg_base), 0)
+#define ASM_STORE_REG_REG_OFFSET(as, reg_src, reg_base, word_offset) asm_thumb_str_rlo_rlo_i5((as), (reg_src), (reg_base), (word_offset))
+#define ASM_STORE8_REG_REG(as, reg_src, reg_base) asm_thumb_strb_rlo_rlo_i5((as), (reg_src), (reg_base), 0)
+#define ASM_STORE16_REG_REG(as, reg_src, reg_base) asm_thumb_strh_rlo_rlo_i5((as), (reg_src), (reg_base), 0)
+#define ASM_STORE32_REG_REG(as, reg_src, reg_base) asm_thumb_str_rlo_rlo_i5((as), (reg_src), (reg_base), 0)
+
+#endif // GENERIC_ASM_API
+
+#endif // MICROPY_INCLUDED_PY_ASMTHUMB_H

+ 642 - 0
mp_flipper/lib/micropython/py/asmx64.c

@@ -0,0 +1,642 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+#include <assert.h>
+#include <string.h>
+
+#include "py/mpconfig.h"
+
+// wrapper around everything in this file
+#if MICROPY_EMIT_X64
+
+#include "py/asmx64.h"
+
+/* all offsets are measured in multiples of 8 bytes */
+#define WORD_SIZE                (8)
+
+#define OPCODE_NOP               (0x90)
+#define OPCODE_PUSH_R64          (0x50) /* +rq */
+#define OPCODE_PUSH_I64          (0x68)
+#define OPCODE_PUSH_M64          (0xff) /* /6 */
+#define OPCODE_POP_R64           (0x58) /* +rq */
+#define OPCODE_RET               (0xc3)
+#define OPCODE_MOV_I8_TO_R8      (0xb0) /* +rb */
+#define OPCODE_MOV_I64_TO_R64    (0xb8) /* +rq */
+#define OPCODE_MOV_I32_TO_RM32   (0xc7)
+#define OPCODE_MOV_R8_TO_RM8     (0x88) /* /r */
+#define OPCODE_MOV_R64_TO_RM64   (0x89) /* /r */
+#define OPCODE_MOV_RM64_TO_R64   (0x8b) /* /r */
+#define OPCODE_MOVZX_RM8_TO_R64  (0xb6) /* 0x0f 0xb6/r */
+#define OPCODE_MOVZX_RM16_TO_R64 (0xb7) /* 0x0f 0xb7/r */
+#define OPCODE_LEA_MEM_TO_R64    (0x8d) /* /r */
+#define OPCODE_NOT_RM64          (0xf7) /* /2 */
+#define OPCODE_NEG_RM64          (0xf7) /* /3 */
+#define OPCODE_AND_R64_TO_RM64   (0x21) /* /r */
+#define OPCODE_OR_R64_TO_RM64    (0x09) /* /r */
+#define OPCODE_XOR_R64_TO_RM64   (0x31) /* /r */
+#define OPCODE_ADD_R64_TO_RM64   (0x01) /* /r */
+#define OPCODE_ADD_I32_TO_RM32   (0x81) /* /0 */
+#define OPCODE_ADD_I8_TO_RM32    (0x83) /* /0 */
+#define OPCODE_SUB_R64_FROM_RM64 (0x29)
+#define OPCODE_SUB_I32_FROM_RM64 (0x81) /* /5 */
+#define OPCODE_SUB_I8_FROM_RM64  (0x83) /* /5 */
+// #define OPCODE_SHL_RM32_BY_I8    (0xc1) /* /4 */
+// #define OPCODE_SHR_RM32_BY_I8    (0xc1) /* /5 */
+// #define OPCODE_SAR_RM32_BY_I8    (0xc1) /* /7 */
+#define OPCODE_SHL_RM64_CL       (0xd3) /* /4 */
+#define OPCODE_SHR_RM64_CL       (0xd3) /* /5 */
+#define OPCODE_SAR_RM64_CL       (0xd3) /* /7 */
+// #define OPCODE_CMP_I32_WITH_RM32 (0x81) /* /7 */
+// #define OPCODE_CMP_I8_WITH_RM32  (0x83) /* /7 */
+#define OPCODE_CMP_R64_WITH_RM64 (0x39) /* /r */
+// #define OPCODE_CMP_RM32_WITH_R32 (0x3b)
+#define OPCODE_TEST_R8_WITH_RM8  (0x84) /* /r */
+#define OPCODE_TEST_R64_WITH_RM64 (0x85) /* /r */
+#define OPCODE_JMP_REL8          (0xeb)
+#define OPCODE_JMP_REL32         (0xe9)
+#define OPCODE_JMP_RM64          (0xff) /* /4 */
+#define OPCODE_JCC_REL8          (0x70) /* | jcc type */
+#define OPCODE_JCC_REL32_A       (0x0f)
+#define OPCODE_JCC_REL32_B       (0x80) /* | jcc type */
+#define OPCODE_SETCC_RM8_A       (0x0f)
+#define OPCODE_SETCC_RM8_B       (0x90) /* | jcc type, /0 */
+#define OPCODE_CALL_REL32        (0xe8)
+#define OPCODE_CALL_RM32         (0xff) /* /2 */
+#define OPCODE_LEAVE             (0xc9)
+
+#define MODRM_R64(x)    (((x) & 0x7) << 3)
+#define MODRM_RM_DISP0  (0x00)
+#define MODRM_RM_DISP8  (0x40)
+#define MODRM_RM_DISP32 (0x80)
+#define MODRM_RM_REG    (0xc0)
+#define MODRM_RM_R64(x) ((x) & 0x7)
+
+#define OP_SIZE_PREFIX (0x66)
+
+#define REX_PREFIX  (0x40)
+#define REX_W       (0x08)  // width
+#define REX_R       (0x04)  // register
+#define REX_X       (0x02)  // index
+#define REX_B       (0x01)  // base
+#define REX_W_FROM_R64(r64) ((r64) >> 0 & 0x08)
+#define REX_R_FROM_R64(r64) ((r64) >> 1 & 0x04)
+#define REX_X_FROM_R64(r64) ((r64) >> 2 & 0x02)
+#define REX_B_FROM_R64(r64) ((r64) >> 3 & 0x01)
+
+#define IMM32_L0(x) ((x) & 0xff)
+#define IMM32_L1(x) (((x) >> 8) & 0xff)
+#define IMM32_L2(x) (((x) >> 16) & 0xff)
+#define IMM32_L3(x) (((x) >> 24) & 0xff)
+#define IMM64_L4(x) (((x) >> 32) & 0xff)
+#define IMM64_L5(x) (((x) >> 40) & 0xff)
+#define IMM64_L6(x) (((x) >> 48) & 0xff)
+#define IMM64_L7(x) (((x) >> 56) & 0xff)
+
+#define UNSIGNED_FIT8(x) (((x) & 0xffffffffffffff00) == 0)
+#define UNSIGNED_FIT32(x) (((x) & 0xffffffff00000000) == 0)
+// True if x fits in a signed 8-bit immediate.  The expansion is fully
+// parenthesized so the macro is safe inside larger expressions (the
+// original form `(...) == 0) || (...` would bind incorrectly under `!`
+// or `&&`); all existing uses in this file are unaffected.
+#define SIGNED_FIT8(x) ((((x) & 0xffffff80) == 0) || (((x) & 0xffffff80) == 0xffffff80))
+
+// Low-level output primitives.  All writes go through the shared
+// mp_asm_base machinery, which returns NULL during the initial sizing
+// pass; each writer therefore checks for NULL and, on that pass, only
+// advances the output position without storing anything.
+static inline byte *asm_x64_get_cur_to_write_bytes(asm_x64_t *as, int n) {
+    return mp_asm_base_get_cur_to_write_bytes(&as->base, n);
+}
+
+// Emit one byte.
+static void asm_x64_write_byte_1(asm_x64_t *as, byte b1) {
+    byte *c = asm_x64_get_cur_to_write_bytes(as, 1);
+    if (c != NULL) {
+        c[0] = b1;
+    }
+}
+
+// Emit two consecutive bytes.
+static void asm_x64_write_byte_2(asm_x64_t *as, byte b1, byte b2) {
+    byte *c = asm_x64_get_cur_to_write_bytes(as, 2);
+    if (c != NULL) {
+        c[0] = b1;
+        c[1] = b2;
+    }
+}
+
+// Emit three consecutive bytes.
+static void asm_x64_write_byte_3(asm_x64_t *as, byte b1, byte b2, byte b3) {
+    byte *c = asm_x64_get_cur_to_write_bytes(as, 3);
+    if (c != NULL) {
+        c[0] = b1;
+        c[1] = b2;
+        c[2] = b3;
+    }
+}
+
+// Emit a 32-bit immediate/displacement in little-endian byte order.
+static void asm_x64_write_word32(asm_x64_t *as, int w32) {
+    byte *c = asm_x64_get_cur_to_write_bytes(as, 4);
+    if (c != NULL) {
+        c[0] = IMM32_L0(w32);
+        c[1] = IMM32_L1(w32);
+        c[2] = IMM32_L2(w32);
+        c[3] = IMM32_L3(w32);
+    }
+}
+
+// Emit a 64-bit immediate in little-endian byte order.
+static void asm_x64_write_word64(asm_x64_t *as, int64_t w64) {
+    byte *c = asm_x64_get_cur_to_write_bytes(as, 8);
+    if (c != NULL) {
+        c[0] = IMM32_L0(w64);
+        c[1] = IMM32_L1(w64);
+        c[2] = IMM32_L2(w64);
+        c[3] = IMM32_L3(w64);
+        c[4] = IMM64_L4(w64);
+        c[5] = IMM64_L5(w64);
+        c[6] = IMM64_L6(w64);
+        c[7] = IMM64_L7(w64);
+    }
+}
+
+/* unused
+static void asm_x64_write_word32_to(asm_x64_t *as, int offset, int w32) {
+    byte* c;
+    assert(offset + 4 <= as->code_size);
+    c = as->code_base + offset;
+    c[0] = IMM32_L0(w32);
+    c[1] = IMM32_L1(w32);
+    c[2] = IMM32_L2(w32);
+    c[3] = IMM32_L3(w32);
+}
+*/
+
+// Emit the ModRM byte (plus optional SIB byte and displacement) for a
+// register + [base + offset] memory operand.  Picks the shortest
+// displacement encoding that works: none, disp8, or disp32.  A base of
+// RBP/R13 cannot use the "no displacement" form (in that ModRM encoding
+// mod=00/rm=101 means RIP-relative addressing), so it falls through to
+// the disp8 form with a zero byte.
+static void asm_x64_write_r64_disp(asm_x64_t *as, int r64, int disp_r64, int disp_offset) {
+    uint8_t rm_disp;
+    if (disp_offset == 0 && (disp_r64 & 7) != ASM_X64_REG_RBP) {
+        rm_disp = MODRM_RM_DISP0;
+    } else if (SIGNED_FIT8(disp_offset)) {
+        rm_disp = MODRM_RM_DISP8;
+    } else {
+        rm_disp = MODRM_RM_DISP32;
+    }
+    asm_x64_write_byte_1(as, MODRM_R64(r64) | rm_disp | MODRM_RM_R64(disp_r64));
+    if ((disp_r64 & 7) == ASM_X64_REG_RSP) {
+        // Special case for rsp and r12, they need a SIB byte
+        asm_x64_write_byte_1(as, 0x24);
+    }
+    if (rm_disp == MODRM_RM_DISP8) {
+        asm_x64_write_byte_1(as, IMM32_L0(disp_offset));
+    } else if (rm_disp == MODRM_RM_DISP32) {
+        asm_x64_write_word32(as, disp_offset);
+    }
+}
+
+// Emit a 64-bit register-direct instruction: REX.W prefix, the opcode,
+// then a ModRM byte in register mode.  Several callers pass an opcode
+// extension digit (/2, /4, ...) in place of src_r64 for one-operand
+// instructions such as not/neg/shifts.
+static void asm_x64_generic_r64_r64(asm_x64_t *as, int dest_r64, int src_r64, int op) {
+    asm_x64_write_byte_3(as, REX_PREFIX | REX_W | REX_R_FROM_R64(src_r64) | REX_B_FROM_R64(dest_r64), op, MODRM_R64(src_r64) | MODRM_RM_REG | MODRM_RM_R64(dest_r64));
+}
+
+// Emit a single nop instruction.
+void asm_x64_nop(asm_x64_t *as) {
+    asm_x64_write_byte_1(as, OPCODE_NOP);
+}
+
+// push reg.  Registers r8-r15 need a REX.B prefix and encode their low
+// three bits in the opcode byte.
+void asm_x64_push_r64(asm_x64_t *as, int src_r64) {
+    if (src_r64 < 8) {
+        asm_x64_write_byte_1(as, OPCODE_PUSH_R64 | src_r64);
+    } else {
+        asm_x64_write_byte_2(as, REX_PREFIX | REX_B, OPCODE_PUSH_R64 | (src_r64 & 7));
+    }
+}
+
+/*
+void asm_x64_push_i32(asm_x64_t *as, int src_i32) {
+    asm_x64_write_byte_1(as, OPCODE_PUSH_I64);
+    asm_x64_write_word32(as, src_i32); // will be sign extended to 64 bits
+}
+*/
+
+/*
+void asm_x64_push_disp(asm_x64_t *as, int src_r64, int src_offset) {
+    assert(src_r64 < 8);
+    asm_x64_write_byte_1(as, OPCODE_PUSH_M64);
+    asm_x64_write_r64_disp(as, 6, src_r64, src_offset);
+}
+*/
+
+// pop reg; same REX.B handling as push.
+void asm_x64_pop_r64(asm_x64_t *as, int dest_r64) {
+    if (dest_r64 < 8) {
+        asm_x64_write_byte_1(as, OPCODE_POP_R64 | dest_r64);
+    } else {
+        asm_x64_write_byte_2(as, REX_PREFIX | REX_B, OPCODE_POP_R64 | (dest_r64 & 7));
+    }
+}
+
+// ret (near return).
+static void asm_x64_ret(asm_x64_t *as) {
+    asm_x64_write_byte_1(as, OPCODE_RET);
+}
+
+// mov reg64, reg64.
+void asm_x64_mov_r64_r64(asm_x64_t *as, int dest_r64, int src_r64) {
+    asm_x64_generic_r64_r64(as, dest_r64, src_r64, OPCODE_MOV_R64_TO_RM64);
+}
+
+// Store low 8 bits of a register to memory.  A REX prefix is emitted only
+// when one of the registers is r8-r15.
+void asm_x64_mov_r8_to_mem8(asm_x64_t *as, int src_r64, int dest_r64, int dest_disp) {
+    if (src_r64 < 8 && dest_r64 < 8) {
+        asm_x64_write_byte_1(as, OPCODE_MOV_R8_TO_RM8);
+    } else {
+        asm_x64_write_byte_2(as, REX_PREFIX | REX_R_FROM_R64(src_r64) | REX_B_FROM_R64(dest_r64), OPCODE_MOV_R8_TO_RM8);
+    }
+    asm_x64_write_r64_disp(as, src_r64, dest_r64, dest_disp);
+}
+
+// Store low 16 bits of a register to memory; uses the 0x66 operand-size
+// override prefix to select a 16-bit operation.
+void asm_x64_mov_r16_to_mem16(asm_x64_t *as, int src_r64, int dest_r64, int dest_disp) {
+    if (src_r64 < 8 && dest_r64 < 8) {
+        asm_x64_write_byte_2(as, OP_SIZE_PREFIX, OPCODE_MOV_R64_TO_RM64);
+    } else {
+        asm_x64_write_byte_3(as, OP_SIZE_PREFIX, REX_PREFIX | REX_R_FROM_R64(src_r64) | REX_B_FROM_R64(dest_r64), OPCODE_MOV_R64_TO_RM64);
+    }
+    asm_x64_write_r64_disp(as, src_r64, dest_r64, dest_disp);
+}
+
+// Store low 32 bits of a register to memory (default operand size, no
+// REX.W).
+void asm_x64_mov_r32_to_mem32(asm_x64_t *as, int src_r64, int dest_r64, int dest_disp) {
+    if (src_r64 < 8 && dest_r64 < 8) {
+        asm_x64_write_byte_1(as, OPCODE_MOV_R64_TO_RM64);
+    } else {
+        asm_x64_write_byte_2(as, REX_PREFIX | REX_R_FROM_R64(src_r64) | REX_B_FROM_R64(dest_r64), OPCODE_MOV_R64_TO_RM64);
+    }
+    asm_x64_write_r64_disp(as, src_r64, dest_r64, dest_disp);
+}
+
+// Store a full 64-bit register to memory.
+void asm_x64_mov_r64_to_mem64(asm_x64_t *as, int src_r64, int dest_r64, int dest_disp) {
+    // use REX prefix for 64 bit operation
+    asm_x64_write_byte_2(as, REX_PREFIX | REX_W | REX_R_FROM_R64(src_r64) | REX_B_FROM_R64(dest_r64), OPCODE_MOV_R64_TO_RM64);
+    asm_x64_write_r64_disp(as, src_r64, dest_r64, dest_disp);
+}
+
+// Load a byte from memory, zero-extended into the destination register
+// (movzx, 0x0f 0xb6).
+void asm_x64_mov_mem8_to_r64zx(asm_x64_t *as, int src_r64, int src_disp, int dest_r64) {
+    if (src_r64 < 8 && dest_r64 < 8) {
+        asm_x64_write_byte_2(as, 0x0f, OPCODE_MOVZX_RM8_TO_R64);
+    } else {
+        asm_x64_write_byte_3(as, REX_PREFIX | REX_R_FROM_R64(dest_r64) | REX_B_FROM_R64(src_r64), 0x0f, OPCODE_MOVZX_RM8_TO_R64);
+    }
+    asm_x64_write_r64_disp(as, dest_r64, src_r64, src_disp);
+}
+
+// Load 16 bits from memory, zero-extended (movzx, 0x0f 0xb7).
+void asm_x64_mov_mem16_to_r64zx(asm_x64_t *as, int src_r64, int src_disp, int dest_r64) {
+    if (src_r64 < 8 && dest_r64 < 8) {
+        asm_x64_write_byte_2(as, 0x0f, OPCODE_MOVZX_RM16_TO_R64);
+    } else {
+        asm_x64_write_byte_3(as, REX_PREFIX | REX_R_FROM_R64(dest_r64) | REX_B_FROM_R64(src_r64), 0x0f, OPCODE_MOVZX_RM16_TO_R64);
+    }
+    asm_x64_write_r64_disp(as, dest_r64, src_r64, src_disp);
+}
+
+// Load 32 bits from memory; a plain 32-bit mov suffices because writing a
+// 32-bit register implicitly zero-extends into the full 64-bit register.
+void asm_x64_mov_mem32_to_r64zx(asm_x64_t *as, int src_r64, int src_disp, int dest_r64) {
+    if (src_r64 < 8 && dest_r64 < 8) {
+        asm_x64_write_byte_1(as, OPCODE_MOV_RM64_TO_R64);
+    } else {
+        asm_x64_write_byte_2(as, REX_PREFIX | REX_R_FROM_R64(dest_r64) | REX_B_FROM_R64(src_r64), OPCODE_MOV_RM64_TO_R64);
+    }
+    asm_x64_write_r64_disp(as, dest_r64, src_r64, src_disp);
+}
+
+// Load a full 64-bit value from memory.
+void asm_x64_mov_mem64_to_r64(asm_x64_t *as, int src_r64, int src_disp, int dest_r64) {
+    // use REX prefix for 64 bit operation
+    asm_x64_write_byte_2(as, REX_PREFIX | REX_W | REX_R_FROM_R64(dest_r64) | REX_B_FROM_R64(src_r64), OPCODE_MOV_RM64_TO_R64);
+    asm_x64_write_r64_disp(as, dest_r64, src_r64, src_disp);
+}
+
+// lea dest, [src + disp]: compute an effective address without loading.
+static void asm_x64_lea_disp_to_r64(asm_x64_t *as, int src_r64, int src_disp, int dest_r64) {
+    // use REX prefix for 64 bit operation
+    asm_x64_write_byte_2(as, REX_PREFIX | REX_W | REX_R_FROM_R64(dest_r64) | REX_B_FROM_R64(src_r64), OPCODE_LEA_MEM_TO_R64);
+    asm_x64_write_r64_disp(as, dest_r64, src_r64, src_disp);
+}
+
+/*
+void asm_x64_mov_i8_to_r8(asm_x64_t *as, int src_i8, int dest_r64) {
+    assert(dest_r64 < 8);
+    asm_x64_write_byte_2(as, OPCODE_MOV_I8_TO_R8 | dest_r64, src_i8);
+}
+*/
+
+// mov reg, imm32 (the CPU zero-extends into the 64-bit register).
+// Returns the code offset of the 32-bit immediate so a later pass can
+// patch it in place.
+size_t asm_x64_mov_i32_to_r64(asm_x64_t *as, int src_i32, int dest_r64) {
+    // cpu defaults to i32 to r64, with zero extension
+    if (dest_r64 < 8) {
+        asm_x64_write_byte_1(as, OPCODE_MOV_I64_TO_R64 | dest_r64);
+    } else {
+        asm_x64_write_byte_2(as, REX_PREFIX | REX_B, OPCODE_MOV_I64_TO_R64 | (dest_r64 & 7));
+    }
+    size_t loc = mp_asm_base_get_code_pos(&as->base);
+    asm_x64_write_word32(as, src_i32);
+    return loc;
+}
+
+// mov reg, imm64: full 64-bit immediate, selected by the REX.W prefix.
+void asm_x64_mov_i64_to_r64(asm_x64_t *as, int64_t src_i64, int dest_r64) {
+    // cpu defaults to i32 to r64
+    // to mov i64 to r64 need to use REX prefix
+    asm_x64_write_byte_2(as,
+        REX_PREFIX | REX_W | (dest_r64 < 8 ? 0 : REX_B),
+        OPCODE_MOV_I64_TO_R64 | (dest_r64 & 7));
+    asm_x64_write_word64(as, src_i64);
+}
+
+// Load an immediate with the shortest usable encoding: the 5-byte imm32
+// form when the value fits unsigned in 32 bits, else the 10-byte imm64
+// form.
+void asm_x64_mov_i64_to_r64_optimised(asm_x64_t *as, int64_t src_i64, int dest_r64) {
+    // TODO use movzx, movsx if possible
+    if (UNSIGNED_FIT32(src_i64)) {
+        // 5 bytes
+        asm_x64_mov_i32_to_r64(as, src_i64 & 0xffffffff, dest_r64);
+    } else {
+        // 10 bytes
+        asm_x64_mov_i64_to_r64(as, src_i64, dest_r64);
+    }
+}
+
+// 64-bit ALU operations.  For the one-operand forms (not/neg/shifts) the
+// second argument to asm_x64_generic_r64_r64 is the opcode extension
+// digit (/2, /3, /4, /5, /7), not a source register.
+
+// not reg (0xf7 /2).
+void asm_x64_not_r64(asm_x64_t *as, int dest_r64) {
+    asm_x64_generic_r64_r64(as, dest_r64, 2, OPCODE_NOT_RM64);
+}
+
+// neg reg (0xf7 /3).
+void asm_x64_neg_r64(asm_x64_t *as, int dest_r64) {
+    asm_x64_generic_r64_r64(as, dest_r64, 3, OPCODE_NEG_RM64);
+}
+
+// and dest, src.
+void asm_x64_and_r64_r64(asm_x64_t *as, int dest_r64, int src_r64) {
+    asm_x64_generic_r64_r64(as, dest_r64, src_r64, OPCODE_AND_R64_TO_RM64);
+}
+
+// or dest, src.
+void asm_x64_or_r64_r64(asm_x64_t *as, int dest_r64, int src_r64) {
+    asm_x64_generic_r64_r64(as, dest_r64, src_r64, OPCODE_OR_R64_TO_RM64);
+}
+
+// xor dest, src.
+void asm_x64_xor_r64_r64(asm_x64_t *as, int dest_r64, int src_r64) {
+    asm_x64_generic_r64_r64(as, dest_r64, src_r64, OPCODE_XOR_R64_TO_RM64);
+}
+
+// shl reg, cl (shift count implicitly in CL).
+void asm_x64_shl_r64_cl(asm_x64_t *as, int dest_r64) {
+    asm_x64_generic_r64_r64(as, dest_r64, 4, OPCODE_SHL_RM64_CL);
+}
+
+// shr reg, cl (logical right shift).
+void asm_x64_shr_r64_cl(asm_x64_t *as, int dest_r64) {
+    asm_x64_generic_r64_r64(as, dest_r64, 5, OPCODE_SHR_RM64_CL);
+}
+
+// sar reg, cl (arithmetic right shift).
+void asm_x64_sar_r64_cl(asm_x64_t *as, int dest_r64) {
+    asm_x64_generic_r64_r64(as, dest_r64, 7, OPCODE_SAR_RM64_CL);
+}
+
+// add dest, src.
+void asm_x64_add_r64_r64(asm_x64_t *as, int dest_r64, int src_r64) {
+    asm_x64_generic_r64_r64(as, dest_r64, src_r64, OPCODE_ADD_R64_TO_RM64);
+}
+
+// sub dest, src.
+void asm_x64_sub_r64_r64(asm_x64_t *as, int dest_r64, int src_r64) {
+    asm_x64_generic_r64_r64(as, dest_r64, src_r64, OPCODE_SUB_R64_FROM_RM64);
+}
+
+// imul dest, src (two-operand signed multiply).
+void asm_x64_mul_r64_r64(asm_x64_t *as, int dest_r64, int src_r64) {
+    // imul reg64, reg/mem64 -- 0x0f 0xaf /r
+    asm_x64_write_byte_1(as, REX_PREFIX | REX_W | REX_R_FROM_R64(dest_r64) | REX_B_FROM_R64(src_r64));
+    asm_x64_write_byte_3(as, 0x0f, 0xaf, MODRM_R64(dest_r64) | MODRM_RM_REG | MODRM_RM_R64(src_r64));
+}
+
+/*
+void asm_x64_sub_i32_from_r32(asm_x64_t *as, int src_i32, int dest_r32) {
+    if (SIGNED_FIT8(src_i32)) {
+        // defaults to 32 bit operation
+        asm_x64_write_byte_2(as, OPCODE_SUB_I8_FROM_RM64, MODRM_R64(5) | MODRM_RM_REG | MODRM_RM_R64(dest_r32));
+        asm_x64_write_byte_1(as, src_i32 & 0xff);
+    } else {
+        // defaults to 32 bit operation
+        asm_x64_write_byte_2(as, OPCODE_SUB_I32_FROM_RM64, MODRM_R64(5) | MODRM_RM_REG | MODRM_RM_R64(dest_r32));
+        asm_x64_write_word32(as, src_i32);
+    }
+}
+*/
+
+// sub reg64, imm: picks the sign-extended 8-bit immediate form when the
+// value fits, otherwise the 32-bit immediate form (/5 opcode extension).
+// Only the low eight registers are supported (no REX.B handling here).
+static void asm_x64_sub_r64_i32(asm_x64_t *as, int dest_r64, int src_i32) {
+    assert(dest_r64 < 8);
+    if (SIGNED_FIT8(src_i32)) {
+        // use REX prefix for 64 bit operation
+        asm_x64_write_byte_3(as, REX_PREFIX | REX_W, OPCODE_SUB_I8_FROM_RM64, MODRM_R64(5) | MODRM_RM_REG | MODRM_RM_R64(dest_r64));
+        asm_x64_write_byte_1(as, src_i32 & 0xff);
+    } else {
+        // use REX prefix for 64 bit operation
+        asm_x64_write_byte_3(as, REX_PREFIX | REX_W, OPCODE_SUB_I32_FROM_RM64, MODRM_R64(5) | MODRM_RM_REG | MODRM_RM_R64(dest_r64));
+        asm_x64_write_word32(as, src_i32);
+    }
+}
+
+/*
+void asm_x64_shl_r32_by_imm(asm_x64_t *as, int r32, int imm) {
+    asm_x64_write_byte_2(as, OPCODE_SHL_RM32_BY_I8, MODRM_R64(4) | MODRM_RM_REG | MODRM_RM_R64(r32));
+    asm_x64_write_byte_1(as, imm);
+}
+
+void asm_x64_shr_r32_by_imm(asm_x64_t *as, int r32, int imm) {
+    asm_x64_write_byte_2(as, OPCODE_SHR_RM32_BY_I8, MODRM_R64(5) | MODRM_RM_REG | MODRM_RM_R64(r32));
+    asm_x64_write_byte_1(as, imm);
+}
+
+void asm_x64_sar_r32_by_imm(asm_x64_t *as, int r32, int imm) {
+    asm_x64_write_byte_2(as, OPCODE_SAR_RM32_BY_I8, MODRM_R64(7) | MODRM_RM_REG | MODRM_RM_R64(r32));
+    asm_x64_write_byte_1(as, imm);
+}
+*/
+
+// cmp a, b: sets flags from a - b.  Note the operand swap: the generic
+// encoder's "dest" slot is the r/m operand, so src_r64_b goes first.
+void asm_x64_cmp_r64_with_r64(asm_x64_t *as, int src_r64_a, int src_r64_b) {
+    asm_x64_generic_r64_r64(as, src_r64_b, src_r64_a, OPCODE_CMP_R64_WITH_RM64);
+}
+
+/*
+void asm_x64_cmp_i32_with_r32(asm_x64_t *as, int src_i32, int src_r32) {
+    if (SIGNED_FIT8(src_i32)) {
+        asm_x64_write_byte_2(as, OPCODE_CMP_I8_WITH_RM32, MODRM_R64(7) | MODRM_RM_REG | MODRM_RM_R64(src_r32));
+        asm_x64_write_byte_1(as, src_i32 & 0xff);
+    } else {
+        asm_x64_write_byte_2(as, OPCODE_CMP_I32_WITH_RM32, MODRM_R64(7) | MODRM_RM_REG | MODRM_RM_R64(src_r32));
+        asm_x64_write_word32(as, src_i32);
+    }
+}
+*/
+
+// test a, b on the low 8 bits (sets ZF from a & b).  Low registers only.
+void asm_x64_test_r8_with_r8(asm_x64_t *as, int src_r64_a, int src_r64_b) {
+    assert(src_r64_a < 8);
+    assert(src_r64_b < 8);
+    asm_x64_write_byte_2(as, OPCODE_TEST_R8_WITH_RM8, MODRM_R64(src_r64_a) | MODRM_RM_REG | MODRM_RM_R64(src_r64_b));
+}
+
+// test a, b over the full 64 bits.
+void asm_x64_test_r64_with_r64(asm_x64_t *as, int src_r64_a, int src_r64_b) {
+    asm_x64_generic_r64_r64(as, src_r64_b, src_r64_a, OPCODE_TEST_R64_WITH_RM64);
+}
+
+// setcc reg8: store 0/1 into an 8-bit register based on a condition code.
+void asm_x64_setcc_r8(asm_x64_t *as, int jcc_type, int dest_r8) {
+    assert(dest_r8 < 8);
+    asm_x64_write_byte_3(as, OPCODE_SETCC_RM8_A, OPCODE_SETCC_RM8_B | jcc_type, MODRM_R64(0) | MODRM_RM_REG | MODRM_RM_R64(dest_r8));
+}
+
+// jmp reg: indirect jump through a register (0xff /4).
+void asm_x64_jmp_reg(asm_x64_t *as, int src_r64) {
+    assert(src_r64 < 8);
+    asm_x64_write_byte_2(as, OPCODE_JMP_RM64, MODRM_R64(4) | MODRM_RM_REG | MODRM_RM_R64(src_r64));
+}
+
+// Look up a label's resolved code offset; (mp_uint_t)-1 means the label
+// has not been bound yet (forward reference on the first pass).
+static mp_uint_t get_label_dest(asm_x64_t *as, mp_uint_t label) {
+    assert(label < as->base.max_num_labels);
+    return as->base.label_offsets[label];
+}
+
+// Unconditional jump to a label.  Backward jumps (destination already
+// known) can use the 2-byte rel8 form when the offset fits; forward jumps
+// must conservatively use the 5-byte rel32 form because the destination
+// is unknown on the first pass.
+void asm_x64_jmp_label(asm_x64_t *as, mp_uint_t label) {
+    mp_uint_t dest = get_label_dest(as, label);
+    mp_int_t rel = dest - as->base.code_offset;
+    if (dest != (mp_uint_t)-1 && rel < 0) {
+        // is a backwards jump, so we know the size of the jump on the first pass
+        // calculate rel assuming 8 bit relative jump
+        rel -= 2;
+        if (SIGNED_FIT8(rel)) {
+            asm_x64_write_byte_2(as, OPCODE_JMP_REL8, rel & 0xff);
+        } else {
+            rel += 2;
+            goto large_jump;
+        }
+    } else {
+        // is a forwards jump, so need to assume it's large
+    large_jump:
+        rel -= 5;
+        asm_x64_write_byte_1(as, OPCODE_JMP_REL32);
+        asm_x64_write_word32(as, rel);
+    }
+}
+
+// Conditional jump to a label; same rel8/rel32 size selection as
+// asm_x64_jmp_label (rel32 form is 6 bytes: 0x0f prefix + opcode).
+void asm_x64_jcc_label(asm_x64_t *as, int jcc_type, mp_uint_t label) {
+    mp_uint_t dest = get_label_dest(as, label);
+    mp_int_t rel = dest - as->base.code_offset;
+    if (dest != (mp_uint_t)-1 && rel < 0) {
+        // is a backwards jump, so we know the size of the jump on the first pass
+        // calculate rel assuming 8 bit relative jump
+        rel -= 2;
+        if (SIGNED_FIT8(rel)) {
+            asm_x64_write_byte_2(as, OPCODE_JCC_REL8 | jcc_type, rel & 0xff);
+        } else {
+            rel += 2;
+            goto large_jump;
+        }
+    } else {
+        // is a forwards jump, so need to assume it's large
+    large_jump:
+        rel -= 6;
+        asm_x64_write_byte_2(as, OPCODE_JCC_REL32_A, OPCODE_JCC_REL32_B | jcc_type);
+        asm_x64_write_word32(as, rel);
+    }
+}
+
+// Function prologue: save the callee-save registers this emitter uses
+// (RBP, RBX, R12, R13) and reserve stack space for the locals.  The local
+// count is forced odd so that, together with the four pushes and the
+// return address, RSP stays 16-byte aligned for calls.
+void asm_x64_entry(asm_x64_t *as, int num_locals) {
+    assert(num_locals >= 0);
+    asm_x64_push_r64(as, ASM_X64_REG_RBP);
+    asm_x64_push_r64(as, ASM_X64_REG_RBX);
+    asm_x64_push_r64(as, ASM_X64_REG_R12);
+    asm_x64_push_r64(as, ASM_X64_REG_R13);
+    num_locals |= 1; // make it odd so stack is aligned on 16 byte boundary
+    asm_x64_sub_r64_i32(as, ASM_X64_REG_RSP, num_locals * WORD_SIZE);
+    as->num_locals = num_locals;
+}
+
+// Function epilogue: release the locals (subtracting a negative count
+// adds to RSP), restore callee-save registers in reverse order, return.
+void asm_x64_exit(asm_x64_t *as) {
+    asm_x64_sub_r64_i32(as, ASM_X64_REG_RSP, -as->num_locals * WORD_SIZE);
+    asm_x64_pop_r64(as, ASM_X64_REG_R13);
+    asm_x64_pop_r64(as, ASM_X64_REG_R12);
+    asm_x64_pop_r64(as, ASM_X64_REG_RBX);
+    asm_x64_pop_r64(as, ASM_X64_REG_RBP);
+    asm_x64_ret(as);
+}
+
+// locals:
+//  - stored on the stack in ascending order
+//  - numbered 0 through as->num_locals-1
+//  - RSP points to the first local
+//
+//  | RSP
+//  v
+//  l0  l1  l2  ...  l(n-1)
+//  ^                ^
+//  | low address    | high address in RAM
+//
+// Byte offset of a numbered local relative to RSP.
+static int asm_x64_local_offset_from_rsp(asm_x64_t *as, int local_num) {
+    (void)as;
+    // Stack is full descending, RSP points to local0
+    return local_num * WORD_SIZE;
+}
+
+// Load a stack local into a register.
+void asm_x64_mov_local_to_r64(asm_x64_t *as, int src_local_num, int dest_r64) {
+    asm_x64_mov_mem64_to_r64(as, ASM_X64_REG_RSP, asm_x64_local_offset_from_rsp(as, src_local_num), dest_r64);
+}
+
+// Store a register into a stack local.
+void asm_x64_mov_r64_to_local(asm_x64_t *as, int src_r64, int dest_local_num) {
+    asm_x64_mov_r64_to_mem64(as, src_r64, ASM_X64_REG_RSP, asm_x64_local_offset_from_rsp(as, dest_local_num));
+}
+
+// Load the address of a stack local; local 0 is just RSP itself, others
+// use lea.
+void asm_x64_mov_local_addr_to_r64(asm_x64_t *as, int local_num, int dest_r64) {
+    int offset = asm_x64_local_offset_from_rsp(as, local_num);
+    if (offset == 0) {
+        asm_x64_mov_r64_r64(as, dest_r64, ASM_X64_REG_RSP);
+    } else {
+        asm_x64_lea_disp_to_r64(as, ASM_X64_REG_RSP, offset, dest_r64);
+    }
+}
+
+// Load a label's address PC-relative: lea dest, [rip + rel32].  The
+// ModRM rm field of 5 with mod=00 selects RIP-relative addressing; the
+// relative offset is computed from the end of this 7-byte instruction.
+void asm_x64_mov_reg_pcrel(asm_x64_t *as, int dest_r64, mp_uint_t label) {
+    mp_uint_t dest = get_label_dest(as, label);
+    mp_int_t rel = dest - (as->base.code_offset + 7);
+    asm_x64_write_byte_3(as, REX_PREFIX | REX_W | REX_R_FROM_R64(dest_r64), OPCODE_LEA_MEM_TO_R64, MODRM_R64(dest_r64) | MODRM_RM_R64(5));
+    asm_x64_write_word32(as, rel);
+}
+
+/*
+void asm_x64_push_local(asm_x64_t *as, int local_num) {
+    asm_x64_push_disp(as, ASM_X64_REG_RSP, asm_x64_local_offset_from_rsp(as, local_num));
+}
+
+void asm_x64_push_local_addr(asm_x64_t *as, int local_num, int temp_r64) {
+    asm_x64_mov_r64_r64(as, temp_r64, ASM_X64_REG_RSP);
+    asm_x64_add_i32_to_r32(as, asm_x64_local_offset_from_rsp(as, local_num), temp_r64);
+    asm_x64_push_r64(as, temp_r64);
+}
+*/
+
+/*
+   can't use these because code might be relocated when resized
+
+void asm_x64_call(asm_x64_t *as, void* func) {
+    asm_x64_sub_i32_from_r32(as, 8, ASM_X64_REG_RSP);
+    asm_x64_write_byte_1(as, OPCODE_CALL_REL32);
+    asm_x64_write_word32(as, func - (void*)(as->code_cur + 4));
+    asm_x64_mov_r64_r64(as, ASM_X64_REG_RSP, ASM_X64_REG_RBP);
+}
+
+void asm_x64_call_i1(asm_x64_t *as, void* func, int i1) {
+    asm_x64_sub_i32_from_r32(as, 8, ASM_X64_REG_RSP);
+    asm_x64_sub_i32_from_r32(as, 12, ASM_X64_REG_RSP);
+    asm_x64_push_i32(as, i1);
+    asm_x64_write_byte_1(as, OPCODE_CALL_REL32);
+    asm_x64_write_word32(as, func - (void*)(as->code_cur + 4));
+    asm_x64_add_i32_to_r32(as, 16, ASM_X64_REG_RSP);
+    asm_x64_mov_r64_r64(as, ASM_X64_REG_RSP, ASM_X64_REG_RBP);
+}
+*/
+
+// Indirect call through the runtime function table: load the pointer at
+// fun_table[fun_id] into a temporary register, then call it (0xff /2).
+// Position-independent, so the generated code can be relocated.
+void asm_x64_call_ind(asm_x64_t *as, size_t fun_id, int temp_r64) {
+    assert(temp_r64 < 8);
+    asm_x64_mov_mem64_to_r64(as, ASM_X64_REG_FUN_TABLE, fun_id * WORD_SIZE, temp_r64);
+    asm_x64_write_byte_2(as, OPCODE_CALL_RM32, MODRM_R64(2) | MODRM_RM_REG | MODRM_RM_R64(temp_r64));
+}
+
+#endif // MICROPY_EMIT_X64

+ 223 - 0
mp_flipper/lib/micropython/py/asmx64.h

@@ -0,0 +1,223 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_ASMX64_H
+#define MICROPY_INCLUDED_PY_ASMX64_H
+
+#include "py/mpconfig.h"
+#include "py/misc.h"
+#include "py/asmbase.h"
+
+// AMD64 calling convention is:
+//  - args pass in: RDI, RSI, RDX, RCX, R08, R09
+//  - return value in RAX
+//  - stack must be aligned on a 16-byte boundary before all calls
+//  - RAX, RCX, RDX, RSI, RDI, R08, R09, R10, R11 are caller-save
+//  - RBX, RBP, R12, R13, R14, R15 are callee-save
+
+// In the functions below, argument order follows x86 docs and generally
+// the destination is the first argument.
+// NOTE: this is a change from the old convention used in this file and
+// some functions still use the old (reverse) convention.
+
+#define ASM_X64_REG_RAX (0)
+#define ASM_X64_REG_RCX (1)
+#define ASM_X64_REG_RDX (2)
+#define ASM_X64_REG_RBX (3)
+#define ASM_X64_REG_RSP (4)
+#define ASM_X64_REG_RBP (5)
+#define ASM_X64_REG_RSI (6)
+#define ASM_X64_REG_RDI (7)
+#define ASM_X64_REG_R08 (8)
+#define ASM_X64_REG_R09 (9)
+#define ASM_X64_REG_R10 (10)
+#define ASM_X64_REG_R11 (11)
+#define ASM_X64_REG_R12 (12)
+#define ASM_X64_REG_R13 (13)
+#define ASM_X64_REG_R14 (14)
+#define ASM_X64_REG_R15 (15)
+
+// condition codes, used for jcc and setcc (despite their j-name!)
+#define ASM_X64_CC_JB  (0x2) // below, unsigned
+#define ASM_X64_CC_JAE (0x3) // above or equal, unsigned
+#define ASM_X64_CC_JZ  (0x4)
+#define ASM_X64_CC_JE  (0x4)
+#define ASM_X64_CC_JNZ (0x5)
+#define ASM_X64_CC_JNE (0x5)
+#define ASM_X64_CC_JBE (0x6) // below or equal, unsigned
+#define ASM_X64_CC_JA  (0x7) // above, unsigned
+#define ASM_X64_CC_JL  (0xc) // less, signed
+#define ASM_X64_CC_JGE (0xd) // greater or equal, signed
+#define ASM_X64_CC_JLE (0xe) // less or equal, signed
+#define ASM_X64_CC_JG  (0xf) // greater, signed
+
+// Assembler state: the shared mp_asm_base_t plus the number of stack
+// locals reserved by asm_x64_entry (needed again by asm_x64_exit).
+typedef struct _asm_x64_t {
+    mp_asm_base_t base;
+    int num_locals;
+} asm_x64_t;
+
+// No per-pass fixups are needed on x64, so end-of-pass is a no-op.
+static inline void asm_x64_end_pass(asm_x64_t *as) {
+    (void)as;
+}
+
+void asm_x64_nop(asm_x64_t *as);
+void asm_x64_push_r64(asm_x64_t *as, int src_r64);
+void asm_x64_pop_r64(asm_x64_t *as, int dest_r64);
+void asm_x64_mov_r64_r64(asm_x64_t *as, int dest_r64, int src_r64);
+size_t asm_x64_mov_i32_to_r64(asm_x64_t *as, int src_i32, int dest_r64);
+void asm_x64_mov_i64_to_r64(asm_x64_t *as, int64_t src_i64, int dest_r64);
+void asm_x64_mov_i64_to_r64_optimised(asm_x64_t *as, int64_t src_i64, int dest_r64);
+void asm_x64_mov_r8_to_mem8(asm_x64_t *as, int src_r64, int dest_r64, int dest_disp);
+void asm_x64_mov_r16_to_mem16(asm_x64_t *as, int src_r64, int dest_r64, int dest_disp);
+void asm_x64_mov_r32_to_mem32(asm_x64_t *as, int src_r64, int dest_r64, int dest_disp);
+void asm_x64_mov_r64_to_mem64(asm_x64_t *as, int src_r64, int dest_r64, int dest_disp);
+void asm_x64_mov_mem8_to_r64zx(asm_x64_t *as, int src_r64, int src_disp, int dest_r64);
+void asm_x64_mov_mem16_to_r64zx(asm_x64_t *as, int src_r64, int src_disp, int dest_r64);
+void asm_x64_mov_mem32_to_r64zx(asm_x64_t *as, int src_r64, int src_disp, int dest_r64);
+void asm_x64_mov_mem64_to_r64(asm_x64_t *as, int src_r64, int src_disp, int dest_r64);
+void asm_x64_not_r64(asm_x64_t *as, int dest_r64);
+void asm_x64_neg_r64(asm_x64_t *as, int dest_r64);
+void asm_x64_and_r64_r64(asm_x64_t *as, int dest_r64, int src_r64);
+void asm_x64_or_r64_r64(asm_x64_t *as, int dest_r64, int src_r64);
+void asm_x64_xor_r64_r64(asm_x64_t *as, int dest_r64, int src_r64);
+void asm_x64_shl_r64_cl(asm_x64_t *as, int dest_r64);
+void asm_x64_shr_r64_cl(asm_x64_t *as, int dest_r64);
+void asm_x64_sar_r64_cl(asm_x64_t *as, int dest_r64);
+void asm_x64_add_r64_r64(asm_x64_t *as, int dest_r64, int src_r64);
+void asm_x64_sub_r64_r64(asm_x64_t *as, int dest_r64, int src_r64);
+void asm_x64_mul_r64_r64(asm_x64_t *as, int dest_r64, int src_r64);
+void asm_x64_cmp_r64_with_r64(asm_x64_t *as, int src_r64_a, int src_r64_b);
+void asm_x64_test_r8_with_r8(asm_x64_t *as, int src_r64_a, int src_r64_b);
+void asm_x64_test_r64_with_r64(asm_x64_t *as, int src_r64_a, int src_r64_b);
+void asm_x64_setcc_r8(asm_x64_t *as, int jcc_type, int dest_r8);
+void asm_x64_jmp_reg(asm_x64_t *as, int src_r64);
+void asm_x64_jmp_label(asm_x64_t *as, mp_uint_t label);
+void asm_x64_jcc_label(asm_x64_t *as, int jcc_type, mp_uint_t label);
+void asm_x64_entry(asm_x64_t *as, int num_locals);
+void asm_x64_exit(asm_x64_t *as);
+void asm_x64_mov_local_to_r64(asm_x64_t *as, int src_local_num, int dest_r64);
+void asm_x64_mov_r64_to_local(asm_x64_t *as, int src_r64, int dest_local_num);
+void asm_x64_mov_local_addr_to_r64(asm_x64_t *as, int local_num, int dest_r64);
+void asm_x64_mov_reg_pcrel(asm_x64_t *as, int dest_r64, mp_uint_t label);
+void asm_x64_call_ind(asm_x64_t *as, size_t fun_id, int temp_r32);
+
+// Holds a pointer to mp_fun_table
+#define ASM_X64_REG_FUN_TABLE ASM_X64_REG_RBP
+
+#if GENERIC_ASM_API
+
+// The following macros provide a (mostly) arch-independent API to
+// generate native code, and are used by the native emitter.
+
+#define ASM_WORD_SIZE (8)
+
+#define REG_RET ASM_X64_REG_RAX
+#define REG_ARG_1 ASM_X64_REG_RDI
+#define REG_ARG_2 ASM_X64_REG_RSI
+#define REG_ARG_3 ASM_X64_REG_RDX
+#define REG_ARG_4 ASM_X64_REG_RCX
+#define REG_ARG_5 ASM_X64_REG_R08
+
+// caller-save
+#define REG_TEMP0 ASM_X64_REG_RAX
+#define REG_TEMP1 ASM_X64_REG_RDI
+#define REG_TEMP2 ASM_X64_REG_RSI
+
+// callee-save
+#define REG_LOCAL_1 ASM_X64_REG_RBX
+#define REG_LOCAL_2 ASM_X64_REG_R12
+#define REG_LOCAL_3 ASM_X64_REG_R13
+#define REG_LOCAL_NUM (3)
+
+// Holds a pointer to mp_fun_table
+#define REG_FUN_TABLE ASM_X64_REG_FUN_TABLE
+
+#define ASM_T               asm_x64_t
+#define ASM_END_PASS        asm_x64_end_pass
+#define ASM_ENTRY           asm_x64_entry
+#define ASM_EXIT            asm_x64_exit
+
+#define ASM_JUMP            asm_x64_jmp_label
+#define ASM_JUMP_IF_REG_ZERO(as, reg, label, bool_test) \
+    do { \
+        if (bool_test) { \
+            asm_x64_test_r8_with_r8((as), (reg), (reg)); \
+        } else { \
+            asm_x64_test_r64_with_r64((as), (reg), (reg)); \
+        } \
+        asm_x64_jcc_label(as, ASM_X64_CC_JZ, label); \
+    } while (0)
+#define ASM_JUMP_IF_REG_NONZERO(as, reg, label, bool_test) \
+    do { \
+        if (bool_test) { \
+            asm_x64_test_r8_with_r8((as), (reg), (reg)); \
+        } else { \
+            asm_x64_test_r64_with_r64((as), (reg), (reg)); \
+        } \
+        asm_x64_jcc_label(as, ASM_X64_CC_JNZ, label); \
+    } while (0)
+#define ASM_JUMP_IF_REG_EQ(as, reg1, reg2, label) \
+    do { \
+        asm_x64_cmp_r64_with_r64(as, reg1, reg2); \
+        asm_x64_jcc_label(as, ASM_X64_CC_JE, label); \
+    } while (0)
+#define ASM_JUMP_REG(as, reg) asm_x64_jmp_reg((as), (reg))
+#define ASM_CALL_IND(as, idx) asm_x64_call_ind(as, idx, ASM_X64_REG_RAX)
+
+#define ASM_MOV_LOCAL_REG(as, local_num, reg_src) asm_x64_mov_r64_to_local((as), (reg_src), (local_num))
+#define ASM_MOV_REG_IMM(as, reg_dest, imm) asm_x64_mov_i64_to_r64_optimised((as), (imm), (reg_dest))
+#define ASM_MOV_REG_LOCAL(as, reg_dest, local_num) asm_x64_mov_local_to_r64((as), (local_num), (reg_dest))
+#define ASM_MOV_REG_REG(as, reg_dest, reg_src) asm_x64_mov_r64_r64((as), (reg_dest), (reg_src))
+#define ASM_MOV_REG_LOCAL_ADDR(as, reg_dest, local_num) asm_x64_mov_local_addr_to_r64((as), (local_num), (reg_dest))
+#define ASM_MOV_REG_PCREL(as, reg_dest, label) asm_x64_mov_reg_pcrel((as), (reg_dest), (label))
+
+#define ASM_NOT_REG(as, reg) asm_x64_not_r64((as), (reg))
+#define ASM_NEG_REG(as, reg) asm_x64_neg_r64((as), (reg))
+#define ASM_LSL_REG(as, reg) asm_x64_shl_r64_cl((as), (reg))
+#define ASM_LSR_REG(as, reg) asm_x64_shr_r64_cl((as), (reg))
+#define ASM_ASR_REG(as, reg) asm_x64_sar_r64_cl((as), (reg))
+#define ASM_OR_REG_REG(as, reg_dest, reg_src) asm_x64_or_r64_r64((as), (reg_dest), (reg_src))
+#define ASM_XOR_REG_REG(as, reg_dest, reg_src) asm_x64_xor_r64_r64((as), (reg_dest), (reg_src))
+#define ASM_AND_REG_REG(as, reg_dest, reg_src) asm_x64_and_r64_r64((as), (reg_dest), (reg_src))
+#define ASM_ADD_REG_REG(as, reg_dest, reg_src) asm_x64_add_r64_r64((as), (reg_dest), (reg_src))
+#define ASM_SUB_REG_REG(as, reg_dest, reg_src) asm_x64_sub_r64_r64((as), (reg_dest), (reg_src))
+#define ASM_MUL_REG_REG(as, reg_dest, reg_src) asm_x64_mul_r64_r64((as), (reg_dest), (reg_src))
+
+#define ASM_LOAD_REG_REG(as, reg_dest, reg_base) asm_x64_mov_mem64_to_r64((as), (reg_base), 0, (reg_dest))
+#define ASM_LOAD_REG_REG_OFFSET(as, reg_dest, reg_base, word_offset) asm_x64_mov_mem64_to_r64((as), (reg_base), 8 * (word_offset), (reg_dest))
+#define ASM_LOAD8_REG_REG(as, reg_dest, reg_base) asm_x64_mov_mem8_to_r64zx((as), (reg_base), 0, (reg_dest))
+#define ASM_LOAD16_REG_REG(as, reg_dest, reg_base) asm_x64_mov_mem16_to_r64zx((as), (reg_base), 0, (reg_dest))
+#define ASM_LOAD16_REG_REG_OFFSET(as, reg_dest, reg_base, uint16_offset) asm_x64_mov_mem16_to_r64zx((as), (reg_base), 2 * (uint16_offset), (reg_dest))
+#define ASM_LOAD32_REG_REG(as, reg_dest, reg_base) asm_x64_mov_mem32_to_r64zx((as), (reg_base), 0, (reg_dest))
+
+#define ASM_STORE_REG_REG(as, reg_src, reg_base) asm_x64_mov_r64_to_mem64((as), (reg_src), (reg_base), 0)
+#define ASM_STORE_REG_REG_OFFSET(as, reg_src, reg_base, word_offset) asm_x64_mov_r64_to_mem64((as), (reg_src), (reg_base), 8 * (word_offset))
+#define ASM_STORE8_REG_REG(as, reg_src, reg_base) asm_x64_mov_r8_to_mem8((as), (reg_src), (reg_base), 0)
+#define ASM_STORE16_REG_REG(as, reg_src, reg_base) asm_x64_mov_r16_to_mem16((as), (reg_src), (reg_base), 0)
+#define ASM_STORE32_REG_REG(as, reg_src, reg_base) asm_x64_mov_r32_to_mem32((as), (reg_src), (reg_base), 0)
+
+#endif // GENERIC_ASM_API
+
+#endif // MICROPY_INCLUDED_PY_ASMX64_H

+ 545 - 0
mp_flipper/lib/micropython/py/asmx86.c

@@ -0,0 +1,545 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+#include <assert.h>
+#include <string.h>
+
+#include "py/mpconfig.h"
+
+// wrapper around everything in this file
+#if MICROPY_EMIT_X86
+
+#include "py/asmx86.h"
+
+/* all offsets are measured in multiples of 4 bytes */
+#define WORD_SIZE                (4)
+
+#define OPCODE_NOP               (0x90)
+#define OPCODE_PUSH_R32          (0x50)
+// #define OPCODE_PUSH_I32          (0x68)
+// #define OPCODE_PUSH_M32          (0xff) /* /6 */
+#define OPCODE_POP_R32           (0x58)
+#define OPCODE_RET               (0xc3)
+// #define OPCODE_MOV_I8_TO_R8      (0xb0) /* +rb */
+#define OPCODE_MOV_I32_TO_R32    (0xb8)
+// #define OPCODE_MOV_I32_TO_RM32   (0xc7)
+#define OPCODE_MOV_R8_TO_RM8     (0x88) /* /r */
+#define OPCODE_MOV_R32_TO_RM32   (0x89) /* /r */
+#define OPCODE_MOV_RM32_TO_R32   (0x8b) /* /r */
+#define OPCODE_MOVZX_RM8_TO_R32  (0xb6) /* 0x0f 0xb6/r */
+#define OPCODE_MOVZX_RM16_TO_R32 (0xb7) /* 0x0f 0xb7/r */
+#define OPCODE_LEA_MEM_TO_R32    (0x8d) /* /r */
+#define OPCODE_NOT_RM32          (0xf7) /* /2 */
+#define OPCODE_NEG_RM32          (0xf7) /* /3 */
+#define OPCODE_AND_R32_TO_RM32   (0x21) /* /r */
+#define OPCODE_OR_R32_TO_RM32    (0x09) /* /r */
+#define OPCODE_XOR_R32_TO_RM32   (0x31) /* /r */
+#define OPCODE_ADD_R32_TO_RM32   (0x01)
+#define OPCODE_ADD_I32_TO_RM32   (0x81) /* /0 */
+#define OPCODE_ADD_I8_TO_RM32    (0x83) /* /0 */
+#define OPCODE_SUB_R32_FROM_RM32 (0x29)
+#define OPCODE_SUB_I32_FROM_RM32 (0x81) /* /5 */
+#define OPCODE_SUB_I8_FROM_RM32  (0x83) /* /5 */
+// #define OPCODE_SHL_RM32_BY_I8    (0xc1) /* /4 */
+// #define OPCODE_SHR_RM32_BY_I8    (0xc1) /* /5 */
+// #define OPCODE_SAR_RM32_BY_I8    (0xc1) /* /7 */
+#define OPCODE_SHL_RM32_CL       (0xd3) /* /4 */
+#define OPCODE_SHR_RM32_CL       (0xd3) /* /5 */
+#define OPCODE_SAR_RM32_CL       (0xd3) /* /7 */
+// #define OPCODE_CMP_I32_WITH_RM32 (0x81) /* /7 */
+// #define OPCODE_CMP_I8_WITH_RM32  (0x83) /* /7 */
+#define OPCODE_CMP_R32_WITH_RM32 (0x39)
+// #define OPCODE_CMP_RM32_WITH_R32 (0x3b)
+#define OPCODE_TEST_R8_WITH_RM8  (0x84) /* /r */
+#define OPCODE_TEST_R32_WITH_RM32 (0x85) /* /r */
+#define OPCODE_JMP_REL8          (0xeb)
+#define OPCODE_JMP_REL32         (0xe9)
+#define OPCODE_JMP_RM32          (0xff) /* /4 */
+#define OPCODE_JCC_REL8          (0x70) /* | jcc type */
+#define OPCODE_JCC_REL32_A       (0x0f)
+#define OPCODE_JCC_REL32_B       (0x80) /* | jcc type */
+#define OPCODE_SETCC_RM8_A       (0x0f)
+#define OPCODE_SETCC_RM8_B       (0x90) /* | jcc type, /0 */
+#define OPCODE_CALL_REL32        (0xe8)
+#define OPCODE_CALL_RM32         (0xff) /* /2 */
+#define OPCODE_LEAVE             (0xc9)
+
+#define MODRM_R32(x)    ((x) << 3)
+#define MODRM_RM_DISP0  (0x00)
+#define MODRM_RM_DISP8  (0x40)
+#define MODRM_RM_DISP32 (0x80)
+#define MODRM_RM_REG    (0xc0)
+#define MODRM_RM_R32(x) (x)
+
+#define OP_SIZE_PREFIX (0x66)
+
// Extract the four bytes of a 32-bit immediate in little-endian order.
#define IMM32_L0(x) ((x) & 0xff)
#define IMM32_L1(x) (((x) >> 8) & 0xff)
#define IMM32_L2(x) (((x) >> 16) & 0xff)
#define IMM32_L3(x) (((x) >> 24) & 0xff)

// True if x fits in a sign-extended 8-bit immediate.  The whole expansion
// is parenthesized so the macro composes safely inside larger boolean
// expressions (previously the bare `a || b` form bound incorrectly with a
// caller's && / ||; the asmxtensa.c definition already has the outer parens).
#define SIGNED_FIT8(x) ((((x) & 0xffffff80) == 0) || (((x) & 0xffffff80) == 0xffffff80))
+
+// Low-level emitters: append raw bytes to the output stream.  The base
+// helper returns the write cursor, or NULL when no buffer is available
+// (presumably during a sizing pass -- see py/asmbase); in that case only
+// the position advances and nothing is stored.
+static void asm_x86_write_byte_1(asm_x86_t *as, byte b1) {
+    byte *c = mp_asm_base_get_cur_to_write_bytes(&as->base, 1);
+    if (c != NULL) {
+        c[0] = b1;
+    }
+}
+
+static void asm_x86_write_byte_2(asm_x86_t *as, byte b1, byte b2) {
+    byte *c = mp_asm_base_get_cur_to_write_bytes(&as->base, 2);
+    if (c != NULL) {
+        c[0] = b1;
+        c[1] = b2;
+    }
+}
+
+static void asm_x86_write_byte_3(asm_x86_t *as, byte b1, byte b2, byte b3) {
+    byte *c = mp_asm_base_get_cur_to_write_bytes(&as->base, 3);
+    if (c != NULL) {
+        c[0] = b1;
+        c[1] = b2;
+        c[2] = b3;
+    }
+}
+
+// Emit a 32-bit word in little-endian byte order (x86 immediate/disp format).
+static void asm_x86_write_word32(asm_x86_t *as, int w32) {
+    byte *c = mp_asm_base_get_cur_to_write_bytes(&as->base, 4);
+    if (c != NULL) {
+        c[0] = IMM32_L0(w32);
+        c[1] = IMM32_L1(w32);
+        c[2] = IMM32_L2(w32);
+        c[3] = IMM32_L3(w32);
+    }
+}
+
+// Emit the ModR/M byte (plus optional SIB byte and displacement) for an
+// operand pair "r32, [disp_r32 + disp_offset]".  Chooses the shortest
+// displacement encoding: none, 8-bit sign-extended, or full 32-bit.
+static void asm_x86_write_r32_disp(asm_x86_t *as, int r32, int disp_r32, int disp_offset) {
+    uint8_t rm_disp;
+    // mod=00 with rm=EBP is reserved (means disp32 with no base), so EBP
+    // must always carry an explicit displacement even when it is zero.
+    if (disp_offset == 0 && disp_r32 != ASM_X86_REG_EBP) {
+        rm_disp = MODRM_RM_DISP0;
+    } else if (SIGNED_FIT8(disp_offset)) {
+        rm_disp = MODRM_RM_DISP8;
+    } else {
+        rm_disp = MODRM_RM_DISP32;
+    }
+    asm_x86_write_byte_1(as, MODRM_R32(r32) | rm_disp | MODRM_RM_R32(disp_r32));
+    if (disp_r32 == ASM_X86_REG_ESP) {
+        // Special case for esp, it needs a SIB byte
+        asm_x86_write_byte_1(as, 0x24);
+    }
+    if (rm_disp == MODRM_RM_DISP8) {
+        asm_x86_write_byte_1(as, IMM32_L0(disp_offset));
+    } else if (rm_disp == MODRM_RM_DISP32) {
+        asm_x86_write_word32(as, disp_offset);
+    }
+}
+
+// Emit "op r/m32, r32" with register-direct addressing (mod=11).  Also used
+// with an opcode-extension digit passed in src_r32 for /n encodings such as
+// NOT (/2), NEG (/3) and the CL shifts (/4, /5, /7) below.
+static void asm_x86_generic_r32_r32(asm_x86_t *as, int dest_r32, int src_r32, int op) {
+    asm_x86_write_byte_2(as, op, MODRM_R32(src_r32) | MODRM_RM_REG | MODRM_RM_R32(dest_r32));
+}
+
+#if 0
+static void asm_x86_nop(asm_x86_t *as) {
+    asm_x86_write_byte_1(as, OPCODE_NOP);
+}
+#endif
+
+// push/pop encode the register number in the low 3 bits of the opcode byte.
+static void asm_x86_push_r32(asm_x86_t *as, int src_r32) {
+    asm_x86_write_byte_1(as, OPCODE_PUSH_R32 | src_r32);
+}
+
+#if 0
+void asm_x86_push_i32(asm_x86_t *as, int src_i32) {
+    asm_x86_write_byte_1(as, OPCODE_PUSH_I32);
+    asm_x86_write_word32(as, src_i32);
+}
+
+void asm_x86_push_disp(asm_x86_t *as, int src_r32, int src_offset) {
+    asm_x86_write_byte_1(as, OPCODE_PUSH_M32);
+    asm_x86_write_r32_disp(as, 6, src_r32, src_offset);
+}
+#endif
+
+static void asm_x86_pop_r32(asm_x86_t *as, int dest_r32) {
+    asm_x86_write_byte_1(as, OPCODE_POP_R32 | dest_r32);
+}
+
+static void asm_x86_ret(asm_x86_t *as) {
+    asm_x86_write_byte_1(as, OPCODE_RET);
+}
+
+void asm_x86_mov_r32_r32(asm_x86_t *as, int dest_r32, int src_r32) {
+    asm_x86_generic_r32_r32(as, dest_r32, src_r32, OPCODE_MOV_R32_TO_RM32);
+}
+
+// Store the low 8 bits of src_r32 to [dest_r32 + dest_disp].
+void asm_x86_mov_r8_to_mem8(asm_x86_t *as, int src_r32, int dest_r32, int dest_disp) {
+    asm_x86_write_byte_1(as, OPCODE_MOV_R8_TO_RM8);
+    asm_x86_write_r32_disp(as, src_r32, dest_r32, dest_disp);
+}
+
+// Store the low 16 bits: the 0x66 operand-size prefix turns the 32-bit
+// MOV into its 16-bit form.
+void asm_x86_mov_r16_to_mem16(asm_x86_t *as, int src_r32, int dest_r32, int dest_disp) {
+    asm_x86_write_byte_2(as, OP_SIZE_PREFIX, OPCODE_MOV_R32_TO_RM32);
+    asm_x86_write_r32_disp(as, src_r32, dest_r32, dest_disp);
+}
+
+void asm_x86_mov_r32_to_mem32(asm_x86_t *as, int src_r32, int dest_r32, int dest_disp) {
+    asm_x86_write_byte_1(as, OPCODE_MOV_R32_TO_RM32);
+    asm_x86_write_r32_disp(as, src_r32, dest_r32, dest_disp);
+}
+
+// Zero-extending 8-bit load (MOVZX, two-byte 0x0f opcode).
+void asm_x86_mov_mem8_to_r32zx(asm_x86_t *as, int src_r32, int src_disp, int dest_r32) {
+    asm_x86_write_byte_2(as, 0x0f, OPCODE_MOVZX_RM8_TO_R32);
+    asm_x86_write_r32_disp(as, dest_r32, src_r32, src_disp);
+}
+
+// Zero-extending 16-bit load (MOVZX).
+void asm_x86_mov_mem16_to_r32zx(asm_x86_t *as, int src_r32, int src_disp, int dest_r32) {
+    asm_x86_write_byte_2(as, 0x0f, OPCODE_MOVZX_RM16_TO_R32);
+    asm_x86_write_r32_disp(as, dest_r32, src_r32, src_disp);
+}
+
+void asm_x86_mov_mem32_to_r32(asm_x86_t *as, int src_r32, int src_disp, int dest_r32) {
+    asm_x86_write_byte_1(as, OPCODE_MOV_RM32_TO_R32);
+    asm_x86_write_r32_disp(as, dest_r32, src_r32, src_disp);
+}
+
+// dest_r32 = src_r32 + src_disp, computed without touching flags (LEA).
+static void asm_x86_lea_disp_to_r32(asm_x86_t *as, int src_r32, int src_disp, int dest_r32) {
+    asm_x86_write_byte_1(as, OPCODE_LEA_MEM_TO_R32);
+    asm_x86_write_r32_disp(as, dest_r32, src_r32, src_disp);
+}
+
+#if 0
+void asm_x86_mov_i8_to_r8(asm_x86_t *as, int src_i8, int dest_r32) {
+    asm_x86_write_byte_2(as, OPCODE_MOV_I8_TO_R8 | dest_r32, src_i8);
+}
+#endif
+
+// Load a 32-bit immediate into a register.  Returns the code offset of the
+// immediate itself (taken just before the word is written), so the caller
+// can locate and patch the constant later.
+size_t asm_x86_mov_i32_to_r32(asm_x86_t *as, int32_t src_i32, int dest_r32) {
+    asm_x86_write_byte_1(as, OPCODE_MOV_I32_TO_R32 | dest_r32);
+    size_t loc = mp_asm_base_get_code_pos(&as->base);
+    asm_x86_write_word32(as, src_i32);
+    return loc;
+}
+
+// Single-operand and register-register ALU ops.  For the /n-encoded forms
+// the opcode-extension digit is passed through the src_r32 slot of
++// asm_x86_generic_r32_r32 (NOT=/2, NEG=/3, SHL=/4, SHR=/5, SAR=/7), matching
+// the /n annotations on the OPCODE_* defines above.
+void asm_x86_not_r32(asm_x86_t *as, int dest_r32) {
+    asm_x86_generic_r32_r32(as, dest_r32, 2, OPCODE_NOT_RM32);
+}
+
+void asm_x86_neg_r32(asm_x86_t *as, int dest_r32) {
+    asm_x86_generic_r32_r32(as, dest_r32, 3, OPCODE_NEG_RM32);
+}
+
+void asm_x86_and_r32_r32(asm_x86_t *as, int dest_r32, int src_r32) {
+    asm_x86_generic_r32_r32(as, dest_r32, src_r32, OPCODE_AND_R32_TO_RM32);
+}
+
+void asm_x86_or_r32_r32(asm_x86_t *as, int dest_r32, int src_r32) {
+    asm_x86_generic_r32_r32(as, dest_r32, src_r32, OPCODE_OR_R32_TO_RM32);
+}
+
+void asm_x86_xor_r32_r32(asm_x86_t *as, int dest_r32, int src_r32) {
+    asm_x86_generic_r32_r32(as, dest_r32, src_r32, OPCODE_XOR_R32_TO_RM32);
+}
+
+// Shift dest_r32 by the count held in CL.
+void asm_x86_shl_r32_cl(asm_x86_t *as, int dest_r32) {
+    asm_x86_generic_r32_r32(as, dest_r32, 4, OPCODE_SHL_RM32_CL);
+}
+
+void asm_x86_shr_r32_cl(asm_x86_t *as, int dest_r32) {
+    asm_x86_generic_r32_r32(as, dest_r32, 5, OPCODE_SHR_RM32_CL);
+}
+
+void asm_x86_sar_r32_cl(asm_x86_t *as, int dest_r32) {
+    asm_x86_generic_r32_r32(as, dest_r32, 7, OPCODE_SAR_RM32_CL);
+}
+
+void asm_x86_add_r32_r32(asm_x86_t *as, int dest_r32, int src_r32) {
+    asm_x86_generic_r32_r32(as, dest_r32, src_r32, OPCODE_ADD_R32_TO_RM32);
+}
+
+// dest_r32 += src_i32, using the short sign-extended 8-bit immediate form
+// when the value fits, otherwise the full 32-bit immediate form.
+static void asm_x86_add_i32_to_r32(asm_x86_t *as, int src_i32, int dest_r32) {
+    if (SIGNED_FIT8(src_i32)) {
+        asm_x86_write_byte_2(as, OPCODE_ADD_I8_TO_RM32, MODRM_R32(0) | MODRM_RM_REG | MODRM_RM_R32(dest_r32));
+        asm_x86_write_byte_1(as, src_i32 & 0xff);
+    } else {
+        asm_x86_write_byte_2(as, OPCODE_ADD_I32_TO_RM32, MODRM_R32(0) | MODRM_RM_REG | MODRM_RM_R32(dest_r32));
+        asm_x86_write_word32(as, src_i32);
+    }
+}
+
+void asm_x86_sub_r32_r32(asm_x86_t *as, int dest_r32, int src_r32) {
+    asm_x86_generic_r32_r32(as, dest_r32, src_r32, OPCODE_SUB_R32_FROM_RM32);
+}
+
+// dest_r32 -= src_i32; same short/long immediate selection as ADD (SUB is /5).
+static void asm_x86_sub_r32_i32(asm_x86_t *as, int dest_r32, int src_i32) {
+    if (SIGNED_FIT8(src_i32)) {
+        // defaults to 32 bit operation
+        asm_x86_write_byte_2(as, OPCODE_SUB_I8_FROM_RM32, MODRM_R32(5) | MODRM_RM_REG | MODRM_RM_R32(dest_r32));
+        asm_x86_write_byte_1(as, src_i32 & 0xff);
+    } else {
+        // defaults to 32 bit operation
+        asm_x86_write_byte_2(as, OPCODE_SUB_I32_FROM_RM32, MODRM_R32(5) | MODRM_RM_REG | MODRM_RM_R32(dest_r32));
+        asm_x86_write_word32(as, src_i32);
+    }
+}
+
+void asm_x86_mul_r32_r32(asm_x86_t *as, int dest_r32, int src_r32) {
+    // imul reg32, reg/mem32 -- 0x0f 0xaf /r
+    asm_x86_write_byte_3(as, 0x0f, 0xaf, MODRM_R32(dest_r32) | MODRM_RM_REG | MODRM_RM_R32(src_r32));
+}
+
+#if 0
+/* shifts not tested */
+void asm_x86_shl_r32_by_imm(asm_x86_t *as, int r32, int imm) {
+    asm_x86_write_byte_2(as, OPCODE_SHL_RM32_BY_I8, MODRM_R32(4) | MODRM_RM_REG | MODRM_RM_R32(r32));
+    asm_x86_write_byte_1(as, imm);
+}
+
+void asm_x86_shr_r32_by_imm(asm_x86_t *as, int r32, int imm) {
+    asm_x86_write_byte_2(as, OPCODE_SHR_RM32_BY_I8, MODRM_R32(5) | MODRM_RM_REG | MODRM_RM_R32(r32));
+    asm_x86_write_byte_1(as, imm);
+}
+
+void asm_x86_sar_r32_by_imm(asm_x86_t *as, int r32, int imm) {
+    asm_x86_write_byte_2(as, OPCODE_SAR_RM32_BY_I8, MODRM_R32(7) | MODRM_RM_REG | MODRM_RM_R32(r32));
+    asm_x86_write_byte_1(as, imm);
+}
+#endif
+
+// Compare two registers, setting EFLAGS for a following jcc/setcc.
+// Note the operand order handed to the generic encoder is swapped so the
+// emitted CMP matches the (a, b) sense expected by the jcc condition codes.
+void asm_x86_cmp_r32_with_r32(asm_x86_t *as, int src_r32_a, int src_r32_b) {
+    asm_x86_generic_r32_r32(as, src_r32_b, src_r32_a, OPCODE_CMP_R32_WITH_RM32);
+}
+
+#if 0
+void asm_x86_cmp_i32_with_r32(asm_x86_t *as, int src_i32, int src_r32) {
+    if (SIGNED_FIT8(src_i32)) {
+        asm_x86_write_byte_2(as, OPCODE_CMP_I8_WITH_RM32, MODRM_R32(7) | MODRM_RM_REG | MODRM_RM_R32(src_r32));
+        asm_x86_write_byte_1(as, src_i32 & 0xff);
+    } else {
+        asm_x86_write_byte_2(as, OPCODE_CMP_I32_WITH_RM32, MODRM_R32(7) | MODRM_RM_REG | MODRM_RM_R32(src_r32));
+        asm_x86_write_word32(as, src_i32);
+    }
+}
+#endif
+
+// TEST: AND the operands and set flags without storing a result.
+void asm_x86_test_r8_with_r8(asm_x86_t *as, int src_r32_a, int src_r32_b) {
+    asm_x86_write_byte_2(as, OPCODE_TEST_R8_WITH_RM8, MODRM_R32(src_r32_a) | MODRM_RM_REG | MODRM_RM_R32(src_r32_b));
+}
+
+void asm_x86_test_r32_with_r32(asm_x86_t *as, int src_r32_a, int src_r32_b) {
+    asm_x86_generic_r32_r32(as, src_r32_b, src_r32_a, OPCODE_TEST_R32_WITH_RM32);
+}
+
+// Set the low byte of dest_r8 to 0/1 according to jcc_type (SETcc).
+void asm_x86_setcc_r8(asm_x86_t *as, mp_uint_t jcc_type, int dest_r8) {
+    asm_x86_write_byte_3(as, OPCODE_SETCC_RM8_A, OPCODE_SETCC_RM8_B | jcc_type, MODRM_R32(0) | MODRM_RM_REG | MODRM_RM_R32(dest_r8));
+}
+
+// Indirect jump through a register (JMP r/m32 is 0xff /4).
+void asm_x86_jmp_reg(asm_x86_t *as, int src_r32) {
+    asm_x86_write_byte_2(as, OPCODE_JMP_RM32, MODRM_R32(4) | MODRM_RM_REG | MODRM_RM_R32(src_r32));
+}
+
+// Look up the code offset previously bound to `label`; (mp_uint_t)-1 if it
+// has not been assigned yet (i.e. a forward reference on an early pass).
+static mp_uint_t get_label_dest(asm_x86_t *as, mp_uint_t label) {
+    assert(label < as->base.max_num_labels);
+    return as->base.label_offsets[label];
+}
+
+// Unconditional jump to a label.  Backward jumps (destination already known
+// and behind the current position) can use the 2-byte rel8 form when the
+// displacement fits; forward / unknown destinations must assume the 5-byte
+// rel32 form so the instruction size is stable across passes.  The `goto`
+// into the else block is deliberate (legal C) to share the rel32 path.
+void asm_x86_jmp_label(asm_x86_t *as, mp_uint_t label) {
+    mp_uint_t dest = get_label_dest(as, label);
+    mp_int_t rel = dest - as->base.code_offset;
+    if (dest != (mp_uint_t)-1 && rel < 0) {
+        // is a backwards jump, so we know the size of the jump on the first pass
+        // calculate rel assuming 8 bit relative jump
+        rel -= 2;
+        if (SIGNED_FIT8(rel)) {
+            asm_x86_write_byte_2(as, OPCODE_JMP_REL8, rel & 0xff);
+        } else {
+            rel += 2;
+            goto large_jump;
+        }
+    } else {
+        // is a forwards jump, so need to assume it's large
+    large_jump:
+        rel -= 5;
+        asm_x86_write_byte_1(as, OPCODE_JMP_REL32);
+        asm_x86_write_word32(as, rel);
+    }
+}
+
+// Conditional jump to a label; same rel8/rel32 strategy as asm_x86_jmp_label,
+// but the large form is 6 bytes (0x0f prefix + opcode + rel32).
+void asm_x86_jcc_label(asm_x86_t *as, mp_uint_t jcc_type, mp_uint_t label) {
+    mp_uint_t dest = get_label_dest(as, label);
+    mp_int_t rel = dest - as->base.code_offset;
+    if (dest != (mp_uint_t)-1 && rel < 0) {
+        // is a backwards jump, so we know the size of the jump on the first pass
+        // calculate rel assuming 8 bit relative jump
+        rel -= 2;
+        if (SIGNED_FIT8(rel)) {
+            asm_x86_write_byte_2(as, OPCODE_JCC_REL8 | jcc_type, rel & 0xff);
+        } else {
+            rel += 2;
+            goto large_jump;
+        }
+    } else {
+        // is a forwards jump, so need to assume it's large
+    large_jump:
+        rel -= 6;
+        asm_x86_write_byte_2(as, OPCODE_JCC_REL32_A, OPCODE_JCC_REL32_B | jcc_type);
+        asm_x86_write_word32(as, rel);
+    }
+}
+
+// Function prologue: save the four cdecl callee-save registers and reserve
+// stack space for the locals.
+void asm_x86_entry(asm_x86_t *as, int num_locals) {
+    assert(num_locals >= 0);
+    asm_x86_push_r32(as, ASM_X86_REG_EBP);
+    asm_x86_push_r32(as, ASM_X86_REG_EBX);
+    asm_x86_push_r32(as, ASM_X86_REG_ESI);
+    asm_x86_push_r32(as, ASM_X86_REG_EDI);
+    // `|= 3` rounds num_locals so that locals + 4 saved regs + return address
+    // total a multiple of 4 words, keeping ESP 16-byte aligned.
+    num_locals |= 3; // make it odd so stack is aligned on 16 byte boundary
+    asm_x86_sub_r32_i32(as, ASM_X86_REG_ESP, num_locals * WORD_SIZE);
+    as->num_locals = num_locals;
+}
+
+// Function epilogue: release the locals, restore the saved registers in
+// reverse order and return.
+void asm_x86_exit(asm_x86_t *as) {
+    asm_x86_sub_r32_i32(as, ASM_X86_REG_ESP, -as->num_locals * WORD_SIZE);
+    asm_x86_pop_r32(as, ASM_X86_REG_EDI);
+    asm_x86_pop_r32(as, ASM_X86_REG_ESI);
+    asm_x86_pop_r32(as, ASM_X86_REG_EBX);
+    asm_x86_pop_r32(as, ASM_X86_REG_EBP);
+    asm_x86_ret(as);
+}
+
+// ESP-relative offset of incoming argument `arg_num` (0-based), accounting
+// for the locals and prologue-saved state laid out by asm_x86_entry.
+static int asm_x86_arg_offset_from_esp(asm_x86_t *as, size_t arg_num) {
+    // Above esp are: locals, 4 saved registers, return eip, arguments
+    return (as->num_locals + 4 + 1 + arg_num) * WORD_SIZE;
+}
+
+#if 0
+void asm_x86_push_arg(asm_x86_t *as, int src_arg_num) {
+    asm_x86_push_disp(as, ASM_X86_REG_ESP, asm_x86_arg_offset_from_esp(as, src_arg_num));
+}
+#endif
+
+// Load incoming stack argument `src_arg_num` into a register.
+void asm_x86_mov_arg_to_r32(asm_x86_t *as, int src_arg_num, int dest_r32) {
+    asm_x86_mov_mem32_to_r32(as, ASM_X86_REG_ESP, asm_x86_arg_offset_from_esp(as, src_arg_num), dest_r32);
+}
+
+#if 0
+void asm_x86_mov_r32_to_arg(asm_x86_t *as, int src_r32, int dest_arg_num) {
+    asm_x86_mov_r32_to_mem32(as, src_r32, ASM_X86_REG_ESP, asm_x86_arg_offset_from_esp(as, dest_arg_num));
+}
+#endif
+
+// locals:
+//  - stored on the stack in ascending order
+//  - numbered 0 through as->num_locals-1
+//  - ESP points to the first local
+//
+//  | ESP
+//  v
+//  l0  l1  l2  ...  l(n-1)
+//  ^                ^
+//  | low address    | high address in RAM
+//
+static int asm_x86_local_offset_from_esp(asm_x86_t *as, int local_num) {
+    (void)as;
+    // Stack is full descending, ESP points to local0
+    return local_num * WORD_SIZE;
+}
+
+void asm_x86_mov_local_to_r32(asm_x86_t *as, int src_local_num, int dest_r32) {
+    asm_x86_mov_mem32_to_r32(as, ASM_X86_REG_ESP, asm_x86_local_offset_from_esp(as, src_local_num), dest_r32);
+}
+
+void asm_x86_mov_r32_to_local(asm_x86_t *as, int src_r32, int dest_local_num) {
+    asm_x86_mov_r32_to_mem32(as, src_r32, ASM_X86_REG_ESP, asm_x86_local_offset_from_esp(as, dest_local_num));
+}
+
+// Load the *address* of a local into a register (plain MOV when the offset
+// is zero, LEA otherwise).
+void asm_x86_mov_local_addr_to_r32(asm_x86_t *as, int local_num, int dest_r32) {
+    int offset = asm_x86_local_offset_from_esp(as, local_num);
+    if (offset == 0) {
+        asm_x86_mov_r32_r32(as, dest_r32, ASM_X86_REG_ESP);
+    } else {
+        asm_x86_lea_disp_to_r32(as, ASM_X86_REG_ESP, offset, dest_r32);
+    }
+}
+
+// Materialize the absolute address of `label` without relocations: a CALL
+// with rel32 = 0 pushes the address of the next instruction, which is then
+// popped into dest_r32 and adjusted by the label's distance from that point.
+void asm_x86_mov_reg_pcrel(asm_x86_t *as, int dest_r32, mp_uint_t label) {
+    asm_x86_write_byte_1(as, OPCODE_CALL_REL32);
+    asm_x86_write_word32(as, 0);
+    mp_uint_t dest = get_label_dest(as, label);
+    mp_int_t rel = dest - as->base.code_offset;
+    asm_x86_pop_r32(as, dest_r32);
+    // PC rel is usually a forward reference, so need to assume it's large
+    asm_x86_write_byte_2(as, OPCODE_ADD_I32_TO_RM32, MODRM_R32(0) | MODRM_RM_REG | MODRM_RM_R32(dest_r32));
+    asm_x86_write_word32(as, rel);
+}
+
+#if 0
+void asm_x86_push_local(asm_x86_t *as, int local_num) {
+    asm_x86_push_disp(as, ASM_X86_REG_ESP, asm_x86_local_offset_from_esp(as, local_num));
+}
+
+void asm_x86_push_local_addr(asm_x86_t *as, int local_num, int temp_r32) {
+    asm_x86_mov_r32_r32(as, temp_r32, ASM_X86_REG_ESP);
+    asm_x86_add_i32_to_r32(as, asm_x86_local_offset_from_esp(as, local_num), temp_r32);
+    asm_x86_push_r32(as, temp_r32);
+}
+#endif
+
+// Call function number `fun_id` from the function table (pointed to by
+// ASM_X86_REG_FUN_TABLE), passing up to 4 arguments currently held in the
+// ASM_X86_REG_ARG_* registers.  cdecl: args are pushed in reverse order
+// (arg 1 ends up on top) and the caller pops them afterwards.
+void asm_x86_call_ind(asm_x86_t *as, size_t fun_id, mp_uint_t n_args, int temp_r32) {
+    assert(n_args <= 4);
+
+    // Align stack on 16-byte boundary during the call
+    unsigned int align = ((n_args + 3) & ~3) - n_args;
+    if (align) {
+        asm_x86_sub_r32_i32(as, ASM_X86_REG_ESP, align * WORD_SIZE);
+    }
+
+    if (n_args > 3) {
+        asm_x86_push_r32(as, ASM_X86_REG_ARG_4);
+    }
+    if (n_args > 2) {
+        asm_x86_push_r32(as, ASM_X86_REG_ARG_3);
+    }
+    if (n_args > 1) {
+        asm_x86_push_r32(as, ASM_X86_REG_ARG_2);
+    }
+    if (n_args > 0) {
+        asm_x86_push_r32(as, ASM_X86_REG_ARG_1);
+    }
+
+    // Load the pointer to the function and make the call
+    asm_x86_mov_mem32_to_r32(as, ASM_X86_REG_FUN_TABLE, fun_id * WORD_SIZE, temp_r32);
+    asm_x86_write_byte_2(as, OPCODE_CALL_RM32, MODRM_R32(2) | MODRM_RM_REG | MODRM_RM_R32(temp_r32));
+
+    // the caller must clean up the stack
+    if (n_args > 0) {
+        asm_x86_add_i32_to_r32(as, (n_args + align) * WORD_SIZE, ASM_X86_REG_ESP);
+    }
+}
+
+#endif // MICROPY_EMIT_X86

+ 218 - 0
mp_flipper/lib/micropython/py/asmx86.h

@@ -0,0 +1,218 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_ASMX86_H
+#define MICROPY_INCLUDED_PY_ASMX86_H
+
+#include "py/mpconfig.h"
+#include "py/misc.h"
+#include "py/asmbase.h"
+
+// x86 cdecl calling convention is:
+//  - args passed on the stack in reverse order
+//  - return value in EAX
+//  - caller cleans up the stack after a call
+//  - stack must be aligned to 16-byte boundary before all calls
+//  - EAX, ECX, EDX are caller-save
+//  - EBX, ESI, EDI, EBP, ESP, EIP are callee-save
+
+// In the functions below, argument order follows x86 docs and generally
+// the destination is the first argument.
+// NOTE: this is a change from the old convention used in this file and
+// some functions still use the old (reverse) convention.
+
+#define ASM_X86_REG_EAX (0)
+#define ASM_X86_REG_ECX (1)
+#define ASM_X86_REG_EDX (2)
+#define ASM_X86_REG_EBX (3)
+#define ASM_X86_REG_ESP (4)
+#define ASM_X86_REG_EBP (5)
+#define ASM_X86_REG_ESI (6)
+#define ASM_X86_REG_EDI (7)
+
+// x86 passes values on the stack, but the emitter is register based, so we need
+// to define registers that can temporarily hold the function arguments.  They
+// need to be defined here so that asm_x86_call_ind can push them onto the stack
+// before the call.
+#define ASM_X86_REG_ARG_1 ASM_X86_REG_EAX
+#define ASM_X86_REG_ARG_2 ASM_X86_REG_ECX
+#define ASM_X86_REG_ARG_3 ASM_X86_REG_EDX
+#define ASM_X86_REG_ARG_4 ASM_X86_REG_EBX
+
+// condition codes, used for jcc and setcc (despite their j-name!)
+#define ASM_X86_CC_JB  (0x2) // below, unsigned
+#define ASM_X86_CC_JAE (0x3) // above or equal, unsigned
+#define ASM_X86_CC_JZ  (0x4)
+#define ASM_X86_CC_JE  (0x4)
+#define ASM_X86_CC_JNZ (0x5)
+#define ASM_X86_CC_JNE (0x5)
+#define ASM_X86_CC_JBE (0x6) // below or equal, unsigned
+#define ASM_X86_CC_JA  (0x7) // above, unsigned
+#define ASM_X86_CC_JL  (0xc) // less, signed
+#define ASM_X86_CC_JGE (0xd) // greater or equal, signed
+#define ASM_X86_CC_JLE (0xe) // less or equal, signed
+#define ASM_X86_CC_JG  (0xf) // greater, signed
+
+// x86 assembler state: the generic assembler base plus the number of stack
+// locals reserved by asm_x86_entry() (needed again by asm_x86_exit() and
+// the arg/local offset calculations).
+typedef struct _asm_x86_t {
+    mp_asm_base_t base;
+    int num_locals;
+} asm_x86_t;
+
+// No per-pass fix-ups are needed on x86.
+static inline void asm_x86_end_pass(asm_x86_t *as) {
+    (void)as;
+}
+
+void asm_x86_mov_r32_r32(asm_x86_t *as, int dest_r32, int src_r32);
+size_t asm_x86_mov_i32_to_r32(asm_x86_t *as, int32_t src_i32, int dest_r32);
+void asm_x86_mov_r8_to_mem8(asm_x86_t *as, int src_r32, int dest_r32, int dest_disp);
+void asm_x86_mov_r16_to_mem16(asm_x86_t *as, int src_r32, int dest_r32, int dest_disp);
+void asm_x86_mov_r32_to_mem32(asm_x86_t *as, int src_r32, int dest_r32, int dest_disp);
+void asm_x86_mov_mem8_to_r32zx(asm_x86_t *as, int src_r32, int src_disp, int dest_r32);
+void asm_x86_mov_mem16_to_r32zx(asm_x86_t *as, int src_r32, int src_disp, int dest_r32);
+void asm_x86_mov_mem32_to_r32(asm_x86_t *as, int src_r32, int src_disp, int dest_r32);
+void asm_x86_not_r32(asm_x86_t *as, int dest_r32);
+void asm_x86_neg_r32(asm_x86_t *as, int dest_r32);
+void asm_x86_and_r32_r32(asm_x86_t *as, int dest_r32, int src_r32);
+void asm_x86_or_r32_r32(asm_x86_t *as, int dest_r32, int src_r32);
+void asm_x86_xor_r32_r32(asm_x86_t *as, int dest_r32, int src_r32);
+void asm_x86_shl_r32_cl(asm_x86_t *as, int dest_r32);
+void asm_x86_shr_r32_cl(asm_x86_t *as, int dest_r32);
+void asm_x86_sar_r32_cl(asm_x86_t *as, int dest_r32);
+void asm_x86_add_r32_r32(asm_x86_t *as, int dest_r32, int src_r32);
+void asm_x86_sub_r32_r32(asm_x86_t *as, int dest_r32, int src_r32);
+void asm_x86_mul_r32_r32(asm_x86_t *as, int dest_r32, int src_r32);
+void asm_x86_cmp_r32_with_r32(asm_x86_t *as, int src_r32_a, int src_r32_b);
+void asm_x86_test_r8_with_r8(asm_x86_t *as, int src_r32_a, int src_r32_b);
+void asm_x86_test_r32_with_r32(asm_x86_t *as, int src_r32_a, int src_r32_b);
+void asm_x86_setcc_r8(asm_x86_t *as, mp_uint_t jcc_type, int dest_r8);
+void asm_x86_jmp_reg(asm_x86_t *as, int src_r86);
+void asm_x86_jmp_label(asm_x86_t *as, mp_uint_t label);
+void asm_x86_jcc_label(asm_x86_t *as, mp_uint_t jcc_type, mp_uint_t label);
+void asm_x86_entry(asm_x86_t *as, int num_locals);
+void asm_x86_exit(asm_x86_t *as);
+void asm_x86_mov_arg_to_r32(asm_x86_t *as, int src_arg_num, int dest_r32);
+void asm_x86_mov_local_to_r32(asm_x86_t *as, int src_local_num, int dest_r32);
+void asm_x86_mov_r32_to_local(asm_x86_t *as, int src_r32, int dest_local_num);
+void asm_x86_mov_local_addr_to_r32(asm_x86_t *as, int local_num, int dest_r32);
+void asm_x86_mov_reg_pcrel(asm_x86_t *as, int dest_r64, mp_uint_t label);
+void asm_x86_call_ind(asm_x86_t *as, size_t fun_id, mp_uint_t n_args, int temp_r32);
+
+// Holds a pointer to mp_fun_table
+#define ASM_X86_REG_FUN_TABLE ASM_X86_REG_EBP
+
+#if GENERIC_ASM_API
+
+// The following macros provide a (mostly) arch-independent API to
+// generate native code, and are used by the native emitter.
+
+#define ASM_WORD_SIZE (4)
+
+#define REG_RET ASM_X86_REG_EAX
+#define REG_ARG_1 ASM_X86_REG_ARG_1
+#define REG_ARG_2 ASM_X86_REG_ARG_2
+#define REG_ARG_3 ASM_X86_REG_ARG_3
+#define REG_ARG_4 ASM_X86_REG_ARG_4
+
+// caller-save, so can be used as temporaries
+#define REG_TEMP0 ASM_X86_REG_EAX
+#define REG_TEMP1 ASM_X86_REG_ECX
+#define REG_TEMP2 ASM_X86_REG_EDX
+
+// callee-save, so can be used as locals
+#define REG_LOCAL_1 ASM_X86_REG_EBX
+#define REG_LOCAL_2 ASM_X86_REG_ESI
+#define REG_LOCAL_3 ASM_X86_REG_EDI
+#define REG_LOCAL_NUM (3)
+
+// Holds a pointer to mp_fun_table
+#define REG_FUN_TABLE ASM_X86_REG_FUN_TABLE
+
+#define ASM_T               asm_x86_t
+#define ASM_END_PASS        asm_x86_end_pass
+#define ASM_ENTRY           asm_x86_entry
+#define ASM_EXIT            asm_x86_exit
+
+#define ASM_JUMP            asm_x86_jmp_label
+#define ASM_JUMP_IF_REG_ZERO(as, reg, label, bool_test) \
+    do { \
+        if (bool_test) { \
+            asm_x86_test_r8_with_r8(as, reg, reg); \
+        } else { \
+            asm_x86_test_r32_with_r32(as, reg, reg); \
+        } \
+        asm_x86_jcc_label(as, ASM_X86_CC_JZ, label); \
+    } while (0)
+#define ASM_JUMP_IF_REG_NONZERO(as, reg, label, bool_test) \
+    do { \
+        if (bool_test) { \
+            asm_x86_test_r8_with_r8(as, reg, reg); \
+        } else { \
+            asm_x86_test_r32_with_r32(as, reg, reg); \
+        } \
+        asm_x86_jcc_label(as, ASM_X86_CC_JNZ, label); \
+    } while (0)
+#define ASM_JUMP_IF_REG_EQ(as, reg1, reg2, label) \
+    do { \
+        asm_x86_cmp_r32_with_r32(as, reg1, reg2); \
+        asm_x86_jcc_label(as, ASM_X86_CC_JE, label); \
+    } while (0)
+#define ASM_JUMP_REG(as, reg) asm_x86_jmp_reg((as), (reg))
+#define ASM_CALL_IND(as, idx) asm_x86_call_ind(as, idx, mp_f_n_args[idx], ASM_X86_REG_EAX)
+
+#define ASM_MOV_LOCAL_REG(as, local_num, reg_src) asm_x86_mov_r32_to_local((as), (reg_src), (local_num))
+#define ASM_MOV_REG_IMM(as, reg_dest, imm) asm_x86_mov_i32_to_r32((as), (imm), (reg_dest))
+#define ASM_MOV_REG_LOCAL(as, reg_dest, local_num) asm_x86_mov_local_to_r32((as), (local_num), (reg_dest))
+#define ASM_MOV_REG_REG(as, reg_dest, reg_src) asm_x86_mov_r32_r32((as), (reg_dest), (reg_src))
+#define ASM_MOV_REG_LOCAL_ADDR(as, reg_dest, local_num) asm_x86_mov_local_addr_to_r32((as), (local_num), (reg_dest))
+#define ASM_MOV_REG_PCREL(as, reg_dest, label) asm_x86_mov_reg_pcrel((as), (reg_dest), (label))
+
+#define ASM_NOT_REG(as, reg) asm_x86_not_r32((as), (reg))
+#define ASM_NEG_REG(as, reg) asm_x86_neg_r32((as), (reg))
+#define ASM_LSL_REG(as, reg) asm_x86_shl_r32_cl((as), (reg))
+#define ASM_LSR_REG(as, reg) asm_x86_shr_r32_cl((as), (reg))
+#define ASM_ASR_REG(as, reg) asm_x86_sar_r32_cl((as), (reg))
+#define ASM_OR_REG_REG(as, reg_dest, reg_src) asm_x86_or_r32_r32((as), (reg_dest), (reg_src))
+#define ASM_XOR_REG_REG(as, reg_dest, reg_src) asm_x86_xor_r32_r32((as), (reg_dest), (reg_src))
+#define ASM_AND_REG_REG(as, reg_dest, reg_src) asm_x86_and_r32_r32((as), (reg_dest), (reg_src))
+#define ASM_ADD_REG_REG(as, reg_dest, reg_src) asm_x86_add_r32_r32((as), (reg_dest), (reg_src))
+#define ASM_SUB_REG_REG(as, reg_dest, reg_src) asm_x86_sub_r32_r32((as), (reg_dest), (reg_src))
+#define ASM_MUL_REG_REG(as, reg_dest, reg_src) asm_x86_mul_r32_r32((as), (reg_dest), (reg_src))
+
+#define ASM_LOAD_REG_REG(as, reg_dest, reg_base) asm_x86_mov_mem32_to_r32((as), (reg_base), 0, (reg_dest))
+#define ASM_LOAD_REG_REG_OFFSET(as, reg_dest, reg_base, word_offset) asm_x86_mov_mem32_to_r32((as), (reg_base), 4 * (word_offset), (reg_dest))
+#define ASM_LOAD8_REG_REG(as, reg_dest, reg_base) asm_x86_mov_mem8_to_r32zx((as), (reg_base), 0, (reg_dest))
+#define ASM_LOAD16_REG_REG(as, reg_dest, reg_base) asm_x86_mov_mem16_to_r32zx((as), (reg_base), 0, (reg_dest))
+#define ASM_LOAD16_REG_REG_OFFSET(as, reg_dest, reg_base, uint16_offset) asm_x86_mov_mem16_to_r32zx((as), (reg_base), 2 * (uint16_offset), (reg_dest))
+#define ASM_LOAD32_REG_REG(as, reg_dest, reg_base) asm_x86_mov_mem32_to_r32((as), (reg_base), 0, (reg_dest))
+
+#define ASM_STORE_REG_REG(as, reg_src, reg_base) asm_x86_mov_r32_to_mem32((as), (reg_src), (reg_base), 0)
+#define ASM_STORE_REG_REG_OFFSET(as, reg_src, reg_base, word_offset) asm_x86_mov_r32_to_mem32((as), (reg_src), (reg_base), 4 * (word_offset))
+#define ASM_STORE8_REG_REG(as, reg_src, reg_base) asm_x86_mov_r8_to_mem8((as), (reg_src), (reg_base), 0)
+#define ASM_STORE16_REG_REG(as, reg_src, reg_base) asm_x86_mov_r16_to_mem16((as), (reg_src), (reg_base), 0)
+#define ASM_STORE32_REG_REG(as, reg_src, reg_base) asm_x86_mov_r32_to_mem32((as), (reg_src), (reg_base), 0)
+
+#endif // GENERIC_ASM_API
+
+#endif // MICROPY_INCLUDED_PY_ASMX86_H

+ 267 - 0
mp_flipper/lib/micropython/py/asmxtensa.c

@@ -0,0 +1,267 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2016 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdio.h>
+#include <assert.h>
+
+#include "py/runtime.h"
+
+// wrapper around everything in this file
+#if MICROPY_EMIT_XTENSA || MICROPY_EMIT_INLINE_XTENSA || MICROPY_EMIT_XTENSAWIN
+
+#include "py/asmxtensa.h"
+
+#define WORD_SIZE (4)
+#define SIGNED_FIT8(x) ((((x) & 0xffffff80) == 0) || (((x) & 0xffffff80) == 0xffffff80))
+#define SIGNED_FIT12(x) ((((x) & 0xfffff800) == 0) || (((x) & 0xfffff800) == 0xfffff800))
+
+void asm_xtensa_end_pass(asm_xtensa_t *as) {
+    // Finish an assembler pass: record how many l32r constants this pass
+    // emitted so the next pass can reserve the constant table, then reset
+    // the running counter for that next pass.
+    as->num_const = as->cur_const;
+    as->cur_const = 0;
+
+    #if 0
+    // make a hex dump of the machine code
+    if (as->base.pass == MP_ASM_PASS_EMIT) {
+        uint8_t *d = as->base.code_base;
+        printf("XTENSA ASM:");
+        for (int i = 0; i < ((as->base.code_size + 15) & ~15); ++i) {
+            if (i % 16 == 0) {
+                printf("\n%08x:", (uint32_t)&d[i]);
+            }
+            if (i % 2 == 0) {
+                printf(" ");
+            }
+            printf("%02x", d[i]);
+        }
+        printf("\n");
+    }
+    #endif
+}
+
+// Emit the function prologue for the non-windowed (call0) ABI: skip the
+// in-line constant table, allocate a 16-byte-aligned stack frame, and save
+// the return address plus the callee-save registers used for locals.
+void asm_xtensa_entry(asm_xtensa_t *as, int num_locals) {
+    // jump over the constants
+    asm_xtensa_op_j(as, as->num_const * WORD_SIZE + 4 - 4);
+    mp_asm_base_get_cur_to_write_bytes(&as->base, 1); // padding/alignment byte
+    as->const_table = (uint32_t *)mp_asm_base_get_cur_to_write_bytes(&as->base, as->num_const * 4);
+
+    // adjust the stack-pointer to store a0, a12, a13, a14, a15 and locals, 16-byte aligned
+    as->stack_adjust = (((ASM_XTENSA_NUM_REGS_SAVED + num_locals) * WORD_SIZE) + 15) & ~15;
+    if (SIGNED_FIT8(-as->stack_adjust)) {
+        // small frame: the adjustment fits in addi's signed 8-bit immediate
+        asm_xtensa_op_addi(as, ASM_XTENSA_REG_A1, ASM_XTENSA_REG_A1, -as->stack_adjust);
+    } else {
+        // large frame: materialise the amount in scratch register a9 and subtract
+        asm_xtensa_op_movi(as, ASM_XTENSA_REG_A9, as->stack_adjust);
+        asm_xtensa_op_sub(as, ASM_XTENSA_REG_A1, ASM_XTENSA_REG_A1, ASM_XTENSA_REG_A9);
+    }
+
+    // save return address (a0) and callee-save registers (a12, a13, a14, a15)
+    asm_xtensa_op_s32i_n(as, ASM_XTENSA_REG_A0, ASM_XTENSA_REG_A1, 0);
+    for (int i = 1; i < ASM_XTENSA_NUM_REGS_SAVED; ++i) {
+        asm_xtensa_op_s32i_n(as, ASM_XTENSA_REG_A11 + i, ASM_XTENSA_REG_A1, i);
+    }
+}
+
+// Emit the function epilogue for the non-windowed (call0) ABI: undo
+// everything asm_xtensa_entry() did, in reverse order, then return.
+void asm_xtensa_exit(asm_xtensa_t *as) {
+    // restore callee-save registers (a12-a15) and the return address (a0)
+    for (int i = ASM_XTENSA_NUM_REGS_SAVED - 1; i >= 1; --i) {
+        asm_xtensa_op_l32i_n(as, ASM_XTENSA_REG_A11 + i, ASM_XTENSA_REG_A1, i);
+    }
+    asm_xtensa_op_l32i_n(as, ASM_XTENSA_REG_A0, ASM_XTENSA_REG_A1, 0);
+
+    // restore stack-pointer and return
+    if (SIGNED_FIT8(as->stack_adjust)) {
+        asm_xtensa_op_addi(as, ASM_XTENSA_REG_A1, ASM_XTENSA_REG_A1, as->stack_adjust);
+    } else {
+        // frame too large for addi's imm8: rebuild the amount in scratch a9
+        asm_xtensa_op_movi(as, ASM_XTENSA_REG_A9, as->stack_adjust);
+        asm_xtensa_op_add_n(as, ASM_XTENSA_REG_A1, ASM_XTENSA_REG_A1, ASM_XTENSA_REG_A9);
+    }
+
+    asm_xtensa_op_ret_n(as);
+}
+
+// Emit the function prologue for the windowed ABI (window size 8): skip the
+// constant table, then use the ENTRY instruction to allocate the frame.
+// The extra 32 bytes leave room for the register-window overflow area.
+void asm_xtensa_entry_win(asm_xtensa_t *as, int num_locals) {
+    // jump over the constants
+    asm_xtensa_op_j(as, as->num_const * WORD_SIZE + 4 - 4);
+    mp_asm_base_get_cur_to_write_bytes(&as->base, 1); // padding/alignment byte
+    as->const_table = (uint32_t *)mp_asm_base_get_cur_to_write_bytes(&as->base, as->num_const * 4);
+
+    as->stack_adjust = 32 + ((((ASM_XTENSA_NUM_REGS_SAVED_WIN + num_locals) * WORD_SIZE) + 15) & ~15);
+    asm_xtensa_op_entry(as, ASM_XTENSA_REG_A1, as->stack_adjust);
+    asm_xtensa_op_s32i_n(as, ASM_XTENSA_REG_A0, ASM_XTENSA_REG_A1, 0);
+}
+
+// Windowed-ABI epilogue: restore a0 saved by asm_xtensa_entry_win() and
+// return with RETW.N, which also rotates the register window back.
+void asm_xtensa_exit_win(asm_xtensa_t *as) {
+    asm_xtensa_op_l32i_n(as, ASM_XTENSA_REG_A0, ASM_XTENSA_REG_A1, 0);
+    asm_xtensa_op_retw_n(as);
+}
+
+// Return the code offset previously recorded for `label`.  On early passes
+// the offset may still be unresolved; callers tolerate that and only the
+// final (emit) pass needs the true value.
+static uint32_t get_label_dest(asm_xtensa_t *as, uint label) {
+    assert(label < as->base.max_num_labels);
+    return as->base.label_offsets[label];
+}
+
+// Emit a 16-bit (narrow) instruction, little-endian.  The write pointer may
+// be NULL on counting passes, in which case only the size is accounted for.
+void asm_xtensa_op16(asm_xtensa_t *as, uint16_t op) {
+    uint8_t *c = mp_asm_base_get_cur_to_write_bytes(&as->base, 2);
+    if (c != NULL) {
+        c[0] = op;
+        c[1] = op >> 8;
+    }
+}
+
+// Emit a 24-bit (standard) instruction, little-endian; only the low three
+// bytes of `op` are used.  NULL write pointer means a counting-only pass.
+void asm_xtensa_op24(asm_xtensa_t *as, uint32_t op) {
+    uint8_t *c = mp_asm_base_get_cur_to_write_bytes(&as->base, 3);
+    if (c != NULL) {
+        c[0] = op;
+        c[1] = op >> 8;
+        c[2] = op >> 16;
+    }
+}
+
+// Emit an unconditional jump to `label`.  The offset is PC-relative to the
+// instruction following the J (hence the -4 adjustment).
+void asm_xtensa_j_label(asm_xtensa_t *as, uint label) {
+    uint32_t dest = get_label_dest(as, label);
+    int32_t rel = dest - as->base.code_offset - 4;
+    // we assume rel, as a signed int, fits in 18-bits
+    asm_xtensa_op_j(as, rel);
+}
+
+// Emit a compare-register-against-zero branch (BEQZ/BNEZ) to `label`.
+// Range is only checked on the final pass, when label offsets are fixed;
+// out-of-range targets are reported but still emitted truncated.
+void asm_xtensa_bccz_reg_label(asm_xtensa_t *as, uint cond, uint reg, uint label) {
+    uint32_t dest = get_label_dest(as, label);
+    int32_t rel = dest - as->base.code_offset - 4;
+    if (as->base.pass == MP_ASM_PASS_EMIT && !SIGNED_FIT12(rel)) {
+        printf("ERROR: xtensa bccz out of range\n");
+    }
+    asm_xtensa_op_bccz(as, cond, reg, rel);
+}
+
+// Emit a two-register conditional branch to `label` (signed 8-bit relative
+// range).  As with bccz, the range check only runs on the emit pass.
+void asm_xtensa_bcc_reg_reg_label(asm_xtensa_t *as, uint cond, uint reg1, uint reg2, uint label) {
+    uint32_t dest = get_label_dest(as, label);
+    int32_t rel = dest - as->base.code_offset - 4;
+    if (as->base.pass == MP_ASM_PASS_EMIT && !SIGNED_FIT8(rel)) {
+        printf("ERROR: xtensa bcc out of range\n");
+    }
+    asm_xtensa_op_bcc(as, cond, reg1, reg2, rel);
+}
+
+// convenience function; reg_dest must be different from reg_src[12]
+// Materialise a boolean: reg_dest = (reg_src1 <cond> reg_src2) ? 1 : 0.
+// Works by preloading 1, then branching over the "movi 0" (a 2-byte
+// narrow instruction, hence the +1 branch offset) when the condition holds.
+void asm_xtensa_setcc_reg_reg_reg(asm_xtensa_t *as, uint cond, uint reg_dest, uint reg_src1, uint reg_src2) {
+    asm_xtensa_op_movi_n(as, reg_dest, 1);
+    asm_xtensa_op_bcc(as, cond, reg_src1, reg_src2, 1);
+    asm_xtensa_op_movi_n(as, reg_dest, 0);
+}
+
+// Load an arbitrary 32-bit constant into reg_dest via an L32R from the
+// in-line constant table emitted by the prologue.  Returns the code offset
+// of the constant's table slot (so callers can patch it later if needed).
+size_t asm_xtensa_mov_reg_i32(asm_xtensa_t *as, uint reg_dest, uint32_t i32) {
+    // load the constant
+    uint32_t const_table_offset = (uint8_t *)as->const_table - as->base.code_base;
+    size_t loc = const_table_offset + as->cur_const * WORD_SIZE;
+    asm_xtensa_op_l32r(as, reg_dest, as->base.code_offset, loc);
+    // store the constant in the table (table is NULL on counting passes)
+    if (as->const_table != NULL) {
+        as->const_table[as->cur_const] = i32;
+    }
+    ++as->cur_const;
+    return loc;
+}
+
+// Load a 32-bit constant using the shortest available encoding:
+// MOVI.N (16-bit, -32..95), MOVI (24-bit, signed 12-bit), else an L32R
+// through the constant table.
+void asm_xtensa_mov_reg_i32_optimised(asm_xtensa_t *as, uint reg_dest, uint32_t i32) {
+    if (-32 <= (int)i32 && (int)i32 <= 95) {
+        asm_xtensa_op_movi_n(as, reg_dest, i32);
+    } else if (SIGNED_FIT12(i32)) {
+        asm_xtensa_op_movi(as, reg_dest, i32);
+    } else {
+        asm_xtensa_mov_reg_i32(as, reg_dest, i32);
+    }
+}
+
+// Store reg_src into stack slot `local_num` (a word index off the stack
+// pointer a1).
+void asm_xtensa_mov_local_reg(asm_xtensa_t *as, int local_num, uint reg_src) {
+    asm_xtensa_op_s32i(as, reg_src, ASM_XTENSA_REG_A1, local_num);
+}
+
+// Load stack slot `local_num` (a word index off a1) into reg_dest.
+void asm_xtensa_mov_reg_local(asm_xtensa_t *as, uint reg_dest, int local_num) {
+    asm_xtensa_op_l32i(as, reg_dest, ASM_XTENSA_REG_A1, local_num);
+}
+
+// Compute the address of stack slot `local_num` into reg_dest
+// (reg_dest = a1 + local_num * 4), choosing ADDI for small offsets and a
+// MOVI+ADD.N pair when the byte offset exceeds ADDI's signed 8-bit range.
+void asm_xtensa_mov_reg_local_addr(asm_xtensa_t *as, uint reg_dest, int local_num) {
+    uint off = local_num * WORD_SIZE;
+    if (SIGNED_FIT8(off)) {
+        asm_xtensa_op_addi(as, reg_dest, ASM_XTENSA_REG_A1, off);
+    } else {
+        asm_xtensa_op_movi(as, reg_dest, off);
+        asm_xtensa_op_add_n(as, reg_dest, reg_dest, ASM_XTENSA_REG_A1);
+    }
+}
+
+// Load the absolute address of `label` into reg_dest.  Xtensa has no direct
+// PC-read instruction, so this emits a MOVI of the label's PC-relative
+// offset and then a CALL0 whose only purpose is to deposit PC+3 into a0,
+// finally adding the two.  CALL0 requires a 4-byte-aligned target, handled
+// by the off/pad table below.  NOTE(review): clobbers a0 by design.
+void asm_xtensa_mov_reg_pcrel(asm_xtensa_t *as, uint reg_dest, uint label) {
+    // Get relative offset from PC
+    uint32_t dest = get_label_dest(as, label);
+    int32_t rel = dest - as->base.code_offset;
+    rel -= 3 + 3; // account for 3 bytes of movi instruction, 3 bytes call0 adjustment
+    asm_xtensa_op_movi(as, reg_dest, rel); // imm has 12-bit range
+
+    // Use call0 to get PC+3 into a0
+    // call0 destination must be aligned on 4 bytes:
+    //  - code_offset&3=0: off=0, pad=1
+    //  - code_offset&3=1: off=0, pad=0
+    //  - code_offset&3=2: off=1, pad=3
+    //  - code_offset&3=3: off=1, pad=2
+    uint32_t off = as->base.code_offset >> 1 & 1;
+    uint32_t pad = (5 - as->base.code_offset) & 3;
+    asm_xtensa_op_call0(as, off);
+    mp_asm_base_get_cur_to_write_bytes(&as->base, pad);
+
+    // Add PC to relative offset
+    asm_xtensa_op_add_n(as, reg_dest, reg_dest, ASM_XTENSA_REG_A0);
+}
+
+// Word load with the shortest encoding: L32I.N (4-bit offset field),
+// L32I (8-bit offset field), otherwise raise — the offset cannot be encoded.
+void asm_xtensa_l32i_optimised(asm_xtensa_t *as, uint reg_dest, uint reg_base, uint word_offset) {
+    if (word_offset < 16) {
+        asm_xtensa_op_l32i_n(as, reg_dest, reg_base, word_offset);
+    } else if (word_offset < 256) {
+        asm_xtensa_op_l32i(as, reg_dest, reg_base, word_offset);
+    } else {
+        mp_raise_msg(&mp_type_RuntimeError, MP_ERROR_TEXT("asm overflow"));
+    }
+}
+
+// Word store counterpart of asm_xtensa_l32i_optimised: S32I.N for offsets
+// 0-15, S32I for 16-255, otherwise raise.
+void asm_xtensa_s32i_optimised(asm_xtensa_t *as, uint reg_src, uint reg_base, uint word_offset) {
+    if (word_offset < 16) {
+        asm_xtensa_op_s32i_n(as, reg_src, reg_base, word_offset);
+    } else if (word_offset < 256) {
+        asm_xtensa_op_s32i(as, reg_src, reg_base, word_offset);
+    } else {
+        mp_raise_msg(&mp_type_RuntimeError, MP_ERROR_TEXT("asm overflow"));
+    }
+}
+
+// Indirect call for the call0 ABI: load the function pointer from slot
+// `idx` of the function table (held in ASM_XTENSA_REG_FUN_TABLE) into a0
+// and call through it.
+void asm_xtensa_call_ind(asm_xtensa_t *as, uint idx) {
+    asm_xtensa_l32i_optimised(as, ASM_XTENSA_REG_A0, ASM_XTENSA_REG_FUN_TABLE, idx);
+    asm_xtensa_op_callx0(as, ASM_XTENSA_REG_A0);
+}
+
+// Indirect call for the windowed ABI: load via the windowed fun-table
+// register into a8 and call with CALLX8 (rotates the window by 8).
+void asm_xtensa_call_ind_win(asm_xtensa_t *as, uint idx) {
+    asm_xtensa_l32i_optimised(as, ASM_XTENSA_REG_A8, ASM_XTENSA_REG_FUN_TABLE_WIN, idx);
+    asm_xtensa_op_callx8(as, ASM_XTENSA_REG_A8);
+}
+
+#endif // MICROPY_EMIT_XTENSA || MICROPY_EMIT_INLINE_XTENSA || MICROPY_EMIT_XTENSAWIN

+ 415 - 0
mp_flipper/lib/micropython/py/asmxtensa.h

@@ -0,0 +1,415 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2016 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_ASMXTENSA_H
+#define MICROPY_INCLUDED_PY_ASMXTENSA_H
+
+#include "py/misc.h"
+#include "py/asmbase.h"
+
+// calling conventions:
+// up to 6 args in a2-a7
+// return value in a2
+// PC stored in a0
+// stack pointer is a1, stack full descending, is aligned to 16 bytes
+// callee save: a1, a12, a13, a14, a15
+// caller save: a3
+
+// With windowed registers, size 8:
+// - a0: return PC
+// - a1: stack pointer, full descending, aligned to 16 bytes
+// - a2-a7: incoming args, and essentially callee save
+// - a2: return value
+// - a8-a15: caller save temporaries
+// - a10-a15: input args to called function
+// - a10: return value of called function
+// note: a0-a7 are saved automatically via window shift of called function
+
+#define ASM_XTENSA_REG_A0  (0)
+#define ASM_XTENSA_REG_A1  (1)
+#define ASM_XTENSA_REG_A2  (2)
+#define ASM_XTENSA_REG_A3  (3)
+#define ASM_XTENSA_REG_A4  (4)
+#define ASM_XTENSA_REG_A5  (5)
+#define ASM_XTENSA_REG_A6  (6)
+#define ASM_XTENSA_REG_A7  (7)
+#define ASM_XTENSA_REG_A8  (8)
+#define ASM_XTENSA_REG_A9  (9)
+#define ASM_XTENSA_REG_A10 (10)
+#define ASM_XTENSA_REG_A11 (11)
+#define ASM_XTENSA_REG_A12 (12)
+#define ASM_XTENSA_REG_A13 (13)
+#define ASM_XTENSA_REG_A14 (14)
+#define ASM_XTENSA_REG_A15 (15)
+
+// for bccz
+#define ASM_XTENSA_CCZ_EQ (0)
+#define ASM_XTENSA_CCZ_NE (1)
+
+// for bcc and setcc
+#define ASM_XTENSA_CC_NONE  (0)
+#define ASM_XTENSA_CC_EQ    (1)
+#define ASM_XTENSA_CC_LT    (2)
+#define ASM_XTENSA_CC_LTU   (3)
+#define ASM_XTENSA_CC_ALL   (4)
+#define ASM_XTENSA_CC_BC    (5)
+#define ASM_XTENSA_CC_ANY   (8)
+#define ASM_XTENSA_CC_NE    (9)
+#define ASM_XTENSA_CC_GE    (10)
+#define ASM_XTENSA_CC_GEU   (11)
+#define ASM_XTENSA_CC_NALL  (12)
+#define ASM_XTENSA_CC_BS    (13)
+
+// macros for encoding instructions (little endian versions)
+#define ASM_XTENSA_ENCODE_RRR(op0, op1, op2, r, s, t) \
+    ((((uint32_t)op2) << 20) | (((uint32_t)op1) << 16) | ((r) << 12) | ((s) << 8) | ((t) << 4) | (op0))
+#define ASM_XTENSA_ENCODE_RRI4(op0, op1, r, s, t, imm4) \
+    (((imm4) << 20) | ((op1) << 16) | ((r) << 12) | ((s) << 8) | ((t) << 4) | (op0))
+#define ASM_XTENSA_ENCODE_RRI8(op0, r, s, t, imm8) \
+    ((((uint32_t)imm8) << 16) | ((r) << 12) | ((s) << 8) | ((t) << 4) | (op0))
+#define ASM_XTENSA_ENCODE_RI16(op0, t, imm16) \
+    (((imm16) << 8) | ((t) << 4) | (op0))
+#define ASM_XTENSA_ENCODE_RSR(op0, op1, op2, rs, t) \
+    (((op2) << 20) | ((op1) << 16) | ((rs) << 8) | ((t) << 4) | (op0))
+#define ASM_XTENSA_ENCODE_CALL(op0, n, offset) \
+    (((offset) << 6) | ((n) << 4) | (op0))
+#define ASM_XTENSA_ENCODE_CALLX(op0, op1, op2, r, s, m, n) \
+    ((((uint32_t)op2) << 20) | (((uint32_t)op1) << 16) | ((r) << 12) | ((s) << 8) | ((m) << 6) | ((n) << 4) | (op0))
+#define ASM_XTENSA_ENCODE_BRI8(op0, r, s, m, n, imm8) \
+    (((imm8) << 16) | ((r) << 12) | ((s) << 8) | ((m) << 6) | ((n) << 4) | (op0))
+#define ASM_XTENSA_ENCODE_BRI12(op0, s, m, n, imm12) \
+    (((imm12) << 12) | ((s) << 8) | ((m) << 6) | ((n) << 4) | (op0))
+#define ASM_XTENSA_ENCODE_RRRN(op0, r, s, t) \
+    (((r) << 12) | ((s) << 8) | ((t) << 4) | (op0))
+#define ASM_XTENSA_ENCODE_RI7(op0, s, imm7) \
+    ((((imm7) & 0xf) << 12) | ((s) << 8) | ((imm7) & 0x70) | (op0))
+
+// Number of registers saved on the stack upon entry to function
+#define ASM_XTENSA_NUM_REGS_SAVED (5)
+#define ASM_XTENSA_NUM_REGS_SAVED_WIN (1)
+
+typedef struct _asm_xtensa_t {
+    mp_asm_base_t base;
+    uint32_t cur_const;
+    uint32_t num_const;
+    uint32_t *const_table;
+    uint32_t stack_adjust;
+} asm_xtensa_t;
+
+void asm_xtensa_end_pass(asm_xtensa_t *as);
+
+void asm_xtensa_entry(asm_xtensa_t *as, int num_locals);
+void asm_xtensa_exit(asm_xtensa_t *as);
+
+void asm_xtensa_entry_win(asm_xtensa_t *as, int num_locals);
+void asm_xtensa_exit_win(asm_xtensa_t *as);
+
+void asm_xtensa_op16(asm_xtensa_t *as, uint16_t op);
+void asm_xtensa_op24(asm_xtensa_t *as, uint32_t op);
+
+// raw instructions
+
+// ENTRY: allocate a windowed-ABI stack frame; num_bytes is encoded in
+// units of 8 bytes in the 12-bit immediate field.
+static inline void asm_xtensa_op_entry(asm_xtensa_t *as, uint reg_src, int32_t num_bytes) {
+    asm_xtensa_op24(as, ASM_XTENSA_ENCODE_BRI12(6, reg_src, 0, 3, (num_bytes / 8) & 0xfff));
+}
+
+// ADD.N: narrow (16-bit) three-register add.
+static inline void asm_xtensa_op_add_n(asm_xtensa_t *as, uint reg_dest, uint reg_src_a, uint reg_src_b) {
+    asm_xtensa_op16(as, ASM_XTENSA_ENCODE_RRRN(10, reg_dest, reg_src_a, reg_src_b));
+}
+
+// ADDI: add a signed 8-bit immediate.
+static inline void asm_xtensa_op_addi(asm_xtensa_t *as, uint reg_dest, uint reg_src, int imm8) {
+    asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRI8(2, 12, reg_src, reg_dest, imm8 & 0xff));
+}
+
+// AND: bitwise and of two registers.
+static inline void asm_xtensa_op_and(asm_xtensa_t *as, uint reg_dest, uint reg_src_a, uint reg_src_b) {
+    asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRR(0, 0, 1, reg_dest, reg_src_a, reg_src_b));
+}
+
+// Bcc: two-register conditional branch, signed 8-bit PC-relative offset.
+static inline void asm_xtensa_op_bcc(asm_xtensa_t *as, uint cond, uint reg_src1, uint reg_src2, int32_t rel8) {
+    asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRI8(7, cond, reg_src1, reg_src2, rel8 & 0xff));
+}
+
+// BEQZ/BNEZ: branch if register is (non-)zero, signed 12-bit offset.
+static inline void asm_xtensa_op_bccz(asm_xtensa_t *as, uint cond, uint reg_src, int32_t rel12) {
+    asm_xtensa_op24(as, ASM_XTENSA_ENCODE_BRI12(6, reg_src, cond, 1, rel12 & 0xfff));
+}
+
+// CALL0: direct call (call0 ABI), 18-bit PC-relative target.
+static inline void asm_xtensa_op_call0(asm_xtensa_t *as, int32_t rel18) {
+    asm_xtensa_op24(as, ASM_XTENSA_ENCODE_CALL(5, 0, rel18 & 0x3ffff));
+}
+
+// CALLX0: indirect call through a register (call0 ABI).
+static inline void asm_xtensa_op_callx0(asm_xtensa_t *as, uint reg) {
+    asm_xtensa_op24(as, ASM_XTENSA_ENCODE_CALLX(0, 0, 0, 0, reg, 3, 0));
+}
+
+// CALLX8: indirect call with window rotation by 8 (windowed ABI).
+static inline void asm_xtensa_op_callx8(asm_xtensa_t *as, uint reg) {
+    asm_xtensa_op24(as, ASM_XTENSA_ENCODE_CALLX(0, 0, 0, 0, reg, 3, 2));
+}
+
+// J: unconditional jump, 18-bit PC-relative offset.
+static inline void asm_xtensa_op_j(asm_xtensa_t *as, int32_t rel18) {
+    asm_xtensa_op24(as, ASM_XTENSA_ENCODE_CALL(6, 0, rel18 & 0x3ffff));
+}
+
+// JX: unconditional jump to address in a register.
+static inline void asm_xtensa_op_jx(asm_xtensa_t *as, uint reg) {
+    asm_xtensa_op24(as, ASM_XTENSA_ENCODE_CALLX(0, 0, 0, 0, reg, 2, 2));
+}
+
+// L8UI: zero-extending byte load, unsigned 8-bit byte offset.
+static inline void asm_xtensa_op_l8ui(asm_xtensa_t *as, uint reg_dest, uint reg_base, uint byte_offset) {
+    asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRI8(2, 0, reg_base, reg_dest, byte_offset & 0xff));
+}
+
+// L16UI: zero-extending halfword load; offset is in halfword units.
+static inline void asm_xtensa_op_l16ui(asm_xtensa_t *as, uint reg_dest, uint reg_base, uint half_word_offset) {
+    asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRI8(2, 1, reg_base, reg_dest, half_word_offset & 0xff));
+}
+
+// L32I: word load; offset is in word units (0-255).
+static inline void asm_xtensa_op_l32i(asm_xtensa_t *as, uint reg_dest, uint reg_base, uint word_offset) {
+    asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRI8(2, 2, reg_base, reg_dest, word_offset & 0xff));
+}
+
+// L32I.N: narrow word load; offset limited to word units 0-15.
+static inline void asm_xtensa_op_l32i_n(asm_xtensa_t *as, uint reg_dest, uint reg_base, uint word_offset) {
+    asm_xtensa_op16(as, ASM_XTENSA_ENCODE_RRRN(8, word_offset & 0xf, reg_base, reg_dest));
+}
+
+// L32R: PC-relative literal load.  The offset field is the word distance
+// from the instruction (rounded up to a word boundary) back/forward to the
+// literal at dest_off.
+static inline void asm_xtensa_op_l32r(asm_xtensa_t *as, uint reg_dest, uint32_t op_off, uint32_t dest_off) {
+    asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RI16(1, reg_dest, ((dest_off - ((op_off + 3) & ~3)) >> 2) & 0xffff));
+}
+
+// MOV.N: narrow register-to-register move.
+static inline void asm_xtensa_op_mov_n(asm_xtensa_t *as, uint reg_dest, uint reg_src) {
+    asm_xtensa_op16(as, ASM_XTENSA_ENCODE_RRRN(13, 0, reg_src, reg_dest));
+}
+
+// MOVI: load a signed 12-bit immediate (split across two encoding fields).
+static inline void asm_xtensa_op_movi(asm_xtensa_t *as, uint reg_dest, int32_t imm12) {
+    asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRI8(2, 10, (imm12 >> 8) & 0xf, reg_dest, imm12 & 0xff));
+}
+
+// Argument must be in the range (-32 .. 95) inclusive.
+// MOVI.N: narrow immediate load.
+static inline void asm_xtensa_op_movi_n(asm_xtensa_t *as, uint reg_dest, int imm7) {
+    asm_xtensa_op16(as, ASM_XTENSA_ENCODE_RI7(12, reg_dest, imm7));
+}
+
+// MULL: 32x32 -> low-32-bit multiply (requires the MUL32 option).
+static inline void asm_xtensa_op_mull(asm_xtensa_t *as, uint reg_dest, uint reg_src_a, uint reg_src_b) {
+    asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRR(0, 2, 8, reg_dest, reg_src_a, reg_src_b));
+}
+
+// NEG: two's-complement negate.
+static inline void asm_xtensa_op_neg(asm_xtensa_t *as, uint reg_dest, uint reg_src) {
+    asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRR(0, 0, 6, reg_dest, 0, reg_src));
+}
+
+// OR: bitwise or of two registers.
+static inline void asm_xtensa_op_or(asm_xtensa_t *as, uint reg_dest, uint reg_src_a, uint reg_src_b) {
+    asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRR(0, 0, 2, reg_dest, reg_src_a, reg_src_b));
+}
+
+// RET.N: narrow return (call0 ABI).
+static inline void asm_xtensa_op_ret_n(asm_xtensa_t *as) {
+    asm_xtensa_op16(as, ASM_XTENSA_ENCODE_RRRN(13, 15, 0, 0));
+}
+
+// RETW.N: narrow windowed return (rotates the register window back).
+static inline void asm_xtensa_op_retw_n(asm_xtensa_t *as) {
+    asm_xtensa_op16(as, ASM_XTENSA_ENCODE_RRRN(13, 15, 0, 1));
+}
+
+// S8I: byte store, unsigned 8-bit byte offset.
+static inline void asm_xtensa_op_s8i(asm_xtensa_t *as, uint reg_src, uint reg_base, uint byte_offset) {
+    asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRI8(2, 4, reg_base, reg_src, byte_offset & 0xff));
+}
+
+// S16I: halfword store; offset is in halfword units.
+static inline void asm_xtensa_op_s16i(asm_xtensa_t *as, uint reg_src, uint reg_base, uint half_word_offset) {
+    asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRI8(2, 5, reg_base, reg_src, half_word_offset & 0xff));
+}
+
+// S32I: word store; offset is in word units (0-255).
+static inline void asm_xtensa_op_s32i(asm_xtensa_t *as, uint reg_src, uint reg_base, uint word_offset) {
+    asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRI8(2, 6, reg_base, reg_src, word_offset & 0xff));
+}
+
+// S32I.N: narrow word store; offset limited to word units 0-15.
+static inline void asm_xtensa_op_s32i_n(asm_xtensa_t *as, uint reg_src, uint reg_base, uint word_offset) {
+    asm_xtensa_op16(as, ASM_XTENSA_ENCODE_RRRN(9, word_offset & 0xf, reg_base, reg_src));
+}
+
+// SLL: shift left by the amount in the SAR register (set with op_ssl).
+static inline void asm_xtensa_op_sll(asm_xtensa_t *as, uint reg_dest, uint reg_src) {
+    asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRR(0, 1, 10, reg_dest, reg_src, 0));
+}
+
+// SRL: logical shift right by the amount in SAR (set with op_ssr).
+static inline void asm_xtensa_op_srl(asm_xtensa_t *as, uint reg_dest, uint reg_src) {
+    asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRR(0, 1, 9, reg_dest, 0, reg_src));
+}
+
+// SRA: arithmetic shift right by the amount in SAR (set with op_ssr).
+static inline void asm_xtensa_op_sra(asm_xtensa_t *as, uint reg_dest, uint reg_src) {
+    asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRR(0, 1, 11, reg_dest, 0, reg_src));
+}
+
+// SSL: load SAR with the left-shift amount from reg_src.
+static inline void asm_xtensa_op_ssl(asm_xtensa_t *as, uint reg_src) {
+    asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRR(0, 0, 4, 1, reg_src, 0));
+}
+
+// SSR: load SAR with the right-shift amount from reg_src.
+static inline void asm_xtensa_op_ssr(asm_xtensa_t *as, uint reg_src) {
+    asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRR(0, 0, 4, 0, reg_src, 0));
+}
+
+// SUB: reg_dest = reg_src_a - reg_src_b.
+static inline void asm_xtensa_op_sub(asm_xtensa_t *as, uint reg_dest, uint reg_src_a, uint reg_src_b) {
+    asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRR(0, 0, 12, reg_dest, reg_src_a, reg_src_b));
+}
+
+// XOR: bitwise exclusive-or of two registers.
+static inline void asm_xtensa_op_xor(asm_xtensa_t *as, uint reg_dest, uint reg_src_a, uint reg_src_b) {
+    asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRR(0, 0, 3, reg_dest, reg_src_a, reg_src_b));
+}
+
+// convenience functions
+void asm_xtensa_j_label(asm_xtensa_t *as, uint label);
+void asm_xtensa_bccz_reg_label(asm_xtensa_t *as, uint cond, uint reg, uint label);
+void asm_xtensa_bcc_reg_reg_label(asm_xtensa_t *as, uint cond, uint reg1, uint reg2, uint label);
+void asm_xtensa_setcc_reg_reg_reg(asm_xtensa_t *as, uint cond, uint reg_dest, uint reg_src1, uint reg_src2);
+size_t asm_xtensa_mov_reg_i32(asm_xtensa_t *as, uint reg_dest, uint32_t i32);
+void asm_xtensa_mov_reg_i32_optimised(asm_xtensa_t *as, uint reg_dest, uint32_t i32);
+void asm_xtensa_mov_local_reg(asm_xtensa_t *as, int local_num, uint reg_src);
+void asm_xtensa_mov_reg_local(asm_xtensa_t *as, uint reg_dest, int local_num);
+void asm_xtensa_mov_reg_local_addr(asm_xtensa_t *as, uint reg_dest, int local_num);
+void asm_xtensa_mov_reg_pcrel(asm_xtensa_t *as, uint reg_dest, uint label);
+void asm_xtensa_l32i_optimised(asm_xtensa_t *as, uint reg_dest, uint reg_base, uint word_offset);
+void asm_xtensa_s32i_optimised(asm_xtensa_t *as, uint reg_src, uint reg_base, uint word_offset);
+void asm_xtensa_call_ind(asm_xtensa_t *as, uint idx);
+void asm_xtensa_call_ind_win(asm_xtensa_t *as, uint idx);
+
+// Holds a pointer to mp_fun_table
+#define ASM_XTENSA_REG_FUN_TABLE ASM_XTENSA_REG_A15
+#define ASM_XTENSA_REG_FUN_TABLE_WIN ASM_XTENSA_REG_A7
+
+#if GENERIC_ASM_API
+
+// The following macros provide a (mostly) arch-independent API to
+// generate native code, and are used by the native emitter.
+
+#define ASM_WORD_SIZE (4)
+
+#if !GENERIC_ASM_API_WIN
+// Configuration for non-windowed calls
+
+#define REG_RET ASM_XTENSA_REG_A2
+#define REG_ARG_1 ASM_XTENSA_REG_A2
+#define REG_ARG_2 ASM_XTENSA_REG_A3
+#define REG_ARG_3 ASM_XTENSA_REG_A4
+#define REG_ARG_4 ASM_XTENSA_REG_A5
+#define REG_ARG_5 ASM_XTENSA_REG_A6
+
+#define REG_TEMP0 ASM_XTENSA_REG_A2
+#define REG_TEMP1 ASM_XTENSA_REG_A3
+#define REG_TEMP2 ASM_XTENSA_REG_A4
+
+#define REG_LOCAL_1 ASM_XTENSA_REG_A12
+#define REG_LOCAL_2 ASM_XTENSA_REG_A13
+#define REG_LOCAL_3 ASM_XTENSA_REG_A14
+#define REG_LOCAL_NUM (3)
+
+#define ASM_NUM_REGS_SAVED ASM_XTENSA_NUM_REGS_SAVED
+#define REG_FUN_TABLE ASM_XTENSA_REG_FUN_TABLE
+
+#define ASM_ENTRY(as, nlocal)   asm_xtensa_entry((as), (nlocal))
+#define ASM_EXIT(as)            asm_xtensa_exit((as))
+#define ASM_CALL_IND(as, idx)   asm_xtensa_call_ind((as), (idx))
+
+#else
+// Configuration for windowed calls with window size 8
+
+#define REG_PARENT_RET ASM_XTENSA_REG_A2
+#define REG_PARENT_ARG_1 ASM_XTENSA_REG_A2
+#define REG_PARENT_ARG_2 ASM_XTENSA_REG_A3
+#define REG_PARENT_ARG_3 ASM_XTENSA_REG_A4
+#define REG_PARENT_ARG_4 ASM_XTENSA_REG_A5
+#define REG_RET ASM_XTENSA_REG_A10
+#define REG_ARG_1 ASM_XTENSA_REG_A10
+#define REG_ARG_2 ASM_XTENSA_REG_A11
+#define REG_ARG_3 ASM_XTENSA_REG_A12
+#define REG_ARG_4 ASM_XTENSA_REG_A13
+
+#define REG_TEMP0 ASM_XTENSA_REG_A10
+#define REG_TEMP1 ASM_XTENSA_REG_A11
+#define REG_TEMP2 ASM_XTENSA_REG_A12
+
+#define REG_LOCAL_1 ASM_XTENSA_REG_A4
+#define REG_LOCAL_2 ASM_XTENSA_REG_A5
+#define REG_LOCAL_3 ASM_XTENSA_REG_A6
+#define REG_LOCAL_NUM (3)
+
+#define ASM_NUM_REGS_SAVED ASM_XTENSA_NUM_REGS_SAVED_WIN
+#define REG_FUN_TABLE ASM_XTENSA_REG_FUN_TABLE_WIN
+
+#define ASM_ENTRY(as, nlocal)   asm_xtensa_entry_win((as), (nlocal))
+#define ASM_EXIT(as)            asm_xtensa_exit_win((as))
+#define ASM_CALL_IND(as, idx)   asm_xtensa_call_ind_win((as), (idx))
+
+#endif
+
+#define ASM_T               asm_xtensa_t
+#define ASM_END_PASS        asm_xtensa_end_pass
+
+#define ASM_JUMP            asm_xtensa_j_label
+#define ASM_JUMP_IF_REG_ZERO(as, reg, label, bool_test) \
+    asm_xtensa_bccz_reg_label(as, ASM_XTENSA_CCZ_EQ, reg, label)
+#define ASM_JUMP_IF_REG_NONZERO(as, reg, label, bool_test) \
+    asm_xtensa_bccz_reg_label(as, ASM_XTENSA_CCZ_NE, reg, label)
+#define ASM_JUMP_IF_REG_EQ(as, reg1, reg2, label) \
+    asm_xtensa_bcc_reg_reg_label(as, ASM_XTENSA_CC_EQ, reg1, reg2, label)
+#define ASM_JUMP_REG(as, reg) asm_xtensa_op_jx((as), (reg))
+
+#define ASM_MOV_LOCAL_REG(as, local_num, reg_src) asm_xtensa_mov_local_reg((as), ASM_NUM_REGS_SAVED + (local_num), (reg_src))
+#define ASM_MOV_REG_IMM(as, reg_dest, imm) asm_xtensa_mov_reg_i32_optimised((as), (reg_dest), (imm))
+#define ASM_MOV_REG_LOCAL(as, reg_dest, local_num) asm_xtensa_mov_reg_local((as), (reg_dest), ASM_NUM_REGS_SAVED + (local_num))
+#define ASM_MOV_REG_REG(as, reg_dest, reg_src) asm_xtensa_op_mov_n((as), (reg_dest), (reg_src))
+#define ASM_MOV_REG_LOCAL_ADDR(as, reg_dest, local_num) asm_xtensa_mov_reg_local_addr((as), (reg_dest), ASM_NUM_REGS_SAVED + (local_num))
+#define ASM_MOV_REG_PCREL(as, reg_dest, label) asm_xtensa_mov_reg_pcrel((as), (reg_dest), (label))
+
+#define ASM_NEG_REG(as, reg_dest) asm_xtensa_op_neg((as), (reg_dest), (reg_dest))
+#define ASM_LSL_REG_REG(as, reg_dest, reg_shift) \
+    do { \
+        asm_xtensa_op_ssl((as), (reg_shift)); \
+        asm_xtensa_op_sll((as), (reg_dest), (reg_dest)); \
+    } while (0)
+#define ASM_LSR_REG_REG(as, reg_dest, reg_shift) \
+    do { \
+        asm_xtensa_op_ssr((as), (reg_shift)); \
+        asm_xtensa_op_srl((as), (reg_dest), (reg_dest)); \
+    } while (0)
+#define ASM_ASR_REG_REG(as, reg_dest, reg_shift) \
+    do { \
+        asm_xtensa_op_ssr((as), (reg_shift)); \
+        asm_xtensa_op_sra((as), (reg_dest), (reg_dest)); \
+    } while (0)
+#define ASM_OR_REG_REG(as, reg_dest, reg_src) asm_xtensa_op_or((as), (reg_dest), (reg_dest), (reg_src))
+#define ASM_XOR_REG_REG(as, reg_dest, reg_src) asm_xtensa_op_xor((as), (reg_dest), (reg_dest), (reg_src))
+#define ASM_AND_REG_REG(as, reg_dest, reg_src) asm_xtensa_op_and((as), (reg_dest), (reg_dest), (reg_src))
+#define ASM_ADD_REG_REG(as, reg_dest, reg_src) asm_xtensa_op_add_n((as), (reg_dest), (reg_dest), (reg_src))
+#define ASM_SUB_REG_REG(as, reg_dest, reg_src) asm_xtensa_op_sub((as), (reg_dest), (reg_dest), (reg_src))
+#define ASM_MUL_REG_REG(as, reg_dest, reg_src) asm_xtensa_op_mull((as), (reg_dest), (reg_dest), (reg_src))
+
+#define ASM_LOAD_REG_REG_OFFSET(as, reg_dest, reg_base, word_offset) asm_xtensa_l32i_optimised((as), (reg_dest), (reg_base), (word_offset))
+#define ASM_LOAD8_REG_REG(as, reg_dest, reg_base) asm_xtensa_op_l8ui((as), (reg_dest), (reg_base), 0)
+#define ASM_LOAD16_REG_REG(as, reg_dest, reg_base) asm_xtensa_op_l16ui((as), (reg_dest), (reg_base), 0)
+#define ASM_LOAD16_REG_REG_OFFSET(as, reg_dest, reg_base, uint16_offset) asm_xtensa_op_l16ui((as), (reg_dest), (reg_base), (uint16_offset))
+#define ASM_LOAD32_REG_REG(as, reg_dest, reg_base) asm_xtensa_op_l32i_n((as), (reg_dest), (reg_base), 0)
+
+#define ASM_STORE_REG_REG_OFFSET(as, reg_dest, reg_base, word_offset) asm_xtensa_s32i_optimised((as), (reg_dest), (reg_base), (word_offset))
+#define ASM_STORE8_REG_REG(as, reg_src, reg_base) asm_xtensa_op_s8i((as), (reg_src), (reg_base), 0)
+#define ASM_STORE16_REG_REG(as, reg_src, reg_base) asm_xtensa_op_s16i((as), (reg_src), (reg_base), 0)
+#define ASM_STORE32_REG_REG(as, reg_src, reg_base) asm_xtensa_op_s32i_n((as), (reg_src), (reg_base), 0)
+
+#endif // GENERIC_ASM_API
+
+#endif // MICROPY_INCLUDED_PY_ASMXTENSA_H

+ 345 - 0
mp_flipper/lib/micropython/py/bc.c

@@ -0,0 +1,345 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2014 Damien P. George
+ * Copyright (c) 2014 Paul Sokolovsky
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdbool.h>
+#include <string.h>
+#include <assert.h>
+
+#include "py/bc0.h"
+#include "py/bc.h"
+#include "py/objfun.h"
+
+#if MICROPY_DEBUG_VERBOSE // print debugging info
+#define DEBUG_PRINT (1)
+#else // don't print debugging info
+#define DEBUG_PRINT (0)
+#define DEBUG_printf(...) (void)0
+#endif
+
+void mp_encode_uint(void *env, mp_encode_uint_allocator_t allocator, mp_uint_t val) {
+    // We store each 7 bits in a separate byte, and that's how many bytes needed
+    byte buf[MP_ENCODE_UINT_MAX_BYTES];
+    byte *p = buf + sizeof(buf);
+    // We encode in little-endian order, but store in big-endian, to help decoding
+    do {
+        *--p = val & 0x7f;
+        val >>= 7;
+    } while (val != 0);
+    byte *c = allocator(env, buf + sizeof(buf) - p);
+    if (c != NULL) {
+        while (p != buf + sizeof(buf) - 1) {
+            *c++ = *p++ | 0x80;
+        }
+        *c = *p;
+    }
+}
+
+mp_uint_t mp_decode_uint(const byte **ptr) {
+    mp_uint_t unum = 0;
+    byte val;
+    const byte *p = *ptr;
+    do {
+        val = *p++;
+        unum = (unum << 7) | (val & 0x7f);
+    } while ((val & 0x80) != 0);
+    *ptr = p;
+    return unum;
+}
+
+// This function is used to help reduce stack usage at the caller, for the case when
+// the caller doesn't need to increase the ptr argument.  If ptr is a local variable
+// and the caller uses mp_decode_uint(&ptr) instead of this function, then the compiler
+// must allocate a slot on the stack for ptr, and this slot cannot be reused for
+// anything else in the function because the pointer may have been stored in a global
+// and reused later in the function.
+mp_uint_t mp_decode_uint_value(const byte *ptr) {
+    return mp_decode_uint(&ptr);
+}
+
+// This function is used to help reduce stack usage at the caller, for the case when
+// the caller doesn't need the actual value and just wants to skip over it.
+const byte *mp_decode_uint_skip(const byte *ptr) {
+    while ((*ptr++) & 0x80) {
+    }
+    return ptr;
+}
+
+static NORETURN void fun_pos_args_mismatch(mp_obj_fun_bc_t *f, size_t expected, size_t given) {
+    #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
+    // generic message, used also for other argument issues
+    (void)f;
+    (void)expected;
+    (void)given;
+    mp_arg_error_terse_mismatch();
+    #elif MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_NORMAL
+    (void)f;
+    mp_raise_msg_varg(&mp_type_TypeError,
+        MP_ERROR_TEXT("function takes %d positional arguments but %d were given"), expected, given);
+    #elif MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_DETAILED
+    mp_raise_msg_varg(&mp_type_TypeError,
+        MP_ERROR_TEXT("%q() takes %d positional arguments but %d were given"),
+        mp_obj_fun_get_name(MP_OBJ_FROM_PTR(f)), expected, given);
+    #endif
+}
+
+#if DEBUG_PRINT
+static void dump_args(const mp_obj_t *a, size_t sz) {
+    DEBUG_printf("%p: ", a);
+    for (size_t i = 0; i < sz; i++) {
+        DEBUG_printf("%p ", a[i]);
+    }
+    DEBUG_printf("\n");
+}
+#else
+#define dump_args(...) (void)0
+#endif
+
+// On entry code_state should be allocated somewhere (stack/heap) and
+// contain the following valid entries:
+//    - code_state->fun_bc should contain a pointer to the function object
+//    - code_state->ip should contain a pointer to the beginning of the prelude
+//    - code_state->sp should be: &code_state->state[0] - 1
+//    - code_state->n_state should be the number of objects in the local state
+static void mp_setup_code_state_helper(mp_code_state_t *code_state, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    // This function is pretty complicated.  Its main aim is to be efficient in speed and RAM
+    // usage for the common case of positional only args.
+
+    // get the function object that we want to set up (could be bytecode or native code)
+    mp_obj_fun_bc_t *self = code_state->fun_bc;
+
+    // Get cached n_state (rather than decode it again)
+    size_t n_state = code_state->n_state;
+
+    // Decode prelude
+    size_t n_state_unused, n_exc_stack_unused, scope_flags, n_pos_args, n_kwonly_args, n_def_pos_args;
+    MP_BC_PRELUDE_SIG_DECODE_INTO(code_state->ip, n_state_unused, n_exc_stack_unused, scope_flags, n_pos_args, n_kwonly_args, n_def_pos_args);
+    MP_BC_PRELUDE_SIZE_DECODE(code_state->ip);
+    (void)n_state_unused;
+    (void)n_exc_stack_unused;
+
+    mp_obj_t *code_state_state = code_state->sp + 1;
+    code_state->exc_sp_idx = 0;
+
+    // zero out the local stack to begin with
+    memset(code_state_state, 0, n_state * sizeof(*code_state->state));
+
+    const mp_obj_t *kwargs = args + n_args;
+
+    // var_pos_kw_args points to the stack where the var-args tuple, and var-kw dict, should go (if they are needed)
+    mp_obj_t *var_pos_kw_args = &code_state_state[n_state - 1 - n_pos_args - n_kwonly_args];
+
+    // check positional arguments
+
+    if (n_args > n_pos_args) {
+        // given more than enough arguments
+        if ((scope_flags & MP_SCOPE_FLAG_VARARGS) == 0) {
+            fun_pos_args_mismatch(self, n_pos_args, n_args);
+        }
+        // put extra arguments in varargs tuple
+        *var_pos_kw_args-- = mp_obj_new_tuple(n_args - n_pos_args, args + n_pos_args);
+        n_args = n_pos_args;
+    } else {
+        if ((scope_flags & MP_SCOPE_FLAG_VARARGS) != 0) {
+            DEBUG_printf("passing empty tuple as *args\n");
+            *var_pos_kw_args-- = mp_const_empty_tuple;
+        }
+        // Apply processing and check below only if we don't have kwargs,
+        // otherwise, kw handling code below has own extensive checks.
+        if (n_kw == 0 && (scope_flags & MP_SCOPE_FLAG_DEFKWARGS) == 0) {
+            if (n_args >= (size_t)(n_pos_args - n_def_pos_args)) {
+                // given enough arguments, but may need to use some default arguments
+                for (size_t i = n_args; i < n_pos_args; i++) {
+                    code_state_state[n_state - 1 - i] = self->extra_args[i - (n_pos_args - n_def_pos_args)];
+                }
+            } else {
+                fun_pos_args_mismatch(self, n_pos_args - n_def_pos_args, n_args);
+            }
+        }
+    }
+
+    // copy positional args into state
+    for (size_t i = 0; i < n_args; i++) {
+        code_state_state[n_state - 1 - i] = args[i];
+    }
+
+    // check keyword arguments
+
+    if (n_kw != 0 || (scope_flags & MP_SCOPE_FLAG_DEFKWARGS) != 0) {
+        DEBUG_printf("Initial args: ");
+        dump_args(code_state_state + n_state - n_pos_args - n_kwonly_args, n_pos_args + n_kwonly_args);
+
+        mp_obj_t dict = MP_OBJ_NULL;
+        if ((scope_flags & MP_SCOPE_FLAG_VARKEYWORDS) != 0) {
+            dict = mp_obj_new_dict(n_kw); // TODO: better go conservative with 0?
+            *var_pos_kw_args = dict;
+        }
+
+        for (size_t i = 0; i < n_kw; i++) {
+            // the keys in kwargs are expected to be qstr objects
+            mp_obj_t wanted_arg_name = kwargs[2 * i];
+
+            // get pointer to arg_names array
+            const uint8_t *arg_names = code_state->ip;
+            arg_names = mp_decode_uint_skip(arg_names);
+
+            for (size_t j = 0; j < n_pos_args + n_kwonly_args; j++) {
+                qstr arg_qstr = mp_decode_uint(&arg_names);
+                #if MICROPY_EMIT_BYTECODE_USES_QSTR_TABLE
+                arg_qstr = self->context->constants.qstr_table[arg_qstr];
+                #endif
+                if (wanted_arg_name == MP_OBJ_NEW_QSTR(arg_qstr)) {
+                    if (code_state_state[n_state - 1 - j] != MP_OBJ_NULL) {
+                    error_multiple:
+                        mp_raise_msg_varg(&mp_type_TypeError,
+                            MP_ERROR_TEXT("function got multiple values for argument '%q'"), MP_OBJ_QSTR_VALUE(wanted_arg_name));
+                    }
+                    code_state_state[n_state - 1 - j] = kwargs[2 * i + 1];
+                    goto continue2;
+                }
+            }
+            // Didn't find name match with positional args
+            if ((scope_flags & MP_SCOPE_FLAG_VARKEYWORDS) == 0) {
+                #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
+                mp_raise_TypeError(MP_ERROR_TEXT("unexpected keyword argument"));
+                #else
+                mp_raise_msg_varg(&mp_type_TypeError,
+                    MP_ERROR_TEXT("unexpected keyword argument '%q'"), MP_OBJ_QSTR_VALUE(wanted_arg_name));
+                #endif
+            }
+            mp_map_elem_t *elem = mp_map_lookup(mp_obj_dict_get_map(dict), wanted_arg_name, MP_MAP_LOOKUP_ADD_IF_NOT_FOUND);
+            if (elem->value == MP_OBJ_NULL) {
+                elem->value = kwargs[2 * i + 1];
+            } else {
+                goto error_multiple;
+            }
+        continue2:;
+        }
+
+        DEBUG_printf("Args with kws flattened: ");
+        dump_args(code_state_state + n_state - n_pos_args - n_kwonly_args, n_pos_args + n_kwonly_args);
+
+        // fill in defaults for positional args
+        mp_obj_t *d = &code_state_state[n_state - n_pos_args];
+        mp_obj_t *s = &self->extra_args[n_def_pos_args - 1];
+        for (size_t i = n_def_pos_args; i > 0; i--, d++, s--) {
+            if (*d == MP_OBJ_NULL) {
+                *d = *s;
+            }
+        }
+
+        DEBUG_printf("Args after filling default positional: ");
+        dump_args(code_state_state + n_state - n_pos_args - n_kwonly_args, n_pos_args + n_kwonly_args);
+
+        // Check that all mandatory positional args are specified
+        while (d < &code_state_state[n_state]) {
+            if (*d++ == MP_OBJ_NULL) {
+                mp_raise_msg_varg(&mp_type_TypeError,
+                    MP_ERROR_TEXT("function missing required positional argument #%d"), &code_state_state[n_state] - d);
+            }
+        }
+
+        // Check that all mandatory keyword args are specified
+        // Fill in default kw args if we have them
+        const uint8_t *arg_names = mp_decode_uint_skip(code_state->ip);
+        for (size_t i = 0; i < n_pos_args; i++) {
+            arg_names = mp_decode_uint_skip(arg_names);
+        }
+        for (size_t i = 0; i < n_kwonly_args; i++) {
+            qstr arg_qstr = mp_decode_uint(&arg_names);
+            #if MICROPY_EMIT_BYTECODE_USES_QSTR_TABLE
+            arg_qstr = self->context->constants.qstr_table[arg_qstr];
+            #endif
+            if (code_state_state[n_state - 1 - n_pos_args - i] == MP_OBJ_NULL) {
+                mp_map_elem_t *elem = NULL;
+                if ((scope_flags & MP_SCOPE_FLAG_DEFKWARGS) != 0) {
+                    elem = mp_map_lookup(&((mp_obj_dict_t *)MP_OBJ_TO_PTR(self->extra_args[n_def_pos_args]))->map, MP_OBJ_NEW_QSTR(arg_qstr), MP_MAP_LOOKUP);
+                }
+                if (elem != NULL) {
+                    code_state_state[n_state - 1 - n_pos_args - i] = elem->value;
+                } else {
+                    mp_raise_msg_varg(&mp_type_TypeError,
+                        MP_ERROR_TEXT("function missing required keyword argument '%q'"), arg_qstr);
+                }
+            }
+        }
+
+    } else {
+        // no keyword arguments given
+        if (n_kwonly_args != 0) {
+            mp_raise_TypeError(MP_ERROR_TEXT("function missing keyword-only argument"));
+        }
+        if ((scope_flags & MP_SCOPE_FLAG_VARKEYWORDS) != 0) {
+            *var_pos_kw_args = mp_obj_new_dict(0);
+        }
+    }
+
+    // jump over code info (source file, argument names and line-number mapping)
+    const uint8_t *ip = code_state->ip + n_info;
+
+    // bytecode prelude: initialise closed over variables
+    for (; n_cell; --n_cell) {
+        size_t local_num = *ip++;
+        code_state_state[n_state - 1 - local_num] =
+            mp_obj_new_cell(code_state_state[n_state - 1 - local_num]);
+    }
+
+    // now that we skipped over the prelude, set the ip for the VM
+    code_state->ip = ip;
+
+    DEBUG_printf("Calling: n_pos_args=%d, n_kwonly_args=%d\n", n_pos_args, n_kwonly_args);
+    dump_args(code_state_state + n_state - n_pos_args - n_kwonly_args, n_pos_args + n_kwonly_args);
+    dump_args(code_state_state, n_state);
+}
+
+// On entry code_state should be allocated somewhere (stack/heap) and
+// contain the following valid entries:
+//    - code_state->fun_bc should contain a pointer to the function object
+//    - code_state->n_state should be the number of objects in the local state
+void mp_setup_code_state(mp_code_state_t *code_state, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    code_state->ip = code_state->fun_bc->bytecode;
+    code_state->sp = &code_state->state[0] - 1;
+    #if MICROPY_STACKLESS
+    code_state->prev = NULL;
+    #endif
+    #if MICROPY_PY_SYS_SETTRACE
+    code_state->prev_state = NULL;
+    code_state->frame = NULL;
+    #endif
+    mp_setup_code_state_helper(code_state, n_args, n_kw, args);
+}
+
+#if MICROPY_EMIT_NATIVE
+// On entry code_state should be allocated somewhere (stack/heap) and
+// contain the following valid entries:
+//    - code_state->fun_bc should contain a pointer to the function object
+//    - code_state->n_state should be the number of objects in the local state
+void mp_setup_code_state_native(mp_code_state_native_t *code_state, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    code_state->ip = mp_obj_fun_native_get_prelude_ptr(code_state->fun_bc);
+    code_state->sp = &code_state->state[0] - 1;
+    mp_setup_code_state_helper((mp_code_state_t *)code_state, n_args, n_kw, args);
+}
+#endif

+ 338 - 0
mp_flipper/lib/micropython/py/bc.h

@@ -0,0 +1,338 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ * Copyright (c) 2014 Paul Sokolovsky
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_BC_H
+#define MICROPY_INCLUDED_PY_BC_H
+
+#include "py/runtime.h"
+
+// bytecode layout:
+//
+//  func signature  : var uint
+//      contains six values interleaved bit-wise as: xSSSSEAA [xFSSKAED repeated]
+//          x = extension           another byte follows
+//          S = n_state - 1         number of entries in Python value stack
+//          E = n_exc_stack         number of entries in exception stack
+//          F = scope_flags         four bits of flags, MP_SCOPE_FLAG_xxx
+//          A = n_pos_args          number of arguments this function takes
+//          K = n_kwonly_args       number of keyword-only arguments this function takes
+//          D = n_def_pos_args      number of default positional arguments
+//
+//  prelude size    : var uint
+//      contains two values interleaved bit-wise as: xIIIIIIC repeated
+//          x = extension           another byte follows
+//          I = n_info              number of bytes in source info section (always > 0)
+//          C = n_cells             number of bytes/cells in closure section
+//
+//  source info section:
+//      simple_name : var qstr      always exists
+//      argname0    : var qstr
+//      ...         : var qstr
+//      argnameN    : var qstr      N = num_pos_args + num_kwonly_args - 1
+//      <line number info>
+//
+//  closure section:
+//      local_num0  : byte
+//      ...         : byte
+//      local_numN  : byte          N = n_cells-1
+//
+//  <bytecode>
+//
+//
+// constant table layout:
+//
+//  const0          : obj
+//  constN          : obj
+
+#define MP_ENCODE_UINT_MAX_BYTES ((MP_BYTES_PER_OBJ_WORD * 8 + 6) / 7)
+
+#define MP_BC_PRELUDE_SIG_ENCODE(S, E, scope, out_byte, out_env) \
+    do {                                                            \
+        /*// Get values to store in prelude */                      \
+        size_t F = scope->scope_flags & MP_SCOPE_FLAG_ALL_SIG;      \
+        size_t A = scope->num_pos_args;                             \
+        size_t K = scope->num_kwonly_args;                          \
+        size_t D = scope->num_def_pos_args;                         \
+                                                                \
+        /* Adjust S to shrink range, to compress better */          \
+        S -= 1;                                                     \
+                                                                \
+        /* Encode prelude */                                        \
+        /* xSSSSEAA */                                              \
+        uint8_t z = (S & 0xf) << 3 | (E & 1) << 2 | (A & 3);        \
+        S >>= 4;                                                    \
+        E >>= 1;                                                    \
+        A >>= 2;                                                    \
+        while (S | E | F | A | K | D) {                             \
+            out_byte(out_env, 0x80 | z);                            \
+            /* xFSSKAED */                                          \
+            z = (F & 1) << 6 | (S & 3) << 4 | (K & 1) << 3          \
+                | (A & 1) << 2 | (E & 1) << 1 | (D & 1);            \
+            S >>= 2;                                                \
+            E >>= 1;                                                \
+            F >>= 1;                                                \
+            A >>= 1;                                                \
+            K >>= 1;                                                \
+            D >>= 1;                                                \
+        }                                                           \
+        out_byte(out_env, z);                                       \
+    } while (0)
+
+#define MP_BC_PRELUDE_SIG_DECODE_INTO(ip, S, E, F, A, K, D)     \
+    do {                                                            \
+        uint8_t z = *(ip)++;                                        \
+        /* xSSSSEAA */                                              \
+        S = (z >> 3) & 0xf;                                         \
+        E = (z >> 2) & 0x1;                                         \
+        F = 0;                                                      \
+        A = z & 0x3;                                                \
+        K = 0;                                                      \
+        D = 0;                                                      \
+        for (unsigned n = 0; z & 0x80; ++n) {                       \
+            z = *(ip)++;                                            \
+            /* xFSSKAED */                                          \
+            S |= (z & 0x30) << (2 * n);                             \
+            E |= (z & 0x02) << n;                                   \
+            F |= ((z & 0x40) >> 6) << n;                            \
+            A |= (z & 0x4) << n;                                    \
+            K |= ((z & 0x08) >> 3) << n;                            \
+            D |= (z & 0x1) << n;                                    \
+        }                                                           \
+        S += 1;                                                     \
+    } while (0)
+
+#define MP_BC_PRELUDE_SIG_DECODE(ip) \
+    size_t n_state, n_exc_stack, scope_flags, n_pos_args, n_kwonly_args, n_def_pos_args; \
+    MP_BC_PRELUDE_SIG_DECODE_INTO(ip, n_state, n_exc_stack, scope_flags, n_pos_args, n_kwonly_args, n_def_pos_args); \
+    (void)n_state; (void)n_exc_stack; (void)scope_flags; \
+    (void)n_pos_args; (void)n_kwonly_args; (void)n_def_pos_args
+
+#define MP_BC_PRELUDE_SIZE_ENCODE(I, C, out_byte, out_env)      \
+    do {                                                            \
+        /* Encode bit-wise as: xIIIIIIC */                          \
+        uint8_t z = 0;                                              \
+        do {                                                        \
+            z = (I & 0x3f) << 1 | (C & 1);                          \
+            C >>= 1;                                                \
+            I >>= 6;                                                \
+            if (C | I) {                                            \
+                z |= 0x80;                                          \
+            }                                                       \
+            out_byte(out_env, z);                                   \
+        } while (C | I);                                            \
+    } while (0)
+
+#define MP_BC_PRELUDE_SIZE_DECODE_INTO(ip, I, C)                \
+    do {                                                            \
+        uint8_t z;                                                  \
+        C = 0;                                                      \
+        I = 0;                                                      \
+        for (unsigned n = 0;; ++n) {                                \
+            z = *(ip)++;                                            \
+            /* xIIIIIIC */                                          \
+            C |= (z & 1) << n;                                      \
+            I |= ((z & 0x7e) >> 1) << (6 * n);                      \
+            if (!(z & 0x80)) {                                      \
+                break;                                              \
+            }                                                       \
+        }                                                           \
+    } while (0)
+
+#define MP_BC_PRELUDE_SIZE_DECODE(ip) \
+    size_t n_info, n_cell; \
+    MP_BC_PRELUDE_SIZE_DECODE_INTO(ip, n_info, n_cell); \
+    (void)n_info; (void)n_cell
+
+// Sentinel value for mp_code_state_t.exc_sp_idx
+#define MP_CODE_STATE_EXC_SP_IDX_SENTINEL ((uint16_t)-1)
+
+// To convert mp_code_state_t.exc_sp_idx to/from a pointer to mp_exc_stack_t
+#define MP_CODE_STATE_EXC_SP_IDX_FROM_PTR(exc_stack, exc_sp) ((exc_sp) + 1 - (exc_stack))
+#define MP_CODE_STATE_EXC_SP_IDX_TO_PTR(exc_stack, exc_sp_idx) ((exc_stack) + (exc_sp_idx) - 1)
+
+typedef struct _mp_bytecode_prelude_t {
+    uint n_state;
+    uint n_exc_stack;
+    uint scope_flags;
+    uint n_pos_args;
+    uint n_kwonly_args;
+    uint n_def_pos_args;
+    qstr qstr_block_name_idx;
+    const byte *line_info;
+    const byte *line_info_top;
+    const byte *opcodes;
+} mp_bytecode_prelude_t;
+
+// Exception stack entry
+typedef struct _mp_exc_stack_t {
+    const byte *handler;
+    // bit 0 is currently unused
+    // bit 1 is whether the opcode was SETUP_WITH or SETUP_FINALLY
+    mp_obj_t *val_sp;
+    // Saved exception
+    mp_obj_base_t *prev_exc;
+} mp_exc_stack_t;
+
+// Constants associated with a module, to interface bytecode with runtime.
+typedef struct _mp_module_constants_t {
+    #if MICROPY_EMIT_BYTECODE_USES_QSTR_TABLE
+    qstr_short_t *qstr_table;
+    #else
+    qstr source_file;
+    #endif
+    mp_obj_t *obj_table;
+} mp_module_constants_t;
+
+// State associated with a module.
+typedef struct _mp_module_context_t {
+    mp_obj_module_t module;
+    mp_module_constants_t constants;
+} mp_module_context_t;
+
+// Outer level struct defining a compiled module.
+typedef struct _mp_compiled_module_t {
+    mp_module_context_t *context;
+    const struct _mp_raw_code_t *rc;
+    #if MICROPY_PERSISTENT_CODE_SAVE
+    bool has_native;
+    size_t n_qstr;
+    size_t n_obj;
+    #endif
+} mp_compiled_module_t;
+
+// Outer level struct defining a frozen module.
+typedef struct _mp_frozen_module_t {
+    const mp_module_constants_t constants;
+    const void *proto_fun;
+} mp_frozen_module_t;
+
+// State for an executing function.
+typedef struct _mp_code_state_t {
+    // The fun_bc entry points to the underlying function object that is being executed.
+    // It is needed to access the start of bytecode and the const_table.
+    // It is also needed to prevent the GC from reclaiming the bytecode during execution,
+    // because the ip pointer below will always point to the interior of the bytecode.
+    struct _mp_obj_fun_bc_t *fun_bc;
+    const byte *ip;
+    mp_obj_t *sp;
+    uint16_t n_state;
+    uint16_t exc_sp_idx;
+    mp_obj_dict_t *old_globals;
+    #if MICROPY_STACKLESS
+    struct _mp_code_state_t *prev;
+    #endif
+    #if MICROPY_PY_SYS_SETTRACE
+    struct _mp_code_state_t *prev_state;
+    struct _mp_obj_frame_t *frame;
+    #endif
+    // Variable-length
+    mp_obj_t state[0];
+    // Variable-length, never accessed by name, only as (void*)(state + n_state)
+    // mp_exc_stack_t exc_state[0];
+} mp_code_state_t;
+
+// State for an executing native function (based on mp_code_state_t).
+typedef struct _mp_code_state_native_t {
+    struct _mp_obj_fun_bc_t *fun_bc;
+    const byte *ip;
+    mp_obj_t *sp;
+    uint16_t n_state;
+    uint16_t exc_sp_idx;
+    mp_obj_dict_t *old_globals;
+    mp_obj_t state[0];
+} mp_code_state_native_t;
+
+// Allocator may return NULL, in which case data is not stored (can be used to compute size).
+typedef uint8_t *(*mp_encode_uint_allocator_t)(void *env, size_t nbytes);
+
+void mp_encode_uint(void *env, mp_encode_uint_allocator_t allocator, mp_uint_t val);
+mp_uint_t mp_decode_uint(const byte **ptr);
+mp_uint_t mp_decode_uint_value(const byte *ptr);
+const byte *mp_decode_uint_skip(const byte *ptr);
+
+mp_vm_return_kind_t mp_execute_bytecode(mp_code_state_t *code_state,
+#ifndef __cplusplus
+    volatile
+#endif
+    mp_obj_t inject_exc);
+mp_code_state_t *mp_obj_fun_bc_prepare_codestate(mp_obj_t func, size_t n_args, size_t n_kw, const mp_obj_t *args);
+void mp_setup_code_state(mp_code_state_t *code_state, size_t n_args, size_t n_kw, const mp_obj_t *args);
+void mp_setup_code_state_native(mp_code_state_native_t *code_state, size_t n_args, size_t n_kw, const mp_obj_t *args);
+void mp_bytecode_print(const mp_print_t *print, const struct _mp_raw_code_t *rc, size_t fun_data_len, const mp_module_constants_t *cm);
+void mp_bytecode_print2(const mp_print_t *print, const byte *ip, size_t len, struct _mp_raw_code_t *const *child_table, const mp_module_constants_t *cm);
+const byte *mp_bytecode_print_str(const mp_print_t *print, const byte *ip_start, const byte *ip, struct _mp_raw_code_t *const *child_table, const mp_module_constants_t *cm);
+#define mp_bytecode_print_inst(print, code, x_table) mp_bytecode_print2(print, code, 1, x_table)
+
+// Helper macros to access pointer with least significant bits holding flags
+#define MP_TAGPTR_PTR(x) ((void *)((uintptr_t)(x) & ~((uintptr_t)3)))
+#define MP_TAGPTR_TAG0(x) ((uintptr_t)(x) & 1)
+#define MP_TAGPTR_TAG1(x) ((uintptr_t)(x) & 2)
+#define MP_TAGPTR_MAKE(ptr, tag) ((void *)((uintptr_t)(ptr) | (tag)))
+
+static inline void mp_module_context_alloc_tables(mp_module_context_t *context, size_t n_qstr, size_t n_obj) {
+    #if MICROPY_EMIT_BYTECODE_USES_QSTR_TABLE
+    size_t nq = (n_qstr * sizeof(qstr_short_t) + sizeof(mp_uint_t) - 1) / sizeof(mp_uint_t);
+    size_t no = n_obj;
+    mp_uint_t *mem = m_new(mp_uint_t, nq + no);
+    context->constants.qstr_table = (qstr_short_t *)mem;
+    context->constants.obj_table = (mp_obj_t *)(mem + nq);
+    #else
+    if (n_obj == 0) {
+        context->constants.obj_table = NULL;
+    } else {
+        context->constants.obj_table = m_new(mp_obj_t, n_obj);
+    }
+    #endif
+}
+
+static inline size_t mp_bytecode_get_source_line(const byte *line_info, const byte *line_info_top, size_t bc_offset) {
+    size_t source_line = 1;
+    while (line_info < line_info_top) {
+        size_t c = *line_info;
+        size_t b, l;
+        if ((c & 0x80) == 0) {
+            // 0b0LLBBBBB encoding
+            b = c & 0x1f;
+            l = c >> 5;
+            line_info += 1;
+        } else {
+            // 0b1LLLBBBB 0bLLLLLLLL encoding (l's LSB in second byte)
+            b = c & 0xf;
+            l = ((c << 4) & 0x700) | line_info[1];
+            line_info += 2;
+        }
+        if (bc_offset >= b) {
+            bc_offset -= b;
+            source_line += l;
+        } else {
+            // found source line corresponding to bytecode offset
+            break;
+        }
+    }
+    return source_line;
+}
+
+#endif // MICROPY_INCLUDED_PY_BC_H

+ 162 - 0
mp_flipper/lib/micropython/py/bc0.h

@@ -0,0 +1,162 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_BC0_H
+#define MICROPY_INCLUDED_PY_BC0_H
+
+// MicroPython bytecode opcodes, grouped based on the format of the opcode
+
+// All opcodes are encoded as a byte with an optional argument.  Arguments are
+// variable-length encoded so they can be as small as possible.  The possible
+// encodings for arguments are (ip[0] is the opcode):
+//
+//  - unsigned relative bytecode offset:
+//      - if ip[1] high bit is clear then: arg = ip[1]
+//      - if ip[1] high bit is set then:   arg = ip[1] & 0x7f | ip[2] << 7
+//
+//  - signed relative bytecode offset:
+//      - if ip[1] high bit is clear then: arg = ip[1] - 0x40
+//      - if ip[1] high bit is set then:   arg = (ip[1] & 0x7f | ip[2] << 7) - 0x4000
+
+#define MP_BC_MASK_FORMAT                   (0xf0)
+#define MP_BC_MASK_EXTRA_BYTE               (0x9e)
+
+#define MP_BC_FORMAT_BYTE                   (0)
+#define MP_BC_FORMAT_QSTR                   (1)
+#define MP_BC_FORMAT_VAR_UINT               (2)
+#define MP_BC_FORMAT_OFFSET                 (3)
+
+// Nibbles in magic number are: BB BB BB BB BB BO VV QU
+#define MP_BC_FORMAT(op) ((0x000003a4 >> (2 * ((op) >> 4))) & 3)
+
+// Load, Store, Delete, Import, Make, Build, Unpack, Call, Jump, Exception, For, sTack, Return, Yield, Op
+#define MP_BC_BASE_RESERVED                 (0x00) // ----------------
+#define MP_BC_BASE_QSTR_O                   (0x10) // LLLLLLSSSDDII---
+#define MP_BC_BASE_VINT_E                   (0x20) // MMLLLLSSDDBBBBBB
+#define MP_BC_BASE_VINT_O                   (0x30) // UUMMCCCC--------
+#define MP_BC_BASE_JUMP_E                   (0x40) // J-JJJJJEEEEF----
+#define MP_BC_BASE_BYTE_O                   (0x50) // LLLLSSDTTTTTEEFF
+#define MP_BC_BASE_BYTE_E                   (0x60) // --BREEEYYI------
+#define MP_BC_LOAD_CONST_SMALL_INT_MULTI    (0x70) // LLLLLLLLLLLLLLLL
+//                                          (0x80) // LLLLLLLLLLLLLLLL
+//                                          (0x90) // LLLLLLLLLLLLLLLL
+//                                          (0xa0) // LLLLLLLLLLLLLLLL
+#define MP_BC_LOAD_FAST_MULTI               (0xb0) // LLLLLLLLLLLLLLLL
+#define MP_BC_STORE_FAST_MULTI              (0xc0) // SSSSSSSSSSSSSSSS
+#define MP_BC_UNARY_OP_MULTI                (0xd0) // OOOOOOO
+#define MP_BC_BINARY_OP_MULTI               (0xd7) //        OOOOOOOOO
+//                                          (0xe0) // OOOOOOOOOOOOOOOO
+//                                          (0xf0) // OOOOOOOOOO------
+
+#define MP_BC_LOAD_CONST_SMALL_INT_MULTI_NUM (64)
+#define MP_BC_LOAD_CONST_SMALL_INT_MULTI_EXCESS (16)
+#define MP_BC_LOAD_FAST_MULTI_NUM           (16)
+#define MP_BC_STORE_FAST_MULTI_NUM          (16)
+#define MP_BC_UNARY_OP_MULTI_NUM            (MP_UNARY_OP_NUM_BYTECODE)
+#define MP_BC_BINARY_OP_MULTI_NUM           (MP_BINARY_OP_NUM_BYTECODE)
+
+#define MP_BC_LOAD_CONST_FALSE              (MP_BC_BASE_BYTE_O + 0x00)
+#define MP_BC_LOAD_CONST_NONE               (MP_BC_BASE_BYTE_O + 0x01)
+#define MP_BC_LOAD_CONST_TRUE               (MP_BC_BASE_BYTE_O + 0x02)
+#define MP_BC_LOAD_CONST_SMALL_INT          (MP_BC_BASE_VINT_E + 0x02) // signed var-int
+#define MP_BC_LOAD_CONST_STRING             (MP_BC_BASE_QSTR_O + 0x00) // qstr
+#define MP_BC_LOAD_CONST_OBJ                (MP_BC_BASE_VINT_E + 0x03) // ptr
+#define MP_BC_LOAD_NULL                     (MP_BC_BASE_BYTE_O + 0x03)
+
+#define MP_BC_LOAD_FAST_N                   (MP_BC_BASE_VINT_E + 0x04) // uint
+#define MP_BC_LOAD_DEREF                    (MP_BC_BASE_VINT_E + 0x05) // uint
+#define MP_BC_LOAD_NAME                     (MP_BC_BASE_QSTR_O + 0x01) // qstr
+#define MP_BC_LOAD_GLOBAL                   (MP_BC_BASE_QSTR_O + 0x02) // qstr
+#define MP_BC_LOAD_ATTR                     (MP_BC_BASE_QSTR_O + 0x03) // qstr
+#define MP_BC_LOAD_METHOD                   (MP_BC_BASE_QSTR_O + 0x04) // qstr
+#define MP_BC_LOAD_SUPER_METHOD             (MP_BC_BASE_QSTR_O + 0x05) // qstr
+#define MP_BC_LOAD_BUILD_CLASS              (MP_BC_BASE_BYTE_O + 0x04)
+#define MP_BC_LOAD_SUBSCR                   (MP_BC_BASE_BYTE_O + 0x05)
+
+#define MP_BC_STORE_FAST_N                  (MP_BC_BASE_VINT_E + 0x06) // uint
+#define MP_BC_STORE_DEREF                   (MP_BC_BASE_VINT_E + 0x07) // uint
+#define MP_BC_STORE_NAME                    (MP_BC_BASE_QSTR_O + 0x06) // qstr
+#define MP_BC_STORE_GLOBAL                  (MP_BC_BASE_QSTR_O + 0x07) // qstr
+#define MP_BC_STORE_ATTR                    (MP_BC_BASE_QSTR_O + 0x08) // qstr
+#define MP_BC_STORE_SUBSCR                  (MP_BC_BASE_BYTE_O + 0x06)
+
+#define MP_BC_DELETE_FAST                   (MP_BC_BASE_VINT_E + 0x08) // uint
+#define MP_BC_DELETE_DEREF                  (MP_BC_BASE_VINT_E + 0x09) // uint
+#define MP_BC_DELETE_NAME                   (MP_BC_BASE_QSTR_O + 0x09) // qstr
+#define MP_BC_DELETE_GLOBAL                 (MP_BC_BASE_QSTR_O + 0x0a) // qstr
+
+#define MP_BC_DUP_TOP                       (MP_BC_BASE_BYTE_O + 0x07)
+#define MP_BC_DUP_TOP_TWO                   (MP_BC_BASE_BYTE_O + 0x08)
+#define MP_BC_POP_TOP                       (MP_BC_BASE_BYTE_O + 0x09)
+#define MP_BC_ROT_TWO                       (MP_BC_BASE_BYTE_O + 0x0a)
+#define MP_BC_ROT_THREE                     (MP_BC_BASE_BYTE_O + 0x0b)
+
+#define MP_BC_UNWIND_JUMP                   (MP_BC_BASE_JUMP_E + 0x00) // signed relative bytecode offset; then a byte
+#define MP_BC_JUMP                          (MP_BC_BASE_JUMP_E + 0x02) // signed relative bytecode offset
+#define MP_BC_POP_JUMP_IF_TRUE              (MP_BC_BASE_JUMP_E + 0x03) // signed relative bytecode offset
+#define MP_BC_POP_JUMP_IF_FALSE             (MP_BC_BASE_JUMP_E + 0x04) // signed relative bytecode offset
+#define MP_BC_JUMP_IF_TRUE_OR_POP           (MP_BC_BASE_JUMP_E + 0x05) // unsigned relative bytecode offset
+#define MP_BC_JUMP_IF_FALSE_OR_POP          (MP_BC_BASE_JUMP_E + 0x06) // unsigned relative bytecode offset
+#define MP_BC_SETUP_WITH                    (MP_BC_BASE_JUMP_E + 0x07) // unsigned relative bytecode offset
+#define MP_BC_SETUP_EXCEPT                  (MP_BC_BASE_JUMP_E + 0x08) // unsigned relative bytecode offset
+#define MP_BC_SETUP_FINALLY                 (MP_BC_BASE_JUMP_E + 0x09) // unsigned relative bytecode offset
+#define MP_BC_POP_EXCEPT_JUMP               (MP_BC_BASE_JUMP_E + 0x0a) // unsigned relative bytecode offset
+#define MP_BC_FOR_ITER                      (MP_BC_BASE_JUMP_E + 0x0b) // unsigned relative bytecode offset
+#define MP_BC_WITH_CLEANUP                  (MP_BC_BASE_BYTE_O + 0x0c)
+#define MP_BC_END_FINALLY                   (MP_BC_BASE_BYTE_O + 0x0d)
+#define MP_BC_GET_ITER                      (MP_BC_BASE_BYTE_O + 0x0e)
+#define MP_BC_GET_ITER_STACK                (MP_BC_BASE_BYTE_O + 0x0f)
+
+#define MP_BC_BUILD_TUPLE                   (MP_BC_BASE_VINT_E + 0x0a) // uint
+#define MP_BC_BUILD_LIST                    (MP_BC_BASE_VINT_E + 0x0b) // uint
+#define MP_BC_BUILD_MAP                     (MP_BC_BASE_VINT_E + 0x0c) // uint
+#define MP_BC_STORE_MAP                     (MP_BC_BASE_BYTE_E + 0x02)
+#define MP_BC_BUILD_SET                     (MP_BC_BASE_VINT_E + 0x0d) // uint
+#define MP_BC_BUILD_SLICE                   (MP_BC_BASE_VINT_E + 0x0e) // uint
+#define MP_BC_STORE_COMP                    (MP_BC_BASE_VINT_E + 0x0f) // uint
+#define MP_BC_UNPACK_SEQUENCE               (MP_BC_BASE_VINT_O + 0x00) // uint
+#define MP_BC_UNPACK_EX                     (MP_BC_BASE_VINT_O + 0x01) // uint
+
+#define MP_BC_RETURN_VALUE                  (MP_BC_BASE_BYTE_E + 0x03)
+#define MP_BC_RAISE_LAST                    (MP_BC_BASE_BYTE_E + 0x04)
+#define MP_BC_RAISE_OBJ                     (MP_BC_BASE_BYTE_E + 0x05)
+#define MP_BC_RAISE_FROM                    (MP_BC_BASE_BYTE_E + 0x06)
+#define MP_BC_YIELD_VALUE                   (MP_BC_BASE_BYTE_E + 0x07)
+#define MP_BC_YIELD_FROM                    (MP_BC_BASE_BYTE_E + 0x08)
+
+#define MP_BC_MAKE_FUNCTION                 (MP_BC_BASE_VINT_O + 0x02) // uint
+#define MP_BC_MAKE_FUNCTION_DEFARGS         (MP_BC_BASE_VINT_O + 0x03) // uint
+#define MP_BC_MAKE_CLOSURE                  (MP_BC_BASE_VINT_E + 0x00) // uint; extra byte
+#define MP_BC_MAKE_CLOSURE_DEFARGS          (MP_BC_BASE_VINT_E + 0x01) // uint; extra byte
+#define MP_BC_CALL_FUNCTION                 (MP_BC_BASE_VINT_O + 0x04) // uint
+#define MP_BC_CALL_FUNCTION_VAR_KW          (MP_BC_BASE_VINT_O + 0x05) // uint
+#define MP_BC_CALL_METHOD                   (MP_BC_BASE_VINT_O + 0x06) // uint
+#define MP_BC_CALL_METHOD_VAR_KW            (MP_BC_BASE_VINT_O + 0x07) // uint
+
+#define MP_BC_IMPORT_NAME                   (MP_BC_BASE_QSTR_O + 0x0b) // qstr
+#define MP_BC_IMPORT_FROM                   (MP_BC_BASE_QSTR_O + 0x0c) // qstr
+#define MP_BC_IMPORT_STAR                   (MP_BC_BASE_BYTE_E + 0x09)
+
+#endif // MICROPY_INCLUDED_PY_BC0_H

+ 542 - 0
mp_flipper/lib/micropython/py/binary.c

@@ -0,0 +1,542 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2014-2017 Paul Sokolovsky
+ * Copyright (c) 2014-2019 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stddef.h>
+#include <string.h>
+#include <assert.h>
+
+#include "py/binary.h"
+#include "py/smallint.h"
+#include "py/objint.h"
+#include "py/runtime.h"
+
+// Helpers to work with binary-encoded data
+
+#ifndef alignof
+#define alignof(type) offsetof(struct { char c; type t; }, t)
+#endif
+
// Return the size in bytes of one value with format character val_type under
// the struct-format byte-order/alignment character struct_type.  For the
// packed formats '<' (little-endian) and '>' (big-endian) the standard sizes
// are used and alignment is always 1; for '@' (native) the machine's own
// sizes and natural alignments apply.  Raises ValueError for an unknown
// typecode (any other struct_type also falls through with size 0 and
// raises).  If palign is not NULL it receives the required alignment.
size_t mp_binary_get_size(char struct_type, char val_type, size_t *palign) {
    size_t size = 0;
    int align = 1;
    switch (struct_type) {
        case '<':
        case '>':
            // Packed standard sizes, alignment stays 1.
            switch (val_type) {
                case 'b':
                case 'B':
                    size = 1;
                    break;
                case 'h':
                case 'H':
                    size = 2;
                    break;
                case 'i':
                case 'I':
                    size = 4;
                    break;
                case 'l':
                case 'L':
                    size = 4;
                    break;
                case 'q':
                case 'Q':
                    size = 8;
                    break;
                case 'P':
                case 'O':
                case 'S':
                    size = sizeof(void *);
                    break;
                case 'e':
                    size = 2;
                    break;
                case 'f':
                    size = 4;
                    break;
                case 'd':
                    size = 8;
                    break;
            }
            break;
        case '@': {
            // TODO:
            // The simplest heuristic for alignment is to align by value
            // size, but that doesn't work for "bigger than int" types,
            // for example, long long may very well have long alignment
            // So, we introduce separate alignment handling, but having
            // formal support for that is different from actually supporting
            // particular (or any) ABI.
            switch (val_type) {
                case BYTEARRAY_TYPECODE:
                case 'b':
                case 'B':
                    align = size = 1;
                    break;
                case 'h':
                case 'H':
                    align = alignof(short);
                    size = sizeof(short);
                    break;
                case 'i':
                case 'I':
                    align = alignof(int);
                    size = sizeof(int);
                    break;
                case 'l':
                case 'L':
                    align = alignof(long);
                    size = sizeof(long);
                    break;
                case 'q':
                case 'Q':
                    align = alignof(long long);
                    size = sizeof(long long);
                    break;
                case 'P':
                case 'O':
                case 'S':
                    align = alignof(void *);
                    size = sizeof(void *);
                    break;
                case 'e':
                    // Half-float: 2 bytes, 2-byte aligned.
                    align = 2;
                    size = 2;
                    break;
                case 'f':
                    align = alignof(float);
                    size = sizeof(float);
                    break;
                case 'd':
                    align = alignof(double);
                    size = sizeof(double);
                    break;
            }
        }
    }

    // size == 0 means the typecode (or struct_type) was not recognised.
    if (size == 0) {
        mp_raise_ValueError(MP_ERROR_TEXT("bad typecode"));
    }

    if (palign != NULL) {
        *palign = align;
    }
    return size;
}
+
+#if MICROPY_PY_BUILTINS_FLOAT && MICROPY_FLOAT_USE_NATIVE_FLT16
+
// Reinterpret the raw bits of an IEEE-754 binary16 value as the compiler's
// native _Float16 type and widen it to float.
static inline float mp_decode_half_float(uint16_t hf) {
    union {
        uint16_t i;
        _Float16 f;
    } fpu = { .i = hf };
    return fpu.f;
}

// Narrow a float to the native _Float16 type and return its raw bits.
static inline uint16_t mp_encode_half_float(float x) {
    union {
        uint16_t i;
        _Float16 f;
    } fp_sp = { .f = (_Float16)x };
    return fp_sp.i;
}
+
+#elif MICROPY_PY_BUILTINS_FLOAT
+
// Software conversion of an IEEE-754 binary16 value (given as its 16 raw
// bits) to a float.  Handles normal, subnormal, zero and infinity/NaN
// encodings.
static float mp_decode_half_float(uint16_t hf) {
    union {
        uint32_t i;
        float f;
    } fpu;

    uint16_t m = hf & 0x3ff;
    int e = (hf >> 10) & 0x1f;
    if (e == 0x1f) {
        // Half-float is infinity (or NaN): map to the float inf/nan exponent.
        e = 0xff;
    } else if (e) {
        // Half-float is normal: re-bias the exponent (float bias 127, half bias 15).
        e += 127 - 15;
    } else if (m) {
        // Half-float is subnormal, make it normal.
        e = 127 - 15;
        while (!(m & 0x400)) {
            m <<= 1;
            --e;
        }
        m -= 0x400;
        ++e;
    }

    // Assemble sign | exponent | mantissa.  The sign term must be widened to
    // uint32_t before shifting: (hf & 0x8000) << 16 on the promoted int would
    // shift a 1 into the sign bit, which is undefined behaviour in C.
    fpu.i = ((uint32_t)(hf & 0x8000) << 16) | ((uint32_t)e << 23) | ((uint32_t)m << 13);
    return fpu.f;
}
+
// Software conversion of a float to its IEEE-754 binary16 bit pattern, with
// rounding on the dropped mantissa bit.  Magnitudes too large for binary16
// saturate to infinity; tiny magnitudes denormalize or flush to zero.
static uint16_t mp_encode_half_float(float x) {
    union {
        uint32_t i;
        float f;
    } fpu = { .f = x };

    uint16_t m = (fpu.i >> 13) & 0x3ff;
    if (fpu.i & (1 << 12)) {
        // Round up.
        ++m;
    }
    int e = (fpu.i >> 23) & 0xff;

    if (e == 0xff) {
        // Infinity (or NaN, whose truncated payload is kept in m).
        e = 0x1f;
    } else if (e != 0) {
        e -= 127 - 15;
        if (e < 0) {
            // Underflow: denormalized, or zero.
            if (e >= -11) {
                m = (m | 0x400) >> -e;
                if (m & 1) {
                    m = (m >> 1) + 1;
                } else {
                    m >>= 1;
                }
            } else {
                m = 0;
            }
            e = 0;
        } else if (e >= 0x1f) {
            // Overflow: infinity.  The binary16 exponent field is only 5 bits
            // wide, so any re-biased exponent of 0x1f or more is out of range
            // and must saturate here; a larger bound would let e << 10 below
            // spill into the sign bit of the result.
            e = 0x1f;
            m = 0;
        }
    }

    uint16_t bits = ((fpu.i >> 16) & 0x8000) | (e << 10) | m;
    return bits;
}
+
+#endif
+
// Return element `index` of array `p` as a MicroPython object, decoding the
// memory according to `typecode`.  Narrow integer types accumulate into
// `val` and are returned as a small int; wider types return a boxed
// int/float object directly.
mp_obj_t mp_binary_get_val_array(char typecode, void *p, size_t index) {
    mp_int_t val = 0;
    switch (typecode) {
        case 'b':
            val = ((signed char *)p)[index];
            break;
        case BYTEARRAY_TYPECODE:
        case 'B':
            val = ((unsigned char *)p)[index];
            break;
        case 'h':
            val = ((short *)p)[index];
            break;
        case 'H':
            val = ((unsigned short *)p)[index];
            break;
        case 'i':
            return mp_obj_new_int(((int *)p)[index]);
        case 'I':
            return mp_obj_new_int_from_uint(((unsigned int *)p)[index]);
        case 'l':
            return mp_obj_new_int(((long *)p)[index]);
        case 'L':
            return mp_obj_new_int_from_uint(((unsigned long *)p)[index]);
        #if MICROPY_LONGINT_IMPL != MICROPY_LONGINT_IMPL_NONE
        case 'q':
            return mp_obj_new_int_from_ll(((long long *)p)[index]);
        case 'Q':
            return mp_obj_new_int_from_ull(((unsigned long long *)p)[index]);
        #endif
        #if MICROPY_PY_BUILTINS_FLOAT
        case 'f':
            return mp_obj_new_float_from_f(((float *)p)[index]);
        case 'd':
            return mp_obj_new_float_from_d(((double *)p)[index]);
        #endif
        // Extension to CPython: array of objects
        case 'O':
            return ((mp_obj_t *)p)[index];
        // Extension to CPython: array of pointers
        case 'P':
            return mp_obj_new_int((mp_int_t)(uintptr_t)((void **)p)[index]);
    }
    return MP_OBJ_NEW_SMALL_INT(val);
}
+
+// The long long type is guaranteed to hold at least 64 bits, and size is at
+// most 8 (for q and Q), so we will always be able to parse the given data
+// and fit it into a long long.
+long long mp_binary_get_int(size_t size, bool is_signed, bool big_endian, const byte *src) {
+    int delta;
+    if (!big_endian) {
+        delta = -1;
+        src += size - 1;
+    } else {
+        delta = 1;
+    }
+
+    unsigned long long val = 0;
+    if (is_signed && *src & 0x80) {
+        val = -1;
+    }
+    for (uint i = 0; i < size; i++) {
+        val <<= 8;
+        val |= *src;
+        src += delta;
+    }
+
+    return val;
+}
+
// Signed typecodes are lower-case letters, which sort after 'Z' in ASCII.
#define is_signed(typecode) (typecode > 'Z')
// Decode one value of format val_type starting at *ptr, returning it as a
// MicroPython object and advancing *ptr past the decoded bytes.  For the
// native format '@' the cursor is first aligned relative to p_base and the
// host byte order is used.
mp_obj_t mp_binary_get_val(char struct_type, char val_type, byte *p_base, byte **ptr) {
    byte *p = *ptr;
    size_t align;

    size_t size = mp_binary_get_size(struct_type, val_type, &align);
    if (struct_type == '@') {
        // Align p relative to p_base
        p = p_base + (uintptr_t)MP_ALIGN(p - p_base, align);
        #if MP_ENDIANNESS_LITTLE
        struct_type = '<';
        #else
        struct_type = '>';
        #endif
    }
    *ptr = p + size;

    long long val = mp_binary_get_int(size, is_signed(val_type), (struct_type == '>'), p);

    if (val_type == 'O') {
        // Stored value is an object reference.
        return (mp_obj_t)(mp_uint_t)val;
    } else if (val_type == 'S') {
        // Stored value is a pointer to a NUL-terminated C string.
        const char *s_val = (const char *)(uintptr_t)(mp_uint_t)val;
        return mp_obj_new_str(s_val, strlen(s_val));
    #if MICROPY_PY_BUILTINS_FLOAT
    } else if (val_type == 'e') {
        return mp_obj_new_float_from_f(mp_decode_half_float(val));
    } else if (val_type == 'f') {
        union {
            uint32_t i;
            float f;
        } fpu = {val};
        return mp_obj_new_float_from_f(fpu.f);
    } else if (val_type == 'd') {
        union {
            uint64_t i;
            double f;
        } fpu = {val};
        return mp_obj_new_float_from_d(fpu.f);
    #endif
    } else if (is_signed(val_type)) {
        // Prefer a small int when the value fits, otherwise box it.
        if ((long long)MP_SMALL_INT_MIN <= val && val <= (long long)MP_SMALL_INT_MAX) {
            return mp_obj_new_int((mp_int_t)val);
        } else {
            return mp_obj_new_int_from_ll(val);
        }
    } else {
        if ((unsigned long long)val <= (unsigned long long)MP_SMALL_INT_MAX) {
            return mp_obj_new_int_from_uint((mp_uint_t)val);
        } else {
            return mp_obj_new_int_from_ull(val);
        }
    }
}
+
// Store the least-significant val_sz bytes of val into dest using the given
// byte order.
void mp_binary_set_int(size_t val_sz, bool big_endian, byte *dest, mp_uint_t val) {
    if (MP_ENDIANNESS_LITTLE && !big_endian) {
        // Target order matches a little-endian host: direct copy.
        memcpy(dest, &val, val_sz);
    } else if (MP_ENDIANNESS_BIG && big_endian) {
        // only copy the least-significant val_sz bytes
        memcpy(dest, (byte *)&val + sizeof(mp_uint_t) - val_sz, val_sz);
    } else {
        // Target order differs from the host's: copy with a byte-wise swap,
        // starting just past the least significant byte of val.
        const byte *src;
        if (MP_ENDIANNESS_LITTLE) {
            src = (const byte *)&val + val_sz;
        } else {
            src = (const byte *)&val + sizeof(mp_uint_t);
        }
        while (val_sz--) {
            *dest++ = *--src;
        }
    }
}
+
// Encode val_in at *ptr according to struct_type/val_type, advancing *ptr
// past the stored bytes.  For the native format '@' the destination is first
// aligned relative to p_base and the host byte order is used.
void mp_binary_set_val(char struct_type, char val_type, mp_obj_t val_in, byte *p_base, byte **ptr) {
    byte *p = *ptr;
    size_t align;

    size_t size = mp_binary_get_size(struct_type, val_type, &align);
    if (struct_type == '@') {
        // Align p relative to p_base
        p = p_base + (uintptr_t)MP_ALIGN(p - p_base, align);
        if (MP_ENDIANNESS_LITTLE) {
            struct_type = '<';
        } else {
            struct_type = '>';
        }
    }
    *ptr = p + size;

    // Reduce val_in to a raw machine word; wider cases store part of the
    // value themselves before falling through to the final store below.
    mp_uint_t val;
    switch (val_type) {
        case 'O':
            val = (mp_uint_t)val_in;
            break;
        #if MICROPY_PY_BUILTINS_FLOAT
        case 'e':
            val = mp_encode_half_float(mp_obj_get_float_to_f(val_in));
            break;
        case 'f': {
            union {
                uint32_t i;
                float f;
            } fp_sp;
            fp_sp.f = mp_obj_get_float_to_f(val_in);
            val = fp_sp.i;
            break;
        }
        case 'd': {
            union {
                uint64_t i64;
                uint32_t i32[2];
                double f;
            } fp_dp;
            fp_dp.f = mp_obj_get_float_to_d(val_in);
            if (MP_BYTES_PER_OBJ_WORD == 8) {
                val = fp_dp.i64;
            } else {
                // 32-bit machine word: store one half of the double directly
                // here; the other half is stored by the final call below.
                int be = struct_type == '>';
                mp_binary_set_int(sizeof(uint32_t), be, p, fp_dp.i32[MP_ENDIANNESS_BIG ^ be]);
                p += sizeof(uint32_t);
                val = fp_dp.i32[MP_ENDIANNESS_LITTLE ^ be];
            }
            break;
        }
        #endif
        default:
            #if MICROPY_LONGINT_IMPL != MICROPY_LONGINT_IMPL_NONE
            // Big ints serialise themselves to the full width.
            if (mp_obj_is_exact_type(val_in, &mp_type_int)) {
                mp_obj_int_to_bytes_impl(val_in, struct_type == '>', size, p);
                return;
            }
            #endif

            val = mp_obj_get_int(val_in);
            // zero/sign extend if needed
            if (MP_BYTES_PER_OBJ_WORD < 8 && size > sizeof(val)) {
                int c = (mp_int_t)val < 0 ? 0xff : 0x00;
                memset(p, c, size);
                if (struct_type == '>') {
                    p += size - sizeof(val);
                }
            }
            break;
    }

    mp_binary_set_int(MIN((size_t)size, sizeof(val)), struct_type == '>', p, val);
}
+
// Store the object val_in into element `index` of array `p` according to
// `typecode`.  Floats and object references are handled here; all other
// typecodes convert to an integer and delegate to
// mp_binary_set_val_array_from_int.
void mp_binary_set_val_array(char typecode, void *p, size_t index, mp_obj_t val_in) {
    switch (typecode) {
        #if MICROPY_PY_BUILTINS_FLOAT
        case 'f':
            ((float *)p)[index] = mp_obj_get_float_to_f(val_in);
            break;
        case 'd':
            ((double *)p)[index] = mp_obj_get_float_to_d(val_in);
            break;
        #endif
        // Extension to CPython: array of objects
        case 'O':
            ((mp_obj_t *)p)[index] = val_in;
            break;
        default:
            #if MICROPY_LONGINT_IMPL != MICROPY_LONGINT_IMPL_NONE
            // Big ints serialise themselves into the element slot.
            if (mp_obj_is_exact_type(val_in, &mp_type_int)) {
                size_t size = mp_binary_get_size('@', typecode, NULL);
                mp_obj_int_to_bytes_impl(val_in, MP_ENDIANNESS_BIG,
                    size, (uint8_t *)p + index * size);
                return;
            }
            #endif
            mp_binary_set_val_array_from_int(typecode, p, index, mp_obj_get_int(val_in));
    }
}
+
// Store the plain integer val into element `index` of array `p`, converting
// it to the C element type selected by `typecode`.
void mp_binary_set_val_array_from_int(char typecode, void *p, size_t index, mp_int_t val) {
    switch (typecode) {
        case 'b':
            ((signed char *)p)[index] = val;
            break;
        case BYTEARRAY_TYPECODE:
        case 'B':
            ((unsigned char *)p)[index] = val;
            break;
        case 'h':
            ((short *)p)[index] = val;
            break;
        case 'H':
            ((unsigned short *)p)[index] = val;
            break;
        case 'i':
            ((int *)p)[index] = val;
            break;
        case 'I':
            ((unsigned int *)p)[index] = val;
            break;
        case 'l':
            ((long *)p)[index] = val;
            break;
        case 'L':
            ((unsigned long *)p)[index] = val;
            break;
        #if MICROPY_LONGINT_IMPL != MICROPY_LONGINT_IMPL_NONE
        case 'q':
            ((long long *)p)[index] = val;
            break;
        case 'Q':
            ((unsigned long long *)p)[index] = val;
            break;
        #endif
        #if MICROPY_PY_BUILTINS_FLOAT
        case 'f':
            ((float *)p)[index] = (float)val;
            break;
        case 'd':
            ((double *)p)[index] = (double)val;
            break;
        #endif
        // Extension to CPython: array of pointers
        case 'P':
            ((void **)p)[index] = (void *)(uintptr_t)val;
            break;
    }
}

+ 46 - 0
mp_flipper/lib/micropython/py/binary.h

@@ -0,0 +1,46 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2014 Paul Sokolovsky
+ * Copyright (c) 2014-2017 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
#ifndef MICROPY_INCLUDED_PY_BINARY_H
#define MICROPY_INCLUDED_PY_BINARY_H

#include "py/obj.h"

// Use special typecode to differentiate repr() of bytearray vs array.array('B')
// (underlyingly they're same).  Can't use 0 here because that's used to detect
// type-specification errors due to end-of-string.
#define BYTEARRAY_TYPECODE 1

// Size (and, via palign, alignment) of one element for a struct/array typecode.
size_t mp_binary_get_size(char struct_type, char val_type, size_t *palign);
// Get/set array elements by index.
mp_obj_t mp_binary_get_val_array(char typecode, void *p, size_t index);
void mp_binary_set_val_array(char typecode, void *p, size_t index, mp_obj_t val_in);
void mp_binary_set_val_array_from_int(char typecode, void *p, size_t index, mp_int_t val);
// Get/set struct-format values at a cursor; *ptr is advanced past the value.
mp_obj_t mp_binary_get_val(char struct_type, char val_type, byte *p_base, byte **ptr);
void mp_binary_set_val(char struct_type, char val_type, mp_obj_t val_in, byte *p_base, byte **ptr);
// Raw fixed-size integer load/store with explicit byte order.
long long mp_binary_get_int(size_t size, bool is_signed, bool big_endian, const byte *src);
void mp_binary_set_int(size_t val_sz, bool big_endian, byte *dest, mp_uint_t val);

#endif // MICROPY_INCLUDED_PY_BINARY_H

+ 142 - 0
mp_flipper/lib/micropython/py/builtin.h

@@ -0,0 +1,142 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_BUILTIN_H
+#define MICROPY_INCLUDED_PY_BUILTIN_H
+
+#include "py/obj.h"
+
+// Result of stat'ing an import target: it does not exist, it is a directory
+// (a package), or it is a file (a module).  Returned by mp_import_stat() and
+// consumed by the import machinery (see py/builtinimport.c).
+typedef enum {
+    MP_IMPORT_STAT_NO_EXIST,
+    MP_IMPORT_STAT_DIR,
+    MP_IMPORT_STAT_FILE,
+} mp_import_stat_t;
+
+#if MICROPY_VFS
+
+// Delegate to the VFS for import stat and builtin open.
+
+#define mp_builtin_open_obj mp_vfs_open_obj
+
+mp_import_stat_t mp_vfs_import_stat(const char *path);
+mp_obj_t mp_vfs_open(size_t n_args, const mp_obj_t *args, mp_map_t *kwargs);
+
+MP_DECLARE_CONST_FUN_OBJ_KW(mp_vfs_open_obj);
+
+// When MICROPY_VFS is enabled, import stat and builtin open() simply delegate
+// to the VFS layer (see the declarations above).
+static inline mp_import_stat_t mp_import_stat(const char *path) {
+    return mp_vfs_import_stat(path);
+}
+
+static inline mp_obj_t mp_builtin_open(size_t n_args, const mp_obj_t *args, mp_map_t *kwargs) {
+    return mp_vfs_open(n_args, args, kwargs);
+}
+
+#else
+
+// A port can provide implementations of these functions.
+mp_import_stat_t mp_import_stat(const char *path);
+mp_obj_t mp_builtin_open(size_t n_args, const mp_obj_t *args, mp_map_t *kwargs);
+
+// A port can provide this object.
+MP_DECLARE_CONST_FUN_OBJ_KW(mp_builtin_open_obj);
+
+#endif
+
+// A port can provide its own import handler by defining mp_builtin___import__.
+#ifndef mp_builtin___import__
+#define mp_builtin___import__ mp_builtin___import___default
+#endif
+mp_obj_t mp_builtin___import__(size_t n_args, const mp_obj_t *args);
+mp_obj_t mp_builtin___import___default(size_t n_args, const mp_obj_t *args);
+
+mp_obj_t mp_micropython_mem_info(size_t n_args, const mp_obj_t *args);
+
+MP_DECLARE_CONST_FUN_OBJ_VAR(mp_builtin___build_class___obj);
+MP_DECLARE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin___import___obj);
+MP_DECLARE_CONST_FUN_OBJ_1(mp_builtin___repl_print___obj);
+MP_DECLARE_CONST_FUN_OBJ_1(mp_builtin_abs_obj);
+MP_DECLARE_CONST_FUN_OBJ_1(mp_builtin_all_obj);
+MP_DECLARE_CONST_FUN_OBJ_1(mp_builtin_any_obj);
+MP_DECLARE_CONST_FUN_OBJ_1(mp_builtin_bin_obj);
+MP_DECLARE_CONST_FUN_OBJ_1(mp_builtin_callable_obj);
+MP_DECLARE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin_compile_obj);
+MP_DECLARE_CONST_FUN_OBJ_1(mp_builtin_chr_obj);
+MP_DECLARE_CONST_FUN_OBJ_2(mp_builtin_delattr_obj);
+MP_DECLARE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin_dir_obj);
+MP_DECLARE_CONST_FUN_OBJ_2(mp_builtin_divmod_obj);
+MP_DECLARE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin_eval_obj);
+MP_DECLARE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin_exec_obj);
+MP_DECLARE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin_execfile_obj);
+MP_DECLARE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin_getattr_obj);
+MP_DECLARE_CONST_FUN_OBJ_3(mp_builtin_setattr_obj);
+MP_DECLARE_CONST_FUN_OBJ_0(mp_builtin_globals_obj);
+MP_DECLARE_CONST_FUN_OBJ_2(mp_builtin_hasattr_obj);
+MP_DECLARE_CONST_FUN_OBJ_1(mp_builtin_hash_obj);
+MP_DECLARE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin_help_obj);
+MP_DECLARE_CONST_FUN_OBJ_1(mp_builtin_hex_obj);
+MP_DECLARE_CONST_FUN_OBJ_1(mp_builtin_id_obj);
+MP_DECLARE_CONST_FUN_OBJ_2(mp_builtin_isinstance_obj);
+MP_DECLARE_CONST_FUN_OBJ_2(mp_builtin_issubclass_obj);
+MP_DECLARE_CONST_FUN_OBJ_1(mp_builtin_iter_obj);
+MP_DECLARE_CONST_FUN_OBJ_1(mp_builtin_len_obj);
+MP_DECLARE_CONST_FUN_OBJ_0(mp_builtin_locals_obj);
+MP_DECLARE_CONST_FUN_OBJ_KW(mp_builtin_max_obj);
+MP_DECLARE_CONST_FUN_OBJ_KW(mp_builtin_min_obj);
+#if MICROPY_PY_BUILTINS_NEXT2
+MP_DECLARE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin_next_obj);
+#else
+MP_DECLARE_CONST_FUN_OBJ_1(mp_builtin_next_obj);
+#endif
+MP_DECLARE_CONST_FUN_OBJ_1(mp_builtin_oct_obj);
+MP_DECLARE_CONST_FUN_OBJ_1(mp_builtin_ord_obj);
+MP_DECLARE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin_pow_obj);
+MP_DECLARE_CONST_FUN_OBJ_KW(mp_builtin_print_obj);
+MP_DECLARE_CONST_FUN_OBJ_1(mp_builtin_repr_obj);
+MP_DECLARE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin_round_obj);
+MP_DECLARE_CONST_FUN_OBJ_KW(mp_builtin_sorted_obj);
+MP_DECLARE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin_sum_obj);
+MP_DECLARE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin_input_obj);
+
+MP_DECLARE_CONST_FUN_OBJ_2(mp_namedtuple_obj);
+
+MP_DECLARE_CONST_FUN_OBJ_2(mp_op_contains_obj);
+MP_DECLARE_CONST_FUN_OBJ_2(mp_op_getitem_obj);
+MP_DECLARE_CONST_FUN_OBJ_3(mp_op_setitem_obj);
+MP_DECLARE_CONST_FUN_OBJ_2(mp_op_delitem_obj);
+
+// Modules needed by the runtime.
+extern const mp_obj_dict_t mp_module_builtins_globals;
+extern const mp_obj_module_t mp_module___main__;
+extern const mp_obj_module_t mp_module_builtins;
+extern const mp_obj_module_t mp_module_sys;
+
+// Modules needed by the parser when MICROPY_COMP_MODULE_CONST is enabled.
+extern const mp_obj_module_t mp_module_errno;
+extern const mp_obj_module_t mp_module_uctypes;
+extern const mp_obj_module_t mp_module_machine;
+
+extern const char MICROPY_PY_BUILTINS_HELP_TEXT[];
+
+#endif // MICROPY_INCLUDED_PY_BUILTIN_H

+ 179 - 0
mp_flipper/lib/micropython/py/builtinevex.c

@@ -0,0 +1,179 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdint.h>
+
+#include "py/objfun.h"
+#include "py/compile.h"
+#include "py/runtime.h"
+#include "py/builtin.h"
+
+#if MICROPY_PY_BUILTINS_COMPILE
+
+// A `code` object as returned by builtin compile(): it wraps the function
+// object produced by compiling the source, for later execution by
+// eval()/exec() via code_execute() below.
+typedef struct _mp_obj_code_t {
+    mp_obj_base_t base;
+    mp_obj_t module_fun;
+} mp_obj_code_t;
+
+static MP_DEFINE_CONST_OBJ_TYPE(
+    mp_type_code,
+    MP_QSTR_code,
+    MP_TYPE_FLAG_NONE
+    );
+
+// Execute a compiled code object under the given globals/locals dicts.
+// The caller's globals/locals are saved first and restored afterwards, also
+// when an exception propagates (via the NLR jump callback).
+static mp_obj_t code_execute(mp_obj_code_t *self, mp_obj_dict_t *globals, mp_obj_dict_t *locals) {
+    // save context
+    nlr_jump_callback_node_globals_locals_t ctx;
+    ctx.globals = mp_globals_get();
+    ctx.locals = mp_locals_get();
+
+    // set new context
+    mp_globals_set(globals);
+    mp_locals_set(locals);
+
+    // set exception handler to restore context if an exception is raised
+    nlr_push_jump_callback(&ctx.callback, mp_globals_locals_set_from_nlr_jump_callback);
+
+    // The call to mp_parse_compile_execute() in mp_builtin_compile() below passes
+    // NULL for the globals, so repopulate that entry now with the correct globals.
+    if (mp_obj_is_type(self->module_fun, &mp_type_fun_bc)
+        #if MICROPY_EMIT_NATIVE
+        || mp_obj_is_type(self->module_fun, &mp_type_fun_native)
+        #endif
+        ) {
+        mp_obj_fun_bc_t *fun_bc = MP_OBJ_TO_PTR(self->module_fun);
+        ((mp_module_context_t *)fun_bc->context)->module.globals = globals;
+    }
+
+    // execute code
+    mp_obj_t ret = mp_call_function_0(self->module_fun);
+
+    // deregister exception handler and restore context
+    nlr_pop_jump_callback(true);
+
+    // return value
+    return ret;
+}
+
+// Python: compile(source, filename, mode) -> code object.
+// Only the first three positional arguments are used (note `(void)n_args`);
+// the 3..6 arity declared below accepts CPython's extra arguments
+// (flags/dont_inherit/optimize, per CPython's compile()) but ignores them.
+static mp_obj_t mp_builtin_compile(size_t n_args, const mp_obj_t *args) {
+    (void)n_args;
+
+    // get the source
+    size_t str_len;
+    const char *str = mp_obj_str_get_data(args[0], &str_len);
+
+    // get the filename
+    qstr filename = mp_obj_str_get_qstr(args[1]);
+
+    // create the lexer
+    mp_lexer_t *lex = mp_lexer_new_from_str_len(filename, str, str_len, 0);
+
+    // get the compile mode
+    qstr mode = mp_obj_str_get_qstr(args[2]);
+    mp_parse_input_kind_t parse_input_kind;
+    switch (mode) {
+        case MP_QSTR_single:
+            parse_input_kind = MP_PARSE_SINGLE_INPUT;
+            break;
+        case MP_QSTR_exec:
+            parse_input_kind = MP_PARSE_FILE_INPUT;
+            break;
+        case MP_QSTR_eval:
+            parse_input_kind = MP_PARSE_EVAL_INPUT;
+            break;
+        default:
+            mp_raise_ValueError(MP_ERROR_TEXT("bad compile mode"));
+    }
+
+    // NOTE: compiled with NULL globals; code_execute() patches in the real
+    // globals at execution time (see comment there).
+    mp_obj_code_t *code = mp_obj_malloc(mp_obj_code_t, &mp_type_code);
+    code->module_fun = mp_parse_compile_execute(lex, parse_input_kind, NULL, NULL);
+    return MP_OBJ_FROM_PTR(code);
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin_compile_obj, 3, 6, mp_builtin_compile);
+
+#endif // MICROPY_PY_BUILTINS_COMPILE
+
+#if MICROPY_PY_BUILTINS_EVAL_EXEC
+
+// Shared implementation for eval()/exec()/execfile().
+// args[0] is the source (a str/bytes buffer, or a code object when compile()
+// is enabled); optional args[1]/args[2] are the globals/locals dicts.  When
+// only globals is given it is used for locals as well.
+static mp_obj_t eval_exec_helper(size_t n_args, const mp_obj_t *args, mp_parse_input_kind_t parse_input_kind) {
+    // work out the context
+    mp_obj_dict_t *globals = mp_globals_get();
+    mp_obj_dict_t *locals = mp_locals_get();
+    for (size_t i = 1; i < 3 && i < n_args; ++i) {
+        if (args[i] != mp_const_none) {
+            if (!mp_obj_is_type(args[i], &mp_type_dict)) {
+                mp_raise_TypeError(NULL);
+            }
+            locals = MP_OBJ_TO_PTR(args[i]);
+            if (i == 1) {
+                globals = locals;
+            }
+        }
+    }
+
+    #if MICROPY_PY_BUILTINS_COMPILE
+    if (mp_obj_is_type(args[0], &mp_type_code)) {
+        return code_execute(MP_OBJ_TO_PTR(args[0]), globals, locals);
+    }
+    #endif
+
+
+    // create the lexer
+    // MP_PARSE_SINGLE_INPUT is used to indicate a file input
+    mp_lexer_t *lex;
+    if (MICROPY_PY_BUILTINS_EXECFILE && parse_input_kind == MP_PARSE_SINGLE_INPUT) {
+        lex = mp_lexer_new_from_file(mp_obj_str_get_qstr(args[0]));
+        parse_input_kind = MP_PARSE_FILE_INPUT;
+    } else {
+        // Extract the source code.
+        mp_buffer_info_t bufinfo;
+        mp_get_buffer_raise(args[0], &bufinfo, MP_BUFFER_READ);
+
+        lex = mp_lexer_new_from_str_len(MP_QSTR__lt_string_gt_, bufinfo.buf, bufinfo.len, 0);
+    }
+
+    return mp_parse_compile_execute(lex, parse_input_kind, globals, locals);
+}
+
+// Python: eval(source[, globals[, locals]]).
+static mp_obj_t mp_builtin_eval(size_t n_args, const mp_obj_t *args) {
+    return eval_exec_helper(n_args, args, MP_PARSE_EVAL_INPUT);
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin_eval_obj, 1, 3, mp_builtin_eval);
+
+// Python: exec(source[, globals[, locals]]).
+static mp_obj_t mp_builtin_exec(size_t n_args, const mp_obj_t *args) {
+    return eval_exec_helper(n_args, args, MP_PARSE_FILE_INPUT);
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin_exec_obj, 1, 3, mp_builtin_exec);
+
+#endif // MICROPY_PY_BUILTINS_EVAL_EXEC
+
+#if MICROPY_PY_BUILTINS_EXECFILE
+// MicroPython extension: execfile(path[, globals[, locals]]) -- execute a
+// file by name instead of a source string.
+static mp_obj_t mp_builtin_execfile(size_t n_args, const mp_obj_t *args) {
+    // MP_PARSE_SINGLE_INPUT is used to indicate a file input
+    return eval_exec_helper(n_args, args, MP_PARSE_SINGLE_INPUT);
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin_execfile_obj, 1, 3, mp_builtin_execfile);
+#endif

+ 174 - 0
mp_flipper/lib/micropython/py/builtinhelp.c

@@ -0,0 +1,174 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013-2016 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+
+#include "py/builtin.h"
+#include "py/objmodule.h"
+
+#if MICROPY_PY_BUILTINS_HELP
+
+// Banner printed by help() when called with no arguments.
+// NOTE(review): printed via the MICROPY_PY_BUILTINS_HELP_TEXT macro below --
+// presumably that macro points at this array by default; confirm in the
+// port's mpconfig.
+const char mp_help_default_text[] =
+    "Welcome to MicroPython!\n"
+    "\n"
+    "For online docs please visit http://docs.micropython.org/\n"
+    "\n"
+    "Control commands:\n"
+    "  CTRL-A        -- on a blank line, enter raw REPL mode\n"
+    "  CTRL-B        -- on a blank line, enter normal REPL mode\n"
+    "  CTRL-C        -- interrupt a running program\n"
+    "  CTRL-D        -- on a blank line, exit or do a soft reset\n"
+    "  CTRL-E        -- on a blank line, enter paste mode\n"
+    "\n"
+    "For further help on a specific object, type help(obj)\n"
+;
+
+// Print one "  <name> -- <value>" line describing a single attribute.
+static void mp_help_print_info_about_object(mp_obj_t name_o, mp_obj_t value) {
+    const mp_print_t *out = MP_PYTHON_PRINTER;
+    mp_print_str(out, "  ");
+    mp_obj_print(name_o, PRINT_STR);
+    mp_print_str(out, " -- ");
+    mp_obj_print(value, PRINT_STR);
+    mp_print_str(out, "\n");
+}
+
+#if MICROPY_PY_BUILTINS_HELP_MODULES
+// Append the key of every filled slot in `map` to the given list.
+static void mp_help_add_from_map(mp_obj_t list, const mp_map_t *map) {
+    for (size_t slot = 0; slot < map->alloc; slot++) {
+        if (!mp_map_slot_is_filled(map, slot)) {
+            continue;
+        }
+        mp_obj_list_append(list, map->table[slot].key);
+    }
+}
+
+#if MICROPY_MODULE_FROZEN
+// `name` points at a packed sequence of NUL-terminated filenames (the frozen
+// module name list), terminated by an empty string; append each one to `list`
+// with its trailing extension removed.
+static void mp_help_add_from_names(mp_obj_t list, const char *name) {
+    while (*name) {
+        size_t len = strlen(name);
+        // name should end in '.py' and we strip it off
+        mp_obj_list_append(list, mp_obj_new_str(name, len - 3));
+        name += len + 1;
+    }
+}
+#endif
+
+// Implement help('modules'): gather built-in, extensible and frozen module
+// names, sort them alphabetically, and print them as a column-first table.
+static void mp_help_print_modules(void) {
+    mp_obj_t list = mp_obj_new_list(0, NULL);
+
+    mp_help_add_from_map(list, &mp_builtin_module_map);
+    mp_help_add_from_map(list, &mp_builtin_extensible_module_map);
+
+    #if MICROPY_MODULE_FROZEN
+    extern const char mp_frozen_names[];
+    mp_help_add_from_names(list, mp_frozen_names);
+    #endif
+
+    // sort the list so it's printed in alphabetical order
+    mp_obj_list_sort(1, &list, (mp_map_t *)&mp_const_empty_map);
+
+    // print the list of modules in a column-first order
+    #define NUM_COLUMNS (4)
+    #define COLUMN_WIDTH (18)
+    size_t len;
+    mp_obj_t *items;
+    mp_obj_list_get(list, &len, &items);
+    unsigned int num_rows = (len + NUM_COLUMNS - 1) / NUM_COLUMNS;
+    for (unsigned int i = 0; i < num_rows; ++i) {
+        unsigned int j = i;
+        for (;;) {
+            int l = mp_print_str(MP_PYTHON_PRINTER, mp_obj_str_get_str(items[j]));
+            j += num_rows;
+            if (j >= len) {
+                break;
+            }
+            // pad with spaces to the next multiple of COLUMN_WIDTH (names
+            // longer than one column push the next entry over by whole columns)
+            int gap = COLUMN_WIDTH - l;
+            while (gap < 1) {
+                gap += COLUMN_WIDTH;
+            }
+            while (gap--) {
+                mp_print_str(MP_PYTHON_PRINTER, " ");
+            }
+        }
+        mp_print_str(MP_PYTHON_PRINTER, "\n");
+    }
+
+    #if MICROPY_ENABLE_EXTERNAL_IMPORT
+    // let the user know there may be other modules available from the filesystem
+    mp_print_str(MP_PYTHON_PRINTER, "Plus any modules on the filesystem\n");
+    #endif
+}
+
+// Print help for a specific object: its repr and type name, plus (for modules
+// and types) one line per entry in its globals/locals dict.
+// help('modules') is special-cased to the module listing when enabled.
+static void mp_help_print_obj(const mp_obj_t obj) {
+    #if MICROPY_PY_BUILTINS_HELP_MODULES
+    if (obj == MP_OBJ_NEW_QSTR(MP_QSTR_modules)) {
+        mp_help_print_modules();
+        return;
+    }
+    #endif
+
+    const mp_obj_type_t *type = mp_obj_get_type(obj);
+
+    // try to print something sensible about the given object
+    mp_print_str(MP_PYTHON_PRINTER, "object ");
+    mp_obj_print(obj, PRINT_STR);
+    mp_printf(MP_PYTHON_PRINTER, " is of type %q\n", type->name);
+
+    mp_map_t *map = NULL;
+    if (type == &mp_type_module) {
+        map = &mp_obj_module_get_globals(obj)->map;
+    } else {
+        // for a type object, describe the type itself rather than its metatype
+        if (type == &mp_type_type) {
+            type = MP_OBJ_TO_PTR(obj);
+        }
+        if (MP_OBJ_TYPE_HAS_SLOT(type, locals_dict)) {
+            map = &MP_OBJ_TYPE_GET_SLOT(type, locals_dict)->map;
+        }
+    }
+    if (map != NULL) {
+        for (uint i = 0; i < map->alloc; i++) {
+            mp_obj_t key = map->table[i].key;
+            if (key != MP_OBJ_NULL) {
+                mp_help_print_info_about_object(key, map->table[i].value);
+            }
+        }
+    }
+}
+
+// Python: help([obj]).  With no argument, print the general help banner;
+// otherwise describe the given object.
+static mp_obj_t mp_builtin_help(size_t n_args, const mp_obj_t *args) {
+    if (n_args == 0) {
+        // print a general help message
+        mp_print_str(MP_PYTHON_PRINTER, MICROPY_PY_BUILTINS_HELP_TEXT);
+    } else {
+        // try to print something sensible about the given object
+        mp_help_print_obj(args[0]);
+    }
+
+    return mp_const_none;
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin_help_obj, 0, 1, mp_builtin_help);
+
+#endif // MICROPY_PY_BUILTINS_HELP

+ 665 - 0
mp_flipper/lib/micropython/py/builtinimport.c

@@ -0,0 +1,665 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013-2019 Damien P. George
+ * Copyright (c) 2014 Paul Sokolovsky
+ * Copyright (c) 2021 Jim Mussared
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+
+#include "py/compile.h"
+#include "py/objmodule.h"
+#include "py/persistentcode.h"
+#include "py/runtime.h"
+#include "py/builtin.h"
+#include "py/frozenmod.h"
+
+#if MICROPY_DEBUG_VERBOSE // print debugging info
+#define DEBUG_PRINT (1)
+#define DEBUG_printf DEBUG_printf
+#else // don't print debugging info
+#define DEBUG_PRINT (0)
+#define DEBUG_printf(...) (void)0
+#endif
+
+#if MICROPY_ENABLE_EXTERNAL_IMPORT
+
+// Must be a string of one byte.
+#define PATH_SEP_CHAR "/"
+
+// Virtual sys.path entry that maps to the frozen modules.
+#define MP_FROZEN_PATH_PREFIX ".frozen/"
+
+// Wrapper for mp_import_stat (which is provided by the port, and typically
+// uses mp_vfs_import_stat) to also search frozen modules. Given an exact
+// path to a file or directory (e.g. "foo/bar", foo/bar.py" or "foo/bar.mpy"),
+// will return whether the path is a file, directory, or doesn't exist.
+static mp_import_stat_t stat_path(vstr_t *path) {
+    const char *str = vstr_null_terminated_str(path);
+    #if MICROPY_MODULE_FROZEN
+    // Only try and load as a frozen module if it starts with .frozen/.
+    const int frozen_path_prefix_len = strlen(MP_FROZEN_PATH_PREFIX);
+    if (strncmp(str, MP_FROZEN_PATH_PREFIX, frozen_path_prefix_len) == 0) {
+        // Just stat (which is the return value), don't get the data.
+        // (NULL type/data out-params request a stat-only lookup.)
+        return mp_find_frozen_module(str + frozen_path_prefix_len, NULL, NULL);
+    }
+    #endif
+    return mp_import_stat(str);
+}
+
+// Stat a given filesystem path to a .py file. If the file does not exist,
+// then attempt to stat the corresponding .mpy file, and update the path
+// argument. This is the logic that makes .py files take precedent over .mpy
+// files. This uses stat_path above, rather than mp_import_stat directly, so
+// that the .frozen path prefix is handled.
+static mp_import_stat_t stat_file_py_or_mpy(vstr_t *path) {
+    mp_import_stat_t stat = stat_path(path);
+    if (stat == MP_IMPORT_STAT_FILE) {
+        return stat;
+    }
+
+    #if MICROPY_PERSISTENT_CODE_LOAD
+    // Didn't find .py -- try the .mpy instead by inserting an 'm' into the '.py'.
+    // Note: There's no point doing this if it's a frozen path, but adding the check
+    // would be extra code, and no harm letting mp_find_frozen_module fail instead.
+    vstr_ins_byte(path, path->len - 2, 'm');
+    stat = stat_path(path);
+    if (stat == MP_IMPORT_STAT_FILE) {
+        return stat;
+    }
+    #endif
+
+    // Note: when MICROPY_PERSISTENT_CODE_LOAD is enabled, the path is left
+    // with the '.mpy' extension at this point even though nothing was found.
+    return MP_IMPORT_STAT_NO_EXIST;
+}
+
+// Given an import path (e.g. "foo/bar"), try and find "foo/bar" (a directory)
+// or "foo/bar.(m)py" in either the filesystem or frozen modules. If the
+// result is a file, the path argument will be updated to include the file
+// extension.
+static mp_import_stat_t stat_module(vstr_t *path) {
+    mp_import_stat_t stat = stat_path(path);
+    DEBUG_printf("stat %s: %d\n", vstr_str(path), stat);
+    if (stat == MP_IMPORT_STAT_DIR) {
+        return stat;
+    }
+
+    // Not a directory, add .py and try as a file.
+    // (stat_file_py_or_mpy may further rewrite the extension to '.mpy'.)
+    vstr_add_str(path, ".py");
+    return stat_file_py_or_mpy(path);
+}
+
+// Given a top-level module name, try and find it in each of the sys.path
+// entries. Note: On success, the dest argument will be updated to the matching
+// path (i.e. "<entry>/mod_name(.py)").
+static mp_import_stat_t stat_top_level(qstr mod_name, vstr_t *dest) {
+    DEBUG_printf("stat_top_level: '%s'\n", qstr_str(mod_name));
+    #if MICROPY_PY_SYS
+    // dest accumulates the candidate path; it is reset for each sys.path entry.
+    size_t path_num;
+    mp_obj_t *path_items;
+    mp_obj_get_array(mp_sys_path, &path_num, &path_items);
+
+    // go through each sys.path entry, trying to import "<entry>/<mod_name>".
+    for (size_t i = 0; i < path_num; i++) {
+        vstr_reset(dest);
+        size_t p_len;
+        const char *p = mp_obj_str_get_data(path_items[i], &p_len);
+        if (p_len > 0) {
+            // Add the path separator (unless the entry is "", i.e. cwd).
+            vstr_add_strn(dest, p, p_len);
+            vstr_add_char(dest, PATH_SEP_CHAR[0]);
+        }
+        vstr_add_str(dest, qstr_str(mod_name));
+        mp_import_stat_t stat = stat_module(dest);
+        if (stat != MP_IMPORT_STAT_NO_EXIST) {
+            return stat;
+        }
+    }
+
+    // sys.path was empty or no matches, do not search the filesystem or
+    // frozen code.
+    return MP_IMPORT_STAT_NO_EXIST;
+
+    #else
+
+    // mp_sys_path is not enabled, so just stat the given path directly.
+    vstr_add_str(dest, qstr_str(mod_name));
+    return stat_module(dest);
+
+    #endif
+}
+
+#if MICROPY_MODULE_FROZEN_STR || MICROPY_ENABLE_COMPILER
+// Parse, compile and execute Python source (from the given lexer) in the
+// context of the given module, setting its __file__ when enabled.
+static void do_load_from_lexer(mp_module_context_t *context, mp_lexer_t *lex) {
+    #if MICROPY_PY___FILE__
+    qstr source_name = lex->source_name;
+    mp_store_attr(MP_OBJ_FROM_PTR(&context->module), MP_QSTR___file__, MP_OBJ_NEW_QSTR(source_name));
+    #endif
+
+    // parse, compile and execute the module in its context
+    mp_obj_dict_t *mod_globals = context->module.globals;
+    mp_parse_compile_execute(lex, MP_PARSE_FILE_INPUT, mod_globals, mod_globals);
+}
+#endif
+
+#if (MICROPY_HAS_FILE_READER && MICROPY_PERSISTENT_CODE_LOAD) || MICROPY_MODULE_FROZEN_MPY
+// Instantiate and run a pre-compiled function (from a .mpy file or a frozen
+// .mpy) in the module's context, saving and restoring the caller's
+// globals/locals even if execution raises (via the NLR jump callback).
+static void do_execute_proto_fun(const mp_module_context_t *context, mp_proto_fun_t proto_fun, qstr source_name) {
+    #if MICROPY_PY___FILE__
+    mp_store_attr(MP_OBJ_FROM_PTR(&context->module), MP_QSTR___file__, MP_OBJ_NEW_QSTR(source_name));
+    #else
+    (void)source_name;
+    #endif
+
+    // execute the module in its context
+    mp_obj_dict_t *mod_globals = context->module.globals;
+
+    // save context
+    nlr_jump_callback_node_globals_locals_t ctx;
+    ctx.globals = mp_globals_get();
+    ctx.locals = mp_locals_get();
+
+    // set new context
+    mp_globals_set(mod_globals);
+    mp_locals_set(mod_globals);
+
+    // set exception handler to restore context if an exception is raised
+    nlr_push_jump_callback(&ctx.callback, mp_globals_locals_set_from_nlr_jump_callback);
+
+    // make and execute the function
+    mp_obj_t module_fun = mp_make_function_from_proto_fun(proto_fun, context, NULL);
+    mp_call_function_0(module_fun);
+
+    // deregister exception handler and restore context
+    nlr_pop_jump_callback(true);
+}
+#endif
+
+// Load and execute the module at `file` into module_obj, trying in order:
+// frozen source, frozen .mpy, filesystem .mpy, then source compilation.
+static void do_load(mp_module_context_t *module_obj, vstr_t *file) {
+    #if MICROPY_MODULE_FROZEN || MICROPY_ENABLE_COMPILER || (MICROPY_PERSISTENT_CODE_LOAD && MICROPY_HAS_FILE_READER)
+    const char *file_str = vstr_null_terminated_str(file);
+    #endif
+
+    // If we support frozen modules (either as str or mpy) then try to find the
+    // requested filename in the list of frozen module filenames.
+    #if MICROPY_MODULE_FROZEN
+    void *modref;
+    int frozen_type;
+    const int frozen_path_prefix_len = strlen(MP_FROZEN_PATH_PREFIX);
+    if (strncmp(file_str, MP_FROZEN_PATH_PREFIX, frozen_path_prefix_len) == 0) {
+        mp_find_frozen_module(file_str + frozen_path_prefix_len, &frozen_type, &modref);
+
+        // If we support frozen str modules and the compiler is enabled, and we
+        // found the filename in the list of frozen files, then load and execute it.
+        #if MICROPY_MODULE_FROZEN_STR
+        if (frozen_type == MP_FROZEN_STR) {
+            do_load_from_lexer(module_obj, modref);
+            return;
+        }
+        #endif
+
+        // If we support frozen mpy modules and we found a corresponding file (and
+        // its data) in the list of frozen files, execute it.
+        #if MICROPY_MODULE_FROZEN_MPY
+        if (frozen_type == MP_FROZEN_MPY) {
+            const mp_frozen_module_t *frozen = modref;
+            module_obj->constants = frozen->constants;
+            #if MICROPY_PY___FILE__
+            qstr frozen_file_qstr = qstr_from_str(file_str + frozen_path_prefix_len);
+            #else
+            qstr frozen_file_qstr = MP_QSTRnull;
+            #endif
+            do_execute_proto_fun(module_obj, frozen->proto_fun, frozen_file_qstr);
+            return;
+        }
+        #endif
+    }
+
+    #endif // MICROPY_MODULE_FROZEN
+
+    qstr file_qstr = qstr_from_str(file_str);
+
+    // If we support loading .mpy files then check if the file extension is of
+    // the correct format and, if so, load and execute the file.
+    #if MICROPY_HAS_FILE_READER && MICROPY_PERSISTENT_CODE_LOAD
+    // (The third-from-last character distinguishes ".mpy" from ".py".)
+    if (file_str[file->len - 3] == 'm') {
+        mp_compiled_module_t cm;
+        cm.context = module_obj;
+        mp_raw_code_load_file(file_qstr, &cm);
+        do_execute_proto_fun(cm.context, cm.rc, file_qstr);
+        return;
+    }
+    #endif
+
+    // If we can compile scripts then load the file and compile and execute it.
+    #if MICROPY_ENABLE_COMPILER
+    {
+        mp_lexer_t *lex = mp_lexer_new_from_file(file_qstr);
+        do_load_from_lexer(module_obj, lex);
+        return;
+    }
+    #else
+    // If we get here then the file was not frozen and we can't compile scripts.
+    mp_raise_msg(&mp_type_ImportError, MP_ERROR_TEXT("script compilation not supported"));
+    #endif
+}
+
+// Convert a relative (to the current module) import, going up "level" levels,
+// into an absolute import.
+static void evaluate_relative_import(mp_int_t level, const char **module_name, size_t *module_name_len) {
+    // What we want to do here is to take the name of the current module,
+    // remove <level> trailing components, and concatenate the passed-in
+    // module name.
+    // For example, level=3, module_name="foo.bar", __name__="a.b.c.d" --> "a.foo.bar"
+    // "Relative imports use a module's __name__ attribute to determine that
+    // module's position in the package hierarchy."
+    // http://legacy.python.org/dev/peps/pep-0328/#relative-imports-and-name
+    //
+    // On return, *module_name points into a newly-interned qstr (so the
+    // storage outlives this call) and *module_name_len is updated to match.
+
+    mp_obj_t current_module_name_obj = mp_obj_dict_get(MP_OBJ_FROM_PTR(mp_globals_get()), MP_OBJ_NEW_QSTR(MP_QSTR___name__));
+    assert(current_module_name_obj != MP_OBJ_NULL);
+
+    #if MICROPY_MODULE_OVERRIDE_MAIN_IMPORT && MICROPY_CPYTHON_COMPAT
+    if (MP_OBJ_QSTR_VALUE(current_module_name_obj) == MP_QSTR___main__) {
+        // This is a module loaded by -m command-line switch (e.g. unix port),
+        // and so its __name__ has been set to "__main__". Get its real name
+        // that we stored during import in the __main__ attribute.
+        current_module_name_obj = mp_obj_dict_get(MP_OBJ_FROM_PTR(mp_globals_get()), MP_OBJ_NEW_QSTR(MP_QSTR___main__));
+    }
+    #endif
+
+    // If we have a __path__ in the globals dict, then we're a package.
+    bool is_pkg = mp_map_lookup(&mp_globals_get()->map, MP_OBJ_NEW_QSTR(MP_QSTR___path__), MP_MAP_LOOKUP);
+
+    #if DEBUG_PRINT
+    DEBUG_printf("Current module/package: ");
+    mp_obj_print_helper(MICROPY_DEBUG_PRINTER, current_module_name_obj, PRINT_REPR);
+    DEBUG_printf(", is_package: %d", is_pkg);
+    DEBUG_printf("\n");
+    #endif
+
+    size_t current_module_name_len;
+    const char *current_module_name = mp_obj_str_get_data(current_module_name_obj, &current_module_name_len);
+
+    const char *p = current_module_name + current_module_name_len;
+    if (is_pkg) {
+        // If we're evaluating relative to a package, then take off one fewer
+        // level (i.e. the relative search starts inside the package, rather
+        // than as a sibling of the package).
+        --level;
+    }
+
+    // Walk back 'level' dots (or run out of path).
+    while (level && p > current_module_name) {
+        if (*--p == '.') {
+            --level;
+        }
+    }
+
+    // We must have some component left over to import from.
+    if (p == current_module_name) {
+        mp_raise_msg(&mp_type_ImportError, MP_ERROR_TEXT("can't perform relative import"));
+    }
+
+    // New length is len("<chopped path>.<module_name>"). Note: might be one byte
+    // more than we need if module_name is empty (for the extra . we will
+    // append).
+    uint new_module_name_len = (size_t)(p - current_module_name) + 1 + *module_name_len;
+    char *new_mod = mp_local_alloc(new_module_name_len);
+    memcpy(new_mod, current_module_name, p - current_module_name);
+
+    // Only append ".<module_name>" if there was one).
+    if (*module_name_len != 0) {
+        new_mod[p - current_module_name] = '.';
+        memcpy(new_mod + (p - current_module_name) + 1, *module_name, *module_name_len);
+    } else {
+        --new_module_name_len;
+    }
+
+    // Copy into a QSTR.
+    qstr new_mod_q = qstr_from_strn(new_mod, new_module_name_len);
+    mp_local_free(new_mod);
+
+    DEBUG_printf("Resolved base name for relative import: '%s'\n", qstr_str(new_mod_q));
+    *module_name = qstr_str(new_mod_q);
+    *module_name_len = new_module_name_len;
+}
+
+// NLR jump-callback context used during module loading: if an exception is
+// raised while a freshly created module is executing, the callback removes
+// that module from the loaded-modules dict (sys.modules) again.
+typedef struct _nlr_jump_callback_node_unregister_module_t {
+    nlr_jump_callback_node_t callback; // node registered via nlr_push_jump_callback
+    qstr name; // fully-qualified name of the module to unregister on failure
+} nlr_jump_callback_node_unregister_module_t;
+
+// NLR jump callback: remove the module named in the context from the
+// loaded-modules dict, undoing the provisional registration done by
+// mp_obj_new_module in process_import_at_level.
+static void unregister_module_from_nlr_jump_callback(void *ctx_in) {
+    nlr_jump_callback_node_unregister_module_t *ctx = ctx_in;
+    mp_map_t *mp_loaded_modules_map = &MP_STATE_VM(mp_loaded_modules_dict).map;
+    mp_map_lookup(mp_loaded_modules_map, MP_OBJ_NEW_QSTR(ctx->name), MP_MAP_LOOKUP_REMOVE_IF_FOUND);
+}
+
+// Load a module at the specified absolute path, possibly as a submodule of the given outer module.
+// full_mod_name:    The full absolute path up to this level (e.g. "foo.bar.baz").
+// level_mod_name:   The final component of the path (e.g. "baz").
+// outer_module_obj: The parent module (we need to store this module as an
+//                   attribute on it) (or MP_OBJ_NULL for top-level).
+// override_main:    Whether to set the __name__ to "__main__" (and use __main__
+//                   for the actual path).
+// Returns the module object; raises ImportError if the module cannot be found.
+static mp_obj_t process_import_at_level(qstr full_mod_name, qstr level_mod_name, mp_obj_t outer_module_obj, bool override_main) {
+    // Immediately return if the module at this level is already loaded.
+    mp_map_elem_t *elem;
+
+    #if MICROPY_PY_SYS
+    // If sys.path is empty, the intention is to force using a built-in. This
+    // means we should also ignore any loaded modules with the same name
+    // which may have come from the filesystem.
+    size_t path_num;
+    mp_obj_t *path_items;
+    mp_obj_get_array(mp_sys_path, &path_num, &path_items);
+    if (path_num)
+    #endif
+    {
+        elem = mp_map_lookup(&MP_STATE_VM(mp_loaded_modules_dict).map, MP_OBJ_NEW_QSTR(full_mod_name), MP_MAP_LOOKUP);
+        if (elem) {
+            return elem->value;
+        }
+    }
+
+    // Fixed-size buffer for the filesystem path of the module being searched for.
+    VSTR_FIXED(path, MICROPY_ALLOC_PATH_MAX);
+    mp_import_stat_t stat = MP_IMPORT_STAT_NO_EXIST;
+    mp_obj_t module_obj;
+
+    if (outer_module_obj == MP_OBJ_NULL) {
+        // First module in the dotted-name path.
+        DEBUG_printf("Searching for top-level module\n");
+
+        // An import of a non-extensible built-in will always bypass the
+        // filesystem. e.g. `import micropython` or `import pyb`. So try and
+        // match a non-extensible built-ins first.
+        module_obj = mp_module_get_builtin(level_mod_name, false);
+        if (module_obj != MP_OBJ_NULL) {
+            return module_obj;
+        }
+
+        // Next try the filesystem. Search for a directory or file relative to
+        // all the locations in sys.path.
+        stat = stat_top_level(level_mod_name, &path);
+
+        // If filesystem failed, now try and see if it matches an extensible
+        // built-in module.
+        if (stat == MP_IMPORT_STAT_NO_EXIST) {
+            module_obj = mp_module_get_builtin(level_mod_name, true);
+            if (module_obj != MP_OBJ_NULL) {
+                return module_obj;
+            }
+        }
+    } else {
+        DEBUG_printf("Searching for sub-module\n");
+
+        #if MICROPY_MODULE_BUILTIN_SUBPACKAGES
+        // If the outer module is a built-in (because its map is in ROM), then
+        // treat it like a package if it contains this submodule in its
+        // globals dict.
+        mp_obj_module_t *mod = MP_OBJ_TO_PTR(outer_module_obj);
+        if (mod->globals->map.is_fixed) {
+            elem = mp_map_lookup(&mod->globals->map, MP_OBJ_NEW_QSTR(level_mod_name), MP_MAP_LOOKUP);
+            // Also verify that the entry in the globals dict is in fact a module.
+            if (elem && mp_obj_is_type(elem->value, &mp_type_module)) {
+                return elem->value;
+            }
+        }
+        #endif
+
+        // If the outer module is a package, it will have __path__ set.
+        // We can use that as the path to search inside.
+        mp_obj_t dest[2];
+        mp_load_method_maybe(outer_module_obj, MP_QSTR___path__, dest);
+        if (dest[0] != MP_OBJ_NULL) {
+            // e.g. __path__ will be "<matched search path>/foo/bar"
+            vstr_add_str(&path, mp_obj_str_get_str(dest[0]));
+
+            // Add the level module name to the path to get "<matched search path>/foo/bar/baz".
+            vstr_add_char(&path, PATH_SEP_CHAR[0]);
+            vstr_add_str(&path, qstr_str(level_mod_name));
+
+            stat = stat_module(&path);
+        }
+    }
+
+    // Not already loaded, and not a built-in, so look at the stat result from the filesystem/frozen.
+
+    if (stat == MP_IMPORT_STAT_NO_EXIST) {
+        // Not found -- fail.
+        #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
+        mp_raise_msg(&mp_type_ImportError, MP_ERROR_TEXT("module not found"));
+        #else
+        mp_raise_msg_varg(&mp_type_ImportError, MP_ERROR_TEXT("no module named '%q'"), full_mod_name);
+        #endif
+    }
+
+    // Module was found on the filesystem/frozen, try and load it.
+    DEBUG_printf("Found path to load: %.*s\n", (int)vstr_len(&path), vstr_str(&path));
+
+    // Prepare for loading from the filesystem. Create a new shell module
+    // and register it in sys.modules.  Also make sure we remove it if
+    // there is any problem below.
+    module_obj = mp_obj_new_module(full_mod_name);
+    nlr_jump_callback_node_unregister_module_t ctx;
+    ctx.name = full_mod_name;
+    nlr_push_jump_callback(&ctx.callback, unregister_module_from_nlr_jump_callback);
+
+    #if MICROPY_MODULE_OVERRIDE_MAIN_IMPORT
+    // If this module is being loaded via -m on unix, then
+    // override __name__ to "__main__". Do this only for *modules*
+    // however - packages never have their names replaced, instead
+    // they're -m'ed using a special __main__ submodule in them. (This all
+    // apparently is done to not touch the package name itself, which is
+    // important for future imports).
+    if (override_main && stat != MP_IMPORT_STAT_DIR) {
+        mp_obj_module_t *o = MP_OBJ_TO_PTR(module_obj);
+        mp_obj_dict_store(MP_OBJ_FROM_PTR(o->globals), MP_OBJ_NEW_QSTR(MP_QSTR___name__), MP_OBJ_NEW_QSTR(MP_QSTR___main__));
+        #if MICROPY_CPYTHON_COMPAT
+        // Store module as "__main__" in the dictionary of loaded modules (returned by sys.modules).
+        mp_obj_dict_store(MP_OBJ_FROM_PTR(&MP_STATE_VM(mp_loaded_modules_dict)), MP_OBJ_NEW_QSTR(MP_QSTR___main__), module_obj);
+        // Store real name in "__main__" attribute. Need this for
+        // resolving relative imports later. "__main__ was chosen
+        // semi-randomly, to reuse existing qstr's.
+        mp_obj_dict_store(MP_OBJ_FROM_PTR(o->globals), MP_OBJ_NEW_QSTR(MP_QSTR___main__), MP_OBJ_NEW_QSTR(full_mod_name));
+        #endif
+    }
+    #endif // MICROPY_MODULE_OVERRIDE_MAIN_IMPORT
+
+    if (stat == MP_IMPORT_STAT_DIR) {
+        // Directory (i.e. a package).
+        DEBUG_printf("%.*s is dir\n", (int)vstr_len(&path), vstr_str(&path));
+
+        // Store the __path__ attribute onto this module.
+        // https://docs.python.org/3/reference/import.html
+        // "Specifically, any module that contains a __path__ attribute is considered a package."
+        // This gets used later to locate any subpackages of this module.
+        mp_store_attr(module_obj, MP_QSTR___path__, mp_obj_new_str(vstr_str(&path), vstr_len(&path)));
+        size_t orig_path_len = path.len;
+        vstr_add_str(&path, PATH_SEP_CHAR "__init__.py");
+
+        // execute "path/__init__.py" (if available).
+        if (stat_file_py_or_mpy(&path) == MP_IMPORT_STAT_FILE) {
+            do_load(MP_OBJ_TO_PTR(module_obj), &path);
+        } else {
+            // No-op. Nothing to load.
+            // mp_warning("%s is imported as namespace package", vstr_str(&path));
+        }
+        // Remove /__init__.py suffix from path.
+        path.len = orig_path_len;
+    } else { // MP_IMPORT_STAT_FILE
+        // File -- execute "path.(m)py".
+        do_load(MP_OBJ_TO_PTR(module_obj), &path);
+        // Note: This should be the last component in the import path. If
+        // there are remaining components then in the next call to
+        // process_import_at_level will detect that it doesn't have
+        // a __path__ attribute, and not attempt to stat it.
+    }
+
+    if (outer_module_obj != MP_OBJ_NULL) {
+        // If it's a sub-module then make it available on the parent module.
+        mp_store_attr(outer_module_obj, level_mod_name, module_obj);
+    }
+
+    // Module executed without raising: keep it registered in sys.modules.
+    nlr_pop_jump_callback(false);
+
+    return module_obj;
+}
+
+// Implementation of the builtin __import__(name, globals, locals, fromlist, level)
+// for the case where external (filesystem/frozen) imports are enabled. Walks the
+// dotted module path one component at a time, loading each level via
+// process_import_at_level.
+mp_obj_t mp_builtin___import___default(size_t n_args, const mp_obj_t *args) {
+    #if DEBUG_PRINT
+    DEBUG_printf("__import__:\n");
+    for (size_t i = 0; i < n_args; i++) {
+        DEBUG_printf("  ");
+        mp_obj_print_helper(MICROPY_DEBUG_PRINTER, args[i], PRINT_REPR);
+        DEBUG_printf("\n");
+    }
+    #endif
+
+    // This is the import path, with any leading dots stripped.
+    // "import foo.bar" --> module_name="foo.bar"
+    // "from foo.bar import baz" --> module_name="foo.bar"
+    // "from . import foo" --> module_name=""
+    // "from ...foo.bar import baz" --> module_name="foo.bar"
+    mp_obj_t module_name_obj = args[0];
+
+    // These are the imported names.
+    // i.e. "from foo.bar import baz, zap" --> fromtuple=("baz", "zap",)
+    // Note: There's a special case on the Unix port, where this is set to mp_const_false which means that it's __main__.
+    mp_obj_t fromtuple = mp_const_none;
+
+    // Level is the number of leading dots in a relative import.
+    // i.e. "from . import foo" --> level=1
+    // i.e. "from ...foo.bar import baz" --> level=3
+    mp_int_t level = 0;
+    if (n_args >= 4) {
+        fromtuple = args[3];
+        if (n_args >= 5) {
+            level = MP_OBJ_SMALL_INT_VALUE(args[4]);
+            if (level < 0) {
+                mp_raise_ValueError(NULL);
+            }
+        }
+    }
+
+    size_t module_name_len;
+    const char *module_name = mp_obj_str_get_data(module_name_obj, &module_name_len);
+
+    if (level != 0) {
+        // Turn "foo.bar" with level=3 into "<current module 3 components>.foo.bar".
+        // Current module name is extracted from globals().__name__.
+        evaluate_relative_import(level, &module_name, &module_name_len);
+        // module_name is now an absolute module path.
+    }
+
+    // An empty module name (e.g. "import ''") is invalid.
+    if (module_name_len == 0) {
+        mp_raise_ValueError(NULL);
+    }
+
+    DEBUG_printf("Starting module search for '%s'\n", module_name);
+
+    mp_obj_t top_module_obj = MP_OBJ_NULL;
+    mp_obj_t outer_module_obj = MP_OBJ_NULL;
+
+    // Iterate the absolute path, finding the end of each component of the path.
+    // foo.bar.baz
+    //    ^   ^   ^
+    size_t current_component_start = 0;
+    for (size_t i = 1; i <= module_name_len; i++) {
+        if (i == module_name_len || module_name[i] == '.') {
+            // The module name up to this depth (e.g. foo.bar.baz).
+            qstr full_mod_name = qstr_from_strn(module_name, i);
+            // The current level name (e.g. baz).
+            qstr level_mod_name = qstr_from_strn(module_name + current_component_start, i - current_component_start);
+
+            DEBUG_printf("Processing module: '%s' at level '%s'\n", qstr_str(full_mod_name), qstr_str(level_mod_name));
+
+            #if MICROPY_MODULE_OVERRIDE_MAIN_IMPORT
+            // On unix, if this is being loaded via -m (indicated by sentinel
+            // fromtuple=mp_const_false), then handle that if it's the final
+            // component.
+            bool override_main = (i == module_name_len && fromtuple == mp_const_false);
+            #else
+            bool override_main = false;
+            #endif
+
+            // Import this module.
+            mp_obj_t module_obj = process_import_at_level(full_mod_name, level_mod_name, outer_module_obj, override_main);
+
+            // Set this as the parent module, and remember the top-level module if it's the first.
+            outer_module_obj = module_obj;
+            if (top_module_obj == MP_OBJ_NULL) {
+                top_module_obj = module_obj;
+            }
+
+            current_component_start = i + 1;
+        }
+    }
+
+    if (fromtuple != mp_const_none) {
+        // If fromtuple is not empty, return leaf module
+        return outer_module_obj;
+    } else {
+        // Otherwise, we need to return top-level package
+        return top_module_obj;
+    }
+}
+
+#else // MICROPY_ENABLE_EXTERNAL_IMPORT
+
+// Minimal __import__ used when MICROPY_ENABLE_EXTERNAL_IMPORT is disabled:
+// only already-loaded modules and built-ins can be imported; relative
+// imports are rejected.
+mp_obj_t mp_builtin___import___default(size_t n_args, const mp_obj_t *args) {
+    // Check that it's not a relative import.
+    if (n_args >= 5 && MP_OBJ_SMALL_INT_VALUE(args[4]) != 0) {
+        mp_raise_NotImplementedError(MP_ERROR_TEXT("relative import"));
+    }
+
+    // Check if the module is already loaded.
+    mp_map_elem_t *elem = mp_map_lookup(&MP_STATE_VM(mp_loaded_modules_dict).map, args[0], MP_MAP_LOOKUP);
+    if (elem) {
+        return elem->value;
+    }
+
+    // Try the name directly as a non-extensible built-in (e.g. `micropython`).
+    qstr module_name_qstr = mp_obj_str_get_qstr(args[0]);
+    mp_obj_t module_obj = mp_module_get_builtin(module_name_qstr, false);
+    if (module_obj != MP_OBJ_NULL) {
+        return module_obj;
+    }
+    // Now try as an extensible built-in (e.g. `time`).
+    module_obj = mp_module_get_builtin(module_name_qstr, true);
+    if (module_obj != MP_OBJ_NULL) {
+        return module_obj;
+    }
+
+    // Couldn't find the module, so fail
+    #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
+    mp_raise_msg(&mp_type_ImportError, MP_ERROR_TEXT("module not found"));
+    #else
+    mp_raise_msg_varg(&mp_type_ImportError, MP_ERROR_TEXT("no module named '%q'"), module_name_qstr);
+    #endif
+}
+
+#endif // MICROPY_ENABLE_EXTERNAL_IMPORT
+
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin___import___obj, 1, 5, mp_builtin___import__);

+ 3680 - 0
mp_flipper/lib/micropython/py/compile.c

@@ -0,0 +1,3680 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013-2020 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+
+#include "py/scope.h"
+#include "py/emit.h"
+#include "py/compile.h"
+#include "py/runtime.h"
+#include "py/asmbase.h"
+#include "py/nativeglue.h"
+#include "py/persistentcode.h"
+#include "py/smallint.h"
+
+#if MICROPY_ENABLE_COMPILER
+
+#define INVALID_LABEL (0xffff)
+
+// Enumeration of parse-node kinds, generated by expanding py/grammar.h twice:
+// first the rules that have a compile function, then (after the special
+// PN_const_object marker) the rules without one.
+typedef enum {
+// define rules with a compile function
+#define DEF_RULE(rule, comp, kind, ...) PN_##rule,
+#define DEF_RULE_NC(rule, kind, ...)
+    #include "py/grammar.h"
+#undef DEF_RULE
+#undef DEF_RULE_NC
+    PN_const_object, // special node for a constant, generic Python object
+// define rules without a compile function
+#define DEF_RULE(rule, comp, kind, ...)
+#define DEF_RULE_NC(rule, kind, ...) PN_##rule,
+    #include "py/grammar.h"
+#undef DEF_RULE
+#undef DEF_RULE_NC
+} pn_kind_t;
+
+// Whether a mp_parse_node_struct_t that has pns->kind == PN_testlist_comp
+// corresponds to a list comprehension or generator.
+#define MP_PARSE_NODE_TESTLIST_COMP_HAS_COMP_FOR(pns) \
+    (MP_PARSE_NODE_STRUCT_NUM_NODES(pns) == 2 && \
+    MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[1], PN_comp_for))
+
+#define NEED_METHOD_TABLE MICROPY_EMIT_NATIVE
+
+#if NEED_METHOD_TABLE
+
+// we need a method table to do the lookup for the emitter functions
+#define EMIT(fun) (comp->emit_method_table->fun(comp->emit))
+#define EMIT_ARG(fun, ...) (comp->emit_method_table->fun(comp->emit, __VA_ARGS__))
+#define EMIT_LOAD_FAST(qst, local_num) (comp->emit_method_table->load_id.local(comp->emit, qst, local_num, MP_EMIT_IDOP_LOCAL_FAST))
+#define EMIT_LOAD_GLOBAL(qst) (comp->emit_method_table->load_id.global(comp->emit, qst, MP_EMIT_IDOP_GLOBAL_GLOBAL))
+
+#else
+
+// if we only have the bytecode emitter enabled then we can do a direct call to the functions
+#define EMIT(fun) (mp_emit_bc_##fun(comp->emit))
+#define EMIT_ARG(fun, ...) (mp_emit_bc_##fun(comp->emit, __VA_ARGS__))
+#define EMIT_LOAD_FAST(qst, local_num) (mp_emit_bc_load_local(comp->emit, qst, local_num, MP_EMIT_IDOP_LOCAL_FAST))
+#define EMIT_LOAD_GLOBAL(qst) (mp_emit_bc_load_global(comp->emit, qst, MP_EMIT_IDOP_GLOBAL_GLOBAL))
+
+#endif
+
+#if MICROPY_EMIT_NATIVE && MICROPY_DYNAMIC_COMPILER
+
+#define NATIVE_EMITTER(f) emit_native_table[mp_dynamic_compiler.native_arch]->emit_##f
+#define NATIVE_EMITTER_TABLE (emit_native_table[mp_dynamic_compiler.native_arch])
+
+// Native emitter method tables, indexed by mp_dynamic_compiler.native_arch
+// (see the NATIVE_EMITTER macros above); the NULL entry is the "no native
+// architecture" slot.
+static const emit_method_table_t *emit_native_table[] = {
+    NULL,
+    &emit_native_x86_method_table,
+    &emit_native_x64_method_table,
+    &emit_native_arm_method_table,
+    &emit_native_thumb_method_table,
+    &emit_native_thumb_method_table,
+    &emit_native_thumb_method_table,
+    &emit_native_thumb_method_table,
+    &emit_native_thumb_method_table,
+    &emit_native_xtensa_method_table,
+    &emit_native_xtensawin_method_table,
+};
+
+#elif MICROPY_EMIT_NATIVE
+// define a macro to access external native emitter
+#if MICROPY_EMIT_X64
+#define NATIVE_EMITTER(f) emit_native_x64_##f
+#elif MICROPY_EMIT_X86
+#define NATIVE_EMITTER(f) emit_native_x86_##f
+#elif MICROPY_EMIT_THUMB
+#define NATIVE_EMITTER(f) emit_native_thumb_##f
+#elif MICROPY_EMIT_ARM
+#define NATIVE_EMITTER(f) emit_native_arm_##f
+#elif MICROPY_EMIT_XTENSA
+#define NATIVE_EMITTER(f) emit_native_xtensa_##f
+#elif MICROPY_EMIT_XTENSAWIN
+#define NATIVE_EMITTER(f) emit_native_xtensawin_##f
+#else
+#error "unknown native emitter"
+#endif
+#define NATIVE_EMITTER_TABLE (&NATIVE_EMITTER(method_table))
+#endif
+
+#if MICROPY_EMIT_INLINE_ASM && MICROPY_DYNAMIC_COMPILER
+
+#define ASM_EMITTER(f) emit_asm_table[mp_dynamic_compiler.native_arch]->asm_##f
+#define ASM_EMITTER_TABLE emit_asm_table[mp_dynamic_compiler.native_arch]
+
+// Inline-assembler emitter method tables, indexed by
+// mp_dynamic_compiler.native_arch (see the ASM_EMITTER macros above);
+// NULL entries are architectures without inline-assembler support.
+static const emit_inline_asm_method_table_t *emit_asm_table[] = {
+    NULL,
+    NULL,
+    NULL,
+    &emit_inline_thumb_method_table,
+    &emit_inline_thumb_method_table,
+    &emit_inline_thumb_method_table,
+    &emit_inline_thumb_method_table,
+    &emit_inline_thumb_method_table,
+    &emit_inline_thumb_method_table,
+    &emit_inline_xtensa_method_table,
+    NULL,
+};
+
+#elif MICROPY_EMIT_INLINE_ASM
+// define macros for inline assembler
+#if MICROPY_EMIT_INLINE_THUMB
+#define ASM_DECORATOR_QSTR MP_QSTR_asm_thumb
+#define ASM_EMITTER(f) emit_inline_thumb_##f
+#elif MICROPY_EMIT_INLINE_XTENSA
+#define ASM_DECORATOR_QSTR MP_QSTR_asm_xtensa
+#define ASM_EMITTER(f) emit_inline_xtensa_##f
+#else
+#error "unknown asm emitter"
+#endif
+#define ASM_EMITTER_TABLE &ASM_EMITTER(method_table)
+#endif
+
+#define EMIT_INLINE_ASM(fun) (comp->emit_inline_asm_method_table->fun(comp->emit_inline_asm))
+#define EMIT_INLINE_ASM_ARG(fun, ...) (comp->emit_inline_asm_method_table->fun(comp->emit_inline_asm, __VA_ARGS__))
+
+// State for a single compilation; threaded through all compile functions.
+// elements in this struct are ordered to make it compact
+typedef struct _compiler_t {
+    uint8_t is_repl;
+    uint8_t pass; // holds enum type pass_kind_t
+    uint8_t have_star;
+
+    // try to keep compiler clean from nlr
+    mp_obj_t compile_error; // set to an exception object if there's an error
+    size_t compile_error_line; // set to best guess of line of error
+
+    uint next_label; // next free jump-label number, allocated by comp_next_label()
+
+    uint16_t num_dict_params;
+    uint16_t num_default_params;
+
+    uint16_t break_label; // highest bit set indicates we are breaking out of a for loop
+    uint16_t continue_label;
+    uint16_t cur_except_level; // increased for SETUP_EXCEPT, SETUP_FINALLY; decreased for POP_BLOCK, POP_EXCEPT
+    uint16_t break_continue_except_level;
+
+    scope_t *scope_head; // head of the linked list of all scopes (see scope_new_and_link)
+    scope_t *scope_cur; // scope currently being compiled
+
+    emit_t *emit;                                   // current emitter
+    #if NEED_METHOD_TABLE
+    const emit_method_table_t *emit_method_table;   // current emit method table
+    #endif
+
+    #if MICROPY_EMIT_INLINE_ASM
+    emit_inline_asm_t *emit_inline_asm;                                   // current emitter for inline asm
+    const emit_inline_asm_method_table_t *emit_inline_asm_method_table;   // current emit method table for inline asm
+    #endif
+
+    mp_emit_common_t emit_common;
+} compiler_t;
+
+#if MICROPY_COMP_ALLOW_TOP_LEVEL_AWAIT
+// NOTE(review): presumably permits 'await' expressions at module/REPL top
+// level when set by the embedder -- confirm against uses outside this chunk.
+bool mp_compile_allow_top_level_await = false;
+#endif
+
+/******************************************************************************/
+// mp_emit_common_t helper functions
+// These are defined here so they can be inlined, to reduce code size.
+
+// Initialise the shared emitter state: create the per-compile qstr map (with
+// the source filename as entry 0 when qstr tables are in use) and the list
+// that accumulates constant objects.
+static void mp_emit_common_init(mp_emit_common_t *emit, qstr source_file) {
+    #if MICROPY_EMIT_BYTECODE_USES_QSTR_TABLE
+    mp_map_init(&emit->qstr_map, 1);
+
+    // add the source file as the first entry in the qstr table
+    mp_map_elem_t *elem = mp_map_lookup(&emit->qstr_map, MP_OBJ_NEW_QSTR(source_file), MP_MAP_LOOKUP_ADD_IF_NOT_FOUND);
+    elem->value = MP_OBJ_NEW_SMALL_INT(0);
+    #endif
+    mp_obj_list_init(&emit->const_obj_list, 0);
+}
+
+// Reset the shared emitter state at the start of a compiler pass. On the
+// code-size pass, allocate the child raw-code table using the child count
+// gathered in the previous pass; the counter is then reset for re-counting.
+static void mp_emit_common_start_pass(mp_emit_common_t *emit, pass_kind_t pass) {
+    emit->pass = pass;
+    if (pass == MP_PASS_CODE_SIZE) {
+        if (emit->ct_cur_child == 0) {
+            emit->children = NULL;
+        } else {
+            emit->children = m_new0(mp_raw_code_t *, emit->ct_cur_child);
+        }
+    }
+    emit->ct_cur_child = 0;
+}
+
+// Transfer the accumulated qstr table and constant-object list into the
+// module context, which owns these tables for the lifetime of the compiled
+// module.
+static void mp_emit_common_populate_module_context(mp_emit_common_t *emit, qstr source_file, mp_module_context_t *context) {
+    #if MICROPY_EMIT_BYTECODE_USES_QSTR_TABLE
+    // The map stores qstr -> table-index; invert it into a flat qstr table.
+    size_t qstr_map_used = emit->qstr_map.used;
+    mp_module_context_alloc_tables(context, qstr_map_used, emit->const_obj_list.len);
+    for (size_t i = 0; i < emit->qstr_map.alloc; ++i) {
+        if (mp_map_slot_is_filled(&emit->qstr_map, i)) {
+            size_t idx = MP_OBJ_SMALL_INT_VALUE(emit->qstr_map.table[i].value);
+            qstr qst = MP_OBJ_QSTR_VALUE(emit->qstr_map.table[i].key);
+            context->constants.qstr_table[idx] = qst;
+        }
+    }
+    #else
+    mp_module_context_alloc_tables(context, 0, emit->const_obj_list.len);
+    context->constants.source_file = source_file;
+    #endif
+
+    for (size_t i = 0; i < emit->const_obj_list.len; ++i) {
+        context->constants.obj_table[i] = emit->const_obj_list.items[i];
+    }
+}
+
+/******************************************************************************/
+
+// Attach a source-line number to the pending compile error, taken from the
+// given parse node, but only if no line has been recorded yet.
+static void compile_error_set_line(compiler_t *comp, mp_parse_node_t pn) {
+    // if the line of the error is unknown then try to update it from the pn
+    if (comp->compile_error_line == 0 && MP_PARSE_NODE_IS_STRUCT(pn)) {
+        comp->compile_error_line = ((mp_parse_node_struct_t *)pn)->source_line;
+    }
+}
+
+// Record a SyntaxError with the given message and the line of parse node pn;
+// only the first error raised during a compile is kept.
+static void compile_syntax_error(compiler_t *comp, mp_parse_node_t pn, mp_rom_error_text_t msg) {
+    // only register the error if there has been no other error
+    if (comp->compile_error == MP_OBJ_NULL) {
+        comp->compile_error = mp_obj_new_exception_msg(&mp_type_SyntaxError, msg);
+        compile_error_set_line(comp, pn);
+    }
+}
+
+static void compile_trailer_paren_helper(compiler_t *comp, mp_parse_node_t pn_arglist, bool is_method_call, int n_positional_extra);
+static void compile_comprehension(compiler_t *comp, mp_parse_node_struct_t *pns, scope_kind_t kind);
+static void compile_atom_brace_helper(compiler_t *comp, mp_parse_node_struct_t *pns, bool create_map);
+static void compile_node(compiler_t *comp, mp_parse_node_t pn);
+
+// Allocate and return the next unused jump-label number for this compile.
+static uint comp_next_label(compiler_t *comp) {
+    return comp->next_label++;
+}
+
+#if MICROPY_EMIT_NATIVE
+// Reserve n extra label numbers for the native emitter; a no-op when the
+// current scope is compiled to plain bytecode.
+static void reserve_labels_for_native(compiler_t *comp, int n) {
+    if (comp->scope_cur->emit_options != MP_EMIT_OPT_BYTECODE) {
+        comp->next_label += n;
+    }
+}
+#else
+#define reserve_labels_for_native(comp, n)
+#endif
+
+// Emit a setup_block of the given kind (e.g. SETUP_EXCEPT/SETUP_FINALLY)
+// targeting label, and track the deepest handler nesting so the scope can
+// size its exception stack.
+static void compile_increase_except_level(compiler_t *comp, uint label, int kind) {
+    EMIT_ARG(setup_block, label, kind);
+    comp->cur_except_level += 1;
+    if (comp->cur_except_level > comp->scope_cur->exc_stack_size) {
+        comp->scope_cur->exc_stack_size = comp->cur_except_level;
+    }
+}
+
+// Close the innermost exception block: emit end_finally, drop one nesting
+// level, and reserve the extra label the native emitter needs here.
+static void compile_decrease_except_level(compiler_t *comp) {
+    assert(comp->cur_except_level > 0);
+    comp->cur_except_level -= 1;
+    EMIT(end_finally);
+    reserve_labels_for_native(comp, 1);
+}
+
+// Create a new scope of the given kind for parse node pn, make the current
+// scope its parent, and append it to the tail of the compiler's scope list.
+static scope_t *scope_new_and_link(compiler_t *comp, scope_kind_t kind, mp_parse_node_t pn, uint emit_options) {
+    scope_t *scope = scope_new(kind, pn, emit_options);
+    scope->parent = comp->scope_cur;
+    scope->next = NULL;
+    if (comp->scope_head == NULL) {
+        comp->scope_head = scope;
+    } else {
+        // walk to the tail of the singly-linked scope list
+        scope_t *s = comp->scope_head;
+        while (s->next != NULL) {
+            s = s->next;
+        }
+        s->next = scope;
+    }
+    return scope;
+}
+
+// Callback type used by apply_to_single_or_list.
+typedef void (*apply_list_fun_t)(compiler_t *comp, mp_parse_node_t pn);
+
+// If pn is a list node of kind pn_list_kind, apply f to each of its children;
+// otherwise apply f to pn itself (null nodes are skipped entirely).
+static void apply_to_single_or_list(compiler_t *comp, mp_parse_node_t pn, pn_kind_t pn_list_kind, apply_list_fun_t f) {
+    if (MP_PARSE_NODE_IS_STRUCT_KIND(pn, pn_list_kind)) {
+        mp_parse_node_struct_t *pns = (mp_parse_node_struct_t *)pn;
+        int num_nodes = MP_PARSE_NODE_STRUCT_NUM_NODES(pns);
+        for (int i = 0; i < num_nodes; i++) {
+            f(comp, pns->nodes[i]);
+        }
+    } else if (!MP_PARSE_NODE_IS_NULL(pn)) {
+        f(comp, pn);
+    }
+}
+
+// Compile every child node of pns in order, stopping at the first error and
+// attaching a line number to that error if it doesn't already have one.
+static void compile_generic_all_nodes(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    int num_nodes = MP_PARSE_NODE_STRUCT_NUM_NODES(pns);
+    for (int i = 0; i < num_nodes; i++) {
+        compile_node(comp, pns->nodes[i]);
+        if (comp->compile_error != MP_OBJ_NULL) {
+            // add line info for the error in case it didn't have a line number
+            compile_error_set_line(comp, pns->nodes[i]);
+            return;
+        }
+    }
+}
+
+// Emit a load of identifier qst resolved against the current scope. During
+// the scope pass this only records the identifier usage (no code is emitted).
+static void compile_load_id(compiler_t *comp, qstr qst) {
+    if (comp->pass == MP_PASS_SCOPE) {
+        mp_emit_common_get_id_for_load(comp->scope_cur, qst);
+    } else {
+        #if NEED_METHOD_TABLE
+        mp_emit_common_id_op(comp->emit, &comp->emit_method_table->load_id, comp->scope_cur, qst);
+        #else
+        mp_emit_common_id_op(comp->emit, &mp_emit_bc_method_table_load_id_ops, comp->scope_cur, qst);
+        #endif
+    }
+}
+
+// Emit a store to identifier qst resolved against the current scope. During
+// the scope pass this only records that the identifier is modified.
+static void compile_store_id(compiler_t *comp, qstr qst) {
+    if (comp->pass == MP_PASS_SCOPE) {
+        mp_emit_common_get_id_for_modification(comp->scope_cur, qst);
+    } else {
+        #if NEED_METHOD_TABLE
+        mp_emit_common_id_op(comp->emit, &comp->emit_method_table->store_id, comp->scope_cur, qst);
+        #else
+        mp_emit_common_id_op(comp->emit, &mp_emit_bc_method_table_store_id_ops, comp->scope_cur, qst);
+        #endif
+    }
+}
+
+// Emit a delete of identifier qst resolved against the current scope. During
+// the scope pass this only records that the identifier is modified.
+static void compile_delete_id(compiler_t *comp, qstr qst) {
+    if (comp->pass == MP_PASS_SCOPE) {
+        mp_emit_common_get_id_for_modification(comp->scope_cur, qst);
+    } else {
+        #if NEED_METHOD_TABLE
+        mp_emit_common_id_op(comp->emit, &comp->emit_method_table->delete_id, comp->scope_cur, qst);
+        #else
+        mp_emit_common_id_op(comp->emit, &mp_emit_bc_method_table_delete_id_ops, comp->scope_cur, qst);
+        #endif
+    }
+}
+
+// Compile a simple tuple expression: push each element in order, then build
+// a tuple of num_nodes items on the stack.
+static void compile_generic_tuple(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    // a simple tuple expression
+    size_t num_nodes = MP_PARSE_NODE_STRUCT_NUM_NODES(pns);
+    for (size_t i = 0; i < num_nodes; i++) {
+        compile_node(comp, pns->nodes[i]);
+    }
+    EMIT_ARG(build, num_nodes, MP_EMIT_BUILD_TUPLE);
+}
+
+// Compile a conditional jump for expression pn: jump to label when the
+// expression's truth value equals jump_if. Constant true/false, and/or
+// chains and 'not' are short-circuited directly into jumps instead of
+// materialising a boolean on the stack.
+static void c_if_cond(compiler_t *comp, mp_parse_node_t pn, bool jump_if, int label) {
+    if (mp_parse_node_is_const_false(pn)) {
+        // constant false: jump is either always or never taken
+        if (jump_if == false) {
+            EMIT_ARG(jump, label);
+        }
+        return;
+    } else if (mp_parse_node_is_const_true(pn)) {
+        // constant true: jump is either always or never taken
+        if (jump_if == true) {
+            EMIT_ARG(jump, label);
+        }
+        return;
+    } else if (MP_PARSE_NODE_IS_STRUCT(pn)) {
+        mp_parse_node_struct_t *pns = (mp_parse_node_struct_t *)pn;
+        int n = MP_PARSE_NODE_STRUCT_NUM_NODES(pns);
+        if (MP_PARSE_NODE_STRUCT_KIND(pns) == PN_or_test) {
+            if (jump_if == false) {
+            and_or_logic1:;
+                // first n-1 operands short-circuit to a local label; only the
+                // last operand decides the jump to the caller's label
+                uint label2 = comp_next_label(comp);
+                for (int i = 0; i < n - 1; i++) {
+                    c_if_cond(comp, pns->nodes[i], !jump_if, label2);
+                }
+                c_if_cond(comp, pns->nodes[n - 1], jump_if, label);
+                EMIT_ARG(label_assign, label2);
+            } else {
+            and_or_logic2:
+                // any operand matching jump_if takes the jump directly
+                for (int i = 0; i < n; i++) {
+                    c_if_cond(comp, pns->nodes[i], jump_if, label);
+                }
+            }
+            return;
+        } else if (MP_PARSE_NODE_STRUCT_KIND(pns) == PN_and_test) {
+            // 'and' is the dual of 'or': swap which helper logic is used
+            if (jump_if == false) {
+                goto and_or_logic2;
+            } else {
+                goto and_or_logic1;
+            }
+        } else if (MP_PARSE_NODE_STRUCT_KIND(pns) == PN_not_test_2) {
+            // 'not x': compile x with the jump sense inverted
+            c_if_cond(comp, pns->nodes[0], !jump_if, label);
+            return;
+        }
+    }
+
+    // nothing special, fall back to default compiling for node and jump
+    compile_node(comp, pn);
+    EMIT_ARG(pop_jump_if, jump_if, label);
+}
+
+// Kind of assignment being compiled: plain store, or the load/store halves
+// of an augmented assignment (e.g. x[i] += 1).
+typedef enum { ASSIGN_STORE, ASSIGN_AUG_LOAD, ASSIGN_AUG_STORE } assign_kind_t;
+static void c_assign(compiler_t *comp, mp_parse_node_t pn, assign_kind_t kind);
+
+// Compile an assignment whose target is a trailer expression, i.e.
+// obj[index] or obj.attr, for plain stores and both halves of an
+// augmented assignment. Anything else is a syntax error.
+static void c_assign_atom_expr(compiler_t *comp, mp_parse_node_struct_t *pns, assign_kind_t assign_kind) {
+    if (assign_kind != ASSIGN_AUG_STORE) {
+        // push the base object (for AUG_STORE it's already on the stack)
+        compile_node(comp, pns->nodes[0]);
+    }
+
+    if (MP_PARSE_NODE_IS_STRUCT(pns->nodes[1])) {
+        mp_parse_node_struct_t *pns1 = (mp_parse_node_struct_t *)pns->nodes[1];
+        if (MP_PARSE_NODE_STRUCT_KIND(pns1) == PN_atom_expr_trailers) {
+            // multiple trailers: evaluate all but the last, which is the target
+            int n = MP_PARSE_NODE_STRUCT_NUM_NODES(pns1);
+            if (assign_kind != ASSIGN_AUG_STORE) {
+                for (int i = 0; i < n - 1; i++) {
+                    compile_node(comp, pns1->nodes[i]);
+                }
+            }
+            assert(MP_PARSE_NODE_IS_STRUCT(pns1->nodes[n - 1]));
+            pns1 = (mp_parse_node_struct_t *)pns1->nodes[n - 1];
+        }
+        if (MP_PARSE_NODE_STRUCT_KIND(pns1) == PN_trailer_bracket) {
+            // target is a subscript: obj[index]
+            if (assign_kind == ASSIGN_AUG_STORE) {
+                EMIT(rot_three);
+                EMIT_ARG(subscr, MP_EMIT_SUBSCR_STORE);
+            } else {
+                compile_node(comp, pns1->nodes[0]);
+                if (assign_kind == ASSIGN_AUG_LOAD) {
+                    EMIT(dup_top_two);
+                    EMIT_ARG(subscr, MP_EMIT_SUBSCR_LOAD);
+                } else {
+                    EMIT_ARG(subscr, MP_EMIT_SUBSCR_STORE);
+                }
+            }
+            return;
+        } else if (MP_PARSE_NODE_STRUCT_KIND(pns1) == PN_trailer_period) {
+            // target is an attribute: obj.attr
+            assert(MP_PARSE_NODE_IS_ID(pns1->nodes[0]));
+            if (assign_kind == ASSIGN_AUG_LOAD) {
+                EMIT(dup_top);
+                EMIT_ARG(attr, MP_PARSE_NODE_LEAF_ARG(pns1->nodes[0]), MP_EMIT_ATTR_LOAD);
+            } else {
+                if (assign_kind == ASSIGN_AUG_STORE) {
+                    EMIT(rot_two);
+                }
+                EMIT_ARG(attr, MP_PARSE_NODE_LEAF_ARG(pns1->nodes[0]), MP_EMIT_ATTR_STORE);
+            }
+            return;
+        }
+    }
+
+    compile_syntax_error(comp, (mp_parse_node_t)pns, MP_ERROR_TEXT("can't assign to expression"));
+}
+
+// Compile unpacking of the top-of-stack sequence into num_tail targets,
+// supporting at most one starred target (`a, *b, c = ...`).  An empty
+// target list (num_tail == 0) emits an unpack of zero items.
+static void c_assign_tuple(compiler_t *comp, uint num_tail, mp_parse_node_t *nodes_tail) {
+    // look for star expression; (uint)-1 is the "not found" sentinel
+    uint have_star_index = -1;
+    for (uint i = 0; i < num_tail; i++) {
+        if (MP_PARSE_NODE_IS_STRUCT_KIND(nodes_tail[i], PN_star_expr)) {
+            if (have_star_index == (uint)-1) {
+                EMIT_ARG(unpack_ex, i, num_tail - i - 1);
+                have_star_index = i;
+            } else {
+                compile_syntax_error(comp, nodes_tail[i], MP_ERROR_TEXT("multiple *x in assignment"));
+                return;
+            }
+        }
+    }
+    if (have_star_index == (uint)-1) {
+        EMIT_ARG(unpack_sequence, num_tail);
+    }
+    // store each unpacked value; the starred target stores through the
+    // inner node of its PN_star_expr wrapper
+    for (uint i = 0; i < num_tail; i++) {
+        if (i == have_star_index) {
+            c_assign(comp, ((mp_parse_node_struct_t *)nodes_tail[i])->nodes[0], ASSIGN_STORE);
+        } else {
+            c_assign(comp, nodes_tail[i], ASSIGN_STORE);
+        }
+    }
+}
+
+// assigns top of stack to pn
+// Dispatches on the target's parse-node shape: plain identifier, subscript or
+// attribute expression, tuple/list of targets, or a parenthesised/bracketed
+// target list.  Anything else is a syntax error ("can't assign to expression").
+static void c_assign(compiler_t *comp, mp_parse_node_t pn, assign_kind_t assign_kind) {
+    assert(!MP_PARSE_NODE_IS_NULL(pn));
+    if (MP_PARSE_NODE_IS_LEAF(pn)) {
+        if (MP_PARSE_NODE_IS_ID(pn)) {
+            qstr arg = MP_PARSE_NODE_LEAF_ARG(pn);
+            switch (assign_kind) {
+                case ASSIGN_STORE:
+                case ASSIGN_AUG_STORE:
+                    compile_store_id(comp, arg);
+                    break;
+                case ASSIGN_AUG_LOAD:
+                default:
+                    compile_load_id(comp, arg);
+                    break;
+            }
+        } else {
+            // a non-identifier leaf (eg a number literal) can't be a target
+            goto cannot_assign;
+        }
+    } else {
+        // pn must be a struct
+        mp_parse_node_struct_t *pns = (mp_parse_node_struct_t *)pn;
+        switch (MP_PARSE_NODE_STRUCT_KIND(pns)) {
+            case PN_atom_expr_normal:
+                // lhs is an index or attribute
+                c_assign_atom_expr(comp, pns, assign_kind);
+                break;
+
+            case PN_testlist_star_expr:
+            case PN_exprlist:
+                // lhs is a tuple
+                if (assign_kind != ASSIGN_STORE) {
+                    goto cannot_assign;
+                }
+                c_assign_tuple(comp, MP_PARSE_NODE_STRUCT_NUM_NODES(pns), pns->nodes);
+                break;
+
+            case PN_atom_paren:
+                // lhs is something in parenthesis
+                if (MP_PARSE_NODE_IS_NULL(pns->nodes[0])) {
+                    // empty tuple
+                    goto cannot_assign;
+                } else {
+                    assert(MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[0], PN_testlist_comp));
+                    if (assign_kind != ASSIGN_STORE) {
+                        goto cannot_assign;
+                    }
+                    pns = (mp_parse_node_struct_t *)pns->nodes[0];
+                    goto testlist_comp;
+                }
+                break;
+
+            case PN_atom_bracket:
+                // lhs is something in brackets
+                if (assign_kind != ASSIGN_STORE) {
+                    goto cannot_assign;
+                }
+                if (MP_PARSE_NODE_IS_NULL(pns->nodes[0])) {
+                    // empty list, assignment allowed
+                    c_assign_tuple(comp, 0, NULL);
+                } else if (MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[0], PN_testlist_comp)) {
+                    pns = (mp_parse_node_struct_t *)pns->nodes[0];
+                    goto testlist_comp;
+                } else {
+                    // brackets around 1 item
+                    c_assign_tuple(comp, 1, pns->nodes);
+                }
+                break;
+
+            default:
+                goto cannot_assign;
+        }
+        return;
+
+    testlist_comp:
+        // lhs is a sequence; a comprehension (eg `[x for x in y] = ...`)
+        // is not a valid target
+        if (MP_PARSE_NODE_TESTLIST_COMP_HAS_COMP_FOR(pns)) {
+            goto cannot_assign;
+        }
+        c_assign_tuple(comp, MP_PARSE_NODE_STRUCT_NUM_NODES(pns), pns->nodes);
+        return;
+    }
+    return;
+
+cannot_assign:
+    compile_syntax_error(comp, pn, MP_ERROR_TEXT("can't assign to expression"));
+}
+
+// stuff for lambda and comprehensions and generators:
+//  if n_pos_defaults > 0 then there is a tuple on the stack with the positional defaults
+//  if n_kw_defaults > 0 then there is a dictionary on the stack with the keyword defaults
+//  if both exist, the tuple is above the dictionary (ie the first pop gets the tuple)
+// Emits either make_function (no free variables) or make_closure (free
+// variables are first loaded onto the stack, in outer-scope definition order).
+static void close_over_variables_etc(compiler_t *comp, scope_t *this_scope, int n_pos_defaults, int n_kw_defaults) {
+    assert(n_pos_defaults >= 0);
+    assert(n_kw_defaults >= 0);
+
+    // set flags
+    if (n_kw_defaults > 0) {
+        this_scope->scope_flags |= MP_SCOPE_FLAG_DEFKWARGS;
+    }
+    this_scope->num_def_pos_args = n_pos_defaults;
+
+    #if MICROPY_EMIT_NATIVE
+    // When creating a function/closure it will take a reference to the current globals
+    comp->scope_cur->scope_flags |= MP_SCOPE_FLAG_REFGLOBALS | MP_SCOPE_FLAG_HASCONSTS;
+    #endif
+
+    // make closed over variables, if any
+    // ensure they are closed over in the order defined in the outer scope (mainly to agree with CPython)
+    int nfree = 0;
+    if (comp->scope_cur->kind != SCOPE_MODULE) {
+        for (int i = 0; i < comp->scope_cur->id_info_len; i++) {
+            id_info_t *id = &comp->scope_cur->id_info[i];
+            if (id->kind == ID_INFO_KIND_CELL || id->kind == ID_INFO_KIND_FREE) {
+                for (int j = 0; j < this_scope->id_info_len; j++) {
+                    id_info_t *id2 = &this_scope->id_info[j];
+                    if (id2->kind == ID_INFO_KIND_FREE && id->qst == id2->qst) {
+                        // in MicroPython we load closures using LOAD_FAST
+                        EMIT_LOAD_FAST(id->qst, id->local_num);
+                        nfree += 1;
+                    }
+                }
+            }
+        }
+    }
+
+    // make the function/closure
+    if (nfree == 0) {
+        EMIT_ARG(make_function, this_scope, n_pos_defaults, n_kw_defaults);
+    } else {
+        EMIT_ARG(make_closure, this_scope, nfree, n_pos_defaults, n_kw_defaults);
+    }
+}
+
+// Compile one parameter of a function/lambda parameter list.  Tracks
+// star/double-star markers and default values via comp->have_star,
+// comp->num_default_params and comp->num_dict_params; default-value
+// expressions are compiled here (positional defaults are later collected
+// into a tuple, keyword-only defaults go into a map built here).
+static void compile_funcdef_lambdef_param(compiler_t *comp, mp_parse_node_t pn) {
+    // For efficiency of the code below we extract the parse-node kind here
+    int pn_kind;
+    if (MP_PARSE_NODE_IS_ID(pn)) {
+        pn_kind = -1;
+    } else {
+        assert(MP_PARSE_NODE_IS_STRUCT(pn));
+        pn_kind = MP_PARSE_NODE_STRUCT_KIND((mp_parse_node_struct_t *)pn);
+    }
+
+    if (pn_kind == PN_typedargslist_star || pn_kind == PN_varargslist_star) {
+        comp->have_star = true;
+        /* don't need to distinguish bare from named star
+        mp_parse_node_struct_t *pns = (mp_parse_node_struct_t*)pn;
+        if (MP_PARSE_NODE_IS_NULL(pns->nodes[0])) {
+            // bare star
+        } else {
+            // named star
+        }
+        */
+
+    } else if (pn_kind == PN_typedargslist_dbl_star || pn_kind == PN_varargslist_dbl_star) {
+        // named double star
+        // TODO do we need to do anything with this?
+
+    } else {
+        mp_parse_node_t pn_id;
+        mp_parse_node_t pn_equal;
+        if (pn_kind == -1) {
+            // this parameter is just an id
+
+            pn_id = pn;
+            pn_equal = MP_PARSE_NODE_NULL;
+
+        } else if (pn_kind == PN_typedargslist_name) {
+            // this parameter has a colon and/or equal specifier
+
+            mp_parse_node_struct_t *pns = (mp_parse_node_struct_t *)pn;
+            pn_id = pns->nodes[0];
+            // pn_colon = pns->nodes[1]; // unused
+            pn_equal = pns->nodes[2];
+
+        } else {
+            assert(pn_kind == PN_varargslist_name); // should be
+            // this parameter has an equal specifier
+
+            mp_parse_node_struct_t *pns = (mp_parse_node_struct_t *)pn;
+            pn_id = pns->nodes[0];
+            pn_equal = pns->nodes[1];
+        }
+
+        if (MP_PARSE_NODE_IS_NULL(pn_equal)) {
+            // this parameter does not have a default value
+
+            // check for non-default parameters given after default parameters (allowed by parser, but not syntactically valid)
+            if (!comp->have_star && comp->num_default_params != 0) {
+                compile_syntax_error(comp, pn, MP_ERROR_TEXT("non-default argument follows default argument"));
+                return;
+            }
+
+        } else {
+            // this parameter has a default value
+            // in CPython, None (and True, False?) as default parameters are loaded with LOAD_NAME; don't understand why
+
+            if (comp->have_star) {
+                comp->num_dict_params += 1;
+                // in MicroPython we put the default dict parameters into a dictionary using the bytecode
+                if (comp->num_dict_params == 1) {
+                    // in MicroPython we put the default positional parameters into a tuple using the bytecode
+                    // we need to do this here before we start building the map for the default keywords
+                    if (comp->num_default_params > 0) {
+                        EMIT_ARG(build, comp->num_default_params, MP_EMIT_BUILD_TUPLE);
+                    } else {
+                        EMIT(load_null); // sentinel indicating empty default positional args
+                    }
+                    // first default dict param, so make the map
+                    EMIT_ARG(build, 0, MP_EMIT_BUILD_MAP);
+                }
+
+                // compile value then key, then store it to the dict
+                compile_node(comp, pn_equal);
+                EMIT_ARG(load_const_str, MP_PARSE_NODE_LEAF_ARG(pn_id));
+                EMIT(store_map);
+            } else {
+                comp->num_default_params += 1;
+                compile_node(comp, pn_equal);
+            }
+        }
+    }
+}
+
+// Compile the parameter list of a function/lambda and then create the
+// function object for `scope` (leaving it on the stack).  Saves and restores
+// the per-compiler default-parameter counters, since default-value
+// expressions may contain nested lambdas that re-enter this function.
+static void compile_funcdef_lambdef(compiler_t *comp, scope_t *scope, mp_parse_node_t pn_params, pn_kind_t pn_list_kind) {
+    // When we call compile_funcdef_lambdef_param below it can compile an arbitrary
+    // expression for default arguments, which may contain a lambda.  The lambda will
+    // call here in a nested way, so we must save and restore the relevant state.
+    bool orig_have_star = comp->have_star;
+    uint16_t orig_num_dict_params = comp->num_dict_params;
+    uint16_t orig_num_default_params = comp->num_default_params;
+
+    // compile default parameters
+    comp->have_star = false;
+    comp->num_dict_params = 0;
+    comp->num_default_params = 0;
+    apply_to_single_or_list(comp, pn_params, pn_list_kind, compile_funcdef_lambdef_param);
+
+    if (comp->compile_error != MP_OBJ_NULL) {
+        return;
+    }
+
+    // in MicroPython we put the default positional parameters into a tuple using the bytecode
+    // the default keywords args may have already made the tuple; if not, do it now
+    if (comp->num_default_params > 0 && comp->num_dict_params == 0) {
+        EMIT_ARG(build, comp->num_default_params, MP_EMIT_BUILD_TUPLE);
+        EMIT(load_null); // sentinel indicating empty default keyword args
+    }
+
+    // make the function
+    close_over_variables_etc(comp, scope, comp->num_default_params, comp->num_dict_params);
+
+    // restore state
+    comp->have_star = orig_have_star;
+    comp->num_dict_params = orig_num_dict_params;
+    comp->num_default_params = orig_num_default_params;
+}
+
+// leaves function object on stack
+// returns function name
+// On the scope pass a new SCOPE_FUNCTION is created and cached in
+// pns->nodes[4] so subsequent compiler passes can reuse it.
+static qstr compile_funcdef_helper(compiler_t *comp, mp_parse_node_struct_t *pns, uint emit_options) {
+    if (comp->pass == MP_PASS_SCOPE) {
+        // create a new scope for this function
+        scope_t *s = scope_new_and_link(comp, SCOPE_FUNCTION, (mp_parse_node_t)pns, emit_options);
+        // store the function scope so the compiling function can use it at each pass
+        pns->nodes[4] = (mp_parse_node_t)s;
+    }
+
+    // get the scope for this function
+    scope_t *fscope = (scope_t *)pns->nodes[4];
+
+    // compile the function definition
+    compile_funcdef_lambdef(comp, fscope, pns->nodes[1], PN_typedargslist);
+
+    // return its name (the 'f' in "def f(...):")
+    return fscope->simple_name;
+}
+
+// leaves class object on stack
+// returns class name
+// On the scope pass a new SCOPE_CLASS is created and cached in
+// pns->nodes[3]; the class is then built by calling __build_class__ with
+// the class body function, its name, and any parent classes.
+static qstr compile_classdef_helper(compiler_t *comp, mp_parse_node_struct_t *pns, uint emit_options) {
+    if (comp->pass == MP_PASS_SCOPE) {
+        // create a new scope for this class
+        scope_t *s = scope_new_and_link(comp, SCOPE_CLASS, (mp_parse_node_t)pns, emit_options);
+        // store the class scope so the compiling function can use it at each pass
+        pns->nodes[3] = (mp_parse_node_t)s;
+    }
+
+    EMIT(load_build_class);
+
+    // scope for this class
+    scope_t *cscope = (scope_t *)pns->nodes[3];
+
+    // compile the class
+    close_over_variables_etc(comp, cscope, 0, 0);
+
+    // get its name
+    EMIT_ARG(load_const_str, cscope->simple_name);
+
+    // nodes[1] has parent classes, if any
+    // empty parenthesis (eg class C():) gets here as an empty PN_classdef_2 and needs special handling
+    mp_parse_node_t parents = pns->nodes[1];
+    if (MP_PARSE_NODE_IS_STRUCT_KIND(parents, PN_classdef_2)) {
+        parents = MP_PARSE_NODE_NULL;
+    }
+    compile_trailer_paren_helper(comp, parents, false, 2);
+
+    // return its name (the 'C' in class C(...):")
+    return cscope->simple_name;
+}
+
+// returns true if it was a built-in decorator (even if the built-in had an error)
+// Recognises @micropython.bytecode/native/viper/asm_* decorators and maps
+// them onto *emit_options; anything else under @micropython.* is a syntax
+// error.  Non-@micropython decorators return false and compile normally.
+static bool compile_built_in_decorator(compiler_t *comp, size_t name_len, mp_parse_node_t *name_nodes, uint *emit_options) {
+    if (MP_PARSE_NODE_LEAF_ARG(name_nodes[0]) != MP_QSTR_micropython) {
+        return false;
+    }
+
+    if (name_len != 2) {
+        compile_syntax_error(comp, name_nodes[0], MP_ERROR_TEXT("invalid micropython decorator"));
+        return true;
+    }
+
+    qstr attr = MP_PARSE_NODE_LEAF_ARG(name_nodes[1]);
+    if (attr == MP_QSTR_bytecode) {
+        *emit_options = MP_EMIT_OPT_BYTECODE;
+    #if MICROPY_EMIT_NATIVE
+    } else if (attr == MP_QSTR_native) {
+        *emit_options = MP_EMIT_OPT_NATIVE_PYTHON;
+    } else if (attr == MP_QSTR_viper) {
+        *emit_options = MP_EMIT_OPT_VIPER;
+    #endif
+        #if MICROPY_EMIT_INLINE_ASM
+    #if MICROPY_DYNAMIC_COMPILER
+    } else if (attr == MP_QSTR_asm_thumb) {
+        *emit_options = MP_EMIT_OPT_ASM;
+    } else if (attr == MP_QSTR_asm_xtensa) {
+        *emit_options = MP_EMIT_OPT_ASM;
+    #else
+    } else if (attr == ASM_DECORATOR_QSTR) {
+        *emit_options = MP_EMIT_OPT_ASM;
+    #endif
+        #endif
+    } else {
+        compile_syntax_error(comp, name_nodes[1], MP_ERROR_TEXT("invalid micropython decorator"));
+    }
+
+    #if MICROPY_EMIT_NATIVE && MICROPY_DYNAMIC_COMPILER
+    // with a dynamic compiler, verify the selected emitter exists for the
+    // configured target architecture
+    if (*emit_options == MP_EMIT_OPT_NATIVE_PYTHON || *emit_options == MP_EMIT_OPT_VIPER) {
+        if (emit_native_table[mp_dynamic_compiler.native_arch] == NULL) {
+            compile_syntax_error(comp, name_nodes[1], MP_ERROR_TEXT("invalid arch"));
+        }
+    } else if (*emit_options == MP_EMIT_OPT_ASM) {
+        if (emit_asm_table[mp_dynamic_compiler.native_arch] == NULL) {
+            compile_syntax_error(comp, name_nodes[1], MP_ERROR_TEXT("invalid arch"));
+        }
+    }
+    #endif
+
+    return true;
+}
+
+// Compile a decorated funcdef/async-funcdef/classdef: compile each
+// non-built-in decorator expression, compile the body to get the
+// function/class object, apply the decorators innermost-first via
+// single-argument calls, then store the result under the body's name.
+static void compile_decorated(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    // get the list of decorators
+    mp_parse_node_t *nodes;
+    size_t n = mp_parse_node_extract_list(&pns->nodes[0], PN_decorators, &nodes);
+
+    // inherit emit options for this function/class definition
+    uint emit_options = comp->scope_cur->emit_options;
+
+    // compile each decorator
+    size_t num_built_in_decorators = 0;
+    for (size_t i = 0; i < n; i++) {
+        assert(MP_PARSE_NODE_IS_STRUCT_KIND(nodes[i], PN_decorator)); // should be
+        mp_parse_node_struct_t *pns_decorator = (mp_parse_node_struct_t *)nodes[i];
+
+        // nodes[0] contains the decorator function, which is a dotted name
+        mp_parse_node_t *name_nodes;
+        size_t name_len = mp_parse_node_extract_list(&pns_decorator->nodes[0], PN_dotted_name, &name_nodes);
+
+        // check for built-in decorators
+        if (compile_built_in_decorator(comp, name_len, name_nodes, &emit_options)) {
+            // this was a built-in
+            num_built_in_decorators += 1;
+
+        } else {
+            // not a built-in, compile normally
+
+            // compile the decorator function
+            compile_node(comp, name_nodes[0]);
+            for (size_t j = 1; j < name_len; j++) {
+                assert(MP_PARSE_NODE_IS_ID(name_nodes[j])); // should be
+                EMIT_ARG(attr, MP_PARSE_NODE_LEAF_ARG(name_nodes[j]), MP_EMIT_ATTR_LOAD);
+            }
+
+            // nodes[1] contains arguments to the decorator function, if any
+            if (!MP_PARSE_NODE_IS_NULL(pns_decorator->nodes[1])) {
+                // call the decorator function with the arguments in nodes[1]
+                compile_node(comp, pns_decorator->nodes[1]);
+            }
+        }
+    }
+
+    // compile the body (funcdef, async funcdef or classdef) and get its name
+    mp_parse_node_struct_t *pns_body = (mp_parse_node_struct_t *)pns->nodes[1];
+    qstr body_name = 0;
+    if (MP_PARSE_NODE_STRUCT_KIND(pns_body) == PN_funcdef) {
+        body_name = compile_funcdef_helper(comp, pns_body, emit_options);
+    #if MICROPY_PY_ASYNC_AWAIT
+    } else if (MP_PARSE_NODE_STRUCT_KIND(pns_body) == PN_async_funcdef) {
+        // async def compiles as a generator function
+        assert(MP_PARSE_NODE_IS_STRUCT(pns_body->nodes[0]));
+        mp_parse_node_struct_t *pns0 = (mp_parse_node_struct_t *)pns_body->nodes[0];
+        body_name = compile_funcdef_helper(comp, pns0, emit_options);
+        scope_t *fscope = (scope_t *)pns0->nodes[4];
+        fscope->scope_flags |= MP_SCOPE_FLAG_GENERATOR;
+    #endif
+    } else {
+        assert(MP_PARSE_NODE_STRUCT_KIND(pns_body) == PN_classdef); // should be
+        body_name = compile_classdef_helper(comp, pns_body, emit_options);
+    }
+
+    // call each decorator; built-in decorators left nothing on the stack
+    for (size_t i = 0; i < n - num_built_in_decorators; i++) {
+        EMIT_ARG(call_function, 1, 0, 0);
+    }
+
+    // store func/class object into name
+    compile_store_id(comp, body_name);
+}
+
+// Compile an undecorated `def` statement: build the function object and
+// bind it to its name in the current scope.
+static void compile_funcdef(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    qstr fname = compile_funcdef_helper(comp, pns, comp->scope_cur->emit_options);
+    // store function object into function name
+    compile_store_id(comp, fname);
+}
+
+// Compile deletion of a single target: an identifier, a subscript/attribute
+// expression, or a parenthesised tuple of targets (handled recursively).
+// Anything else is a syntax error ("can't delete expression").
+static void c_del_stmt(compiler_t *comp, mp_parse_node_t pn) {
+    if (MP_PARSE_NODE_IS_ID(pn)) {
+        compile_delete_id(comp, MP_PARSE_NODE_LEAF_ARG(pn));
+    } else if (MP_PARSE_NODE_IS_STRUCT_KIND(pn, PN_atom_expr_normal)) {
+        mp_parse_node_struct_t *pns = (mp_parse_node_struct_t *)pn;
+
+        compile_node(comp, pns->nodes[0]); // base of the atom_expr_normal node
+
+        if (MP_PARSE_NODE_IS_STRUCT(pns->nodes[1])) {
+            mp_parse_node_struct_t *pns1 = (mp_parse_node_struct_t *)pns->nodes[1];
+            if (MP_PARSE_NODE_STRUCT_KIND(pns1) == PN_atom_expr_trailers) {
+                // multiple trailers: evaluate all but the last, delete via the last
+                int n = MP_PARSE_NODE_STRUCT_NUM_NODES(pns1);
+                for (int i = 0; i < n - 1; i++) {
+                    compile_node(comp, pns1->nodes[i]);
+                }
+                assert(MP_PARSE_NODE_IS_STRUCT(pns1->nodes[n - 1]));
+                pns1 = (mp_parse_node_struct_t *)pns1->nodes[n - 1];
+            }
+            if (MP_PARSE_NODE_STRUCT_KIND(pns1) == PN_trailer_bracket) {
+                compile_node(comp, pns1->nodes[0]);
+                EMIT_ARG(subscr, MP_EMIT_SUBSCR_DELETE);
+            } else if (MP_PARSE_NODE_STRUCT_KIND(pns1) == PN_trailer_period) {
+                assert(MP_PARSE_NODE_IS_ID(pns1->nodes[0]));
+                EMIT_ARG(attr, MP_PARSE_NODE_LEAF_ARG(pns1->nodes[0]), MP_EMIT_ATTR_DELETE);
+            } else {
+                goto cannot_delete;
+            }
+        } else {
+            goto cannot_delete;
+        }
+
+    } else if (MP_PARSE_NODE_IS_STRUCT_KIND(pn, PN_atom_paren)) {
+        // parenthesised target list, eg `del (a, b)`: delete each element
+        pn = ((mp_parse_node_struct_t *)pn)->nodes[0];
+        if (MP_PARSE_NODE_IS_NULL(pn)) {
+            goto cannot_delete;
+        } else {
+            assert(MP_PARSE_NODE_IS_STRUCT_KIND(pn, PN_testlist_comp));
+            mp_parse_node_struct_t *pns = (mp_parse_node_struct_t *)pn;
+            if (MP_PARSE_NODE_TESTLIST_COMP_HAS_COMP_FOR(pns)) {
+                goto cannot_delete;
+            }
+            for (size_t i = 0; i < MP_PARSE_NODE_STRUCT_NUM_NODES(pns); ++i) {
+                c_del_stmt(comp, pns->nodes[i]);
+            }
+        }
+    } else {
+        // some arbitrary statement that we can't delete (eg del 1)
+        goto cannot_delete;
+    }
+
+    return;
+
+cannot_delete:
+    compile_syntax_error(comp, (mp_parse_node_t)pn, MP_ERROR_TEXT("can't delete expression"));
+}
+
+// Compile a `del` statement by applying c_del_stmt to each target in the
+// (possibly single-element) expression list.
+static void compile_del_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    apply_to_single_or_list(comp, pns->nodes[0], PN_exprlist, c_del_stmt);
+}
+
+// Compile `break` or `continue`: jump to the current loop's break/continue
+// label, unwinding any exception handlers entered inside the loop.  Outside
+// a loop the label is INVALID_LABEL and a syntax error is raised.
+static void compile_break_cont_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    uint16_t label;
+    if (MP_PARSE_NODE_STRUCT_KIND(pns) == PN_break_stmt) {
+        label = comp->break_label;
+    } else {
+        label = comp->continue_label;
+    }
+    if (label == INVALID_LABEL) {
+        compile_syntax_error(comp, (mp_parse_node_t)pns, MP_ERROR_TEXT("'break'/'continue' outside loop"));
+    }
+    assert(comp->cur_except_level >= comp->break_continue_except_level);
+    EMIT_ARG(unwind_jump, label, comp->cur_except_level - comp->break_continue_except_level);
+}
+
+// Compile a `return` statement.  A bare `return` returns None; returning a
+// conditional expression gets a special two-branch encoding (matching a
+// CPython optimisation) when MICROPY_COMP_RETURN_IF_EXPR is enabled.
+static void compile_return_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    #if MICROPY_CPYTHON_COMPAT
+    if (comp->scope_cur->kind != SCOPE_FUNCTION) {
+        compile_syntax_error(comp, (mp_parse_node_t)pns, MP_ERROR_TEXT("'return' outside function"));
+        return;
+    }
+    #endif
+    if (MP_PARSE_NODE_IS_NULL(pns->nodes[0])) {
+        // no argument to 'return', so return None
+        EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE);
+    } else if (MICROPY_COMP_RETURN_IF_EXPR
+               && MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[0], PN_test_if_expr)) {
+        // special case when returning an if-expression; to match CPython optimisation
+        mp_parse_node_struct_t *pns_test_if_expr = (mp_parse_node_struct_t *)pns->nodes[0];
+        mp_parse_node_struct_t *pns_test_if_else = (mp_parse_node_struct_t *)pns_test_if_expr->nodes[1];
+
+        uint l_fail = comp_next_label(comp);
+        c_if_cond(comp, pns_test_if_else->nodes[0], false, l_fail); // condition
+        compile_node(comp, pns_test_if_expr->nodes[0]); // success value
+        EMIT(return_value);
+        EMIT_ARG(label_assign, l_fail);
+        compile_node(comp, pns_test_if_else->nodes[1]); // failure value
+    } else {
+        compile_node(comp, pns->nodes[0]);
+    }
+    EMIT(return_value);
+}
+
+// Compile a bare `yield` expression used as a statement: evaluate it and
+// discard the value sent back into the generator.
+static void compile_yield_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    compile_node(comp, pns->nodes[0]);
+    EMIT(pop_top);
+}
+
+// Compile a `raise` statement in its three forms: bare re-raise (0 args),
+// `raise x from y` (2 args), and `raise x` (1 arg).
+static void compile_raise_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    if (MP_PARSE_NODE_IS_NULL(pns->nodes[0])) {
+        // raise
+        EMIT_ARG(raise_varargs, 0);
+    } else if (MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[0], PN_raise_stmt_arg)) {
+        // raise x from y
+        pns = (mp_parse_node_struct_t *)pns->nodes[0];
+        compile_node(comp, pns->nodes[0]);
+        compile_node(comp, pns->nodes[1]);
+        EMIT_ARG(raise_varargs, 2);
+    } else {
+        // raise x
+        compile_node(comp, pns->nodes[0]);
+        EMIT_ARG(raise_varargs, 1);
+    }
+}
+
+// q_base holds the base of the name
+// eg   a -> q_base=a
+//      a.b.c -> q_base=a
+// Emits the IMPORT_NAME for a (possibly dotted, possibly aliased-with-as)
+// module name.  For a dotted name the full "a.b.c" qstr is assembled in a
+// temporary buffer; with an `as` alias the attribute chain is then walked
+// so the innermost module ends up on the stack.
+static void do_import_name(compiler_t *comp, mp_parse_node_t pn, qstr *q_base) {
+    bool is_as = false;
+    if (MP_PARSE_NODE_IS_STRUCT_KIND(pn, PN_dotted_as_name)) {
+        mp_parse_node_struct_t *pns = (mp_parse_node_struct_t *)pn;
+        // a name of the form x as y; unwrap it
+        *q_base = MP_PARSE_NODE_LEAF_ARG(pns->nodes[1]);
+        pn = pns->nodes[0];
+        is_as = true;
+    }
+    if (MP_PARSE_NODE_IS_NULL(pn)) {
+        // empty name (eg, from . import x)
+        *q_base = MP_QSTR_;
+        EMIT_ARG(import, MP_QSTR_, MP_EMIT_IMPORT_NAME); // import the empty string
+    } else if (MP_PARSE_NODE_IS_ID(pn)) {
+        // just a simple name
+        qstr q_full = MP_PARSE_NODE_LEAF_ARG(pn);
+        if (!is_as) {
+            *q_base = q_full;
+        }
+        EMIT_ARG(import, q_full, MP_EMIT_IMPORT_NAME);
+    } else {
+        assert(MP_PARSE_NODE_IS_STRUCT_KIND(pn, PN_dotted_name)); // should be
+        mp_parse_node_struct_t *pns = (mp_parse_node_struct_t *)pn;
+        {
+            // a name of the form a.b.c
+            if (!is_as) {
+                *q_base = MP_PARSE_NODE_LEAF_ARG(pns->nodes[0]);
+            }
+            size_t n = MP_PARSE_NODE_STRUCT_NUM_NODES(pns);
+            if (n == 0) {
+                // There must be at least one node in this PN_dotted_name.
+                // Let the compiler know this so it doesn't warn, and can generate better code.
+                MP_UNREACHABLE;
+            }
+            // total length: component lengths plus n-1 separating dots
+            size_t len = n - 1;
+            for (size_t i = 0; i < n; i++) {
+                len += qstr_len(MP_PARSE_NODE_LEAF_ARG(pns->nodes[i]));
+            }
+            char *q_ptr = mp_local_alloc(len);
+            char *str_dest = q_ptr;
+            for (size_t i = 0; i < n; i++) {
+                if (i > 0) {
+                    *str_dest++ = '.';
+                }
+                size_t str_src_len;
+                const byte *str_src = qstr_data(MP_PARSE_NODE_LEAF_ARG(pns->nodes[i]), &str_src_len);
+                memcpy(str_dest, str_src, str_src_len);
+                str_dest += str_src_len;
+            }
+            qstr q_full = qstr_from_strn(q_ptr, len);
+            mp_local_free(q_ptr);
+            EMIT_ARG(import, q_full, MP_EMIT_IMPORT_NAME);
+            if (is_as) {
+                // `import a.b.c as y`: follow the attribute chain to get c
+                for (size_t i = 1; i < n; i++) {
+                    EMIT_ARG(attr, MP_PARSE_NODE_LEAF_ARG(pns->nodes[i]), MP_EMIT_ATTR_LOAD);
+                }
+            }
+        }
+    }
+}
+
+// Compile one `import x` / `import x.y as z` item: push the import level
+// and fromlist (None), do the import, and store under the bound name.
+static void compile_dotted_as_name(compiler_t *comp, mp_parse_node_t pn) {
+    EMIT_ARG(load_const_small_int, 0); // level 0 import
+    EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE); // not importing from anything
+    qstr q_base;
+    do_import_name(comp, pn, &q_base);
+    compile_store_id(comp, q_base);
+}
+
+// Compile an `import` statement: handle each comma-separated dotted name.
+static void compile_import_name(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    apply_to_single_or_list(comp, pns->nodes[0], PN_dotted_as_names, compile_dotted_as_name);
+}
+
+// Compile a `from ... import ...` statement.  First counts leading dots
+// (each `...` token counts as 3) to get the relative-import level, then
+// handles either `import *` or an explicit name list with optional aliases.
+static void compile_import_from(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    mp_parse_node_t pn_import_source = pns->nodes[0];
+
+    // extract the preceding .'s (if any) for a relative import, to compute the import level
+    uint import_level = 0;
+    do {
+        mp_parse_node_t pn_rel;
+        if (MP_PARSE_NODE_IS_TOKEN(pn_import_source) || MP_PARSE_NODE_IS_STRUCT_KIND(pn_import_source, PN_one_or_more_period_or_ellipsis)) {
+            // This covers relative imports with dots only like "from .. import"
+            pn_rel = pn_import_source;
+            pn_import_source = MP_PARSE_NODE_NULL;
+        } else if (MP_PARSE_NODE_IS_STRUCT_KIND(pn_import_source, PN_import_from_2b)) {
+            // This covers relative imports starting with dot(s) like "from .foo import"
+            mp_parse_node_struct_t *pns_2b = (mp_parse_node_struct_t *)pn_import_source;
+            pn_rel = pns_2b->nodes[0];
+            pn_import_source = pns_2b->nodes[1];
+            assert(!MP_PARSE_NODE_IS_NULL(pn_import_source)); // should not be
+        } else {
+            // Not a relative import
+            break;
+        }
+
+        // get the list of . and/or ...'s
+        mp_parse_node_t *nodes;
+        size_t n = mp_parse_node_extract_list(&pn_rel, PN_one_or_more_period_or_ellipsis, &nodes);
+
+        // count the total number of .'s
+        for (size_t i = 0; i < n; i++) {
+            if (MP_PARSE_NODE_IS_TOKEN_KIND(nodes[i], MP_TOKEN_DEL_PERIOD)) {
+                import_level++;
+            } else {
+                // should be an MP_TOKEN_ELLIPSIS
+                import_level += 3;
+            }
+        }
+    } while (0);
+
+    if (MP_PARSE_NODE_IS_TOKEN_KIND(pns->nodes[1], MP_TOKEN_OP_STAR)) {
+        #if MICROPY_CPYTHON_COMPAT
+        if (comp->scope_cur->kind != SCOPE_MODULE) {
+            compile_syntax_error(comp, (mp_parse_node_t)pns, MP_ERROR_TEXT("import * not at module level"));
+            return;
+        }
+        #endif
+
+        EMIT_ARG(load_const_small_int, import_level);
+
+        // build the "fromlist" tuple
+        EMIT_ARG(load_const_str, MP_QSTR__star_);
+        EMIT_ARG(build, 1, MP_EMIT_BUILD_TUPLE);
+
+        // do the import
+        qstr dummy_q;
+        do_import_name(comp, pn_import_source, &dummy_q);
+        EMIT_ARG(import, MP_QSTRnull, MP_EMIT_IMPORT_STAR);
+
+    } else {
+        EMIT_ARG(load_const_small_int, import_level);
+
+        // build the "fromlist" tuple
+        mp_parse_node_t *pn_nodes;
+        size_t n = mp_parse_node_extract_list(&pns->nodes[1], PN_import_as_names, &pn_nodes);
+        for (size_t i = 0; i < n; i++) {
+            assert(MP_PARSE_NODE_IS_STRUCT_KIND(pn_nodes[i], PN_import_as_name));
+            mp_parse_node_struct_t *pns3 = (mp_parse_node_struct_t *)pn_nodes[i];
+            qstr id2 = MP_PARSE_NODE_LEAF_ARG(pns3->nodes[0]); // should be id
+            EMIT_ARG(load_const_str, id2);
+        }
+        EMIT_ARG(build, n, MP_EMIT_BUILD_TUPLE);
+
+        // do the import
+        qstr dummy_q;
+        do_import_name(comp, pn_import_source, &dummy_q);
+        // bind each imported name, using the alias (nodes[1]) if present
+        for (size_t i = 0; i < n; i++) {
+            assert(MP_PARSE_NODE_IS_STRUCT_KIND(pn_nodes[i], PN_import_as_name));
+            mp_parse_node_struct_t *pns3 = (mp_parse_node_struct_t *)pn_nodes[i];
+            qstr id2 = MP_PARSE_NODE_LEAF_ARG(pns3->nodes[0]); // should be id
+            EMIT_ARG(import, id2, MP_EMIT_IMPORT_FROM);
+            if (MP_PARSE_NODE_IS_NULL(pns3->nodes[1])) {
+                compile_store_id(comp, id2);
+            } else {
+                compile_store_id(comp, MP_PARSE_NODE_LEAF_ARG(pns3->nodes[1]));
+            }
+        }
+        // pop the imported module object
+        EMIT(pop_top);
+    }
+}
+
+// Handle a name in a `global` declaration: mark the id as explicitly global
+// in the current scope (error if already bound otherwise), and propagate
+// the marking to the module scope's entry for the same name, if any.
+static void compile_declare_global(compiler_t *comp, mp_parse_node_t pn, id_info_t *id_info) {
+    if (id_info->kind != ID_INFO_KIND_UNDECIDED && id_info->kind != ID_INFO_KIND_GLOBAL_EXPLICIT) {
+        compile_syntax_error(comp, pn, MP_ERROR_TEXT("identifier redefined as global"));
+        return;
+    }
+    id_info->kind = ID_INFO_KIND_GLOBAL_EXPLICIT;
+
+    // if the id exists in the global scope, set its kind to EXPLICIT_GLOBAL
+    id_info = scope_find_global(comp->scope_cur, id_info->qst);
+    if (id_info != NULL) {
+        id_info->kind = ID_INFO_KIND_GLOBAL_EXPLICIT;
+    }
+}
+
+// Handle one name listed in a `nonlocal` statement: it must close over a
+// binding in an enclosing function scope (becoming a "free" variable),
+// otherwise a syntax error is raised.
+static void compile_declare_nonlocal(compiler_t *comp, mp_parse_node_t pn, id_info_t *id_info) {
+    if (id_info->kind == ID_INFO_KIND_UNDECIDED) {
+        id_info->kind = ID_INFO_KIND_GLOBAL_IMPLICIT;
+        scope_check_to_close_over(comp->scope_cur, id_info);
+        // if the kind is unchanged then no enclosing binding was found
+        if (id_info->kind == ID_INFO_KIND_GLOBAL_IMPLICIT) {
+            compile_syntax_error(comp, pn, MP_ERROR_TEXT("no binding for nonlocal found"));
+        }
+    } else if (id_info->kind != ID_INFO_KIND_FREE) {
+        compile_syntax_error(comp, pn, MP_ERROR_TEXT("identifier redefined as nonlocal"));
+    }
+}
+
+// Route one declared name to the `global` or `nonlocal` handler.
+static void compile_declare_global_or_nonlocal(compiler_t *comp, mp_parse_node_t pn, id_info_t *id_info, bool is_global) {
+    if (!is_global) {
+        compile_declare_nonlocal(comp, pn, id_info);
+        return;
+    }
+    compile_declare_global(comp, pn, id_info);
+}
+
+// Compile a `global` or `nonlocal` statement.  All the work happens during
+// the scope pass, where the declared kind of each listed identifier is
+// recorded; later passes emit no code for these statements.
+static void compile_global_nonlocal_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    if (comp->pass == MP_PASS_SCOPE) {
+        bool is_global = MP_PARSE_NODE_STRUCT_KIND(pns) == PN_global_stmt;
+
+        // `nonlocal` is meaningless at module level
+        if (!is_global && comp->scope_cur->kind == SCOPE_MODULE) {
+            compile_syntax_error(comp, (mp_parse_node_t)pns, MP_ERROR_TEXT("can't declare nonlocal in outer code"));
+            return;
+        }
+
+        mp_parse_node_t *nodes;
+        size_t n = mp_parse_node_extract_list(&pns->nodes[0], PN_name_list, &nodes);
+        for (size_t i = 0; i < n; i++) {
+            qstr qst = MP_PARSE_NODE_LEAF_ARG(nodes[i]);
+            id_info_t *id_info = scope_find_or_add_id(comp->scope_cur, qst, ID_INFO_KIND_UNDECIDED);
+            compile_declare_global_or_nonlocal(comp, (mp_parse_node_t)pns, id_info, is_global);
+        }
+    }
+}
+
+// Compile an `assert` statement: jump past the raise when the condition
+// holds, otherwise raise AssertionError (constructed with the optional
+// message argument if one was given).
+static void compile_assert_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    // with optimisations enabled we don't compile assertions
+    if (MP_STATE_VM(mp_optimise_value) != 0) {
+        return;
+    }
+
+    uint l_end = comp_next_label(comp);
+    c_if_cond(comp, pns->nodes[0], true, l_end);
+    EMIT_LOAD_GLOBAL(MP_QSTR_AssertionError); // we load_global instead of load_id, to be consistent with CPython
+    if (!MP_PARSE_NODE_IS_NULL(pns->nodes[1])) {
+        // assertion message
+        compile_node(comp, pns->nodes[1]);
+        EMIT_ARG(call_function, 1, 0, 0);
+    }
+    EMIT_ARG(raise_varargs, 1);
+    EMIT_ARG(label_assign, l_end);
+}
+
+// Compile an if/elif/else statement.  Conditions that are compile-time
+// constants are folded: false branches emit nothing and a true branch
+// makes the remaining elif/else clauses unreachable (goto done).
+static void compile_if_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    uint l_end = comp_next_label(comp);
+
+    // optimisation: don't emit anything when "if False"
+    if (!mp_parse_node_is_const_false(pns->nodes[0])) {
+        uint l_fail = comp_next_label(comp);
+        c_if_cond(comp, pns->nodes[0], false, l_fail); // if condition
+
+        compile_node(comp, pns->nodes[1]); // if block
+
+        // optimisation: skip everything else when "if True"
+        if (mp_parse_node_is_const_true(pns->nodes[0])) {
+            goto done;
+        }
+
+        // optimisation: don't jump over non-existent elif/else blocks
+        if (!(MP_PARSE_NODE_IS_NULL(pns->nodes[2]) && MP_PARSE_NODE_IS_NULL(pns->nodes[3]))) {
+            // jump over elif/else blocks
+            EMIT_ARG(jump, l_end);
+        }
+
+        EMIT_ARG(label_assign, l_fail);
+    }
+
+    // compile elif blocks (if any)
+    mp_parse_node_t *pn_elif;
+    size_t n_elif = mp_parse_node_extract_list(&pns->nodes[2], PN_if_stmt_elif_list, &pn_elif);
+    for (size_t i = 0; i < n_elif; i++) {
+        assert(MP_PARSE_NODE_IS_STRUCT_KIND(pn_elif[i], PN_if_stmt_elif)); // should be
+        mp_parse_node_struct_t *pns_elif = (mp_parse_node_struct_t *)pn_elif[i];
+
+        // optimisation: don't emit anything when "if False"
+        if (!mp_parse_node_is_const_false(pns_elif->nodes[0])) {
+            uint l_fail = comp_next_label(comp);
+            c_if_cond(comp, pns_elif->nodes[0], false, l_fail); // elif condition
+
+            compile_node(comp, pns_elif->nodes[1]); // elif block
+
+            // optimisation: skip everything else when "elif True"
+            if (mp_parse_node_is_const_true(pns_elif->nodes[0])) {
+                goto done;
+            }
+
+            EMIT_ARG(jump, l_end);
+            EMIT_ARG(label_assign, l_fail);
+        }
+    }
+
+    // compile else block
+    compile_node(comp, pns->nodes[3]); // can be null
+
+done:
+    EMIT_ARG(label_assign, l_end);
+}
+
+// Save the enclosing loop's break/continue labels (and except nesting level)
+// and allocate fresh `break_label`/`continue_label` locals for a new loop.
+// Must be paired with END_BREAK_CONTINUE_BLOCK in the same scope.
+#define START_BREAK_CONTINUE_BLOCK \
+    uint16_t old_break_label = comp->break_label; \
+    uint16_t old_continue_label = comp->continue_label; \
+    uint16_t old_break_continue_except_level = comp->break_continue_except_level; \
+    uint break_label = comp_next_label(comp); \
+    uint continue_label = comp_next_label(comp); \
+    comp->break_label = break_label; \
+    comp->continue_label = continue_label; \
+    comp->break_continue_except_level = comp->cur_except_level;
+
+// Restore the state saved by START_BREAK_CONTINUE_BLOCK, so break/continue in
+// subsequent code (e.g. a loop's else clause) target the outer loop again.
+#define END_BREAK_CONTINUE_BLOCK \
+    comp->break_label = old_break_label; \
+    comp->continue_label = old_continue_label; \
+    comp->break_continue_except_level = old_break_continue_except_level;
+
+// Compile a while statement.  The condition test is placed at the bottom of
+// the loop (the continue target), with an initial jump to it, so each
+// iteration's body falls through without an extra jump.
+static void compile_while_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    START_BREAK_CONTINUE_BLOCK
+
+    if (!mp_parse_node_is_const_false(pns->nodes[0])) { // optimisation: don't emit anything for "while False"
+        uint top_label = comp_next_label(comp);
+        if (!mp_parse_node_is_const_true(pns->nodes[0])) { // optimisation: don't jump to cond for "while True"
+            EMIT_ARG(jump, continue_label);
+        }
+        EMIT_ARG(label_assign, top_label);
+        compile_node(comp, pns->nodes[1]); // body
+        EMIT_ARG(label_assign, continue_label);
+        c_if_cond(comp, pns->nodes[0], true, top_label); // condition
+    }
+
+    // break/continue apply to outer loop (if any) in the else block
+    END_BREAK_CONTINUE_BLOCK
+
+    compile_node(comp, pns->nodes[2]); // else
+
+    EMIT_ARG(label_assign, break_label);
+}
+
+// This function compiles an optimised for-loop of the form:
+//      for <var> in range(<start>, <end>, <step>):
+//          <body>
+//      else:
+//          <else>
+// <var> must be an identifier and <step> must be a small-int.
+//
+// Semantics of for-loop require:
+//  - final failing value should not be stored in the loop variable
+//  - if the loop never runs, the loop variable should never be assigned
+//  - assignments to <var>, <end> or <step> in the body do not alter the loop
+//    (<step> is a constant for us, so no need to worry about it changing)
+//
+// If <end> is a small-int, then the stack during the for-loop contains just
+// the current value of <var>.  Otherwise, the stack contains <end> then the
+// current value of <var>.
+static void compile_for_stmt_optimised_range(compiler_t *comp, mp_parse_node_t pn_var, mp_parse_node_t pn_start, mp_parse_node_t pn_end, mp_parse_node_t pn_step, mp_parse_node_t pn_body, mp_parse_node_t pn_else) {
+    START_BREAK_CONTINUE_BLOCK
+
+    uint top_label = comp_next_label(comp);
+    uint entry_label = comp_next_label(comp);
+
+    // put the end value on the stack if it's not a small-int constant
+    bool end_on_stack = !MP_PARSE_NODE_IS_SMALL_INT(pn_end);
+    if (end_on_stack) {
+        compile_node(comp, pn_end);
+    }
+
+    // compile: start
+    compile_node(comp, pn_start);
+
+    EMIT_ARG(jump, entry_label);
+    EMIT_ARG(label_assign, top_label);
+
+    // duplicate next value and store it to var
+    EMIT(dup_top);
+    c_assign(comp, pn_var, ASSIGN_STORE);
+
+    // compile body
+    compile_node(comp, pn_body);
+
+    EMIT_ARG(label_assign, continue_label);
+
+    // compile: var + step
+    compile_node(comp, pn_step);
+    EMIT_ARG(binary_op, MP_BINARY_OP_INPLACE_ADD);
+
+    EMIT_ARG(label_assign, entry_label);
+
+    // compile: if var <cond> end: goto top
+    if (end_on_stack) {
+        EMIT(dup_top_two);
+        EMIT(rot_two);
+    } else {
+        EMIT(dup_top);
+        compile_node(comp, pn_end);
+    }
+    // the comparison direction depends on the sign of the constant step
+    assert(MP_PARSE_NODE_IS_SMALL_INT(pn_step));
+    if (MP_PARSE_NODE_LEAF_SMALL_INT(pn_step) >= 0) {
+        EMIT_ARG(binary_op, MP_BINARY_OP_LESS);
+    } else {
+        EMIT_ARG(binary_op, MP_BINARY_OP_MORE);
+    }
+    EMIT_ARG(pop_jump_if, true, top_label);
+
+    // break/continue apply to outer loop (if any) in the else block
+    END_BREAK_CONTINUE_BLOCK
+
+    // Compile the else block.  We must pop the iterator variables before
+    // executing the else code because it may contain break/continue statements.
+    uint end_label = 0;
+    if (!MP_PARSE_NODE_IS_NULL(pn_else)) {
+        // discard final value of "var", and possible "end" value
+        EMIT(pop_top);
+        if (end_on_stack) {
+            EMIT(pop_top);
+        }
+        compile_node(comp, pn_else);
+        end_label = comp_next_label(comp);
+        EMIT_ARG(jump, end_label);
+        // restore the emitter's tracked stack depth for the break path,
+        // which still has var (and end, if present) on the stack
+        EMIT_ARG(adjust_stack_size, 1 + end_on_stack);
+    }
+
+    EMIT_ARG(label_assign, break_label);
+
+    // discard final value of var that failed the loop condition
+    EMIT(pop_top);
+
+    // discard <end> value if it's on the stack
+    if (end_on_stack) {
+        EMIT(pop_top);
+    }
+
+    if (!MP_PARSE_NODE_IS_NULL(pn_else)) {
+        EMIT_ARG(label_assign, end_label);
+    }
+}
+
+// Compile a for statement.  A `for <id> in range(...)` with 1-3 suitable
+// constant-step arguments is lowered to a counting loop via
+// compile_for_stmt_optimised_range; anything else uses the generic
+// iterator protocol (get_iter/for_iter).
+static void compile_for_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    // this bit optimises: for <x> in range(...), turning it into an explicitly incremented variable
+    // this is actually slower, but uses no heap memory
+    // for viper it will be much, much faster
+    if (/*comp->scope_cur->emit_options == MP_EMIT_OPT_VIPER &&*/ MP_PARSE_NODE_IS_ID(pns->nodes[0]) && MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[1], PN_atom_expr_normal)) {
+        mp_parse_node_struct_t *pns_it = (mp_parse_node_struct_t *)pns->nodes[1];
+        if (MP_PARSE_NODE_IS_ID(pns_it->nodes[0])
+            && MP_PARSE_NODE_LEAF_ARG(pns_it->nodes[0]) == MP_QSTR_range
+            && MP_PARSE_NODE_STRUCT_KIND((mp_parse_node_struct_t *)pns_it->nodes[1]) == PN_trailer_paren) {
+            mp_parse_node_t pn_range_args = ((mp_parse_node_struct_t *)pns_it->nodes[1])->nodes[0];
+            mp_parse_node_t *args;
+            size_t n_args = mp_parse_node_extract_list(&pn_range_args, PN_arglist, &args);
+            mp_parse_node_t pn_range_start;
+            mp_parse_node_t pn_range_end;
+            mp_parse_node_t pn_range_step;
+            bool optimize = false;
+            if (1 <= n_args && n_args <= 3) {
+                optimize = true;
+                // fill in implicit start/step for the 1- and 2-argument forms
+                if (n_args == 1) {
+                    pn_range_start = mp_parse_node_new_small_int(0);
+                    pn_range_end = args[0];
+                    pn_range_step = mp_parse_node_new_small_int(1);
+                } else if (n_args == 2) {
+                    pn_range_start = args[0];
+                    pn_range_end = args[1];
+                    pn_range_step = mp_parse_node_new_small_int(1);
+                } else {
+                    pn_range_start = args[0];
+                    pn_range_end = args[1];
+                    pn_range_step = args[2];
+                    // the step must be a non-zero constant integer to do the optimisation
+                    if (!MP_PARSE_NODE_IS_SMALL_INT(pn_range_step)
+                        || MP_PARSE_NODE_LEAF_SMALL_INT(pn_range_step) == 0) {
+                        optimize = false;
+                    }
+                }
+                // arguments must be able to be compiled as standard expressions
+                if (optimize && MP_PARSE_NODE_IS_STRUCT(pn_range_start)) {
+                    int k = MP_PARSE_NODE_STRUCT_KIND((mp_parse_node_struct_t *)pn_range_start);
+                    if (k == PN_arglist_star || k == PN_arglist_dbl_star || k == PN_argument) {
+                        optimize = false;
+                    }
+                }
+                if (optimize && MP_PARSE_NODE_IS_STRUCT(pn_range_end)) {
+                    int k = MP_PARSE_NODE_STRUCT_KIND((mp_parse_node_struct_t *)pn_range_end);
+                    if (k == PN_arglist_star || k == PN_arglist_dbl_star || k == PN_argument) {
+                        optimize = false;
+                    }
+                }
+            }
+            if (optimize) {
+                compile_for_stmt_optimised_range(comp, pns->nodes[0], pn_range_start, pn_range_end, pn_range_step, pns->nodes[2], pns->nodes[3]);
+                return;
+            }
+        }
+    }
+
+    START_BREAK_CONTINUE_BLOCK
+    comp->break_label |= MP_EMIT_BREAK_FROM_FOR;
+
+    uint pop_label = comp_next_label(comp);
+
+    compile_node(comp, pns->nodes[1]); // iterator
+    EMIT_ARG(get_iter, true);
+    EMIT_ARG(label_assign, continue_label);
+    EMIT_ARG(for_iter, pop_label);
+    c_assign(comp, pns->nodes[0], ASSIGN_STORE); // variable
+    compile_node(comp, pns->nodes[2]); // body
+    EMIT_ARG(jump, continue_label);
+    EMIT_ARG(label_assign, pop_label);
+    EMIT(for_iter_end);
+
+    // break/continue apply to outer loop (if any) in the else block
+    END_BREAK_CONTINUE_BLOCK
+
+    compile_node(comp, pns->nodes[3]); // else (may be empty)
+
+    EMIT_ARG(label_assign, break_label);
+}
+
+// Compile the try/except(/else) part of a try statement.  Except clauses
+// are matched in source order; a bare `except:` is only valid as the last
+// clause.  An `except ... as e` handler is wrapped in an implicit
+// try/finally that deletes `e` when the handler exits (CPython semantics).
+static void compile_try_except(compiler_t *comp, mp_parse_node_t pn_body, int n_except, mp_parse_node_t *pn_excepts, mp_parse_node_t pn_else) {
+    // setup code
+    uint l1 = comp_next_label(comp);
+    uint success_label = comp_next_label(comp);
+
+    compile_increase_except_level(comp, l1, MP_EMIT_SETUP_BLOCK_EXCEPT);
+
+    compile_node(comp, pn_body); // body
+    EMIT_ARG(pop_except_jump, success_label, false); // jump over exception handler
+
+    EMIT_ARG(label_assign, l1); // start of exception handler
+    EMIT(start_except_handler);
+
+    // at this point the top of the stack contains the exception instance that was raised
+
+    uint l2 = comp_next_label(comp);
+
+    for (int i = 0; i < n_except; i++) {
+        assert(MP_PARSE_NODE_IS_STRUCT_KIND(pn_excepts[i], PN_try_stmt_except)); // should be
+        mp_parse_node_struct_t *pns_except = (mp_parse_node_struct_t *)pn_excepts[i];
+
+        qstr qstr_exception_local = 0;
+        uint end_finally_label = comp_next_label(comp);
+        #if MICROPY_PY_SYS_SETTRACE
+        EMIT_ARG(set_source_line, pns_except->source_line);
+        #endif
+
+        if (MP_PARSE_NODE_IS_NULL(pns_except->nodes[0])) {
+            // this is a catch all exception handler
+            if (i + 1 != n_except) {
+                compile_syntax_error(comp, pn_excepts[i], MP_ERROR_TEXT("default 'except' must be last"));
+                compile_decrease_except_level(comp);
+                return;
+            }
+        } else {
+            // this exception handler requires a match to a certain type of exception
+            mp_parse_node_t pns_exception_expr = pns_except->nodes[0];
+            if (MP_PARSE_NODE_IS_STRUCT(pns_exception_expr)) {
+                mp_parse_node_struct_t *pns3 = (mp_parse_node_struct_t *)pns_exception_expr;
+                if (MP_PARSE_NODE_STRUCT_KIND(pns3) == PN_try_stmt_as_name) {
+                    // handler binds the exception to a local
+                    pns_exception_expr = pns3->nodes[0];
+                    qstr_exception_local = MP_PARSE_NODE_LEAF_ARG(pns3->nodes[1]);
+                }
+            }
+            EMIT(dup_top);
+            compile_node(comp, pns_exception_expr);
+            EMIT_ARG(binary_op, MP_BINARY_OP_EXCEPTION_MATCH);
+            EMIT_ARG(pop_jump_if, false, end_finally_label);
+        }
+
+        // either discard or store the exception instance
+        if (qstr_exception_local == 0) {
+            EMIT(pop_top);
+        } else {
+            compile_store_id(comp, qstr_exception_local);
+        }
+
+        // If the exception is bound to a variable <e> then the <body> of the
+        // exception handler is wrapped in a try-finally so that the name <e> can
+        // be deleted (per Python semantics) even if the <body> has an exception.
+        // In such a case the generated code for the exception handler is:
+        //      try:
+        //          <body>
+        //      finally:
+        //          <e> = None
+        //          del <e>
+        uint l3 = 0;
+        if (qstr_exception_local != 0) {
+            l3 = comp_next_label(comp);
+            compile_increase_except_level(comp, l3, MP_EMIT_SETUP_BLOCK_FINALLY);
+        }
+        compile_node(comp, pns_except->nodes[1]); // the <body>
+        if (qstr_exception_local != 0) {
+            EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE);
+            EMIT_ARG(label_assign, l3);
+            EMIT_ARG(adjust_stack_size, 1); // stack adjust for possible return value
+            EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE);
+            compile_store_id(comp, qstr_exception_local);
+            compile_delete_id(comp, qstr_exception_local);
+            EMIT_ARG(adjust_stack_size, -1);
+            compile_decrease_except_level(comp);
+        }
+
+        EMIT_ARG(pop_except_jump, l2, true);
+        EMIT_ARG(label_assign, end_finally_label);
+        EMIT_ARG(adjust_stack_size, 1); // stack adjust for the exception instance
+    }
+
+    compile_decrease_except_level(comp);
+    EMIT(end_except_handler);
+
+    EMIT_ARG(label_assign, success_label);
+    compile_node(comp, pn_else); // else block, can be null
+    EMIT_ARG(label_assign, l2);
+}
+
+// Compile a try/finally statement; when n_except > 0 the protected region is
+// itself a try/except(/else), compiled via compile_try_except.  A None on
+// the stack signals normal (non-exceptional) entry to the finally block.
+static void compile_try_finally(compiler_t *comp, mp_parse_node_t pn_body, int n_except, mp_parse_node_t *pn_except, mp_parse_node_t pn_else, mp_parse_node_t pn_finally) {
+    uint l_finally_block = comp_next_label(comp);
+
+    compile_increase_except_level(comp, l_finally_block, MP_EMIT_SETUP_BLOCK_FINALLY);
+
+    if (n_except == 0) {
+        assert(MP_PARSE_NODE_IS_NULL(pn_else));
+        EMIT_ARG(adjust_stack_size, 3); // stack adjust for possible UNWIND_JUMP state
+        compile_node(comp, pn_body);
+        EMIT_ARG(adjust_stack_size, -3);
+    } else {
+        compile_try_except(comp, pn_body, n_except, pn_except, pn_else);
+    }
+
+    // If the code reaches this point then the try part of the try-finally exited normally.
+    // This is indicated to the runtime by None sitting on the stack.
+    EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE);
+
+    // Compile the finally block.
+    // The stack needs to be adjusted by 1 to account for the possibility that the finally is
+    // being executed as part of a return, and the return value is on the top of the stack.
+    EMIT_ARG(label_assign, l_finally_block);
+    EMIT_ARG(adjust_stack_size, 1);
+    compile_node(comp, pn_finally);
+    EMIT_ARG(adjust_stack_size, -1);
+
+    compile_decrease_except_level(comp);
+}
+
+// Compile a try statement, dispatching on which combination of except,
+// else and finally clauses the parse tree contains.
+static void compile_try_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    assert(MP_PARSE_NODE_IS_STRUCT(pns->nodes[1])); // should be
+    {
+        mp_parse_node_struct_t *pns2 = (mp_parse_node_struct_t *)pns->nodes[1];
+        if (MP_PARSE_NODE_STRUCT_KIND(pns2) == PN_try_stmt_finally) {
+            // just try-finally
+            compile_try_finally(comp, pns->nodes[0], 0, NULL, MP_PARSE_NODE_NULL, pns2->nodes[0]);
+        } else if (MP_PARSE_NODE_STRUCT_KIND(pns2) == PN_try_stmt_except_and_more) {
+            // try-except and possibly else and/or finally
+            mp_parse_node_t *pn_excepts;
+            size_t n_except = mp_parse_node_extract_list(&pns2->nodes[0], PN_try_stmt_except_list, &pn_excepts);
+            if (MP_PARSE_NODE_IS_NULL(pns2->nodes[2])) {
+                // no finally
+                compile_try_except(comp, pns->nodes[0], n_except, pn_excepts, pns2->nodes[1]);
+            } else {
+                // have finally
+                compile_try_finally(comp, pns->nodes[0], n_except, pn_excepts, pns2->nodes[1], ((mp_parse_node_struct_t *)pns2->nodes[2])->nodes[0]);
+            }
+        } else {
+            // just try-except
+            mp_parse_node_t *pn_excepts;
+            size_t n_except = mp_parse_node_extract_list(&pns->nodes[1], PN_try_stmt_except_list, &pn_excepts);
+            compile_try_except(comp, pns->nodes[0], n_except, pn_excepts, MP_PARSE_NODE_NULL);
+        }
+    }
+}
+
+// Recursively compile the context managers of a with statement.  Each item
+// opens its own SETUP_WITH block; nesting means the blocks are unwound in
+// reverse order after the body, matching Python's with semantics.
+static void compile_with_stmt_helper(compiler_t *comp, size_t n, mp_parse_node_t *nodes, mp_parse_node_t body) {
+    if (n == 0) {
+        // no more pre-bits, compile the body of the with
+        compile_node(comp, body);
+    } else {
+        uint l_end = comp_next_label(comp);
+        if (MP_PARSE_NODE_IS_STRUCT_KIND(nodes[0], PN_with_item)) {
+            // this pre-bit is of the form "a as b"
+            mp_parse_node_struct_t *pns = (mp_parse_node_struct_t *)nodes[0];
+            compile_node(comp, pns->nodes[0]);
+            compile_increase_except_level(comp, l_end, MP_EMIT_SETUP_BLOCK_WITH);
+            c_assign(comp, pns->nodes[1], ASSIGN_STORE);
+        } else {
+            // this pre-bit is just an expression
+            compile_node(comp, nodes[0]);
+            compile_increase_except_level(comp, l_end, MP_EMIT_SETUP_BLOCK_WITH);
+            EMIT(pop_top);
+        }
+        // compile additional pre-bits and the body
+        compile_with_stmt_helper(comp, n - 1, nodes + 1, body);
+        // finish this with block
+        EMIT_ARG(with_cleanup, l_end);
+        reserve_labels_for_native(comp, 3); // used by native's with_cleanup
+        compile_decrease_except_level(comp);
+    }
+}
+
+// Compile a with statement by delegating to the recursive helper.
+static void compile_with_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    // split the "a as b, c as d, ..." prelude into its individual items
+    mp_parse_node_t *items;
+    size_t n_items = mp_parse_node_extract_list(&pns->nodes[0], PN_with_stmt_list, &items);
+    assert(n_items > 0);
+
+    // emit the context managers (and finally the body) as nested blocks
+    compile_with_stmt_helper(comp, n_items, items, pns->nodes[1]);
+}
+
+// Emit the code for `yield from <TOS>`: get an iterator from the value on
+// the top of the stack and delegate to it (initial send value is None).
+static void compile_yield_from(compiler_t *comp) {
+    EMIT_ARG(get_iter, false);
+    EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE);
+    EMIT_ARG(yield, MP_EMIT_YIELD_FROM);
+    reserve_labels_for_native(comp, 3);
+}
+
+#if MICROPY_PY_ASYNC_AWAIT
+// Emit code to await the result of calling the named zero-argument method
+// on the object at the top of the stack, i.e. `yield from obj.method()`.
+static void compile_await_object_method(compiler_t *comp, qstr method) {
+    EMIT_ARG(load_method, method, false);
+    EMIT_ARG(call_method, 0, 0, 0);
+    compile_yield_from(comp);
+}
+
+// Compile an `async for` statement by driving the asynchronous iterator
+// protocol: call __aiter__ once, then await __anext__ each iteration inside
+// a try/except that catches StopAsyncIteration to terminate the loop.
+static void compile_async_for_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    // Allocate labels.
+    uint while_else_label = comp_next_label(comp);
+    uint try_exception_label = comp_next_label(comp);
+    uint try_else_label = comp_next_label(comp);
+    uint try_finally_label = comp_next_label(comp);
+
+    // Stack: (...)
+
+    // Compile the iterator expression and load and call its __aiter__ method.
+    compile_node(comp, pns->nodes[1]); // iterator
+    // Stack: (..., iterator)
+    EMIT_ARG(load_method, MP_QSTR___aiter__, false);
+    // Stack: (..., iterator, __aiter__)
+    EMIT_ARG(call_method, 0, 0, 0);
+    // Stack: (..., iterable)
+
+    START_BREAK_CONTINUE_BLOCK
+
+    EMIT_ARG(label_assign, continue_label);
+
+    compile_increase_except_level(comp, try_exception_label, MP_EMIT_SETUP_BLOCK_EXCEPT);
+
+    EMIT(dup_top);
+    // Stack: (..., iterable, iterable)
+
+    // Compile: yield from iterable.__anext__()
+    compile_await_object_method(comp, MP_QSTR___anext__);
+    // Stack: (..., iterable, yielded_value)
+
+    c_assign(comp, pns->nodes[0], ASSIGN_STORE); // variable
+    // Stack: (..., iterable)
+    EMIT_ARG(pop_except_jump, try_else_label, false);
+
+    EMIT_ARG(label_assign, try_exception_label);
+    EMIT(start_except_handler);
+    EMIT(dup_top);
+    EMIT_LOAD_GLOBAL(MP_QSTR_StopAsyncIteration);
+    EMIT_ARG(binary_op, MP_BINARY_OP_EXCEPTION_MATCH);
+    EMIT_ARG(pop_jump_if, false, try_finally_label);
+    EMIT(pop_top); // pop exception instance
+    EMIT_ARG(pop_except_jump, while_else_label, true);
+
+    EMIT_ARG(label_assign, try_finally_label);
+    EMIT_ARG(adjust_stack_size, 1); // if we jump here, the exc is on the stack
+    compile_decrease_except_level(comp);
+    EMIT(end_except_handler);
+
+    // Stack: (..., iterable)
+
+    EMIT_ARG(label_assign, try_else_label);
+    compile_node(comp, pns->nodes[2]); // body
+
+    EMIT_ARG(jump, continue_label);
+    // break/continue apply to outer loop (if any) in the else block
+    END_BREAK_CONTINUE_BLOCK
+
+    EMIT_ARG(label_assign, while_else_label);
+    compile_node(comp, pns->nodes[3]); // else
+
+    EMIT_ARG(label_assign, break_label);
+    // Stack: (..., iterable)
+
+    EMIT(pop_top);
+    // Stack: (...)
+}
+
+// Recursively compile the context managers of an `async with` statement,
+// awaiting __aenter__ on entry and __aexit__ on every exit path (normal
+// fall-through, propagating exception, or return/unwind jump).
+static void compile_async_with_stmt_helper(compiler_t *comp, size_t n, mp_parse_node_t *nodes, mp_parse_node_t body) {
+    if (n == 0) {
+        // no more pre-bits, compile the body of the with
+        compile_node(comp, body);
+    } else {
+        uint l_finally_block = comp_next_label(comp);
+        uint l_aexit_no_exc = comp_next_label(comp);
+        uint l_ret_unwind_jump = comp_next_label(comp);
+        uint l_end = comp_next_label(comp);
+
+        if (MP_PARSE_NODE_IS_STRUCT_KIND(nodes[0], PN_with_item)) {
+            // this pre-bit is of the form "a as b"
+            mp_parse_node_struct_t *pns = (mp_parse_node_struct_t *)nodes[0];
+            compile_node(comp, pns->nodes[0]);
+            EMIT(dup_top);
+            compile_await_object_method(comp, MP_QSTR___aenter__);
+            c_assign(comp, pns->nodes[1], ASSIGN_STORE);
+        } else {
+            // this pre-bit is just an expression
+            compile_node(comp, nodes[0]);
+            EMIT(dup_top);
+            compile_await_object_method(comp, MP_QSTR___aenter__);
+            EMIT(pop_top);
+        }
+
+        // To keep the Python stack size down, and because we can't access values on
+        // this stack further down than 3 elements (via rot_three), we don't preload
+        // __aexit__ (as per normal with) but rather wait until we need it below.
+
+        // Start the try-finally statement
+        compile_increase_except_level(comp, l_finally_block, MP_EMIT_SETUP_BLOCK_FINALLY);
+
+        // Compile any additional pre-bits of the "async with", and also the body
+        EMIT_ARG(adjust_stack_size, 3); // stack adjust for possible UNWIND_JUMP state
+        compile_async_with_stmt_helper(comp, n - 1, nodes + 1, body);
+        EMIT_ARG(adjust_stack_size, -3);
+
+        // We have now finished the "try" block and fall through to the "finally"
+
+        // At this point, after the with body has executed, we have 3 cases:
+        // 1. no exception, we just fall through to this point; stack: (..., ctx_mgr)
+        // 2. exception propagating out, we get to the finally block; stack: (..., ctx_mgr, exc)
+        // 3. return or unwind jump, we get to the finally block; stack: (..., ctx_mgr, X, INT)
+
+        // Handle case 1: call __aexit__
+        // Stack: (..., ctx_mgr)
+        EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE); // to tell end_finally there's no exception
+        EMIT(rot_two);
+        EMIT_ARG(jump, l_aexit_no_exc); // jump to code below to call __aexit__
+
+        // Start of "finally" block
+        // At this point we have case 2 or 3, we detect which one by the TOS being an exception or not
+        EMIT_ARG(label_assign, l_finally_block);
+
+        // Detect if TOS an exception or not
+        EMIT(dup_top);
+        EMIT_LOAD_GLOBAL(MP_QSTR_BaseException);
+        EMIT_ARG(binary_op, MP_BINARY_OP_EXCEPTION_MATCH);
+        EMIT_ARG(pop_jump_if, false, l_ret_unwind_jump); // if not an exception then we have case 3
+
+        // Handle case 2: call __aexit__ and either swallow or re-raise the exception
+        // Stack: (..., ctx_mgr, exc)
+        EMIT(dup_top);
+        EMIT(rot_three);
+        EMIT(rot_two);
+        EMIT_ARG(load_method, MP_QSTR___aexit__, false);
+        EMIT(rot_three);
+        EMIT(rot_three);
+        EMIT(dup_top);
+        #if MICROPY_CPYTHON_COMPAT
+        EMIT_ARG(attr, MP_QSTR___class__, MP_EMIT_ATTR_LOAD); // get type(exc)
+        #else
+        compile_load_id(comp, MP_QSTR_type);
+        EMIT(rot_two);
+        EMIT_ARG(call_function, 1, 0, 0); // get type(exc)
+        #endif
+        EMIT(rot_two);
+        EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE); // dummy traceback value
+        // Stack: (..., exc, __aexit__, ctx_mgr, type(exc), exc, None)
+        EMIT_ARG(call_method, 3, 0, 0);
+        compile_yield_from(comp);
+        // a falsy __aexit__ result means the exception is re-raised
+        EMIT_ARG(pop_jump_if, false, l_end);
+        EMIT(pop_top); // pop exception
+        EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE); // replace with None to swallow exception
+        EMIT_ARG(jump, l_end);
+        EMIT_ARG(adjust_stack_size, 2);
+
+        // Handle case 3: call __aexit__
+        // Stack: (..., ctx_mgr, X, INT)
+        EMIT_ARG(label_assign, l_ret_unwind_jump);
+        EMIT(rot_three);
+        EMIT(rot_three);
+        EMIT_ARG(label_assign, l_aexit_no_exc);
+        EMIT_ARG(load_method, MP_QSTR___aexit__, false);
+        EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE);
+        EMIT(dup_top);
+        EMIT(dup_top);
+        EMIT_ARG(call_method, 3, 0, 0); // call __aexit__(None, None, None)
+        compile_yield_from(comp);
+        EMIT(pop_top);
+        EMIT_ARG(adjust_stack_size, -1);
+
+        // End of "finally" block
+        // Stack can have one of three configurations:
+        // a. (..., None) - from either case 1, or case 2 with swallowed exception
+        // b. (..., exc) - from case 2 with re-raised exception
+        // c. (..., X, INT) - from case 3
+        EMIT_ARG(label_assign, l_end);
+        compile_decrease_except_level(comp);
+    }
+}
+
+// Compile an `async with` statement by delegating to the recursive helper.
+static void compile_async_with_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    // split the "a as b, c as d, ..." prelude into its individual items
+    mp_parse_node_t *items;
+    size_t n_items = mp_parse_node_extract_list(&pns->nodes[0], PN_with_stmt_list, &items);
+    assert(n_items > 0);
+
+    // emit each async context manager (and finally the body) as nested blocks
+    compile_async_with_stmt_helper(comp, n_items, items, pns->nodes[1]);
+}
+
+// Compile an `async` compound statement: `async def`, `async for` or
+// `async with`.  The latter two are only valid inside an async function
+// (whose scope is flagged as a generator).
+static void compile_async_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    assert(MP_PARSE_NODE_IS_STRUCT(pns->nodes[0]));
+    mp_parse_node_struct_t *pns0 = (mp_parse_node_struct_t *)pns->nodes[0];
+    if (MP_PARSE_NODE_STRUCT_KIND(pns0) == PN_funcdef) {
+        // async def
+        compile_funcdef(comp, pns0);
+        scope_t *fscope = (scope_t *)pns0->nodes[4];
+        fscope->scope_flags |= MP_SCOPE_FLAG_GENERATOR;
+    } else {
+        // async for/with; first verify the scope is a generator
+        int scope_flags = comp->scope_cur->scope_flags;
+        if (!(scope_flags & MP_SCOPE_FLAG_GENERATOR)) {
+            compile_syntax_error(comp, (mp_parse_node_t)pns0,
+                MP_ERROR_TEXT("async for/with outside async function"));
+            return;
+        }
+
+        if (MP_PARSE_NODE_STRUCT_KIND(pns0) == PN_for_stmt) {
+            // async for
+            compile_async_for_stmt(comp, pns0);
+        } else {
+            // async with
+            assert(MP_PARSE_NODE_STRUCT_KIND(pns0) == PN_with_stmt);
+            compile_async_with_stmt(comp, pns0);
+        }
+    }
+}
+#endif
+
+// Compile an expression statement: a bare expression, an annotated
+// declaration/assignment ("x: y" / "x: y = z"), an augmented assignment
+// (+=, |=, ...), or a (possibly chained) normal assignment.
+// pns->nodes[0] is the LHS expression; pns->nodes[1] is the RHS part, or
+// NULL when the statement is just an expression.
+static void compile_expr_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    mp_parse_node_t pn_rhs = pns->nodes[1];
+    if (MP_PARSE_NODE_IS_NULL(pn_rhs)) {
+        if (comp->is_repl && comp->scope_cur->kind == SCOPE_MODULE) {
+            // for REPL, evaluate then print the expression
+            compile_load_id(comp, MP_QSTR___repl_print__);
+            compile_node(comp, pns->nodes[0]);
+            EMIT_ARG(call_function, 1, 0, 0);
+            EMIT(pop_top);
+
+        } else {
+            // for non-REPL, evaluate then discard the expression
+            if ((MP_PARSE_NODE_IS_LEAF(pns->nodes[0]) && !MP_PARSE_NODE_IS_ID(pns->nodes[0]))
+                || MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[0], PN_const_object)) {
+                // do nothing with a lonely constant
+            } else {
+                compile_node(comp, pns->nodes[0]); // just an expression
+                EMIT(pop_top); // discard last result since this is a statement and leaves nothing on the stack
+            }
+        }
+    } else if (MP_PARSE_NODE_IS_STRUCT(pn_rhs)) {
+        mp_parse_node_struct_t *pns1 = (mp_parse_node_struct_t *)pn_rhs;
+        int kind = MP_PARSE_NODE_STRUCT_KIND(pns1);
+        if (kind == PN_annassign) {
+            // the annotation is in pns1->nodes[0] and is ignored
+            if (MP_PARSE_NODE_IS_NULL(pns1->nodes[1])) {
+                // an annotation of the form "x: y"
+                // inside a function this declares "x" as a local
+                if (comp->scope_cur->kind == SCOPE_FUNCTION) {
+                    if (MP_PARSE_NODE_IS_ID(pns->nodes[0])) {
+                        qstr lhs = MP_PARSE_NODE_LEAF_ARG(pns->nodes[0]);
+                        scope_find_or_add_id(comp->scope_cur, lhs, ID_INFO_KIND_LOCAL);
+                    }
+                }
+            } else {
+                // an assigned annotation of the form "x: y = z"
+                pn_rhs = pns1->nodes[1];
+                goto plain_assign;
+            }
+        } else if (kind == PN_expr_stmt_augassign) {
+            c_assign(comp, pns->nodes[0], ASSIGN_AUG_LOAD); // lhs load for aug assign
+            compile_node(comp, pns1->nodes[1]); // rhs
+            assert(MP_PARSE_NODE_IS_TOKEN(pns1->nodes[0]));
+            // the augmented-assignment tokens map linearly onto the in-place
+            // binary-op enum values, starting at |= / INPLACE_OR
+            mp_token_kind_t tok = MP_PARSE_NODE_LEAF_ARG(pns1->nodes[0]);
+            mp_binary_op_t op = MP_BINARY_OP_INPLACE_OR + (tok - MP_TOKEN_DEL_PIPE_EQUAL);
+            EMIT_ARG(binary_op, op);
+            c_assign(comp, pns->nodes[0], ASSIGN_AUG_STORE); // lhs store for aug assign
+        } else if (kind == PN_expr_stmt_assign_list) {
+            // chained assignment "a = b = ... = rhs": evaluate the rhs once,
+            // then dup it for every target except the last
+            int rhs = MP_PARSE_NODE_STRUCT_NUM_NODES(pns1) - 1;
+            compile_node(comp, pns1->nodes[rhs]); // rhs
+            // following CPython, we store left-most first
+            if (rhs > 0) {
+                EMIT(dup_top);
+            }
+            c_assign(comp, pns->nodes[0], ASSIGN_STORE); // lhs store
+            for (int i = 0; i < rhs; i++) {
+                if (i + 1 < rhs) {
+                    EMIT(dup_top);
+                }
+                c_assign(comp, pns1->nodes[i], ASSIGN_STORE); // middle store
+            }
+        } else {
+        plain_assign:
+            #if MICROPY_COMP_DOUBLE_TUPLE_ASSIGN
+            if (MP_PARSE_NODE_IS_STRUCT_KIND(pn_rhs, PN_testlist_star_expr)
+                && MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[0], PN_testlist_star_expr)) {
+                mp_parse_node_struct_t *pns0 = (mp_parse_node_struct_t *)pns->nodes[0];
+                pns1 = (mp_parse_node_struct_t *)pn_rhs;
+                uint32_t n_pns0 = MP_PARSE_NODE_STRUCT_NUM_NODES(pns0);
+                // Can only optimise a tuple-to-tuple assignment when all of the following hold:
+                //  - equal number of items in LHS and RHS tuples
+                //  - 2 or 3 items in the tuples
+                //  - there are no star expressions in the LHS tuple
+                if (n_pns0 == MP_PARSE_NODE_STRUCT_NUM_NODES(pns1)
+                    && (n_pns0 == 2
+                        #if MICROPY_COMP_TRIPLE_TUPLE_ASSIGN
+                        || n_pns0 == 3
+                        #endif
+                        )
+                    && !MP_PARSE_NODE_IS_STRUCT_KIND(pns0->nodes[0], PN_star_expr)
+                    && !MP_PARSE_NODE_IS_STRUCT_KIND(pns0->nodes[1], PN_star_expr)
+                    #if MICROPY_COMP_TRIPLE_TUPLE_ASSIGN
+                    && (n_pns0 == 2 || !MP_PARSE_NODE_IS_STRUCT_KIND(pns0->nodes[2], PN_star_expr))
+                    #endif
+                    ) {
+                    // Optimisation for a, b = c, d or a, b, c = d, e, f
+                    // (avoids building and unpacking an intermediate tuple)
+                    compile_node(comp, pns1->nodes[0]); // rhs
+                    compile_node(comp, pns1->nodes[1]); // rhs
+                    #if MICROPY_COMP_TRIPLE_TUPLE_ASSIGN
+                    if (n_pns0 == 3) {
+                        compile_node(comp, pns1->nodes[2]); // rhs
+                        EMIT(rot_three);
+                    }
+                    #endif
+                    EMIT(rot_two);
+                    c_assign(comp, pns0->nodes[0], ASSIGN_STORE); // lhs store
+                    c_assign(comp, pns0->nodes[1], ASSIGN_STORE); // lhs store
+                    #if MICROPY_COMP_TRIPLE_TUPLE_ASSIGN
+                    if (n_pns0 == 3) {
+                        c_assign(comp, pns0->nodes[2], ASSIGN_STORE); // lhs store
+                    }
+                    #endif
+                    return;
+                }
+            }
+            #endif
+
+            compile_node(comp, pn_rhs); // rhs
+            c_assign(comp, pns->nodes[0], ASSIGN_STORE); // lhs store
+        }
+    } else {
+        // simple (leaf) RHS: jump back into the struct branch above to do a
+        // plain assignment
+        goto plain_assign;
+    }
+}
+
+static void compile_test_if_expr(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    // Conditional expression "A if C else B": nodes[0] is A; nodes[1] holds
+    // the condition C and the else-value B.
+    assert(MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[1], PN_test_if_else));
+    mp_parse_node_struct_t *pns_cond = (mp_parse_node_struct_t *)pns->nodes[1];
+
+    uint label_else = comp_next_label(comp);
+    uint label_done = comp_next_label(comp);
+    c_if_cond(comp, pns_cond->nodes[0], false, label_else); // jump to else-value when condition is false
+    compile_node(comp, pns->nodes[0]); // the "then" value
+    EMIT_ARG(jump, label_done);
+    EMIT_ARG(label_assign, label_else);
+    // Each branch pushes exactly one value; the stack depth at the else label
+    // is one less than at the end of the then-branch.
+    EMIT_ARG(adjust_stack_size, -1);
+    compile_node(comp, pns_cond->nodes[1]); // the "else" value
+    EMIT_ARG(label_assign, label_done);
+}
+
+static void compile_lambdef(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    // Compile a lambda expression; its body lives in its own scope.
+    if (comp->pass == MP_PASS_SCOPE) {
+        // First pass: allocate the lambda's scope and stash it in the parse
+        // node so the later compiler passes can retrieve it.
+        scope_t *new_scope = scope_new_and_link(comp, SCOPE_LAMBDA, (mp_parse_node_t)pns, comp->scope_cur->emit_options);
+        pns->nodes[2] = (mp_parse_node_t)new_scope;
+    }
+
+    // Retrieve the scope that was stored during the scope pass.
+    scope_t *lambda_scope = (scope_t *)pns->nodes[2];
+
+    // Emit the code that creates the function object at runtime.
+    compile_funcdef_lambdef(comp, lambda_scope, pns->nodes[0], PN_varargslist);
+}
+
+#if MICROPY_PY_ASSIGN_EXPR
+// Compile the store part of a named expression "pn_name := pn_expr".
+// Evaluates pn_expr, duplicates it (so the expression's value remains on the
+// stack after the store), then stores the copy into pn_name.
+static void compile_namedexpr_helper(compiler_t *comp, mp_parse_node_t pn_name, mp_parse_node_t pn_expr) {
+    if (!MP_PARSE_NODE_IS_ID(pn_name)) {
+        // error is recorded; compilation carries on below
+        compile_syntax_error(comp, (mp_parse_node_t)pn_name, MP_ERROR_TEXT("can't assign to expression"));
+    }
+    compile_node(comp, pn_expr);
+    EMIT(dup_top);
+
+    qstr target = MP_PARSE_NODE_LEAF_ARG(pn_name);
+
+    // When a variable is assigned via := in a comprehension then that variable is bound to
+    // the parent scope.  Any global or nonlocal declarations in the parent scope are honoured.
+    // For details see: https://peps.python.org/pep-0572/#scope-of-the-target
+    if (comp->pass == MP_PASS_SCOPE && SCOPE_IS_COMP_LIKE(comp->scope_cur->kind)) {
+        id_info_t *id_info_parent = mp_emit_common_get_id_for_modification(comp->scope_cur->parent, target);
+        if (id_info_parent->kind == ID_INFO_KIND_GLOBAL_EXPLICIT) {
+            // parent declared it global: mirror that in the comprehension scope
+            scope_find_or_add_id(comp->scope_cur, target, ID_INFO_KIND_GLOBAL_EXPLICIT);
+        } else {
+            id_info_t *id_info = scope_find_or_add_id(comp->scope_cur, target, ID_INFO_KIND_UNDECIDED);
+            bool is_global = comp->scope_cur->parent->parent == NULL; // comprehension is defined in outer scope
+            if (!is_global && id_info->kind == ID_INFO_KIND_GLOBAL_IMPLICIT) {
+                // Variable was already referenced but now needs to be closed over, so reset the kind
+                // such that scope_check_to_close_over() is called in compile_declare_nonlocal().
+                id_info->kind = ID_INFO_KIND_UNDECIDED;
+            }
+            compile_declare_global_or_nonlocal(comp, pn_name, id_info, is_global);
+        }
+    }
+
+    // Do the store to the target variable.
+    compile_store_id(comp, target);
+}
+
+static void compile_namedexpr(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    // "name := expr": nodes[0] is the target, nodes[1] the value expression.
+    mp_parse_node_t pn_target = pns->nodes[0];
+    mp_parse_node_t pn_value = pns->nodes[1];
+    compile_namedexpr_helper(comp, pn_target, pn_value);
+}
+#endif
+
+static void compile_or_and_test(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    // Short-circuit "or"/"and" chain: evaluate operands left to right and
+    // jump to the end as soon as the overall result is decided.
+    bool jump_cond = MP_PARSE_NODE_STRUCT_KIND(pns) == PN_or_test;
+    uint label_end = comp_next_label(comp);
+    int num_nodes = MP_PARSE_NODE_STRUCT_NUM_NODES(pns);
+    for (int i = 0; i < num_nodes; ++i) {
+        compile_node(comp, pns->nodes[i]);
+        // all but the last operand get a conditional short-circuit jump
+        if (i + 1 < num_nodes) {
+            EMIT_ARG(jump_if_or_pop, jump_cond, label_end);
+        }
+    }
+    EMIT_ARG(label_assign, label_end);
+}
+
+static void compile_not_test_2(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    // "not x": evaluate the operand, then apply the boolean-not unary op.
+    mp_parse_node_t pn_operand = pns->nodes[0];
+    compile_node(comp, pn_operand);
+    EMIT_ARG(unary_op, MP_UNARY_OP_NOT);
+}
+
+// Compile a (possibly chained) comparison such as "a < b" or "a < b <= c".
+// For chains, each middle operand is duplicated and rotated below the pair
+// being compared so it can serve as the left operand of the next link; if
+// any link yields a false value the chain short-circuits to l_fail.
+static void compile_comparison(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    int num_nodes = MP_PARSE_NODE_STRUCT_NUM_NODES(pns);
+    compile_node(comp, pns->nodes[0]);
+    bool multi = (num_nodes > 3); // more than one comparison operator
+    uint l_fail = 0;
+    if (multi) {
+        l_fail = comp_next_label(comp);
+    }
+    for (int i = 1; i + 1 < num_nodes; i += 2) {
+        compile_node(comp, pns->nodes[i + 1]);
+        if (i + 2 < num_nodes) {
+            // keep a copy of this operand for the next link in the chain
+            EMIT(dup_top);
+            EMIT(rot_three);
+        }
+        if (MP_PARSE_NODE_IS_TOKEN(pns->nodes[i])) {
+            mp_token_kind_t tok = MP_PARSE_NODE_LEAF_ARG(pns->nodes[i]);
+            mp_binary_op_t op;
+            if (tok == MP_TOKEN_KW_IN) {
+                op = MP_BINARY_OP_IN;
+            } else {
+                // relational tokens map linearly onto the binary-op enum
+                op = MP_BINARY_OP_LESS + (tok - MP_TOKEN_OP_LESS);
+            }
+            EMIT_ARG(binary_op, op);
+        } else {
+            // "not in" and "is [not]" arrive as sub-nodes rather than tokens
+            assert(MP_PARSE_NODE_IS_STRUCT(pns->nodes[i])); // should be
+            mp_parse_node_struct_t *pns2 = (mp_parse_node_struct_t *)pns->nodes[i];
+            int kind = MP_PARSE_NODE_STRUCT_KIND(pns2);
+            if (kind == PN_comp_op_not_in) {
+                EMIT_ARG(binary_op, MP_BINARY_OP_NOT_IN);
+            } else {
+                assert(kind == PN_comp_op_is); // should be
+                if (MP_PARSE_NODE_IS_NULL(pns2->nodes[0])) {
+                    EMIT_ARG(binary_op, MP_BINARY_OP_IS);
+                } else {
+                    EMIT_ARG(binary_op, MP_BINARY_OP_IS_NOT);
+                }
+            }
+        }
+        if (i + 2 < num_nodes) {
+            // short-circuit the rest of the chain if this link was false
+            EMIT_ARG(jump_if_or_pop, false, l_fail);
+        }
+    }
+    if (multi) {
+        // success path skips over the failure clean-up
+        uint l_end = comp_next_label(comp);
+        EMIT_ARG(jump, l_end);
+        EMIT_ARG(label_assign, l_fail);
+        EMIT_ARG(adjust_stack_size, 1);
+        // discard the left-over duplicated operand, keeping the false result
+        EMIT(rot_two);
+        EMIT(pop_top);
+        EMIT_ARG(label_assign, l_end);
+    }
+}
+
+// A bare starred expression "*x" is only valid as an assignment target;
+// reaching this node as a plain expression is a syntax error.
+static void compile_star_expr(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    compile_syntax_error(comp, (mp_parse_node_t)pns, MP_ERROR_TEXT("*x must be assignment target"));
+}
+
+static void compile_binary_op(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    // |, ^ and & chains: the parse-node kind maps directly onto the binary-op
+    // enum, which the static asserts below verify.
+    MP_STATIC_ASSERT(MP_BINARY_OP_OR + PN_xor_expr - PN_expr == MP_BINARY_OP_XOR);
+    MP_STATIC_ASSERT(MP_BINARY_OP_OR + PN_and_expr - PN_expr == MP_BINARY_OP_AND);
+    mp_binary_op_t op = MP_BINARY_OP_OR + MP_PARSE_NODE_STRUCT_KIND(pns) - PN_expr;
+    int num_nodes = MP_PARSE_NODE_STRUCT_NUM_NODES(pns);
+    // Left-fold: push the first operand, then combine each following operand.
+    compile_node(comp, pns->nodes[0]);
+    for (int i = 1; i < num_nodes; i++) {
+        compile_node(comp, pns->nodes[i]);
+        EMIT_ARG(binary_op, op);
+    }
+}
+
+static void compile_term(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    // Left-associative operator chain: the children alternate
+    // [operand, op-token, operand, op-token, ...].
+    int n = MP_PARSE_NODE_STRUCT_NUM_NODES(pns);
+    compile_node(comp, pns->nodes[0]);
+    for (int i = 1; i + 1 < n; i += 2) {
+        compile_node(comp, pns->nodes[i + 1]);
+        // the token value maps linearly onto the binary-op enum
+        mp_token_kind_t token = MP_PARSE_NODE_LEAF_ARG(pns->nodes[i]);
+        mp_binary_op_t bin_op = MP_BINARY_OP_LSHIFT + (token - MP_TOKEN_OP_DBL_LESS);
+        EMIT_ARG(binary_op, bin_op);
+    }
+}
+
+static void compile_factor_2(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    // Unary prefix operator: nodes[0] is the token, nodes[1] the operand.
+    compile_node(comp, pns->nodes[1]);
+    mp_token_kind_t token = MP_PARSE_NODE_LEAF_ARG(pns->nodes[0]);
+    mp_unary_op_t unary;
+    if (token == MP_TOKEN_OP_TILDE) {
+        unary = MP_UNARY_OP_INVERT;
+    } else {
+        // "+" and "-" tokens are consecutive, matching consecutive op values
+        assert(token == MP_TOKEN_OP_PLUS || token == MP_TOKEN_OP_MINUS);
+        unary = MP_UNARY_OP_POSITIVE + (token - MP_TOKEN_OP_PLUS);
+    }
+    EMIT_ARG(unary_op, unary);
+}
+
+// Compile an atom followed by its trailers: x, x.y, x[i], x(...) in any
+// combination.  Contains special-case handling for super() calls inside a
+// function and (optionally) the OrderedDict({...}) constructor.
+static void compile_atom_expr_normal(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    // compile the subject of the expression
+    compile_node(comp, pns->nodes[0]);
+
+    // compile_atom_expr_await may call us with a NULL node
+    if (MP_PARSE_NODE_IS_NULL(pns->nodes[1])) {
+        return;
+    }
+
+    // get the array of trailers (known to be an array of PARSE_NODE_STRUCT)
+    // a single trailer is stored directly in nodes[1]; multiple trailers are
+    // wrapped in a PN_atom_expr_trailers node
+    size_t num_trail = 1;
+    mp_parse_node_struct_t **pns_trail = (mp_parse_node_struct_t **)&pns->nodes[1];
+    if (MP_PARSE_NODE_STRUCT_KIND(pns_trail[0]) == PN_atom_expr_trailers) {
+        num_trail = MP_PARSE_NODE_STRUCT_NUM_NODES(pns_trail[0]);
+        pns_trail = (mp_parse_node_struct_t **)&pns_trail[0]->nodes[0];
+    }
+
+    // the current index into the array of trailers
+    size_t i = 0;
+
+    // handle special super() call
+    if (comp->scope_cur->kind == SCOPE_FUNCTION
+        && MP_PARSE_NODE_IS_ID(pns->nodes[0])
+        && MP_PARSE_NODE_LEAF_ARG(pns->nodes[0]) == MP_QSTR_super
+        && MP_PARSE_NODE_STRUCT_KIND(pns_trail[0]) == PN_trailer_paren
+        && MP_PARSE_NODE_IS_NULL(pns_trail[0]->nodes[0])) {
+        // at this point we have matched "super()" within a function
+
+        // load the class for super to search for a parent
+        compile_load_id(comp, MP_QSTR___class__);
+
+        // look for first argument to function (assumes it's "self")
+        bool found = false;
+        id_info_t *id = &comp->scope_cur->id_info[0];
+        for (size_t n = comp->scope_cur->id_info_len; n > 0; --n, ++id) {
+            if (id->flags & ID_FLAG_IS_PARAM) {
+                // first argument found; load it
+                compile_load_id(comp, id->qst);
+                found = true;
+                break;
+            }
+        }
+        if (!found) {
+            compile_syntax_error(comp, (mp_parse_node_t)pns_trail[0],
+                MP_ERROR_TEXT("super() can't find self")); // really a TypeError
+            return;
+        }
+
+        if (num_trail >= 3
+            && MP_PARSE_NODE_STRUCT_KIND(pns_trail[1]) == PN_trailer_period
+            && MP_PARSE_NODE_STRUCT_KIND(pns_trail[2]) == PN_trailer_paren) {
+            // optimisation for method calls super().f(...), to eliminate heap allocation
+            mp_parse_node_struct_t *pns_period = pns_trail[1];
+            mp_parse_node_struct_t *pns_paren = pns_trail[2];
+            EMIT_ARG(load_method, MP_PARSE_NODE_LEAF_ARG(pns_period->nodes[0]), true);
+            compile_trailer_paren_helper(comp, pns_paren->nodes[0], true, 0);
+            i = 3;
+        } else {
+            // a super() call
+            EMIT_ARG(call_function, 2, 0, 0);
+            i = 1;
+        }
+
+        #if MICROPY_COMP_CONST_LITERAL && MICROPY_PY_COLLECTIONS_ORDEREDDICT
+        // handle special OrderedDict constructor
+    } else if (MP_PARSE_NODE_IS_ID(pns->nodes[0])
+               && MP_PARSE_NODE_LEAF_ARG(pns->nodes[0]) == MP_QSTR_OrderedDict
+               && MP_PARSE_NODE_STRUCT_KIND(pns_trail[0]) == PN_trailer_paren
+               && MP_PARSE_NODE_IS_STRUCT_KIND(pns_trail[0]->nodes[0], PN_atom_brace)) {
+        // at this point we have matched "OrderedDict({...})"
+
+        // call OrderedDict() with no args, then fill it in-place from the
+        // brace contents (create_map=false: the dict is already on the stack)
+        EMIT_ARG(call_function, 0, 0, 0);
+        mp_parse_node_struct_t *pns_dict = (mp_parse_node_struct_t *)pns_trail[0]->nodes[0];
+        compile_atom_brace_helper(comp, pns_dict, false);
+        i = 1;
+        #endif
+    }
+
+    // compile the remaining trailers
+    for (; i < num_trail; i++) {
+        if (i + 1 < num_trail
+            && MP_PARSE_NODE_STRUCT_KIND(pns_trail[i]) == PN_trailer_period
+            && MP_PARSE_NODE_STRUCT_KIND(pns_trail[i + 1]) == PN_trailer_paren) {
+            // optimisation for method calls a.f(...), following PyPy
+            mp_parse_node_struct_t *pns_period = pns_trail[i];
+            mp_parse_node_struct_t *pns_paren = pns_trail[i + 1];
+            EMIT_ARG(load_method, MP_PARSE_NODE_LEAF_ARG(pns_period->nodes[0]), false);
+            compile_trailer_paren_helper(comp, pns_paren->nodes[0], true, 0);
+            i += 1;
+        } else {
+            // node is one of: trailer_paren, trailer_bracket, trailer_period
+            compile_node(comp, (mp_parse_node_t)pns_trail[i]);
+        }
+    }
+}
+
+// Compile "x ** y": push both operands, then apply the POWER binary op.
+static void compile_power(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    compile_generic_all_nodes(comp, pns); // 2 nodes, arguments of power
+    EMIT_ARG(binary_op, MP_BINARY_OP_POWER);
+}
+
+// Compile the argument list of a call and emit the call itself.
+// pn_arglist is the parsed argument list (may be empty); is_method_call
+// selects call_method vs call_function; n_positional_extra is added to the
+// positional count (all visible call sites pass 0 — presumably it accounts
+// for arguments already pushed by the caller; confirm before relying on it).
+static void compile_trailer_paren_helper(compiler_t *comp, mp_parse_node_t pn_arglist, bool is_method_call, int n_positional_extra) {
+    // function to call is on top of stack
+
+    // get the list of arguments
+    mp_parse_node_t *args;
+    size_t n_args = mp_parse_node_extract_list(&pn_arglist, PN_arglist, &args);
+
+    // compile the arguments
+    // Rather than calling compile_node on the list, we go through the list of args
+    // explicitly here so that we can count the number of arguments and give sensible
+    // error messages.
+    int n_positional = n_positional_extra;
+    uint n_keyword = 0;
+    uint star_flags = 0;
+    // star_args is a bitmap recording which positional args are *-expanded
+    mp_uint_t star_args = 0;
+    for (size_t i = 0; i < n_args; i++) {
+        if (MP_PARSE_NODE_IS_STRUCT(args[i])) {
+            mp_parse_node_struct_t *pns_arg = (mp_parse_node_struct_t *)args[i];
+            if (MP_PARSE_NODE_STRUCT_KIND(pns_arg) == PN_arglist_star) {
+                if (star_flags & MP_EMIT_STAR_FLAG_DOUBLE) {
+                    compile_syntax_error(comp, (mp_parse_node_t)pns_arg, MP_ERROR_TEXT("* arg after **"));
+                    return;
+                }
+                #if MICROPY_DYNAMIC_COMPILER
+                if (i >= (size_t)mp_dynamic_compiler.small_int_bits - 1)
+                #else
+                if (i >= MP_SMALL_INT_BITS - 1)
+                #endif
+                {
+                    // If there are not enough bits in a small int to fit the flag, then we consider
+                    // it a syntax error. It should be unlikely to have this many args in practice.
+                    compile_syntax_error(comp, (mp_parse_node_t)pns_arg, MP_ERROR_TEXT("too many args"));
+                    return;
+                }
+                star_flags |= MP_EMIT_STAR_FLAG_SINGLE;
+                star_args |= (mp_uint_t)1 << i;
+                compile_node(comp, pns_arg->nodes[0]);
+                n_positional++;
+            } else if (MP_PARSE_NODE_STRUCT_KIND(pns_arg) == PN_arglist_dbl_star) {
+                star_flags |= MP_EMIT_STAR_FLAG_DOUBLE;
+                // double-star args are stored as kw arg with key of None
+                EMIT(load_null);
+                compile_node(comp, pns_arg->nodes[0]);
+                n_keyword++;
+            } else if (MP_PARSE_NODE_STRUCT_KIND(pns_arg) == PN_argument) {
+                #if MICROPY_PY_ASSIGN_EXPR
+                // "f(x := v)": a named expression used as a positional arg
+                if (MP_PARSE_NODE_IS_STRUCT_KIND(pns_arg->nodes[1], PN_argument_3)) {
+                    compile_namedexpr_helper(comp, pns_arg->nodes[0], ((mp_parse_node_struct_t *)pns_arg->nodes[1])->nodes[0]);
+                    n_positional++;
+                } else
+                #endif
+                if (!MP_PARSE_NODE_IS_STRUCT_KIND(pns_arg->nodes[1], PN_comp_for)) {
+                    // a keyword argument "name=value"
+                    if (!MP_PARSE_NODE_IS_ID(pns_arg->nodes[0])) {
+                        compile_syntax_error(comp, (mp_parse_node_t)pns_arg, MP_ERROR_TEXT("LHS of keyword arg must be an id"));
+                        return;
+                    }
+                    EMIT_ARG(load_const_str, MP_PARSE_NODE_LEAF_ARG(pns_arg->nodes[0]));
+                    compile_node(comp, pns_arg->nodes[1]);
+                    n_keyword++;
+                } else {
+                    // a bare generator expression "f(x for x in y)"
+                    compile_comprehension(comp, pns_arg, SCOPE_GEN_EXPR);
+                    n_positional++;
+                }
+            } else {
+                goto normal_argument;
+            }
+        } else {
+        normal_argument:
+            if (star_flags & MP_EMIT_STAR_FLAG_DOUBLE) {
+                compile_syntax_error(comp, args[i], MP_ERROR_TEXT("positional arg after **"));
+                return;
+            }
+            if (n_keyword > 0) {
+                compile_syntax_error(comp, args[i], MP_ERROR_TEXT("positional arg after keyword arg"));
+                return;
+            }
+            compile_node(comp, args[i]);
+            n_positional++;
+        }
+    }
+
+    if (star_flags != 0) {
+        // one extra object that contains the star_args map
+        EMIT_ARG(load_const_small_int, star_args);
+    }
+
+    // emit the function/method call
+    if (is_method_call) {
+        EMIT_ARG(call_method, n_positional, n_keyword, star_flags);
+    } else {
+        EMIT_ARG(call_function, n_positional, n_keyword, star_flags);
+    }
+}
+
+// Compile any comprehension (list/set/dict/genexpr).  pns has 2 nodes: the
+// lhs expression of the comprehension and its PN_comp_for node.
+static void compile_comprehension(compiler_t *comp, mp_parse_node_struct_t *pns, scope_kind_t kind) {
+    assert(MP_PARSE_NODE_STRUCT_NUM_NODES(pns) == 2);
+    assert(MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[1], PN_comp_for));
+    mp_parse_node_struct_t *comp_for = (mp_parse_node_struct_t *)pns->nodes[1];
+
+    if (comp->pass == MP_PASS_SCOPE) {
+        // First pass: create the comprehension's scope and remember it in the
+        // parse node so later passes can find it again.
+        scope_t *new_scope = scope_new_and_link(comp, kind, (mp_parse_node_t)pns, comp->scope_cur->emit_options);
+        comp_for->nodes[3] = (mp_parse_node_t)new_scope;
+    }
+
+    // Fetch the scope that was stored during the scope pass.
+    scope_t *comp_scope = (scope_t *)comp_for->nodes[3];
+
+    // Build the function object for the comprehension body.
+    close_over_variables_etc(comp, comp_scope, 0, 0);
+
+    // Evaluate the outermost iterable and call the comprehension function
+    // with it as the single argument.
+    compile_node(comp, comp_for->nodes[1]); // source of the iterator
+    if (kind == SCOPE_GEN_EXPR) {
+        EMIT_ARG(get_iter, false);
+    }
+    EMIT_ARG(call_function, 1, 0, 0);
+}
+
+static void compile_atom_paren(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    // Parenthesised form: "()", "(x, y, ...)" or a generator expression.
+    if (MP_PARSE_NODE_IS_NULL(pns->nodes[0])) {
+        // "()" builds the empty tuple
+        EMIT_ARG(build, 0, MP_EMIT_BUILD_TUPLE);
+        return;
+    }
+    assert(MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[0], PN_testlist_comp));
+    mp_parse_node_struct_t *pns_inner = (mp_parse_node_struct_t *)pns->nodes[0];
+    if (MP_PARSE_NODE_TESTLIST_COMP_HAS_COMP_FOR(pns_inner)) {
+        // "(x for ...)" is a generator expression
+        compile_comprehension(comp, pns_inner, SCOPE_GEN_EXPR);
+    } else {
+        // otherwise an N-item tuple
+        compile_generic_tuple(comp, pns_inner);
+    }
+}
+
+static void compile_atom_bracket(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    // Bracketed form: "[]", "[x, ...]" or a list comprehension.
+    if (MP_PARSE_NODE_IS_NULL(pns->nodes[0])) {
+        // "[]" builds the empty list
+        EMIT_ARG(build, 0, MP_EMIT_BUILD_LIST);
+    } else if (MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[0], PN_testlist_comp)) {
+        mp_parse_node_struct_t *pns_inner = (mp_parse_node_struct_t *)pns->nodes[0];
+        if (MP_PARSE_NODE_TESTLIST_COMP_HAS_COMP_FOR(pns_inner)) {
+            // "[x for ...]" is a list comprehension
+            compile_comprehension(comp, pns_inner, SCOPE_LIST_COMP);
+        } else {
+            // a list of N items: push them all, then build
+            compile_generic_all_nodes(comp, pns_inner);
+            EMIT_ARG(build, MP_PARSE_NODE_STRUCT_NUM_NODES(pns_inner), MP_EMIT_BUILD_LIST);
+        }
+    } else {
+        // a single-item list
+        compile_node(comp, pns->nodes[0]);
+        EMIT_ARG(build, 1, MP_EMIT_BUILD_LIST);
+    }
+}
+
+// Compile the contents of a brace expression "{...}": a dict or set literal,
+// or a dict/set comprehension.  When create_map is false, the caller has
+// already put the target map on the stack and only the store_map operations
+// are emitted (used by the OrderedDict({...}) optimisation above).
+static void compile_atom_brace_helper(compiler_t *comp, mp_parse_node_struct_t *pns, bool create_map) {
+    mp_parse_node_t pn = pns->nodes[0];
+    if (MP_PARSE_NODE_IS_NULL(pn)) {
+        // empty dict
+        if (create_map) {
+            EMIT_ARG(build, 0, MP_EMIT_BUILD_MAP);
+        }
+    } else if (MP_PARSE_NODE_IS_STRUCT(pn)) {
+        pns = (mp_parse_node_struct_t *)pn;
+        if (MP_PARSE_NODE_STRUCT_KIND(pns) == PN_dictorsetmaker_item) {
+            // dict with one element
+            if (create_map) {
+                EMIT_ARG(build, 1, MP_EMIT_BUILD_MAP);
+            }
+            compile_node(comp, pn);
+            EMIT(store_map);
+        } else if (MP_PARSE_NODE_STRUCT_KIND(pns) == PN_dictorsetmaker) {
+            assert(MP_PARSE_NODE_IS_STRUCT(pns->nodes[1])); // should succeed
+            mp_parse_node_struct_t *pns1 = (mp_parse_node_struct_t *)pns->nodes[1];
+            if (MP_PARSE_NODE_STRUCT_KIND(pns1) == PN_dictorsetmaker_list) {
+                // dict/set with multiple elements
+
+                // get tail elements (2nd, 3rd, ...)
+                mp_parse_node_t *nodes;
+                size_t n = mp_parse_node_extract_list(&pns1->nodes[0], PN_dictorsetmaker_list2, &nodes);
+
+                // first element sets whether it's a dict or set
+                bool is_dict;
+                if (!MICROPY_PY_BUILTINS_SET || MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[0], PN_dictorsetmaker_item)) {
+                    // a dictionary
+                    if (create_map) {
+                        EMIT_ARG(build, 1 + n, MP_EMIT_BUILD_MAP);
+                    }
+                    compile_node(comp, pns->nodes[0]);
+                    EMIT(store_map);
+                    is_dict = true;
+                } else {
+                    // a set
+                    compile_node(comp, pns->nodes[0]); // 1st value of set
+                    is_dict = false;
+                }
+
+                // process rest of elements
+                // every element must agree with the first: key:value items in
+                // a dict, plain values in a set
+                for (size_t i = 0; i < n; i++) {
+                    mp_parse_node_t pn_i = nodes[i];
+                    bool is_key_value = MP_PARSE_NODE_IS_STRUCT_KIND(pn_i, PN_dictorsetmaker_item);
+                    compile_node(comp, pn_i);
+                    if (is_dict) {
+                        if (!is_key_value) {
+                            #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
+                            compile_syntax_error(comp, (mp_parse_node_t)pns, MP_ERROR_TEXT("invalid syntax"));
+                            #else
+                            compile_syntax_error(comp, (mp_parse_node_t)pns, MP_ERROR_TEXT("expecting key:value for dict"));
+                            #endif
+                            return;
+                        }
+                        EMIT(store_map);
+                    } else {
+                        if (is_key_value) {
+                            #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
+                            compile_syntax_error(comp, (mp_parse_node_t)pns, MP_ERROR_TEXT("invalid syntax"));
+                            #else
+                            compile_syntax_error(comp, (mp_parse_node_t)pns, MP_ERROR_TEXT("expecting just a value for set"));
+                            #endif
+                            return;
+                        }
+                    }
+                }
+
+                #if MICROPY_PY_BUILTINS_SET
+                // if it's a set, build it
+                if (!is_dict) {
+                    EMIT_ARG(build, 1 + n, MP_EMIT_BUILD_SET);
+                }
+                #endif
+            } else {
+                assert(MP_PARSE_NODE_STRUCT_KIND(pns1) == PN_comp_for); // should be
+                // dict/set comprehension
+                if (!MICROPY_PY_BUILTINS_SET || MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[0], PN_dictorsetmaker_item)) {
+                    // a dictionary comprehension
+                    compile_comprehension(comp, pns, SCOPE_DICT_COMP);
+                } else {
+                    // a set comprehension
+                    compile_comprehension(comp, pns, SCOPE_SET_COMP);
+                }
+            }
+        } else {
+            // set with one element
+            goto set_with_one_element;
+        }
+    } else {
+        // set with one element
+    set_with_one_element:
+        #if MICROPY_PY_BUILTINS_SET
+        compile_node(comp, pn);
+        EMIT_ARG(build, 1, MP_EMIT_BUILD_SET);
+        #else
+        assert(0);
+        #endif
+    }
+}
+
+// Compile a "{...}" atom (dict or set literal/comprehension); always creates
+// the underlying map/set object.
+static void compile_atom_brace(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    compile_atom_brace_helper(comp, pns, true);
+}
+
+// Compile a call trailer "(...)"; the callee is already on top of the stack.
+static void compile_trailer_paren(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    compile_trailer_paren_helper(comp, pns->nodes[0], false, 0);
+}
+
+static void compile_trailer_bracket(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    // object whose index we want is on top of stack
+    compile_node(comp, pns->nodes[0]); // the index
+    EMIT_ARG(subscr, MP_EMIT_SUBSCR_LOAD);
+}
+
+static void compile_trailer_period(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    // object whose attribute we want is on top of stack
+    EMIT_ARG(attr, MP_PARSE_NODE_LEAF_ARG(pns->nodes[0]), MP_EMIT_ATTR_LOAD); // attribute to get
+}
+
+#if MICROPY_PY_BUILTINS_SLICE
+// Compile a slice subscript into a slice object on the stack.
+// On entry pns is either PN_subscript_2 (explicit start expression) or
+// PN_subscript_3 (no start, so None is loaded); the nested nodes then select
+// between 2- and 3-element slices, loading None for omitted stop/step parts.
+static void compile_subscript(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    if (MP_PARSE_NODE_STRUCT_KIND(pns) == PN_subscript_2) {
+        compile_node(comp, pns->nodes[0]); // start of slice
+        assert(MP_PARSE_NODE_IS_STRUCT(pns->nodes[1])); // should always be
+        pns = (mp_parse_node_struct_t *)pns->nodes[1];
+    } else {
+        // pns is a PN_subscript_3, load None for start of slice
+        EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE);
+    }
+
+    assert(MP_PARSE_NODE_STRUCT_KIND(pns) == PN_subscript_3); // should always be
+    mp_parse_node_t pn = pns->nodes[0];
+    if (MP_PARSE_NODE_IS_NULL(pn)) {
+        // [?:]
+        EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE);
+        EMIT_ARG(build, 2, MP_EMIT_BUILD_SLICE);
+    } else if (MP_PARSE_NODE_IS_STRUCT(pn)) {
+        pns = (mp_parse_node_struct_t *)pn;
+        if (MP_PARSE_NODE_STRUCT_KIND(pns) == PN_subscript_3c) {
+            // stop is omitted, so push None for it
+            EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE);
+            pn = pns->nodes[0];
+            if (MP_PARSE_NODE_IS_NULL(pn)) {
+                // [?::]
+                EMIT_ARG(build, 2, MP_EMIT_BUILD_SLICE);
+            } else {
+                // [?::x]
+                compile_node(comp, pn);
+                EMIT_ARG(build, 3, MP_EMIT_BUILD_SLICE);
+            }
+        } else if (MP_PARSE_NODE_STRUCT_KIND(pns) == PN_subscript_3d) {
+            // stop is present, followed by an (optional) step in a sliceop
+            compile_node(comp, pns->nodes[0]);
+            assert(MP_PARSE_NODE_IS_STRUCT(pns->nodes[1])); // should always be
+            pns = (mp_parse_node_struct_t *)pns->nodes[1];
+            assert(MP_PARSE_NODE_STRUCT_KIND(pns) == PN_sliceop); // should always be
+            if (MP_PARSE_NODE_IS_NULL(pns->nodes[0])) {
+                // [?:x:]
+                EMIT_ARG(build, 2, MP_EMIT_BUILD_SLICE);
+            } else {
+                // [?:x:x]
+                compile_node(comp, pns->nodes[0]);
+                EMIT_ARG(build, 3, MP_EMIT_BUILD_SLICE);
+            }
+        } else {
+            // [?:x]
+            compile_node(comp, pn);
+            EMIT_ARG(build, 2, MP_EMIT_BUILD_SLICE);
+        }
+    } else {
+        // [?:x]
+        compile_node(comp, pn);
+        EMIT_ARG(build, 2, MP_EMIT_BUILD_SLICE);
+    }
+}
+#endif // MICROPY_PY_BUILTINS_SLICE
+
+static void compile_dictorsetmaker_item(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    // Compile a single dict key:value pair: the value is emitted before
+    // the key.
+    mp_parse_node_t pn_value = pns->nodes[1];
+    mp_parse_node_t pn_key = pns->nodes[0];
+    compile_node(comp, pn_value);
+    compile_node(comp, pn_key);
+}
+
+static void compile_classdef(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    // Build the class object, then bind it to the class name in the
+    // current scope.
+    const qstr class_name = compile_classdef_helper(comp, pns, comp->scope_cur->emit_options);
+    compile_store_id(comp, class_name);
+}
+
+// Compile a 'yield' expression.  Only valid directly inside a function or
+// lambda scope; otherwise a syntax error is recorded.
+static void compile_yield_expr(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    if (comp->scope_cur->kind != SCOPE_FUNCTION && comp->scope_cur->kind != SCOPE_LAMBDA) {
+        compile_syntax_error(comp, (mp_parse_node_t)pns, MP_ERROR_TEXT("'yield' outside function"));
+        return;
+    }
+    if (MP_PARSE_NODE_IS_NULL(pns->nodes[0])) {
+        // bare 'yield' with no argument: yield None
+        EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE);
+        EMIT_ARG(yield, MP_EMIT_YIELD_VALUE);
+        reserve_labels_for_native(comp, 1);
+    } else if (MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[0], PN_yield_arg_from)) {
+        // 'yield from expr': compile the sub-expression and delegate to it
+        pns = (mp_parse_node_struct_t *)pns->nodes[0];
+        compile_node(comp, pns->nodes[0]);
+        compile_yield_from(comp);
+    } else {
+        // 'yield expr'
+        compile_node(comp, pns->nodes[0]);
+        EMIT_ARG(yield, MP_EMIT_YIELD_VALUE);
+        reserve_labels_for_native(comp, 1);
+    }
+}
+
+#if MICROPY_PY_ASYNC_AWAIT
+// Compile an 'await expr'.  Outside a function/lambda scope this is a
+// syntax error, unless top-level await is compiled in and enabled at
+// runtime (MICROPY_COMP_ALLOW_TOP_LEVEL_AWAIT).
+static void compile_atom_expr_await(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    if (comp->scope_cur->kind != SCOPE_FUNCTION && comp->scope_cur->kind != SCOPE_LAMBDA) {
+        #if MICROPY_COMP_ALLOW_TOP_LEVEL_AWAIT
+        if (!mp_compile_allow_top_level_await)
+        #endif
+        {
+            compile_syntax_error(comp, (mp_parse_node_t)pns, MP_ERROR_TEXT("'await' outside function"));
+            return;
+        }
+    }
+    // 'await x' compiles as: evaluate x, then yield-from it
+    compile_atom_expr_normal(comp, pns);
+    compile_yield_from(comp);
+}
+#endif
+
+// Extract the pre-built constant object stored in a const-object parse node.
+static mp_obj_t get_const_object(mp_parse_node_struct_t *pns) {
+    return mp_parse_node_extract_const_object(pns);
+}
+
+// Emit a load of the constant object held by this parse node.
+static void compile_const_object(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    EMIT_ARG(load_const_obj, get_const_object(pns));
+}
+
+// Dispatch table of compile functions, indexed by parse-node struct kind.
+// It is generated by expanding py/grammar.h: each DEF_RULE contributes its
+// compile function entry, DEF_RULE_NC rules contribute nothing.  The final
+// entry handles const-object nodes (see compile_node).
+typedef void (*compile_function_t)(compiler_t *, mp_parse_node_struct_t *);
+static const compile_function_t compile_function[] = {
+// only define rules with a compile function
+#define c(f) compile_##f
+#define DEF_RULE(rule, comp, kind, ...) comp,
+#define DEF_RULE_NC(rule, kind, ...)
+    #include "py/grammar.h"
+#undef c
+#undef DEF_RULE
+#undef DEF_RULE_NC
+    compile_const_object,
+};
+
+// Compile a single parse node: handle the leaf kinds (null, small int,
+// identifier, string, token) directly, and dispatch struct nodes through
+// the compile_function table above.
+static void compile_node(compiler_t *comp, mp_parse_node_t pn) {
+    if (MP_PARSE_NODE_IS_NULL(pn)) {
+        // pass
+    } else if (MP_PARSE_NODE_IS_SMALL_INT(pn)) {
+        mp_int_t arg = MP_PARSE_NODE_LEAF_SMALL_INT(pn);
+        EMIT_ARG(load_const_small_int, arg);
+    } else if (MP_PARSE_NODE_IS_LEAF(pn)) {
+        uintptr_t arg = MP_PARSE_NODE_LEAF_ARG(pn);
+        switch (MP_PARSE_NODE_LEAF_KIND(pn)) {
+            case MP_PARSE_NODE_ID:
+                compile_load_id(comp, arg);
+                break;
+            case MP_PARSE_NODE_STRING:
+                EMIT_ARG(load_const_str, arg);
+                break;
+            case MP_PARSE_NODE_TOKEN:
+            default:
+                if (arg == MP_TOKEN_NEWLINE) {
+                    // this can occur when file_input lets through a NEWLINE (eg if file starts with a newline)
+                    // or when single_input lets through a NEWLINE (user enters a blank line)
+                    // do nothing
+                } else {
+                    EMIT_ARG(load_const_tok, arg);
+                }
+                break;
+        }
+    } else {
+        mp_parse_node_struct_t *pns = (mp_parse_node_struct_t *)pn;
+        // record the source line for error reporting / line-number tables
+        EMIT_ARG(set_source_line, pns->source_line);
+        assert(MP_PARSE_NODE_STRUCT_KIND(pns) <= PN_const_object);
+        compile_function_t f = compile_function[MP_PARSE_NODE_STRUCT_KIND(pns)];
+        f(comp, pns);
+    }
+}
+
+#if MICROPY_EMIT_NATIVE
+// Convert a viper type annotation into a native type code.  No annotation
+// defaults to MP_NATIVE_TYPE_OBJ; an identifier is looked up via
+// mp_native_type_from_qstr; anything else (or an unknown name) records a
+// compile error on comp.
+static int compile_viper_type_annotation(compiler_t *comp, mp_parse_node_t pn_annotation) {
+    int native_type = MP_NATIVE_TYPE_OBJ;
+    if (MP_PARSE_NODE_IS_NULL(pn_annotation)) {
+        // No annotation, type defaults to object
+    } else if (MP_PARSE_NODE_IS_ID(pn_annotation)) {
+        qstr type_name = MP_PARSE_NODE_LEAF_ARG(pn_annotation);
+        native_type = mp_native_type_from_qstr(type_name);
+        if (native_type < 0) {
+            comp->compile_error = mp_obj_new_exception_msg_varg(&mp_type_ViperTypeError, MP_ERROR_TEXT("unknown type '%q'"), type_name);
+            native_type = 0;
+        }
+    } else {
+        compile_syntax_error(comp, pn_annotation, MP_ERROR_TEXT("annotation must be an identifier"));
+    }
+    return native_type;
+}
+#endif
+
+// Process one parameter of a function or lambda during the scope pass:
+// classify it (positional, keyword-only, *star, **dbl-star), update the
+// scope's argument counts and flags, and register the parameter name as a
+// local.  pn_name/pn_star/pn_dbl_star select the grammar rule kinds for
+// typed ('def') vs untyped (lambda) parameter lists.
+static void compile_scope_func_lambda_param(compiler_t *comp, mp_parse_node_t pn, pn_kind_t pn_name, pn_kind_t pn_star, pn_kind_t pn_dbl_star) {
+    (void)pn_dbl_star;
+
+    // check that **kw is last
+    if ((comp->scope_cur->scope_flags & MP_SCOPE_FLAG_VARKEYWORDS) != 0) {
+        compile_syntax_error(comp, pn, MP_ERROR_TEXT("invalid syntax"));
+        return;
+    }
+
+    qstr param_name = MP_QSTRnull;
+    uint param_flag = ID_FLAG_IS_PARAM;
+    mp_parse_node_struct_t *pns = NULL;
+    if (MP_PARSE_NODE_IS_ID(pn)) {
+        param_name = MP_PARSE_NODE_LEAF_ARG(pn);
+        if (comp->have_star) {
+            // comes after a star, so counts as a keyword-only parameter
+            comp->scope_cur->num_kwonly_args += 1;
+        } else {
+            // comes before a star, so counts as a positional parameter
+            comp->scope_cur->num_pos_args += 1;
+        }
+    } else {
+        assert(MP_PARSE_NODE_IS_STRUCT(pn));
+        pns = (mp_parse_node_struct_t *)pn;
+        if (MP_PARSE_NODE_STRUCT_KIND(pns) == pn_name) {
+            // named parameter with possible annotation
+            param_name = MP_PARSE_NODE_LEAF_ARG(pns->nodes[0]);
+            if (comp->have_star) {
+                // comes after a star, so counts as a keyword-only parameter
+                comp->scope_cur->num_kwonly_args += 1;
+            } else {
+                // comes before a star, so counts as a positional parameter
+                comp->scope_cur->num_pos_args += 1;
+            }
+        } else if (MP_PARSE_NODE_STRUCT_KIND(pns) == pn_star) {
+            if (comp->have_star) {
+                // more than one star
+                compile_syntax_error(comp, pn, MP_ERROR_TEXT("invalid syntax"));
+                return;
+            }
+            comp->have_star = true;
+            param_flag = ID_FLAG_IS_PARAM | ID_FLAG_IS_STAR_PARAM;
+            if (MP_PARSE_NODE_IS_NULL(pns->nodes[0])) {
+                // bare star
+                // TODO see http://www.python.org/dev/peps/pep-3102/
+                // assert(comp->scope_cur->num_dict_params == 0);
+                pns = NULL;
+            } else if (MP_PARSE_NODE_IS_ID(pns->nodes[0])) {
+                // named star
+                comp->scope_cur->scope_flags |= MP_SCOPE_FLAG_VARARGS;
+                param_name = MP_PARSE_NODE_LEAF_ARG(pns->nodes[0]);
+                pns = NULL;
+            } else {
+                assert(MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[0], PN_tfpdef)); // should be
+                // named star with possible annotation
+                comp->scope_cur->scope_flags |= MP_SCOPE_FLAG_VARARGS;
+                pns = (mp_parse_node_struct_t *)pns->nodes[0];
+                param_name = MP_PARSE_NODE_LEAF_ARG(pns->nodes[0]);
+            }
+        } else {
+            // double star with possible annotation
+            assert(MP_PARSE_NODE_STRUCT_KIND(pns) == pn_dbl_star); // should be
+            param_name = MP_PARSE_NODE_LEAF_ARG(pns->nodes[0]);
+            param_flag = ID_FLAG_IS_PARAM | ID_FLAG_IS_DBL_STAR_PARAM;
+            comp->scope_cur->scope_flags |= MP_SCOPE_FLAG_VARKEYWORDS;
+        }
+    }
+
+    if (param_name != MP_QSTRnull) {
+        // register the parameter as a local; a repeated name is an error
+        id_info_t *id_info = scope_find_or_add_id(comp->scope_cur, param_name, ID_INFO_KIND_UNDECIDED);
+        if (id_info->kind != ID_INFO_KIND_UNDECIDED) {
+            compile_syntax_error(comp, pn, MP_ERROR_TEXT("argument name reused"));
+            return;
+        }
+        id_info->kind = ID_INFO_KIND_LOCAL;
+        id_info->flags = param_flag;
+
+        #if MICROPY_EMIT_NATIVE
+        // in viper mode, fold the parameter's type annotation into its id flags
+        if (comp->scope_cur->emit_options == MP_EMIT_OPT_VIPER && pn_name == PN_typedargslist_name && pns != NULL) {
+            id_info->flags |= compile_viper_type_annotation(comp, pns->nodes[1]) << ID_FLAG_VIPER_TYPE_POS;
+        }
+        #else
+        (void)pns;
+        #endif
+    }
+}
+
+// Process one 'def' parameter, using the typedargslist grammar rule kinds.
+static void compile_scope_func_param(compiler_t *comp, mp_parse_node_t pn) {
+    compile_scope_func_lambda_param(comp, pn, PN_typedargslist_name, PN_typedargslist_star, PN_typedargslist_dbl_star);
+}
+
+// Process one lambda parameter, using the varargslist grammar rule kinds.
+static void compile_scope_lambda_param(compiler_t *comp, mp_parse_node_t pn) {
+    compile_scope_func_lambda_param(comp, pn, PN_varargslist_name, PN_varargslist_star, PN_varargslist_dbl_star);
+}
+
+// Compile one level of a comprehension's iteration: emit the for-loop head,
+// assign the loop target, then walk the chain of nested comp_if/comp_for
+// nodes ('if' clauses loop via the tail_recursion label, nested 'for'
+// clauses recurse) until the innermost expression is reached and emitted.
+static void compile_scope_comp_iter(compiler_t *comp, mp_parse_node_struct_t *pns_comp_for, mp_parse_node_t pn_inner_expr, int for_depth) {
+    uint l_top = comp_next_label(comp);
+    uint l_end = comp_next_label(comp);
+    EMIT_ARG(label_assign, l_top);
+    EMIT_ARG(for_iter, l_end);
+    c_assign(comp, pns_comp_for->nodes[0], ASSIGN_STORE);
+    mp_parse_node_t pn_iter = pns_comp_for->nodes[2];
+
+tail_recursion:
+    if (MP_PARSE_NODE_IS_NULL(pn_iter)) {
+        // no more nested if/for; compile inner expression
+        compile_node(comp, pn_inner_expr);
+        if (comp->scope_cur->kind == SCOPE_GEN_EXPR) {
+            // generator expression: yield the value
+            EMIT_ARG(yield, MP_EMIT_YIELD_VALUE);
+            reserve_labels_for_native(comp, 1);
+            EMIT(pop_top);
+        } else {
+            // list/dict/set comprehension: store into the collection being built
+            EMIT_ARG(store_comp, comp->scope_cur->kind, 4 * for_depth + 5);
+        }
+    } else if (MP_PARSE_NODE_STRUCT_KIND((mp_parse_node_struct_t *)pn_iter) == PN_comp_if) {
+        // if condition
+        mp_parse_node_struct_t *pns_comp_if = (mp_parse_node_struct_t *)pn_iter;
+        c_if_cond(comp, pns_comp_if->nodes[0], false, l_top);
+        pn_iter = pns_comp_if->nodes[1];
+        goto tail_recursion;
+    } else {
+        assert(MP_PARSE_NODE_STRUCT_KIND((mp_parse_node_struct_t *)pn_iter) == PN_comp_for); // should be
+        // for loop
+        mp_parse_node_struct_t *pns_comp_for2 = (mp_parse_node_struct_t *)pn_iter;
+        compile_node(comp, pns_comp_for2->nodes[1]);
+        EMIT_ARG(get_iter, true);
+        compile_scope_comp_iter(comp, pns_comp_for2, pn_inner_expr, for_depth + 1);
+    }
+
+    EMIT_ARG(jump, l_top);
+    EMIT_ARG(label_assign, l_end);
+    EMIT(for_iter_end);
+}
+
+// If doc strings are enabled, look at the first statement of the given
+// module/class body node and, when it is a string constant, compile it and
+// store it as __doc__.  A no-op otherwise.
+static void check_for_doc_string(compiler_t *comp, mp_parse_node_t pn) {
+    #if MICROPY_ENABLE_DOC_STRING
+    // see http://www.python.org/dev/peps/pep-0257/
+
+    // look for the first statement
+    if (MP_PARSE_NODE_IS_STRUCT_KIND(pn, PN_expr_stmt)) {
+        // a statement; fall through
+    } else if (MP_PARSE_NODE_IS_STRUCT_KIND(pn, PN_file_input_2)) {
+        // file input; find the first non-newline node
+        mp_parse_node_struct_t *pns = (mp_parse_node_struct_t *)pn;
+        int num_nodes = MP_PARSE_NODE_STRUCT_NUM_NODES(pns);
+        for (int i = 0; i < num_nodes; i++) {
+            pn = pns->nodes[i];
+            if (!(MP_PARSE_NODE_IS_LEAF(pn) && MP_PARSE_NODE_LEAF_KIND(pn) == MP_PARSE_NODE_TOKEN && MP_PARSE_NODE_LEAF_ARG(pn) == MP_TOKEN_NEWLINE)) {
+                // not a newline, so this is the first statement; finish search
+                break;
+            }
+        }
+        // if we didn't find a non-newline then it's okay to fall through; pn will be a newline and so doc-string test below will fail gracefully
+    } else if (MP_PARSE_NODE_IS_STRUCT_KIND(pn, PN_suite_block_stmts)) {
+        // a list of statements; get the first one
+        pn = ((mp_parse_node_struct_t *)pn)->nodes[0];
+    } else {
+        return;
+    }
+
+    // check the first statement for a doc string
+    if (MP_PARSE_NODE_IS_STRUCT_KIND(pn, PN_expr_stmt)) {
+        mp_parse_node_struct_t *pns = (mp_parse_node_struct_t *)pn;
+        // the statement counts as a doc string if it is a bare string leaf,
+        // or a const-object node holding a string
+        if ((MP_PARSE_NODE_IS_LEAF(pns->nodes[0])
+             && MP_PARSE_NODE_LEAF_KIND(pns->nodes[0]) == MP_PARSE_NODE_STRING)
+            || (MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[0], PN_const_object)
+                && mp_obj_is_str(get_const_object((mp_parse_node_struct_t *)pns->nodes[0])))) {
+            // compile the doc string
+            compile_node(comp, pns->nodes[0]);
+            // store the doc string
+            compile_store_id(comp, MP_QSTR___doc__);
+        }
+    }
+    #else
+    (void)comp;
+    (void)pn;
+    #endif
+}
+
+// Run one compiler pass over a scope, driving the current emitter.  The
+// scope kind selects the compilation strategy: eval input, module body,
+// function, lambda, comprehension (list/dict/set/genexpr) or class body.
+// Returns the result of the emitter's end_pass.
+static bool compile_scope(compiler_t *comp, scope_t *scope, pass_kind_t pass) {
+    comp->pass = pass;
+    comp->scope_cur = scope;
+    comp->next_label = 0;
+    mp_emit_common_start_pass(&comp->emit_common, pass);
+    EMIT_ARG(start_pass, pass, scope);
+    reserve_labels_for_native(comp, 6); // used by native's start_pass
+
+    if (comp->pass == MP_PASS_SCOPE) {
+        // reset maximum stack sizes in scope
+        // they will be computed in this first pass
+        scope->stack_size = 0;
+        scope->exc_stack_size = 0;
+    }
+
+    // compile
+    if (MP_PARSE_NODE_IS_STRUCT_KIND(scope->pn, PN_eval_input)) {
+        assert(scope->kind == SCOPE_MODULE);
+        mp_parse_node_struct_t *pns = (mp_parse_node_struct_t *)scope->pn;
+        compile_node(comp, pns->nodes[0]); // compile the expression
+        EMIT(return_value);
+    } else if (scope->kind == SCOPE_MODULE) {
+        if (!comp->is_repl) {
+            check_for_doc_string(comp, scope->pn);
+        }
+        compile_node(comp, scope->pn);
+        // a module implicitly returns None
+        EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE);
+        EMIT(return_value);
+    } else if (scope->kind == SCOPE_FUNCTION) {
+        assert(MP_PARSE_NODE_IS_STRUCT(scope->pn));
+        mp_parse_node_struct_t *pns = (mp_parse_node_struct_t *)scope->pn;
+        assert(MP_PARSE_NODE_STRUCT_KIND(pns) == PN_funcdef);
+
+        // work out number of parameters, keywords and default parameters, and add them to the id_info array
+        // must be done before compiling the body so that arguments are numbered first (for LOAD_FAST etc)
+        if (comp->pass == MP_PASS_SCOPE) {
+            comp->have_star = false;
+            apply_to_single_or_list(comp, pns->nodes[1], PN_typedargslist, compile_scope_func_param);
+
+            #if MICROPY_EMIT_NATIVE
+            if (scope->emit_options == MP_EMIT_OPT_VIPER) {
+                // Compile return type; pns->nodes[2] is return/whole function annotation
+                scope->scope_flags |= compile_viper_type_annotation(comp, pns->nodes[2]) << MP_SCOPE_FLAG_VIPERRET_POS;
+            }
+            #endif // MICROPY_EMIT_NATIVE
+        }
+
+        compile_node(comp, pns->nodes[3]); // 3 is function body
+        // a function with no explicit return implicitly returns None
+        EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE);
+        EMIT(return_value);
+    } else if (scope->kind == SCOPE_LAMBDA) {
+        assert(MP_PARSE_NODE_IS_STRUCT(scope->pn));
+        mp_parse_node_struct_t *pns = (mp_parse_node_struct_t *)scope->pn;
+        assert(MP_PARSE_NODE_STRUCT_NUM_NODES(pns) == 3);
+
+        // Set the source line number for the start of the lambda
+        EMIT_ARG(set_source_line, pns->source_line);
+
+        // work out number of parameters, keywords and default parameters, and add them to the id_info array
+        // must be done before compiling the body so that arguments are numbered first (for LOAD_FAST etc)
+        if (comp->pass == MP_PASS_SCOPE) {
+            comp->have_star = false;
+            apply_to_single_or_list(comp, pns->nodes[0], PN_varargslist, compile_scope_lambda_param);
+        }
+
+        compile_node(comp, pns->nodes[1]); // 1 is lambda body
+
+        // if the lambda is a generator, then we return None, not the result of the expression of the lambda
+        if (scope->scope_flags & MP_SCOPE_FLAG_GENERATOR) {
+            EMIT(pop_top);
+            EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE);
+        }
+        EMIT(return_value);
+    } else if (SCOPE_IS_COMP_LIKE(scope->kind)) {
+        // a bit of a hack at the moment
+
+        assert(MP_PARSE_NODE_IS_STRUCT(scope->pn));
+        mp_parse_node_struct_t *pns = (mp_parse_node_struct_t *)scope->pn;
+        assert(MP_PARSE_NODE_STRUCT_NUM_NODES(pns) == 2);
+        assert(MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[1], PN_comp_for));
+        mp_parse_node_struct_t *pns_comp_for = (mp_parse_node_struct_t *)pns->nodes[1];
+
+        // We need a unique name for the comprehension argument (the iterator).
+        // CPython uses .0, but we should be able to use anything that won't
+        // clash with a user defined variable.  Best to use an existing qstr,
+        // so we use the blank qstr.
+        qstr qstr_arg = MP_QSTR_;
+        if (comp->pass == MP_PASS_SCOPE) {
+            scope_find_or_add_id(comp->scope_cur, qstr_arg, ID_INFO_KIND_LOCAL);
+            scope->num_pos_args = 1;
+        }
+
+        // Set the source line number for the start of the comprehension
+        EMIT_ARG(set_source_line, pns->source_line);
+
+        // build the (initially empty) collection that the comprehension fills
+        if (scope->kind == SCOPE_LIST_COMP) {
+            EMIT_ARG(build, 0, MP_EMIT_BUILD_LIST);
+        } else if (scope->kind == SCOPE_DICT_COMP) {
+            EMIT_ARG(build, 0, MP_EMIT_BUILD_MAP);
+        #if MICROPY_PY_BUILTINS_SET
+        } else if (scope->kind == SCOPE_SET_COMP) {
+            EMIT_ARG(build, 0, MP_EMIT_BUILD_SET);
+        #endif
+        }
+
+        // There are 4 slots on the stack for the iterator, and the first one is
+        // NULL to indicate that the second one points to the iterator object.
+        if (scope->kind == SCOPE_GEN_EXPR) {
+            MP_STATIC_ASSERT(MP_OBJ_ITER_BUF_NSLOTS == 4);
+            EMIT(load_null);
+            compile_load_id(comp, qstr_arg);
+            EMIT(load_null);
+            EMIT(load_null);
+        } else {
+            compile_load_id(comp, qstr_arg);
+            EMIT_ARG(get_iter, true);
+        }
+
+        compile_scope_comp_iter(comp, pns_comp_for, pns->nodes[0], 0);
+
+        if (scope->kind == SCOPE_GEN_EXPR) {
+            EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE);
+        }
+        EMIT(return_value);
+    } else {
+        assert(scope->kind == SCOPE_CLASS);
+        assert(MP_PARSE_NODE_IS_STRUCT(scope->pn));
+        mp_parse_node_struct_t *pns = (mp_parse_node_struct_t *)scope->pn;
+        assert(MP_PARSE_NODE_STRUCT_KIND(pns) == PN_classdef);
+
+        if (comp->pass == MP_PASS_SCOPE) {
+            scope_find_or_add_id(scope, MP_QSTR___class__, ID_INFO_KIND_LOCAL);
+        }
+
+        #if MICROPY_PY_SYS_SETTRACE
+        EMIT_ARG(set_source_line, pns->source_line);
+        #endif
+        // set up __module__ and __qualname__ before compiling the class body
+        compile_load_id(comp, MP_QSTR___name__);
+        compile_store_id(comp, MP_QSTR___module__);
+        EMIT_ARG(load_const_str, MP_PARSE_NODE_LEAF_ARG(pns->nodes[0])); // 0 is class name
+        compile_store_id(comp, MP_QSTR___qualname__);
+
+        check_for_doc_string(comp, pns->nodes[2]);
+        compile_node(comp, pns->nodes[2]); // 2 is class body
+
+        id_info_t *id = scope_find(scope, MP_QSTR___class__);
+        assert(id != NULL);
+        if (id->kind == ID_INFO_KIND_LOCAL) {
+            EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE);
+        } else {
+            EMIT_LOAD_FAST(MP_QSTR___class__, id->local_num);
+        }
+        EMIT(return_value);
+    }
+
+    bool pass_complete = EMIT(end_pass);
+
+    // make sure we match all the exception levels
+    assert(comp->cur_except_level == 0);
+
+    return pass_complete;
+}
+
+#if MICROPY_EMIT_INLINE_ASM
+// Compile a scope as inline assembler: each statement of the function body
+// must be a call-shaped node naming an assembler instruction, with the
+// pseudo-ops 'label', 'align' and 'data' handled here directly.
+// requires 3 passes: SCOPE, CODE_SIZE, EMIT
+static void compile_scope_inline_asm(compiler_t *comp, scope_t *scope, pass_kind_t pass) {
+    comp->pass = pass;
+    comp->scope_cur = scope;
+    comp->next_label = 0;
+
+    if (scope->kind != SCOPE_FUNCTION) {
+        compile_syntax_error(comp, MP_PARSE_NODE_NULL, MP_ERROR_TEXT("inline assembler must be a function"));
+        return;
+    }
+
+    if (comp->pass > MP_PASS_SCOPE) {
+        EMIT_INLINE_ASM_ARG(start_pass, comp->pass, &comp->compile_error);
+    }
+
+    // get the function definition parse node
+    assert(MP_PARSE_NODE_IS_STRUCT(scope->pn));
+    mp_parse_node_struct_t *pns = (mp_parse_node_struct_t *)scope->pn;
+    assert(MP_PARSE_NODE_STRUCT_KIND(pns) == PN_funcdef);
+
+    // qstr f_id = MP_PARSE_NODE_LEAF_ARG(pns->nodes[0]); // function name
+
+    // parameters are in pns->nodes[1]
+    if (comp->pass == MP_PASS_CODE_SIZE) {
+        mp_parse_node_t *pn_params;
+        size_t n_params = mp_parse_node_extract_list(&pns->nodes[1], PN_typedargslist, &pn_params);
+        scope->num_pos_args = EMIT_INLINE_ASM_ARG(count_params, n_params, pn_params);
+        if (comp->compile_error != MP_OBJ_NULL) {
+            goto inline_asm_error;
+        }
+    }
+
+    // pns->nodes[2] is function return annotation
+    mp_uint_t type_sig = MP_NATIVE_TYPE_INT;
+    mp_parse_node_t pn_annotation = pns->nodes[2];
+    if (!MP_PARSE_NODE_IS_NULL(pn_annotation)) {
+        // nodes[2] can be null or a test-expr
+        if (MP_PARSE_NODE_IS_ID(pn_annotation)) {
+            qstr ret_type = MP_PARSE_NODE_LEAF_ARG(pn_annotation);
+            switch (ret_type) {
+                case MP_QSTR_object:
+                    type_sig = MP_NATIVE_TYPE_OBJ;
+                    break;
+                case MP_QSTR_bool:
+                    type_sig = MP_NATIVE_TYPE_BOOL;
+                    break;
+                case MP_QSTR_int:
+                    type_sig = MP_NATIVE_TYPE_INT;
+                    break;
+                case MP_QSTR_uint:
+                    type_sig = MP_NATIVE_TYPE_UINT;
+                    break;
+                default:
+                    compile_syntax_error(comp, pn_annotation, MP_ERROR_TEXT("unknown type"));
+                    return;
+            }
+        } else {
+            compile_syntax_error(comp, pn_annotation, MP_ERROR_TEXT("return annotation must be an identifier"));
+        }
+    }
+
+    mp_parse_node_t pn_body = pns->nodes[3]; // body
+    mp_parse_node_t *nodes;
+    size_t num = mp_parse_node_extract_list(&pn_body, PN_suite_block_stmts, &nodes);
+
+    // process each statement of the body as one assembler instruction
+    for (size_t i = 0; i < num; i++) {
+        assert(MP_PARSE_NODE_IS_STRUCT(nodes[i]));
+        mp_parse_node_struct_t *pns2 = (mp_parse_node_struct_t *)nodes[i];
+        if (MP_PARSE_NODE_STRUCT_KIND(pns2) == PN_pass_stmt) {
+            // no instructions
+            continue;
+        } else if (MP_PARSE_NODE_STRUCT_KIND(pns2) != PN_expr_stmt) {
+            // not an instruction; error
+        not_an_instruction:
+            compile_syntax_error(comp, nodes[i], MP_ERROR_TEXT("expecting an assembler instruction"));
+            return;
+        }
+
+        // check structure of parse node: must be a call 'name(...)' with no assignment
+        assert(MP_PARSE_NODE_IS_STRUCT(pns2->nodes[0]));
+        if (!MP_PARSE_NODE_IS_NULL(pns2->nodes[1])) {
+            goto not_an_instruction;
+        }
+        pns2 = (mp_parse_node_struct_t *)pns2->nodes[0];
+        if (MP_PARSE_NODE_STRUCT_KIND(pns2) != PN_atom_expr_normal) {
+            goto not_an_instruction;
+        }
+        if (!MP_PARSE_NODE_IS_ID(pns2->nodes[0])) {
+            goto not_an_instruction;
+        }
+        if (!MP_PARSE_NODE_IS_STRUCT_KIND(pns2->nodes[1], PN_trailer_paren)) {
+            goto not_an_instruction;
+        }
+
+        // parse node looks like an instruction
+        // get instruction name and args
+        qstr op = MP_PARSE_NODE_LEAF_ARG(pns2->nodes[0]);
+        pns2 = (mp_parse_node_struct_t *)pns2->nodes[1]; // PN_trailer_paren
+        mp_parse_node_t *pn_arg;
+        size_t n_args = mp_parse_node_extract_list(&pns2->nodes[0], PN_arglist, &pn_arg);
+
+        // emit instructions
+        if (op == MP_QSTR_label) {
+            // pseudo-op: define a label at the current position
+            if (!(n_args == 1 && MP_PARSE_NODE_IS_ID(pn_arg[0]))) {
+                compile_syntax_error(comp, nodes[i], MP_ERROR_TEXT("'label' requires 1 argument"));
+                return;
+            }
+            uint lab = comp_next_label(comp);
+            if (pass > MP_PASS_SCOPE) {
+                if (!EMIT_INLINE_ASM_ARG(label, lab, MP_PARSE_NODE_LEAF_ARG(pn_arg[0]))) {
+                    compile_syntax_error(comp, nodes[i], MP_ERROR_TEXT("label redefined"));
+                    return;
+                }
+            }
+        } else if (op == MP_QSTR_align) {
+            // pseudo-op: align the output to the given byte boundary
+            if (!(n_args == 1 && MP_PARSE_NODE_IS_SMALL_INT(pn_arg[0]))) {
+                compile_syntax_error(comp, nodes[i], MP_ERROR_TEXT("'align' requires 1 argument"));
+                return;
+            }
+            if (pass > MP_PASS_SCOPE) {
+                mp_asm_base_align((mp_asm_base_t *)comp->emit_inline_asm,
+                    MP_PARSE_NODE_LEAF_SMALL_INT(pn_arg[0]));
+            }
+        } else if (op == MP_QSTR_data) {
+            // pseudo-op: emit raw data words; first arg is the element byte size
+            if (!(n_args >= 2 && MP_PARSE_NODE_IS_SMALL_INT(pn_arg[0]))) {
+                compile_syntax_error(comp, nodes[i], MP_ERROR_TEXT("'data' requires at least 2 arguments"));
+                return;
+            }
+            if (pass > MP_PASS_SCOPE) {
+                mp_int_t bytesize = MP_PARSE_NODE_LEAF_SMALL_INT(pn_arg[0]);
+                for (uint j = 1; j < n_args; j++) {
+                    mp_obj_t int_obj;
+                    if (!mp_parse_node_get_int_maybe(pn_arg[j], &int_obj)) {
+                        compile_syntax_error(comp, nodes[i], MP_ERROR_TEXT("'data' requires integer arguments"));
+                        return;
+                    }
+                    mp_asm_base_data((mp_asm_base_t *)comp->emit_inline_asm,
+                        bytesize, mp_obj_int_get_truncated(int_obj));
+                }
+            }
+        } else {
+            // a real instruction: let the inline-asm emitter handle it
+            if (pass > MP_PASS_SCOPE) {
+                EMIT_INLINE_ASM_ARG(op, op, n_args, pn_arg);
+            }
+        }
+
+        if (comp->compile_error != MP_OBJ_NULL) {
+            pns = pns2; // this is the parse node that had the error
+            goto inline_asm_error;
+        }
+    }
+
+    if (comp->pass > MP_PASS_SCOPE) {
+        EMIT_INLINE_ASM_ARG(end_pass, type_sig);
+
+        if (comp->pass == MP_PASS_EMIT) {
+            // final pass: hand the assembled machine code to the emit glue
+            void *f = mp_asm_base_get_code((mp_asm_base_t *)comp->emit_inline_asm);
+            mp_emit_glue_assign_native(comp->scope_cur->raw_code, MP_CODE_NATIVE_ASM,
+                f, mp_asm_base_get_code_size((mp_asm_base_t *)comp->emit_inline_asm),
+                NULL,
+                #if MICROPY_PERSISTENT_CODE_SAVE
+                0,
+                0,
+                #endif
+                0, comp->scope_cur->num_pos_args, type_sig);
+        }
+    }
+
+    if (comp->compile_error != MP_OBJ_NULL) {
+        // inline assembler had an error; set line for its exception
+    inline_asm_error:
+        comp->compile_error_line = pns->source_line;
+    }
+}
+#endif
+
+// Finalise identifier bookkeeping for a scope after the scope pass:
+// position the *param last among plain params, assign local numbers to
+// locals/params, then to cell vars, and finally number the free vars
+// (ordered to match the parent scope) while shifting the other locals up.
+static void scope_compute_things(scope_t *scope) {
+    // in MicroPython we put the *x parameter after all other parameters (except **y)
+    if (scope->scope_flags & MP_SCOPE_FLAG_VARARGS) {
+        id_info_t *id_param = NULL;
+        for (int i = scope->id_info_len - 1; i >= 0; i--) {
+            id_info_t *id = &scope->id_info[i];
+            if (id->flags & ID_FLAG_IS_STAR_PARAM) {
+                if (id_param != NULL) {
+                    // swap star param with last param
+                    id_info_t temp = *id_param;
+                    *id_param = *id;
+                    *id = temp;
+                }
+                break;
+            } else if (id_param == NULL && id->flags == ID_FLAG_IS_PARAM) {
+                id_param = id;
+            }
+        }
+    }
+
+    // in functions, turn implicit globals into explicit globals
+    // compute the index of each local
+    scope->num_locals = 0;
+    for (int i = 0; i < scope->id_info_len; i++) {
+        id_info_t *id = &scope->id_info[i];
+        if (scope->kind == SCOPE_CLASS && id->qst == MP_QSTR___class__) {
+            // __class__ is not counted as a local; if it's used then it becomes a ID_INFO_KIND_CELL
+            continue;
+        }
+        if (SCOPE_IS_FUNC_LIKE(scope->kind) && id->kind == ID_INFO_KIND_GLOBAL_IMPLICIT) {
+            id->kind = ID_INFO_KIND_GLOBAL_EXPLICIT;
+        }
+        #if MICROPY_EMIT_NATIVE
+        if (id->kind == ID_INFO_KIND_GLOBAL_EXPLICIT) {
+            // This function makes a reference to a global variable
+            if (scope->emit_options == MP_EMIT_OPT_VIPER
+                && mp_native_type_from_qstr(id->qst) >= MP_NATIVE_TYPE_INT) {
+                // A casting operator in viper mode, not a real global reference
+            } else {
+                scope->scope_flags |= MP_SCOPE_FLAG_REFGLOBALS;
+            }
+        }
+        #endif
+        // params always count for 1 local, even if they are a cell
+        if (id->kind == ID_INFO_KIND_LOCAL || (id->flags & ID_FLAG_IS_PARAM)) {
+            id->local_num = scope->num_locals++;
+        }
+    }
+
+    // compute the index of cell vars
+    for (int i = 0; i < scope->id_info_len; i++) {
+        id_info_t *id = &scope->id_info[i];
+        // in MicroPython the cells come right after the fast locals
+        // parameters are not counted here, since they remain at the start
+        // of the locals, even if they are cell vars
+        if (id->kind == ID_INFO_KIND_CELL && !(id->flags & ID_FLAG_IS_PARAM)) {
+            id->local_num = scope->num_locals;
+            scope->num_locals += 1;
+        }
+    }
+
+    // compute the index of free vars
+    // make sure they are in the order of the parent scope
+    if (scope->parent != NULL) {
+        int num_free = 0;
+        for (int i = 0; i < scope->parent->id_info_len; i++) {
+            id_info_t *id = &scope->parent->id_info[i];
+            if (id->kind == ID_INFO_KIND_CELL || id->kind == ID_INFO_KIND_FREE) {
+                for (int j = 0; j < scope->id_info_len; j++) {
+                    id_info_t *id2 = &scope->id_info[j];
+                    if (id2->kind == ID_INFO_KIND_FREE && id->qst == id2->qst) {
+                        assert(!(id2->flags & ID_FLAG_IS_PARAM)); // free vars should not be params
+                        // in MicroPython the frees come first, before the params
+                        id2->local_num = num_free;
+                        num_free += 1;
+                    }
+                }
+            }
+        }
+        // in MicroPython shift all other locals after the free locals
+        if (num_free > 0) {
+            for (int i = 0; i < scope->id_info_len; i++) {
+                id_info_t *id = &scope->id_info[i];
+                if (id->kind != ID_INFO_KIND_FREE || (id->flags & ID_FLAG_IS_PARAM)) {
+                    id->local_num += num_free;
+                }
+            }
+            scope->num_pos_args += num_free; // free vars are counted as params for passing them into the function
+            scope->num_locals += num_free;
+        }
+    }
+}
+
+// Compile the given parse tree down to raw code, filling in `cm` with the
+// result (cm->rc is the raw code of the outer module scope, executed against
+// cm->context).  The parse tree is always consumed (mp_parse_tree_clear is
+// called on it).  If a compile error occurred it is raised via nlr_raise,
+// but only after the emitters, parse tree and scopes have been freed.
+// The function is static unless MICROPY_PERSISTENT_CODE_SAVE is enabled, in
+// which case it is exported (see its declaration in compile.h).
+#if !MICROPY_PERSISTENT_CODE_SAVE
+static
+#endif
+void mp_compile_to_raw_code(mp_parse_tree_t *parse_tree, qstr source_file, bool is_repl, mp_compiled_module_t *cm) {
+    // put compiler state on the stack, it's relatively small
+    compiler_t comp_state = {0};
+    compiler_t *comp = &comp_state;
+
+    comp->is_repl = is_repl;
+    comp->break_label = INVALID_LABEL;
+    comp->continue_label = INVALID_LABEL;
+    mp_emit_common_init(&comp->emit_common, source_file);
+
+    // create the module scope
+    #if MICROPY_EMIT_NATIVE
+    const uint emit_opt = MP_STATE_VM(default_emit_opt);
+    #else
+    const uint emit_opt = MP_EMIT_OPT_NONE;
+    #endif
+    scope_t *module_scope = scope_new_and_link(comp, SCOPE_MODULE, parse_tree->root, emit_opt);
+
+    // create standard emitter; it's used at least for MP_PASS_SCOPE
+    emit_t *emit_bc = emit_bc_new(&comp->emit_common);
+
+    // compile MP_PASS_SCOPE
+    comp->emit = emit_bc;
+    #if MICROPY_EMIT_NATIVE
+    comp->emit_method_table = &emit_bc_method_table;
+    #endif
+    uint max_num_labels = 0;
+    // note: compile_scope can append new scopes to the list as it discovers
+    // nested functions/classes, so this loop also visits those
+    for (scope_t *s = comp->scope_head; s != NULL && comp->compile_error == MP_OBJ_NULL; s = s->next) {
+        #if MICROPY_EMIT_INLINE_ASM
+        if (s->emit_options == MP_EMIT_OPT_ASM) {
+            compile_scope_inline_asm(comp, s, MP_PASS_SCOPE);
+        } else
+        #endif
+        {
+            compile_scope(comp, s, MP_PASS_SCOPE);
+
+            // Check if any implicitly declared variables should be closed over
+            for (size_t i = 0; i < s->id_info_len; ++i) {
+                id_info_t *id = &s->id_info[i];
+                if (id->kind == ID_INFO_KIND_GLOBAL_IMPLICIT) {
+                    scope_check_to_close_over(s, id);
+                }
+            }
+        }
+
+        // update maximum number of labels needed
+        if (comp->next_label > max_num_labels) {
+            max_num_labels = comp->next_label;
+        }
+    }
+
+    // compute some things related to scope and identifiers
+    for (scope_t *s = comp->scope_head; s != NULL && comp->compile_error == MP_OBJ_NULL; s = s->next) {
+        scope_compute_things(s);
+    }
+
+    // set max number of labels now that it's calculated
+    emit_bc_set_max_num_labels(emit_bc, max_num_labels);
+
+    // compile MP_PASS_STACK_SIZE, MP_PASS_CODE_SIZE, MP_PASS_EMIT
+    #if MICROPY_EMIT_NATIVE
+    emit_t *emit_native = NULL;
+    #endif
+    for (scope_t *s = comp->scope_head; s != NULL && comp->compile_error == MP_OBJ_NULL; s = s->next) {
+        #if MICROPY_EMIT_INLINE_ASM
+        if (s->emit_options == MP_EMIT_OPT_ASM) {
+            // inline assembly
+            if (comp->emit_inline_asm == NULL) {
+                comp->emit_inline_asm = ASM_EMITTER(new)(max_num_labels);
+            }
+            comp->emit = NULL;
+            comp->emit_inline_asm_method_table = ASM_EMITTER_TABLE;
+            compile_scope_inline_asm(comp, s, MP_PASS_CODE_SIZE);
+            #if MICROPY_EMIT_INLINE_XTENSA
+            // Xtensa requires an extra pass to compute size of l32r const table
+            // TODO this can be improved by calculating it during SCOPE pass
+            // but that requires some other structural changes to the asm emitters
+            #if MICROPY_DYNAMIC_COMPILER
+            if (mp_dynamic_compiler.native_arch == MP_NATIVE_ARCH_XTENSA)
+            #endif
+            {
+                compile_scope_inline_asm(comp, s, MP_PASS_CODE_SIZE);
+            }
+            #endif
+            if (comp->compile_error == MP_OBJ_NULL) {
+                compile_scope_inline_asm(comp, s, MP_PASS_EMIT);
+            }
+        } else
+        #endif
+        {
+
+            // choose the emit type
+
+            switch (s->emit_options) {
+
+                #if MICROPY_EMIT_NATIVE
+                case MP_EMIT_OPT_NATIVE_PYTHON:
+                case MP_EMIT_OPT_VIPER:
+                    // the native emitter is created lazily and shared by all
+                    // native/viper scopes in this module
+                    if (emit_native == NULL) {
+                        emit_native = NATIVE_EMITTER(new)(&comp->emit_common, &comp->compile_error, &comp->next_label, max_num_labels);
+                    }
+                    comp->emit_method_table = NATIVE_EMITTER_TABLE;
+                    comp->emit = emit_native;
+                    break;
+                #endif // MICROPY_EMIT_NATIVE
+
+                default:
+                    comp->emit = emit_bc;
+                    #if MICROPY_EMIT_NATIVE
+                    comp->emit_method_table = &emit_bc_method_table;
+                    #endif
+                    break;
+            }
+
+            // need a pass to compute stack size
+            compile_scope(comp, s, MP_PASS_STACK_SIZE);
+
+            // second last pass: compute code size
+            if (comp->compile_error == MP_OBJ_NULL) {
+                compile_scope(comp, s, MP_PASS_CODE_SIZE);
+            }
+
+            // final pass: emit code
+            // the emitter can request multiple of these passes
+            if (comp->compile_error == MP_OBJ_NULL) {
+                while (!compile_scope(comp, s, MP_PASS_EMIT)) {
+                }
+            }
+        }
+    }
+
+    if (comp->compile_error != MP_OBJ_NULL) {
+        // if there is no line number for the error then use the line
+        // number for the start of this scope
+        compile_error_set_line(comp, comp->scope_cur->pn);
+        // add a traceback to the exception using relevant source info
+        mp_obj_exception_add_traceback(comp->compile_error, source_file,
+            comp->compile_error_line, comp->scope_cur->simple_name);
+    }
+
+    // construct the global qstr/const table for this module
+    cm->rc = module_scope->raw_code;
+    #if MICROPY_PERSISTENT_CODE_SAVE
+    cm->has_native = false;
+    #if MICROPY_EMIT_NATIVE
+    if (emit_native != NULL) {
+        cm->has_native = true;
+    }
+    #endif
+    #if MICROPY_EMIT_INLINE_ASM
+    if (comp->emit_inline_asm != NULL) {
+        cm->has_native = true;
+    }
+    #endif
+    cm->n_qstr = comp->emit_common.qstr_map.used;
+    cm->n_obj = comp->emit_common.const_obj_list.len;
+    #endif
+    if (comp->compile_error == MP_OBJ_NULL) {
+        mp_emit_common_populate_module_context(&comp->emit_common, source_file, cm->context);
+
+        #if MICROPY_DEBUG_PRINTERS
+        // now that the module context is valid, the raw codes can be printed
+        if (mp_verbose_flag >= 2) {
+            for (scope_t *s = comp->scope_head; s != NULL; s = s->next) {
+                mp_raw_code_t *rc = s->raw_code;
+                if (rc->kind == MP_CODE_BYTECODE) {
+                    mp_bytecode_print(&mp_plat_print, rc, s->raw_code_data_len, &cm->context->constants);
+                }
+            }
+        }
+        #endif
+    }
+
+    // free the emitters
+
+    emit_bc_free(emit_bc);
+    #if MICROPY_EMIT_NATIVE
+    if (emit_native != NULL) {
+        NATIVE_EMITTER(free)(emit_native);
+    }
+    #endif
+    #if MICROPY_EMIT_INLINE_ASM
+    if (comp->emit_inline_asm != NULL) {
+        ASM_EMITTER(free)(comp->emit_inline_asm);
+    }
+    #endif
+
+    // free the parse tree
+    mp_parse_tree_clear(parse_tree);
+
+    // free the scopes
+    for (scope_t *s = module_scope; s;) {
+        scope_t *next = s->next;
+        scope_free(s);
+        s = next;
+    }
+
+    // raise any pending compile error only after all cleanup above is done
+    if (comp->compile_error != MP_OBJ_NULL) {
+        nlr_raise(comp->compile_error);
+    }
+}
+
+// Compile a parse tree into a callable function object that executes the
+// top level of the module.  The current globals dict becomes the module's
+// context; compile errors propagate via nlr_raise.
+mp_obj_t mp_compile(mp_parse_tree_t *parse_tree, qstr source_file, bool is_repl) {
+    mp_compiled_module_t compiled;
+    compiled.context = m_new_obj(mp_module_context_t);
+    compiled.context->module.globals = mp_globals_get();
+    mp_compile_to_raw_code(parse_tree, source_file, is_repl, &compiled);
+    // wrap the outer module's raw code in a function object for the caller
+    return mp_make_function_from_proto_fun(compiled.rc, compiled.context, NULL);
+}
+
+#endif // MICROPY_ENABLE_COMPILER

+ 51 - 0
mp_flipper/lib/micropython/py/compile.h

@@ -0,0 +1,51 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_COMPILE_H
+#define MICROPY_INCLUDED_PY_COMPILE_H
+
+#include "py/lexer.h"
+#include "py/parse.h"
+#include "py/emitglue.h"
+
+#if MICROPY_COMP_ALLOW_TOP_LEVEL_AWAIT
+// set to `true` to allow top-level await expressions
+extern bool mp_compile_allow_top_level_await;
+#endif
+
+// the compiler will raise an exception if an error occurred
+// the compiler will clear the parse tree before it returns
+// mp_globals_get() will be used for the context
+mp_obj_t mp_compile(mp_parse_tree_t *parse_tree, qstr source_file, bool is_repl);
+
+#if MICROPY_PERSISTENT_CODE_SAVE
+// this has the same semantics as mp_compile
+void mp_compile_to_raw_code(mp_parse_tree_t *parse_tree, qstr source_file, bool is_repl, mp_compiled_module_t *cm);
+#endif
+
+// this is implemented in runtime.c
+mp_obj_t mp_parse_compile_execute(mp_lexer_t *lex, mp_parse_input_kind_t parse_input_kind, mp_obj_dict_t *globals, mp_obj_dict_t *locals);
+
+#endif // MICROPY_INCLUDED_PY_COMPILE_H

+ 324 - 0
mp_flipper/lib/micropython/py/dynruntime.h

@@ -0,0 +1,324 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2019 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_DYNRUNTIME_H
+#define MICROPY_INCLUDED_PY_DYNRUNTIME_H
+
+// This header file contains definitions to dynamically implement the static
+// MicroPython runtime API defined in py/obj.h and py/runtime.h.
+
+#include "py/binary.h"
+#include "py/nativeglue.h"
+#include "py/objfun.h"
+#include "py/objstr.h"
+#include "py/objtype.h"
+
+#if !MICROPY_ENABLE_DYNRUNTIME
+#error "dynruntime.h included in non-dynamic-module build."
+#endif
+
+#undef MP_ROM_QSTR
+#undef MP_OBJ_QSTR_VALUE
+#undef MP_OBJ_NEW_QSTR
+#undef mp_const_none
+#undef mp_const_false
+#undef mp_const_true
+#undef mp_const_empty_bytes
+#undef mp_const_empty_tuple
+#undef nlr_raise
+
+/******************************************************************************/
+// Memory allocation
+
+#define m_malloc(n)                     (m_malloc_dyn((n)))
+#define m_free(ptr)                     (m_free_dyn((ptr)))
+#define m_realloc(ptr, new_num_bytes)   (m_realloc_dyn((ptr), (new_num_bytes)))
+
+// Allocate `n` bytes through the runtime's realloc hook (NULL old pointer
+// makes it behave like malloc).
+static inline void *m_malloc_dyn(size_t n) {
+    // TODO won't raise on OOM
+    return mp_fun_table.realloc_(NULL, n, false);
+}
+
+// Free a pointer obtained from m_malloc_dyn/m_realloc_dyn, implemented as a
+// realloc down to size 0.
+static inline void m_free_dyn(void *ptr) {
+    mp_fun_table.realloc_(ptr, 0, false);
+}
+
+// Resize an allocation to `new_num_bytes`.  The final `true` flag differs
+// from the malloc/free shims above; presumably it permits the runtime to
+// move the block -- TODO confirm against the realloc_ entry in nativeglue.h.
+static inline void *m_realloc_dyn(void *ptr, size_t new_num_bytes) {
+    // TODO won't raise on OOM
+    return mp_fun_table.realloc_(ptr, new_num_bytes, true);
+}
+
+/******************************************************************************/
+// Printing
+
+#define mp_plat_print               (*mp_fun_table.plat_print)
+#define mp_printf(p, ...)           (mp_fun_table.printf_((p), __VA_ARGS__))
+#define mp_vprintf(p, fmt, args)    (mp_fun_table.vprintf_((p), (fmt), (args)))
+
+/******************************************************************************/
+// Types and objects
+
+#define MP_OBJ_NEW_QSTR(x)                  (mp_fun_table.native_to_obj(x, MP_NATIVE_TYPE_QSTR))
+
+#define mp_type_type                        (*mp_fun_table.type_type)
+#define mp_type_NoneType                    (*mp_obj_get_type(mp_const_none))
+#define mp_type_bool                        (*mp_obj_get_type(mp_const_false))
+#define mp_type_int                         (*(mp_obj_type_t *)(mp_load_global(MP_QSTR_int)))
+#define mp_type_str                         (*mp_fun_table.type_str)
+#define mp_type_bytes                       (*(mp_obj_type_t *)(mp_load_global(MP_QSTR_bytes)))
+#define mp_type_bytearray                   (*(mp_obj_type_t *)(mp_load_global(MP_QSTR_bytearray)))
+#define mp_type_tuple                       (*((mp_obj_base_t *)mp_const_empty_tuple)->type)
+#define mp_type_list                        (*mp_fun_table.type_list)
+#define mp_type_Exception                   (*mp_fun_table.type_Exception)
+#define mp_type_EOFError                    (*(mp_obj_type_t *)(mp_load_global(MP_QSTR_EOFError)))
+#define mp_type_IndexError                  (*(mp_obj_type_t *)(mp_load_global(MP_QSTR_IndexError)))
+#define mp_type_KeyError                    (*(mp_obj_type_t *)(mp_load_global(MP_QSTR_KeyError)))
+#define mp_type_NotImplementedError         (*(mp_obj_type_t *)(mp_load_global(MP_QSTR_NotImplementedError)))
+#define mp_type_RuntimeError                (*(mp_obj_type_t *)(mp_load_global(MP_QSTR_RuntimeError)))
+#define mp_type_TypeError                   (*(mp_obj_type_t *)(mp_load_global(MP_QSTR_TypeError)))
+#define mp_type_ValueError                  (*(mp_obj_type_t *)(mp_load_global(MP_QSTR_ValueError)))
+
+#define mp_stream_read_obj                  (*mp_fun_table.stream_read_obj)
+#define mp_stream_readinto_obj              (*mp_fun_table.stream_readinto_obj)
+#define mp_stream_unbuffered_readline_obj   (*mp_fun_table.stream_unbuffered_readline_obj)
+#define mp_stream_write_obj                 (*mp_fun_table.stream_write_obj)
+
+#define mp_const_none                       ((mp_obj_t)mp_fun_table.const_none)
+#define mp_const_false                      ((mp_obj_t)mp_fun_table.const_false)
+#define mp_const_true                       ((mp_obj_t)mp_fun_table.const_true)
+#define mp_const_empty_bytes                (MP_OBJ_TYPE_GET_SLOT(&mp_type_bytes, make_new)(NULL, 0, 0, NULL))
+#define mp_const_empty_tuple                (mp_fun_table.new_tuple(0, NULL))
+
+#define mp_obj_new_bool(b)                  ((b) ? (mp_obj_t)mp_fun_table.const_true : (mp_obj_t)mp_fun_table.const_false)
+#define mp_obj_new_int(i)                   (mp_fun_table.native_to_obj(i, MP_NATIVE_TYPE_INT))
+#define mp_obj_new_int_from_uint(i)         (mp_fun_table.native_to_obj(i, MP_NATIVE_TYPE_UINT))
+#define mp_obj_new_str(data, len)           (mp_fun_table.obj_new_str((data), (len)))
+#define mp_obj_new_str_of_type(t, d, l)     (mp_obj_new_str_of_type_dyn((t), (d), (l)))
+#define mp_obj_new_bytes(data, len)         (mp_fun_table.obj_new_bytes((data), (len)))
+#define mp_obj_new_bytearray_by_ref(n, i)   (mp_fun_table.obj_new_bytearray_by_ref((n), (i)))
+#define mp_obj_new_tuple(n, items)          (mp_fun_table.new_tuple((n), (items)))
+#define mp_obj_new_list(n, items)           (mp_fun_table.new_list((n), (items)))
+#define mp_obj_new_dict(n)                  (mp_fun_table.new_dict((n)))
+
+#define mp_obj_get_type(o)                  (mp_fun_table.obj_get_type((o)))
+#define mp_obj_cast_to_native_base(o, t)    (mp_obj_cast_to_native_base_dyn((o), (t)))
+#define mp_obj_get_int(o)                   (mp_fun_table.native_from_obj(o, MP_NATIVE_TYPE_INT))
+#define mp_obj_get_int_truncated(o)         (mp_fun_table.native_from_obj(o, MP_NATIVE_TYPE_UINT))
+#define mp_obj_str_get_str(s)               (mp_obj_str_get_data_dyn((s), NULL))
+#define mp_obj_str_get_data(o, len)         (mp_obj_str_get_data_dyn((o), (len)))
+#define mp_get_buffer(o, bufinfo, fl)       (mp_fun_table.get_buffer((o), (bufinfo), (fl)))
+#define mp_get_buffer_raise(o, bufinfo, fl) (mp_fun_table.get_buffer((o), (bufinfo), (fl) | MP_BUFFER_RAISE_IF_UNSUPPORTED))
+#define mp_get_stream_raise(s, flags)       (mp_fun_table.get_stream_raise((s), (flags)))
+#define mp_obj_is_true(o)                   (mp_fun_table.native_from_obj(o, MP_NATIVE_TYPE_BOOL))
+
+#define mp_obj_len(o)                       (mp_obj_len_dyn(o))
+#define mp_obj_subscr(base, index, val)     (mp_fun_table.obj_subscr((base), (index), (val)))
+#define mp_obj_get_array(o, len, items)     (mp_obj_get_array_dyn((o), (len), (items)))
+#define mp_obj_list_append(list, item)      (mp_fun_table.list_append((list), (item)))
+#define mp_obj_dict_store(dict, key, val)   (mp_fun_table.dict_store((dict), (key), (val)))
+
+#define mp_obj_malloc_helper(n, t)          (mp_obj_malloc_helper_dyn(n, t))
+
+// Dynamic stand-in for mp_obj_new_str_of_type: build a str object when the
+// requested type is exactly &mp_type_str, otherwise build a bytes object.
+static inline mp_obj_t mp_obj_new_str_of_type_dyn(const mp_obj_type_t *type, const byte *data, size_t len) {
+    if (type != &mp_type_str) {
+        return mp_obj_new_bytes(data, len);
+    }
+    return mp_obj_new_str((const char *)data, len);
+}
+
+// Cast `self_in` down to the native base type `native_type`.  Only a direct
+// parent relationship is checked (one level of inheritance); returns
+// MP_OBJ_NULL when the cast is not possible.
+static inline mp_obj_t mp_obj_cast_to_native_base_dyn(mp_obj_t self_in, mp_const_obj_t native_type) {
+    const mp_obj_type_t *self_type = mp_obj_get_type(self_in);
+
+    if (MP_OBJ_FROM_PTR(self_type) == native_type) {
+        // already exactly the native type, no unwrapping needed
+        return self_in;
+    } else if (MP_OBJ_TYPE_GET_SLOT_OR_NULL(self_type, parent) != native_type) {
+        // The self_in object is not a direct descendant of native_type, so fail the cast.
+        // This is a very simple version of mp_obj_is_subclass_fast that could be improved.
+        return MP_OBJ_NULL;
+    } else {
+        // instance of a subclass: the native base lives in subobj[0]
+        mp_obj_instance_t *self = (mp_obj_instance_t *)MP_OBJ_TO_PTR(self_in);
+        return self->subobj[0];
+    }
+}
+
+// Return a pointer to the raw bytes of `o`, optionally storing the length in
+// *l.  Errors out (via the RAISE flag on get_buffer) if `o` does not support
+// the buffer protocol.
+static inline void *mp_obj_str_get_data_dyn(mp_obj_t o, size_t *l) {
+    mp_buffer_info_t info;
+    mp_get_buffer_raise(o, &info, MP_BUFFER_READ);
+    if (l) {
+        *l = info.len;
+    }
+    return info.buf;
+}
+
+// Compute len(o) by looking up and calling the builtin len() through the
+// runtime function table.
+static inline mp_obj_t mp_obj_len_dyn(mp_obj_t o) {
+    // If bytes implemented MP_UNARY_OP_LEN could use: mp_unary_op(MP_UNARY_OP_LEN, o)
+    return mp_fun_table.call_function_n_kw(mp_fun_table.load_name(MP_QSTR_len), 1, &o);
+}
+
+// Allocate `num_bytes` for a new object and set up its base type pointer.
+// Caller is responsible for initialising the rest of the object.
+static inline void *mp_obj_malloc_helper_dyn(size_t num_bytes, const mp_obj_type_t *type) {
+    mp_obj_base_t *obj = (mp_obj_base_t *)m_malloc(num_bytes);
+    obj->type = type;
+    return obj;
+}
+
+/******************************************************************************/
+// General runtime functions
+
+#define mp_binary_get_size(struct_type, val_type, palign) (mp_fun_table.binary_get_size((struct_type), (val_type), (palign)))
+#define mp_binary_get_val_array(typecode, p, index) (mp_fun_table.binary_get_val_array((typecode), (p), (index)))
+#define mp_binary_set_val_array(typecode, p, index, val_in) (mp_fun_table.binary_set_val_array((typecode), (p), (index), (val_in)))
+
+#define mp_load_name(qst)                 (mp_fun_table.load_name((qst)))
+#define mp_load_global(qst)               (mp_fun_table.load_global((qst)))
+#define mp_load_attr(base, attr)          (mp_fun_table.load_attr((base), (attr)))
+#define mp_load_method(base, attr, dest)  (mp_fun_table.load_method((base), (attr), (dest)))
+#define mp_load_method_maybe(base, attr, dest) (mp_fun_table.load_method_maybe((base), (attr), (dest)))
+#define mp_load_super_method(attr, dest)  (mp_fun_table.load_super_method((attr), (dest)))
+#define mp_store_name(qst, obj)           (mp_fun_table.store_name((qst), (obj)))
+#define mp_store_global(qst, obj)         (mp_fun_table.store_global((qst), (obj)))
+#define mp_store_attr(base, attr, val)    (mp_fun_table.store_attr((base), (attr), (val)))
+
+#define mp_unary_op(op, obj)        (mp_fun_table.unary_op((op), (obj)))
+#define mp_binary_op(op, lhs, rhs)  (mp_fun_table.binary_op((op), (lhs), (rhs)))
+
+#define mp_make_function_from_proto_fun(rc, context, def_args) \
+    (mp_fun_table.make_function_from_proto_fun((rc), (context), (def_args)))
+
+#define mp_call_function_n_kw(fun, n_args, n_kw, args) \
+    (mp_fun_table.call_function_n_kw((fun), (n_args) | ((n_kw) << 8), args))
+
+#define mp_arg_check_num(n_args, n_kw, n_args_min, n_args_max, takes_kw) \
+    (mp_fun_table.arg_check_num_sig((n_args), (n_kw), MP_OBJ_FUN_MAKE_SIG((n_args_min), (n_args_max), (takes_kw))))
+
+#define mp_arg_parse_all(n_pos, pos, kws, n_allowed, allowed, out_vals) \
+    (mp_fun_table.arg_parse_all((n_pos), (pos), (kws), (n_allowed), (allowed), (out_vals)))
+
+#define mp_arg_parse_all_kw_array(n_pos, n_kw, args, n_allowed, allowed, out_vals) \
+    (mp_fun_table.arg_parse_all_kw_array((n_pos), (n_kw), (args), (n_allowed), (allowed), (out_vals)))
+
+#define MP_DYNRUNTIME_INIT_ENTRY \
+    mp_obj_t old_globals = mp_fun_table.swap_globals(self->context->module.globals); \
+    mp_raw_code_truncated_t rc; \
+    rc.proto_fun_indicator[0] = MP_PROTO_FUN_INDICATOR_RAW_CODE_0; \
+    rc.proto_fun_indicator[1] = MP_PROTO_FUN_INDICATOR_RAW_CODE_1; \
+    rc.kind = MP_CODE_NATIVE_VIPER; \
+    rc.is_generator = 0; \
+    (void)rc;
+
+#define MP_DYNRUNTIME_INIT_EXIT \
+    mp_fun_table.swap_globals(old_globals); \
+    return mp_const_none;
+
+#define MP_DYNRUNTIME_MAKE_FUNCTION(f) \
+    (mp_make_function_from_proto_fun((rc.fun_data = (f), (const mp_raw_code_t *)&rc), self->context, NULL))
+
+#define mp_import_name(name, fromlist, level) \
+    (mp_fun_table.import_name((name), (fromlist), (level)))
+#define mp_import_from(module, name) \
+    (mp_fun_table.import_from((module), (name)))
+// Fix: the expansion below was missing its closing parenthesis (3 opening vs
+// 2 closing), which made any use of mp_import_all() a syntax error.
+#define mp_import_all(module) \
+    (mp_fun_table.import_all((module)))
+
+/******************************************************************************/
+// Exceptions
+
+#define mp_obj_exception_make_new               (MP_OBJ_TYPE_GET_SLOT(&mp_type_Exception, make_new))
+#define mp_obj_exception_print                  (MP_OBJ_TYPE_GET_SLOT(&mp_type_Exception, print))
+#define mp_obj_exception_attr                   (MP_OBJ_TYPE_GET_SLOT(&mp_type_Exception, attr))
+
+#define mp_obj_new_exception(o)                 ((mp_obj_t)(o)) // Assumes returned object will be raised, will create instance then
+#define mp_obj_new_exception_arg1(e_type, arg)  (mp_obj_new_exception_arg1_dyn((e_type), (arg)))
+
+#define nlr_raise(o)                            (mp_raise_dyn(o))
+#define mp_raise_type_arg(type, arg)            (mp_raise_dyn(mp_obj_new_exception_arg1_dyn((type), (arg))))
+#define mp_raise_msg(type, msg)                 (mp_fun_table.raise_msg((type), (msg)))
+#define mp_raise_OSError(er)                    (mp_raise_OSError_dyn(er))
+#define mp_raise_NotImplementedError(msg)       (mp_raise_msg(&mp_type_NotImplementedError, (msg)))
+#define mp_raise_TypeError(msg)                 (mp_raise_msg(&mp_type_TypeError, (msg)))
+#define mp_raise_ValueError(msg)                (mp_raise_msg(&mp_type_ValueError, (msg)))
+
+// Create an instance of exception type `exc_type` with a single constructor
+// argument, by calling the type object.
+static inline mp_obj_t mp_obj_new_exception_arg1_dyn(const mp_obj_type_t *exc_type, mp_obj_t arg) {
+    mp_obj_t args[1] = { arg };
+    return mp_call_function_n_kw(MP_OBJ_FROM_PTR(exc_type), 1, 0, &args[0]);
+}
+
+// Raise exception object `o` via the runtime's raise hook.  The hook does
+// not return; the infinite loop satisfies the NORETURN attribute.
+static NORETURN inline void mp_raise_dyn(mp_obj_t o) {
+    mp_fun_table.raise(o);
+    for (;;) {
+    }
+}
+
+// Raise OSError(er): look up the OSError type in globals, construct an
+// instance with the error number, and raise it.
+static inline void mp_raise_OSError_dyn(int er) {
+    mp_obj_t arg = MP_OBJ_NEW_SMALL_INT(er);
+    nlr_raise(mp_call_function_n_kw(mp_load_global(MP_QSTR_OSError), 1, 0, &arg));
+}
+
+// Initialise `exc` as a new exception type named `name` inheriting from
+// `base`, reusing the generic Exception slot handlers for construction,
+// printing and attribute access.
+static inline void mp_obj_exception_init(mp_obj_full_type_t *exc, qstr name, const mp_obj_type_t *base) {
+    exc->base.type = &mp_type_type;
+    exc->flags = MP_TYPE_FLAG_NONE;
+    exc->name = name;
+    MP_OBJ_TYPE_SET_SLOT(exc, make_new, mp_obj_exception_make_new, 0);
+    MP_OBJ_TYPE_SET_SLOT(exc, print, mp_obj_exception_print, 1);
+    MP_OBJ_TYPE_SET_SLOT(exc, attr, mp_obj_exception_attr, 2);
+    MP_OBJ_TYPE_SET_SLOT(exc, parent, base, 3);
+}
+
+/******************************************************************************/
+// Floating point
+
+#define mp_obj_new_float_from_f(f)  (mp_fun_table.obj_new_float_from_f((f)))
+#define mp_obj_new_float_from_d(d)  (mp_fun_table.obj_new_float_from_d((d)))
+#define mp_obj_get_float_to_f(o)    (mp_fun_table.obj_get_float_to_f((o)))
+#define mp_obj_get_float_to_d(o)    (mp_fun_table.obj_get_float_to_d((o)))
+
+#if MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_FLOAT
+#define mp_obj_new_float(f)         (mp_obj_new_float_from_f((f)))
+#define mp_obj_get_float(o)         (mp_obj_get_float_to_f((o)))
+#elif MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_DOUBLE
+#define mp_obj_new_float(f)         (mp_obj_new_float_from_d((f)))
+#define mp_obj_get_float(o)         (mp_obj_get_float_to_d((o)))
+#endif
+
+/******************************************************************************/
+// Inline function definitions.
+
+// Get the length and items pointer of a tuple or list; raises TypeError for
+// any other type.
+// *items may point inside a GC block
+static inline void mp_obj_get_array_dyn(mp_obj_t o, size_t *len, mp_obj_t **items) {
+    const mp_obj_type_t *type = mp_obj_get_type(o);
+    if (type == &mp_type_tuple) {
+        mp_obj_tuple_t *t = MP_OBJ_TO_PTR(o);
+        *len = t->len;
+        *items = &t->items[0];
+    } else if (type == &mp_type_list) {
+        mp_obj_list_t *l = MP_OBJ_TO_PTR(o);
+        *len = l->len;
+        *items = l->items;
+    } else {
+        mp_raise_TypeError("expected tuple/list");
+    }
+}
+
+#endif // MICROPY_INCLUDED_PY_DYNRUNTIME_H

+ 313 - 0
mp_flipper/lib/micropython/py/emit.h

@@ -0,0 +1,313 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_EMIT_H
+#define MICROPY_INCLUDED_PY_EMIT_H
+
+#include "py/lexer.h"
+#include "py/scope.h"
+
+/* Notes on passes:
+ * We don't know exactly the opcodes in pass 1 because they depend on the
+ * closing over of variables (LOAD_CLOSURE, BUILD_TUPLE, MAKE_CLOSURE), which
+ * depends on determining the scope of variables in each function, and this
+ * is not known until the end of pass 1.
+ * As a consequence, we don't know the maximum stack size until the end of pass 2.
+ * This is problematic for some emitters (x64) since they need to know the maximum
+ * stack size to compile the entry to the function, and this affects code size.
+ */
+
+// The compiler runs these passes in order over each scope; see the notes
+// above on why sizes are not known until the later passes.
+typedef enum {
+    MP_PASS_SCOPE = 1,      // work out id's and their kind, and number of labels
+    MP_PASS_STACK_SIZE = 2, // work out maximum stack size
+    MP_PASS_CODE_SIZE = 3,  // work out code size and label offsets
+    MP_PASS_EMIT = 4,       // emit code (may be run multiple times if the emitter requests it)
+} pass_kind_t;
+
+// Flags for *args / **kwargs in function signatures and call sites.
+#define MP_EMIT_STAR_FLAG_SINGLE (0x01)
+#define MP_EMIT_STAR_FLAG_DOUBLE (0x02)
+
+// NOTE(review): presumably OR'd into the unwind depth to mark a break out of
+// a for loop -- confirm against users of unwind_jump in py/compile.c.
+#define MP_EMIT_BREAK_FROM_FOR (0x8000)
+
+// Kind for emit_id_ops->local()
+#define MP_EMIT_IDOP_LOCAL_FAST (0)
+#define MP_EMIT_IDOP_LOCAL_DEREF (1)
+
+// Kind for emit_id_ops->global()
+#define MP_EMIT_IDOP_GLOBAL_NAME (0)
+#define MP_EMIT_IDOP_GLOBAL_GLOBAL (1)
+
+// Kind for emit->import()
+#define MP_EMIT_IMPORT_NAME (0)
+#define MP_EMIT_IMPORT_FROM (1)
+#define MP_EMIT_IMPORT_STAR (2)
+
+// Kind for emit->subscr()
+#define MP_EMIT_SUBSCR_LOAD (0)
+#define MP_EMIT_SUBSCR_STORE (1)
+#define MP_EMIT_SUBSCR_DELETE (2)
+
+// Kind for emit->attr()
+#define MP_EMIT_ATTR_LOAD (0)
+#define MP_EMIT_ATTR_STORE (1)
+#define MP_EMIT_ATTR_DELETE (2)
+
+// Kind for emit->setup_block()
+#define MP_EMIT_SETUP_BLOCK_WITH (0)
+#define MP_EMIT_SETUP_BLOCK_EXCEPT (1)
+#define MP_EMIT_SETUP_BLOCK_FINALLY (2)
+
+// Kind for emit->build()
+#define MP_EMIT_BUILD_TUPLE (0)
+#define MP_EMIT_BUILD_LIST (1)
+#define MP_EMIT_BUILD_MAP (2)
+#define MP_EMIT_BUILD_SET (3)
+#define MP_EMIT_BUILD_SLICE (4)
+
+// Kind for emit->yield()
+#define MP_EMIT_YIELD_VALUE (0)
+#define MP_EMIT_YIELD_FROM (1)
+
+// Opaque per-emitter state; each backend defines its own struct _emit_t.
+typedef struct _emit_t emit_t;
+
+// State shared by all emitters across a single compilation unit: the current
+// pass, the accumulated child raw-code objects, and (optionally) the
+// per-unit qstr table and constant-object list.
+typedef struct _mp_emit_common_t {
+    pass_kind_t pass;
+    uint16_t ct_cur_child;
+    mp_raw_code_t **children;
+    #if MICROPY_EMIT_BYTECODE_USES_QSTR_TABLE
+    mp_map_t qstr_map;
+    #endif
+    mp_obj_list_t const_obj_list;
+} mp_emit_common_t;
+
+// Operations for loading/storing/deleting an identifier, split by whether it
+// lives in a local slot or the global namespace.
+typedef struct _mp_emit_method_table_id_ops_t {
+    void (*local)(emit_t *emit, qstr qst, mp_uint_t local_num, int kind);
+    void (*global)(emit_t *emit, qstr qst, int kind);
+} mp_emit_method_table_id_ops_t;
+
+// Virtual method table implemented by each code-generation backend (bytecode
+// and the native emitters).  The compiler drives code generation exclusively
+// through this table; the `kind` parameters take the MP_EMIT_* constants
+// defined above.
+typedef struct _emit_method_table_t {
+    #if MICROPY_DYNAMIC_COMPILER
+    // Constructor/destructor, needed when the backend is chosen at runtime.
+    emit_t *(*emit_new)(mp_emit_common_t * emit_common, mp_obj_t *error_slot, uint *label_slot, mp_uint_t max_num_labels);
+    void (*emit_free)(emit_t *emit);
+    #endif
+
+    void (*start_pass)(emit_t *emit, pass_kind_t pass, scope_t *scope);
+    bool (*end_pass)(emit_t *emit);
+    void (*adjust_stack_size)(emit_t *emit, mp_int_t delta);
+    void (*set_source_line)(emit_t *emit, mp_uint_t line);
+
+    mp_emit_method_table_id_ops_t load_id;
+    mp_emit_method_table_id_ops_t store_id;
+    mp_emit_method_table_id_ops_t delete_id;
+
+    void (*label_assign)(emit_t *emit, mp_uint_t l);
+    void (*import)(emit_t *emit, qstr qst, int kind);
+    void (*load_const_tok)(emit_t *emit, mp_token_kind_t tok);
+    void (*load_const_small_int)(emit_t *emit, mp_int_t arg);
+    void (*load_const_str)(emit_t *emit, qstr qst);
+    void (*load_const_obj)(emit_t *emit, mp_obj_t obj);
+    void (*load_null)(emit_t *emit);
+    void (*load_method)(emit_t *emit, qstr qst, bool is_super);
+    void (*load_build_class)(emit_t *emit);
+    void (*subscr)(emit_t *emit, int kind);
+    void (*attr)(emit_t *emit, qstr qst, int kind);
+    void (*dup_top)(emit_t *emit);
+    void (*dup_top_two)(emit_t *emit);
+    void (*pop_top)(emit_t *emit);
+    void (*rot_two)(emit_t *emit);
+    void (*rot_three)(emit_t *emit);
+    void (*jump)(emit_t *emit, mp_uint_t label);
+    void (*pop_jump_if)(emit_t *emit, bool cond, mp_uint_t label);
+    void (*jump_if_or_pop)(emit_t *emit, bool cond, mp_uint_t label);
+    void (*unwind_jump)(emit_t *emit, mp_uint_t label, mp_uint_t except_depth);
+    void (*setup_block)(emit_t *emit, mp_uint_t label, int kind);
+    void (*with_cleanup)(emit_t *emit, mp_uint_t label);
+    void (*end_finally)(emit_t *emit);
+    void (*get_iter)(emit_t *emit, bool use_stack);
+    void (*for_iter)(emit_t *emit, mp_uint_t label);
+    void (*for_iter_end)(emit_t *emit);
+    void (*pop_except_jump)(emit_t *emit, mp_uint_t label, bool within_exc_handler);
+    void (*unary_op)(emit_t *emit, mp_unary_op_t op);
+    void (*binary_op)(emit_t *emit, mp_binary_op_t op);
+    void (*build)(emit_t *emit, mp_uint_t n_args, int kind);
+    void (*store_map)(emit_t *emit);
+    void (*store_comp)(emit_t *emit, scope_kind_t kind, mp_uint_t set_stack_index);
+    void (*unpack_sequence)(emit_t *emit, mp_uint_t n_args);
+    void (*unpack_ex)(emit_t *emit, mp_uint_t n_left, mp_uint_t n_right);
+    void (*make_function)(emit_t *emit, scope_t *scope, mp_uint_t n_pos_defaults, mp_uint_t n_kw_defaults);
+    void (*make_closure)(emit_t *emit, scope_t *scope, mp_uint_t n_closed_over, mp_uint_t n_pos_defaults, mp_uint_t n_kw_defaults);
+    void (*call_function)(emit_t *emit, mp_uint_t n_positional, mp_uint_t n_keyword, mp_uint_t star_flags);
+    void (*call_method)(emit_t *emit, mp_uint_t n_positional, mp_uint_t n_keyword, mp_uint_t star_flags);
+    void (*return_value)(emit_t *emit);
+    void (*raise_varargs)(emit_t *emit, mp_uint_t n_args);
+    void (*yield)(emit_t *emit, int kind);
+
+    // these methods are used to control entry to/exit from an exception handler
+    // they may or may not emit code
+    void (*start_except_handler)(emit_t *emit);
+    void (*end_except_handler)(emit_t *emit);
+} emit_method_table_t;
+
+#if MICROPY_EMIT_BYTECODE_USES_QSTR_TABLE
+// Intern a qstr in the per-unit qstr table and return its table index.
+qstr_short_t mp_emit_common_use_qstr(mp_emit_common_t *emit, qstr qst);
+#else
+// Without a per-unit qstr table the qstr value itself is used directly.
+static inline qstr_short_t mp_emit_common_use_qstr(mp_emit_common_t *emit, qstr qst) {
+    return qst;
+}
+#endif
+
+// Register a constant object with the compilation unit, returning its index.
+size_t mp_emit_common_use_const_obj(mp_emit_common_t *emit, mp_obj_t const_obj);
+
+// Reserve (and on the final pass, store) a slot for a child raw-code object,
+// returning the slot index.  Earlier passes only count the children.
+static inline size_t mp_emit_common_alloc_const_child(mp_emit_common_t *emit, mp_raw_code_t *rc) {
+    if (emit->pass == MP_PASS_EMIT) {
+        emit->children[emit->ct_cur_child] = rc;
+    }
+    return emit->ct_cur_child++;
+}
+
+// A plain load of an unknown id defaults to implicit-global lookup.
+static inline void mp_emit_common_get_id_for_load(scope_t *scope, qstr qst) {
+    scope_find_or_add_id(scope, qst, ID_INFO_KIND_GLOBAL_IMPLICIT);
+}
+
+id_info_t *mp_emit_common_get_id_for_modification(scope_t *scope, qstr qst);
+void mp_emit_common_id_op(emit_t *emit, const mp_emit_method_table_id_ops_t *emit_method_table, scope_t *scope, qstr qst);
+
+// Method tables for each backend; which of these exist in a given build
+// depends on the MICROPY_EMIT_* configuration.
+extern const emit_method_table_t emit_bc_method_table;
+extern const emit_method_table_t emit_native_x64_method_table;
+extern const emit_method_table_t emit_native_x86_method_table;
+extern const emit_method_table_t emit_native_thumb_method_table;
+extern const emit_method_table_t emit_native_arm_method_table;
+extern const emit_method_table_t emit_native_xtensa_method_table;
+extern const emit_method_table_t emit_native_xtensawin_method_table;
+
+extern const mp_emit_method_table_id_ops_t mp_emit_bc_method_table_load_id_ops;
+extern const mp_emit_method_table_id_ops_t mp_emit_bc_method_table_store_id_ops;
+extern const mp_emit_method_table_id_ops_t mp_emit_bc_method_table_delete_id_ops;
+
+// Per-backend constructors and destructors.
+emit_t *emit_bc_new(mp_emit_common_t *emit_common);
+emit_t *emit_native_x64_new(mp_emit_common_t *emit_common, mp_obj_t *error_slot, uint *label_slot, mp_uint_t max_num_labels);
+emit_t *emit_native_x86_new(mp_emit_common_t *emit_common, mp_obj_t *error_slot, uint *label_slot, mp_uint_t max_num_labels);
+emit_t *emit_native_thumb_new(mp_emit_common_t *emit_common, mp_obj_t *error_slot, uint *label_slot, mp_uint_t max_num_labels);
+emit_t *emit_native_arm_new(mp_emit_common_t *emit_common, mp_obj_t *error_slot, uint *label_slot, mp_uint_t max_num_labels);
+emit_t *emit_native_xtensa_new(mp_emit_common_t *emit_common, mp_obj_t *error_slot, uint *label_slot, mp_uint_t max_num_labels);
+emit_t *emit_native_xtensawin_new(mp_emit_common_t *emit_common, mp_obj_t *error_slot, uint *label_slot, mp_uint_t max_num_labels);
+
+void emit_bc_set_max_num_labels(emit_t *emit, mp_uint_t max_num_labels);
+
+void emit_bc_free(emit_t *emit);
+void emit_native_x64_free(emit_t *emit);
+void emit_native_x86_free(emit_t *emit);
+void emit_native_thumb_free(emit_t *emit);
+void emit_native_arm_free(emit_t *emit);
+void emit_native_xtensa_free(emit_t *emit);
+void emit_native_xtensawin_free(emit_t *emit);
+
+// Direct entry points of the bytecode emitter; emit_bc_method_table points at
+// these same functions.
+void mp_emit_bc_start_pass(emit_t *emit, pass_kind_t pass, scope_t *scope);
+bool mp_emit_bc_end_pass(emit_t *emit);
+void mp_emit_bc_adjust_stack_size(emit_t *emit, mp_int_t delta);
+void mp_emit_bc_set_source_line(emit_t *emit, mp_uint_t line);
+
+void mp_emit_bc_load_local(emit_t *emit, qstr qst, mp_uint_t local_num, int kind);
+void mp_emit_bc_load_global(emit_t *emit, qstr qst, int kind);
+void mp_emit_bc_store_local(emit_t *emit, qstr qst, mp_uint_t local_num, int kind);
+void mp_emit_bc_store_global(emit_t *emit, qstr qst, int kind);
+void mp_emit_bc_delete_local(emit_t *emit, qstr qst, mp_uint_t local_num, int kind);
+void mp_emit_bc_delete_global(emit_t *emit, qstr qst, int kind);
+
+void mp_emit_bc_label_assign(emit_t *emit, mp_uint_t l);
+void mp_emit_bc_import(emit_t *emit, qstr qst, int kind);
+void mp_emit_bc_load_const_tok(emit_t *emit, mp_token_kind_t tok);
+void mp_emit_bc_load_const_small_int(emit_t *emit, mp_int_t arg);
+void mp_emit_bc_load_const_str(emit_t *emit, qstr qst);
+void mp_emit_bc_load_const_obj(emit_t *emit, mp_obj_t obj);
+void mp_emit_bc_load_null(emit_t *emit);
+void mp_emit_bc_load_method(emit_t *emit, qstr qst, bool is_super);
+void mp_emit_bc_load_build_class(emit_t *emit);
+void mp_emit_bc_subscr(emit_t *emit, int kind);
+void mp_emit_bc_attr(emit_t *emit, qstr qst, int kind);
+void mp_emit_bc_dup_top(emit_t *emit);
+void mp_emit_bc_dup_top_two(emit_t *emit);
+void mp_emit_bc_pop_top(emit_t *emit);
+void mp_emit_bc_rot_two(emit_t *emit);
+void mp_emit_bc_rot_three(emit_t *emit);
+void mp_emit_bc_jump(emit_t *emit, mp_uint_t label);
+void mp_emit_bc_pop_jump_if(emit_t *emit, bool cond, mp_uint_t label);
+void mp_emit_bc_jump_if_or_pop(emit_t *emit, bool cond, mp_uint_t label);
+void mp_emit_bc_unwind_jump(emit_t *emit, mp_uint_t label, mp_uint_t except_depth);
+void mp_emit_bc_setup_block(emit_t *emit, mp_uint_t label, int kind);
+void mp_emit_bc_with_cleanup(emit_t *emit, mp_uint_t label);
+void mp_emit_bc_end_finally(emit_t *emit);
+void mp_emit_bc_get_iter(emit_t *emit, bool use_stack);
+void mp_emit_bc_for_iter(emit_t *emit, mp_uint_t label);
+void mp_emit_bc_for_iter_end(emit_t *emit);
+void mp_emit_bc_pop_except_jump(emit_t *emit, mp_uint_t label, bool within_exc_handler);
+void mp_emit_bc_unary_op(emit_t *emit, mp_unary_op_t op);
+void mp_emit_bc_binary_op(emit_t *emit, mp_binary_op_t op);
+void mp_emit_bc_build(emit_t *emit, mp_uint_t n_args, int kind);
+void mp_emit_bc_store_map(emit_t *emit);
+void mp_emit_bc_store_comp(emit_t *emit, scope_kind_t kind, mp_uint_t list_stack_index);
+void mp_emit_bc_unpack_sequence(emit_t *emit, mp_uint_t n_args);
+void mp_emit_bc_unpack_ex(emit_t *emit, mp_uint_t n_left, mp_uint_t n_right);
+void mp_emit_bc_make_function(emit_t *emit, scope_t *scope, mp_uint_t n_pos_defaults, mp_uint_t n_kw_defaults);
+void mp_emit_bc_make_closure(emit_t *emit, scope_t *scope, mp_uint_t n_closed_over, mp_uint_t n_pos_defaults, mp_uint_t n_kw_defaults);
+void mp_emit_bc_call_function(emit_t *emit, mp_uint_t n_positional, mp_uint_t n_keyword, mp_uint_t star_flags);
+void mp_emit_bc_call_method(emit_t *emit, mp_uint_t n_positional, mp_uint_t n_keyword, mp_uint_t star_flags);
+void mp_emit_bc_return_value(emit_t *emit);
+void mp_emit_bc_raise_varargs(emit_t *emit, mp_uint_t n_args);
+void mp_emit_bc_yield(emit_t *emit, int kind);
+void mp_emit_bc_start_except_handler(emit_t *emit);
+void mp_emit_bc_end_except_handler(emit_t *emit);
+
+// Interface implemented by the inline-assembler emitters (thumb, xtensa).
+typedef struct _emit_inline_asm_t emit_inline_asm_t;
+
+typedef struct _emit_inline_asm_method_table_t {
+    #if MICROPY_DYNAMIC_COMPILER
+    emit_inline_asm_t *(*asm_new)(mp_uint_t max_num_labels);
+    void (*asm_free)(emit_inline_asm_t *emit);
+    #endif
+
+    void (*start_pass)(emit_inline_asm_t *emit, pass_kind_t pass, mp_obj_t *error_slot);
+    void (*end_pass)(emit_inline_asm_t *emit, mp_uint_t type_sig);
+    mp_uint_t (*count_params)(emit_inline_asm_t *emit, mp_uint_t n_params, mp_parse_node_t *pn_params);
+    bool (*label)(emit_inline_asm_t *emit, mp_uint_t label_num, qstr label_id);
+    void (*op)(emit_inline_asm_t *emit, qstr op, mp_uint_t n_args, mp_parse_node_t *pn_args);
+} emit_inline_asm_method_table_t;
+
+extern const emit_inline_asm_method_table_t emit_inline_thumb_method_table;
+extern const emit_inline_asm_method_table_t emit_inline_xtensa_method_table;
+
+emit_inline_asm_t *emit_inline_thumb_new(mp_uint_t max_num_labels);
+emit_inline_asm_t *emit_inline_xtensa_new(mp_uint_t max_num_labels);
+
+void emit_inline_thumb_free(emit_inline_asm_t *emit);
+void emit_inline_xtensa_free(emit_inline_asm_t *emit);
+
+// Compiler warnings compile away to nothing unless MICROPY_WARNINGS is set.
+#if MICROPY_WARNINGS
+void mp_emitter_warning(pass_kind_t pass, const char *msg);
+#else
+#define mp_emitter_warning(pass, msg)
+#endif
+
+#endif // MICROPY_INCLUDED_PY_EMIT_H

+ 905 - 0
mp_flipper/lib/micropython/py/emitbc.c

@@ -0,0 +1,905 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013-2019 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <assert.h>
+
+#include "py/mpstate.h"
+#include "py/smallint.h"
+#include "py/emit.h"
+#include "py/bc0.h"
+
+#if MICROPY_ENABLE_COMPILER
+
+// Scratch buffer size for suppressed/size-counting writes; large enough for
+// the biggest variable-length uint encoding.
+#define DUMMY_DATA_SIZE (MP_ENCODE_UINT_MAX_BYTES)
+
+// State of the bytecode emitter for one scope across all compiler passes.
+struct _emit_t {
+    // Accessed as mp_obj_t, so must be aligned as such, and we rely on the
+    // memory allocator returning a suitably aligned pointer.
+    // Should work for cases when mp_obj_t is 64-bit on a 32-bit machine.
+    byte dummy_data[DUMMY_DATA_SIZE];
+
+    pass_kind_t pass : 8;
+
+    // Set to true if the code generator should suppress emitted code due to it
+    // being dead code.  This can happen when opcodes immediately follow an
+    // unconditional flow control (eg jump or raise).
+    bool suppress;
+
+    // Current Python value-stack depth at the point being emitted.
+    int stack_size;
+
+    mp_emit_common_t *emit_common;
+    scope_t *scope;
+
+    // Last (bytecode offset, source line) pair written to the line-number info.
+    mp_uint_t last_source_line_offset;
+    mp_uint_t last_source_line;
+
+    size_t max_num_labels;
+    size_t *label_offsets;
+
+    // Write cursors and final sizes for the code-info and bytecode sections.
+    size_t code_info_offset;
+    size_t code_info_size;
+    size_t bytecode_offset;
+    size_t bytecode_size;
+    byte *code_base; // stores both byte code and code info
+    bool overflow;   // set when a jump offset doesn't fit in its encoding
+
+    size_t n_info;   // size of the source-info portion of the prelude
+    size_t n_cell;   // number of locals that must be converted to cells
+};
+
+// Allocate a zeroed bytecode emitter bound to the shared compilation state.
+emit_t *emit_bc_new(mp_emit_common_t *emit_common) {
+    emit_t *emit = m_new0(emit_t, 1);
+    emit->emit_common = emit_common;
+    return emit;
+}
+
+// Allocate the label-offset table once the total number of labels is known.
+void emit_bc_set_max_num_labels(emit_t *emit, mp_uint_t max_num_labels) {
+    emit->max_num_labels = max_num_labels;
+    emit->label_offsets = m_new(size_t, emit->max_num_labels);
+}
+
+// Free the emitter and its label table (the emitted code itself is handed
+// off to the raw-code object in mp_emit_bc_end_pass and is not freed here).
+void emit_bc_free(emit_t *emit) {
+    m_del(size_t, emit->label_offsets, emit->max_num_labels);
+    m_del_obj(emit_t, emit);
+}
+
+// all functions must go through this one to emit code info
+// Before MP_PASS_EMIT only the offset is advanced (to measure the size) and
+// a scratch buffer is returned; on the final pass a real write pointer into
+// code_base is returned.
+static uint8_t *emit_get_cur_to_write_code_info(void *emit_in, size_t num_bytes_to_write) {
+    emit_t *emit = emit_in;
+    if (emit->pass < MP_PASS_EMIT) {
+        emit->code_info_offset += num_bytes_to_write;
+        return emit->dummy_data;
+    } else {
+        assert(emit->code_info_offset + num_bytes_to_write <= emit->code_info_size);
+        byte *c = emit->code_base + emit->code_info_offset;
+        emit->code_info_offset += num_bytes_to_write;
+        return c;
+    }
+}
+
+// Write a single byte to the code-info section.
+static void emit_write_code_info_byte(emit_t *emit, byte val) {
+    *emit_get_cur_to_write_code_info(emit, 1) = val;
+}
+
+// Write a qstr (as its per-unit table index, variable-length encoded) to the
+// code-info section.
+static void emit_write_code_info_qstr(emit_t *emit, qstr qst) {
+    mp_encode_uint(emit, emit_get_cur_to_write_code_info, mp_emit_common_use_qstr(emit->emit_common, qst));
+}
+
+#if MICROPY_ENABLE_SOURCE_LINE
+// Append entries to the bytecode<->source-line mapping table, encoding that
+// `bytes_to_skip` bytecode bytes and `lines_to_skip` source lines pass with
+// no other entry in between.  Large skips are split across multiple entries.
+static void emit_write_code_info_bytes_lines(emit_t *emit, mp_uint_t bytes_to_skip, mp_uint_t lines_to_skip) {
+    assert(bytes_to_skip > 0 || lines_to_skip > 0);
+    while (bytes_to_skip > 0 || lines_to_skip > 0) {
+        mp_uint_t b, l;
+        if (lines_to_skip <= 6 || bytes_to_skip > 0xf) {
+            // use 0b0LLBBBBB encoding
+            b = MIN(bytes_to_skip, 0x1f);
+            if (b < bytes_to_skip) {
+                // we can't skip any lines until we skip all the bytes
+                l = 0;
+            } else {
+                l = MIN(lines_to_skip, 0x3);
+            }
+            *emit_get_cur_to_write_code_info(emit, 1) = b | (l << 5);
+        } else {
+            // use 0b1LLLBBBB 0bLLLLLLLL encoding (l's LSB in second byte)
+            b = MIN(bytes_to_skip, 0xf);
+            l = MIN(lines_to_skip, 0x7ff);
+            byte *ci = emit_get_cur_to_write_code_info(emit, 2);
+            ci[0] = 0x80 | b | ((l >> 4) & 0x70);
+            ci[1] = l;
+        }
+        bytes_to_skip -= b;
+        lines_to_skip -= l;
+    }
+}
+#endif
+
+// all functions must go through this one to emit byte code
+// Mirrors emit_get_cur_to_write_code_info(), with the addition that writes
+// are redirected to the scratch buffer while dead code is being suppressed.
+static uint8_t *emit_get_cur_to_write_bytecode(void *emit_in, size_t num_bytes_to_write) {
+    emit_t *emit = emit_in;
+    if (emit->suppress) {
+        return emit->dummy_data;
+    }
+    if (emit->pass < MP_PASS_EMIT) {
+        emit->bytecode_offset += num_bytes_to_write;
+        return emit->dummy_data;
+    } else {
+        assert(emit->bytecode_offset + num_bytes_to_write <= emit->bytecode_size);
+        byte *c = emit->code_base + emit->code_info_size + emit->bytecode_offset;
+        emit->bytecode_offset += num_bytes_to_write;
+        return c;
+    }
+}
+
+// Write one opcode byte without touching the stack-depth accounting.
+static void emit_write_bytecode_raw_byte(emit_t *emit, byte b1) {
+    byte *c = emit_get_cur_to_write_bytecode(emit, 1);
+    c[0] = b1;
+}
+
+// Write one opcode byte, adjusting the tracked stack depth by stack_adj.
+static void emit_write_bytecode_byte(emit_t *emit, int stack_adj, byte b1) {
+    mp_emit_bc_adjust_stack_size(emit, stack_adj);
+    byte *c = emit_get_cur_to_write_bytecode(emit, 1);
+    c[0] = b1;
+}
+
+// Similar to mp_encode_uint(), just some extra handling to encode sign
+static void emit_write_bytecode_byte_int(emit_t *emit, int stack_adj, byte b1, mp_int_t num) {
+    emit_write_bytecode_byte(emit, stack_adj, b1);
+
+    // We store each 7 bits in a separate byte, and that's how many bytes needed
+    byte buf[MP_ENCODE_UINT_MAX_BYTES];
+    byte *p = buf + sizeof(buf);
+    // We encode in little-endian order, but store in big-endian, to help decoding
+    do {
+        *--p = num & 0x7f;
+        num >>= 7;
+    } while (num != 0 && num != -1);
+    // Make sure that highest bit we stored (mask 0x40) matches sign
+    // of the number. If not, store extra byte just to encode sign
+    if (num == -1 && (*p & 0x40) == 0) {
+        *--p = 0x7f;
+    } else if (num == 0 && (*p & 0x40) != 0) {
+        *--p = 0;
+    }
+
+    // All bytes except the last carry the 0x80 continuation bit.
+    byte *c = emit_get_cur_to_write_bytecode(emit, buf + sizeof(buf) - p);
+    while (p != buf + sizeof(buf) - 1) {
+        *c++ = *p++ | 0x80;
+    }
+    *c = *p;
+}
+
+// Write an opcode followed by a variable-length-encoded unsigned argument.
+static void emit_write_bytecode_byte_uint(emit_t *emit, int stack_adj, byte b, mp_uint_t val) {
+    emit_write_bytecode_byte(emit, stack_adj, b);
+    mp_encode_uint(emit, emit_get_cur_to_write_bytecode, val);
+}
+
+// Write an opcode whose argument is an index into the constant table.
+static void emit_write_bytecode_byte_const(emit_t *emit, int stack_adj, byte b, mp_uint_t n) {
+    emit_write_bytecode_byte_uint(emit, stack_adj, b, n);
+}
+
+// Write an opcode whose argument is a qstr (per-unit table index).
+static void emit_write_bytecode_byte_qstr(emit_t *emit, int stack_adj, byte b, qstr qst) {
+    emit_write_bytecode_byte_uint(emit, stack_adj, b, mp_emit_common_use_qstr(emit->emit_common, qst));
+}
+
+// Write an opcode whose argument is a constant object, registering the object
+// with the compilation unit.
+static void emit_write_bytecode_byte_obj(emit_t *emit, int stack_adj, byte b, mp_obj_t obj) {
+    emit_write_bytecode_byte_const(emit, stack_adj, b, mp_emit_common_use_const_obj(emit->emit_common, obj));
+}
+
+// Write an opcode referencing a child raw-code object (nested function/class).
+static void emit_write_bytecode_byte_child(emit_t *emit, int stack_adj, byte b, mp_raw_code_t *rc) {
+    emit_write_bytecode_byte_const(emit, stack_adj, b,
+        mp_emit_common_alloc_const_child(emit->emit_common, rc));
+    #if MICROPY_PY_SYS_SETTRACE
+    rc->line_of_definition = emit->last_source_line;
+    #endif
+}
+
+// Emit a jump opcode to a destination label.
+// The offset to the label is relative to the ip following this instruction.
+// The offset is encoded as either 1 or 2 bytes, depending on how big it is.
+// The encoding of this jump opcode can change size from one pass to the next,
+// but it must only ever decrease in size on successive passes.
+static void emit_write_bytecode_byte_label(emit_t *emit, int stack_adj, byte b1, mp_uint_t label) {
+    mp_emit_bc_adjust_stack_size(emit, stack_adj);
+
+    if (emit->suppress) {
+        return;
+    }
+
+    // Determine if the jump offset is signed or unsigned, based on the opcode.
+    // NOTE(review): relies on opcodes <= MP_BC_POP_JUMP_IF_FALSE being exactly
+    // the signed-offset jumps -- confirm against the ordering in py/bc0.h.
+    const bool is_signed = b1 <= MP_BC_POP_JUMP_IF_FALSE;
+
+    // Default to a 2-byte encoding (the largest) with an unknown jump offset.
+    // jump_encoding_size counts the *extra* offset bytes beyond the first:
+    // 0 => 1-byte offset, 1 => 2-byte offset.
+    unsigned int jump_encoding_size = 1;
+    ssize_t bytecode_offset = 0;
+
+    // Compute the jump size and offset only when code size is known.
+    if (emit->pass >= MP_PASS_CODE_SIZE) {
+        // The -2 accounts for this jump opcode taking 2 bytes (at least).
+        bytecode_offset = emit->label_offsets[label] - emit->bytecode_offset - 2;
+
+        // Check if the bytecode_offset is small enough to use a 1-byte encoding.
+        if ((is_signed && -64 <= bytecode_offset && bytecode_offset <= 63)
+            || (!is_signed && (size_t)bytecode_offset <= 127)) {
+            // Use a 1-byte jump offset.
+            jump_encoding_size = 0;
+        }
+
+        // Adjust the offset depending on the size of the encoding of the offset.
+        bytecode_offset -= jump_encoding_size;
+
+        assert(is_signed || bytecode_offset >= 0);
+    }
+
+    // Emit the opcode.
+    byte *c = emit_get_cur_to_write_bytecode(emit, 2 + jump_encoding_size);
+    c[0] = b1;
+    if (jump_encoding_size == 0) {
+        // 1-byte offset; signed offsets are stored with a +0x40 bias.
+        if (is_signed) {
+            bytecode_offset += 0x40;
+        }
+        assert(0 <= bytecode_offset && bytecode_offset <= 0x7f);
+        c[1] = bytecode_offset;
+    } else {
+        // 2-byte offset; signed offsets are stored with a +0x4000 bias.
+        if (is_signed) {
+            bytecode_offset += 0x4000;
+        }
+        if (emit->pass == MP_PASS_EMIT && !(0 <= bytecode_offset && bytecode_offset <= 0x7fff)) {
+            emit->overflow = true;
+        }
+        c[1] = 0x80 | (bytecode_offset & 0x7f);
+        c[2] = bytecode_offset >> 7;
+    }
+}
+
+// Begin one compiler pass over a scope: reset the per-pass write cursors and
+// emit the fixed portion of the bytecode prelude (signature, sizes, function
+// name and argument names).
+void mp_emit_bc_start_pass(emit_t *emit, pass_kind_t pass, scope_t *scope) {
+    emit->pass = pass;
+    emit->stack_size = 0;
+    emit->suppress = false;
+    emit->scope = scope;
+    emit->last_source_line_offset = 0;
+    emit->last_source_line = 1;
+    emit->bytecode_offset = 0;
+    emit->code_info_offset = 0;
+    emit->overflow = false;
+
+    // Write local state size, exception stack size, scope flags and number of arguments
+    {
+        mp_uint_t n_state = scope->num_locals + scope->stack_size;
+        if (n_state == 0) {
+            // Need at least 1 entry in the state, in the case an exception is
+            // propagated through this function, the exception is returned in
+            // the highest slot in the state (fastn[0], see vm.c).
+            n_state = 1;
+        }
+        #if MICROPY_DEBUG_VM_STACK_OVERFLOW
+        // An extra slot in the stack is needed to detect VM stack overflow
+        n_state += 1;
+        #endif
+
+        size_t n_exc_stack = scope->exc_stack_size;
+        MP_BC_PRELUDE_SIG_ENCODE(n_state, n_exc_stack, scope, emit_write_code_info_byte, emit);
+    }
+
+    // Write number of cells and size of the source code info
+    // (the values measured on the previous pass are used here).
+    if (emit->pass >= MP_PASS_CODE_SIZE) {
+        size_t n_info = emit->n_info;
+        size_t n_cell = emit->n_cell;
+        MP_BC_PRELUDE_SIZE_ENCODE(n_info, n_cell, emit_write_code_info_byte, emit);
+    }
+
+    // Remember where the source-info section starts so its size can be
+    // computed in mp_emit_bc_end_pass.
+    emit->n_info = emit->code_info_offset;
+
+    // Write the name of this function.
+    emit_write_code_info_qstr(emit, scope->simple_name);
+
+    // Write argument names, needed to resolve positional args passed as keywords.
+    {
+        // For a given argument position (indexed by i) we need to find the
+        // corresponding id_info which is a parameter, as it has the correct
+        // qstr name to use as the argument name.  Note that it's not a simple
+        // 1-1 mapping (ie i!=j in general) because of possible closed-over
+        // variables.  In the case that the argument i has no corresponding
+        // parameter we use "*" as its name (since no argument can ever be named
+        // "*").  We could use a blank qstr but "*" is better for debugging.
+        // Note: there is some wasted RAM here for the case of storing a qstr
+        // for each closed-over variable, and maybe there is a better way to do
+        // it, but that would require changes to mp_setup_code_state.
+        for (int i = 0; i < scope->num_pos_args + scope->num_kwonly_args; i++) {
+            qstr qst = MP_QSTR__star_;
+            for (int j = 0; j < scope->id_info_len; ++j) {
+                id_info_t *id = &scope->id_info[j];
+                if ((id->flags & ID_FLAG_IS_PARAM) && id->local_num == i) {
+                    qst = id->qst;
+                    break;
+                }
+            }
+            emit_write_code_info_qstr(emit, qst);
+        }
+    }
+}
+
+// Finish the current pass.  Returns true when the pass completed; returns
+// false from an emit pass whose code shrank, asking the compiler to run
+// another emit pass with the updated sizes.
+bool mp_emit_bc_end_pass(emit_t *emit) {
+    if (emit->pass == MP_PASS_SCOPE) {
+        return true;
+    }
+
+    // check stack is back to zero size
+    assert(emit->stack_size == 0);
+
+    // Calculate size of source code info section
+    emit->n_info = emit->code_info_offset - emit->n_info;
+
+    // Emit closure section of prelude
+    emit->n_cell = 0;
+    for (size_t i = 0; i < emit->scope->id_info_len; ++i) {
+        id_info_t *id = &emit->scope->id_info[i];
+        if (id->kind == ID_INFO_KIND_CELL) {
+            assert(id->local_num <= 255);
+            emit_write_code_info_byte(emit, id->local_num); // write the local which should be converted to a cell
+            ++emit->n_cell;
+        }
+    }
+
+    if (emit->pass == MP_PASS_CODE_SIZE) {
+        // calculate size of total code-info + bytecode, in bytes
+        emit->code_info_size = emit->code_info_offset;
+        emit->bytecode_size = emit->bytecode_offset;
+        emit->code_base = m_new0(byte, emit->code_info_size + emit->bytecode_size);
+
+    } else if (emit->pass == MP_PASS_EMIT) {
+        // Code info and/or bytecode can shrink during this pass.
+        assert(emit->code_info_offset <= emit->code_info_size);
+        assert(emit->bytecode_offset <= emit->bytecode_size);
+
+        if (emit->code_info_offset != emit->code_info_size
+            || emit->bytecode_offset != emit->bytecode_size) {
+            // Code info and/or bytecode changed size in this pass, so request the
+            // compiler to do another pass with these updated sizes.
+            emit->code_info_size = emit->code_info_offset;
+            emit->bytecode_size = emit->bytecode_offset;
+            return false;
+        }
+
+        if (emit->overflow) {
+            mp_raise_msg(&mp_type_RuntimeError, MP_ERROR_TEXT("bytecode overflow"));
+        }
+
+        #if MICROPY_PERSISTENT_CODE_SAVE || MICROPY_DEBUG_PRINTERS
+        size_t bytecode_len = emit->code_info_size + emit->bytecode_size;
+        #if MICROPY_DEBUG_PRINTERS
+        emit->scope->raw_code_data_len = bytecode_len;
+        #endif
+        #endif
+
+        // Bytecode is finalised, assign it to the raw code object.
+        mp_emit_glue_assign_bytecode(emit->scope->raw_code, emit->code_base,
+            emit->emit_common->children,
+            #if MICROPY_PERSISTENT_CODE_SAVE
+            bytecode_len,
+            emit->emit_common->ct_cur_child,
+            #endif
+            emit->scope->scope_flags);
+    }
+
+    return true;
+}
+
+// Track the current Python value-stack depth, recording the high-water mark
+// in the scope (used to size the VM state).
+void mp_emit_bc_adjust_stack_size(emit_t *emit, mp_int_t delta) {
+    if (emit->pass == MP_PASS_SCOPE) {
+        return;
+    }
+    assert((mp_int_t)emit->stack_size + delta >= 0);
+    emit->stack_size += delta;
+    if (emit->stack_size > emit->scope->stack_size) {
+        emit->scope->stack_size = emit->stack_size;
+    }
+}
+
+// Record a new source line for the current bytecode offset in the
+// line-number table (only when source-line info is enabled and not
+// suppressed by the -O3 optimisation level).
+void mp_emit_bc_set_source_line(emit_t *emit, mp_uint_t source_line) {
+    #if MICROPY_ENABLE_SOURCE_LINE
+    if (MP_STATE_VM(mp_optimise_value) >= 3) {
+        // If we compile with -O3, don't store line numbers.
+        return;
+    }
+    if (source_line > emit->last_source_line) {
+        mp_uint_t bytes_to_skip = emit->bytecode_offset - emit->last_source_line_offset;
+        mp_uint_t lines_to_skip = source_line - emit->last_source_line;
+        emit_write_code_info_bytes_lines(emit, bytes_to_skip, lines_to_skip);
+        emit->last_source_line_offset = emit->bytecode_offset;
+        emit->last_source_line = source_line;
+    }
+    #else
+    (void)emit;
+    (void)source_line;
+    #endif
+}
+
// Bind label `l` to the current bytecode offset.
void mp_emit_bc_label_assign(emit_t *emit, mp_uint_t l) {
    // Assigning a label ends any dead-code region, and all following opcodes
    // should be emitted (until another unconditional flow control).
    emit->suppress = false;

    if (emit->pass == MP_PASS_SCOPE) {
        return;
    }

    // Label offsets can change from one pass to the next, but they must only
    // decrease (ie code can only shrink).  There will be multiple MP_PASS_EMIT
    // stages until the labels no longer change, which is when the code size
    // stays constant after a MP_PASS_EMIT.
    assert(l < emit->max_num_labels);
    assert(emit->pass == MP_PASS_STACK_SIZE || emit->bytecode_offset <= emit->label_offsets[l]);

    // Assign label offset.
    emit->label_offsets[l] = emit->bytecode_offset;
}
+
// Emit an import opcode.  IMPORT_NAME/IMPORT_FROM are encoded as a base
// opcode plus the `kind` offset (the static asserts pin that layout);
// IMPORT_STAR is its own opcode.  IMPORT_FROM pushes one value, the other
// kinds consume one.
void mp_emit_bc_import(emit_t *emit, qstr qst, int kind) {
    MP_STATIC_ASSERT(MP_BC_IMPORT_NAME + MP_EMIT_IMPORT_NAME == MP_BC_IMPORT_NAME);
    MP_STATIC_ASSERT(MP_BC_IMPORT_NAME + MP_EMIT_IMPORT_FROM == MP_BC_IMPORT_FROM);
    int stack_adj = kind == MP_EMIT_IMPORT_FROM ? 1 : -1;
    if (kind == MP_EMIT_IMPORT_STAR) {
        emit_write_bytecode_byte(emit, stack_adj, MP_BC_IMPORT_STAR);
    } else {
        emit_write_bytecode_byte_qstr(emit, stack_adj, MP_BC_IMPORT_NAME + kind, qst);
    }
}

// Load a keyword constant (False/None/True map onto consecutive opcodes);
// Ellipsis has no dedicated opcode and is loaded as a constant object.
void mp_emit_bc_load_const_tok(emit_t *emit, mp_token_kind_t tok) {
    MP_STATIC_ASSERT(MP_BC_LOAD_CONST_FALSE + (MP_TOKEN_KW_NONE - MP_TOKEN_KW_FALSE) == MP_BC_LOAD_CONST_NONE);
    MP_STATIC_ASSERT(MP_BC_LOAD_CONST_FALSE + (MP_TOKEN_KW_TRUE - MP_TOKEN_KW_FALSE) == MP_BC_LOAD_CONST_TRUE);
    if (tok == MP_TOKEN_ELLIPSIS) {
        emit_write_bytecode_byte_obj(emit, 1, MP_BC_LOAD_CONST_OBJ, MP_OBJ_FROM_PTR(&mp_const_ellipsis_obj));
    } else {
        emit_write_bytecode_byte(emit, 1, MP_BC_LOAD_CONST_FALSE + (tok - MP_TOKEN_KW_FALSE));
    }
}

// Load a small-int constant.  Values in the "multi" window use a compact
// single-byte opcode; anything else is emitted with an explicit int arg.
void mp_emit_bc_load_const_small_int(emit_t *emit, mp_int_t arg) {
    assert(MP_SMALL_INT_FITS(arg));
    if (-MP_BC_LOAD_CONST_SMALL_INT_MULTI_EXCESS <= arg
        && arg < MP_BC_LOAD_CONST_SMALL_INT_MULTI_NUM - MP_BC_LOAD_CONST_SMALL_INT_MULTI_EXCESS) {
        emit_write_bytecode_byte(emit, 1,
            MP_BC_LOAD_CONST_SMALL_INT_MULTI + MP_BC_LOAD_CONST_SMALL_INT_MULTI_EXCESS + arg);
    } else {
        emit_write_bytecode_byte_int(emit, 1, MP_BC_LOAD_CONST_SMALL_INT, arg);
    }
}

// Load an interned-string constant.
void mp_emit_bc_load_const_str(emit_t *emit, qstr qst) {
    emit_write_bytecode_byte_qstr(emit, 1, MP_BC_LOAD_CONST_STRING, qst);
}

// Load an arbitrary constant object from the constant table.
void mp_emit_bc_load_const_obj(emit_t *emit, mp_obj_t obj) {
    emit_write_bytecode_byte_obj(emit, 1, MP_BC_LOAD_CONST_OBJ, obj);
}

// Push the sentinel NULL value (used e.g. as a marker for delete ops below).
void mp_emit_bc_load_null(emit_t *emit) {
    emit_write_bytecode_byte(emit, 1, MP_BC_LOAD_NULL);
}

// Load a local variable: fast locals 0..15 get compact opcodes, otherwise
// LOAD_FAST_N/LOAD_DEREF (selected by `kind`, layout pinned by the asserts).
void mp_emit_bc_load_local(emit_t *emit, qstr qst, mp_uint_t local_num, int kind) {
    MP_STATIC_ASSERT(MP_BC_LOAD_FAST_N + MP_EMIT_IDOP_LOCAL_FAST == MP_BC_LOAD_FAST_N);
    MP_STATIC_ASSERT(MP_BC_LOAD_FAST_N + MP_EMIT_IDOP_LOCAL_DEREF == MP_BC_LOAD_DEREF);
    (void)qst;
    if (kind == MP_EMIT_IDOP_LOCAL_FAST && local_num <= 15) {
        emit_write_bytecode_byte(emit, 1, MP_BC_LOAD_FAST_MULTI + local_num);
    } else {
        emit_write_bytecode_byte_uint(emit, 1, MP_BC_LOAD_FAST_N + kind, local_num);
    }
}

// Load a global: LOAD_NAME (scope chain) or LOAD_GLOBAL, selected by `kind`.
void mp_emit_bc_load_global(emit_t *emit, qstr qst, int kind) {
    MP_STATIC_ASSERT(MP_BC_LOAD_NAME + MP_EMIT_IDOP_GLOBAL_NAME == MP_BC_LOAD_NAME);
    MP_STATIC_ASSERT(MP_BC_LOAD_NAME + MP_EMIT_IDOP_GLOBAL_GLOBAL == MP_BC_LOAD_GLOBAL);
    (void)qst;
    emit_write_bytecode_byte_qstr(emit, 1, MP_BC_LOAD_NAME + kind, qst);
}

// Load a method for a subsequent CALL_METHOD.  A super() method load
// consumes two extra stack items (net -1), a plain load pushes one.
void mp_emit_bc_load_method(emit_t *emit, qstr qst, bool is_super) {
    int stack_adj = 1 - 2 * is_super;
    emit_write_bytecode_byte_qstr(emit, stack_adj, is_super ? MP_BC_LOAD_SUPER_METHOD : MP_BC_LOAD_METHOD, qst);
}

// Push the __build_class__ builtin.
void mp_emit_bc_load_build_class(emit_t *emit) {
    emit_write_bytecode_byte(emit, 1, MP_BC_LOAD_BUILD_CLASS);
}
+
// Emit a subscript operation.  Delete is encoded as a store of the NULL
// sentinel (pushed here and rotated under base/index).
void mp_emit_bc_subscr(emit_t *emit, int kind) {
    if (kind == MP_EMIT_SUBSCR_LOAD) {
        emit_write_bytecode_byte(emit, -1, MP_BC_LOAD_SUBSCR);
    } else {
        if (kind == MP_EMIT_SUBSCR_DELETE) {
            mp_emit_bc_load_null(emit);
            mp_emit_bc_rot_three(emit);
        }
        emit_write_bytecode_byte(emit, -3, MP_BC_STORE_SUBSCR);
    }
}

// Emit an attribute operation; delete is a store of the NULL sentinel,
// mirroring mp_emit_bc_subscr above.
void mp_emit_bc_attr(emit_t *emit, qstr qst, int kind) {
    if (kind == MP_EMIT_ATTR_LOAD) {
        emit_write_bytecode_byte_qstr(emit, 0, MP_BC_LOAD_ATTR, qst);
    } else {
        if (kind == MP_EMIT_ATTR_DELETE) {
            mp_emit_bc_load_null(emit);
            mp_emit_bc_rot_two(emit);
        }
        emit_write_bytecode_byte_qstr(emit, -2, MP_BC_STORE_ATTR, qst);
    }
}

// Store to a local variable; fast locals 0..15 get compact opcodes
// (same encoding scheme as mp_emit_bc_load_local).
void mp_emit_bc_store_local(emit_t *emit, qstr qst, mp_uint_t local_num, int kind) {
    MP_STATIC_ASSERT(MP_BC_STORE_FAST_N + MP_EMIT_IDOP_LOCAL_FAST == MP_BC_STORE_FAST_N);
    MP_STATIC_ASSERT(MP_BC_STORE_FAST_N + MP_EMIT_IDOP_LOCAL_DEREF == MP_BC_STORE_DEREF);
    (void)qst;
    if (kind == MP_EMIT_IDOP_LOCAL_FAST && local_num <= 15) {
        emit_write_bytecode_byte(emit, -1, MP_BC_STORE_FAST_MULTI + local_num);
    } else {
        emit_write_bytecode_byte_uint(emit, -1, MP_BC_STORE_FAST_N + kind, local_num);
    }
}

// Store to a global: STORE_NAME or STORE_GLOBAL selected by `kind`.
void mp_emit_bc_store_global(emit_t *emit, qstr qst, int kind) {
    MP_STATIC_ASSERT(MP_BC_STORE_NAME + MP_EMIT_IDOP_GLOBAL_NAME == MP_BC_STORE_NAME);
    MP_STATIC_ASSERT(MP_BC_STORE_NAME + MP_EMIT_IDOP_GLOBAL_GLOBAL == MP_BC_STORE_GLOBAL);
    emit_write_bytecode_byte_qstr(emit, -1, MP_BC_STORE_NAME + kind, qst);
}

// Delete a local variable (DELETE_FAST or DELETE_DEREF by `kind`).
void mp_emit_bc_delete_local(emit_t *emit, qstr qst, mp_uint_t local_num, int kind) {
    MP_STATIC_ASSERT(MP_BC_DELETE_FAST + MP_EMIT_IDOP_LOCAL_FAST == MP_BC_DELETE_FAST);
    MP_STATIC_ASSERT(MP_BC_DELETE_FAST + MP_EMIT_IDOP_LOCAL_DEREF == MP_BC_DELETE_DEREF);
    (void)qst;
    emit_write_bytecode_byte_uint(emit, 0, MP_BC_DELETE_FAST + kind, local_num);
}

// Delete a global (DELETE_NAME or DELETE_GLOBAL by `kind`).
void mp_emit_bc_delete_global(emit_t *emit, qstr qst, int kind) {
    MP_STATIC_ASSERT(MP_BC_DELETE_NAME + MP_EMIT_IDOP_GLOBAL_NAME == MP_BC_DELETE_NAME);
    MP_STATIC_ASSERT(MP_BC_DELETE_NAME + MP_EMIT_IDOP_GLOBAL_GLOBAL == MP_BC_DELETE_GLOBAL);
    emit_write_bytecode_byte_qstr(emit, 0, MP_BC_DELETE_NAME + kind, qst);
}
+
// Simple stack-manipulation opcodes; the second argument to
// emit_write_bytecode_byte is the net stack-depth adjustment.

// Duplicate the top-of-stack item.
void mp_emit_bc_dup_top(emit_t *emit) {
    emit_write_bytecode_byte(emit, 1, MP_BC_DUP_TOP);
}

// Duplicate the top two stack items.
void mp_emit_bc_dup_top_two(emit_t *emit) {
    emit_write_bytecode_byte(emit, 2, MP_BC_DUP_TOP_TWO);
}

// Discard the top-of-stack item.
void mp_emit_bc_pop_top(emit_t *emit) {
    emit_write_bytecode_byte(emit, -1, MP_BC_POP_TOP);
}

// Swap the top two stack items.
void mp_emit_bc_rot_two(emit_t *emit) {
    emit_write_bytecode_byte(emit, 0, MP_BC_ROT_TWO);
}

// Rotate the top three stack items.
void mp_emit_bc_rot_three(emit_t *emit) {
    emit_write_bytecode_byte(emit, 0, MP_BC_ROT_THREE);
}
+
// Unconditional jump; code after it is dead until the next label.
void mp_emit_bc_jump(emit_t *emit, mp_uint_t label) {
    emit_write_bytecode_byte_label(emit, 0, MP_BC_JUMP, label);
    emit->suppress = true;
}

// Pop the top-of-stack and jump if it matches `cond`.
void mp_emit_bc_pop_jump_if(emit_t *emit, bool cond, mp_uint_t label) {
    if (cond) {
        emit_write_bytecode_byte_label(emit, -1, MP_BC_POP_JUMP_IF_TRUE, label);
    } else {
        emit_write_bytecode_byte_label(emit, -1, MP_BC_POP_JUMP_IF_FALSE, label);
    }
}

// Short-circuit jump: jump (keeping TOS) if it matches `cond`, else pop it.
void mp_emit_bc_jump_if_or_pop(emit_t *emit, bool cond, mp_uint_t label) {
    if (cond) {
        emit_write_bytecode_byte_label(emit, -1, MP_BC_JUMP_IF_TRUE_OR_POP, label);
    } else {
        emit_write_bytecode_byte_label(emit, -1, MP_BC_JUMP_IF_FALSE_OR_POP, label);
    }
}

// Jump out of `except_depth` nested exception blocks (break/continue).
// The MP_EMIT_BREAK_FROM_FOR bit in `label` flags a break out of a for
// loop, which must also discard the iterator (and its iter_buf slots).
void mp_emit_bc_unwind_jump(emit_t *emit, mp_uint_t label, mp_uint_t except_depth) {
    if (except_depth == 0) {
        if (label & MP_EMIT_BREAK_FROM_FOR) {
            // need to pop the iterator if we are breaking out of a for loop
            emit_write_bytecode_raw_byte(emit, MP_BC_POP_TOP);
            // also pop the iter_buf
            for (size_t i = 0; i < MP_OBJ_ITER_BUF_NSLOTS - 1; ++i) {
                emit_write_bytecode_raw_byte(emit, MP_BC_POP_TOP);
            }
        }
        emit_write_bytecode_byte_label(emit, 0, MP_BC_JUMP, label & ~MP_EMIT_BREAK_FROM_FOR);
    } else {
        emit_write_bytecode_byte_label(emit, 0, MP_BC_UNWIND_JUMP, label & ~MP_EMIT_BREAK_FROM_FOR);
        // Extra operand byte: bit 7 flags break-from-for, low bits the depth.
        emit_write_bytecode_raw_byte(emit, ((label & MP_EMIT_BREAK_FROM_FOR) ? 0x80 : 0) | except_depth);
    }
    emit->suppress = true;
}
+
// Emit SETUP_WITH/SETUP_EXCEPT/SETUP_FINALLY (consecutive opcodes, layout
// pinned by the asserts), with `label` the handler/cleanup target.
void mp_emit_bc_setup_block(emit_t *emit, mp_uint_t label, int kind) {
    MP_STATIC_ASSERT(MP_BC_SETUP_WITH + MP_EMIT_SETUP_BLOCK_WITH == MP_BC_SETUP_WITH);
    MP_STATIC_ASSERT(MP_BC_SETUP_WITH + MP_EMIT_SETUP_BLOCK_EXCEPT == MP_BC_SETUP_EXCEPT);
    MP_STATIC_ASSERT(MP_BC_SETUP_WITH + MP_EMIT_SETUP_BLOCK_FINALLY == MP_BC_SETUP_FINALLY);
    // The SETUP_WITH opcode pops ctx_mgr from the top of the stack
    // and then pushes 3 entries: __exit__, ctx_mgr, as_value.
    int stack_adj = kind == MP_EMIT_SETUP_BLOCK_WITH ? 2 : 0;
    emit_write_bytecode_byte_label(emit, stack_adj, MP_BC_SETUP_WITH + kind, label);
}

// Emit the cleanup sequence at the end of a `with` block.
void mp_emit_bc_with_cleanup(emit_t *emit, mp_uint_t label) {
    mp_emit_bc_load_const_tok(emit, MP_TOKEN_KW_NONE);
    mp_emit_bc_label_assign(emit, label);
    // The +2 is to ensure we have enough stack space to call the __exit__ method
    emit_write_bytecode_byte(emit, 2, MP_BC_WITH_CLEANUP);
    // Cancel the +2 above, plus the +2 from mp_emit_bc_setup_block(MP_EMIT_SETUP_BLOCK_WITH)
    mp_emit_bc_adjust_stack_size(emit, -4);
}

// Emit END_FINALLY, which consumes the pending-exception marker.
void mp_emit_bc_end_finally(emit_t *emit) {
    emit_write_bytecode_byte(emit, -1, MP_BC_END_FINALLY);
}

// Get an iterator for a for-loop.  GET_ITER_STACK reserves
// MP_OBJ_ITER_BUF_NSLOTS stack slots for an in-place iterator buffer.
void mp_emit_bc_get_iter(emit_t *emit, bool use_stack) {
    int stack_adj = use_stack ? MP_OBJ_ITER_BUF_NSLOTS - 1 : 0;
    emit_write_bytecode_byte(emit, stack_adj, use_stack ? MP_BC_GET_ITER_STACK : MP_BC_GET_ITER);
}

// Fetch the next item from the iterator, jumping to `label` on exhaustion.
void mp_emit_bc_for_iter(emit_t *emit, mp_uint_t label) {
    emit_write_bytecode_byte_label(emit, 1, MP_BC_FOR_ITER, label);
}

// Account for the iterator-buffer slots released when a for-loop ends.
void mp_emit_bc_for_iter_end(emit_t *emit) {
    mp_emit_bc_adjust_stack_size(emit, -MP_OBJ_ITER_BUF_NSLOTS);
}

// Pop the current exception block and jump; starts a dead-code region.
void mp_emit_bc_pop_except_jump(emit_t *emit, mp_uint_t label, bool within_exc_handler) {
    (void)within_exc_handler;
    emit_write_bytecode_byte_label(emit, 0, MP_BC_POP_EXCEPT_JUMP, label);
    emit->suppress = true;
}
+
// Emit a unary operation; the op is folded into the multi-opcode range.
void mp_emit_bc_unary_op(emit_t *emit, mp_unary_op_t op) {
    emit_write_bytecode_byte(emit, 0, MP_BC_UNARY_OP_MULTI + op);
}

// Emit a binary operation.  `not in` and `is not` have no dedicated
// opcodes: they are emitted as `in`/`is` followed by a unary NOT.
void mp_emit_bc_binary_op(emit_t *emit, mp_binary_op_t op) {
    bool invert = false;
    if (op == MP_BINARY_OP_NOT_IN) {
        invert = true;
        op = MP_BINARY_OP_IN;
    } else if (op == MP_BINARY_OP_IS_NOT) {
        invert = true;
        op = MP_BINARY_OP_IS;
    }
    emit_write_bytecode_byte(emit, -1, MP_BC_BINARY_OP_MULTI + op);
    if (invert) {
        emit_write_bytecode_byte(emit, 0, MP_BC_UNARY_OP_MULTI + MP_UNARY_OP_NOT);
    }
}
+
+void mp_emit_bc_build(emit_t *emit, mp_uint_t n_args, int kind) {
+    MP_STATIC_ASSERT(MP_BC_BUILD_TUPLE + MP_EMIT_BUILD_TUPLE == MP_BC_BUILD_TUPLE);
+    MP_STATIC_ASSERT(MP_BC_BUILD_TUPLE + MP_EMIT_BUILD_LIST == MP_BC_BUILD_LIST);
+    MP_STATIC_ASSERT(MP_BC_BUILD_TUPLE + MP_EMIT_BUILD_MAP == MP_BC_BUILD_MAP);
+    MP_STATIC_ASSERT(MP_BC_BUILD_TUPLE + MP_EMIT_BUILD_SET == MP_BC_BUILD_SET);
+    MP_STATIC_ASSERT(MP_BC_BUILD_TUPLE + MP_EMIT_BUILD_SLICE == MP_BC_BUILD_SLICE);
+    int stack_adj = kind == MP_EMIT_BUILD_MAP ? 1 : 1 - n_args;
+    emit_write_bytecode_byte_uint(emit, stack_adj, MP_BC_BUILD_TUPLE + kind, n_args);
+}
+
+void mp_emit_bc_store_map(emit_t *emit) {
+    emit_write_bytecode_byte(emit, -2, MP_BC_STORE_MAP);
+}
+
+void mp_emit_bc_store_comp(emit_t *emit, scope_kind_t kind, mp_uint_t collection_stack_index) {
+    int t;
+    int n;
+    if (kind == SCOPE_LIST_COMP) {
+        n = 0;
+        t = 0;
+    } else if (!MICROPY_PY_BUILTINS_SET || kind == SCOPE_DICT_COMP) {
+        n = 1;
+        t = 1;
+    } else if (MICROPY_PY_BUILTINS_SET) {
+        n = 0;
+        t = 2;
+    }
+    // the lower 2 bits of the opcode argument indicate the collection type
+    emit_write_bytecode_byte_uint(emit, -1 - n, MP_BC_STORE_COMP, ((collection_stack_index + n) << 2) | t);
+}
+
+void mp_emit_bc_unpack_sequence(emit_t *emit, mp_uint_t n_args) {
+    emit_write_bytecode_byte_uint(emit, -1 + n_args, MP_BC_UNPACK_SEQUENCE, n_args);
+}
+
+void mp_emit_bc_unpack_ex(emit_t *emit, mp_uint_t n_left, mp_uint_t n_right) {
+    emit_write_bytecode_byte_uint(emit, -1 + n_left + n_right + 1, MP_BC_UNPACK_EX, n_left | (n_right << 8));
+}
+
// Create a function object from a child scope's raw code.  With default
// arguments present, the defaults tuple and dict are popped (net -1).
void mp_emit_bc_make_function(emit_t *emit, scope_t *scope, mp_uint_t n_pos_defaults, mp_uint_t n_kw_defaults) {
    if (n_pos_defaults == 0 && n_kw_defaults == 0) {
        emit_write_bytecode_byte_child(emit, 1, MP_BC_MAKE_FUNCTION, scope->raw_code);
    } else {
        emit_write_bytecode_byte_child(emit, -1, MP_BC_MAKE_FUNCTION_DEFARGS, scope->raw_code);
    }
}

// Create a closure: like make_function but also pops the n_closed_over
// cell variables, which are written as an extra raw operand byte.
void mp_emit_bc_make_closure(emit_t *emit, scope_t *scope, mp_uint_t n_closed_over, mp_uint_t n_pos_defaults, mp_uint_t n_kw_defaults) {
    if (n_pos_defaults == 0 && n_kw_defaults == 0) {
        int stack_adj = -n_closed_over + 1;
        emit_write_bytecode_byte_child(emit, stack_adj, MP_BC_MAKE_CLOSURE, scope->raw_code);
        emit_write_bytecode_raw_byte(emit, n_closed_over);
    } else {
        // The count must fit in the single operand byte written below.
        assert(n_closed_over <= 255);
        int stack_adj = -2 - (mp_int_t)n_closed_over + 1;
        emit_write_bytecode_byte_child(emit, stack_adj, MP_BC_MAKE_CLOSURE_DEFARGS, scope->raw_code);
        emit_write_bytecode_raw_byte(emit, n_closed_over);
    }
}

// Common helper for CALL_FUNCTION/CALL_METHOD.  `bytecode_base + 1` is the
// VAR_KW variant used when star/double-star args are present (they add one
// extra stack item, the star-args bitmap).  The operand packs n_keyword
// into the high byte.
static void emit_bc_call_function_method_helper(emit_t *emit, int stack_adj, mp_uint_t bytecode_base, mp_uint_t n_positional, mp_uint_t n_keyword, mp_uint_t star_flags) {
    if (star_flags) {
        // each positional arg is one object, each kwarg is two objects, the key
        // and the value and one extra object for the star args bitmap.
        stack_adj -= (int)n_positional + 2 * (int)n_keyword + 1;
        emit_write_bytecode_byte_uint(emit, stack_adj, bytecode_base + 1, (n_keyword << 8) | n_positional); // TODO make it 2 separate uints?
    } else {
        stack_adj -= (int)n_positional + 2 * (int)n_keyword;
        emit_write_bytecode_byte_uint(emit, stack_adj, bytecode_base, (n_keyword << 8) | n_positional); // TODO make it 2 separate uints?
    }
}

// Call a function object (pops args and callable, pushes the result).
void mp_emit_bc_call_function(emit_t *emit, mp_uint_t n_positional, mp_uint_t n_keyword, mp_uint_t star_flags) {
    emit_bc_call_function_method_helper(emit, 0, MP_BC_CALL_FUNCTION, n_positional, n_keyword, star_flags);
}

// Call a bound method (one extra pop for the self/method pair).
void mp_emit_bc_call_method(emit_t *emit, mp_uint_t n_positional, mp_uint_t n_keyword, mp_uint_t star_flags) {
    emit_bc_call_function_method_helper(emit, -1, MP_BC_CALL_METHOD, n_positional, n_keyword, star_flags);
}
+
+void mp_emit_bc_return_value(emit_t *emit) {
+    emit_write_bytecode_byte(emit, -1, MP_BC_RETURN_VALUE);
+    emit->suppress = true;
+}
+
+void mp_emit_bc_raise_varargs(emit_t *emit, mp_uint_t n_args) {
+    MP_STATIC_ASSERT(MP_BC_RAISE_LAST + 1 == MP_BC_RAISE_OBJ);
+    MP_STATIC_ASSERT(MP_BC_RAISE_LAST + 2 == MP_BC_RAISE_FROM);
+    assert(n_args <= 2);
+    emit_write_bytecode_byte(emit, -n_args, MP_BC_RAISE_LAST + n_args);
+    emit->suppress = true;
+}
+
+void mp_emit_bc_yield(emit_t *emit, int kind) {
+    MP_STATIC_ASSERT(MP_BC_YIELD_VALUE + 1 == MP_BC_YIELD_FROM);
+    emit_write_bytecode_byte(emit, -kind, MP_BC_YIELD_VALUE + kind);
+    emit->scope->scope_flags |= MP_SCOPE_FLAG_GENERATOR;
+}
+
+void mp_emit_bc_start_except_handler(emit_t *emit) {
+    mp_emit_bc_adjust_stack_size(emit, 4); // stack adjust for the exception instance, +3 for possible UNWIND_JUMP state
+}
+
+void mp_emit_bc_end_except_handler(emit_t *emit) {
+    mp_emit_bc_adjust_stack_size(emit, -3); // stack adjust
+}
+
#if MICROPY_EMIT_NATIVE
// When native emitters are compiled in, the compiler dispatches through a
// full method table so bytecode and native backends are interchangeable.
const emit_method_table_t emit_bc_method_table = {
    #if MICROPY_DYNAMIC_COMPILER
    NULL,
    NULL,
    #endif

    mp_emit_bc_start_pass,
    mp_emit_bc_end_pass,
    mp_emit_bc_adjust_stack_size,
    mp_emit_bc_set_source_line,

    {
        mp_emit_bc_load_local,
        mp_emit_bc_load_global,
    },
    {
        mp_emit_bc_store_local,
        mp_emit_bc_store_global,
    },
    {
        mp_emit_bc_delete_local,
        mp_emit_bc_delete_global,
    },

    mp_emit_bc_label_assign,
    mp_emit_bc_import,
    mp_emit_bc_load_const_tok,
    mp_emit_bc_load_const_small_int,
    mp_emit_bc_load_const_str,
    mp_emit_bc_load_const_obj,
    mp_emit_bc_load_null,
    mp_emit_bc_load_method,
    mp_emit_bc_load_build_class,
    mp_emit_bc_subscr,
    mp_emit_bc_attr,
    mp_emit_bc_dup_top,
    mp_emit_bc_dup_top_two,
    mp_emit_bc_pop_top,
    mp_emit_bc_rot_two,
    mp_emit_bc_rot_three,
    mp_emit_bc_jump,
    mp_emit_bc_pop_jump_if,
    mp_emit_bc_jump_if_or_pop,
    mp_emit_bc_unwind_jump,
    mp_emit_bc_setup_block,
    mp_emit_bc_with_cleanup,
    mp_emit_bc_end_finally,
    mp_emit_bc_get_iter,
    mp_emit_bc_for_iter,
    mp_emit_bc_for_iter_end,
    mp_emit_bc_pop_except_jump,
    mp_emit_bc_unary_op,
    mp_emit_bc_binary_op,
    mp_emit_bc_build,
    mp_emit_bc_store_map,
    mp_emit_bc_store_comp,
    mp_emit_bc_unpack_sequence,
    mp_emit_bc_unpack_ex,
    mp_emit_bc_make_function,
    mp_emit_bc_make_closure,
    mp_emit_bc_call_function,
    mp_emit_bc_call_method,
    mp_emit_bc_return_value,
    mp_emit_bc_raise_varargs,
    mp_emit_bc_yield,

    mp_emit_bc_start_except_handler,
    mp_emit_bc_end_except_handler,
};
#else
// Bytecode-only builds call the mp_emit_bc_* functions directly and need
// only the small identifier-operation tables.
const mp_emit_method_table_id_ops_t mp_emit_bc_method_table_load_id_ops = {
    mp_emit_bc_load_local,
    mp_emit_bc_load_global,
};

const mp_emit_method_table_id_ops_t mp_emit_bc_method_table_store_id_ops = {
    mp_emit_bc_store_local,
    mp_emit_bc_store_global,
};

const mp_emit_method_table_id_ops_t mp_emit_bc_method_table_delete_id_ops = {
    mp_emit_bc_delete_local,
    mp_emit_bc_delete_global,
};
#endif
+
+#endif // MICROPY_ENABLE_COMPILER

+ 123 - 0
mp_flipper/lib/micropython/py/emitcommon.c

@@ -0,0 +1,123 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <assert.h>
+
+#include "py/emit.h"
+#include "py/nativeglue.h"
+
+#if MICROPY_ENABLE_COMPILER
+
#if MICROPY_EMIT_BYTECODE_USES_QSTR_TABLE
// Intern `qst` into the per-compilation qstr table and return its index.
// An existing entry is reused; a new entry gets the next sequential index
// (map `used` count minus one, since the lookup just added it).
qstr_short_t mp_emit_common_use_qstr(mp_emit_common_t *emit, qstr qst) {
    mp_map_elem_t *elem = mp_map_lookup(&emit->qstr_map, MP_OBJ_NEW_QSTR(qst), MP_MAP_LOOKUP_ADD_IF_NOT_FOUND);
    if (elem->value == MP_OBJ_NULL) {
        elem->value = MP_OBJ_NEW_SMALL_INT(emit->qstr_map.used - 1);
    }
    return MP_OBJ_SMALL_INT_VALUE(elem->value);
}
#endif
+
// Compare two objects for strict equality, including equality of type.  This is
// different to the semantics of mp_obj_equal which, eg, has (True,) == (1.0,).
// Used to deduplicate the constant table; tuples are compared element-wise
// (recursively), everything else falls back to mp_obj_equal once the types
// are known to match.
static bool strictly_equal(mp_obj_t a, mp_obj_t b) {
    if (a == b) {
        return true;
    }

    #if MICROPY_EMIT_NATIVE
    // mp_fun_table is not a real object; never pass it to mp_obj_get_type.
    if (a == MP_OBJ_FROM_PTR(&mp_fun_table) || b == MP_OBJ_FROM_PTR(&mp_fun_table)) {
        return false;
    }
    #endif

    const mp_obj_type_t *a_type = mp_obj_get_type(a);
    const mp_obj_type_t *b_type = mp_obj_get_type(b);
    if (a_type != b_type) {
        return false;
    }
    if (a_type == &mp_type_tuple) {
        mp_obj_tuple_t *a_tuple = MP_OBJ_TO_PTR(a);
        mp_obj_tuple_t *b_tuple = MP_OBJ_TO_PTR(b);
        if (a_tuple->len != b_tuple->len) {
            return false;
        }
        for (size_t i = 0; i < a_tuple->len; ++i) {
            if (!strictly_equal(a_tuple->items[i], b_tuple->items[i])) {
                return false;
            }
        }
        return true;
    } else {
        return mp_obj_equal(a, b);
    }
}
+
// Intern `const_obj` into the constant table, deduplicating by strict
// equality (linear scan), and return its index.
size_t mp_emit_common_use_const_obj(mp_emit_common_t *emit, mp_obj_t const_obj) {
    for (size_t i = 0; i < emit->const_obj_list.len; ++i) {
        if (strictly_equal(emit->const_obj_list.items[i], const_obj)) {
            return i;
        }
    }
    mp_obj_list_append(MP_OBJ_FROM_PTR(&emit->const_obj_list), const_obj);
    return emit->const_obj_list.len - 1;
}
+
// Look up (or create) the id-info for `qst` in `scope` for an assignment
// context, rebinding implicit globals appropriately for the scope kind.
id_info_t *mp_emit_common_get_id_for_modification(scope_t *scope, qstr qst) {
    // name adding/lookup
    id_info_t *id = scope_find_or_add_id(scope, qst, ID_INFO_KIND_GLOBAL_IMPLICIT);
    if (id->kind == ID_INFO_KIND_GLOBAL_IMPLICIT) {
        if (SCOPE_IS_FUNC_LIKE(scope->kind)) {
            // rebind as a local variable
            id->kind = ID_INFO_KIND_LOCAL;
        } else {
            // mark this as assigned, to prevent it from being closed over
            id->kind = ID_INFO_KIND_GLOBAL_IMPLICIT_ASSIGNED;
        }
    }
    return id;
}
+
// Emit a load/store/delete for identifier `qst`, dispatching through
// `emit_method_table` based on the id-info kind resolved for this scope
// (implicit/explicit global, fast local, or cell/free deref).
void mp_emit_common_id_op(emit_t *emit, const mp_emit_method_table_id_ops_t *emit_method_table, scope_t *scope, qstr qst) {
    // assumes pass is greater than 1, ie that all identifiers are defined in the scope

    id_info_t *id = scope_find(scope, qst);
    assert(id != NULL);

    // call the emit backend with the correct code
    if (id->kind == ID_INFO_KIND_GLOBAL_IMPLICIT || id->kind == ID_INFO_KIND_GLOBAL_IMPLICIT_ASSIGNED) {
        emit_method_table->global(emit, qst, MP_EMIT_IDOP_GLOBAL_NAME);
    } else if (id->kind == ID_INFO_KIND_GLOBAL_EXPLICIT) {
        emit_method_table->global(emit, qst, MP_EMIT_IDOP_GLOBAL_GLOBAL);
    } else if (id->kind == ID_INFO_KIND_LOCAL) {
        emit_method_table->local(emit, qst, id->local_num, MP_EMIT_IDOP_LOCAL_FAST);
    } else {
        assert(id->kind == ID_INFO_KIND_CELL || id->kind == ID_INFO_KIND_FREE);
        emit_method_table->local(emit, qst, id->local_num, MP_EMIT_IDOP_LOCAL_DEREF);
    }
}
+
+#endif // MICROPY_ENABLE_COMPILER

+ 250 - 0
mp_flipper/lib/micropython/py/emitglue.c

@@ -0,0 +1,250 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+// This code glues the code emitters to the runtime.
+
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+
+#include "py/emitglue.h"
+#include "py/runtime0.h"
+#include "py/bc.h"
+#include "py/objfun.h"
+#include "py/profile.h"
+
+#if MICROPY_DEBUG_VERBOSE // print debugging info
+#define DEBUG_PRINT (1)
+#define WRITE_CODE (1)
+#define DEBUG_printf DEBUG_printf
+#define DEBUG_OP_printf(...) DEBUG_printf(__VA_ARGS__)
+#else // don't print debugging info
+#define DEBUG_printf(...) (void)0
+#define DEBUG_OP_printf(...) (void)0
+#endif
+
+#if MICROPY_DEBUG_PRINTERS
+mp_uint_t mp_verbose_flag = 0;
+#endif
+
// Allocate a zeroed raw-code object in the RESERVED state; the emitter
// fills it in later via one of the mp_emit_glue_assign_* functions.
mp_raw_code_t *mp_emit_glue_new_raw_code(void) {
    mp_raw_code_t *rc = m_new0(mp_raw_code_t, 1);
    rc->kind = MP_CODE_RESERVED;
    #if MICROPY_PY_SYS_SETTRACE
    rc->line_of_definition = 0;
    #endif
    return rc;
}
+
// Attach finalised bytecode (and its child raw-codes) to `rc`, switching
// it from RESERVED to BYTECODE.  `len`/`n_children` are only stored when
// persistent code saving is enabled.
void mp_emit_glue_assign_bytecode(mp_raw_code_t *rc, const byte *code,
    mp_raw_code_t **children,
    #if MICROPY_PERSISTENT_CODE_SAVE
    size_t len,
    uint16_t n_children,
    #endif
    uint16_t scope_flags) {

    rc->kind = MP_CODE_BYTECODE;
    rc->is_generator = (scope_flags & MP_SCOPE_FLAG_GENERATOR) != 0;
    rc->fun_data = code;
    rc->children = children;

    #if MICROPY_PERSISTENT_CODE_SAVE
    rc->fun_data_len = len;
    rc->n_children = n_children;
    #endif

    #if MICROPY_PY_SYS_SETTRACE
    // Pre-extract the prelude so sys.settrace can inspect it later.
    mp_bytecode_prelude_t *prelude = &rc->prelude;
    mp_prof_extract_prelude(code, prelude);
    #endif

    #if DEBUG_PRINT
    #if !MICROPY_PERSISTENT_CODE_SAVE
    // No `len` parameter in this configuration; print a placeholder.
    const size_t len = 0;
    #endif
    DEBUG_printf("assign byte code: code=%p len=" UINT_FMT " flags=%x\n", code, len, (uint)scope_flags);
    #endif
}
+
#if MICROPY_EMIT_MACHINE_CODE
// Attach emitted machine code to `rc` (native Python, viper or inline
// asm), performing any cache maintenance the target needs so the code,
// written to data RAM, becomes executable.
void mp_emit_glue_assign_native(mp_raw_code_t *rc, mp_raw_code_kind_t kind, const void *fun_data, mp_uint_t fun_len,
    mp_raw_code_t **children,
    #if MICROPY_PERSISTENT_CODE_SAVE
    uint16_t n_children,
    uint16_t prelude_offset,
    #endif
    uint16_t scope_flags, uint32_t asm_n_pos_args, uint32_t asm_type_sig
    ) {

    assert(kind == MP_CODE_NATIVE_PY || kind == MP_CODE_NATIVE_VIPER || kind == MP_CODE_NATIVE_ASM);

    // Some architectures require flushing/invalidation of the I/D caches,
    // so that the generated native code which was created in data RAM will
    // be available for execution from instruction RAM.
    #if MICROPY_EMIT_THUMB || MICROPY_EMIT_INLINE_THUMB
    #if __ICACHE_PRESENT == 1
    // Flush D-cache, so the code emitted is stored in RAM.
    MP_HAL_CLEAN_DCACHE(fun_data, fun_len);
    // Invalidate I-cache, so the newly-created code is reloaded from RAM.
    SCB_InvalidateICache();
    #endif
    #elif MICROPY_EMIT_ARM
    #if (defined(__linux__) && defined(__GNUC__)) || __ARM_ARCH == 7
    __builtin___clear_cache((void *)fun_data, (uint8_t *)fun_data + fun_len);
    #elif defined(__arm__)
    // Flush I-cache and D-cache.
    asm volatile (
        "0:"
        "mrc p15, 0, r15, c7, c10, 3\n" // test and clean D-cache
        "bne 0b\n"
        "mov r0, #0\n"
        "mcr p15, 0, r0, c7, c7, 0\n" // invalidate I-cache and D-cache
        : : : "r0", "cc");
    #endif
    #endif

    rc->kind = kind;
    rc->is_generator = (scope_flags & MP_SCOPE_FLAG_GENERATOR) != 0;
    rc->fun_data = fun_data;

    #if MICROPY_PERSISTENT_CODE_SAVE
    rc->fun_data_len = fun_len;
    #endif
    rc->children = children;

    #if MICROPY_PERSISTENT_CODE_SAVE
    rc->n_children = n_children;
    rc->prelude_offset = prelude_offset;
    #endif

    #if MICROPY_EMIT_INLINE_ASM
    // These two entries are only needed for MP_CODE_NATIVE_ASM.
    rc->asm_n_pos_args = asm_n_pos_args;
    rc->asm_type_sig = asm_type_sig;
    #endif

    #if DEBUG_PRINT
    DEBUG_printf("assign native: kind=%d fun=%p len=" UINT_FMT " flags=%x\n", kind, fun_data, fun_len, (uint)scope_flags);
    for (mp_uint_t i = 0; i < fun_len; i++) {
        if (i > 0 && i % 16 == 0) {
            DEBUG_printf("\n");
        }
        DEBUG_printf(" %02x", ((const byte *)fun_data)[i]);
    }
    DEBUG_printf("\n");

    #ifdef WRITE_CODE
    FILE *fp_write_code = fopen("out-code", "wb");
    fwrite(fun_data, fun_len, 1, fp_write_code);
    fclose(fp_write_code);
    #endif
    #else
    (void)fun_len;
    #endif
}
#endif
+
// Create a callable function object from a proto-function, which is either
// raw frozen bytecode (detected via mp_proto_fun_is_bytecode) or an
// mp_raw_code_t.  def_args, if non-NULL, holds the defaults tuple and the
// keyword-defaults dict.  Generator functions get their type rebound to
// the appropriate generator-wrapper type.
mp_obj_t mp_make_function_from_proto_fun(mp_proto_fun_t proto_fun, const mp_module_context_t *context, const mp_obj_t *def_args) {
    DEBUG_OP_printf("make_function_from_proto_fun %p\n", proto_fun);
    assert(proto_fun != NULL);

    // def_args must be MP_OBJ_NULL or a tuple
    assert(def_args == NULL || def_args[0] == MP_OBJ_NULL || mp_obj_is_type(def_args[0], &mp_type_tuple));

    // def_kw_args must be MP_OBJ_NULL or a dict
    assert(def_args == NULL || def_args[1] == MP_OBJ_NULL || mp_obj_is_type(def_args[1], &mp_type_dict));

    #if MICROPY_MODULE_FROZEN_MPY
    if (mp_proto_fun_is_bytecode(proto_fun)) {
        // Frozen bytecode: decode the prelude to check the generator flag.
        const uint8_t *bc = proto_fun;
        mp_obj_t fun = mp_obj_new_fun_bc(def_args, bc, context, NULL);
        MP_BC_PRELUDE_SIG_DECODE(bc);
        if (scope_flags & MP_SCOPE_FLAG_GENERATOR) {
            ((mp_obj_base_t *)MP_OBJ_TO_PTR(fun))->type = &mp_type_gen_wrap;
        }
        return fun;
    }
    #endif

    // the proto-function is a mp_raw_code_t
    const mp_raw_code_t *rc = proto_fun;

    // make the function, depending on the raw code kind
    mp_obj_t fun;
    switch (rc->kind) {
        #if MICROPY_EMIT_NATIVE
        case MP_CODE_NATIVE_PY:
            fun = mp_obj_new_fun_native(def_args, rc->fun_data, context, rc->children);
            // Check for a generator function, and if so change the type of the object
            if (rc->is_generator) {
                ((mp_obj_base_t *)MP_OBJ_TO_PTR(fun))->type = &mp_type_native_gen_wrap;
            }
            break;
        case MP_CODE_NATIVE_VIPER:
            fun = mp_obj_new_fun_viper(rc->fun_data, context, rc->children);
            break;
        #endif
        #if MICROPY_EMIT_INLINE_ASM
        case MP_CODE_NATIVE_ASM:
            fun = mp_obj_new_fun_asm(rc->asm_n_pos_args, rc->fun_data, rc->asm_type_sig);
            break;
        #endif
        default:
            // rc->kind should always be set and BYTECODE is the only remaining case
            assert(rc->kind == MP_CODE_BYTECODE);
            fun = mp_obj_new_fun_bc(def_args, rc->fun_data, context, rc->children);
            // check for generator functions and if so change the type of the object
            if (rc->is_generator) {
                ((mp_obj_base_t *)MP_OBJ_TO_PTR(fun))->type = &mp_type_gen_wrap;
            }

            #if MICROPY_PY_SYS_SETTRACE
            // Keep a back-reference so sys.settrace can find the raw code.
            mp_obj_fun_bc_t *self_fun = (mp_obj_fun_bc_t *)MP_OBJ_TO_PTR(fun);
            self_fun->rc = rc;
            #endif

            break;
    }

    return fun;
}
+
// Create a closure from a proto-function.  n_closed_over packs two values:
// the low byte is the number of closed-over cells, and bit 8 (0x100)
// flags that `args` starts with a defaults tuple and kw-defaults dict
// before the cells.  `(n_closed_over >> 7) & 2` yields 2 exactly when that
// flag is set, skipping those two leading entries.
mp_obj_t mp_make_closure_from_proto_fun(mp_proto_fun_t proto_fun, const mp_module_context_t *context, mp_uint_t n_closed_over, const mp_obj_t *args) {
    DEBUG_OP_printf("make_closure_from_proto_fun %p " UINT_FMT " %p\n", proto_fun, n_closed_over, args);
    // make function object
    mp_obj_t ffun;
    if (n_closed_over & 0x100) {
        // default positional and keyword args given
        ffun = mp_make_function_from_proto_fun(proto_fun, context, args);
    } else {
        // default positional and keyword args not given
        ffun = mp_make_function_from_proto_fun(proto_fun, context, NULL);
    }
    // wrap function in closure object
    return mp_obj_new_closure(ffun, n_closed_over & 0xff, args + ((n_closed_over >> 7) & 2));
}

+ 144 - 0
mp_flipper/lib/micropython/py/emitglue.h

@@ -0,0 +1,144 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_EMITGLUE_H
+#define MICROPY_INCLUDED_PY_EMITGLUE_H
+
+#include "py/obj.h"
+#include "py/bc.h"
+
+// These variables and functions glue the code emitters to the runtime.
+
+// Used with mp_raw_code_t::proto_fun_indicator to detect if a mp_proto_fun_t is a
+// mp_raw_code_t struct or a direct pointer to bytecode.
+#define MP_PROTO_FUN_INDICATOR_RAW_CODE_0 (0)
+#define MP_PROTO_FUN_INDICATOR_RAW_CODE_1 (0)
+
+// These must fit in 8 bits; see scope.h
+enum {
+    MP_EMIT_OPT_NONE,
+    MP_EMIT_OPT_BYTECODE,
+    MP_EMIT_OPT_NATIVE_PYTHON,
+    MP_EMIT_OPT_VIPER,
+    MP_EMIT_OPT_ASM,
+};
+
+typedef enum {
+    MP_CODE_UNUSED,
+    MP_CODE_RESERVED,
+    MP_CODE_BYTECODE,
+    MP_CODE_NATIVE_PY,
+    MP_CODE_NATIVE_VIPER,
+    MP_CODE_NATIVE_ASM,
+} mp_raw_code_kind_t;
+
+// An mp_proto_fun_t points to static information about a non-instantiated function.
+// A function object is created from this information, and that object can then be executed.
+// It points either to bytecode, or an mp_raw_code_t struct.
+typedef const void *mp_proto_fun_t;
+
+// Bytecode is distinguished from an mp_raw_code_t struct by the first two bytes: bytecode
+// is guaranteed to have either its first or second byte non-zero.  So if both bytes are
+// zero then the mp_proto_fun_t pointer must be an mp_raw_code_t.
+static inline bool mp_proto_fun_is_bytecode(mp_proto_fun_t proto_fun) {
+    const uint8_t *header = (const uint8_t *)proto_fun;
+    return (header[0] | (header[1] << 8)) != (MP_PROTO_FUN_INDICATOR_RAW_CODE_0 | (MP_PROTO_FUN_INDICATOR_RAW_CODE_1 << 8));
+}
+
+// The mp_raw_code_t struct appears in the following places:
+// compiled bytecode: instance in RAM, referenced by outer scope, usually freed after first (and only) use
+// mpy file: instance in RAM, created when .mpy file is loaded (same comments as above)
+// frozen: instance in ROM
+typedef struct _mp_raw_code_t {
+    uint8_t proto_fun_indicator[2];
+    uint8_t kind; // of type mp_raw_code_kind_t; only 3 bits used
+    bool is_generator;
+    const void *fun_data;
+    struct _mp_raw_code_t **children;
+    #if MICROPY_PERSISTENT_CODE_SAVE
+    uint32_t fun_data_len; // for mp_raw_code_save
+    uint16_t n_children;
+    #if MICROPY_EMIT_MACHINE_CODE
+    uint16_t prelude_offset;
+    #endif
+    #if MICROPY_PY_SYS_SETTRACE
+    // line_of_definition is a Python source line where the raw_code was
+    // created e.g. MP_BC_MAKE_FUNCTION. This is different from lineno info
+    // stored in prelude, which provides line number for first statement of
+    // a function. Required to properly implement "call" trace event.
+    uint32_t line_of_definition;
+    mp_bytecode_prelude_t prelude;
+    #endif
+    #endif
+    #if MICROPY_EMIT_INLINE_ASM
+    uint32_t asm_n_pos_args : 8;
+    uint32_t asm_type_sig : 24; // compressed as 2-bit types; ret is MSB, then arg0, arg1, etc
+    #endif
+} mp_raw_code_t;
+
+// Version of mp_raw_code_t but without the asm_n_pos_args/asm_type_sig entries, which are
+// only needed when the kind is MP_CODE_NATIVE_ASM.  So this struct can be used when the
+// kind is MP_CODE_BYTECODE, MP_CODE_NATIVE_PY or MP_CODE_NATIVE_VIPER, to reduce its size.
+typedef struct _mp_raw_code_truncated_t {
+    uint8_t proto_fun_indicator[2];
+    uint8_t kind;
+    bool is_generator;
+    const void *fun_data;
+    struct _mp_raw_code_t **children;
+    #if MICROPY_PERSISTENT_CODE_SAVE
+    uint32_t fun_data_len;
+    uint16_t n_children;
+    #if MICROPY_EMIT_MACHINE_CODE
+    uint16_t prelude_offset;
+    #endif
+    #if MICROPY_PY_SYS_SETTRACE
+    uint32_t line_of_definition;
+    mp_bytecode_prelude_t prelude;
+    #endif
+    #endif
+} mp_raw_code_truncated_t;
+
+mp_raw_code_t *mp_emit_glue_new_raw_code(void);
+
+void mp_emit_glue_assign_bytecode(mp_raw_code_t *rc, const byte *code,
+    mp_raw_code_t **children,
+    #if MICROPY_PERSISTENT_CODE_SAVE
+    size_t len,
+    uint16_t n_children,
+    #endif
+    uint16_t scope_flags);
+
+void mp_emit_glue_assign_native(mp_raw_code_t *rc, mp_raw_code_kind_t kind, const void *fun_data, mp_uint_t fun_len,
+    mp_raw_code_t **children,
+    #if MICROPY_PERSISTENT_CODE_SAVE
+    uint16_t n_children,
+    uint16_t prelude_offset,
+    #endif
+    uint16_t scope_flags, uint32_t asm_n_pos_args, uint32_t asm_type_sig);
+
+mp_obj_t mp_make_function_from_proto_fun(mp_proto_fun_t proto_fun, const mp_module_context_t *context, const mp_obj_t *def_args);
+mp_obj_t mp_make_closure_from_proto_fun(mp_proto_fun_t proto_fun, const mp_module_context_t *context, mp_uint_t n_closed_over, const mp_obj_t *args);
+
+#endif // MICROPY_INCLUDED_PY_EMITGLUE_H

+ 865 - 0
mp_flipper/lib/micropython/py/emitinlinethumb.c

@@ -0,0 +1,865 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdarg.h>
+#include <assert.h>
+
+#include "py/emit.h"
+#include "py/asmthumb.h"
+
+#if MICROPY_EMIT_INLINE_THUMB
+
+typedef enum {
+// define rules with a compile function
+#define DEF_RULE(rule, comp, kind, ...) PN_##rule,
+#define DEF_RULE_NC(rule, kind, ...)
+    #include "py/grammar.h"
+#undef DEF_RULE
+#undef DEF_RULE_NC
+    PN_const_object, // special node for a constant, generic Python object
+// define rules without a compile function
+#define DEF_RULE(rule, comp, kind, ...)
+#define DEF_RULE_NC(rule, kind, ...) PN_##rule,
+    #include "py/grammar.h"
+#undef DEF_RULE
+#undef DEF_RULE_NC
+} pn_kind_t;
+
+struct _emit_inline_asm_t {
+    asm_thumb_t as;
+    uint16_t pass;
+    mp_obj_t *error_slot;
+    mp_uint_t max_num_labels;
+    qstr *label_lookup;
+};
+
+#if MICROPY_DYNAMIC_COMPILER
+
+static inline bool emit_inline_thumb_allow_float(emit_inline_asm_t *emit) {
+    return MP_NATIVE_ARCH_ARMV7EMSP <= mp_dynamic_compiler.native_arch
+           && mp_dynamic_compiler.native_arch <= MP_NATIVE_ARCH_ARMV7EMDP;
+}
+
+#else
+
+static inline bool emit_inline_thumb_allow_float(emit_inline_asm_t *emit) {
+    return MICROPY_EMIT_INLINE_THUMB_FLOAT;
+}
+
+#endif
+
+static void emit_inline_thumb_error_msg(emit_inline_asm_t *emit, mp_rom_error_text_t msg) {
+    *emit->error_slot = mp_obj_new_exception_msg(&mp_type_SyntaxError, msg);
+}
+
+static void emit_inline_thumb_error_exc(emit_inline_asm_t *emit, mp_obj_t exc) {
+    *emit->error_slot = exc;
+}
+
+emit_inline_asm_t *emit_inline_thumb_new(mp_uint_t max_num_labels) {
+    emit_inline_asm_t *emit = m_new_obj(emit_inline_asm_t);
+    memset(&emit->as, 0, sizeof(emit->as));
+    mp_asm_base_init(&emit->as.base, max_num_labels);
+    emit->max_num_labels = max_num_labels;
+    emit->label_lookup = m_new(qstr, max_num_labels);
+    return emit;
+}
+
+void emit_inline_thumb_free(emit_inline_asm_t *emit) {
+    m_del(qstr, emit->label_lookup, emit->max_num_labels);
+    mp_asm_base_deinit(&emit->as.base, false);
+    m_del_obj(emit_inline_asm_t, emit);
+}
+
+static void emit_inline_thumb_start_pass(emit_inline_asm_t *emit, pass_kind_t pass, mp_obj_t *error_slot) {
+    emit->pass = pass;
+    emit->error_slot = error_slot;
+    if (emit->pass == MP_PASS_CODE_SIZE) {
+        memset(emit->label_lookup, 0, emit->max_num_labels * sizeof(qstr));
+    }
+    mp_asm_base_start_pass(&emit->as.base, pass == MP_PASS_EMIT ? MP_ASM_PASS_EMIT : MP_ASM_PASS_COMPUTE);
+    asm_thumb_entry(&emit->as, 0);
+}
+
+static void emit_inline_thumb_end_pass(emit_inline_asm_t *emit, mp_uint_t type_sig) {
+    asm_thumb_exit(&emit->as);
+    asm_thumb_end_pass(&emit->as);
+}
+
+static mp_uint_t emit_inline_thumb_count_params(emit_inline_asm_t *emit, mp_uint_t n_params, mp_parse_node_t *pn_params) {
+    if (n_params > 4) {
+        emit_inline_thumb_error_msg(emit, MP_ERROR_TEXT("can only have up to 4 parameters to Thumb assembly"));
+        return 0;
+    }
+    for (mp_uint_t i = 0; i < n_params; i++) {
+        if (!MP_PARSE_NODE_IS_ID(pn_params[i])) {
+            emit_inline_thumb_error_msg(emit, MP_ERROR_TEXT("parameters must be registers in sequence r0 to r3"));
+            return 0;
+        }
+        const char *p = qstr_str(MP_PARSE_NODE_LEAF_ARG(pn_params[i]));
+        if (!(strlen(p) == 2 && p[0] == 'r' && (mp_uint_t)p[1] == '0' + i)) {
+            emit_inline_thumb_error_msg(emit, MP_ERROR_TEXT("parameters must be registers in sequence r0 to r3"));
+            return 0;
+        }
+    }
+    return n_params;
+}
+
+static bool emit_inline_thumb_label(emit_inline_asm_t *emit, mp_uint_t label_num, qstr label_id) {
+    assert(label_num < emit->max_num_labels);
+    if (emit->pass == MP_PASS_CODE_SIZE) {
+        // check for duplicate label on first pass
+        for (uint i = 0; i < emit->max_num_labels; i++) {
+            if (emit->label_lookup[i] == label_id) {
+                return false;
+            }
+        }
+    }
+    emit->label_lookup[label_num] = label_id;
+    mp_asm_base_label_assign(&emit->as.base, label_num);
+    return true;
+}
+
+typedef struct _reg_name_t { byte reg;
+                             byte name[3];
+} reg_name_t;
+static const reg_name_t reg_name_table[] = {
+    {0, "r0\0"},
+    {1, "r1\0"},
+    {2, "r2\0"},
+    {3, "r3\0"},
+    {4, "r4\0"},
+    {5, "r5\0"},
+    {6, "r6\0"},
+    {7, "r7\0"},
+    {8, "r8\0"},
+    {9, "r9\0"},
+    {10, "r10"},
+    {11, "r11"},
+    {12, "r12"},
+    {13, "r13"},
+    {14, "r14"},
+    {15, "r15"},
+    {10, "sl\0"},
+    {11, "fp\0"},
+    {13, "sp\0"},
+    {14, "lr\0"},
+    {15, "pc\0"},
+};
+
+#define MAX_SPECIAL_REGISTER_NAME_LENGTH 7
+typedef struct _special_reg_name_t { byte reg;
+                                     char name[MAX_SPECIAL_REGISTER_NAME_LENGTH + 1];
+} special_reg_name_t;
+static const special_reg_name_t special_reg_name_table[] = {
+    {5, "IPSR"},
+    {17, "BASEPRI"},
+};
+
+// return empty string in case of error, so we can attempt to parse the string
+// without a special check if it was in fact a string
+static const char *get_arg_str(mp_parse_node_t pn) {
+    if (MP_PARSE_NODE_IS_ID(pn)) {
+        qstr qst = MP_PARSE_NODE_LEAF_ARG(pn);
+        return qstr_str(qst);
+    } else {
+        return "";
+    }
+}
+
+static mp_uint_t get_arg_reg(emit_inline_asm_t *emit, const char *op, mp_parse_node_t pn, mp_uint_t max_reg) {
+    const char *reg_str = get_arg_str(pn);
+    for (mp_uint_t i = 0; i < MP_ARRAY_SIZE(reg_name_table); i++) {
+        const reg_name_t *r = &reg_name_table[i];
+        if (reg_str[0] == r->name[0]
+            && reg_str[1] == r->name[1]
+            && reg_str[2] == r->name[2]
+            && (reg_str[2] == '\0' || reg_str[3] == '\0')) {
+            if (r->reg > max_reg) {
+                emit_inline_thumb_error_exc(emit,
+                    mp_obj_new_exception_msg_varg(&mp_type_SyntaxError,
+                        MP_ERROR_TEXT("'%s' expects at most r%d"), op, max_reg));
+                return 0;
+            } else {
+                return r->reg;
+            }
+        }
+    }
+    emit_inline_thumb_error_exc(emit,
+        mp_obj_new_exception_msg_varg(&mp_type_SyntaxError,
+            MP_ERROR_TEXT("'%s' expects a register"), op));
+    return 0;
+}
+
+static mp_uint_t get_arg_special_reg(emit_inline_asm_t *emit, const char *op, mp_parse_node_t pn) {
+    const char *reg_str = get_arg_str(pn);
+    for (mp_uint_t i = 0; i < MP_ARRAY_SIZE(special_reg_name_table); i++) {
+        const special_reg_name_t *r = &special_reg_name_table[i];
+        if (strcmp(r->name, reg_str) == 0) {
+            return r->reg;
+        }
+    }
+    emit_inline_thumb_error_exc(emit,
+        mp_obj_new_exception_msg_varg(&mp_type_SyntaxError,
+            MP_ERROR_TEXT("'%s' expects a special register"), op));
+    return 0;
+}
+
+static mp_uint_t get_arg_vfpreg(emit_inline_asm_t *emit, const char *op, mp_parse_node_t pn) {
+    const char *reg_str = get_arg_str(pn);
+    if (reg_str[0] == 's' && reg_str[1] != '\0') {
+        mp_uint_t regno = 0;
+        for (++reg_str; *reg_str; ++reg_str) {
+            mp_uint_t v = *reg_str;
+            if (!('0' <= v && v <= '9')) {
+                goto malformed;
+            }
+            regno = 10 * regno + v - '0';
+        }
+        if (regno > 31) {
+            emit_inline_thumb_error_exc(emit,
+                mp_obj_new_exception_msg_varg(&mp_type_SyntaxError,
+                    MP_ERROR_TEXT("'%s' expects at most r%d"), op, 31));
+            return 0;
+        } else {
+            return regno;
+        }
+    }
+malformed:
+    emit_inline_thumb_error_exc(emit,
+        mp_obj_new_exception_msg_varg(&mp_type_SyntaxError,
+            MP_ERROR_TEXT("'%s' expects an FPU register"), op));
+    return 0;
+}
+
+static mp_uint_t get_arg_reglist(emit_inline_asm_t *emit, const char *op, mp_parse_node_t pn) {
+    // a register list looks like {r0, r1, r2} and is parsed as a Python set
+
+    if (!MP_PARSE_NODE_IS_STRUCT_KIND(pn, PN_atom_brace)) {
+        goto bad_arg;
+    }
+
+    mp_parse_node_struct_t *pns = (mp_parse_node_struct_t *)pn;
+    assert(MP_PARSE_NODE_STRUCT_NUM_NODES(pns) == 1); // should always be
+    pn = pns->nodes[0];
+
+    mp_uint_t reglist = 0;
+
+    if (MP_PARSE_NODE_IS_ID(pn)) {
+        // set with one element
+        reglist |= 1 << get_arg_reg(emit, op, pn, 15);
+    } else if (MP_PARSE_NODE_IS_STRUCT(pn)) {
+        pns = (mp_parse_node_struct_t *)pn;
+        if (MP_PARSE_NODE_STRUCT_KIND(pns) == PN_dictorsetmaker) {
+            assert(MP_PARSE_NODE_IS_STRUCT(pns->nodes[1])); // should succeed
+            mp_parse_node_struct_t *pns1 = (mp_parse_node_struct_t *)pns->nodes[1];
+            if (MP_PARSE_NODE_STRUCT_KIND(pns1) == PN_dictorsetmaker_list) {
+                // set with multiple elements
+
+                // get first element of set (we rely on get_arg_reg to catch syntax errors)
+                reglist |= 1 << get_arg_reg(emit, op, pns->nodes[0], 15);
+
+                // get tail elements (2nd, 3rd, ...)
+                mp_parse_node_t *nodes;
+                int n = mp_parse_node_extract_list(&pns1->nodes[0], PN_dictorsetmaker_list2, &nodes);
+
+                // process rest of elements
+                for (int i = 0; i < n; i++) {
+                    reglist |= 1 << get_arg_reg(emit, op, nodes[i], 15);
+                }
+            } else {
+                goto bad_arg;
+            }
+        } else {
+            goto bad_arg;
+        }
+    } else {
+        goto bad_arg;
+    }
+
+    return reglist;
+
+bad_arg:
+    emit_inline_thumb_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, MP_ERROR_TEXT("'%s' expects {r0, r1, ...}"), op));
+    return 0;
+}
+
+static uint32_t get_arg_i(emit_inline_asm_t *emit, const char *op, mp_parse_node_t pn, uint32_t fit_mask) {
+    mp_obj_t o;
+    if (!mp_parse_node_get_int_maybe(pn, &o)) {
+        emit_inline_thumb_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, MP_ERROR_TEXT("'%s' expects an integer"), op));
+        return 0;
+    }
+    uint32_t i = mp_obj_get_int_truncated(o);
+    if ((i & (~fit_mask)) != 0) {
+        emit_inline_thumb_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, MP_ERROR_TEXT("'%s' integer 0x%x doesn't fit in mask 0x%x"), op, i, fit_mask));
+        return 0;
+    }
+    return i;
+}
+
+static bool get_arg_addr(emit_inline_asm_t *emit, const char *op, mp_parse_node_t pn, mp_parse_node_t *pn_base, mp_parse_node_t *pn_offset) {
+    if (!MP_PARSE_NODE_IS_STRUCT_KIND(pn, PN_atom_bracket)) {
+        goto bad_arg;
+    }
+    mp_parse_node_struct_t *pns = (mp_parse_node_struct_t *)pn;
+    if (!MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[0], PN_testlist_comp)) {
+        goto bad_arg;
+    }
+    pns = (mp_parse_node_struct_t *)pns->nodes[0];
+    if (MP_PARSE_NODE_STRUCT_NUM_NODES(pns) != 2) {
+        goto bad_arg;
+    }
+
+    *pn_base = pns->nodes[0];
+    *pn_offset = pns->nodes[1];
+    return true;
+
+bad_arg:
+    emit_inline_thumb_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, MP_ERROR_TEXT("'%s' expects an address of the form [a, b]"), op));
+    return false;
+}
+
+static int get_arg_label(emit_inline_asm_t *emit, const char *op, mp_parse_node_t pn) {
+    if (!MP_PARSE_NODE_IS_ID(pn)) {
+        emit_inline_thumb_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, MP_ERROR_TEXT("'%s' expects a label"), op));
+        return 0;
+    }
+    qstr label_qstr = MP_PARSE_NODE_LEAF_ARG(pn);
+    for (uint i = 0; i < emit->max_num_labels; i++) {
+        if (emit->label_lookup[i] == label_qstr) {
+            return i;
+        }
+    }
+    // only need to have the labels on the last pass
+    if (emit->pass == MP_PASS_EMIT) {
+        emit_inline_thumb_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, MP_ERROR_TEXT("label '%q' not defined"), label_qstr));
+    }
+    return 0;
+}
+
+typedef struct _cc_name_t { byte cc;
+                            byte name[2];
+} cc_name_t;
+static const cc_name_t cc_name_table[] = {
+    { ASM_THUMB_CC_EQ, "eq" },
+    { ASM_THUMB_CC_NE, "ne" },
+    { ASM_THUMB_CC_CS, "cs" },
+    { ASM_THUMB_CC_CC, "cc" },
+    { ASM_THUMB_CC_MI, "mi" },
+    { ASM_THUMB_CC_PL, "pl" },
+    { ASM_THUMB_CC_VS, "vs" },
+    { ASM_THUMB_CC_VC, "vc" },
+    { ASM_THUMB_CC_HI, "hi" },
+    { ASM_THUMB_CC_LS, "ls" },
+    { ASM_THUMB_CC_GE, "ge" },
+    { ASM_THUMB_CC_LT, "lt" },
+    { ASM_THUMB_CC_GT, "gt" },
+    { ASM_THUMB_CC_LE, "le" },
+};
+
+typedef struct _format_4_op_t { byte op;
+                                char name[3];
+} format_4_op_t;
+#define X(x) (((x) >> 4) & 0xff) // only need 1 byte to distinguish these ops
+static const format_4_op_t format_4_op_table[] = {
+    { X(ASM_THUMB_FORMAT_4_EOR), "eor" },
+    { X(ASM_THUMB_FORMAT_4_LSL), "lsl" },
+    { X(ASM_THUMB_FORMAT_4_LSR), "lsr" },
+    { X(ASM_THUMB_FORMAT_4_ASR), "asr" },
+    { X(ASM_THUMB_FORMAT_4_ADC), "adc" },
+    { X(ASM_THUMB_FORMAT_4_SBC), "sbc" },
+    { X(ASM_THUMB_FORMAT_4_ROR), "ror" },
+    { X(ASM_THUMB_FORMAT_4_TST), "tst" },
+    { X(ASM_THUMB_FORMAT_4_NEG), "neg" },
+    { X(ASM_THUMB_FORMAT_4_CMP), "cmp" },
+    { X(ASM_THUMB_FORMAT_4_CMN), "cmn" },
+    { X(ASM_THUMB_FORMAT_4_ORR), "orr" },
+    { X(ASM_THUMB_FORMAT_4_MUL), "mul" },
+    { X(ASM_THUMB_FORMAT_4_BIC), "bic" },
+    { X(ASM_THUMB_FORMAT_4_MVN), "mvn" },
+};
+#undef X
+
+// name is actually a qstr, which should fit in 16 bits
+typedef struct _format_9_10_op_t { uint16_t op;
+                                   uint16_t name;
+} format_9_10_op_t;
+#define X(x) (x)
+static const format_9_10_op_t format_9_10_op_table[] = {
+    { X(ASM_THUMB_FORMAT_9_LDR | ASM_THUMB_FORMAT_9_WORD_TRANSFER), MP_QSTR_ldr },
+    { X(ASM_THUMB_FORMAT_9_LDR | ASM_THUMB_FORMAT_9_BYTE_TRANSFER), MP_QSTR_ldrb },
+    { X(ASM_THUMB_FORMAT_10_LDRH), MP_QSTR_ldrh },
+    { X(ASM_THUMB_FORMAT_9_STR | ASM_THUMB_FORMAT_9_WORD_TRANSFER), MP_QSTR_str },
+    { X(ASM_THUMB_FORMAT_9_STR | ASM_THUMB_FORMAT_9_BYTE_TRANSFER), MP_QSTR_strb },
+    { X(ASM_THUMB_FORMAT_10_STRH), MP_QSTR_strh },
+};
+#undef X
+
+// actual opcodes are: 0xee00 | op.hi_nibble, 0x0a00 | op.lo_nibble
+typedef struct _format_vfp_op_t {
+    byte op;
+    char name[3];
+} format_vfp_op_t;
+static const format_vfp_op_t format_vfp_op_table[] = {
+    { 0x30, "add" },
+    { 0x34, "sub" },
+    { 0x20, "mul" },
+    { 0x80, "div" },
+};
+
+// shorthand alias for whether we allow ARMv7-M instructions
+#define ARMV7M asm_thumb_allow_armv7m(&emit->as)
+
+static void emit_inline_thumb_op(emit_inline_asm_t *emit, qstr op, mp_uint_t n_args, mp_parse_node_t *pn_args) {
+    // TODO perhaps make two tables:
+    // one_args =
+    // "b", LAB, asm_thumb_b_n,
+    // "bgt", LAB, asm_thumb_bgt_n,
+    // two_args =
+    // "movs", RLO, I8, asm_thumb_movs_reg_i8
+    // "movw", REG, REG, asm_thumb_movw_reg_i16
+    // three_args =
+    // "subs", RLO, RLO, I3, asm_thumb_subs_reg_reg_i3
+
+    size_t op_len;
+    const char *op_str = (const char *)qstr_data(op, &op_len);
+
+    if (emit_inline_thumb_allow_float(emit) && op_str[0] == 'v') {
+        // floating point operations
+        if (n_args == 2) {
+            mp_uint_t op_code = 0x0ac0, op_code_hi;
+            if (op == MP_QSTR_vcmp) {
+                op_code_hi = 0xeeb4;
+            op_vfp_twoargs:;
+                mp_uint_t vd = get_arg_vfpreg(emit, op_str, pn_args[0]);
+                mp_uint_t vm = get_arg_vfpreg(emit, op_str, pn_args[1]);
+                asm_thumb_op32(&emit->as,
+                    op_code_hi | ((vd & 1) << 6),
+                    op_code | ((vd & 0x1e) << 11) | ((vm & 1) << 5) | (vm & 0x1e) >> 1);
+            } else if (op == MP_QSTR_vsqrt) {
+                op_code_hi = 0xeeb1;
+                goto op_vfp_twoargs;
+            } else if (op == MP_QSTR_vneg) {
+                op_code_hi = 0xeeb1;
+                op_code = 0x0a40;
+                goto op_vfp_twoargs;
+            } else if (op == MP_QSTR_vcvt_f32_s32) {
+                op_code_hi = 0xeeb8; // int to float
+                goto op_vfp_twoargs;
+            } else if (op == MP_QSTR_vcvt_s32_f32) {
+                op_code_hi = 0xeebd; // float to int
+                goto op_vfp_twoargs;
+            } else if (op == MP_QSTR_vmrs) {
+                mp_uint_t reg_dest;
+                const char *reg_str0 = get_arg_str(pn_args[0]);
+                if (strcmp(reg_str0, "APSR_nzcv") == 0) {
+                    reg_dest = 15;
+                } else {
+                    reg_dest = get_arg_reg(emit, op_str, pn_args[0], 15);
+                }
+                const char *reg_str1 = get_arg_str(pn_args[1]);
+                if (strcmp(reg_str1, "FPSCR") == 0) {
+                    // FP status to ARM reg
+                    asm_thumb_op32(&emit->as, 0xeef1, 0x0a10 | (reg_dest << 12));
+                } else {
+                    goto unknown_op;
+                }
+            } else if (op == MP_QSTR_vmov) {
+                op_code_hi = 0xee00;
+                mp_uint_t r_arm, vm;
+                const char *reg_str = get_arg_str(pn_args[0]);
+                if (reg_str[0] == 'r') {
+                    r_arm = get_arg_reg(emit, op_str, pn_args[0], 15);
+                    vm = get_arg_vfpreg(emit, op_str, pn_args[1]);
+                    op_code_hi |= 0x10;
+                } else {
+                    vm = get_arg_vfpreg(emit, op_str, pn_args[0]);
+                    r_arm = get_arg_reg(emit, op_str, pn_args[1], 15);
+                }
+                asm_thumb_op32(&emit->as,
+                    op_code_hi | ((vm & 0x1e) >> 1),
+                    0x0a10 | (r_arm << 12) | ((vm & 1) << 7));
+            } else if (op == MP_QSTR_vldr) {
+                op_code_hi = 0xed90;
+            op_vldr_vstr:;
+                mp_uint_t vd = get_arg_vfpreg(emit, op_str, pn_args[0]);
+                mp_parse_node_t pn_base, pn_offset;
+                if (get_arg_addr(emit, op_str, pn_args[1], &pn_base, &pn_offset)) {
+                    mp_uint_t rlo_base = get_arg_reg(emit, op_str, pn_base, 7);
+                    mp_uint_t i8;
+                    i8 = get_arg_i(emit, op_str, pn_offset, 0x3fc) >> 2;
+                    asm_thumb_op32(&emit->as,
+                        op_code_hi | rlo_base | ((vd & 1) << 6),
+                        0x0a00 | ((vd & 0x1e) << 11) | i8);
+                }
+            } else if (op == MP_QSTR_vstr) {
+                op_code_hi = 0xed80;
+                goto op_vldr_vstr;
+            } else {
+                goto unknown_op;
+            }
+        } else if (n_args == 3) {
+            // search table for arith ops
+            for (mp_uint_t i = 0; i < MP_ARRAY_SIZE(format_vfp_op_table); i++) {
+                if (strncmp(op_str + 1, format_vfp_op_table[i].name, 3) == 0 && op_str[4] == '\0') {
+                    mp_uint_t op_code_hi = 0xee00 | (format_vfp_op_table[i].op & 0xf0);
+                    mp_uint_t op_code = 0x0a00 | ((format_vfp_op_table[i].op & 0x0f) << 4);
+                    mp_uint_t vd = get_arg_vfpreg(emit, op_str, pn_args[0]);
+                    mp_uint_t vn = get_arg_vfpreg(emit, op_str, pn_args[1]);
+                    mp_uint_t vm = get_arg_vfpreg(emit, op_str, pn_args[2]);
+                    asm_thumb_op32(&emit->as,
+                        op_code_hi | ((vd & 1) << 6) | (vn >> 1),
+                        op_code | (vm >> 1) | ((vm & 1) << 5) | ((vd & 0x1e) << 11) | ((vn & 1) << 7));
+                    return;
+                }
+            }
+            goto unknown_op;
+        } else {
+            goto unknown_op;
+        }
+        return;
+    }
+
+    if (n_args == 0) {
+        if (op == MP_QSTR_nop) {
+            asm_thumb_op16(&emit->as, ASM_THUMB_OP_NOP);
+        } else if (op == MP_QSTR_wfi) {
+            asm_thumb_op16(&emit->as, ASM_THUMB_OP_WFI);
+        } else {
+            goto unknown_op;
+        }
+
+    } else if (n_args == 1) {
+        if (op == MP_QSTR_b) {
+            int label_num = get_arg_label(emit, op_str, pn_args[0]);
+            if (!asm_thumb_b_n_label(&emit->as, label_num)) {
+                goto branch_not_in_range;
+            }
+        } else if (op == MP_QSTR_bl) {
+            int label_num = get_arg_label(emit, op_str, pn_args[0]);
+            if (!asm_thumb_bl_label(&emit->as, label_num)) {
+                goto branch_not_in_range;
+            }
+        } else if (op == MP_QSTR_bx) {
+            mp_uint_t r = get_arg_reg(emit, op_str, pn_args[0], 15);
+            asm_thumb_op16(&emit->as, 0x4700 | (r << 3));
+        } else if (op_str[0] == 'b' && (op_len == 3
+                                        || (op_len == 5 && op_str[3] == '_'
+                                            && (op_str[4] == 'n' || (ARMV7M && op_str[4] == 'w'))))) {
+            mp_uint_t cc = -1;
+            for (mp_uint_t i = 0; i < MP_ARRAY_SIZE(cc_name_table); i++) {
+                if (op_str[1] == cc_name_table[i].name[0] && op_str[2] == cc_name_table[i].name[1]) {
+                    cc = cc_name_table[i].cc;
+                }
+            }
+            if (cc == (mp_uint_t)-1) {
+                goto unknown_op;
+            }
+            int label_num = get_arg_label(emit, op_str, pn_args[0]);
+            bool wide = op_len == 5 && op_str[4] == 'w';
+            if (wide && !ARMV7M) {
+                goto unknown_op;
+            }
+            if (!asm_thumb_bcc_nw_label(&emit->as, cc, label_num, wide)) {
+                goto branch_not_in_range;
+            }
+        } else if (ARMV7M && op_str[0] == 'i' && op_str[1] == 't') {
+            const char *arg_str = get_arg_str(pn_args[0]);
+            mp_uint_t cc = -1;
+            for (mp_uint_t i = 0; i < MP_ARRAY_SIZE(cc_name_table); i++) {
+                if (arg_str[0] == cc_name_table[i].name[0]
+                    && arg_str[1] == cc_name_table[i].name[1]
+                    && arg_str[2] == '\0') {
+                    cc = cc_name_table[i].cc;
+                    break;
+                }
+            }
+            if (cc == (mp_uint_t)-1) {
+                goto unknown_op;
+            }
+            const char *os = op_str + 2;
+            while (*os != '\0') {
+                os++;
+            }
+            if (os > op_str + 5) {
+                goto unknown_op;
+            }
+            mp_uint_t it_mask = 8;
+            while (--os >= op_str + 2) {
+                it_mask >>= 1;
+                if (*os == 't') {
+                    it_mask |= (cc & 1) << 3;
+                } else if (*os == 'e') {
+                    it_mask |= ((~cc) & 1) << 3;
+                } else {
+                    goto unknown_op;
+                }
+            }
+            asm_thumb_it_cc(&emit->as, cc, it_mask);
+        } else if (op == MP_QSTR_cpsid) {
+            // TODO check pn_args[0] == i
+            asm_thumb_op16(&emit->as, ASM_THUMB_OP_CPSID_I);
+        } else if (op == MP_QSTR_cpsie) {
+            // TODO check pn_args[0] == i
+            asm_thumb_op16(&emit->as, ASM_THUMB_OP_CPSIE_I);
+        } else if (op == MP_QSTR_push) {
+            mp_uint_t reglist = get_arg_reglist(emit, op_str, pn_args[0]);
+            if ((reglist & 0xbf00) == 0) {
+                if ((reglist & (1 << 14)) == 0) {
+                    asm_thumb_op16(&emit->as, 0xb400 | reglist);
+                } else {
+                    // 16-bit encoding for pushing low registers and LR
+                    asm_thumb_op16(&emit->as, 0xb500 | (reglist & 0xff));
+                }
+            } else {
+                if (!ARMV7M) {
+                    goto unknown_op;
+                }
+                asm_thumb_op32(&emit->as, 0xe92d, reglist);
+            }
+        } else if (op == MP_QSTR_pop) {
+            mp_uint_t reglist = get_arg_reglist(emit, op_str, pn_args[0]);
+            if ((reglist & 0x7f00) == 0) {
+                if ((reglist & (1 << 15)) == 0) {
+                    asm_thumb_op16(&emit->as, 0xbc00 | reglist);
+                } else {
+                    // 16-bit encoding for popping low registers and PC, i.e., returning
+                    asm_thumb_op16(&emit->as, 0xbd00 | (reglist & 0xff));
+                }
+            } else {
+                if (!ARMV7M) {
+                    goto unknown_op;
+                }
+                asm_thumb_op32(&emit->as, 0xe8bd, reglist);
+            }
+        } else {
+            goto unknown_op;
+        }
+
+    } else if (n_args == 2) {
+        if (MP_PARSE_NODE_IS_ID(pn_args[1])) {
+            // second arg is a register (or should be)
+            mp_uint_t op_code, op_code_hi;
+            if (op == MP_QSTR_mov) {
+                mp_uint_t reg_dest = get_arg_reg(emit, op_str, pn_args[0], 15);
+                mp_uint_t reg_src = get_arg_reg(emit, op_str, pn_args[1], 15);
+                asm_thumb_mov_reg_reg(&emit->as, reg_dest, reg_src);
+            } else if (ARMV7M && op == MP_QSTR_clz) {
+                op_code_hi = 0xfab0;
+                op_code = 0xf080;
+                mp_uint_t rd, rm;
+            op_clz_rbit:
+                rd = get_arg_reg(emit, op_str, pn_args[0], 15);
+                rm = get_arg_reg(emit, op_str, pn_args[1], 15);
+                asm_thumb_op32(&emit->as, op_code_hi | rm, op_code | (rd << 8) | rm);
+            } else if (ARMV7M && op == MP_QSTR_rbit) {
+                op_code_hi = 0xfa90;
+                op_code = 0xf0a0;
+                goto op_clz_rbit;
+            } else if (ARMV7M && op == MP_QSTR_mrs) {
+                mp_uint_t reg_dest = get_arg_reg(emit, op_str, pn_args[0], 12);
+                mp_uint_t reg_src = get_arg_special_reg(emit, op_str, pn_args[1]);
+                asm_thumb_op32(&emit->as, 0xf3ef, 0x8000 | (reg_dest << 8) | reg_src);
+            } else {
+                if (op == MP_QSTR_and_) {
+                    op_code = ASM_THUMB_FORMAT_4_AND;
+                    mp_uint_t reg_dest, reg_src;
+                op_format_4:
+                    reg_dest = get_arg_reg(emit, op_str, pn_args[0], 7);
+                    reg_src = get_arg_reg(emit, op_str, pn_args[1], 7);
+                    asm_thumb_format_4(&emit->as, op_code, reg_dest, reg_src);
+                    return;
+                }
+                // search table for ALU ops
+                for (mp_uint_t i = 0; i < MP_ARRAY_SIZE(format_4_op_table); i++) {
+                    if (strncmp(op_str, format_4_op_table[i].name, 3) == 0 && op_str[3] == '\0') {
+                        op_code = 0x4000 | (format_4_op_table[i].op << 4);
+                        goto op_format_4;
+                    }
+                }
+                goto unknown_op;
+            }
+        } else {
+            // second arg is not a register
+            mp_uint_t op_code;
+            if (op == MP_QSTR_mov) {
+                op_code = ASM_THUMB_FORMAT_3_MOV;
+                mp_uint_t rlo_dest, i8_src;
+            op_format_3:
+                rlo_dest = get_arg_reg(emit, op_str, pn_args[0], 7);
+                i8_src = get_arg_i(emit, op_str, pn_args[1], 0xff);
+                asm_thumb_format_3(&emit->as, op_code, rlo_dest, i8_src);
+            } else if (op == MP_QSTR_cmp) {
+                op_code = ASM_THUMB_FORMAT_3_CMP;
+                goto op_format_3;
+            } else if (op == MP_QSTR_add) {
+                op_code = ASM_THUMB_FORMAT_3_ADD;
+                goto op_format_3;
+            } else if (op == MP_QSTR_sub) {
+                op_code = ASM_THUMB_FORMAT_3_SUB;
+                goto op_format_3;
+            } else if (ARMV7M && op == MP_QSTR_movw) {
+                op_code = ASM_THUMB_OP_MOVW;
+                mp_uint_t reg_dest;
+            op_movw_movt:
+                reg_dest = get_arg_reg(emit, op_str, pn_args[0], 15);
+                int i_src = get_arg_i(emit, op_str, pn_args[1], 0xffff);
+                asm_thumb_mov_reg_i16(&emit->as, op_code, reg_dest, i_src);
+            } else if (ARMV7M && op == MP_QSTR_movt) {
+                op_code = ASM_THUMB_OP_MOVT;
+                goto op_movw_movt;
+            } else if (ARMV7M && op == MP_QSTR_movwt) {
+                // this is a convenience instruction
+                mp_uint_t reg_dest = get_arg_reg(emit, op_str, pn_args[0], 15);
+                uint32_t i_src = get_arg_i(emit, op_str, pn_args[1], 0xffffffff);
+                asm_thumb_mov_reg_i16(&emit->as, ASM_THUMB_OP_MOVW, reg_dest, i_src & 0xffff);
+                asm_thumb_mov_reg_i16(&emit->as, ASM_THUMB_OP_MOVT, reg_dest, (i_src >> 16) & 0xffff);
+            } else if (ARMV7M && op == MP_QSTR_ldrex) {
+                mp_uint_t r_dest = get_arg_reg(emit, op_str, pn_args[0], 15);
+                mp_parse_node_t pn_base, pn_offset;
+                if (get_arg_addr(emit, op_str, pn_args[1], &pn_base, &pn_offset)) {
+                    mp_uint_t r_base = get_arg_reg(emit, op_str, pn_base, 15);
+                    mp_uint_t i8 = get_arg_i(emit, op_str, pn_offset, 0xff) >> 2;
+                    asm_thumb_op32(&emit->as, 0xe850 | r_base, 0x0f00 | (r_dest << 12) | i8);
+                }
+            } else {
+                // search table for ldr/str instructions
+                for (mp_uint_t i = 0; i < MP_ARRAY_SIZE(format_9_10_op_table); i++) {
+                    if (op == format_9_10_op_table[i].name) {
+                        op_code = format_9_10_op_table[i].op;
+                        mp_parse_node_t pn_base, pn_offset;
+                        mp_uint_t rlo_dest = get_arg_reg(emit, op_str, pn_args[0], 7);
+                        if (get_arg_addr(emit, op_str, pn_args[1], &pn_base, &pn_offset)) {
+                            mp_uint_t rlo_base = get_arg_reg(emit, op_str, pn_base, 7);
+                            mp_uint_t i5;
+                            if (op_code & ASM_THUMB_FORMAT_9_BYTE_TRANSFER) {
+                                i5 = get_arg_i(emit, op_str, pn_offset, 0x1f);
+                            } else if (op_code & ASM_THUMB_FORMAT_10_STRH) { // also catches LDRH
+                                i5 = get_arg_i(emit, op_str, pn_offset, 0x3e) >> 1;
+                            } else {
+                                i5 = get_arg_i(emit, op_str, pn_offset, 0x7c) >> 2;
+                            }
+                            asm_thumb_format_9_10(&emit->as, op_code, rlo_dest, rlo_base, i5);
+                            return;
+                        }
+                        break;
+                    }
+                }
+                goto unknown_op;
+            }
+        }
+
+    } else if (n_args == 3) {
+        mp_uint_t op_code;
+        if (op == MP_QSTR_lsl) {
+            op_code = ASM_THUMB_FORMAT_1_LSL;
+            mp_uint_t rlo_dest, rlo_src, i5;
+        op_format_1:
+            rlo_dest = get_arg_reg(emit, op_str, pn_args[0], 7);
+            rlo_src = get_arg_reg(emit, op_str, pn_args[1], 7);
+            i5 = get_arg_i(emit, op_str, pn_args[2], 0x1f);
+            asm_thumb_format_1(&emit->as, op_code, rlo_dest, rlo_src, i5);
+        } else if (op == MP_QSTR_lsr) {
+            op_code = ASM_THUMB_FORMAT_1_LSR;
+            goto op_format_1;
+        } else if (op == MP_QSTR_asr) {
+            op_code = ASM_THUMB_FORMAT_1_ASR;
+            goto op_format_1;
+        } else if (op == MP_QSTR_add) {
+            op_code = ASM_THUMB_FORMAT_2_ADD;
+            mp_uint_t rlo_dest, rlo_src;
+        op_format_2:
+            rlo_dest = get_arg_reg(emit, op_str, pn_args[0], 7);
+            rlo_src = get_arg_reg(emit, op_str, pn_args[1], 7);
+            int src_b;
+            if (MP_PARSE_NODE_IS_ID(pn_args[2])) {
+                op_code |= ASM_THUMB_FORMAT_2_REG_OPERAND;
+                src_b = get_arg_reg(emit, op_str, pn_args[2], 7);
+            } else {
+                op_code |= ASM_THUMB_FORMAT_2_IMM_OPERAND;
+                src_b = get_arg_i(emit, op_str, pn_args[2], 0x7);
+            }
+            asm_thumb_format_2(&emit->as, op_code, rlo_dest, rlo_src, src_b);
+        } else if (ARMV7M && op == MP_QSTR_sdiv) {
+            op_code = 0xfb90; // sdiv high part
+            mp_uint_t rd, rn, rm;
+        op_sdiv_udiv:
+            rd = get_arg_reg(emit, op_str, pn_args[0], 15);
+            rn = get_arg_reg(emit, op_str, pn_args[1], 15);
+            rm = get_arg_reg(emit, op_str, pn_args[2], 15);
+            asm_thumb_op32(&emit->as, op_code | rn, 0xf0f0 | (rd << 8) | rm);
+        } else if (ARMV7M && op == MP_QSTR_udiv) {
+            op_code = 0xfbb0; // udiv high part
+            goto op_sdiv_udiv;
+        } else if (op == MP_QSTR_sub) {
+            op_code = ASM_THUMB_FORMAT_2_SUB;
+            goto op_format_2;
+        } else if (ARMV7M && op == MP_QSTR_strex) {
+            mp_uint_t r_dest = get_arg_reg(emit, op_str, pn_args[0], 15);
+            mp_uint_t r_src = get_arg_reg(emit, op_str, pn_args[1], 15);
+            mp_parse_node_t pn_base, pn_offset;
+            if (get_arg_addr(emit, op_str, pn_args[2], &pn_base, &pn_offset)) {
+                mp_uint_t r_base = get_arg_reg(emit, op_str, pn_base, 15);
+                mp_uint_t i8 = get_arg_i(emit, op_str, pn_offset, 0xff) >> 2;
+                asm_thumb_op32(&emit->as, 0xe840 | r_base, (r_src << 12) | (r_dest << 8) | i8);
+            }
+        } else {
+            goto unknown_op;
+        }
+
+    } else {
+        goto unknown_op;
+    }
+
+    return;
+
+unknown_op:
+    emit_inline_thumb_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, MP_ERROR_TEXT("unsupported Thumb instruction '%s' with %d arguments"), op_str, n_args));
+    return;
+
+branch_not_in_range:
+    emit_inline_thumb_error_msg(emit, MP_ERROR_TEXT("branch not in range"));
+    return;
+}
+
+// Virtual-method table plugging the Thumb inline assembler into the
+// compiler core (emit_inline_asm_method_table_t, declared in py/emit.h).
+const emit_inline_asm_method_table_t emit_inline_thumb_method_table = {
+    #if MICROPY_DYNAMIC_COMPILER
+    // constructor/destructor are only table entries when the target is
+    // selected at runtime; otherwise they are called directly
+    emit_inline_thumb_new,
+    emit_inline_thumb_free,
+    #endif
+
+    emit_inline_thumb_start_pass,
+    emit_inline_thumb_end_pass,
+    emit_inline_thumb_count_params,
+    emit_inline_thumb_label,
+    emit_inline_thumb_op,
+};
+
+#endif // MICROPY_EMIT_INLINE_THUMB

+ 352 - 0
mp_flipper/lib/micropython/py/emitinlinextensa.c

@@ -0,0 +1,352 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013-2016 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdarg.h>
+#include <assert.h>
+
+#include "py/emit.h"
+#include "py/asmxtensa.h"
+
+#if MICROPY_EMIT_INLINE_XTENSA
+
+// Per-function state of the Xtensa inline assembler.
+struct _emit_inline_asm_t {
+    asm_xtensa_t as;          // underlying Xtensa machine-code assembler
+    uint16_t pass;            // current compiler pass (a pass_kind_t value)
+    mp_obj_t *error_slot;     // where a pending exception object is stored
+    mp_uint_t max_num_labels; // capacity of label_lookup
+    qstr *label_lookup;       // label qstr indexed by label number
+};
+
+// Record a SyntaxError with a fixed message in the emitter's error slot.
+static void emit_inline_xtensa_error_msg(emit_inline_asm_t *emit, mp_rom_error_text_t msg) {
+    *emit->error_slot = mp_obj_new_exception_msg(&mp_type_SyntaxError, msg);
+}
+
+// Record an already-constructed exception object in the error slot.
+static void emit_inline_xtensa_error_exc(emit_inline_asm_t *emit, mp_obj_t exc) {
+    *emit->error_slot = exc;
+}
+
+// Allocate an Xtensa inline-assembler instance able to track up to
+// max_num_labels labels.  Paired with emit_inline_xtensa_free().
+emit_inline_asm_t *emit_inline_xtensa_new(mp_uint_t max_num_labels) {
+    emit_inline_asm_t *emit = m_new_obj(emit_inline_asm_t);
+    memset(&emit->as, 0, sizeof(emit->as));
+    mp_asm_base_init(&emit->as.base, max_num_labels);
+    emit->max_num_labels = max_num_labels;
+    emit->label_lookup = m_new(qstr, max_num_labels);
+    return emit;
+}
+
+// Release everything allocated by emit_inline_xtensa_new().
+void emit_inline_xtensa_free(emit_inline_asm_t *emit) {
+    m_del(qstr, emit->label_lookup, emit->max_num_labels);
+    mp_asm_base_deinit(&emit->as.base, false);
+    m_del_obj(emit_inline_asm_t, emit);
+}
+
+// Begin a compiler pass: reset the label table on the first sizing pass,
+// select compute vs emit mode in the assembler, and emit the entry sequence.
+static void emit_inline_xtensa_start_pass(emit_inline_asm_t *emit, pass_kind_t pass, mp_obj_t *error_slot) {
+    emit->pass = pass;
+    emit->error_slot = error_slot;
+    if (emit->pass == MP_PASS_CODE_SIZE) {
+        memset(emit->label_lookup, 0, emit->max_num_labels * sizeof(qstr));
+    }
+    mp_asm_base_start_pass(&emit->as.base, pass == MP_PASS_EMIT ? MP_ASM_PASS_EMIT : MP_ASM_PASS_COMPUTE);
+    asm_xtensa_entry(&emit->as, 0);
+}
+
+// Finish a pass: emit the exit sequence and finalize the assembler state.
+// type_sig is unused by this emitter.
+static void emit_inline_xtensa_end_pass(emit_inline_asm_t *emit, mp_uint_t type_sig) {
+    asm_xtensa_exit(&emit->as);
+    asm_xtensa_end_pass(&emit->as);
+}
+
+// Validate the @micropython.asm_xtensa parameter list: at most 4 parameters,
+// and they must be named exactly a2, a3, a4, a5 in order (the argument
+// registers of the Xtensa calling convention).  Returns the parameter count,
+// or 0 after recording a SyntaxError.
+static mp_uint_t emit_inline_xtensa_count_params(emit_inline_asm_t *emit, mp_uint_t n_params, mp_parse_node_t *pn_params) {
+    if (n_params > 4) {
+        emit_inline_xtensa_error_msg(emit, MP_ERROR_TEXT("can only have up to 4 parameters to Xtensa assembly"));
+        return 0;
+    }
+    for (mp_uint_t i = 0; i < n_params; i++) {
+        if (!MP_PARSE_NODE_IS_ID(pn_params[i])) {
+            emit_inline_xtensa_error_msg(emit, MP_ERROR_TEXT("parameters must be registers in sequence a2 to a5"));
+            return 0;
+        }
+        const char *p = qstr_str(MP_PARSE_NODE_LEAF_ARG(pn_params[i]));
+        // parameter i must be the 2-character name "a<2+i>"
+        if (!(strlen(p) == 2 && p[0] == 'a' && (mp_uint_t)p[1] == '2' + i)) {
+            emit_inline_xtensa_error_msg(emit, MP_ERROR_TEXT("parameters must be registers in sequence a2 to a5"));
+            return 0;
+        }
+    }
+    return n_params;
+}
+
+// Define label `label_id` at the current position.  Returns false if the
+// same label name was already defined (duplicate), true otherwise.
+static bool emit_inline_xtensa_label(emit_inline_asm_t *emit, mp_uint_t label_num, qstr label_id) {
+    assert(label_num < emit->max_num_labels);
+    if (emit->pass == MP_PASS_CODE_SIZE) {
+        // check for duplicate label on first pass
+        for (uint i = 0; i < emit->max_num_labels; i++) {
+            if (emit->label_lookup[i] == label_id) {
+                return false;
+            }
+        }
+    }
+    emit->label_lookup[label_num] = label_id;
+    mp_asm_base_label_assign(&emit->as.base, label_num);
+    return true;
+}
+
+// Map of register name -> register number for a0..a15.  The name field is a
+// fixed 3-byte array: 2-character names carry an explicit NUL pad, while the
+// 3-character names (a10..a15) are NOT NUL-terminated -- get_arg_reg()
+// compares all 3 bytes and checks termination on the input string instead.
+typedef struct _reg_name_t { byte reg;
+                             byte name[3];
+} reg_name_t;
+static const reg_name_t reg_name_table[] = {
+    {0, "a0\0"},
+    {1, "a1\0"},
+    {2, "a2\0"},
+    {3, "a3\0"},
+    {4, "a4\0"},
+    {5, "a5\0"},
+    {6, "a6\0"},
+    {7, "a7\0"},
+    {8, "a8\0"},
+    {9, "a9\0"},
+    {10, "a10"},
+    {11, "a11"},
+    {12, "a12"},
+    {13, "a13"},
+    {14, "a14"},
+    {15, "a15"},
+};
+
+// return empty string in case of error, so we can attempt to parse the string
+// without a special check if it was in fact a string
+static const char *get_arg_str(mp_parse_node_t pn) {
+    if (MP_PARSE_NODE_IS_ID(pn)) {
+        qstr qst = MP_PARSE_NODE_LEAF_ARG(pn);
+        return qstr_str(qst);
+    } else {
+        return "";
+    }
+}
+
+// Parse operand `pn` as a register name (a0..a15) and return its number.
+// On failure records a SyntaxError naming the opcode `op` and returns 0.
+static mp_uint_t get_arg_reg(emit_inline_asm_t *emit, const char *op, mp_parse_node_t pn) {
+    const char *reg_str = get_arg_str(pn);
+    for (mp_uint_t i = 0; i < MP_ARRAY_SIZE(reg_name_table); i++) {
+        const reg_name_t *r = &reg_name_table[i];
+        // compare all 3 stored bytes, then require the input to terminate
+        // (after 2 chars for a0..a9, after 3 chars for a10..a15)
+        if (reg_str[0] == r->name[0]
+            && reg_str[1] == r->name[1]
+            && reg_str[2] == r->name[2]
+            && (reg_str[2] == '\0' || reg_str[3] == '\0')) {
+            return r->reg;
+        }
+    }
+    emit_inline_xtensa_error_exc(emit,
+        mp_obj_new_exception_msg_varg(&mp_type_SyntaxError,
+            MP_ERROR_TEXT("'%s' expects a register"), op));
+    return 0;
+}
+
+// Parse operand `pn` as an integer and range-check it against [min, max].
+// Passing min == max disables the range check (used for full 32-bit values).
+// On failure records a SyntaxError and returns 0.
+static uint32_t get_arg_i(emit_inline_asm_t *emit, const char *op, mp_parse_node_t pn, int min, int max) {
+    mp_obj_t o;
+    if (!mp_parse_node_get_int_maybe(pn, &o)) {
+        emit_inline_xtensa_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, MP_ERROR_TEXT("'%s' expects an integer"), op));
+        return 0;
+    }
+    uint32_t i = mp_obj_get_int_truncated(o);
+    if (min != max && ((int)i < min || (int)i > max)) {
+        emit_inline_xtensa_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, MP_ERROR_TEXT("'%s' integer %d isn't within range %d..%d"), op, i, min, max));
+        return 0;
+    }
+    return i;
+}
+
+// Parse operand `pn` as a label identifier and return its label number.
+// An undefined label is only an error on the final (emit) pass, because
+// forward references are not yet in the table on earlier passes.
+static int get_arg_label(emit_inline_asm_t *emit, const char *op, mp_parse_node_t pn) {
+    if (!MP_PARSE_NODE_IS_ID(pn)) {
+        emit_inline_xtensa_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, MP_ERROR_TEXT("'%s' expects a label"), op));
+        return 0;
+    }
+    qstr label_qstr = MP_PARSE_NODE_LEAF_ARG(pn);
+    for (uint i = 0; i < emit->max_num_labels; i++) {
+        if (emit->label_lookup[i] == label_qstr) {
+            return i;
+        }
+    }
+    // only need to have the labels on the last pass
+    if (emit->pass == MP_PASS_EMIT) {
+        emit_inline_xtensa_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, MP_ERROR_TEXT("label '%q' not defined"), label_qstr));
+    }
+    return 0;
+}
+
+// Instruction-format kinds for the 3-operand opcode table below:
+//   RRR    -- reg, reg, reg (arithmetic)
+//   RRI8   -- reg, reg, 8-bit immediate (loads/stores/addi)
+//   RRI8_B -- reg, reg, label (conditional branches)
+#define RRR (0)
+#define RRI8 (1)
+#define RRI8_B (2)
+
+typedef struct _opcode_table_3arg_t {
+    uint16_t name; // actually a qstr, which should fit in 16 bits
+    uint8_t type;  // low bits: format kind above; high nibble: imm range (RRI8)
+    uint8_t a0 : 4; // opcode fields (or branch condition code for RRI8_B)
+    uint8_t a1 : 4;
+} opcode_table_3arg_t;
+
+static const opcode_table_3arg_t opcode_table_3arg[] = {
+    // arithmetic opcodes: reg, reg, reg
+    {MP_QSTR_and_, RRR, 0, 1},
+    {MP_QSTR_or_, RRR, 0, 2},
+    {MP_QSTR_xor, RRR, 0, 3},
+    {MP_QSTR_add, RRR, 0, 8},
+    {MP_QSTR_sub, RRR, 0, 12},
+    {MP_QSTR_mull, RRR, 2, 8},
+
+    // load/store/addi opcodes: reg, reg, imm
+    // upper nibble of type encodes the range of the immediate arg
+    {MP_QSTR_l8ui, RRI8 | 0x10, 2, 0},
+    {MP_QSTR_l16ui, RRI8 | 0x30, 2, 1},
+    {MP_QSTR_l32i, RRI8 | 0x50, 2, 2},
+    {MP_QSTR_s8i, RRI8 | 0x10, 2, 4},
+    {MP_QSTR_s16i, RRI8 | 0x30, 2, 5},
+    {MP_QSTR_s32i, RRI8 | 0x50, 2, 6},
+    {MP_QSTR_l16si, RRI8 | 0x30, 2, 9},
+    {MP_QSTR_addi, RRI8 | 0x00, 2, 12},
+
+    // branch opcodes: reg, reg, label
+    {MP_QSTR_ball, RRI8_B, ASM_XTENSA_CC_ALL, 0},
+    {MP_QSTR_bany, RRI8_B, ASM_XTENSA_CC_ANY, 0},
+    {MP_QSTR_bbc, RRI8_B, ASM_XTENSA_CC_BC, 0},
+    {MP_QSTR_bbs, RRI8_B, ASM_XTENSA_CC_BS, 0},
+    {MP_QSTR_beq, RRI8_B, ASM_XTENSA_CC_EQ, 0},
+    {MP_QSTR_bge, RRI8_B, ASM_XTENSA_CC_GE, 0},
+    {MP_QSTR_bgeu, RRI8_B, ASM_XTENSA_CC_GEU, 0},
+    {MP_QSTR_blt, RRI8_B, ASM_XTENSA_CC_LT, 0},
+    {MP_QSTR_bnall, RRI8_B, ASM_XTENSA_CC_NALL, 0},
+    {MP_QSTR_bne, RRI8_B, ASM_XTENSA_CC_NE, 0},
+    {MP_QSTR_bnone, RRI8_B, ASM_XTENSA_CC_NONE, 0},
+};
+
+// Assemble one inline-assembler statement: `op` is the opcode qstr and
+// pn_args its parsed operands.  Dispatches first on operand count, then on
+// the opcode; errors are recorded in the error slot rather than raised here.
+static void emit_inline_xtensa_op(emit_inline_asm_t *emit, qstr op, mp_uint_t n_args, mp_parse_node_t *pn_args) {
+    size_t op_len;
+    const char *op_str = (const char *)qstr_data(op, &op_len);
+
+    if (n_args == 0) {
+        if (op == MP_QSTR_ret_n) {
+            asm_xtensa_op_ret_n(&emit->as);
+        } else {
+            goto unknown_op;
+        }
+
+    } else if (n_args == 1) {
+        if (op == MP_QSTR_callx0) {
+            uint r0 = get_arg_reg(emit, op_str, pn_args[0]);
+            asm_xtensa_op_callx0(&emit->as, r0);
+        } else if (op == MP_QSTR_j) {
+            int label = get_arg_label(emit, op_str, pn_args[0]);
+            asm_xtensa_j_label(&emit->as, label);
+        } else if (op == MP_QSTR_jx) {
+            uint r0 = get_arg_reg(emit, op_str, pn_args[0]);
+            asm_xtensa_op_jx(&emit->as, r0);
+        } else {
+            goto unknown_op;
+        }
+
+    } else if (n_args == 2) {
+        // all 2-operand opcodes take a register as their first operand
+        uint r0 = get_arg_reg(emit, op_str, pn_args[0]);
+        if (op == MP_QSTR_beqz) {
+            int label = get_arg_label(emit, op_str, pn_args[1]);
+            asm_xtensa_bccz_reg_label(&emit->as, ASM_XTENSA_CCZ_EQ, r0, label);
+        } else if (op == MP_QSTR_bnez) {
+            int label = get_arg_label(emit, op_str, pn_args[1]);
+            asm_xtensa_bccz_reg_label(&emit->as, ASM_XTENSA_CCZ_NE, r0, label);
+        } else if (op == MP_QSTR_mov || op == MP_QSTR_mov_n) {
+            // we emit mov.n for both "mov" and "mov_n" opcodes
+            uint r1 = get_arg_reg(emit, op_str, pn_args[1]);
+            asm_xtensa_op_mov_n(&emit->as, r0, r1);
+        } else if (op == MP_QSTR_movi) {
+            // for convenience we emit l32r if the integer doesn't fit in movi
+            uint32_t imm = get_arg_i(emit, op_str, pn_args[1], 0, 0);
+            asm_xtensa_mov_reg_i32(&emit->as, r0, imm);
+        } else {
+            goto unknown_op;
+        }
+
+    } else if (n_args == 3) {
+        // search table for 3 arg instructions
+        for (uint i = 0; i < MP_ARRAY_SIZE(opcode_table_3arg); i++) {
+            const opcode_table_3arg_t *o = &opcode_table_3arg[i];
+            if (op == o->name) {
+                uint r0 = get_arg_reg(emit, op_str, pn_args[0]);
+                uint r1 = get_arg_reg(emit, op_str, pn_args[1]);
+                if (o->type == RRR) {
+                    uint r2 = get_arg_reg(emit, op_str, pn_args[2]);
+                    asm_xtensa_op24(&emit->as, ASM_XTENSA_ENCODE_RRR(0, o->a0, o->a1, r0, r1, r2));
+                } else if (o->type == RRI8_B) {
+                    int label = get_arg_label(emit, op_str, pn_args[2]);
+                    asm_xtensa_bcc_reg_reg_label(&emit->as, o->a0, r0, r1, label);
+                } else {
+                    // RRI8: immediate range comes from the upper nibble of type;
+                    // 0 means signed 8-bit (addi), otherwise an unsigned 8-bit
+                    // value scaled by the access size (shift 0/1/2)
+                    int shift, min, max;
+                    if ((o->type & 0xf0) == 0) {
+                        shift = 0;
+                        min = -128;
+                        max = 127;
+                    } else {
+                        shift = (o->type & 0xf0) >> 5;
+                        min = 0;
+                        max = 0xff << shift;
+                    }
+                    uint32_t imm = get_arg_i(emit, op_str, pn_args[2], min, max);
+                    asm_xtensa_op24(&emit->as, ASM_XTENSA_ENCODE_RRI8(o->a0, o->a1, r1, r0, (imm >> shift) & 0xff));
+                }
+                return;
+            }
+        }
+        goto unknown_op;
+
+    } else {
+        goto unknown_op;
+    }
+
+    return;
+
+unknown_op:
+    emit_inline_xtensa_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, MP_ERROR_TEXT("unsupported Xtensa instruction '%s' with %d arguments"), op_str, n_args));
+    return;
+
+    /*
+branch_not_in_range:
+    emit_inline_xtensa_error_msg(emit, MP_ERROR_TEXT("branch not in range"));
+    return;
+    */
+}
+
+// Virtual-method table plugging the Xtensa inline assembler into the
+// compiler core (emit_inline_asm_method_table_t, declared in py/emit.h).
+const emit_inline_asm_method_table_t emit_inline_xtensa_method_table = {
+    #if MICROPY_DYNAMIC_COMPILER
+    // constructor/destructor are only table entries when the target is
+    // selected at runtime; otherwise they are called directly
+    emit_inline_xtensa_new,
+    emit_inline_xtensa_free,
+    #endif
+
+    emit_inline_xtensa_start_pass,
+    emit_inline_xtensa_end_pass,
+    emit_inline_xtensa_count_params,
+    emit_inline_xtensa_label,
+    emit_inline_xtensa_op,
+};
+
+#endif // MICROPY_EMIT_INLINE_XTENSA

+ 18 - 0
mp_flipper/lib/micropython/py/emitnarm.c

@@ -0,0 +1,18 @@
+// ARM specific stuff
+
+#include "py/mpconfig.h"
+
+#if MICROPY_EMIT_ARM
+
+// This is defined so that the assembler exports generic assembler API macros
+#define GENERIC_ASM_API (1)
+#include "py/asmarm.h"
+
+// Word indices of REG_LOCAL_x in nlr_buf_t
+#define NLR_BUF_IDX_LOCAL_1 (3) // r4
+
+#define N_ARM (1)
+#define EXPORT_FUN(name) emit_native_arm_##name
+#include "py/emitnative.c"
+
+#endif

+ 3009 - 0
mp_flipper/lib/micropython/py/emitnative.c

@@ -0,0 +1,3009 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+// Essentially normal Python has 1 type: Python objects
+// Viper has more than 1 type, and is just a more complicated (a superset of) Python.
+// If you declare everything in Viper as a Python object (ie omit type decls) then
+// it should in principle be exactly the same as Python native.
+// Having types means having more opcodes, like binary_op_nat_nat, binary_op_nat_obj etc.
+// In practice we won't have a VM but rather do this in asm which is actually very minimal.
+
+// Because it breaks strict Python equivalence it should be a completely separate
+// decorator.  It breaks equivalence because overflow on integers wraps around.
+// It shouldn't break equivalence if you don't use the new types, but since the
+// type decls might be used in normal Python for other reasons, it's probably safest,
+// cleanest and clearest to make it a separate decorator.
+
+// Actually, it does break equivalence because integers default to native integers,
+// not Python objects.
+
+// for x in l[0:8]: can be compiled into a native loop if l has pointer type
+
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+
+#include "py/emit.h"
+#include "py/nativeglue.h"
+#include "py/objfun.h"
+#include "py/objstr.h"
+
+#if MICROPY_DEBUG_VERBOSE // print debugging info
+#define DEBUG_PRINT (1)
+#define DEBUG_printf DEBUG_printf
+#else // don't print debugging info
+#define DEBUG_printf(...) (void)0
+#endif
+
+// wrapper around everything in this file
+#if N_X64 || N_X86 || N_THUMB || N_ARM || N_XTENSA || N_XTENSAWIN
+
+// C stack layout for native functions:
+//  0:                          nlr_buf_t [optional]
+//                              return_value [optional word]
+//                              exc_handler_unwind [optional word]
+//  emit->code_state_start:     mp_code_state_native_t
+//  emit->stack_start:          Python object stack             | emit->n_state
+//                              locals (reversed, L0 at end)    |
+//
+// C stack layout for native generator functions:
+//  0=emit->stack_start:        nlr_buf_t
+//                              return_value
+//                              exc_handler_unwind [optional word]
+//
+//  Then REG_GENERATOR_STATE points to:
+//  0=emit->code_state_start:   mp_code_state_native_t
+//  emit->stack_start:          Python object stack             | emit->n_state
+//                              locals (reversed, L0 at end)    |
+//
+// C stack layout for viper functions:
+//  0:                          nlr_buf_t [optional]
+//                              return_value [optional word]
+//                              exc_handler_unwind [optional word]
+//  emit->code_state_start:     fun_obj, old_globals [optional]
+//  emit->stack_start:          Python object stack             | emit->n_state
+//                              locals (reversed, L0 at end)    |
+//                              (L0-L2 may be in regs instead)
+
+// Native emitter needs to know the following sizes and offsets of C structs (on the target):
+#if MICROPY_DYNAMIC_COMPILER
+#define SIZEOF_NLR_BUF (2 + mp_dynamic_compiler.nlr_buf_num_regs + 1) // the +1 is conservative in case MICROPY_ENABLE_PYSTACK enabled
+#else
+#define SIZEOF_NLR_BUF (sizeof(nlr_buf_t) / sizeof(uintptr_t))
+#endif
+#define SIZEOF_CODE_STATE (sizeof(mp_code_state_native_t) / sizeof(uintptr_t))
+#define OFFSETOF_CODE_STATE_STATE (offsetof(mp_code_state_native_t, state) / sizeof(uintptr_t))
+#define OFFSETOF_CODE_STATE_FUN_BC (offsetof(mp_code_state_native_t, fun_bc) / sizeof(uintptr_t))
+#define OFFSETOF_CODE_STATE_IP (offsetof(mp_code_state_native_t, ip) / sizeof(uintptr_t))
+#define OFFSETOF_CODE_STATE_SP (offsetof(mp_code_state_native_t, sp) / sizeof(uintptr_t))
+#define OFFSETOF_CODE_STATE_N_STATE (offsetof(mp_code_state_native_t, n_state) / sizeof(uintptr_t))
+#define OFFSETOF_OBJ_FUN_BC_CONTEXT (offsetof(mp_obj_fun_bc_t, context) / sizeof(uintptr_t))
+#define OFFSETOF_OBJ_FUN_BC_CHILD_TABLE (offsetof(mp_obj_fun_bc_t, child_table) / sizeof(uintptr_t))
+#define OFFSETOF_OBJ_FUN_BC_BYTECODE (offsetof(mp_obj_fun_bc_t, bytecode) / sizeof(uintptr_t))
+#define OFFSETOF_MODULE_CONTEXT_QSTR_TABLE (offsetof(mp_module_context_t, constants.qstr_table) / sizeof(uintptr_t))
+#define OFFSETOF_MODULE_CONTEXT_OBJ_TABLE (offsetof(mp_module_context_t, constants.obj_table) / sizeof(uintptr_t))
+#define OFFSETOF_MODULE_CONTEXT_GLOBALS (offsetof(mp_module_context_t, module.globals) / sizeof(uintptr_t))
+
+// If not already defined, set parent args to same as child call registers
+#ifndef REG_PARENT_RET
+#define REG_PARENT_RET REG_RET
+#define REG_PARENT_ARG_1 REG_ARG_1
+#define REG_PARENT_ARG_2 REG_ARG_2
+#define REG_PARENT_ARG_3 REG_ARG_3
+#define REG_PARENT_ARG_4 REG_ARG_4
+#endif
+
+// Word index of nlr_buf_t.ret_val
+#define NLR_BUF_IDX_RET_VAL (1)
+
+// Whether the viper function needs access to fun_obj
+#define NEED_FUN_OBJ(emit) ((emit)->scope->exc_stack_size > 0 \
+    || ((emit)->scope->scope_flags & (MP_SCOPE_FLAG_REFGLOBALS | MP_SCOPE_FLAG_HASCONSTS)))
+
+// Whether the native/viper function needs to be wrapped in an exception handler
+#define NEED_GLOBAL_EXC_HANDLER(emit) ((emit)->scope->exc_stack_size > 0 \
+    || ((emit)->scope->scope_flags & (MP_SCOPE_FLAG_GENERATOR | MP_SCOPE_FLAG_REFGLOBALS)))
+
+// Whether a slot is needed to store LOCAL_IDX_EXC_HANDLER_UNWIND
+#define NEED_EXC_HANDLER_UNWIND(emit) ((emit)->scope->exc_stack_size > 0)
+
+// Whether registers can be used to store locals (only true if there are no
+// exception handlers, because otherwise an nlr_jump will restore registers to
+// their state at the start of the function and updates to locals will be lost)
+// Note: the emit parameter is fully parenthesized to keep the macro hygienic.
+#define CAN_USE_REGS_FOR_LOCALS(emit) ((emit)->scope->exc_stack_size == 0 && !((emit)->scope->scope_flags & MP_SCOPE_FLAG_GENERATOR))
+
+// Indices within the local C stack for various variables
+// NOTE(review): LOCAL_IDX_OLD_GLOBALS and LOCAL_IDX_GEN_PC alias the same
+// slot (code_state_start + OFFSETOF_CODE_STATE_IP) — presumably the two uses
+// are never live at the same time; confirm before repurposing either slot.
+#define LOCAL_IDX_EXC_VAL(emit) (NLR_BUF_IDX_RET_VAL)
+#define LOCAL_IDX_EXC_HANDLER_PC(emit) (NLR_BUF_IDX_LOCAL_1)
+#define LOCAL_IDX_EXC_HANDLER_UNWIND(emit) (SIZEOF_NLR_BUF + 1) // this needs a dedicated variable outside nlr_buf_t
+#define LOCAL_IDX_RET_VAL(emit) (SIZEOF_NLR_BUF) // needed when NEED_GLOBAL_EXC_HANDLER is true
+#define LOCAL_IDX_FUN_OBJ(emit) ((emit)->code_state_start + OFFSETOF_CODE_STATE_FUN_BC)
+#define LOCAL_IDX_OLD_GLOBALS(emit) ((emit)->code_state_start + OFFSETOF_CODE_STATE_IP)
+#define LOCAL_IDX_GEN_PC(emit) ((emit)->code_state_start + OFFSETOF_CODE_STATE_IP)
+#define LOCAL_IDX_LOCAL_VAR(emit, local_num) ((emit)->stack_start + (emit)->n_state - 1 - (local_num))
+
+#if MICROPY_PERSISTENT_CODE_SAVE
+
+// When building with the ability to save native code to .mpy files:
+//  - Qstrs are indirect via qstr_table, and REG_LOCAL_3 always points to qstr_table.
+//  - In a generator no registers are used to store locals, and REG_LOCAL_2 points to the generator state.
+//  - At most 2 registers hold local variables (see CAN_USE_REGS_FOR_LOCALS for when this is possible).
+
+#define REG_GENERATOR_STATE (REG_LOCAL_2)
+#define REG_QSTR_TABLE (REG_LOCAL_3)
+#define MAX_REGS_FOR_LOCAL_VARS (2)
+
+static const uint8_t reg_local_table[MAX_REGS_FOR_LOCAL_VARS] = {REG_LOCAL_1, REG_LOCAL_2};
+
+#else
+
+// When building without the ability to save native code to .mpy files:
+//  - Qstrs values are written directly into the machine code.
+//  - In a generator no registers are used to store locals, and REG_LOCAL_3 points to the generator state.
+//  - At most 3 registers hold local variables (see CAN_USE_REGS_FOR_LOCALS for when this is possible).
+
+#define REG_GENERATOR_STATE (REG_LOCAL_3)
+#define MAX_REGS_FOR_LOCAL_VARS (3)
+
+static const uint8_t reg_local_table[MAX_REGS_FOR_LOCAL_VARS] = {REG_LOCAL_1, REG_LOCAL_2, REG_LOCAL_3};
+
+#endif
+
+// The highest-numbered register that may cache a local variable.
+#define REG_LOCAL_LAST (reg_local_table[MAX_REGS_FOR_LOCAL_VARS - 1])
+
+// Record a ViperTypeError in the caller-provided error slot; this does not
+// raise immediately, it only stores the exception object into *error_slot.
+#define EMIT_NATIVE_VIPER_TYPE_ERROR(emit, ...) do { \
+        *(emit)->error_slot = mp_obj_new_exception_msg_varg(&mp_type_ViperTypeError, __VA_ARGS__); \
+} while (0)
+
+// Where a stacked value currently resides: materialised in its concrete stack
+// slot, cached in a CPU register, or still a compile-time immediate constant.
+typedef enum {
+    STACK_VALUE,
+    STACK_REG,
+    STACK_IMM,
+} stack_info_kind_t;
+
+// these enums must be distinct and the bottom 4 bits
+// must correspond to the correct MP_NATIVE_TYPE_xxx value
+typedef enum {
+    VTYPE_PYOBJ = 0x00 | MP_NATIVE_TYPE_OBJ,
+    VTYPE_BOOL = 0x00 | MP_NATIVE_TYPE_BOOL,
+    VTYPE_INT = 0x00 | MP_NATIVE_TYPE_INT,
+    VTYPE_UINT = 0x00 | MP_NATIVE_TYPE_UINT,
+    VTYPE_PTR = 0x00 | MP_NATIVE_TYPE_PTR,
+    VTYPE_PTR8 = 0x00 | MP_NATIVE_TYPE_PTR8,
+    VTYPE_PTR16 = 0x00 | MP_NATIVE_TYPE_PTR16,
+    VTYPE_PTR32 = 0x00 | MP_NATIVE_TYPE_PTR32,
+
+    // special types, distinguished from the basic types by their upper bits
+    VTYPE_PTR_NONE = 0x50 | MP_NATIVE_TYPE_PTR,
+
+    VTYPE_UNBOUND = 0x60 | MP_NATIVE_TYPE_OBJ,
+    VTYPE_BUILTIN_CAST = 0x70 | MP_NATIVE_TYPE_OBJ,
+} vtype_kind_t;
+
+// Map a viper type to the qstr of its user-facing type name.
+// VTYPE_PTR_NONE and any unrecognised type map to MP_QSTR_None.
+static qstr vtype_to_qstr(vtype_kind_t vtype) {
+    static const struct {
+        vtype_kind_t vtype;
+        qstr name;
+    } vtype_name_table[] = {
+        {VTYPE_PYOBJ, MP_QSTR_object},
+        {VTYPE_BOOL, MP_QSTR_bool},
+        {VTYPE_INT, MP_QSTR_int},
+        {VTYPE_UINT, MP_QSTR_uint},
+        {VTYPE_PTR, MP_QSTR_ptr},
+        {VTYPE_PTR8, MP_QSTR_ptr8},
+        {VTYPE_PTR16, MP_QSTR_ptr16},
+        {VTYPE_PTR32, MP_QSTR_ptr32},
+    };
+    for (size_t i = 0; i < sizeof(vtype_name_table) / sizeof(vtype_name_table[0]); ++i) {
+        if (vtype_name_table[i].vtype == vtype) {
+            return vtype_name_table[i].name;
+        }
+    }
+    return MP_QSTR_None;
+}
+
+// One entry per value on the modelled Python stack, recording its viper type
+// and where the value currently lives (stack slot, register or immediate).
+typedef struct _stack_info_t {
+    vtype_kind_t vtype;
+    stack_info_kind_t kind;
+    union {
+        int u_reg;   // valid when kind == STACK_REG
+        mp_int_t u_imm; // valid when kind == STACK_IMM
+    } data;
+} stack_info_t;
+
+// Sentinel values for exc_stack_entry_t.unwind_label (a 15-bit field).
+#define UNWIND_LABEL_UNUSED (0x7fff)
+#define UNWIND_LABEL_DO_FINAL_UNWIND (0x7ffe)
+
+// One entry per exception handler (try/finally) tracked during codegen.
+typedef struct _exc_stack_entry_t {
+    uint16_t label : 15;
+    uint16_t is_finally : 1;
+    uint16_t unwind_label : 15;
+    uint16_t is_active : 1;
+} exc_stack_entry_t;
+
+// Native emitter state; a single instance is reused across scopes and passes.
+struct _emit_t {
+    mp_emit_common_t *emit_common;
+    mp_obj_t *error_slot;   // ViperTypeError is stored here (see EMIT_NATIVE_VIPER_TYPE_ERROR)
+    uint *label_slot;
+    uint exit_label;
+    int pass;               // current pass_kind_t
+
+    bool do_viper_types;    // true when the scope was compiled with the viper emit option
+
+    mp_uint_t local_vtype_alloc;    // allocated length of local_vtype
+    vtype_kind_t *local_vtype;      // viper type of each local variable
+
+    mp_uint_t stack_info_alloc;     // allocated length of stack_info
+    stack_info_t *stack_info;       // model of the Python value stack
+    vtype_kind_t saved_stack_vtype;
+
+    size_t exc_stack_alloc;         // allocated length of exc_stack
+    size_t exc_stack_size;          // number of entries currently on exc_stack
+    exc_stack_entry_t *exc_stack;   // model of the exception-handler stack
+
+    int prelude_offset;             // offset of the prelude within the machine code
+    int prelude_ptr_index;          // child-table index that holds the prelude pointer
+    int start_offset;
+    int n_state;                    // number of state slots (locals + Python stack)
+    uint16_t code_state_start;      // C-stack index where the code_state begins
+    uint16_t stack_start;           // C-stack index where the Python stack begins
+    int stack_size;                 // current logical Python stack depth
+    uint16_t n_info;                // byte size of the prelude's source-info section
+    uint16_t n_cell;                // byte size of the prelude's cell-init section
+
+    scope_t *scope;                 // scope currently being compiled
+
+    ASM_T *as;                      // architecture-specific assembler state
+};
+
+static void emit_load_reg_with_object(emit_t *emit, int reg, mp_obj_t obj);
+static void emit_native_global_exc_entry(emit_t *emit);
+static void emit_native_global_exc_exit(emit_t *emit);
+static void emit_native_load_const_obj(emit_t *emit, mp_obj_t obj);
+
+// Allocate and initialise a new native emitter.  The stack-info and
+// exception-stack arrays start with capacity 8 and grow on demand.
+emit_t *EXPORT_FUN(new)(mp_emit_common_t * emit_common, mp_obj_t *error_slot, uint *label_slot, mp_uint_t max_num_labels) {
+    emit_t *emit = m_new0(emit_t, 1);
+    emit->emit_common = emit_common;
+    emit->error_slot = error_slot;
+    emit->label_slot = label_slot;
+    emit->stack_info_alloc = 8;
+    emit->stack_info = m_new(stack_info_t, emit->stack_info_alloc);
+    emit->exc_stack_alloc = 8;
+    emit->exc_stack = m_new(exc_stack_entry_t, emit->exc_stack_alloc);
+    emit->as = m_new0(ASM_T, 1);
+    mp_asm_base_init(&emit->as->base, max_num_labels);
+    return emit;
+}
+
+// Free an emitter and the tracking buffers it owns.  NOTE(review): the
+// assembler is deinitialised with `false` — presumably this preserves the
+// emitted machine-code buffer; confirm against mp_asm_base_deinit.
+void EXPORT_FUN(free)(emit_t * emit) {
+    mp_asm_base_deinit(&emit->as->base, false);
+    m_del_obj(ASM_T, emit->as);
+    m_del(exc_stack_entry_t, emit->exc_stack, emit->exc_stack_alloc);
+    m_del(vtype_kind_t, emit->local_vtype, emit->local_vtype_alloc);
+    m_del(stack_info_t, emit->stack_info, emit->stack_info_alloc);
+    m_del_obj(emit_t, emit);
+}
+
+static void emit_call_with_imm_arg(emit_t *emit, mp_fun_kind_t fun_kind, mp_int_t arg_val, int arg_reg);
+
+// Load a constant into reg_dest by indexing the runtime function table
+// (const_val is a word offset into mp_fun_table, e.g. MP_F_CONST_NONE_OBJ).
+static void emit_native_mov_reg_const(emit_t *emit, int reg_dest, int const_val) {
+    ASM_LOAD_REG_REG_OFFSET(emit->as, reg_dest, REG_FUN_TABLE, const_val);
+}
+
+// Store reg_src into state slot local_num.  For generators the state lives in
+// a heap structure addressed via REG_GENERATOR_STATE; otherwise it is a local
+// on the C stack.
+static void emit_native_mov_state_reg(emit_t *emit, int local_num, int reg_src) {
+    if (!(emit->scope->scope_flags & MP_SCOPE_FLAG_GENERATOR)) {
+        ASM_MOV_LOCAL_REG(emit->as, local_num, reg_src);
+    } else {
+        ASM_STORE_REG_REG_OFFSET(emit->as, reg_src, REG_GENERATOR_STATE, local_num);
+    }
+}
+
+// Load state slot local_num into reg_dest, fetching from the heap-allocated
+// generator state when compiling a generator, else from the C stack.
+static void emit_native_mov_reg_state(emit_t *emit, int reg_dest, int local_num) {
+    if (!(emit->scope->scope_flags & MP_SCOPE_FLAG_GENERATOR)) {
+        ASM_MOV_REG_LOCAL(emit->as, reg_dest, local_num);
+    } else {
+        ASM_LOAD_REG_REG_OFFSET(emit->as, reg_dest, REG_GENERATOR_STATE, local_num);
+    }
+}
+
+// Load the *address* of state slot local_num into reg_dest.  For generators
+// the address is computed relative to REG_GENERATOR_STATE (heap state);
+// otherwise it is the address of a C-stack local.
+static void emit_native_mov_reg_state_addr(emit_t *emit, int reg_dest, int local_num) {
+    if (emit->scope->scope_flags & MP_SCOPE_FLAG_GENERATOR) {
+        ASM_MOV_REG_IMM(emit->as, reg_dest, local_num * ASM_WORD_SIZE);
+        ASM_ADD_REG_REG(emit->as, reg_dest, REG_GENERATOR_STATE);
+    } else {
+        ASM_MOV_REG_LOCAL_ADDR(emit->as, reg_dest, local_num);
+    }
+}
+
+// Load the value of qstr qst into arg_reg.  When saving to .mpy files the
+// qstr is loaded at runtime via the qstr table; otherwise its numeric value
+// is embedded directly in the machine code.
+static void emit_native_mov_reg_qstr(emit_t *emit, int arg_reg, qstr qst) {
+    #if MICROPY_PERSISTENT_CODE_SAVE
+    ASM_LOAD16_REG_REG_OFFSET(emit->as, arg_reg, REG_QSTR_TABLE, mp_emit_common_use_qstr(emit->emit_common, qst));
+    #else
+    ASM_MOV_REG_IMM(emit->as, arg_reg, qst);
+    #endif
+}
+
+// Load the mp_obj_t form of qstr qst (i.e. MP_OBJ_NEW_QSTR(qst)) into
+// reg_dest, indirectly via the object table when saving to .mpy files.
+static void emit_native_mov_reg_qstr_obj(emit_t *emit, int reg_dest, qstr qst) {
+    #if MICROPY_PERSISTENT_CODE_SAVE
+    emit_load_reg_with_object(emit, reg_dest, MP_OBJ_NEW_QSTR(qst));
+    #else
+    ASM_MOV_REG_IMM(emit->as, reg_dest, (mp_uint_t)MP_OBJ_NEW_QSTR(qst));
+    #endif
+}
+
+// Store immediate value imm into state slot local_num, using reg_temp as a
+// scratch register for the transfer.
+#define emit_native_mov_state_imm_via(emit, local_num, imm, reg_temp) \
+    do { \
+        ASM_MOV_REG_IMM((emit)->as, (reg_temp), (imm)); \
+        emit_native_mov_state_reg((emit), (local_num), (reg_temp)); \
+    } while (false)
+
+// Begin a compilation pass for the given scope: reset per-pass bookkeeping,
+// assign (viper) types to arguments and locals, then emit the function entry
+// sequence — one of three shapes: viper entry, generator entry, or the normal
+// path that calls mp_setup_code_state.
+static void emit_native_start_pass(emit_t *emit, pass_kind_t pass, scope_t *scope) {
+    DEBUG_printf("start_pass(pass=%u, scope=%p)\n", pass, scope);
+
+    emit->pass = pass;
+    emit->do_viper_types = scope->emit_options == MP_EMIT_OPT_VIPER;
+    emit->stack_size = 0;
+    emit->scope = scope;
+
+    // allocate memory for keeping track of the types of locals
+    if (emit->local_vtype_alloc < scope->num_locals) {
+        emit->local_vtype = m_renew(vtype_kind_t, emit->local_vtype, emit->local_vtype_alloc, scope->num_locals);
+        emit->local_vtype_alloc = scope->num_locals;
+    }
+
+    // set default type for arguments
+    mp_uint_t num_args = emit->scope->num_pos_args + emit->scope->num_kwonly_args;
+    if (scope->scope_flags & MP_SCOPE_FLAG_VARARGS) {
+        num_args += 1;
+    }
+    if (scope->scope_flags & MP_SCOPE_FLAG_VARKEYWORDS) {
+        num_args += 1;
+    }
+    for (mp_uint_t i = 0; i < num_args; i++) {
+        emit->local_vtype[i] = VTYPE_PYOBJ;
+    }
+
+    // Set viper type for arguments
+    if (emit->do_viper_types) {
+        for (int i = 0; i < emit->scope->id_info_len; ++i) {
+            id_info_t *id = &emit->scope->id_info[i];
+            if (id->flags & ID_FLAG_IS_PARAM) {
+                assert(id->local_num < emit->local_vtype_alloc);
+                emit->local_vtype[id->local_num] = id->flags >> ID_FLAG_VIPER_TYPE_POS;
+            }
+        }
+    }
+
+    // local variables begin unbound, and have unknown type
+    for (mp_uint_t i = num_args; i < emit->local_vtype_alloc; i++) {
+        emit->local_vtype[i] = emit->do_viper_types ? VTYPE_UNBOUND : VTYPE_PYOBJ;
+    }
+
+    // values on stack begin unbound
+    for (mp_uint_t i = 0; i < emit->stack_info_alloc; i++) {
+        emit->stack_info[i].kind = STACK_VALUE;
+        emit->stack_info[i].vtype = VTYPE_UNBOUND;
+    }
+
+    mp_asm_base_start_pass(&emit->as->base, pass == MP_PASS_EMIT ? MP_ASM_PASS_EMIT : MP_ASM_PASS_COMPUTE);
+
+    // generate code for entry to function
+
+    // Work out start of code state (mp_code_state_native_t or reduced version for viper)
+    emit->code_state_start = 0;
+    if (NEED_GLOBAL_EXC_HANDLER(emit)) {
+        emit->code_state_start = SIZEOF_NLR_BUF; // for nlr_buf_t
+        emit->code_state_start += 1;  // for return_value
+        if (NEED_EXC_HANDLER_UNWIND(emit)) {
+            emit->code_state_start += 1;
+        }
+    }
+
+    size_t fun_table_off = mp_emit_common_use_const_obj(emit->emit_common, MP_OBJ_FROM_PTR(&mp_fun_table));
+
+    if (emit->do_viper_types) {
+        // Work out size of state (locals plus stack)
+        // n_state counts all stack and locals, even those in registers
+        emit->n_state = scope->num_locals + scope->stack_size;
+        int num_locals_in_regs = 0;
+        if (CAN_USE_REGS_FOR_LOCALS(emit)) {
+            num_locals_in_regs = scope->num_locals;
+            if (num_locals_in_regs > MAX_REGS_FOR_LOCAL_VARS) {
+                num_locals_in_regs = MAX_REGS_FOR_LOCAL_VARS;
+            }
+            // Need a spot for REG_LOCAL_LAST (see below)
+            if (scope->num_pos_args >= MAX_REGS_FOR_LOCAL_VARS + 1) {
+                --num_locals_in_regs;
+            }
+        }
+
+        // Work out where the locals and Python stack start within the C stack
+        if (NEED_GLOBAL_EXC_HANDLER(emit)) {
+            // Reserve 2 words for function object and old globals
+            emit->stack_start = emit->code_state_start + 2;
+        } else if (scope->scope_flags & MP_SCOPE_FLAG_HASCONSTS) {
+            // Reserve 1 word for function object, to access const table
+            emit->stack_start = emit->code_state_start + 1;
+        } else {
+            emit->stack_start = emit->code_state_start + 0;
+        }
+
+        // Entry to function
+        ASM_ENTRY(emit->as, emit->stack_start + emit->n_state - num_locals_in_regs);
+
+        #if N_X86
+        asm_x86_mov_arg_to_r32(emit->as, 0, REG_PARENT_ARG_1);
+        #endif
+
+        // Load REG_FUN_TABLE with a pointer to mp_fun_table, found in the const_table
+        ASM_LOAD_REG_REG_OFFSET(emit->as, REG_FUN_TABLE, REG_PARENT_ARG_1, OFFSETOF_OBJ_FUN_BC_CONTEXT);
+        #if MICROPY_PERSISTENT_CODE_SAVE
+        ASM_LOAD_REG_REG_OFFSET(emit->as, REG_QSTR_TABLE, REG_FUN_TABLE, OFFSETOF_MODULE_CONTEXT_QSTR_TABLE);
+        #endif
+        ASM_LOAD_REG_REG_OFFSET(emit->as, REG_FUN_TABLE, REG_FUN_TABLE, OFFSETOF_MODULE_CONTEXT_OBJ_TABLE);
+        ASM_LOAD_REG_REG_OFFSET(emit->as, REG_FUN_TABLE, REG_FUN_TABLE, fun_table_off);
+
+        // Store function object (passed as first arg) to stack if needed
+        if (NEED_FUN_OBJ(emit)) {
+            ASM_MOV_LOCAL_REG(emit->as, LOCAL_IDX_FUN_OBJ(emit), REG_PARENT_ARG_1);
+        }
+
+        // Put n_args in REG_ARG_1, n_kw in REG_ARG_2, args array in REG_LOCAL_LAST
+        #if N_X86
+        asm_x86_mov_arg_to_r32(emit->as, 1, REG_ARG_1);
+        asm_x86_mov_arg_to_r32(emit->as, 2, REG_ARG_2);
+        asm_x86_mov_arg_to_r32(emit->as, 3, REG_LOCAL_LAST);
+        #else
+        ASM_MOV_REG_REG(emit->as, REG_ARG_1, REG_PARENT_ARG_2);
+        ASM_MOV_REG_REG(emit->as, REG_ARG_2, REG_PARENT_ARG_3);
+        ASM_MOV_REG_REG(emit->as, REG_LOCAL_LAST, REG_PARENT_ARG_4);
+        #endif
+
+        // Check number of args matches this function, and call mp_arg_check_num_sig if not
+        ASM_JUMP_IF_REG_NONZERO(emit->as, REG_ARG_2, *emit->label_slot + 4, true);
+        ASM_MOV_REG_IMM(emit->as, REG_ARG_3, scope->num_pos_args);
+        ASM_JUMP_IF_REG_EQ(emit->as, REG_ARG_1, REG_ARG_3, *emit->label_slot + 5);
+        mp_asm_base_label_assign(&emit->as->base, *emit->label_slot + 4);
+        ASM_MOV_REG_IMM(emit->as, REG_ARG_3, MP_OBJ_FUN_MAKE_SIG(scope->num_pos_args, scope->num_pos_args, false));
+        ASM_CALL_IND(emit->as, MP_F_ARG_CHECK_NUM_SIG);
+        mp_asm_base_label_assign(&emit->as->base, *emit->label_slot + 5);
+
+        // Store arguments into locals (reg or stack), converting to native if needed
+        for (int i = 0; i < emit->scope->num_pos_args; i++) {
+            int r = REG_ARG_1;
+            ASM_LOAD_REG_REG_OFFSET(emit->as, REG_ARG_1, REG_LOCAL_LAST, i);
+            if (emit->local_vtype[i] != VTYPE_PYOBJ) {
+                emit_call_with_imm_arg(emit, MP_F_CONVERT_OBJ_TO_NATIVE, emit->local_vtype[i], REG_ARG_2);
+                r = REG_RET;
+            }
+            // REG_LOCAL_LAST points to the args array so be sure not to overwrite it if it's still needed
+            if (i < MAX_REGS_FOR_LOCAL_VARS && CAN_USE_REGS_FOR_LOCALS(emit) && (i != MAX_REGS_FOR_LOCAL_VARS - 1 || emit->scope->num_pos_args == MAX_REGS_FOR_LOCAL_VARS)) {
+                ASM_MOV_REG_REG(emit->as, reg_local_table[i], r);
+            } else {
+                emit_native_mov_state_reg(emit, LOCAL_IDX_LOCAL_VAR(emit, i), r);
+            }
+        }
+        // Get local from the stack back into REG_LOCAL_LAST if this reg couldn't be written to above
+        if (emit->scope->num_pos_args >= MAX_REGS_FOR_LOCAL_VARS + 1 && CAN_USE_REGS_FOR_LOCALS(emit)) {
+            ASM_MOV_REG_LOCAL(emit->as, REG_LOCAL_LAST, LOCAL_IDX_LOCAL_VAR(emit, MAX_REGS_FOR_LOCAL_VARS - 1));
+        }
+
+        emit_native_global_exc_entry(emit);
+
+    } else {
+        // work out size of state (locals plus stack)
+        emit->n_state = scope->num_locals + scope->stack_size;
+
+        // Store in the first machine-word an index used to the function's prelude.
+        // This is used at runtime by mp_obj_fun_native_get_prelude_ptr().
+        mp_asm_base_data(&emit->as->base, ASM_WORD_SIZE, (uintptr_t)emit->prelude_ptr_index);
+
+        if (emit->scope->scope_flags & MP_SCOPE_FLAG_GENERATOR) {
+            mp_asm_base_data(&emit->as->base, ASM_WORD_SIZE, (uintptr_t)emit->start_offset);
+            ASM_ENTRY(emit->as, emit->code_state_start);
+
+            // Reset the state size for the state pointed to by REG_GENERATOR_STATE
+            emit->code_state_start = 0;
+            emit->stack_start = SIZEOF_CODE_STATE;
+
+            // Put address of code_state into REG_GENERATOR_STATE
+            #if N_X86
+            asm_x86_mov_arg_to_r32(emit->as, 0, REG_GENERATOR_STATE);
+            #else
+            ASM_MOV_REG_REG(emit->as, REG_GENERATOR_STATE, REG_PARENT_ARG_1);
+            #endif
+
+            // Put throw value into LOCAL_IDX_EXC_VAL slot, for yield/yield-from
+            #if N_X86
+            asm_x86_mov_arg_to_r32(emit->as, 1, REG_PARENT_ARG_2);
+            #endif
+            ASM_MOV_LOCAL_REG(emit->as, LOCAL_IDX_EXC_VAL(emit), REG_PARENT_ARG_2);
+
+            // Load REG_FUN_TABLE with a pointer to mp_fun_table, found in the const_table
+            ASM_LOAD_REG_REG_OFFSET(emit->as, REG_TEMP0, REG_GENERATOR_STATE, LOCAL_IDX_FUN_OBJ(emit));
+            ASM_LOAD_REG_REG_OFFSET(emit->as, REG_TEMP0, REG_TEMP0, OFFSETOF_OBJ_FUN_BC_CONTEXT);
+            #if MICROPY_PERSISTENT_CODE_SAVE
+            ASM_LOAD_REG_REG_OFFSET(emit->as, REG_QSTR_TABLE, REG_TEMP0, OFFSETOF_MODULE_CONTEXT_QSTR_TABLE);
+            #endif
+            ASM_LOAD_REG_REG_OFFSET(emit->as, REG_TEMP0, REG_TEMP0, OFFSETOF_MODULE_CONTEXT_OBJ_TABLE);
+            ASM_LOAD_REG_REG_OFFSET(emit->as, REG_FUN_TABLE, REG_TEMP0, fun_table_off);
+        } else {
+            // The locals and stack start after the code_state structure
+            emit->stack_start = emit->code_state_start + SIZEOF_CODE_STATE;
+
+            // Allocate space on C-stack for code_state structure, which includes state
+            ASM_ENTRY(emit->as, emit->stack_start + emit->n_state);
+
+            // Prepare incoming arguments for call to mp_setup_code_state
+
+            #if N_X86
+            asm_x86_mov_arg_to_r32(emit->as, 0, REG_PARENT_ARG_1);
+            asm_x86_mov_arg_to_r32(emit->as, 1, REG_PARENT_ARG_2);
+            asm_x86_mov_arg_to_r32(emit->as, 2, REG_PARENT_ARG_3);
+            asm_x86_mov_arg_to_r32(emit->as, 3, REG_PARENT_ARG_4);
+            #endif
+
+            // Load REG_FUN_TABLE with a pointer to mp_fun_table, found in the const_table
+            ASM_LOAD_REG_REG_OFFSET(emit->as, REG_FUN_TABLE, REG_PARENT_ARG_1, OFFSETOF_OBJ_FUN_BC_CONTEXT);
+            #if MICROPY_PERSISTENT_CODE_SAVE
+            ASM_LOAD_REG_REG_OFFSET(emit->as, REG_QSTR_TABLE, REG_FUN_TABLE, OFFSETOF_MODULE_CONTEXT_QSTR_TABLE);
+            #endif
+            ASM_LOAD_REG_REG_OFFSET(emit->as, REG_FUN_TABLE, REG_FUN_TABLE, OFFSETOF_MODULE_CONTEXT_OBJ_TABLE);
+            ASM_LOAD_REG_REG_OFFSET(emit->as, REG_FUN_TABLE, REG_FUN_TABLE, fun_table_off);
+
+            // Set code_state.fun_bc
+            ASM_MOV_LOCAL_REG(emit->as, LOCAL_IDX_FUN_OBJ(emit), REG_PARENT_ARG_1);
+
+            // Set code_state.n_state (only works on little endian targets due to n_state being uint16_t)
+            emit_native_mov_state_imm_via(emit, emit->code_state_start + OFFSETOF_CODE_STATE_N_STATE, emit->n_state, REG_ARG_1);
+
+            // Put address of code_state into first arg
+            ASM_MOV_REG_LOCAL_ADDR(emit->as, REG_ARG_1, emit->code_state_start);
+
+            // Copy next 3 args if needed
+            #if REG_ARG_2 != REG_PARENT_ARG_2
+            ASM_MOV_REG_REG(emit->as, REG_ARG_2, REG_PARENT_ARG_2);
+            #endif
+            #if REG_ARG_3 != REG_PARENT_ARG_3
+            ASM_MOV_REG_REG(emit->as, REG_ARG_3, REG_PARENT_ARG_3);
+            #endif
+            #if REG_ARG_4 != REG_PARENT_ARG_4
+            ASM_MOV_REG_REG(emit->as, REG_ARG_4, REG_PARENT_ARG_4);
+            #endif
+
+            // Call mp_setup_code_state to prepare code_state structure
+            #if N_THUMB
+            asm_thumb_bl_ind(emit->as, MP_F_SETUP_CODE_STATE, ASM_THUMB_REG_R4);
+            #elif N_ARM
+            asm_arm_bl_ind(emit->as, MP_F_SETUP_CODE_STATE, ASM_ARM_REG_R4);
+            #else
+            ASM_CALL_IND(emit->as, MP_F_SETUP_CODE_STATE);
+            #endif
+        }
+
+        emit_native_global_exc_entry(emit);
+
+        // cache some locals in registers, but only if no exception handlers
+        if (CAN_USE_REGS_FOR_LOCALS(emit)) {
+            for (int i = 0; i < MAX_REGS_FOR_LOCAL_VARS && i < scope->num_locals; ++i) {
+                ASM_MOV_REG_LOCAL(emit->as, reg_local_table[i], LOCAL_IDX_LOCAL_VAR(emit, i));
+            }
+        }
+
+        // set the type of closed over variables
+        for (mp_uint_t i = 0; i < scope->id_info_len; i++) {
+            id_info_t *id = &scope->id_info[i];
+            if (id->kind == ID_INFO_KIND_CELL) {
+                emit->local_vtype[id->local_num] = VTYPE_PYOBJ;
+            }
+        }
+    }
+}
+
+// Append a raw byte to the bytecode-style prelude being written after the
+// machine code.
+static inline void emit_native_write_code_info_byte(emit_t *emit, byte val) {
+    mp_asm_base_data(&emit->as->base, 1, val);
+}
+
+// Append a qstr to the prelude, encoded as a variable-length index into the
+// compilation's qstr table.
+static inline void emit_native_write_code_info_qstr(emit_t *emit, qstr qst) {
+    mp_encode_uint(&emit->as->base, mp_asm_base_get_cur_to_write_bytes, mp_emit_common_use_qstr(emit->emit_common, qst));
+}
+
+// Finish the current pass: emit the global exception-handler exit, append the
+// bytecode-compatible prelude (non-viper only), verify the modelled stacks are
+// balanced, and on the final pass hand the finished code to the emit glue.
+static bool emit_native_end_pass(emit_t *emit) {
+    emit_native_global_exc_exit(emit);
+
+    if (!emit->do_viper_types) {
+        emit->prelude_offset = mp_asm_base_get_code_pos(&emit->as->base);
+        emit->prelude_ptr_index = emit->emit_common->ct_cur_child;
+
+        size_t n_state = emit->n_state;
+        size_t n_exc_stack = 0; // exc-stack not needed for native code
+        MP_BC_PRELUDE_SIG_ENCODE(n_state, n_exc_stack, emit->scope, emit_native_write_code_info_byte, emit);
+
+        size_t n_info = emit->n_info;
+        size_t n_cell = emit->n_cell;
+        MP_BC_PRELUDE_SIZE_ENCODE(n_info, n_cell, emit_native_write_code_info_byte, emit);
+
+        // bytecode prelude: source info (function and argument qstrs)
+        size_t info_start = mp_asm_base_get_code_pos(&emit->as->base);
+        emit_native_write_code_info_qstr(emit, emit->scope->simple_name);
+        for (int i = 0; i < emit->scope->num_pos_args + emit->scope->num_kwonly_args; i++) {
+            // Default to "*" for parameters without a matching id (e.g. bare *)
+            qstr qst = MP_QSTR__star_;
+            for (int j = 0; j < emit->scope->id_info_len; ++j) {
+                id_info_t *id = &emit->scope->id_info[j];
+                if ((id->flags & ID_FLAG_IS_PARAM) && id->local_num == i) {
+                    qst = id->qst;
+                    break;
+                }
+            }
+            emit_native_write_code_info_qstr(emit, qst);
+        }
+        emit->n_info = mp_asm_base_get_code_pos(&emit->as->base) - info_start;
+
+        // bytecode prelude: initialise closed over variables
+        size_t cell_start = mp_asm_base_get_code_pos(&emit->as->base);
+        for (int i = 0; i < emit->scope->id_info_len; i++) {
+            id_info_t *id = &emit->scope->id_info[i];
+            if (id->kind == ID_INFO_KIND_CELL) {
+                assert(id->local_num <= 255);
+                mp_asm_base_data(&emit->as->base, 1, id->local_num); // write the local which should be converted to a cell
+            }
+        }
+        emit->n_cell = mp_asm_base_get_code_pos(&emit->as->base) - cell_start;
+
+    }
+
+    ASM_END_PASS(emit->as);
+
+    // check stack is back to zero size
+    assert(emit->stack_size == 0);
+    assert(emit->exc_stack_size == 0);
+
+    if (emit->pass == MP_PASS_EMIT) {
+        void *f = mp_asm_base_get_code(&emit->as->base);
+        mp_uint_t f_len = mp_asm_base_get_code_size(&emit->as->base);
+
+        mp_raw_code_t **children = emit->emit_common->children;
+        if (!emit->do_viper_types) {
+            #if MICROPY_EMIT_NATIVE_PRELUDE_SEPARATE_FROM_MACHINE_CODE
+            // Executable code cannot be accessed byte-wise on this architecture, so copy
+            // the prelude to a separate memory region that is byte-wise readable.
+            void *buf = emit->as->base.code_base + emit->prelude_offset;
+            size_t n = emit->as->base.code_offset - emit->prelude_offset;
+            const uint8_t *prelude_ptr = memcpy(m_new(uint8_t, n), buf, n);
+            #else
+            // Point to the prelude directly, at the end of the machine code data.
+            const uint8_t *prelude_ptr = (const uint8_t *)f + emit->prelude_offset;
+            #endif
+
+            // Store the pointer to the prelude using the child_table.
+            assert(emit->prelude_ptr_index == emit->emit_common->ct_cur_child);
+            if (emit->prelude_ptr_index == 0) {
+                children = (void *)prelude_ptr;
+            } else {
+                children = m_renew(mp_raw_code_t *, children, emit->prelude_ptr_index, emit->prelude_ptr_index + 1);
+                children[emit->prelude_ptr_index] = (void *)prelude_ptr;
+            }
+        }
+
+        mp_emit_glue_assign_native(emit->scope->raw_code,
+            emit->do_viper_types ? MP_CODE_NATIVE_VIPER : MP_CODE_NATIVE_PY,
+            f, f_len,
+            children,
+            #if MICROPY_PERSISTENT_CODE_SAVE
+            emit->emit_common->ct_cur_child,
+            emit->prelude_offset,
+            #endif
+            emit->scope->scope_flags, 0, 0);
+    }
+
+    return true;
+}
+
+// Grow the stack_info array if pushing delta more entries would exceed its
+// capacity.  The new capacity adds 8 then clears the low two bits, leaving a
+// few entries of headroom to amortise reallocations.
+static void ensure_extra_stack(emit_t *emit, size_t delta) {
+    if (emit->stack_size + delta > emit->stack_info_alloc) {
+        size_t new_alloc = (emit->stack_size + delta + 8) & ~3;
+        emit->stack_info = m_renew(stack_info_t, emit->stack_info, emit->stack_info_alloc, new_alloc);
+        emit->stack_info_alloc = new_alloc;
+    }
+}
+
+// Change the logical Python stack depth by stack_size_delta, recording the
+// maximum depth seen in scope->stack_size (used later to size the state).
+static void adjust_stack(emit_t *emit, mp_int_t stack_size_delta) {
+    assert((mp_int_t)emit->stack_size + stack_size_delta >= 0);
+    assert((mp_int_t)emit->stack_size + stack_size_delta <= (mp_int_t)emit->stack_info_alloc);
+    emit->stack_size += stack_size_delta;
+    if (emit->pass > MP_PASS_SCOPE && emit->stack_size > emit->scope->stack_size) {
+        emit->scope->stack_size = emit->stack_size;
+    }
+    #if DEBUG_PRINT
+    DEBUG_printf("  adjust_stack; stack_size=%d+%d; stack now:", emit->stack_size - stack_size_delta, stack_size_delta);
+    for (int i = 0; i < emit->stack_size; i++) {
+        stack_info_t *si = &emit->stack_info[i];
+        DEBUG_printf(" (v=%d k=%d %d)", si->vtype, si->kind, si->data.u_reg);
+    }
+    DEBUG_printf("\n");
+    #endif
+}
+
+// Adjust the modelled stack size by delta, filling in placeholder entries for
+// newly-pushed slots (used where control flow joins re-synchronise the model).
+static void emit_native_adjust_stack_size(emit_t *emit, mp_int_t delta) {
+    DEBUG_printf("adjust_stack_size(" INT_FMT ")\n", delta);
+    if (delta > 0) {
+        ensure_extra_stack(emit, delta);
+    }
+    // If we are adjusting the stack in a positive direction (pushing) then we
+    // need to fill in values for the stack kind and vtype of the newly-pushed
+    // entries.  These should be set to "value" (ie not reg or imm) because we
+    // should only need to adjust the stack due to a jump to this part in the
+    // code (and hence we have settled the stack before the jump).
+    for (mp_int_t i = 0; i < delta; i++) {
+        stack_info_t *si = &emit->stack_info[emit->stack_size + i];
+        si->kind = STACK_VALUE;
+        // TODO we don't know the vtype to use here.  At the moment this is a
+        // hack to get the case of multi comparison working.
+        if (delta == 1) {
+            si->vtype = emit->saved_stack_vtype;
+        } else {
+            si->vtype = VTYPE_PYOBJ;
+        }
+    }
+    adjust_stack(emit, delta);
+}
+
+// Record the current source line — a no-op: the native emitter does not store
+// line-number information.
+static void emit_native_set_source_line(emit_t *emit, mp_uint_t source_line) {
+    (void)emit;
+    (void)source_line;
+}
+
+// this must be called at start of emit functions (currently a no-op)
+static void emit_native_pre(emit_t *emit) {
+    (void)emit;
+}
+
+// depth==0 is top, depth==1 is before top, etc
+// Returns a pointer into stack_info; it is invalidated if the array is
+// reallocated (see ensure_extra_stack).
+static stack_info_t *peek_stack(emit_t *emit, mp_uint_t depth) {
+    return &emit->stack_info[emit->stack_size - 1 - depth];
+}
+
+// depth==0 is top, depth==1 is before top, etc
+// Without viper typing every stacked value is conceptually a Python object,
+// regardless of how it is currently stored.
+static vtype_kind_t peek_vtype(emit_t *emit, mp_uint_t depth) {
+    return emit->do_viper_types ? peek_stack(emit, depth)->vtype : VTYPE_PYOBJ;
+}
+
+// pos=1 is TOS, pos=2 is next, etc
+// use pos=0 for no skipping
+// Free up reg_needed for use: any stack entry other than the one at
+// skip_stack_pos that is cached in that register is spilled to its concrete
+// stack slot.
+static void need_reg_single(emit_t *emit, int reg_needed, int skip_stack_pos) {
+    skip_stack_pos = emit->stack_size - skip_stack_pos;
+    for (int i = 0; i < emit->stack_size; i++) {
+        if (i != skip_stack_pos) {
+            stack_info_t *si = &emit->stack_info[i];
+            if (si->kind == STACK_REG && si->data.u_reg == reg_needed) {
+                si->kind = STACK_VALUE;
+                emit_native_mov_state_reg(emit, emit->stack_start + i, si->data.u_reg);
+            }
+        }
+    }
+}
+
+// Ensures all unsettled registers that hold Python values are copied to the
+// concrete Python stack.  All registers are then free to use.
+static void need_reg_all(emit_t *emit) {
+    for (int i = 0; i < emit->stack_size; i++) {
+        stack_info_t *si = &emit->stack_info[i];
+        if (si->kind == STACK_REG) {
+            DEBUG_printf("    reg(%u) to local(%u)\n", si->data.u_reg, emit->stack_start + i);
+            si->kind = STACK_VALUE;
+            emit_native_mov_state_reg(emit, emit->stack_start + i, si->data.u_reg);
+        }
+    }
+}
+
+// Load an immediate stack entry into reg_dest and return the resulting vtype.
+// In viper mode (unless convert_to_pyobj is set) the raw machine value is
+// loaded and the entry's own vtype returned.  Otherwise the immediate is
+// boxed into a Python object (bool/small-int/None) and VTYPE_PYOBJ returned.
+static vtype_kind_t load_reg_stack_imm(emit_t *emit, int reg_dest, const stack_info_t *si, bool convert_to_pyobj) {
+    if (!convert_to_pyobj && emit->do_viper_types) {
+        ASM_MOV_REG_IMM(emit->as, reg_dest, si->data.u_imm);
+        return si->vtype;
+    } else {
+        if (si->vtype == VTYPE_PYOBJ) {
+            ASM_MOV_REG_IMM(emit->as, reg_dest, si->data.u_imm);
+        } else if (si->vtype == VTYPE_BOOL) {
+            // u_imm is 0 or 1, selecting mp_const_false/true from the fun table
+            emit_native_mov_reg_const(emit, reg_dest, MP_F_CONST_FALSE_OBJ + si->data.u_imm);
+        } else if (si->vtype == VTYPE_INT || si->vtype == VTYPE_UINT) {
+            ASM_MOV_REG_IMM(emit->as, reg_dest, (uintptr_t)MP_OBJ_NEW_SMALL_INT(si->data.u_imm));
+        } else if (si->vtype == VTYPE_PTR_NONE) {
+            emit_native_mov_reg_const(emit, reg_dest, MP_F_CONST_NONE_OBJ);
+        } else {
+            mp_raise_NotImplementedError(MP_ERROR_TEXT("conversion to object"));
+        }
+        return VTYPE_PYOBJ;
+    }
+}
+
+// Copies all unsettled registers and immediates that are Python values into the
+// concrete Python stack.  This ensures the concrete Python stack holds valid
+// values for the current stack_size.
+// This function may clobber REG_TEMP1.
+static void need_stack_settled(emit_t *emit) {
+    DEBUG_printf("  need_stack_settled; stack_size=%d\n", emit->stack_size);
+    need_reg_all(emit);
+    for (int i = 0; i < emit->stack_size; i++) {
+        stack_info_t *si = &emit->stack_info[i];
+        if (si->kind == STACK_IMM) {
+            DEBUG_printf("    imm(" INT_FMT ") to local(%u)\n", si->data.u_imm, emit->stack_start + i);
+            si->kind = STACK_VALUE;
+            // using REG_TEMP1 to avoid clobbering REG_TEMP0 (aka REG_RET)
+            si->vtype = load_reg_stack_imm(emit, REG_TEMP1, si, false);
+            emit_native_mov_state_reg(emit, emit->stack_start + i, REG_TEMP1);
+        }
+    }
+}
+
+// pos=1 is TOS, pos=2 is next, etc
+// Copy the stack entry at depth pos into reg_dest without popping it, first
+// spilling any other entry cached in reg_dest.  *vtype receives its type.
+static void emit_access_stack(emit_t *emit, int pos, vtype_kind_t *vtype, int reg_dest) {
+    need_reg_single(emit, reg_dest, pos);
+    stack_info_t *si = &emit->stack_info[emit->stack_size - pos];
+    *vtype = si->vtype;
+    switch (si->kind) {
+        case STACK_VALUE:
+            // value lives in its concrete stack slot; load it
+            emit_native_mov_reg_state(emit, reg_dest, emit->stack_start + emit->stack_size - pos);
+            break;
+
+        case STACK_REG:
+            if (si->data.u_reg != reg_dest) {
+                ASM_MOV_REG_REG(emit->as, reg_dest, si->data.u_reg);
+            }
+            break;
+
+        case STACK_IMM:
+            *vtype = load_reg_stack_imm(emit, reg_dest, si, false);
+            break;
+    }
+}
+
+// does an efficient X=pop(); discard(); push(X)
+// needs a (non-temp) register in case the popped element was stored in the stack
+static void emit_fold_stack_top(emit_t *emit, int reg_dest) {
+    stack_info_t *si = &emit->stack_info[emit->stack_size - 2];
+    si[0] = si[1]; // overwrite TOS1's entry with TOS's
+    if (si->kind == STACK_VALUE) {
+        // if folded element was on the stack we need to put it in a register
+        emit_native_mov_reg_state(emit, reg_dest, emit->stack_start + emit->stack_size - 1);
+        si->kind = STACK_REG;
+        si->data.u_reg = reg_dest;
+    }
+    adjust_stack(emit, -1);
+}
+
+// If stacked value is in a register and the register is not r1 or r2, then
+// *reg_dest is set to that register.  Otherwise the value is put in *reg_dest.
+static void emit_pre_pop_reg_flexible(emit_t *emit, vtype_kind_t *vtype, int *reg_dest, int not_r1, int not_r2) {
+    stack_info_t *si = peek_stack(emit, 0);
+    if (si->kind == STACK_REG && si->data.u_reg != not_r1 && si->data.u_reg != not_r2) {
+        // value already cached in an acceptable register: use it in place
+        *vtype = si->vtype;
+        *reg_dest = si->data.u_reg;
+        need_reg_single(emit, *reg_dest, 1);
+    } else {
+        emit_access_stack(emit, 1, vtype, *reg_dest);
+    }
+    adjust_stack(emit, -1);
+}
+
+// Drop TOS without materialising it.
+static void emit_pre_pop_discard(emit_t *emit) {
+    adjust_stack(emit, -1);
+}
+
+// Pop TOS into reg_dest; *vtype receives its type.
+static void emit_pre_pop_reg(emit_t *emit, vtype_kind_t *vtype, int reg_dest) {
+    emit_access_stack(emit, 1, vtype, reg_dest);
+    adjust_stack(emit, -1);
+}
+
+// Pop the top two values: rega gets TOS, regb gets the one beneath it.
+static void emit_pre_pop_reg_reg(emit_t *emit, vtype_kind_t *vtypea, int rega, vtype_kind_t *vtypeb, int regb) {
+    emit_pre_pop_reg(emit, vtypea, rega);
+    emit_pre_pop_reg(emit, vtypeb, regb);
+}
+
+// Pop the top three values: rega gets TOS, regc gets the deepest.
+static void emit_pre_pop_reg_reg_reg(emit_t *emit, vtype_kind_t *vtypea, int rega, vtype_kind_t *vtypeb, int regb, vtype_kind_t *vtypec, int regc) {
+    emit_pre_pop_reg_reg(emit, vtypea, rega, vtypeb, regb);
+    emit_pre_pop_reg(emit, vtypec, regc);
+}
+
// Hook invoked after emitting an opcode; currently a no-op.
static void emit_post(emit_t *emit) {
    (void)emit; // unused
}
+
+static void emit_post_top_set_vtype(emit_t *emit, vtype_kind_t new_vtype) {
+    stack_info_t *si = &emit->stack_info[emit->stack_size - 1];
+    si->vtype = new_vtype;
+}
+
// Push a value held in a register onto the emitter's view of the Python stack.
static void emit_post_push_reg(emit_t *emit, vtype_kind_t vtype, int reg) {
    ensure_extra_stack(emit, 1);
    // fill in the entry just above the current top, then grow the stack
    stack_info_t *si = &emit->stack_info[emit->stack_size];
    si->vtype = vtype;
    si->kind = STACK_REG;
    si->data.u_reg = reg;
    adjust_stack(emit, 1);
}
+
+static void emit_post_push_imm(emit_t *emit, vtype_kind_t vtype, mp_int_t imm) {
+    ensure_extra_stack(emit, 1);
+    stack_info_t *si = &emit->stack_info[emit->stack_size];
+    si->vtype = vtype;
+    si->kind = STACK_IMM;
+    si->data.u_imm = imm;
+    adjust_stack(emit, 1);
+}
+
+static void emit_post_push_reg_reg(emit_t *emit, vtype_kind_t vtypea, int rega, vtype_kind_t vtypeb, int regb) {
+    emit_post_push_reg(emit, vtypea, rega);
+    emit_post_push_reg(emit, vtypeb, regb);
+}
+
+static void emit_post_push_reg_reg_reg(emit_t *emit, vtype_kind_t vtypea, int rega, vtype_kind_t vtypeb, int regb, vtype_kind_t vtypec, int regc) {
+    emit_post_push_reg(emit, vtypea, rega);
+    emit_post_push_reg(emit, vtypeb, regb);
+    emit_post_push_reg(emit, vtypec, regc);
+}
+
+static void emit_post_push_reg_reg_reg_reg(emit_t *emit, vtype_kind_t vtypea, int rega, vtype_kind_t vtypeb, int regb, vtype_kind_t vtypec, int regc, vtype_kind_t vtyped, int regd) {
+    emit_post_push_reg(emit, vtypea, rega);
+    emit_post_push_reg(emit, vtypeb, regb);
+    emit_post_push_reg(emit, vtypec, regc);
+    emit_post_push_reg(emit, vtyped, regd);
+}
+
// Emit an indirect call to the runtime helper identified by fun_kind.
// All live register values are flushed first because the call clobbers registers.
static void emit_call(emit_t *emit, mp_fun_kind_t fun_kind) {
    need_reg_all(emit);
    ASM_CALL_IND(emit->as, fun_kind);
}
+
// Emit a call to runtime helper fun_kind, first loading the immediate
// arg_val into the argument register arg_reg.
static void emit_call_with_imm_arg(emit_t *emit, mp_fun_kind_t fun_kind, mp_int_t arg_val, int arg_reg) {
    need_reg_all(emit);
    ASM_MOV_REG_IMM(emit->as, arg_reg, arg_val);
    ASM_CALL_IND(emit->as, fun_kind);
}
+
// Emit a call to runtime helper fun_kind with two immediate arguments,
// loaded into arg_reg1 and arg_reg2 respectively.
static void emit_call_with_2_imm_args(emit_t *emit, mp_fun_kind_t fun_kind, mp_int_t arg_val1, int arg_reg1, mp_int_t arg_val2, int arg_reg2) {
    need_reg_all(emit);
    ASM_MOV_REG_IMM(emit->as, arg_reg1, arg_val1);
    ASM_MOV_REG_IMM(emit->as, arg_reg2, arg_val2);
    ASM_CALL_IND(emit->as, fun_kind);
}
+
// Emit a call to runtime helper fun_kind, first loading the qstr qst
// into the argument register arg_reg.
static void emit_call_with_qstr_arg(emit_t *emit, mp_fun_kind_t fun_kind, qstr qst, int arg_reg) {
    need_reg_all(emit);
    emit_native_mov_reg_qstr(emit, arg_reg, qst);
    ASM_CALL_IND(emit->as, fun_kind);
}
+
// vtype of all n_pop objects is VTYPE_PYOBJ
// Will convert any items that are not VTYPE_PYOBJ to this type and put them back on the stack.
// If any conversions of non-immediate values are needed, then it uses REG_ARG_1, REG_ARG_2 and REG_RET.
// Otherwise, it does not use any temporary registers (but may use reg_dest before loading it with stack pointer).
static void emit_get_stack_pointer_to_reg_for_pop(emit_t *emit, mp_uint_t reg_dest, mp_uint_t n_pop) {
    need_reg_all(emit);

    // First, store any immediate values to their respective place on the stack.
    for (mp_uint_t i = 0; i < n_pop; i++) {
        stack_info_t *si = &emit->stack_info[emit->stack_size - 1 - i];
        // must push any imm's to stack
        // must convert them to VTYPE_PYOBJ for viper code
        if (si->kind == STACK_IMM) {
            si->kind = STACK_VALUE;
            si->vtype = load_reg_stack_imm(emit, reg_dest, si, true);
            emit_native_mov_state_reg(emit, emit->stack_start + emit->stack_size - 1 - i, reg_dest);
        }

        // verify that this value is on the stack
        assert(si->kind == STACK_VALUE);
    }

    // Second, convert any non-VTYPE_PYOBJ to that type.
    for (mp_uint_t i = 0; i < n_pop; i++) {
        stack_info_t *si = &emit->stack_info[emit->stack_size - 1 - i];
        if (si->vtype != VTYPE_PYOBJ) {
            mp_uint_t local_num = emit->stack_start + emit->stack_size - 1 - i;
            emit_native_mov_reg_state(emit, REG_ARG_1, local_num);
            emit_call_with_imm_arg(emit, MP_F_CONVERT_NATIVE_TO_OBJ, si->vtype, REG_ARG_2); // arg2 = type
            emit_native_mov_state_reg(emit, local_num, REG_RET);
            si->vtype = VTYPE_PYOBJ;
            DEBUG_printf("  convert_native_to_obj(local_num=" UINT_FMT ")\n", local_num);
        }
    }

    // Adjust the stack for a pop of n_pop items, and load the stack pointer into reg_dest.
    adjust_stack(emit, -n_pop);
    emit_native_mov_reg_state_addr(emit, reg_dest, emit->stack_start + emit->stack_size);
}
+
// vtype of all n_push objects is VTYPE_PYOBJ
// Reserves n_push slots on the emitter stack and loads a pointer to the first
// reserved slot into reg_dest, so the callee can write the pushed values directly.
static void emit_get_stack_pointer_to_reg_for_push(emit_t *emit, mp_uint_t reg_dest, mp_uint_t n_push) {
    need_reg_all(emit);
    ensure_extra_stack(emit, n_push);
    // mark the new slots as stack-resident Python objects
    for (mp_uint_t i = 0; i < n_push; i++) {
        emit->stack_info[emit->stack_size + i].kind = STACK_VALUE;
        emit->stack_info[emit->stack_size + i].vtype = VTYPE_PYOBJ;
    }
    emit_native_mov_reg_state_addr(emit, reg_dest, emit->stack_start + emit->stack_size);
    adjust_stack(emit, n_push);
}
+
// Push a new entry onto the emitter's exception-handler stack, and emit code
// that records the handler's PC (resolved from label) in the exc-handler-PC local.
static void emit_native_push_exc_stack(emit_t *emit, uint label, bool is_finally) {
    // grow the exc_stack allocation in increments of 4 entries as needed
    if (emit->exc_stack_size + 1 > emit->exc_stack_alloc) {
        size_t new_alloc = emit->exc_stack_alloc + 4;
        emit->exc_stack = m_renew(exc_stack_entry_t, emit->exc_stack, emit->exc_stack_alloc, new_alloc);
        emit->exc_stack_alloc = new_alloc;
    }

    exc_stack_entry_t *e = &emit->exc_stack[emit->exc_stack_size++];
    e->label = label;
    e->is_finally = is_finally;
    e->unwind_label = UNWIND_LABEL_UNUSED;
    e->is_active = true;

    // emit code: exc_handler_pc_local = &label
    ASM_MOV_REG_PCREL(emit->as, REG_RET, label);
    ASM_MOV_LOCAL_REG(emit->as, LOCAL_IDX_EXC_HANDLER_PC(emit), REG_RET);
}
+
// Deactivate the current (innermost) exception handler and emit code that makes
// the next innermost active handler (if any) the current one.
static void emit_native_leave_exc_stack(emit_t *emit, bool start_of_handler) {
    assert(emit->exc_stack_size > 0);

    // Get current exception handler and deactivate it
    exc_stack_entry_t *e = &emit->exc_stack[emit->exc_stack_size - 1];
    e->is_active = false;

    // Find next innermost active exception handler, to restore as current handler
    for (--e; e >= emit->exc_stack && !e->is_active; --e) {
    }

    // Update the PC of the new exception handler
    if (e < emit->exc_stack) {
        // No active handler, clear handler PC to zero
        if (start_of_handler) {
            // Optimisation: PC is already cleared by global exc handler
            return;
        }
        ASM_XOR_REG_REG(emit->as, REG_RET, REG_RET); // REG_RET = 0
    } else {
        // Found new active handler, get its PC
        ASM_MOV_REG_PCREL(emit->as, REG_RET, e->label);
    }
    ASM_MOV_LOCAL_REG(emit->as, LOCAL_IDX_EXC_HANDLER_PC(emit), REG_RET);
}
+
// Pop and return the top exception-handler entry; it must already be deactivated
// (by a prior emit_native_leave_exc_stack call).
static exc_stack_entry_t *emit_native_pop_exc_stack(emit_t *emit) {
    assert(emit->exc_stack_size > 0);
    exc_stack_entry_t *e = &emit->exc_stack[--emit->exc_stack_size];
    assert(e->is_active == false);
    return e;
}
+
// Emit code to load the constant object obj into reg, by indexing the
// function's constant-object table (fun_obj -> context -> obj_table[table_off]).
static void emit_load_reg_with_object(emit_t *emit, int reg, mp_obj_t obj) {
    emit->scope->scope_flags |= MP_SCOPE_FLAG_HASCONSTS;
    size_t table_off = mp_emit_common_use_const_obj(emit->emit_common, obj);
    emit_native_mov_reg_state(emit, REG_TEMP0, LOCAL_IDX_FUN_OBJ(emit));
    ASM_LOAD_REG_REG_OFFSET(emit->as, REG_TEMP0, REG_TEMP0, OFFSETOF_OBJ_FUN_BC_CONTEXT);
    ASM_LOAD_REG_REG_OFFSET(emit->as, REG_TEMP0, REG_TEMP0, OFFSETOF_MODULE_CONTEXT_OBJ_TABLE);
    ASM_LOAD_REG_REG_OFFSET(emit->as, reg, REG_TEMP0, table_off);
}
+
// Emit code to load the child raw-code rc into reg, by indexing the
// function's child table (fun_obj -> child_table[table_off]).
static void emit_load_reg_with_child(emit_t *emit, int reg, mp_raw_code_t *rc) {
    size_t table_off = mp_emit_common_alloc_const_child(emit->emit_common, rc);
    emit_native_mov_reg_state(emit, REG_TEMP0, LOCAL_IDX_FUN_OBJ(emit));
    ASM_LOAD_REG_REG_OFFSET(emit->as, REG_TEMP0, REG_TEMP0, OFFSETOF_OBJ_FUN_BC_CHILD_TABLE);
    ASM_LOAD_REG_REG_OFFSET(emit->as, reg, REG_TEMP0, table_off);
}
+
// Assign label l to the current code position, with special handling when the
// label marks the start of a finally handler (store TOS exception, pop exc stack).
static void emit_native_label_assign(emit_t *emit, mp_uint_t l) {
    DEBUG_printf("label_assign(" UINT_FMT ")\n", l);

    // detect whether this label is the entry of the innermost finally handler
    bool is_finally = false;
    if (emit->exc_stack_size > 0) {
        exc_stack_entry_t *e = &emit->exc_stack[emit->exc_stack_size - 1];
        is_finally = e->is_finally && e->label == l;
    }

    if (is_finally) {
        // Label is at start of finally handler: store TOS into exception slot
        vtype_kind_t vtype;
        emit_pre_pop_reg(emit, &vtype, REG_TEMP0);
        ASM_MOV_LOCAL_REG(emit->as, LOCAL_IDX_EXC_VAL(emit), REG_TEMP0);
    }

    emit_native_pre(emit);
    // need to commit stack because we can jump here from elsewhere
    need_stack_settled(emit);
    mp_asm_base_label_assign(&emit->as->base, l);
    emit_post(emit);

    if (is_finally) {
        // Label is at start of finally handler: pop exception stack
        emit_native_leave_exc_stack(emit, false);
    }
}
+
// Emit the function prologue that sets up the global exception handler:
// swaps in the function's globals, wraps the body in an nlr context when
// needed, and (for generators) dispatches to the resume point.
static void emit_native_global_exc_entry(emit_t *emit) {
    // Note: 4 labels are reserved for this function, starting at *emit->label_slot

    emit->exit_label = *emit->label_slot;

    if (NEED_GLOBAL_EXC_HANDLER(emit)) {
        mp_uint_t nlr_label = *emit->label_slot + 1;
        mp_uint_t start_label = *emit->label_slot + 2;
        mp_uint_t global_except_label = *emit->label_slot + 3;

        if (!(emit->scope->scope_flags & MP_SCOPE_FLAG_GENERATOR)) {
            // Set new globals
            emit_native_mov_reg_state(emit, REG_ARG_1, LOCAL_IDX_FUN_OBJ(emit));
            ASM_LOAD_REG_REG_OFFSET(emit->as, REG_ARG_1, REG_ARG_1, OFFSETOF_OBJ_FUN_BC_CONTEXT);
            ASM_LOAD_REG_REG_OFFSET(emit->as, REG_ARG_1, REG_ARG_1, OFFSETOF_MODULE_CONTEXT_GLOBALS);
            emit_call(emit, MP_F_NATIVE_SWAP_GLOBALS);

            // Save old globals (or NULL if globals didn't change)
            emit_native_mov_state_reg(emit, LOCAL_IDX_OLD_GLOBALS(emit), REG_RET);
        }

        if (emit->scope->exc_stack_size == 0) {
            // Function has no try/except blocks of its own
            if (!(emit->scope->scope_flags & MP_SCOPE_FLAG_GENERATOR)) {
                // Optimisation: if globals didn't change don't push the nlr context
                ASM_JUMP_IF_REG_ZERO(emit->as, REG_RET, start_label, false);
            }

            // Wrap everything in an nlr context
            ASM_MOV_REG_LOCAL_ADDR(emit->as, REG_ARG_1, 0);
            emit_call(emit, MP_F_NLR_PUSH);
            #if N_NLR_SETJMP
            ASM_MOV_REG_LOCAL_ADDR(emit->as, REG_ARG_1, 2);
            emit_call(emit, MP_F_SETJMP);
            #endif
            ASM_JUMP_IF_REG_ZERO(emit->as, REG_RET, start_label, true);
        } else {
            // Function has its own exception handlers; full dispatch needed

            // Clear the unwind state
            ASM_XOR_REG_REG(emit->as, REG_TEMP0, REG_TEMP0);
            ASM_MOV_LOCAL_REG(emit->as, LOCAL_IDX_EXC_HANDLER_UNWIND(emit), REG_TEMP0);

            // Put PC of start code block into REG_LOCAL_1
            ASM_MOV_REG_PCREL(emit->as, REG_LOCAL_1, start_label);

            // Wrap everything in an nlr context
            emit_native_label_assign(emit, nlr_label);
            ASM_MOV_REG_LOCAL_ADDR(emit->as, REG_ARG_1, 0);
            emit_call(emit, MP_F_NLR_PUSH);
            #if N_NLR_SETJMP
            ASM_MOV_REG_LOCAL_ADDR(emit->as, REG_ARG_1, 2);
            emit_call(emit, MP_F_SETJMP);
            #endif
            ASM_JUMP_IF_REG_NONZERO(emit->as, REG_RET, global_except_label, true);

            // Clear PC of current code block, and jump there to resume execution
            ASM_XOR_REG_REG(emit->as, REG_TEMP0, REG_TEMP0);
            ASM_MOV_LOCAL_REG(emit->as, LOCAL_IDX_EXC_HANDLER_PC(emit), REG_TEMP0);
            ASM_JUMP_REG(emit->as, REG_LOCAL_1);

            // Global exception handler: check for valid exception handler
            emit_native_label_assign(emit, global_except_label);
            ASM_MOV_REG_LOCAL(emit->as, REG_LOCAL_1, LOCAL_IDX_EXC_HANDLER_PC(emit));
            ASM_JUMP_IF_REG_NONZERO(emit->as, REG_LOCAL_1, nlr_label, false);
        }

        if (!(emit->scope->scope_flags & MP_SCOPE_FLAG_GENERATOR)) {
            // Restore old globals
            emit_native_mov_reg_state(emit, REG_ARG_1, LOCAL_IDX_OLD_GLOBALS(emit));
            emit_call(emit, MP_F_NATIVE_SWAP_GLOBALS);
        }

        if (emit->scope->scope_flags & MP_SCOPE_FLAG_GENERATOR) {
            // Store return value in state[0]
            ASM_MOV_REG_LOCAL(emit->as, REG_TEMP0, LOCAL_IDX_EXC_VAL(emit));
            ASM_STORE_REG_REG_OFFSET(emit->as, REG_TEMP0, REG_GENERATOR_STATE, OFFSETOF_CODE_STATE_STATE);

            // Load return kind
            ASM_MOV_REG_IMM(emit->as, REG_PARENT_RET, MP_VM_RETURN_EXCEPTION);

            ASM_EXIT(emit->as);
        } else {
            // Re-raise exception out to caller
            ASM_MOV_REG_LOCAL(emit->as, REG_ARG_1, LOCAL_IDX_EXC_VAL(emit));
            emit_call(emit, MP_F_NATIVE_RAISE);
        }

        // Label for start of function
        emit_native_label_assign(emit, start_label);

        if (emit->scope->scope_flags & MP_SCOPE_FLAG_GENERATOR) {
            // Generators jump to their stored resume PC
            emit_native_mov_reg_state(emit, REG_TEMP0, LOCAL_IDX_GEN_PC(emit));
            ASM_JUMP_REG(emit->as, REG_TEMP0);
            emit->start_offset = mp_asm_base_get_code_pos(&emit->as->base);

            // This is the first entry of the generator

            // Check LOCAL_IDX_EXC_VAL for any injected value
            ASM_MOV_REG_LOCAL(emit->as, REG_ARG_1, LOCAL_IDX_EXC_VAL(emit));
            emit_call(emit, MP_F_NATIVE_RAISE);
        }
    }
}
+
// Emit the function epilogue for the global exception handler: restore the
// caller's globals, pop the nlr context, load the return value and exit.
static void emit_native_global_exc_exit(emit_t *emit) {
    // Label for end of function
    emit_native_label_assign(emit, emit->exit_label);

    if (NEED_GLOBAL_EXC_HANDLER(emit)) {
        // Get old globals
        if (!(emit->scope->scope_flags & MP_SCOPE_FLAG_GENERATOR)) {
            emit_native_mov_reg_state(emit, REG_ARG_1, LOCAL_IDX_OLD_GLOBALS(emit));

            if (emit->scope->exc_stack_size == 0) {
                // Optimisation: if globals didn't change then don't restore them and don't do nlr_pop
                ASM_JUMP_IF_REG_ZERO(emit->as, REG_ARG_1, emit->exit_label + 1, false);
            }

            // Restore old globals
            emit_call(emit, MP_F_NATIVE_SWAP_GLOBALS);
        }

        // Pop the nlr context
        emit_call(emit, MP_F_NLR_POP);

        if (!(emit->scope->scope_flags & MP_SCOPE_FLAG_GENERATOR)) {
            if (emit->scope->exc_stack_size == 0) {
                // Destination label for above optimisation
                emit_native_label_assign(emit, emit->exit_label + 1);
            }
        }

        // Load return value
        ASM_MOV_REG_LOCAL(emit->as, REG_PARENT_RET, LOCAL_IDX_RET_VAL(emit));
    }

    ASM_EXIT(emit->as);
}
+
// Emit code for "import name": pops fromlist and level, calls the runtime
// import helper, and pushes the resulting module object.
static void emit_native_import_name(emit_t *emit, qstr qst) {
    DEBUG_printf("import_name %s\n", qstr_str(qst));

    // get arguments from stack: arg2 = fromlist, arg3 = level
    // If using viper types these arguments must be converted to proper objects, and
    // to accomplish this viper types are turned off for the emit_pre_pop_reg_reg call.
    bool orig_do_viper_types = emit->do_viper_types;
    emit->do_viper_types = false;
    vtype_kind_t vtype_fromlist;
    vtype_kind_t vtype_level;
    emit_pre_pop_reg_reg(emit, &vtype_fromlist, REG_ARG_2, &vtype_level, REG_ARG_3);
    assert(vtype_fromlist == VTYPE_PYOBJ);
    assert(vtype_level == VTYPE_PYOBJ);
    emit->do_viper_types = orig_do_viper_types;

    emit_call_with_qstr_arg(emit, MP_F_IMPORT_NAME, qst, REG_ARG_1); // arg1 = import name
    emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
}
+
// Emit code for "from module import name": peeks the module on the stack
// (leaving it there) and pushes the imported attribute.
static void emit_native_import_from(emit_t *emit, qstr qst) {
    DEBUG_printf("import_from %s\n", qstr_str(qst));
    emit_native_pre(emit);
    vtype_kind_t vtype_module;
    emit_access_stack(emit, 1, &vtype_module, REG_ARG_1); // arg1 = module
    assert(vtype_module == VTYPE_PYOBJ);
    emit_call_with_qstr_arg(emit, MP_F_IMPORT_FROM, qst, REG_ARG_2); // arg2 = import name
    emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
}
+
// Emit code for "from module import *": pops the module and calls the
// runtime helper that copies its public names into the current namespace.
static void emit_native_import_star(emit_t *emit) {
    DEBUG_printf("import_star\n");
    vtype_kind_t vtype_module;
    emit_pre_pop_reg(emit, &vtype_module, REG_ARG_1); // arg1 = module
    assert(vtype_module == VTYPE_PYOBJ);
    emit_call(emit, MP_F_IMPORT_ALL);
    emit_post(emit);
}
+
+static void emit_native_import(emit_t *emit, qstr qst, int kind) {
+    if (kind == MP_EMIT_IMPORT_NAME) {
+        emit_native_import_name(emit, qst);
+    } else if (kind == MP_EMIT_IMPORT_FROM) {
+        emit_native_import_from(emit, qst);
+    } else {
+        emit_native_import_star(emit);
+    }
+}
+
// Emit code to push a constant token: Ellipsis as a real object,
// None as a VTYPE_PTR_NONE immediate, True/False as VTYPE_BOOL immediates.
static void emit_native_load_const_tok(emit_t *emit, mp_token_kind_t tok) {
    DEBUG_printf("load_const_tok(tok=%u)\n", tok);
    if (tok == MP_TOKEN_ELLIPSIS) {
        emit_native_load_const_obj(emit, MP_OBJ_FROM_PTR(&mp_const_ellipsis_obj));
    } else {
        emit_native_pre(emit);
        if (tok == MP_TOKEN_KW_NONE) {
            emit_post_push_imm(emit, VTYPE_PTR_NONE, 0);
        } else {
            // True -> 1, False -> 0
            emit_post_push_imm(emit, VTYPE_BOOL, tok == MP_TOKEN_KW_FALSE ? 0 : 1);
        }
    }
}
+
// Emit code to push a small integer constant as a VTYPE_INT immediate.
static void emit_native_load_const_small_int(emit_t *emit, mp_int_t arg) {
    DEBUG_printf("load_const_small_int(int=" INT_FMT ")\n", arg);
    emit_native_pre(emit);
    emit_post_push_imm(emit, VTYPE_INT, arg);
}
+
// Emit code to push a string constant (as an interned-qstr Python object).
static void emit_native_load_const_str(emit_t *emit, qstr qst) {
    emit_native_pre(emit);
    // TODO: Eventually we want to be able to work with raw pointers in viper to
    // do native array access.  For now we just load them as any other object.
    /*
    if (emit->do_viper_types) {
        // load a pointer to the asciiz string?
        emit_post_push_imm(emit, VTYPE_PTR, (mp_uint_t)qstr_str(qst));
    } else
    */
    {
        need_reg_single(emit, REG_TEMP0, 0);
        emit_native_mov_reg_qstr_obj(emit, REG_TEMP0, qst);
        emit_post_push_reg(emit, VTYPE_PYOBJ, REG_TEMP0);
    }
}
+
// Emit code to push an arbitrary constant object, loaded via the
// function's constant-object table.
static void emit_native_load_const_obj(emit_t *emit, mp_obj_t obj) {
    emit_native_pre(emit);
    need_reg_single(emit, REG_RET, 0);
    emit_load_reg_with_object(emit, REG_RET, obj);
    emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
}
+
// Emit code to push MP_OBJ_NULL (represented as immediate 0 with PYOBJ type).
static void emit_native_load_null(emit_t *emit) {
    emit_native_pre(emit);
    emit_post_push_imm(emit, VTYPE_PYOBJ, 0);
}
+
// Emit code to push a fast (local) variable; it may live in a dedicated
// register or in the function's state area.
static void emit_native_load_fast(emit_t *emit, qstr qst, mp_uint_t local_num) {
    DEBUG_printf("load_fast(%s, " UINT_FMT ")\n", qstr_str(qst), local_num);
    vtype_kind_t vtype = emit->local_vtype[local_num];
    if (vtype == VTYPE_UNBOUND) {
        // viper mode: locals get their type from the first assignment
        EMIT_NATIVE_VIPER_TYPE_ERROR(emit, MP_ERROR_TEXT("local '%q' used before type known"), qst);
    }
    emit_native_pre(emit);
    if (local_num < MAX_REGS_FOR_LOCAL_VARS && CAN_USE_REGS_FOR_LOCALS(emit)) {
        // local lives permanently in a register
        emit_post_push_reg(emit, vtype, reg_local_table[local_num]);
    } else {
        // local lives in the state area; load it via a temporary register
        need_reg_single(emit, REG_TEMP0, 0);
        emit_native_mov_reg_state(emit, REG_TEMP0, LOCAL_IDX_LOCAL_VAR(emit, local_num));
        emit_post_push_reg(emit, vtype, REG_TEMP0);
    }
}
+
// Emit code to push a closed-over variable: load the cell object (stored as
// a fast local) and then dereference it to get the contained value.
static void emit_native_load_deref(emit_t *emit, qstr qst, mp_uint_t local_num) {
    DEBUG_printf("load_deref(%s, " UINT_FMT ")\n", qstr_str(qst), local_num);
    need_reg_single(emit, REG_RET, 0);
    emit_native_load_fast(emit, qst, local_num); // pushes the cell object
    vtype_kind_t vtype;
    int reg_base = REG_RET;
    emit_pre_pop_reg_flexible(emit, &vtype, &reg_base, -1, -1);
    ASM_LOAD_REG_REG_OFFSET(emit->as, REG_RET, reg_base, 1); // load cell contents
    // closed over vars are always Python objects
    emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
}
+
+static void emit_native_load_local(emit_t *emit, qstr qst, mp_uint_t local_num, int kind) {
+    if (kind == MP_EMIT_IDOP_LOCAL_FAST) {
+        emit_native_load_fast(emit, qst, local_num);
+    } else {
+        emit_native_load_deref(emit, qst, local_num);
+    }
+}
+
// Emit code to push a name/global lookup; in viper mode, builtin cast names
// (e.g. int/ptr8/...) are resolved at compile time instead.
static void emit_native_load_global(emit_t *emit, qstr qst, int kind) {
    // the function table is laid out so that kind can be added to MP_F_LOAD_NAME
    MP_STATIC_ASSERT(MP_F_LOAD_NAME + MP_EMIT_IDOP_GLOBAL_NAME == MP_F_LOAD_NAME);
    MP_STATIC_ASSERT(MP_F_LOAD_NAME + MP_EMIT_IDOP_GLOBAL_GLOBAL == MP_F_LOAD_GLOBAL);
    emit_native_pre(emit);
    if (kind == MP_EMIT_IDOP_GLOBAL_NAME) {
        DEBUG_printf("load_name(%s)\n", qstr_str(qst));
    } else {
        DEBUG_printf("load_global(%s)\n", qstr_str(qst));
        if (emit->do_viper_types) {
            // check for builtin casting operators
            int native_type = mp_native_type_from_qstr(qst);
            if (native_type >= MP_NATIVE_TYPE_BOOL) {
                emit_post_push_imm(emit, VTYPE_BUILTIN_CAST, native_type);
                return;
            }
        }
    }
    emit_call_with_qstr_arg(emit, MP_F_LOAD_NAME + kind, qst, REG_ARG_1);
    emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
}
+
// Emit code for attribute load: pops the base object and pushes base.qst.
// depends on type of subject:
//  - integer, function, pointer to integers: error
//  - pointer to structure: get member, quite easy
//  - Python object: call mp_load_attr, and needs to be typed to convert result
static void emit_native_load_attr(emit_t *emit, qstr qst) {
    vtype_kind_t vtype_base;
    emit_pre_pop_reg(emit, &vtype_base, REG_ARG_1); // arg1 = base
    assert(vtype_base == VTYPE_PYOBJ);
    emit_call_with_qstr_arg(emit, MP_F_LOAD_ATTR, qst, REG_ARG_2); // arg2 = attribute name
    emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
}
+
// Emit code for method load: replaces the base (or, for super, the 3 stacked
// super args) with the 2-slot method/self pair expected by the call opcodes.
static void emit_native_load_method(emit_t *emit, qstr qst, bool is_super) {
    if (is_super) {
        // the 3 super args are consumed and the same slots reused for the result pair
        emit_get_stack_pointer_to_reg_for_pop(emit, REG_ARG_2, 3); // arg2 = dest ptr
        emit_get_stack_pointer_to_reg_for_push(emit, REG_ARG_2, 2); // arg2 = dest ptr
        emit_call_with_qstr_arg(emit, MP_F_LOAD_SUPER_METHOD, qst, REG_ARG_1); // arg1 = method name
    } else {
        vtype_kind_t vtype_base;
        emit_pre_pop_reg(emit, &vtype_base, REG_ARG_1); // arg1 = base
        assert(vtype_base == VTYPE_PYOBJ);
        emit_get_stack_pointer_to_reg_for_push(emit, REG_ARG_3, 2); // arg3 = dest ptr
        emit_call_with_qstr_arg(emit, MP_F_LOAD_METHOD, qst, REG_ARG_2); // arg2 = method name
    }
}
+
// Emit code to push the __build_class__ builtin (used by class statements).
static void emit_native_load_build_class(emit_t *emit) {
    emit_native_pre(emit);
    emit_call(emit, MP_F_LOAD_BUILD_CLASS);
    emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
}
+
// Emit code for base[index] load.  For Python objects it calls the runtime
// subscript helper; for viper pointer types (ptr8/ptr16/ptr32) it emits a
// direct memory load, with a fast path when the index is a compile-time
// immediate.
static void emit_native_load_subscr(emit_t *emit) {
    DEBUG_printf("load_subscr\n");
    // need to compile: base[index]

    // pop: index, base
    // optimise case where index is an immediate
    vtype_kind_t vtype_base = peek_vtype(emit, 1);

    if (vtype_base == VTYPE_PYOBJ) {
        // standard Python subscr
        // TODO factor this implicit cast code with other uses of it
        vtype_kind_t vtype_index = peek_vtype(emit, 0);
        if (vtype_index == VTYPE_PYOBJ) {
            emit_pre_pop_reg(emit, &vtype_index, REG_ARG_2);
        } else {
            // convert native index value to a Python object first
            emit_pre_pop_reg(emit, &vtype_index, REG_ARG_1);
            emit_call_with_imm_arg(emit, MP_F_CONVERT_NATIVE_TO_OBJ, vtype_index, REG_ARG_2); // arg2 = type
            ASM_MOV_REG_REG(emit->as, REG_ARG_2, REG_RET);
        }
        emit_pre_pop_reg(emit, &vtype_base, REG_ARG_1);
        emit_call_with_imm_arg(emit, MP_F_OBJ_SUBSCR, (mp_uint_t)MP_OBJ_SENTINEL, REG_ARG_3);
        emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
    } else {
        // viper load
        // TODO The different machine architectures have very different
        // capabilities and requirements for loads, so probably best to
        // write a completely separate load-optimiser for each one.
        stack_info_t *top = peek_stack(emit, 0);
        if (top->vtype == VTYPE_INT && top->kind == STACK_IMM) {
            // index is an immediate
            mp_int_t index_value = top->data.u_imm;
            emit_pre_pop_discard(emit); // discard index
            int reg_base = REG_ARG_1;
            int reg_index = REG_ARG_2;
            emit_pre_pop_reg_flexible(emit, &vtype_base, &reg_base, reg_index, reg_index);
            need_reg_single(emit, REG_RET, 0);
            switch (vtype_base) {
                case VTYPE_PTR8: {
                    // pointer to 8-bit memory
                    // TODO optimise to use thumb ldrb r1, [r2, r3]
                    if (index_value != 0) {
                        // index is non-zero
                        #if N_THUMB
                        if (index_value > 0 && index_value < 32) {
                            asm_thumb_ldrb_rlo_rlo_i5(emit->as, REG_RET, reg_base, index_value);
                            break;
                        }
                        #endif
                        need_reg_single(emit, reg_index, 0);
                        ASM_MOV_REG_IMM(emit->as, reg_index, index_value);
                        ASM_ADD_REG_REG(emit->as, reg_index, reg_base); // add index to base
                        reg_base = reg_index;
                    }
                    ASM_LOAD8_REG_REG(emit->as, REG_RET, reg_base); // load from (base+index)
                    break;
                }
                case VTYPE_PTR16: {
                    // pointer to 16-bit memory
                    if (index_value != 0) {
                        // index is a non-zero immediate
                        #if N_THUMB
                        if (index_value > 0 && index_value < 32) {
                            asm_thumb_ldrh_rlo_rlo_i5(emit->as, REG_RET, reg_base, index_value);
                            break;
                        }
                        #endif
                        need_reg_single(emit, reg_index, 0);
                        ASM_MOV_REG_IMM(emit->as, reg_index, index_value << 1);
                        ASM_ADD_REG_REG(emit->as, reg_index, reg_base); // add 2*index to base
                        reg_base = reg_index;
                    }
                    ASM_LOAD16_REG_REG(emit->as, REG_RET, reg_base); // load from (base+2*index)
                    break;
                }
                case VTYPE_PTR32: {
                    // pointer to 32-bit memory
                    if (index_value != 0) {
                        // index is a non-zero immediate
                        #if N_THUMB
                        if (index_value > 0 && index_value < 32) {
                            asm_thumb_ldr_rlo_rlo_i5(emit->as, REG_RET, reg_base, index_value);
                            break;
                        }
                        #endif
                        need_reg_single(emit, reg_index, 0);
                        ASM_MOV_REG_IMM(emit->as, reg_index, index_value << 2);
                        ASM_ADD_REG_REG(emit->as, reg_index, reg_base); // add 4*index to base
                        reg_base = reg_index;
                    }
                    ASM_LOAD32_REG_REG(emit->as, REG_RET, reg_base); // load from (base+4*index)
                    break;
                }
                default:
                    EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
                        MP_ERROR_TEXT("can't load from '%q'"), vtype_to_qstr(vtype_base));
            }
        } else {
            // index is not an immediate
            vtype_kind_t vtype_index;
            int reg_index = REG_ARG_2;
            emit_pre_pop_reg_flexible(emit, &vtype_index, &reg_index, REG_ARG_1, REG_ARG_1);
            emit_pre_pop_reg(emit, &vtype_base, REG_ARG_1);
            need_reg_single(emit, REG_RET, 0);
            if (vtype_index != VTYPE_INT && vtype_index != VTYPE_UINT) {
                EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
                    MP_ERROR_TEXT("can't load with '%q' index"), vtype_to_qstr(vtype_index));
            }
            switch (vtype_base) {
                case VTYPE_PTR8: {
                    // pointer to 8-bit memory
                    // TODO optimise to use thumb ldrb r1, [r2, r3]
                    ASM_ADD_REG_REG(emit->as, REG_ARG_1, reg_index); // add index to base
                    ASM_LOAD8_REG_REG(emit->as, REG_RET, REG_ARG_1); // load from (base+index)
                    break;
                }
                case VTYPE_PTR16: {
                    // pointer to 16-bit memory
                    ASM_ADD_REG_REG(emit->as, REG_ARG_1, reg_index); // add index to base
                    ASM_ADD_REG_REG(emit->as, REG_ARG_1, reg_index); // add index again: base += 2*index total
                    ASM_LOAD16_REG_REG(emit->as, REG_RET, REG_ARG_1); // load from (base+2*index)
                    break;
                }
                case VTYPE_PTR32: {
                    // pointer to word-size memory
                    ASM_ADD_REG_REG(emit->as, REG_ARG_1, reg_index); // add index to base
                    ASM_ADD_REG_REG(emit->as, REG_ARG_1, reg_index); // add index again
                    ASM_ADD_REG_REG(emit->as, REG_ARG_1, reg_index); // add index again
                    ASM_ADD_REG_REG(emit->as, REG_ARG_1, reg_index); // add index again: base += 4*index total
                    ASM_LOAD32_REG_REG(emit->as, REG_RET, REG_ARG_1); // load from (base+4*index)
                    break;
                }
                default:
                    EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
                        MP_ERROR_TEXT("can't load from '%q'"), vtype_to_qstr(vtype_base));
            }
        }
        emit_post_push_reg(emit, VTYPE_INT, REG_RET);
    }
}
+
// Emit code to store TOS into a fast (local) variable, and in viper mode
// record/check the local's type against the stored value's type.
static void emit_native_store_fast(emit_t *emit, qstr qst, mp_uint_t local_num) {
    vtype_kind_t vtype;
    if (local_num < MAX_REGS_FOR_LOCAL_VARS && CAN_USE_REGS_FOR_LOCALS(emit)) {
        // local lives permanently in a register
        emit_pre_pop_reg(emit, &vtype, reg_local_table[local_num]);
    } else {
        // local lives in the state area
        emit_pre_pop_reg(emit, &vtype, REG_TEMP0);
        emit_native_mov_state_reg(emit, LOCAL_IDX_LOCAL_VAR(emit, local_num), REG_TEMP0);
    }
    emit_post(emit);

    // check types
    if (emit->local_vtype[local_num] == VTYPE_UNBOUND) {
        // first time this local is assigned, so give it a type of the object stored in it
        emit->local_vtype[local_num] = vtype;
    } else if (emit->local_vtype[local_num] != vtype) {
        // type of local is not the same as object stored in it
        EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
            MP_ERROR_TEXT("local '%q' has type '%q' but source is '%q'"),
            qst, vtype_to_qstr(emit->local_vtype[local_num]), vtype_to_qstr(vtype));
    }
}
+
// Emit code to store TOS into a closed-over variable: load the cell object
// (via the fast local) and write the value into the cell's contents slot.
static void emit_native_store_deref(emit_t *emit, qstr qst, mp_uint_t local_num) {
    DEBUG_printf("store_deref(%s, " UINT_FMT ")\n", qstr_str(qst), local_num);
    need_reg_single(emit, REG_TEMP0, 0);
    need_reg_single(emit, REG_TEMP1, 0);
    emit_native_load_fast(emit, qst, local_num); // pushes the cell object
    vtype_kind_t vtype;
    int reg_base = REG_TEMP0;
    emit_pre_pop_reg_flexible(emit, &vtype, &reg_base, -1, -1);
    int reg_src = REG_TEMP1;
    emit_pre_pop_reg_flexible(emit, &vtype, &reg_src, reg_base, reg_base);
    ASM_STORE_REG_REG_OFFSET(emit->as, reg_src, reg_base, 1); // write cell contents
    emit_post(emit);
}
+
+static void emit_native_store_local(emit_t *emit, qstr qst, mp_uint_t local_num, int kind) {
+    if (kind == MP_EMIT_IDOP_LOCAL_FAST) {
+        emit_native_store_fast(emit, qst, local_num);
+    } else {
+        emit_native_store_deref(emit, qst, local_num);
+    }
+}
+
// Emit code to store TOS into a name/global, converting a native value to a
// Python object first where needed.
static void emit_native_store_global(emit_t *emit, qstr qst, int kind) {
    // the function table is laid out so that kind can be added to MP_F_STORE_NAME
    MP_STATIC_ASSERT(MP_F_STORE_NAME + MP_EMIT_IDOP_GLOBAL_NAME == MP_F_STORE_NAME);
    MP_STATIC_ASSERT(MP_F_STORE_NAME + MP_EMIT_IDOP_GLOBAL_GLOBAL == MP_F_STORE_GLOBAL);
    if (kind == MP_EMIT_IDOP_GLOBAL_NAME) {
        // mp_store_name, but needs conversion of object (maybe have mp_viper_store_name(obj, type))
        vtype_kind_t vtype;
        emit_pre_pop_reg(emit, &vtype, REG_ARG_2);
        assert(vtype == VTYPE_PYOBJ);
    } else {
        vtype_kind_t vtype = peek_vtype(emit, 0);
        if (vtype == VTYPE_PYOBJ) {
            emit_pre_pop_reg(emit, &vtype, REG_ARG_2);
        } else {
            // convert native value to a Python object before storing
            emit_pre_pop_reg(emit, &vtype, REG_ARG_1);
            emit_call_with_imm_arg(emit, MP_F_CONVERT_NATIVE_TO_OBJ, vtype, REG_ARG_2); // arg2 = type
            ASM_MOV_REG_REG(emit->as, REG_ARG_2, REG_RET);
        }
    }
    emit_call_with_qstr_arg(emit, MP_F_STORE_NAME + kind, qst, REG_ARG_1); // arg1 = name
    emit_post(emit);
}
+
// Store into an attribute: compiles `base.qst = value` with the value at
// stack depth 1 and the base at TOS... NOTE(review): value is peeked at
// depth 1 here but accessed at depth 2 in the conversion path below —
// consistent with a stack of (value, base) plus the implicit convention of
// emit_access_stack; relies on MP_F_STORE_ATTR(base, name, value).
static void emit_native_store_attr(emit_t *emit, qstr qst) {
    vtype_kind_t vtype_base;
    vtype_kind_t vtype_val = peek_vtype(emit, 1);
    if (vtype_val == VTYPE_PYOBJ) {
        emit_pre_pop_reg_reg(emit, &vtype_base, REG_ARG_1, &vtype_val, REG_ARG_3); // arg1 = base, arg3 = value
    } else {
        // native value: box it to a Python object before the store
        emit_access_stack(emit, 2, &vtype_val, REG_ARG_1); // arg1 = value
        emit_call_with_imm_arg(emit, MP_F_CONVERT_NATIVE_TO_OBJ, vtype_val, REG_ARG_2); // arg2 = type
        ASM_MOV_REG_REG(emit->as, REG_ARG_3, REG_RET); // arg3 = value (converted)
        emit_pre_pop_reg(emit, &vtype_base, REG_ARG_1); // arg1 = base
        adjust_stack(emit, -1); // pop value
    }
    assert(vtype_base == VTYPE_PYOBJ);
    emit_call_with_qstr_arg(emit, MP_F_STORE_ATTR, qst, REG_ARG_2); // arg2 = attribute name
    emit_post(emit);
}
+
// Compile a subscript store: base[index] = value.
// If the base is a Python object this dispatches to mp_obj_subscr at
// runtime; if it is a viper pointer type (ptr8/ptr16/ptr32) a raw memory
// store is emitted, with a fast path when the index is a compile-time
// immediate.  Per-architecture special cases are guarded by N_* macros.
static void emit_native_store_subscr(emit_t *emit) {
    DEBUG_printf("store_subscr\n");
    // need to compile: base[index] = value

    // pop: index, base, value
    // optimise case where index is an immediate
    vtype_kind_t vtype_base = peek_vtype(emit, 1);

    if (vtype_base == VTYPE_PYOBJ) {
        // standard Python subscr
        vtype_kind_t vtype_index = peek_vtype(emit, 0);
        vtype_kind_t vtype_value = peek_vtype(emit, 2);
        if (vtype_index != VTYPE_PYOBJ || vtype_value != VTYPE_PYOBJ) {
            // need to implicitly convert non-objects to objects
            // TODO do this properly
            emit_get_stack_pointer_to_reg_for_pop(emit, REG_ARG_1, 3);
            adjust_stack(emit, 3);
        }
        emit_pre_pop_reg_reg_reg(emit, &vtype_index, REG_ARG_2, &vtype_base, REG_ARG_1, &vtype_value, REG_ARG_3);
        emit_call(emit, MP_F_OBJ_SUBSCR);
    } else {
        // viper store
        // TODO The different machine architectures have very different
        // capabilities and requirements for stores, so probably best to
        // write a completely separate store-optimiser for each one.
        stack_info_t *top = peek_stack(emit, 0);
        if (top->vtype == VTYPE_INT && top->kind == STACK_IMM) {
            // index is an immediate
            mp_int_t index_value = top->data.u_imm;
            emit_pre_pop_discard(emit); // discard index
            vtype_kind_t vtype_value;
            int reg_base = REG_ARG_1;
            int reg_index = REG_ARG_2;
            int reg_value = REG_ARG_3;
            emit_pre_pop_reg_flexible(emit, &vtype_base, &reg_base, reg_index, reg_value);
            #if N_X64 || N_X86
            // special case: x86 needs byte stores to be from lower 4 regs (REG_ARG_3 is EDX)
            emit_pre_pop_reg(emit, &vtype_value, reg_value);
            #else
            emit_pre_pop_reg_flexible(emit, &vtype_value, &reg_value, reg_base, reg_index);
            #endif
            // only raw machine values can be stored through a typed pointer
            if (vtype_value != VTYPE_BOOL && vtype_value != VTYPE_INT && vtype_value != VTYPE_UINT) {
                EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
                    MP_ERROR_TEXT("can't store '%q'"), vtype_to_qstr(vtype_value));
            }
            switch (vtype_base) {
                case VTYPE_PTR8: {
                    // pointer to 8-bit memory
                    // TODO optimise to use thumb strb r1, [r2, r3]
                    if (index_value != 0) {
                        // index is non-zero
                        #if N_THUMB
                        // Thumb strb with 5-bit immediate offset covers 0..31
                        if (index_value > 0 && index_value < 32) {
                            asm_thumb_strb_rlo_rlo_i5(emit->as, reg_value, reg_base, index_value);
                            break;
                        }
                        #endif
                        ASM_MOV_REG_IMM(emit->as, reg_index, index_value);
                        #if N_ARM
                        // ARM has a register-offset store; it completes the op, so return
                        asm_arm_strb_reg_reg_reg(emit->as, reg_value, reg_base, reg_index);
                        return;
                        #endif
                        ASM_ADD_REG_REG(emit->as, reg_index, reg_base); // add index to base
                        reg_base = reg_index;
                    }
                    ASM_STORE8_REG_REG(emit->as, reg_value, reg_base); // store value to (base+index)
                    break;
                }
                case VTYPE_PTR16: {
                    // pointer to 16-bit memory
                    if (index_value != 0) {
                        // index is a non-zero immediate
                        #if N_THUMB
                        if (index_value > 0 && index_value < 32) {
                            asm_thumb_strh_rlo_rlo_i5(emit->as, reg_value, reg_base, index_value);
                            break;
                        }
                        #endif
                        ASM_MOV_REG_IMM(emit->as, reg_index, index_value << 1); // scale for 2-byte elements
                        ASM_ADD_REG_REG(emit->as, reg_index, reg_base); // add 2*index to base
                        reg_base = reg_index;
                    }
                    ASM_STORE16_REG_REG(emit->as, reg_value, reg_base); // store value to (base+2*index)
                    break;
                }
                case VTYPE_PTR32: {
                    // pointer to 32-bit memory
                    if (index_value != 0) {
                        // index is a non-zero immediate
                        #if N_THUMB
                        if (index_value > 0 && index_value < 32) {
                            asm_thumb_str_rlo_rlo_i5(emit->as, reg_value, reg_base, index_value);
                            break;
                        }
                        #endif
                        #if N_ARM
                        ASM_MOV_REG_IMM(emit->as, reg_index, index_value);
                        asm_arm_str_reg_reg_reg(emit->as, reg_value, reg_base, reg_index);
                        return;
                        #endif
                        ASM_MOV_REG_IMM(emit->as, reg_index, index_value << 2); // scale for 4-byte elements
                        ASM_ADD_REG_REG(emit->as, reg_index, reg_base); // add 4*index to base
                        reg_base = reg_index;
                    }
                    ASM_STORE32_REG_REG(emit->as, reg_value, reg_base); // store value to (base+4*index)
                    break;
                }
                default:
                    EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
                        MP_ERROR_TEXT("can't store to '%q'"), vtype_to_qstr(vtype_base));
            }
        } else {
            // index is not an immediate
            vtype_kind_t vtype_index, vtype_value;
            int reg_index = REG_ARG_2;
            int reg_value = REG_ARG_3;
            emit_pre_pop_reg_flexible(emit, &vtype_index, &reg_index, REG_ARG_1, reg_value);
            emit_pre_pop_reg(emit, &vtype_base, REG_ARG_1);
            if (vtype_index != VTYPE_INT && vtype_index != VTYPE_UINT) {
                EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
                    MP_ERROR_TEXT("can't store with '%q' index"), vtype_to_qstr(vtype_index));
            }
            #if N_X64 || N_X86
            // special case: x86 needs byte stores to be from lower 4 regs (REG_ARG_3 is EDX)
            emit_pre_pop_reg(emit, &vtype_value, reg_value);
            #else
            emit_pre_pop_reg_flexible(emit, &vtype_value, &reg_value, REG_ARG_1, reg_index);
            #endif
            if (vtype_value != VTYPE_BOOL && vtype_value != VTYPE_INT && vtype_value != VTYPE_UINT) {
                EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
                    MP_ERROR_TEXT("can't store '%q'"), vtype_to_qstr(vtype_value));
            }
            switch (vtype_base) {
                case VTYPE_PTR8: {
                    // pointer to 8-bit memory
                    // TODO optimise to use thumb strb r1, [r2, r3]
                    #if N_ARM
                    asm_arm_strb_reg_reg_reg(emit->as, reg_value, REG_ARG_1, reg_index);
                    break;
                    #endif
                    ASM_ADD_REG_REG(emit->as, REG_ARG_1, reg_index); // add index to base
                    ASM_STORE8_REG_REG(emit->as, reg_value, REG_ARG_1); // store value to (base+index)
                    break;
                }
                case VTYPE_PTR16: {
                    // pointer to 16-bit memory
                    #if N_ARM
                    asm_arm_strh_reg_reg_reg(emit->as, reg_value, REG_ARG_1, reg_index);
                    break;
                    #endif
                    // add index twice to get base + 2*index (no scaled addressing here)
                    ASM_ADD_REG_REG(emit->as, REG_ARG_1, reg_index); // add index to base
                    ASM_ADD_REG_REG(emit->as, REG_ARG_1, reg_index); // add index to base
                    ASM_STORE16_REG_REG(emit->as, reg_value, REG_ARG_1); // store value to (base+2*index)
                    break;
                }
                case VTYPE_PTR32: {
                    // pointer to 32-bit memory
                    #if N_ARM
                    asm_arm_str_reg_reg_reg(emit->as, reg_value, REG_ARG_1, reg_index);
                    break;
                    #endif
                    // add index four times to get base + 4*index (no scaled addressing here)
                    ASM_ADD_REG_REG(emit->as, REG_ARG_1, reg_index); // add index to base
                    ASM_ADD_REG_REG(emit->as, REG_ARG_1, reg_index); // add index to base
                    ASM_ADD_REG_REG(emit->as, REG_ARG_1, reg_index); // add index to base
                    ASM_ADD_REG_REG(emit->as, REG_ARG_1, reg_index); // add index to base
                    ASM_STORE32_REG_REG(emit->as, reg_value, REG_ARG_1); // store value to (base+4*index)
                    break;
                }
                default:
                    EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
                        MP_ERROR_TEXT("can't store to '%q'"), vtype_to_qstr(vtype_base));
            }
        }

    }
}
+
// Delete a local variable.  Fast locals are simply overwritten with None
// (see comment below); deleting a cell variable is not yet implemented.
static void emit_native_delete_local(emit_t *emit, qstr qst, mp_uint_t local_num, int kind) {
    if (kind == MP_EMIT_IDOP_LOCAL_FAST) {
        // TODO: This is not compliant implementation. We could use MP_OBJ_SENTINEL
        // to mark deleted vars but then every var would need to be checked on
        // each access. Very inefficient, so just set value to None to enable GC.
        emit_native_load_const_tok(emit, MP_TOKEN_KW_NONE);
        emit_native_store_fast(emit, qst, local_num);
    } else {
        // TODO implement me!
    }
}
+
// Delete a global, selecting mp_delete_name or mp_delete_global by `kind`.
static void emit_native_delete_global(emit_t *emit, qstr qst, int kind) {
    // runtime-function table layout lets MP_F_DELETE_NAME + kind pick the helper
    MP_STATIC_ASSERT(MP_F_DELETE_NAME + MP_EMIT_IDOP_GLOBAL_NAME == MP_F_DELETE_NAME);
    MP_STATIC_ASSERT(MP_F_DELETE_NAME + MP_EMIT_IDOP_GLOBAL_GLOBAL == MP_F_DELETE_GLOBAL);
    emit_native_pre(emit);
    emit_call_with_qstr_arg(emit, MP_F_DELETE_NAME + kind, qst, REG_ARG_1);
    emit_post(emit);
}
+
// Delete an attribute: implemented as a store of NULL via mp_store_attr.
static void emit_native_delete_attr(emit_t *emit, qstr qst) {
    vtype_kind_t vtype_base;
    emit_pre_pop_reg(emit, &vtype_base, REG_ARG_1); // arg1 = base
    assert(vtype_base == VTYPE_PYOBJ);
    ASM_XOR_REG_REG(emit->as, REG_ARG_3, REG_ARG_3); // arg3 = value (null for delete)
    emit_call_with_qstr_arg(emit, MP_F_STORE_ATTR, qst, REG_ARG_2); // arg2 = attribute name
    emit_post(emit);
}
+
// Delete base[index]: mp_obj_subscr with MP_OBJ_NULL as the value signals
// deletion to the runtime.
static void emit_native_delete_subscr(emit_t *emit) {
    vtype_kind_t vtype_index, vtype_base;
    emit_pre_pop_reg_reg(emit, &vtype_index, REG_ARG_2, &vtype_base, REG_ARG_1); // index, base
    assert(vtype_index == VTYPE_PYOBJ);
    assert(vtype_base == VTYPE_PYOBJ);
    emit_call_with_imm_arg(emit, MP_F_OBJ_SUBSCR, (mp_uint_t)MP_OBJ_NULL, REG_ARG_3);
}
+
+static void emit_native_subscr(emit_t *emit, int kind) {
+    if (kind == MP_EMIT_SUBSCR_LOAD) {
+        emit_native_load_subscr(emit);
+    } else if (kind == MP_EMIT_SUBSCR_STORE) {
+        emit_native_store_subscr(emit);
+    } else {
+        emit_native_delete_subscr(emit);
+    }
+}
+
+static void emit_native_attr(emit_t *emit, qstr qst, int kind) {
+    if (kind == MP_EMIT_ATTR_LOAD) {
+        emit_native_load_attr(emit, qst);
+    } else if (kind == MP_EMIT_ATTR_STORE) {
+        emit_native_store_attr(emit, qst);
+    } else {
+        emit_native_delete_attr(emit, qst);
+    }
+}
+
// Duplicate TOS: pop into a register then push it back twice.
static void emit_native_dup_top(emit_t *emit) {
    DEBUG_printf("dup_top\n");
    vtype_kind_t vtype;
    int reg = REG_TEMP0;
    emit_pre_pop_reg_flexible(emit, &vtype, &reg, -1, -1);
    emit_post_push_reg_reg(emit, vtype, reg, vtype, reg);
}
+
// Duplicate the top two stack items: (a, b) -> (a, b, a, b).
static void emit_native_dup_top_two(emit_t *emit) {
    vtype_kind_t vtype0, vtype1;
    emit_pre_pop_reg_reg(emit, &vtype0, REG_TEMP0, &vtype1, REG_TEMP1);
    emit_post_push_reg_reg_reg_reg(emit, vtype1, REG_TEMP1, vtype0, REG_TEMP0, vtype1, REG_TEMP1, vtype0, REG_TEMP0);
}
+
// Discard TOS without emitting any machine code for it.
static void emit_native_pop_top(emit_t *emit) {
    DEBUG_printf("pop_top\n");
    emit_pre_pop_discard(emit);
    emit_post(emit);
}
+
// Swap the top two stack items: (a, b) -> (b, a).
static void emit_native_rot_two(emit_t *emit) {
    DEBUG_printf("rot_two\n");
    vtype_kind_t vtype0, vtype1;
    emit_pre_pop_reg_reg(emit, &vtype0, REG_TEMP0, &vtype1, REG_TEMP1);
    emit_post_push_reg_reg(emit, vtype0, REG_TEMP0, vtype1, REG_TEMP1);
}
+
// Rotate the top three stack items: (a, b, c) -> (c, a, b).
static void emit_native_rot_three(emit_t *emit) {
    DEBUG_printf("rot_three\n");
    vtype_kind_t vtype0, vtype1, vtype2;
    emit_pre_pop_reg_reg_reg(emit, &vtype0, REG_TEMP0, &vtype1, REG_TEMP1, &vtype2, REG_TEMP2);
    emit_post_push_reg_reg_reg(emit, vtype0, REG_TEMP0, vtype2, REG_TEMP2, vtype1, REG_TEMP1);
}
+
// Emit an unconditional jump to `label`.  Code following the jump is
// unreachable, so further code emission is suppressed until the next label.
static void emit_native_jump(emit_t *emit, mp_uint_t label) {
    DEBUG_printf("jump(label=" UINT_FMT ")\n", label);
    emit_native_pre(emit);
    // need to commit stack because we are jumping elsewhere
    need_stack_settled(emit);
    ASM_JUMP(emit->as, label);
    emit_post(emit);
    mp_asm_base_suppress_code(&emit->as->base);
}
+
// Shared implementation of conditional jumps.  Pops (or, if !pop, peeks)
// the truth value at TOS and jumps to `label` when it matches `cond`.
// Python objects are tested via mp_obj_is_true; viper bool/int/uint values
// are tested directly against zero.
static void emit_native_jump_helper(emit_t *emit, bool cond, mp_uint_t label, bool pop) {
    vtype_kind_t vtype = peek_vtype(emit, 0);
    if (vtype == VTYPE_PYOBJ) {
        emit_pre_pop_reg(emit, &vtype, REG_ARG_1);
        if (!pop) {
            // keep the value on the stack for the fall-through path
            adjust_stack(emit, 1);
        }
        emit_call(emit, MP_F_OBJ_IS_TRUE);
    } else {
        emit_pre_pop_reg(emit, &vtype, REG_RET);
        if (!pop) {
            adjust_stack(emit, 1);
        }
        if (!(vtype == VTYPE_BOOL || vtype == VTYPE_INT || vtype == VTYPE_UINT)) {
            EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
                MP_ERROR_TEXT("can't implicitly convert '%q' to 'bool'"), vtype_to_qstr(vtype));
        }
    }
    // For non-pop need to save the vtype so that emit_native_adjust_stack_size
    // can use it.  This is a bit of a hack.
    if (!pop) {
        emit->saved_stack_vtype = vtype;
    }
    // need to commit stack because we may jump elsewhere
    need_stack_settled(emit);
    // Emit the jump
    if (cond) {
        ASM_JUMP_IF_REG_NONZERO(emit->as, REG_RET, label, vtype == VTYPE_PYOBJ);
    } else {
        ASM_JUMP_IF_REG_ZERO(emit->as, REG_RET, label, vtype == VTYPE_PYOBJ);
    }
    if (!pop) {
        // value was popped by the jump on the taken path; restore counter here
        adjust_stack(emit, -1);
    }
    emit_post(emit);
}
+
// POP_JUMP_IF_TRUE / POP_JUMP_IF_FALSE: pop TOS and jump on its truth value.
static void emit_native_pop_jump_if(emit_t *emit, bool cond, mp_uint_t label) {
    DEBUG_printf("pop_jump_if(cond=%u, label=" UINT_FMT ")\n", cond, label);
    emit_native_jump_helper(emit, cond, label, true);
}
+
// JUMP_IF_x_OR_POP: jump keeping TOS if it matches cond, else pop it.
static void emit_native_jump_if_or_pop(emit_t *emit, bool cond, mp_uint_t label) {
    DEBUG_printf("jump_if_or_pop(cond=%u, label=" UINT_FMT ")\n", cond, label);
    emit_native_jump_helper(emit, cond, label, false);
}
+
// Emit a break/continue jump that may unwind through `except_depth` levels
// of exception handlers.  Active finally handlers found on the way are
// chained via unwind_label so each finally runs before the jump completes;
// if there are none, the handler PC is restored and a plain jump is made.
static void emit_native_unwind_jump(emit_t *emit, mp_uint_t label, mp_uint_t except_depth) {
    if (except_depth > 0) {
        exc_stack_entry_t *first_finally = NULL;
        exc_stack_entry_t *prev_finally = NULL;
        exc_stack_entry_t *e = &emit->exc_stack[emit->exc_stack_size - 1];
        // walk the exception stack from innermost outwards
        for (; except_depth > 0; --except_depth, --e) {
            if (e->is_finally && e->is_active) {
                // Found an active finally handler
                if (first_finally == NULL) {
                    first_finally = e;
                }
                if (prev_finally != NULL) {
                    // Mark prev finally as needed to unwind a jump
                    prev_finally->unwind_label = e->label;
                }
                prev_finally = e;
            }
        }
        if (prev_finally == NULL) {
            // No finally, handle the jump ourselves
            // First, restore the exception handler address for the jump
            if (e < emit->exc_stack) {
                // unwound past the outermost handler: clear the handler PC
                ASM_XOR_REG_REG(emit->as, REG_RET, REG_RET);
            } else {
                ASM_MOV_REG_PCREL(emit->as, REG_RET, e->label);
            }
            ASM_MOV_LOCAL_REG(emit->as, LOCAL_IDX_EXC_HANDLER_PC(emit), REG_RET);
        } else {
            // Last finally should do our jump for us
            // Mark finally as needing to decide the type of jump
            prev_finally->unwind_label = UNWIND_LABEL_DO_FINAL_UNWIND;
            ASM_MOV_REG_PCREL(emit->as, REG_RET, label & ~MP_EMIT_BREAK_FROM_FOR);
            ASM_MOV_LOCAL_REG(emit->as, LOCAL_IDX_EXC_HANDLER_UNWIND(emit), REG_RET);
            // Cancel any active exception (see also emit_native_pop_except_jump)
            ASM_MOV_REG_IMM(emit->as, REG_RET, (mp_uint_t)MP_OBJ_NULL);
            ASM_MOV_LOCAL_REG(emit->as, LOCAL_IDX_EXC_VAL(emit), REG_RET);
            // Jump to the innermost active finally
            label = first_finally->label;
        }
    }
    // mask off the break-from-for flag bit before jumping
    emit_native_jump(emit, label & ~MP_EMIT_BREAK_FROM_FOR);
}
+
// Begin a `with` block.  Loads __exit__ and __enter__ from the context
// manager, calls __enter__, pushes an exception-stack entry whose handler
// is `label`, and leaves (..., __exit__, self, as_value, as_value) on the
// stack; the stack layout comments below track each step.
static void emit_native_setup_with(emit_t *emit, mp_uint_t label) {
    // the context manager is on the top of the stack
    // stack: (..., ctx_mgr)

    // get __exit__ method
    vtype_kind_t vtype;
    emit_access_stack(emit, 1, &vtype, REG_ARG_1); // arg1 = ctx_mgr
    assert(vtype == VTYPE_PYOBJ);
    emit_get_stack_pointer_to_reg_for_push(emit, REG_ARG_3, 2); // arg3 = dest ptr
    emit_call_with_qstr_arg(emit, MP_F_LOAD_METHOD, MP_QSTR___exit__, REG_ARG_2);
    // stack: (..., ctx_mgr, __exit__, self)

    // rearrange so ctx_mgr is dropped and (__exit__, self) remain
    emit_pre_pop_reg(emit, &vtype, REG_ARG_3); // self
    emit_pre_pop_reg(emit, &vtype, REG_ARG_2); // __exit__
    emit_pre_pop_reg(emit, &vtype, REG_ARG_1); // ctx_mgr
    emit_post_push_reg(emit, vtype, REG_ARG_2); // __exit__
    emit_post_push_reg(emit, vtype, REG_ARG_3); // self
    // stack: (..., __exit__, self)
    // REG_ARG_1=ctx_mgr

    // get __enter__ method
    emit_get_stack_pointer_to_reg_for_push(emit, REG_ARG_3, 2); // arg3 = dest ptr
    emit_call_with_qstr_arg(emit, MP_F_LOAD_METHOD, MP_QSTR___enter__, REG_ARG_2); // arg2 = method name
    // stack: (..., __exit__, self, __enter__, self)

    // call __enter__ method
    emit_get_stack_pointer_to_reg_for_pop(emit, REG_ARG_3, 2); // pointer to items, including meth and self
    emit_call_with_2_imm_args(emit, MP_F_CALL_METHOD_N_KW, 0, REG_ARG_1, 0, REG_ARG_2);
    emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET); // push return value of __enter__
    // stack: (..., __exit__, self, as_value)

    // need to commit stack because we may jump elsewhere
    need_stack_settled(emit);
    // register the with-block's exception handler (a finally-style entry)
    emit_native_push_exc_stack(emit, label, true);

    emit_native_dup_top(emit);
    // stack: (..., __exit__, self, as_value, as_value)
}
+
+static void emit_native_setup_block(emit_t *emit, mp_uint_t label, int kind) {
+    if (kind == MP_EMIT_SETUP_BLOCK_WITH) {
+        emit_native_setup_with(emit, label);
+    } else {
+        // Set up except and finally
+        emit_native_pre(emit);
+        need_stack_settled(emit);
+        emit_native_push_exc_stack(emit, label, kind == MP_EMIT_SETUP_BLOCK_FINALLY);
+        emit_post(emit);
+    }
+}
+
// End of a `with` block: call __exit__ both on the normal exit path (with
// three None arguments) and on the exception path (with type/value/traceback),
// and swallow the exception if __exit__ returns true.
static void emit_native_with_cleanup(emit_t *emit, mp_uint_t label) {
    // Note: 3 labels are reserved for this function, starting at *emit->label_slot

    // stack: (..., __exit__, self, as_value)
    emit_native_pre(emit);
    emit_native_leave_exc_stack(emit, false);
    adjust_stack(emit, -1);
    // stack: (..., __exit__, self)

    // Label for case where __exit__ is called from an unwind jump
    emit_native_label_assign(emit, *emit->label_slot + 2);

    // call __exit__ with (None, None, None) — normal (no-exception) exit
    emit_post_push_imm(emit, VTYPE_PTR_NONE, 0);
    emit_post_push_imm(emit, VTYPE_PTR_NONE, 0);
    emit_post_push_imm(emit, VTYPE_PTR_NONE, 0);
    emit_get_stack_pointer_to_reg_for_pop(emit, REG_ARG_3, 5);
    emit_call_with_2_imm_args(emit, MP_F_CALL_METHOD_N_KW, 3, REG_ARG_1, 0, REG_ARG_2);

    // Replace exc with None and finish
    emit_native_jump(emit, *emit->label_slot);

    // nlr_catch
    // Don't use emit_native_label_assign because this isn't a real finally label
    mp_asm_base_label_assign(&emit->as->base, label);

    // Leave with's exception handler
    emit_native_leave_exc_stack(emit, true);

    // Adjust stack counter for: __exit__, self (implicitly discard as_value which is above self)
    emit_native_adjust_stack_size(emit, 2);
    // stack: (..., __exit__, self)

    ASM_MOV_REG_LOCAL(emit->as, REG_ARG_1, LOCAL_IDX_EXC_VAL(emit)); // get exc

    // Check if exc is MP_OBJ_NULL (i.e. zero) and jump to non-exc handler if it is
    ASM_JUMP_IF_REG_ZERO(emit->as, REG_ARG_1, *emit->label_slot + 2, false);

    ASM_LOAD_REG_REG_OFFSET(emit->as, REG_ARG_2, REG_ARG_1, 0); // get type(exc)
    emit_post_push_reg(emit, VTYPE_PYOBJ, REG_ARG_2); // push type(exc)
    emit_post_push_reg(emit, VTYPE_PYOBJ, REG_ARG_1); // push exc value
    emit_post_push_imm(emit, VTYPE_PTR_NONE, 0); // traceback info
    // Stack: (..., __exit__, self, type(exc), exc, traceback)

    // call __exit__ method
    emit_get_stack_pointer_to_reg_for_pop(emit, REG_ARG_3, 5);
    emit_call_with_2_imm_args(emit, MP_F_CALL_METHOD_N_KW, 3, REG_ARG_1, 0, REG_ARG_2);
    // Stack: (...)

    // If REG_RET is true then we need to replace exception with None (swallow exception)
    if (REG_ARG_1 != REG_RET) {
        ASM_MOV_REG_REG(emit->as, REG_ARG_1, REG_RET);
    }
    emit_call(emit, MP_F_OBJ_IS_TRUE);
    ASM_JUMP_IF_REG_ZERO(emit->as, REG_RET, *emit->label_slot + 1, true);

    // Replace exception with MP_OBJ_NULL.
    emit_native_label_assign(emit, *emit->label_slot);
    ASM_MOV_REG_IMM(emit->as, REG_TEMP0, (mp_uint_t)MP_OBJ_NULL);
    ASM_MOV_LOCAL_REG(emit->as, LOCAL_IDX_EXC_VAL(emit), REG_TEMP0);

    // end of with cleanup nlr_catch block
    emit_native_label_assign(emit, *emit->label_slot + 1);

    // Exception is in nlr_buf.ret_val slot
}
+
// End of a finally block: re-raise any pending exception, then perform a
// deferred unwind jump if one was requested while entering the finally.
static void emit_native_end_finally(emit_t *emit) {
    // logic:
    //   exc = pop_stack
    //   if exc == None: pass
    //   else: raise exc
    // the check if exc is None is done in the MP_F_NATIVE_RAISE stub
    emit_native_pre(emit);
    ASM_MOV_REG_LOCAL(emit->as, REG_ARG_1, LOCAL_IDX_EXC_VAL(emit));
    emit_call(emit, MP_F_NATIVE_RAISE);

    // Get state for this finally and see if we need to unwind
    exc_stack_entry_t *e = emit_native_pop_exc_stack(emit);
    if (e->unwind_label != UNWIND_LABEL_UNUSED) {
        // unwind target (if any) was stashed in a local by emit_native_unwind_jump
        ASM_MOV_REG_LOCAL(emit->as, REG_RET, LOCAL_IDX_EXC_HANDLER_UNWIND(emit));
        ASM_JUMP_IF_REG_ZERO(emit->as, REG_RET, *emit->label_slot, false);
        if (e->unwind_label == UNWIND_LABEL_DO_FINAL_UNWIND) {
            // jump directly to the stored target address
            ASM_JUMP_REG(emit->as, REG_RET);
        } else {
            // continue unwinding into the next enclosing finally
            emit_native_jump(emit, e->unwind_label);
        }
        emit_native_label_assign(emit, *emit->label_slot);
    }

    emit_post(emit);
}
+
// Get an iterator for TOS.  With use_stack the iterator buffer is allocated
// in-place on the Python stack (MP_OBJ_ITER_BUF_NSLOTS slots); otherwise a
// NULL buffer is passed and mp_getiter allocates on the heap.
static void emit_native_get_iter(emit_t *emit, bool use_stack) {
    // perhaps the difficult one, as we want to rewrite for loops using native code
    // in cases where we iterate over a Python object, can we use normal runtime calls?

    vtype_kind_t vtype;
    emit_pre_pop_reg(emit, &vtype, REG_ARG_1);
    assert(vtype == VTYPE_PYOBJ);
    if (use_stack) {
        emit_get_stack_pointer_to_reg_for_push(emit, REG_ARG_2, MP_OBJ_ITER_BUF_NSLOTS);
        emit_call(emit, MP_F_NATIVE_GETITER);
    } else {
        // mp_getiter will allocate the iter_buf on the heap
        ASM_MOV_REG_IMM(emit->as, REG_ARG_2, 0);
        emit_call(emit, MP_F_NATIVE_GETITER);
        emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
    }
}
+
// Fetch the next item of the iterator in the stack's iter_buf; jump to
// `label` when iteration is exhausted (iternext returns MP_OBJ_STOP_ITERATION).
static void emit_native_for_iter(emit_t *emit, mp_uint_t label) {
    emit_native_pre(emit);
    emit_get_stack_pointer_to_reg_for_pop(emit, REG_ARG_1, MP_OBJ_ITER_BUF_NSLOTS);
    // keep the iter_buf on the stack across the call
    adjust_stack(emit, MP_OBJ_ITER_BUF_NSLOTS);
    emit_call(emit, MP_F_NATIVE_ITERNEXT);
    #if MICROPY_DEBUG_MP_OBJ_SENTINELS
    // debug builds use a distinct sentinel object, so compare explicitly
    ASM_MOV_REG_IMM(emit->as, REG_TEMP1, (mp_uint_t)MP_OBJ_STOP_ITERATION);
    ASM_JUMP_IF_REG_EQ(emit->as, REG_RET, REG_TEMP1, label);
    #else
    // normally the sentinel is 0, so a test-against-zero suffices
    MP_STATIC_ASSERT(MP_OBJ_STOP_ITERATION == 0);
    ASM_JUMP_IF_REG_ZERO(emit->as, REG_RET, label, false);
    #endif
    emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
}
+
// End of a for-loop: release the iter_buf slots from the stack counter.
static void emit_native_for_iter_end(emit_t *emit) {
    // adjust stack counter (we get here from for_iter ending, which popped the value for us)
    emit_native_pre(emit);
    adjust_stack(emit, -MP_OBJ_ITER_BUF_NSLOTS);
    emit_post(emit);
}
+
// Leave an except block and jump to `label`.  Inside a handler the pending
// exception value is cleared; otherwise the handler entry is popped.
static void emit_native_pop_except_jump(emit_t *emit, mp_uint_t label, bool within_exc_handler) {
    if (within_exc_handler) {
        // Cancel any active exception so subsequent handlers don't see it
        ASM_MOV_REG_IMM(emit->as, REG_TEMP0, (mp_uint_t)MP_OBJ_NULL);
        ASM_MOV_LOCAL_REG(emit->as, LOCAL_IDX_EXC_VAL(emit), REG_TEMP0);
    } else {
        emit_native_leave_exc_stack(emit, false);
    }
    emit_native_jump(emit, label);
}
+
// Emit a unary operation.  Viper int/uint values handle +, - and ~ inline;
// Python objects are dispatched to mp_unary_op at runtime; `not` and other
// vtypes are compile errors in viper mode.
static void emit_native_unary_op(emit_t *emit, mp_unary_op_t op) {
    vtype_kind_t vtype = peek_vtype(emit, 0);
    if (vtype == VTYPE_INT || vtype == VTYPE_UINT) {
        if (op == MP_UNARY_OP_POSITIVE) {
            // No-operation, just leave the argument on the stack.
        } else if (op == MP_UNARY_OP_NEGATIVE) {
            int reg = REG_RET;
            emit_pre_pop_reg_flexible(emit, &vtype, &reg, reg, reg);
            ASM_NEG_REG(emit->as, reg);
            emit_post_push_reg(emit, vtype, reg);
        } else if (op == MP_UNARY_OP_INVERT) {
            #ifdef ASM_NOT_REG
            // architecture has a direct bitwise-NOT instruction
            int reg = REG_RET;
            emit_pre_pop_reg_flexible(emit, &vtype, &reg, reg, reg);
            ASM_NOT_REG(emit->as, reg);
            #else
            // no NOT instruction: synthesise it as XOR with -1
            int reg = REG_RET;
            emit_pre_pop_reg_flexible(emit, &vtype, &reg, REG_ARG_1, reg);
            ASM_MOV_REG_IMM(emit->as, REG_ARG_1, -1);
            ASM_XOR_REG_REG(emit->as, reg, REG_ARG_1);
            #endif
            emit_post_push_reg(emit, vtype, reg);
        } else {
            // only remaining unary op is `not`, unsupported on viper ints
            // NOTE(review): the extra mp_binary_op_method_name[op] argument is
            // unused by this format string (no %q) — harmless but redundant
            EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
                MP_ERROR_TEXT("'not' not implemented"), mp_binary_op_method_name[op]);
        }
    } else if (vtype == VTYPE_PYOBJ) {
        emit_pre_pop_reg(emit, &vtype, REG_ARG_2);
        emit_call_with_imm_arg(emit, MP_F_UNARY_OP, op, REG_ARG_1);
        emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
    } else {
        EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
            MP_ERROR_TEXT("can't do unary op of '%q'"), vtype_to_qstr(vtype));
    }
}
+
+static void emit_native_binary_op(emit_t *emit, mp_binary_op_t op) {
+    DEBUG_printf("binary_op(" UINT_FMT ")\n", op);
+    vtype_kind_t vtype_lhs = peek_vtype(emit, 1);
+    vtype_kind_t vtype_rhs = peek_vtype(emit, 0);
+    if ((vtype_lhs == VTYPE_INT || vtype_lhs == VTYPE_UINT)
+        && (vtype_rhs == VTYPE_INT || vtype_rhs == VTYPE_UINT)) {
+        // for integers, inplace and normal ops are equivalent, so use just normal ops
+        if (MP_BINARY_OP_INPLACE_OR <= op && op <= MP_BINARY_OP_INPLACE_POWER) {
+            op += MP_BINARY_OP_OR - MP_BINARY_OP_INPLACE_OR;
+        }
+
+        #if N_X64 || N_X86
+        // special cases for x86 and shifting
+        if (op == MP_BINARY_OP_LSHIFT || op == MP_BINARY_OP_RSHIFT) {
+            #if N_X64
+            emit_pre_pop_reg_reg(emit, &vtype_rhs, ASM_X64_REG_RCX, &vtype_lhs, REG_RET);
+            #else
+            emit_pre_pop_reg_reg(emit, &vtype_rhs, ASM_X86_REG_ECX, &vtype_lhs, REG_RET);
+            #endif
+            if (op == MP_BINARY_OP_LSHIFT) {
+                ASM_LSL_REG(emit->as, REG_RET);
+            } else {
+                if (vtype_lhs == VTYPE_UINT) {
+                    ASM_LSR_REG(emit->as, REG_RET);
+                } else {
+                    ASM_ASR_REG(emit->as, REG_RET);
+                }
+            }
+            emit_post_push_reg(emit, vtype_lhs, REG_RET);
+            return;
+        }
+        #endif
+
+        // special cases for floor-divide and module because we dispatch to helper functions
+        if (op == MP_BINARY_OP_FLOOR_DIVIDE || op == MP_BINARY_OP_MODULO) {
+            emit_pre_pop_reg_reg(emit, &vtype_rhs, REG_ARG_2, &vtype_lhs, REG_ARG_1);
+            if (vtype_lhs != VTYPE_INT) {
+                EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
+                    MP_ERROR_TEXT("div/mod not implemented for uint"), mp_binary_op_method_name[op]);
+            }
+            if (op == MP_BINARY_OP_FLOOR_DIVIDE) {
+                emit_call(emit, MP_F_SMALL_INT_FLOOR_DIVIDE);
+            } else {
+                emit_call(emit, MP_F_SMALL_INT_MODULO);
+            }
+            emit_post_push_reg(emit, VTYPE_INT, REG_RET);
+            return;
+        }
+
+        int reg_rhs = REG_ARG_3;
+        emit_pre_pop_reg_flexible(emit, &vtype_rhs, &reg_rhs, REG_RET, REG_ARG_2);
+        emit_pre_pop_reg(emit, &vtype_lhs, REG_ARG_2);
+
+        #if !(N_X64 || N_X86)
+        if (op == MP_BINARY_OP_LSHIFT || op == MP_BINARY_OP_RSHIFT) {
+            if (op == MP_BINARY_OP_LSHIFT) {
+                ASM_LSL_REG_REG(emit->as, REG_ARG_2, reg_rhs);
+            } else {
+                if (vtype_lhs == VTYPE_UINT) {
+                    ASM_LSR_REG_REG(emit->as, REG_ARG_2, reg_rhs);
+                } else {
+                    ASM_ASR_REG_REG(emit->as, REG_ARG_2, reg_rhs);
+                }
+            }
+            emit_post_push_reg(emit, vtype_lhs, REG_ARG_2);
+            return;
+        }
+        #endif
+
+        if (op == MP_BINARY_OP_OR) {
+            ASM_OR_REG_REG(emit->as, REG_ARG_2, reg_rhs);
+            emit_post_push_reg(emit, vtype_lhs, REG_ARG_2);
+        } else if (op == MP_BINARY_OP_XOR) {
+            ASM_XOR_REG_REG(emit->as, REG_ARG_2, reg_rhs);
+            emit_post_push_reg(emit, vtype_lhs, REG_ARG_2);
+        } else if (op == MP_BINARY_OP_AND) {
+            ASM_AND_REG_REG(emit->as, REG_ARG_2, reg_rhs);
+            emit_post_push_reg(emit, vtype_lhs, REG_ARG_2);
+        } else if (op == MP_BINARY_OP_ADD) {
+            ASM_ADD_REG_REG(emit->as, REG_ARG_2, reg_rhs);
+            emit_post_push_reg(emit, vtype_lhs, REG_ARG_2);
+        } else if (op == MP_BINARY_OP_SUBTRACT) {
+            ASM_SUB_REG_REG(emit->as, REG_ARG_2, reg_rhs);
+            emit_post_push_reg(emit, vtype_lhs, REG_ARG_2);
+        } else if (op == MP_BINARY_OP_MULTIPLY) {
+            ASM_MUL_REG_REG(emit->as, REG_ARG_2, reg_rhs);
+            emit_post_push_reg(emit, vtype_lhs, REG_ARG_2);
+        } else if (op == MP_BINARY_OP_LESS
+                   || op == MP_BINARY_OP_MORE
+                   || op == MP_BINARY_OP_EQUAL
+                   || op == MP_BINARY_OP_LESS_EQUAL
+                   || op == MP_BINARY_OP_MORE_EQUAL
+                   || op == MP_BINARY_OP_NOT_EQUAL) {
+            // comparison ops
+
+            if (vtype_lhs != vtype_rhs) {
+                EMIT_NATIVE_VIPER_TYPE_ERROR(emit, MP_ERROR_TEXT("comparison of int and uint"));
+            }
+
+            size_t op_idx = op - MP_BINARY_OP_LESS + (vtype_lhs == VTYPE_UINT ? 0 : 6);
+
+            need_reg_single(emit, REG_RET, 0);
+            #if N_X64
+            asm_x64_xor_r64_r64(emit->as, REG_RET, REG_RET);
+            asm_x64_cmp_r64_with_r64(emit->as, reg_rhs, REG_ARG_2);
+            static byte ops[6 + 6] = {
+                // unsigned
+                ASM_X64_CC_JB,
+                ASM_X64_CC_JA,
+                ASM_X64_CC_JE,
+                ASM_X64_CC_JBE,
+                ASM_X64_CC_JAE,
+                ASM_X64_CC_JNE,
+                // signed
+                ASM_X64_CC_JL,
+                ASM_X64_CC_JG,
+                ASM_X64_CC_JE,
+                ASM_X64_CC_JLE,
+                ASM_X64_CC_JGE,
+                ASM_X64_CC_JNE,
+            };
+            asm_x64_setcc_r8(emit->as, ops[op_idx], REG_RET);
+            #elif N_X86
+            asm_x86_xor_r32_r32(emit->as, REG_RET, REG_RET);
+            asm_x86_cmp_r32_with_r32(emit->as, reg_rhs, REG_ARG_2);
+            static byte ops[6 + 6] = {
+                // unsigned
+                ASM_X86_CC_JB,
+                ASM_X86_CC_JA,
+                ASM_X86_CC_JE,
+                ASM_X86_CC_JBE,
+                ASM_X86_CC_JAE,
+                ASM_X86_CC_JNE,
+                // signed
+                ASM_X86_CC_JL,
+                ASM_X86_CC_JG,
+                ASM_X86_CC_JE,
+                ASM_X86_CC_JLE,
+                ASM_X86_CC_JGE,
+                ASM_X86_CC_JNE,
+            };
+            asm_x86_setcc_r8(emit->as, ops[op_idx], REG_RET);
+            #elif N_THUMB
+            asm_thumb_cmp_rlo_rlo(emit->as, REG_ARG_2, reg_rhs);
+            if (asm_thumb_allow_armv7m(emit->as)) {
+                static uint16_t ops[6 + 6] = {
+                    // unsigned
+                    ASM_THUMB_OP_ITE_CC,
+                    ASM_THUMB_OP_ITE_HI,
+                    ASM_THUMB_OP_ITE_EQ,
+                    ASM_THUMB_OP_ITE_LS,
+                    ASM_THUMB_OP_ITE_CS,
+                    ASM_THUMB_OP_ITE_NE,
+                    // signed
+                    ASM_THUMB_OP_ITE_LT,
+                    ASM_THUMB_OP_ITE_GT,
+                    ASM_THUMB_OP_ITE_EQ,
+                    ASM_THUMB_OP_ITE_LE,
+                    ASM_THUMB_OP_ITE_GE,
+                    ASM_THUMB_OP_ITE_NE,
+                };
+                asm_thumb_op16(emit->as, ops[op_idx]);
+                asm_thumb_mov_rlo_i8(emit->as, REG_RET, 1);
+                asm_thumb_mov_rlo_i8(emit->as, REG_RET, 0);
+            } else {
+                static uint16_t ops[6 + 6] = {
+                    // unsigned
+                    ASM_THUMB_CC_CC,
+                    ASM_THUMB_CC_HI,
+                    ASM_THUMB_CC_EQ,
+                    ASM_THUMB_CC_LS,
+                    ASM_THUMB_CC_CS,
+                    ASM_THUMB_CC_NE,
+                    // signed
+                    ASM_THUMB_CC_LT,
+                    ASM_THUMB_CC_GT,
+                    ASM_THUMB_CC_EQ,
+                    ASM_THUMB_CC_LE,
+                    ASM_THUMB_CC_GE,
+                    ASM_THUMB_CC_NE,
+                };
+                asm_thumb_bcc_rel9(emit->as, ops[op_idx], 6);
+                asm_thumb_mov_rlo_i8(emit->as, REG_RET, 0);
+                asm_thumb_b_rel12(emit->as, 4);
+                asm_thumb_mov_rlo_i8(emit->as, REG_RET, 1);
+            }
+            #elif N_ARM
+            asm_arm_cmp_reg_reg(emit->as, REG_ARG_2, reg_rhs);
+            static uint ccs[6 + 6] = {
+                // unsigned
+                ASM_ARM_CC_CC,
+                ASM_ARM_CC_HI,
+                ASM_ARM_CC_EQ,
+                ASM_ARM_CC_LS,
+                ASM_ARM_CC_CS,
+                ASM_ARM_CC_NE,
+                // signed
+                ASM_ARM_CC_LT,
+                ASM_ARM_CC_GT,
+                ASM_ARM_CC_EQ,
+                ASM_ARM_CC_LE,
+                ASM_ARM_CC_GE,
+                ASM_ARM_CC_NE,
+            };
+            asm_arm_setcc_reg(emit->as, REG_RET, ccs[op_idx]);
+            #elif N_XTENSA || N_XTENSAWIN
+            static uint8_t ccs[6 + 6] = {
+                // unsigned
+                ASM_XTENSA_CC_LTU,
+                0x80 | ASM_XTENSA_CC_LTU, // for GTU we'll swap args
+                ASM_XTENSA_CC_EQ,
+                0x80 | ASM_XTENSA_CC_GEU, // for LEU we'll swap args
+                ASM_XTENSA_CC_GEU,
+                ASM_XTENSA_CC_NE,
+                // signed
+                ASM_XTENSA_CC_LT,
+                0x80 | ASM_XTENSA_CC_LT, // for GT we'll swap args
+                ASM_XTENSA_CC_EQ,
+                0x80 | ASM_XTENSA_CC_GE, // for LE we'll swap args
+                ASM_XTENSA_CC_GE,
+                ASM_XTENSA_CC_NE,
+            };
+            uint8_t cc = ccs[op_idx];
+            if ((cc & 0x80) == 0) {
+                asm_xtensa_setcc_reg_reg_reg(emit->as, cc, REG_RET, REG_ARG_2, reg_rhs);
+            } else {
+                asm_xtensa_setcc_reg_reg_reg(emit->as, cc & ~0x80, REG_RET, reg_rhs, REG_ARG_2);
+            }
+            #else
+            #error not implemented
+            #endif
+            emit_post_push_reg(emit, VTYPE_BOOL, REG_RET);
+        } else {
+            // TODO other ops not yet implemented
+            adjust_stack(emit, 1);
+            EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
+                MP_ERROR_TEXT("binary op %q not implemented"), mp_binary_op_method_name[op]);
+        }
+    } else if (vtype_lhs == VTYPE_PYOBJ && vtype_rhs == VTYPE_PYOBJ) {
+        emit_pre_pop_reg_reg(emit, &vtype_rhs, REG_ARG_3, &vtype_lhs, REG_ARG_2);
+        bool invert = false;
+        if (op == MP_BINARY_OP_NOT_IN) {
+            invert = true;
+            op = MP_BINARY_OP_IN;
+        } else if (op == MP_BINARY_OP_IS_NOT) {
+            invert = true;
+            op = MP_BINARY_OP_IS;
+        }
+        emit_call_with_imm_arg(emit, MP_F_BINARY_OP, op, REG_ARG_1);
+        if (invert) {
+            ASM_MOV_REG_REG(emit->as, REG_ARG_2, REG_RET);
+            emit_call_with_imm_arg(emit, MP_F_UNARY_OP, MP_UNARY_OP_NOT, REG_ARG_1);
+        }
+        emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
+    } else {
+        adjust_stack(emit, -1);
+        EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
+            MP_ERROR_TEXT("can't do binary op between '%q' and '%q'"),
+            vtype_to_qstr(vtype_lhs), vtype_to_qstr(vtype_rhs));
+    }
+}
+
+#if MICROPY_PY_BUILTINS_SLICE
+static void emit_native_build_slice(emit_t *emit, mp_uint_t n_args);
+#endif
+
+static void emit_native_build(emit_t *emit, mp_uint_t n_args, int kind) {
+    // for viper: call runtime, with types of args
+    //   if wrapped in byte_array, or something, allocates memory and fills it
+    MP_STATIC_ASSERT(MP_F_BUILD_TUPLE + MP_EMIT_BUILD_TUPLE == MP_F_BUILD_TUPLE);
+    MP_STATIC_ASSERT(MP_F_BUILD_TUPLE + MP_EMIT_BUILD_LIST == MP_F_BUILD_LIST);
+    MP_STATIC_ASSERT(MP_F_BUILD_TUPLE + MP_EMIT_BUILD_MAP == MP_F_BUILD_MAP);
+    MP_STATIC_ASSERT(MP_F_BUILD_TUPLE + MP_EMIT_BUILD_SET == MP_F_BUILD_SET);
+    #if MICROPY_PY_BUILTINS_SLICE
+    if (kind == MP_EMIT_BUILD_SLICE) {
+        emit_native_build_slice(emit, n_args);
+        return;
+    }
+    #endif
+    emit_native_pre(emit);
+    if (kind == MP_EMIT_BUILD_TUPLE || kind == MP_EMIT_BUILD_LIST || kind == MP_EMIT_BUILD_SET) {
+        emit_get_stack_pointer_to_reg_for_pop(emit, REG_ARG_2, n_args); // pointer to items
+    }
+    emit_call_with_imm_arg(emit, MP_F_BUILD_TUPLE + kind, n_args, REG_ARG_1);
+    emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET); // new tuple/list/map/set
+}
+
+static void emit_native_store_map(emit_t *emit) {
+    vtype_kind_t vtype_key, vtype_value, vtype_map;
+    emit_pre_pop_reg_reg_reg(emit, &vtype_key, REG_ARG_2, &vtype_value, REG_ARG_3, &vtype_map, REG_ARG_1); // key, value, map
+    assert(vtype_key == VTYPE_PYOBJ);
+    assert(vtype_value == VTYPE_PYOBJ);
+    assert(vtype_map == VTYPE_PYOBJ);
+    emit_call(emit, MP_F_STORE_MAP);
+    emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET); // map
+}
+
+#if MICROPY_PY_BUILTINS_SLICE
+static void emit_native_build_slice(emit_t *emit, mp_uint_t n_args) {
+    DEBUG_printf("build_slice %d\n", n_args);
+    if (n_args == 2) {
+        vtype_kind_t vtype_start, vtype_stop;
+        emit_pre_pop_reg_reg(emit, &vtype_stop, REG_ARG_2, &vtype_start, REG_ARG_1); // arg1 = start, arg2 = stop
+        assert(vtype_start == VTYPE_PYOBJ);
+        assert(vtype_stop == VTYPE_PYOBJ);
+        emit_native_mov_reg_const(emit, REG_ARG_3, MP_F_CONST_NONE_OBJ); // arg3 = step
+    } else {
+        assert(n_args == 3);
+        vtype_kind_t vtype_start, vtype_stop, vtype_step;
+        emit_pre_pop_reg_reg_reg(emit, &vtype_step, REG_ARG_3, &vtype_stop, REG_ARG_2, &vtype_start, REG_ARG_1); // arg1 = start, arg2 = stop, arg3 = step
+        assert(vtype_start == VTYPE_PYOBJ);
+        assert(vtype_stop == VTYPE_PYOBJ);
+        assert(vtype_step == VTYPE_PYOBJ);
+    }
+    emit_call(emit, MP_F_NEW_SLICE);
+    emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
+}
+#endif
+
+static void emit_native_store_comp(emit_t *emit, scope_kind_t kind, mp_uint_t collection_index) {
+    mp_fun_kind_t f;
+    if (kind == SCOPE_LIST_COMP) {
+        vtype_kind_t vtype_item;
+        emit_pre_pop_reg(emit, &vtype_item, REG_ARG_2);
+        assert(vtype_item == VTYPE_PYOBJ);
+        f = MP_F_LIST_APPEND;
+    #if MICROPY_PY_BUILTINS_SET
+    } else if (kind == SCOPE_SET_COMP) {
+        vtype_kind_t vtype_item;
+        emit_pre_pop_reg(emit, &vtype_item, REG_ARG_2);
+        assert(vtype_item == VTYPE_PYOBJ);
+        f = MP_F_STORE_SET;
+    #endif
+    } else {
+        // SCOPE_DICT_COMP
+        vtype_kind_t vtype_key, vtype_value;
+        emit_pre_pop_reg_reg(emit, &vtype_key, REG_ARG_2, &vtype_value, REG_ARG_3);
+        assert(vtype_key == VTYPE_PYOBJ);
+        assert(vtype_value == VTYPE_PYOBJ);
+        f = MP_F_STORE_MAP;
+    }
+    vtype_kind_t vtype_collection;
+    emit_access_stack(emit, collection_index, &vtype_collection, REG_ARG_1);
+    assert(vtype_collection == VTYPE_PYOBJ);
+    emit_call(emit, f);
+    emit_post(emit);
+}
+
+static void emit_native_unpack_sequence(emit_t *emit, mp_uint_t n_args) {
+    DEBUG_printf("unpack_sequence %d\n", n_args);
+    vtype_kind_t vtype_base;
+    emit_pre_pop_reg(emit, &vtype_base, REG_ARG_1); // arg1 = seq
+    assert(vtype_base == VTYPE_PYOBJ);
+    emit_get_stack_pointer_to_reg_for_push(emit, REG_ARG_3, n_args); // arg3 = dest ptr
+    emit_call_with_imm_arg(emit, MP_F_UNPACK_SEQUENCE, n_args, REG_ARG_2); // arg2 = n_args
+}
+
+static void emit_native_unpack_ex(emit_t *emit, mp_uint_t n_left, mp_uint_t n_right) {
+    DEBUG_printf("unpack_ex %d %d\n", n_left, n_right);
+    vtype_kind_t vtype_base;
+    emit_pre_pop_reg(emit, &vtype_base, REG_ARG_1); // arg1 = seq
+    assert(vtype_base == VTYPE_PYOBJ);
+    emit_get_stack_pointer_to_reg_for_push(emit, REG_ARG_3, n_left + n_right + 1); // arg3 = dest ptr
+    emit_call_with_imm_arg(emit, MP_F_UNPACK_EX, n_left | (n_right << 8), REG_ARG_2); // arg2 = n_left + n_right
+}
+
+static void emit_native_make_function(emit_t *emit, scope_t *scope, mp_uint_t n_pos_defaults, mp_uint_t n_kw_defaults) {
+    // call runtime, with type info for args, or don't support dict/default params, or only support Python objects for them
+    emit_native_pre(emit);
+    emit_native_mov_reg_state(emit, REG_ARG_2, LOCAL_IDX_FUN_OBJ(emit));
+    ASM_LOAD_REG_REG_OFFSET(emit->as, REG_ARG_2, REG_ARG_2, OFFSETOF_OBJ_FUN_BC_CONTEXT);
+    if (n_pos_defaults == 0 && n_kw_defaults == 0) {
+        need_reg_all(emit);
+        ASM_MOV_REG_IMM(emit->as, REG_ARG_3, 0);
+    } else {
+        emit_get_stack_pointer_to_reg_for_pop(emit, REG_ARG_3, 2);
+        need_reg_all(emit);
+    }
+    emit_load_reg_with_child(emit, REG_ARG_1, scope->raw_code);
+    ASM_CALL_IND(emit->as, MP_F_MAKE_FUNCTION_FROM_PROTO_FUN);
+    emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
+}
+
+static void emit_native_make_closure(emit_t *emit, scope_t *scope, mp_uint_t n_closed_over, mp_uint_t n_pos_defaults, mp_uint_t n_kw_defaults) {
+    // make function
+    emit_native_pre(emit);
+    emit_native_mov_reg_state(emit, REG_ARG_2, LOCAL_IDX_FUN_OBJ(emit));
+    ASM_LOAD_REG_REG_OFFSET(emit->as, REG_ARG_2, REG_ARG_2, OFFSETOF_OBJ_FUN_BC_CONTEXT);
+    if (n_pos_defaults == 0 && n_kw_defaults == 0) {
+        need_reg_all(emit);
+        ASM_MOV_REG_IMM(emit->as, REG_ARG_3, 0);
+    } else {
+        emit_get_stack_pointer_to_reg_for_pop(emit, REG_ARG_3, 2 + n_closed_over);
+        adjust_stack(emit, 2 + n_closed_over);
+        need_reg_all(emit);
+    }
+    emit_load_reg_with_child(emit, REG_ARG_1, scope->raw_code);
+    ASM_CALL_IND(emit->as, MP_F_MAKE_FUNCTION_FROM_PROTO_FUN);
+
+    // make closure
+    #if REG_ARG_1 != REG_RET
+    ASM_MOV_REG_REG(emit->as, REG_ARG_1, REG_RET);
+    #endif
+    ASM_MOV_REG_IMM(emit->as, REG_ARG_2, n_closed_over);
+    emit_get_stack_pointer_to_reg_for_pop(emit, REG_ARG_3, n_closed_over);
+    if (n_pos_defaults != 0 || n_kw_defaults != 0) {
+        adjust_stack(emit, -2);
+    }
+    ASM_CALL_IND(emit->as, MP_F_NEW_CLOSURE);
+    emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
+}
+
+static void emit_native_call_function(emit_t *emit, mp_uint_t n_positional, mp_uint_t n_keyword, mp_uint_t star_flags) {
+    DEBUG_printf("call_function(n_pos=" UINT_FMT ", n_kw=" UINT_FMT ", star_flags=" UINT_FMT ")\n", n_positional, n_keyword, star_flags);
+
+    // TODO: in viper mode, call special runtime routine with type info for args,
+    // and wanted type info for return, to remove need for boxing/unboxing
+
+    emit_native_pre(emit);
+    vtype_kind_t vtype_fun = peek_vtype(emit, n_positional + 2 * n_keyword);
+    if (vtype_fun == VTYPE_BUILTIN_CAST) {
+        // casting operator
+        assert(n_positional == 1 && n_keyword == 0);
+        assert(!star_flags);
+        DEBUG_printf("  cast to %d\n", vtype_fun);
+        vtype_kind_t vtype_cast = peek_stack(emit, 1)->data.u_imm;
+        switch (peek_vtype(emit, 0)) {
+            case VTYPE_PYOBJ: {
+                vtype_kind_t vtype;
+                emit_pre_pop_reg(emit, &vtype, REG_ARG_1);
+                emit_pre_pop_discard(emit);
+                emit_call_with_imm_arg(emit, MP_F_CONVERT_OBJ_TO_NATIVE, vtype_cast, REG_ARG_2); // arg2 = type
+                emit_post_push_reg(emit, vtype_cast, REG_RET);
+                break;
+            }
+            case VTYPE_BOOL:
+            case VTYPE_INT:
+            case VTYPE_UINT:
+            case VTYPE_PTR:
+            case VTYPE_PTR8:
+            case VTYPE_PTR16:
+            case VTYPE_PTR32:
+            case VTYPE_PTR_NONE:
+                emit_fold_stack_top(emit, REG_ARG_1);
+                emit_post_top_set_vtype(emit, vtype_cast);
+                break;
+            default:
+                // this can happen when casting a cast: int(int)
+                mp_raise_NotImplementedError(MP_ERROR_TEXT("casting"));
+        }
+    } else {
+        assert(vtype_fun == VTYPE_PYOBJ);
+        if (star_flags) {
+            emit_get_stack_pointer_to_reg_for_pop(emit, REG_ARG_3, n_positional + 2 * n_keyword + 2); // pointer to args
+            emit_call_with_2_imm_args(emit, MP_F_CALL_METHOD_N_KW_VAR, 0, REG_ARG_1, n_positional | (n_keyword << 8), REG_ARG_2);
+            emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
+        } else {
+            if (n_positional != 0 || n_keyword != 0) {
+                emit_get_stack_pointer_to_reg_for_pop(emit, REG_ARG_3, n_positional + 2 * n_keyword); // pointer to args
+            }
+            emit_pre_pop_reg(emit, &vtype_fun, REG_ARG_1); // the function
+            emit_call_with_imm_arg(emit, MP_F_NATIVE_CALL_FUNCTION_N_KW, n_positional | (n_keyword << 8), REG_ARG_2);
+            emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
+        }
+    }
+}
+
+static void emit_native_call_method(emit_t *emit, mp_uint_t n_positional, mp_uint_t n_keyword, mp_uint_t star_flags) {
+    if (star_flags) {
+        emit_get_stack_pointer_to_reg_for_pop(emit, REG_ARG_3, n_positional + 2 * n_keyword + 3); // pointer to args
+        emit_call_with_2_imm_args(emit, MP_F_CALL_METHOD_N_KW_VAR, 1, REG_ARG_1, n_positional | (n_keyword << 8), REG_ARG_2);
+        emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
+    } else {
+        emit_native_pre(emit);
+        emit_get_stack_pointer_to_reg_for_pop(emit, REG_ARG_3, 2 + n_positional + 2 * n_keyword); // pointer to items, including meth and self
+        emit_call_with_2_imm_args(emit, MP_F_CALL_METHOD_N_KW, n_positional, REG_ARG_1, n_keyword, REG_ARG_2);
+        emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
+    }
+}
+
+static void emit_native_return_value(emit_t *emit) {
+    DEBUG_printf("return_value\n");
+
+    if (emit->scope->scope_flags & MP_SCOPE_FLAG_GENERATOR) {
+        // Save pointer to current stack position for caller to access return value
+        emit_get_stack_pointer_to_reg_for_pop(emit, REG_TEMP0, 1);
+        emit_native_mov_state_reg(emit, OFFSETOF_CODE_STATE_SP, REG_TEMP0);
+
+        // Put return type in return value slot
+        ASM_MOV_REG_IMM(emit->as, REG_TEMP0, MP_VM_RETURN_NORMAL);
+        ASM_MOV_LOCAL_REG(emit->as, LOCAL_IDX_RET_VAL(emit), REG_TEMP0);
+
+        // Do the unwinding jump to get to the return handler
+        emit_native_unwind_jump(emit, emit->exit_label, emit->exc_stack_size);
+        return;
+    }
+
+    if (emit->do_viper_types) {
+        vtype_kind_t return_vtype = emit->scope->scope_flags >> MP_SCOPE_FLAG_VIPERRET_POS;
+        if (peek_vtype(emit, 0) == VTYPE_PTR_NONE) {
+            emit_pre_pop_discard(emit);
+            if (return_vtype == VTYPE_PYOBJ) {
+                emit_native_mov_reg_const(emit, REG_PARENT_RET, MP_F_CONST_NONE_OBJ);
+            } else {
+                ASM_MOV_REG_IMM(emit->as, REG_ARG_1, 0);
+            }
+        } else {
+            vtype_kind_t vtype;
+            emit_pre_pop_reg(emit, &vtype, return_vtype == VTYPE_PYOBJ ? REG_PARENT_RET : REG_ARG_1);
+            if (vtype != return_vtype) {
+                EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
+                    MP_ERROR_TEXT("return expected '%q' but got '%q'"),
+                    vtype_to_qstr(return_vtype), vtype_to_qstr(vtype));
+            }
+        }
+        if (return_vtype != VTYPE_PYOBJ) {
+            emit_call_with_imm_arg(emit, MP_F_CONVERT_NATIVE_TO_OBJ, return_vtype, REG_ARG_2);
+            #if REG_RET != REG_PARENT_RET
+            ASM_MOV_REG_REG(emit->as, REG_PARENT_RET, REG_RET);
+            #endif
+        }
+    } else {
+        vtype_kind_t vtype;
+        emit_pre_pop_reg(emit, &vtype, REG_PARENT_RET);
+        assert(vtype == VTYPE_PYOBJ);
+    }
+    if (NEED_GLOBAL_EXC_HANDLER(emit)) {
+        // Save return value for the global exception handler to use
+        ASM_MOV_LOCAL_REG(emit->as, LOCAL_IDX_RET_VAL(emit), REG_PARENT_RET);
+    }
+    emit_native_unwind_jump(emit, emit->exit_label, emit->exc_stack_size);
+}
+
+static void emit_native_raise_varargs(emit_t *emit, mp_uint_t n_args) {
+    (void)n_args;
+    assert(n_args == 1);
+    vtype_kind_t vtype_exc;
+    emit_pre_pop_reg(emit, &vtype_exc, REG_ARG_1); // arg1 = object to raise
+    if (vtype_exc != VTYPE_PYOBJ) {
+        EMIT_NATIVE_VIPER_TYPE_ERROR(emit, MP_ERROR_TEXT("must raise an object"));
+    }
+    // TODO probably make this 1 call to the runtime (which could even call convert, native_raise(obj, type))
+    emit_call(emit, MP_F_NATIVE_RAISE);
+    mp_asm_base_suppress_code(&emit->as->base);
+}
+
+static void emit_native_yield(emit_t *emit, int kind) {
+    // Note: 1 (yield) or 3 (yield from) labels are reserved for this function, starting at *emit->label_slot
+
+    if (emit->do_viper_types) {
+        mp_raise_NotImplementedError(MP_ERROR_TEXT("native yield"));
+    }
+    emit->scope->scope_flags |= MP_SCOPE_FLAG_GENERATOR;
+
+    need_stack_settled(emit);
+
+    if (kind == MP_EMIT_YIELD_FROM) {
+
+        // Top of yield-from loop, conceptually implementing:
+        //     for item in generator:
+        //         yield item
+
+        // Jump to start of loop
+        emit_native_jump(emit, *emit->label_slot + 2);
+
+        // Label for top of loop
+        emit_native_label_assign(emit, *emit->label_slot + 1);
+    }
+
+    // Save pointer to current stack position for caller to access yielded value
+    emit_get_stack_pointer_to_reg_for_pop(emit, REG_TEMP0, 1);
+    emit_native_mov_state_reg(emit, OFFSETOF_CODE_STATE_SP, REG_TEMP0);
+
+    // Put return type in return value slot
+    ASM_MOV_REG_IMM(emit->as, REG_TEMP0, MP_VM_RETURN_YIELD);
+    ASM_MOV_LOCAL_REG(emit->as, LOCAL_IDX_RET_VAL(emit), REG_TEMP0);
+
+    // Save re-entry PC
+    ASM_MOV_REG_PCREL(emit->as, REG_TEMP0, *emit->label_slot);
+    emit_native_mov_state_reg(emit, LOCAL_IDX_GEN_PC(emit), REG_TEMP0);
+
+    // Jump to exit handler
+    ASM_JUMP(emit->as, emit->exit_label);
+
+    // Label re-entry point
+    mp_asm_base_label_assign(&emit->as->base, *emit->label_slot);
+
+    // Re-open any active exception handler
+    if (emit->exc_stack_size > 0) {
+        // Find innermost active exception handler, to restore as current handler
+        exc_stack_entry_t *e = &emit->exc_stack[emit->exc_stack_size - 1];
+        for (; e >= emit->exc_stack; --e) {
+            if (e->is_active) {
+                // Found active handler, get its PC
+                ASM_MOV_REG_PCREL(emit->as, REG_RET, e->label);
+                ASM_MOV_LOCAL_REG(emit->as, LOCAL_IDX_EXC_HANDLER_PC(emit), REG_RET);
+                break;
+            }
+        }
+    }
+
+    emit_native_adjust_stack_size(emit, 1); // send_value
+
+    if (kind == MP_EMIT_YIELD_VALUE) {
+        // Check LOCAL_IDX_EXC_VAL for any injected value
+        ASM_MOV_REG_LOCAL(emit->as, REG_ARG_1, LOCAL_IDX_EXC_VAL(emit));
+        emit_call(emit, MP_F_NATIVE_RAISE);
+    } else {
+        // Label loop entry
+        emit_native_label_assign(emit, *emit->label_slot + 2);
+
+        // Get the next item from the delegate generator
+        vtype_kind_t vtype;
+        emit_pre_pop_reg(emit, &vtype, REG_ARG_2); // send_value
+        emit_access_stack(emit, 1, &vtype, REG_ARG_1); // generator
+        ASM_MOV_REG_LOCAL(emit->as, REG_ARG_3, LOCAL_IDX_EXC_VAL(emit)); // throw_value
+        emit_post_push_reg(emit, VTYPE_PYOBJ, REG_ARG_3);
+        emit_get_stack_pointer_to_reg_for_pop(emit, REG_ARG_3, 1); // ret_value
+        emit_call(emit, MP_F_NATIVE_YIELD_FROM);
+
+        // If returned non-zero then generator continues
+        ASM_JUMP_IF_REG_NONZERO(emit->as, REG_RET, *emit->label_slot + 1, true);
+
+        // Pop exhausted gen, replace with ret_value
+        emit_native_adjust_stack_size(emit, 1); // ret_value
+        emit_fold_stack_top(emit, REG_ARG_1);
+    }
+}
+
+static void emit_native_start_except_handler(emit_t *emit) {
+    // Protected block has finished so leave the current exception handler
+    emit_native_leave_exc_stack(emit, true);
+
+    // Get and push nlr_buf.ret_val
+    ASM_MOV_REG_LOCAL(emit->as, REG_TEMP0, LOCAL_IDX_EXC_VAL(emit));
+    emit_post_push_reg(emit, VTYPE_PYOBJ, REG_TEMP0);
+}
+
+static void emit_native_end_except_handler(emit_t *emit) {
+    adjust_stack(emit, -1); // pop the exception (end_finally didn't use it)
+}
+
+const emit_method_table_t EXPORT_FUN(method_table) = {
+    #if MICROPY_DYNAMIC_COMPILER
+    EXPORT_FUN(new),
+    EXPORT_FUN(free),
+    #endif
+
+    emit_native_start_pass,
+    emit_native_end_pass,
+    emit_native_adjust_stack_size,
+    emit_native_set_source_line,
+
+    {
+        emit_native_load_local,
+        emit_native_load_global,
+    },
+    {
+        emit_native_store_local,
+        emit_native_store_global,
+    },
+    {
+        emit_native_delete_local,
+        emit_native_delete_global,
+    },
+
+    emit_native_label_assign,
+    emit_native_import,
+    emit_native_load_const_tok,
+    emit_native_load_const_small_int,
+    emit_native_load_const_str,
+    emit_native_load_const_obj,
+    emit_native_load_null,
+    emit_native_load_method,
+    emit_native_load_build_class,
+    emit_native_subscr,
+    emit_native_attr,
+    emit_native_dup_top,
+    emit_native_dup_top_two,
+    emit_native_pop_top,
+    emit_native_rot_two,
+    emit_native_rot_three,
+    emit_native_jump,
+    emit_native_pop_jump_if,
+    emit_native_jump_if_or_pop,
+    emit_native_unwind_jump,
+    emit_native_setup_block,
+    emit_native_with_cleanup,
+    emit_native_end_finally,
+    emit_native_get_iter,
+    emit_native_for_iter,
+    emit_native_for_iter_end,
+    emit_native_pop_except_jump,
+    emit_native_unary_op,
+    emit_native_binary_op,
+    emit_native_build,
+    emit_native_store_map,
+    emit_native_store_comp,
+    emit_native_unpack_sequence,
+    emit_native_unpack_ex,
+    emit_native_make_function,
+    emit_native_make_closure,
+    emit_native_call_function,
+    emit_native_call_method,
+    emit_native_return_value,
+    emit_native_raise_varargs,
+    emit_native_yield,
+
+    emit_native_start_except_handler,
+    emit_native_end_except_handler,
+};
+
+#endif

+ 18 - 0
mp_flipper/lib/micropython/py/emitnthumb.c

@@ -0,0 +1,18 @@
+// thumb specific stuff
+
+#include "py/mpconfig.h"
+
+#if MICROPY_EMIT_THUMB
+
+// this is defined so that the assembler exports generic assembler API macros
+#define GENERIC_ASM_API (1)
+#include "py/asmthumb.h"
+
+// Word indices of REG_LOCAL_x in nlr_buf_t
+#define NLR_BUF_IDX_LOCAL_1 (3) // r4
+
+#define N_THUMB (1)
+#define EXPORT_FUN(name) emit_native_thumb_##name
+#include "py/emitnative.c"
+
+#endif

+ 18 - 0
mp_flipper/lib/micropython/py/emitnx64.c

@@ -0,0 +1,18 @@
+// x64 specific stuff
+
+#include "py/mpconfig.h"
+
+#if MICROPY_EMIT_X64
+
+// This is defined so that the assembler exports generic assembler API macros
+#define GENERIC_ASM_API (1)
+#include "py/asmx64.h"
+
+// Word indices of REG_LOCAL_x in nlr_buf_t
+#define NLR_BUF_IDX_LOCAL_1 (5) // rbx
+
+#define N_X64 (1)
+#define EXPORT_FUN(name) emit_native_x64_##name
+#include "py/emitnative.c"
+
+#endif

+ 70 - 0
mp_flipper/lib/micropython/py/emitnx86.c

@@ -0,0 +1,70 @@
+// x86 specific stuff
+
+#include "py/mpconfig.h"
+#include "py/nativeglue.h"
+
+#if MICROPY_EMIT_X86
+
+// This is defined so that the assembler exports generic assembler API macros
+#define GENERIC_ASM_API (1)
+#include "py/asmx86.h"
+
+// Word indices of REG_LOCAL_x in nlr_buf_t
+#define NLR_BUF_IDX_LOCAL_1 (5) // ebx
+
+// x86 needs a table to know how many args a given function has
+static byte mp_f_n_args[MP_F_NUMBER_OF] = {
+    [MP_F_CONVERT_OBJ_TO_NATIVE] = 2,
+    [MP_F_CONVERT_NATIVE_TO_OBJ] = 2,
+    [MP_F_NATIVE_SWAP_GLOBALS] = 1,
+    [MP_F_LOAD_NAME] = 1,
+    [MP_F_LOAD_GLOBAL] = 1,
+    [MP_F_LOAD_BUILD_CLASS] = 0,
+    [MP_F_LOAD_ATTR] = 2,
+    [MP_F_LOAD_METHOD] = 3,
+    [MP_F_LOAD_SUPER_METHOD] = 2,
+    [MP_F_STORE_NAME] = 2,
+    [MP_F_STORE_GLOBAL] = 2,
+    [MP_F_STORE_ATTR] = 3,
+    [MP_F_OBJ_SUBSCR] = 3,
+    [MP_F_OBJ_IS_TRUE] = 1,
+    [MP_F_UNARY_OP] = 2,
+    [MP_F_BINARY_OP] = 3,
+    [MP_F_BUILD_TUPLE] = 2,
+    [MP_F_BUILD_LIST] = 2,
+    [MP_F_BUILD_MAP] = 1,
+    [MP_F_BUILD_SET] = 2,
+    [MP_F_STORE_SET] = 2,
+    [MP_F_LIST_APPEND] = 2,
+    [MP_F_STORE_MAP] = 3,
+    [MP_F_MAKE_FUNCTION_FROM_PROTO_FUN] = 3,
+    [MP_F_NATIVE_CALL_FUNCTION_N_KW] = 3,
+    [MP_F_CALL_METHOD_N_KW] = 3,
+    [MP_F_CALL_METHOD_N_KW_VAR] = 3,
+    [MP_F_NATIVE_GETITER] = 2,
+    [MP_F_NATIVE_ITERNEXT] = 1,
+    [MP_F_NLR_PUSH] = 1,
+    [MP_F_NLR_POP] = 0,
+    [MP_F_NATIVE_RAISE] = 1,
+    [MP_F_IMPORT_NAME] = 3,
+    [MP_F_IMPORT_FROM] = 2,
+    [MP_F_IMPORT_ALL] = 1,
+    [MP_F_NEW_SLICE] = 3,
+    [MP_F_UNPACK_SEQUENCE] = 3,
+    [MP_F_UNPACK_EX] = 3,
+    [MP_F_DELETE_NAME] = 1,
+    [MP_F_DELETE_GLOBAL] = 1,
+    [MP_F_NEW_CLOSURE] = 3,
+    [MP_F_ARG_CHECK_NUM_SIG] = 3,
+    [MP_F_SETUP_CODE_STATE] = 4,
+    [MP_F_SMALL_INT_FLOOR_DIVIDE] = 2,
+    [MP_F_SMALL_INT_MODULO] = 2,
+    [MP_F_NATIVE_YIELD_FROM] = 3,
+    [MP_F_SETJMP] = 1,
+};
+
+#define N_X86 (1)
+#define EXPORT_FUN(name) emit_native_x86_##name
+#include "py/emitnative.c"
+
+#endif

+ 18 - 0
mp_flipper/lib/micropython/py/emitnxtensa.c

@@ -0,0 +1,18 @@
+// Xtensa specific stuff
+
+#include "py/mpconfig.h"
+
+#if MICROPY_EMIT_XTENSA
+
+// this is defined so that the assembler exports generic assembler API macros
+#define GENERIC_ASM_API (1)
+#include "py/asmxtensa.h"
+
+// Word indices of REG_LOCAL_x in nlr_buf_t
+#define NLR_BUF_IDX_LOCAL_1 (8) // a12
+
+#define N_XTENSA (1)
+#define EXPORT_FUN(name) emit_native_xtensa_##name
+#include "py/emitnative.c"
+
+#endif

+ 20 - 0
mp_flipper/lib/micropython/py/emitnxtensawin.c

@@ -0,0 +1,20 @@
+// Xtensa-Windowed specific stuff
+
+#include "py/mpconfig.h"
+
+#if MICROPY_EMIT_XTENSAWIN
+
+// this is defined so that the assembler exports generic assembler API macros
+#define GENERIC_ASM_API (1)
+#define GENERIC_ASM_API_WIN (1)
+#include "py/asmxtensa.h"
+
+// Word indices of REG_LOCAL_x in nlr_buf_t
+#define NLR_BUF_IDX_LOCAL_1 (2 + 4) // a4
+
+#define N_NLR_SETJMP (1)
+#define N_XTENSAWIN (1)
+#define EXPORT_FUN(name) emit_native_xtensawin_##name
+#include "py/emitnative.c"
+
+#endif

+ 424 - 0
mp_flipper/lib/micropython/py/formatfloat.c

@@ -0,0 +1,424 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "py/mpconfig.h"
+#include "py/misc.h"
+#if MICROPY_FLOAT_IMPL != MICROPY_FLOAT_IMPL_NONE
+
+#include <assert.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <math.h>
+#include "py/formatfloat.h"
+
+/***********************************************************************
+
+  Routine for converting an arbitrary floating
+  point number into a string.
+
+  The code in this function was inspired from Fred Bayer's pdouble.c.
+  Since pdouble.c was released as Public Domain, I'm releasing this
+  code as public domain as well.
+
+  The original code can be found in https://github.com/dhylands/format-float
+
+  Dave Hylands
+
+***********************************************************************/
+
+#if MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_FLOAT
+// 1 sign bit, 8 exponent bits, and 23 mantissa bits.
+// exponent values 0 and 255 are reserved, exponent can be 1 to 254.
+// exponent is stored with a bias of 127.
+// The min and max floats are on the order of 1x10^37 and 1x10^-37
+
+#define FPTYPE float
+#define FPCONST(x) x##F
+#define FPROUND_TO_ONE 0.9999995F
+#define FPDECEXP 32
+#define FPMIN_BUF_SIZE 6 // +9e+99
+
+#define FLT_SIGN_MASK   0x80000000
+
+// Return non-zero iff the IEEE-754 sign bit of x is set (inspects raw bits,
+// so it also reports the sign of -0.0 and NaNs, unlike a `x < 0` test).
+static inline int fp_signbit(float x) {
+    mp_float_union_t fb = {x};
+    return fb.i & FLT_SIGN_MASK;
+}
+#define fp_isnan(x) isnan(x)
+#define fp_isinf(x) isinf(x)
+// Return non-zero iff all raw bits of x are zero, i.e. x is positive zero.
+// (Negative zero is handled because the caller negates f before using this.)
+static inline int fp_iszero(float x) {
+    mp_float_union_t fb = {x};
+    return fb.i == 0;
+}
+// Return non-zero iff x < 1.0, by comparing the raw bits against the bit
+// pattern of 1.0f (0x3f800000).  This raw-bit compare is only valid for
+// non-negative x, which holds at the call sites in mp_format_float below.
+static inline int fp_isless1(float x) {
+    mp_float_union_t fb = {x};
+    return fb.i < 0x3f800000;
+}
+
+#elif MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_DOUBLE
+
+#define FPTYPE double
+#define FPCONST(x) x
+#define FPROUND_TO_ONE 0.999999999995
+#define FPDECEXP 256
+#define FPMIN_BUF_SIZE 7 // +9e+199
+#define fp_signbit(x) signbit(x)
+#define fp_isnan(x) isnan(x)
+#define fp_isinf(x) isinf(x)
+#define fp_iszero(x) (x == 0)
+#define fp_isless1(x) (x < 1.0)
+
+#endif // MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_FLOAT/DOUBLE
+
+// Extract the unbiased binary exponent from the IEEE-754 representation of x
+// (mask out the exponent field and subtract the format's exponent offset).
+static inline int fp_expval(FPTYPE x) {
+    mp_float_union_t fb = {x};
+    return (int)((fb.i >> MP_FLOAT_FRAC_BITS) & (~(0xFFFFFFFF << MP_FLOAT_EXP_BITS))) - MP_FLOAT_EXP_OFFSET;
+}
+
+// Format the floating-point value f into buf (which holds buf_size bytes,
+// including room for the terminating null).  fmt is a printf-style format
+// character ('e'/'E', 'f'/'F' or 'g'/'G'), prec is the precision (a negative
+// value selects the default of 6), and sign, when non-zero, is a character
+// to emit before non-negative values (e.g. '+' or ' ').
+// Returns the number of characters written, excluding the terminating null.
+int mp_format_float(FPTYPE f, char *buf, size_t buf_size, char fmt, int prec, char sign) {
+
+    char *s = buf;
+
+    if (buf_size <= FPMIN_BUF_SIZE) {
+        // FPMIN_BUF_SIZE is the minimum size needed to store any FP number.
+        // If the buffer does not have enough room for this (plus null terminator)
+        // then don't try to format the float.
+
+        if (buf_size >= 2) {
+            *s++ = '?';
+        }
+        if (buf_size >= 1) {
+            *s = '\0';
+        }
+        return buf_size >= 2;
+    }
+    if (fp_signbit(f) && !fp_isnan(f)) {
+        *s++ = '-';
+        f = -f;
+    } else {
+        if (sign) {
+            *s++ = sign;
+        }
+    }
+
+    // buf_remaining contains bytes available for digits and exponent.
+    // It is buf_size minus room for the sign and null byte.
+    int buf_remaining = buf_size - 1 - (s - buf);
+
+    {
+        // uc holds the ASCII lowercase bit (0x20) of fmt; XOR-ing it onto the
+        // letters below makes the INF/NAN output match the case of fmt.
+        char uc = fmt & 0x20;
+        if (fp_isinf(f)) {
+            *s++ = 'I' ^ uc;
+            *s++ = 'N' ^ uc;
+            *s++ = 'F' ^ uc;
+            goto ret;
+        } else if (fp_isnan(f)) {
+            *s++ = 'N' ^ uc;
+            *s++ = 'A' ^ uc;
+            *s++ = 'N' ^ uc;
+        ret:
+            *s = '\0';
+            return s - buf;
+        }
+    }
+
+    if (prec < 0) {
+        prec = 6;
+    }
+    char e_char = 'E' | (fmt & 0x20);   // e_char will match case of fmt
+    fmt |= 0x20; // Force fmt to be lowercase
+    char org_fmt = fmt;
+    if (fmt == 'g' && prec == 0) {
+        prec = 1;
+    }
+    int e;
+    int dec = 0;
+    char e_sign = '\0';
+    int num_digits = 0;
+    int signed_e = 0;
+
+    // Approximate power of 10 exponent from binary exponent.
+    // abs(e_guess) is lower bound on abs(power of 10 exponent).
+    int e_guess = (int)(fp_expval(f) * FPCONST(0.3010299956639812));  // 1/log2(10).
+    if (fp_iszero(f)) {
+        e = 0;
+        if (fmt == 'f') {
+            // Truncate precision to prevent buffer overflow
+            if (prec + 2 > buf_remaining) {
+                prec = buf_remaining - 2;
+            }
+            num_digits = prec + 1;
+        } else {
+            // Truncate precision to prevent buffer overflow
+            if (prec + 6 > buf_remaining) {
+                prec = buf_remaining - 6;
+            }
+            if (fmt == 'e') {
+                e_sign = '+';
+            }
+        }
+    } else if (fp_isless1(f)) {
+        FPTYPE f_entry = f;  // Save f in case we go to 'f' format.
+        // Build negative exponent
+        e = -e_guess;
+        FPTYPE u_base = MICROPY_FLOAT_C_FUN(pow)(10, -e);
+        while (u_base > f) {
+            ++e;
+            u_base = MICROPY_FLOAT_C_FUN(pow)(10, -e);
+        }
+        // Normalize out the inferred unit.  Use divide because
+        // pow(10, e) * pow(10, -e) is slightly < 1 for some e in float32
+        // (e.g. print("%.12f" % ((1e13) * (1e-13))))
+        f /= u_base;
+
+        // If the user specified 'g' format, and e is <= 4, then we'll switch
+        // to the fixed format ('f')
+
+        if (fmt == 'f' || (fmt == 'g' && e <= 4)) {
+            fmt = 'f';
+            dec = 0;
+
+            if (org_fmt == 'g') {
+                prec += (e - 1);
+            }
+
+            // truncate precision to prevent buffer overflow
+            if (prec + 2 > buf_remaining) {
+                prec = buf_remaining - 2;
+            }
+
+            num_digits = prec;
+            signed_e = 0;
+            f = f_entry;
+            ++num_digits;
+        } else {
+            // For e & g formats, we'll be printing the exponent, so set the
+            // sign.
+            e_sign = '-';
+            dec = 0;
+
+            if (prec > (buf_remaining - FPMIN_BUF_SIZE)) {
+                prec = buf_remaining - FPMIN_BUF_SIZE;
+                if (fmt == 'g') {
+                    prec++;
+                }
+            }
+            signed_e = -e;
+        }
+    } else {
+        // Build positive exponent.
+        // We don't modify f at this point to avoid inaccuracies from
+        // scaling it.  Instead, we find the product of powers of 10
+        // that is not greater than it, and use that to start the
+        // mantissa.
+        e = e_guess;
+        FPTYPE next_u = MICROPY_FLOAT_C_FUN(pow)(10, e + 1);
+        while (f >= next_u) {
+            ++e;
+            next_u = MICROPY_FLOAT_C_FUN(pow)(10, e + 1);
+        }
+
+        // If the user specified fixed format (fmt == 'f') and e makes the
+        // number too big to fit into the available buffer, then we'll
+        // switch to the 'e' format.
+
+        if (fmt == 'f') {
+            if (e >= buf_remaining) {
+                fmt = 'e';
+            } else if ((e + prec + 2) > buf_remaining) {
+                prec = buf_remaining - e - 2;
+                if (prec < 0) {
+                    // This means no decimal point, so we can add one back
+                    // for the decimal.
+                    prec++;
+                }
+            }
+        }
+        if (fmt == 'e' && prec > (buf_remaining - FPMIN_BUF_SIZE)) {
+            prec = buf_remaining - FPMIN_BUF_SIZE;
+        }
+        if (fmt == 'g') {
+            // Truncate precision to prevent buffer overflow
+            if (prec + (FPMIN_BUF_SIZE - 1) > buf_remaining) {
+                prec = buf_remaining - (FPMIN_BUF_SIZE - 1);
+            }
+        }
+        // If the user specified 'g' format, and e is < prec, then we'll switch
+        // to the fixed format.
+
+        if (fmt == 'g' && e < prec) {
+            fmt = 'f';
+            prec -= (e + 1);
+        }
+        if (fmt == 'f') {
+            dec = e;
+            num_digits = prec + e + 1;
+        } else {
+            e_sign = '+';
+        }
+        signed_e = e;
+    }
+    if (prec < 0) {
+        // This can happen when the prec is trimmed to prevent buffer overflow
+        prec = 0;
+    }
+
+    // At this point e contains the absolute value of the power of 10 exponent.
+    // (dec + 1) == the number of digits before the decimal.
+
+    // For e, prec is # digits after the decimal
+    // For f, prec is # digits after the decimal
+    // For g, prec is the max number of significant digits
+    //
+    // For e & g there will be a single digit before the decimal
+    // for f there will be e digits before the decimal
+
+    if (fmt == 'e') {
+        num_digits = prec + 1;
+    } else if (fmt == 'g') {
+        if (prec == 0) {
+            prec = 1;
+        }
+        num_digits = prec;
+    }
+
+    // Generate the digits one at a time by repeated subtraction of the
+    // current power-of-ten base from the remaining value f.
+    int d = 0;
+    for (int digit_index = signed_e; num_digits >= 0; --digit_index) {
+        FPTYPE u_base = FPCONST(1.0);
+        if (digit_index > 0) {
+            // Generate 10^digit_index for positive digit_index.
+            u_base = MICROPY_FLOAT_C_FUN(pow)(10, digit_index);
+        }
+        for (d = 0; d < 9; ++d) {
+            if (f < u_base) {
+                break;
+            }
+            f -= u_base;
+        }
+        // We calculate one more digit than we display, to use in rounding
+        // below.  So only emit the digit if it's one that we display.
+        if (num_digits > 0) {
+            // Emit this number (the leading digit).
+            *s++ = '0' + d;
+            if (dec == 0 && prec > 0) {
+                *s++ = '.';
+            }
+        }
+        --dec;
+        --num_digits;
+        if (digit_index <= 0) {
+            // Once we get below 1.0, we scale up f instead of calculating
+            // negative powers of 10 in u_base.  This provides better
+            // renditions of exact decimals like 1/16 etc.
+            f *= FPCONST(10.0);
+        }
+    }
+    // Rounding.  If the next digit to print is >= 5, round up.
+    if (d >= 5) {
+        char *rs = s;
+        rs--;
+        while (1) {
+            if (*rs == '.') {
+                rs--;
+                continue;
+            }
+            if (*rs < '0' || *rs > '9') {
+                // + or -
+                rs++; // So we sit on the digit to the right of the sign
+                break;
+            }
+            if (*rs < '9') {
+                (*rs)++;
+                break;
+            }
+            *rs = '0';
+            if (rs == buf) {
+                break;
+            }
+            rs--;
+        }
+        if (*rs == '0') {
+            // We need to insert a 1
+            if (rs[1] == '.' && fmt != 'f') {
+                // We're going to round 9.99 to 10.00
+                // Move the decimal point
+                rs[0] = '.';
+                rs[1] = '0';
+                if (e_sign == '-') {
+                    e--;
+                    if (e == 0) {
+                        e_sign = '+';
+                    }
+                } else {
+                    e++;
+                }
+            } else {
+                // Need an extra digit at the end to make room for the leading '1'
+                // but if we're at the buffer size limit, just drop the final digit.
+                if ((size_t)(s + 1 - buf) < buf_size) {
+                    s++;
+                }
+            }
+            char *ss = s;
+            while (ss > rs) {
+                *ss = ss[-1];
+                ss--;
+            }
+            *rs = '1';
+        }
+    }
+
+    // verify that we did not overrun the input buffer so far
+    assert((size_t)(s + 1 - buf) <= buf_size);
+
+    if (org_fmt == 'g' && prec > 0) {
+        // Remove trailing zeros and a trailing decimal point
+        while (s[-1] == '0') {
+            s--;
+        }
+        if (s[-1] == '.') {
+            s--;
+        }
+    }
+    // Append the exponent
+    if (e_sign) {
+        *s++ = e_char;
+        *s++ = e_sign;
+        if (FPMIN_BUF_SIZE == 7 && e >= 100) {
+            *s++ = '0' + (e / 100);
+        }
+        *s++ = '0' + ((e / 10) % 10);
+        *s++ = '0' + (e % 10);
+    }
+    *s = '\0';
+
+    // verify that we did not overrun the input buffer
+    assert((size_t)(s + 1 - buf) <= buf_size);
+
+    return s - buf;
+}
+
+#endif // MICROPY_FLOAT_IMPL != MICROPY_FLOAT_IMPL_NONE

+ 35 - 0
mp_flipper/lib/micropython/py/formatfloat.h

@@ -0,0 +1,35 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_FORMATFLOAT_H
+#define MICROPY_INCLUDED_PY_FORMATFLOAT_H
+
+#include "py/mpconfig.h"
+
+#if MICROPY_PY_BUILTINS_FLOAT
+int mp_format_float(mp_float_t f, char *buf, size_t bufSize, char fmt, int prec, char sign);
+#endif
+
+#endif // MICROPY_INCLUDED_PY_FORMATFLOAT_H

+ 135 - 0
mp_flipper/lib/micropython/py/frozenmod.c

@@ -0,0 +1,135 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2015 Paul Sokolovsky
+ * Copyright (c) 2016 Damien P. George
+ * Copyright (c) 2021 Jim Mussared
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <string.h>
+#include <stdint.h>
+
+#include "py/lexer.h"
+#include "py/frozenmod.h"
+
+#if MICROPY_MODULE_FROZEN
+
+// Null-separated frozen file names. All string-type entries are listed first,
+// followed by mpy-type entries. Use mp_frozen_str_sizes to determine how
+// many string entries.
+extern const char mp_frozen_names[];
+
+#if MICROPY_MODULE_FROZEN_STR
+
+#ifndef MICROPY_MODULE_FROZEN_LEXER
+#define MICROPY_MODULE_FROZEN_LEXER mp_lexer_new_from_str_len
+#else
+mp_lexer_t *MICROPY_MODULE_FROZEN_LEXER(qstr src_name, const char *str, mp_uint_t len, mp_uint_t free_len);
+#endif
+
+// Size in bytes of each string entry, followed by a zero (terminator).
+extern const uint32_t mp_frozen_str_sizes[];
+// Null-separated string content.
+extern const char mp_frozen_str_content[];
+#endif // MICROPY_MODULE_FROZEN_STR
+
+#if MICROPY_MODULE_FROZEN_MPY
+
+#include "py/emitglue.h"
+
+extern const mp_frozen_module_t *const mp_frozen_mpy_content[];
+
+#endif // MICROPY_MODULE_FROZEN_MPY
+
+// Search for "str" as a frozen entry, returning the stat result
+// (no-exist/file/dir), as well as the type (none/str/mpy) and data.
+// frozen_type can be NULL if its value isn't needed (and then data is assumed to be NULL).
+mp_import_stat_t mp_find_frozen_module(const char *str, int *frozen_type, void **data) {
+    size_t len = strlen(str);
+    const char *name = mp_frozen_names;
+
+    if (frozen_type != NULL) {
+        *frozen_type = MP_FROZEN_NONE;
+    }
+
+    // Count the number of str lengths we have to find how many str entries.
+    // (Only needed when both entry kinds coexist; otherwise the index of an
+    // entry unambiguously identifies its kind.)
+    size_t num_str = 0;
+    #if MICROPY_MODULE_FROZEN_STR && MICROPY_MODULE_FROZEN_MPY
+    for (const uint32_t *s = mp_frozen_str_sizes; *s != 0; ++s) {
+        ++num_str;
+    }
+    #endif
+
+    // Walk the null-separated list of frozen entry names; i is the index of
+    // the current entry into the content tables.
+    for (size_t i = 0; *name != 0; i++) {
+        size_t entry_len = strlen(name);
+        if (entry_len >= len && memcmp(str, name, len) == 0) {
+            // Query is a prefix of the current entry.
+            if (entry_len == len) {
+                // Exact match --> file.
+
+                if (frozen_type != NULL) {
+                    #if MICROPY_MODULE_FROZEN_STR
+                    if (i < num_str) {
+                        *frozen_type = MP_FROZEN_STR;
+                        // Use the size table to figure out where this index starts.
+                        size_t offset = 0;
+                        for (size_t j = 0; j < i; ++j) {
+                            offset += mp_frozen_str_sizes[j] + 1;
+                        }
+                        size_t content_len = mp_frozen_str_sizes[i];
+                        const char *content = &mp_frozen_str_content[offset];
+
+                        // Note: str & len have been updated by find_frozen_entry to strip
+                        // the ".frozen/" prefix (to avoid this being a distinct qstr to
+                        // the original path QSTR in frozen_content.c).
+                        qstr source = qstr_from_strn(str, len);
+                        mp_lexer_t *lex = MICROPY_MODULE_FROZEN_LEXER(source, content, content_len, 0);
+                        *data = lex;
+                    }
+                    #endif
+
+                    #if MICROPY_MODULE_FROZEN_MPY
+                    if (i >= num_str) {
+                        *frozen_type = MP_FROZEN_MPY;
+                        // Load the corresponding index as a raw_code, taking
+                        // into account any string entries to offset by.
+                        *data = (void *)mp_frozen_mpy_content[i - num_str];
+                    }
+                    #endif
+                }
+
+                return MP_IMPORT_STAT_FILE;
+            } else if (name[len] == '/') {
+                // Matches up to directory separator, this is a valid
+                // directory path.
+                return MP_IMPORT_STAT_DIR;
+            }
+        }
+        // Skip null separator.
+        name += entry_len + 1;
+    }
+
+    // No frozen entry matched the query.
+    return MP_IMPORT_STAT_NO_EXIST;
+}
+
+#endif // MICROPY_MODULE_FROZEN

+ 40 - 0
mp_flipper/lib/micropython/py/frozenmod.h

@@ -0,0 +1,40 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2015 Paul Sokolovsky
+ * Copyright (c) 2016 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_FROZENMOD_H
+#define MICROPY_INCLUDED_PY_FROZENMOD_H
+
+#include "py/builtin.h"
+
+enum {
+    MP_FROZEN_NONE,
+    MP_FROZEN_STR,
+    MP_FROZEN_MPY,
+};
+
+mp_import_stat_t mp_find_frozen_module(const char *str, int *frozen_type, void **data);
+
+#endif // MICROPY_INCLUDED_PY_FROZENMOD_H

+ 1354 - 0
mp_flipper/lib/micropython/py/gc.c

@@ -0,0 +1,1354 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ * Copyright (c) 2014 Paul Sokolovsky
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <stdio.h>
+#include <string.h>
+
+#include "py/gc.h"
+#include "py/runtime.h"
+
+#if MICROPY_DEBUG_VALGRIND
+#include <valgrind/memcheck.h>
+#endif
+
+#if MICROPY_ENABLE_GC
+
+#if MICROPY_DEBUG_VERBOSE // print debugging info
+#define DEBUG_PRINT (1)
+#define DEBUG_printf DEBUG_printf
+#else // don't print debugging info
+#define DEBUG_PRINT (0)
+#define DEBUG_printf(...) (void)0
+#endif
+
+// make this 1 to dump the heap each time it changes
+#define EXTENSIVE_HEAP_PROFILING (0)
+
+// make this 1 to zero out swept memory to more eagerly
+// detect untraced object still in use
+#define CLEAR_ON_SWEEP (0)
+
+#define WORDS_PER_BLOCK ((MICROPY_BYTES_PER_GC_BLOCK) / MP_BYTES_PER_OBJ_WORD)
+#define BYTES_PER_BLOCK (MICROPY_BYTES_PER_GC_BLOCK)
+
+// ATB = allocation table byte
+// 0b00 = FREE -- free block
+// 0b01 = HEAD -- head of a chain of blocks
+// 0b10 = TAIL -- in the tail of a chain of blocks
+// 0b11 = MARK -- marked head block
+
+#define AT_FREE (0)
+#define AT_HEAD (1)
+#define AT_TAIL (2)
+#define AT_MARK (3)
+
+#define BLOCKS_PER_ATB (4)
+#define ATB_MASK_0 (0x03)
+#define ATB_MASK_1 (0x0c)
+#define ATB_MASK_2 (0x30)
+#define ATB_MASK_3 (0xc0)
+
+#define ATB_0_IS_FREE(a) (((a) & ATB_MASK_0) == 0)
+#define ATB_1_IS_FREE(a) (((a) & ATB_MASK_1) == 0)
+#define ATB_2_IS_FREE(a) (((a) & ATB_MASK_2) == 0)
+#define ATB_3_IS_FREE(a) (((a) & ATB_MASK_3) == 0)
+
+#if MICROPY_GC_SPLIT_HEAP
+#define NEXT_AREA(area) ((area)->next)
+#else
+#define NEXT_AREA(area) (NULL)
+#endif
+
+#define BLOCK_SHIFT(block) (2 * ((block) & (BLOCKS_PER_ATB - 1)))
+#define ATB_GET_KIND(area, block) (((area)->gc_alloc_table_start[(block) / BLOCKS_PER_ATB] >> BLOCK_SHIFT(block)) & 3)
+#define ATB_ANY_TO_FREE(area, block) do { area->gc_alloc_table_start[(block) / BLOCKS_PER_ATB] &= (~(AT_MARK << BLOCK_SHIFT(block))); } while (0)
+#define ATB_FREE_TO_HEAD(area, block) do { area->gc_alloc_table_start[(block) / BLOCKS_PER_ATB] |= (AT_HEAD << BLOCK_SHIFT(block)); } while (0)
+#define ATB_FREE_TO_TAIL(area, block) do { area->gc_alloc_table_start[(block) / BLOCKS_PER_ATB] |= (AT_TAIL << BLOCK_SHIFT(block)); } while (0)
+#define ATB_HEAD_TO_MARK(area, block) do { area->gc_alloc_table_start[(block) / BLOCKS_PER_ATB] |= (AT_MARK << BLOCK_SHIFT(block)); } while (0)
+#define ATB_MARK_TO_HEAD(area, block) do { area->gc_alloc_table_start[(block) / BLOCKS_PER_ATB] &= (~(AT_TAIL << BLOCK_SHIFT(block))); } while (0)
+
+#define BLOCK_FROM_PTR(area, ptr) (((byte *)(ptr) - area->gc_pool_start) / BYTES_PER_BLOCK)
+#define PTR_FROM_BLOCK(area, block) (((block) * BYTES_PER_BLOCK + (uintptr_t)area->gc_pool_start))
+
+// After the ATB, there must be a byte filled with AT_FREE so that gc_mark_tree
+// cannot erroneously conclude that a block extends past the end of the GC heap
+// due to bit patterns in the FTB (or first block, if finalizers are disabled)
+// being interpreted as AT_TAIL.
+#define ALLOC_TABLE_GAP_BYTE (1)
+
+#if MICROPY_ENABLE_FINALISER
+// FTB = finaliser table byte
+// if set, then the corresponding block may have a finaliser
+
+#define BLOCKS_PER_FTB (8)
+
+#define FTB_GET(area, block) ((area->gc_finaliser_table_start[(block) / BLOCKS_PER_FTB] >> ((block) & 7)) & 1)
+#define FTB_SET(area, block) do { area->gc_finaliser_table_start[(block) / BLOCKS_PER_FTB] |= (1 << ((block) & 7)); } while (0)
+#define FTB_CLEAR(area, block) do { area->gc_finaliser_table_start[(block) / BLOCKS_PER_FTB] &= (~(1 << ((block) & 7))); } while (0)
+#endif
+
+#if MICROPY_PY_THREAD && !MICROPY_PY_THREAD_GIL
+#define GC_ENTER() mp_thread_mutex_lock(&MP_STATE_MEM(gc_mutex), 1)
+#define GC_EXIT() mp_thread_mutex_unlock(&MP_STATE_MEM(gc_mutex))
+#else
+#define GC_ENTER()
+#define GC_EXIT()
+#endif
+
+// TODO waste less memory; currently requires that all entries in alloc_table have a corresponding block in pool
+// Initialise a single GC memory area: carve the region [start, end) into the
+// allocation table, the (optional) finaliser table, and the block pool, then
+// clear the tables so every block starts out free.
+static void gc_setup_area(mp_state_mem_area_t *area, void *start, void *end) {
+    // calculate parameters for GC (T=total, A=alloc table, F=finaliser table, P=pool; all in bytes):
+    // T = A + F + P
+    //     F = A * BLOCKS_PER_ATB / BLOCKS_PER_FTB
+    //     P = A * BLOCKS_PER_ATB * BYTES_PER_BLOCK
+    // => T = A * (1 + BLOCKS_PER_ATB / BLOCKS_PER_FTB + BLOCKS_PER_ATB * BYTES_PER_BLOCK)
+    size_t total_byte_len = (byte *)end - (byte *)start;
+    #if MICROPY_ENABLE_FINALISER
+    area->gc_alloc_table_byte_len = (total_byte_len - ALLOC_TABLE_GAP_BYTE)
+        * MP_BITS_PER_BYTE
+        / (
+            MP_BITS_PER_BYTE
+            + MP_BITS_PER_BYTE * BLOCKS_PER_ATB / BLOCKS_PER_FTB
+            + MP_BITS_PER_BYTE * BLOCKS_PER_ATB * BYTES_PER_BLOCK
+            );
+    #else
+    area->gc_alloc_table_byte_len = (total_byte_len - ALLOC_TABLE_GAP_BYTE) / (1 + MP_BITS_PER_BYTE / 2 * BYTES_PER_BLOCK);
+    #endif
+
+    area->gc_alloc_table_start = (byte *)start;
+
+    #if MICROPY_ENABLE_FINALISER
+    // Finaliser table sits directly after the alloc table plus the gap byte.
+    size_t gc_finaliser_table_byte_len = (area->gc_alloc_table_byte_len * BLOCKS_PER_ATB + BLOCKS_PER_FTB - 1) / BLOCKS_PER_FTB;
+    area->gc_finaliser_table_start = area->gc_alloc_table_start + area->gc_alloc_table_byte_len + ALLOC_TABLE_GAP_BYTE;
+    #endif
+
+    // The pool is aligned to the end of the area.
+    size_t gc_pool_block_len = area->gc_alloc_table_byte_len * BLOCKS_PER_ATB;
+    area->gc_pool_start = (byte *)end - gc_pool_block_len * BYTES_PER_BLOCK;
+    area->gc_pool_end = end;
+
+    #if MICROPY_ENABLE_FINALISER
+    assert(area->gc_pool_start >= area->gc_finaliser_table_start + gc_finaliser_table_byte_len);
+    #endif
+
+    #if MICROPY_ENABLE_FINALISER
+    // clear ATB's and FTB's
+    memset(area->gc_alloc_table_start, 0, gc_finaliser_table_byte_len + area->gc_alloc_table_byte_len + ALLOC_TABLE_GAP_BYTE);
+    #else
+    // clear ATB's
+    memset(area->gc_alloc_table_start, 0, area->gc_alloc_table_byte_len + ALLOC_TABLE_GAP_BYTE);
+    #endif
+
+    // Start allocation scans from the beginning of this fresh area.
+    area->gc_last_free_atb_index = 0;
+    area->gc_last_used_block = 0;
+
+    #if MICROPY_GC_SPLIT_HEAP
+    area->next = NULL;
+    #endif
+
+    DEBUG_printf("GC layout:\n");
+    DEBUG_printf("  alloc table at %p, length " UINT_FMT " bytes, "
+        UINT_FMT " blocks\n",
+        area->gc_alloc_table_start, area->gc_alloc_table_byte_len,
+        area->gc_alloc_table_byte_len * BLOCKS_PER_ATB);
+    #if MICROPY_ENABLE_FINALISER
+    DEBUG_printf("  finaliser table at %p, length " UINT_FMT " bytes, "
+        UINT_FMT " blocks\n", area->gc_finaliser_table_start,
+        gc_finaliser_table_byte_len,
+        gc_finaliser_table_byte_len * BLOCKS_PER_FTB);
+    #endif
+    DEBUG_printf("  pool at %p, length " UINT_FMT " bytes, "
+        UINT_FMT " blocks\n", area->gc_pool_start,
+        gc_pool_block_len * BYTES_PER_BLOCK, gc_pool_block_len);
+}
+
+// Initialise the GC with its main heap area covering [start, end), and reset
+// the global GC state (lock depth, auto-collect flag, optional allocation
+// threshold, and the GC mutex when threading without a GIL is enabled).
+void gc_init(void *start, void *end) {
+    // align end pointer on block boundary
+    end = (void *)((uintptr_t)end & (~(BYTES_PER_BLOCK - 1)));
+    DEBUG_printf("Initializing GC heap: %p..%p = " UINT_FMT " bytes\n", start, end, (byte *)end - (byte *)start);
+
+    gc_setup_area(&MP_STATE_MEM(area), start, end);
+
+    // set last free ATB index to start of heap
+    #if MICROPY_GC_SPLIT_HEAP
+    MP_STATE_MEM(gc_last_free_area) = &MP_STATE_MEM(area);
+    #endif
+
+    // unlock the GC
+    MP_STATE_THREAD(gc_lock_depth) = 0;
+
+    // allow auto collection
+    MP_STATE_MEM(gc_auto_collect_enabled) = 1;
+
+    #if MICROPY_GC_ALLOC_THRESHOLD
+    // by default, maxuint for gc threshold, effectively turning gc-by-threshold off
+    MP_STATE_MEM(gc_alloc_threshold) = (size_t)-1;
+    MP_STATE_MEM(gc_alloc_amount) = 0;
+    #endif
+
+    #if MICROPY_PY_THREAD && !MICROPY_PY_THREAD_GIL
+    mp_thread_mutex_init(&MP_STATE_MEM(gc_mutex));
+    #endif
+}
+
+#if MICROPY_GC_SPLIT_HEAP
+// Register an additional heap area [start, end) with the GC (split-heap
+// builds only).  The area's bookkeeping struct is stored at the start of the
+// region itself, and the area is appended to the end of the linked list.
+void gc_add(void *start, void *end) {
+    // Place the area struct at the start of the area.
+    mp_state_mem_area_t *area = (mp_state_mem_area_t *)start;
+    start = (void *)((uintptr_t)start + sizeof(mp_state_mem_area_t));
+
+    // Align the end pointer on a block boundary, as in gc_init.
+    end = (void *)((uintptr_t)end & (~(BYTES_PER_BLOCK - 1)));
+    DEBUG_printf("Adding GC heap: %p..%p = " UINT_FMT " bytes\n", start, end, (byte *)end - (byte *)start);
+
+    // Init this area
+    gc_setup_area(area, start, end);
+
+    // Find the last registered area in the linked list
+    mp_state_mem_area_t *prev_area = &MP_STATE_MEM(area);
+    while (prev_area->next != NULL) {
+        prev_area = prev_area->next;
+    }
+
+    // Add this area to the linked list
+    prev_area->next = area;
+}
+
+#if MICROPY_GC_SPLIT_HEAP_AUTO
+// Try to automatically add a heap area large enough to fulfill 'failed_alloc'.
+// Returns true if a new area was successfully allocated from the system heap
+// (via MP_PLAT_ALLOC_HEAP) and registered, false otherwise.
+static bool gc_try_add_heap(size_t failed_alloc) {
+    // 'needed' is the size of a heap large enough to hold failed_alloc, with
+    // the additional metadata overheads as calculated in gc_setup_area().
+    //
+    // Rather than reproduce all of that logic here, we approximate that adding
+    // (13/512) is enough overhead for sufficiently large heap areas (the
+    // overhead converges to 3/128, but there's some fixed overhead and some
+    // rounding up of partial block sizes).
+    size_t needed = failed_alloc + MAX(2048, failed_alloc * 13 / 512);
+
+    size_t avail = gc_get_max_new_split();
+
+    DEBUG_printf("gc_try_add_heap failed_alloc " UINT_FMT ", "
+        "needed " UINT_FMT ", avail " UINT_FMT " bytes \n",
+        failed_alloc,
+        needed,
+        avail);
+
+    if (avail < needed) {
+        // Can't fit this allocation, or system heap has nearly run out anyway
+        return false;
+    }
+
+    // Deciding how much to grow the total heap by each time is tricky:
+    //
+    // - Grow by too small amounts, leads to heap fragmentation issues.
+    //
+    // - Grow by too large amounts, may lead to system heap running out of
+    //   space.
+    //
+    // Currently, this implementation is:
+    //
+    // - At minimum, aim to double the total heap size each time we add a new
+    //   heap.  i.e. without any large single allocations, total size will be
+    //   64KB -> 128KB -> 256KB -> 512KB -> 1MB, etc
+    //
+    // - If the failed allocation is too large to fit in that size, the new
+    //   heap is made exactly large enough for that allocation. Future growth
+    //   will double the total heap size again.
+    //
+    // - If the new heap won't fit in the available free space, add the largest
+    //   new heap that will fit (this may lead to failed system heap allocations
+    //   elsewhere, but some allocation will likely fail in this circumstance!)
+
+    // Compute total number of blocks in the current heap.
+    size_t total_blocks = 0;
+    for (mp_state_mem_area_t *area = &MP_STATE_MEM(area);
+         area != NULL;
+         area = NEXT_AREA(area)) {
+        total_blocks += area->gc_alloc_table_byte_len * BLOCKS_PER_ATB;
+    }
+
+    // Compute bytes needed to build a heap with total_blocks blocks.
+    size_t total_heap =
+        total_blocks / BLOCKS_PER_ATB
+        #if MICROPY_ENABLE_FINALISER
+        + total_blocks / BLOCKS_PER_FTB
+        #endif
+        + total_blocks * BYTES_PER_BLOCK
+        + ALLOC_TABLE_GAP_BYTE
+        + sizeof(mp_state_mem_area_t);
+
+    // Round up size to the nearest multiple of BYTES_PER_BLOCK.
+    total_heap = (total_heap + BYTES_PER_BLOCK - 1) & (~(BYTES_PER_BLOCK - 1));
+
+    DEBUG_printf("total_heap " UINT_FMT " bytes\n", total_heap);
+
+    size_t to_alloc = MIN(avail, MAX(total_heap, needed));
+
+    mp_state_mem_area_t *new_heap = MP_PLAT_ALLOC_HEAP(to_alloc);
+
+    DEBUG_printf("MP_PLAT_ALLOC_HEAP " UINT_FMT " = %p\n",
+        to_alloc, new_heap);
+
+    if (new_heap == NULL) {
+        // This should only fail:
+        // - In a threaded environment if another thread has
+        //   allocated while this function ran.
+        // - If there is a bug in gc_get_max_new_split().
+        return false;
+    }
+
+    gc_add(new_heap, (void *)new_heap + to_alloc);
+
+    return true;
+}
+#endif
+
+#endif
+
+// Increment this thread's GC lock depth; while non-zero, gc_alloc/gc_free
+// become no-ops for this thread and collections are prevented.
+void gc_lock(void) {
+    // This does not need to be atomic or have the GC mutex because:
+    // - each thread has its own gc_lock_depth so there are no races between threads;
+    // - a hard interrupt will only change gc_lock_depth during its execution, and
+    //   upon return will restore the value of gc_lock_depth.
+    MP_STATE_THREAD(gc_lock_depth)++;
+}
+
+// Decrement this thread's GC lock depth (must balance a prior gc_lock).
+void gc_unlock(void) {
+    // This does not need to be atomic, See comment above in gc_lock.
+    MP_STATE_THREAD(gc_lock_depth)--;
+}
+
+// Return true if the GC is currently locked for this thread.
+bool gc_is_locked(void) {
+    return MP_STATE_THREAD(gc_lock_depth) != 0;
+}
+
+#if MICROPY_GC_SPLIT_HEAP
+// Returns the area to which this pointer belongs, or NULL if it isn't
+// allocated on the GC-managed heap.
+static inline mp_state_mem_area_t *gc_get_ptr_area(const void *ptr) {
+    if (((uintptr_t)(ptr) & (BYTES_PER_BLOCK - 1)) != 0) {   // must be aligned on a block
+        return NULL;
+    }
+    // Walk the linked list of heap areas looking for one whose pool contains ptr.
+    for (mp_state_mem_area_t *area = &MP_STATE_MEM(area); area != NULL; area = NEXT_AREA(area)) {
+        if (ptr >= (void *)area->gc_pool_start   // must be above start of pool
+            && ptr < (void *)area->gc_pool_end) {   // must be below end of pool
+            return area;
+        }
+    }
+    return NULL;
+}
+#endif
+
+// Single-heap variant of the pointer validity check: true if ptr is block
+// aligned and lies within the (sole) GC pool.
+// ptr should be of type void*
+#define VERIFY_PTR(ptr) ( \
+    ((uintptr_t)(ptr) & (BYTES_PER_BLOCK - 1)) == 0          /* must be aligned on a block */ \
+    && ptr >= (void *)MP_STATE_MEM(area).gc_pool_start      /* must be above start of pool */ \
+    && ptr < (void *)MP_STATE_MEM(area).gc_pool_end         /* must be below end of pool */ \
+    )
+
+// Debug hook: trace each block as it is marked (no-op unless DEBUG_PRINT).
+#ifndef TRACE_MARK
+#if DEBUG_PRINT
+#define TRACE_MARK(block, ptr) DEBUG_printf("gc_mark(%p)\n", ptr)
+#else
+#define TRACE_MARK(block, ptr)
+#endif
+#endif
+
+// Take the given block as the topmost block on the stack. Check all its
+// children: mark the unmarked child blocks and put those newly marked
+// blocks on the stack. When all children have been checked, pop off the
+// topmost block on the stack and repeat with that one.
+// If the explicit mark stack overflows, gc_stack_overflow is set and the
+// remaining work is finished later by gc_deal_with_stack_overflow().
+#if MICROPY_GC_SPLIT_HEAP
+static void gc_mark_subtree(mp_state_mem_area_t *area, size_t block)
+#else
+static void gc_mark_subtree(size_t block)
+#endif
+{
+    // Start with the block passed in the argument.
+    size_t sp = 0;
+    for (;;) {
+        #if !MICROPY_GC_SPLIT_HEAP
+        mp_state_mem_area_t *area = &MP_STATE_MEM(area);
+        #endif
+
+        // work out number of consecutive blocks in the chain starting with this one
+        size_t n_blocks = 0;
+        do {
+            n_blocks += 1;
+        } while (ATB_GET_KIND(area, block + n_blocks) == AT_TAIL);
+
+        // check that the consecutive blocks didn't overflow past the end of the area
+        assert(area->gc_pool_start + (block + n_blocks) * BYTES_PER_BLOCK <= area->gc_pool_end);
+
+        // check this block's children by scanning every word of it as a potential pointer
+        void **ptrs = (void **)PTR_FROM_BLOCK(area, block);
+        for (size_t i = n_blocks * BYTES_PER_BLOCK / sizeof(void *); i > 0; i--, ptrs++) {
+            MICROPY_GC_HOOK_LOOP(i);
+            void *ptr = *ptrs;
+            // If this is a heap pointer that hasn't been marked, mark it and push
+            // its children to the stack.
+            #if MICROPY_GC_SPLIT_HEAP
+            mp_state_mem_area_t *ptr_area = gc_get_ptr_area(ptr);
+            if (!ptr_area) {
+                // Not a heap-allocated pointer (might even be random data).
+                continue;
+            }
+            #else
+            if (!VERIFY_PTR(ptr)) {
+                continue;
+            }
+            mp_state_mem_area_t *ptr_area = area;
+            #endif
+            size_t ptr_block = BLOCK_FROM_PTR(ptr_area, ptr);
+            if (ATB_GET_KIND(ptr_area, ptr_block) != AT_HEAD) {
+                // This block is already marked.
+                continue;
+            }
+            // An unmarked head. Mark it, and push it on gc stack.
+            TRACE_MARK(ptr_block, ptr);
+            ATB_HEAD_TO_MARK(ptr_area, ptr_block);
+            if (sp < MICROPY_ALLOC_GC_STACK_SIZE) {
+                MP_STATE_MEM(gc_block_stack)[sp] = ptr_block;
+                #if MICROPY_GC_SPLIT_HEAP
+                MP_STATE_MEM(gc_area_stack)[sp] = ptr_area;
+                #endif
+                sp += 1;
+            } else {
+                MP_STATE_MEM(gc_stack_overflow) = 1;
+            }
+        }
+
+        // Are there any blocks on the stack?
+        if (sp == 0) {
+            break; // No, stack is empty, we're done.
+        }
+
+        // pop the next block off the stack
+        sp -= 1;
+        block = MP_STATE_MEM(gc_block_stack)[sp];
+        #if MICROPY_GC_SPLIT_HEAP
+        area = MP_STATE_MEM(gc_area_stack)[sp];
+        #endif
+    }
+}
+
+// If the mark stack overflowed during marking, rescan the entire heap for
+// marked blocks whose children may not have been traced, and trace them
+// again.  Repeats until a full pass completes without another overflow.
+static void gc_deal_with_stack_overflow(void) {
+    while (MP_STATE_MEM(gc_stack_overflow)) {
+        MP_STATE_MEM(gc_stack_overflow) = 0;
+
+        // scan entire memory looking for blocks which have been marked but not their children
+        for (mp_state_mem_area_t *area = &MP_STATE_MEM(area); area != NULL; area = NEXT_AREA(area)) {
+            for (size_t block = 0; block < area->gc_alloc_table_byte_len * BLOCKS_PER_ATB; block++) {
+                MICROPY_GC_HOOK_LOOP(block);
+                // trace (again) if mark bit set
+                if (ATB_GET_KIND(area, block) == AT_MARK) {
+                    #if MICROPY_GC_SPLIT_HEAP
+                    gc_mark_subtree(area, block);
+                    #else
+                    gc_mark_subtree(block);
+                    #endif
+                }
+            }
+        }
+    }
+}
+
+// Sweep phase: free every unmarked head block (running finalisers first when
+// enabled) together with its tail blocks, flip surviving AT_MARK blocks back
+// to AT_HEAD, and update each area's last-used-block watermark.
+static void gc_sweep(void) {
+    #if MICROPY_PY_GC_COLLECT_RETVAL
+    MP_STATE_MEM(gc_collected) = 0;
+    #endif
+    // free unmarked heads and their tails
+    int free_tail = 0;
+    #if MICROPY_GC_SPLIT_HEAP_AUTO
+    mp_state_mem_area_t *prev_area = NULL;
+    #endif
+    for (mp_state_mem_area_t *area = &MP_STATE_MEM(area); area != NULL; area = NEXT_AREA(area)) {
+        size_t end_block = area->gc_alloc_table_byte_len * BLOCKS_PER_ATB;
+        if (area->gc_last_used_block < end_block) {
+            end_block = area->gc_last_used_block + 1;
+        }
+
+        size_t last_used_block = 0;
+
+        for (size_t block = 0; block < end_block; block++) {
+            MICROPY_GC_HOOK_LOOP(block);
+            switch (ATB_GET_KIND(area, block)) {
+                case AT_HEAD:
+                    // an unmarked head is garbage; run its finaliser then free it
+                    #if MICROPY_ENABLE_FINALISER
+                    if (FTB_GET(area, block)) {
+                        mp_obj_base_t *obj = (mp_obj_base_t *)PTR_FROM_BLOCK(area, block);
+                        if (obj->type != NULL) {
+                            // if the object has a type then see if it has a __del__ method
+                            mp_obj_t dest[2];
+                            mp_load_method_maybe(MP_OBJ_FROM_PTR(obj), MP_QSTR___del__, dest);
+                            if (dest[0] != MP_OBJ_NULL) {
+                                // load_method returned a method, execute it in a protected environment
+                                #if MICROPY_ENABLE_SCHEDULER
+                                mp_sched_lock();
+                                #endif
+                                mp_call_function_1_protected(dest[0], dest[1]);
+                                #if MICROPY_ENABLE_SCHEDULER
+                                mp_sched_unlock();
+                                #endif
+                            }
+                        }
+                        // clear finaliser flag
+                        FTB_CLEAR(area, block);
+                    }
+                    #endif
+                    free_tail = 1;
+                    DEBUG_printf("gc_sweep(%p)\n", (void *)PTR_FROM_BLOCK(area, block));
+                    #if MICROPY_PY_GC_COLLECT_RETVAL
+                    MP_STATE_MEM(gc_collected)++;
+                    #endif
+                    // fall through to free the head
+                    MP_FALLTHROUGH
+
+                case AT_TAIL:
+                    if (free_tail) {
+                        ATB_ANY_TO_FREE(area, block);
+                        #if CLEAR_ON_SWEEP
+                        memset((void *)PTR_FROM_BLOCK(area, block), 0, BYTES_PER_BLOCK);
+                        #endif
+                    } else {
+                        last_used_block = block;
+                    }
+                    break;
+
+                case AT_MARK:
+                    // survivor: revert mark so the block reads as allocated again
+                    ATB_MARK_TO_HEAD(area, block);
+                    free_tail = 0;
+                    last_used_block = block;
+                    break;
+            }
+        }
+
+        area->gc_last_used_block = last_used_block;
+
+        #if MICROPY_GC_SPLIT_HEAP_AUTO
+        // Free any empty area, aside from the first one
+        if (last_used_block == 0 && prev_area != NULL) {
+            DEBUG_printf("gc_sweep free empty area %p\n", area);
+            NEXT_AREA(prev_area) = NEXT_AREA(area);
+            MP_PLAT_FREE_HEAP(area);
+            area = prev_area;
+        }
+        prev_area = area;
+        #endif
+    }
+}
+
+// Begin a collection: take the GC mutex, lock allocation for this thread,
+// reset per-collection state, then mark from the root pointer sections of
+// mp_state_ctx (and the Python value stack when pystack is enabled).
+// Must be paired with gc_collect_end().
+void gc_collect_start(void) {
+    GC_ENTER();
+    MP_STATE_THREAD(gc_lock_depth)++;
+    #if MICROPY_GC_ALLOC_THRESHOLD
+    MP_STATE_MEM(gc_alloc_amount) = 0;
+    #endif
+    MP_STATE_MEM(gc_stack_overflow) = 0;
+
+    // Trace root pointers.  This relies on the root pointers being organised
+    // correctly in the mp_state_ctx structure.  We scan nlr_top, dict_locals,
+    // dict_globals, then the root pointer section of mp_state_vm.
+    void **ptrs = (void **)(void *)&mp_state_ctx;
+    size_t root_start = offsetof(mp_state_ctx_t, thread.dict_locals);
+    size_t root_end = offsetof(mp_state_ctx_t, vm.qstr_last_chunk);
+    gc_collect_root(ptrs + root_start / sizeof(void *), (root_end - root_start) / sizeof(void *));
+
+    #if MICROPY_ENABLE_PYSTACK
+    // Trace root pointers from the Python stack.
+    ptrs = (void **)(void *)MP_STATE_THREAD(pystack_start);
+    gc_collect_root(ptrs, (MP_STATE_THREAD(pystack_cur) - MP_STATE_THREAD(pystack_start)) / sizeof(void *));
+    #endif
+}
+
+// Load ptrs[i] for conservative root scanning.
+// Address sanitizer needs to know that the access to ptrs[i] must always be
+// considered OK, even if it's a load from an address that would normally be
+// prohibited (due to being undefined, in a red zone, etc).
+#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8))
+__attribute__((no_sanitize_address))
+#endif
+static void *gc_get_ptr(void **ptrs, int i) {
+    #if MICROPY_DEBUG_VALGRIND
+    // Under valgrind, skip words that are not addressable rather than faulting.
+    if (!VALGRIND_CHECK_MEM_IS_ADDRESSABLE(&ptrs[i], sizeof(*ptrs))) {
+        return NULL;
+    }
+    #endif
+    return ptrs[i];
+}
+
+// Conservatively scan len words starting at ptrs: any word that looks like a
+// pointer to an unmarked head block is marked and its subtree traced.
+void gc_collect_root(void **ptrs, size_t len) {
+    #if !MICROPY_GC_SPLIT_HEAP
+    mp_state_mem_area_t *area = &MP_STATE_MEM(area);
+    #endif
+    for (size_t i = 0; i < len; i++) {
+        MICROPY_GC_HOOK_LOOP(i);
+        void *ptr = gc_get_ptr(ptrs, i);
+        #if MICROPY_GC_SPLIT_HEAP
+        mp_state_mem_area_t *area = gc_get_ptr_area(ptr);
+        if (!area) {
+            continue;
+        }
+        #else
+        if (!VERIFY_PTR(ptr)) {
+            continue;
+        }
+        #endif
+        size_t block = BLOCK_FROM_PTR(area, ptr);
+        if (ATB_GET_KIND(area, block) == AT_HEAD) {
+            // An unmarked head: mark it, and mark all its children
+            ATB_HEAD_TO_MARK(area, block);
+            #if MICROPY_GC_SPLIT_HEAP
+            gc_mark_subtree(area, block);
+            #else
+            gc_mark_subtree(block);
+            #endif
+        }
+    }
+}
+
+// Finish a collection started by gc_collect_start(): complete any overflowed
+// marking, sweep the heap, reset the free-search indices so the next alloc
+// rescans from the start, then release the lock and GC mutex.
+void gc_collect_end(void) {
+    gc_deal_with_stack_overflow();
+    gc_sweep();
+    #if MICROPY_GC_SPLIT_HEAP
+    MP_STATE_MEM(gc_last_free_area) = &MP_STATE_MEM(area);
+    #endif
+    for (mp_state_mem_area_t *area = &MP_STATE_MEM(area); area != NULL; area = NEXT_AREA(area)) {
+        area->gc_last_free_atb_index = 0;
+    }
+    MP_STATE_THREAD(gc_lock_depth)--;
+    GC_EXIT();
+}
+
+// Free every allocated block on the heap (running finalisers via the sweep):
+// performs a "collection" with no marking phase, so nothing survives.
+void gc_sweep_all(void) {
+    GC_ENTER();
+    MP_STATE_THREAD(gc_lock_depth)++;
+    MP_STATE_MEM(gc_stack_overflow) = 0;
+    gc_collect_end();
+}
+
+// Fill *info with heap statistics: total/used/free bytes, largest free run,
+// counts of 1- and 2-block allocations, and the largest allocation (in
+// blocks).  Walks every area's allocation table under the GC mutex.
+void gc_info(gc_info_t *info) {
+    GC_ENTER();
+    info->total = 0;
+    info->used = 0;
+    info->free = 0;
+    info->max_free = 0;
+    info->num_1block = 0;
+    info->num_2block = 0;
+    info->max_block = 0;
+    for (mp_state_mem_area_t *area = &MP_STATE_MEM(area); area != NULL; area = NEXT_AREA(area)) {
+        bool finish = false;
+        info->total += area->gc_pool_end - area->gc_pool_start;
+        // len tracks the current allocation's run length (in blocks),
+        // len_free the current run of free blocks.
+        for (size_t block = 0, len = 0, len_free = 0; !finish;) {
+            MICROPY_GC_HOOK_LOOP(block);
+            size_t kind = ATB_GET_KIND(area, block);
+            switch (kind) {
+                case AT_FREE:
+                    info->free += 1;
+                    len_free += 1;
+                    len = 0;
+                    break;
+
+                case AT_HEAD:
+                    info->used += 1;
+                    len = 1;
+                    break;
+
+                case AT_TAIL:
+                    info->used += 1;
+                    len += 1;
+                    break;
+
+                case AT_MARK:
+                    // shouldn't happen
+                    break;
+            }
+
+            block++;
+            finish = (block == area->gc_alloc_table_byte_len * BLOCKS_PER_ATB);
+            // Get next block type if possible
+            if (!finish) {
+                kind = ATB_GET_KIND(area, block);
+            }
+
+            // At the end of an allocation run (or the area), record its size stats.
+            if (finish || kind == AT_FREE || kind == AT_HEAD) {
+                if (len == 1) {
+                    info->num_1block += 1;
+                } else if (len == 2) {
+                    info->num_2block += 1;
+                }
+                if (len > info->max_block) {
+                    info->max_block = len;
+                }
+                if (finish || kind == AT_HEAD) {
+                    if (len_free > info->max_free) {
+                        info->max_free = len_free;
+                    }
+                    len_free = 0;
+                }
+            }
+        }
+    }
+
+    // convert block counts to bytes
+    info->used *= BYTES_PER_BLOCK;
+    info->free *= BYTES_PER_BLOCK;
+
+    #if MICROPY_GC_SPLIT_HEAP_AUTO
+    info->max_new_split = gc_get_max_new_split();
+    #endif
+
+    GC_EXIT();
+}
+
+// Allocate n_bytes from the GC heap, rounded up to whole blocks.  Searches
+// for a run of free blocks, triggering a collection (and, when configured,
+// adding a new heap area) on failure before giving up.  Returns NULL on
+// zero-size requests, when the GC is locked, or when no memory is available.
+// alloc_flags may include GC_ALLOC_FLAG_HAS_FINALISER to reserve finaliser
+// tracking for the returned object.
+void *gc_alloc(size_t n_bytes, unsigned int alloc_flags) {
+    bool has_finaliser = alloc_flags & GC_ALLOC_FLAG_HAS_FINALISER;
+    size_t n_blocks = ((n_bytes + BYTES_PER_BLOCK - 1) & (~(BYTES_PER_BLOCK - 1))) / BYTES_PER_BLOCK;
+    DEBUG_printf("gc_alloc(" UINT_FMT " bytes -> " UINT_FMT " blocks)\n", n_bytes, n_blocks);
+
+    // check for 0 allocation
+    if (n_blocks == 0) {
+        return NULL;
+    }
+
+    // check if GC is locked
+    if (MP_STATE_THREAD(gc_lock_depth) > 0) {
+        return NULL;
+    }
+
+    GC_ENTER();
+
+    mp_state_mem_area_t *area;
+    size_t i;
+    size_t end_block;
+    size_t start_block;
+    size_t n_free;
+    int collected = !MP_STATE_MEM(gc_auto_collect_enabled);
+    #if MICROPY_GC_SPLIT_HEAP_AUTO
+    bool added = false;
+    #endif
+
+    #if MICROPY_GC_ALLOC_THRESHOLD
+    if (!collected && MP_STATE_MEM(gc_alloc_amount) >= MP_STATE_MEM(gc_alloc_threshold)) {
+        GC_EXIT();
+        gc_collect();
+        collected = 1;
+        GC_ENTER();
+    }
+    #endif
+
+    for (;;) {
+
+        #if MICROPY_GC_SPLIT_HEAP
+        area = MP_STATE_MEM(gc_last_free_area);
+        #else
+        area = &MP_STATE_MEM(area);
+        #endif
+
+        // look for a run of n_blocks available blocks
+        for (; area != NULL; area = NEXT_AREA(area), i = 0) {
+            n_free = 0;
+            for (i = area->gc_last_free_atb_index; i < area->gc_alloc_table_byte_len; i++) {
+                MICROPY_GC_HOOK_LOOP(i);
+                byte a = area->gc_alloc_table_start[i];
+                // *FORMAT-OFF*
+                if (ATB_0_IS_FREE(a)) { if (++n_free >= n_blocks) { i = i * BLOCKS_PER_ATB + 0; goto found; } } else { n_free = 0; }
+                if (ATB_1_IS_FREE(a)) { if (++n_free >= n_blocks) { i = i * BLOCKS_PER_ATB + 1; goto found; } } else { n_free = 0; }
+                if (ATB_2_IS_FREE(a)) { if (++n_free >= n_blocks) { i = i * BLOCKS_PER_ATB + 2; goto found; } } else { n_free = 0; }
+                if (ATB_3_IS_FREE(a)) { if (++n_free >= n_blocks) { i = i * BLOCKS_PER_ATB + 3; goto found; } } else { n_free = 0; }
+                // *FORMAT-ON*
+            }
+
+            // No free blocks found on this heap. Mark this heap as
+            // filled, so we won't try to find free space here again until
+            // space is freed.
+            #if MICROPY_GC_SPLIT_HEAP
+            if (n_blocks == 1) {
+                area->gc_last_free_atb_index = (i + 1) / BLOCKS_PER_ATB; // or (size_t)-1
+            }
+            #endif
+        }
+
+        GC_EXIT();
+        // nothing found!
+        if (collected) {
+            #if MICROPY_GC_SPLIT_HEAP_AUTO
+            if (!added && gc_try_add_heap(n_bytes)) {
+                added = true;
+                continue;
+            }
+            #endif
+            return NULL;
+        }
+        DEBUG_printf("gc_alloc(" UINT_FMT "): no free mem, triggering GC\n", n_bytes);
+        gc_collect();
+        collected = 1;
+        GC_ENTER();
+    }
+
+    // found, ending at block i inclusive
+found:
+    // get starting and end blocks, both inclusive
+    end_block = i;
+    start_block = i - n_free + 1;
+
+    // Set last free ATB index to block after last block we found, for start of
+    // next scan.  To reduce fragmentation, we only do this if we were looking
+    // for a single free block, which guarantees that there are no free blocks
+    // before this one.  Also, whenever we free or shrink a block we must check
+    // if this index needs adjusting (see gc_realloc and gc_free).
+    if (n_free == 1) {
+        #if MICROPY_GC_SPLIT_HEAP
+        MP_STATE_MEM(gc_last_free_area) = area;
+        #endif
+        area->gc_last_free_atb_index = (i + 1) / BLOCKS_PER_ATB;
+    }
+
+    area->gc_last_used_block = MAX(area->gc_last_used_block, end_block);
+
+    // mark first block as used head
+    ATB_FREE_TO_HEAD(area, start_block);
+
+    // mark rest of blocks as used tail
+    // TODO for a run of many blocks can make this more efficient
+    for (size_t bl = start_block + 1; bl <= end_block; bl++) {
+        ATB_FREE_TO_TAIL(area, bl);
+    }
+
+    // get pointer to first block
+    // we must create this pointer before unlocking the GC so a collection can find it
+    void *ret_ptr = (void *)(area->gc_pool_start + start_block * BYTES_PER_BLOCK);
+    DEBUG_printf("gc_alloc(%p)\n", ret_ptr);
+
+    #if MICROPY_GC_ALLOC_THRESHOLD
+    MP_STATE_MEM(gc_alloc_amount) += n_blocks;
+    #endif
+
+    GC_EXIT();
+
+    #if MICROPY_GC_CONSERVATIVE_CLEAR
+    // be conservative and zero out all the newly allocated blocks
+    memset((byte *)ret_ptr, 0, (end_block - start_block + 1) * BYTES_PER_BLOCK);
+    #else
+    // zero out the additional bytes of the newly allocated blocks
+    // This is needed because the blocks may have previously held pointers
+    // to the heap and will not be set to something else if the caller
+    // doesn't actually use the entire block.  As such they will continue
+    // to point to the heap and may prevent other blocks from being reclaimed.
+    memset((byte *)ret_ptr + n_bytes, 0, (end_block - start_block + 1) * BYTES_PER_BLOCK - n_bytes);
+    #endif
+
+    #if MICROPY_ENABLE_FINALISER
+    if (has_finaliser) {
+        // clear type pointer in case it is never set
+        ((mp_obj_base_t *)ret_ptr)->type = NULL;
+        // set mp_obj flag only if it has a finaliser
+        GC_ENTER();
+        FTB_SET(area, start_block);
+        GC_EXIT();
+    }
+    #else
+    (void)has_finaliser;
+    #endif
+
+    #if EXTENSIVE_HEAP_PROFILING
+    gc_dump_alloc_table(&mp_plat_print);
+    #endif
+
+    return ret_ptr;
+}
+
+/*
+void *gc_alloc(mp_uint_t n_bytes) {
+    return _gc_alloc(n_bytes, false);
+}
+
+void *gc_alloc_with_finaliser(mp_uint_t n_bytes) {
+    return _gc_alloc(n_bytes, true);
+}
+*/
+
+// force the freeing of a piece of memory
+// ptr must be NULL or a pointer to the head block of a live allocation.
+// A no-op when the GC is locked (the memory is reclaimed at the next
+// collection instead).
+// TODO: freeing here does not call finaliser
+void gc_free(void *ptr) {
+    if (MP_STATE_THREAD(gc_lock_depth) > 0) {
+        // Cannot free while the GC is locked. However free is an optimisation
+        // to reclaim the memory immediately, this means it will now be left
+        // until the next collection.
+        return;
+    }
+
+    GC_ENTER();
+
+    DEBUG_printf("gc_free(%p)\n", ptr);
+
+    if (ptr == NULL) {
+        // free(NULL) is a no-op
+        GC_EXIT();
+        return;
+    }
+
+    // get the GC block number corresponding to this pointer
+    mp_state_mem_area_t *area;
+    #if MICROPY_GC_SPLIT_HEAP
+    area = gc_get_ptr_area(ptr);
+    assert(area);
+    #else
+    assert(VERIFY_PTR(ptr));
+    area = &MP_STATE_MEM(area);
+    #endif
+
+    size_t block = BLOCK_FROM_PTR(area, ptr);
+    assert(ATB_GET_KIND(area, block) == AT_HEAD);
+
+    #if MICROPY_ENABLE_FINALISER
+    FTB_CLEAR(area, block);
+    #endif
+
+    #if MICROPY_GC_SPLIT_HEAP
+    if (MP_STATE_MEM(gc_last_free_area) != area) {
+        // We freed something but it isn't the current area. Reset the
+        // last free area to the start for a rescan. Note that this won't
+        // give much of a performance hit, since areas that are completely
+        // filled will likely be skipped (the gc_last_free_atb_index
+        // points to the last block).
+        // The reason why this is necessary is because it is not possible
+        // to see which area came first (like it is possible to adjust
+        // gc_last_free_atb_index based on whether the freed block is
+        // before the last free block).
+        MP_STATE_MEM(gc_last_free_area) = &MP_STATE_MEM(area);
+    }
+    #endif
+
+    // set the last_free pointer to this block if it's earlier in the heap
+    if (block / BLOCKS_PER_ATB < area->gc_last_free_atb_index) {
+        area->gc_last_free_atb_index = block / BLOCKS_PER_ATB;
+    }
+
+    // free head and all of its tail blocks
+    do {
+        ATB_ANY_TO_FREE(area, block);
+        block += 1;
+    } while (ATB_GET_KIND(area, block) == AT_TAIL);
+
+    GC_EXIT();
+
+    #if EXTENSIVE_HEAP_PROFILING
+    gc_dump_alloc_table(&mp_plat_print);
+    #endif
+}
+
+// Return the allocated size in bytes of the GC block that ptr heads, or 0 if
+// ptr is not a valid pointer to the head of a live GC allocation.
+size_t gc_nbytes(const void *ptr) {
+    GC_ENTER();
+
+    mp_state_mem_area_t *area;
+    #if MICROPY_GC_SPLIT_HEAP
+    area = gc_get_ptr_area(ptr);
+    #else
+    if (VERIFY_PTR(ptr)) {
+        area = &MP_STATE_MEM(area);
+    } else {
+        area = NULL;
+    }
+    #endif
+
+    if (area) {
+        size_t block = BLOCK_FROM_PTR(area, ptr);
+        if (ATB_GET_KIND(area, block) == AT_HEAD) {
+            // work out number of consecutive blocks in the chain starting with this one
+            size_t n_blocks = 0;
+            do {
+                n_blocks += 1;
+            } while (ATB_GET_KIND(area, block + n_blocks) == AT_TAIL);
+            GC_EXIT();
+            return n_blocks * BYTES_PER_BLOCK;
+        }
+    }
+
+    // invalid pointer
+    GC_EXIT();
+    return 0;
+}
+
+#if 0
+// old, simple realloc that didn't expand memory in place
+void *gc_realloc(void *ptr, mp_uint_t n_bytes) {
+    mp_uint_t n_existing = gc_nbytes(ptr);
+    if (n_bytes <= n_existing) {
+        return ptr;
+    } else {
+        bool has_finaliser;
+        if (ptr == NULL) {
+            has_finaliser = false;
+        } else {
+            #if MICROPY_ENABLE_FINALISER
+            has_finaliser = FTB_GET(BLOCK_FROM_PTR((mp_uint_t)ptr));
+            #else
+            has_finaliser = false;
+            #endif
+        }
+        void *ptr2 = gc_alloc(n_bytes, has_finaliser);
+        if (ptr2 == NULL) {
+            return ptr2;
+        }
+        memcpy(ptr2, ptr, n_existing);
+        gc_free(ptr);
+        return ptr2;
+    }
+}
+
+#else // Alternative gc_realloc impl
+
+// Resize the allocation at ptr_in to n_bytes.  Handles pure alloc
+// (ptr_in == NULL) and pure free (n_bytes == 0), shrinks and grows in place
+// when possible, and otherwise — only if allow_move is true — allocates a new
+// chunk, copies the data, and frees the old one.  Returns the (possibly
+// moved) pointer, or NULL on failure / when the GC is locked.
+void *gc_realloc(void *ptr_in, size_t n_bytes, bool allow_move) {
+    // check for pure allocation
+    if (ptr_in == NULL) {
+        return gc_alloc(n_bytes, false);
+    }
+
+    // check for pure free
+    if (n_bytes == 0) {
+        gc_free(ptr_in);
+        return NULL;
+    }
+
+    if (MP_STATE_THREAD(gc_lock_depth) > 0) {
+        return NULL;
+    }
+
+    void *ptr = ptr_in;
+
+    GC_ENTER();
+
+    // get the GC block number corresponding to this pointer
+    mp_state_mem_area_t *area;
+    #if MICROPY_GC_SPLIT_HEAP
+    area = gc_get_ptr_area(ptr);
+    assert(area);
+    #else
+    assert(VERIFY_PTR(ptr));
+    area = &MP_STATE_MEM(area);
+    #endif
+    size_t block = BLOCK_FROM_PTR(area, ptr);
+    assert(ATB_GET_KIND(area, block) == AT_HEAD);
+
+    // compute number of new blocks that are requested
+    size_t new_blocks = (n_bytes + BYTES_PER_BLOCK - 1) / BYTES_PER_BLOCK;
+
+    // Get the total number of consecutive blocks that are already allocated to
+    // this chunk of memory, and then count the number of free blocks following
+    // it.  Stop if we reach the end of the heap, or if we find enough extra
+    // free blocks to satisfy the realloc.  Note that we need to compute the
+    // total size of the existing memory chunk so we can correctly and
+    // efficiently shrink it (see below for shrinking code).
+    size_t n_free = 0;
+    size_t n_blocks = 1; // counting HEAD block
+    size_t max_block = area->gc_alloc_table_byte_len * BLOCKS_PER_ATB;
+    for (size_t bl = block + n_blocks; bl < max_block; bl++) {
+        byte block_type = ATB_GET_KIND(area, bl);
+        if (block_type == AT_TAIL) {
+            n_blocks++;
+            continue;
+        }
+        if (block_type == AT_FREE) {
+            n_free++;
+            if (n_blocks + n_free >= new_blocks) {
+                // stop as soon as we find enough blocks for n_bytes
+                break;
+            }
+            continue;
+        }
+        break;
+    }
+
+    // return original ptr if it already has the requested number of blocks
+    if (new_blocks == n_blocks) {
+        GC_EXIT();
+        return ptr_in;
+    }
+
+    // check if we can shrink the allocated area
+    if (new_blocks < n_blocks) {
+        // free unneeded tail blocks
+        for (size_t bl = block + new_blocks, count = n_blocks - new_blocks; count > 0; bl++, count--) {
+            ATB_ANY_TO_FREE(area, bl);
+        }
+
+        #if MICROPY_GC_SPLIT_HEAP
+        if (MP_STATE_MEM(gc_last_free_area) != area) {
+            // See comment in gc_free.
+            MP_STATE_MEM(gc_last_free_area) = &MP_STATE_MEM(area);
+        }
+        #endif
+
+        // set the last_free pointer to end of this block if it's earlier in the heap
+        if ((block + new_blocks) / BLOCKS_PER_ATB < area->gc_last_free_atb_index) {
+            area->gc_last_free_atb_index = (block + new_blocks) / BLOCKS_PER_ATB;
+        }
+
+        GC_EXIT();
+
+        #if EXTENSIVE_HEAP_PROFILING
+        gc_dump_alloc_table(&mp_plat_print);
+        #endif
+
+        return ptr_in;
+    }
+
+    // check if we can expand in place
+    if (new_blocks <= n_blocks + n_free) {
+        // mark few more blocks as used tail
+        size_t end_block = block + new_blocks;
+        for (size_t bl = block + n_blocks; bl < end_block; bl++) {
+            assert(ATB_GET_KIND(area, bl) == AT_FREE);
+            ATB_FREE_TO_TAIL(area, bl);
+        }
+
+        area->gc_last_used_block = MAX(area->gc_last_used_block, end_block);
+
+        GC_EXIT();
+
+        #if MICROPY_GC_CONSERVATIVE_CLEAR
+        // be conservative and zero out all the newly allocated blocks
+        memset((byte *)ptr_in + n_blocks * BYTES_PER_BLOCK, 0, (new_blocks - n_blocks) * BYTES_PER_BLOCK);
+        #else
+        // zero out the additional bytes of the newly allocated blocks (see comment above in gc_alloc)
+        memset((byte *)ptr_in + n_bytes, 0, new_blocks * BYTES_PER_BLOCK - n_bytes);
+        #endif
+
+        #if EXTENSIVE_HEAP_PROFILING
+        gc_dump_alloc_table(&mp_plat_print);
+        #endif
+
+        return ptr_in;
+    }
+
+    // remember whether the old allocation had a finaliser, so the new one keeps it
+    #if MICROPY_ENABLE_FINALISER
+    bool ftb_state = FTB_GET(area, block);
+    #else
+    bool ftb_state = false;
+    #endif
+
+    GC_EXIT();
+
+    if (!allow_move) {
+        // not allowed to move memory block so return failure
+        return NULL;
+    }
+
+    // can't resize inplace; try to find a new contiguous chain
+    void *ptr_out = gc_alloc(n_bytes, ftb_state);
+
+    // check that the alloc succeeded
+    if (ptr_out == NULL) {
+        return NULL;
+    }
+
+    DEBUG_printf("gc_realloc(%p -> %p)\n", ptr_in, ptr_out);
+    memcpy(ptr_out, ptr_in, n_blocks * BYTES_PER_BLOCK);
+    gc_free(ptr_in);
+    return ptr_out;
+}
+#endif // Alternative gc_realloc impl
+
+void gc_dump_info(const mp_print_t *print) {
+    gc_info_t info;
+    gc_info(&info);
+    mp_printf(print, "GC: total: %u, used: %u, free: %u",
+        (uint)info.total, (uint)info.used, (uint)info.free);
+    #if MICROPY_GC_SPLIT_HEAP_AUTO
+    mp_printf(print, ", max new split: %u", (uint)info.max_new_split);
+    #endif
+    mp_printf(print, "\n No. of 1-blocks: %u, 2-blocks: %u, max blk sz: %u, max free sz: %u\n",
+        (uint)info.num_1block, (uint)info.num_2block, (uint)info.max_block, (uint)info.max_free);
+}
+
+void gc_dump_alloc_table(const mp_print_t *print) {
+    GC_ENTER();
+    static const size_t DUMP_BYTES_PER_LINE = 64;
+    for (mp_state_mem_area_t *area = &MP_STATE_MEM(area); area != NULL; area = NEXT_AREA(area)) {
+        #if !EXTENSIVE_HEAP_PROFILING
+        // When comparing heap output we don't want to print the starting
+        // pointer of the heap because it changes from run to run.
+        mp_printf(print, "GC memory layout; from %p:", area->gc_pool_start);
+        #endif
+        for (size_t bl = 0; bl < area->gc_alloc_table_byte_len * BLOCKS_PER_ATB; bl++) {
+            if (bl % DUMP_BYTES_PER_LINE == 0) {
+                // a new line of blocks
+                {
+                    // check if this line contains only free blocks
+                    size_t bl2 = bl;
+                    while (bl2 < area->gc_alloc_table_byte_len * BLOCKS_PER_ATB && ATB_GET_KIND(area, bl2) == AT_FREE) {
+                        bl2++;
+                    }
+                    if (bl2 - bl >= 2 * DUMP_BYTES_PER_LINE) {
+                        // there are at least 2 lines containing only free blocks, so abbreviate their printing
+                        mp_printf(print, "\n       (%u lines all free)", (uint)(bl2 - bl) / DUMP_BYTES_PER_LINE);
+                        bl = bl2 & (~(DUMP_BYTES_PER_LINE - 1));
+                        if (bl >= area->gc_alloc_table_byte_len * BLOCKS_PER_ATB) {
+                            // got to end of heap
+                            break;
+                        }
+                    }
+                }
+                // print header for new line of blocks
+                // (the cast to uint32_t is for 16-bit ports)
+                mp_printf(print, "\n%08x: ", (uint)(bl * BYTES_PER_BLOCK));
+            }
+            int c = ' ';
+            switch (ATB_GET_KIND(area, bl)) {
+                case AT_FREE:
+                    c = '.';
+                    break;
+                /* this prints out if the object is reachable from BSS or STACK (for unix only)
+                case AT_HEAD: {
+                    c = 'h';
+                    void **ptrs = (void**)(void*)&mp_state_ctx;
+                    mp_uint_t len = offsetof(mp_state_ctx_t, vm.stack_top) / sizeof(mp_uint_t);
+                    for (mp_uint_t i = 0; i < len; i++) {
+                        mp_uint_t ptr = (mp_uint_t)ptrs[i];
+                        if (gc_get_ptr_area(ptr) && BLOCK_FROM_PTR(ptr) == bl) {
+                            c = 'B';
+                            break;
+                        }
+                    }
+                    if (c == 'h') {
+                        ptrs = (void**)&c;
+                        len = ((mp_uint_t)MP_STATE_THREAD(stack_top) - (mp_uint_t)&c) / sizeof(mp_uint_t);
+                        for (mp_uint_t i = 0; i < len; i++) {
+                            mp_uint_t ptr = (mp_uint_t)ptrs[i];
+                            if (gc_get_ptr_area(ptr) && BLOCK_FROM_PTR(ptr) == bl) {
+                                c = 'S';
+                                break;
+                            }
+                        }
+                    }
+                    break;
+                }
+                */
+                /* this prints the uPy object type of the head block */
+                case AT_HEAD: {
+                    void **ptr = (void **)(area->gc_pool_start + bl * BYTES_PER_BLOCK);
+                    if (*ptr == &mp_type_tuple) {
+                        c = 'T';
+                    } else if (*ptr == &mp_type_list) {
+                        c = 'L';
+                    } else if (*ptr == &mp_type_dict) {
+                        c = 'D';
+                    } else if (*ptr == &mp_type_str || *ptr == &mp_type_bytes) {
+                        c = 'S';
+                    }
+                    #if MICROPY_PY_BUILTINS_BYTEARRAY
+                    else if (*ptr == &mp_type_bytearray) {
+                        c = 'A';
+                    }
+                    #endif
+                    #if MICROPY_PY_ARRAY
+                    else if (*ptr == &mp_type_array) {
+                        c = 'A';
+                    }
+                    #endif
+                    #if MICROPY_PY_BUILTINS_FLOAT
+                    else if (*ptr == &mp_type_float) {
+                        c = 'F';
+                    }
+                    #endif
+                    else if (*ptr == &mp_type_fun_bc) {
+                        c = 'B';
+                    } else if (*ptr == &mp_type_module) {
+                        c = 'M';
+                    } else {
+                        c = 'h';
+                        #if 0
+                        // This code prints "Q" for qstr-pool data, and "q" for qstr-str
+                        // data.  It can be useful to see how qstrs are being allocated,
+                        // but is disabled by default because it is very slow.
+                        for (qstr_pool_t *pool = MP_STATE_VM(last_pool); c == 'h' && pool != NULL; pool = pool->prev) {
+                            if ((qstr_pool_t *)ptr == pool) {
+                                c = 'Q';
+                                break;
+                            }
+                            for (const byte **q = pool->qstrs, **q_top = pool->qstrs + pool->len; q < q_top; q++) {
+                                if ((const byte *)ptr == *q) {
+                                    c = 'q';
+                                    break;
+                                }
+                            }
+                        }
+                        #endif
+                    }
+                    break;
+                }
+                case AT_TAIL:
+                    c = '=';
+                    break;
+                case AT_MARK:
+                    c = 'm';
+                    break;
+            }
+            mp_printf(print, "%c", c);
+        }
+        mp_print_str(print, "\n");
+    }
+    GC_EXIT();
+}
+
+#if 0
+// For testing the GC functions
+void gc_test(void) {
+    mp_uint_t len = 500;
+    mp_uint_t *heap = malloc(len);
+    gc_init(heap, heap + len / sizeof(mp_uint_t));
+    void *ptrs[100];
+    {
+        mp_uint_t **p = gc_alloc(16, false);
+        p[0] = gc_alloc(64, false);
+        p[1] = gc_alloc(1, false);
+        p[2] = gc_alloc(1, false);
+        p[3] = gc_alloc(1, false);
+        mp_uint_t ***p2 = gc_alloc(16, false);
+        p2[0] = p;
+        p2[1] = p;
+        ptrs[0] = p2;
+    }
+    for (int i = 0; i < 25; i += 2) {
+        mp_uint_t *p = gc_alloc(i, false);
+        printf("p=%p\n", p);
+        if (i & 3) {
+            // ptrs[i] = p;
+        }
+    }
+
+    printf("Before GC:\n");
+    gc_dump_alloc_table(&mp_plat_print);
+    printf("Starting GC...\n");
+    gc_collect_start();
+    gc_collect_root(ptrs, sizeof(ptrs) / sizeof(void *));
+    gc_collect_end();
+    printf("After GC:\n");
+    gc_dump_alloc_table(&mp_plat_print);
+}
+#endif
+
+#endif // MICROPY_ENABLE_GC

+ 87 - 0
mp_flipper/lib/micropython/py/gc.h

@@ -0,0 +1,87 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_GC_H
+#define MICROPY_INCLUDED_PY_GC_H
+
+#include <stdbool.h>
+#include <stddef.h>
+#include "py/mpprint.h"
+
+void gc_init(void *start, void *end);
+
+#if MICROPY_GC_SPLIT_HEAP
+// Used to add additional memory areas to the heap.
+void gc_add(void *start, void *end);
+
+#if MICROPY_GC_SPLIT_HEAP_AUTO
+// Port must implement this function to return the maximum available block of
+// RAM to allocate a new heap area into using MP_PLAT_ALLOC_HEAP.
+size_t gc_get_max_new_split(void);
+#endif // MICROPY_GC_SPLIT_HEAP_AUTO
+#endif // MICROPY_GC_SPLIT_HEAP
+
+// These lock/unlock functions can be nested.
+// They can be used to prevent the GC from allocating/freeing.
+void gc_lock(void);
+void gc_unlock(void);
+bool gc_is_locked(void);
+
+// A given port must implement gc_collect by using the other collect functions.
+void gc_collect(void);
+void gc_collect_start(void);
+void gc_collect_root(void **ptrs, size_t len);
+void gc_collect_end(void);
+
+// Use this function to sweep the whole heap and run all finalisers
+void gc_sweep_all(void);
+
+enum {
+    GC_ALLOC_FLAG_HAS_FINALISER = 1,
+};
+
+void *gc_alloc(size_t n_bytes, unsigned int alloc_flags);
+void gc_free(void *ptr); // does not call finaliser
+size_t gc_nbytes(const void *ptr);
+void *gc_realloc(void *ptr, size_t n_bytes, bool allow_move);
+
+typedef struct _gc_info_t {
+    size_t total;
+    size_t used;
+    size_t free;
+    size_t max_free;
+    size_t num_1block;
+    size_t num_2block;
+    size_t max_block;
+    #if MICROPY_GC_SPLIT_HEAP_AUTO
+    size_t max_new_split;
+    #endif
+} gc_info_t;
+
+void gc_info(gc_info_t *info);
+void gc_dump_info(const mp_print_t *print);
+void gc_dump_alloc_table(const mp_print_t *print);
+
+#endif // MICROPY_INCLUDED_PY_GC_H

+ 372 - 0
mp_flipper/lib/micropython/py/grammar.h

@@ -0,0 +1,372 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013-2020 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+// *FORMAT-OFF*
+
+// rules for writing rules:
+// - zero_or_more is implemented using opt_rule around a one_or_more rule
+// - don't put opt_rule in arguments of or rule; instead, wrap the call to this or rule in opt_rule
+
+// Generic sub-rules used by multiple rules below.
+
+DEF_RULE_NC(generic_colon_test, and_ident(2), tok(DEL_COLON), rule(test))
+DEF_RULE_NC(generic_equal_test, and_ident(2), tok(DEL_EQUAL), rule(test))
+
+// # Start symbols for the grammar:
+// #       single_input is a single interactive statement;
+// #       file_input is a module or sequence of commands read from an input file;
+// #       eval_input is the input for the eval() functions.
+// # NB: compound_stmt in single_input is followed by extra NEWLINE! --> not in MicroPython
+// single_input: NEWLINE | simple_stmt | compound_stmt
+// file_input: (NEWLINE | stmt)* ENDMARKER
+// eval_input: testlist NEWLINE* ENDMARKER
+
+DEF_RULE_NC(single_input, or(3), tok(NEWLINE), rule(simple_stmt), rule(compound_stmt))
+DEF_RULE(file_input, c(generic_all_nodes), and_ident(1), opt_rule(file_input_2))
+DEF_RULE(file_input_2, c(generic_all_nodes), one_or_more, rule(file_input_3))
+DEF_RULE_NC(file_input_3, or(2), tok(NEWLINE), rule(stmt))
+DEF_RULE_NC(eval_input, and_ident(2), rule(testlist), opt_rule(eval_input_2))
+DEF_RULE_NC(eval_input_2, and(1), tok(NEWLINE))
+
+// decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
+// decorators: decorator+
+// decorated: decorators (classdef | funcdef | async_funcdef)
+// funcdef: 'def' NAME parameters ['->' test] ':' suite
+// async_funcdef: 'async' funcdef
+// parameters: '(' [typedargslist] ')'
+// typedargslist: tfpdef ['=' test] (',' tfpdef ['=' test])* [',' ['*' [tfpdef] (',' tfpdef ['=' test])* [',' '**' tfpdef] | '**' tfpdef]] | '*' [tfpdef] (',' tfpdef ['=' test])* [',' '**' tfpdef] | '**' tfpdef
+// tfpdef: NAME [':' test]
+// varargslist: vfpdef ['=' test] (',' vfpdef ['=' test])* [',' ['*' [vfpdef] (',' vfpdef ['=' test])* [',' '**' vfpdef] | '**' vfpdef]] |  '*' [vfpdef] (',' vfpdef ['=' test])* [',' '**' vfpdef] | '**' vfpdef
+// vfpdef: NAME
+
+DEF_RULE_NC(decorator, and(4), tok(OP_AT), rule(dotted_name), opt_rule(trailer_paren), tok(NEWLINE))
+DEF_RULE_NC(decorators, one_or_more, rule(decorator))
+DEF_RULE(decorated, c(decorated), and_ident(2), rule(decorators), rule(decorated_body))
+#if MICROPY_PY_ASYNC_AWAIT
+DEF_RULE_NC(decorated_body, or(3), rule(classdef), rule(funcdef), rule(async_funcdef))
+DEF_RULE_NC(async_funcdef, and(2), tok(KW_ASYNC), rule(funcdef))
+#else
+DEF_RULE_NC(decorated_body, or(2), rule(classdef), rule(funcdef))
+#endif
+DEF_RULE(funcdef, c(funcdef), and_blank(8), tok(KW_DEF), tok(NAME), tok(DEL_PAREN_OPEN), opt_rule(typedargslist), tok(DEL_PAREN_CLOSE), opt_rule(funcdefrettype), tok(DEL_COLON), rule(suite))
+DEF_RULE_NC(funcdefrettype, and_ident(2), tok(DEL_MINUS_MORE), rule(test))
+// note: typedargslist lets through more than is allowed, compiler does further checks
+DEF_RULE_NC(typedargslist, list_with_end, rule(typedargslist_item), tok(DEL_COMMA))
+DEF_RULE_NC(typedargslist_item, or(3), rule(typedargslist_name), rule(typedargslist_star), rule(typedargslist_dbl_star))
+DEF_RULE_NC(typedargslist_name, and_ident(3), tok(NAME), opt_rule(generic_colon_test), opt_rule(generic_equal_test))
+DEF_RULE_NC(typedargslist_star, and(2), tok(OP_STAR), opt_rule(tfpdef))
+DEF_RULE_NC(typedargslist_dbl_star, and(3), tok(OP_DBL_STAR), tok(NAME), opt_rule(generic_colon_test))
+DEF_RULE_NC(tfpdef, and(2), tok(NAME), opt_rule(generic_colon_test))
+// note: varargslist lets through more than is allowed, compiler does further checks
+DEF_RULE_NC(varargslist, list_with_end, rule(varargslist_item), tok(DEL_COMMA))
+DEF_RULE_NC(varargslist_item, or(3), rule(varargslist_name), rule(varargslist_star), rule(varargslist_dbl_star))
+DEF_RULE_NC(varargslist_name, and_ident(2), tok(NAME), opt_rule(generic_equal_test))
+DEF_RULE_NC(varargslist_star, and(2), tok(OP_STAR), opt_rule(vfpdef))
+DEF_RULE_NC(varargslist_dbl_star, and(2), tok(OP_DBL_STAR), tok(NAME))
+DEF_RULE_NC(vfpdef, and_ident(1), tok(NAME))
+
+// stmt: compound_stmt | simple_stmt
+
+DEF_RULE_NC(stmt, or(2), rule(compound_stmt), rule(simple_stmt))
+
+// simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE
+
+DEF_RULE_NC(simple_stmt, and_ident(2), rule(simple_stmt_2), tok(NEWLINE))
+DEF_RULE(simple_stmt_2, c(generic_all_nodes), list_with_end, rule(small_stmt), tok(DEL_SEMICOLON))
+
+// small_stmt: expr_stmt | del_stmt | pass_stmt | flow_stmt | import_stmt | global_stmt | nonlocal_stmt | assert_stmt
+// expr_stmt: testlist_star_expr (annassign | augassign (yield_expr|testlist) | ('=' (yield_expr|testlist_star_expr))*)
+// testlist_star_expr: (test|star_expr) (',' (test|star_expr))* [',']
+// annassign: ':' test ['=' (yield_expr|testlist_star_expr)]
+// augassign: '+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' | '<<=' | '>>=' | '**=' | '//='
+// # For normal and annotated assignments, additional restrictions enforced by the interpreter
+
+DEF_RULE_NC(small_stmt, or(8), rule(del_stmt), rule(pass_stmt), rule(flow_stmt), rule(import_stmt), rule(global_stmt), rule(nonlocal_stmt), rule(assert_stmt), rule(expr_stmt))
+DEF_RULE(expr_stmt, c(expr_stmt), and(2), rule(testlist_star_expr), opt_rule(expr_stmt_2))
+DEF_RULE_NC(expr_stmt_2, or(3), rule(annassign), rule(expr_stmt_augassign), rule(expr_stmt_assign_list))
+DEF_RULE_NC(expr_stmt_augassign, and_ident(2), rule(augassign), rule(expr_stmt_6))
+DEF_RULE_NC(expr_stmt_assign_list, one_or_more, rule(expr_stmt_assign))
+DEF_RULE_NC(expr_stmt_assign, and_ident(2), tok(DEL_EQUAL), rule(expr_stmt_6))
+DEF_RULE_NC(expr_stmt_6, or(2), rule(yield_expr), rule(testlist_star_expr))
+DEF_RULE(testlist_star_expr, c(generic_tuple), list_with_end, rule(testlist_star_expr_2), tok(DEL_COMMA))
+DEF_RULE_NC(testlist_star_expr_2, or(2), rule(star_expr), rule(test))
+DEF_RULE_NC(annassign, and(3), tok(DEL_COLON), rule(test), opt_rule(expr_stmt_assign))
+DEF_RULE_NC(augassign, or(13), tok(DEL_PLUS_EQUAL), tok(DEL_MINUS_EQUAL), tok(DEL_STAR_EQUAL), tok(DEL_AT_EQUAL), tok(DEL_SLASH_EQUAL), tok(DEL_PERCENT_EQUAL), tok(DEL_AMPERSAND_EQUAL), tok(DEL_PIPE_EQUAL), tok(DEL_CARET_EQUAL), tok(DEL_DBL_LESS_EQUAL), tok(DEL_DBL_MORE_EQUAL), tok(DEL_DBL_STAR_EQUAL), tok(DEL_DBL_SLASH_EQUAL))
+
+// del_stmt: 'del' exprlist
+// pass_stmt: 'pass'
+// flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt | yield_stmt
+// break_stmt: 'break'
+// continue_stmt: 'continue'
+// return_stmt: 'return' [testlist]
+// yield_stmt: yield_expr
+// raise_stmt: 'raise' [test ['from' test]]
+
+DEF_RULE(del_stmt, c(del_stmt), and(2), tok(KW_DEL), rule(exprlist))
+DEF_RULE(pass_stmt, c(generic_all_nodes), and(1), tok(KW_PASS))
+DEF_RULE_NC(flow_stmt, or(5), rule(break_stmt), rule(continue_stmt), rule(return_stmt), rule(raise_stmt), rule(yield_stmt))
+DEF_RULE(break_stmt, c(break_cont_stmt), and(1), tok(KW_BREAK))
+DEF_RULE(continue_stmt, c(break_cont_stmt), and(1), tok(KW_CONTINUE))
+DEF_RULE(return_stmt, c(return_stmt), and(2), tok(KW_RETURN), opt_rule(testlist))
+DEF_RULE(yield_stmt, c(yield_stmt), and(1), rule(yield_expr))
+DEF_RULE(raise_stmt, c(raise_stmt), and(2), tok(KW_RAISE), opt_rule(raise_stmt_arg))
+DEF_RULE_NC(raise_stmt_arg, and_ident(2), rule(test), opt_rule(raise_stmt_from))
+DEF_RULE_NC(raise_stmt_from, and_ident(2), tok(KW_FROM), rule(test))
+
+// import_stmt: import_name | import_from
+// import_name: 'import' dotted_as_names
+// import_from: 'from' (('.' | '...')* dotted_name | ('.' | '...')+) 'import' ('*' | '(' import_as_names ')' | import_as_names)
+// import_as_name: NAME ['as' NAME]
+// dotted_as_name: dotted_name ['as' NAME]
+// import_as_names: import_as_name (',' import_as_name)* [',']
+// dotted_as_names: dotted_as_name (',' dotted_as_name)*
+// dotted_name: NAME ('.' NAME)*
+// global_stmt: 'global' NAME (',' NAME)*
+// nonlocal_stmt: 'nonlocal' NAME (',' NAME)*
+// assert_stmt: 'assert' test [',' test]
+
+DEF_RULE_NC(import_stmt, or(2), rule(import_name), rule(import_from))
+DEF_RULE(import_name, c(import_name), and(2), tok(KW_IMPORT), rule(dotted_as_names))
+DEF_RULE(import_from, c(import_from), and(4), tok(KW_FROM), rule(import_from_2), tok(KW_IMPORT), rule(import_from_3))
+DEF_RULE_NC(import_from_2, or(2), rule(dotted_name), rule(import_from_2b))
+DEF_RULE_NC(import_from_2b, and_ident(2), rule(one_or_more_period_or_ellipsis), opt_rule(dotted_name))
+DEF_RULE_NC(import_from_3, or(3), tok(OP_STAR), rule(import_as_names_paren), rule(import_as_names))
+DEF_RULE_NC(import_as_names_paren, and_ident(3), tok(DEL_PAREN_OPEN), rule(import_as_names), tok(DEL_PAREN_CLOSE))
+DEF_RULE_NC(one_or_more_period_or_ellipsis, one_or_more, rule(period_or_ellipsis))
+DEF_RULE_NC(period_or_ellipsis, or(2), tok(DEL_PERIOD), tok(ELLIPSIS))
+DEF_RULE_NC(import_as_name, and(2), tok(NAME), opt_rule(as_name))
+DEF_RULE_NC(dotted_as_name, and_ident(2), rule(dotted_name), opt_rule(as_name))
+DEF_RULE_NC(as_name, and_ident(2), tok(KW_AS), tok(NAME))
+DEF_RULE_NC(import_as_names, list_with_end, rule(import_as_name), tok(DEL_COMMA))
+DEF_RULE_NC(dotted_as_names, list, rule(dotted_as_name), tok(DEL_COMMA))
+DEF_RULE_NC(dotted_name, list, tok(NAME), tok(DEL_PERIOD))
+DEF_RULE(global_stmt, c(global_nonlocal_stmt), and(2), tok(KW_GLOBAL), rule(name_list))
+DEF_RULE(nonlocal_stmt, c(global_nonlocal_stmt), and(2), tok(KW_NONLOCAL), rule(name_list))
+DEF_RULE_NC(name_list, list, tok(NAME), tok(DEL_COMMA))
+DEF_RULE(assert_stmt, c(assert_stmt), and(3), tok(KW_ASSERT), rule(test), opt_rule(assert_stmt_extra))
+DEF_RULE_NC(assert_stmt_extra, and_ident(2), tok(DEL_COMMA), rule(test))
+
+// compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated | async_stmt
+// if_stmt: 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite]
+// while_stmt: 'while' test ':' suite ['else' ':' suite]
+// for_stmt: 'for' exprlist 'in' testlist ':' suite ['else' ':' suite]
+// try_stmt: 'try' ':' suite ((except_clause ':' suite)+ ['else' ':' suite] ['finally' ':' suite] | 'finally' ':' suite)
+// # NB compile.c makes sure that the default except clause is last
+// except_clause: 'except' [test ['as' NAME]]
+// with_stmt: 'with' with_item (',' with_item)* ':' suite
+// with_item: test ['as' expr]
+// suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT
+// async_stmt: 'async' (funcdef | with_stmt | for_stmt)
+
+#if MICROPY_PY_ASYNC_AWAIT
+DEF_RULE_NC(compound_stmt, or(9), rule(if_stmt), rule(while_stmt), rule(for_stmt), rule(try_stmt), rule(with_stmt), rule(funcdef), rule(classdef), rule(decorated), rule(async_stmt))
+DEF_RULE(async_stmt, c(async_stmt), and(2), tok(KW_ASYNC), rule(async_stmt_2))
+DEF_RULE_NC(async_stmt_2, or(3), rule(funcdef), rule(with_stmt), rule(for_stmt))
+#else
+DEF_RULE_NC(compound_stmt, or(8), rule(if_stmt), rule(while_stmt), rule(for_stmt), rule(try_stmt), rule(with_stmt), rule(funcdef), rule(classdef), rule(decorated))
+#endif
+DEF_RULE(if_stmt, c(if_stmt), and(6), tok(KW_IF), rule(namedexpr_test), tok(DEL_COLON), rule(suite), opt_rule(if_stmt_elif_list), opt_rule(else_stmt))
+DEF_RULE_NC(if_stmt_elif_list, one_or_more, rule(if_stmt_elif))
+DEF_RULE_NC(if_stmt_elif, and(4), tok(KW_ELIF), rule(namedexpr_test), tok(DEL_COLON), rule(suite))
+DEF_RULE(while_stmt, c(while_stmt), and(5), tok(KW_WHILE), rule(namedexpr_test), tok(DEL_COLON), rule(suite), opt_rule(else_stmt))
+DEF_RULE(for_stmt, c(for_stmt), and(7), tok(KW_FOR), rule(exprlist), tok(KW_IN), rule(testlist), tok(DEL_COLON), rule(suite), opt_rule(else_stmt))
+DEF_RULE(try_stmt, c(try_stmt), and(4), tok(KW_TRY), tok(DEL_COLON), rule(suite), rule(try_stmt_2))
+DEF_RULE_NC(try_stmt_2, or(2), rule(try_stmt_except_and_more), rule(try_stmt_finally))
+DEF_RULE_NC(try_stmt_except_and_more, and_ident(3), rule(try_stmt_except_list), opt_rule(else_stmt), opt_rule(try_stmt_finally))
+DEF_RULE_NC(try_stmt_except, and(4), tok(KW_EXCEPT), opt_rule(try_stmt_as_name), tok(DEL_COLON), rule(suite))
+DEF_RULE_NC(try_stmt_as_name, and_ident(2), rule(test), opt_rule(as_name))
+DEF_RULE_NC(try_stmt_except_list, one_or_more, rule(try_stmt_except))
+DEF_RULE_NC(try_stmt_finally, and(3), tok(KW_FINALLY), tok(DEL_COLON), rule(suite))
+DEF_RULE_NC(else_stmt, and_ident(3), tok(KW_ELSE), tok(DEL_COLON), rule(suite))
+DEF_RULE(with_stmt, c(with_stmt), and(4), tok(KW_WITH), rule(with_stmt_list), tok(DEL_COLON), rule(suite))
+DEF_RULE_NC(with_stmt_list, list, rule(with_item), tok(DEL_COMMA))
+DEF_RULE_NC(with_item, and_ident(2), rule(test), opt_rule(with_item_as))
+DEF_RULE_NC(with_item_as, and_ident(2), tok(KW_AS), rule(expr))
+DEF_RULE_NC(suite, or(2), rule(suite_block), rule(simple_stmt))
+DEF_RULE_NC(suite_block, and_ident(4), tok(NEWLINE), tok(INDENT), rule(suite_block_stmts), tok(DEDENT))
+DEF_RULE(suite_block_stmts, c(generic_all_nodes), one_or_more, rule(stmt))
+
+// test: or_test ['if' or_test 'else' test] | lambdef
+// test_nocond: or_test | lambdef_nocond
+// lambdef: 'lambda' [varargslist] ':' test
+// lambdef_nocond: 'lambda' [varargslist] ':' test_nocond
+
+#if MICROPY_PY_ASSIGN_EXPR
+DEF_RULE(namedexpr_test, c(namedexpr), and_ident(2), rule(test), opt_rule(namedexpr_test_2))
+DEF_RULE_NC(namedexpr_test_2, and_ident(2), tok(OP_ASSIGN), rule(test))
+#else
+DEF_RULE_NC(namedexpr_test, or(1), rule(test))
+#endif
+DEF_RULE_NC(test, or(2), rule(lambdef), rule(test_if_expr))
+DEF_RULE(test_if_expr, c(test_if_expr), and_ident(2), rule(or_test), opt_rule(test_if_else))
+DEF_RULE_NC(test_if_else, and(4), tok(KW_IF), rule(or_test), tok(KW_ELSE), rule(test))
+DEF_RULE_NC(test_nocond, or(2), rule(lambdef_nocond), rule(or_test))
+DEF_RULE(lambdef, c(lambdef), and_blank(4), tok(KW_LAMBDA), opt_rule(varargslist), tok(DEL_COLON), rule(test))
+DEF_RULE(lambdef_nocond, c(lambdef), and_blank(4), tok(KW_LAMBDA), opt_rule(varargslist), tok(DEL_COLON), rule(test_nocond))
+
+// or_test: and_test ('or' and_test)*
+// and_test: not_test ('and' not_test)*
+// not_test: 'not' not_test | comparison
+// comparison: expr (comp_op expr)*
+// comp_op: '<'|'>'|'=='|'>='|'<='|'!='|'in'|'not' 'in'|'is'|'is' 'not'
+// star_expr: '*' expr
+// expr: xor_expr ('|' xor_expr)*
+// xor_expr: and_expr ('^' and_expr)*
+// and_expr: shift_expr ('&' shift_expr)*
+// shift_expr: arith_expr (('<<'|'>>') arith_expr)*
+// arith_expr: term (('+'|'-') term)*
+// term: factor (('*'|'@'|'/'|'%'|'//') factor)*
+// factor: ('+'|'-'|'~') factor | power
+// power: atom_expr ['**' factor]
+// atom_expr: 'await' atom trailer* | atom trailer*
+
+DEF_RULE(or_test, c(or_and_test), list, rule(and_test), tok(KW_OR))
+DEF_RULE(and_test, c(or_and_test), list, rule(not_test), tok(KW_AND))
+DEF_RULE_NC(not_test, or(2), rule(not_test_2), rule(comparison))
+DEF_RULE(not_test_2, c(not_test_2), and(2), tok(KW_NOT), rule(not_test))
+DEF_RULE(comparison, c(comparison), list, rule(expr), rule(comp_op))
+DEF_RULE_NC(comp_op, or(9), tok(OP_LESS), tok(OP_MORE), tok(OP_DBL_EQUAL), tok(OP_LESS_EQUAL), tok(OP_MORE_EQUAL), tok(OP_NOT_EQUAL), tok(KW_IN), rule(comp_op_not_in), rule(comp_op_is))
+DEF_RULE_NC(comp_op_not_in, and(2), tok(KW_NOT), tok(KW_IN))
+DEF_RULE_NC(comp_op_is, and(2), tok(KW_IS), opt_rule(comp_op_is_not))
+DEF_RULE_NC(comp_op_is_not, and(1), tok(KW_NOT))
+DEF_RULE(star_expr, c(star_expr), and(2), tok(OP_STAR), rule(expr))
+DEF_RULE(expr, c(binary_op), list, rule(xor_expr), tok(OP_PIPE))
+DEF_RULE(xor_expr, c(binary_op), list, rule(and_expr), tok(OP_CARET))
+DEF_RULE(and_expr, c(binary_op), list, rule(shift_expr), tok(OP_AMPERSAND))
+DEF_RULE(shift_expr, c(term), list, rule(arith_expr), rule(shift_op))
+DEF_RULE_NC(shift_op, or(2), tok(OP_DBL_LESS), tok(OP_DBL_MORE))
+DEF_RULE(arith_expr, c(term), list, rule(term), rule(arith_op))
+DEF_RULE_NC(arith_op, or(2), tok(OP_PLUS), tok(OP_MINUS))
+DEF_RULE(term, c(term), list, rule(factor), rule(term_op))
+DEF_RULE_NC(term_op, or(5), tok(OP_STAR), tok(OP_AT), tok(OP_SLASH), tok(OP_PERCENT), tok(OP_DBL_SLASH))
+DEF_RULE_NC(factor, or(2), rule(factor_2), rule(power))
+DEF_RULE(factor_2, c(factor_2), and_ident(2), rule(factor_op), rule(factor))
+DEF_RULE_NC(factor_op, or(3), tok(OP_PLUS), tok(OP_MINUS), tok(OP_TILDE))
+DEF_RULE(power, c(power), and_ident(2), rule(atom_expr), opt_rule(power_dbl_star))
+#if MICROPY_PY_ASYNC_AWAIT
+DEF_RULE_NC(atom_expr, or(2), rule(atom_expr_await), rule(atom_expr_normal))
+DEF_RULE(atom_expr_await, c(atom_expr_await), and(3), tok(KW_AWAIT), rule(atom), opt_rule(atom_expr_trailers))
+#else
+DEF_RULE_NC(atom_expr, or(1), rule(atom_expr_normal))
+#endif
+DEF_RULE(atom_expr_normal, c(atom_expr_normal), and_ident(2), rule(atom), opt_rule(atom_expr_trailers))
+DEF_RULE_NC(atom_expr_trailers, one_or_more, rule(trailer))
+DEF_RULE_NC(power_dbl_star, and_ident(2), tok(OP_DBL_STAR), rule(factor))
+
+// atom: '(' [yield_expr|testlist_comp] ')' | '[' [testlist_comp] ']' | '{' [dictorsetmaker] '}' | NAME | NUMBER | STRING+ | '...' | 'None' | 'True' | 'False'
+// testlist_comp: (test|star_expr) ( comp_for | (',' (test|star_expr))* [','] )
+// trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME
+
+DEF_RULE_NC(atom, or(12), tok(NAME), tok(INTEGER), tok(FLOAT_OR_IMAG), tok(STRING), tok(BYTES), tok(ELLIPSIS), tok(KW_NONE), tok(KW_TRUE), tok(KW_FALSE), rule(atom_paren), rule(atom_bracket), rule(atom_brace))
+DEF_RULE(atom_paren, c(atom_paren), and(3), tok(DEL_PAREN_OPEN), opt_rule(atom_2b), tok(DEL_PAREN_CLOSE))
+DEF_RULE_NC(atom_2b, or(2), rule(yield_expr), rule(testlist_comp))
+DEF_RULE(atom_bracket, c(atom_bracket), and(3), tok(DEL_BRACKET_OPEN), opt_rule(testlist_comp), tok(DEL_BRACKET_CLOSE))
+DEF_RULE(atom_brace, c(atom_brace), and(3), tok(DEL_BRACE_OPEN), opt_rule(dictorsetmaker), tok(DEL_BRACE_CLOSE))
+DEF_RULE_NC(testlist_comp, and_ident(2), rule(testlist_comp_2), opt_rule(testlist_comp_3))
+DEF_RULE_NC(testlist_comp_2, or(2), rule(star_expr), rule(namedexpr_test))
+DEF_RULE_NC(testlist_comp_3, or(2), rule(comp_for), rule(testlist_comp_3b))
+DEF_RULE_NC(testlist_comp_3b, and_ident(2), tok(DEL_COMMA), opt_rule(testlist_comp_3c))
+DEF_RULE_NC(testlist_comp_3c, list_with_end, rule(testlist_comp_2), tok(DEL_COMMA))
+DEF_RULE_NC(trailer, or(3), rule(trailer_paren), rule(trailer_bracket), rule(trailer_period))
+DEF_RULE(trailer_paren, c(trailer_paren), and(3), tok(DEL_PAREN_OPEN), opt_rule(arglist), tok(DEL_PAREN_CLOSE))
+DEF_RULE(trailer_bracket, c(trailer_bracket), and(3), tok(DEL_BRACKET_OPEN), rule(subscriptlist), tok(DEL_BRACKET_CLOSE))
+DEF_RULE(trailer_period, c(trailer_period), and(2), tok(DEL_PERIOD), tok(NAME))
+
+// subscriptlist: subscript (',' subscript)* [',']
+// subscript: test | [test] ':' [test] [sliceop]
+// sliceop: ':' [test]
+
+#if MICROPY_PY_BUILTINS_SLICE
+DEF_RULE(subscriptlist, c(generic_tuple), list_with_end, rule(subscript), tok(DEL_COMMA))
+DEF_RULE_NC(subscript, or(2), rule(subscript_3), rule(subscript_2))
+DEF_RULE(subscript_2, c(subscript), and_ident(2), rule(test), opt_rule(subscript_3))
+DEF_RULE(subscript_3, c(subscript), and(2), tok(DEL_COLON), opt_rule(subscript_3b))
+DEF_RULE_NC(subscript_3b, or(2), rule(subscript_3c), rule(subscript_3d))
+DEF_RULE_NC(subscript_3c, and(2), tok(DEL_COLON), opt_rule(test))
+DEF_RULE_NC(subscript_3d, and_ident(2), rule(test), opt_rule(sliceop))
+DEF_RULE_NC(sliceop, and(2), tok(DEL_COLON), opt_rule(test))
+#else
+DEF_RULE(subscriptlist, c(generic_tuple), list_with_end, rule(test), tok(DEL_COMMA))
+#endif
+
+// exprlist: (expr|star_expr) (',' (expr|star_expr))* [',']
+// testlist: test (',' test)* [',']
+// dictorsetmaker: (test ':' test (comp_for | (',' test ':' test)* [','])) | (test (comp_for | (',' test)* [',']))
+
+DEF_RULE_NC(exprlist, list_with_end, rule(exprlist_2), tok(DEL_COMMA))
+DEF_RULE_NC(exprlist_2, or(2), rule(star_expr), rule(expr))
+DEF_RULE(testlist, c(generic_tuple), list_with_end, rule(test), tok(DEL_COMMA))
+// TODO dictorsetmaker lets through more than is allowed
+DEF_RULE_NC(dictorsetmaker, and_ident(2), rule(dictorsetmaker_item), opt_rule(dictorsetmaker_tail))
+#if MICROPY_PY_BUILTINS_SET
+DEF_RULE(dictorsetmaker_item, c(dictorsetmaker_item), and_ident(2), rule(test), opt_rule(generic_colon_test))
+#else
+DEF_RULE(dictorsetmaker_item, c(dictorsetmaker_item), and(3), rule(test), tok(DEL_COLON), rule(test))
+#endif
+DEF_RULE_NC(dictorsetmaker_tail, or(2), rule(comp_for), rule(dictorsetmaker_list))
+DEF_RULE_NC(dictorsetmaker_list, and(2), tok(DEL_COMMA), opt_rule(dictorsetmaker_list2))
+DEF_RULE_NC(dictorsetmaker_list2, list_with_end, rule(dictorsetmaker_item), tok(DEL_COMMA))
+
+// classdef: 'class' NAME ['(' [arglist] ')'] ':' suite
+
+DEF_RULE(classdef, c(classdef), and_blank(5), tok(KW_CLASS), tok(NAME), opt_rule(classdef_2), tok(DEL_COLON), rule(suite))
+DEF_RULE_NC(classdef_2, and_ident(3), tok(DEL_PAREN_OPEN), opt_rule(arglist), tok(DEL_PAREN_CLOSE))
+
+// arglist: (argument ',')* (argument [','] | '*' test (',' argument)* [',' '**' test] | '**' test)
+
+// TODO arglist lets through more than is allowed, compiler needs to do further verification
+DEF_RULE_NC(arglist, list_with_end, rule(arglist_2), tok(DEL_COMMA))
+DEF_RULE_NC(arglist_2, or(3), rule(arglist_star), rule(arglist_dbl_star), rule(argument))
+DEF_RULE_NC(arglist_star, and(2), tok(OP_STAR), rule(test))
+DEF_RULE_NC(arglist_dbl_star, and(2), tok(OP_DBL_STAR), rule(test))
+
+// # The reason that keywords are test nodes instead of NAME is that using NAME
+// # results in an ambiguity. ast.c makes sure it's a NAME.
+// argument: test [comp_for] | test '=' test  # Really [keyword '='] test
+// comp_iter: comp_for | comp_if
+// comp_for: 'for' exprlist 'in' or_test [comp_iter]
+// comp_if: 'if' test_nocond [comp_iter]
+
+DEF_RULE_NC(argument, and_ident(2), rule(test), opt_rule(argument_2))
+#if MICROPY_PY_ASSIGN_EXPR
+DEF_RULE_NC(argument_2, or(3), rule(comp_for), rule(generic_equal_test), rule(argument_3))
+DEF_RULE_NC(argument_3, and(2), tok(OP_ASSIGN), rule(test))
+#else
+DEF_RULE_NC(argument_2, or(2), rule(comp_for), rule(generic_equal_test))
+#endif
+DEF_RULE_NC(comp_iter, or(2), rule(comp_for), rule(comp_if))
+DEF_RULE_NC(comp_for, and_blank(5), tok(KW_FOR), rule(exprlist), tok(KW_IN), rule(or_test), opt_rule(comp_iter))
+DEF_RULE_NC(comp_if, and(3), tok(KW_IF), rule(test_nocond), opt_rule(comp_iter))
+
+// # not used in grammar, but may appear in "node" passed from Parser to Compiler
+// encoding_decl: NAME
+
+// yield_expr: 'yield' [yield_arg]
+// yield_arg: 'from' test | testlist
+
+DEF_RULE(yield_expr, c(yield_expr), and(2), tok(KW_YIELD), opt_rule(yield_arg))
+DEF_RULE_NC(yield_arg, or(2), rule(yield_arg_from), rule(testlist))
+DEF_RULE_NC(yield_arg_from, and(2), tok(KW_FROM), rule(test))

+ 944 - 0
mp_flipper/lib/micropython/py/lexer.c

@@ -0,0 +1,944 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+
+#include "py/reader.h"
+#include "py/lexer.h"
+#include "py/runtime.h"
+
+#if MICROPY_ENABLE_COMPILER
+
+#define TAB_SIZE (8)
+
+// TODO seems that CPython allows NULL byte in the input stream
+// don't know if that's intentional or not, but we don't allow it
+
+#define MP_LEXER_EOF ((unichar)MP_READER_EOF)
+#define CUR_CHAR(lex) ((lex)->chr0)
+
+// True once the input queue has reached end-of-input.
+static bool is_end(mp_lexer_t *lex) {
+    return CUR_CHAR(lex) == MP_LEXER_EOF;
+}
+
+// True when the current character is a physical LF (CR/CRLF are folded into LF by next_char).
+static bool is_physical_newline(mp_lexer_t *lex) {
+    return CUR_CHAR(lex) == '\n';
+}
+
+// Does the current character equal c?
+static bool is_char(mp_lexer_t *lex, byte c) {
+    return CUR_CHAR(lex) == c;
+}
+
+// Does the current character equal c1 or c2?
+static bool is_char_or(mp_lexer_t *lex, byte c1, byte c2) {
+    unichar c = CUR_CHAR(lex);
+    return c == c1 || c == c2;
+}
+
+// Does the current character equal one of c1, c2 or c3?
+static bool is_char_or3(mp_lexer_t *lex, byte c1, byte c2, byte c3) {
+    unichar c = CUR_CHAR(lex);
+    return c == c1 || c == c2 || c == c3;
+}
+
+#if MICROPY_PY_FSTRINGS
+// Does the current character equal one of c1, c2, c3 or c4?
+static bool is_char_or4(mp_lexer_t *lex, byte c1, byte c2, byte c3, byte c4) {
+    unichar c = CUR_CHAR(lex);
+    return c == c1 || c == c2 || c == c3 || c == c4;
+}
+#endif
+
+// Does the next (lookahead-1) character equal c?
+static bool is_char_following(mp_lexer_t *lex, byte c) {
+    return lex->chr1 == c;
+}
+
+// Does the next (lookahead-1) character equal c1 or c2?
+static bool is_char_following_or(mp_lexer_t *lex, byte c1, byte c2) {
+    return lex->chr1 == c1 || lex->chr1 == c2;
+}
+
+// Does the lookahead-2 character equal c1 or c2?
+static bool is_char_following_following_or(mp_lexer_t *lex, byte c1, byte c2) {
+    return lex->chr2 == c1 || lex->chr2 == c2;
+}
+
+// Is the current character c1 and the next one c2?
+static bool is_char_and(mp_lexer_t *lex, byte c1, byte c2) {
+    return lex->chr0 == c1 && lex->chr1 == c2;
+}
+
+// Is the current character whitespace?
+static bool is_whitespace(mp_lexer_t *lex) {
+    return unichar_isspace(CUR_CHAR(lex));
+}
+
+// Is the current character alphabetic?
+static bool is_letter(mp_lexer_t *lex) {
+    return unichar_isalpha(CUR_CHAR(lex));
+}
+
+// Is the current character a decimal digit?
+static bool is_digit(mp_lexer_t *lex) {
+    return unichar_isdigit(CUR_CHAR(lex));
+}
+
+// Is the next (lookahead-1) character a decimal digit?
+static bool is_following_digit(mp_lexer_t *lex) {
+    return unichar_isdigit(lex->chr1);
+}
+
+// After a leading '0', is the next character an integer base prefix
+// (b/B for binary, o/O for octal, x/X for hex)?
+static bool is_following_base_char(mp_lexer_t *lex) {
+    const unichar lower = lex->chr1 | 0x20; // fold ASCII letters to lowercase
+    return lower == 'b' || lower == 'o' || lower == 'x';
+}
+
+// Is the next (lookahead-1) character an octal digit ('0'..'7')?
+static bool is_following_odigit(mp_lexer_t *lex) {
+    return lex->chr1 >= '0' && lex->chr1 <= '7';
+}
+
+// Does the input start a string/bytes literal: a quote character, possibly
+// preceded by a one-letter prefix (r/u/b, plus f when fstrings are enabled)
+// or a two-letter combination (rb/br, and rf/fr when fstrings are enabled)?
+static bool is_string_or_bytes(mp_lexer_t *lex) {
+    return is_char_or(lex, '\'', '\"')
+           #if MICROPY_PY_FSTRINGS
+           || (is_char_or4(lex, 'r', 'u', 'b', 'f') && is_char_following_or(lex, '\'', '\"'))
+           || (((is_char_and(lex, 'r', 'f') || is_char_and(lex, 'f', 'r'))
+               && is_char_following_following_or(lex, '\'', '\"')))
+           #else
+           || (is_char_or3(lex, 'r', 'u', 'b') && is_char_following_or(lex, '\'', '\"'))
+           #endif
+           || ((is_char_and(lex, 'r', 'b') || is_char_and(lex, 'b', 'r'))
+               && is_char_following_following_or(lex, '\'', '\"'));
+}
+
+// To keep UTF-8 identifier parsing simple, any raw byte with the high bit
+// set is accepted as an identifier character.
+static bool is_head_of_identifier(mp_lexer_t *lex) {
+    unichar c = CUR_CHAR(lex);
+    return is_letter(lex) || c == '_' || c >= 0x80;
+}
+
+// Identifier tail characters: head characters plus decimal digits.
+static bool is_tail_of_identifier(mp_lexer_t *lex) {
+    return is_digit(lex) || is_head_of_identifier(lex);
+}
+
+// Advance the input by one character: shift the 3-char lookahead queue
+// (chr0/chr1/chr2) down and refill chr2, either from the injected fstring
+// argument buffer (when fstring_args_idx > 0) or from the reader.  Also
+// maintains line/column bookkeeping, normalises CR and CRLF to a single LF,
+// and guarantees the stream ends with a newline before EOF is seen.
+static void next_char(mp_lexer_t *lex) {
+    if (lex->chr0 == '\n') {
+        // a new line
+        ++lex->line;
+        lex->column = 1;
+    } else if (lex->chr0 == '\t') {
+        // a tab
+        lex->column = (((lex->column - 1 + TAB_SIZE) / TAB_SIZE) * TAB_SIZE) + 1;
+    } else {
+        // a character worth one column
+        ++lex->column;
+    }
+
+    // shift the input queue forward
+    lex->chr0 = lex->chr1;
+    lex->chr1 = lex->chr2;
+
+    // and add the next byte from either the fstring args or the reader
+    #if MICROPY_PY_FSTRINGS
+    if (lex->fstring_args_idx) {
+        // if there are saved chars, then we're currently injecting fstring args
+        if (lex->fstring_args_idx < lex->fstring_args.len) {
+            lex->chr2 = lex->fstring_args.buf[lex->fstring_args_idx++];
+        } else {
+            // no more fstring arg bytes
+            lex->chr2 = '\0';
+        }
+
+        if (lex->chr0 == '\0') {
+            // consumed all fstring data, restore saved input queue
+            lex->chr0 = lex->chr0_saved;
+            lex->chr1 = lex->chr1_saved;
+            lex->chr2 = lex->chr2_saved;
+            // stop consuming fstring arg data
+            vstr_reset(&lex->fstring_args);
+            lex->fstring_args_idx = 0;
+        }
+    } else
+    #endif
+    {
+        lex->chr2 = lex->reader.readbyte(lex->reader.data);
+    }
+
+    if (lex->chr1 == '\r') {
+        // CR is a new line, converted to LF
+        lex->chr1 = '\n';
+        if (lex->chr2 == '\n') {
+            // CR LF is a single new line, throw out the extra LF
+            lex->chr2 = lex->reader.readbyte(lex->reader.data);
+        }
+    }
+
+    // check if we need to insert a newline at end of file
+    if (lex->chr2 == MP_LEXER_EOF && lex->chr1 != MP_LEXER_EOF && lex->chr1 != '\n') {
+        lex->chr2 = '\n';
+    }
+}
+
+// Push a new indentation level onto the lexer's level stack, growing the
+// backing array by a fixed increment when it is full.
+static void indent_push(mp_lexer_t *lex, size_t indent) {
+    if (lex->num_indent_level >= lex->alloc_indent_level) {
+        size_t new_alloc = lex->alloc_indent_level + MICROPY_ALLOC_LEXEL_INDENT_INC;
+        lex->indent_level = m_renew(uint16_t, lex->indent_level, lex->alloc_indent_level, new_alloc);
+        lex->alloc_indent_level = new_alloc;
+    }
+    lex->indent_level[lex->num_indent_level++] = indent;
+}
+
+// Current (innermost) indentation level.
+static size_t indent_top(mp_lexer_t *lex) {
+    return lex->indent_level[lex->num_indent_level - 1];
+}
+
+// Discard the innermost indentation level (used when emitting DEDENTs).
+static void indent_pop(mp_lexer_t *lex) {
+    lex->num_indent_level -= 1;
+}
+
+// some tricky operator encoding:
+//     <op>  = begin with <op>, if this opchar matches then begin here
+//     e<op> = end with <op>, if this opchar matches then end
+//     c<op> = continue with <op>, if this opchar matches then continue matching
+// this means if the start of two ops are the same then they are equal til the last char
+
+// Decoded by mp_lexer_to_next; each token encoded here has a matching
+// entry, in the same order, in tok_enc_kind below.
+static const char *const tok_enc =
+    "()[]{},;~"   // singles
+    ":e="         // : :=
+    "<e=c<e="     // < <= << <<=
+    ">e=c>e="     // > >= >> >>=
+    "*e=c*e="     // * *= ** **=
+    "+e="         // + +=
+    "-e=e>"       // - -= ->
+    "&e="         // & &=
+    "|e="         // | |=
+    "/e=c/e="     // / /= // //=
+    "%e="         // % %=
+    "^e="         // ^ ^=
+    "@e="         // @ @=
+    "=e="         // = ==
+    "!.";         // start of special cases: != . ...
+
+// TODO static assert that number of tokens is less than 256 so we can safely make this table with byte sized entries
+// Token kind for each token encoded in tok_enc, in encoding order; must be
+// kept in sync with tok_enc above (indexed by position during decoding).
+static const uint8_t tok_enc_kind[] = {
+    MP_TOKEN_DEL_PAREN_OPEN, MP_TOKEN_DEL_PAREN_CLOSE,
+    MP_TOKEN_DEL_BRACKET_OPEN, MP_TOKEN_DEL_BRACKET_CLOSE,
+    MP_TOKEN_DEL_BRACE_OPEN, MP_TOKEN_DEL_BRACE_CLOSE,
+    MP_TOKEN_DEL_COMMA, MP_TOKEN_DEL_SEMICOLON, MP_TOKEN_OP_TILDE,
+
+    MP_TOKEN_DEL_COLON, MP_TOKEN_OP_ASSIGN,
+    MP_TOKEN_OP_LESS, MP_TOKEN_OP_LESS_EQUAL, MP_TOKEN_OP_DBL_LESS, MP_TOKEN_DEL_DBL_LESS_EQUAL,
+    MP_TOKEN_OP_MORE, MP_TOKEN_OP_MORE_EQUAL, MP_TOKEN_OP_DBL_MORE, MP_TOKEN_DEL_DBL_MORE_EQUAL,
+    MP_TOKEN_OP_STAR, MP_TOKEN_DEL_STAR_EQUAL, MP_TOKEN_OP_DBL_STAR, MP_TOKEN_DEL_DBL_STAR_EQUAL,
+    MP_TOKEN_OP_PLUS, MP_TOKEN_DEL_PLUS_EQUAL,
+    MP_TOKEN_OP_MINUS, MP_TOKEN_DEL_MINUS_EQUAL, MP_TOKEN_DEL_MINUS_MORE,
+    MP_TOKEN_OP_AMPERSAND, MP_TOKEN_DEL_AMPERSAND_EQUAL,
+    MP_TOKEN_OP_PIPE, MP_TOKEN_DEL_PIPE_EQUAL,
+    MP_TOKEN_OP_SLASH, MP_TOKEN_DEL_SLASH_EQUAL, MP_TOKEN_OP_DBL_SLASH, MP_TOKEN_DEL_DBL_SLASH_EQUAL,
+    MP_TOKEN_OP_PERCENT, MP_TOKEN_DEL_PERCENT_EQUAL,
+    MP_TOKEN_OP_CARET, MP_TOKEN_DEL_CARET_EQUAL,
+    MP_TOKEN_OP_AT, MP_TOKEN_DEL_AT_EQUAL,
+    MP_TOKEN_DEL_EQUAL, MP_TOKEN_OP_DBL_EQUAL,
+};
+
+// must have the same order as enum in lexer.h
+// must be sorted according to strcmp
+// (the keyword scan in mp_lexer_to_next early-exits using this sorted order
+// and computes the token kind as MP_TOKEN_KW_FALSE + index)
+static const char *const tok_kw[] = {
+    "False",
+    "None",
+    "True",
+    "__debug__",
+    "and",
+    "as",
+    "assert",
+    #if MICROPY_PY_ASYNC_AWAIT
+    "async",
+    "await",
+    #endif
+    "break",
+    "class",
+    "continue",
+    "def",
+    "del",
+    "elif",
+    "else",
+    "except",
+    "finally",
+    "for",
+    "from",
+    "global",
+    "if",
+    "import",
+    "in",
+    "is",
+    "lambda",
+    "nonlocal",
+    "not",
+    "or",
+    "pass",
+    "raise",
+    "return",
+    "try",
+    "while",
+    "with",
+    "yield",
+};
+
+// This is called with CUR_CHAR() before first hex digit, and should return with
+// it pointing to last hex digit
+// num_digits must be greater than zero
+// Read exactly num_digits hex digits into *result.  Called with CUR_CHAR()
+// on the character before the first digit; returns with CUR_CHAR() on the
+// last digit consumed.  Returns false (leaving *result unset) as soon as a
+// non-hex character is encountered.  num_digits must be greater than zero.
+static bool get_hex(mp_lexer_t *lex, size_t num_digits, mp_uint_t *result) {
+    mp_uint_t value = 0;
+    for (size_t i = 0; i < num_digits; ++i) {
+        next_char(lex);
+        unichar digit = CUR_CHAR(lex);
+        if (!unichar_isxdigit(digit)) {
+            return false;
+        }
+        value = (value << 4) + unichar_xdigit_value(digit);
+    }
+    *result = value;
+    return true;
+}
+
+// Tokenise one string/bytes literal (any r/u/b/f prefix already consumed;
+// CUR_CHAR is the opening quote).  The literal's decoded value accumulates
+// in lex->vstr; escape sequences are decoded unless is_raw.  For f-strings,
+// each {expr} is copied into lex->fstring_args (building the argument list
+// of a synthetic .format() call, injected later by mp_lexer_to_next) and
+// replaced by a plain {} in the literal itself.  On malformed input this
+// sets lex->tok_kind to MP_TOKEN_INVALID or MP_TOKEN_LONELY_STRING_OPEN.
+static void parse_string_literal(mp_lexer_t *lex, bool is_raw, bool is_fstring) {
+    // get first quoting character
+    char quote_char = '\'';
+    if (is_char(lex, '\"')) {
+        quote_char = '\"';
+    }
+    next_char(lex);
+
+    // work out if it's a single or triple quoted literal
+    size_t num_quotes;
+    if (is_char_and(lex, quote_char, quote_char)) {
+        // triple quotes
+        next_char(lex);
+        next_char(lex);
+        num_quotes = 3;
+    } else {
+        // single quotes
+        num_quotes = 1;
+    }
+
+    size_t n_closing = 0;
+    #if MICROPY_PY_FSTRINGS
+    if (is_fstring) {
+        // assume there's going to be interpolation, so prep the injection data
+        // fstring_args_idx==0 && len(fstring_args)>0 means we're extracting the args.
+        // only when fstring_args_idx>0 will we consume the arg data
+        // note: lex->fstring_args will be empty already (it's reset when finished)
+        vstr_add_str(&lex->fstring_args, ".format(");
+    }
+    #endif
+
+    while (!is_end(lex) && (num_quotes > 1 || !is_char(lex, '\n')) && n_closing < num_quotes) {
+        if (is_char(lex, quote_char)) {
+            n_closing += 1;
+            vstr_add_char(&lex->vstr, CUR_CHAR(lex));
+        } else {
+            n_closing = 0;
+
+            #if MICROPY_PY_FSTRINGS
+            while (is_fstring && is_char(lex, '{')) {
+                next_char(lex);
+                if (is_char(lex, '{')) {
+                    // "{{" is passed through unchanged to be handled by str.format
+                    vstr_add_byte(&lex->vstr, '{');
+                    next_char(lex);
+                } else {
+                    // wrap each argument in (), e.g.
+                    // f"{a,b,}, {c}" --> "{}".format((a,b), (c),)
+                    vstr_add_byte(&lex->fstring_args, '(');
+                    // remember the start of this argument (if we need it for f'{a=}').
+                    size_t i = lex->fstring_args.len;
+                    // Extract characters inside the { until the bracket level
+                    // is zero and we reach the conversion specifier '!',
+                    // format specifier ':', or closing '}'. The conversion
+                    // and format specifiers are left unchanged in the format
+                    // string to be handled by str.format.
+                    // (MicroPython limitation) note: this is completely
+                    // unaware of Python syntax and will not handle any
+                    // expression containing '}' or ':'. e.g. f'{"}"}' or f'
+                    // {foo({})}'. However, detection of the '!' will
+                    // specifically ensure that it's followed by [rs] and
+                    // then either the format specifier or the closing
+                    // brace. This allows the use of e.g. != in expressions.
+                    unsigned int nested_bracket_level = 0;
+                    while (!is_end(lex) && (nested_bracket_level != 0
+                                            || !(is_char_or(lex, ':', '}')
+                                                 || (is_char(lex, '!')
+                                                     && is_char_following_or(lex, 'r', 's')
+                                                     && is_char_following_following_or(lex, ':', '}'))))
+                           ) {
+                        unichar c = CUR_CHAR(lex);
+                        if (c == '[' || c == '{') {
+                            nested_bracket_level += 1;
+                        } else if (c == ']' || c == '}') {
+                            nested_bracket_level -= 1;
+                        }
+                        // like the default case at the end of this function, stay 8-bit clean
+                        vstr_add_byte(&lex->fstring_args, c);
+                        next_char(lex);
+                    }
+                    if (lex->fstring_args.buf[lex->fstring_args.len - 1] == '=') {
+                        // if the last character of the arg was '=', then inject "arg=" before the '{'.
+                        // f'{a=}' --> 'a={}'.format(a)
+                        vstr_add_strn(&lex->vstr, lex->fstring_args.buf + i, lex->fstring_args.len - i);
+                        // remove the trailing '='
+                        lex->fstring_args.len--;
+                    }
+                    // close the paren-wrapped arg to .format().
+                    vstr_add_byte(&lex->fstring_args, ')');
+                    // comma-separate args to .format().
+                    vstr_add_byte(&lex->fstring_args, ',');
+                }
+                vstr_add_byte(&lex->vstr, '{');
+            }
+            #endif
+
+            if (is_char(lex, '\\')) {
+                next_char(lex);
+                unichar c = CUR_CHAR(lex);
+                if (is_raw) {
+                    // raw strings allow escaping of quotes, but the backslash is also emitted
+                    vstr_add_char(&lex->vstr, '\\');
+                } else {
+                    switch (c) {
+                        // note: "c" can never be MP_LEXER_EOF because next_char
+                        // always inserts a newline at the end of the input stream
+                        case '\n':
+                            c = MP_LEXER_EOF;
+                            break;                          // backslash escape the newline, just ignore it
+                        case '\\':
+                            break;
+                        case '\'':
+                            break;
+                        case '"':
+                            break;
+                        case 'a':
+                            c = 0x07;
+                            break;
+                        case 'b':
+                            c = 0x08;
+                            break;
+                        case 't':
+                            c = 0x09;
+                            break;
+                        case 'n':
+                            c = 0x0a;
+                            break;
+                        case 'v':
+                            c = 0x0b;
+                            break;
+                        case 'f':
+                            c = 0x0c;
+                            break;
+                        case 'r':
+                            c = 0x0d;
+                            break;
+                        case 'u':
+                        case 'U':
+                            if (lex->tok_kind == MP_TOKEN_BYTES) {
+                                // b'\u1234' == b'\\u1234'
+                                vstr_add_char(&lex->vstr, '\\');
+                                break;
+                            }
+                            // Otherwise fall through.
+                            MP_FALLTHROUGH
+                        case 'x': {
+                            mp_uint_t num = 0;
+                            if (!get_hex(lex, (c == 'x' ? 2 : c == 'u' ? 4 : 8), &num)) {
+                                // not enough hex chars for escape sequence
+                                lex->tok_kind = MP_TOKEN_INVALID;
+                            }
+                            c = num;
+                            break;
+                        }
+                        case 'N':
+                            // Supporting '\N{LATIN SMALL LETTER A}' == 'a' would require keeping the
+                            // entire Unicode name table in the core. As of Unicode 6.3.0, that's nearly
+                            // 3MB of text; even gzip-compressed and with minimal structure, it'll take
+                            // roughly half a meg of storage. This form of Unicode escape may be added
+                            // later on, but it's definitely not a priority right now. -- CJA 20140607
+                            mp_raise_NotImplementedError(MP_ERROR_TEXT("unicode name escapes"));
+                            break;
+                        default:
+                            if (c >= '0' && c <= '7') {
+                                // Octal sequence, 1-3 chars
+                                size_t digits = 3;
+                                mp_uint_t num = c - '0';
+                                while (is_following_odigit(lex) && --digits != 0) {
+                                    next_char(lex);
+                                    num = num * 8 + (CUR_CHAR(lex) - '0');
+                                }
+                                c = num;
+                            } else {
+                                // unrecognised escape character; CPython lets this through verbatim as '\' and then the character
+                                vstr_add_char(&lex->vstr, '\\');
+                            }
+                            break;
+                    }
+                }
+                if (c != MP_LEXER_EOF) {
+                    #if MICROPY_PY_BUILTINS_STR_UNICODE
+                    if (c < 0x110000 && lex->tok_kind == MP_TOKEN_STRING) {
+                        // Valid unicode character in a str object.
+                        vstr_add_char(&lex->vstr, c);
+                    } else if (c < 0x100 && lex->tok_kind == MP_TOKEN_BYTES) {
+                        // Valid byte in a bytes object.
+                        vstr_add_byte(&lex->vstr, c);
+                    }
+                    #else
+                    if (c < 0x100) {
+                        // Without unicode everything is just added as an 8-bit byte.
+                        vstr_add_byte(&lex->vstr, c);
+                    }
+                    #endif
+                    else {
+                        // Character out of range; this raises a generic SyntaxError.
+                        lex->tok_kind = MP_TOKEN_INVALID;
+                    }
+                }
+            } else {
+                // Add the "character" as a byte so that we remain 8-bit clean.
+                // This way, strings are parsed correctly whether or not they contain utf-8 chars.
+                vstr_add_byte(&lex->vstr, CUR_CHAR(lex));
+            }
+        }
+        next_char(lex);
+    }
+
+    // check we got the required end quotes
+    if (n_closing < num_quotes) {
+        lex->tok_kind = MP_TOKEN_LONELY_STRING_OPEN;
+    }
+
+    // cut off the end quotes from the token text
+    vstr_cut_tail_bytes(&lex->vstr, n_closing);
+}
+
+// Skip over whitespace, comments and explicit line-continuations.
+// Returns true iff it stopped at a physical newline, which can only happen
+// when stop_at_newline is set and we are not inside brackets (next_char
+// guarantees the input ends with a newline, so with stop_at_newline set a
+// newline is always eventually reported).
+static bool skip_whitespace(mp_lexer_t *lex, bool stop_at_newline) {
+    for (;;) {
+        if (is_end(lex)) {
+            return false;
+        }
+        if (is_physical_newline(lex)) {
+            if (stop_at_newline && lex->nested_bracket_level == 0) {
+                return true;
+            }
+            next_char(lex);
+        } else if (is_whitespace(lex)) {
+            next_char(lex);
+        } else if (is_char(lex, '#')) {
+            // comment runs to the end of the line; the newline itself is
+            // dealt with on the next iteration
+            do {
+                next_char(lex);
+            } while (!is_end(lex) && !is_physical_newline(lex));
+        } else if (is_char_and(lex, '\\', '\n')) {
+            // line-continuation: consume both characters, don't report a newline
+            next_char(lex);
+            next_char(lex);
+        } else {
+            return false;
+        }
+    }
+}
+
+// Advance to the next token: sets lex->tok_kind/tok_line/tok_column and, for
+// names, numbers and string/bytes literals, leaves the token text in
+// lex->vstr.  Also emits pending INDENT/DEDENT tokens, concatenates adjacent
+// string literals, and — when a completed f-string literal has pending
+// arguments — switches the input queue over to the injected ".format(...)"
+// data built by parse_string_literal.
+void mp_lexer_to_next(mp_lexer_t *lex) {
+    #if MICROPY_PY_FSTRINGS
+    if (lex->fstring_args.len && lex->fstring_args_idx == 0) {
+        // moving onto the next token means the literal string is complete.
+        // switch into injecting the format args.
+        vstr_add_byte(&lex->fstring_args, ')');
+        lex->chr0_saved = lex->chr0;
+        lex->chr1_saved = lex->chr1;
+        lex->chr2_saved = lex->chr2;
+        lex->chr0 = lex->fstring_args.buf[0];
+        lex->chr1 = lex->fstring_args.buf[1];
+        lex->chr2 = lex->fstring_args.buf[2];
+        // we've already extracted 3 chars, but setting this non-zero also
+        // means we'll start consuming the fstring data
+        lex->fstring_args_idx = 3;
+    }
+    #endif
+
+    // start new token text
+    vstr_reset(&lex->vstr);
+
+    // skip white space and comments
+    // set the newline tokens at the line and column of the preceding line:
+    // only advance on the pointer until a new line is crossed, save the
+    // line and column, and then readvance it
+    bool had_physical_newline = skip_whitespace(lex, true);
+
+    // set token source information
+    lex->tok_line = lex->line;
+    lex->tok_column = lex->column;
+
+    if (lex->emit_dent < 0) {
+        lex->tok_kind = MP_TOKEN_DEDENT;
+        lex->emit_dent += 1;
+
+    } else if (lex->emit_dent > 0) {
+        lex->tok_kind = MP_TOKEN_INDENT;
+        lex->emit_dent -= 1;
+
+    } else if (had_physical_newline) {
+        // The cursor is at the end of the previous line, pointing to a
+        // physical newline. Skip any remaining whitespace, comments, and
+        // newlines.
+        skip_whitespace(lex, false);
+
+        lex->tok_kind = MP_TOKEN_NEWLINE;
+
+        size_t num_spaces = lex->column - 1;
+        if (num_spaces == indent_top(lex)) {
+        } else if (num_spaces > indent_top(lex)) {
+            indent_push(lex, num_spaces);
+            lex->emit_dent += 1;
+        } else {
+            while (num_spaces < indent_top(lex)) {
+                indent_pop(lex);
+                lex->emit_dent -= 1;
+            }
+            if (num_spaces != indent_top(lex)) {
+                lex->tok_kind = MP_TOKEN_DEDENT_MISMATCH;
+            }
+        }
+
+    } else if (is_end(lex)) {
+        lex->tok_kind = MP_TOKEN_END;
+
+    } else if (is_string_or_bytes(lex)) {
+        // a string or bytes literal
+
+        // Python requires adjacent string/bytes literals to be automatically
+        // concatenated.  We do it here in the tokeniser to make efficient use of RAM,
+        // because then the lexer's vstr can be used to accumulate the string literal,
+        // in contrast to creating a parse tree of strings and then joining them later
+        // in the compiler.  It's also more compact in code size to do it here.
+
+        // MP_TOKEN_END is used to indicate that this is the first string token
+        lex->tok_kind = MP_TOKEN_END;
+
+        // Loop to accumulate string/bytes literals
+        do {
+            // parse type codes
+            bool is_raw = false;
+            bool is_fstring = false;
+            mp_token_kind_t kind = MP_TOKEN_STRING;
+            int n_char = 0;
+            if (is_char(lex, 'u')) {
+                n_char = 1;
+            } else if (is_char(lex, 'b')) {
+                kind = MP_TOKEN_BYTES;
+                n_char = 1;
+                if (is_char_following(lex, 'r')) {
+                    is_raw = true;
+                    n_char = 2;
+                }
+            } else if (is_char(lex, 'r')) {
+                is_raw = true;
+                n_char = 1;
+                if (is_char_following(lex, 'b')) {
+                    kind = MP_TOKEN_BYTES;
+                    n_char = 2;
+                }
+                #if MICROPY_PY_FSTRINGS
+                if (is_char_following(lex, 'f')) {
+                    // raw-f-strings unsupported, immediately return (invalid) token.
+                    lex->tok_kind = MP_TOKEN_FSTRING_RAW;
+                    break;
+                }
+                #endif
+            }
+            #if MICROPY_PY_FSTRINGS
+            else if (is_char(lex, 'f')) {
+                if (is_char_following(lex, 'r')) {
+                    // raw-f-strings unsupported, immediately return (invalid) token.
+                    lex->tok_kind = MP_TOKEN_FSTRING_RAW;
+                    break;
+                }
+                n_char = 1;
+                is_fstring = true;
+            }
+            #endif
+
+            // Set or check token kind
+            if (lex->tok_kind == MP_TOKEN_END) {
+                lex->tok_kind = kind;
+            } else if (lex->tok_kind != kind) {
+                // Can't concatenate string with bytes
+                break;
+            }
+
+            // Skip any type code characters
+            if (n_char != 0) {
+                next_char(lex);
+                if (n_char == 2) {
+                    next_char(lex);
+                }
+            }
+
+            // Parse the literal
+            parse_string_literal(lex, is_raw, is_fstring);
+
+            // Skip whitespace so we can check if there's another string following
+            skip_whitespace(lex, true);
+
+        } while (is_string_or_bytes(lex));
+
+    } else if (is_head_of_identifier(lex)) {
+        lex->tok_kind = MP_TOKEN_NAME;
+
+        // get first char (add as byte to remain 8-bit clean and support utf-8)
+        vstr_add_byte(&lex->vstr, CUR_CHAR(lex));
+        next_char(lex);
+
+        // get tail chars
+        while (!is_end(lex) && is_tail_of_identifier(lex)) {
+            vstr_add_byte(&lex->vstr, CUR_CHAR(lex));
+            next_char(lex);
+        }
+
+        // Check if the name is a keyword.
+        // We also check for __debug__ here and convert it to its value.  This is
+        // so the parser gives a syntax error on, eg, x.__debug__.  Otherwise, we
+        // need to check for this special token in many places in the compiler.
+        const char *s = vstr_null_terminated_str(&lex->vstr);
+        for (size_t i = 0; i < MP_ARRAY_SIZE(tok_kw); i++) {
+            int cmp = strcmp(s, tok_kw[i]);
+            if (cmp == 0) {
+                lex->tok_kind = MP_TOKEN_KW_FALSE + i;
+                if (lex->tok_kind == MP_TOKEN_KW___DEBUG__) {
+                    lex->tok_kind = (MP_STATE_VM(mp_optimise_value) == 0 ? MP_TOKEN_KW_TRUE : MP_TOKEN_KW_FALSE);
+                }
+                break;
+            } else if (cmp < 0) {
+                // Table is sorted and comparison was less-than, so stop searching
+                break;
+            }
+        }
+
+    } else if (is_digit(lex) || (is_char(lex, '.') && is_following_digit(lex))) {
+        bool forced_integer = false;
+        if (is_char(lex, '.')) {
+            lex->tok_kind = MP_TOKEN_FLOAT_OR_IMAG;
+        } else {
+            lex->tok_kind = MP_TOKEN_INTEGER;
+            if (is_char(lex, '0') && is_following_base_char(lex)) {
+                forced_integer = true;
+            }
+        }
+
+        // get first char
+        vstr_add_char(&lex->vstr, CUR_CHAR(lex));
+        next_char(lex);
+
+        // get tail chars
+        while (!is_end(lex)) {
+            if (!forced_integer && is_char_or(lex, 'e', 'E')) {
+                lex->tok_kind = MP_TOKEN_FLOAT_OR_IMAG;
+                vstr_add_char(&lex->vstr, 'e');
+                next_char(lex);
+                if (is_char(lex, '+') || is_char(lex, '-')) {
+                    vstr_add_char(&lex->vstr, CUR_CHAR(lex));
+                    next_char(lex);
+                }
+            } else if (is_letter(lex) || is_digit(lex) || is_char(lex, '.')) {
+                if (is_char_or3(lex, '.', 'j', 'J')) {
+                    lex->tok_kind = MP_TOKEN_FLOAT_OR_IMAG;
+                }
+                vstr_add_char(&lex->vstr, CUR_CHAR(lex));
+                next_char(lex);
+            } else if (is_char(lex, '_')) {
+                next_char(lex);
+            } else {
+                break;
+            }
+        }
+
+    } else {
+        // search for encoded delimiter or operator
+
+        const char *t = tok_enc;
+        size_t tok_enc_index = 0;
+        for (; *t != 0 && !is_char(lex, *t); t += 1) {
+            if (*t == 'e' || *t == 'c') {
+                t += 1;
+            }
+            tok_enc_index += 1;
+        }
+
+        next_char(lex);
+
+        if (*t == 0) {
+            // didn't match any delimiter or operator characters
+            lex->tok_kind = MP_TOKEN_INVALID;
+
+        } else if (*t == '!') {
+            // "!=" is a special case because "!" is not a valid operator
+            if (is_char(lex, '=')) {
+                next_char(lex);
+                lex->tok_kind = MP_TOKEN_OP_NOT_EQUAL;
+            } else {
+                lex->tok_kind = MP_TOKEN_INVALID;
+            }
+
+        } else if (*t == '.') {
+            // "." and "..." are special cases because ".." is not a valid operator
+            if (is_char_and(lex, '.', '.')) {
+                next_char(lex);
+                next_char(lex);
+                lex->tok_kind = MP_TOKEN_ELLIPSIS;
+            } else {
+                lex->tok_kind = MP_TOKEN_DEL_PERIOD;
+            }
+
+        } else {
+            // matched a delimiter or operator character
+
+            // get the maximum characters for a valid token
+            t += 1;
+            size_t t_index = tok_enc_index;
+            while (*t == 'c' || *t == 'e') {
+                t_index += 1;
+                if (is_char(lex, t[1])) {
+                    next_char(lex);
+                    tok_enc_index = t_index;
+                    if (*t == 'e') {
+                        break;
+                    }
+                } else if (*t == 'c') {
+                    break;
+                }
+                t += 2;
+            }
+
+            // set token kind
+            lex->tok_kind = tok_enc_kind[tok_enc_index];
+
+            // compute bracket level for implicit line joining
+            if (lex->tok_kind == MP_TOKEN_DEL_PAREN_OPEN || lex->tok_kind == MP_TOKEN_DEL_BRACKET_OPEN || lex->tok_kind == MP_TOKEN_DEL_BRACE_OPEN) {
+                lex->nested_bracket_level += 1;
+            } else if (lex->tok_kind == MP_TOKEN_DEL_PAREN_CLOSE || lex->tok_kind == MP_TOKEN_DEL_BRACKET_CLOSE || lex->tok_kind == MP_TOKEN_DEL_BRACE_CLOSE) {
+                lex->nested_bracket_level -= 1;
+            }
+        }
+    }
+}
+
+// Create a lexer sourcing characters from `reader` (the lexer takes
+// ownership: the reader is closed by mp_lexer_free).  Initialises the
+// indent stack, fills the 3-char lookahead queue and preloads the first
+// token.  src_name is used for error reporting.
+mp_lexer_t *mp_lexer_new(qstr src_name, mp_reader_t reader) {
+    mp_lexer_t *lex = m_new_obj(mp_lexer_t);
+
+    lex->source_name = src_name;
+    lex->reader = reader;
+    lex->line = 1;
+    lex->column = (size_t)-2; // account for 3 dummy bytes
+    lex->emit_dent = 0;
+    lex->nested_bracket_level = 0;
+    lex->alloc_indent_level = MICROPY_ALLOC_LEXER_INDENT_INIT;
+    lex->num_indent_level = 1;
+    lex->indent_level = m_new(uint16_t, lex->alloc_indent_level);
+    vstr_init(&lex->vstr, 32);
+    #if MICROPY_PY_FSTRINGS
+    vstr_init(&lex->fstring_args, 0);
+    lex->fstring_args_idx = 0;
+    #endif
+
+    // store sentinel for first indentation level
+    lex->indent_level[0] = 0;
+
+    // load lexer with start of file, advancing lex->column to 1
+    // start with dummy bytes and use next_char() for proper EOL/EOF handling
+    lex->chr0 = lex->chr1 = lex->chr2 = 0;
+    next_char(lex);
+    next_char(lex);
+    next_char(lex);
+
+    // preload first token
+    mp_lexer_to_next(lex);
+
+    // Check that the first token is in the first column unless it is a
+    // newline. Otherwise we convert the token kind to INDENT so that
+    // the parser gives a syntax error.
+    if (lex->tok_column != 1 && lex->tok_kind != MP_TOKEN_NEWLINE) {
+        lex->tok_kind = MP_TOKEN_INDENT;
+    }
+
+    return lex;
+}
+
+// Convenience constructor: lex from an in-memory buffer of `len` bytes.
+// free_len is forwarded to mp_reader_new_mem (presumably non-zero transfers
+// buffer ownership to the reader -- confirm against py/reader.c).
+mp_lexer_t *mp_lexer_new_from_str_len(qstr src_name, const char *str, size_t len, size_t free_len) {
+    mp_reader_t reader;
+    mp_reader_new_mem(&reader, (const byte *)str, len, free_len);
+    return mp_lexer_new(src_name, reader);
+}
+
+#if MICROPY_READER_POSIX || MICROPY_READER_VFS
+
+// Convenience constructor: lex from the file named by `filename` (the qstr
+// doubles as the source name for error reporting).
+mp_lexer_t *mp_lexer_new_from_file(qstr filename) {
+    mp_reader_t reader;
+    mp_reader_new_file(&reader, filename);
+    return mp_lexer_new(filename, reader);
+}
+
+#if MICROPY_HELPER_LEXER_UNIX
+
+// Convenience constructor: lex from an already-open file descriptor.
+// close_fd is forwarded to mp_reader_new_file_from_fd (presumably whether
+// the reader closes fd on teardown -- confirm against py/reader.c).
+mp_lexer_t *mp_lexer_new_from_fd(qstr filename, int fd, bool close_fd) {
+    mp_reader_t reader;
+    mp_reader_new_file_from_fd(&reader, fd, close_fd);
+    return mp_lexer_new(filename, reader);
+}
+
+#endif
+
+#endif
+
+// Release everything owned by the lexer: the reader, the token and fstring
+// buffers, the indent stack and the lexer object itself.  NULL is a no-op.
+void mp_lexer_free(mp_lexer_t *lex) {
+    if (lex == NULL) {
+        return;
+    }
+    lex->reader.close(lex->reader.data);
+    vstr_clear(&lex->vstr);
+    #if MICROPY_PY_FSTRINGS
+    vstr_clear(&lex->fstring_args);
+    #endif
+    m_del(uint16_t, lex->indent_level, lex->alloc_indent_level);
+    m_del_obj(mp_lexer_t, lex);
+}
+
+#if 0
+// This function is used to print the current token and should only be
+// needed to debug the lexer, so it's not available via a config option.
+// Debug aid (compiled out via the surrounding #if 0): print the current
+// token's position, kind and printable utf-8 text to stdout.
+void mp_lexer_show_token(const mp_lexer_t *lex) {
+    printf("(" UINT_FMT ":" UINT_FMT ") kind:%u str:%p len:%zu", lex->tok_line, lex->tok_column, lex->tok_kind, lex->vstr.buf, lex->vstr.len);
+    if (lex->vstr.len > 0) {
+        const byte *i = (const byte *)lex->vstr.buf;
+        const byte *j = (const byte *)i + lex->vstr.len;
+        printf(" ");
+        while (i < j) {
+            unichar c = utf8_get_char(i);
+            i = utf8_next_char(i);
+            if (unichar_isprint(c)) {
+                printf("%c", (int)c);
+            } else {
+                printf("?");
+            }
+        }
+    }
+    printf("\n");
+}
+#endif
+
+#endif // MICROPY_ENABLE_COMPILER

+ 203 - 0
mp_flipper/lib/micropython/py/lexer.h

@@ -0,0 +1,203 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_LEXER_H
+#define MICROPY_INCLUDED_PY_LEXER_H
+
+#include <stdint.h>
+
+#include "py/mpconfig.h"
+#include "py/qstr.h"
+#include "py/reader.h"
+
+/* lexer.h -- simple tokeniser for MicroPython
+ *
+ * Uses (byte) length instead of null termination.
+ * Tokens are the same - UTF-8 with (byte) length.
+ */
+
// Token kinds produced by the lexer.  The relative order of several groups
// below is significant (see the mp_binary_op_t comments); do not reorder.
typedef enum _mp_token_kind_t {
    MP_TOKEN_END,

    MP_TOKEN_INVALID,
    MP_TOKEN_DEDENT_MISMATCH,
    MP_TOKEN_LONELY_STRING_OPEN,
    #if MICROPY_PY_FSTRINGS
    MP_TOKEN_MALFORMED_FSTRING,
    MP_TOKEN_FSTRING_RAW,
    #endif

    MP_TOKEN_NEWLINE,
    MP_TOKEN_INDENT,
    MP_TOKEN_DEDENT,

    MP_TOKEN_NAME,
    MP_TOKEN_INTEGER,
    MP_TOKEN_FLOAT_OR_IMAG,
    MP_TOKEN_STRING,
    MP_TOKEN_BYTES,

    MP_TOKEN_ELLIPSIS,

    // Keywords.  NOTE(review): these appear to be kept in alphabetical order
    // -- likely matched against a keyword table in lexer.c; confirm before
    // inserting or reordering entries.
    MP_TOKEN_KW_FALSE,
    MP_TOKEN_KW_NONE,
    MP_TOKEN_KW_TRUE,
    MP_TOKEN_KW___DEBUG__,
    MP_TOKEN_KW_AND,
    MP_TOKEN_KW_AS,
    MP_TOKEN_KW_ASSERT,
    #if MICROPY_PY_ASYNC_AWAIT
    MP_TOKEN_KW_ASYNC,
    MP_TOKEN_KW_AWAIT,
    #endif
    MP_TOKEN_KW_BREAK,
    MP_TOKEN_KW_CLASS,
    MP_TOKEN_KW_CONTINUE,
    MP_TOKEN_KW_DEF,
    MP_TOKEN_KW_DEL,
    MP_TOKEN_KW_ELIF,
    MP_TOKEN_KW_ELSE,
    MP_TOKEN_KW_EXCEPT,
    MP_TOKEN_KW_FINALLY,
    MP_TOKEN_KW_FOR,
    MP_TOKEN_KW_FROM,
    MP_TOKEN_KW_GLOBAL,
    MP_TOKEN_KW_IF,
    MP_TOKEN_KW_IMPORT,
    MP_TOKEN_KW_IN,
    MP_TOKEN_KW_IS,
    MP_TOKEN_KW_LAMBDA,
    MP_TOKEN_KW_NONLOCAL,
    MP_TOKEN_KW_NOT,
    MP_TOKEN_KW_OR,
    MP_TOKEN_KW_PASS,
    MP_TOKEN_KW_RAISE,
    MP_TOKEN_KW_RETURN,
    MP_TOKEN_KW_TRY,
    MP_TOKEN_KW_WHILE,
    MP_TOKEN_KW_WITH,
    MP_TOKEN_KW_YIELD,

    MP_TOKEN_OP_ASSIGN,
    MP_TOKEN_OP_TILDE,

    // Order of these 6 matches corresponding mp_binary_op_t operator
    MP_TOKEN_OP_LESS,
    MP_TOKEN_OP_MORE,
    MP_TOKEN_OP_DBL_EQUAL,
    MP_TOKEN_OP_LESS_EQUAL,
    MP_TOKEN_OP_MORE_EQUAL,
    MP_TOKEN_OP_NOT_EQUAL,

    // Order of these 13 matches corresponding mp_binary_op_t operator
    MP_TOKEN_OP_PIPE,
    MP_TOKEN_OP_CARET,
    MP_TOKEN_OP_AMPERSAND,
    MP_TOKEN_OP_DBL_LESS,
    MP_TOKEN_OP_DBL_MORE,
    MP_TOKEN_OP_PLUS,
    MP_TOKEN_OP_MINUS,
    MP_TOKEN_OP_STAR,
    MP_TOKEN_OP_AT,
    MP_TOKEN_OP_DBL_SLASH,
    MP_TOKEN_OP_SLASH,
    MP_TOKEN_OP_PERCENT,
    MP_TOKEN_OP_DBL_STAR,

    // Order of these 13 matches corresponding mp_binary_op_t operator
    MP_TOKEN_DEL_PIPE_EQUAL,
    MP_TOKEN_DEL_CARET_EQUAL,
    MP_TOKEN_DEL_AMPERSAND_EQUAL,
    MP_TOKEN_DEL_DBL_LESS_EQUAL,
    MP_TOKEN_DEL_DBL_MORE_EQUAL,
    MP_TOKEN_DEL_PLUS_EQUAL,
    MP_TOKEN_DEL_MINUS_EQUAL,
    MP_TOKEN_DEL_STAR_EQUAL,
    MP_TOKEN_DEL_AT_EQUAL,
    MP_TOKEN_DEL_DBL_SLASH_EQUAL,
    MP_TOKEN_DEL_SLASH_EQUAL,
    MP_TOKEN_DEL_PERCENT_EQUAL,
    MP_TOKEN_DEL_DBL_STAR_EQUAL,

    MP_TOKEN_DEL_PAREN_OPEN,
    MP_TOKEN_DEL_PAREN_CLOSE,
    MP_TOKEN_DEL_BRACKET_OPEN,
    MP_TOKEN_DEL_BRACKET_CLOSE,
    MP_TOKEN_DEL_BRACE_OPEN,
    MP_TOKEN_DEL_BRACE_CLOSE,
    MP_TOKEN_DEL_COMMA,
    MP_TOKEN_DEL_COLON,
    MP_TOKEN_DEL_PERIOD,
    MP_TOKEN_DEL_SEMICOLON,
    MP_TOKEN_DEL_EQUAL,
    MP_TOKEN_DEL_MINUS_MORE,
} mp_token_kind_t;
+
// this data structure is exposed for efficiency
// public members are: source_name, tok_line, tok_column, tok_kind, vstr
typedef struct _mp_lexer_t {
    qstr source_name;           // name of source
    mp_reader_t reader;         // stream source

    unichar chr0, chr1, chr2;   // current cached characters from source
    #if MICROPY_PY_FSTRINGS
    unichar chr0_saved, chr1_saved, chr2_saved; // current cached characters from alt source
    #endif

    size_t line;                // current source line
    size_t column;              // current source column

    mp_int_t emit_dent;             // non-zero when there are INDENT/DEDENT tokens to emit
    mp_int_t nested_bracket_level;  // >0 when there are nested brackets over multiple lines

    // Stack of indentation levels (freed in mp_lexer_free).
    size_t alloc_indent_level;  // allocated capacity of indent_level
    size_t num_indent_level;    // current depth of the indentation stack
    uint16_t *indent_level;

    size_t tok_line;            // token source line
    size_t tok_column;          // token source column
    mp_token_kind_t tok_kind;   // token kind
    vstr_t vstr;                // token data
    #if MICROPY_PY_FSTRINGS
    vstr_t fstring_args;        // extracted arguments to pass to .format()
    size_t fstring_args_idx;    // how many bytes of fstring_args have been read
    #endif
} mp_lexer_t;
+
// Create a lexer from an arbitrary reader; the lexer takes ownership of the
// reader and closes it in mp_lexer_free().
mp_lexer_t *mp_lexer_new(qstr src_name, mp_reader_t reader);
// Create a lexer over an in-memory buffer; free_len is forwarded to
// mp_reader_new_mem (presumably the number of bytes to free when done --
// confirm against reader.h).
mp_lexer_t *mp_lexer_new_from_str_len(qstr src_name, const char *str, size_t len, size_t free_len);

// If MICROPY_READER_POSIX or MICROPY_READER_VFS aren't enabled then
// this function must be implemented by the port.
mp_lexer_t *mp_lexer_new_from_file(qstr filename);

#if MICROPY_HELPER_LEXER_UNIX
mp_lexer_t *mp_lexer_new_from_fd(qstr filename, int fd, bool close_fd);
#endif

// Free the lexer, the reader it owns, and its internal buffers.
void mp_lexer_free(mp_lexer_t *lex);
// Advance the lexer to the next token (result in lex->tok_* and lex->vstr).
void mp_lexer_to_next(mp_lexer_t *lex);
+
+#endif // MICROPY_INCLUDED_PY_LEXER_H

+ 315 - 0
mp_flipper/lib/micropython/py/malloc.c

@@ -0,0 +1,315 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "py/mpconfig.h"
+#include "py/misc.h"
+#include "py/mpstate.h"
+
+#if MICROPY_DEBUG_VERBOSE // print debugging info
+#define DEBUG_printf DEBUG_printf
+#else // don't print debugging info
+#define DEBUG_printf(...) (void)0
+#endif
+
+#if MICROPY_MEM_STATS
+#if !MICROPY_MALLOC_USES_ALLOCATED_SIZE
+#error MICROPY_MEM_STATS requires MICROPY_MALLOC_USES_ALLOCATED_SIZE
+#endif
+#define UPDATE_PEAK() { if (MP_STATE_MEM(current_bytes_allocated) > MP_STATE_MEM(peak_bytes_allocated)) MP_STATE_MEM(peak_bytes_allocated) = MP_STATE_MEM(current_bytes_allocated); }
+#endif
+
+#if MICROPY_ENABLE_GC
+#include "py/gc.h"
+
+// We redirect standard alloc functions to GC heap - just for the rest of
+// this module. In the rest of MicroPython source, system malloc can be
+// freely accessed - for interfacing with system and 3rd-party libs for
+// example. On the other hand, some (e.g. bare-metal) ports may use GC
+// heap as system heap, so, to avoid warnings, we do undef's first.
+#undef malloc
+#undef free
+#undef realloc
+#define malloc(b) gc_alloc((b), false)
+#define malloc_with_finaliser(b) gc_alloc((b), true)
+#define free gc_free
+#define realloc(ptr, n) gc_realloc(ptr, n, true)
+#define realloc_ext(ptr, n, mv) gc_realloc(ptr, n, mv)
+#else
+
+// GC is disabled.  Use system malloc/realloc/free.
+
+#if MICROPY_ENABLE_FINALISER
+#error MICROPY_ENABLE_FINALISER requires MICROPY_ENABLE_GC
+#endif
+
// Fallback realloc_ext for builds without the GC: a resize that must not
// move the block cannot be expressed with plain realloc(), so it fails.
static void *realloc_ext(void *ptr, size_t n_bytes, bool allow_move) {
    if (!allow_move) {
        // realloc() is free to relocate the block, so an in-place-only
        // resize request can only be refused here.
        return NULL;
    }
    return realloc(ptr, n_bytes);
}
+
+#endif // MICROPY_ENABLE_GC
+
// Allocate num_bytes from the heap.  A failed allocation of a non-zero size
// calls m_malloc_fail() rather than returning NULL.
void *m_malloc(size_t num_bytes) {
    void *mem = malloc(num_bytes);
    if (mem == NULL && num_bytes != 0) {
        m_malloc_fail(num_bytes);
    }
    #if MICROPY_MEM_STATS
    MP_STATE_MEM(total_bytes_allocated) += num_bytes;
    MP_STATE_MEM(current_bytes_allocated) += num_bytes;
    UPDATE_PEAK();
    #endif
    DEBUG_printf("malloc %d : %p\n", num_bytes, mem);
    return mem;
}
+
// Like m_malloc() but returns NULL on failure instead of calling
// m_malloc_fail().
void *m_malloc_maybe(size_t num_bytes) {
    void *ptr = malloc(num_bytes);
    #if MICROPY_MEM_STATS
    // Don't count failed allocations, consistent with m_realloc_maybe();
    // otherwise current_bytes_allocated drifts upwards because no matching
    // m_free() will ever subtract these bytes.
    if (ptr != NULL) {
        MP_STATE_MEM(total_bytes_allocated) += num_bytes;
        MP_STATE_MEM(current_bytes_allocated) += num_bytes;
        UPDATE_PEAK();
    }
    #endif
    DEBUG_printf("malloc %d : %p\n", num_bytes, ptr);
    return ptr;
}
+
#if MICROPY_ENABLE_FINALISER
// Like m_malloc() but allocates via gc_alloc with the finaliser flag set
// (see the malloc_with_finaliser macro above).
void *m_malloc_with_finaliser(size_t num_bytes) {
    void *mem = malloc_with_finaliser(num_bytes);
    if (mem == NULL && num_bytes != 0) {
        m_malloc_fail(num_bytes);
    }
    #if MICROPY_MEM_STATS
    MP_STATE_MEM(total_bytes_allocated) += num_bytes;
    MP_STATE_MEM(current_bytes_allocated) += num_bytes;
    UPDATE_PEAK();
    #endif
    DEBUG_printf("malloc %d : %p\n", num_bytes, mem);
    return mem;
}
#endif
+
// Allocate num_bytes and return it zero-filled (calloc-like semantics).
void *m_malloc0(size_t num_bytes) {
    void *mem = m_malloc(num_bytes);
    #if !MICROPY_GC_CONSERVATIVE_CLEAR
    // The GC is not configured to clear all memory itself, so zero it here.
    memset(mem, 0, num_bytes);
    #endif
    return mem;
}
+
// Resize an allocation; calls m_malloc_fail() if resizing to a non-zero size
// fails.  The old-size parameter exists only when the port tracks allocation
// sizes (MICROPY_MALLOC_USES_ALLOCATED_SIZE), which is also a precondition
// for MICROPY_MEM_STATS (enforced by the #error near the top of this file).
#if MICROPY_MALLOC_USES_ALLOCATED_SIZE
void *m_realloc(void *ptr, size_t old_num_bytes, size_t new_num_bytes)
#else
void *m_realloc(void *ptr, size_t new_num_bytes)
#endif
{
    void *new_ptr = realloc(ptr, new_num_bytes);
    if (new_ptr == NULL && new_num_bytes != 0) {
        m_malloc_fail(new_num_bytes);
    }
    #if MICROPY_MEM_STATS
    // At first thought, "Total bytes allocated" should only grow,
    // after all, it's *total*. But consider for example 2K block
    // shrunk to 1K and then grown to 2K again. It's still 2K
    // allocated total. If we process only positive increments,
    // we'll count 3K.
    // Note: diff is computed with unsigned arithmetic, so a shrink wraps and
    // the += effectively subtracts -- this is intentional.
    size_t diff = new_num_bytes - old_num_bytes;
    MP_STATE_MEM(total_bytes_allocated) += diff;
    MP_STATE_MEM(current_bytes_allocated) += diff;
    UPDATE_PEAK();
    #endif
    #if MICROPY_MALLOC_USES_ALLOCATED_SIZE
    DEBUG_printf("realloc %p, %d, %d : %p\n", ptr, old_num_bytes, new_num_bytes, new_ptr);
    #else
    DEBUG_printf("realloc %p, %d : %p\n", ptr, new_num_bytes, new_ptr);
    #endif
    return new_ptr;
}
+
// Like m_realloc() but returns NULL on failure instead of calling
// m_malloc_fail().  allow_move=false requests an in-place-only resize
// (see realloc_ext()).
#if MICROPY_MALLOC_USES_ALLOCATED_SIZE
void *m_realloc_maybe(void *ptr, size_t old_num_bytes, size_t new_num_bytes, bool allow_move)
#else
void *m_realloc_maybe(void *ptr, size_t new_num_bytes, bool allow_move)
#endif
{
    void *new_ptr = realloc_ext(ptr, new_num_bytes, allow_move);
    #if MICROPY_MEM_STATS
    // At first thought, "Total bytes allocated" should only grow,
    // after all, it's *total*. But consider for example 2K block
    // shrunk to 1K and then grown to 2K again. It's still 2K
    // allocated total. If we process only positive increments,
    // we'll count 3K.
    // Also, don't count failed reallocs.
    if (!(new_ptr == NULL && new_num_bytes != 0)) {
        size_t diff = new_num_bytes - old_num_bytes;
        MP_STATE_MEM(total_bytes_allocated) += diff;
        MP_STATE_MEM(current_bytes_allocated) += diff;
        UPDATE_PEAK();
    }
    #endif
    #if MICROPY_MALLOC_USES_ALLOCATED_SIZE
    DEBUG_printf("realloc %p, %d, %d : %p\n", ptr, old_num_bytes, new_num_bytes, new_ptr);
    #else
    DEBUG_printf("realloc %p, %d : %p\n", ptr, new_num_bytes, new_ptr);
    #endif
    return new_ptr;
}
+
// Free memory from the m_malloc family.  num_bytes is present only when
// MICROPY_MALLOC_USES_ALLOCATED_SIZE is set, and is used to keep the
// memory statistics accurate.
#if MICROPY_MALLOC_USES_ALLOCATED_SIZE
void m_free(void *ptr, size_t num_bytes)
#else
void m_free(void *ptr)
#endif
{
    free(ptr);
    #if MICROPY_MEM_STATS
    MP_STATE_MEM(current_bytes_allocated) -= num_bytes;
    #endif
    #if MICROPY_MALLOC_USES_ALLOCATED_SIZE
    DEBUG_printf("free %p, %d\n", ptr, num_bytes);
    #else
    DEBUG_printf("free %p\n", ptr);
    #endif
}
+
#if MICROPY_TRACKED_ALLOC

// When the GC is disabled gc_nbytes() is unavailable, so the size of each
// tracked allocation must be stored in the node itself.
#define MICROPY_TRACKED_ALLOC_STORE_SIZE (!MICROPY_ENABLE_GC)

// Header prepended to every tracked allocation; nodes form a doubly-linked
// list rooted at MP_STATE_VM(m_tracked_head).
typedef struct _m_tracked_node_t {
    struct _m_tracked_node_t *prev;
    struct _m_tracked_node_t *next;
    #if MICROPY_TRACKED_ALLOC_STORE_SIZE
    uintptr_t size;
    #endif
    uint8_t data[];  // flexible array member: the caller-visible payload
} m_tracked_node_t;

#if MICROPY_DEBUG_VERBOSE
// Count the nodes in the tracked-allocation list; *nb receives the total
// number of payload bytes.  Debug builds only.
static size_t m_tracked_count_links(size_t *nb) {
    m_tracked_node_t *node = MP_STATE_VM(m_tracked_head);
    size_t n = 0;
    *nb = 0;
    while (node != NULL) {
        ++n;
        #if MICROPY_TRACKED_ALLOC_STORE_SIZE
        *nb += node->size;
        #else
        *nb += gc_nbytes(node);
        #endif
        node = node->next;
    }
    return n;
}
#endif
+
+void *m_tracked_calloc(size_t nmemb, size_t size) {
+    m_tracked_node_t *node = m_malloc_maybe(sizeof(m_tracked_node_t) + nmemb * size);
+    if (node == NULL) {
+        return NULL;
+    }
+    #if MICROPY_DEBUG_VERBOSE
+    size_t nb;
+    size_t n = m_tracked_count_links(&nb);
+    DEBUG_printf("m_tracked_calloc(%u, %u) -> (%u;%u) %p\n", (int)nmemb, (int)size, (int)n, (int)nb, node);
+    #endif
+    if (MP_STATE_VM(m_tracked_head) != NULL) {
+        MP_STATE_VM(m_tracked_head)->prev = node;
+    }
+    node->prev = NULL;
+    node->next = MP_STATE_VM(m_tracked_head);
+    MP_STATE_VM(m_tracked_head) = node;
+    #if MICROPY_TRACKED_ALLOC_STORE_SIZE
+    node->size = nmemb * size;
+    #endif
+    #if !MICROPY_GC_CONSERVATIVE_CLEAR
+    memset(&node->data[0], 0, nmemb * size);
+    #endif
+    return &node->data[0];
+}
+
// Free a pointer returned by m_tracked_calloc(), unlinking its node from the
// doubly-linked list of tracked allocations.  NULL is a no-op.
void m_tracked_free(void *ptr_in) {
    if (ptr_in == NULL) {
        return;
    }
    // Recover the node header that sits immediately before the payload.
    m_tracked_node_t *node = (m_tracked_node_t *)((uint8_t *)ptr_in - sizeof(m_tracked_node_t));
    #if MICROPY_DEBUG_VERBOSE
    size_t data_bytes;
    #if MICROPY_TRACKED_ALLOC_STORE_SIZE
    data_bytes = node->size;
    #else
    data_bytes = gc_nbytes(node);
    #endif
    size_t nb;
    size_t n = m_tracked_count_links(&nb);
    DEBUG_printf("m_tracked_free(%p, [%p, %p], nbytes=%u, links=%u;%u)\n", node, node->prev, node->next, (int)data_bytes, (int)n, (int)nb);
    #endif
    // Unlink the node; the list head moves when the first node is freed.
    if (node->next != NULL) {
        node->next->prev = node->prev;
    }
    if (node->prev != NULL) {
        node->prev->next = node->next;
    } else {
        MP_STATE_VM(m_tracked_head) = node->next;
    }
    m_free(node
        #if MICROPY_MALLOC_USES_ALLOCATED_SIZE
        #if MICROPY_TRACKED_ALLOC_STORE_SIZE
        , node->size
        #else
        , gc_nbytes(node)
        #endif
        #endif
        );
}
+
+#endif // MICROPY_TRACKED_ALLOC
+
#if MICROPY_MEM_STATS
// Cumulative bytes ever allocated (see the shrink/grow note in m_realloc).
size_t m_get_total_bytes_allocated(void) {
    return MP_STATE_MEM(total_bytes_allocated);
}

// Bytes currently allocated and not yet freed.
size_t m_get_current_bytes_allocated(void) {
    return MP_STATE_MEM(current_bytes_allocated);
}

// High-water mark of current_bytes_allocated (maintained by UPDATE_PEAK).
size_t m_get_peak_bytes_allocated(void) {
    return MP_STATE_MEM(peak_bytes_allocated);
}
#endif

+ 461 - 0
mp_flipper/lib/micropython/py/map.c

@@ -0,0 +1,461 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+
+#include "py/mpconfig.h"
+#include "py/misc.h"
+#include "py/runtime.h"
+
+#if MICROPY_DEBUG_VERBOSE // print debugging info
+#define DEBUG_PRINT (1)
+#else // don't print debugging info
+#define DEBUG_PRINT (0)
+#define DEBUG_printf(...) (void)0
+#endif
+
#if MICROPY_OPT_MAP_LOOKUP_CACHE
// MP_STATE_VM(map_lookup_cache) provides a cache of index to the last known
// position of that index in any map. On a cache hit, this allows
// short-circuiting the full linear search in the case of an ordered map
// (i.e. all builtin modules and objects' locals dicts), and computation of
// the hash (and potentially some linear probing) in the case of a regular
// map. Note the same cache is shared across all maps.

// Gets the index into the cache for this index. Shift down by two to remove
// mp_obj_t tag bits.
#define MAP_CACHE_OFFSET(index) ((((uintptr_t)(index)) >> 2) % MICROPY_OPT_MAP_LOOKUP_CACHE_SIZE)
// Gets the map cache entry for the corresponding index.
#define MAP_CACHE_ENTRY(index) (MP_STATE_VM(map_lookup_cache)[MAP_CACHE_OFFSET(index)])
// Retrieve the mp_obj_t at the location suggested by the cache.
#define MAP_CACHE_GET(map, index) (&(map)->table[MAP_CACHE_ENTRY(index) % (map)->alloc])
// Update the cache for this index.  Only the low 8 bits of the position are
// stored; MAP_CACHE_GET reduces the entry modulo the map's allocation, so a
// stale or truncated entry is merely a cache miss, never out of bounds.
#define MAP_CACHE_SET(index, pos) MAP_CACHE_ENTRY(index) = (pos) & 0xff;
#else
#define MAP_CACHE_SET(index, pos)
#endif
+
// This table of sizes is used to control the growth of hash tables.
// The first set of sizes are chosen so the allocation fits exactly in a
// 4-word GC block, and it's not so important for these small values to be
// prime.  The latter sizes are prime and increase at an increasing rate.
static const uint16_t hash_allocation_sizes[] = {
    0, 2, 4, 6, 8, 10, 12, // +2
    17, 23, 29, 37, 47, 59, 73, // *1.25
    97, 127, 167, 223, 293, 389, 521, 691, 919, 1223, 1627, 2161, // *1.33
    3229, 4831, 7243, 10861, 16273, 24407, 36607, 54907, // *1.5
};

// Return the smallest entry in the table above that is >= x, falling back to
// an odd value derived from x if the table is exhausted.
static size_t get_hash_alloc_greater_or_equal_to(size_t x) {
    for (size_t i = 0; i < MP_ARRAY_SIZE(hash_allocation_sizes); i++) {
        if (hash_allocation_sizes[i] >= x) {
            return hash_allocation_sizes[i];
        }
    }
    // ran out of primes in the table!
    // return something sensible, at least make it odd
    return (x + x / 2) | 1;
}
+
+/******************************************************************************/
+/* map                                                                        */
+
+void mp_map_init(mp_map_t *map, size_t n) {
+    if (n == 0) {
+        map->alloc = 0;
+        map->table = NULL;
+    } else {
+        map->alloc = n;
+        map->table = m_new0(mp_map_elem_t, map->alloc);
+    }
+    map->used = 0;
+    map->all_keys_are_qstrs = 1;
+    map->is_fixed = 0;
+    map->is_ordered = 0;
+}
+
+void mp_map_init_fixed_table(mp_map_t *map, size_t n, const mp_obj_t *table) {
+    map->alloc = n;
+    map->used = n;
+    map->all_keys_are_qstrs = 1;
+    map->is_fixed = 1;
+    map->is_ordered = 1;
+    map->table = (mp_map_elem_t *)table;
+}
+
+// Differentiate from mp_map_clear() - semantics is different
+void mp_map_deinit(mp_map_t *map) {
+    if (!map->is_fixed) {
+        m_del(mp_map_elem_t, map->table, map->alloc);
+    }
+    map->used = map->alloc = 0;
+}
+
+void mp_map_clear(mp_map_t *map) {
+    if (!map->is_fixed) {
+        m_del(mp_map_elem_t, map->table, map->alloc);
+    }
+    map->alloc = 0;
+    map->used = 0;
+    map->all_keys_are_qstrs = 1;
+    map->is_fixed = 0;
+    map->table = NULL;
+}
+
// Grow the map's hash table to the next allocation size and re-insert all
// existing entries.  The new table is allocated before the map is modified,
// so an allocation failure (which raises) leaves the map intact.
static void mp_map_rehash(mp_map_t *map) {
    size_t old_alloc = map->alloc;
    size_t new_alloc = get_hash_alloc_greater_or_equal_to(map->alloc + 1);
    DEBUG_printf("mp_map_rehash(%p): " UINT_FMT " -> " UINT_FMT "\n", map, old_alloc, new_alloc);
    mp_map_elem_t *old_table = map->table;
    mp_map_elem_t *new_table = m_new0(mp_map_elem_t, new_alloc);
    // If we reach this point, table resizing succeeded, now we can edit the old map.
    map->alloc = new_alloc;
    map->used = 0;
    map->all_keys_are_qstrs = 1;
    map->table = new_table;
    for (size_t i = 0; i < old_alloc; i++) {
        // Skip empty and deleted (sentinel) slots.
        if (old_table[i].key != MP_OBJ_NULL && old_table[i].key != MP_OBJ_SENTINEL) {
            mp_map_lookup(map, old_table[i].key, MP_MAP_LOOKUP_ADD_IF_NOT_FOUND)->value = old_table[i].value;
        }
    }
    m_del(mp_map_elem_t, old_table, old_alloc);
}
+
// MP_MAP_LOOKUP behaviour:
//  - returns NULL if not found, else the slot it was found in with key,value non-null
// MP_MAP_LOOKUP_ADD_IF_NOT_FOUND behaviour:
//  - returns slot, with key non-null and value=MP_OBJ_NULL if it was added
// MP_MAP_LOOKUP_REMOVE_IF_FOUND behaviour:
//  - returns NULL if not found, else the slot if was found in with key null and value non-null
mp_map_elem_t *MICROPY_WRAP_MP_MAP_LOOKUP(mp_map_lookup)(mp_map_t * map, mp_obj_t index, mp_map_lookup_kind_t lookup_kind) {
    // If the map is a fixed array then we must only be called for a lookup
    assert(!map->is_fixed || lookup_kind == MP_MAP_LOOKUP);

    #if MICROPY_OPT_MAP_LOOKUP_CACHE
    // Try the cache for lookup or add-if-not-found.
    if (lookup_kind != MP_MAP_LOOKUP_REMOVE_IF_FOUND && map->alloc) {
        mp_map_elem_t *slot = MAP_CACHE_GET(map, index);
        // Note: Just comparing key for value equality will have false negatives, but
        // these will be handled by the regular path below.
        if (slot->key == index) {
            return slot;
        }
    }
    #endif

    // Work out if we can compare just pointers
    bool compare_only_ptrs = map->all_keys_are_qstrs;
    if (compare_only_ptrs) {
        if (mp_obj_is_qstr(index)) {
            // Index is a qstr, so can just do ptr comparison.
        } else if (mp_obj_is_exact_type(index, &mp_type_str)) {
            // Index is a non-interned string.
            // We can either intern the string, or force a full equality comparison.
            // We chose the latter, since interning costs time and potentially RAM,
            // and it won't necessarily benefit subsequent calls because these calls
            // most likely won't pass the newly-interned string.
            compare_only_ptrs = false;
        } else if (lookup_kind != MP_MAP_LOOKUP_ADD_IF_NOT_FOUND) {
            // If we are not adding, then we can return straight away a failed
            // lookup because we know that the index will never be found.
            return NULL;
        }
    }

    // if the map is an ordered array then we must do a brute force linear search
    if (map->is_ordered) {
        for (mp_map_elem_t *elem = &map->table[0], *top = &map->table[map->used]; elem < top; elem++) {
            if (elem->key == index || (!compare_only_ptrs && mp_obj_equal(elem->key, index))) {
                #if MICROPY_PY_COLLECTIONS_ORDEREDDICT
                if (MP_UNLIKELY(lookup_kind == MP_MAP_LOOKUP_REMOVE_IF_FOUND)) {
                    // remove the found element by moving the rest of the array down
                    mp_obj_t value = elem->value;
                    --map->used;
                    memmove(elem, elem + 1, (top - elem - 1) * sizeof(*elem));
                    // put the found element after the end so the caller can access it if needed
                    // note: caller must NULL the value so the GC can clean up (e.g. see dict_get_helper).
                    elem = &map->table[map->used];
                    elem->key = MP_OBJ_NULL;
                    elem->value = value;
                }
                #endif
                MAP_CACHE_SET(index, elem - map->table);
                return elem;
            }
        }
        #if MICROPY_PY_COLLECTIONS_ORDEREDDICT
        if (MP_LIKELY(lookup_kind != MP_MAP_LOOKUP_ADD_IF_NOT_FOUND)) {
            return NULL;
        }
        // Append the new key at the end of the ordered array, growing it if full.
        if (map->used == map->alloc) {
            // TODO: Alloc policy
            map->alloc += 4;
            map->table = m_renew(mp_map_elem_t, map->table, map->used, map->alloc);
            mp_seq_clear(map->table, map->used, map->alloc, sizeof(*map->table));
        }
        mp_map_elem_t *elem = map->table + map->used++;
        elem->key = index;
        elem->value = MP_OBJ_NULL;
        if (!mp_obj_is_qstr(index)) {
            map->all_keys_are_qstrs = 0;
        }
        return elem;
        #else
        return NULL;
        #endif
    }

    // map is a hash table (not an ordered array), so do a hash lookup

    if (map->alloc == 0) {
        if (lookup_kind == MP_MAP_LOOKUP_ADD_IF_NOT_FOUND) {
            mp_map_rehash(map);
        } else {
            return NULL;
        }
    }

    // get hash of index, with fast path for common case of qstr
    mp_uint_t hash;
    if (mp_obj_is_qstr(index)) {
        hash = qstr_hash(MP_OBJ_QSTR_VALUE(index));
    } else {
        hash = MP_OBJ_SMALL_INT_VALUE(mp_unary_op(MP_UNARY_OP_HASH, index));
    }

    // Open addressing with linear probing.  avail_slot remembers the first
    // deleted (sentinel) slot seen, so insertions can reuse it.
    size_t pos = hash % map->alloc;
    size_t start_pos = pos;
    mp_map_elem_t *avail_slot = NULL;
    for (;;) {
        mp_map_elem_t *slot = &map->table[pos];
        if (slot->key == MP_OBJ_NULL) {
            // found NULL slot, so index is not in table
            if (lookup_kind == MP_MAP_LOOKUP_ADD_IF_NOT_FOUND) {
                map->used += 1;
                if (avail_slot == NULL) {
                    avail_slot = slot;
                }
                avail_slot->key = index;
                avail_slot->value = MP_OBJ_NULL;
                if (!mp_obj_is_qstr(index)) {
                    map->all_keys_are_qstrs = 0;
                }
                return avail_slot;
            } else {
                return NULL;
            }
        } else if (slot->key == MP_OBJ_SENTINEL) {
            // found deleted slot, remember for later
            if (avail_slot == NULL) {
                avail_slot = slot;
            }
        } else if (slot->key == index || (!compare_only_ptrs && mp_obj_equal(slot->key, index))) {
            // found index
            // Note: CPython does not replace the index; try x={True:'true'};x[1]='one';x
            if (lookup_kind == MP_MAP_LOOKUP_REMOVE_IF_FOUND) {
                // delete element in this slot
                map->used--;
                if (map->table[(pos + 1) % map->alloc].key == MP_OBJ_NULL) {
                    // optimisation if next slot is empty
                    slot->key = MP_OBJ_NULL;
                } else {
                    slot->key = MP_OBJ_SENTINEL;
                }
                // keep slot->value so that caller can access it if needed
            }
            MAP_CACHE_SET(index, pos);
            return slot;
        }

        // not yet found, keep searching in this table
        pos = (pos + 1) % map->alloc;

        if (pos == start_pos) {
            // search got back to starting position, so index is not in table
            if (lookup_kind == MP_MAP_LOOKUP_ADD_IF_NOT_FOUND) {
                if (avail_slot != NULL) {
                    // there was an available slot, so use that
                    map->used++;
                    avail_slot->key = index;
                    avail_slot->value = MP_OBJ_NULL;
                    if (!mp_obj_is_qstr(index)) {
                        map->all_keys_are_qstrs = 0;
                    }
                    return avail_slot;
                } else {
                    // not enough room in table, rehash it
                    mp_map_rehash(map);
                    // restart the search for the new element
                    start_pos = pos = hash % map->alloc;
                }
            } else {
                return NULL;
            }
        }
    }
}
+
+/******************************************************************************/
+/* set                                                                        */
+
+#if MICROPY_PY_BUILTINS_SET
+
+void mp_set_init(mp_set_t *set, size_t n) {
+    set->alloc = n;
+    set->used = 0;
+    set->table = m_new0(mp_obj_t, set->alloc);
+}
+
// Grow the set's table to the next allocation size and re-insert all
// existing elements (skipping empty and deleted sentinel slots).
static void mp_set_rehash(mp_set_t *set) {
    size_t old_alloc = set->alloc;
    mp_obj_t *old_table = set->table;
    set->alloc = get_hash_alloc_greater_or_equal_to(set->alloc + 1);
    set->used = 0;
    set->table = m_new0(mp_obj_t, set->alloc);
    for (size_t i = 0; i < old_alloc; i++) {
        if (old_table[i] != MP_OBJ_NULL && old_table[i] != MP_OBJ_SENTINEL) {
            mp_set_lookup(set, old_table[i], MP_MAP_LOOKUP_ADD_IF_NOT_FOUND);
        }
    }
    m_del(mp_obj_t, old_table, old_alloc);
}
+
// Look up (and optionally add or remove) index in the set, returning the
// stored element or MP_OBJ_NULL if absent.
mp_obj_t mp_set_lookup(mp_set_t *set, mp_obj_t index, mp_map_lookup_kind_t lookup_kind) {
    // Note: lookup_kind can be MP_MAP_LOOKUP_ADD_IF_NOT_FOUND_OR_REMOVE_IF_FOUND which
    // is handled by using bitwise operations.

    if (set->alloc == 0) {
        if (lookup_kind & MP_MAP_LOOKUP_ADD_IF_NOT_FOUND) {
            mp_set_rehash(set);
        } else {
            return MP_OBJ_NULL;
        }
    }
    mp_uint_t hash = MP_OBJ_SMALL_INT_VALUE(mp_unary_op(MP_UNARY_OP_HASH, index));
    // Open addressing with linear probing; avail_slot remembers the first
    // deleted (sentinel) slot so insertions can reuse it.
    size_t pos = hash % set->alloc;
    size_t start_pos = pos;
    mp_obj_t *avail_slot = NULL;
    for (;;) {
        mp_obj_t elem = set->table[pos];
        if (elem == MP_OBJ_NULL) {
            // found NULL slot, so index is not in table
            if (lookup_kind & MP_MAP_LOOKUP_ADD_IF_NOT_FOUND) {
                if (avail_slot == NULL) {
                    avail_slot = &set->table[pos];
                }
                set->used++;
                *avail_slot = index;
                return index;
            } else {
                return MP_OBJ_NULL;
            }
        } else if (elem == MP_OBJ_SENTINEL) {
            // found deleted slot, remember for later
            if (avail_slot == NULL) {
                avail_slot = &set->table[pos];
            }
        } else if (mp_obj_equal(elem, index)) {
            // found index
            if (lookup_kind & MP_MAP_LOOKUP_REMOVE_IF_FOUND) {
                // delete element
                set->used--;
                if (set->table[(pos + 1) % set->alloc] == MP_OBJ_NULL) {
                    // optimisation if next slot is empty
                    set->table[pos] = MP_OBJ_NULL;
                } else {
                    set->table[pos] = MP_OBJ_SENTINEL;
                }
            }
            return elem;
        }

        // not yet found, keep searching in this table
        pos = (pos + 1) % set->alloc;

        if (pos == start_pos) {
            // search got back to starting position, so index is not in table
            if (lookup_kind & MP_MAP_LOOKUP_ADD_IF_NOT_FOUND) {
                if (avail_slot != NULL) {
                    // there was an available slot, so use that
                    set->used++;
                    *avail_slot = index;
                    return index;
                } else {
                    // not enough room in table, rehash it
                    mp_set_rehash(set);
                    // restart the search for the new element
                    start_pos = pos = hash % set->alloc;
                }
            } else {
                return MP_OBJ_NULL;
            }
        }
    }
}
+
+mp_obj_t mp_set_remove_first(mp_set_t *set) {
+    for (size_t pos = 0; pos < set->alloc; pos++) {
+        if (mp_set_slot_is_filled(set, pos)) {
+            mp_obj_t elem = set->table[pos];
+            // delete element
+            set->used--;
+            if (set->table[(pos + 1) % set->alloc] == MP_OBJ_NULL) {
+                // optimisation if next slot is empty
+                set->table[pos] = MP_OBJ_NULL;
+            } else {
+                set->table[pos] = MP_OBJ_SENTINEL;
+            }
+            return elem;
+        }
+    }
+    return MP_OBJ_NULL;
+}
+
+void mp_set_clear(mp_set_t *set) {
+    m_del(mp_obj_t, set->table, set->alloc);
+    set->alloc = 0;
+    set->used = 0;
+    set->table = NULL;
+}
+
+#endif // MICROPY_PY_BUILTINS_SET
+
#if defined(DEBUG_PRINT) && DEBUG_PRINT
// Debug helper: dump every slot of the map, printing "(nil)" for empty keys.
void mp_map_dump(mp_map_t *map) {
    for (size_t i = 0; i < map->alloc; i++) {
        mp_map_elem_t *elem = &map->table[i];
        if (elem->key == MP_OBJ_NULL) {
            DEBUG_printf("(nil)");
        } else {
            mp_obj_print(elem->key, PRINT_REPR);
        }
        DEBUG_printf(": %p\n", elem->value);
    }
    DEBUG_printf("---\n");
}
#endif

+ 337 - 0
mp_flipper/lib/micropython/py/misc.h

@@ -0,0 +1,337 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_MISC_H
+#define MICROPY_INCLUDED_PY_MISC_H
+
+// a mini library of useful types and functions
+
+/** types *******************************************************/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <stddef.h>
+
+typedef unsigned char byte;
+typedef unsigned int uint;
+
+/** generic ops *************************************************/
+
+#ifndef MIN
+#define MIN(x, y) ((x) < (y) ? (x) : (y))
+#endif
+#ifndef MAX
+#define MAX(x, y) ((x) > (y) ? (x) : (y))
+#endif
+
+// Classical double-indirection stringification of preprocessor macro's value
+#define MP_STRINGIFY_HELPER(x) #x
+#define MP_STRINGIFY(x) MP_STRINGIFY_HELPER(x)
+
+// Static assertion macro
+#define MP_STATIC_ASSERT(cond) ((void)sizeof(char[1 - 2 * !(cond)]))
+// In C++ things like comparing extern const pointers are not constant-expressions so cannot be used
+// in MP_STATIC_ASSERT. Note that not all possible compiler versions will reject this. Some gcc versions
+// do, others only with -Werror=vla, msvc always does.
+// The (void) is needed to avoid "left operand of comma operator has no effect [-Werror=unused-value]"
+// when using this macro on the left-hand side of a comma.
+#if defined(_MSC_VER) || defined(__cplusplus)
+#define MP_STATIC_ASSERT_NONCONSTEXPR(cond) ((void)1)
+#else
+#define MP_STATIC_ASSERT_NONCONSTEXPR(cond) MP_STATIC_ASSERT(cond)
+#endif
+
+// Round-up integer division
+#define MP_CEIL_DIVIDE(a, b) (((a) + (b) - 1) / (b))
+#define MP_ROUND_DIVIDE(a, b) (((a) + (b) / 2) / (b))
+
+/** memory allocation ******************************************/
+
+// TODO make a lazy m_renew that can increase by a smaller amount than requested (but by at least 1 more element)
+
+#define m_new(type, num) ((type *)(m_malloc(sizeof(type) * (num))))
+#define m_new_maybe(type, num) ((type *)(m_malloc_maybe(sizeof(type) * (num))))
+#define m_new0(type, num) ((type *)(m_malloc0(sizeof(type) * (num))))
+#define m_new_obj(type) (m_new(type, 1))
+#define m_new_obj_maybe(type) (m_new_maybe(type, 1))
+#define m_new_obj_var(obj_type, var_field, var_type, var_num) ((obj_type *)m_malloc(offsetof(obj_type, var_field) + sizeof(var_type) * (var_num)))
+#define m_new_obj_var0(obj_type, var_field, var_type, var_num) ((obj_type *)m_malloc0(offsetof(obj_type, var_field) + sizeof(var_type) * (var_num)))
+#define m_new_obj_var_maybe(obj_type, var_field, var_type, var_num) ((obj_type *)m_malloc_maybe(offsetof(obj_type, var_field) + sizeof(var_type) * (var_num)))
+#if MICROPY_MALLOC_USES_ALLOCATED_SIZE
+#define m_renew(type, ptr, old_num, new_num) ((type *)(m_realloc((ptr), sizeof(type) * (old_num), sizeof(type) * (new_num))))
+#define m_renew_maybe(type, ptr, old_num, new_num, allow_move) ((type *)(m_realloc_maybe((ptr), sizeof(type) * (old_num), sizeof(type) * (new_num), (allow_move))))
+#define m_del(type, ptr, num) m_free(ptr, sizeof(type) * (num))
+#define m_del_var(obj_type, var_field, var_type, var_num, ptr) (m_free(ptr, offsetof(obj_type, var_field) + sizeof(var_type) * (var_num)))
+#else
+#define m_renew(type, ptr, old_num, new_num) ((type *)(m_realloc((ptr), sizeof(type) * (new_num))))
+#define m_renew_maybe(type, ptr, old_num, new_num, allow_move) ((type *)(m_realloc_maybe((ptr), sizeof(type) * (new_num), (allow_move))))
+#define m_del(type, ptr, num) ((void)(num), m_free(ptr))
+#define m_del_var(obj_type, var_field, var_type, var_num, ptr) ((void)(var_num), m_free(ptr))
+#endif
+#define m_del_obj(type, ptr) (m_del(type, ptr, 1))
+
+void *m_malloc(size_t num_bytes);
+void *m_malloc_maybe(size_t num_bytes);
+void *m_malloc_with_finaliser(size_t num_bytes);
+void *m_malloc0(size_t num_bytes);
+#if MICROPY_MALLOC_USES_ALLOCATED_SIZE
+void *m_realloc(void *ptr, size_t old_num_bytes, size_t new_num_bytes);
+void *m_realloc_maybe(void *ptr, size_t old_num_bytes, size_t new_num_bytes, bool allow_move);
+void m_free(void *ptr, size_t num_bytes);
+#else
+void *m_realloc(void *ptr, size_t new_num_bytes);
+void *m_realloc_maybe(void *ptr, size_t new_num_bytes, bool allow_move);
+void m_free(void *ptr);
+#endif
+NORETURN void m_malloc_fail(size_t num_bytes);
+
+#if MICROPY_TRACKED_ALLOC
+// These alloc/free functions track the pointers in a linked list so the GC does not reclaim
+// them.  They can be used by code that requires traditional C malloc/free semantics.
+void *m_tracked_calloc(size_t nmemb, size_t size);
+void m_tracked_free(void *ptr_in);
+#endif
+
+#if MICROPY_MEM_STATS
+size_t m_get_total_bytes_allocated(void);
+size_t m_get_current_bytes_allocated(void);
+size_t m_get_peak_bytes_allocated(void);
+#endif
+
+/** array helpers ***********************************************/
+
+// get the number of elements in a fixed-size array
+#define MP_ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
+// align ptr to the nearest multiple of "alignment"
+#define MP_ALIGN(ptr, alignment) (void *)(((uintptr_t)(ptr) + ((alignment) - 1)) & ~((alignment) - 1))
+
+/** unichar / UTF-8 *********************************************/
+
+#if MICROPY_PY_BUILTINS_STR_UNICODE
+// with unicode enabled we need a type which can fit chars up to 0x10ffff
+typedef uint32_t unichar;
+#else
+// without unicode enabled we can only need to fit chars up to 0xff
+// (on 16-bit archs uint is 16-bits and more efficient than uint32_t)
+typedef uint unichar;
+#endif
+
+#if MICROPY_PY_BUILTINS_STR_UNICODE
+unichar utf8_get_char(const byte *s);
+const byte *utf8_next_char(const byte *s);
+size_t utf8_charlen(const byte *str, size_t len);
+#else
+static inline unichar utf8_get_char(const byte *s) {
+    return *s;
+}
+static inline const byte *utf8_next_char(const byte *s) {
+    return s + 1;
+}
+static inline size_t utf8_charlen(const byte *str, size_t len) {
+    (void)str;
+    return len;
+}
+#endif
+
+bool unichar_isspace(unichar c);
+bool unichar_isalpha(unichar c);
+bool unichar_isprint(unichar c);
+bool unichar_isdigit(unichar c);
+bool unichar_isxdigit(unichar c);
+bool unichar_isident(unichar c);
+bool unichar_isalnum(unichar c);
+bool unichar_isupper(unichar c);
+bool unichar_islower(unichar c);
+unichar unichar_tolower(unichar c);
+unichar unichar_toupper(unichar c);
+mp_uint_t unichar_xdigit_value(unichar c);
+#define UTF8_IS_NONASCII(ch) ((ch) & 0x80)
+#define UTF8_IS_CONT(ch) (((ch) & 0xC0) == 0x80)
+
+/** variable string *********************************************/
+
+typedef struct _vstr_t {
+    size_t alloc;
+    size_t len;
+    char *buf;
+    bool fixed_buf;
+} vstr_t;
+
+// convenience macro to declare a vstr with a fixed size buffer on the stack
+#define VSTR_FIXED(vstr, alloc) vstr_t vstr; char vstr##_buf[(alloc)]; vstr_init_fixed_buf(&vstr, (alloc), vstr##_buf);
+
+void vstr_init(vstr_t *vstr, size_t alloc);
+void vstr_init_len(vstr_t *vstr, size_t len);
+void vstr_init_fixed_buf(vstr_t *vstr, size_t alloc, char *buf);
+struct _mp_print_t;
+void vstr_init_print(vstr_t *vstr, size_t alloc, struct _mp_print_t *print);
+void vstr_clear(vstr_t *vstr);
+vstr_t *vstr_new(size_t alloc);
+void vstr_free(vstr_t *vstr);
+static inline void vstr_reset(vstr_t *vstr) {
+    vstr->len = 0;
+}
+static inline char *vstr_str(vstr_t *vstr) {
+    return vstr->buf;
+}
+static inline size_t vstr_len(vstr_t *vstr) {
+    return vstr->len;
+}
+void vstr_hint_size(vstr_t *vstr, size_t size);
+char *vstr_extend(vstr_t *vstr, size_t size);
+char *vstr_add_len(vstr_t *vstr, size_t len);
+char *vstr_null_terminated_str(vstr_t *vstr);
+void vstr_add_byte(vstr_t *vstr, byte v);
+void vstr_add_char(vstr_t *vstr, unichar chr);
+void vstr_add_str(vstr_t *vstr, const char *str);
+void vstr_add_strn(vstr_t *vstr, const char *str, size_t len);
+void vstr_ins_byte(vstr_t *vstr, size_t byte_pos, byte b);
+void vstr_ins_char(vstr_t *vstr, size_t char_pos, unichar chr);
+void vstr_cut_head_bytes(vstr_t *vstr, size_t bytes_to_cut);
+void vstr_cut_tail_bytes(vstr_t *vstr, size_t bytes_to_cut);
+void vstr_cut_out_bytes(vstr_t *vstr, size_t byte_pos, size_t bytes_to_cut);
+void vstr_printf(vstr_t *vstr, const char *fmt, ...);
+
+/** non-dynamic size-bounded variable buffer/string *************/
+
+#define CHECKBUF(buf, max_size) char buf[max_size + 1]; size_t buf##_len = max_size; char *buf##_p = buf;
+#define CHECKBUF_RESET(buf, max_size) buf##_len = max_size; buf##_p = buf;
+#define CHECKBUF_APPEND(buf, src, src_len) \
+    { size_t l = MIN(src_len, buf##_len); \
+      memcpy(buf##_p, src, l); \
+      buf##_len -= l; \
+      buf##_p += l; }
+#define CHECKBUF_APPEND_0(buf) { *buf##_p = 0; }
+#define CHECKBUF_LEN(buf) (buf##_p - buf)
+
+#ifdef va_start
+void vstr_vprintf(vstr_t *vstr, const char *fmt, va_list ap);
+#endif
+
+// Debugging helpers
+int DEBUG_printf(const char *fmt, ...);
+
+extern mp_uint_t mp_verbose_flag;
+
+/** float internals *************/
+
+#if MICROPY_PY_BUILTINS_FLOAT
+
+#if MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_DOUBLE
+#define MP_FLOAT_EXP_BITS (11)
+#define MP_FLOAT_EXP_OFFSET (1023)
+#define MP_FLOAT_FRAC_BITS (52)
+typedef uint64_t mp_float_uint_t;
+#elif MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_FLOAT
+#define MP_FLOAT_EXP_BITS (8)
+#define MP_FLOAT_EXP_OFFSET (127)
+#define MP_FLOAT_FRAC_BITS (23)
+typedef uint32_t mp_float_uint_t;
+#endif
+
+#define MP_FLOAT_EXP_BIAS ((1 << (MP_FLOAT_EXP_BITS - 1)) - 1)
+
+typedef union _mp_float_union_t {
+    mp_float_t f;
+    #if MP_ENDIANNESS_LITTLE
+    struct {
+        mp_float_uint_t frc : MP_FLOAT_FRAC_BITS;
+        mp_float_uint_t exp : MP_FLOAT_EXP_BITS;
+        mp_float_uint_t sgn : 1;
+    } p;
+    #else
+    struct {
+        mp_float_uint_t sgn : 1;
+        mp_float_uint_t exp : MP_FLOAT_EXP_BITS;
+        mp_float_uint_t frc : MP_FLOAT_FRAC_BITS;
+    } p;
+    #endif
+    mp_float_uint_t i;
+} mp_float_union_t;
+
+#endif // MICROPY_PY_BUILTINS_FLOAT
+
+/** ROM string compression *************/
+
+#if MICROPY_ROM_TEXT_COMPRESSION
+
+#if MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_NONE
+#error "MICROPY_ERROR_REPORTING_NONE requires MICROPY_ROM_TEXT_COMPRESSION disabled"
+#endif
+
+#ifdef NO_QSTR
+
+// Compression enabled but doing QSTR extraction.
+// So leave MP_COMPRESSED_ROM_TEXT in place for makeqstrdefs.py / makecompresseddata.py to find them.
+
+#else
+
+// Compression enabled and doing a regular build.
+// Map MP_COMPRESSED_ROM_TEXT to the compressed strings.
+
+// Force usage of the MP_ERROR_TEXT macro by requiring an opaque type.
+typedef struct {
+    #if defined(__clang__) || defined(_MSC_VER)
+    // Fix "error: empty struct has size 0 in C, size 1 in C++", and the msvc counterpart
+    // "C requires that a struct or union have at least one member"
+    char dummy;
+    #endif
+} *mp_rom_error_text_t;
+
+#include <string.h>
+
+inline MP_ALWAYSINLINE const char *MP_COMPRESSED_ROM_TEXT(const char *msg) {
+    // "genhdr/compressed.data.h" contains an invocation of the MP_MATCH_COMPRESSED macro for each compressed string.
+    // The giant if(strcmp) tree is optimized by the compiler, which turns this into a direct return of the compressed data.
+    #define MP_MATCH_COMPRESSED(a, b) if (strcmp(msg, a) == 0) { return b; } else
+
+    // It also contains a single invocation of the MP_COMPRESSED_DATA macro, we don't need that here.
+    #define MP_COMPRESSED_DATA(x)
+
+    #include "genhdr/compressed.data.h"
+
+#undef MP_COMPRESSED_DATA
+#undef MP_MATCH_COMPRESSED
+
+    return msg;
+}
+
+#endif
+
+#else
+
+// Compression not enabled, just make it a no-op.
+
+typedef const char *mp_rom_error_text_t;
+#define MP_COMPRESSED_ROM_TEXT(x) x
+
+#endif // MICROPY_ROM_TEXT_COMPRESSION
+
+// Might add more types of compressed text in the future.
+// For now, forward directly to MP_COMPRESSED_ROM_TEXT.
+#define MP_ERROR_TEXT(x) (mp_rom_error_text_t)MP_COMPRESSED_ROM_TEXT(x)
+
+#endif // MICROPY_INCLUDED_PY_MISC_H

+ 45 - 0
mp_flipper/lib/micropython/py/modarray.c

@@ -0,0 +1,45 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "py/builtin.h"
+
+#if MICROPY_PY_ARRAY
+
+static const mp_rom_map_elem_t mp_module_array_globals_table[] = {
+    { MP_ROM_QSTR(MP_QSTR___name__), MP_ROM_QSTR(MP_QSTR_array) },
+    { MP_ROM_QSTR(MP_QSTR_array), MP_ROM_PTR(&mp_type_array) },
+};
+
+static MP_DEFINE_CONST_DICT(mp_module_array_globals, mp_module_array_globals_table);
+
+const mp_obj_module_t mp_module_array = {
+    .base = { &mp_type_module },
+    .globals = (mp_obj_dict_t *)&mp_module_array_globals,
+};
+
+MP_REGISTER_EXTENSIBLE_MODULE(MP_QSTR_array, mp_module_array);
+
+#endif

+ 773 - 0
mp_flipper/lib/micropython/py/modbuiltins.c

@@ -0,0 +1,773 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdio.h>
+#include <assert.h>
+
+#include "py/smallint.h"
+#include "py/objint.h"
+#include "py/objstr.h"
+#include "py/objtype.h"
+#include "py/runtime.h"
+#include "py/builtin.h"
+#include "py/stream.h"
+
+#if MICROPY_PY_BUILTINS_FLOAT
+#include <math.h>
+#endif
+
+#if MICROPY_PY_IO
+extern struct _mp_dummy_t mp_sys_stdout_obj; // type is irrelevant, just need pointer
+#endif
+
+// args[0] is function from class body
+// args[1] is class name
+// args[2:] are base objects
+static mp_obj_t mp_builtin___build_class__(size_t n_args, const mp_obj_t *args) {
+    assert(2 <= n_args);
+
+    // set the new classes __locals__ object
+    mp_obj_dict_t *old_locals = mp_locals_get();
+    mp_obj_t class_locals = mp_obj_new_dict(0);
+    mp_locals_set(MP_OBJ_TO_PTR(class_locals));
+
+    // call the class code
+    mp_obj_t cell = mp_call_function_0(args[0]);
+
+    // restore old __locals__ object
+    mp_locals_set(old_locals);
+
+    // get the class type (meta object) from the base objects
+    mp_obj_t meta;
+    if (n_args == 2) {
+        // no explicit bases, so use 'type'
+        meta = MP_OBJ_FROM_PTR(&mp_type_type);
+    } else {
+        // use type of first base object
+        meta = MP_OBJ_FROM_PTR(mp_obj_get_type(args[2]));
+    }
+
+    // TODO do proper metaclass resolution for multiple base objects
+
+    // create the new class using a call to the meta object
+    mp_obj_t meta_args[3];
+    meta_args[0] = args[1]; // class name
+    meta_args[1] = mp_obj_new_tuple(n_args - 2, args + 2); // tuple of bases
+    meta_args[2] = class_locals; // dict of members
+    mp_obj_t new_class = mp_call_function_n_kw(meta, 3, 0, meta_args);
+
+    // store into cell if needed
+    if (cell != mp_const_none) {
+        mp_obj_cell_set(cell, new_class);
+    }
+
+    return new_class;
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR(mp_builtin___build_class___obj, 2, mp_builtin___build_class__);
+
+static mp_obj_t mp_builtin_abs(mp_obj_t o_in) {
+    return mp_unary_op(MP_UNARY_OP_ABS, o_in);
+}
+MP_DEFINE_CONST_FUN_OBJ_1(mp_builtin_abs_obj, mp_builtin_abs);
+
+static mp_obj_t mp_builtin_all(mp_obj_t o_in) {
+    mp_obj_iter_buf_t iter_buf;
+    mp_obj_t iterable = mp_getiter(o_in, &iter_buf);
+    mp_obj_t item;
+    while ((item = mp_iternext(iterable)) != MP_OBJ_STOP_ITERATION) {
+        if (!mp_obj_is_true(item)) {
+            return mp_const_false;
+        }
+    }
+    return mp_const_true;
+}
+MP_DEFINE_CONST_FUN_OBJ_1(mp_builtin_all_obj, mp_builtin_all);
+
+static mp_obj_t mp_builtin_any(mp_obj_t o_in) {
+    mp_obj_iter_buf_t iter_buf;
+    mp_obj_t iterable = mp_getiter(o_in, &iter_buf);
+    mp_obj_t item;
+    while ((item = mp_iternext(iterable)) != MP_OBJ_STOP_ITERATION) {
+        if (mp_obj_is_true(item)) {
+            return mp_const_true;
+        }
+    }
+    return mp_const_false;
+}
+MP_DEFINE_CONST_FUN_OBJ_1(mp_builtin_any_obj, mp_builtin_any);
+
+static mp_obj_t mp_builtin_bin(mp_obj_t o_in) {
+    mp_obj_t args[] = { MP_OBJ_NEW_QSTR(MP_QSTR__brace_open__colon__hash_b_brace_close_), o_in };
+    return mp_obj_str_format(MP_ARRAY_SIZE(args), args, NULL);
+}
+MP_DEFINE_CONST_FUN_OBJ_1(mp_builtin_bin_obj, mp_builtin_bin);
+
+static mp_obj_t mp_builtin_callable(mp_obj_t o_in) {
+    if (mp_obj_is_callable(o_in)) {
+        return mp_const_true;
+    } else {
+        return mp_const_false;
+    }
+}
+MP_DEFINE_CONST_FUN_OBJ_1(mp_builtin_callable_obj, mp_builtin_callable);
+
+static mp_obj_t mp_builtin_chr(mp_obj_t o_in) {
+    #if MICROPY_PY_BUILTINS_STR_UNICODE
+    mp_uint_t c = mp_obj_get_int(o_in);
+    if (c >= 0x110000) {
+        mp_raise_ValueError(MP_ERROR_TEXT("chr() arg not in range(0x110000)"));
+    }
+    VSTR_FIXED(buf, 4);
+    vstr_add_char(&buf, c);
+    return mp_obj_new_str_via_qstr(buf.buf, buf.len);
+    #else
+    mp_int_t ord = mp_obj_get_int(o_in);
+    if (0 <= ord && ord <= 0xff) {
+        uint8_t str[1] = {ord};
+        return mp_obj_new_str_via_qstr((char *)str, 1);
+    } else {
+        mp_raise_ValueError(MP_ERROR_TEXT("chr() arg not in range(256)"));
+    }
+    #endif
+}
+MP_DEFINE_CONST_FUN_OBJ_1(mp_builtin_chr_obj, mp_builtin_chr);
+
+static mp_obj_t mp_builtin_dir(size_t n_args, const mp_obj_t *args) {
+    mp_obj_t dir = mp_obj_new_list(0, NULL);
+    if (n_args == 0) {
+        // Make a list of names in the local namespace
+        mp_obj_dict_t *dict = mp_locals_get();
+        for (size_t i = 0; i < dict->map.alloc; i++) {
+            if (mp_map_slot_is_filled(&dict->map, i)) {
+                mp_obj_list_append(dir, dict->map.table[i].key);
+            }
+        }
+    } else { // n_args == 1
+        // Make a list of names in the given object
+        // Implemented by probing all possible qstrs with mp_load_method_maybe
+        size_t nqstr = QSTR_TOTAL();
+        for (size_t i = MP_QSTR_ + 1; i < nqstr; ++i) {
+            mp_obj_t dest[2];
+            mp_load_method_protected(args[0], i, dest, false);
+            if (dest[0] != MP_OBJ_NULL) {
+                #if MICROPY_PY_ALL_SPECIAL_METHODS
+                // Support for __dir__: see if we can dispatch to this special method
+                // This relies on MP_QSTR__dir__ being first after MP_QSTR_
+                if (i == MP_QSTR___dir__ && dest[1] != MP_OBJ_NULL) {
+                    return mp_call_method_n_kw(0, 0, dest);
+                }
+                #endif
+                mp_obj_list_append(dir, MP_OBJ_NEW_QSTR(i));
+            }
+        }
+    }
+    return dir;
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin_dir_obj, 0, 1, mp_builtin_dir);
+
+static mp_obj_t mp_builtin_divmod(mp_obj_t o1_in, mp_obj_t o2_in) {
+    return mp_binary_op(MP_BINARY_OP_DIVMOD, o1_in, o2_in);
+}
+MP_DEFINE_CONST_FUN_OBJ_2(mp_builtin_divmod_obj, mp_builtin_divmod);
+
+static mp_obj_t mp_builtin_hash(mp_obj_t o_in) {
+    // result is guaranteed to be a (small) int
+    return mp_unary_op(MP_UNARY_OP_HASH, o_in);
+}
+MP_DEFINE_CONST_FUN_OBJ_1(mp_builtin_hash_obj, mp_builtin_hash);
+
+static mp_obj_t mp_builtin_hex(mp_obj_t o_in) {
+    #if MICROPY_PY_BUILTINS_STR_OP_MODULO
+    return mp_binary_op(MP_BINARY_OP_MODULO, MP_OBJ_NEW_QSTR(MP_QSTR__percent__hash_x), o_in);
+    #else
+    mp_obj_t args[] = { MP_OBJ_NEW_QSTR(MP_QSTR__brace_open__colon__hash_x_brace_close_), o_in };
+    return mp_obj_str_format(MP_ARRAY_SIZE(args), args, NULL);
+    #endif
+}
+MP_DEFINE_CONST_FUN_OBJ_1(mp_builtin_hex_obj, mp_builtin_hex);
+
+#if MICROPY_PY_BUILTINS_INPUT
+
+#include "py/mphal.h"
+#include "shared/readline/readline.h"
+
+// A port can define mp_hal_readline if they want to use a custom function here
+#ifndef mp_hal_readline
+#define mp_hal_readline readline
+#endif
+
+static mp_obj_t mp_builtin_input(size_t n_args, const mp_obj_t *args) {
+    if (n_args == 1) {
+        mp_obj_print(args[0], PRINT_STR);
+    }
+    vstr_t line;
+    vstr_init(&line, 16);
+    int ret = mp_hal_readline(&line, "");
+    if (ret == CHAR_CTRL_C) {
+        mp_raise_type(&mp_type_KeyboardInterrupt);
+    }
+    if (line.len == 0 && ret == CHAR_CTRL_D) {
+        mp_raise_type(&mp_type_EOFError);
+    }
+    return mp_obj_new_str_from_vstr(&line);
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin_input_obj, 0, 1, mp_builtin_input);
+
+#endif
+
+static mp_obj_t mp_builtin_iter(mp_obj_t o_in) {
+    return mp_getiter(o_in, NULL);
+}
+MP_DEFINE_CONST_FUN_OBJ_1(mp_builtin_iter_obj, mp_builtin_iter);
+
+#if MICROPY_PY_BUILTINS_MIN_MAX
+
+static mp_obj_t mp_builtin_min_max(size_t n_args, const mp_obj_t *args, mp_map_t *kwargs, mp_uint_t op) {
+    mp_map_elem_t *key_elem = mp_map_lookup(kwargs, MP_OBJ_NEW_QSTR(MP_QSTR_key), MP_MAP_LOOKUP);
+    mp_map_elem_t *default_elem;
+    mp_obj_t key_fn = key_elem == NULL ? MP_OBJ_NULL : key_elem->value;
+    if (n_args == 1) {
+        // given an iterable
+        mp_obj_iter_buf_t iter_buf;
+        mp_obj_t iterable = mp_getiter(args[0], &iter_buf);
+        mp_obj_t best_key = MP_OBJ_NULL;
+        mp_obj_t best_obj = MP_OBJ_NULL;
+        mp_obj_t item;
+        while ((item = mp_iternext(iterable)) != MP_OBJ_STOP_ITERATION) {
+            mp_obj_t key = key_fn == MP_OBJ_NULL ? item : mp_call_function_1(key_fn, item);
+            if (best_obj == MP_OBJ_NULL || (mp_binary_op(op, key, best_key) == mp_const_true)) {
+                best_key = key;
+                best_obj = item;
+            }
+        }
+        if (best_obj == MP_OBJ_NULL) {
+            default_elem = mp_map_lookup(kwargs, MP_OBJ_NEW_QSTR(MP_QSTR_default), MP_MAP_LOOKUP);
+            if (default_elem != NULL) {
+                best_obj = default_elem->value;
+            } else {
+                mp_raise_ValueError(MP_ERROR_TEXT("arg is an empty sequence"));
+            }
+        }
+        return best_obj;
+    } else {
+        // given many args
+        mp_obj_t best_key = MP_OBJ_NULL;
+        mp_obj_t best_obj = MP_OBJ_NULL;
+        for (size_t i = 0; i < n_args; i++) {
+            mp_obj_t key = key_fn == MP_OBJ_NULL ? args[i] : mp_call_function_1(key_fn, args[i]);
+            if (best_obj == MP_OBJ_NULL || (mp_binary_op(op, key, best_key) == mp_const_true)) {
+                best_key = key;
+                best_obj = args[i];
+            }
+        }
+        return best_obj;
+    }
+}
+
+static mp_obj_t mp_builtin_max(size_t n_args, const mp_obj_t *args, mp_map_t *kwargs) {
+    return mp_builtin_min_max(n_args, args, kwargs, MP_BINARY_OP_MORE);
+}
+MP_DEFINE_CONST_FUN_OBJ_KW(mp_builtin_max_obj, 1, mp_builtin_max);
+
+static mp_obj_t mp_builtin_min(size_t n_args, const mp_obj_t *args, mp_map_t *kwargs) {
+    return mp_builtin_min_max(n_args, args, kwargs, MP_BINARY_OP_LESS);
+}
+MP_DEFINE_CONST_FUN_OBJ_KW(mp_builtin_min_obj, 1, mp_builtin_min);
+
+#endif
+
+#if MICROPY_PY_BUILTINS_NEXT2
+static mp_obj_t mp_builtin_next(size_t n_args, const mp_obj_t *args) {
+    if (n_args == 1) {
+        mp_obj_t ret = mp_iternext_allow_raise(args[0]);
+        if (ret == MP_OBJ_STOP_ITERATION) {
+            mp_raise_StopIteration(MP_STATE_THREAD(stop_iteration_arg));
+        } else {
+            return ret;
+        }
+    } else {
+        mp_obj_t ret = mp_iternext(args[0]);
+        return ret == MP_OBJ_STOP_ITERATION ? args[1] : ret;
+    }
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin_next_obj, 1, 2, mp_builtin_next);
+#else
+static mp_obj_t mp_builtin_next(mp_obj_t o) {
+    mp_obj_t ret = mp_iternext_allow_raise(o);
+    if (ret == MP_OBJ_STOP_ITERATION) {
+        mp_raise_StopIteration(MP_STATE_THREAD(stop_iteration_arg));
+    } else {
+        return ret;
+    }
+}
+MP_DEFINE_CONST_FUN_OBJ_1(mp_builtin_next_obj, mp_builtin_next);
+#endif
+
+static mp_obj_t mp_builtin_oct(mp_obj_t o_in) {
+    #if MICROPY_PY_BUILTINS_STR_OP_MODULO
+    return mp_binary_op(MP_BINARY_OP_MODULO, MP_OBJ_NEW_QSTR(MP_QSTR__percent__hash_o), o_in);
+    #else
+    mp_obj_t args[] = { MP_OBJ_NEW_QSTR(MP_QSTR__brace_open__colon__hash_o_brace_close_), o_in };
+    return mp_obj_str_format(MP_ARRAY_SIZE(args), args, NULL);
+    #endif
+}
+MP_DEFINE_CONST_FUN_OBJ_1(mp_builtin_oct_obj, mp_builtin_oct);
+
+static mp_obj_t mp_builtin_ord(mp_obj_t o_in) {
+    size_t len;
+    const byte *str = (const byte *)mp_obj_str_get_data(o_in, &len);
+    #if MICROPY_PY_BUILTINS_STR_UNICODE
+    if (mp_obj_is_str(o_in)) {
+        len = utf8_charlen(str, len);
+        if (len == 1) {
+            return mp_obj_new_int(utf8_get_char(str));
+        }
+    } else
+    #endif
+    {
+        // a bytes object, or a str without unicode support (don't sign extend the char)
+        if (len == 1) {
+            return MP_OBJ_NEW_SMALL_INT(str[0]);
+        }
+    }
+
+    #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
+    mp_raise_TypeError(MP_ERROR_TEXT("ord expects a character"));
+    #else
+    mp_raise_msg_varg(&mp_type_TypeError,
+        MP_ERROR_TEXT("ord() expected a character, but string of length %d found"), (int)len);
+    #endif
+}
+MP_DEFINE_CONST_FUN_OBJ_1(mp_builtin_ord_obj, mp_builtin_ord);
+
+static mp_obj_t mp_builtin_pow(size_t n_args, const mp_obj_t *args) {
+    switch (n_args) {
+        case 2:
+            return mp_binary_op(MP_BINARY_OP_POWER, args[0], args[1]);
+        default:
+            #if !MICROPY_PY_BUILTINS_POW3
+            mp_raise_NotImplementedError(MP_ERROR_TEXT("3-arg pow() not supported"));
+            #elif MICROPY_LONGINT_IMPL != MICROPY_LONGINT_IMPL_MPZ
+            return mp_binary_op(MP_BINARY_OP_MODULO, mp_binary_op(MP_BINARY_OP_POWER, args[0], args[1]), args[2]);
+            #else
+            return mp_obj_int_pow3(args[0], args[1], args[2]);
+            #endif
+    }
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin_pow_obj, 2, 3, mp_builtin_pow);
+
+static mp_obj_t mp_builtin_print(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+    enum { ARG_sep, ARG_end, ARG_file };
+    static const mp_arg_t allowed_args[] = {
+        { MP_QSTR_sep, MP_ARG_KW_ONLY | MP_ARG_OBJ, {.u_rom_obj = MP_ROM_QSTR(MP_QSTR__space_)} },
+        { MP_QSTR_end, MP_ARG_KW_ONLY | MP_ARG_OBJ, {.u_rom_obj = MP_ROM_QSTR(MP_QSTR__0x0a_)} },
+        #if MICROPY_PY_IO && MICROPY_PY_SYS_STDFILES
+        { MP_QSTR_file, MP_ARG_KW_ONLY | MP_ARG_OBJ, {.u_rom_obj = MP_ROM_PTR(&mp_sys_stdout_obj)} },
+        #endif
+    };
+
+    // parse args (a union is used to reduce the amount of C stack that is needed)
+    union {
+        mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+        size_t len[2];
+    } u;
+    mp_arg_parse_all(0, NULL, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, u.args);
+
+    #if MICROPY_PY_IO && MICROPY_PY_SYS_STDFILES
+    mp_get_stream_raise(u.args[ARG_file].u_obj, MP_STREAM_OP_WRITE);
+    mp_print_t print = {MP_OBJ_TO_PTR(u.args[ARG_file].u_obj), mp_stream_write_adaptor};
+    #endif
+
+    // extract the objects first because we are going to use the other part of the union
+    mp_obj_t sep = u.args[ARG_sep].u_obj;
+    mp_obj_t end = u.args[ARG_end].u_obj;
+    const char *sep_data = mp_obj_str_get_data(sep, &u.len[0]);
+    const char *end_data = mp_obj_str_get_data(end, &u.len[1]);
+
+    for (size_t i = 0; i < n_args; i++) {
+        if (i > 0) {
+            #if MICROPY_PY_IO && MICROPY_PY_SYS_STDFILES
+            mp_stream_write_adaptor(print.data, sep_data, u.len[0]);
+            #else
+            mp_print_strn(&mp_plat_print, sep_data, u.len[0], 0, 0, 0);
+            #endif
+        }
+        #if MICROPY_PY_IO && MICROPY_PY_SYS_STDFILES
+        mp_obj_print_helper(&print, pos_args[i], PRINT_STR);
+        #else
+        mp_obj_print_helper(&mp_plat_print, pos_args[i], PRINT_STR);
+        #endif
+    }
+    #if MICROPY_PY_IO && MICROPY_PY_SYS_STDFILES
+    mp_stream_write_adaptor(print.data, end_data, u.len[1]);
+    #else
+    mp_print_strn(&mp_plat_print, end_data, u.len[1], 0, 0, 0);
+    #endif
+    return mp_const_none;
+}
+MP_DEFINE_CONST_FUN_OBJ_KW(mp_builtin_print_obj, 0, mp_builtin_print);
+
+static mp_obj_t mp_builtin___repl_print__(mp_obj_t o) {
+    if (o != mp_const_none) {
+        mp_obj_print_helper(MP_PYTHON_PRINTER, o, PRINT_REPR);
+        mp_print_str(MP_PYTHON_PRINTER, "\n");
+        #if MICROPY_CAN_OVERRIDE_BUILTINS
+        // Set "_" special variable
+        mp_obj_t dest[2] = {MP_OBJ_SENTINEL, o};
+        MP_OBJ_TYPE_GET_SLOT(&mp_type_module, attr)(MP_OBJ_FROM_PTR(&mp_module_builtins), MP_QSTR__, dest);
+        #endif
+    }
+    return mp_const_none;
+}
+MP_DEFINE_CONST_FUN_OBJ_1(mp_builtin___repl_print___obj, mp_builtin___repl_print__);
+
+// repr(o): return the repr of an object as a new str.
+// Renders into a growable vstr via a print adaptor, then converts to a str object.
+static mp_obj_t mp_builtin_repr(mp_obj_t o_in) {
+    vstr_t vstr;
+    mp_print_t print;
+    vstr_init_print(&vstr, 16, &print);  // 16 bytes initial capacity; grows as needed
+    mp_obj_print_helper(&print, o_in, PRINT_REPR);
+    return mp_obj_new_str_from_utf8_vstr(&vstr);
+}
+MP_DEFINE_CONST_FUN_OBJ_1(mp_builtin_repr_obj, mp_builtin_repr);
+
+// round(x[, ndigits]): round a number, using round-half-to-even (banker's
+// rounding) for the exact-tie case on integers.  With one arg and a float
+// input, returns an int; with ndigits, returns the same type as the input.
+static mp_obj_t mp_builtin_round(size_t n_args, const mp_obj_t *args) {
+    mp_obj_t o_in = args[0];
+    if (mp_obj_is_int(o_in)) {
+        if (n_args <= 1) {
+            // round(int) is the int itself
+            return o_in;
+        }
+
+        #if !MICROPY_PY_BUILTINS_ROUND_INT
+        mp_raise_NotImplementedError(NULL)
+        #else
+        mp_int_t num_dig = mp_obj_get_int(args[1]);
+        if (num_dig >= 0) {
+            // non-negative ndigits cannot change an integer
+            return o_in;
+        }
+
+        // Round to a multiple of mult = 10**(-ndigits), done with object-level
+        // binary ops so it works for arbitrary-precision (big) ints too.
+        mp_obj_t mult = mp_binary_op(MP_BINARY_OP_POWER, MP_OBJ_NEW_SMALL_INT(10), MP_OBJ_NEW_SMALL_INT(-num_dig));
+        mp_obj_t half_mult = mp_binary_op(MP_BINARY_OP_FLOOR_DIVIDE, mult, MP_OBJ_NEW_SMALL_INT(2));
+        mp_obj_t modulo = mp_binary_op(MP_BINARY_OP_MODULO, o_in, mult);
+        mp_obj_t rounded = mp_binary_op(MP_BINARY_OP_SUBTRACT, o_in, modulo);
+        if (mp_obj_is_true(mp_binary_op(MP_BINARY_OP_MORE, half_mult, modulo))) {
+            // remainder below half: round down
+            return rounded;
+        } else if (mp_obj_is_true(mp_binary_op(MP_BINARY_OP_MORE, modulo, half_mult))) {
+            // remainder above half: round up
+            return mp_binary_op(MP_BINARY_OP_ADD, rounded, mult);
+        } else {
+            // round to even number
+            mp_obj_t floor = mp_binary_op(MP_BINARY_OP_FLOOR_DIVIDE, o_in, mult);
+            if (mp_obj_is_true(mp_binary_op(MP_BINARY_OP_AND, floor, MP_OBJ_NEW_SMALL_INT(1)))) {
+                return mp_binary_op(MP_BINARY_OP_ADD, rounded, mult);
+            } else {
+                return rounded;
+            }
+        }
+        #endif
+    }
+    #if MICROPY_PY_BUILTINS_FLOAT
+    mp_float_t val = mp_obj_get_float(o_in);
+    if (n_args > 1) {
+        // Scale, round to nearest (ties handled by the current FP rounding
+        // mode via nearbyint), then unscale.
+        mp_int_t num_dig = mp_obj_get_int(args[1]);
+        mp_float_t mult = MICROPY_FLOAT_C_FUN(pow)(10, (mp_float_t)num_dig);
+        // TODO may lead to overflow
+        mp_float_t rounded = MICROPY_FLOAT_C_FUN(nearbyint)(val * mult) / mult;
+        return mp_obj_new_float(rounded);
+    }
+    mp_float_t rounded = MICROPY_FLOAT_C_FUN(nearbyint)(val);
+    return mp_obj_new_int_from_float(rounded);
+    #else
+    // no float support: coerce to int (accepts int-like objects)
+    mp_int_t r = mp_obj_get_int(o_in);
+    return mp_obj_new_int(r);
+    #endif
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin_round_obj, 1, 2, mp_builtin_round);
+
+// sum(iterable[, start]): add up the items of an iterable, beginning from
+// start (default 0).  Uses the generic binary-add op, so it also works for
+// any type supporting + (as in CPython).
+static mp_obj_t mp_builtin_sum(size_t n_args, const mp_obj_t *args) {
+    mp_obj_t value;
+    switch (n_args) {
+        case 1:
+            value = MP_OBJ_NEW_SMALL_INT(0);  // default start value
+            break;
+        default:
+            value = args[1];
+            break;
+    }
+    mp_obj_iter_buf_t iter_buf;  // stack-allocated iterator state, avoids heap alloc
+    mp_obj_t iterable = mp_getiter(args[0], &iter_buf);
+    mp_obj_t item;
+    while ((item = mp_iternext(iterable)) != MP_OBJ_STOP_ITERATION) {
+        value = mp_binary_op(MP_BINARY_OP_ADD, value, item);
+    }
+    return value;
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin_sum_obj, 1, 2, mp_builtin_sum);
+
+// sorted(iterable, *, key=..., reverse=...): return a new sorted list.
+// Builds a list copy of the iterable, then delegates to list.sort, forwarding
+// any keyword arguments (key/reverse) unchanged.
+static mp_obj_t mp_builtin_sorted(size_t n_args, const mp_obj_t *args, mp_map_t *kwargs) {
+    if (n_args > 1) {
+        // CPython-compatible: key/reverse must be passed by keyword only
+        mp_raise_TypeError(MP_ERROR_TEXT("must use keyword argument for key function"))
+    }
+    mp_obj_t self = mp_obj_list_make_new(&mp_type_list, 1, 0, args);
+    mp_obj_list_sort(1, &self, kwargs);
+
+    return self;
+}
+MP_DEFINE_CONST_FUN_OBJ_KW(mp_builtin_sorted_obj, 1, mp_builtin_sorted);
+
+// See mp_load_attr() if making any changes
+static inline mp_obj_t mp_load_attr_default(mp_obj_t base, qstr attr, mp_obj_t defval) {
+    mp_obj_t dest[2];
+    // use load_method, raising or not raising exception
+    if (defval == MP_OBJ_NULL) {
+        mp_load_method(base, attr, dest);
+    } else {
+        mp_load_method_protected(base, attr, dest, false);
+    }
+    if (dest[0] == MP_OBJ_NULL) {
+        return defval;
+    } else if (dest[1] == MP_OBJ_NULL) {
+        // load_method returned just a normal attribute
+        return dest[0];
+    } else {
+        // load_method returned a method, so build a bound method object
+        return mp_obj_new_bound_meth(dest[0], dest[1]);
+    }
+}
+
+// getattr(obj, name[, default]): attribute lookup by (string) name; with a
+// third argument, return it instead of raising AttributeError.
+static mp_obj_t mp_builtin_getattr(size_t n_args, const mp_obj_t *args) {
+    mp_obj_t defval = MP_OBJ_NULL;  // MP_OBJ_NULL means "no default: raise"
+    if (n_args > 2) {
+        defval = args[2];
+    }
+    return mp_load_attr_default(args[0], mp_obj_str_get_qstr(args[1]), defval);
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin_getattr_obj, 2, 3, mp_builtin_getattr);
+
+// setattr(obj, name, value): store an attribute by (string) name.
+static mp_obj_t mp_builtin_setattr(mp_obj_t base, mp_obj_t attr, mp_obj_t value) {
+    mp_store_attr(base, mp_obj_str_get_qstr(attr), value);
+    return mp_const_none;
+}
+MP_DEFINE_CONST_FUN_OBJ_3(mp_builtin_setattr_obj, mp_builtin_setattr);
+
+#if MICROPY_CPYTHON_COMPAT
+// delattr(obj, name): implemented as storing MP_OBJ_NULL, which mp_store_attr
+// treats as attribute deletion.
+static mp_obj_t mp_builtin_delattr(mp_obj_t base, mp_obj_t attr) {
+    return mp_builtin_setattr(base, attr, MP_OBJ_NULL);
+}
+MP_DEFINE_CONST_FUN_OBJ_2(mp_builtin_delattr_obj, mp_builtin_delattr);
+#endif
+
+// hasattr(obj, name): True iff a non-raising attribute/method lookup succeeds.
+static mp_obj_t mp_builtin_hasattr(mp_obj_t object_in, mp_obj_t attr_in) {
+    qstr attr = mp_obj_str_get_qstr(attr_in);
+    mp_obj_t dest[2];
+    mp_load_method_protected(object_in, attr, dest, false);
+    return mp_obj_new_bool(dest[0] != MP_OBJ_NULL);
+}
+MP_DEFINE_CONST_FUN_OBJ_2(mp_builtin_hasattr_obj, mp_builtin_hasattr);
+
+// globals(): return the current globals dict from the VM state.
+static mp_obj_t mp_builtin_globals(void) {
+    return MP_OBJ_FROM_PTR(mp_globals_get());
+}
+MP_DEFINE_CONST_FUN_OBJ_0(mp_builtin_globals_obj, mp_builtin_globals);
+
+// locals(): return the current locals dict from the VM state.
+static mp_obj_t mp_builtin_locals(void) {
+    return MP_OBJ_FROM_PTR(mp_locals_get());
+}
+MP_DEFINE_CONST_FUN_OBJ_0(mp_builtin_locals_obj, mp_builtin_locals);
+
+// These are defined in terms of MicroPython API functions right away
+MP_DEFINE_CONST_FUN_OBJ_1(mp_builtin_id_obj, mp_obj_id);
+MP_DEFINE_CONST_FUN_OBJ_1(mp_builtin_len_obj, mp_obj_len);
+
+static const mp_rom_map_elem_t mp_module_builtins_globals_table[] = {
+    { MP_ROM_QSTR(MP_QSTR___name__), MP_ROM_QSTR(MP_QSTR_builtins) },
+
+    // built-in core functions
+    { MP_ROM_QSTR(MP_QSTR___build_class__), MP_ROM_PTR(&mp_builtin___build_class___obj) },
+    { MP_ROM_QSTR(MP_QSTR___import__), MP_ROM_PTR(&mp_builtin___import___obj) },
+    { MP_ROM_QSTR(MP_QSTR___repl_print__), MP_ROM_PTR(&mp_builtin___repl_print___obj) },
+
+    // built-in types
+    { MP_ROM_QSTR(MP_QSTR_bool), MP_ROM_PTR(&mp_type_bool) },
+    { MP_ROM_QSTR(MP_QSTR_bytes), MP_ROM_PTR(&mp_type_bytes) },
+    #if MICROPY_PY_BUILTINS_BYTEARRAY
+    { MP_ROM_QSTR(MP_QSTR_bytearray), MP_ROM_PTR(&mp_type_bytearray) },
+    #endif
+    #if MICROPY_PY_BUILTINS_COMPLEX
+    { MP_ROM_QSTR(MP_QSTR_complex), MP_ROM_PTR(&mp_type_complex) },
+    #endif
+    { MP_ROM_QSTR(MP_QSTR_dict), MP_ROM_PTR(&mp_type_dict) },
+    #if MICROPY_PY_BUILTINS_ENUMERATE
+    { MP_ROM_QSTR(MP_QSTR_enumerate), MP_ROM_PTR(&mp_type_enumerate) },
+    #endif
+    #if MICROPY_PY_BUILTINS_FILTER
+    { MP_ROM_QSTR(MP_QSTR_filter), MP_ROM_PTR(&mp_type_filter) },
+    #endif
+    #if MICROPY_PY_BUILTINS_FLOAT
+    { MP_ROM_QSTR(MP_QSTR_float), MP_ROM_PTR(&mp_type_float) },
+    #endif
+    #if MICROPY_PY_BUILTINS_SET && MICROPY_PY_BUILTINS_FROZENSET
+    { MP_ROM_QSTR(MP_QSTR_frozenset), MP_ROM_PTR(&mp_type_frozenset) },
+    #endif
+    { MP_ROM_QSTR(MP_QSTR_int), MP_ROM_PTR(&mp_type_int) },
+    { MP_ROM_QSTR(MP_QSTR_list), MP_ROM_PTR(&mp_type_list) },
+    { MP_ROM_QSTR(MP_QSTR_map), MP_ROM_PTR(&mp_type_map) },
+    #if MICROPY_PY_BUILTINS_MEMORYVIEW
+    { MP_ROM_QSTR(MP_QSTR_memoryview), MP_ROM_PTR(&mp_type_memoryview) },
+    #endif
+    { MP_ROM_QSTR(MP_QSTR_object), MP_ROM_PTR(&mp_type_object) },
+    #if MICROPY_PY_BUILTINS_PROPERTY
+    { MP_ROM_QSTR(MP_QSTR_property), MP_ROM_PTR(&mp_type_property) },
+    #endif
+    { MP_ROM_QSTR(MP_QSTR_range), MP_ROM_PTR(&mp_type_range) },
+    #if MICROPY_PY_BUILTINS_REVERSED
+    { MP_ROM_QSTR(MP_QSTR_reversed), MP_ROM_PTR(&mp_type_reversed) },
+    #endif
+    #if MICROPY_PY_BUILTINS_SET
+    { MP_ROM_QSTR(MP_QSTR_set), MP_ROM_PTR(&mp_type_set) },
+    #endif
+    #if MICROPY_PY_BUILTINS_SLICE
+    { MP_ROM_QSTR(MP_QSTR_slice), MP_ROM_PTR(&mp_type_slice) },
+    #endif
+    { MP_ROM_QSTR(MP_QSTR_str), MP_ROM_PTR(&mp_type_str) },
+    { MP_ROM_QSTR(MP_QSTR_super), MP_ROM_PTR(&mp_type_super) },
+    { MP_ROM_QSTR(MP_QSTR_tuple), MP_ROM_PTR(&mp_type_tuple) },
+    { MP_ROM_QSTR(MP_QSTR_type), MP_ROM_PTR(&mp_type_type) },
+    { MP_ROM_QSTR(MP_QSTR_zip), MP_ROM_PTR(&mp_type_zip) },
+
+    { MP_ROM_QSTR(MP_QSTR_classmethod), MP_ROM_PTR(&mp_type_classmethod) },
+    { MP_ROM_QSTR(MP_QSTR_staticmethod), MP_ROM_PTR(&mp_type_staticmethod) },
+
+    // built-in objects
+    { MP_ROM_QSTR(MP_QSTR_Ellipsis), MP_ROM_PTR(&mp_const_ellipsis_obj) },
+    #if MICROPY_PY_BUILTINS_NOTIMPLEMENTED
+    { MP_ROM_QSTR(MP_QSTR_NotImplemented), MP_ROM_PTR(&mp_const_notimplemented_obj) },
+    #endif
+
+    // built-in user functions
+    { MP_ROM_QSTR(MP_QSTR_abs), MP_ROM_PTR(&mp_builtin_abs_obj) },
+    { MP_ROM_QSTR(MP_QSTR_all), MP_ROM_PTR(&mp_builtin_all_obj) },
+    { MP_ROM_QSTR(MP_QSTR_any), MP_ROM_PTR(&mp_builtin_any_obj) },
+    { MP_ROM_QSTR(MP_QSTR_bin), MP_ROM_PTR(&mp_builtin_bin_obj) },
+    { MP_ROM_QSTR(MP_QSTR_callable), MP_ROM_PTR(&mp_builtin_callable_obj) },
+    #if MICROPY_PY_BUILTINS_COMPILE
+    { MP_ROM_QSTR(MP_QSTR_compile), MP_ROM_PTR(&mp_builtin_compile_obj) },
+    #endif
+    { MP_ROM_QSTR(MP_QSTR_chr), MP_ROM_PTR(&mp_builtin_chr_obj) },
+    #if MICROPY_CPYTHON_COMPAT
+    { MP_ROM_QSTR(MP_QSTR_delattr), MP_ROM_PTR(&mp_builtin_delattr_obj) },
+    #endif
+    { MP_ROM_QSTR(MP_QSTR_dir), MP_ROM_PTR(&mp_builtin_dir_obj) },
+    { MP_ROM_QSTR(MP_QSTR_divmod), MP_ROM_PTR(&mp_builtin_divmod_obj) },
+    #if MICROPY_PY_BUILTINS_EVAL_EXEC
+    { MP_ROM_QSTR(MP_QSTR_eval), MP_ROM_PTR(&mp_builtin_eval_obj) },
+    { MP_ROM_QSTR(MP_QSTR_exec), MP_ROM_PTR(&mp_builtin_exec_obj) },
+    #endif
+    #if MICROPY_PY_BUILTINS_EXECFILE
+    { MP_ROM_QSTR(MP_QSTR_execfile), MP_ROM_PTR(&mp_builtin_execfile_obj) },
+    #endif
+    { MP_ROM_QSTR(MP_QSTR_getattr), MP_ROM_PTR(&mp_builtin_getattr_obj) },
+    { MP_ROM_QSTR(MP_QSTR_setattr), MP_ROM_PTR(&mp_builtin_setattr_obj) },
+    { MP_ROM_QSTR(MP_QSTR_globals), MP_ROM_PTR(&mp_builtin_globals_obj) },
+    { MP_ROM_QSTR(MP_QSTR_hasattr), MP_ROM_PTR(&mp_builtin_hasattr_obj) },
+    { MP_ROM_QSTR(MP_QSTR_hash), MP_ROM_PTR(&mp_builtin_hash_obj) },
+    #if MICROPY_PY_BUILTINS_HELP
+    { MP_ROM_QSTR(MP_QSTR_help), MP_ROM_PTR(&mp_builtin_help_obj) },
+    #endif
+    { MP_ROM_QSTR(MP_QSTR_hex), MP_ROM_PTR(&mp_builtin_hex_obj) },
+    { MP_ROM_QSTR(MP_QSTR_id), MP_ROM_PTR(&mp_builtin_id_obj) },
+    #if MICROPY_PY_BUILTINS_INPUT
+    { MP_ROM_QSTR(MP_QSTR_input), MP_ROM_PTR(&mp_builtin_input_obj) },
+    #endif
+    { MP_ROM_QSTR(MP_QSTR_isinstance), MP_ROM_PTR(&mp_builtin_isinstance_obj) },
+    { MP_ROM_QSTR(MP_QSTR_issubclass), MP_ROM_PTR(&mp_builtin_issubclass_obj) },
+    { MP_ROM_QSTR(MP_QSTR_iter), MP_ROM_PTR(&mp_builtin_iter_obj) },
+    { MP_ROM_QSTR(MP_QSTR_len), MP_ROM_PTR(&mp_builtin_len_obj) },
+    { MP_ROM_QSTR(MP_QSTR_locals), MP_ROM_PTR(&mp_builtin_locals_obj) },
+    #if MICROPY_PY_BUILTINS_MIN_MAX
+    { MP_ROM_QSTR(MP_QSTR_max), MP_ROM_PTR(&mp_builtin_max_obj) },
+    { MP_ROM_QSTR(MP_QSTR_min), MP_ROM_PTR(&mp_builtin_min_obj) },
+    #endif
+    { MP_ROM_QSTR(MP_QSTR_next), MP_ROM_PTR(&mp_builtin_next_obj) },
+    { MP_ROM_QSTR(MP_QSTR_oct), MP_ROM_PTR(&mp_builtin_oct_obj) },
+    #if MICROPY_PY_IO
+    { MP_ROM_QSTR(MP_QSTR_open), MP_ROM_PTR(&mp_builtin_open_obj) },
+    #endif
+    { MP_ROM_QSTR(MP_QSTR_ord), MP_ROM_PTR(&mp_builtin_ord_obj) },
+    { MP_ROM_QSTR(MP_QSTR_pow), MP_ROM_PTR(&mp_builtin_pow_obj) },
+    { MP_ROM_QSTR(MP_QSTR_print), MP_ROM_PTR(&mp_builtin_print_obj) },
+    { MP_ROM_QSTR(MP_QSTR_repr), MP_ROM_PTR(&mp_builtin_repr_obj) },
+    { MP_ROM_QSTR(MP_QSTR_round), MP_ROM_PTR(&mp_builtin_round_obj) },
+    { MP_ROM_QSTR(MP_QSTR_sorted), MP_ROM_PTR(&mp_builtin_sorted_obj) },
+    { MP_ROM_QSTR(MP_QSTR_sum), MP_ROM_PTR(&mp_builtin_sum_obj) },
+
+    // built-in exceptions
+    { MP_ROM_QSTR(MP_QSTR_BaseException), MP_ROM_PTR(&mp_type_BaseException) },
+    { MP_ROM_QSTR(MP_QSTR_ArithmeticError), MP_ROM_PTR(&mp_type_ArithmeticError) },
+    { MP_ROM_QSTR(MP_QSTR_AssertionError), MP_ROM_PTR(&mp_type_AssertionError) },
+    { MP_ROM_QSTR(MP_QSTR_AttributeError), MP_ROM_PTR(&mp_type_AttributeError) },
+    { MP_ROM_QSTR(MP_QSTR_EOFError), MP_ROM_PTR(&mp_type_EOFError) },
+    { MP_ROM_QSTR(MP_QSTR_Exception), MP_ROM_PTR(&mp_type_Exception) },
+    { MP_ROM_QSTR(MP_QSTR_GeneratorExit), MP_ROM_PTR(&mp_type_GeneratorExit) },
+    { MP_ROM_QSTR(MP_QSTR_ImportError), MP_ROM_PTR(&mp_type_ImportError) },
+    { MP_ROM_QSTR(MP_QSTR_IndentationError), MP_ROM_PTR(&mp_type_IndentationError) },
+    { MP_ROM_QSTR(MP_QSTR_IndexError), MP_ROM_PTR(&mp_type_IndexError) },
+    { MP_ROM_QSTR(MP_QSTR_KeyboardInterrupt), MP_ROM_PTR(&mp_type_KeyboardInterrupt) },
+    { MP_ROM_QSTR(MP_QSTR_KeyError), MP_ROM_PTR(&mp_type_KeyError) },
+    { MP_ROM_QSTR(MP_QSTR_LookupError), MP_ROM_PTR(&mp_type_LookupError) },
+    { MP_ROM_QSTR(MP_QSTR_MemoryError), MP_ROM_PTR(&mp_type_MemoryError) },
+    { MP_ROM_QSTR(MP_QSTR_NameError), MP_ROM_PTR(&mp_type_NameError) },
+    { MP_ROM_QSTR(MP_QSTR_NotImplementedError), MP_ROM_PTR(&mp_type_NotImplementedError) },
+    { MP_ROM_QSTR(MP_QSTR_OSError), MP_ROM_PTR(&mp_type_OSError) },
+    { MP_ROM_QSTR(MP_QSTR_OverflowError), MP_ROM_PTR(&mp_type_OverflowError) },
+    { MP_ROM_QSTR(MP_QSTR_RuntimeError), MP_ROM_PTR(&mp_type_RuntimeError) },
+    #if MICROPY_PY_ASYNC_AWAIT
+    { MP_ROM_QSTR(MP_QSTR_StopAsyncIteration), MP_ROM_PTR(&mp_type_StopAsyncIteration) },
+    #endif
+    { MP_ROM_QSTR(MP_QSTR_StopIteration), MP_ROM_PTR(&mp_type_StopIteration) },
+    { MP_ROM_QSTR(MP_QSTR_SyntaxError), MP_ROM_PTR(&mp_type_SyntaxError) },
+    { MP_ROM_QSTR(MP_QSTR_SystemExit), MP_ROM_PTR(&mp_type_SystemExit) },
+    { MP_ROM_QSTR(MP_QSTR_TypeError), MP_ROM_PTR(&mp_type_TypeError) },
+    #if MICROPY_PY_BUILTINS_STR_UNICODE
+    { MP_ROM_QSTR(MP_QSTR_UnicodeError), MP_ROM_PTR(&mp_type_UnicodeError) },
+    #endif
+    { MP_ROM_QSTR(MP_QSTR_ValueError), MP_ROM_PTR(&mp_type_ValueError) },
+    #if MICROPY_EMIT_NATIVE
+    { MP_ROM_QSTR(MP_QSTR_ViperTypeError), MP_ROM_PTR(&mp_type_ViperTypeError) },
+    #endif
+    { MP_ROM_QSTR(MP_QSTR_ZeroDivisionError), MP_ROM_PTR(&mp_type_ZeroDivisionError) },
+
+    // Extra builtins as defined by a port
+    MICROPY_PORT_BUILTINS
+    MICROPY_PORT_EXTRA_BUILTINS
+};
+
+MP_DEFINE_CONST_DICT(mp_module_builtins_globals, mp_module_builtins_globals_table);
+
+const mp_obj_module_t mp_module_builtins = {
+    .base = { &mp_type_module },
+    .globals = (mp_obj_dict_t *)&mp_module_builtins_globals,
+};
+
+MP_REGISTER_MODULE(MP_QSTR_builtins, mp_module_builtins);

+ 154 - 0
mp_flipper/lib/micropython/py/modcmath.c

@@ -0,0 +1,154 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "py/builtin.h"
+
+#if MICROPY_PY_BUILTINS_FLOAT && MICROPY_PY_BUILTINS_COMPLEX && MICROPY_PY_CMATH
+
+#include <math.h>
+
+// phase(z): returns the phase of the number z in the range (-pi, +pi]
+static mp_obj_t mp_cmath_phase(mp_obj_t z_obj) {
+    mp_float_t real, imag;
+    mp_obj_get_complex(z_obj, &real, &imag);
+    return mp_obj_new_float(MICROPY_FLOAT_C_FUN(atan2)(imag, real));
+}
+static MP_DEFINE_CONST_FUN_OBJ_1(mp_cmath_phase_obj, mp_cmath_phase);
+
+// polar(z): returns the polar form of z as a tuple
+static mp_obj_t mp_cmath_polar(mp_obj_t z_obj) {
+    mp_float_t real, imag;
+    mp_obj_get_complex(z_obj, &real, &imag);
+    mp_obj_t tuple[2] = {
+        mp_obj_new_float(MICROPY_FLOAT_C_FUN(sqrt)(real * real + imag * imag)),
+        mp_obj_new_float(MICROPY_FLOAT_C_FUN(atan2)(imag, real)),
+    };
+    return mp_obj_new_tuple(2, tuple);
+}
+static MP_DEFINE_CONST_FUN_OBJ_1(mp_cmath_polar_obj, mp_cmath_polar);
+
+// rect(r, phi): returns the complex number with modulus r and phase phi
+static mp_obj_t mp_cmath_rect(mp_obj_t r_obj, mp_obj_t phi_obj) {
+    mp_float_t r = mp_obj_get_float(r_obj);
+    mp_float_t phi = mp_obj_get_float(phi_obj);
+    return mp_obj_new_complex(r * MICROPY_FLOAT_C_FUN(cos)(phi), r * MICROPY_FLOAT_C_FUN(sin)(phi));
+}
+static MP_DEFINE_CONST_FUN_OBJ_2(mp_cmath_rect_obj, mp_cmath_rect);
+
+// exp(z): return the exponential of z
+static mp_obj_t mp_cmath_exp(mp_obj_t z_obj) {
+    mp_float_t real, imag;
+    mp_obj_get_complex(z_obj, &real, &imag);
+    mp_float_t exp_real = MICROPY_FLOAT_C_FUN(exp)(real);
+    return mp_obj_new_complex(exp_real * MICROPY_FLOAT_C_FUN(cos)(imag), exp_real * MICROPY_FLOAT_C_FUN(sin)(imag));
+}
+static MP_DEFINE_CONST_FUN_OBJ_1(mp_cmath_exp_obj, mp_cmath_exp);
+
+// log(z): return the natural logarithm of z, with branch cut along the negative real axis
+// TODO can take second argument, being the base
+static mp_obj_t mp_cmath_log(mp_obj_t z_obj) {
+    mp_float_t real, imag;
+    mp_obj_get_complex(z_obj, &real, &imag);
+    return mp_obj_new_complex(MICROPY_FLOAT_CONST(0.5) * MICROPY_FLOAT_C_FUN(log)(real * real + imag * imag), MICROPY_FLOAT_C_FUN(atan2)(imag, real));
+}
+static MP_DEFINE_CONST_FUN_OBJ_1(mp_cmath_log_obj, mp_cmath_log);
+
+#if MICROPY_PY_MATH_SPECIAL_FUNCTIONS
+// log10(z): return the base-10 logarithm of z, with branch cut along the negative real axis
+static mp_obj_t mp_cmath_log10(mp_obj_t z_obj) {
+    mp_float_t real, imag;
+    mp_obj_get_complex(z_obj, &real, &imag);
+    return mp_obj_new_complex(MICROPY_FLOAT_CONST(0.5) * MICROPY_FLOAT_C_FUN(log10)(real * real + imag * imag), MICROPY_FLOAT_CONST(0.4342944819032518) * MICROPY_FLOAT_C_FUN(atan2)(imag, real));
+}
+static MP_DEFINE_CONST_FUN_OBJ_1(mp_cmath_log10_obj, mp_cmath_log10);
+#endif
+
+// sqrt(z): return the square-root of z
+static mp_obj_t mp_cmath_sqrt(mp_obj_t z_obj) {
+    mp_float_t real, imag;
+    mp_obj_get_complex(z_obj, &real, &imag);
+    mp_float_t sqrt_abs = MICROPY_FLOAT_C_FUN(pow)(real * real + imag * imag, MICROPY_FLOAT_CONST(0.25));
+    mp_float_t theta = MICROPY_FLOAT_CONST(0.5) * MICROPY_FLOAT_C_FUN(atan2)(imag, real);
+    return mp_obj_new_complex(sqrt_abs * MICROPY_FLOAT_C_FUN(cos)(theta), sqrt_abs * MICROPY_FLOAT_C_FUN(sin)(theta));
+}
+static MP_DEFINE_CONST_FUN_OBJ_1(mp_cmath_sqrt_obj, mp_cmath_sqrt);
+
+// cos(z): return the cosine of z
+static mp_obj_t mp_cmath_cos(mp_obj_t z_obj) {
+    mp_float_t real, imag;
+    mp_obj_get_complex(z_obj, &real, &imag);
+    return mp_obj_new_complex(MICROPY_FLOAT_C_FUN(cos)(real) * MICROPY_FLOAT_C_FUN(cosh)(imag), -MICROPY_FLOAT_C_FUN(sin)(real) * MICROPY_FLOAT_C_FUN(sinh)(imag));
+}
+static MP_DEFINE_CONST_FUN_OBJ_1(mp_cmath_cos_obj, mp_cmath_cos);
+
+// sin(z): return the sine of z
+static mp_obj_t mp_cmath_sin(mp_obj_t z_obj) {
+    mp_float_t real, imag;
+    mp_obj_get_complex(z_obj, &real, &imag);
+    return mp_obj_new_complex(MICROPY_FLOAT_C_FUN(sin)(real) * MICROPY_FLOAT_C_FUN(cosh)(imag), MICROPY_FLOAT_C_FUN(cos)(real) * MICROPY_FLOAT_C_FUN(sinh)(imag));
+}
+static MP_DEFINE_CONST_FUN_OBJ_1(mp_cmath_sin_obj, mp_cmath_sin);
+
+static const mp_rom_map_elem_t mp_module_cmath_globals_table[] = {
+    { MP_ROM_QSTR(MP_QSTR___name__), MP_ROM_QSTR(MP_QSTR_cmath) },
+    { MP_ROM_QSTR(MP_QSTR_e), mp_const_float_e },
+    { MP_ROM_QSTR(MP_QSTR_pi), mp_const_float_pi },
+    { MP_ROM_QSTR(MP_QSTR_phase), MP_ROM_PTR(&mp_cmath_phase_obj) },
+    { MP_ROM_QSTR(MP_QSTR_polar), MP_ROM_PTR(&mp_cmath_polar_obj) },
+    { MP_ROM_QSTR(MP_QSTR_rect), MP_ROM_PTR(&mp_cmath_rect_obj) },
+    { MP_ROM_QSTR(MP_QSTR_exp), MP_ROM_PTR(&mp_cmath_exp_obj) },
+    { MP_ROM_QSTR(MP_QSTR_log), MP_ROM_PTR(&mp_cmath_log_obj) },
+    #if MICROPY_PY_MATH_SPECIAL_FUNCTIONS
+    { MP_ROM_QSTR(MP_QSTR_log10), MP_ROM_PTR(&mp_cmath_log10_obj) },
+    #endif
+    { MP_ROM_QSTR(MP_QSTR_sqrt), MP_ROM_PTR(&mp_cmath_sqrt_obj) },
+    // { MP_ROM_QSTR(MP_QSTR_acos), MP_ROM_PTR(&mp_cmath_acos_obj) },
+    // { MP_ROM_QSTR(MP_QSTR_asin), MP_ROM_PTR(&mp_cmath_asin_obj) },
+    // { MP_ROM_QSTR(MP_QSTR_atan), MP_ROM_PTR(&mp_cmath_atan_obj) },
+    { MP_ROM_QSTR(MP_QSTR_cos), MP_ROM_PTR(&mp_cmath_cos_obj) },
+    { MP_ROM_QSTR(MP_QSTR_sin), MP_ROM_PTR(&mp_cmath_sin_obj) },
+    // { MP_ROM_QSTR(MP_QSTR_tan), MP_ROM_PTR(&mp_cmath_tan_obj) },
+    // { MP_ROM_QSTR(MP_QSTR_acosh), MP_ROM_PTR(&mp_cmath_acosh_obj) },
+    // { MP_ROM_QSTR(MP_QSTR_asinh), MP_ROM_PTR(&mp_cmath_asinh_obj) },
+    // { MP_ROM_QSTR(MP_QSTR_atanh), MP_ROM_PTR(&mp_cmath_atanh_obj) },
+    // { MP_ROM_QSTR(MP_QSTR_cosh), MP_ROM_PTR(&mp_cmath_cosh_obj) },
+    // { MP_ROM_QSTR(MP_QSTR_sinh), MP_ROM_PTR(&mp_cmath_sinh_obj) },
+    // { MP_ROM_QSTR(MP_QSTR_tanh), MP_ROM_PTR(&mp_cmath_tanh_obj) },
+    // { MP_ROM_QSTR(MP_QSTR_isfinite), MP_ROM_PTR(&mp_cmath_isfinite_obj) },
+    // { MP_ROM_QSTR(MP_QSTR_isinf), MP_ROM_PTR(&mp_cmath_isinf_obj) },
+    // { MP_ROM_QSTR(MP_QSTR_isnan), MP_ROM_PTR(&mp_cmath_isnan_obj) },
+};
+
+static MP_DEFINE_CONST_DICT(mp_module_cmath_globals, mp_module_cmath_globals_table);
+
+const mp_obj_module_t mp_module_cmath = {
+    .base = { &mp_type_module },
+    .globals = (mp_obj_dict_t *)&mp_module_cmath_globals,
+};
+
+MP_REGISTER_MODULE(MP_QSTR_cmath, mp_module_cmath);
+
+#endif // MICROPY_PY_BUILTINS_FLOAT && MICROPY_PY_BUILTINS_COMPLEX && MICROPY_PY_CMATH

+ 51 - 0
mp_flipper/lib/micropython/py/modcollections.c

@@ -0,0 +1,51 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "py/builtin.h"
+
+#if MICROPY_PY_COLLECTIONS
+
+static const mp_rom_map_elem_t mp_module_collections_globals_table[] = {
+    { MP_ROM_QSTR(MP_QSTR___name__), MP_ROM_QSTR(MP_QSTR_collections) },
+    #if MICROPY_PY_COLLECTIONS_DEQUE
+    { MP_ROM_QSTR(MP_QSTR_deque), MP_ROM_PTR(&mp_type_deque) },
+    #endif
+    { MP_ROM_QSTR(MP_QSTR_namedtuple), MP_ROM_PTR(&mp_namedtuple_obj) },
+    #if MICROPY_PY_COLLECTIONS_ORDEREDDICT
+    { MP_ROM_QSTR(MP_QSTR_OrderedDict), MP_ROM_PTR(&mp_type_ordereddict) },
+    #endif
+};
+
+static MP_DEFINE_CONST_DICT(mp_module_collections_globals, mp_module_collections_globals_table);
+
+const mp_obj_module_t mp_module_collections = {
+    .base = { &mp_type_module },
+    .globals = (mp_obj_dict_t *)&mp_module_collections_globals,
+};
+
+MP_REGISTER_EXTENSIBLE_MODULE(MP_QSTR_collections, mp_module_collections);
+
+#endif // MICROPY_PY_COLLECTIONS

+ 124 - 0
mp_flipper/lib/micropython/py/moderrno.c

@@ -0,0 +1,124 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2016 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <string.h>
+
+#include "py/obj.h"
+#include "py/mperrno.h"
+
+#if MICROPY_PY_ERRNO
+
+// This list can be defined per port in mpconfigport.h to tailor it to a
+// specific port's needs.  If it's not defined then we provide a default.
+#ifndef MICROPY_PY_ERRNO_LIST
+#define MICROPY_PY_ERRNO_LIST \
+    X(EPERM) \
+    X(ENOENT) \
+    X(EIO) \
+    X(EBADF) \
+    X(EAGAIN) \
+    X(ENOMEM) \
+    X(EACCES) \
+    X(EEXIST) \
+    X(ENODEV) \
+    X(EISDIR) \
+    X(EINVAL) \
+    X(EOPNOTSUPP) \
+    X(EADDRINUSE) \
+    X(ECONNABORTED) \
+    X(ECONNRESET) \
+    X(ENOBUFS) \
+    X(ENOTCONN) \
+    X(ETIMEDOUT) \
+    X(ECONNREFUSED) \
+    X(EHOSTUNREACH) \
+    X(EALREADY) \
+    X(EINPROGRESS) \
+
+#endif
+
+#if MICROPY_PY_ERRNO_ERRORCODE
+static const mp_rom_map_elem_t errorcode_table[] = {
+    #define X(e) { MP_ROM_INT(MP_##e), MP_ROM_QSTR(MP_QSTR_##e) },
+    MICROPY_PY_ERRNO_LIST
+#undef X
+};
+
+static const mp_obj_dict_t errorcode_dict = {
+    .base = {&mp_type_dict},
+    .map = {
+        .all_keys_are_qstrs = 0, // keys are integers
+        .is_fixed = 1,
+        .is_ordered = 1,
+        .used = MP_ARRAY_SIZE(errorcode_table),
+        .alloc = MP_ARRAY_SIZE(errorcode_table),
+        .table = (mp_map_elem_t *)(mp_rom_map_elem_t *)errorcode_table,
+    },
+};
+#endif
+
+static const mp_rom_map_elem_t mp_module_errno_globals_table[] = {
+    { MP_ROM_QSTR(MP_QSTR___name__), MP_ROM_QSTR(MP_QSTR_errno) },
+    #if MICROPY_PY_ERRNO_ERRORCODE
+    { MP_ROM_QSTR(MP_QSTR_errorcode), MP_ROM_PTR(&errorcode_dict) },
+    #endif
+
+    #define X(e) { MP_ROM_QSTR(MP_QSTR_##e), MP_ROM_INT(MP_##e) },
+    MICROPY_PY_ERRNO_LIST
+#undef X
+};
+
+static MP_DEFINE_CONST_DICT(mp_module_errno_globals, mp_module_errno_globals_table);
+
+const mp_obj_module_t mp_module_errno = {
+    .base = { &mp_type_module },
+    .globals = (mp_obj_dict_t *)&mp_module_errno_globals,
+};
+
+MP_REGISTER_EXTENSIBLE_MODULE(MP_QSTR_errno, mp_module_errno);
+
+// Map an errno value (as an int object) to its symbolic name as a qstr,
+// e.g. 2 -> MP_QSTR_ENOENT.  Returns MP_QSTRnull if the value is unknown.
+qstr mp_errno_to_str(mp_obj_t errno_val) {
+    #if MICROPY_PY_ERRNO_ERRORCODE
+    // We have the errorcode dict so can do a lookup using the hash map
+    mp_map_elem_t *elem = mp_map_lookup((mp_map_t *)&errorcode_dict.map, errno_val, MP_MAP_LOOKUP);
+    if (elem == NULL) {
+        return MP_QSTRnull;
+    } else {
+        return MP_OBJ_QSTR_VALUE(elem->value);
+    }
+    #else
+    // We don't have the errorcode dict so do a simple search in the modules dict
+    for (size_t i = 0; i < MP_ARRAY_SIZE(mp_module_errno_globals_table); ++i) {
+        if (errno_val == mp_module_errno_globals_table[i].value) {
+            return MP_OBJ_QSTR_VALUE(mp_module_errno_globals_table[i].key);
+        }
+    }
+    return MP_QSTRnull;
+    #endif
+}
+
+#endif // MICROPY_PY_ERRNO

+ 125 - 0
mp_flipper/lib/micropython/py/modgc.c

@@ -0,0 +1,125 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "py/mpstate.h"
+#include "py/obj.h"
+#include "py/gc.h"
+
+#if MICROPY_PY_GC && MICROPY_ENABLE_GC
+
+// collect(): run a garbage collection
+// collect(): run a garbage collection
+static mp_obj_t py_gc_collect(void) {
+    gc_collect();
+    #if MICROPY_PY_GC_COLLECT_RETVAL
+    // optionally report the number of blocks collected in this pass
+    return MP_OBJ_NEW_SMALL_INT(MP_STATE_MEM(gc_collected));
+    #else
+    return mp_const_none;
+    #endif
+}
+MP_DEFINE_CONST_FUN_OBJ_0(gc_collect_obj, py_gc_collect);
+
+// disable(): disable the garbage collector
+// Note: only turns off *automatic* collection; explicit gc.collect() still works.
+static mp_obj_t gc_disable(void) {
+    MP_STATE_MEM(gc_auto_collect_enabled) = 0;
+    return mp_const_none;
+}
+MP_DEFINE_CONST_FUN_OBJ_0(gc_disable_obj, gc_disable);
+
+// enable(): enable the garbage collector
+static mp_obj_t gc_enable(void) {
+    MP_STATE_MEM(gc_auto_collect_enabled) = 1;
+    return mp_const_none;
+}
+MP_DEFINE_CONST_FUN_OBJ_0(gc_enable_obj, gc_enable);
+
+// isenabled(): return True if automatic collection is currently enabled
+static mp_obj_t gc_isenabled(void) {
+    return mp_obj_new_bool(MP_STATE_MEM(gc_auto_collect_enabled));
+}
+MP_DEFINE_CONST_FUN_OBJ_0(gc_isenabled_obj, gc_isenabled);
+
+// mem_free(): return the number of bytes of available heap RAM
+static mp_obj_t gc_mem_free(void) {
+    gc_info_t info;
+    gc_info(&info);
+    #if MICROPY_GC_SPLIT_HEAP_AUTO
+    // Include max_new_split value here as a more useful heuristic
+    return MP_OBJ_NEW_SMALL_INT(info.free + info.max_new_split);
+    #else
+    return MP_OBJ_NEW_SMALL_INT(info.free);
+    #endif
+}
+MP_DEFINE_CONST_FUN_OBJ_0(gc_mem_free_obj, gc_mem_free);
+
+// mem_alloc(): return the number of bytes of heap RAM that are allocated
+static mp_obj_t gc_mem_alloc(void) {
+    gc_info_t info;
+    gc_info(&info);
+    return MP_OBJ_NEW_SMALL_INT(info.used);
+}
+MP_DEFINE_CONST_FUN_OBJ_0(gc_mem_alloc_obj, gc_mem_alloc);
+
+#if MICROPY_GC_ALLOC_THRESHOLD
+// threshold([amount]): get or set the allocation threshold (in bytes) at which
+// an automatic collection is triggered.  With no args, returns the current
+// threshold, or -1 if disabled.  A negative amount disables the threshold.
+static mp_obj_t gc_threshold(size_t n_args, const mp_obj_t *args) {
+    if (n_args == 0) {
+        if (MP_STATE_MEM(gc_alloc_threshold) == (size_t)-1) {
+            // (size_t)-1 is the sentinel for "threshold disabled"
+            return MP_OBJ_NEW_SMALL_INT(-1);
+        }
+        // stored internally in GC blocks; convert back to bytes for the user
+        return mp_obj_new_int(MP_STATE_MEM(gc_alloc_threshold) * MICROPY_BYTES_PER_GC_BLOCK);
+    }
+    mp_int_t val = mp_obj_get_int(args[0]);
+    if (val < 0) {
+        MP_STATE_MEM(gc_alloc_threshold) = (size_t)-1;
+    } else {
+        MP_STATE_MEM(gc_alloc_threshold) = val / MICROPY_BYTES_PER_GC_BLOCK;
+    }
+    return mp_const_none;
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(gc_threshold_obj, 0, 1, gc_threshold);
+#endif
+
+static const mp_rom_map_elem_t mp_module_gc_globals_table[] = {
+    { MP_ROM_QSTR(MP_QSTR___name__), MP_ROM_QSTR(MP_QSTR_gc) },
+    { MP_ROM_QSTR(MP_QSTR_collect), MP_ROM_PTR(&gc_collect_obj) },
+    { MP_ROM_QSTR(MP_QSTR_disable), MP_ROM_PTR(&gc_disable_obj) },
+    { MP_ROM_QSTR(MP_QSTR_enable), MP_ROM_PTR(&gc_enable_obj) },
+    { MP_ROM_QSTR(MP_QSTR_isenabled), MP_ROM_PTR(&gc_isenabled_obj) },
+    { MP_ROM_QSTR(MP_QSTR_mem_free), MP_ROM_PTR(&gc_mem_free_obj) },
+    { MP_ROM_QSTR(MP_QSTR_mem_alloc), MP_ROM_PTR(&gc_mem_alloc_obj) },
+    #if MICROPY_GC_ALLOC_THRESHOLD
+    { MP_ROM_QSTR(MP_QSTR_threshold), MP_ROM_PTR(&gc_threshold_obj) },
+    #endif
+};
+
+static MP_DEFINE_CONST_DICT(mp_module_gc_globals, mp_module_gc_globals_table);
+
+const mp_obj_module_t mp_module_gc = {
+    .base = { &mp_type_module },
+    .globals = (mp_obj_dict_t *)&mp_module_gc_globals,
+};
+
+MP_REGISTER_MODULE(MP_QSTR_gc, mp_module_gc);
+
+#endif

+ 231 - 0
mp_flipper/lib/micropython/py/modio.c

@@ -0,0 +1,231 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <string.h>
+
+#include "py/runtime.h"
+#include "py/builtin.h"
+#include "py/stream.h"
+#include "py/binary.h"
+#include "py/objarray.h"
+#include "py/objstringio.h"
+#include "py/frozenmod.h"
+
+#if MICROPY_PY_IO
+
+#if MICROPY_PY_IO_IOBASE
+
+static const mp_obj_type_t mp_type_iobase;
+
+static const mp_obj_base_t iobase_singleton = {&mp_type_iobase};
+
+// Constructor for io.IOBase: every instantiation returns the same immutable
+// singleton, since IOBase carries no per-instance state and exists only to be
+// subclassed by Python code.
+static mp_obj_t iobase_make_new(const mp_obj_type_t *type, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    (void)type;
+    (void)n_args;
+    (void)n_kw;
+    (void)args;
+    return MP_OBJ_FROM_PTR(&iobase_singleton);
+}
+
+// Bridge from the C stream protocol to the Python-level method named by `qst`
+// (readinto or write).  A stack-allocated bytearray wrapping `buf` is passed
+// to the Python method and its integer return value is translated back into
+// the C stream convention (byte count, or MP_STREAM_ERROR with *errcode set).
+static mp_uint_t iobase_read_write(mp_obj_t obj, void *buf, mp_uint_t size, int *errcode, qstr qst) {
+    mp_obj_t dest[3];
+    mp_load_method(obj, qst, dest);
+    // Temporary bytearray view over the caller's buffer (no heap allocation).
+    mp_obj_array_t ar = {{&mp_type_bytearray}, BYTEARRAY_TYPECODE, 0, size, buf};
+    dest[2] = MP_OBJ_FROM_PTR(&ar);
+    mp_obj_t ret_obj = mp_call_method_n_kw(1, 0, dest);
+    if (ret_obj == mp_const_none) {
+        // None from the Python method means "no data right now" (non-blocking).
+        *errcode = MP_EAGAIN;
+        return MP_STREAM_ERROR;
+    }
+    mp_int_t ret = mp_obj_get_int(ret_obj);
+    if (ret >= 0) {
+        return ret;
+    } else {
+        // A negative return is interpreted as a negated errno value.
+        *errcode = -ret;
+        return MP_STREAM_ERROR;
+    }
+}
+static mp_uint_t iobase_read(mp_obj_t obj, void *buf, mp_uint_t size, int *errcode) {
+    return iobase_read_write(obj, buf, size, errcode, MP_QSTR_readinto);
+}
+
+static mp_uint_t iobase_write(mp_obj_t obj, const void *buf, mp_uint_t size, int *errcode) {
+    return iobase_read_write(obj, (void *)buf, size, errcode, MP_QSTR_write);
+}
+
+// Bridge the C-level ioctl to a Python-level ioctl(request, arg) method;
+// negative return values are translated to errno codes as above.
+static mp_uint_t iobase_ioctl(mp_obj_t obj, mp_uint_t request, uintptr_t arg, int *errcode) {
+    mp_obj_t dest[4];
+    mp_load_method(obj, MP_QSTR_ioctl, dest);
+    dest[2] = mp_obj_new_int_from_uint(request);
+    dest[3] = mp_obj_new_int_from_uint(arg);
+    mp_int_t ret = mp_obj_get_int(mp_call_method_n_kw(2, 0, dest));
+    if (ret >= 0) {
+        return ret;
+    } else {
+        *errcode = -ret;
+        return MP_STREAM_ERROR;
+    }
+}
+
+// Stream protocol hooks that delegate to the Python-level methods above.
+static const mp_stream_p_t iobase_p = {
+    .read = iobase_read,
+    .write = iobase_write,
+    .ioctl = iobase_ioctl,
+};
+
+// Type object for io.IOBase.
+static MP_DEFINE_CONST_OBJ_TYPE(
+    mp_type_iobase,
+    MP_QSTR_IOBase,
+    MP_TYPE_FLAG_NONE,
+    make_new, iobase_make_new,
+    protocol, &iobase_p
+    );
+
+#endif // MICROPY_PY_IO_IOBASE
+
+#if MICROPY_PY_IO_BUFFEREDWRITER
+// io.BufferedWriter: buffers writes to an underlying stream and flushes them
+// in whole chunks of `alloc` bytes.
+typedef struct _mp_obj_bufwriter_t {
+    mp_obj_base_t base;
+    mp_obj_t stream;    // underlying stream that receives the flushed data
+    size_t alloc;       // capacity of buf[] in bytes
+    size_t len;         // number of bytes currently buffered
+    byte buf[0];        // buffer storage, allocated inline after the struct
+} mp_obj_bufwriter_t;
+
+// Constructor: BufferedWriter(stream, alloc).  Header and buffer are created
+// in a single variable-sized allocation.
+static mp_obj_t bufwriter_make_new(const mp_obj_type_t *type, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    mp_arg_check_num(n_args, n_kw, 2, 2, false);
+    size_t alloc = mp_obj_get_int(args[1]);
+    mp_obj_bufwriter_t *o = mp_obj_malloc_var(mp_obj_bufwriter_t, buf, byte, alloc, type);
+    o->stream = args[0];
+    o->alloc = alloc;
+    o->len = 0;
+    return o;
+}
+
+// Stream-protocol write: append data to the internal buffer, flushing one
+// full `alloc`-sized chunk to the underlying stream each time it fills.
+static mp_uint_t bufwriter_write(mp_obj_t self_in, const void *buf, mp_uint_t size, int *errcode) {
+    mp_obj_bufwriter_t *self = MP_OBJ_TO_PTR(self_in);
+
+    mp_uint_t org_size = size;
+
+    while (size > 0) {
+        mp_uint_t rem = self->alloc - self->len;
+        if (size < rem) {
+            // Remaining data fits in the buffer: stash it and report the full
+            // original size as written.
+            memcpy(self->buf + self->len, buf, size);
+            self->len += size;
+            return org_size;
+        }
+
+        // Buffer flushing policy here is to flush entire buffer all the time.
+        // This allows e.g. to have a block device as backing storage and write
+        // entire block to it. memcpy below is not ideal and could be optimized
+        // in some cases. But the way it is now it at least ensures that buffer
+        // is word-aligned, to guard against obscure cases when it matters, e.g.
+        // https://github.com/micropython/micropython/issues/1863
+        memcpy(self->buf + self->len, buf, rem);
+        buf = (byte *)buf + rem;
+        size -= rem;
+        mp_uint_t out_sz = mp_stream_write_exactly(self->stream, self->buf, self->alloc, errcode);
+        (void)out_sz;
+        if (*errcode != 0) {
+            return MP_STREAM_ERROR;
+        }
+        // TODO: try to recover from a case of non-blocking stream, e.g. move
+        // remaining chunk to the beginning of buffer.
+        assert(out_sz == self->alloc);
+        self->len = 0;
+    }
+
+    return org_size;
+}
+
+// flush(): write any buffered bytes to the underlying stream.  The buffer is
+// reset before a failure is raised, so a failed flush drops its data.
+static mp_obj_t bufwriter_flush(mp_obj_t self_in) {
+    mp_obj_bufwriter_t *self = MP_OBJ_TO_PTR(self_in);
+
+    if (self->len != 0) {
+        int err;
+        mp_uint_t out_sz = mp_stream_write_exactly(self->stream, self->buf, self->len, &err);
+        (void)out_sz;
+        // TODO: try to recover from a case of non-blocking stream, e.g. move
+        // remaining chunk to the beginning of buffer.
+        assert(out_sz == self->len);
+        self->len = 0;
+        if (err != 0) {
+            mp_raise_OSError(err);
+        }
+    }
+
+    return mp_const_none;
+}
+static MP_DEFINE_CONST_FUN_OBJ_1(bufwriter_flush_obj, bufwriter_flush);
+
+// Methods of BufferedWriter: write() uses the generic stream implementation,
+// which dispatches through bufwriter_stream_p below.
+static const mp_rom_map_elem_t bufwriter_locals_dict_table[] = {
+    { MP_ROM_QSTR(MP_QSTR_write), MP_ROM_PTR(&mp_stream_write_obj) },
+    { MP_ROM_QSTR(MP_QSTR_flush), MP_ROM_PTR(&bufwriter_flush_obj) },
+};
+static MP_DEFINE_CONST_DICT(bufwriter_locals_dict, bufwriter_locals_dict_table);
+
+// Write-only stream protocol for BufferedWriter.
+static const mp_stream_p_t bufwriter_stream_p = {
+    .write = bufwriter_write,
+};
+
+static MP_DEFINE_CONST_OBJ_TYPE(
+    mp_type_bufwriter,
+    MP_QSTR_BufferedWriter,
+    MP_TYPE_FLAG_NONE,
+    make_new, bufwriter_make_new,
+    protocol, &bufwriter_stream_p,
+    locals_dict, &bufwriter_locals_dict
+    );
+#endif // MICROPY_PY_IO_BUFFEREDWRITER
+
+// Globals table of the `io` module.
+static const mp_rom_map_elem_t mp_module_io_globals_table[] = {
+    { MP_ROM_QSTR(MP_QSTR___name__), MP_ROM_QSTR(MP_QSTR_io) },
+    // Note: mp_builtin_open_obj should be defined by port, it's not
+    // part of the core.
+    { MP_ROM_QSTR(MP_QSTR_open), MP_ROM_PTR(&mp_builtin_open_obj) },
+    #if MICROPY_PY_IO_IOBASE
+    { MP_ROM_QSTR(MP_QSTR_IOBase), MP_ROM_PTR(&mp_type_iobase) },
+    #endif
+    { MP_ROM_QSTR(MP_QSTR_StringIO), MP_ROM_PTR(&mp_type_stringio) },
+    #if MICROPY_PY_IO_BYTESIO
+    { MP_ROM_QSTR(MP_QSTR_BytesIO), MP_ROM_PTR(&mp_type_bytesio) },
+    #endif
+    #if MICROPY_PY_IO_BUFFEREDWRITER
+    { MP_ROM_QSTR(MP_QSTR_BufferedWriter), MP_ROM_PTR(&mp_type_bufwriter) },
+    #endif
+};
+
+static MP_DEFINE_CONST_DICT(mp_module_io_globals, mp_module_io_globals_table);
+
+const mp_obj_module_t mp_module_io = {
+    .base = { &mp_type_module },
+    .globals = (mp_obj_dict_t *)&mp_module_io_globals,
+};
+
+// Extensible: a frozen/filesystem `io.py` may extend this built-in module.
+MP_REGISTER_EXTENSIBLE_MODULE(MP_QSTR_io, mp_module_io);
+
+#endif

+ 440 - 0
mp_flipper/lib/micropython/py/modmath.c

@@ -0,0 +1,440 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013-2017 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "py/builtin.h"
+#include "py/runtime.h"
+
+#if MICROPY_PY_BUILTINS_FLOAT && MICROPY_PY_MATH
+
+#include <math.h>
+
+// M_PI is not part of the math.h standard and may not be defined
+// And by defining our own we can ensure it uses the correct const format.
+#define MP_PI MICROPY_FLOAT_CONST(3.14159265358979323846)
+#define MP_PI_4 MICROPY_FLOAT_CONST(0.78539816339744830962)
+#define MP_3_PI_4 MICROPY_FLOAT_CONST(2.35619449019234492885)
+
+// Raise ValueError("math domain error"), mirroring CPython's behaviour for
+// out-of-domain arguments.
+static NORETURN void math_error(void) {
+    mp_raise_ValueError(MP_ERROR_TEXT("math domain error"));
+}
+
+// Apply a one-argument C math function; report a domain error if the result
+// became NaN/inf while the input was not (i.e. the function itself failed).
+static mp_obj_t math_generic_1(mp_obj_t x_obj, mp_float_t (*f)(mp_float_t)) {
+    mp_float_t x = mp_obj_get_float(x_obj);
+    mp_float_t ans = f(x);
+    if ((isnan(ans) && !isnan(x)) || (isinf(ans) && !isinf(x))) {
+        math_error();
+    }
+    return mp_obj_new_float(ans);
+}
+
+// Two-argument variant of the wrapper above.
+static mp_obj_t math_generic_2(mp_obj_t x_obj, mp_obj_t y_obj, mp_float_t (*f)(mp_float_t, mp_float_t)) {
+    mp_float_t x = mp_obj_get_float(x_obj);
+    mp_float_t y = mp_obj_get_float(y_obj);
+    mp_float_t ans = f(x, y);
+    if ((isnan(ans) && !isnan(x) && !isnan(y)) || (isinf(ans) && !isinf(x) && !isinf(y))) {
+        math_error();
+    }
+    return mp_obj_new_float(ans);
+}
+
+// Helper macros: each invocation expands to a static wrapper function plus
+// its MicroPython function object.  The variants differ in arity and in how
+// the C result is converted back to a Python object (float, bool or int).
+#define MATH_FUN_1(py_name, c_name) \
+    static mp_obj_t mp_math_##py_name(mp_obj_t x_obj) { \
+        return math_generic_1(x_obj, MICROPY_FLOAT_C_FUN(c_name)); \
+    } \
+    static MP_DEFINE_CONST_FUN_OBJ_1(mp_math_##py_name##_obj, mp_math_##py_name);
+
+#define MATH_FUN_1_TO_BOOL(py_name, c_name) \
+    static mp_obj_t mp_math_##py_name(mp_obj_t x_obj) { return mp_obj_new_bool(c_name(mp_obj_get_float(x_obj))); } \
+    static MP_DEFINE_CONST_FUN_OBJ_1(mp_math_##py_name##_obj, mp_math_##py_name);
+
+#define MATH_FUN_1_TO_INT(py_name, c_name) \
+    static mp_obj_t mp_math_##py_name(mp_obj_t x_obj) { return mp_obj_new_int_from_float(MICROPY_FLOAT_C_FUN(c_name)(mp_obj_get_float(x_obj))); } \
+    static MP_DEFINE_CONST_FUN_OBJ_1(mp_math_##py_name##_obj, mp_math_##py_name);
+
+#define MATH_FUN_2(py_name, c_name) \
+    static mp_obj_t mp_math_##py_name(mp_obj_t x_obj, mp_obj_t y_obj) { \
+        return math_generic_2(x_obj, y_obj, MICROPY_FLOAT_C_FUN(c_name)); \
+    } \
+    static MP_DEFINE_CONST_FUN_OBJ_2(mp_math_##py_name##_obj, mp_math_##py_name);
+
+#define MATH_FUN_2_FLT_INT(py_name, c_name) \
+    static mp_obj_t mp_math_##py_name(mp_obj_t x_obj, mp_obj_t y_obj) { \
+        return mp_obj_new_float(MICROPY_FLOAT_C_FUN(c_name)(mp_obj_get_float(x_obj), mp_obj_get_int(y_obj))); \
+    } \
+    static MP_DEFINE_CONST_FUN_OBJ_2(mp_math_##py_name##_obj, mp_math_##py_name);
+
+// Fallback log2 implementation for libms that lack one.
+#if MP_NEED_LOG2
+#undef log2
+#undef log2f
+// 1.442695040888963407354163704 is 1/_M_LN2
+mp_float_t MICROPY_FLOAT_C_FUN(log2)(mp_float_t x) {
+    return MICROPY_FLOAT_C_FUN(log)(x) * MICROPY_FLOAT_CONST(1.442695040888963407354163704);
+}
+#endif
+
+// Instantiations of the wrapper macros for the standard math functions.
+// sqrt(x): returns the square root of x
+MATH_FUN_1(sqrt, sqrt)
+// pow(x, y): returns x to the power of y
+#if MICROPY_PY_MATH_POW_FIX_NAN
+// Workaround for libm implementations whose pow() mishandles NaN edge cases.
+mp_float_t pow_func(mp_float_t x, mp_float_t y) {
+    // pow(base, 0) returns 1 for any base, even when base is NaN
+    // pow(+1, exponent) returns 1 for any exponent, even when exponent is NaN
+    if (x == MICROPY_FLOAT_CONST(1.0) || y == MICROPY_FLOAT_CONST(0.0)) {
+        return MICROPY_FLOAT_CONST(1.0);
+    }
+    return MICROPY_FLOAT_C_FUN(pow)(x, y);
+}
+MATH_FUN_2(pow, pow_func)
+#else
+MATH_FUN_2(pow, pow)
+#endif
+// exp(x)
+MATH_FUN_1(exp, exp)
+#if MICROPY_PY_MATH_SPECIAL_FUNCTIONS
+// expm1(x)
+MATH_FUN_1(expm1, expm1)
+// log2(x)
+MATH_FUN_1(log2, log2)
+// log10(x)
+MATH_FUN_1(log10, log10)
+// cosh(x)
+MATH_FUN_1(cosh, cosh)
+// sinh(x)
+MATH_FUN_1(sinh, sinh)
+// tanh(x)
+MATH_FUN_1(tanh, tanh)
+// acosh(x)
+MATH_FUN_1(acosh, acosh)
+// asinh(x)
+MATH_FUN_1(asinh, asinh)
+// atanh(x)
+MATH_FUN_1(atanh, atanh)
+#endif
+// cos(x)
+MATH_FUN_1(cos, cos)
+// sin(x)
+MATH_FUN_1(sin, sin)
+// tan(x)
+MATH_FUN_1(tan, tan)
+// acos(x)
+MATH_FUN_1(acos, acos)
+// asin(x)
+MATH_FUN_1(asin, asin)
+// atan(x)
+MATH_FUN_1(atan, atan)
+// atan2(y, x)
+#if MICROPY_PY_MATH_ATAN2_FIX_INFNAN
+// Workaround for libm implementations whose atan2() mishandles inf/inf.
+// NOTE(review): calls double-precision copysign/atan2 rather than
+// MICROPY_FLOAT_C_FUN variants - matches the upstream source, confirm intent.
+mp_float_t atan2_func(mp_float_t x, mp_float_t y) {
+    if (isinf(x) && isinf(y)) {
+        return copysign(y < 0 ? MP_3_PI_4 : MP_PI_4, x);
+    }
+    return atan2(x, y);
+}
+MATH_FUN_2(atan2, atan2_func)
+#else
+MATH_FUN_2(atan2, atan2)
+#endif
+// ceil(x)
+MATH_FUN_1_TO_INT(ceil, ceil)
+// copysign(x, y)
+static mp_float_t MICROPY_FLOAT_C_FUN(copysign_func)(mp_float_t x, mp_float_t y) {
+    return MICROPY_FLOAT_C_FUN(copysign)(x, y);
+}
+MATH_FUN_2(copysign, copysign_func)
+// fabs(x)
+static mp_float_t MICROPY_FLOAT_C_FUN(fabs_func)(mp_float_t x) {
+    return MICROPY_FLOAT_C_FUN(fabs)(x);
+}
+MATH_FUN_1(fabs, fabs_func)
+// floor(x)
+MATH_FUN_1_TO_INT(floor, floor) // TODO: delegate to x.__floor__() if x is not a float
+// fmod(x, y)
+#if MICROPY_PY_MATH_FMOD_FIX_INFNAN
+// Workaround for libm implementations whose fmod() mishandles an inf divisor.
+mp_float_t fmod_func(mp_float_t x, mp_float_t y) {
+    return (!isinf(x) && isinf(y)) ? x : fmod(x, y);
+}
+MATH_FUN_2(fmod, fmod_func)
+#else
+MATH_FUN_2(fmod, fmod)
+#endif
+// isfinite(x)
+MATH_FUN_1_TO_BOOL(isfinite, isfinite)
+// isinf(x)
+MATH_FUN_1_TO_BOOL(isinf, isinf)
+// isnan(x)
+MATH_FUN_1_TO_BOOL(isnan, isnan)
+// trunc(x)
+MATH_FUN_1_TO_INT(trunc, trunc)
+// ldexp(x, exp)
+MATH_FUN_2_FLT_INT(ldexp, ldexp)
+#if MICROPY_PY_MATH_SPECIAL_FUNCTIONS
+// erf(x): return the error function of x
+MATH_FUN_1(erf, erf)
+// erfc(x): return the complementary error function of x
+MATH_FUN_1(erfc, erfc)
+// gamma(x): return the gamma function of x
+MATH_FUN_1(gamma, tgamma)
+// lgamma(x): return the natural logarithm of the gamma function of x
+MATH_FUN_1(lgamma, lgamma)
+#endif
+// TODO: fsum
+
+#if MICROPY_PY_MATH_ISCLOSE
+// isclose(a, b, *, rel_tol=1e-9, abs_tol=0.0): approximate-equality test
+// following the PEP 485 algorithm used by CPython's math.isclose.
+static mp_obj_t mp_math_isclose(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+    enum { ARG_rel_tol, ARG_abs_tol };
+    static const mp_arg_t allowed_args[] = {
+        {MP_QSTR_rel_tol, MP_ARG_KW_ONLY | MP_ARG_OBJ, {.u_obj = MP_OBJ_NULL}},
+        {MP_QSTR_abs_tol, MP_ARG_KW_ONLY | MP_ARG_OBJ, {.u_obj = MP_OBJ_NEW_SMALL_INT(0)}},
+    };
+    mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+    mp_arg_parse_all(n_args - 2, pos_args + 2, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+    const mp_float_t a = mp_obj_get_float(pos_args[0]);
+    const mp_float_t b = mp_obj_get_float(pos_args[1]);
+    const mp_float_t rel_tol = args[ARG_rel_tol].u_obj == MP_OBJ_NULL
+        ? (mp_float_t)1e-9 : mp_obj_get_float(args[ARG_rel_tol].u_obj);
+    const mp_float_t abs_tol = mp_obj_get_float(args[ARG_abs_tol].u_obj);
+    // Negative tolerances are a domain error, as in CPython.
+    if (rel_tol < (mp_float_t)0.0 || abs_tol < (mp_float_t)0.0) {
+        math_error();
+    }
+    // Exact equality short-circuit; also handles matching infinities.
+    if (a == b) {
+        return mp_const_true;
+    }
+    const mp_float_t difference = MICROPY_FLOAT_C_FUN(fabs)(a - b);
+    if (isinf(difference)) { // Either a or b is inf
+        return mp_const_false;
+    }
+    if ((difference <= abs_tol) ||
+        (difference <= MICROPY_FLOAT_C_FUN(fabs)(rel_tol * a)) ||
+        (difference <= MICROPY_FLOAT_C_FUN(fabs)(rel_tol * b))) {
+        return mp_const_true;
+    }
+    return mp_const_false;
+}
+MP_DEFINE_CONST_FUN_OBJ_KW(mp_math_isclose_obj, 2, mp_math_isclose);
+#endif
+
+// Function that takes a variable number of arguments
+
+// log(x[, base]): natural logarithm of x, or logarithm to the given base
+// computed as log(x)/log(base).
+static mp_obj_t mp_math_log(size_t n_args, const mp_obj_t *args) {
+    mp_float_t x = mp_obj_get_float(args[0]);
+    // Non-positive arguments are outside the real log domain.
+    if (x <= (mp_float_t)0.0) {
+        math_error();
+    }
+    mp_float_t l = MICROPY_FLOAT_C_FUN(log)(x);
+    if (n_args == 1) {
+        return mp_obj_new_float(l);
+    } else {
+        mp_float_t base = mp_obj_get_float(args[1]);
+        if (base <= (mp_float_t)0.0) {
+            math_error();
+        } else if (base == (mp_float_t)1.0) {
+            // log(base 1) would divide by log(1) == 0.
+            mp_raise_msg(&mp_type_ZeroDivisionError, MP_ERROR_TEXT("divide by zero"));
+        }
+        return mp_obj_new_float(l / MICROPY_FLOAT_C_FUN(log)(base));
+    }
+}
+static MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_math_log_obj, 1, 2, mp_math_log);
+
+// Functions that return a tuple
+
+// frexp(x): converts a floating-point number to fractional and integral components
+// Returns the tuple (significand, exponent) such that x == significand * 2**exponent.
+static mp_obj_t mp_math_frexp(mp_obj_t x_obj) {
+    int int_exponent = 0;
+    mp_float_t significand = MICROPY_FLOAT_C_FUN(frexp)(mp_obj_get_float(x_obj), &int_exponent);
+    mp_obj_t tuple[2];
+    tuple[0] = mp_obj_new_float(significand);
+    tuple[1] = mp_obj_new_int(int_exponent);
+    return mp_obj_new_tuple(2, tuple);
+}
+static MP_DEFINE_CONST_FUN_OBJ_1(mp_math_frexp_obj, mp_math_frexp);
+
+// modf(x): returns the tuple (fractional part, integer part), both as floats
+// with the sign of x.
+static mp_obj_t mp_math_modf(mp_obj_t x_obj) {
+    mp_float_t int_part = 0.0;
+    mp_float_t x = mp_obj_get_float(x_obj);
+    mp_float_t fractional_part = MICROPY_FLOAT_C_FUN(modf)(x, &int_part);
+    #if MICROPY_PY_MATH_MODF_FIX_NEGZERO
+    // Preserve the sign of a zero fractional part (e.g. modf(-1.0) -> -0.0).
+    if (fractional_part == MICROPY_FLOAT_CONST(0.0)) {
+        fractional_part = copysign(fractional_part, x);
+    }
+    #endif
+    mp_obj_t tuple[2];
+    tuple[0] = mp_obj_new_float(fractional_part);
+    tuple[1] = mp_obj_new_float(int_part);
+    return mp_obj_new_tuple(2, tuple);
+}
+static MP_DEFINE_CONST_FUN_OBJ_1(mp_math_modf_obj, mp_math_modf);
+
+// Angular conversions
+
+// radians(x): convert degrees to radians.
+static mp_obj_t mp_math_radians(mp_obj_t x_obj) {
+    return mp_obj_new_float(mp_obj_get_float(x_obj) * (MP_PI / MICROPY_FLOAT_CONST(180.0)));
+}
+static MP_DEFINE_CONST_FUN_OBJ_1(mp_math_radians_obj, mp_math_radians);
+
+// degrees(x): convert radians to degrees.
+static mp_obj_t mp_math_degrees(mp_obj_t x_obj) {
+    return mp_obj_new_float(mp_obj_get_float(x_obj) * (MICROPY_FLOAT_CONST(180.0) / MP_PI));
+}
+static MP_DEFINE_CONST_FUN_OBJ_1(mp_math_degrees_obj, mp_math_degrees);
+
+#if MICROPY_PY_MATH_FACTORIAL
+
+#if MICROPY_OPT_MATH_FACTORIAL
+
+// factorial(x): slightly efficient recursive implementation
+// Computes the product start*...*end by divide-and-conquer, which keeps the
+// intermediate bignums balanced in size.
+static mp_obj_t mp_math_factorial_inner(mp_uint_t start, mp_uint_t end) {
+    if (start == end) {
+        return mp_obj_new_int(start);
+    } else if (end - start == 1) {
+        return mp_binary_op(MP_BINARY_OP_MULTIPLY, MP_OBJ_NEW_SMALL_INT(start), MP_OBJ_NEW_SMALL_INT(end));
+    } else if (end - start == 2) {
+        mp_obj_t left = MP_OBJ_NEW_SMALL_INT(start);
+        mp_obj_t middle = MP_OBJ_NEW_SMALL_INT(start + 1);
+        mp_obj_t right = MP_OBJ_NEW_SMALL_INT(end);
+        mp_obj_t tmp = mp_binary_op(MP_BINARY_OP_MULTIPLY, left, middle);
+        return mp_binary_op(MP_BINARY_OP_MULTIPLY, tmp, right);
+    } else {
+        // Split the range in half and multiply the two sub-products.
+        mp_uint_t middle = start + ((end - start) >> 1);
+        mp_obj_t left = mp_math_factorial_inner(start, middle);
+        mp_obj_t right = mp_math_factorial_inner(middle + 1, end);
+        return mp_binary_op(MP_BINARY_OP_MULTIPLY, left, right);
+    }
+}
+// factorial(x): raises ValueError for negative x; 0! == 1.
+static mp_obj_t mp_math_factorial(mp_obj_t x_obj) {
+    mp_int_t max = mp_obj_get_int(x_obj);
+    if (max < 0) {
+        mp_raise_ValueError(MP_ERROR_TEXT("negative factorial"));
+    } else if (max == 0) {
+        return MP_OBJ_NEW_SMALL_INT(1);
+    }
+    return mp_math_factorial_inner(1, max);
+}
+
+#else
+
+// factorial(x): squared difference implementation
+// based on http://www.luschny.de/math/factorial/index.html
+static mp_obj_t mp_math_factorial(mp_obj_t x_obj) {
+    mp_int_t max = mp_obj_get_int(x_obj);
+    if (max < 0) {
+        mp_raise_ValueError(MP_ERROR_TEXT("negative factorial"));
+    } else if (max <= 1) {
+        return MP_OBJ_NEW_SMALL_INT(1);
+    }
+    mp_int_t h = max >> 1;
+    mp_int_t q = h * h;
+    mp_int_t r = q << 1;
+    if (max & 1) {
+        r *= max;
+    }
+    mp_obj_t prod = MP_OBJ_NEW_SMALL_INT(r);
+    for (mp_int_t num = 1; num < max - 2; num += 2) {
+        q -= num;
+        prod = mp_binary_op(MP_BINARY_OP_MULTIPLY, prod, MP_OBJ_NEW_SMALL_INT(q));
+    }
+    return prod;
+}
+
+#endif
+
+static MP_DEFINE_CONST_FUN_OBJ_1(mp_math_factorial_obj, mp_math_factorial);
+
+#endif
+
+// Globals table of the `math` module: constants plus all function objects
+// instantiated above.
+static const mp_rom_map_elem_t mp_module_math_globals_table[] = {
+    { MP_ROM_QSTR(MP_QSTR___name__), MP_ROM_QSTR(MP_QSTR_math) },
+    { MP_ROM_QSTR(MP_QSTR_e), mp_const_float_e },
+    { MP_ROM_QSTR(MP_QSTR_pi), mp_const_float_pi },
+    #if MICROPY_PY_MATH_CONSTANTS
+    { MP_ROM_QSTR(MP_QSTR_tau), mp_const_float_tau },
+    { MP_ROM_QSTR(MP_QSTR_inf), mp_const_float_inf },
+    { MP_ROM_QSTR(MP_QSTR_nan), mp_const_float_nan },
+    #endif
+    { MP_ROM_QSTR(MP_QSTR_sqrt), MP_ROM_PTR(&mp_math_sqrt_obj) },
+    { MP_ROM_QSTR(MP_QSTR_pow), MP_ROM_PTR(&mp_math_pow_obj) },
+    { MP_ROM_QSTR(MP_QSTR_exp), MP_ROM_PTR(&mp_math_exp_obj) },
+    #if MICROPY_PY_MATH_SPECIAL_FUNCTIONS
+    { MP_ROM_QSTR(MP_QSTR_expm1), MP_ROM_PTR(&mp_math_expm1_obj) },
+    #endif
+    { MP_ROM_QSTR(MP_QSTR_log), MP_ROM_PTR(&mp_math_log_obj) },
+    #if MICROPY_PY_MATH_SPECIAL_FUNCTIONS
+    { MP_ROM_QSTR(MP_QSTR_log2), MP_ROM_PTR(&mp_math_log2_obj) },
+    { MP_ROM_QSTR(MP_QSTR_log10), MP_ROM_PTR(&mp_math_log10_obj) },
+    { MP_ROM_QSTR(MP_QSTR_cosh), MP_ROM_PTR(&mp_math_cosh_obj) },
+    { MP_ROM_QSTR(MP_QSTR_sinh), MP_ROM_PTR(&mp_math_sinh_obj) },
+    { MP_ROM_QSTR(MP_QSTR_tanh), MP_ROM_PTR(&mp_math_tanh_obj) },
+    { MP_ROM_QSTR(MP_QSTR_acosh), MP_ROM_PTR(&mp_math_acosh_obj) },
+    { MP_ROM_QSTR(MP_QSTR_asinh), MP_ROM_PTR(&mp_math_asinh_obj) },
+    { MP_ROM_QSTR(MP_QSTR_atanh), MP_ROM_PTR(&mp_math_atanh_obj) },
+    #endif
+    { MP_ROM_QSTR(MP_QSTR_cos), MP_ROM_PTR(&mp_math_cos_obj) },
+    { MP_ROM_QSTR(MP_QSTR_sin), MP_ROM_PTR(&mp_math_sin_obj) },
+    { MP_ROM_QSTR(MP_QSTR_tan), MP_ROM_PTR(&mp_math_tan_obj) },
+    { MP_ROM_QSTR(MP_QSTR_acos), MP_ROM_PTR(&mp_math_acos_obj) },
+    { MP_ROM_QSTR(MP_QSTR_asin), MP_ROM_PTR(&mp_math_asin_obj) },
+    { MP_ROM_QSTR(MP_QSTR_atan), MP_ROM_PTR(&mp_math_atan_obj) },
+    { MP_ROM_QSTR(MP_QSTR_atan2), MP_ROM_PTR(&mp_math_atan2_obj) },
+    { MP_ROM_QSTR(MP_QSTR_ceil), MP_ROM_PTR(&mp_math_ceil_obj) },
+    { MP_ROM_QSTR(MP_QSTR_copysign), MP_ROM_PTR(&mp_math_copysign_obj) },
+    { MP_ROM_QSTR(MP_QSTR_fabs), MP_ROM_PTR(&mp_math_fabs_obj) },
+    { MP_ROM_QSTR(MP_QSTR_floor), MP_ROM_PTR(&mp_math_floor_obj) },
+    { MP_ROM_QSTR(MP_QSTR_fmod), MP_ROM_PTR(&mp_math_fmod_obj) },
+    { MP_ROM_QSTR(MP_QSTR_frexp), MP_ROM_PTR(&mp_math_frexp_obj) },
+    { MP_ROM_QSTR(MP_QSTR_ldexp), MP_ROM_PTR(&mp_math_ldexp_obj) },
+    { MP_ROM_QSTR(MP_QSTR_modf), MP_ROM_PTR(&mp_math_modf_obj) },
+    { MP_ROM_QSTR(MP_QSTR_isfinite), MP_ROM_PTR(&mp_math_isfinite_obj) },
+    { MP_ROM_QSTR(MP_QSTR_isinf), MP_ROM_PTR(&mp_math_isinf_obj) },
+    { MP_ROM_QSTR(MP_QSTR_isnan), MP_ROM_PTR(&mp_math_isnan_obj) },
+    #if MICROPY_PY_MATH_ISCLOSE
+    { MP_ROM_QSTR(MP_QSTR_isclose), MP_ROM_PTR(&mp_math_isclose_obj) },
+    #endif
+    { MP_ROM_QSTR(MP_QSTR_trunc), MP_ROM_PTR(&mp_math_trunc_obj) },
+    { MP_ROM_QSTR(MP_QSTR_radians), MP_ROM_PTR(&mp_math_radians_obj) },
+    { MP_ROM_QSTR(MP_QSTR_degrees), MP_ROM_PTR(&mp_math_degrees_obj) },
+    #if MICROPY_PY_MATH_FACTORIAL
+    { MP_ROM_QSTR(MP_QSTR_factorial), MP_ROM_PTR(&mp_math_factorial_obj) },
+    #endif
+    #if MICROPY_PY_MATH_SPECIAL_FUNCTIONS
+    { MP_ROM_QSTR(MP_QSTR_erf), MP_ROM_PTR(&mp_math_erf_obj) },
+    { MP_ROM_QSTR(MP_QSTR_erfc), MP_ROM_PTR(&mp_math_erfc_obj) },
+    { MP_ROM_QSTR(MP_QSTR_gamma), MP_ROM_PTR(&mp_math_gamma_obj) },
+    { MP_ROM_QSTR(MP_QSTR_lgamma), MP_ROM_PTR(&mp_math_lgamma_obj) },
+    #endif
+};
+
+static MP_DEFINE_CONST_DICT(mp_module_math_globals, mp_module_math_globals_table);
+
+const mp_obj_module_t mp_module_math = {
+    .base = { &mp_type_module },
+    .globals = (mp_obj_dict_t *)&mp_module_math_globals,
+};
+
+// Make the module importable by the runtime under the name `math`.
+MP_REGISTER_MODULE(MP_QSTR_math, mp_module_math);
+
+#endif // MICROPY_PY_BUILTINS_FLOAT && MICROPY_PY_MATH

+ 217 - 0
mp_flipper/lib/micropython/py/modmicropython.c

@@ -0,0 +1,217 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdio.h>
+
+#include "py/builtin.h"
+#include "py/stackctrl.h"
+#include "py/runtime.h"
+#include "py/gc.h"
+#include "py/mphal.h"
+
+#if MICROPY_PY_MICROPYTHON
+
+// Various builtins specific to MicroPython runtime,
+// living in micropython module
+
+#if MICROPY_ENABLE_COMPILER
+// opt_level([level]): with no args return the compiler optimisation level
+// stored in the VM state; with one arg set it.
+static mp_obj_t mp_micropython_opt_level(size_t n_args, const mp_obj_t *args) {
+    if (n_args == 0) {
+        return MP_OBJ_NEW_SMALL_INT(MP_STATE_VM(mp_optimise_value));
+    } else {
+        MP_STATE_VM(mp_optimise_value) = mp_obj_get_int(args[0]);
+        return mp_const_none;
+    }
+}
+static MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_micropython_opt_level_obj, 0, 1, mp_micropython_opt_level);
+#endif
+
+#if MICROPY_PY_MICROPYTHON_MEM_INFO
+
+#if MICROPY_MEM_STATS
+// mem_total(): total bytes allocated, as reported by m_get_total_bytes_allocated().
+static mp_obj_t mp_micropython_mem_total(void) {
+    return MP_OBJ_NEW_SMALL_INT(m_get_total_bytes_allocated());
+}
+static MP_DEFINE_CONST_FUN_OBJ_0(mp_micropython_mem_total_obj, mp_micropython_mem_total);
+
+// mem_current(): currently allocated bytes, from m_get_current_bytes_allocated().
+static mp_obj_t mp_micropython_mem_current(void) {
+    return MP_OBJ_NEW_SMALL_INT(m_get_current_bytes_allocated());
+}
+static MP_DEFINE_CONST_FUN_OBJ_0(mp_micropython_mem_current_obj, mp_micropython_mem_current);
+
+// mem_peak(): peak allocated bytes, from m_get_peak_bytes_allocated().
+static mp_obj_t mp_micropython_mem_peak(void) {
+    return MP_OBJ_NEW_SMALL_INT(m_get_peak_bytes_allocated());
+}
+static MP_DEFINE_CONST_FUN_OBJ_0(mp_micropython_mem_peak_obj, mp_micropython_mem_peak);
+#endif
+
+// mem_info([verbose]): print heap/stack/GC statistics to the platform output.
+// Passing any argument additionally dumps the GC allocation table.
+// Non-static: also called directly by some ports.
+mp_obj_t mp_micropython_mem_info(size_t n_args, const mp_obj_t *args) {
+    (void)args;
+    #if MICROPY_MEM_STATS
+    mp_printf(&mp_plat_print, "mem: total=" UINT_FMT ", current=" UINT_FMT ", peak=" UINT_FMT "\n",
+        (mp_uint_t)m_get_total_bytes_allocated(), (mp_uint_t)m_get_current_bytes_allocated(), (mp_uint_t)m_get_peak_bytes_allocated());
+    #endif
+    #if MICROPY_STACK_CHECK
+    mp_printf(&mp_plat_print, "stack: " UINT_FMT " out of " UINT_FMT "\n",
+        mp_stack_usage(), (mp_uint_t)MP_STATE_THREAD(stack_limit));
+    #else
+    mp_printf(&mp_plat_print, "stack: " UINT_FMT "\n", mp_stack_usage());
+    #endif
+    #if MICROPY_ENABLE_GC
+    gc_dump_info(&mp_plat_print);
+    if (n_args == 1) {
+        // arg given means dump gc allocation table
+        gc_dump_alloc_table(&mp_plat_print);
+    }
+    #else
+    (void)n_args;
+    #endif
+    return mp_const_none;
+}
+static MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_micropython_mem_info_obj, 0, 1, mp_micropython_mem_info);
+
+// qstr_info([verbose]): print interned-string pool statistics; any argument
+// additionally dumps the qstr data itself.
+static mp_obj_t mp_micropython_qstr_info(size_t n_args, const mp_obj_t *args) {
+    (void)args;
+    size_t n_pool, n_qstr, n_str_data_bytes, n_total_bytes;
+    qstr_pool_info(&n_pool, &n_qstr, &n_str_data_bytes, &n_total_bytes);
+    mp_printf(&mp_plat_print, "qstr pool: n_pool=%u, n_qstr=%u, n_str_data_bytes=%u, n_total_bytes=%u\n",
+        n_pool, n_qstr, n_str_data_bytes, n_total_bytes);
+    if (n_args == 1) {
+        // arg given means dump qstr data
+        qstr_dump_data();
+    }
+    return mp_const_none;
+}
+static MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_micropython_qstr_info_obj, 0, 1, mp_micropython_qstr_info);
+
+#endif // MICROPY_PY_MICROPYTHON_MEM_INFO
+
+#if MICROPY_PY_MICROPYTHON_STACK_USE
+// stack_use(): current C stack usage in bytes.
+static mp_obj_t mp_micropython_stack_use(void) {
+    return MP_OBJ_NEW_SMALL_INT(mp_stack_usage());
+}
+static MP_DEFINE_CONST_FUN_OBJ_0(mp_micropython_stack_use_obj, mp_micropython_stack_use);
+#endif
+
+#if MICROPY_ENABLE_PYSTACK
+// pystack_use(): current Python-value-stack usage.
+static mp_obj_t mp_micropython_pystack_use(void) {
+    return MP_OBJ_NEW_SMALL_INT(mp_pystack_usage());
+}
+static MP_DEFINE_CONST_FUN_OBJ_0(mp_micropython_pystack_use_obj, mp_micropython_pystack_use);
+#endif
+
+#if MICROPY_ENABLE_GC
+// heap_lock(): forbid heap allocation until a matching heap_unlock().
+static mp_obj_t mp_micropython_heap_lock(void) {
+    gc_lock();
+    return mp_const_none;
+}
+static MP_DEFINE_CONST_FUN_OBJ_0(mp_micropython_heap_lock_obj, mp_micropython_heap_lock);
+
+// heap_unlock(): undo one heap_lock(); returns the remaining lock depth.
+static mp_obj_t mp_micropython_heap_unlock(void) {
+    gc_unlock();
+    return MP_OBJ_NEW_SMALL_INT(MP_STATE_THREAD(gc_lock_depth));
+}
+static MP_DEFINE_CONST_FUN_OBJ_0(mp_micropython_heap_unlock_obj, mp_micropython_heap_unlock);
+
+#if MICROPY_PY_MICROPYTHON_HEAP_LOCKED
+// heap_locked(): current lock depth (0 means unlocked).
+static mp_obj_t mp_micropython_heap_locked(void) {
+    return MP_OBJ_NEW_SMALL_INT(MP_STATE_THREAD(gc_lock_depth));
+}
+static MP_DEFINE_CONST_FUN_OBJ_0(mp_micropython_heap_locked_obj, mp_micropython_heap_locked);
+#endif
+#endif
+
+#if MICROPY_ENABLE_EMERGENCY_EXCEPTION_BUF && (MICROPY_EMERGENCY_EXCEPTION_BUF_SIZE == 0)
+static MP_DEFINE_CONST_FUN_OBJ_1(mp_alloc_emergency_exception_buf_obj, mp_alloc_emergency_exception_buf);
+#endif
+
+#if MICROPY_KBD_EXCEPTION
+// kbd_intr(chr): set the character that raises KeyboardInterrupt (-1 disables).
+static mp_obj_t mp_micropython_kbd_intr(mp_obj_t int_chr_in) {
+    mp_hal_set_interrupt_char(mp_obj_get_int(int_chr_in));
+    return mp_const_none;
+}
+static MP_DEFINE_CONST_FUN_OBJ_1(mp_micropython_kbd_intr_obj, mp_micropython_kbd_intr);
+#endif
+
+#if MICROPY_ENABLE_SCHEDULER
+// schedule(function, arg): queue function(arg) to run soon on the main loop;
+// raises RuntimeError when the scheduler queue is full.
+static mp_obj_t mp_micropython_schedule(mp_obj_t function, mp_obj_t arg) {
+    if (!mp_sched_schedule(function, arg)) {
+        mp_raise_msg(&mp_type_RuntimeError, MP_ERROR_TEXT("schedule queue full"));
+    }
+    return mp_const_none;
+}
+static MP_DEFINE_CONST_FUN_OBJ_2(mp_micropython_schedule_obj, mp_micropython_schedule);
+#endif
+
+// Globals table of the `micropython` module; entries are compiled in only
+// when the corresponding feature is enabled.
+static const mp_rom_map_elem_t mp_module_micropython_globals_table[] = {
+    { MP_ROM_QSTR(MP_QSTR___name__), MP_ROM_QSTR(MP_QSTR_micropython) },
+    // const() is handled by the compiler; at runtime it is just identity.
+    { MP_ROM_QSTR(MP_QSTR_const), MP_ROM_PTR(&mp_identity_obj) },
+    #if MICROPY_ENABLE_COMPILER
+    { MP_ROM_QSTR(MP_QSTR_opt_level), MP_ROM_PTR(&mp_micropython_opt_level_obj) },
+    #endif
+    #if MICROPY_PY_MICROPYTHON_MEM_INFO
+    #if MICROPY_MEM_STATS
+    { MP_ROM_QSTR(MP_QSTR_mem_total), MP_ROM_PTR(&mp_micropython_mem_total_obj) },
+    { MP_ROM_QSTR(MP_QSTR_mem_current), MP_ROM_PTR(&mp_micropython_mem_current_obj) },
+    { MP_ROM_QSTR(MP_QSTR_mem_peak), MP_ROM_PTR(&mp_micropython_mem_peak_obj) },
+    #endif
+    { MP_ROM_QSTR(MP_QSTR_mem_info), MP_ROM_PTR(&mp_micropython_mem_info_obj) },
+    { MP_ROM_QSTR(MP_QSTR_qstr_info), MP_ROM_PTR(&mp_micropython_qstr_info_obj) },
+    #endif
+    #if MICROPY_PY_MICROPYTHON_STACK_USE
+    { MP_ROM_QSTR(MP_QSTR_stack_use), MP_ROM_PTR(&mp_micropython_stack_use_obj) },
+    #endif
+    #if MICROPY_ENABLE_EMERGENCY_EXCEPTION_BUF && (MICROPY_EMERGENCY_EXCEPTION_BUF_SIZE == 0)
+    { MP_ROM_QSTR(MP_QSTR_alloc_emergency_exception_buf), MP_ROM_PTR(&mp_alloc_emergency_exception_buf_obj) },
+    #endif
+    #if MICROPY_ENABLE_PYSTACK
+    { MP_ROM_QSTR(MP_QSTR_pystack_use), MP_ROM_PTR(&mp_micropython_pystack_use_obj) },
+    #endif
+    #if MICROPY_ENABLE_GC
+    { MP_ROM_QSTR(MP_QSTR_heap_lock), MP_ROM_PTR(&mp_micropython_heap_lock_obj) },
+    { MP_ROM_QSTR(MP_QSTR_heap_unlock), MP_ROM_PTR(&mp_micropython_heap_unlock_obj) },
+    #if MICROPY_PY_MICROPYTHON_HEAP_LOCKED
+    { MP_ROM_QSTR(MP_QSTR_heap_locked), MP_ROM_PTR(&mp_micropython_heap_locked_obj) },
+    #endif
+    #endif
+    #if MICROPY_KBD_EXCEPTION
+    { MP_ROM_QSTR(MP_QSTR_kbd_intr), MP_ROM_PTR(&mp_micropython_kbd_intr_obj) },
+    #endif
+    #if MICROPY_ENABLE_SCHEDULER
+    { MP_ROM_QSTR(MP_QSTR_schedule), MP_ROM_PTR(&mp_micropython_schedule_obj) },
+    #endif
+};
+
+static MP_DEFINE_CONST_DICT(mp_module_micropython_globals, mp_module_micropython_globals_table);
+
+const mp_obj_module_t mp_module_micropython = {
+    .base = { &mp_type_module },
+    .globals = (mp_obj_dict_t *)&mp_module_micropython_globals,
+};
+
+// Make the module importable by the runtime under the name `micropython`.
+MP_REGISTER_MODULE(MP_QSTR_micropython, mp_module_micropython);
+
+#endif // MICROPY_PY_MICROPYTHON

+ 278 - 0
mp_flipper/lib/micropython/py/modstruct.c

@@ -0,0 +1,278 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ * Copyright (c) 2014 Paul Sokolovsky
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <string.h>
+
+#include "py/runtime.h"
+#include "py/builtin.h"
+#include "py/objtuple.h"
+#include "py/binary.h"
+#include "py/parsenum.h"
+
+#if MICROPY_PY_STRUCT
+
+/*
+    This module implements most of character typecodes from CPython, with
+    some extensions:
+
+    O - (Pointer to) an arbitrary Python object. This is useful for callback
+        data, etc. Note that you must keep reference to passed object in
+        your Python application, otherwise it may be garbage-collected,
+        and then when you get back this value from callback it may be
+        invalid (and lead to crash).
+    S - Pointer to a string (returned as a Python string). Note the
+        difference from "Ns", - the latter says "in this place of structure
+        is character data of up to N bytes length", while "S" means
+        "in this place of a structure is a pointer to zero-terminated
+        character data".
+ */
+
+static char get_fmt_type(const char **fmt) {
+    char t = **fmt;
+    switch (t) {
+        case '!':
+            t = '>';
+            break;
+        case '@':
+        case '=':
+        case '<':
+        case '>':
+            break;
+        default:
+            return '@';
+    }
+    // Skip type char
+    (*fmt)++;
+    return t;
+}
+
+static mp_uint_t get_fmt_num(const char **p) {
+    const char *num = *p;
+    uint len = 1;
+    while (unichar_isdigit(*++num)) {
+        len++;
+    }
+    mp_uint_t val = (mp_uint_t)MP_OBJ_SMALL_INT_VALUE(mp_parse_num_integer(*p, len, 10, NULL));
+    *p = num;
+    return val;
+}
+
+static size_t calc_size_items(const char *fmt, size_t *total_sz) {
+    char fmt_type = get_fmt_type(&fmt);
+    size_t total_cnt = 0;
+    size_t size;
+    for (size = 0; *fmt; fmt++) {
+        mp_uint_t cnt = 1;
+        if (unichar_isdigit(*fmt)) {
+            cnt = get_fmt_num(&fmt);
+        }
+
+        if (*fmt == 'x') {
+            size += cnt;
+        } else if (*fmt == 's') {
+            total_cnt += 1;
+            size += cnt;
+        } else {
+            total_cnt += cnt;
+            size_t align;
+            size_t sz = mp_binary_get_size(fmt_type, *fmt, &align);
+            while (cnt--) {
+                // Apply alignment
+                size = (size + align - 1) & ~(align - 1);
+                size += sz;
+            }
+        }
+    }
+    *total_sz = size;
+    return total_cnt;
+}
+
+static mp_obj_t struct_calcsize(mp_obj_t fmt_in) {
+    const char *fmt = mp_obj_str_get_str(fmt_in);
+    size_t size;
+    calc_size_items(fmt, &size);
+    return MP_OBJ_NEW_SMALL_INT(size);
+}
+MP_DEFINE_CONST_FUN_OBJ_1(struct_calcsize_obj, struct_calcsize);
+
+static mp_obj_t struct_unpack_from(size_t n_args, const mp_obj_t *args) {
+    // unpack requires that the buffer be exactly the right size.
+    // unpack_from requires that the buffer be "big enough".
+    // Since we implement unpack and unpack_from using the same function
+    // we relax the "exact" requirement, and only implement "big enough".
+    const char *fmt = mp_obj_str_get_str(args[0]);
+    size_t total_sz;
+    size_t num_items = calc_size_items(fmt, &total_sz);
+    char fmt_type = get_fmt_type(&fmt);
+    mp_obj_tuple_t *res = MP_OBJ_TO_PTR(mp_obj_new_tuple(num_items, NULL));
+    mp_buffer_info_t bufinfo;
+    mp_get_buffer_raise(args[1], &bufinfo, MP_BUFFER_READ);
+    byte *p = bufinfo.buf;
+    byte *end_p = &p[bufinfo.len];
+    mp_int_t offset = 0;
+
+    if (n_args > 2) {
+        // offset arg provided
+        offset = mp_obj_get_int(args[2]);
+        if (offset < 0) {
+            // negative offsets are relative to the end of the buffer
+            offset = bufinfo.len + offset;
+            if (offset < 0) {
+                mp_raise_ValueError(MP_ERROR_TEXT("buffer too small"));
+            }
+        }
+        p += offset;
+    }
+    byte *p_base = p;
+
+    // Check that the input buffer is big enough to unpack all the values
+    if (p + total_sz > end_p) {
+        mp_raise_ValueError(MP_ERROR_TEXT("buffer too small"));
+    }
+
+    for (size_t i = 0; i < num_items;) {
+        mp_uint_t cnt = 1;
+        if (unichar_isdigit(*fmt)) {
+            cnt = get_fmt_num(&fmt);
+        }
+        mp_obj_t item;
+        if (*fmt == 'x') {
+            p += cnt;
+        } else if (*fmt == 's') {
+            item = mp_obj_new_bytes(p, cnt);
+            p += cnt;
+            res->items[i++] = item;
+        } else {
+            while (cnt--) {
+                item = mp_binary_get_val(fmt_type, *fmt, p_base, &p);
+                res->items[i++] = item;
+            }
+        }
+        fmt++;
+    }
+    return MP_OBJ_FROM_PTR(res);
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(struct_unpack_from_obj, 2, 3, struct_unpack_from);
+
+// This function assumes there is enough room in p to store all the values
+static void struct_pack_into_internal(mp_obj_t fmt_in, byte *p, size_t n_args, const mp_obj_t *args) {
+    const char *fmt = mp_obj_str_get_str(fmt_in);
+    char fmt_type = get_fmt_type(&fmt);
+
+    byte *p_base = p;
+    size_t i;
+    for (i = 0; i < n_args;) {
+        mp_uint_t cnt = 1;
+        if (*fmt == '\0') {
+            // more arguments given than used by format string; CPython raises struct.error here
+            break;
+        }
+        if (unichar_isdigit(*fmt)) {
+            cnt = get_fmt_num(&fmt);
+        }
+
+        if (*fmt == 'x') {
+            memset(p, 0, cnt);
+            p += cnt;
+        } else if (*fmt == 's') {
+            mp_buffer_info_t bufinfo;
+            mp_get_buffer_raise(args[i++], &bufinfo, MP_BUFFER_READ);
+            mp_uint_t to_copy = cnt;
+            if (bufinfo.len < to_copy) {
+                to_copy = bufinfo.len;
+            }
+            memcpy(p, bufinfo.buf, to_copy);
+            memset(p + to_copy, 0, cnt - to_copy);
+            p += cnt;
+        } else {
+            // If we run out of args then we just finish; CPython would raise struct.error
+            while (cnt-- && i < n_args) {
+                mp_binary_set_val(fmt_type, *fmt, args[i++], p_base, &p);
+            }
+        }
+        fmt++;
+    }
+}
+
+static mp_obj_t struct_pack(size_t n_args, const mp_obj_t *args) {
+    // TODO: "The arguments must match the values required by the format exactly."
+    mp_int_t size = MP_OBJ_SMALL_INT_VALUE(struct_calcsize(args[0]));
+    vstr_t vstr;
+    vstr_init_len(&vstr, size);
+    byte *p = (byte *)vstr.buf;
+    memset(p, 0, size);
+    struct_pack_into_internal(args[0], p, n_args - 1, &args[1]);
+    return mp_obj_new_bytes_from_vstr(&vstr);
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(struct_pack_obj, 1, MP_OBJ_FUN_ARGS_MAX, struct_pack);
+
+static mp_obj_t struct_pack_into(size_t n_args, const mp_obj_t *args) {
+    mp_buffer_info_t bufinfo;
+    mp_get_buffer_raise(args[1], &bufinfo, MP_BUFFER_WRITE);
+    mp_int_t offset = mp_obj_get_int(args[2]);
+    if (offset < 0) {
+        // negative offsets are relative to the end of the buffer
+        offset = (mp_int_t)bufinfo.len + offset;
+        if (offset < 0) {
+            mp_raise_ValueError(MP_ERROR_TEXT("buffer too small"));
+        }
+    }
+    byte *p = (byte *)bufinfo.buf;
+    byte *end_p = &p[bufinfo.len];
+    p += offset;
+
+    // Check that the output buffer is big enough to hold all the values
+    mp_int_t sz = MP_OBJ_SMALL_INT_VALUE(struct_calcsize(args[0]));
+    if (p + sz > end_p) {
+        mp_raise_ValueError(MP_ERROR_TEXT("buffer too small"));
+    }
+
+    struct_pack_into_internal(args[0], p, n_args - 3, &args[3]);
+    return mp_const_none;
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(struct_pack_into_obj, 3, MP_OBJ_FUN_ARGS_MAX, struct_pack_into);
+
+static const mp_rom_map_elem_t mp_module_struct_globals_table[] = {
+    { MP_ROM_QSTR(MP_QSTR___name__), MP_ROM_QSTR(MP_QSTR_struct) },
+    { MP_ROM_QSTR(MP_QSTR_calcsize), MP_ROM_PTR(&struct_calcsize_obj) },
+    { MP_ROM_QSTR(MP_QSTR_pack), MP_ROM_PTR(&struct_pack_obj) },
+    { MP_ROM_QSTR(MP_QSTR_pack_into), MP_ROM_PTR(&struct_pack_into_obj) },
+    { MP_ROM_QSTR(MP_QSTR_unpack), MP_ROM_PTR(&struct_unpack_from_obj) },
+    { MP_ROM_QSTR(MP_QSTR_unpack_from), MP_ROM_PTR(&struct_unpack_from_obj) },
+};
+
+static MP_DEFINE_CONST_DICT(mp_module_struct_globals, mp_module_struct_globals_table);
+
+const mp_obj_module_t mp_module_struct = {
+    .base = { &mp_type_module },
+    .globals = (mp_obj_dict_t *)&mp_module_struct_globals,
+};
+
+MP_REGISTER_EXTENSIBLE_MODULE(MP_QSTR_struct, mp_module_struct);
+
+#endif

+ 376 - 0
mp_flipper/lib/micropython/py/modsys.c

@@ -0,0 +1,376 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ * Copyright (c) 2014-2017 Paul Sokolovsky
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "py/builtin.h"
+#include "py/objlist.h"
+#include "py/objmodule.h"
+#include "py/objtuple.h"
+#include "py/objstr.h"
+#include "py/objint.h"
+#include "py/objtype.h"
+#include "py/stream.h"
+#include "py/smallint.h"
+#include "py/runtime.h"
+#include "py/persistentcode.h"
+#include "extmod/modplatform.h"
+#include "genhdr/mpversion.h"
+
+#if MICROPY_PY_SYS_SETTRACE
+#include "py/objmodule.h"
+#include "py/profile.h"
+#endif
+
+#if MICROPY_PY_SYS
+
+// defined per port; type of these is irrelevant, just need pointer
+extern struct _mp_dummy_t mp_sys_stdin_obj;
+extern struct _mp_dummy_t mp_sys_stdout_obj;
+extern struct _mp_dummy_t mp_sys_stderr_obj;
+
+#if MICROPY_PY_IO && MICROPY_PY_SYS_STDFILES
+const mp_print_t mp_sys_stdout_print = {&mp_sys_stdout_obj, mp_stream_write_adaptor};
+#endif
+
+// version - Python language version that this implementation conforms to, as a string
+static const MP_DEFINE_STR_OBJ(mp_sys_version_obj, "3.4.0; " MICROPY_BANNER_NAME_AND_VERSION);
+
+// version_info - Python language version that this implementation conforms to, as a tuple of ints
+// TODO: CPython is now at 5-element array (major, minor, micro, releaselevel, serial), but save 2 els so far...
+static const mp_rom_obj_tuple_t mp_sys_version_info_obj = {{&mp_type_tuple}, 3, {MP_ROM_INT(3), MP_ROM_INT(4), MP_ROM_INT(0)}};
+
+// sys.implementation object
+// this holds the MicroPython version
+static const mp_rom_obj_tuple_t mp_sys_implementation_version_info_obj = {
+    {&mp_type_tuple},
+    4,
+    {
+        MP_ROM_INT(MICROPY_VERSION_MAJOR),
+        MP_ROM_INT(MICROPY_VERSION_MINOR),
+        MP_ROM_INT(MICROPY_VERSION_MICRO),
+        #if MICROPY_VERSION_PRERELEASE
+        MP_ROM_QSTR(MP_QSTR_preview),
+        #else
+        MP_ROM_QSTR(MP_QSTR_),
+        #endif
+    }
+};
+static const MP_DEFINE_STR_OBJ(mp_sys_implementation_machine_obj, MICROPY_BANNER_MACHINE);
+#define SYS_IMPLEMENTATION_ELEMS_BASE \
+    MP_ROM_QSTR(MP_QSTR_micropython), \
+    MP_ROM_PTR(&mp_sys_implementation_version_info_obj), \
+    MP_ROM_PTR(&mp_sys_implementation_machine_obj)
+
+#if MICROPY_PERSISTENT_CODE_LOAD
+#define SYS_IMPLEMENTATION_ELEMS__MPY \
+    , MP_ROM_INT(MPY_FILE_HEADER_INT)
+#else
+#define SYS_IMPLEMENTATION_ELEMS__MPY
+#endif
+
+#if MICROPY_PY_ATTRTUPLE
+#if MICROPY_PREVIEW_VERSION_2
+#define SYS_IMPLEMENTATION_ELEMS__V2 \
+    , MP_ROM_TRUE
+#else
+#define SYS_IMPLEMENTATION_ELEMS__V2
+#endif
+
+static const qstr impl_fields[] = {
+    MP_QSTR_name,
+    MP_QSTR_version,
+    MP_QSTR__machine,
+    #if MICROPY_PERSISTENT_CODE_LOAD
+    MP_QSTR__mpy,
+    #endif
+    #if MICROPY_PREVIEW_VERSION_2
+    MP_QSTR__v2,
+    #endif
+};
+static MP_DEFINE_ATTRTUPLE(
+    mp_sys_implementation_obj,
+    impl_fields,
+    3 + MICROPY_PERSISTENT_CODE_LOAD + MICROPY_PREVIEW_VERSION_2,
+    SYS_IMPLEMENTATION_ELEMS_BASE
+    SYS_IMPLEMENTATION_ELEMS__MPY
+    SYS_IMPLEMENTATION_ELEMS__V2
+    );
+#else
+static const mp_rom_obj_tuple_t mp_sys_implementation_obj = {
+    {&mp_type_tuple},
+    3 + MICROPY_PERSISTENT_CODE_LOAD,
+    // Do not include SYS_IMPLEMENTATION_ELEMS__V2 because
+    // SYS_IMPLEMENTATION_ELEMS__MPY may be empty if
+    // MICROPY_PERSISTENT_CODE_LOAD is disabled, which means they'll share
+    // the same index. Cannot query _v2 if MICROPY_PY_ATTRTUPLE is
+    // disabled.
+    {
+        SYS_IMPLEMENTATION_ELEMS_BASE
+                                SYS_IMPLEMENTATION_ELEMS__MPY
+    }
+};
+#endif
+
+#undef I
+
+#ifdef MICROPY_PY_SYS_PLATFORM
+// platform - the platform that MicroPython is running on
+static const MP_DEFINE_STR_OBJ(mp_sys_platform_obj, MICROPY_PY_SYS_PLATFORM);
+#endif
+
+#ifdef MICROPY_PY_SYS_EXECUTABLE
+// executable - the path to the micropython binary
+// This object is non-const and is populated at startup in main()
+MP_DEFINE_STR_OBJ(mp_sys_executable_obj, "");
+#endif
+
+#if MICROPY_PY_SYS_INTERN
+MP_DEFINE_CONST_FUN_OBJ_1(mp_sys_intern_obj, mp_obj_str_intern_checked);
+#endif
+
+// exit([retval]): raise SystemExit, with optional argument given to the exception
+static mp_obj_t mp_sys_exit(size_t n_args, const mp_obj_t *args) {
+    if (n_args == 0) {
+        mp_raise_type(&mp_type_SystemExit);
+    } else {
+        mp_raise_type_arg(&mp_type_SystemExit, args[0]);
+    }
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_sys_exit_obj, 0, 1, mp_sys_exit);
+
+static mp_obj_t mp_sys_print_exception(size_t n_args, const mp_obj_t *args) {
+    #if MICROPY_PY_IO && MICROPY_PY_SYS_STDFILES
+    void *stream_obj = &mp_sys_stdout_obj;
+    if (n_args > 1) {
+        mp_get_stream_raise(args[1], MP_STREAM_OP_WRITE);
+        stream_obj = MP_OBJ_TO_PTR(args[1]);
+    }
+
+    mp_print_t print = {stream_obj, mp_stream_write_adaptor};
+    mp_obj_print_exception(&print, args[0]);
+    #else
+    (void)n_args;
+    mp_obj_print_exception(&mp_plat_print, args[0]);
+    #endif
+
+    return mp_const_none;
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_sys_print_exception_obj, 1, 2, mp_sys_print_exception);
+
+#if MICROPY_PY_SYS_EXC_INFO
+static mp_obj_t mp_sys_exc_info(void) {
+    mp_obj_t cur_exc = MP_OBJ_FROM_PTR(MP_STATE_VM(cur_exception));
+    mp_obj_tuple_t *t = MP_OBJ_TO_PTR(mp_obj_new_tuple(3, NULL));
+
+    if (cur_exc == MP_OBJ_NULL) {
+        t->items[0] = mp_const_none;
+        t->items[1] = mp_const_none;
+        t->items[2] = mp_const_none;
+        return MP_OBJ_FROM_PTR(t);
+    }
+
+    t->items[0] = MP_OBJ_FROM_PTR(mp_obj_get_type(cur_exc));
+    t->items[1] = cur_exc;
+    t->items[2] = mp_const_none;
+    return MP_OBJ_FROM_PTR(t);
+}
+MP_DEFINE_CONST_FUN_OBJ_0(mp_sys_exc_info_obj, mp_sys_exc_info);
+#endif
+
+#if MICROPY_PY_SYS_GETSIZEOF
+static mp_obj_t mp_sys_getsizeof(mp_obj_t obj) {
+    return mp_unary_op(MP_UNARY_OP_SIZEOF, obj);
+}
+static MP_DEFINE_CONST_FUN_OBJ_1(mp_sys_getsizeof_obj, mp_sys_getsizeof);
+#endif
+
+#if MICROPY_PY_SYS_ATEXIT
+// atexit(callback): Callback is called when sys.exit is called.
+static mp_obj_t mp_sys_atexit(mp_obj_t obj) {
+    mp_obj_t old = MP_STATE_VM(sys_exitfunc);
+    MP_STATE_VM(sys_exitfunc) = obj;
+    return old;
+}
+static MP_DEFINE_CONST_FUN_OBJ_1(mp_sys_atexit_obj, mp_sys_atexit);
+#endif
+
+#if MICROPY_PY_SYS_SETTRACE
+// settrace(tracefunc): Set the system's trace function.
+static mp_obj_t mp_sys_settrace(mp_obj_t obj) {
+    return mp_prof_settrace(obj);
+}
+MP_DEFINE_CONST_FUN_OBJ_1(mp_sys_settrace_obj, mp_sys_settrace);
+#endif // MICROPY_PY_SYS_SETTRACE
+
+#if MICROPY_PY_SYS_PATH && !MICROPY_PY_SYS_ATTR_DELEGATION
+#error "MICROPY_PY_SYS_PATH requires MICROPY_PY_SYS_ATTR_DELEGATION"
+#endif
+
+#if MICROPY_PY_SYS_PS1_PS2 && !MICROPY_PY_SYS_ATTR_DELEGATION
+#error "MICROPY_PY_SYS_PS1_PS2 requires MICROPY_PY_SYS_ATTR_DELEGATION"
+#endif
+
+#if MICROPY_PY_SYS_TRACEBACKLIMIT && !MICROPY_PY_SYS_ATTR_DELEGATION
+#error "MICROPY_PY_SYS_TRACEBACKLIMIT requires MICROPY_PY_SYS_ATTR_DELEGATION"
+#endif
+
+#if MICROPY_PY_SYS_ATTR_DELEGATION && !MICROPY_MODULE_ATTR_DELEGATION
+#error "MICROPY_PY_SYS_ATTR_DELEGATION requires MICROPY_MODULE_ATTR_DELEGATION"
+#endif
+
+#if MICROPY_PY_SYS_ATTR_DELEGATION
+// Must be kept in sync with the enum at the top of mpstate.h.
+static const uint16_t sys_mutable_keys[] = {
+    #if MICROPY_PY_SYS_PATH
+    // Code should access this (as an mp_obj_t) for use with e.g.
+    // mp_obj_list_append by using the `mp_sys_path` macro defined in runtime.h.
+    MP_QSTR_path,
+    #endif
+    #if MICROPY_PY_SYS_PS1_PS2
+    MP_QSTR_ps1,
+    MP_QSTR_ps2,
+    #endif
+    #if MICROPY_PY_SYS_TRACEBACKLIMIT
+    MP_QSTR_tracebacklimit,
+    #endif
+    MP_QSTRnull,
+};
+
+void mp_module_sys_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) {
+    MP_STATIC_ASSERT(MP_ARRAY_SIZE(sys_mutable_keys) == MP_SYS_MUTABLE_NUM + 1);
+    MP_STATIC_ASSERT(MP_ARRAY_SIZE(MP_STATE_VM(sys_mutable)) == MP_SYS_MUTABLE_NUM);
+    mp_module_generic_attr(attr, dest, sys_mutable_keys, MP_STATE_VM(sys_mutable));
+}
+#endif
+
+static const mp_rom_map_elem_t mp_module_sys_globals_table[] = {
+    { MP_ROM_QSTR(MP_QSTR___name__), MP_ROM_QSTR(MP_QSTR_sys) },
+
+    #if MICROPY_PY_SYS_ARGV
+    { MP_ROM_QSTR(MP_QSTR_argv), MP_ROM_PTR(&MP_STATE_VM(mp_sys_argv_obj)) },
+    #endif
+    { MP_ROM_QSTR(MP_QSTR_version), MP_ROM_PTR(&mp_sys_version_obj) },
+    { MP_ROM_QSTR(MP_QSTR_version_info), MP_ROM_PTR(&mp_sys_version_info_obj) },
+    { MP_ROM_QSTR(MP_QSTR_implementation), MP_ROM_PTR(&mp_sys_implementation_obj) },
+    #ifdef MICROPY_PY_SYS_PLATFORM
+    { MP_ROM_QSTR(MP_QSTR_platform), MP_ROM_PTR(&mp_sys_platform_obj) },
+    #endif
+    #if MP_ENDIANNESS_LITTLE
+    { MP_ROM_QSTR(MP_QSTR_byteorder), MP_ROM_QSTR(MP_QSTR_little) },
+    #else
+    { MP_ROM_QSTR(MP_QSTR_byteorder), MP_ROM_QSTR(MP_QSTR_big) },
+    #endif
+
+    #if MICROPY_PY_SYS_MAXSIZE
+    #if MICROPY_LONGINT_IMPL == MICROPY_LONGINT_IMPL_NONE
+    // Maximum mp_int_t value is not representable as small int, so we have
+    // little choice but to use MP_SMALL_INT_MAX. Apps also should be careful
+    // to not try to compare sys.maxsize to some literal number (as this
+    // number might not fit in available int size), but instead count number
+    // of "one" bits in sys.maxsize.
+    { MP_ROM_QSTR(MP_QSTR_maxsize), MP_ROM_INT(MP_SMALL_INT_MAX) },
+    #else
+    { MP_ROM_QSTR(MP_QSTR_maxsize), MP_ROM_PTR(&mp_sys_maxsize_obj) },
+    #endif
+    #endif
+
+    #if MICROPY_PY_SYS_INTERN
+    { MP_ROM_QSTR(MP_QSTR_intern), MP_ROM_PTR(&mp_sys_intern_obj) },
+    #endif
+
+    #if MICROPY_PY_SYS_EXIT
+    { MP_ROM_QSTR(MP_QSTR_exit), MP_ROM_PTR(&mp_sys_exit_obj) },
+    #endif
+
+    #if MICROPY_PY_SYS_SETTRACE
+    { MP_ROM_QSTR(MP_QSTR_settrace), MP_ROM_PTR(&mp_sys_settrace_obj) },
+    #endif
+
+    #if MICROPY_PY_SYS_STDFILES
+    { MP_ROM_QSTR(MP_QSTR_stdin), MP_ROM_PTR(&mp_sys_stdin_obj) },
+    { MP_ROM_QSTR(MP_QSTR_stdout), MP_ROM_PTR(&mp_sys_stdout_obj) },
+    { MP_ROM_QSTR(MP_QSTR_stderr), MP_ROM_PTR(&mp_sys_stderr_obj) },
+    #endif
+
+    #if MICROPY_PY_SYS_MODULES
+    { MP_ROM_QSTR(MP_QSTR_modules), MP_ROM_PTR(&MP_STATE_VM(mp_loaded_modules_dict)) },
+    #endif
+    #if MICROPY_PY_SYS_EXC_INFO
+    { MP_ROM_QSTR(MP_QSTR_exc_info), MP_ROM_PTR(&mp_sys_exc_info_obj) },
+    #endif
+    #if MICROPY_PY_SYS_GETSIZEOF
+    { MP_ROM_QSTR(MP_QSTR_getsizeof), MP_ROM_PTR(&mp_sys_getsizeof_obj) },
+    #endif
+
+    #if MICROPY_PY_SYS_EXECUTABLE
+    { MP_ROM_QSTR(MP_QSTR_executable), MP_ROM_PTR(&mp_sys_executable_obj) },
+    #endif
+
+    /*
+     * Extensions to CPython
+     */
+
+    { MP_ROM_QSTR(MP_QSTR_print_exception), MP_ROM_PTR(&mp_sys_print_exception_obj) },
+    #if MICROPY_PY_SYS_ATEXIT
+    { MP_ROM_QSTR(MP_QSTR_atexit), MP_ROM_PTR(&mp_sys_atexit_obj) },
+    #endif
+};
+
+static MP_DEFINE_CONST_DICT(mp_module_sys_globals, mp_module_sys_globals_table);
+
+const mp_obj_module_t mp_module_sys = {
+    .base = { &mp_type_module },
+    .globals = (mp_obj_dict_t *)&mp_module_sys_globals,
+};
+
+// Unlike the other CPython-compatible modules, sys is not extensible from the
+// filesystem. We rely on it to work so that things like sys.path are always
+// available.
+MP_REGISTER_MODULE(MP_QSTR_sys, mp_module_sys);
+
+#if MICROPY_PY_SYS_ARGV
+// Code should access this (as an mp_obj_t) for use with e.g.
+// mp_obj_list_append by using the `mp_sys_argv` macro defined in runtime.h.
+MP_REGISTER_ROOT_POINTER(mp_obj_list_t mp_sys_argv_obj);
+#endif
+
+#if MICROPY_PY_SYS_EXC_INFO
+// current exception being handled, for sys.exc_info()
+MP_REGISTER_ROOT_POINTER(mp_obj_base_t * cur_exception);
+#endif
+
+#if MICROPY_PY_SYS_ATEXIT
+// exposed through sys.atexit function
+MP_REGISTER_ROOT_POINTER(mp_obj_t sys_exitfunc);
+#endif
+
+#if MICROPY_PY_SYS_ATTR_DELEGATION
+// Contains mutable sys attributes.
+MP_REGISTER_ROOT_POINTER(mp_obj_t sys_mutable[MP_SYS_MUTABLE_NUM]);
+MP_REGISTER_MODULE_DELEGATION(mp_module_sys, mp_module_sys_attr);
+#endif
+
+#endif // MICROPY_PY_SYS

+ 292 - 0
mp_flipper/lib/micropython/py/modthread.c

@@ -0,0 +1,292 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2016 Damien P. George on behalf of Pycom Ltd
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+
+#include "py/runtime.h"
+#include "py/stackctrl.h"
+
+#if MICROPY_PY_THREAD
+
+#include "py/mpthread.h"
+
+#if MICROPY_DEBUG_VERBOSE // print debugging info
+#define DEBUG_PRINT (1)
+#define DEBUG_printf DEBUG_printf
+#else // don't print debugging info
+#define DEBUG_PRINT (0)
+#define DEBUG_printf(...) (void)0
+#endif
+
+/****************************************************************/
+// Lock object
+
+static const mp_obj_type_t mp_type_thread_lock;
+
+typedef struct _mp_obj_thread_lock_t {
+    mp_obj_base_t base;
+    mp_thread_mutex_t mutex;
+    volatile bool locked;
+} mp_obj_thread_lock_t;
+
+static mp_obj_thread_lock_t *mp_obj_new_thread_lock(void) {
+    mp_obj_thread_lock_t *self = mp_obj_malloc(mp_obj_thread_lock_t, &mp_type_thread_lock);
+    mp_thread_mutex_init(&self->mutex);
+    self->locked = false;
+    return self;
+}
+
+static mp_obj_t thread_lock_acquire(size_t n_args, const mp_obj_t *args) {
+    mp_obj_thread_lock_t *self = MP_OBJ_TO_PTR(args[0]);
+    bool wait = true;
+    if (n_args > 1) {
+        wait = mp_obj_get_int(args[1]);
+        // TODO support timeout arg
+    }
+    MP_THREAD_GIL_EXIT();
+    int ret = mp_thread_mutex_lock(&self->mutex, wait);
+    MP_THREAD_GIL_ENTER();
+    if (ret == 0) {
+        return mp_const_false;
+    } else if (ret == 1) {
+        self->locked = true;
+        return mp_const_true;
+    } else {
+        mp_raise_OSError(-ret);
+    }
+}
+static MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(thread_lock_acquire_obj, 1, 3, thread_lock_acquire);
+
+static mp_obj_t thread_lock_release(mp_obj_t self_in) {
+    mp_obj_thread_lock_t *self = MP_OBJ_TO_PTR(self_in);
+    if (!self->locked) {
+        mp_raise_msg(&mp_type_RuntimeError, NULL);
+    }
+    self->locked = false;
+    MP_THREAD_GIL_EXIT();
+    mp_thread_mutex_unlock(&self->mutex);
+    MP_THREAD_GIL_ENTER();
+    return mp_const_none;
+}
+static MP_DEFINE_CONST_FUN_OBJ_1(thread_lock_release_obj, thread_lock_release);
+
+static mp_obj_t thread_lock_locked(mp_obj_t self_in) {
+    mp_obj_thread_lock_t *self = MP_OBJ_TO_PTR(self_in);
+    return mp_obj_new_bool(self->locked);
+}
+static MP_DEFINE_CONST_FUN_OBJ_1(thread_lock_locked_obj, thread_lock_locked);
+
+static mp_obj_t thread_lock___exit__(size_t n_args, const mp_obj_t *args) {
+    (void)n_args; // unused
+    return thread_lock_release(args[0]);
+}
+static MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(thread_lock___exit___obj, 4, 4, thread_lock___exit__);
+
+static const mp_rom_map_elem_t thread_lock_locals_dict_table[] = {
+    { MP_ROM_QSTR(MP_QSTR_acquire), MP_ROM_PTR(&thread_lock_acquire_obj) },
+    { MP_ROM_QSTR(MP_QSTR_release), MP_ROM_PTR(&thread_lock_release_obj) },
+    { MP_ROM_QSTR(MP_QSTR_locked), MP_ROM_PTR(&thread_lock_locked_obj) },
+    { MP_ROM_QSTR(MP_QSTR___enter__), MP_ROM_PTR(&thread_lock_acquire_obj) },
+    { MP_ROM_QSTR(MP_QSTR___exit__), MP_ROM_PTR(&thread_lock___exit___obj) },
+};
+
+static MP_DEFINE_CONST_DICT(thread_lock_locals_dict, thread_lock_locals_dict_table);
+
+static MP_DEFINE_CONST_OBJ_TYPE(
+    mp_type_thread_lock,
+    MP_QSTR_lock,
+    MP_TYPE_FLAG_NONE,
+    locals_dict, &thread_lock_locals_dict
+    );
+
+/****************************************************************/
+// _thread module
+
+static size_t thread_stack_size = 0;
+
+static mp_obj_t mod_thread_get_ident(void) {
+    return mp_obj_new_int_from_uint(mp_thread_get_id());
+}
+static MP_DEFINE_CONST_FUN_OBJ_0(mod_thread_get_ident_obj, mod_thread_get_ident);
+
+static mp_obj_t mod_thread_stack_size(size_t n_args, const mp_obj_t *args) {
+    mp_obj_t ret = mp_obj_new_int_from_uint(thread_stack_size);
+    if (n_args == 0) {
+        thread_stack_size = 0;
+    } else {
+        thread_stack_size = mp_obj_get_int(args[0]);
+    }
+    return ret;
+}
+static MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mod_thread_stack_size_obj, 0, 1, mod_thread_stack_size);
+
+typedef struct _thread_entry_args_t {
+    mp_obj_dict_t *dict_locals;
+    mp_obj_dict_t *dict_globals;
+    size_t stack_size;
+    mp_obj_t fun;
+    size_t n_args;
+    size_t n_kw;
+    mp_obj_t args[];
+} thread_entry_args_t;
+
+static void *thread_entry(void *args_in) {
+    // Execution begins here for a new thread.  We do not have the GIL.
+
+    thread_entry_args_t *args = (thread_entry_args_t *)args_in;
+
+    mp_state_thread_t ts;
+    mp_thread_init_state(&ts, args->stack_size, args->dict_locals, args->dict_globals);
+
+    #if MICROPY_ENABLE_PYSTACK
+    // TODO threading and pystack is not fully supported, for now just make a small stack
+    mp_obj_t mini_pystack[128];
+    mp_pystack_init(mini_pystack, &mini_pystack[128]);
+    #endif
+
+    MP_THREAD_GIL_ENTER();
+
+    // signal that we are set up and running
+    mp_thread_start();
+
+    // TODO set more thread-specific state here:
+    //  cur_exception (root pointer)
+
+    DEBUG_printf("[thread] start ts=%p args=%p stack=%p\n", &ts, &args, MP_STATE_THREAD(stack_top));
+
+    nlr_buf_t nlr;
+    if (nlr_push(&nlr) == 0) {
+        mp_call_function_n_kw(args->fun, args->n_args, args->n_kw, args->args);
+        nlr_pop();
+    } else {
+        // uncaught exception
+        // check for SystemExit
+        mp_obj_base_t *exc = (mp_obj_base_t *)nlr.ret_val;
+        if (mp_obj_is_subclass_fast(MP_OBJ_FROM_PTR(exc->type), MP_OBJ_FROM_PTR(&mp_type_SystemExit))) {
+            // swallow exception silently
+        } else {
+            // print exception out
+            mp_printf(MICROPY_ERROR_PRINTER, "Unhandled exception in thread started by ");
+            mp_obj_print_helper(MICROPY_ERROR_PRINTER, args->fun, PRINT_REPR);
+            mp_printf(MICROPY_ERROR_PRINTER, "\n");
+            mp_obj_print_exception(MICROPY_ERROR_PRINTER, MP_OBJ_FROM_PTR(exc));
+        }
+    }
+
+    DEBUG_printf("[thread] finish ts=%p\n", &ts);
+
+    // signal that we are finished
+    mp_thread_finish();
+
+    MP_THREAD_GIL_EXIT();
+
+    return NULL;
+}
+
+static mp_obj_t mod_thread_start_new_thread(size_t n_args, const mp_obj_t *args) {
+    // This structure holds the Python function and arguments for thread entry.
+    // We copy all arguments into this structure to keep ownership of them.
+    // We must be very careful about root pointers because this pointer may
+    // disappear from our address space before the thread is created.
+    thread_entry_args_t *th_args;
+
+    // get positional arguments
+    size_t pos_args_len;
+    mp_obj_t *pos_args_items;
+    mp_obj_get_array(args[1], &pos_args_len, &pos_args_items);
+
+    // check for keyword arguments
+    if (n_args == 2) {
+        // just position arguments
+        th_args = m_new_obj_var(thread_entry_args_t, args, mp_obj_t, pos_args_len);
+        th_args->n_kw = 0;
+    } else {
+        // positional and keyword arguments
+        if (mp_obj_get_type(args[2]) != &mp_type_dict) {
+            mp_raise_TypeError(MP_ERROR_TEXT("expecting a dict for keyword args"));
+        }
+        mp_map_t *map = &((mp_obj_dict_t *)MP_OBJ_TO_PTR(args[2]))->map;
+        th_args = m_new_obj_var(thread_entry_args_t, args, mp_obj_t, pos_args_len + 2 * map->used);
+        th_args->n_kw = map->used;
+        // copy across the keyword arguments
+        for (size_t i = 0, n = pos_args_len; i < map->alloc; ++i) {
+            if (mp_map_slot_is_filled(map, i)) {
+                th_args->args[n++] = map->table[i].key;
+                th_args->args[n++] = map->table[i].value;
+            }
+        }
+    }
+
+    // copy across the positional arguments
+    th_args->n_args = pos_args_len;
+    memcpy(th_args->args, pos_args_items, pos_args_len * sizeof(mp_obj_t));
+
+    // pass our locals and globals into the new thread
+    th_args->dict_locals = mp_locals_get();
+    th_args->dict_globals = mp_globals_get();
+
+    // set the stack size to use
+    th_args->stack_size = thread_stack_size;
+
+    // set the function for thread entry
+    th_args->fun = args[0];
+
+    // spawn the thread!
+    return mp_obj_new_int_from_uint(mp_thread_create(thread_entry, th_args, &th_args->stack_size));
+}
+static MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mod_thread_start_new_thread_obj, 2, 3, mod_thread_start_new_thread);
+
+static mp_obj_t mod_thread_exit(void) {
+    mp_raise_type(&mp_type_SystemExit);
+}
+static MP_DEFINE_CONST_FUN_OBJ_0(mod_thread_exit_obj, mod_thread_exit);
+
+static mp_obj_t mod_thread_allocate_lock(void) {
+    return MP_OBJ_FROM_PTR(mp_obj_new_thread_lock());
+}
+static MP_DEFINE_CONST_FUN_OBJ_0(mod_thread_allocate_lock_obj, mod_thread_allocate_lock);
+
+static const mp_rom_map_elem_t mp_module_thread_globals_table[] = {
+    { MP_ROM_QSTR(MP_QSTR___name__), MP_ROM_QSTR(MP_QSTR__thread) },
+    { MP_ROM_QSTR(MP_QSTR_LockType), MP_ROM_PTR(&mp_type_thread_lock) },
+    { MP_ROM_QSTR(MP_QSTR_get_ident), MP_ROM_PTR(&mod_thread_get_ident_obj) },
+    { MP_ROM_QSTR(MP_QSTR_stack_size), MP_ROM_PTR(&mod_thread_stack_size_obj) },
+    { MP_ROM_QSTR(MP_QSTR_start_new_thread), MP_ROM_PTR(&mod_thread_start_new_thread_obj) },
+    { MP_ROM_QSTR(MP_QSTR_exit), MP_ROM_PTR(&mod_thread_exit_obj) },
+    { MP_ROM_QSTR(MP_QSTR_allocate_lock), MP_ROM_PTR(&mod_thread_allocate_lock_obj) },
+};
+
+static MP_DEFINE_CONST_DICT(mp_module_thread_globals, mp_module_thread_globals_table);
+
+const mp_obj_module_t mp_module_thread = {
+    .base = { &mp_type_module },
+    .globals = (mp_obj_dict_t *)&mp_module_thread_globals,
+};
+
+MP_REGISTER_MODULE(MP_QSTR__thread, mp_module_thread);
+
+#endif // MICROPY_PY_THREAD

+ 2081 - 0
mp_flipper/lib/micropython/py/mpconfig.h

@@ -0,0 +1,2081 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_MPCONFIG_H
+#define MICROPY_INCLUDED_PY_MPCONFIG_H
+
+// Current version of MicroPython. This is used by sys.implementation.version
+// as well as a fallback to generate MICROPY_GIT_TAG if the git repo or tags
+// are unavailable.
+#define MICROPY_VERSION_MAJOR 1
+#define MICROPY_VERSION_MINOR 23
+#define MICROPY_VERSION_MICRO 0
+#define MICROPY_VERSION_PRERELEASE 0
+
+// Combined version as a 32-bit number for convenience to allow version
+// comparison. Doesn't include prerelease state.
+// e.g. #if MICROPY_VERSION < MICROPY_MAKE_VERSION(1, 22, 0)
+#define MICROPY_MAKE_VERSION(major, minor, patch) (major << 16 | minor << 8 | patch)
+#define MICROPY_VERSION MICROPY_MAKE_VERSION(MICROPY_VERSION_MAJOR, MICROPY_VERSION_MINOR, MICROPY_VERSION_MICRO)
+
+// String version. This is only used directly for platform.platform and
+// os.uname().release. All other version info available in the firmware (e.g.
+// the REPL banner) comes from MICROPY_GIT_TAG.
+#define MICROPY_VERSION_STRING_BASE \
+    MP_STRINGIFY(MICROPY_VERSION_MAJOR) "." \
+    MP_STRINGIFY(MICROPY_VERSION_MINOR) "." \
+    MP_STRINGIFY(MICROPY_VERSION_MICRO)
+#if MICROPY_VERSION_PRERELEASE
+#define MICROPY_VERSION_STRING MICROPY_VERSION_STRING_BASE "-preview"
+#else
+#define MICROPY_VERSION_STRING MICROPY_VERSION_STRING_BASE
+#endif
+
+// If this is enabled, then in-progress/breaking changes slated for the 2.x
+// release will be enabled.
+#ifndef MICROPY_PREVIEW_VERSION_2
+#define MICROPY_PREVIEW_VERSION_2 (0)
+#endif
+
+// This file contains default configuration settings for MicroPython.
+// You can override any of the options below using mpconfigport.h file
+// located in a directory of your port.
+
+// mpconfigport.h is a file containing configuration settings for a
+// particular port. mpconfigport.h is actually a default name for
+// such config, and it can be overridden using MP_CONFIGFILE preprocessor
+// define (you can do that by passing CFLAGS_EXTRA='-DMP_CONFIGFILE="<file.h>"'
+// argument to make when using standard MicroPython makefiles).
+// This is useful to have more than one config per port, for example,
+// release vs debug configs, etc. Note that if you switch from one config
+// to another, you must rebuild from scratch using "-B" switch to make.
+
+// Disable all optional features (i.e. minimal port).
+#define MICROPY_CONFIG_ROM_LEVEL_MINIMUM (0)
+// Only enable core features (constrained flash, e.g. STM32L072)
+#define MICROPY_CONFIG_ROM_LEVEL_CORE_FEATURES (10)
+// Enable most common features (small on-device flash, e.g. STM32F411)
+#define MICROPY_CONFIG_ROM_LEVEL_BASIC_FEATURES (20)
+// Enable convenience features (medium on-device flash, e.g. STM32F405)
+#define MICROPY_CONFIG_ROM_LEVEL_EXTRA_FEATURES (30)
+// Enable all common features (large/external flash, rp2, unix)
+#define MICROPY_CONFIG_ROM_LEVEL_FULL_FEATURES (40)
+// Enable everything (e.g. coverage)
+#define MICROPY_CONFIG_ROM_LEVEL_EVERYTHING (50)
+
+#ifdef MP_CONFIGFILE
+#include MP_CONFIGFILE
+#else
+#include <mpconfigport.h>
+#endif
+
+// Ports/boards should set this, but default to level=core.
+#ifndef MICROPY_CONFIG_ROM_LEVEL
+#define MICROPY_CONFIG_ROM_LEVEL (MICROPY_CONFIG_ROM_LEVEL_CORE_FEATURES)
+#endif
+
+// Helper macros for "have at least this level".
+#define MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES (MICROPY_CONFIG_ROM_LEVEL >= MICROPY_CONFIG_ROM_LEVEL_CORE_FEATURES)
+#define MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_BASIC_FEATURES (MICROPY_CONFIG_ROM_LEVEL >= MICROPY_CONFIG_ROM_LEVEL_BASIC_FEATURES)
+#define MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES (MICROPY_CONFIG_ROM_LEVEL >= MICROPY_CONFIG_ROM_LEVEL_EXTRA_FEATURES)
+#define MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_FULL_FEATURES (MICROPY_CONFIG_ROM_LEVEL >= MICROPY_CONFIG_ROM_LEVEL_FULL_FEATURES)
+#define MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EVERYTHING (MICROPY_CONFIG_ROM_LEVEL >= MICROPY_CONFIG_ROM_LEVEL_EVERYTHING)
+
+// Any options not explicitly set in mpconfigport.h will get default
+// values below.
+
+/*****************************************************************************/
+/* Object representation                                                     */
+
+// A MicroPython object is a machine word having the following form:
+//  - xxxx...xxx1 : a small int, bits 1 and above are the value
+//  - xxxx...x010 : a qstr, bits 3 and above are the value
+//  - xxxx...x110 : an immediate object, bits 3 and above are the value
+//  - xxxx...xx00 : a pointer to an mp_obj_base_t (unless a fake object)
+#define MICROPY_OBJ_REPR_A (0)
+
+// A MicroPython object is a machine word having the following form:
+//  - xxxx...xx01 : a small int, bits 2 and above are the value
+//  - xxxx...x011 : a qstr, bits 3 and above are the value
+//  - xxxx...x111 : an immediate object, bits 3 and above are the value
+//  - xxxx...xxx0 : a pointer to an mp_obj_base_t (unless a fake object)
+#define MICROPY_OBJ_REPR_B (1)
+
+// A MicroPython object is a machine word having the following form (called R):
+//  - iiiiiiii iiiiiiii iiiiiiii iiiiiii1 small int with 31-bit signed value
+//  - 01111111 1qqqqqqq qqqqqqqq qqqq0110 str with 19-bit qstr value
+//  - 01111111 10000000 00000000 ssss1110 immediate object with 4-bit value
+//  - s1111111 10000000 00000000 00000010 +/- inf
+//  - s1111111 1xxxxxxx xxxxxxxx xxxxx010 nan, x != 0
+//  - seeeeeee efffffff ffffffff ffffff10 30-bit fp, e != 0xff
+//  - pppppppp pppppppp pppppppp pppppp00 ptr (4 byte alignment)
+// Str, immediate and float stored as O = R + 0x80800000, retrieved as R = O - 0x80800000.
+// This makes strs/immediates easier to encode/decode as they have zeros in the top 9 bits.
+// This scheme only works with 32-bit word size and float enabled.
+#define MICROPY_OBJ_REPR_C (2)
+
+// A MicroPython object is a 64-bit word having the following form (called R):
+//  - seeeeeee eeeeffff ffffffff ffffffff ffffffff ffffffff ffffffff ffffffff 64-bit fp, e != 0x7ff
+//  - s1111111 11110000 00000000 00000000 00000000 00000000 00000000 00000000 +/- inf
+//  - 01111111 11111000 00000000 00000000 00000000 00000000 00000000 00000000 normalised nan
+//  - 01111111 11111101 iiiiiiii iiiiiiii iiiiiiii iiiiiiii iiiiiiii iiiiiii1 small int
+//  - 01111111 11111110 00000000 00000000 qqqqqqqq qqqqqqqq qqqqqqqq qqqqqqq1 str
+//  - 01111111 11111111 ss000000 00000000 00000000 00000000 00000000 00000000 immediate object
+//  - 01111111 11111100 00000000 00000000 pppppppp pppppppp pppppppp pppppp00 ptr (4 byte alignment)
+// Stored as O = R + 0x8004000000000000, retrieved as R = O - 0x8004000000000000.
+// This makes pointers have all zeros in the top 32 bits.
+// Small-ints and strs have 1 as LSB to make sure they don't look like pointers
+// to the garbage collector.
+#define MICROPY_OBJ_REPR_D (3)
+
+#ifndef MICROPY_OBJ_REPR
+#define MICROPY_OBJ_REPR (MICROPY_OBJ_REPR_A)
+#endif
+
+// Whether to encode None/False/True as immediate objects instead of pointers to
+// real objects.  Reduces code size by a decent amount without hurting
+// performance, for all representations except D on some architectures.
+#ifndef MICROPY_OBJ_IMMEDIATE_OBJS
+#define MICROPY_OBJ_IMMEDIATE_OBJS (MICROPY_OBJ_REPR != MICROPY_OBJ_REPR_D)
+#endif
+
+/*****************************************************************************/
+/* Memory allocation policy                                                  */
+
+// Number of bytes in memory allocation/GC block. Any size allocated will be
+// rounded up to be multiples of this.
+#ifndef MICROPY_BYTES_PER_GC_BLOCK
+#define MICROPY_BYTES_PER_GC_BLOCK (4 * MP_BYTES_PER_OBJ_WORD)
+#endif
+
+// Number of words allocated (in BSS) to the GC stack (minimum is 1)
+#ifndef MICROPY_ALLOC_GC_STACK_SIZE
+#define MICROPY_ALLOC_GC_STACK_SIZE (64)
+#endif
+
+// The C-type to use for entries in the GC stack.  By default it allows the
+// heap to be as large as the address space, but the bit-width of this type can
+// be reduced to save memory when the heap is small enough.  The type must be
+// big enough to index all blocks in the heap, which is set by
+// heap-size-in-bytes / MICROPY_BYTES_PER_GC_BLOCK.
+#ifndef MICROPY_GC_STACK_ENTRY_TYPE
+#define MICROPY_GC_STACK_ENTRY_TYPE size_t
+#endif
+
+// Be conservative and always clear to zero newly (re)allocated memory in the GC.
+// This helps eliminate stray pointers that hold on to memory that's no longer
+// used.  It decreases performance due to unnecessary memory clearing.
+// A memory manager which always clears memory can set this to 0.
+// TODO Do analysis to understand why some memory is not properly cleared and
+// find a more efficient way to clear it.
+#ifndef MICROPY_GC_CONSERVATIVE_CLEAR
+#define MICROPY_GC_CONSERVATIVE_CLEAR (MICROPY_ENABLE_GC)
+#endif
+
+// Support automatic GC when reaching allocation threshold,
+// configurable by gc.threshold().
+#ifndef MICROPY_GC_ALLOC_THRESHOLD
+#define MICROPY_GC_ALLOC_THRESHOLD (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Number of bytes to allocate initially when creating new chunks to store
+// interned string data.  Smaller numbers lead to more chunks being needed
+// and more wastage at the end of the chunk.  Larger numbers lead to wasted
+// space at the end when no more strings need interning.
+#ifndef MICROPY_ALLOC_QSTR_CHUNK_INIT
+#define MICROPY_ALLOC_QSTR_CHUNK_INIT (128)
+#endif
+
+// Initial amount for lexer indentation level
+#ifndef MICROPY_ALLOC_LEXER_INDENT_INIT
+#define MICROPY_ALLOC_LEXER_INDENT_INIT (10)
+#endif
+
+// Increment for lexer indentation level
+#ifndef MICROPY_ALLOC_LEXEL_INDENT_INC
+#define MICROPY_ALLOC_LEXEL_INDENT_INC (8)
+#endif
+
+// Initial amount for parse rule stack
+#ifndef MICROPY_ALLOC_PARSE_RULE_INIT
+#define MICROPY_ALLOC_PARSE_RULE_INIT (64)
+#endif
+
+// Increment for parse rule stack
+#ifndef MICROPY_ALLOC_PARSE_RULE_INC
+#define MICROPY_ALLOC_PARSE_RULE_INC (16)
+#endif
+
+// Initial amount for parse result stack
+#ifndef MICROPY_ALLOC_PARSE_RESULT_INIT
+#define MICROPY_ALLOC_PARSE_RESULT_INIT (32)
+#endif
+
+// Increment for parse result stack
+#ifndef MICROPY_ALLOC_PARSE_RESULT_INC
+#define MICROPY_ALLOC_PARSE_RESULT_INC (16)
+#endif
+
+// Strings this length or less will be interned by the parser
+#ifndef MICROPY_ALLOC_PARSE_INTERN_STRING_LEN
+#define MICROPY_ALLOC_PARSE_INTERN_STRING_LEN (10)
+#endif
+
+// Number of bytes to allocate initially when creating new chunks to store
+// parse nodes.  Small leads to fragmentation, large leads to excess use.
+#ifndef MICROPY_ALLOC_PARSE_CHUNK_INIT
+#define MICROPY_ALLOC_PARSE_CHUNK_INIT (128)
+#endif
+
+// Initial amount for ids in a scope
+#ifndef MICROPY_ALLOC_SCOPE_ID_INIT
+#define MICROPY_ALLOC_SCOPE_ID_INIT (4)
+#endif
+
+// Increment for ids in a scope
+#ifndef MICROPY_ALLOC_SCOPE_ID_INC
+#define MICROPY_ALLOC_SCOPE_ID_INC (6)
+#endif
+
+// Maximum length of a path in the filesystem
+// So we can allocate a buffer on the stack for path manipulation in import
+#ifndef MICROPY_ALLOC_PATH_MAX
+#define MICROPY_ALLOC_PATH_MAX (512)
+#endif
+
+// Initial size of module dict
+#ifndef MICROPY_MODULE_DICT_SIZE
+#define MICROPY_MODULE_DICT_SIZE (1)
+#endif
+
+// Initial size of sys.modules dict
+#ifndef MICROPY_LOADED_MODULES_DICT_SIZE
+#define MICROPY_LOADED_MODULES_DICT_SIZE (3)
+#endif
+
+// Whether realloc/free should be passed allocated memory region size
+// You must enable this if MICROPY_MEM_STATS is enabled
+#ifndef MICROPY_MALLOC_USES_ALLOCATED_SIZE
+#define MICROPY_MALLOC_USES_ALLOCATED_SIZE (0)
+#endif
+
+// Number of bytes used to store qstr length
+// Dictates hard limit on maximum Python identifier length, but 1 byte
+// (limit of 255 bytes in an identifier) should be enough for everyone
+#ifndef MICROPY_QSTR_BYTES_IN_LEN
+#define MICROPY_QSTR_BYTES_IN_LEN (1)
+#endif
+
+// Number of bytes used to store qstr hash
+#ifndef MICROPY_QSTR_BYTES_IN_HASH
+#if MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES
+#define MICROPY_QSTR_BYTES_IN_HASH (2)
+#elif MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES
+#define MICROPY_QSTR_BYTES_IN_HASH (1)
+#else
+#define MICROPY_QSTR_BYTES_IN_HASH (0)
+#endif
+#endif
+
+// Avoid using C stack when making Python function calls. C stack still
+// may be used if there's no free heap.
+#ifndef MICROPY_STACKLESS
+#define MICROPY_STACKLESS (0)
+#endif
+
+// Never use C stack when making Python function calls. This may break
+// testsuite as will subtly change which exception is thrown in case
+// of too deep recursion and other similar cases.
+#ifndef MICROPY_STACKLESS_STRICT
+#define MICROPY_STACKLESS_STRICT (0)
+#endif
+
+// Don't use alloca calls. As alloca() is not part of ANSI C, this
+// workaround option is provided for compilers lacking this de-facto
+// standard function. The way it works is allocating from heap, and
+// relying on garbage collection to free it eventually. This is of
+// course much less optimal than real alloca().
+#if defined(MICROPY_NO_ALLOCA) && MICROPY_NO_ALLOCA
+#undef alloca
+#define alloca(x) m_malloc(x)
+#endif
+
+/*****************************************************************************/
+/* MicroPython emitters                                                     */
+
+// Whether to support loading of persistent code
+#ifndef MICROPY_PERSISTENT_CODE_LOAD
+#define MICROPY_PERSISTENT_CODE_LOAD (0)
+#endif
+
+// Whether to support saving of persistent code, i.e. for mpy-cross to
+// generate .mpy files. Enabling this enables additional metadata on raw code
+// objects which is also required for sys.settrace.
+#ifndef MICROPY_PERSISTENT_CODE_SAVE
+#define MICROPY_PERSISTENT_CODE_SAVE (MICROPY_PY_SYS_SETTRACE)
+#endif
+
+// Whether to support saving persistent code to a file via mp_raw_code_save_file
+#ifndef MICROPY_PERSISTENT_CODE_SAVE_FILE
+#define MICROPY_PERSISTENT_CODE_SAVE_FILE (0)
+#endif
+
+// Whether generated code can persist independently of the VM/runtime instance
+// This is enabled automatically when needed by other features
+#ifndef MICROPY_PERSISTENT_CODE
+#define MICROPY_PERSISTENT_CODE (MICROPY_PERSISTENT_CODE_LOAD || MICROPY_PERSISTENT_CODE_SAVE || MICROPY_MODULE_FROZEN_MPY)
+#endif
+
+// Whether bytecode uses a qstr_table to map internal qstr indices in the bytecode
+// to global qstr values in the runtime (behaviour when feature is enabled), or
+// just stores global qstr values directly in the bytecode.  This must be enabled
+// if MICROPY_PERSISTENT_CODE is enabled.
+#ifndef MICROPY_EMIT_BYTECODE_USES_QSTR_TABLE
+#define MICROPY_EMIT_BYTECODE_USES_QSTR_TABLE (MICROPY_PERSISTENT_CODE)
+#endif
+
+// Whether to emit x64 native code
+#ifndef MICROPY_EMIT_X64
+#define MICROPY_EMIT_X64 (0)
+#endif
+
+// Whether to emit x86 native code
+#ifndef MICROPY_EMIT_X86
+#define MICROPY_EMIT_X86 (0)
+#endif
+
+// Whether to emit thumb native code
+#ifndef MICROPY_EMIT_THUMB
+#define MICROPY_EMIT_THUMB (0)
+#endif
+
+// Whether to emit ARMv7-M instruction support in thumb native code
+#ifndef MICROPY_EMIT_THUMB_ARMV7M
+#define MICROPY_EMIT_THUMB_ARMV7M (1)
+#endif
+
+// Whether to enable the thumb inline assembler
+#ifndef MICROPY_EMIT_INLINE_THUMB
+#define MICROPY_EMIT_INLINE_THUMB (0)
+#endif
+
+// Whether to enable float support in the Thumb2 inline assembler
+#ifndef MICROPY_EMIT_INLINE_THUMB_FLOAT
+#define MICROPY_EMIT_INLINE_THUMB_FLOAT (1)
+#endif
+
+// Whether to emit ARM native code
+#ifndef MICROPY_EMIT_ARM
+#define MICROPY_EMIT_ARM (0)
+#endif
+
+// Whether to emit Xtensa native code
+#ifndef MICROPY_EMIT_XTENSA
+#define MICROPY_EMIT_XTENSA (0)
+#endif
+
+// Whether to enable the Xtensa inline assembler
+#ifndef MICROPY_EMIT_INLINE_XTENSA
+#define MICROPY_EMIT_INLINE_XTENSA (0)
+#endif
+
+// Whether to emit Xtensa-Windowed native code
+#ifndef MICROPY_EMIT_XTENSAWIN
+#define MICROPY_EMIT_XTENSAWIN (0)
+#endif
+
+// Convenience definition for whether any native emitter is enabled
+#define MICROPY_EMIT_NATIVE (MICROPY_EMIT_X64 || MICROPY_EMIT_X86 || MICROPY_EMIT_THUMB || MICROPY_EMIT_ARM || MICROPY_EMIT_XTENSA || MICROPY_EMIT_XTENSAWIN)
+
+// Some architectures cannot read byte-wise from executable memory.  In this case
+// the prelude for a native function (which usually sits after the machine code)
+// must be separated and placed somewhere where it can be read byte-wise.
+#define MICROPY_EMIT_NATIVE_PRELUDE_SEPARATE_FROM_MACHINE_CODE (MICROPY_EMIT_XTENSAWIN)
+
+// Convenience definition for whether any inline assembler emitter is enabled
+#define MICROPY_EMIT_INLINE_ASM (MICROPY_EMIT_INLINE_THUMB || MICROPY_EMIT_INLINE_XTENSA)
+
+// Convenience definition for whether any native or inline assembler emitter is enabled
+#define MICROPY_EMIT_MACHINE_CODE (MICROPY_EMIT_NATIVE || MICROPY_EMIT_INLINE_ASM)
+
+// Whether native relocatable code loaded from .mpy files is explicitly tracked
+// so that the GC cannot reclaim it.  Needed on architectures that allocate
+// executable memory on the MicroPython heap and don't explicitly track this
+// data some other way.
+#ifndef MICROPY_PERSISTENT_CODE_TRACK_RELOC_CODE
+#if !MICROPY_EMIT_MACHINE_CODE || defined(MP_PLAT_ALLOC_EXEC) || defined(MP_PLAT_COMMIT_EXEC)
+#define MICROPY_PERSISTENT_CODE_TRACK_RELOC_CODE (0)
+#else
+#define MICROPY_PERSISTENT_CODE_TRACK_RELOC_CODE (1)
+#endif
+#endif
+
+/*****************************************************************************/
+/* Compiler configuration                                                    */
+
+// Whether to include the compiler
+#ifndef MICROPY_ENABLE_COMPILER
+#define MICROPY_ENABLE_COMPILER (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Whether the compiler is dynamically configurable (ie at runtime)
+// This will disable the ability to execute native/viper code
+#ifndef MICROPY_DYNAMIC_COMPILER
+#define MICROPY_DYNAMIC_COMPILER (0)
+#endif
+
+// Whether the compiler allows compiling top-level await expressions
+#ifndef MICROPY_COMP_ALLOW_TOP_LEVEL_AWAIT
+#define MICROPY_COMP_ALLOW_TOP_LEVEL_AWAIT (0)
+#endif
+
+// Whether to enable constant folding; eg 1+2 rewritten as 3
+#ifndef MICROPY_COMP_CONST_FOLDING
+#define MICROPY_COMP_CONST_FOLDING (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Whether to compile constant tuples immediately to their respective objects; eg (1, True)
+// Otherwise the tuple will be built at runtime
+#ifndef MICROPY_COMP_CONST_TUPLE
+#define MICROPY_COMP_CONST_TUPLE (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Whether to enable optimisations for constant literals, eg OrderedDict
+#ifndef MICROPY_COMP_CONST_LITERAL
+#define MICROPY_COMP_CONST_LITERAL (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Whether to enable lookup of constants in modules; eg module.CONST
+#ifndef MICROPY_COMP_MODULE_CONST
+#define MICROPY_COMP_MODULE_CONST (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to enable constant optimisation; id = const(value)
+#ifndef MICROPY_COMP_CONST
+#define MICROPY_COMP_CONST (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Whether to enable optimisation of: a, b = c, d
+// Costs 124 bytes (Thumb2)
+#ifndef MICROPY_COMP_DOUBLE_TUPLE_ASSIGN
+#define MICROPY_COMP_DOUBLE_TUPLE_ASSIGN (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Whether to enable optimisation of: a, b, c = d, e, f
+// Requires MICROPY_COMP_DOUBLE_TUPLE_ASSIGN and costs 68 bytes (Thumb2)
+#ifndef MICROPY_COMP_TRIPLE_TUPLE_ASSIGN
+#define MICROPY_COMP_TRIPLE_TUPLE_ASSIGN (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to enable optimisation of: return a if b else c
+// Costs about 80 bytes (Thumb2) and saves 2 bytes of bytecode for each use
+#ifndef MICROPY_COMP_RETURN_IF_EXPR
+#define MICROPY_COMP_RETURN_IF_EXPR (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+/*****************************************************************************/
+/* Internal debugging stuff                                                  */
+
+// Whether to collect memory allocation stats
+#ifndef MICROPY_MEM_STATS
+#define MICROPY_MEM_STATS (0)
+#endif
+
+// The mp_print_t printer used for debugging output
+#ifndef MICROPY_DEBUG_PRINTER
+#define MICROPY_DEBUG_PRINTER (&mp_plat_print)
+#endif
+
+// Whether to build functions that print debugging info:
+//   mp_bytecode_print
+//   mp_parse_node_print
+#ifndef MICROPY_DEBUG_PRINTERS
+#define MICROPY_DEBUG_PRINTERS (0)
+#endif
+
+// Whether to enable all debugging outputs (it will be extremely verbose)
+#ifndef MICROPY_DEBUG_VERBOSE
+#define MICROPY_DEBUG_VERBOSE (0)
+#endif
+
+// Whether to enable debugging versions of MP_OBJ_NULL/STOP_ITERATION/SENTINEL
+#ifndef MICROPY_DEBUG_MP_OBJ_SENTINELS
+#define MICROPY_DEBUG_MP_OBJ_SENTINELS (0)
+#endif
+
+// Whether to print parse rule names (rather than integers) in mp_parse_node_print
+#ifndef MICROPY_DEBUG_PARSE_RULE_NAME
+#define MICROPY_DEBUG_PARSE_RULE_NAME (0)
+#endif
+
+// Whether to enable a simple VM stack overflow check
+#ifndef MICROPY_DEBUG_VM_STACK_OVERFLOW
+#define MICROPY_DEBUG_VM_STACK_OVERFLOW (0)
+#endif
+
+// Whether to enable extra instrumentation for valgrind
+#ifndef MICROPY_DEBUG_VALGRIND
+#define MICROPY_DEBUG_VALGRIND (0)
+#endif
+
+/*****************************************************************************/
+/* Optimisations                                                             */
+
+// Whether to use computed gotos in the VM, or a switch
+// Computed gotos are roughly 10% faster, and increase VM code size by a little,
+// e.g. ~1kiB on Cortex M4.
+// Note: enabling this will use the gcc-specific extensions of ranged designated
+// initialisers and addresses of labels, which are not part of the C99 standard.
+#ifndef MICROPY_OPT_COMPUTED_GOTO
+#define MICROPY_OPT_COMPUTED_GOTO (0)
+#endif
+
+// Optimise the fast path for loading attributes from instance types. Increases
+// Thumb2 code size by about 48 bytes.
+#ifndef MICROPY_OPT_LOAD_ATTR_FAST_PATH
+#define MICROPY_OPT_LOAD_ATTR_FAST_PATH (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Use extra RAM to cache map lookups by remembering the likely location of
+// the index. Avoids the hash computation on unordered maps, and avoids the
+// linear search on ordered (especially in-ROM) maps. Can provide a +10-15%
+// performance improvement on benchmarks involving lots of attribute access
+// or dictionary lookup.
+#ifndef MICROPY_OPT_MAP_LOOKUP_CACHE
+#define MICROPY_OPT_MAP_LOOKUP_CACHE (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// How much RAM (in bytes) to use for the map lookup cache.
+#ifndef MICROPY_OPT_MAP_LOOKUP_CACHE_SIZE
+#define MICROPY_OPT_MAP_LOOKUP_CACHE_SIZE (128)
+#endif
+
+// Whether to use fast versions of bitwise operations (and, or, xor) when the
+// arguments are both positive.  Increases Thumb2 code size by about 250 bytes.
+#ifndef MICROPY_OPT_MPZ_BITWISE
+#define MICROPY_OPT_MPZ_BITWISE (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+
+// Whether math.factorial is large, fast and recursive (1) or small and slow (0).
+#ifndef MICROPY_OPT_MATH_FACTORIAL
+#define MICROPY_OPT_MATH_FACTORIAL (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+/*****************************************************************************/
+/* Python internal features                                                  */
+
+// Use a special long jump in nlrthumb.c, which may be necessary if nlr.o and
+// nlrthumb.o are linked far apart from each other.
+#ifndef MICROPY_NLR_THUMB_USE_LONG_JUMP
+#define MICROPY_NLR_THUMB_USE_LONG_JUMP (0)
+#endif
+
+// Whether to enable import of external modules
+// When disabled, only importing of built-in modules is supported
+// When enabled, a port must implement mp_import_stat (among other things)
+#ifndef MICROPY_ENABLE_EXTERNAL_IMPORT
+#define MICROPY_ENABLE_EXTERNAL_IMPORT (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Whether to use the POSIX reader for importing files
+#ifndef MICROPY_READER_POSIX
+#define MICROPY_READER_POSIX (0)
+#endif
+
+// Whether to use the VFS reader for importing files
+#ifndef MICROPY_READER_VFS
+#define MICROPY_READER_VFS (0)
+#endif
+
+// Whether any readers have been defined
+#ifndef MICROPY_HAS_FILE_READER
+#define MICROPY_HAS_FILE_READER (MICROPY_READER_POSIX || MICROPY_READER_VFS)
+#endif
+
+// Hook for the VM at the start of the opcode loop (can contain variable
+// definitions usable by the other hook functions)
+#ifndef MICROPY_VM_HOOK_INIT
+#define MICROPY_VM_HOOK_INIT
+#endif
+
+// Hook for the VM during the opcode loop (but only after jump opcodes)
+#ifndef MICROPY_VM_HOOK_LOOP
+#define MICROPY_VM_HOOK_LOOP
+#endif
+
+// Hook for the VM just before return opcode is finished being interpreted
+#ifndef MICROPY_VM_HOOK_RETURN
+#define MICROPY_VM_HOOK_RETURN
+#endif
+
+// Hook for mp_sched_schedule when a function gets scheduled on sched_queue
+// (this macro executes within an atomic section)
+#ifndef MICROPY_SCHED_HOOK_SCHEDULED
+#define MICROPY_SCHED_HOOK_SCHEDULED
+#endif
+
+// Whether to include the garbage collector
+#ifndef MICROPY_ENABLE_GC
+#define MICROPY_ENABLE_GC (0)
+#endif
+
+// Whether the garbage-collected heap can be split over multiple memory areas.
+#ifndef MICROPY_GC_SPLIT_HEAP
+#define MICROPY_GC_SPLIT_HEAP (0)
+#endif
+
+// Whether regions should be added/removed from the split heap as needed.
+#ifndef MICROPY_GC_SPLIT_HEAP_AUTO
+#define MICROPY_GC_SPLIT_HEAP_AUTO (0)
+#endif
+
+// Hook to run code during time consuming garbage collector operations
+// *i* is the loop index variable (e.g. can be used to run every x loops)
+#ifndef MICROPY_GC_HOOK_LOOP
+#define MICROPY_GC_HOOK_LOOP(i)
+#endif
+
+// Whether to provide m_tracked_calloc, m_tracked_free functions
+#ifndef MICROPY_TRACKED_ALLOC
+#define MICROPY_TRACKED_ALLOC (0)
+#endif
+
+// Whether to enable finalisers in the garbage collector (ie call __del__)
+#ifndef MICROPY_ENABLE_FINALISER
+#define MICROPY_ENABLE_FINALISER (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to enable a separate allocator for the Python stack.
+// If enabled then the code must call mp_pystack_init before mp_init.
+#ifndef MICROPY_ENABLE_PYSTACK
+#define MICROPY_ENABLE_PYSTACK (0)
+#endif
+
+// Number of bytes that memory returned by mp_pystack_alloc will be aligned by.
+#ifndef MICROPY_PYSTACK_ALIGN
+#define MICROPY_PYSTACK_ALIGN (8)
+#endif
+
+// Whether to check C stack usage. C stack used for calling Python functions,
+// etc. Not checking means segfault on overflow.
+#ifndef MICROPY_STACK_CHECK
+#define MICROPY_STACK_CHECK (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to have an emergency exception buffer
+#ifndef MICROPY_ENABLE_EMERGENCY_EXCEPTION_BUF
+#define MICROPY_ENABLE_EMERGENCY_EXCEPTION_BUF (0)
+#endif
+#if MICROPY_ENABLE_EMERGENCY_EXCEPTION_BUF
+#ifndef MICROPY_EMERGENCY_EXCEPTION_BUF_SIZE
+#define MICROPY_EMERGENCY_EXCEPTION_BUF_SIZE (0)      // 0 - implies dynamic allocation
+#endif
+#endif
+
+// Whether to provide the mp_kbd_exception object, and micropython.kbd_intr function
+#ifndef MICROPY_KBD_EXCEPTION
+#define MICROPY_KBD_EXCEPTION (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Prefer to raise KeyboardInterrupt asynchronously (from signal or interrupt
+// handler) - if supported by a particular port.
+#ifndef MICROPY_ASYNC_KBD_INTR
+#define MICROPY_ASYNC_KBD_INTR (0)
+#endif
+
+// Whether to include REPL helper function
+#ifndef MICROPY_HELPER_REPL
+#define MICROPY_HELPER_REPL (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Allow enabling debug prints after each REPL line
+#ifndef MICROPY_REPL_INFO
+#define MICROPY_REPL_INFO (0)
+#endif
+
+// Whether to include emacs-style readline behavior in REPL
+#ifndef MICROPY_REPL_EMACS_KEYS
+#define MICROPY_REPL_EMACS_KEYS (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to include emacs-style word movement/kill readline behavior in REPL.
+// This adds Alt+F, Alt+B, Alt+D and Alt+Backspace for forward-word, backward-word, forward-kill-word
+// and backward-kill-word, respectively.
+#ifndef MICROPY_REPL_EMACS_WORDS_MOVE
+#define MICROPY_REPL_EMACS_WORDS_MOVE (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EVERYTHING)
+#endif
+
+// Whether to include extra convenience keys for word movement/kill in readline REPL.
+// This adds Ctrl+Right, Ctrl+Left and Ctrl+W for forward-word, backward-word and backward-kill-word
+// respectively. Ctrl+Delete is not implemented because it's a very different escape sequence.
+// Depends on MICROPY_REPL_EMACS_WORDS_MOVE.
+#ifndef MICROPY_REPL_EMACS_EXTRA_WORDS_MOVE
+#define MICROPY_REPL_EMACS_EXTRA_WORDS_MOVE (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EVERYTHING)
+#endif
+
+// Whether to implement auto-indent in REPL
+#ifndef MICROPY_REPL_AUTO_INDENT
+#define MICROPY_REPL_AUTO_INDENT (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether port requires event-driven REPL functions
+#ifndef MICROPY_REPL_EVENT_DRIVEN
+#define MICROPY_REPL_EVENT_DRIVEN (0)
+#endif
+
+// The number of items to keep in the readline history.
+#ifndef MICROPY_READLINE_HISTORY_SIZE
+#define MICROPY_READLINE_HISTORY_SIZE (8)
+#endif
+
+// Whether to include lexer helper function for unix
+#ifndef MICROPY_HELPER_LEXER_UNIX
+#define MICROPY_HELPER_LEXER_UNIX (0)
+#endif
+
+// Long int implementation
+#define MICROPY_LONGINT_IMPL_NONE (0)
+#define MICROPY_LONGINT_IMPL_LONGLONG (1)
+#define MICROPY_LONGINT_IMPL_MPZ (2)
+
+#ifndef MICROPY_LONGINT_IMPL
+#define MICROPY_LONGINT_IMPL (MICROPY_LONGINT_IMPL_NONE)
+#endif
+
+#if MICROPY_LONGINT_IMPL == MICROPY_LONGINT_IMPL_LONGLONG
+typedef long long mp_longint_impl_t;
+#endif
+
+// Whether to include information in the byte code to determine source
+// line number (increases RAM usage, but doesn't slow byte code execution)
+#ifndef MICROPY_ENABLE_SOURCE_LINE
+#define MICROPY_ENABLE_SOURCE_LINE (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to include doc strings (increases RAM usage)
+#ifndef MICROPY_ENABLE_DOC_STRING
+#define MICROPY_ENABLE_DOC_STRING (0)
+#endif
+
+// Exception messages are removed (requires disabling MICROPY_ROM_TEXT_COMPRESSION)
+#define MICROPY_ERROR_REPORTING_NONE     (0)
+// Exception messages are short static strings
+#define MICROPY_ERROR_REPORTING_TERSE    (1)
+// Exception messages provide basic error details
+#define MICROPY_ERROR_REPORTING_NORMAL   (2)
+// Exception messages provide full info, e.g. object names
+#define MICROPY_ERROR_REPORTING_DETAILED (3)
+
+#ifndef MICROPY_ERROR_REPORTING
+#if MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_FULL_FEATURES
+#define MICROPY_ERROR_REPORTING (MICROPY_ERROR_REPORTING_DETAILED)
+#elif MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES
+#define MICROPY_ERROR_REPORTING (MICROPY_ERROR_REPORTING_NORMAL)
+#else
+#define MICROPY_ERROR_REPORTING (MICROPY_ERROR_REPORTING_TERSE)
+#endif
+#endif
+
+// Whether issue warnings during compiling/execution
+#ifndef MICROPY_WARNINGS
+#define MICROPY_WARNINGS (0)
+#endif
+
+// Whether to support warning categories
+#ifndef MICROPY_WARNINGS_CATEGORY
+#define MICROPY_WARNINGS_CATEGORY (0)
+#endif
+
+// This macro is used when printing runtime warnings and errors
+#ifndef MICROPY_ERROR_PRINTER
+#define MICROPY_ERROR_PRINTER (&mp_plat_print)
+#endif
+
+// Float and complex implementation
+#define MICROPY_FLOAT_IMPL_NONE (0)
+#define MICROPY_FLOAT_IMPL_FLOAT (1)
+#define MICROPY_FLOAT_IMPL_DOUBLE (2)
+
+#ifndef MICROPY_FLOAT_IMPL
+#define MICROPY_FLOAT_IMPL (MICROPY_FLOAT_IMPL_NONE)
+#endif
+
+#if MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_FLOAT
+#define MICROPY_PY_BUILTINS_FLOAT (1)
+#define MICROPY_FLOAT_CONST(x) x##F
+#define MICROPY_FLOAT_C_FUN(fun) fun##f
+typedef float mp_float_t;
+#elif MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_DOUBLE
+#define MICROPY_PY_BUILTINS_FLOAT (1)
+#define MICROPY_FLOAT_CONST(x) x
+#define MICROPY_FLOAT_C_FUN(fun) fun
+typedef double mp_float_t;
+#else
+#define MICROPY_PY_BUILTINS_FLOAT (0)
+#endif
+
+#ifndef MICROPY_PY_BUILTINS_COMPLEX
+#define MICROPY_PY_BUILTINS_COMPLEX (MICROPY_PY_BUILTINS_FLOAT)
+#endif
+
+// Whether to use the native _Float16 for 16-bit float support
+#ifndef MICROPY_FLOAT_USE_NATIVE_FLT16
+#ifdef __FLT16_MAX__
+#define MICROPY_FLOAT_USE_NATIVE_FLT16 (1)
+#else
+#define MICROPY_FLOAT_USE_NATIVE_FLT16 (0)
+#endif
+#endif
+
+// Whether to provide a high-quality hash for float and complex numbers.
+// Otherwise the default is a very simple but correct hashing function.
+#ifndef MICROPY_FLOAT_HIGH_QUALITY_HASH
+#define MICROPY_FLOAT_HIGH_QUALITY_HASH (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EVERYTHING)
+#endif
+
+// Enable features which improve CPython compatibility
+// but may lead to more code size/memory usage.
+// TODO: Originally intended as generic category to not
+// add bunch of once-off options. May need refactoring later
+#ifndef MICROPY_CPYTHON_COMPAT
+#define MICROPY_CPYTHON_COMPAT (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Perform full checks as done by CPython. Disabling this
+// may produce incorrect results, if incorrect data is fed,
+// but should not lead to MicroPython crashes or similar
+// grave issues (in other words, only the user app should be
+// affected, not the system).
+#ifndef MICROPY_FULL_CHECKS
+#define MICROPY_FULL_CHECKS (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Whether POSIX-semantics non-blocking streams are supported
+#ifndef MICROPY_STREAMS_NON_BLOCK
+#define MICROPY_STREAMS_NON_BLOCK (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to provide stream functions with POSIX-like signatures
+// (useful for porting existing libraries to MicroPython).
+#ifndef MICROPY_STREAMS_POSIX_API
+#define MICROPY_STREAMS_POSIX_API (0)
+#endif
+
+// Whether modules can use MP_REGISTER_MODULE_DELEGATION() to delegate failed
+// attribute lookups to a custom handler function.
+#ifndef MICROPY_MODULE_ATTR_DELEGATION
+#define MICROPY_MODULE_ATTR_DELEGATION (MICROPY_PY_SYS_ATTR_DELEGATION || MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to call __init__ when importing builtin modules for the first time.
+// Modules using this need to handle the possibility that __init__ might be
+// called multiple times.
+#ifndef MICROPY_MODULE_BUILTIN_INIT
+#define MICROPY_MODULE_BUILTIN_INIT (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to allow built-in modules to have sub-packages (by making the
+// sub-package a member of their locals dict). Sub-packages should not be
+// registered with MP_REGISTER_MODULE, instead they should be added as
+// members of the parent's globals dict. To match CPython behavior,
+// their __name__ should be "foo.bar" (i.e. QSTR_foo_dot_bar) which will
+// require an entry in qstrdefs, although it does also work to just call
+// it "bar". Also, because subpackages can be accessed without being
+// imported (e.g. as foo.bar after `import foo`), they should not
+// have __init__ methods. Instead, the top-level package's __init__ should
+// initialise all sub-packages.
+#ifndef MICROPY_MODULE_BUILTIN_SUBPACKAGES
+#define MICROPY_MODULE_BUILTIN_SUBPACKAGES (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EVERYTHING)
+#endif
+
+// Whether to support module-level __getattr__ (see PEP 562)
+#ifndef MICROPY_MODULE_GETATTR
+#define MICROPY_MODULE_GETATTR (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Whether to enable importing foo.py with __name__ set to '__main__'
+// Used by the unix port for the -m flag.
+#ifndef MICROPY_MODULE_OVERRIDE_MAIN_IMPORT
+#define MICROPY_MODULE_OVERRIDE_MAIN_IMPORT (0)
+#endif
+
+// Whether frozen modules are supported in the form of strings
+#ifndef MICROPY_MODULE_FROZEN_STR
+#define MICROPY_MODULE_FROZEN_STR (0)
+#endif
+
+// Whether frozen modules are supported in the form of .mpy files
+#ifndef MICROPY_MODULE_FROZEN_MPY
+#define MICROPY_MODULE_FROZEN_MPY (0)
+#endif
+
+// Convenience macro for whether frozen modules are supported
+#ifndef MICROPY_MODULE_FROZEN
+#define MICROPY_MODULE_FROZEN (MICROPY_MODULE_FROZEN_STR || MICROPY_MODULE_FROZEN_MPY)
+#endif
+
+// Whether you can override builtins in the builtins module
+#ifndef MICROPY_CAN_OVERRIDE_BUILTINS
+#define MICROPY_CAN_OVERRIDE_BUILTINS (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to check that the "self" argument of a builtin method has the
+// correct type.  Such an explicit check is only needed if a builtin
+// method escapes to Python land without a first argument, eg
+// list.append([], 1).  Without this check such calls will have undefined
+// behaviour (usually segfault) if the first argument is the wrong type.
+#ifndef MICROPY_BUILTIN_METHOD_CHECK_SELF_ARG
+#define MICROPY_BUILTIN_METHOD_CHECK_SELF_ARG (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Whether to use internally defined errno's (otherwise system provided ones)
+#ifndef MICROPY_USE_INTERNAL_ERRNO
+#define MICROPY_USE_INTERNAL_ERRNO (0)
+#endif
+
+// Whether to use internally defined *printf() functions (otherwise external ones)
+#ifndef MICROPY_USE_INTERNAL_PRINTF
+#define MICROPY_USE_INTERNAL_PRINTF (1)
+#endif
+
+// The mp_print_t printer used for printf output when MICROPY_USE_INTERNAL_PRINTF is enabled
+#ifndef MICROPY_INTERNAL_PRINTF_PRINTER
+#define MICROPY_INTERNAL_PRINTF_PRINTER (&mp_plat_print)
+#endif
+
+// Whether to support mp_sched_vm_abort to asynchronously abort to the top level.
+#ifndef MICROPY_ENABLE_VM_ABORT
+#define MICROPY_ENABLE_VM_ABORT (0)
+#endif
+
+// Support for internal scheduler
+#ifndef MICROPY_ENABLE_SCHEDULER
+#define MICROPY_ENABLE_SCHEDULER (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether the scheduler supports scheduling static nodes with C callbacks
+#ifndef MICROPY_SCHEDULER_STATIC_NODES
+#define MICROPY_SCHEDULER_STATIC_NODES (0)
+#endif
+
+// Maximum number of entries in the scheduler
+#ifndef MICROPY_SCHEDULER_DEPTH
+#define MICROPY_SCHEDULER_DEPTH (4)
+#endif
+
+// Support for generic VFS sub-system
+#ifndef MICROPY_VFS
+#define MICROPY_VFS (0)
+#endif
+
+// Support for VFS POSIX component, to mount a POSIX filesystem within VFS
+#ifndef MICROPY_VFS_POSIX
+#define MICROPY_VFS_POSIX (0)
+#endif
+
+// Support for VFS FAT component, to mount a FAT filesystem within VFS
+#ifndef MICROPY_VFS_FAT
+#define MICROPY_VFS_FAT (0)
+#endif
+
+// Support for VFS LittleFS v1 component, to mount a LFSv1 filesystem within VFS
+#ifndef MICROPY_VFS_LFS1
+#define MICROPY_VFS_LFS1 (0)
+#endif
+
+// Support for VFS LittleFS v2 component, to mount a LFSv2 filesystem within VFS
+#ifndef MICROPY_VFS_LFS2
+#define MICROPY_VFS_LFS2 (0)
+#endif
+
+/*****************************************************************************/
+/* Fine control over Python builtins, classes, modules, etc                  */
+
+// Whether to support multiple inheritance of Python classes.  Multiple
+// inheritance makes some C functions inherently recursive, and adds a bit of
+// code overhead.
+#ifndef MICROPY_MULTIPLE_INHERITANCE
+#define MICROPY_MULTIPLE_INHERITANCE (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Whether to implement attributes on functions
+#ifndef MICROPY_PY_FUNCTION_ATTRS
+#define MICROPY_PY_FUNCTION_ATTRS (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to support the descriptors __get__, __set__, __delete__
+// This costs some code size and makes load/store/delete of instance
+// attributes slower for the classes that use this feature
+#ifndef MICROPY_PY_DESCRIPTORS
+#define MICROPY_PY_DESCRIPTORS (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to support class __delattr__ and __setattr__ methods
+// This costs some code size and makes store/delete of instance
+// attributes slower for the classes that use this feature
+#ifndef MICROPY_PY_DELATTR_SETATTR
+#define MICROPY_PY_DELATTR_SETATTR (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Support for async/await/async for/async with
+#ifndef MICROPY_PY_ASYNC_AWAIT
+#define MICROPY_PY_ASYNC_AWAIT (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Support for literal string interpolation, f-strings (see PEP 498, Python 3.6+)
+#ifndef MICROPY_PY_FSTRINGS
+#define MICROPY_PY_FSTRINGS (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Support for assignment expressions with := (see PEP 572, Python 3.8+)
+#ifndef MICROPY_PY_ASSIGN_EXPR
+#define MICROPY_PY_ASSIGN_EXPR (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Non-standard .pend_throw() method for generators, allowing for
+// Future-like behavior with respect to exception handling: an
+// exception set with .pend_throw() will activate on the next call
+// to generator's .send() or .__next__(). (This is useful to implement
+// async schedulers.)
+#ifndef MICROPY_PY_GENERATOR_PEND_THROW
+#define MICROPY_PY_GENERATOR_PEND_THROW (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Issue a warning when comparing str and bytes objects
+#ifndef MICROPY_PY_STR_BYTES_CMP_WARN
+#define MICROPY_PY_STR_BYTES_CMP_WARN (0)
+#endif
+
+// Add bytes.hex and bytes.fromhex
+#ifndef MICROPY_PY_BUILTINS_BYTES_HEX
+#define MICROPY_PY_BUILTINS_BYTES_HEX (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether str object is proper unicode
+#ifndef MICROPY_PY_BUILTINS_STR_UNICODE
+#define MICROPY_PY_BUILTINS_STR_UNICODE (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to check for valid UTF-8 when converting bytes to str
+#ifndef MICROPY_PY_BUILTINS_STR_UNICODE_CHECK
+#define MICROPY_PY_BUILTINS_STR_UNICODE_CHECK (MICROPY_PY_BUILTINS_STR_UNICODE)
+#endif
+
+// Whether str.center() method provided
+#ifndef MICROPY_PY_BUILTINS_STR_CENTER
+#define MICROPY_PY_BUILTINS_STR_CENTER (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether str.count() method provided
+#ifndef MICROPY_PY_BUILTINS_STR_COUNT
+#define MICROPY_PY_BUILTINS_STR_COUNT (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Whether str % (...) formatting operator provided
+#ifndef MICROPY_PY_BUILTINS_STR_OP_MODULO
+#define MICROPY_PY_BUILTINS_STR_OP_MODULO (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Whether str.partition()/str.rpartition() method provided
+#ifndef MICROPY_PY_BUILTINS_STR_PARTITION
+#define MICROPY_PY_BUILTINS_STR_PARTITION (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether str.splitlines() method provided
+#ifndef MICROPY_PY_BUILTINS_STR_SPLITLINES
+#define MICROPY_PY_BUILTINS_STR_SPLITLINES (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to support bytearray object
+#ifndef MICROPY_PY_BUILTINS_BYTEARRAY
+#define MICROPY_PY_BUILTINS_BYTEARRAY (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Whether to support dict.fromkeys() class method
+#ifndef MICROPY_PY_BUILTINS_DICT_FROMKEYS
+#define MICROPY_PY_BUILTINS_DICT_FROMKEYS (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Whether to support memoryview object
+#ifndef MICROPY_PY_BUILTINS_MEMORYVIEW
+#define MICROPY_PY_BUILTINS_MEMORYVIEW (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to support memoryview.itemsize attribute
+#ifndef MICROPY_PY_BUILTINS_MEMORYVIEW_ITEMSIZE
+#define MICROPY_PY_BUILTINS_MEMORYVIEW_ITEMSIZE (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EVERYTHING)
+#endif
+
+// Whether to support set object
+#ifndef MICROPY_PY_BUILTINS_SET
+#define MICROPY_PY_BUILTINS_SET (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Whether to support slice subscript operators and slice object
+#ifndef MICROPY_PY_BUILTINS_SLICE
+#define MICROPY_PY_BUILTINS_SLICE (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Whether to support slice attribute read access,
+// i.e. slice.start, slice.stop, slice.step
+#ifndef MICROPY_PY_BUILTINS_SLICE_ATTRS
+#define MICROPY_PY_BUILTINS_SLICE_ATTRS (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to support the .indices(len) method on slice objects
+#ifndef MICROPY_PY_BUILTINS_SLICE_INDICES
+#define MICROPY_PY_BUILTINS_SLICE_INDICES (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to support frozenset object
+#ifndef MICROPY_PY_BUILTINS_FROZENSET
+#define MICROPY_PY_BUILTINS_FROZENSET (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to support property object
+#ifndef MICROPY_PY_BUILTINS_PROPERTY
+#define MICROPY_PY_BUILTINS_PROPERTY (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Whether to implement the start/stop/step attributes (readback) on
+// the "range" builtin type. Rarely used, and costs ~60 bytes (x86).
+#ifndef MICROPY_PY_BUILTINS_RANGE_ATTRS
+#define MICROPY_PY_BUILTINS_RANGE_ATTRS (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Whether to support binary ops [only (in)equality is defined] between range
+// objects.  With this option disabled all range objects that are not exactly
+// the same object will compare as not-equal.  With it enabled the semantics
+// match CPython and ranges are equal if they yield the same sequence of items.
+#ifndef MICROPY_PY_BUILTINS_RANGE_BINOP
+#define MICROPY_PY_BUILTINS_RANGE_BINOP (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EVERYTHING)
+#endif
+
+// Support for calling next() with second argument
+#ifndef MICROPY_PY_BUILTINS_NEXT2
+#define MICROPY_PY_BUILTINS_NEXT2 (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EVERYTHING)
+#endif
+
+// Whether to support rounding of integers (incl bignum); eg round(123,-1)=120
+#ifndef MICROPY_PY_BUILTINS_ROUND_INT
+#define MICROPY_PY_BUILTINS_ROUND_INT (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to support complete set of special methods for user
+// classes, or only the most used ones. "Inplace" methods are
+// controlled by MICROPY_PY_ALL_INPLACE_SPECIAL_METHODS below.
+// "Reverse" methods are controlled by
+// MICROPY_PY_REVERSE_SPECIAL_METHODS below.
+#ifndef MICROPY_PY_ALL_SPECIAL_METHODS
+#define MICROPY_PY_ALL_SPECIAL_METHODS (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to support all inplace arithmetic operation methods
+// (__imul__, etc.)
+#ifndef MICROPY_PY_ALL_INPLACE_SPECIAL_METHODS
+#define MICROPY_PY_ALL_INPLACE_SPECIAL_METHODS (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EVERYTHING)
+#endif
+
+// Whether to support reverse arithmetic operation methods
+// (__radd__, etc.). Additionally gated by
+// MICROPY_PY_ALL_SPECIAL_METHODS.
+#ifndef MICROPY_PY_REVERSE_SPECIAL_METHODS
+#define MICROPY_PY_REVERSE_SPECIAL_METHODS (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to support compile function
+#ifndef MICROPY_PY_BUILTINS_COMPILE
+#define MICROPY_PY_BUILTINS_COMPILE (MICROPY_ENABLE_COMPILER && MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to support enumerate function(type)
+#ifndef MICROPY_PY_BUILTINS_ENUMERATE
+#define MICROPY_PY_BUILTINS_ENUMERATE (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Whether to support eval and exec functions
+// By default they are supported if the compiler is enabled
+#ifndef MICROPY_PY_BUILTINS_EVAL_EXEC
+#define MICROPY_PY_BUILTINS_EVAL_EXEC (MICROPY_ENABLE_COMPILER)
+#endif
+
+// Whether to support the Python 2 execfile function
+#ifndef MICROPY_PY_BUILTINS_EXECFILE
+#define MICROPY_PY_BUILTINS_EXECFILE (MICROPY_ENABLE_COMPILER && MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to support filter function(type)
+#ifndef MICROPY_PY_BUILTINS_FILTER
+#define MICROPY_PY_BUILTINS_FILTER (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Whether to support reversed function(type)
+#ifndef MICROPY_PY_BUILTINS_REVERSED
+#define MICROPY_PY_BUILTINS_REVERSED (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Whether to define "NotImplemented" special constant
+#ifndef MICROPY_PY_BUILTINS_NOTIMPLEMENTED
+#define MICROPY_PY_BUILTINS_NOTIMPLEMENTED (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to provide the built-in input() function. The implementation of this
+// uses shared/readline, so can only be enabled if the port uses this readline.
+#ifndef MICROPY_PY_BUILTINS_INPUT
+#define MICROPY_PY_BUILTINS_INPUT (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to support min/max functions
+#ifndef MICROPY_PY_BUILTINS_MIN_MAX
+#define MICROPY_PY_BUILTINS_MIN_MAX (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Support for calls to pow() with 3 integer arguments
+#ifndef MICROPY_PY_BUILTINS_POW3
+#define MICROPY_PY_BUILTINS_POW3 (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to provide the help function
+#ifndef MICROPY_PY_BUILTINS_HELP
+#define MICROPY_PY_BUILTINS_HELP (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Use this to configure the help text shown for help().  It should be a
+// variable with the type "const char*".  A sensible default is provided.
+#ifndef MICROPY_PY_BUILTINS_HELP_TEXT
+#define MICROPY_PY_BUILTINS_HELP_TEXT mp_help_default_text
+#endif
+
+// Add the ability to list the available modules when executing help('modules')
+#ifndef MICROPY_PY_BUILTINS_HELP_MODULES
+#define MICROPY_PY_BUILTINS_HELP_MODULES (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to set __file__ for imported modules
+#ifndef MICROPY_PY___FILE__
+#define MICROPY_PY___FILE__ (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Whether to provide mem-info related functions in micropython module
+#ifndef MICROPY_PY_MICROPYTHON_MEM_INFO
+#define MICROPY_PY_MICROPYTHON_MEM_INFO (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to provide "micropython.stack_use" function
+#ifndef MICROPY_PY_MICROPYTHON_STACK_USE
+#define MICROPY_PY_MICROPYTHON_STACK_USE (MICROPY_PY_MICROPYTHON_MEM_INFO)
+#endif
+
+// Whether to provide the "micropython.heap_locked" function
+#ifndef MICROPY_PY_MICROPYTHON_HEAP_LOCKED
+#define MICROPY_PY_MICROPYTHON_HEAP_LOCKED (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EVERYTHING)
+#endif
+
+// Whether to provide "array" module. Note that large chunk of the
+// underlying code is shared with "bytearray" builtin type, so to
+// get real savings, it should be disabled too.
+#ifndef MICROPY_PY_ARRAY
+#define MICROPY_PY_ARRAY (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Whether to support slice assignments for array (and bytearray).
+// This is rarely used, but adds ~0.5K of code.
+#ifndef MICROPY_PY_ARRAY_SLICE_ASSIGN
+#define MICROPY_PY_ARRAY_SLICE_ASSIGN (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to support attrtuple type (MicroPython extension)
+// It provides space-efficient tuples with attribute access
+#ifndef MICROPY_PY_ATTRTUPLE
+#define MICROPY_PY_ATTRTUPLE (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Whether to provide "collections" module
+#ifndef MICROPY_PY_COLLECTIONS
+#define MICROPY_PY_COLLECTIONS (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Whether to provide "collections.deque" type
+#ifndef MICROPY_PY_COLLECTIONS_DEQUE
+#define MICROPY_PY_COLLECTIONS_DEQUE (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether "collections.deque" supports iteration
+#ifndef MICROPY_PY_COLLECTIONS_DEQUE_ITER
+#define MICROPY_PY_COLLECTIONS_DEQUE_ITER (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether "collections.deque" supports subscription
+#ifndef MICROPY_PY_COLLECTIONS_DEQUE_SUBSCR
+#define MICROPY_PY_COLLECTIONS_DEQUE_SUBSCR (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to provide "collections.OrderedDict" type
+#ifndef MICROPY_PY_COLLECTIONS_ORDEREDDICT
+#define MICROPY_PY_COLLECTIONS_ORDEREDDICT (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to provide the _asdict function for namedtuple
+#ifndef MICROPY_PY_COLLECTIONS_NAMEDTUPLE__ASDICT
+#define MICROPY_PY_COLLECTIONS_NAMEDTUPLE__ASDICT (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EVERYTHING)
+#endif
+
+// Whether to provide "math" module
+#ifndef MICROPY_PY_MATH
+#define MICROPY_PY_MATH (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Whether to provide all math module constants (Python 3.5+), or just pi and e.
+#ifndef MICROPY_PY_MATH_CONSTANTS
+#define MICROPY_PY_MATH_CONSTANTS (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to provide special math functions: math.{erf,erfc,gamma,lgamma}
+#ifndef MICROPY_PY_MATH_SPECIAL_FUNCTIONS
+#define MICROPY_PY_MATH_SPECIAL_FUNCTIONS (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to provide math.factorial function
+#ifndef MICROPY_PY_MATH_FACTORIAL
+#define MICROPY_PY_MATH_FACTORIAL (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to provide math.isclose function
+#ifndef MICROPY_PY_MATH_ISCLOSE
+#define MICROPY_PY_MATH_ISCLOSE (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to provide fix for atan2 Inf handling.
+#ifndef MICROPY_PY_MATH_ATAN2_FIX_INFNAN
+#define MICROPY_PY_MATH_ATAN2_FIX_INFNAN (0)
+#endif
+
+// Whether to provide fix for fmod Inf handling.
+#ifndef MICROPY_PY_MATH_FMOD_FIX_INFNAN
+#define MICROPY_PY_MATH_FMOD_FIX_INFNAN (0)
+#endif
+
+// Whether to provide fix for modf negative zero handling.
+#ifndef MICROPY_PY_MATH_MODF_FIX_NEGZERO
+#define MICROPY_PY_MATH_MODF_FIX_NEGZERO (0)
+#endif
+
+// Whether to provide fix for pow(1, NaN) and pow(NaN, 0), which both should be 1 not NaN.
+#ifndef MICROPY_PY_MATH_POW_FIX_NAN
+#define MICROPY_PY_MATH_POW_FIX_NAN (0)
+#endif
+
+// Whether to provide "cmath" module
+#ifndef MICROPY_PY_CMATH
+#define MICROPY_PY_CMATH (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to provide "micropython" module
+#ifndef MICROPY_PY_MICROPYTHON
+#define MICROPY_PY_MICROPYTHON (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Whether to provide "gc" module
+#ifndef MICROPY_PY_GC
+#define MICROPY_PY_GC (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Whether to return number of collected objects from gc.collect()
+#ifndef MICROPY_PY_GC_COLLECT_RETVAL
+#define MICROPY_PY_GC_COLLECT_RETVAL (0)
+#endif
+
+// Whether to provide "io" module
+#ifndef MICROPY_PY_IO
+#define MICROPY_PY_IO (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Whether to provide "io.IOBase" class to support user streams
+#ifndef MICROPY_PY_IO_IOBASE
+#define MICROPY_PY_IO_IOBASE (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to provide "io.BytesIO" class
+#ifndef MICROPY_PY_IO_BYTESIO
+#define MICROPY_PY_IO_BYTESIO (1)
+#endif
+
+// Whether to provide "io.BufferedWriter" class
+#ifndef MICROPY_PY_IO_BUFFEREDWRITER
+#define MICROPY_PY_IO_BUFFEREDWRITER (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EVERYTHING)
+#endif
+
+// Whether to provide "struct" module
+#ifndef MICROPY_PY_STRUCT
+#define MICROPY_PY_STRUCT (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Whether to provide "sys" module
+#ifndef MICROPY_PY_SYS
+#define MICROPY_PY_SYS (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Whether to initialise "sys.path" and "sys.argv" to their defaults in mp_init()
+#ifndef MICROPY_PY_SYS_PATH_ARGV_DEFAULTS
+#define MICROPY_PY_SYS_PATH_ARGV_DEFAULTS (MICROPY_PY_SYS)
+#endif
+
+// Whether to provide "sys.maxsize" constant
+#ifndef MICROPY_PY_SYS_MAXSIZE
+#define MICROPY_PY_SYS_MAXSIZE (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to provide "sys.modules" dictionary
+#ifndef MICROPY_PY_SYS_MODULES
+#define MICROPY_PY_SYS_MODULES (1)
+#endif
+
+// Whether to provide "sys.exc_info" function
+// Avoid enabling this, this function is Python2 heritage
+#ifndef MICROPY_PY_SYS_EXC_INFO
+#define MICROPY_PY_SYS_EXC_INFO (0)
+#endif
+
+// Whether to provide "sys.executable", which is the absolute path to the
+// micropython binary
+// Intended for use on the "OS" ports (e.g. Unix)
+#ifndef MICROPY_PY_SYS_EXECUTABLE
+#define MICROPY_PY_SYS_EXECUTABLE (0)
+#endif
+
+// Whether to provide "sys.intern"
+#ifndef MICROPY_PY_SYS_INTERN
+#define MICROPY_PY_SYS_INTERN (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EVERYTHING)
+#endif
+
+// Whether to provide "sys.exit" function
+#ifndef MICROPY_PY_SYS_EXIT
+#define MICROPY_PY_SYS_EXIT (1)
+#endif
+
+// Whether to provide "sys.atexit" function (MicroPython extension)
+#ifndef MICROPY_PY_SYS_ATEXIT
+#define MICROPY_PY_SYS_ATEXIT (0)
+#endif
+
+// Whether to provide the "sys.path" attribute (which forces module delegation
+// and mutable sys attributes to be enabled).
+// If MICROPY_PY_SYS_PATH_ARGV_DEFAULTS is enabled, this is initialised in
+// mp_init to an empty list. Otherwise the port must initialise it using
+// `mp_sys_path = mp_obj_new_list(...)`.
+#ifndef MICROPY_PY_SYS_PATH
+#define MICROPY_PY_SYS_PATH (1)
+#endif
+
+// Whether to provide the "sys.argv" attribute.
+// If MICROPY_PY_SYS_PATH_ARGV_DEFAULTS is enabled, this is initialised in
+// mp_init to an empty list. Otherwise the port must initialise it using
+// `mp_obj_list_init(MP_OBJ_TO_PTR(mp_sys_argv), ...);`
+#ifndef MICROPY_PY_SYS_ARGV
+#define MICROPY_PY_SYS_ARGV (1)
+#endif
+
+// Whether to provide sys.{ps1,ps2} mutable attributes, to control REPL prompts
+#ifndef MICROPY_PY_SYS_PS1_PS2
+#define MICROPY_PY_SYS_PS1_PS2 (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to provide "sys.settrace" function
+#ifndef MICROPY_PY_SYS_SETTRACE
+#define MICROPY_PY_SYS_SETTRACE (0)
+#endif
+
+// Whether to provide "sys.getsizeof" function
+#ifndef MICROPY_PY_SYS_GETSIZEOF
+#define MICROPY_PY_SYS_GETSIZEOF (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EVERYTHING)
+#endif
+
+// Whether to provide sys.{stdin,stdout,stderr} objects
+#ifndef MICROPY_PY_SYS_STDFILES
+#define MICROPY_PY_SYS_STDFILES (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to provide sys.{stdin,stdout,stderr}.buffer object
+// This is implemented per-port
+#ifndef MICROPY_PY_SYS_STDIO_BUFFER
+#define MICROPY_PY_SYS_STDIO_BUFFER (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to provide sys.tracebacklimit mutable attribute
+#ifndef MICROPY_PY_SYS_TRACEBACKLIMIT
+#define MICROPY_PY_SYS_TRACEBACKLIMIT (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EVERYTHING)
+#endif
+
+// Whether the sys module supports attribute delegation
+// This is enabled automatically when needed by other features
+#ifndef MICROPY_PY_SYS_ATTR_DELEGATION
+#define MICROPY_PY_SYS_ATTR_DELEGATION (MICROPY_PY_SYS_PATH || MICROPY_PY_SYS_PS1_PS2 || MICROPY_PY_SYS_TRACEBACKLIMIT)
+#endif
+
+// Whether to provide "errno" module
+#ifndef MICROPY_PY_ERRNO
+#define MICROPY_PY_ERRNO (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to provide the errno.errorcode dict
+#ifndef MICROPY_PY_ERRNO_ERRORCODE
+#define MICROPY_PY_ERRNO_ERRORCODE (1)
+#endif
+
+// Whether to provide "select" module
+#ifndef MICROPY_PY_SELECT
+#define MICROPY_PY_SELECT (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to enable POSIX optimisations in the "select" module (requires system poll)
+#ifndef MICROPY_PY_SELECT_POSIX_OPTIMISATIONS
+#define MICROPY_PY_SELECT_POSIX_OPTIMISATIONS (0)
+#endif
+
+// Whether to enable the select() function in the "select" module (baremetal
+// implementation). This is present for compatibility but can be disabled to
+// save space.
+#ifndef MICROPY_PY_SELECT_SELECT
+#define MICROPY_PY_SELECT_SELECT (1)
+#endif
+
+// Whether to provide the "time" module
+#ifndef MICROPY_PY_TIME
+#define MICROPY_PY_TIME (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_BASIC_FEATURES)
+#endif
+
+// Whether to provide time.gmtime/localtime/mktime functions
+#ifndef MICROPY_PY_TIME_GMTIME_LOCALTIME_MKTIME
+#define MICROPY_PY_TIME_GMTIME_LOCALTIME_MKTIME (0)
+#endif
+
+// Whether to provide time.time/time_ns functions
+#ifndef MICROPY_PY_TIME_TIME_TIME_NS
+#define MICROPY_PY_TIME_TIME_TIME_NS (0)
+#endif
+
+// Period of values returned by time.ticks_ms(), ticks_us(), ticks_cpu()
+// functions. Should be power of two. All functions above use the same
+// period, so if underlying hardware/API has different periods, the
+// minimum of them should be used. The value below is the maximum value
+// this parameter can take (corresponding to 30 bit tick values on 32-bit
+// system).
+#ifndef MICROPY_PY_TIME_TICKS_PERIOD
+#define MICROPY_PY_TIME_TICKS_PERIOD (MP_SMALL_INT_POSITIVE_MASK + 1)
+#endif
+
+// Whether to provide "_thread" module
+#ifndef MICROPY_PY_THREAD
+#define MICROPY_PY_THREAD (0)
+#endif
+
+// Whether to make the VM/runtime thread-safe using a global lock
+// If not enabled then thread safety must be provided at the Python level
+#ifndef MICROPY_PY_THREAD_GIL
+#define MICROPY_PY_THREAD_GIL (MICROPY_PY_THREAD)
+#endif
+
+// Number of VM jump-loops to do before releasing the GIL.
+// Set this to 0 to disable the divisor.
+#ifndef MICROPY_PY_THREAD_GIL_VM_DIVISOR
+#define MICROPY_PY_THREAD_GIL_VM_DIVISOR (32)
+#endif
+
+// Extended modules
+
+#ifndef MICROPY_PY_ASYNCIO
+#define MICROPY_PY_ASYNCIO (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+#ifndef MICROPY_PY_UCTYPES
+#define MICROPY_PY_UCTYPES (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to provide SHORT, INT, LONG, etc. types in addition to
+// exact-bitness types like INT16, INT32, etc.
+#ifndef MICROPY_PY_UCTYPES_NATIVE_C_TYPES
+#define MICROPY_PY_UCTYPES_NATIVE_C_TYPES (1)
+#endif
+
+// Whether to provide "deflate" module (decompression-only by default)
+#ifndef MICROPY_PY_DEFLATE
+#define MICROPY_PY_DEFLATE (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to provide compression support in "deflate" module
+#ifndef MICROPY_PY_DEFLATE_COMPRESS
+#define MICROPY_PY_DEFLATE_COMPRESS (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_FULL_FEATURES)
+#endif
+
+#ifndef MICROPY_PY_JSON
+#define MICROPY_PY_JSON (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to support the "separators" argument to dump, dumps
+#ifndef MICROPY_PY_JSON_SEPARATORS
+#define MICROPY_PY_JSON_SEPARATORS (1)
+#endif
+
+#ifndef MICROPY_PY_OS
+#define MICROPY_PY_OS (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+#ifndef MICROPY_PY_OS_STATVFS
+#define MICROPY_PY_OS_STATVFS (MICROPY_PY_OS)
+#endif
+
+#ifndef MICROPY_PY_RE
+#define MICROPY_PY_RE (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+#ifndef MICROPY_PY_RE_DEBUG
+#define MICROPY_PY_RE_DEBUG (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EVERYTHING)
+#endif
+
+#ifndef MICROPY_PY_RE_MATCH_GROUPS
+#define MICROPY_PY_RE_MATCH_GROUPS (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EVERYTHING)
+#endif
+
+#ifndef MICROPY_PY_RE_MATCH_SPAN_START_END
+#define MICROPY_PY_RE_MATCH_SPAN_START_END (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EVERYTHING)
+#endif
+
+#ifndef MICROPY_PY_RE_SUB
+#define MICROPY_PY_RE_SUB (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+#ifndef MICROPY_PY_HEAPQ
+#define MICROPY_PY_HEAPQ (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+#ifndef MICROPY_PY_HASHLIB
+#define MICROPY_PY_HASHLIB (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+#ifndef MICROPY_PY_HASHLIB_MD5
+#define MICROPY_PY_HASHLIB_MD5 (0)
+#endif
+
+#ifndef MICROPY_PY_HASHLIB_SHA1
+#define MICROPY_PY_HASHLIB_SHA1  (0)
+#endif
+
+#ifndef MICROPY_PY_HASHLIB_SHA256
+#define MICROPY_PY_HASHLIB_SHA256 (1)
+#endif
+
+#ifndef MICROPY_PY_CRYPTOLIB
+#define MICROPY_PY_CRYPTOLIB (0)
+#endif
+
+// Depends on MICROPY_PY_CRYPTOLIB
+#ifndef MICROPY_PY_CRYPTOLIB_CTR
+#define MICROPY_PY_CRYPTOLIB_CTR (0)
+#endif
+
+#ifndef MICROPY_PY_CRYPTOLIB_CONSTS
+#define MICROPY_PY_CRYPTOLIB_CONSTS (0)
+#endif
+
+#ifndef MICROPY_PY_BINASCII
+#define MICROPY_PY_BINASCII (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Depends on MICROPY_PY_DEFLATE
+#ifndef MICROPY_PY_BINASCII_CRC32
+#define MICROPY_PY_BINASCII_CRC32 (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+#ifndef MICROPY_PY_RANDOM
+#define MICROPY_PY_RANDOM (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to include: randrange, randint, choice, random, uniform
+#ifndef MICROPY_PY_RANDOM_EXTRA_FUNCS
+#define MICROPY_PY_RANDOM_EXTRA_FUNCS (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+#ifndef MICROPY_PY_MACHINE
+#define MICROPY_PY_MACHINE (0)
+#endif
+
+// Whether to include: reset, reset_cause
+#ifndef MICROPY_PY_MACHINE_RESET
+#define MICROPY_PY_MACHINE_RESET (0)
+#endif
+
+// Whether to include: bitstream
+#ifndef MICROPY_PY_MACHINE_BITSTREAM
+#define MICROPY_PY_MACHINE_BITSTREAM (0)
+#endif
+
+// Whether to include: time_pulse_us
+#ifndef MICROPY_PY_MACHINE_PULSE
+#define MICROPY_PY_MACHINE_PULSE (0)
+#endif
+
+// Whether to provide the "machine.mem8/16/32" objects
+#ifndef MICROPY_PY_MACHINE_MEMX
+#define MICROPY_PY_MACHINE_MEMX (MICROPY_PY_MACHINE)
+#endif
+
+// Whether to provide the "machine.Signal" class
+#ifndef MICROPY_PY_MACHINE_SIGNAL
+#define MICROPY_PY_MACHINE_SIGNAL (MICROPY_PY_MACHINE)
+#endif
+
+#ifndef MICROPY_PY_MACHINE_I2C
+#define MICROPY_PY_MACHINE_I2C (0)
+#endif
+
+// Whether the low-level I2C transfer function supports a separate write as the first transfer
+#ifndef MICROPY_PY_MACHINE_I2C_TRANSFER_WRITE1
+#define MICROPY_PY_MACHINE_I2C_TRANSFER_WRITE1 (0)
+#endif
+
+// Whether to provide the "machine.SoftI2C" class
+#ifndef MICROPY_PY_MACHINE_SOFTI2C
+#define MICROPY_PY_MACHINE_SOFTI2C (0)
+#endif
+
+#ifndef MICROPY_PY_MACHINE_SPI
+#define MICROPY_PY_MACHINE_SPI (0)
+#endif
+
+// Whether to provide the "machine.SoftSPI" class
+#ifndef MICROPY_PY_MACHINE_SOFTSPI
+#define MICROPY_PY_MACHINE_SOFTSPI (0)
+#endif
+
+// Whether to provide the "machine.Timer" class
+#ifndef MICROPY_PY_MACHINE_TIMER
+#define MICROPY_PY_MACHINE_TIMER (0)
+#endif
+
+// The default backlog value for socket.listen(backlog)
+#ifndef MICROPY_PY_SOCKET_LISTEN_BACKLOG_DEFAULT
+#define MICROPY_PY_SOCKET_LISTEN_BACKLOG_DEFAULT (2)
+#endif
+
+#ifndef MICROPY_PY_SSL
+#define MICROPY_PY_SSL (0)
+#endif
+
+// Whether to add finaliser code to ssl objects
+#ifndef MICROPY_PY_SSL_FINALISER
+#define MICROPY_PY_SSL_FINALISER (MICROPY_ENABLE_FINALISER)
+#endif
+
+// Whether to provide the "vfs" module
+#ifndef MICROPY_PY_VFS
+#define MICROPY_PY_VFS (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES && MICROPY_VFS)
+#endif
+
+#ifndef MICROPY_PY_WEBSOCKET
+#define MICROPY_PY_WEBSOCKET (0)
+#endif
+
+#ifndef MICROPY_PY_FRAMEBUF
+#define MICROPY_PY_FRAMEBUF (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+#ifndef MICROPY_PY_BTREE
+#define MICROPY_PY_BTREE (0)
+#endif
+
+// Whether to provide the low-level "_onewire" module
+#ifndef MICROPY_PY_ONEWIRE
+#define MICROPY_PY_ONEWIRE (0)
+#endif
+
+// Whether to provide the "platform" module
+#ifndef MICROPY_PY_PLATFORM
+#define MICROPY_PY_PLATFORM (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+/*****************************************************************************/
+/* Hooks for a port to add builtins                                          */
+
+// Additional builtin function definitions - see modbuiltins.c:mp_module_builtins_globals_table for format.
+#ifndef MICROPY_PORT_BUILTINS
+#define MICROPY_PORT_BUILTINS
+#endif
+
+// Additional builtin function definitions for extension by command-line, boards or variants.
+// See modbuiltins.c:mp_module_builtins_globals_table for format.
+#ifndef MICROPY_PORT_EXTRA_BUILTINS
+#define MICROPY_PORT_EXTRA_BUILTINS
+#endif
+
+// Additional constant definitions for the compiler - see compile.c:mp_constants_table.
+#ifndef MICROPY_PORT_CONSTANTS
+#define MICROPY_PORT_CONSTANTS
+#endif
+
+/*****************************************************************************/
+/* Hooks for a port to wrap functions with attributes                        */
+
+#ifndef MICROPY_WRAP_MP_BINARY_OP
+#define MICROPY_WRAP_MP_BINARY_OP(f) f
+#endif
+
+#ifndef MICROPY_WRAP_MP_EXECUTE_BYTECODE
+#define MICROPY_WRAP_MP_EXECUTE_BYTECODE(f) f
+#endif
+
+#ifndef MICROPY_WRAP_MP_LOAD_GLOBAL
+#define MICROPY_WRAP_MP_LOAD_GLOBAL(f) f
+#endif
+
+#ifndef MICROPY_WRAP_MP_LOAD_NAME
+#define MICROPY_WRAP_MP_LOAD_NAME(f) f
+#endif
+
+#ifndef MICROPY_WRAP_MP_MAP_LOOKUP
+#define MICROPY_WRAP_MP_MAP_LOOKUP(f) f
+#endif
+
+#ifndef MICROPY_WRAP_MP_OBJ_GET_TYPE
+#define MICROPY_WRAP_MP_OBJ_GET_TYPE(f) f
+#endif
+
+#ifndef MICROPY_WRAP_MP_SCHED_EXCEPTION
+#define MICROPY_WRAP_MP_SCHED_EXCEPTION(f) f
+#endif
+
+#ifndef MICROPY_WRAP_MP_SCHED_KEYBOARD_INTERRUPT
+#define MICROPY_WRAP_MP_SCHED_KEYBOARD_INTERRUPT(f) f
+#endif
+
+#ifndef MICROPY_WRAP_MP_SCHED_SCHEDULE
+#define MICROPY_WRAP_MP_SCHED_SCHEDULE(f) f
+#endif
+
+#ifndef MICROPY_WRAP_MP_SCHED_VM_ABORT
+#define MICROPY_WRAP_MP_SCHED_VM_ABORT(f) f
+#endif
+
+/*****************************************************************************/
+/* Miscellaneous settings                                                    */
+
+// All uPy objects in ROM must be aligned on at least a 4 byte boundary
+// so that the small-int/qstr/pointer distinction can be made.  For machines
+// that don't do this (eg 16-bit CPU), define the following macro to something
+// like __attribute__((aligned(4))).
+#ifndef MICROPY_OBJ_BASE_ALIGNMENT
+#define MICROPY_OBJ_BASE_ALIGNMENT
+#endif
+
+// String used for the banner, and sys.version additional information
+#ifndef MICROPY_BANNER_NAME_AND_VERSION
+#if MICROPY_PREVIEW_VERSION_2
+#define MICROPY_BANNER_NAME_AND_VERSION "MicroPython (with v2.0 preview) " MICROPY_GIT_TAG " on " MICROPY_BUILD_DATE
+#else
+#define MICROPY_BANNER_NAME_AND_VERSION "MicroPython " MICROPY_GIT_TAG " on " MICROPY_BUILD_DATE
+#endif
+#endif
+
+// String used for the second part of the banner, and sys.implementation._machine
+#ifndef MICROPY_BANNER_MACHINE
+#ifdef MICROPY_HW_BOARD_NAME
+#define MICROPY_BANNER_MACHINE MICROPY_HW_BOARD_NAME " with " MICROPY_HW_MCU_NAME
+#else
+#define MICROPY_BANNER_MACHINE MICROPY_PY_SYS_PLATFORM " [" MICROPY_PLATFORM_COMPILER "] version"
+#endif
+#endif
+
+// Number of bytes in an object word: mp_obj_t, mp_uint_t, mp_uint_t
+#ifndef MP_BYTES_PER_OBJ_WORD
+#define MP_BYTES_PER_OBJ_WORD (sizeof(mp_uint_t))
+#endif
+
+// Number of bits in a byte
+#ifndef MP_BITS_PER_BYTE
+#define MP_BITS_PER_BYTE (8)
+#endif
+// mp_int_t value with most significant bit set
+#define MP_OBJ_WORD_MSBIT_HIGH (((mp_uint_t)1) << (MP_BYTES_PER_OBJ_WORD * MP_BITS_PER_BYTE - 1))
+
+// Make sure both MP_ENDIANNESS_LITTLE and MP_ENDIANNESS_BIG are
+// defined and that they are the opposite of each other.
+#if defined(MP_ENDIANNESS_LITTLE)
+#define MP_ENDIANNESS_BIG (!MP_ENDIANNESS_LITTLE)
+#elif defined(MP_ENDIANNESS_BIG)
+#define MP_ENDIANNESS_LITTLE (!MP_ENDIANNESS_BIG)
+#else
+// Endianness not defined by port so try to autodetect it.
+  #if defined(__BYTE_ORDER__)
+    #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+      #define MP_ENDIANNESS_LITTLE (1)
+    #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+      #define MP_ENDIANNESS_LITTLE (0)
+    #endif
+  #else
+    #include <endian.h>
+      #if defined(__BYTE_ORDER)
+        #if __BYTE_ORDER == __LITTLE_ENDIAN
+          #define MP_ENDIANNESS_LITTLE (1)
+        #elif __BYTE_ORDER == __BIG_ENDIAN
+          #define MP_ENDIANNESS_LITTLE (0)
+        #endif
+      #endif
+  #endif
+  #ifndef MP_ENDIANNESS_LITTLE
+    #error endianness not defined and cannot detect it
+  #endif
+  #define MP_ENDIANNESS_BIG (!MP_ENDIANNESS_LITTLE)
+#endif
+
+// Make a pointer to RAM callable (eg set lower bit for Thumb code)
+// (This scheme won't work if we want to mix Thumb and normal ARM code.)
+#ifndef MICROPY_MAKE_POINTER_CALLABLE
+#define MICROPY_MAKE_POINTER_CALLABLE(p) (p)
+#endif
+
+// If these MP_PLAT_*_EXEC macros are overridden then the memory allocated by them
+// must be somehow reachable for marking by the GC, since the native code
+// generators store pointers to GC managed memory in the code.
+#ifndef MP_PLAT_ALLOC_EXEC
+#define MP_PLAT_ALLOC_EXEC(min_size, ptr, size) do { *ptr = m_new(byte, min_size); *size = min_size; } while (0)
+#endif
+
+#ifndef MP_PLAT_FREE_EXEC
+#define MP_PLAT_FREE_EXEC(ptr, size) m_del(byte, ptr, size)
+#endif
+
+// Allocating new heap area at runtime requires port to be able to allocate from system heap
+#if MICROPY_GC_SPLIT_HEAP_AUTO
+#ifndef MP_PLAT_ALLOC_HEAP
+#define MP_PLAT_ALLOC_HEAP(size) malloc(size)
+#endif
+#ifndef MP_PLAT_FREE_HEAP
+#define MP_PLAT_FREE_HEAP(ptr) free(ptr)
+#endif
+#endif
+
+// This macro is used to do all output (except when MICROPY_PY_IO is defined)
+#ifndef MP_PLAT_PRINT_STRN
+#define MP_PLAT_PRINT_STRN(str, len) mp_hal_stdout_tx_strn_cooked(str, len)
+#endif
+
+#ifndef MP_SSIZE_MAX
+#define MP_SSIZE_MAX SSIZE_MAX
+#endif
+
+// printf format spec to use for mp_int_t and friends
+#ifndef INT_FMT
+#if defined(__LP64__)
+// Archs where mp_int_t == long, long != int
+#define UINT_FMT "%lu"
+#define INT_FMT "%ld"
+#elif defined(_WIN64)
+#define UINT_FMT "%llu"
+#define INT_FMT "%lld"
+#else
+// Archs where mp_int_t == int
+#define UINT_FMT "%u"
+#define INT_FMT "%d"
+#endif
+#endif // INT_FMT
+
+// Modifier for function which doesn't return
+#ifndef NORETURN
+#define NORETURN __attribute__((noreturn))
+#endif
+
+// Modifier for weak functions
+#ifndef MP_WEAK
+#define MP_WEAK __attribute__((weak))
+#endif
+
+// Modifier for functions which should be never inlined
+#ifndef MP_NOINLINE
+#define MP_NOINLINE __attribute__((noinline))
+#endif
+
+// Modifier for functions which should be always inlined
+#ifndef MP_ALWAYSINLINE
+#define MP_ALWAYSINLINE __attribute__((always_inline))
+#endif
+
+// Condition is likely to be true, to help branch prediction
+#ifndef MP_LIKELY
+#define MP_LIKELY(x) __builtin_expect((x), 1)
+#endif
+
+// Condition is likely to be false, to help branch prediction
+#ifndef MP_UNLIKELY
+#define MP_UNLIKELY(x) __builtin_expect((x), 0)
+#endif
+
+// To annotate that code is unreachable
+#ifndef MP_UNREACHABLE
+#if defined(__GNUC__)
+#define MP_UNREACHABLE __builtin_unreachable();
+#else
+#define MP_UNREACHABLE for (;;);
+#endif
+#endif
+
+// Explicitly annotate switch case fall throughs
+#if defined(__GNUC__) && __GNUC__ >= 7
+#define MP_FALLTHROUGH __attribute__((fallthrough));
+#else
+#define MP_FALLTHROUGH
+#endif
+
+#ifndef MP_HTOBE16
+#if MP_ENDIANNESS_LITTLE
+#define MP_HTOBE16(x) ((uint16_t)((((x) & 0xff) << 8) | (((x) >> 8) & 0xff)))
+#define MP_BE16TOH(x) MP_HTOBE16(x)
+#else
+#define MP_HTOBE16(x) (x)
+#define MP_BE16TOH(x) (x)
+#endif
+#endif
+
+#ifndef MP_HTOBE32
+#if MP_ENDIANNESS_LITTLE
+#define MP_HTOBE32(x) ((uint32_t)((((x) & 0xff) << 24) | (((x) & 0xff00) << 8) | (((x) >> 8) & 0xff00) | (((x) >> 24) & 0xff)))
+#define MP_BE32TOH(x) MP_HTOBE32(x)
+#else
+#define MP_HTOBE32(x) (x)
+#define MP_BE32TOH(x) (x)
+#endif
+#endif
+
+// Warning categories are by default implemented as strings, though
+// hook is left for a port to define them as something else.
+#if MICROPY_WARNINGS_CATEGORY
+#ifndef MP_WARN_CAT
+#define MP_WARN_CAT(x) #x
+#endif
+#else
+#undef MP_WARN_CAT
+#define MP_WARN_CAT(x) (NULL)
+#endif
+
+#endif // MICROPY_INCLUDED_PY_MPCONFIG_H

+ 152 - 0
mp_flipper/lib/micropython/py/mperrno.h

@@ -0,0 +1,152 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2016 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_MPERRNO_H
+#define MICROPY_INCLUDED_PY_MPERRNO_H
+
+#include "py/mpconfig.h"
+
+#if MICROPY_USE_INTERNAL_ERRNO
+
+// MP_Exxx errno's are defined directly as numeric values
+// (Linux constants are used as a reference)
+
+#define MP_EPERM              (1) // Operation not permitted
+#define MP_ENOENT             (2) // No such file or directory
+#define MP_ESRCH              (3) // No such process
+#define MP_EINTR              (4) // Interrupted system call
+#define MP_EIO                (5) // I/O error
+#define MP_ENXIO              (6) // No such device or address
+#define MP_E2BIG              (7) // Argument list too long
+#define MP_ENOEXEC            (8) // Exec format error
+#define MP_EBADF              (9) // Bad file number
+#define MP_ECHILD            (10) // No child processes
+#define MP_EAGAIN            (11) // Try again
+#define MP_ENOMEM            (12) // Out of memory
+#define MP_EACCES            (13) // Permission denied
+#define MP_EFAULT            (14) // Bad address
+#define MP_ENOTBLK           (15) // Block device required
+#define MP_EBUSY             (16) // Device or resource busy
+#define MP_EEXIST            (17) // File exists
+#define MP_EXDEV             (18) // Cross-device link
+#define MP_ENODEV            (19) // No such device
+#define MP_ENOTDIR           (20) // Not a directory
+#define MP_EISDIR            (21) // Is a directory
+#define MP_EINVAL            (22) // Invalid argument
+#define MP_ENFILE            (23) // File table overflow
+#define MP_EMFILE            (24) // Too many open files
+#define MP_ENOTTY            (25) // Not a typewriter
+#define MP_ETXTBSY           (26) // Text file busy
+#define MP_EFBIG             (27) // File too large
+#define MP_ENOSPC            (28) // No space left on device
+#define MP_ESPIPE            (29) // Illegal seek
+#define MP_EROFS             (30) // Read-only file system
+#define MP_EMLINK            (31) // Too many links
+#define MP_EPIPE             (32) // Broken pipe
+#define MP_EDOM              (33) // Math argument out of domain of func
+#define MP_ERANGE            (34) // Math result not representable
+#define MP_EWOULDBLOCK  MP_EAGAIN // Operation would block
+#define MP_EOPNOTSUPP        (95) // Operation not supported on transport endpoint
+#define MP_EAFNOSUPPORT      (97) // Address family not supported by protocol
+#define MP_EADDRINUSE        (98) // Address already in use
+#define MP_ECONNABORTED     (103) // Software caused connection abort
+#define MP_ECONNRESET       (104) // Connection reset by peer
+#define MP_ENOBUFS          (105) // No buffer space available
+#define MP_EISCONN          (106) // Transport endpoint is already connected
+#define MP_ENOTCONN         (107) // Transport endpoint is not connected
+#define MP_ETIMEDOUT        (110) // Connection timed out
+#define MP_ECONNREFUSED     (111) // Connection refused
+#define MP_EHOSTUNREACH     (113) // No route to host
+#define MP_EALREADY         (114) // Operation already in progress
+#define MP_EINPROGRESS      (115) // Operation now in progress
+#define MP_ECANCELED        (125) // Operation canceled
+
+#else
+
+// MP_Exxx errno's are defined in terms of system supplied ones
+
+#include <errno.h>
+
+#define MP_EPERM            EPERM
+#define MP_ENOENT           ENOENT
+#define MP_ESRCH            ESRCH
+#define MP_EINTR            EINTR
+#define MP_EIO              EIO
+#define MP_ENXIO            ENXIO
+#define MP_E2BIG            E2BIG
+#define MP_ENOEXEC          ENOEXEC
+#define MP_EBADF            EBADF
+#define MP_ECHILD           ECHILD
+#define MP_EAGAIN           EAGAIN
+#define MP_ENOMEM           ENOMEM
+#define MP_EACCES           EACCES
+#define MP_EFAULT           EFAULT
+#define MP_ENOTBLK          ENOTBLK
+#define MP_EBUSY            EBUSY
+#define MP_EEXIST           EEXIST
+#define MP_EXDEV            EXDEV
+#define MP_ENODEV           ENODEV
+#define MP_ENOTDIR          ENOTDIR
+#define MP_EISDIR           EISDIR
+#define MP_EINVAL           EINVAL
+#define MP_ENFILE           ENFILE
+#define MP_EMFILE           EMFILE
+#define MP_ENOTTY           ENOTTY
+#define MP_ETXTBSY          ETXTBSY
+#define MP_EFBIG            EFBIG
+#define MP_ENOSPC           ENOSPC
+#define MP_ESPIPE           ESPIPE
+#define MP_EROFS            EROFS
+#define MP_EMLINK           EMLINK
+#define MP_EPIPE            EPIPE
+#define MP_EDOM             EDOM
+#define MP_ERANGE           ERANGE
+#define MP_EWOULDBLOCK      EWOULDBLOCK
+#define MP_EOPNOTSUPP       EOPNOTSUPP
+#define MP_EAFNOSUPPORT     EAFNOSUPPORT
+#define MP_EADDRINUSE       EADDRINUSE
+#define MP_ECONNABORTED     ECONNABORTED
+#define MP_ECONNRESET       ECONNRESET
+#define MP_ENOBUFS          ENOBUFS
+#define MP_EISCONN          EISCONN
+#define MP_ENOTCONN         ENOTCONN
+#define MP_ETIMEDOUT        ETIMEDOUT
+#define MP_ECONNREFUSED     ECONNREFUSED
+#define MP_EHOSTUNREACH     EHOSTUNREACH
+#define MP_EALREADY         EALREADY
+#define MP_EINPROGRESS      EINPROGRESS
+#define MP_ECANCELED        ECANCELED
+
+#endif
+
+#if MICROPY_PY_ERRNO
+
+#include "py/obj.h"
+
+qstr mp_errno_to_str(mp_obj_t errno_val);
+
+#endif
+
+#endif // MICROPY_INCLUDED_PY_MPERRNO_H

+ 114 - 0
mp_flipper/lib/micropython/py/mphal.h

@@ -0,0 +1,114 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2015 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_MPHAL_H
+#define MICROPY_INCLUDED_PY_MPHAL_H
+
+#include <stdint.h>
+#include "py/mpconfig.h"
+
+#ifdef MICROPY_MPHALPORT_H
+#include MICROPY_MPHALPORT_H
+#else
+#include <mphalport.h>
+#endif
+
+// On embedded platforms, these will typically enable/disable irqs.
+#ifndef MICROPY_BEGIN_ATOMIC_SECTION
+#define MICROPY_BEGIN_ATOMIC_SECTION() (0)
+#endif
+#ifndef MICROPY_END_ATOMIC_SECTION
+#define MICROPY_END_ATOMIC_SECTION(state) (void)(state)
+#endif
+
+#ifndef mp_hal_stdio_poll
+uintptr_t mp_hal_stdio_poll(uintptr_t poll_flags);
+#endif
+
+#ifndef mp_hal_stdin_rx_chr
+int mp_hal_stdin_rx_chr(void);
+#endif
+
+#ifndef mp_hal_stdout_tx_str
+void mp_hal_stdout_tx_str(const char *str);
+#endif
+
+#ifndef mp_hal_stdout_tx_strn
+mp_uint_t mp_hal_stdout_tx_strn(const char *str, size_t len);
+#endif
+
+#ifndef mp_hal_stdout_tx_strn_cooked
+void mp_hal_stdout_tx_strn_cooked(const char *str, size_t len);
+#endif
+
+#ifndef mp_hal_delay_ms
+void mp_hal_delay_ms(mp_uint_t ms);
+#endif
+
+#ifndef mp_hal_delay_us
+void mp_hal_delay_us(mp_uint_t us);
+#endif
+
+#ifndef mp_hal_ticks_ms
+mp_uint_t mp_hal_ticks_ms(void);
+#endif
+
+#ifndef mp_hal_ticks_us
+mp_uint_t mp_hal_ticks_us(void);
+#endif
+
+#ifndef mp_hal_ticks_cpu
+mp_uint_t mp_hal_ticks_cpu(void);
+#endif
+
+#ifndef mp_hal_time_ns
+// Nanoseconds since the Epoch.
+uint64_t mp_hal_time_ns(void);
+#endif
+
+// If port HAL didn't define its own pin API, use generic
+// "virtual pin" API from the core.
+#ifndef mp_hal_pin_obj_t
+#define mp_hal_pin_obj_t mp_obj_t
+#define mp_hal_get_pin_obj(pin) (pin)
+#define mp_hal_pin_read(pin) mp_virtual_pin_read(pin)
+#define mp_hal_pin_write(pin, v) mp_virtual_pin_write(pin, v)
+#include "extmod/virtpin.h"
+#endif
+
+// Event handling and wait-for-event functions.
+
+#ifndef MICROPY_INTERNAL_WFE
+// Fallback definition for ports that don't need to suspend the CPU.
+#define MICROPY_INTERNAL_WFE(TIMEOUT_MS) (void)0
+#endif
+
+#ifndef MICROPY_INTERNAL_EVENT_HOOK
+// Fallback definition for ports that don't need any port-specific
+// non-blocking event processing.
+#define MICROPY_INTERNAL_EVENT_HOOK (void)0
+#endif
+
+#endif // MICROPY_INCLUDED_PY_MPHAL_H

+ 576 - 0
mp_flipper/lib/micropython/py/mpprint.c

@@ -0,0 +1,576 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013-2015 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <stdarg.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+
+#include "py/mphal.h"
+#include "py/mpprint.h"
+#include "py/obj.h"
+#include "py/objint.h"
+#include "py/runtime.h"
+
+#if MICROPY_PY_BUILTINS_FLOAT
+#include "py/formatfloat.h"
+#endif
+
+static const char pad_spaces[] = "                ";
+static const char pad_zeroes[] = "0000000000000000";
+
+// Adapter that routes mp_print_t output to the platform's raw output hook.
+// The env argument is unused because MP_PLAT_PRINT_STRN is a global sink.
+static void plat_print_strn(void *env, const char *str, size_t len) {
+    (void)env;
+    MP_PLAT_PRINT_STRN(str, len);
+}
+
+// Shared printer object that writes directly to the platform output.
+const mp_print_t mp_plat_print = {NULL, plat_print_strn};
+
+// Print a NUL-terminated string to the given printer.
+// Returns the number of characters written (zero for an empty string,
+// in which case the underlying print hook is not called at all).
+int mp_print_str(const mp_print_t *print, const char *str) {
+    size_t len = strlen(str);
+    if (len) {
+        print->print_strn(print->data, str, len);
+    }
+    return len;
+}
+
+// Print str (of exactly len bytes) padded with the fill character out to
+// width columns.  flags selects left/center/right alignment via
+// PF_FLAG_LEFT_ADJUST / PF_FLAG_CENTER_ADJUST (default is right-aligned,
+// i.e. padding on the left).  Returns the total characters emitted,
+// including padding.  If len >= width then pad is <= 0 and no padding
+// is produced.
+int mp_print_strn(const mp_print_t *print, const char *str, size_t len, int flags, char fill, int width) {
+    int left_pad = 0;
+    int right_pad = 0;
+    int pad = width - len;
+    int pad_size;
+    int total_chars_printed = 0;
+    const char *pad_chars;
+
+    // Select a pre-built run of pad characters so padding can be emitted
+    // in chunks rather than one byte at a time.
+    if (!fill || fill == ' ') {
+        pad_chars = pad_spaces;
+        pad_size = sizeof(pad_spaces) - 1;
+    } else if (fill == '0') {
+        pad_chars = pad_zeroes;
+        pad_size = sizeof(pad_zeroes) - 1;
+    } else {
+        // Other pad characters are fairly unusual, so we'll take the hit
+        // and output them 1 at a time.
+        pad_chars = &fill;
+        pad_size = 1;
+    }
+
+    // Split the total padding between the two sides according to alignment.
+    if (flags & PF_FLAG_CENTER_ADJUST) {
+        left_pad = pad / 2;
+        right_pad = pad - left_pad;
+    } else if (flags & PF_FLAG_LEFT_ADJUST) {
+        right_pad = pad;
+    } else {
+        left_pad = pad;
+    }
+
+    if (left_pad > 0) {
+        total_chars_printed += left_pad;
+        // Emit the padding in chunks of at most pad_size characters.
+        while (left_pad > 0) {
+            int p = left_pad;
+            if (p > pad_size) {
+                p = pad_size;
+            }
+            print->print_strn(print->data, pad_chars, p);
+            left_pad -= p;
+        }
+    }
+    if (len) {
+        print->print_strn(print->data, str, len);
+        total_chars_printed += len;
+    }
+    if (right_pad > 0) {
+        total_chars_printed += right_pad;
+        // Same chunked emission as for the left padding above.
+        while (right_pad > 0) {
+            int p = right_pad;
+            if (p > pad_size) {
+                p = pad_size;
+            }
+            print->print_strn(print->data, pad_chars, p);
+            right_pad -= p;
+        }
+    }
+    return total_chars_printed;
+}
+
+// 32-bits is 10 digits, add 3 for commas, 1 for sign, 1 for terminating null
+// We can use 16 characters for 32-bit and 32 characters for 64-bit
+#define INT_BUF_SIZE (sizeof(mp_int_t) * 4)
+
+// Our mp_vprintf function below does not support the '#' format modifier to
+// print the prefix of a non-base-10 number, so we don't need code for this.
+#define SUPPORT_INT_BASE_PREFIX (0)
+
+// This function is used exclusively by mp_vprintf to format ints.
+// It needs to be a separate function to mp_print_mp_int, since converting to a mp_int looses the MSB.
+// Format the machine integer x in the given base into a small stack buffer
+// (digits generated least-significant first, written backwards), then print
+// it with sign/padding handling.  sgn is non-zero when x should be treated
+// as signed; base_char is 'a' or 'A' and selects the case of digits >= 10.
+// Returns the number of characters printed.
+static int mp_print_int(const mp_print_t *print, mp_uint_t x, int sgn, int base, int base_char, int flags, char fill, int width) {
+    char sign = 0;
+    if (sgn) {
+        if ((mp_int_t)x < 0) {
+            sign = '-';
+            x = -x;
+        } else if (flags & PF_FLAG_SHOW_SIGN) {
+            sign = '+';
+        } else if (flags & PF_FLAG_SPACE_SIGN) {
+            sign = ' ';
+        }
+    }
+
+    char buf[INT_BUF_SIZE];
+    char *b = buf + INT_BUF_SIZE;
+
+    if (x == 0) {
+        *(--b) = '0';
+    } else {
+        // Generate digits from least to most significant, filling the
+        // buffer from the end towards the front.
+        do {
+            int c = x % base;
+            x /= base;
+            if (c >= 10) {
+                c += base_char - 10;
+            } else {
+                c += '0';
+            }
+            *(--b) = c;
+        } while (b > buf && x != 0);
+    }
+
+    #if SUPPORT_INT_BASE_PREFIX
+    char prefix_char = '\0';
+
+    if (flags & PF_FLAG_SHOW_PREFIX) {
+        if (base == 2) {
+            prefix_char = base_char + 'b' - 'a';
+        } else if (base == 8) {
+            prefix_char = base_char + 'o' - 'a';
+        } else if (base == 16) {
+            prefix_char = base_char + 'x' - 'a';
+        }
+    }
+    #endif
+
+    int len = 0;
+    if (flags & PF_FLAG_PAD_AFTER_SIGN) {
+        // Sign (and prefix) are printed first so zero-padding goes between
+        // them and the digits, e.g. "-0005" rather than "000-5".
+        if (sign) {
+            len += mp_print_strn(print, &sign, 1, flags, fill, 1);
+            width--;
+        }
+        #if SUPPORT_INT_BASE_PREFIX
+        if (prefix_char) {
+            len += mp_print_strn(print, "0", 1, flags, fill, 1);
+            len += mp_print_strn(print, &prefix_char, 1, flags, fill, 1);
+            width -= 2;
+        }
+        #endif
+    } else {
+        // Otherwise the sign/prefix become part of the digit string, so
+        // any padding is placed before them.
+        #if SUPPORT_INT_BASE_PREFIX
+        if (prefix_char && b > &buf[1]) {
+            *(--b) = prefix_char;
+            *(--b) = '0';
+        }
+        #endif
+        if (sign && b > buf) {
+            *(--b) = sign;
+        }
+    }
+
+    len += mp_print_strn(print, b, buf + INT_BUF_SIZE - b, flags, fill, width);
+    return len;
+}
+
+// Print a MicroPython integer object (small int or big int) in the given
+// base with full str.format/printf-style handling of sign, prefix ("0b",
+// "0o", "0x"), comma grouping, precision (zero-pad to prec digits) and
+// field width/alignment.  Falls back to mp_obj_get_int for non-int objects
+// (which converts bools or raises for other types).  Returns the number of
+// characters printed.
+int mp_print_mp_int(const mp_print_t *print, mp_obj_t x, int base, int base_char, int flags, char fill, int width, int prec) {
+    // These are the only values for "base" that are required to be supported by this
+    // function, since Python only allows the user to format integers in these bases.
+    // If needed this function could be generalised to handle other values.
+    assert(base == 2 || base == 8 || base == 10 || base == 16);
+
+    if (!mp_obj_is_int(x)) {
+        // This will convert booleans to int, or raise an error for
+        // non-integer types.
+        x = MP_OBJ_NEW_SMALL_INT(mp_obj_get_int(x));
+    }
+
+    // Zero fill with right alignment: fold the width into the precision so
+    // the zero padding is applied between sign/prefix and digits.
+    if ((flags & (PF_FLAG_LEFT_ADJUST | PF_FLAG_CENTER_ADJUST)) == 0 && fill == '0') {
+        if (prec > width) {
+            width = prec;
+        }
+        prec = 0;
+    }
+    // Build the sign/base prefix string, e.g. "+0x"; max 3 chars + NUL.
+    char prefix_buf[4];
+    char *prefix = prefix_buf;
+
+    if (mp_obj_int_sign(x) >= 0) {
+        if (flags & PF_FLAG_SHOW_SIGN) {
+            *prefix++ = '+';
+        } else if (flags & PF_FLAG_SPACE_SIGN) {
+            *prefix++ = ' ';
+        }
+    }
+
+    if (flags & PF_FLAG_SHOW_PREFIX) {
+        if (base == 2) {
+            *prefix++ = '0';
+            *prefix++ = base_char + 'b' - 'a';
+        } else if (base == 8) {
+            *prefix++ = '0';
+            if (flags & PF_FLAG_SHOW_OCTAL_LETTER) {
+                *prefix++ = base_char + 'o' - 'a';
+            }
+        } else if (base == 16) {
+            *prefix++ = '0';
+            *prefix++ = base_char + 'x' - 'a';
+        }
+    }
+    *prefix = '\0';
+    int prefix_len = prefix - prefix_buf;
+    prefix = prefix_buf;
+
+    char comma = '\0';
+    if (flags & PF_FLAG_SHOW_COMMA) {
+        comma = ',';
+    }
+
+    // The size of this buffer is rather arbitrary. If it's not large
+    // enough, a dynamic one will be allocated.
+    char stack_buf[sizeof(mp_int_t) * 4];
+    char *buf = stack_buf;
+    size_t buf_size = sizeof(stack_buf);
+    size_t fmt_size = 0;
+    char *str;
+
+    if (prec > 1) {
+        flags |= PF_FLAG_PAD_AFTER_SIGN;
+    }
+    char sign = '\0';
+    if (flags & PF_FLAG_PAD_AFTER_SIGN) {
+        // We add the pad in this function, so since the pad goes after
+        // the sign & prefix, we format without a prefix
+        str = mp_obj_int_formatted(&buf, &buf_size, &fmt_size,
+            x, base, NULL, base_char, comma);
+        if (*str == '-') {
+            // Strip the minus sign off the digit string; it is printed
+            // separately before the padding below.
+            sign = *str++;
+            fmt_size--;
+        }
+    } else {
+        str = mp_obj_int_formatted(&buf, &buf_size, &fmt_size,
+            x, base, prefix, base_char, comma);
+    }
+
+    int spaces_before = 0;
+    int spaces_after = 0;
+
+    if (prec > 1) {
+        // If prec was specified, then prec specifies the width to zero-pad the
+        // the number to. This zero-padded number then gets left or right
+        // aligned in width characters.
+
+        int prec_width = fmt_size;  // The digits
+        if (prec_width < prec) {
+            prec_width = prec;
+        }
+        if (flags & PF_FLAG_PAD_AFTER_SIGN) {
+            if (sign) {
+                prec_width++;
+            }
+            prec_width += prefix_len;
+        }
+        if (prec_width < width) {
+            if (flags & PF_FLAG_LEFT_ADJUST) {
+                spaces_after = width - prec_width;
+            } else {
+                spaces_before = width - prec_width;
+            }
+        }
+        // The zero-padded number itself is always right-aligned within prec.
+        fill = '0';
+        flags &= ~PF_FLAG_LEFT_ADJUST;
+    }
+
+    int len = 0;
+    if (spaces_before) {
+        len += mp_print_strn(print, "", 0, 0, ' ', spaces_before);
+    }
+    if (flags & PF_FLAG_PAD_AFTER_SIGN) {
+        // pad after sign implies pad after prefix as well.
+        if (sign) {
+            len += mp_print_strn(print, &sign, 1, 0, 0, 1);
+            width--;
+        }
+        if (prefix_len) {
+            len += mp_print_strn(print, prefix, prefix_len, 0, 0, 1);
+            width -= prefix_len;
+        }
+    }
+    if (prec > 1) {
+        width = prec;
+    }
+
+    len += mp_print_strn(print, str, fmt_size, flags, fill, width);
+
+    if (spaces_after) {
+        len += mp_print_strn(print, "", 0, 0, ' ', spaces_after);
+    }
+
+    // Free the dynamically-allocated buffer if mp_obj_int_formatted needed one.
+    if (buf != stack_buf) {
+        m_del(char, buf, buf_size);
+    }
+    return len;
+}
+
+#if MICROPY_PY_BUILTINS_FLOAT
+// Format the float f (printf conversion char in fmt, e.g. e/E/f/F/g/G) into a
+// small stack buffer via mp_format_float, then print it honouring flags, fill
+// and width.  Returns the number of characters written to the printer.
+int mp_print_float(const mp_print_t *print, mp_float_t f, char fmt, int flags, char fill, int width, int prec) {
+    char buf[32]; // fixed-size conversion buffer; mp_format_float is bounded by sizeof(buf)
+    char sign = '\0';
+    int chrs = 0; // running count of characters printed
+
+    // An explicit '+' sign flag takes precedence over the space-sign flag.
+    if (flags & PF_FLAG_SHOW_SIGN) {
+        sign = '+';
+    } else
+    if (flags & PF_FLAG_SPACE_SIGN) {
+        sign = ' ';
+    }
+
+    int len = mp_format_float(f, buf, sizeof(buf), fmt, prec, sign);
+
+    char *s = buf;
+
+    // Append a literal '%' after the digits, but only if it still fits in buf.
+    if ((flags & PF_FLAG_ADD_PERCENT) && (size_t)(len + 1) < sizeof(buf)) {
+        buf[len++] = '%';
+        buf[len] = '\0';
+    }
+
+    // buf[0] < '0' returns true if the first character is space, + or -
+    if ((flags & PF_FLAG_PAD_AFTER_SIGN) && buf[0] < '0') {
+        // We have a sign character
+        s++;
+        chrs += mp_print_strn(print, &buf[0], 1, 0, 0, 1);
+        width--;
+        len--;
+    }
+
+    chrs += mp_print_strn(print, s, len, flags, fill, width);
+
+    return chrs;
+}
+#endif
+
+// Variadic front-end for the formatter: collects the argument list and
+// forwards to mp_vprintf.  Returns the number of characters printed.
+int mp_printf(const mp_print_t *print, const char *fmt, ...) {
+    va_list args;
+    va_start(args, fmt);
+    int n_printed = mp_vprintf(print, fmt, args);
+    va_end(args);
+    return n_printed;
+}
+
+// Minimal printf-style formatter used by the core.  The supported conversions
+// are those in the switch below: %b (prints "true"/"false"), %c, %q (qstr),
+// %s, %d, %u/%x/%X, %p/%P, the float conversions when enabled, and the
+// 'l'/'ll' length prefixes.  Returns the number of characters printed.
+int mp_vprintf(const mp_print_t *print, const char *fmt, va_list args) {
+    int chrs = 0;
+    for (;;) {
+        {
+            // Emit the literal run up to the next '%' (or end of string).
+            const char *f = fmt;
+            while (*f != '\0' && *f != '%') {
+                ++f; // XXX UTF8 advance char
+            }
+            if (f > fmt) {
+                print->print_strn(print->data, fmt, f - fmt);
+                chrs += f - fmt;
+                fmt = f;
+            }
+        }
+
+        if (*fmt == '\0') {
+            break;
+        }
+
+        // move past % character
+        ++fmt;
+
+        // parse flags, if they exist
+        int flags = 0;
+        char fill = ' ';
+        while (*fmt != '\0') {
+            if (*fmt == '-') {
+                flags |= PF_FLAG_LEFT_ADJUST;
+            } else if (*fmt == '+') {
+                flags |= PF_FLAG_SHOW_SIGN;
+            } else if (*fmt == ' ') {
+                flags |= PF_FLAG_SPACE_SIGN;
+            } else if (*fmt == '!') {
+                flags |= PF_FLAG_NO_TRAILZ;
+            } else if (*fmt == '0') {
+                flags |= PF_FLAG_PAD_AFTER_SIGN;
+                fill = '0';
+            } else {
+                break;
+            }
+            ++fmt;
+        }
+
+        // parse width, if it exists
+        int width = 0;
+        for (; '0' <= *fmt && *fmt <= '9'; ++fmt) {
+            width = width * 10 + *fmt - '0';
+        }
+
+        // parse precision, if it exists
+        int prec = -1;
+        if (*fmt == '.') {
+            ++fmt;
+            if (*fmt == '*') {
+                ++fmt;
+                prec = va_arg(args, int);
+            } else {
+                prec = 0;
+                for (; '0' <= *fmt && *fmt <= '9'; ++fmt) {
+                    prec = prec * 10 + *fmt - '0';
+                }
+            }
+            if (prec < 0) {
+                prec = 0;
+            }
+        }
+
+        // parse long specifiers (only for LP64 model where they make a difference)
+        // On non-LP64 targets long_arg is declared const false so the compiler
+        // can fold away the long paths below.
+        #ifndef __LP64__
+        const
+        #endif
+        bool long_arg = false;
+        if (*fmt == 'l') {
+            ++fmt;
+            #ifdef __LP64__
+            long_arg = true;
+            #endif
+        }
+
+        if (*fmt == '\0') {
+            break;
+        }
+
+        switch (*fmt) {
+            case 'b':
+                if (va_arg(args, int)) {
+                    chrs += mp_print_strn(print, "true", 4, flags, fill, width);
+                } else {
+                    chrs += mp_print_strn(print, "false", 5, flags, fill, width);
+                }
+                break;
+            case 'c': {
+                char str = va_arg(args, int);
+                chrs += mp_print_strn(print, &str, 1, flags, fill, width);
+                break;
+            }
+            case 'q': {
+                // Interned string: fetch its data/length from the qstr pool.
+                qstr qst = va_arg(args, qstr);
+                size_t len;
+                const char *str = (const char *)qstr_data(qst, &len);
+                if (prec >= 0 && (size_t)prec < len) {
+                    len = prec;
+                }
+                chrs += mp_print_strn(print, str, len, flags, fill, width);
+                break;
+            }
+            case 's': {
+                const char *str = va_arg(args, const char *);
+                #ifndef NDEBUG
+                // With debugging enabled, catch printing of null string pointers
+                if (prec != 0 && str == NULL) {
+                    chrs += mp_print_strn(print, "(null)", 6, flags, fill, width);
+                    break;
+                }
+                #endif
+                size_t len = strlen(str);
+                if (prec >= 0 && (size_t)prec < len) {
+                    len = prec;
+                }
+                chrs += mp_print_strn(print, str, len, flags, fill, width);
+                break;
+            }
+            case 'd': {
+                mp_int_t val;
+                if (long_arg) {
+                    val = va_arg(args, long int);
+                } else {
+                    val = va_arg(args, int);
+                }
+                chrs += mp_print_int(print, val, 1, 10, 'a', flags, fill, width);
+                break;
+            }
+            case 'u':
+            case 'x':
+            case 'X': {
+                // ASCII arithmetic on the conversion character itself:
+                int base = 16 - ((*fmt + 1) & 6); // maps char u/x/X to base 10/16/16
+                char fmt_c = (*fmt & 0xf0) - 'P' + 'A'; // maps char u/x/X to char a/a/A
+                mp_uint_t val;
+                if (long_arg) {
+                    val = va_arg(args, unsigned long int);
+                } else {
+                    val = va_arg(args, unsigned int);
+                }
+                chrs += mp_print_int(print, val, 0, base, fmt_c, flags, fill, width);
+                break;
+            }
+            case 'p':
+            case 'P': // don't bother to handle upcase for 'P'
+                // Use unsigned long int to work on both ILP32 and LP64 systems
+                chrs += mp_print_int(print, va_arg(args, unsigned long int), 0, 16, 'a', flags, fill, width);
+                break;
+            #if MICROPY_PY_BUILTINS_FLOAT
+            case 'e':
+            case 'E':
+            case 'f':
+            case 'F':
+            case 'g':
+            case 'G': {
+                #if ((MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_FLOAT) || (MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_DOUBLE))
+                // Floats are promoted to double when passed through '...'.
+                mp_float_t f = (mp_float_t)va_arg(args, double);
+                chrs += mp_print_float(print, f, *fmt, flags, fill, width, prec);
+                #else
+                #error Unknown MICROPY FLOAT IMPL
+                #endif
+                break;
+            }
+            #endif
+                // Because 'l' is eaten above, another 'l' means %ll.  We need to support
+                // this length specifier for OBJ_REPR_D (64-bit NaN boxing).
+                // TODO Either enable this unconditionally, or provide a specific config var.
+            #if (MICROPY_OBJ_REPR == MICROPY_OBJ_REPR_D) || defined(_WIN64)
+            case 'l': {
+                unsigned long long int arg_value = va_arg(args, unsigned long long int);
+                ++fmt;
+                assert(*fmt == 'u' || *fmt == 'd' || !"unsupported fmt char");
+                chrs += mp_print_int(print, arg_value, *fmt == 'd', 10, 'a', flags, fill, width);
+                break;
+            }
+            #endif
+            default:
+                // if it's not %% then it's an unsupported format character
+                assert(*fmt == '%' || !"unsupported fmt char");
+                print->print_strn(print->data, fmt, 1);
+                chrs += 1;
+                break;
+        }
+        ++fmt;
+    }
+    return chrs;
+}

+ 82 - 0
mp_flipper/lib/micropython/py/mpprint.h

@@ -0,0 +1,82 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_MPPRINT_H
+#define MICROPY_INCLUDED_PY_MPPRINT_H
+
+#include "py/mpconfig.h"
+
+// Formatting flags accepted by mp_print_strn/mp_print_float (and the
+// integer printer); combined as a bit mask.
+#define PF_FLAG_LEFT_ADJUST       (0x001)
+#define PF_FLAG_SHOW_SIGN         (0x002)
+#define PF_FLAG_SPACE_SIGN        (0x004)
+#define PF_FLAG_NO_TRAILZ         (0x008)
+#define PF_FLAG_SHOW_PREFIX       (0x010)
+#define PF_FLAG_SHOW_COMMA        (0x020)
+#define PF_FLAG_PAD_AFTER_SIGN    (0x040)
+#define PF_FLAG_CENTER_ADJUST     (0x080)
+#define PF_FLAG_ADD_PERCENT       (0x100)
+#define PF_FLAG_SHOW_OCTAL_LETTER (0x200)
+
+// Default printer used by the Python print() machinery: sys.stdout when
+// available, otherwise the bare platform printer.
+#if MICROPY_PY_IO && MICROPY_PY_SYS_STDFILES
+#define MP_PYTHON_PRINTER &mp_sys_stdout_print
+#else
+#define MP_PYTHON_PRINTER &mp_plat_print
+#endif
+
+// Low-level output callback: write len bytes of str to the destination
+// identified by the opaque data pointer.
+typedef void (*mp_print_strn_t)(void *data, const char *str, size_t len);
+
+// A printer: an output callback plus its opaque context.
+typedef struct _mp_print_t {
+    void *data;
+    mp_print_strn_t print_strn;
+} mp_print_t;
+
+// Extended printer carrying custom separators; retrieved from a plain
+// mp_print_t pointer via MP_PRINT_GET_EXT.
+typedef struct _mp_print_ext_t {
+    mp_print_t base;
+    const char *item_separator;
+    const char *key_separator;
+} mp_print_ext_t;
+
+#define MP_PRINT_GET_EXT(print) ((mp_print_ext_t *)print)
+
+// All (non-debug) prints go through one of the two interfaces below.
+// 1) Wrapper for platform print function, which wraps MP_PLAT_PRINT_STRN.
+extern const mp_print_t mp_plat_print;
+#if MICROPY_PY_IO && MICROPY_PY_SYS_STDFILES
+// 2) Wrapper for printing to sys.stdout.
+extern const mp_print_t mp_sys_stdout_print;
+#endif
+
+int mp_print_str(const mp_print_t *print, const char *str);
+int mp_print_strn(const mp_print_t *print, const char *str, size_t len, int flags, char fill, int width);
+#if MICROPY_PY_BUILTINS_FLOAT
+int mp_print_float(const mp_print_t *print, mp_float_t f, char fmt, int flags, char fill, int width, int prec);
+#endif
+
+int mp_printf(const mp_print_t *print, const char *fmt, ...);
+// Only declared when <stdarg.h> has been included by the translation unit.
+#ifdef va_start
+int mp_vprintf(const mp_print_t *print, const char *fmt, va_list args);
+#endif
+
+#endif // MICROPY_INCLUDED_PY_MPPRINT_H

+ 33 - 0
mp_flipper/lib/micropython/py/mpstate.c

@@ -0,0 +1,33 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "py/mpstate.h"
+
+#if MICROPY_DYNAMIC_COMPILER
+// Runtime configuration for the dynamic compiler, zero-initialised.
+mp_dynamic_compiler_t mp_dynamic_compiler = {0};
+#endif
+
+// The single global instance of the interpreter state (see mpstate.h).
+mp_state_ctx_t mp_state_ctx;

+ 320 - 0
mp_flipper/lib/micropython/py/mpstate.h

@@ -0,0 +1,320 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_MPSTATE_H
+#define MICROPY_INCLUDED_PY_MPSTATE_H
+
+#include <stdint.h>
+
+#include "py/mpconfig.h"
+#include "py/mpthread.h"
+#include "py/misc.h"
+#include "py/nlr.h"
+#include "py/obj.h"
+#include "py/objlist.h"
+#include "py/objexcept.h"
+
+// This file contains structures defining the state of the MicroPython
+// memory system, runtime and virtual machine.  The state is a global
+// variable, but in the future it is hoped that the state can become local.
+
+#if MICROPY_PY_SYS_ATTR_DELEGATION
+// Indices of the mutable sys attributes; which entries exist depends on the
+// enabled MICROPY_PY_SYS_* options, so indices are assigned conditionally.
+// Must be kept in sync with sys_mutable_keys in modsys.c.
+enum {
+    #if MICROPY_PY_SYS_PATH
+    MP_SYS_MUTABLE_PATH,
+    #endif
+    #if MICROPY_PY_SYS_PS1_PS2
+    MP_SYS_MUTABLE_PS1,
+    MP_SYS_MUTABLE_PS2,
+    #endif
+    #if MICROPY_PY_SYS_TRACEBACKLIMIT
+    MP_SYS_MUTABLE_TRACEBACKLIMIT,
+    #endif
+    MP_SYS_MUTABLE_NUM, // total number of mutable entries (always last)
+};
+#endif // MICROPY_PY_SYS_ATTR_DELEGATION
+
+// This structure contains dynamic configuration for the compiler.
+#if MICROPY_DYNAMIC_COMPILER
+typedef struct mp_dynamic_compiler_t {
+    uint8_t small_int_bits; // must be <= host small_int_bits
+    uint8_t native_arch;    // target architecture for native emitters
+    uint8_t nlr_buf_num_regs;
+} mp_dynamic_compiler_t;
+extern mp_dynamic_compiler_t mp_dynamic_compiler;
+#endif
+
+// These are the values for sched_state
+#define MP_SCHED_IDLE (1)
+#define MP_SCHED_LOCKED (-1)
+#define MP_SCHED_PENDING (0) // 0 so it's a quick check in the VM
+
+// One entry of the scheduler queue: a callback and its single argument.
+typedef struct _mp_sched_item_t {
+    mp_obj_t func;
+    mp_obj_t arg;
+} mp_sched_item_t;
+
+// This structure holds information about a single contiguous area of
+// memory reserved for the memory manager.
+typedef struct _mp_state_mem_area_t {
+    #if MICROPY_GC_SPLIT_HEAP
+    // Linked list of heap areas when the split heap is enabled.
+    struct _mp_state_mem_area_t *next;
+    #endif
+
+    byte *gc_alloc_table_start;
+    size_t gc_alloc_table_byte_len;
+    #if MICROPY_ENABLE_FINALISER
+    byte *gc_finaliser_table_start;
+    #endif
+    byte *gc_pool_start;
+    byte *gc_pool_end;
+
+    size_t gc_last_free_atb_index;
+    size_t gc_last_used_block; // The block ID of the highest block allocated in the area
+} mp_state_mem_area_t;
+
+// This structure hold information about the memory allocation system.
+typedef struct _mp_state_mem_t {
+    #if MICROPY_MEM_STATS
+    size_t total_bytes_allocated;
+    size_t current_bytes_allocated;
+    size_t peak_bytes_allocated;
+    #endif
+
+    // The primary (first) heap area; more may be chained when split heap is on.
+    mp_state_mem_area_t area;
+
+    int gc_stack_overflow;
+    MICROPY_GC_STACK_ENTRY_TYPE gc_block_stack[MICROPY_ALLOC_GC_STACK_SIZE];
+    #if MICROPY_GC_SPLIT_HEAP
+    // Array that tracks the area for each block on gc_block_stack.
+    mp_state_mem_area_t *gc_area_stack[MICROPY_ALLOC_GC_STACK_SIZE];
+    #endif
+
+    // This variable controls auto garbage collection.  If set to 0 then the
+    // GC won't automatically run when gc_alloc can't find enough blocks.  But
+    // you can still allocate/free memory and also explicitly call gc_collect.
+    uint16_t gc_auto_collect_enabled;
+
+    #if MICROPY_GC_ALLOC_THRESHOLD
+    size_t gc_alloc_amount;
+    size_t gc_alloc_threshold;
+    #endif
+
+    #if MICROPY_GC_SPLIT_HEAP
+    mp_state_mem_area_t *gc_last_free_area;
+    #endif
+
+    #if MICROPY_PY_GC_COLLECT_RETVAL
+    size_t gc_collected;
+    #endif
+
+    #if MICROPY_PY_THREAD && !MICROPY_PY_THREAD_GIL
+    // This is a global mutex used to make the GC thread-safe.
+    mp_thread_mutex_t gc_mutex;
+    #endif
+} mp_state_mem_t;
+
+// This structure hold runtime and VM information.  It includes a section
+// which contains root pointers that must be scanned by the GC.
+// NOTE(review): member order is significant for GC root scanning — do not
+// reorder the root pointer section.
+typedef struct _mp_state_vm_t {
+    //
+    // CONTINUE ROOT POINTER SECTION
+    // This must start at the start of this structure and follows
+    // the state in the mp_state_thread_t structure, continuing
+    // the root pointer section from there.
+    //
+
+    qstr_pool_t *last_pool;
+
+    #if MICROPY_TRACKED_ALLOC
+    struct _m_tracked_node_t *m_tracked_head;
+    #endif
+
+    // non-heap memory for creating an exception if we can't allocate RAM
+    mp_obj_exception_t mp_emergency_exception_obj;
+
+    // memory for exception arguments if we can't allocate RAM
+    #if MICROPY_ENABLE_EMERGENCY_EXCEPTION_BUF
+    #if MICROPY_EMERGENCY_EXCEPTION_BUF_SIZE > 0
+    // statically allocated buf (needs to be aligned to mp_obj_t)
+    mp_obj_t mp_emergency_exception_buf[MICROPY_EMERGENCY_EXCEPTION_BUF_SIZE / sizeof(mp_obj_t)];
+    #else
+    // dynamically allocated buf
+    byte *mp_emergency_exception_buf;
+    #endif
+    #endif
+
+    #if MICROPY_KBD_EXCEPTION
+    // exception object of type KeyboardInterrupt
+    mp_obj_exception_t mp_kbd_exception;
+    #endif
+
+    // dictionary with loaded modules (may be exposed as sys.modules)
+    mp_obj_dict_t mp_loaded_modules_dict;
+
+    // dictionary for the __main__ module
+    mp_obj_dict_t dict_main;
+
+    // dictionary for overridden builtins
+    #if MICROPY_CAN_OVERRIDE_BUILTINS
+    mp_obj_dict_t *mp_module_builtins_override_dict;
+    #endif
+
+    // Include any root pointers registered with MP_REGISTER_ROOT_POINTER().
+    #ifndef NO_QSTR
+    // Only include root pointer definitions when not doing qstr extraction, because
+    // the qstr extraction stage also generates the root pointers header file.
+    #include "genhdr/root_pointers.h"
+    #endif
+
+    //
+    // END ROOT POINTER SECTION
+    ////////////////////////////////////////////////////////////
+
+    // pointer and sizes to store interned string data
+    // (qstr_last_chunk can be root pointer but is also stored in qstr pool)
+    char *qstr_last_chunk;
+    size_t qstr_last_alloc;
+    size_t qstr_last_used;
+
+    #if MICROPY_PY_THREAD && !MICROPY_PY_THREAD_GIL
+    // This is a global mutex used to make qstr interning thread-safe.
+    mp_thread_mutex_t qstr_mutex;
+    #endif
+
+    #if MICROPY_ENABLE_COMPILER
+    mp_uint_t mp_optimise_value;
+    #if MICROPY_EMIT_NATIVE
+    uint8_t default_emit_opt; // one of MP_EMIT_OPT_xxx
+    #endif
+    #endif
+
+    // size of the emergency exception buf, if it's dynamically allocated
+    #if MICROPY_ENABLE_EMERGENCY_EXCEPTION_BUF && MICROPY_EMERGENCY_EXCEPTION_BUF_SIZE == 0
+    mp_int_t mp_emergency_exception_buf_size;
+    #endif
+
+    #if MICROPY_ENABLE_SCHEDULER
+    volatile int16_t sched_state;
+
+    #if MICROPY_SCHEDULER_STATIC_NODES
+    // These will usually point to statically allocated memory.  They are not
+    // traced by the GC.  They are assumed to be zero'd out before mp_init() is
+    // called (usually because this struct lives in the BSS).
+    struct _mp_sched_node_t *sched_head;
+    struct _mp_sched_node_t *sched_tail;
+    #endif
+
+    // These index sched_queue.
+    uint8_t sched_len;
+    uint8_t sched_idx;
+    #endif
+
+    #if MICROPY_ENABLE_VM_ABORT
+    bool vm_abort;
+    nlr_buf_t *nlr_abort;
+    #endif
+
+    #if MICROPY_PY_THREAD_GIL
+    // This is a global mutex used to make the VM/runtime thread-safe.
+    mp_thread_mutex_t gil_mutex;
+    #endif
+
+    #if MICROPY_OPT_MAP_LOOKUP_CACHE
+    // See mp_map_lookup.
+    uint8_t map_lookup_cache[MICROPY_OPT_MAP_LOOKUP_CACHE_SIZE];
+    #endif
+} mp_state_vm_t;
+
+// This structure holds state that is specific to a given thread. Everything
+// in this structure is scanned for root pointers.  Anything added to this
+// structure must have corresponding initialisation added to thread_entry (in
+// py/modthread.c).
+typedef struct _mp_state_thread_t {
+    // Stack top at the start of program
+    char *stack_top;
+
+    #if MICROPY_STACK_CHECK
+    size_t stack_limit;
+    #endif
+
+    #if MICROPY_ENABLE_PYSTACK
+    uint8_t *pystack_start;
+    uint8_t *pystack_end;
+    uint8_t *pystack_cur;
+    #endif
+
+    // Locking of the GC is done per thread.
+    uint16_t gc_lock_depth;
+
+    ////////////////////////////////////////////////////////////
+    // START ROOT POINTER SECTION
+    // Everything that needs GC scanning must start here, and
+    // is followed by state in the mp_state_vm_t structure.
+    //
+
+    mp_obj_dict_t *dict_locals;
+    mp_obj_dict_t *dict_globals;
+
+    nlr_buf_t *nlr_top;
+    nlr_jump_callback_node_t *nlr_jump_callback_top;
+
+    // pending exception object (MP_OBJ_NULL if not pending)
+    volatile mp_obj_t mp_pending_exception;
+
+    // If MP_OBJ_STOP_ITERATION is propagated then this holds its argument.
+    mp_obj_t stop_iteration_arg;
+
+    #if MICROPY_PY_SYS_SETTRACE
+    mp_obj_t prof_trace_callback;
+    bool prof_callback_is_executing;
+    struct _mp_code_state_t *current_code_state;
+    #endif
+} mp_state_thread_t;
+
+// This structure combines the above 3 structures.
+// The order of the entries are important for root pointer scanning in the GC to work.
+typedef struct _mp_state_ctx_t {
+    mp_state_thread_t thread;
+    mp_state_vm_t vm;
+    mp_state_mem_t mem;
+} mp_state_ctx_t;
+
+extern mp_state_ctx_t mp_state_ctx;
+
+// Accessors for the global state; use these rather than touching
+// mp_state_ctx directly.
+#define MP_STATE_VM(x) (mp_state_ctx.vm.x)
+#define MP_STATE_MEM(x) (mp_state_ctx.mem.x)
+#define MP_STATE_MAIN_THREAD(x) (mp_state_ctx.thread.x)
+
+// With threading enabled, MP_STATE_THREAD resolves to the current thread's
+// state; otherwise it aliases the main (only) thread's state.
+#if MICROPY_PY_THREAD
+#define MP_STATE_THREAD(x) (mp_thread_get_state()->x)
+#define mp_thread_is_main_thread() (mp_thread_get_state() == &mp_state_ctx.thread)
+#else
+#define MP_STATE_THREAD(x)  MP_STATE_MAIN_THREAD(x)
+#define mp_thread_is_main_thread() (true)
+#endif
+
+#endif // MICROPY_INCLUDED_PY_MPSTATE_H

Niektóre pliki nie zostały wyświetlone z powodu dużej ilości zmienionych plików