asmarm.c

/*
 * This file is part of the MicroPython project, http://micropython.org/
 *
 * The MIT License (MIT)
 *
 * Copyright (c) 2014 Fabian Vogt
 * Copyright (c) 2013, 2014 Damien P. George
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include <stdio.h>
#include <assert.h>
#include <string.h>

#include "py/mpconfig.h"

// wrapper around everything in this file
#if MICROPY_EMIT_ARM

#include "py/asmarm.h"
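
// An ARM branch encodes its target as a signed 24-bit word offset, so a
// relative offset fits only if bits 23..31 of the value are all equal
// (all zero for non-negative values, all one for negative ones).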
#define SIGNED_FIT24(x) ((((x) & 0xff800000) == 0) || (((x) & 0xff800000) == 0xff800000))

// Insert word into instruction flow
STATIC void emit(asm_arm_t *as, uint op) {
    uint8_t *c = mp_asm_base_get_cur_to_write_bytes(&as->base, 4);
    // c is NULL during the assembler's sizing pass, when only the amount of
    // code to be emitted is being measured and nothing is written
    if (c != NULL) {
        *(uint32_t *)c = op;
    }
}

// Insert word into instruction flow, add "ALWAYS" condition code
STATIC void emit_al(asm_arm_t *as, uint op) {
    emit(as, op | ASM_ARM_CC_AL);
}
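
// A32 encodings keep the condition code in bits 31..28; ASM_ARM_CC_AL is the
// "always" condition (0xe0000000), so ORing it in makes an instruction
// unconditional. The opcode builders below leave those four bits zero.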

// Basic instructions without condition code

STATIC uint asm_arm_op_push(uint reglist) {
    // stmfd sp!, {reglist}
    return 0x92d0000 | (reglist & 0xFFFF);
}

STATIC uint asm_arm_op_pop(uint reglist) {
    // ldmfd sp!, {reglist}
    return 0x8bd0000 | (reglist & 0xFFFF);
}

STATIC uint asm_arm_op_mov_reg(uint rd, uint rn) {
    // mov rd, rn
    return 0x1a00000 | (rd << 12) | rn;
}

STATIC uint asm_arm_op_mov_imm(uint rd, uint imm) {
    // mov rd, #imm
    return 0x3a00000 | (rd << 12) | imm;
}

STATIC uint asm_arm_op_mvn_imm(uint rd, uint imm) {
    // mvn rd, #imm
    return 0x3e00000 | (rd << 12) | imm;
}

STATIC uint asm_arm_op_add_imm(uint rd, uint rn, uint imm) {
    // add rd, rn, #imm
    return 0x2800000 | (rn << 16) | (rd << 12) | (imm & 0xFF);
}

STATIC uint asm_arm_op_add_reg(uint rd, uint rn, uint rm) {
    // add rd, rn, rm
    return 0x0800000 | (rn << 16) | (rd << 12) | rm;
}

STATIC uint asm_arm_op_sub_imm(uint rd, uint rn, uint imm) {
    // sub rd, rn, #imm
    return 0x2400000 | (rn << 16) | (rd << 12) | (imm & 0xFF);
}

STATIC uint asm_arm_op_sub_reg(uint rd, uint rn, uint rm) {
    // sub rd, rn, rm
    return 0x0400000 | (rn << 16) | (rd << 12) | rm;
}

STATIC uint asm_arm_op_mul_reg(uint rd, uint rm, uint rs) {
    // mul rd, rm, rs
    assert(rd != rm);
    return 0x0000090 | (rd << 16) | (rs << 8) | rm;
}

STATIC uint asm_arm_op_and_reg(uint rd, uint rn, uint rm) {
    // and rd, rn, rm
    return 0x0000000 | (rn << 16) | (rd << 12) | rm;
}

STATIC uint asm_arm_op_eor_reg(uint rd, uint rn, uint rm) {
    // eor rd, rn, rm
    return 0x0200000 | (rn << 16) | (rd << 12) | rm;
}

STATIC uint asm_arm_op_orr_reg(uint rd, uint rn, uint rm) {
    // orr rd, rn, rm
    return 0x1800000 | (rn << 16) | (rd << 12) | rm;
}

void asm_arm_bkpt(asm_arm_t *as) {
    // bkpt #0
    emit_al(as, 0x1200070);
}

// locals:
//  - stored on the stack in ascending order
//  - numbered 0 through num_locals-1
//  - SP points to first local
//
//  | SP
//  v
//  l0  l1  l2  ...  l(n-1)
//  ^                ^
//  | low address    | high address in RAM
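//
// For example, num_locals == 5 gives stack_adjust == 24 (20 bytes of locals,
// rounded up to keep SP 8-byte aligned), so the prologue emitted below is:
//   push {r1-r8, lr}
//   sub sp, sp, #24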
void asm_arm_entry(asm_arm_t *as, int num_locals) {
    assert(num_locals >= 0);

    as->stack_adjust = 0;
    as->push_reglist = 1 << ASM_ARM_REG_R1
        | 1 << ASM_ARM_REG_R2
        | 1 << ASM_ARM_REG_R3
        | 1 << ASM_ARM_REG_R4
        | 1 << ASM_ARM_REG_R5
        | 1 << ASM_ARM_REG_R6
        | 1 << ASM_ARM_REG_R7
        | 1 << ASM_ARM_REG_R8;

    // Only adjust the stack if there are more locals than usable registers
    if (num_locals > 3) {
        as->stack_adjust = num_locals * 4;
        // Align stack to 8 bytes
        if (num_locals & 1) {
            as->stack_adjust += 4;
        }
    }

    emit_al(as, asm_arm_op_push(as->push_reglist | 1 << ASM_ARM_REG_LR));

    if (as->stack_adjust > 0) {
        emit_al(as, asm_arm_op_sub_imm(ASM_ARM_REG_SP, ASM_ARM_REG_SP, as->stack_adjust));
    }
}

void asm_arm_exit(asm_arm_t *as) {
    if (as->stack_adjust > 0) {
        emit_al(as, asm_arm_op_add_imm(ASM_ARM_REG_SP, ASM_ARM_REG_SP, as->stack_adjust));
    }

    emit_al(as, asm_arm_op_pop(as->push_reglist | (1 << ASM_ARM_REG_PC)));
}

void asm_arm_push(asm_arm_t *as, uint reglist) {
    emit_al(as, asm_arm_op_push(reglist));
}

void asm_arm_pop(asm_arm_t *as, uint reglist) {
    emit_al(as, asm_arm_op_pop(reglist));
}

void asm_arm_mov_reg_reg(asm_arm_t *as, uint reg_dest, uint reg_src) {
    emit_al(as, asm_arm_op_mov_reg(reg_dest, reg_src));
}
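
// In ARM state the PC reads as the address of the current instruction plus 8,
// so "ldr rd, [pc]" loads the word placed just after the following branch,
// and that zero-offset branch in turn skips over the embedded literal.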
size_t asm_arm_mov_reg_i32(asm_arm_t *as, uint rd, int imm) {
    // Insert immediate into code and jump over it
    emit_al(as, 0x59f0000 | (rd << 12)); // ldr rd, [pc]
    emit_al(as, 0xa000000); // b pc
    size_t loc = mp_asm_base_get_code_pos(&as->base);
    emit(as, imm);
    return loc;
}

void asm_arm_mov_reg_i32_optimised(asm_arm_t *as, uint rd, int imm) {
    // TODO: There are more variants of immediate values
    if ((imm & 0xFF) == imm) {
        emit_al(as, asm_arm_op_mov_imm(rd, imm));
    } else if (imm < 0 && imm >= -256) {
        // mvn is "move not", not "move negative"
        emit_al(as, asm_arm_op_mvn_imm(rd, ~imm));
    } else {
        asm_arm_mov_reg_i32(as, rd, imm);
    }
}

void asm_arm_mov_local_reg(asm_arm_t *as, int local_num, uint rd) {
    // str rd, [sp, #local_num*4]
    emit_al(as, 0x58d0000 | (rd << 12) | (local_num << 2));
}

void asm_arm_mov_reg_local(asm_arm_t *as, uint rd, int local_num) {
    // ldr rd, [sp, #local_num*4]
    emit_al(as, 0x59d0000 | (rd << 12) | (local_num << 2));
}

void asm_arm_cmp_reg_i8(asm_arm_t *as, uint rd, int imm) {
    // cmp rd, #imm
    emit_al(as, 0x3500000 | (rd << 16) | (imm & 0xFF));
}

void asm_arm_cmp_reg_reg(asm_arm_t *as, uint rd, uint rn) {
    // cmp rd, rn
    emit_al(as, 0x1500000 | (rd << 16) | rn);
}
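
// ARM condition codes come in complementary pairs that differ only in the
// lowest bit of the 4-bit condition field (EQ/NE, CS/CC, GE/LT, ...), so
// XORing bit 28 of an instruction inverts its condition.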
void asm_arm_setcc_reg(asm_arm_t *as, uint rd, uint cond) {
    emit(as, asm_arm_op_mov_imm(rd, 1) | cond); // movCOND rd, #1
    emit(as, asm_arm_op_mov_imm(rd, 0) | (cond ^ (1 << 28))); // mov!COND rd, #0
}

void asm_arm_add_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm) {
    // add rd, rn, rm
    emit_al(as, asm_arm_op_add_reg(rd, rn, rm));
}

void asm_arm_sub_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm) {
    // sub rd, rn, rm
    emit_al(as, asm_arm_op_sub_reg(rd, rn, rm));
}

void asm_arm_mul_reg_reg_reg(asm_arm_t *as, uint rd, uint rs, uint rm) {
    // rs and rm are swapped because of restriction rd!=rm
    // mul rd, rm, rs
    emit_al(as, asm_arm_op_mul_reg(rd, rm, rs));
}

void asm_arm_and_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm) {
    // and rd, rn, rm
    emit_al(as, asm_arm_op_and_reg(rd, rn, rm));
}

void asm_arm_eor_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm) {
    // eor rd, rn, rm
    emit_al(as, asm_arm_op_eor_reg(rd, rn, rm));
}

void asm_arm_orr_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm) {
    // orr rd, rn, rm
    emit_al(as, asm_arm_op_orr_reg(rd, rn, rm));
}

void asm_arm_mov_reg_local_addr(asm_arm_t *as, uint rd, int local_num) {
    // add rd, sp, #local_num*4
    emit_al(as, asm_arm_op_add_imm(rd, ASM_ARM_REG_SP, local_num << 2));
}

void asm_arm_mov_reg_pcrel(asm_arm_t *as, uint reg_dest, uint label) {
    assert(label < as->base.max_num_labels);
    mp_uint_t dest = as->base.label_offsets[label];
    mp_int_t rel = dest - as->base.code_offset;
    rel -= 12 + 8; // adjust for load of rel, and then PC+8 prefetch of add_reg_reg_reg

    // To load rel into reg_dest, insert immediate into code and jump over it
    emit_al(as, 0x59f0000 | (reg_dest << 12)); // ldr rd, [pc]
    emit_al(as, 0xa000000); // b pc
    emit(as, rel);

    // Do reg_dest += PC
    asm_arm_add_reg_reg_reg(as, reg_dest, reg_dest, ASM_ARM_REG_PC);
}

void asm_arm_lsl_reg_reg(asm_arm_t *as, uint rd, uint rs) {
    // mov rd, rd, lsl rs
    emit_al(as, 0x1a00010 | (rd << 12) | (rs << 8) | rd);
}

void asm_arm_lsr_reg_reg(asm_arm_t *as, uint rd, uint rs) {
    // mov rd, rd, lsr rs
    emit_al(as, 0x1a00030 | (rd << 12) | (rs << 8) | rd);
}

void asm_arm_asr_reg_reg(asm_arm_t *as, uint rd, uint rs) {
    // mov rd, rd, asr rs
    emit_al(as, 0x1a00050 | (rd << 12) | (rs << 8) | rd);
}

void asm_arm_ldr_reg_reg(asm_arm_t *as, uint rd, uint rn, uint byte_offset) {
    // ldr rd, [rn, #off]
    emit_al(as, 0x5900000 | (rn << 16) | (rd << 12) | byte_offset);
}

void asm_arm_ldrh_reg_reg(asm_arm_t *as, uint rd, uint rn) {
    // ldrh rd, [rn]
    emit_al(as, 0x1d000b0 | (rn << 16) | (rd << 12));
}
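
// Halfword loads and stores use addressing mode 3, which splits the 8-bit
// immediate offset into a high nibble in instruction bits 11..8 and a low
// nibble in bits 3..0, hence the shuffling of byte_offset below.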
void asm_arm_ldrh_reg_reg_offset(asm_arm_t *as, uint rd, uint rn, uint byte_offset) {
    // ldrh rd, [rn, #off]
    emit_al(as, 0x1f000b0 | (rn << 16) | (rd << 12) | ((byte_offset & 0xf0) << 4) | (byte_offset & 0xf));
}

void asm_arm_ldrb_reg_reg(asm_arm_t *as, uint rd, uint rn) {
    // ldrb rd, [rn]
    emit_al(as, 0x5d00000 | (rn << 16) | (rd << 12));
}

void asm_arm_str_reg_reg(asm_arm_t *as, uint rd, uint rm, uint byte_offset) {
    // str rd, [rm, #off]
    emit_al(as, 0x5800000 | (rm << 16) | (rd << 12) | byte_offset);
}

void asm_arm_strh_reg_reg(asm_arm_t *as, uint rd, uint rm) {
    // strh rd, [rm]
    emit_al(as, 0x1c000b0 | (rm << 16) | (rd << 12));
}

void asm_arm_strb_reg_reg(asm_arm_t *as, uint rd, uint rm) {
    // strb rd, [rm]
    emit_al(as, 0x5c00000 | (rm << 16) | (rd << 12));
}

void asm_arm_str_reg_reg_reg(asm_arm_t *as, uint rd, uint rm, uint rn) {
    // str rd, [rm, rn, lsl #2]
    emit_al(as, 0x7800100 | (rm << 16) | (rd << 12) | rn);
}
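
// Note: the sequence below uses r8 as a scratch register for the scaled
// index, so rd and rm must not be r8.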
void asm_arm_strh_reg_reg_reg(asm_arm_t *as, uint rd, uint rm, uint rn) {
    // strh doesn't support scaled register index
    emit_al(as, 0x1a00080 | (ASM_ARM_REG_R8 << 12) | rn); // mov r8, rn, lsl #1
    emit_al(as, 0x18000b0 | (rm << 16) | (rd << 12) | ASM_ARM_REG_R8); // strh rd, [rm, r8]
}

void asm_arm_strb_reg_reg_reg(asm_arm_t *as, uint rd, uint rm, uint rn) {
    // strb rd, [rm, rn]
    emit_al(as, 0x7c00000 | (rm << 16) | (rd << 12) | rn);
}

void asm_arm_bcc_label(asm_arm_t *as, int cond, uint label) {
    assert(label < as->base.max_num_labels);
    mp_uint_t dest = as->base.label_offsets[label];
    mp_int_t rel = dest - as->base.code_offset;
    rel -= 8; // account for instruction prefetch, PC is 8 bytes ahead of this instruction
    rel >>= 2; // in ARM mode the branch target is 32-bit aligned, so the 2 LSB are omitted

    if (SIGNED_FIT24(rel)) {
        emit(as, cond | 0xa000000 | (rel & 0xffffff));
    } else {
        printf("asm_arm_bcc: branch does not fit in 24 bits\n");
    }
}

void asm_arm_b_label(asm_arm_t *as, uint label) {
    asm_arm_bcc_label(as, ASM_ARM_CC_AL, label);
}
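
// The indirect call below relies on two things: "mov lr, pc" reads PC as the
// address of the mov plus 8, i.e. the instruction just after the ldr, which
// becomes the return address; and r7 is assumed to hold the pointer to the
// table of native functions, from which entry fun_id is loaded straight
// into pc.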
void asm_arm_bl_ind(asm_arm_t *as, uint fun_id, uint reg_temp) {
    // The table offset should fit into the ldr instruction
    assert(fun_id < (0x1000 / 4));
    emit_al(as, asm_arm_op_mov_reg(ASM_ARM_REG_LR, ASM_ARM_REG_PC)); // mov lr, pc
    emit_al(as, 0x597f000 | (fun_id << 2)); // ldr pc, [r7, #fun_id*4]
}

void asm_arm_bx_reg(asm_arm_t *as, uint reg_src) {
    // bx reg_src
    emit_al(as, 0x012fff10 | reg_src);
}

#endif // MICROPY_EMIT_ARM