asmarm.c

/*
 * This file is part of the MicroPython project, http://micropython.org/
 *
 * The MIT License (MIT)
 *
 * Copyright (c) 2014 Fabian Vogt
 * Copyright (c) 2013, 2014 Damien P. George
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include <stdio.h>
#include <assert.h>
#include <string.h>

#include "py/mpconfig.h"

// wrapper around everything in this file
#if MICROPY_EMIT_ARM

#include "py/asmarm.h"

#define SIGNED_FIT24(x) (((x) & 0xff800000) == 0) || (((x) & 0xff000000) == 0xff000000)

// Insert word into instruction flow
static void emit(asm_arm_t *as, uint op) {
    uint8_t *c = mp_asm_base_get_cur_to_write_bytes(&as->base, 4);
    if (c != NULL) {
        *(uint32_t *)c = op;
    }
}

// Insert word into instruction flow, add "ALWAYS" condition code
static void emit_al(asm_arm_t *as, uint op) {
    emit(as, op | ASM_ARM_CC_AL);
}
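
// Note: every A32 instruction carries a 4-bit condition field in bits [31:28];
// ASM_ARM_CC_AL is the "always execute" condition, so emit_al simply ORs it
// into the otherwise condition-less opcodes built below.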

// Basic instructions without condition code
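// Most of the helpers below are ARM data-processing instructions, which share
// the layout (a fact of the A32 encoding, noted here for reference):
//   cond[31:28] | 00 I | opcode[24:21] | S | Rn[19:16] | Rd[15:12] | operand2[11:0]
// where I selects an immediate (I=1) or register (I=0) second operand.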

static uint asm_arm_op_push(uint reglist) {
    // stmfd sp!, {reglist}
    return 0x92d0000 | (reglist & 0xFFFF);
}

static uint asm_arm_op_pop(uint reglist) {
    // ldmfd sp!, {reglist}
    return 0x8bd0000 | (reglist & 0xFFFF);
}
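
// Worked example (for illustration): push {r1, lr} has reglist
// (1 << 1) | (1 << 14) = 0x4002, so asm_arm_op_push() returns 0x92d4002 and
// emit_al() turns it into 0xe92d4002 (stmfd sp!, {r1, lr}); the matching
// pop {r1, pc} comes out as 0xe8bd8002.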

static uint asm_arm_op_mov_reg(uint rd, uint rn) {
    // mov rd, rn
    return 0x1a00000 | (rd << 12) | rn;
}

static uint asm_arm_op_mov_imm(uint rd, uint imm) {
    // mov rd, #imm
    return 0x3a00000 | (rd << 12) | imm;
}

static uint asm_arm_op_mvn_imm(uint rd, uint imm) {
    // mvn rd, #imm
    return 0x3e00000 | (rd << 12) | imm;
}

static uint asm_arm_op_mvn_reg(uint rd, uint rm) {
    // mvn rd, rm
    return 0x1e00000 | (rd << 12) | rm;
}

static uint asm_arm_op_add_imm(uint rd, uint rn, uint imm) {
    // add rd, rn, #imm
    return 0x2800000 | (rn << 16) | (rd << 12) | (imm & 0xFF);
}

static uint asm_arm_op_add_reg(uint rd, uint rn, uint rm) {
    // add rd, rn, rm
    return 0x0800000 | (rn << 16) | (rd << 12) | rm;
}

static uint asm_arm_op_sub_imm(uint rd, uint rn, uint imm) {
    // sub rd, rn, #imm
    return 0x2400000 | (rn << 16) | (rd << 12) | (imm & 0xFF);
}

static uint asm_arm_op_sub_reg(uint rd, uint rn, uint rm) {
    // sub rd, rn, rm
    return 0x0400000 | (rn << 16) | (rd << 12) | rm;
}

static uint asm_arm_op_rsb_imm(uint rd, uint rn, uint imm) {
    // rsb rd, rn, #imm
    return 0x2600000 | (rn << 16) | (rd << 12) | (imm & 0xFF);
}

static uint asm_arm_op_mul_reg(uint rd, uint rm, uint rs) {
    // mul rd, rm, rs
    assert(rd != rm);
    return 0x0000090 | (rd << 16) | (rs << 8) | rm;
}

static uint asm_arm_op_and_reg(uint rd, uint rn, uint rm) {
    // and rd, rn, rm
    return 0x0000000 | (rn << 16) | (rd << 12) | rm;
}

static uint asm_arm_op_eor_reg(uint rd, uint rn, uint rm) {
    // eor rd, rn, rm
    return 0x0200000 | (rn << 16) | (rd << 12) | rm;
}

static uint asm_arm_op_orr_reg(uint rd, uint rn, uint rm) {
    // orr rd, rn, rm
    return 0x1800000 | (rn << 16) | (rd << 12) | rm;
}

void asm_arm_bkpt(asm_arm_t *as) {
    // bkpt #0
    emit_al(as, 0x1200070);
}

// locals:
//  - stored on the stack in ascending order
//  - numbered 0 through num_locals-1
//  - SP points to first local
//
//  | SP
//  v
//  l0  l1  l2  ...  l(n-1)
//  ^                ^
//  | low address    | high address in RAM
void asm_arm_entry(asm_arm_t *as, int num_locals) {
    assert(num_locals >= 0);

    as->stack_adjust = 0;
    as->push_reglist = 1 << ASM_ARM_REG_R1
        | 1 << ASM_ARM_REG_R2
        | 1 << ASM_ARM_REG_R3
        | 1 << ASM_ARM_REG_R4
        | 1 << ASM_ARM_REG_R5
        | 1 << ASM_ARM_REG_R6
        | 1 << ASM_ARM_REG_R7
        | 1 << ASM_ARM_REG_R8;

    // Only adjust the stack if there are more locals than usable registers
    if (num_locals > 3) {
        as->stack_adjust = num_locals * 4;
        // Align stack to 8 bytes
        if (num_locals & 1) {
            as->stack_adjust += 4;
        }
    }

    emit_al(as, asm_arm_op_push(as->push_reglist | 1 << ASM_ARM_REG_LR));

    if (as->stack_adjust > 0) {
        emit_al(as, asm_arm_op_sub_imm(ASM_ARM_REG_SP, ASM_ARM_REG_SP, as->stack_adjust));
    }
}

void asm_arm_exit(asm_arm_t *as) {
    if (as->stack_adjust > 0) {
        emit_al(as, asm_arm_op_add_imm(ASM_ARM_REG_SP, ASM_ARM_REG_SP, as->stack_adjust));
    }

    emit_al(as, asm_arm_op_pop(as->push_reglist | (1 << ASM_ARM_REG_PC)));
}

void asm_arm_push(asm_arm_t *as, uint reglist) {
    emit_al(as, asm_arm_op_push(reglist));
}

void asm_arm_pop(asm_arm_t *as, uint reglist) {
    emit_al(as, asm_arm_op_pop(reglist));
}

void asm_arm_mov_reg_reg(asm_arm_t *as, uint reg_dest, uint reg_src) {
    emit_al(as, asm_arm_op_mov_reg(reg_dest, reg_src));
}
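
// The next helper loads an arbitrary 32-bit value by placing it in the
// instruction stream: in ARM mode the PC reads as the current instruction's
// address + 8, so "ldr rd, [pc]" fetches the word stored right after the
// following branch, and "b" with a zero offset (target = PC + 8) skips over
// that literal word.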
size_t asm_arm_mov_reg_i32(asm_arm_t *as, uint rd, int imm) {
    // Insert immediate into code and jump over it
    emit_al(as, 0x59f0000 | (rd << 12)); // ldr rd, [pc]
    emit_al(as, 0xa000000); // b pc
    size_t loc = mp_asm_base_get_code_pos(&as->base);
    emit(as, imm);
    return loc;
}
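
// ARM data-processing immediates are an 8-bit value rotated right by an even
// amount; the optimised loader below only uses the unrotated forms (0..0xFF,
// or mvn for small negative values) and falls back to a literal load otherwise.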
void asm_arm_mov_reg_i32_optimised(asm_arm_t *as, uint rd, int imm) {
    // TODO: There are more variants of immediate values
    if ((imm & 0xFF) == imm) {
        emit_al(as, asm_arm_op_mov_imm(rd, imm));
    } else if (imm < 0 && imm >= -256) {
        // mvn is "move not", not "move negative"
        emit_al(as, asm_arm_op_mvn_imm(rd, ~imm));
    } else {
        asm_arm_mov_reg_i32(as, rd, imm);
    }
}

void asm_arm_mov_local_reg(asm_arm_t *as, int local_num, uint rd) {
    // str rd, [sp, #local_num*4]
    emit_al(as, 0x58d0000 | (rd << 12) | (local_num << 2));
}

void asm_arm_mov_reg_local(asm_arm_t *as, uint rd, int local_num) {
    // ldr rd, [sp, #local_num*4]
    emit_al(as, 0x59d0000 | (rd << 12) | (local_num << 2));
}

void asm_arm_cmp_reg_i8(asm_arm_t *as, uint rd, int imm) {
    // cmp rd, #imm
    emit_al(as, 0x3500000 | (rd << 16) | (imm & 0xFF));
}

void asm_arm_cmp_reg_reg(asm_arm_t *as, uint rd, uint rn) {
    // cmp rd, rn
    emit_al(as, 0x1500000 | (rd << 16) | rn);
}
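
// setcc emits two conditionally executed movs; the condition field lives in
// bits [31:28] and flipping its lowest bit (bit 28) yields the opposite
// condition (e.g. EQ <-> NE), which is what the XOR with (1 << 28) does.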
void asm_arm_setcc_reg(asm_arm_t *as, uint rd, uint cond) {
    emit(as, asm_arm_op_mov_imm(rd, 1) | cond); // movCOND rd, #1
    emit(as, asm_arm_op_mov_imm(rd, 0) | (cond ^ (1 << 28))); // mov!COND rd, #0
}

void asm_arm_mvn_reg_reg(asm_arm_t *as, uint rd, uint rm) {
    // mvn rd, rm
    // computes: rd := ~rm
    emit_al(as, asm_arm_op_mvn_reg(rd, rm));
}

void asm_arm_add_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm) {
    // add rd, rn, rm
    emit_al(as, asm_arm_op_add_reg(rd, rn, rm));
}

void asm_arm_rsb_reg_reg_imm(asm_arm_t *as, uint rd, uint rn, uint imm) {
    // rsb rd, rn, #imm
    // computes: rd := #imm - rn
    emit_al(as, asm_arm_op_rsb_imm(rd, rn, imm));
}

void asm_arm_sub_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm) {
    // sub rd, rn, rm
    emit_al(as, asm_arm_op_sub_reg(rd, rn, rm));
}

void asm_arm_mul_reg_reg_reg(asm_arm_t *as, uint rd, uint rs, uint rm) {
    // rs and rm are swapped because of restriction rd!=rm
    // mul rd, rm, rs
    emit_al(as, asm_arm_op_mul_reg(rd, rm, rs));
}

void asm_arm_and_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm) {
    // and rd, rn, rm
    emit_al(as, asm_arm_op_and_reg(rd, rn, rm));
}

void asm_arm_eor_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm) {
    // eor rd, rn, rm
    emit_al(as, asm_arm_op_eor_reg(rd, rn, rm));
}

void asm_arm_orr_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm) {
    // orr rd, rn, rm
    emit_al(as, asm_arm_op_orr_reg(rd, rn, rm));
}

void asm_arm_mov_reg_local_addr(asm_arm_t *as, uint rd, int local_num) {
    // add rd, sp, #local_num*4
    emit_al(as, asm_arm_op_add_imm(rd, ASM_ARM_REG_SP, local_num << 2));
}
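
// The -(12 + 8) adjustment below: the literal is consumed by an add of PC that
// sits 12 bytes after the point rel was measured from, and at execution time
// that add reads PC as its own address + 8, so both amounts must be subtracted
// from the label offset for reg_dest to end up holding the label's address.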
void asm_arm_mov_reg_pcrel(asm_arm_t *as, uint reg_dest, uint label) {
    assert(label < as->base.max_num_labels);
    mp_uint_t dest = as->base.label_offsets[label];
    mp_int_t rel = dest - as->base.code_offset;
    rel -= 12 + 8; // adjust for load of rel, and then PC+8 prefetch of add_reg_reg_reg

    // To load rel into reg_dest, insert immediate into code and jump over it
    emit_al(as, 0x59f0000 | (reg_dest << 12)); // ldr rd, [pc]
    emit_al(as, 0xa000000); // b pc
    emit(as, rel);

    // Do reg_dest += PC
    asm_arm_add_reg_reg_reg(as, reg_dest, reg_dest, ASM_ARM_REG_PC);
}
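
// The shifts are encoded as MOV with a register-specified shift applied to the
// second operand, hence rd appearing both as destination and as operand.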
void asm_arm_lsl_reg_reg(asm_arm_t *as, uint rd, uint rs) {
    // mov rd, rd, lsl rs
    emit_al(as, 0x1a00010 | (rd << 12) | (rs << 8) | rd);
}

void asm_arm_lsr_reg_reg(asm_arm_t *as, uint rd, uint rs) {
    // mov rd, rd, lsr rs
    emit_al(as, 0x1a00030 | (rd << 12) | (rs << 8) | rd);
}

void asm_arm_asr_reg_reg(asm_arm_t *as, uint rd, uint rs) {
    // mov rd, rd, asr rs
    emit_al(as, 0x1a00050 | (rd << 12) | (rs << 8) | rd);
}

void asm_arm_ldr_reg_reg(asm_arm_t *as, uint rd, uint rn, uint byte_offset) {
    // ldr rd, [rn, #off]
    emit_al(as, 0x5900000 | (rn << 16) | (rd << 12) | byte_offset);
}

void asm_arm_ldrh_reg_reg(asm_arm_t *as, uint rd, uint rn) {
    // ldrh rd, [rn]
    emit_al(as, 0x1d000b0 | (rn << 16) | (rd << 12));
}
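
// ldrh uses the halfword-transfer encoding, whose 8-bit immediate is split
// into a high nibble in bits [11:8] and a low nibble in bits [3:0]; the
// shuffle below places byte_offset (0..255) into those two fields.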
void asm_arm_ldrh_reg_reg_offset(asm_arm_t *as, uint rd, uint rn, uint byte_offset) {
    // ldrh rd, [rn, #off]
    emit_al(as, 0x1f000b0 | (rn << 16) | (rd << 12) | ((byte_offset & 0xf0) << 4) | (byte_offset & 0xf));
}

void asm_arm_ldrb_reg_reg(asm_arm_t *as, uint rd, uint rn) {
    // ldrb rd, [rn]
    emit_al(as, 0x5d00000 | (rn << 16) | (rd << 12));
}

void asm_arm_str_reg_reg(asm_arm_t *as, uint rd, uint rm, uint byte_offset) {
    // str rd, [rm, #off]
    emit_al(as, 0x5800000 | (rm << 16) | (rd << 12) | byte_offset);
}

void asm_arm_strh_reg_reg(asm_arm_t *as, uint rd, uint rm) {
    // strh rd, [rm]
    emit_al(as, 0x1c000b0 | (rm << 16) | (rd << 12));
}

void asm_arm_strb_reg_reg(asm_arm_t *as, uint rd, uint rm) {
    // strb rd, [rm]
    emit_al(as, 0x5c00000 | (rm << 16) | (rd << 12));
}

void asm_arm_str_reg_reg_reg(asm_arm_t *as, uint rd, uint rm, uint rn) {
    // str rd, [rm, rn, lsl #2]
    emit_al(as, 0x7800100 | (rm << 16) | (rd << 12) | rn);
}

void asm_arm_strh_reg_reg_reg(asm_arm_t *as, uint rd, uint rm, uint rn) {
    // strh doesn't support scaled register index
    emit_al(as, 0x1a00080 | (ASM_ARM_REG_R8 << 12) | rn); // mov r8, rn, lsl #1
    emit_al(as, 0x18000b0 | (rm << 16) | (rd << 12) | ASM_ARM_REG_R8); // strh rd, [rm, r8]
}

void asm_arm_strb_reg_reg_reg(asm_arm_t *as, uint rd, uint rm, uint rn) {
    // strb rd, [rm, rn]
    emit_al(as, 0x7c00000 | (rm << 16) | (rd << 12) | rn);
}
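
// Conditional branches encode a signed 24-bit word offset, measured from the
// branch's own address + 8, giving a reach of roughly +/-32 MB; larger
// displacements cannot be encoded and are reported below.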
void asm_arm_bcc_label(asm_arm_t *as, int cond, uint label) {
    assert(label < as->base.max_num_labels);
    mp_uint_t dest = as->base.label_offsets[label];
    mp_int_t rel = dest - as->base.code_offset;
    rel -= 8; // account for instruction prefetch, PC is 8 bytes ahead of this instruction
    rel >>= 2; // in ARM mode the branch target is 32-bit aligned, so the 2 LSB are omitted

    if (SIGNED_FIT24(rel)) {
        emit(as, cond | 0xa000000 | (rel & 0xffffff));
    } else {
        printf("asm_arm_bcc: branch does not fit in 24 bits\n");
    }
}

void asm_arm_b_label(asm_arm_t *as, uint label) {
    asm_arm_bcc_label(as, ASM_ARM_CC_AL, label);
}
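
// Indirect call through the function table: "mov lr, pc" reads PC as the
// mov's address + 8, i.e. the instruction after the ldr, so it doubles as the
// return address; the ldr then jumps via the table whose base is held in r7
// (the 12-bit ldr offset is why fun_id is limited below).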
void asm_arm_bl_ind(asm_arm_t *as, uint fun_id, uint reg_temp) {
    // The table offset should fit into the ldr instruction
    assert(fun_id < (0x1000 / 4));
    emit_al(as, asm_arm_op_mov_reg(ASM_ARM_REG_LR, ASM_ARM_REG_PC)); // mov lr, pc
    emit_al(as, 0x597f000 | (fun_id << 2)); // ldr pc, [r7, #fun_id*4]
}

void asm_arm_bx_reg(asm_arm_t *as, uint reg_src) {
    // bx reg_src
    emit_al(as, 0x012fff10 | reg_src);
}

#endif // MICROPY_EMIT_ARM