/* asm_arm.inc -- ARM / Thumb inline-assembly big-number arithmetic routines. */
  1. /* Copyright 2015, Kenneth MacKay. Licensed under the BSD 2-clause license. */
  2. #ifndef _UECC_ASM_ARM_H_
  3. #define _UECC_ASM_ARM_H_
  4. #if (uECC_SUPPORTS_secp256r1 || uECC_SUPPORTS_secp256k1)
  5. #define uECC_MIN_WORDS 8
  6. #endif
  7. #if uECC_SUPPORTS_secp224r1
  8. #undef uECC_MIN_WORDS
  9. #define uECC_MIN_WORDS 7
  10. #endif
  11. #if uECC_SUPPORTS_secp192r1
  12. #undef uECC_MIN_WORDS
  13. #define uECC_MIN_WORDS 6
  14. #endif
  15. #if uECC_SUPPORTS_secp160r1
  16. #undef uECC_MIN_WORDS
  17. #define uECC_MIN_WORDS 5
  18. #endif
  19. #if (uECC_PLATFORM == uECC_arm_thumb)
  20. #define REG_RW "+l"
  21. #define REG_WRITE "=l"
  22. #else
  23. #define REG_RW "+r"
  24. #define REG_WRITE "=r"
  25. #endif
  26. #if (uECC_PLATFORM == uECC_arm_thumb || uECC_PLATFORM == uECC_arm_thumb2)
  27. #define REG_RW_LO "+l"
  28. #define REG_WRITE_LO "=l"
  29. #else
  30. #define REG_RW_LO "+r"
  31. #define REG_WRITE_LO "=r"
  32. #endif
  33. #if (uECC_PLATFORM == uECC_arm_thumb2)
  34. #define RESUME_SYNTAX
  35. #else
  36. #define RESUME_SYNTAX ".syntax divided \n\t"
  37. #endif
#if (uECC_OPTIMIZATION_LEVEL >= 2)
/* result = left + right over num_words 32-bit words; returns the final carry
   (0 or 1). The word loop is fully unrolled for uECC_MAX_WORDS iterations;
   when the word count varies at runtime (uECC_MAX_WORDS != uECC_MIN_WORDS)
   a computed jump into the unrolled body skips the iterations not needed. */
uECC_VLI_API uECC_word_t uECC_vli_add(uECC_word_t *result,
                                      const uECC_word_t *left,
                                      const uECC_word_t *right,
                                      wordcount_t num_words) {
#if (uECC_MAX_WORDS != uECC_MIN_WORDS)
#if (uECC_PLATFORM == uECC_arm_thumb) || (uECC_PLATFORM == uECC_arm_thumb2)
    /* Each unrolled iteration is 4 Thumb instructions * 2 bytes = 8 bytes;
       the +1 sets the Thumb bit for the "bx" below. */
    uint32_t jump = (uECC_MAX_WORDS - num_words) * 4 * 2 + 1;
#else /* ARM */
    /* Each unrolled iteration is 4 ARM instructions * 4 bytes = 16 bytes. */
    uint32_t jump = (uECC_MAX_WORDS - num_words) * 4 * 4;
#endif
#endif
    uint32_t carry;
    uint32_t left_word;
    uint32_t right_word;

    __asm__ volatile (
        ".syntax unified \n\t"
        "movs %[carry], #0 \n\t"                 /* carry accumulator = 0 */
#if (uECC_MAX_WORDS != uECC_MIN_WORDS)
        "adr %[left], 1f \n\t"                   /* %[left] reused as scratch: &1f */
        ".align 4 \n\t"                          /* NOTE(review): alignment keeps the
                                                    computed-jump arithmetic valid --
                                                    confirm against upstream */
        "adds %[jump], %[left] \n\t"             /* jump = &1f + offset of first
                                                    needed unrolled iteration */
#endif
        /* First word: plain adds, seeding the carry flag for the adcs chain. */
        "ldmia %[lptr]!, {%[left]} \n\t"
        "ldmia %[rptr]!, {%[right]} \n\t"
        "adds %[left], %[right] \n\t"
        "stmia %[dptr]!, {%[left]} \n\t"
#if (uECC_MAX_WORDS != uECC_MIN_WORDS)
        "bx %[jump] \n\t"                        /* skip unneeded iterations */
#endif
        "1: \n\t"
        /* Remaining uECC_MAX_WORDS - 1 words, fully unrolled: add with carry. */
        REPEAT(DEC(uECC_MAX_WORDS),
            "ldmia %[lptr]!, {%[left]} \n\t"
            "ldmia %[rptr]!, {%[right]} \n\t"
            "adcs %[left], %[right] \n\t"
            "stmia %[dptr]!, {%[left]} \n\t")
        "adcs %[carry], %[carry] \n\t"           /* carry = 0 + 0 + C = final C flag */
        RESUME_SYNTAX
        : [dptr] REG_RW_LO (result), [lptr] REG_RW_LO (left), [rptr] REG_RW_LO (right),
#if (uECC_MAX_WORDS != uECC_MIN_WORDS)
          [jump] REG_RW_LO (jump),
#endif
          [carry] REG_WRITE_LO (carry), [left] REG_WRITE_LO (left_word),
          [right] REG_WRITE_LO (right_word)
        :
        : "cc", "memory"
    );
    return carry;
}
#define asm_add 1
/* NOTE(review): presumably a prototype elsewhere makes this declaration
   redundant -- confirm why -Wredundant-decls is suppressed here. */
#pragma GCC diagnostic ignored "-Wredundant-decls"

/* result = left - right over num_words 32-bit words; returns 1 if a borrow
   occurred. Structure mirrors uECC_vli_add above: fully unrolled with an
   optional computed jump past the iterations that are not needed. */
uECC_VLI_API uECC_word_t uECC_vli_sub(uECC_word_t *result,
                                      const uECC_word_t *left,
                                      const uECC_word_t *right,
                                      wordcount_t num_words) {
#if (uECC_MAX_WORDS != uECC_MIN_WORDS)
#if (uECC_PLATFORM == uECC_arm_thumb) || (uECC_PLATFORM == uECC_arm_thumb2)
    /* 4 Thumb instructions * 2 bytes per skipped iteration; +1 = Thumb bit. */
    uint32_t jump = (uECC_MAX_WORDS - num_words) * 4 * 2 + 1;
#else /* ARM */
    /* 4 ARM instructions * 4 bytes per skipped iteration. */
    uint32_t jump = (uECC_MAX_WORDS - num_words) * 4 * 4;
#endif
#endif
    uint32_t carry;
    uint32_t left_word;
    uint32_t right_word;

    __asm__ volatile (
        ".syntax unified \n\t"
        "movs %[carry], #0 \n\t"                 /* carry accumulator = 0 */
#if (uECC_MAX_WORDS != uECC_MIN_WORDS)
        "adr %[left], 1f \n\t"                   /* %[left] reused as scratch: &1f */
        ".align 4 \n\t"
        "adds %[jump], %[left] \n\t"             /* jump = &1f + offset of first
                                                    needed unrolled iteration */
#endif
        /* First word: subs seeds the carry (no-borrow) flag for the sbcs chain. */
        "ldmia %[lptr]!, {%[left]} \n\t"
        "ldmia %[rptr]!, {%[right]} \n\t"
        "subs %[left], %[right] \n\t"
        "stmia %[dptr]!, {%[left]} \n\t"
#if (uECC_MAX_WORDS != uECC_MIN_WORDS)
        "bx %[jump] \n\t"                        /* skip unneeded iterations */
#endif
        "1: \n\t"
        REPEAT(DEC(uECC_MAX_WORDS),
            "ldmia %[lptr]!, {%[left]} \n\t"
            "ldmia %[rptr]!, {%[right]} \n\t"
            "sbcs %[left], %[right] \n\t"
            "stmia %[dptr]!, {%[left]} \n\t")
        "adcs %[carry], %[carry] \n\t"           /* carry = final C flag (1 = no borrow) */
        RESUME_SYNTAX
        : [dptr] REG_RW_LO (result), [lptr] REG_RW_LO (left), [rptr] REG_RW_LO (right),
#if (uECC_MAX_WORDS != uECC_MIN_WORDS)
          [jump] REG_RW_LO (jump),
#endif
          [carry] REG_WRITE_LO (carry), [left] REG_WRITE_LO (left_word),
          [right] REG_WRITE_LO (right_word)
        :
        : "cc", "memory"
    );
    return !carry; /* Note that on ARM, carry flag set means "no borrow" when subtracting
                      (for some reason...) */
}
#define asm_sub 1
#endif /* (uECC_OPTIMIZATION_LEVEL >= 2) */
#if (uECC_OPTIMIZATION_LEVEL >= 3)
#if (uECC_PLATFORM != uECC_arm_thumb)
/* Pull in the FAST_MULT_ASM_* / FAST_SQUARE_ASM_* assembly macro bodies. */
#if uECC_ARM_USE_UMAAL
#include "asm_arm_mult_square_umaal.inc"
#else
#include "asm_arm_mult_square.inc"
#endif

#if (uECC_OPTIMIZATION_LEVEL == 3)
/* result = left * right; result receives 2*num_words words.
   Arguments are pinned to r0-r3 because the FAST_MULT_ASM_* bodies expect
   them there; everything up to r14 is clobbered by the macro bodies.
   Starting from the fixed-size routine for uECC_MIN_WORDS, each
   FAST_MULT_ASM_N_TO_(N+1) section extends the product by one word.
   NOTE(review): the sections presumably test num_words (r3) and branch to
   label 1 once the requested size is done -- confirm in the included .inc. */
uECC_VLI_API void uECC_vli_mult(uint32_t *result,
                                const uint32_t *left,
                                const uint32_t *right,
                                wordcount_t num_words) {
    register uint32_t *r0 __asm__("r0") = result;
    register const uint32_t *r1 __asm__("r1") = left;
    register const uint32_t *r2 __asm__("r2") = right;
    register uint32_t r3 __asm__("r3") = num_words;

    __asm__ volatile (
        ".syntax unified \n\t"
#if (uECC_MIN_WORDS == 5)
        FAST_MULT_ASM_5
#if (uECC_MAX_WORDS > 5)
        FAST_MULT_ASM_5_TO_6
#endif
#if (uECC_MAX_WORDS > 6)
        FAST_MULT_ASM_6_TO_7
#endif
#if (uECC_MAX_WORDS > 7)
        FAST_MULT_ASM_7_TO_8
#endif
#elif (uECC_MIN_WORDS == 6)
        FAST_MULT_ASM_6
#if (uECC_MAX_WORDS > 6)
        FAST_MULT_ASM_6_TO_7
#endif
#if (uECC_MAX_WORDS > 7)
        FAST_MULT_ASM_7_TO_8
#endif
#elif (uECC_MIN_WORDS == 7)
        FAST_MULT_ASM_7
#if (uECC_MAX_WORDS > 7)
        FAST_MULT_ASM_7_TO_8
#endif
#elif (uECC_MIN_WORDS == 8)
        FAST_MULT_ASM_8
#endif
        "1: \n\t"           /* common exit label for the macro bodies */
        RESUME_SYNTAX
        : "+r" (r0), "+r" (r1), "+r" (r2)
        : "r" (r3)
        : "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "r14", "cc", "memory"
    );
}
#define asm_mult 1
#if uECC_SQUARE_FUNC
/* result = left * left; result receives 2*num_words words.
   Same scheme as uECC_vli_mult above: arguments pinned to r0-r2, the
   FAST_SQUARE_ASM_* macro chain grows the square from uECC_MIN_WORDS up to
   the runtime num_words, exiting through label 1. */
uECC_VLI_API void uECC_vli_square(uECC_word_t *result,
                                  const uECC_word_t *left,
                                  wordcount_t num_words) {
    register uint32_t *r0 __asm__("r0") = result;
    register const uint32_t *r1 __asm__("r1") = left;
    register uint32_t r2 __asm__("r2") = num_words;

    __asm__ volatile (
        ".syntax unified \n\t"
#if (uECC_MIN_WORDS == 5)
        FAST_SQUARE_ASM_5
#if (uECC_MAX_WORDS > 5)
        FAST_SQUARE_ASM_5_TO_6
#endif
#if (uECC_MAX_WORDS > 6)
        FAST_SQUARE_ASM_6_TO_7
#endif
#if (uECC_MAX_WORDS > 7)
        FAST_SQUARE_ASM_7_TO_8
#endif
#elif (uECC_MIN_WORDS == 6)
        FAST_SQUARE_ASM_6
#if (uECC_MAX_WORDS > 6)
        FAST_SQUARE_ASM_6_TO_7
#endif
#if (uECC_MAX_WORDS > 7)
        FAST_SQUARE_ASM_7_TO_8
#endif
#elif (uECC_MIN_WORDS == 7)
        FAST_SQUARE_ASM_7
#if (uECC_MAX_WORDS > 7)
        FAST_SQUARE_ASM_7_TO_8
#endif
#elif (uECC_MIN_WORDS == 8)
        FAST_SQUARE_ASM_8
#endif
        "1: \n\t"           /* common exit label for the macro bodies */
        RESUME_SYNTAX
        : "+r" (r0), "+r" (r1)
        : "r" (r2)
        : "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "r14", "cc", "memory"
    );
}
#define asm_square 1
#endif /* uECC_SQUARE_FUNC */
#else /* (uECC_OPTIMIZATION_LEVEL > 3) */
/* result = left * right. At the highest optimization level each supported
   curve size gets its own fully-unrolled fixed-size multiply, selected by a
   runtime dispatch on num_words. If num_words matches none of the enabled
   sizes, the function returns with result unmodified. */
uECC_VLI_API void uECC_vli_mult(uint32_t *result,
                                const uint32_t *left,
                                const uint32_t *right,
                                wordcount_t num_words) {
    /* Pinned to r0-r3 as expected by the FAST_MULT_ASM_* bodies. */
    register uint32_t *r0 __asm__("r0") = result;
    register const uint32_t *r1 __asm__("r1") = left;
    register const uint32_t *r2 __asm__("r2") = right;
    register uint32_t r3 __asm__("r3") = num_words;

#if uECC_SUPPORTS_secp160r1
    if (num_words == 5) {
        __asm__ volatile (
            ".syntax unified \n\t"
            FAST_MULT_ASM_5
            RESUME_SYNTAX
            : "+r" (r0), "+r" (r1), "+r" (r2)
            : "r" (r3)
            : "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "r14", "cc", "memory"
        );
        return;
    }
#endif
#if uECC_SUPPORTS_secp192r1
    if (num_words == 6) {
        __asm__ volatile (
            ".syntax unified \n\t"
            FAST_MULT_ASM_6
            RESUME_SYNTAX
            : "+r" (r0), "+r" (r1), "+r" (r2)
            : "r" (r3)
            : "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "r14", "cc", "memory"
        );
        return;
    }
#endif
#if uECC_SUPPORTS_secp224r1
    if (num_words == 7) {
        __asm__ volatile (
            ".syntax unified \n\t"
            FAST_MULT_ASM_7
            RESUME_SYNTAX
            : "+r" (r0), "+r" (r1), "+r" (r2)
            : "r" (r3)
            : "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "r14", "cc", "memory"
        );
        return;
    }
#endif
#if (uECC_SUPPORTS_secp256r1 || uECC_SUPPORTS_secp256k1)
    if (num_words == 8) {
        __asm__ volatile (
            ".syntax unified \n\t"
            FAST_MULT_ASM_8
            RESUME_SYNTAX
            : "+r" (r0), "+r" (r1), "+r" (r2)
            : "r" (r3)
            : "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "r14", "cc", "memory"
        );
        return;
    }
#endif
}
#define asm_mult 1
#if uECC_SQUARE_FUNC
/* result = left * left, dispatching to a fully-unrolled fixed-size squaring
   per supported curve size (see uECC_vli_mult above). If num_words matches
   none of the enabled sizes, result is left unmodified. */
uECC_VLI_API void uECC_vli_square(uECC_word_t *result,
                                  const uECC_word_t *left,
                                  wordcount_t num_words) {
    /* Pinned to r0-r2 as expected by the FAST_SQUARE_ASM_* bodies. */
    register uint32_t *r0 __asm__("r0") = result;
    register const uint32_t *r1 __asm__("r1") = left;
    register uint32_t r2 __asm__("r2") = num_words;

#if uECC_SUPPORTS_secp160r1
    if (num_words == 5) {
        __asm__ volatile (
            ".syntax unified \n\t"
            FAST_SQUARE_ASM_5
            RESUME_SYNTAX
            : "+r" (r0), "+r" (r1)
            : "r" (r2)
            : "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "r14", "cc", "memory"
        );
        return;
    }
#endif
#if uECC_SUPPORTS_secp192r1
    if (num_words == 6) {
        __asm__ volatile (
            ".syntax unified \n\t"
            FAST_SQUARE_ASM_6
            RESUME_SYNTAX
            : "+r" (r0), "+r" (r1)
            : "r" (r2)
            : "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "r14", "cc", "memory"
        );
        return;
    }
#endif
#if uECC_SUPPORTS_secp224r1
    if (num_words == 7) {
        __asm__ volatile (
            ".syntax unified \n\t"
            FAST_SQUARE_ASM_7
            RESUME_SYNTAX
            : "+r" (r0), "+r" (r1)
            : "r" (r2)
            : "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "r14", "cc", "memory"
        );
        return;
    }
#endif
#if (uECC_SUPPORTS_secp256r1 || uECC_SUPPORTS_secp256k1)
    if (num_words == 8) {
        __asm__ volatile (
            ".syntax unified \n\t"
            FAST_SQUARE_ASM_8
            RESUME_SYNTAX
            : "+r" (r0), "+r" (r1)
            : "r" (r2)
            : "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "r14", "cc", "memory"
        );
        return;
    }
#endif
}
#define asm_square 1
#endif /* uECC_SQUARE_FUNC */
#endif /* (uECC_OPTIMIZATION_LEVEL > 3) */
#endif /* uECC_PLATFORM != uECC_arm_thumb */
#endif /* (uECC_OPTIMIZATION_LEVEL >= 3) */
/* ---- "Small" implementations ---- */
#if !asm_add
/* Generic word-at-a-time addition: result = left + right; returns the carry
   (0 or 1). Between iterations the carry lives in a register as 0/1:
   "lsrs %[carry], #1" shifts bit 0 into the C flag (leaving carry = 0), the
   adcs does the add, and "adcs %[carry], %[carry], %[carry]" writes the new
   C flag back as 0/1. */
uECC_VLI_API uECC_word_t uECC_vli_add(uECC_word_t *result,
                                      const uECC_word_t *left,
                                      const uECC_word_t *right,
                                      wordcount_t num_words) {
    uint32_t carry = 0;
    uint32_t left_word;
    uint32_t right_word;

    __asm__ volatile (
        ".syntax unified \n\t"
        "1: \n\t"
        "ldmia %[lptr]!, {%[left]} \n\t"          /* Load left word. */
        "ldmia %[rptr]!, {%[right]} \n\t"         /* Load right word. */
        "lsrs %[carry], #1 \n\t"                  /* Set up carry flag (carry = 0 after this). */
        "adcs %[left], %[left], %[right] \n\t"    /* Add with carry. */
        "adcs %[carry], %[carry], %[carry] \n\t"  /* Store carry bit. */
        "stmia %[dptr]!, {%[left]} \n\t"          /* Store result word. */
        "subs %[ctr], #1 \n\t"                    /* Decrement counter. */
        "bne 1b \n\t"                             /* Loop until counter == 0. */
        RESUME_SYNTAX
        : [dptr] REG_RW (result), [lptr] REG_RW (left), [rptr] REG_RW (right),
          [ctr] REG_RW (num_words), [carry] REG_RW (carry),
          [left] REG_WRITE (left_word), [right] REG_WRITE (right_word)
        :
        : "cc", "memory"
    );
    return carry;
}
#define asm_add 1
#endif
#if !asm_sub
/* Generic word-at-a-time subtraction: result = left - right; returns 1 if a
   borrow occurred. Same register-carried carry scheme as uECC_vli_add above;
   on ARM, C flag set after a subtract means "no borrow", hence the initial
   carry = 1 and the inverted return value. */
uECC_VLI_API uECC_word_t uECC_vli_sub(uECC_word_t *result,
                                      const uECC_word_t *left,
                                      const uECC_word_t *right,
                                      wordcount_t num_words) {
    uint32_t carry = 1; /* carry = 1 initially (means don't borrow) */
    uint32_t left_word;
    uint32_t right_word;

    __asm__ volatile (
        ".syntax unified \n\t"
        "1: \n\t"
        "ldmia %[lptr]!, {%[left]} \n\t"          /* Load left word. */
        "ldmia %[rptr]!, {%[right]} \n\t"         /* Load right word. */
        "lsrs %[carry], #1 \n\t"                  /* Set up carry flag (carry = 0 after this). */
        "sbcs %[left], %[left], %[right] \n\t"    /* Subtract with borrow. */
        "adcs %[carry], %[carry], %[carry] \n\t"  /* Store carry bit. */
        "stmia %[dptr]!, {%[left]} \n\t"          /* Store result word. */
        "subs %[ctr], #1 \n\t"                    /* Decrement counter. */
        "bne 1b \n\t"                             /* Loop until counter == 0. */
        RESUME_SYNTAX
        : [dptr] REG_RW (result), [lptr] REG_RW (left), [rptr] REG_RW (right),
          [ctr] REG_RW (num_words), [carry] REG_RW (carry),
          [left] REG_WRITE (left_word), [right] REG_WRITE (right_word)
        :
        : "cc", "memory"
    );
    return !carry;
}
#define asm_sub 1
#endif
#if !asm_mult
/* Schoolbook multiplication in product-scanning (column) order: for each
   output column k, sum left[i] * right[k-i] into a 96-bit accumulator
   (c0, c1, c2), then emit c0 as result[k] and shift the accumulator down.
   result receives 2*num_words words. Inside the asm, i, k and last_word are
   byte offsets (word index * 4). */
uECC_VLI_API void uECC_vli_mult(uECC_word_t *result,
                                const uECC_word_t *left,
                                const uECC_word_t *right,
                                wordcount_t num_words) {
#if (uECC_PLATFORM != uECC_arm_thumb)
    uint32_t c0 = 0;
    uint32_t c1 = 0;
    uint32_t c2 = 0;
    uint32_t k = 0;
    uint32_t i;
    uint32_t t0, t1;

    __asm__ volatile (
        ".syntax unified \n\t"
        "1: \n\t" /* outer loop (k < num_words) */
        "movs %[i], #0 \n\t"                     /* i = 0 */
        "b 3f \n\t"
        "2: \n\t" /* outer loop (k >= num_words) */
        "movs %[i], %[k] \n\t"                   /* i = k */
        "subs %[i], %[last_word] \n\t"           /* i = k - (num_words - 1) (times 4) */
        "3: \n\t" /* inner loop */
        "subs %[t0], %[k], %[i] \n\t"            /* t0 = k-i */
        "ldr %[t1], [%[right], %[t0]] \n\t"      /* t1 = right[k - i] */
        "ldr %[t0], [%[left], %[i]] \n\t"        /* t0 = left[i] */
        "umull %[t0], %[t1], %[t0], %[t1] \n\t"  /* (t0, t1) = left[i] * right[k - i] */
        "adds %[c0], %[c0], %[t0] \n\t"          /* add low word to c0 */
        "adcs %[c1], %[c1], %[t1] \n\t"          /* add high word to c1, including carry */
        "adcs %[c2], %[c2], #0 \n\t"             /* add carry to c2 */
        "adds %[i], #4 \n\t"                     /* i += 4 */
        "cmp %[i], %[last_word] \n\t"            /* i > (num_words - 1) (times 4)? */
        "bgt 4f \n\t"                            /* if so, exit the loop */
        "cmp %[i], %[k] \n\t"                    /* i <= k? */
        "ble 3b \n\t"                            /* if so, continue looping */
        "4: \n\t" /* end inner loop */
        "str %[c0], [%[result], %[k]] \n\t"      /* result[k] = c0 */
        "mov %[c0], %[c1] \n\t"                  /* c0 = c1 */
        "mov %[c1], %[c2] \n\t"                  /* c1 = c2 */
        "movs %[c2], #0 \n\t"                    /* c2 = 0 */
        "adds %[k], #4 \n\t"                     /* k += 4 */
        "cmp %[k], %[last_word] \n\t"            /* k <= (num_words - 1) (times 4) ? */
        "ble 1b \n\t"                            /* if so, loop back, start with i = 0 */
        "cmp %[k], %[last_word], lsl #1 \n\t"    /* k <= (num_words * 2 - 2) (times 4) ? */
        "ble 2b \n\t"                            /* if so, loop back, start with i = (k + 1) - num_words */
        /* end outer loop */
        "str %[c0], [%[result], %[k]] \n\t"      /* result[num_words * 2 - 1] = c0 */
        RESUME_SYNTAX
        : [c0] "+r" (c0), [c1] "+r" (c1), [c2] "+r" (c2),
          [k] "+r" (k), [i] "=&r" (i), [t0] "=&r" (t0), [t1] "=&r" (t1)
        : [result] "r" (result), [left] "r" (left), [right] "r" (right),
          [last_word] "r" ((num_words - 1) * 4)
        : "cc", "memory"
    );
#else /* Thumb-1 */
    /* Thumb-1 has no umull, so each 32x32 product is built from four 16x16
       muls partial products. Low registers are scarce: r8/r9 hold the loop
       bounds, r10-r12/r14 park the accumulator and k across the multiply,
       and the result pointer is kept on the stack. */
    uint32_t r4, r5, r6, r7;

    __asm__ volatile (
        ".syntax unified \n\t"
        "subs %[r3], #1 \n\t"           /* r3 = num_words - 1 */
        "lsls %[r3], #2 \n\t"           /* r3 = (num_words - 1) * 4 */
        "mov r8, %[r3] \n\t"            /* r8 = (num_words - 1) * 4 */
        "lsls %[r3], #1 \n\t"           /* r3 = (num_words - 1) * 8 */
        "mov r9, %[r3] \n\t"            /* r9 = (num_words - 1) * 8 */
        "movs %[r3], #0 \n\t"           /* c0 = 0 */
        "movs %[r4], #0 \n\t"           /* c1 = 0 */
        "movs %[r5], #0 \n\t"           /* c2 = 0 */
        "movs %[r6], #0 \n\t"           /* k = 0 */
        "push {%[r0]} \n\t"             /* keep result on the stack */
        "1: \n\t" /* outer loop (k < num_words) */
        "movs %[r7], #0 \n\t"           /* r7 = i = 0 */
        "b 3f \n\t"
        "2: \n\t" /* outer loop (k >= num_words) */
        "movs %[r7], %[r6] \n\t"        /* r7 = k */
        "mov %[r0], r8 \n\t"            /* r0 = (num_words - 1) * 4 */
        "subs %[r7], %[r0] \n\t"        /* r7 = i = k - (num_words - 1) (times 4) */
        "3: \n\t" /* inner loop */
        /* Park the accumulator and k in high registers; the 16x16 multiply
           below needs every low register. */
        "mov r10, %[r3] \n\t"
        "mov r11, %[r4] \n\t"
        "mov r12, %[r5] \n\t"
        "mov r14, %[r6] \n\t"
        "subs %[r0], %[r6], %[r7] \n\t" /* r0 = k - i */
        "ldr %[r4], [%[r2], %[r0]] \n\t" /* r4 = right[k - i] */
        "ldr %[r0], [%[r1], %[r7]] \n\t" /* r0 = left[i] */
        /* 32x32 -> 64 product from 16-bit halves: a = a1:a0, b = b1:b0. */
        "lsrs %[r3], %[r0], #16 \n\t"   /* r3 = a1 */
        "uxth %[r0], %[r0] \n\t"        /* r0 = a0 */
        "lsrs %[r5], %[r4], #16 \n\t"   /* r5 = b1 */
        "uxth %[r4], %[r4] \n\t"        /* r4 = b0 */
        "movs %[r6], %[r3] \n\t"        /* r6 = a1 */
        "muls %[r6], %[r5], %[r6] \n\t" /* r6 = a1 * b1 */
        "muls %[r3], %[r4], %[r3] \n\t" /* r3 = b0 * a1 */
        "muls %[r5], %[r0], %[r5] \n\t" /* r5 = a0 * b1 */
        "muls %[r0], %[r4], %[r0] \n\t" /* r0 = a0 * b0 */
        /* Add middle terms */
        "lsls %[r4], %[r3], #16 \n\t"
        "lsrs %[r3], %[r3], #16 \n\t"
        "adds %[r0], %[r4] \n\t"
        "adcs %[r6], %[r3] \n\t"
        "lsls %[r4], %[r5], #16 \n\t"
        "lsrs %[r5], %[r5], #16 \n\t"
        "adds %[r0], %[r4] \n\t"
        "adcs %[r6], %[r5] \n\t"
        /* Restore the accumulator and fold in the (r0, r6) product. */
        "mov %[r3], r10\n\t"
        "mov %[r4], r11\n\t"
        "mov %[r5], r12\n\t"
        "adds %[r3], %[r0] \n\t"        /* add low word to c0 */
        "adcs %[r4], %[r6] \n\t"        /* add high word to c1, including carry */
        "movs %[r0], #0 \n\t"           /* r0 = 0 (does not affect carry bit) */
        "adcs %[r5], %[r0] \n\t"        /* add carry to c2 */
        "mov %[r6], r14\n\t"            /* r6 = k */
        "adds %[r7], #4 \n\t"           /* i += 4 */
        "cmp %[r7], r8 \n\t"            /* i > (num_words - 1) (times 4)? */
        "bgt 4f \n\t"                   /* if so, exit the loop */
        "cmp %[r7], %[r6] \n\t"         /* i <= k? */
        "ble 3b \n\t"                   /* if so, continue looping */
        "4: \n\t" /* end inner loop */
        "ldr %[r0], [sp, #0] \n\t"      /* r0 = result */
        "str %[r3], [%[r0], %[r6]] \n\t" /* result[k] = c0 */
        "mov %[r3], %[r4] \n\t"         /* c0 = c1 */
        "mov %[r4], %[r5] \n\t"         /* c1 = c2 */
        "movs %[r5], #0 \n\t"           /* c2 = 0 */
        "adds %[r6], #4 \n\t"           /* k += 4 */
        "cmp %[r6], r8 \n\t"            /* k <= (num_words - 1) (times 4) ? */
        "ble 1b \n\t"                   /* if so, loop back, start with i = 0 */
        "cmp %[r6], r9 \n\t"            /* k <= (num_words * 2 - 2) (times 4) ? */
        "ble 2b \n\t"                   /* if so, loop back, with i = (k + 1) - num_words */
        /* end outer loop */
        "str %[r3], [%[r0], %[r6]] \n\t" /* result[num_words * 2 - 1] = c0 */
        "pop {%[r0]} \n\t"              /* pop result off the stack */
        ".syntax divided \n\t"
        : [r3] "+l" (num_words), [r4] "=&l" (r4),
          [r5] "=&l" (r5), [r6] "=&l" (r6), [r7] "=&l" (r7)
        : [r0] "l" (result), [r1] "l" (left), [r2] "l" (right)
        : "r8", "r9", "r10", "r11", "r12", "r14", "cc", "memory"
    );
#endif
}
#define asm_mult 1
#endif
#if uECC_SQUARE_FUNC
#if !asm_square
/* Squaring: like uECC_vli_mult with left == right, but exploiting symmetry:
   each cross product left[i] * left[k-i] with i < k-i is added to the
   accumulator twice (the guarded add plus the fall-through add), while the
   diagonal term (i == k-i) is added once. result receives 2*num_words words.
   As in uECC_vli_mult, i, k and last_word are byte offsets (word index * 4). */
uECC_VLI_API void uECC_vli_square(uECC_word_t *result,
                                  const uECC_word_t *left,
                                  wordcount_t num_words) {
#if (uECC_PLATFORM != uECC_arm_thumb)
    uint32_t c0 = 0;
    uint32_t c1 = 0;
    uint32_t c2 = 0;
    uint32_t k = 0;
    uint32_t i, tt;
    uint32_t t0, t1;

    __asm__ volatile (
        ".syntax unified \n\t"
        "1: \n\t" /* outer loop (k < num_words) */
        "movs %[i], #0 \n\t"                     /* i = 0 */
        "b 3f \n\t"
        "2: \n\t" /* outer loop (k >= num_words) */
        "movs %[i], %[k] \n\t"                   /* i = k */
        "subs %[i], %[last_word] \n\t"           /* i = k - (num_words - 1) (times 4) */
        "3: \n\t" /* inner loop */
        "subs %[tt], %[k], %[i] \n\t"            /* tt = k-i */
        "ldr %[t1], [%[left], %[tt]] \n\t"       /* t1 = left[k - i] */
        "ldr %[t0], [%[left], %[i]] \n\t"        /* t0 = left[i] */
        "umull %[t0], %[t1], %[t0], %[t1] \n\t"  /* (t0, t1) = left[i] * left[k - i] */
        "cmp %[i], %[tt] \n\t"                   /* (i < k - i) ? */
        "bge 4f \n\t"                            /* if i >= k - i, skip (add once, not twice) */
        "adds %[c0], %[c0], %[t0] \n\t"          /* add low word to c0 */
        "adcs %[c1], %[c1], %[t1] \n\t"          /* add high word to c1, including carry */
        "adcs %[c2], %[c2], #0 \n\t"             /* add carry to c2 */
        "4: \n\t"
        "adds %[c0], %[c0], %[t0] \n\t"          /* add low word to c0 */
        "adcs %[c1], %[c1], %[t1] \n\t"          /* add high word to c1, including carry */
        "adcs %[c2], %[c2], #0 \n\t"             /* add carry to c2 */
        "adds %[i], #4 \n\t"                     /* i += 4 */
        "cmp %[i], %[k] \n\t"                    /* i >= k? */
        "bge 5f \n\t"                            /* if so, exit the loop */
        "subs %[tt], %[k], %[i] \n\t"            /* tt = k - i */
        "cmp %[i], %[tt] \n\t"                   /* i <= k - i? */
        "ble 3b \n\t"                            /* if so, continue looping */
        "5: \n\t" /* end inner loop */
        "str %[c0], [%[result], %[k]] \n\t"      /* result[k] = c0 */
        "mov %[c0], %[c1] \n\t"                  /* c0 = c1 */
        "mov %[c1], %[c2] \n\t"                  /* c1 = c2 */
        "movs %[c2], #0 \n\t"                    /* c2 = 0 */
        "adds %[k], #4 \n\t"                     /* k += 4 */
        "cmp %[k], %[last_word] \n\t"            /* k <= (num_words - 1) (times 4) ? */
        "ble 1b \n\t"                            /* if so, loop back, start with i = 0 */
        "cmp %[k], %[last_word], lsl #1 \n\t"    /* k <= (num_words * 2 - 2) (times 4) ? */
        "ble 2b \n\t"                            /* if so, loop back, start with i = (k + 1) - num_words */
        /* end outer loop */
        "str %[c0], [%[result], %[k]] \n\t"      /* result[num_words * 2 - 1] = c0 */
        RESUME_SYNTAX
        : [c0] "+r" (c0), [c1] "+r" (c1), [c2] "+r" (c2),
          [k] "+r" (k), [i] "=&r" (i), [tt] "=&r" (tt), [t0] "=&r" (t0), [t1] "=&r" (t1)
        : [result] "r" (result), [left] "r" (left), [last_word] "r" ((num_words - 1) * 4)
        : "cc", "memory"
    );
#else
    /* Thumb-1 variant: 16x16 muls partial products as in uECC_vli_mult;
       r8/r9 hold loop bounds, r10-r12/r14 park the accumulator and k, and
       the result pointer is kept on the stack. */
    uint32_t r3, r4, r5, r6, r7;

    __asm__ volatile (
        ".syntax unified \n\t"
        "subs %[r2], #1 \n\t"           /* r2 = num_words - 1 */
        "lsls %[r2], #2 \n\t"           /* r2 = (num_words - 1) * 4 */
        "mov r8, %[r2] \n\t"            /* r8 = (num_words - 1) * 4 */
        "lsls %[r2], #1 \n\t"           /* r2 = (num_words - 1) * 8 */
        "mov r9, %[r2] \n\t"            /* r9 = (num_words - 1) * 8 */
        "movs %[r2], #0 \n\t"           /* c0 = 0 */
        "movs %[r3], #0 \n\t"           /* c1 = 0 */
        "movs %[r4], #0 \n\t"           /* c2 = 0 */
        "movs %[r5], #0 \n\t"           /* k = 0 */
        "push {%[r0]} \n\t"             /* keep result on the stack */
        "1: \n\t" /* outer loop (k < num_words) */
        "movs %[r6], #0 \n\t"           /* r6 = i = 0 */
        "b 3f \n\t"
        "2: \n\t" /* outer loop (k >= num_words) */
        "movs %[r6], %[r5] \n\t"        /* r6 = k */
        "mov %[r0], r8 \n\t"            /* r0 = (num_words - 1) * 4 */
        "subs %[r6], %[r0] \n\t"        /* r6 = i = k - (num_words - 1) (times 4) */
        "3: \n\t" /* inner loop */
        /* Park the accumulator and k in high registers for the multiply. */
        "mov r10, %[r2] \n\t"
        "mov r11, %[r3] \n\t"
        "mov r12, %[r4] \n\t"
        "mov r14, %[r5] \n\t"
        "subs %[r7], %[r5], %[r6] \n\t" /* r7 = k - i */
        "ldr %[r3], [%[r1], %[r7]] \n\t" /* r3 = left[k - i] */
        "ldr %[r0], [%[r1], %[r6]] \n\t" /* r0 = left[i] */
        /* 32x32 -> 64 product from 16-bit halves: a = a1:a0, b = b1:b0. */
        "lsrs %[r2], %[r0], #16 \n\t"   /* r2 = a1 */
        "uxth %[r0], %[r0] \n\t"        /* r0 = a0 */
        "lsrs %[r4], %[r3], #16 \n\t"   /* r4 = b1 */
        "uxth %[r3], %[r3] \n\t"        /* r3 = b0 */
        "movs %[r5], %[r2] \n\t"        /* r5 = a1 */
        "muls %[r5], %[r4], %[r5] \n\t" /* r5 = a1 * b1 */
        "muls %[r2], %[r3], %[r2] \n\t" /* r2 = b0 * a1 */
        "muls %[r4], %[r0], %[r4] \n\t" /* r4 = a0 * b1 */
        "muls %[r0], %[r3], %[r0] \n\t" /* r0 = a0 * b0 */
        /* Add middle terms */
        "lsls %[r3], %[r2], #16 \n\t"
        "lsrs %[r2], %[r2], #16 \n\t"
        "adds %[r0], %[r3] \n\t"
        "adcs %[r5], %[r2] \n\t"
        "lsls %[r3], %[r4], #16 \n\t"
        "lsrs %[r4], %[r4], #16 \n\t"
        "adds %[r0], %[r3] \n\t"
        "adcs %[r5], %[r4] \n\t"
        /* Add to acc, doubling if necessary */
        "mov %[r2], r10\n\t"
        "mov %[r3], r11\n\t"
        "mov %[r4], r12\n\t"
        "cmp %[r6], %[r7] \n\t"         /* (i < k - i) ? */
        "bge 4f \n\t"                   /* if i >= k - i, skip (add once, not twice) */
        "movs %[r7], #0 \n\t"           /* r7 = 0 */
        "adds %[r2], %[r0] \n\t"        /* add low word to c0 */
        "adcs %[r3], %[r5] \n\t"        /* add high word to c1, including carry */
        "adcs %[r4], %[r7] \n\t"        /* add carry to c2 */
        "4: \n\t"
        "movs %[r7], #0 \n\t"           /* r7 = 0 */
        "adds %[r2], %[r0] \n\t"        /* add low word to c0 */
        "adcs %[r3], %[r5] \n\t"        /* add high word to c1, including carry */
        "adcs %[r4], %[r7] \n\t"        /* add carry to c2 */
        "mov %[r5], r14\n\t"            /* r5 = k */
        "adds %[r6], #4 \n\t"           /* i += 4 */
        "cmp %[r6], %[r5] \n\t"         /* i >= k? */
        "bge 5f \n\t"                   /* if so, exit the loop */
        "subs %[r7], %[r5], %[r6] \n\t" /* r7 = k - i */
        "cmp %[r6], %[r7] \n\t"         /* i <= k - i? */
        "ble 3b \n\t"                   /* if so, continue looping */
        "5: \n\t" /* end inner loop */
        "ldr %[r0], [sp, #0] \n\t"      /* r0 = result */
        "str %[r2], [%[r0], %[r5]] \n\t" /* result[k] = c0 */
        "mov %[r2], %[r3] \n\t"         /* c0 = c1 */
        "mov %[r3], %[r4] \n\t"         /* c1 = c2 */
        "movs %[r4], #0 \n\t"           /* c2 = 0 */
        "adds %[r5], #4 \n\t"           /* k += 4 */
        "cmp %[r5], r8 \n\t"            /* k <= (num_words - 1) (times 4) ? */
        "ble 1b \n\t"                   /* if so, loop back, start with i = 0 */
        "cmp %[r5], r9 \n\t"            /* k <= (num_words * 2 - 2) (times 4) ? */
        "ble 2b \n\t"                   /* if so, loop back, with i = (k + 1) - num_words */
        /* end outer loop */
        "str %[r2], [%[r0], %[r5]] \n\t" /* result[num_words * 2 - 1] = c0 */
        "pop {%[r0]} \n\t"              /* pop result off the stack */
        ".syntax divided \n\t"
        : [r2] "+l" (num_words), [r3] "=&l" (r3), [r4] "=&l" (r4),
          [r5] "=&l" (r5), [r6] "=&l" (r6), [r7] "=&l" (r7)
        : [r0] "l" (result), [r1] "l" (left)
        : "r8", "r9", "r10", "r11", "r12", "r14", "cc", "memory"
    );
#endif
}
#define asm_square 1
#endif
#endif /* uECC_SQUARE_FUNC */

#endif /* _UECC_ASM_ARM_H_ */