aescrypt.c

/*
---------------------------------------------------------------------------
Copyright (c) 1998-2013, Brian Gladman, Worcester, UK. All rights reserved.

The redistribution and use of this software (with or without changes)
is allowed without the payment of fees or royalties provided that:

  source code distributions include the above copyright notice, this
  list of conditions and the following disclaimer;

  binary distributions include the above copyright notice, this list
  of conditions and the following disclaimer in their documentation.

This software is provided 'as is' with no explicit or implied warranties
in respect of its operation, including, but not limited to, correctness
and fitness for purpose.
---------------------------------------------------------------------------
Issue Date: 20/12/2007
*/
#include "aesopt.h"
#include "aestab.h"

#if defined( USE_INTEL_AES_IF_PRESENT )
#  include "aes_ni.h"
#else
/* map names here to provide the external API ('name' -> 'aes_name') */
#  define aes_xi(x) aes_ ## x
#endif

#if defined(__cplusplus)
extern "C"
{
#endif
#define si(y,x,k,c)     (s(y,c) = word_in(x, c) ^ (k)[c])
#define so(y,x,c)       word_out(y, c, s(x,c))

#if defined(ARRAYS)
#define locals(y,x)     x[4],y[4]
#else
#define locals(y,x)     x##0,x##1,x##2,x##3,y##0,y##1,y##2,y##3
#endif

#define l_copy(y, x)    s(y,0) = s(x,0); s(y,1) = s(x,1); \
                        s(y,2) = s(x,2); s(y,3) = s(x,3);

#define state_in(y,x,k)  si(y,x,k,0); si(y,x,k,1); si(y,x,k,2); si(y,x,k,3)
#define state_out(y,x)   so(y,x,0); so(y,x,1); so(y,x,2); so(y,x,3)
#define round(rm,y,x,k)  rm(y,x,k,0); rm(y,x,k,1); rm(y,x,k,2); rm(y,x,k,3)
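
/* si() loads one 32-bit word of the input block and XORs it with the
   corresponding round key word; so() stores one 32-bit state word to the
   output block.  state_in() and state_out() apply these to all four
   columns, and round() applies a per-column round macro (fwd_rnd,
   fwd_lrnd, inv_rnd or inv_lrnd below) to each of the four columns of
   the state.
*/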

#if ( FUNCS_IN_C & ENCRYPTION_IN_C )

/* Visual C++ .Net v7.1 provides the fastest encryption code when using
   Pentium optimisation with small code but this is poor for decryption
   so we need to control this with the following VC++ pragmas
*/
#if defined( _MSC_VER ) && !defined( _WIN64 ) && !defined( __clang__ )
#pragma optimize( "s", on )
#endif
/* Given the column (c) of the output state variable, the following
   macros give the input state variables which are needed in its
   computation for each row (r) of the state. All the alternative
   macros give the same end values but expand into different ways
   of calculating these values.  In particular the complex macro
   used for dynamically variable block sizes is designed to expand
   to a compile time constant whenever possible but will expand to
   conditional clauses on some branches (I am grateful to Frank
   Yellin for this construction)
*/

#define fwd_var(x,r,c)\
 ( r == 0 ? ( c == 0 ? s(x,0) : c == 1 ? s(x,1) : c == 2 ? s(x,2) : s(x,3))\
 : r == 1 ? ( c == 0 ? s(x,1) : c == 1 ? s(x,2) : c == 2 ? s(x,3) : s(x,0))\
 : r == 2 ? ( c == 0 ? s(x,2) : c == 1 ? s(x,3) : c == 2 ? s(x,0) : s(x,1))\
 :          ( c == 0 ? s(x,3) : c == 1 ? s(x,0) : c == 2 ? s(x,1) : s(x,2)))
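
/* For constant r and c the ternary chain above folds to a single state
   word: fwd_var selects column (c + r) mod 4, which is the ShiftRows
   offset used by the forward cipher.  For example, fwd_var(x,1,0)
   reduces to s(x,1) and fwd_var(x,3,2) also reduces to s(x,1).
*/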

#if defined(FT4_SET)
#undef  dec_fmvars
#define fwd_rnd(y,x,k,c)    (s(y,c) = (k)[c] ^ four_tables(x,t_use(f,n),fwd_var,rf1,c))
#elif defined(FT1_SET)
#undef  dec_fmvars
#define fwd_rnd(y,x,k,c)    (s(y,c) = (k)[c] ^ one_table(x,upr,t_use(f,n),fwd_var,rf1,c))
#else
#define fwd_rnd(y,x,k,c)    (s(y,c) = (k)[c] ^ fwd_mcol(no_table(x,t_use(s,box),fwd_var,rf1,c)))
#endif

#if defined(FL4_SET)
#define fwd_lrnd(y,x,k,c)   (s(y,c) = (k)[c] ^ four_tables(x,t_use(f,l),fwd_var,rf1,c))
#elif defined(FL1_SET)
#define fwd_lrnd(y,x,k,c)   (s(y,c) = (k)[c] ^ one_table(x,ups,t_use(f,l),fwd_var,rf1,c))
#else
#define fwd_lrnd(y,x,k,c)   (s(y,c) = (k)[c] ^ no_table(x,t_use(s,box),fwd_var,rf1,c))
#endif

AES_RETURN aes_xi(encrypt)(const unsigned char *in, unsigned char *out, const aes_encrypt_ctx cx[1])
{   uint32_t locals(b0, b1);
    const uint32_t *kp = NULL;
#if defined( dec_fmvars )
    dec_fmvars; /* declare variables for fwd_mcol() if needed */
#endif

    if(cx->inf.b[0] != 10 * AES_BLOCK_SIZE && cx->inf.b[0] != 12 * AES_BLOCK_SIZE && cx->inf.b[0] != 14 * AES_BLOCK_SIZE)
        return EXIT_FAILURE;

    kp = cx->ks;
    state_in(b0, in, kp);

#if (ENC_UNROLL == FULL)

    switch(cx->inf.b[0])
    {
    case 14 * AES_BLOCK_SIZE:
        round(fwd_rnd,  b1, b0, kp + 1 * N_COLS);
        round(fwd_rnd,  b0, b1, kp + 2 * N_COLS);
        kp += 2 * N_COLS;
        //-fallthrough
    case 12 * AES_BLOCK_SIZE:
        round(fwd_rnd,  b1, b0, kp + 1 * N_COLS);
        round(fwd_rnd,  b0, b1, kp + 2 * N_COLS);
        kp += 2 * N_COLS;
        //-fallthrough
    case 10 * AES_BLOCK_SIZE:
        round(fwd_rnd,  b1, b0, kp + 1 * N_COLS);
        round(fwd_rnd,  b0, b1, kp + 2 * N_COLS);
        round(fwd_rnd,  b1, b0, kp + 3 * N_COLS);
        round(fwd_rnd,  b0, b1, kp + 4 * N_COLS);
        round(fwd_rnd,  b1, b0, kp + 5 * N_COLS);
        round(fwd_rnd,  b0, b1, kp + 6 * N_COLS);
        round(fwd_rnd,  b1, b0, kp + 7 * N_COLS);
        round(fwd_rnd,  b0, b1, kp + 8 * N_COLS);
        round(fwd_rnd,  b1, b0, kp + 9 * N_COLS);
        round(fwd_lrnd, b0, b1, kp + 10 * N_COLS);
        //-fallthrough
    }

#else

#if (ENC_UNROLL == PARTIAL)
    {   uint32_t rnd;
        for(rnd = 0; rnd < (cx->inf.b[0] >> 5) - 1; ++rnd)
        {
            kp += N_COLS;
            round(fwd_rnd, b1, b0, kp);
            kp += N_COLS;
            round(fwd_rnd, b0, b1, kp);
        }
        kp += N_COLS;
        round(fwd_rnd, b1, b0, kp);
#else
    {   uint32_t rnd;
        for(rnd = 0; rnd < (cx->inf.b[0] >> 4) - 1; ++rnd)
        {
            kp += N_COLS;
            round(fwd_rnd, b1, b0, kp);
            l_copy(b0, b1);
        }
#endif
        kp += N_COLS;
        round(fwd_lrnd, b0, b1, kp);
    }
#endif

    state_out(out, b0);
    return EXIT_SUCCESS;
}
#endif
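
/* A minimal usage sketch for the single block encryption implemented
   above.  It assumes the public declarations provided by aes.h in this
   library (aes_init, aes_encrypt_key128 and aes_encrypt); the
   AES_USAGE_EXAMPLE guard is hypothetical and simply keeps the sketch
   out of normal builds.
*/
#if defined( AES_USAGE_EXAMPLE )
#include <stdlib.h>
#include "aes.h"

static int example_encrypt_one_block(void)
{   unsigned char key[16] = { 0 };              /* 128 bit key (all zero here)  */
    unsigned char pt[AES_BLOCK_SIZE] = { 0 };   /* one 16 byte plaintext block  */
    unsigned char ct[AES_BLOCK_SIZE];
    aes_encrypt_ctx ctx[1];

    aes_init();                                 /* build tables if they are generated at run time */
    if(aes_encrypt_key128(key, ctx) != EXIT_SUCCESS)
        return EXIT_FAILURE;
    return aes_encrypt(pt, ct, ctx);            /* resolves to aes_xi(encrypt) above when the
                                                   C implementation is the one compiled in      */
}
#endif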

#if ( FUNCS_IN_C & DECRYPTION_IN_C)

/* Visual C++ .Net v7.1 provides the fastest encryption code when using
   Pentium optimisation with small code but this is poor for decryption
   so we need to control this with the following VC++ pragmas
*/
#if defined( _MSC_VER ) && !defined( _WIN64 ) && !defined( __clang__ )
#pragma optimize( "t", on )
#endif
/* Given the column (c) of the output state variable, the following
   macros give the input state variables which are needed in its
   computation for each row (r) of the state. All the alternative
   macros give the same end values but expand into different ways
   of calculating these values.  In particular the complex macro
   used for dynamically variable block sizes is designed to expand
   to a compile time constant whenever possible but will expand to
   conditional clauses on some branches (I am grateful to Frank
   Yellin for this construction)
*/

#define inv_var(x,r,c)\
 ( r == 0 ? ( c == 0 ? s(x,0) : c == 1 ? s(x,1) : c == 2 ? s(x,2) : s(x,3))\
 : r == 1 ? ( c == 0 ? s(x,3) : c == 1 ? s(x,0) : c == 2 ? s(x,1) : s(x,2))\
 : r == 2 ? ( c == 0 ? s(x,2) : c == 1 ? s(x,3) : c == 2 ? s(x,0) : s(x,1))\
 :          ( c == 0 ? s(x,1) : c == 1 ? s(x,2) : c == 2 ? s(x,3) : s(x,0)))
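
/* As with fwd_var, constant arguments collapse the chain above to one
   state word, here column (c - r) mod 4, i.e. the inverse ShiftRows
   offset.  For example, inv_var(x,1,0) reduces to s(x,3) and
   inv_var(x,3,2) also reduces to s(x,3).
*/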

#if defined(IT4_SET)
#undef  dec_imvars
#define inv_rnd(y,x,k,c)    (s(y,c) = (k)[c] ^ four_tables(x,t_use(i,n),inv_var,rf1,c))
#elif defined(IT1_SET)
#undef  dec_imvars
#define inv_rnd(y,x,k,c)    (s(y,c) = (k)[c] ^ one_table(x,upr,t_use(i,n),inv_var,rf1,c))
#else
#define inv_rnd(y,x,k,c)    (s(y,c) = inv_mcol((k)[c] ^ no_table(x,t_use(i,box),inv_var,rf1,c)))
#endif

#if defined(IL4_SET)
#define inv_lrnd(y,x,k,c)   (s(y,c) = (k)[c] ^ four_tables(x,t_use(i,l),inv_var,rf1,c))
#elif defined(IL1_SET)
#define inv_lrnd(y,x,k,c)   (s(y,c) = (k)[c] ^ one_table(x,ups,t_use(i,l),inv_var,rf1,c))
#else
#define inv_lrnd(y,x,k,c)   (s(y,c) = (k)[c] ^ no_table(x,t_use(i,box),inv_var,rf1,c))
#endif

/* This code can work with the decryption key schedule in the  */
/* order that is used for encryption (where the 1st decryption */
/* round key is at the high end of the schedule) or with a key */
/* schedule that has been reversed to put the 1st decryption   */
/* round key at the low end of the schedule in memory (when    */
/* AES_REV_DKS is defined)                                     */

#ifdef AES_REV_DKS
#define key_ofs     0
#define rnd_key(n)  (kp + n * N_COLS)
#else
#define key_ofs     1
#define rnd_key(n)  (kp - n * N_COLS)
#endif
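
/* Example of how the round keys are addressed in the unrolled decryption
   code below for a 256 bit key (cx->inf.b[0] == 14 * AES_BLOCK_SIZE):
   the rounds use rnd_key(-13), rnd_key(-12), ..., rnd_key(0) in turn.
   With AES_REV_DKS defined these expand to kp + n * N_COLS, stepping
   upwards through a schedule stored with the 1st decryption round key
   at the low end; without it they expand to kp - n * N_COLS, stepping
   downwards through a schedule kept in encryption order.
*/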

AES_RETURN aes_xi(decrypt)(const unsigned char *in, unsigned char *out, const aes_decrypt_ctx cx[1])
{   uint32_t locals(b0, b1);
#if defined( dec_imvars )
    dec_imvars; /* declare variables for inv_mcol() if needed */
#endif
    const uint32_t *kp = NULL;

    if(cx->inf.b[0] != 10 * AES_BLOCK_SIZE && cx->inf.b[0] != 12 * AES_BLOCK_SIZE && cx->inf.b[0] != 14 * AES_BLOCK_SIZE)
        return EXIT_FAILURE;

    kp = cx->ks + (key_ofs ? (cx->inf.b[0] >> 2) : 0);
    state_in(b0, in, kp);

#if (DEC_UNROLL == FULL)

    kp = cx->ks + (key_ofs ? 0 : (cx->inf.b[0] >> 2));
    switch(cx->inf.b[0])
    {
    case 14 * AES_BLOCK_SIZE:
        round(inv_rnd,  b1, b0, rnd_key(-13));
        round(inv_rnd,  b0, b1, rnd_key(-12));
        //-fallthrough
    case 12 * AES_BLOCK_SIZE:
        round(inv_rnd,  b1, b0, rnd_key(-11));
        round(inv_rnd,  b0, b1, rnd_key(-10));
        //-fallthrough
    case 10 * AES_BLOCK_SIZE:
        round(inv_rnd,  b1, b0, rnd_key(-9));
        round(inv_rnd,  b0, b1, rnd_key(-8));
        round(inv_rnd,  b1, b0, rnd_key(-7));
        round(inv_rnd,  b0, b1, rnd_key(-6));
        round(inv_rnd,  b1, b0, rnd_key(-5));
        round(inv_rnd,  b0, b1, rnd_key(-4));
        round(inv_rnd,  b1, b0, rnd_key(-3));
        round(inv_rnd,  b0, b1, rnd_key(-2));
        round(inv_rnd,  b1, b0, rnd_key(-1));
        round(inv_lrnd, b0, b1, rnd_key( 0));
        //-fallthrough
    }

#else

#if (DEC_UNROLL == PARTIAL)
    {   uint32_t rnd;
        for(rnd = 0; rnd < (cx->inf.b[0] >> 5) - 1; ++rnd)
        {
            kp = rnd_key(1);
            round(inv_rnd, b1, b0, kp);
            kp = rnd_key(1);
            round(inv_rnd, b0, b1, kp);
        }
        kp = rnd_key(1);
        round(inv_rnd, b1, b0, kp);
#else
    {   uint32_t rnd;
        for(rnd = 0; rnd < (cx->inf.b[0] >> 4) - 1; ++rnd)
        {
            kp = rnd_key(1);
            round(inv_rnd, b1, b0, kp);
            l_copy(b0, b1);
        }
#endif
        kp = rnd_key(1);
        round(inv_lrnd, b0, b1, kp);
    }
#endif

    state_out(out, b0);
    return EXIT_SUCCESS;
}
#endif
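
/* A matching usage sketch for the single block decryption implemented
   above.  As with the encryption example, it assumes the declarations
   provided by aes.h in this library (aes_init, aes_decrypt_key128 and
   aes_decrypt); the AES_USAGE_EXAMPLE guard is hypothetical and keeps
   the sketch out of normal builds.
*/
#if defined( AES_USAGE_EXAMPLE )
#include <stdlib.h>
#include "aes.h"

static int example_decrypt_one_block(const unsigned char key[16],
                                     const unsigned char ct[AES_BLOCK_SIZE],
                                     unsigned char pt[AES_BLOCK_SIZE])
{   aes_decrypt_ctx ctx[1];

    aes_init();                                 /* build tables if they are generated at run time */
    if(aes_decrypt_key128(key, ctx) != EXIT_SUCCESS)
        return EXIT_FAILURE;
    return aes_decrypt(ct, pt, ctx);            /* resolves to aes_xi(decrypt) above when the
                                                   C implementation is the one compiled in      */
}
#endif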

#if defined(__cplusplus)
}
#endif