- /*
- ---------------------------------------------------------------------------
- Copyright (c) 1998-2013, Brian Gladman, Worcester, UK. All rights reserved.
- The redistribution and use of this software (with or without changes)
- is allowed without the payment of fees or royalties provided that:
- source code distributions include the above copyright notice, this
- list of conditions and the following disclaimer;
- binary distributions include the above copyright notice, this list
- of conditions and the following disclaimer in their documentation.
- This software is provided 'as is' with no explicit or implied warranties
- in respect of its operation, including, but not limited to, correctness
- and fitness for purpose.
- ---------------------------------------------------------------------------
- Issue Date: 20/12/2007
- These subroutines implement multiple block AES modes for ECB, CBC, CFB,
- OFB and CTR encryption. The code provides support for the VIA Advanced
- Cryptography Engine (ACE).
- NOTE: In the following subroutines, the AES contexts (ctx) must be
- 16-byte aligned if VIA ACE is being used.
- */
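- /*
-  Illustrative usage sketch (an assumption, not part of the original file):
-  these mode routines are normally driven from the key schedule API that
-  aes.h declares (aes_init, aes_encrypt_key128, ...), for example:
-      aes_encrypt_ctx ctx[1];
-      unsigned char key[16], iv[AES_BLOCK_SIZE];
-      aes_init();                               /* build AES tables if the build needs it  */
-      aes_encrypt_key128(key, ctx);             /* expand a 128-bit key                    */
-      aes_cbc_encrypt(pt, ct, pt_len, iv, ctx); /* pt_len is a multiple of AES_BLOCK_SIZE  */
-  The exact key setup calls may differ between versions of the library.
- */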
- #include <string.h>
- #include <assert.h>
- #include <stdint.h>
- #include "aesopt.h"
- #if defined(AES_MODES)
- #if defined(__cplusplus)
- extern "C" {
- #endif
- #if defined(_MSC_VER) && (_MSC_VER > 800)
- #pragma intrinsic(memcpy)
- #endif
- #define BFR_BLOCKS 8
- /* These values are used to detect long word alignment in order to */
- /* speed up some buffer operations. This facility may not work on */
- /* some machines so this define can be commented out if necessary */
- #define FAST_BUFFER_OPERATIONS
- #define lp32(x) ((uint32_t*)(x))
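- /* lp32(x) reinterprets a byte pointer as a 32-bit word pointer; it is   */
- /* only used after ALIGN_OFFSET(x, 4) has confirmed 4-byte alignment     */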
- #if defined(USE_VIA_ACE_IF_PRESENT)
- #include "aes_via_ace.h"
- #pragma pack(16)
- aligned_array(unsigned long, enc_gen_table, 12, 16) = NEH_ENC_GEN_DATA;
- aligned_array(unsigned long, enc_load_table, 12, 16) = NEH_ENC_LOAD_DATA;
- aligned_array(unsigned long, enc_hybrid_table, 12, 16) = NEH_ENC_HYBRID_DATA;
- aligned_array(unsigned long, dec_gen_table, 12, 16) = NEH_DEC_GEN_DATA;
- aligned_array(unsigned long, dec_load_table, 12, 16) = NEH_DEC_LOAD_DATA;
- aligned_array(unsigned long, dec_hybrid_table, 12, 16) = NEH_DEC_HYBRID_DATA;
- /* NOTE: These control word macros must only be used after */
- /* a key has been set up because they depend on key size */
- /* See the VIA ACE documentation for key type information */
- /* and aes_via_ace.h for non-default NEH_KEY_TYPE values */
- #ifndef NEH_KEY_TYPE
- #define NEH_KEY_TYPE NEH_HYBRID
- #endif
- #if NEH_KEY_TYPE == NEH_LOAD
- #define kd_adr(c) ((uint8_t*)(c)->ks)
- #elif NEH_KEY_TYPE == NEH_GENERATE
- #define kd_adr(c) ((uint8_t*)(c)->ks + (c)->inf.b[0])
- #elif NEH_KEY_TYPE == NEH_HYBRID
- #define kd_adr(c) ((uint8_t*)(c)->ks + ((c)->inf.b[0] == 160 ? 160 : 0))
- #else
- #error no key type defined for VIA ACE
- #endif
- #else
- #define aligned_array(type, name, no, stride) type name[no]
- #define aligned_auto(type, name, no, stride) type name[no]
- #endif
- #if defined(_MSC_VER) && _MSC_VER > 1200
- #define via_cwd(cwd, ty, dir, len) unsigned long* cwd = (dir##_##ty##_table + ((len - 128) >> 4))
- #else
- #define via_cwd(cwd, ty, dir, len) \
- aligned_auto(unsigned long, cwd, 4, 16); \
- cwd[1] = cwd[2] = cwd[3] = 0; \
- cwd[0] = neh_##dir##_##ty##_key(len)
- #endif
- /* test the code for detecting and setting pointer alignment */
- AES_RETURN aes_test_alignment_detection(unsigned int n) /* 4 <= n <= 16 */
- {
- uint8_t p[16];
- uint32_t i = 0, count_eq = 0, count_neq = 0;
- if(n < 4 || n > 16) return EXIT_FAILURE;
- for(i = 0; i < n; ++i) {
- uint8_t *qf = ALIGN_FLOOR(p + i, n), *qh = ALIGN_CEIL(p + i, n);
- if(qh == qf)
- ++count_eq;
- else if(qh == qf + n)
- ++count_neq;
- else
- return EXIT_FAILURE;
- }
- return (count_eq != 1 || count_neq != n - 1 ? EXIT_FAILURE : EXIT_SUCCESS);
- }
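- /* Clear the partial-block position held in inf.b[2], which the CFB, OFB  */
- /* and CTR routines use, so that the next call starts on a block boundary */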
- AES_RETURN aes_mode_reset(aes_encrypt_ctx ctx[1]) {
- ctx->inf.b[2] = 0;
- return EXIT_SUCCESS;
- }
- AES_RETURN aes_ecb_encrypt(
- const unsigned char* ibuf,
- unsigned char* obuf,
- int len,
- const aes_encrypt_ctx ctx[1]) {
- int nb = len >> AES_BLOCK_SIZE_P2;
- if(len & (AES_BLOCK_SIZE - 1)) return EXIT_FAILURE;
- #if defined(USE_VIA_ACE_IF_PRESENT)
- if(ctx->inf.b[1] == 0xff) {
- uint8_t* ksp = (uint8_t*)(ctx->ks);
- via_cwd(cwd, hybrid, enc, 2 * ctx->inf.b[0] - 192);
- if(ALIGN_OFFSET(ctx, 16)) return EXIT_FAILURE;
- if(!ALIGN_OFFSET(ibuf, 16) && !ALIGN_OFFSET(obuf, 16)) {
- via_ecb_op5(ksp, cwd, ibuf, obuf, nb);
- } else {
- aligned_auto(uint8_t, buf, BFR_BLOCKS * AES_BLOCK_SIZE, 16);
- uint8_t *ip = NULL, *op = NULL;
- while(nb) {
- int m = (nb > BFR_BLOCKS ? BFR_BLOCKS : nb);
- ip = (ALIGN_OFFSET(ibuf, 16) ? buf : ibuf);
- op = (ALIGN_OFFSET(obuf, 16) ? buf : obuf);
- if(ip != ibuf) memcpy(buf, ibuf, m * AES_BLOCK_SIZE);
- via_ecb_op5(ksp, cwd, ip, op, m);
- if(op != obuf) memcpy(obuf, buf, m * AES_BLOCK_SIZE);
- ibuf += m * AES_BLOCK_SIZE;
- obuf += m * AES_BLOCK_SIZE;
- nb -= m;
- }
- }
- return EXIT_SUCCESS;
- }
- #endif
- #if !defined(ASSUME_VIA_ACE_PRESENT)
- while(nb--) {
- if(aes_encrypt(ibuf, obuf, ctx) != EXIT_SUCCESS) return EXIT_FAILURE;
- ibuf += AES_BLOCK_SIZE;
- obuf += AES_BLOCK_SIZE;
- }
- #endif
- return EXIT_SUCCESS;
- }
- AES_RETURN aes_ecb_decrypt(
- const unsigned char* ibuf,
- unsigned char* obuf,
- int len,
- const aes_decrypt_ctx ctx[1]) {
- int nb = len >> AES_BLOCK_SIZE_P2;
- if(len & (AES_BLOCK_SIZE - 1)) return EXIT_FAILURE;
- #if defined(USE_VIA_ACE_IF_PRESENT)
- if(ctx->inf.b[1] == 0xff) {
- uint8_t* ksp = kd_adr(ctx);
- via_cwd(cwd, hybrid, dec, 2 * ctx->inf.b[0] - 192);
- if(ALIGN_OFFSET(ctx, 16)) return EXIT_FAILURE;
- if(!ALIGN_OFFSET(ibuf, 16) && !ALIGN_OFFSET(obuf, 16)) {
- via_ecb_op5(ksp, cwd, ibuf, obuf, nb);
- } else {
- aligned_auto(uint8_t, buf, BFR_BLOCKS * AES_BLOCK_SIZE, 16);
- uint8_t *ip = NULL, *op = NULL;
- while(nb) {
- int m = (nb > BFR_BLOCKS ? BFR_BLOCKS : nb);
- ip = (ALIGN_OFFSET(ibuf, 16) ? buf : ibuf);
- op = (ALIGN_OFFSET(obuf, 16) ? buf : obuf);
- if(ip != ibuf) memcpy(buf, ibuf, m * AES_BLOCK_SIZE);
- via_ecb_op5(ksp, cwd, ip, op, m);
- if(op != obuf) memcpy(obuf, buf, m * AES_BLOCK_SIZE);
- ibuf += m * AES_BLOCK_SIZE;
- obuf += m * AES_BLOCK_SIZE;
- nb -= m;
- }
- }
- return EXIT_SUCCESS;
- }
- #endif
- #if !defined(ASSUME_VIA_ACE_PRESENT)
- while(nb--) {
- if(aes_decrypt(ibuf, obuf, ctx) != EXIT_SUCCESS) return EXIT_FAILURE;
- ibuf += AES_BLOCK_SIZE;
- obuf += AES_BLOCK_SIZE;
- }
- #endif
- return EXIT_SUCCESS;
- }
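- /* CBC encryption: each plaintext block is XORed with the previous        */
- /* ciphertext block (the IV for the first block) before encryption, and   */
- /* the last ciphertext block is written back to iv for chained calls      */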
- AES_RETURN aes_cbc_encrypt(
- const unsigned char* ibuf,
- unsigned char* obuf,
- int len,
- unsigned char* iv,
- const aes_encrypt_ctx ctx[1]) {
- int nb = len >> AES_BLOCK_SIZE_P2;
- if(len & (AES_BLOCK_SIZE - 1)) return EXIT_FAILURE;
- #if defined(USE_VIA_ACE_IF_PRESENT)
- if(ctx->inf.b[1] == 0xff) {
- uint8_t *ksp = (uint8_t*)(ctx->ks), *ivp = iv;
- aligned_auto(uint8_t, liv, AES_BLOCK_SIZE, 16);
- via_cwd(cwd, hybrid, enc, 2 * ctx->inf.b[0] - 192);
- if(ALIGN_OFFSET(ctx, 16)) return EXIT_FAILURE;
- if(ALIGN_OFFSET(iv, 16)) /* ensure an aligned iv */
- {
- ivp = liv;
- memcpy(liv, iv, AES_BLOCK_SIZE);
- }
- if(!ALIGN_OFFSET(ibuf, 16) && !ALIGN_OFFSET(obuf, 16) && !ALIGN_OFFSET(iv, 16)) {
- via_cbc_op7(ksp, cwd, ibuf, obuf, nb, ivp, ivp);
- } else {
- aligned_auto(uint8_t, buf, BFR_BLOCKS * AES_BLOCK_SIZE, 16);
- uint8_t *ip = NULL, *op = NULL;
- while(nb) {
- int m = (nb > BFR_BLOCKS ? BFR_BLOCKS : nb);
- ip = (ALIGN_OFFSET(ibuf, 16) ? buf : ibuf);
- op = (ALIGN_OFFSET(obuf, 16) ? buf : obuf);
- if(ip != ibuf) memcpy(buf, ibuf, m * AES_BLOCK_SIZE);
- via_cbc_op7(ksp, cwd, ip, op, m, ivp, ivp);
- if(op != obuf) memcpy(obuf, buf, m * AES_BLOCK_SIZE);
- ibuf += m * AES_BLOCK_SIZE;
- obuf += m * AES_BLOCK_SIZE;
- nb -= m;
- }
- }
- if(iv != ivp) memcpy(iv, ivp, AES_BLOCK_SIZE);
- return EXIT_SUCCESS;
- }
- #endif
- #if !defined(ASSUME_VIA_ACE_PRESENT)
- #ifdef FAST_BUFFER_OPERATIONS
- if(!ALIGN_OFFSET(ibuf, 4) && !ALIGN_OFFSET(iv, 4))
- while(nb--) {
- lp32(iv)[0] ^= lp32(ibuf)[0];
- lp32(iv)[1] ^= lp32(ibuf)[1];
- lp32(iv)[2] ^= lp32(ibuf)[2];
- lp32(iv)[3] ^= lp32(ibuf)[3];
- if(aes_encrypt(iv, iv, ctx) != EXIT_SUCCESS) return EXIT_FAILURE;
- memcpy(obuf, iv, AES_BLOCK_SIZE);
- ibuf += AES_BLOCK_SIZE;
- obuf += AES_BLOCK_SIZE;
- }
- else
- #endif
- while(nb--) {
- iv[0] ^= ibuf[0];
- iv[1] ^= ibuf[1];
- iv[2] ^= ibuf[2];
- iv[3] ^= ibuf[3];
- iv[4] ^= ibuf[4];
- iv[5] ^= ibuf[5];
- iv[6] ^= ibuf[6];
- iv[7] ^= ibuf[7];
- iv[8] ^= ibuf[8];
- iv[9] ^= ibuf[9];
- iv[10] ^= ibuf[10];
- iv[11] ^= ibuf[11];
- iv[12] ^= ibuf[12];
- iv[13] ^= ibuf[13];
- iv[14] ^= ibuf[14];
- iv[15] ^= ibuf[15];
- if(aes_encrypt(iv, iv, ctx) != EXIT_SUCCESS) return EXIT_FAILURE;
- memcpy(obuf, iv, AES_BLOCK_SIZE);
- ibuf += AES_BLOCK_SIZE;
- obuf += AES_BLOCK_SIZE;
- }
- #endif
- return EXIT_SUCCESS;
- }
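- /* CBC decryption: each ciphertext block is saved in tmp before it is     */
- /* decrypted, so in-place operation (ibuf == obuf) works; the output is   */
- /* then XORed with the previous ciphertext block held in iv               */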
- AES_RETURN aes_cbc_decrypt(
- const unsigned char* ibuf,
- unsigned char* obuf,
- int len,
- unsigned char* iv,
- const aes_decrypt_ctx ctx[1]) {
- unsigned char tmp[AES_BLOCK_SIZE];
- int nb = len >> AES_BLOCK_SIZE_P2;
- if(len & (AES_BLOCK_SIZE - 1)) return EXIT_FAILURE;
- #if defined(USE_VIA_ACE_IF_PRESENT)
- if(ctx->inf.b[1] == 0xff) {
- uint8_t *ksp = kd_adr(ctx), *ivp = iv;
- aligned_auto(uint8_t, liv, AES_BLOCK_SIZE, 16);
- via_cwd(cwd, hybrid, dec, 2 * ctx->inf.b[0] - 192);
- if(ALIGN_OFFSET(ctx, 16)) return EXIT_FAILURE;
- if(ALIGN_OFFSET(iv, 16)) /* ensure an aligned iv */
- {
- ivp = liv;
- memcpy(liv, iv, AES_BLOCK_SIZE);
- }
- if(!ALIGN_OFFSET(ibuf, 16) && !ALIGN_OFFSET(obuf, 16) && !ALIGN_OFFSET(iv, 16)) {
- via_cbc_op6(ksp, cwd, ibuf, obuf, nb, ivp);
- } else {
- aligned_auto(uint8_t, buf, BFR_BLOCKS * AES_BLOCK_SIZE, 16);
- uint8_t *ip = NULL, *op = NULL;
- while(nb) {
- int m = (nb > BFR_BLOCKS ? BFR_BLOCKS : nb);
- ip = (ALIGN_OFFSET(ibuf, 16) ? buf : ibuf);
- op = (ALIGN_OFFSET(obuf, 16) ? buf : obuf);
- if(ip != ibuf) memcpy(buf, ibuf, m * AES_BLOCK_SIZE);
- via_cbc_op6(ksp, cwd, ip, op, m, ivp);
- if(op != obuf) memcpy(obuf, buf, m * AES_BLOCK_SIZE);
- ibuf += m * AES_BLOCK_SIZE;
- obuf += m * AES_BLOCK_SIZE;
- nb -= m;
- }
- }
- if(iv != ivp) memcpy(iv, ivp, AES_BLOCK_SIZE);
- return EXIT_SUCCESS;
- }
- #endif
- #if !defined(ASSUME_VIA_ACE_PRESENT)
- #ifdef FAST_BUFFER_OPERATIONS
- if(!ALIGN_OFFSET(obuf, 4) && !ALIGN_OFFSET(iv, 4))
- while(nb--) {
- memcpy(tmp, ibuf, AES_BLOCK_SIZE);
- if(aes_decrypt(ibuf, obuf, ctx) != EXIT_SUCCESS) return EXIT_FAILURE;
- lp32(obuf)[0] ^= lp32(iv)[0];
- lp32(obuf)[1] ^= lp32(iv)[1];
- lp32(obuf)[2] ^= lp32(iv)[2];
- lp32(obuf)[3] ^= lp32(iv)[3];
- memcpy(iv, tmp, AES_BLOCK_SIZE);
- ibuf += AES_BLOCK_SIZE;
- obuf += AES_BLOCK_SIZE;
- }
- else
- #endif
- while(nb--) {
- memcpy(tmp, ibuf, AES_BLOCK_SIZE);
- if(aes_decrypt(ibuf, obuf, ctx) != EXIT_SUCCESS) return EXIT_FAILURE;
- obuf[0] ^= iv[0];
- obuf[1] ^= iv[1];
- obuf[2] ^= iv[2];
- obuf[3] ^= iv[3];
- obuf[4] ^= iv[4];
- obuf[5] ^= iv[5];
- obuf[6] ^= iv[6];
- obuf[7] ^= iv[7];
- obuf[8] ^= iv[8];
- obuf[9] ^= iv[9];
- obuf[10] ^= iv[10];
- obuf[11] ^= iv[11];
- obuf[12] ^= iv[12];
- obuf[13] ^= iv[13];
- obuf[14] ^= iv[14];
- obuf[15] ^= iv[15];
- memcpy(iv, tmp, AES_BLOCK_SIZE);
- ibuf += AES_BLOCK_SIZE;
- obuf += AES_BLOCK_SIZE;
- }
- #endif
- return EXIT_SUCCESS;
- }
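- /* CFB encryption: the IV is encrypted to give a keystream block, the     */
- /* plaintext is XORed in and the resulting ciphertext becomes the next    */
- /* feedback value; inf.b[2] records the position within a partial block   */
- /* so calls need not be multiples of AES_BLOCK_SIZE                       */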
- AES_RETURN aes_cfb_encrypt(
- const unsigned char* ibuf,
- unsigned char* obuf,
- int len,
- unsigned char* iv,
- aes_encrypt_ctx ctx[1]) {
- int cnt = 0, b_pos = (int)ctx->inf.b[2], nb;
- if(b_pos) /* complete any partial block */
- {
- while(b_pos < AES_BLOCK_SIZE && cnt < len) {
- *obuf++ = (iv[b_pos++] ^= *ibuf++);
- cnt++;
- }
- b_pos = (b_pos == AES_BLOCK_SIZE ? 0 : b_pos);
- }
- if((nb = (len - cnt) >> AES_BLOCK_SIZE_P2) != 0) /* process whole blocks */
- {
- #if defined(USE_VIA_ACE_IF_PRESENT)
- if(ctx->inf.b[1] == 0xff) {
- int m;
- uint8_t *ksp = (uint8_t*)(ctx->ks), *ivp = iv;
- aligned_auto(uint8_t, liv, AES_BLOCK_SIZE, 16);
- via_cwd(cwd, hybrid, enc, 2 * ctx->inf.b[0] - 192);
- if(ALIGN_OFFSET(ctx, 16)) return EXIT_FAILURE;
- if(ALIGN_OFFSET(iv, 16)) /* ensure an aligned iv */
- {
- ivp = liv;
- memcpy(liv, iv, AES_BLOCK_SIZE);
- }
- if(!ALIGN_OFFSET(ibuf, 16) && !ALIGN_OFFSET(obuf, 16)) {
- via_cfb_op7(ksp, cwd, ibuf, obuf, nb, ivp, ivp);
- ibuf += nb * AES_BLOCK_SIZE;
- obuf += nb * AES_BLOCK_SIZE;
- cnt += nb * AES_BLOCK_SIZE;
- } else /* input, output or both are unaligned */
- {
- aligned_auto(uint8_t, buf, BFR_BLOCKS * AES_BLOCK_SIZE, 16);
- uint8_t *ip = NULL, *op = NULL;
- while(nb) {
- m = (nb > BFR_BLOCKS ? BFR_BLOCKS : nb), nb -= m;
- ip = (ALIGN_OFFSET(ibuf, 16) ? buf : ibuf);
- op = (ALIGN_OFFSET(obuf, 16) ? buf : obuf);
- if(ip != ibuf) memcpy(buf, ibuf, m * AES_BLOCK_SIZE);
- via_cfb_op7(ksp, cwd, ip, op, m, ivp, ivp);
- if(op != obuf) memcpy(obuf, buf, m * AES_BLOCK_SIZE);
- ibuf += m * AES_BLOCK_SIZE;
- obuf += m * AES_BLOCK_SIZE;
- cnt += m * AES_BLOCK_SIZE;
- }
- }
- if(ivp != iv) memcpy(iv, ivp, AES_BLOCK_SIZE);
- }
- #else
- #ifdef FAST_BUFFER_OPERATIONS
- if(!ALIGN_OFFSET(ibuf, 4) && !ALIGN_OFFSET(obuf, 4) && !ALIGN_OFFSET(iv, 4))
- while(cnt + AES_BLOCK_SIZE <= len) {
- assert(b_pos == 0);
- if(aes_encrypt(iv, iv, ctx) != EXIT_SUCCESS) return EXIT_FAILURE;
- lp32(obuf)[0] = lp32(iv)[0] ^= lp32(ibuf)[0];
- lp32(obuf)[1] = lp32(iv)[1] ^= lp32(ibuf)[1];
- lp32(obuf)[2] = lp32(iv)[2] ^= lp32(ibuf)[2];
- lp32(obuf)[3] = lp32(iv)[3] ^= lp32(ibuf)[3];
- ibuf += AES_BLOCK_SIZE;
- obuf += AES_BLOCK_SIZE;
- cnt += AES_BLOCK_SIZE;
- }
- else
- #endif
- while(cnt + AES_BLOCK_SIZE <= len) {
- assert(b_pos == 0);
- if(aes_encrypt(iv, iv, ctx) != EXIT_SUCCESS) return EXIT_FAILURE;
- obuf[0] = iv[0] ^= ibuf[0];
- obuf[1] = iv[1] ^= ibuf[1];
- obuf[2] = iv[2] ^= ibuf[2];
- obuf[3] = iv[3] ^= ibuf[3];
- obuf[4] = iv[4] ^= ibuf[4];
- obuf[5] = iv[5] ^= ibuf[5];
- obuf[6] = iv[6] ^= ibuf[6];
- obuf[7] = iv[7] ^= ibuf[7];
- obuf[8] = iv[8] ^= ibuf[8];
- obuf[9] = iv[9] ^= ibuf[9];
- obuf[10] = iv[10] ^= ibuf[10];
- obuf[11] = iv[11] ^= ibuf[11];
- obuf[12] = iv[12] ^= ibuf[12];
- obuf[13] = iv[13] ^= ibuf[13];
- obuf[14] = iv[14] ^= ibuf[14];
- obuf[15] = iv[15] ^= ibuf[15];
- ibuf += AES_BLOCK_SIZE;
- obuf += AES_BLOCK_SIZE;
- cnt += AES_BLOCK_SIZE;
- }
- #endif
- }
- while(cnt < len) {
- if(!b_pos && aes_encrypt(iv, iv, ctx) != EXIT_SUCCESS) return EXIT_FAILURE;
- while(cnt < len && b_pos < AES_BLOCK_SIZE) {
- *obuf++ = (iv[b_pos++] ^= *ibuf++);
- cnt++;
- }
- b_pos = (b_pos == AES_BLOCK_SIZE ? 0 : b_pos);
- }
- ctx->inf.b[2] = (uint8_t)b_pos;
- return EXIT_SUCCESS;
- }
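- /* CFB decryption: also uses the forward cipher (an aes_encrypt_ctx) on   */
- /* the feedback register; the received ciphertext both recovers the       */
- /* plaintext and replaces the feedback value for the next block           */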
- AES_RETURN aes_cfb_decrypt(
- const unsigned char* ibuf,
- unsigned char* obuf,
- int len,
- unsigned char* iv,
- aes_encrypt_ctx ctx[1]) {
- int cnt = 0, b_pos = (int)ctx->inf.b[2], nb;
- if(b_pos) /* complete any partial block */
- {
- uint8_t t;
- while(b_pos < AES_BLOCK_SIZE && cnt < len) {
- t = *ibuf++;
- *obuf++ = t ^ iv[b_pos];
- iv[b_pos++] = t;
- cnt++;
- }
- b_pos = (b_pos == AES_BLOCK_SIZE ? 0 : b_pos);
- }
- if((nb = (len - cnt) >> AES_BLOCK_SIZE_P2) != 0) /* process whole blocks */
- {
- #if defined(USE_VIA_ACE_IF_PRESENT)
- if(ctx->inf.b[1] == 0xff) {
- int m;
- uint8_t *ksp = (uint8_t*)(ctx->ks), *ivp = iv;
- aligned_auto(uint8_t, liv, AES_BLOCK_SIZE, 16);
- via_cwd(cwd, hybrid, dec, 2 * ctx->inf.b[0] - 192);
- if(ALIGN_OFFSET(ctx, 16)) return EXIT_FAILURE;
- if(ALIGN_OFFSET(iv, 16)) /* ensure an aligned iv */
- {
- ivp = liv;
- memcpy(liv, iv, AES_BLOCK_SIZE);
- }
- if(!ALIGN_OFFSET(ibuf, 16) && !ALIGN_OFFSET(obuf, 16)) {
- via_cfb_op6(ksp, cwd, ibuf, obuf, nb, ivp);
- ibuf += nb * AES_BLOCK_SIZE;
- obuf += nb * AES_BLOCK_SIZE;
- cnt += nb * AES_BLOCK_SIZE;
- } else /* input, output or both are unaligned */
- {
- aligned_auto(uint8_t, buf, BFR_BLOCKS * AES_BLOCK_SIZE, 16);
- uint8_t *ip = NULL, *op = NULL;
- while(nb) {
- m = (nb > BFR_BLOCKS ? BFR_BLOCKS : nb), nb -= m;
- ip = (ALIGN_OFFSET(ibuf, 16) ? buf : ibuf);
- op = (ALIGN_OFFSET(obuf, 16) ? buf : obuf);
- if(ip != ibuf) /* input buffer is not aligned */
- memcpy(buf, ibuf, m * AES_BLOCK_SIZE);
- via_cfb_op6(ksp, cwd, ip, op, m, ivp);
- if(op != obuf) /* output buffer is not aligned */
- memcpy(obuf, buf, m * AES_BLOCK_SIZE);
- ibuf += m * AES_BLOCK_SIZE;
- obuf += m * AES_BLOCK_SIZE;
- cnt += m * AES_BLOCK_SIZE;
- }
- }
- if(ivp != iv) memcpy(iv, ivp, AES_BLOCK_SIZE);
- }
- #else
- #ifdef FAST_BUFFER_OPERATIONS
- if(!ALIGN_OFFSET(ibuf, 4) && !ALIGN_OFFSET(obuf, 4) && !ALIGN_OFFSET(iv, 4))
- while(cnt + AES_BLOCK_SIZE <= len) {
- uint32_t t;
- assert(b_pos == 0);
- if(aes_encrypt(iv, iv, ctx) != EXIT_SUCCESS) return EXIT_FAILURE;
- t = lp32(ibuf)[0], lp32(obuf)[0] = t ^ lp32(iv)[0], lp32(iv)[0] = t;
- t = lp32(ibuf)[1], lp32(obuf)[1] = t ^ lp32(iv)[1], lp32(iv)[1] = t;
- t = lp32(ibuf)[2], lp32(obuf)[2] = t ^ lp32(iv)[2], lp32(iv)[2] = t;
- t = lp32(ibuf)[3], lp32(obuf)[3] = t ^ lp32(iv)[3], lp32(iv)[3] = t;
- ibuf += AES_BLOCK_SIZE;
- obuf += AES_BLOCK_SIZE;
- cnt += AES_BLOCK_SIZE;
- }
- else
- #endif
- while(cnt + AES_BLOCK_SIZE <= len) {
- uint8_t t;
- assert(b_pos == 0);
- if(aes_encrypt(iv, iv, ctx) != EXIT_SUCCESS) return EXIT_FAILURE;
- t = ibuf[0], obuf[0] = t ^ iv[0], iv[0] = t;
- t = ibuf[1], obuf[1] = t ^ iv[1], iv[1] = t;
- t = ibuf[2], obuf[2] = t ^ iv[2], iv[2] = t;
- t = ibuf[3], obuf[3] = t ^ iv[3], iv[3] = t;
- t = ibuf[4], obuf[4] = t ^ iv[4], iv[4] = t;
- t = ibuf[5], obuf[5] = t ^ iv[5], iv[5] = t;
- t = ibuf[6], obuf[6] = t ^ iv[6], iv[6] = t;
- t = ibuf[7], obuf[7] = t ^ iv[7], iv[7] = t;
- t = ibuf[8], obuf[8] = t ^ iv[8], iv[8] = t;
- t = ibuf[9], obuf[9] = t ^ iv[9], iv[9] = t;
- t = ibuf[10], obuf[10] = t ^ iv[10], iv[10] = t;
- t = ibuf[11], obuf[11] = t ^ iv[11], iv[11] = t;
- t = ibuf[12], obuf[12] = t ^ iv[12], iv[12] = t;
- t = ibuf[13], obuf[13] = t ^ iv[13], iv[13] = t;
- t = ibuf[14], obuf[14] = t ^ iv[14], iv[14] = t;
- t = ibuf[15], obuf[15] = t ^ iv[15], iv[15] = t;
- ibuf += AES_BLOCK_SIZE;
- obuf += AES_BLOCK_SIZE;
- cnt += AES_BLOCK_SIZE;
- }
- #endif
- }
- while(cnt < len) {
- uint8_t t;
- if(!b_pos && aes_encrypt(iv, iv, ctx) != EXIT_SUCCESS) return EXIT_FAILURE;
- while(cnt < len && b_pos < AES_BLOCK_SIZE) {
- t = *ibuf++;
- *obuf++ = t ^ iv[b_pos];
- iv[b_pos++] = t;
- cnt++;
- }
- b_pos = (b_pos == AES_BLOCK_SIZE ? 0 : b_pos);
- }
- ctx->inf.b[2] = (uint8_t)b_pos;
- return EXIT_SUCCESS;
- }
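- /* OFB mode: the keystream is produced by repeatedly encrypting the IV,   */
- /* independently of the data, so encryption and decryption are the same   */
- /* operation and this single routine serves for both                      */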
- AES_RETURN aes_ofb_crypt(
- const unsigned char* ibuf,
- unsigned char* obuf,
- int len,
- unsigned char* iv,
- aes_encrypt_ctx ctx[1]) {
- int cnt = 0, b_pos = (int)ctx->inf.b[2], nb;
- if(b_pos) /* complete any partial block */
- {
- while(b_pos < AES_BLOCK_SIZE && cnt < len) {
- *obuf++ = iv[b_pos++] ^ *ibuf++;
- cnt++;
- }
- b_pos = (b_pos == AES_BLOCK_SIZE ? 0 : b_pos);
- }
- if((nb = (len - cnt) >> AES_BLOCK_SIZE_P2) != 0) /* process whole blocks */
- {
- #if defined(USE_VIA_ACE_IF_PRESENT)
- if(ctx->inf.b[1] == 0xff) {
- int m;
- uint8_t *ksp = (uint8_t*)(ctx->ks), *ivp = iv;
- aligned_auto(uint8_t, liv, AES_BLOCK_SIZE, 16);
- via_cwd(cwd, hybrid, enc, 2 * ctx->inf.b[0] - 192);
- if(ALIGN_OFFSET(ctx, 16)) return EXIT_FAILURE;
- if(ALIGN_OFFSET(iv, 16)) /* ensure an aligned iv */
- {
- ivp = liv;
- memcpy(liv, iv, AES_BLOCK_SIZE);
- }
- if(!ALIGN_OFFSET(ibuf, 16) && !ALIGN_OFFSET(obuf, 16)) {
- via_ofb_op6(ksp, cwd, ibuf, obuf, nb, ivp);
- ibuf += nb * AES_BLOCK_SIZE;
- obuf += nb * AES_BLOCK_SIZE;
- cnt += nb * AES_BLOCK_SIZE;
- } else /* input, output or both are unaligned */
- {
- aligned_auto(uint8_t, buf, BFR_BLOCKS * AES_BLOCK_SIZE, 16);
- uint8_t *ip = NULL, *op = NULL;
- while(nb) {
- m = (nb > BFR_BLOCKS ? BFR_BLOCKS : nb), nb -= m;
- ip = (ALIGN_OFFSET(ibuf, 16) ? buf : ibuf);
- op = (ALIGN_OFFSET(obuf, 16) ? buf : obuf);
- if(ip != ibuf) memcpy(buf, ibuf, m * AES_BLOCK_SIZE);
- via_ofb_op6(ksp, cwd, ip, op, m, ivp);
- if(op != obuf) memcpy(obuf, buf, m * AES_BLOCK_SIZE);
- ibuf += m * AES_BLOCK_SIZE;
- obuf += m * AES_BLOCK_SIZE;
- cnt += m * AES_BLOCK_SIZE;
- }
- }
- if(ivp != iv) memcpy(iv, ivp, AES_BLOCK_SIZE);
- }
- #else
- #ifdef FAST_BUFFER_OPERATIONS
- if(!ALIGN_OFFSET(ibuf, 4) && !ALIGN_OFFSET(obuf, 4) && !ALIGN_OFFSET(iv, 4))
- while(cnt + AES_BLOCK_SIZE <= len) {
- assert(b_pos == 0);
- if(aes_encrypt(iv, iv, ctx) != EXIT_SUCCESS) return EXIT_FAILURE;
- lp32(obuf)[0] = lp32(iv)[0] ^ lp32(ibuf)[0];
- lp32(obuf)[1] = lp32(iv)[1] ^ lp32(ibuf)[1];
- lp32(obuf)[2] = lp32(iv)[2] ^ lp32(ibuf)[2];
- lp32(obuf)[3] = lp32(iv)[3] ^ lp32(ibuf)[3];
- ibuf += AES_BLOCK_SIZE;
- obuf += AES_BLOCK_SIZE;
- cnt += AES_BLOCK_SIZE;
- }
- else
- #endif
- while(cnt + AES_BLOCK_SIZE <= len) {
- assert(b_pos == 0);
- if(aes_encrypt(iv, iv, ctx) != EXIT_SUCCESS) return EXIT_FAILURE;
- obuf[0] = iv[0] ^ ibuf[0];
- obuf[1] = iv[1] ^ ibuf[1];
- obuf[2] = iv[2] ^ ibuf[2];
- obuf[3] = iv[3] ^ ibuf[3];
- obuf[4] = iv[4] ^ ibuf[4];
- obuf[5] = iv[5] ^ ibuf[5];
- obuf[6] = iv[6] ^ ibuf[6];
- obuf[7] = iv[7] ^ ibuf[7];
- obuf[8] = iv[8] ^ ibuf[8];
- obuf[9] = iv[9] ^ ibuf[9];
- obuf[10] = iv[10] ^ ibuf[10];
- obuf[11] = iv[11] ^ ibuf[11];
- obuf[12] = iv[12] ^ ibuf[12];
- obuf[13] = iv[13] ^ ibuf[13];
- obuf[14] = iv[14] ^ ibuf[14];
- obuf[15] = iv[15] ^ ibuf[15];
- ibuf += AES_BLOCK_SIZE;
- obuf += AES_BLOCK_SIZE;
- cnt += AES_BLOCK_SIZE;
- }
- #endif
- }
- while(cnt < len) {
- if(!b_pos && aes_encrypt(iv, iv, ctx) != EXIT_SUCCESS) return EXIT_FAILURE;
- while(cnt < len && b_pos < AES_BLOCK_SIZE) {
- *obuf++ = iv[b_pos++] ^ *ibuf++;
- cnt++;
- }
- b_pos = (b_pos == AES_BLOCK_SIZE ? 0 : b_pos);
- }
- ctx->inf.b[2] = (uint8_t)b_pos;
- return EXIT_SUCCESS;
- }
- #define BFR_LENGTH (BFR_BLOCKS * AES_BLOCK_SIZE)
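- /* CTR mode: successive counter values from cbuf are encrypted in batches */
- /* of up to BFR_BLOCKS blocks and XORed with the data; ctr_inc is the     */
- /* caller-supplied routine that advances the counter after each block.    */
- /* As with OFB, the same routine both encrypts and decrypts               */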
- AES_RETURN aes_ctr_crypt(
- const unsigned char* ibuf,
- unsigned char* obuf,
- int len,
- unsigned char* cbuf,
- cbuf_inc ctr_inc,
- aes_encrypt_ctx ctx[1]) {
- unsigned char* ip;
- int i = 0, blen = 0, b_pos = (int)(ctx->inf.b[2]);
- #if defined(USE_VIA_ACE_IF_PRESENT)
- aligned_auto(uint8_t, buf, BFR_LENGTH, 16);
- if(ctx->inf.b[1] == 0xff && ALIGN_OFFSET(ctx, 16)) return EXIT_FAILURE;
- #else
- uint8_t buf[BFR_LENGTH] = {0};
- #endif
- if(b_pos) {
- memcpy(buf, cbuf, AES_BLOCK_SIZE);
- if(aes_ecb_encrypt(buf, buf, AES_BLOCK_SIZE, ctx) != EXIT_SUCCESS) return EXIT_FAILURE;
- while(b_pos < AES_BLOCK_SIZE && len) {
- *obuf++ = *ibuf++ ^ buf[b_pos++];
- --len;
- }
- if(len) ctr_inc(cbuf), b_pos = 0;
- }
- while(len) {
- blen = (len > BFR_LENGTH ? BFR_LENGTH : len), len -= blen;
- for(i = 0, ip = buf; i < (blen >> AES_BLOCK_SIZE_P2); ++i) {
- memcpy(ip, cbuf, AES_BLOCK_SIZE);
- ctr_inc(cbuf);
- ip += AES_BLOCK_SIZE;
- }
- if(blen & (AES_BLOCK_SIZE - 1)) memcpy(ip, cbuf, AES_BLOCK_SIZE), i++;
- #if defined(USE_VIA_ACE_IF_PRESENT)
- if(ctx->inf.b[1] == 0xff) {
- via_cwd(cwd, hybrid, enc, 2 * ctx->inf.b[0] - 192);
- via_ecb_op5((ctx->ks), cwd, buf, buf, i);
- } else
- #endif
- if(aes_ecb_encrypt(buf, buf, i * AES_BLOCK_SIZE, ctx) != EXIT_SUCCESS)
- return EXIT_FAILURE;
- i = 0;
- ip = buf;
- #ifdef FAST_BUFFER_OPERATIONS
- if(!ALIGN_OFFSET(ibuf, 4) && !ALIGN_OFFSET(obuf, 4) && !ALIGN_OFFSET(ip, 4))
- while(i + AES_BLOCK_SIZE <= blen) {
- lp32(obuf)[0] = lp32(ibuf)[0] ^ lp32(ip)[0];
- lp32(obuf)[1] = lp32(ibuf)[1] ^ lp32(ip)[1];
- lp32(obuf)[2] = lp32(ibuf)[2] ^ lp32(ip)[2];
- lp32(obuf)[3] = lp32(ibuf)[3] ^ lp32(ip)[3];
- i += AES_BLOCK_SIZE;
- ip += AES_BLOCK_SIZE;
- ibuf += AES_BLOCK_SIZE;
- obuf += AES_BLOCK_SIZE;
- }
- else
- #endif
- while(i + AES_BLOCK_SIZE <= blen) {
- obuf[0] = ibuf[0] ^ ip[0];
- obuf[1] = ibuf[1] ^ ip[1];
- obuf[2] = ibuf[2] ^ ip[2];
- obuf[3] = ibuf[3] ^ ip[3];
- obuf[4] = ibuf[4] ^ ip[4];
- obuf[5] = ibuf[5] ^ ip[5];
- obuf[6] = ibuf[6] ^ ip[6];
- obuf[7] = ibuf[7] ^ ip[7];
- obuf[8] = ibuf[8] ^ ip[8];
- obuf[9] = ibuf[9] ^ ip[9];
- obuf[10] = ibuf[10] ^ ip[10];
- obuf[11] = ibuf[11] ^ ip[11];
- obuf[12] = ibuf[12] ^ ip[12];
- obuf[13] = ibuf[13] ^ ip[13];
- obuf[14] = ibuf[14] ^ ip[14];
- obuf[15] = ibuf[15] ^ ip[15];
- i += AES_BLOCK_SIZE;
- ip += AES_BLOCK_SIZE;
- ibuf += AES_BLOCK_SIZE;
- obuf += AES_BLOCK_SIZE;
- }
- while(i++ < blen) *obuf++ = *ibuf++ ^ ip[b_pos++];
- }
- ctx->inf.b[2] = (uint8_t)b_pos;
- return EXIT_SUCCESS;
- }
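- /* Default counter increment: treats cbuf as a big-endian integer of      */
- /* AES_BLOCK_SIZE bytes and adds one, carrying towards byte 0             */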
- void aes_ctr_cbuf_inc(unsigned char* cbuf) {
- int i = AES_BLOCK_SIZE - 1;
- while(i >= 0) {
- cbuf[i]++;
- if(cbuf[i]) return; /* no carry, so we are done */
- i--;
- }
- }
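- /*
-  Illustrative CTR call (an assumption, not taken from the original file):
-  aes_ctr_cbuf_inc above matches the cbuf_inc callback type and can be
-  passed to aes_ctr_crypt, with the caller setting the initial counter:
-      unsigned char ctr[AES_BLOCK_SIZE] = {0};
-      aes_ctr_crypt(pt, ct, len, ctr, aes_ctr_cbuf_inc, ctx);
-  Unlike ECB and CBC, the CTR, CFB and OFB routines accept any byte
-  length, so len need not be a multiple of AES_BLOCK_SIZE.
- */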
- #if defined(__cplusplus)
- }
- #endif
- #endif