diff --git a/.ci/meta_builds.sh b/.ci/meta_builds.sh index 2ba99a9e7..3f14562f6 100755 --- a/.ci/meta_builds.sh +++ b/.ci/meta_builds.sh @@ -56,7 +56,7 @@ function run_clang() { scan_build=$(which scan-build) || true [ -z "$scan_build" ] && scan_build=$(find /usr/bin/ -name 'scan-build-[0-9]*' | sort -nr | head -n1) || true [ -z "$scan_build" ] && { echo "couldn't find clang scan-build"; exit 1; } || echo "run $scan_build" - $scan_build --status-bugs make -j$MAKE_JOBS all CFLAGS="$2 $CFLAGS $4" EXTRALIBS="$5" + $scan_build --status-bugs make -j$MAKE_JOBS all CFLAGS="$2 $CFLAGS $4 -DLTC_NO_FAST" EXTRALIBS="$5" make clean &>/dev/null diff --git a/demos/timing.c b/demos/timing.c index 8b252e222..fe238ec8a 100644 --- a/demos/timing.c +++ b/demos/timing.c @@ -1154,7 +1154,7 @@ static void time_encmacs_(unsigned long MAC_SIZE) ulong64 t1, t2; unsigned long x, z; int err, cipher_idx; - symmetric_key skey; + symmetric_ECB skey; fprintf(stderr, "\nENC+MAC Timings (zero byte AAD, 16 byte IV, cycles/byte on %luKB blocks):\n", MAC_SIZE); @@ -1233,7 +1233,7 @@ static void time_encmacs_(unsigned long MAC_SIZE) } fprintf(stderr, "CCM (no-precomp) \t%9"PRI64"u\n", t2/(ulong64)(MAC_SIZE*1024)); - cipher_descriptor[cipher_idx].setup(key, 16, 0, &skey); + ecb_start(cipher_idx, key, 16, 0, &skey); t2 = -1; for (x = 0; x < 10000; x++) { t_start(); @@ -1247,7 +1247,7 @@ static void time_encmacs_(unsigned long MAC_SIZE) if (t1 < t2) t2 = t1; } fprintf(stderr, "CCM (precomp) \t\t%9"PRI64"u\n", t2/(ulong64)(MAC_SIZE*1024)); - cipher_descriptor[cipher_idx].done(&skey); + ecb_done(&skey); #endif #ifdef LTC_GCM_MODE diff --git a/src/encauth/ccm/ccm_add_aad.c b/src/encauth/ccm/ccm_add_aad.c index 130d3041f..94d2e2b56 100644 --- a/src/encauth/ccm/ccm_add_aad.c +++ b/src/encauth/ccm/ccm_add_aad.c @@ -29,7 +29,7 @@ int ccm_add_aad(ccm_state *ccm, for (y = 0; y < adatalen; y++) { if (ccm->x == 16) { /* full block so let's encrypt it */ - if ((err = cipher_descriptor[ccm->cipher].ecb_encrypt(ccm->PAD, ccm->PAD, &ccm->K)) != CRYPT_OK) { + if ((err = ecb_encrypt_block(ccm->PAD, ccm->PAD, &ccm->K)) != CRYPT_OK) { return err; } ccm->x = 0; @@ -40,7 +40,7 @@ int ccm_add_aad(ccm_state *ccm, /* remainder? 
*/ if (ccm->aadlen == ccm->current_aadlen) { if (ccm->x != 0) { - if ((err = cipher_descriptor[ccm->cipher].ecb_encrypt(ccm->PAD, ccm->PAD, &ccm->K)) != CRYPT_OK) { + if ((err = ecb_encrypt_block(ccm->PAD, ccm->PAD, &ccm->K)) != CRYPT_OK) { return err; } } diff --git a/src/encauth/ccm/ccm_add_nonce.c b/src/encauth/ccm/ccm_add_nonce.c index bda74411c..2fe347857 100644 --- a/src/encauth/ccm/ccm_add_nonce.c +++ b/src/encauth/ccm/ccm_add_nonce.c @@ -60,7 +60,7 @@ int ccm_add_nonce(ccm_state *ccm, } /* encrypt PAD */ - if ((err = cipher_descriptor[ccm->cipher].ecb_encrypt(ccm->PAD, ccm->PAD, &ccm->K)) != CRYPT_OK) { + if ((err = ecb_encrypt_block(ccm->PAD, ccm->PAD, &ccm->K)) != CRYPT_OK) { return err; } diff --git a/src/encauth/ccm/ccm_done.c b/src/encauth/ccm/ccm_done.c index 965121d71..51ce6ccd4 100644 --- a/src/encauth/ccm/ccm_done.c +++ b/src/encauth/ccm/ccm_done.c @@ -28,7 +28,7 @@ int ccm_done(ccm_state *ccm, LTC_ARGCHK(taglen != NULL); if (ccm->x != 0) { - if ((err = cipher_descriptor[ccm->cipher].ecb_encrypt(ccm->PAD, ccm->PAD, &ccm->K)) != CRYPT_OK) { + if ((err = ecb_encrypt_block(ccm->PAD, ccm->PAD, &ccm->K)) != CRYPT_OK) { return err; } } @@ -37,11 +37,11 @@ int ccm_done(ccm_state *ccm, for (y = 15; y > 15 - ccm->L; y--) { ccm->ctr[y] = 0x00; } - if ((err = cipher_descriptor[ccm->cipher].ecb_encrypt(ccm->ctr, ccm->CTRPAD, &ccm->K)) != CRYPT_OK) { + if ((err = ecb_encrypt_block(ccm->ctr, ccm->CTRPAD, &ccm->K)) != CRYPT_OK) { return err; } - cipher_descriptor[ccm->cipher].done(&ccm->K); + ecb_done(&ccm->K); /* store the TAG */ for (x = 0; x < 16 && x < *taglen; x++) { diff --git a/src/encauth/ccm/ccm_init.c b/src/encauth/ccm/ccm_init.c index c98929ecf..06a152250 100644 --- a/src/encauth/ccm/ccm_init.c +++ b/src/encauth/ccm/ccm_init.c @@ -41,10 +41,9 @@ int ccm_init(ccm_state *ccm, int cipher, ccm->taglen = taglen; /* schedule key */ - if ((err = cipher_descriptor[cipher].setup(key, keylen, 0, &ccm->K)) != CRYPT_OK) { + if ((err = ecb_start(cipher, key, keylen, 0, &ccm->K)) != CRYPT_OK) { return err; } - ccm->cipher = cipher; /* let's get the L value */ ccm->ptlen = ptlen; diff --git a/src/encauth/ccm/ccm_memory.c b/src/encauth/ccm/ccm_memory.c index d22c0fb84..44a4a34a8 100644 --- a/src/encauth/ccm/ccm_memory.c +++ b/src/encauth/ccm/ccm_memory.c @@ -32,7 +32,7 @@ */ int ccm_memory(int cipher, const unsigned char *key, unsigned long keylen, - symmetric_key *uskey, + symmetric_ECB *uskey, const unsigned char *nonce, unsigned long noncelen, const unsigned char *header, unsigned long headerlen, unsigned char *pt, unsigned long ptlen, @@ -42,7 +42,7 @@ int ccm_memory(int cipher, { unsigned char PAD[16], ctr[16], CTRPAD[16], ptTag[16], b, *pt_real; unsigned char *pt_work = NULL; - symmetric_key *skey; + symmetric_ECB *skey; int err; unsigned long len, L, x, y, z, CTRlen; @@ -78,12 +78,15 @@ int ccm_memory(int cipher, if (*taglen < 4 || *taglen > 16 || (*taglen % 2) == 1) { return CRYPT_INVALID_ARG; } + if (noncelen < 7) { + return CRYPT_INVALID_ARG; + } /* is there an accelerator? 
*/ if (cipher_descriptor[cipher].accel_ccm_memory != NULL) { return cipher_descriptor[cipher].accel_ccm_memory( key, keylen, - uskey, + &uskey->key, nonce, noncelen, header, headerlen, pt, ptlen, @@ -117,7 +120,7 @@ int ccm_memory(int cipher, } /* initialize the cipher */ - if ((err = cipher_descriptor[cipher].setup(key, keylen, 0, skey)) != CRYPT_OK) { + if ((err = ecb_start(cipher, key, keylen, 0, skey)) != CRYPT_OK) { XFREE(skey); return err; } @@ -141,7 +144,7 @@ int ccm_memory(int cipher, (L-1)); /* nonce */ - for (y = 0; y < (16 - (L + 1)); y++) { + for (y = 0; y < noncelen; y++) { PAD[x++] = nonce[y]; } @@ -163,7 +166,7 @@ int ccm_memory(int cipher, } /* encrypt PAD */ - if ((err = cipher_descriptor[cipher].ecb_encrypt(PAD, PAD, skey)) != CRYPT_OK) { + if ((err = ecb_encrypt_block(PAD, PAD, skey)) != CRYPT_OK) { goto error; } @@ -188,7 +191,7 @@ int ccm_memory(int cipher, for (y = 0; y < headerlen; y++) { if (x == 16) { /* full block so let's encrypt it */ - if ((err = cipher_descriptor[cipher].ecb_encrypt(PAD, PAD, skey)) != CRYPT_OK) { + if ((err = ecb_encrypt_block(PAD, PAD, skey)) != CRYPT_OK) { goto error; } x = 0; @@ -197,7 +200,7 @@ int ccm_memory(int cipher, } /* remainder */ - if ((err = cipher_descriptor[cipher].ecb_encrypt(PAD, PAD, skey)) != CRYPT_OK) { + if ((err = ecb_encrypt_block(PAD, PAD, skey)) != CRYPT_OK) { goto error; } } @@ -232,7 +235,7 @@ int ccm_memory(int cipher, ctr[z] = (ctr[z] + 1) & 255; if (ctr[z]) break; } - if ((err = cipher_descriptor[cipher].ecb_encrypt(ctr, CTRPAD, skey)) != CRYPT_OK) { + if ((err = ecb_encrypt_block(ctr, CTRPAD, skey)) != CRYPT_OK) { goto error; } @@ -241,7 +244,7 @@ int ccm_memory(int cipher, *(LTC_FAST_TYPE_PTR_CAST(&PAD[z])) ^= *(LTC_FAST_TYPE_PTR_CAST(&pt[y+z])); *(LTC_FAST_TYPE_PTR_CAST(&ct[y+z])) = *(LTC_FAST_TYPE_PTR_CAST(&pt[y+z])) ^ *(LTC_FAST_TYPE_PTR_CAST(&CTRPAD[z])); } - if ((err = cipher_descriptor[cipher].ecb_encrypt(PAD, PAD, skey)) != CRYPT_OK) { + if ((err = ecb_encrypt_block(PAD, PAD, skey)) != CRYPT_OK) { goto error; } } @@ -252,7 +255,7 @@ int ccm_memory(int cipher, ctr[z] = (ctr[z] + 1) & 255; if (ctr[z]) break; } - if ((err = cipher_descriptor[cipher].ecb_encrypt(ctr, CTRPAD, skey)) != CRYPT_OK) { + if ((err = ecb_encrypt_block(ctr, CTRPAD, skey)) != CRYPT_OK) { goto error; } @@ -261,7 +264,7 @@ int ccm_memory(int cipher, *(LTC_FAST_TYPE_PTR_CAST(&pt[y+z])) = *(LTC_FAST_TYPE_PTR_CAST(&ct[y+z])) ^ *(LTC_FAST_TYPE_PTR_CAST(&CTRPAD[z])); *(LTC_FAST_TYPE_PTR_CAST(&PAD[z])) ^= *(LTC_FAST_TYPE_PTR_CAST(&pt[y+z])); } - if ((err = cipher_descriptor[cipher].ecb_encrypt(PAD, PAD, skey)) != CRYPT_OK) { + if ((err = ecb_encrypt_block(PAD, PAD, skey)) != CRYPT_OK) { goto error; } } @@ -276,7 +279,7 @@ int ccm_memory(int cipher, ctr[z] = (ctr[z] + 1) & 255; if (ctr[z]) break; } - if ((err = cipher_descriptor[cipher].ecb_encrypt(ctr, CTRPAD, skey)) != CRYPT_OK) { + if ((err = ecb_encrypt_block(ctr, CTRPAD, skey)) != CRYPT_OK) { goto error; } CTRlen = 0; @@ -292,7 +295,7 @@ int ccm_memory(int cipher, } if (x == 16) { - if ((err = cipher_descriptor[cipher].ecb_encrypt(PAD, PAD, skey)) != CRYPT_OK) { + if ((err = ecb_encrypt_block(PAD, PAD, skey)) != CRYPT_OK) { goto error; } x = 0; @@ -301,7 +304,7 @@ int ccm_memory(int cipher, } if (x != 0) { - if ((err = cipher_descriptor[cipher].ecb_encrypt(PAD, PAD, skey)) != CRYPT_OK) { + if ((err = ecb_encrypt_block(PAD, PAD, skey)) != CRYPT_OK) { goto error; } } @@ -311,12 +314,12 @@ int ccm_memory(int cipher, for (y = 15; y > 15 - L; y--) { ctr[y] = 0x00; } - if ((err = 
cipher_descriptor[cipher].ecb_encrypt(ctr, CTRPAD, skey)) != CRYPT_OK) { + if ((err = ecb_encrypt_block(ctr, CTRPAD, skey)) != CRYPT_OK) { goto error; } if (skey != uskey) { - cipher_descriptor[cipher].done(skey); + ecb_done(skey); #ifdef LTC_CLEAN_STACK zeromem(skey, sizeof(*skey)); #endif diff --git a/src/encauth/ccm/ccm_process.c b/src/encauth/ccm/ccm_process.c index 3d2248091..b5f973d5b 100644 --- a/src/encauth/ccm/ccm_process.c +++ b/src/encauth/ccm/ccm_process.c @@ -47,7 +47,7 @@ int ccm_process(ccm_state *ccm, ccm->ctr[z] = (ccm->ctr[z] + 1) & 255; if (ccm->ctr[z]) break; } - if ((err = cipher_descriptor[ccm->cipher].ecb_encrypt(ccm->ctr, ccm->CTRPAD, &ccm->K)) != CRYPT_OK) { + if ((err = ecb_encrypt_block(ccm->ctr, ccm->CTRPAD, &ccm->K)) != CRYPT_OK) { return err; } ccm->CTRlen = 0; @@ -63,7 +63,7 @@ int ccm_process(ccm_state *ccm, } if (ccm->x == 16) { - if ((err = cipher_descriptor[ccm->cipher].ecb_encrypt(ccm->PAD, ccm->PAD, &ccm->K)) != CRYPT_OK) { + if ((err = ecb_encrypt_block(ccm->PAD, ccm->PAD, &ccm->K)) != CRYPT_OK) { return err; } ccm->x = 0; diff --git a/src/encauth/ccm/ccm_test.c b/src/encauth/ccm/ccm_test.c index 04d97b19e..d76b40685 100644 --- a/src/encauth/ccm/ccm_test.c +++ b/src/encauth/ccm/ccm_test.c @@ -108,7 +108,7 @@ int ccm_test(void) unsigned long taglen, x, y; unsigned char buf[64], buf2[64], tag[16], tag2[16], tag3[16], zero[64]; int err, idx; - symmetric_key skey; + symmetric_ECB skey; ccm_state ccm; zeromem(zero, 64); @@ -125,7 +125,7 @@ int ccm_test(void) for (y = 0; y < 2; y++) { taglen = tests[x].taglen; if (y == 0) { - if ((err = cipher_descriptor[idx].setup(tests[x].key, 16, 0, &skey)) != CRYPT_OK) { + if ((err = ecb_start(idx, tests[x].key, 16, 0, &skey)) != CRYPT_OK) { return err; } @@ -235,7 +235,7 @@ int ccm_test(void) } if (y == 0) { - cipher_descriptor[idx].done(&skey); + ecb_done(&skey); } } } diff --git a/src/encauth/gcm/gcm_add_aad.c b/src/encauth/gcm/gcm_add_aad.c index 1e71639f7..43e742dfb 100644 --- a/src/encauth/gcm/gcm_add_aad.c +++ b/src/encauth/gcm/gcm_add_aad.c @@ -20,7 +20,6 @@ int gcm_add_aad(gcm_state *gcm, const unsigned char *adata, unsigned long adatalen) { unsigned long x; - int err; #ifdef LTC_FAST unsigned long y; #endif @@ -34,10 +33,6 @@ int gcm_add_aad(gcm_state *gcm, return CRYPT_INVALID_ARG; } - if ((err = cipher_is_valid(gcm->cipher)) != CRYPT_OK) { - return err; - } - /* in IV mode? 
*/ if (gcm->mode == LTC_GCM_MODE_IV) { /* IV length must be > 0 */ diff --git a/src/encauth/gcm/gcm_add_iv.c b/src/encauth/gcm/gcm_add_iv.c index 33a24445d..b37a55bf8 100644 --- a/src/encauth/gcm/gcm_add_iv.c +++ b/src/encauth/gcm/gcm_add_iv.c @@ -20,7 +20,6 @@ int gcm_add_iv(gcm_state *gcm, const unsigned char *IV, unsigned long IVlen) { unsigned long x, y; - int err; LTC_ARGCHK(gcm != NULL); if (IVlen > 0) { @@ -36,11 +35,6 @@ int gcm_add_iv(gcm_state *gcm, return CRYPT_INVALID_ARG; } - if ((err = cipher_is_valid(gcm->cipher)) != CRYPT_OK) { - return err; - } - - /* trip the ivmode flag */ if (IVlen + gcm->buflen > 12) { gcm->ivmode |= 1; diff --git a/src/encauth/gcm/gcm_done.c b/src/encauth/gcm/gcm_done.c index 464f87ad1..5e135d140 100644 --- a/src/encauth/gcm/gcm_done.c +++ b/src/encauth/gcm/gcm_done.c @@ -30,10 +30,6 @@ int gcm_done(gcm_state *gcm, return CRYPT_INVALID_ARG; } - if ((err = cipher_is_valid(gcm->cipher)) != CRYPT_OK) { - return err; - } - if (gcm->mode == LTC_GCM_MODE_IV) { /* let's process the IV */ if ((err = gcm_add_aad(gcm, NULL, 0)) != CRYPT_OK) return err; @@ -63,7 +59,7 @@ int gcm_done(gcm_state *gcm, gcm_mult_h(gcm, gcm->X); /* encrypt original counter */ - if ((err = cipher_descriptor[gcm->cipher].ecb_encrypt(gcm->Y_0, gcm->buf, &gcm->K)) != CRYPT_OK) { + if ((err = ecb_encrypt_block(gcm->Y_0, gcm->buf, &gcm->K)) != CRYPT_OK) { return err; } for (x = 0; x < 16 && x < *taglen; x++) { @@ -71,7 +67,7 @@ int gcm_done(gcm_state *gcm, } *taglen = x; - cipher_descriptor[gcm->cipher].done(&gcm->K); + ecb_done(&gcm->K); return CRYPT_OK; } diff --git a/src/encauth/gcm/gcm_init.c b/src/encauth/gcm/gcm_init.c index 1822bdccd..96abfe2c2 100644 --- a/src/encauth/gcm/gcm_init.c +++ b/src/encauth/gcm/gcm_init.c @@ -44,20 +44,19 @@ int gcm_init(gcm_state *gcm, int cipher, } /* schedule key */ - if ((err = cipher_descriptor[cipher].setup(key, keylen, 0, &gcm->K)) != CRYPT_OK) { + if ((err = ecb_start(cipher, key, keylen, 0, &gcm->K)) != CRYPT_OK) { return err; } /* H = E(0) */ zeromem(B, 16); - if ((err = cipher_descriptor[cipher].ecb_encrypt(B, gcm->H, &gcm->K)) != CRYPT_OK) { + if ((err = ecb_encrypt_block(B, gcm->H, &gcm->K)) != CRYPT_OK) { return err; } /* setup state */ zeromem(gcm->buf, sizeof(gcm->buf)); zeromem(gcm->X, sizeof(gcm->X)); - gcm->cipher = cipher; gcm->mode = LTC_GCM_MODE_IV; gcm->ivmode = 0; gcm->buflen = 0; diff --git a/src/encauth/gcm/gcm_process.c b/src/encauth/gcm/gcm_process.c index e3c956c65..b75c1d040 100644 --- a/src/encauth/gcm/gcm_process.c +++ b/src/encauth/gcm/gcm_process.c @@ -37,10 +37,6 @@ int gcm_process(gcm_state *gcm, return CRYPT_INVALID_ARG; } - if ((err = cipher_is_valid(gcm->cipher)) != CRYPT_OK) { - return err; - } - /* 0xFFFFFFFE0 = ((2^39)-256)/8 */ if (gcm->pttotlen / 8 + (ulong64)gcm->buflen + (ulong64)ptlen >= CONST64(0xFFFFFFFE0)) { return CRYPT_INVALID_ARG; @@ -64,7 +60,7 @@ int gcm_process(gcm_state *gcm, if (++gcm->Y[y] & 255) { break; } } /* encrypt the counter */ - if ((err = cipher_descriptor[gcm->cipher].ecb_encrypt(gcm->Y, gcm->buf, &gcm->K)) != CRYPT_OK) { + if ((err = ecb_encrypt_block(gcm->Y, gcm->buf, &gcm->K)) != CRYPT_OK) { return err; } @@ -93,7 +89,7 @@ int gcm_process(gcm_state *gcm, for (y = 15; y >= 12; y--) { if (++gcm->Y[y] & 255) { break; } } - if ((err = cipher_descriptor[gcm->cipher].ecb_encrypt(gcm->Y, gcm->buf, &gcm->K)) != CRYPT_OK) { + if ((err = ecb_encrypt_block(gcm->Y, gcm->buf, &gcm->K)) != CRYPT_OK) { return err; } } @@ -111,7 +107,7 @@ int gcm_process(gcm_state *gcm, for (y = 15; y >= 12; y--) { 
if (++gcm->Y[y] & 255) { break; } } - if ((err = cipher_descriptor[gcm->cipher].ecb_encrypt(gcm->Y, gcm->buf, &gcm->K)) != CRYPT_OK) { + if ((err = ecb_encrypt_block(gcm->Y, gcm->buf, &gcm->K)) != CRYPT_OK) { return err; } } @@ -129,7 +125,7 @@ int gcm_process(gcm_state *gcm, for (y = 15; y >= 12; y--) { if (++gcm->Y[y] & 255) { break; } } - if ((err = cipher_descriptor[gcm->cipher].ecb_encrypt(gcm->Y, gcm->buf, &gcm->K)) != CRYPT_OK) { + if ((err = ecb_encrypt_block(gcm->Y, gcm->buf, &gcm->K)) != CRYPT_OK) { return err; } gcm->buflen = 0; diff --git a/src/encauth/ocb/ocb_decrypt.c b/src/encauth/ocb/ocb_decrypt.c index dd512ce28..98f36e48a 100644 --- a/src/encauth/ocb/ocb_decrypt.c +++ b/src/encauth/ocb/ocb_decrypt.c @@ -25,16 +25,8 @@ int ocb_decrypt(ocb_state *ocb, const unsigned char *ct, unsigned char *pt) LTC_ARGCHK(pt != NULL); LTC_ARGCHK(ct != NULL); - /* check if valid cipher */ - if ((err = cipher_is_valid(ocb->cipher)) != CRYPT_OK) { - return err; - } - LTC_ARGCHK(cipher_descriptor[ocb->cipher].ecb_decrypt != NULL); - - /* check length */ - if (ocb->block_len != cipher_descriptor[ocb->cipher].block_length) { - return CRYPT_INVALID_ARG; - } + /* can't use a encrypt-only descriptor */ + LTC_ARGCHK(cipher_descriptor[ocb->key.cipher].ecb_decrypt != NULL); /* Get Z[i] value */ ocb_shift_xor(ocb, Z); @@ -43,7 +35,7 @@ int ocb_decrypt(ocb_state *ocb, const unsigned char *ct, unsigned char *pt) for (x = 0; x < ocb->block_len; x++) { tmp[x] = ct[x] ^ Z[x]; } - if ((err = cipher_descriptor[ocb->cipher].ecb_decrypt(tmp, pt, &ocb->key)) != CRYPT_OK) { + if ((err = ecb_decrypt_block(tmp, pt, &ocb->key)) != CRYPT_OK) { return err; } for (x = 0; x < ocb->block_len; x++) { diff --git a/src/encauth/ocb/ocb_encrypt.c b/src/encauth/ocb/ocb_encrypt.c index ad6260f4f..a38765a7b 100644 --- a/src/encauth/ocb/ocb_encrypt.c +++ b/src/encauth/ocb/ocb_encrypt.c @@ -24,12 +24,6 @@ int ocb_encrypt(ocb_state *ocb, const unsigned char *pt, unsigned char *ct) LTC_ARGCHK(ocb != NULL); LTC_ARGCHK(pt != NULL); LTC_ARGCHK(ct != NULL); - if ((err = cipher_is_valid(ocb->cipher)) != CRYPT_OK) { - return err; - } - if (ocb->block_len != cipher_descriptor[ocb->cipher].block_length) { - return CRYPT_INVALID_ARG; - } /* compute checksum */ for (x = 0; x < ocb->block_len; x++) { @@ -43,7 +37,7 @@ int ocb_encrypt(ocb_state *ocb, const unsigned char *pt, unsigned char *ct) for (x = 0; x < ocb->block_len; x++) { tmp[x] = pt[x] ^ Z[x]; } - if ((err = cipher_descriptor[ocb->cipher].ecb_encrypt(tmp, ct, &ocb->key)) != CRYPT_OK) { + if ((err = ecb_encrypt_block(tmp, ct, &ocb->key)) != CRYPT_OK) { return err; } for (x = 0; x < ocb->block_len; x++) { diff --git a/src/encauth/ocb/ocb_init.c b/src/encauth/ocb/ocb_init.c index 1ae58b6c7..6d68c8664 100644 --- a/src/encauth/ocb/ocb_init.c +++ b/src/encauth/ocb/ocb_init.c @@ -66,13 +66,13 @@ int ocb_init(ocb_state *ocb, int cipher, } /* schedule the key */ - if ((err = cipher_descriptor[cipher].setup(key, keylen, 0, &ocb->key)) != CRYPT_OK) { + if ((err = ecb_start(cipher, key, keylen, 0, &ocb->key)) != CRYPT_OK) { return err; } /* find L = E[0] */ zeromem(ocb->L, ocb->block_len); - if ((err = cipher_descriptor[cipher].ecb_encrypt(ocb->L, ocb->L, &ocb->key)) != CRYPT_OK) { + if ((err = ecb_encrypt_block(ocb->L, ocb->L, &ocb->key)) != CRYPT_OK) { return err; } @@ -80,7 +80,7 @@ int ocb_init(ocb_state *ocb, int cipher, for (x = 0; x < ocb->block_len; x++) { ocb->R[x] = ocb->L[x] ^ nonce[x]; } - if ((err = cipher_descriptor[cipher].ecb_encrypt(ocb->R, ocb->R, &ocb->key)) != CRYPT_OK) { + if 
((err = ecb_encrypt_block(ocb->R, ocb->R, &ocb->key)) != CRYPT_OK) { return err; } @@ -121,7 +121,6 @@ int ocb_init(ocb_state *ocb, int cipher, /* set other params */ ocb->block_index = 1; - ocb->cipher = cipher; return CRYPT_OK; } diff --git a/src/encauth/ocb/s_ocb_done.c b/src/encauth/ocb/s_ocb_done.c index c5987b9b5..9336b754c 100644 --- a/src/encauth/ocb/s_ocb_done.c +++ b/src/encauth/ocb/s_ocb_done.c @@ -40,11 +40,7 @@ int s_ocb_done(ocb_state *ocb, const unsigned char *pt, unsigned long ptlen, LTC_ARGCHK(ct != NULL); LTC_ARGCHK(tag != NULL); LTC_ARGCHK(taglen != NULL); - if ((err = cipher_is_valid(ocb->cipher)) != CRYPT_OK) { - return err; - } - if (ocb->block_len != cipher_descriptor[ocb->cipher].block_length || - (int)ptlen > ocb->block_len || (int)ptlen < 0) { + if ((int)ptlen > ocb->block_len || (int)ptlen < 0) { return CRYPT_INVALID_ARG; } @@ -76,7 +72,7 @@ int s_ocb_done(ocb_state *ocb, const unsigned char *pt, unsigned long ptlen, } /* Y[m] = E(X[m])) */ - if ((err = cipher_descriptor[ocb->cipher].ecb_encrypt(X, Y, &ocb->key)) != CRYPT_OK) { + if ((err = ecb_encrypt_block(X, Y, &ocb->key)) != CRYPT_OK) { goto error; } @@ -107,10 +103,10 @@ int s_ocb_done(ocb_state *ocb, const unsigned char *pt, unsigned long ptlen, } /* encrypt checksum, er... tag!! */ - if ((err = cipher_descriptor[ocb->cipher].ecb_encrypt(ocb->checksum, X, &ocb->key)) != CRYPT_OK) { + if ((err = ecb_encrypt_block(ocb->checksum, X, &ocb->key)) != CRYPT_OK) { goto error; } - cipher_descriptor[ocb->cipher].done(&ocb->key); + ecb_done(&ocb->key); /* now store it */ for (x = 0; x < ocb->block_len && x < (int)*taglen; x++) { diff --git a/src/encauth/ocb3/ocb3_add_aad.c b/src/encauth/ocb3/ocb3_add_aad.c index fd09abaa9..5e5d39877 100644 --- a/src/encauth/ocb3/ocb3_add_aad.c +++ b/src/encauth/ocb3/ocb3_add_aad.c @@ -25,7 +25,7 @@ static int s_ocb3_int_aad_add_block(ocb3_state *ocb, const unsigned char *aad_bl /* Sum_i = Sum_{i-1} xor ENCIPHER(K, A_i xor Offset_i) */ ocb3_int_xor_blocks(tmp, aad_block, ocb->aOffset_current, ocb->block_len); - if ((err = cipher_descriptor[ocb->cipher].ecb_encrypt(tmp, tmp, &ocb->key)) != CRYPT_OK) { + if ((err = ecb_encrypt_block(tmp, tmp, &ocb->key)) != CRYPT_OK) { return err; } ocb3_int_xor_blocks(ocb->aSum_current, ocb->aSum_current, tmp, ocb->block_len); diff --git a/src/encauth/ocb3/ocb3_decrypt.c b/src/encauth/ocb3/ocb3_decrypt.c index 5261518f0..a0b2d301e 100644 --- a/src/encauth/ocb3/ocb3_decrypt.c +++ b/src/encauth/ocb3/ocb3_decrypt.c @@ -28,14 +28,7 @@ int ocb3_decrypt(ocb3_state *ocb, const unsigned char *ct, unsigned long ctlen, LTC_ARGCHK(ct != NULL); LTC_ARGCHK(pt != NULL); - if ((err = cipher_is_valid(ocb->cipher)) != CRYPT_OK) { - return err; - } - if (ocb->block_len != cipher_descriptor[ocb->cipher].block_length) { - return CRYPT_INVALID_ARG; - } - - if (ctlen % ocb->block_len) { /* ctlen has to bu multiple of block_len */ + if (ctlen % ocb->block_len) { /* ctlen has to be multiple of block_len */ return CRYPT_INVALID_ARG; } @@ -51,7 +44,7 @@ int ocb3_decrypt(ocb3_state *ocb, const unsigned char *ct, unsigned long ctlen, ocb3_int_xor_blocks(tmp, ct_b, ocb->Offset_current, ocb->block_len); /* decrypt */ - if ((err = cipher_descriptor[ocb->cipher].ecb_decrypt(tmp, tmp, &ocb->key)) != CRYPT_OK) { + if ((err = ecb_decrypt_block(tmp, tmp, &ocb->key)) != CRYPT_OK) { goto LBL_ERR; } diff --git a/src/encauth/ocb3/ocb3_decrypt_last.c b/src/encauth/ocb3/ocb3_decrypt_last.c index f69e34b71..0bb4f6089 100644 --- a/src/encauth/ocb3/ocb3_decrypt_last.c +++ 
b/src/encauth/ocb3/ocb3_decrypt_last.c @@ -30,10 +30,6 @@ int ocb3_decrypt_last(ocb3_state *ocb, const unsigned char *ct, unsigned long ct LTC_ARGCHK(pt != NULL); } - if ((err = cipher_is_valid(ocb->cipher)) != CRYPT_OK) { - goto LBL_ERR; - } - full_blocks = ctlen/ocb->block_len; full_blocks_len = full_blocks * ocb->block_len; last_block_len = ctlen - full_blocks_len; @@ -50,7 +46,7 @@ int ocb3_decrypt_last(ocb3_state *ocb, const unsigned char *ct, unsigned long ct ocb3_int_xor_blocks(iOffset_star, ocb->Offset_current, ocb->L_star, ocb->block_len); /* Pad = ENCIPHER(K, Offset_*) */ - if ((err = cipher_descriptor[ocb->cipher].ecb_encrypt(iOffset_star, iPad, &ocb->key)) != CRYPT_OK) { + if ((err = ecb_encrypt_block(iOffset_star, iPad, &ocb->key)) != CRYPT_OK) { goto LBL_ERR; } @@ -72,7 +68,7 @@ int ocb3_decrypt_last(ocb3_state *ocb, const unsigned char *ct, unsigned long ct for(x=0; x<ocb->block_len; x++) { ocb->tag_part[x] = (ocb->checksum[x] ^ iOffset_star[x]) ^ ocb->L_dollar[x]; } - if ((err = cipher_descriptor[ocb->cipher].ecb_encrypt(ocb->tag_part, ocb->tag_part, &ocb->key)) != CRYPT_OK) { + if ((err = ecb_encrypt_block(ocb->tag_part, ocb->tag_part, &ocb->key)) != CRYPT_OK) { goto LBL_ERR; } } @@ -82,7 +78,7 @@ int ocb3_decrypt_last(ocb3_state *ocb, const unsigned char *ct, unsigned long ct for(x=0; x<ocb->block_len; x++) { ocb->tag_part[x] = (ocb->checksum[x] ^ ocb->Offset_current[x]) ^ ocb->L_dollar[x]; } - if ((err = cipher_descriptor[ocb->cipher].ecb_encrypt(ocb->tag_part, ocb->tag_part, &ocb->key)) != CRYPT_OK) { + if ((err = ecb_encrypt_block(ocb->tag_part, ocb->tag_part, &ocb->key)) != CRYPT_OK) { goto LBL_ERR; } } diff --git a/src/encauth/ocb3/ocb3_done.c b/src/encauth/ocb3/ocb3_done.c index 688aa80eb..00a8c2924 100644 --- a/src/encauth/ocb3/ocb3_done.c +++ b/src/encauth/ocb3/ocb3_done.c @@ -24,9 +24,6 @@ int ocb3_done(ocb3_state *ocb, unsigned char *tag, unsigned long *taglen) LTC_ARGCHK(ocb != NULL); LTC_ARGCHK(tag != NULL); LTC_ARGCHK(taglen != NULL); - if ((err = cipher_is_valid(ocb->cipher)) != CRYPT_OK) { - goto LBL_ERR; - } /* check taglen */ if ((int)*taglen < ocb->tag_len) { @@ -52,7 +49,7 @@ int ocb3_done(ocb3_state *ocb, unsigned char *tag, unsigned long *taglen) } /* Sum = Sum_m xor ENCIPHER(K, CipherInput) */ - if ((err = cipher_descriptor[ocb->cipher].ecb_encrypt(tmp, tmp, &ocb->key)) != CRYPT_OK) { + if ((err = ecb_encrypt_block(tmp, tmp, &ocb->key)) != CRYPT_OK) { goto LBL_ERR; } ocb3_int_xor_blocks(ocb->aSum_current, ocb->aSum_current, tmp, ocb->block_len); diff --git a/src/encauth/ocb3/ocb3_encrypt.c b/src/encauth/ocb3/ocb3_encrypt.c index 45ba8a97a..57162a8ac 100644 --- a/src/encauth/ocb3/ocb3_encrypt.c +++ b/src/encauth/ocb3/ocb3_encrypt.c @@ -28,14 +28,7 @@ int ocb3_encrypt(ocb3_state *ocb, const unsigned char *pt, unsigned long ptlen, LTC_ARGCHK(pt != NULL); LTC_ARGCHK(ct != NULL); - if ((err = cipher_is_valid(ocb->cipher)) != CRYPT_OK) { - return err; - } - if (ocb->block_len != cipher_descriptor[ocb->cipher].block_length) { - return CRYPT_INVALID_ARG; - } - - if (ptlen % ocb->block_len) { /* ptlen has to bu multiple of block_len */ + if (ptlen % ocb->block_len) { /* ptlen has to be multiple of block_len */ return CRYPT_INVALID_ARG; } @@ -51,7 +44,7 @@ int ocb3_encrypt(ocb3_state *ocb, const unsigned char *pt, unsigned long ptlen, ocb3_int_xor_blocks(tmp, pt_b, ocb->Offset_current, ocb->block_len); /* encrypt */ - if ((err = cipher_descriptor[ocb->cipher].ecb_encrypt(tmp, tmp, &ocb->key)) != CRYPT_OK) { + if ((err = ecb_encrypt_block(tmp, tmp, &ocb->key)) != CRYPT_OK)
{ goto LBL_ERR; } } diff --git a/src/encauth/ocb3/ocb3_encrypt_last.c b/src/encauth/ocb3/ocb3_encrypt_last.c index 76bd1ede8..c0122a0e5 100644 --- a/src/encauth/ocb3/ocb3_encrypt_last.c +++ b/src/encauth/ocb3/ocb3_encrypt_last.c @@ -30,10 +30,6 @@ int ocb3_encrypt_last(ocb3_state *ocb, const unsigned char *pt, unsigned long pt LTC_ARGCHK(ct != NULL); } - if ((err = cipher_is_valid(ocb->cipher)) != CRYPT_OK) { - goto LBL_ERR; - } - full_blocks = ptlen/ocb->block_len; full_blocks_len = full_blocks * ocb->block_len; last_block_len = ptlen - full_blocks_len; @@ -52,7 +48,7 @@ int ocb3_encrypt_last(ocb3_state *ocb, const unsigned char *pt, unsigned long pt ocb3_int_xor_blocks(iOffset_star, ocb->Offset_current, ocb->L_star, ocb->block_len); /* Pad = ENCIPHER(K, Offset_*) */ - if ((err = cipher_descriptor[ocb->cipher].ecb_encrypt(iOffset_star, iPad, &ocb->key)) != CRYPT_OK) { + if ((err = ecb_encrypt_block(iOffset_star, iPad, &ocb->key)) != CRYPT_OK) { goto LBL_ERR; } @@ -74,7 +70,7 @@ int ocb3_encrypt_last(ocb3_state *ocb, const unsigned char *pt, unsigned long pt for(x=0; x<ocb->block_len; x++) { ocb->tag_part[x] = (ocb->checksum[x] ^ iOffset_star[x]) ^ ocb->L_dollar[x]; } - if ((err = cipher_descriptor[ocb->cipher].ecb_encrypt(ocb->tag_part, ocb->tag_part, &ocb->key)) != CRYPT_OK) { + if ((err = ecb_encrypt_block(ocb->tag_part, ocb->tag_part, &ocb->key)) != CRYPT_OK) { goto LBL_ERR; } } else { @@ -83,7 +79,7 @@ int ocb3_encrypt_last(ocb3_state *ocb, const unsigned char *pt, unsigned long pt for(x=0; x<ocb->block_len; x++) { ocb->tag_part[x] = (ocb->checksum[x] ^ ocb->Offset_current[x]) ^ ocb->L_dollar[x]; } - if ((err = cipher_descriptor[ocb->cipher].ecb_encrypt(ocb->tag_part, ocb->tag_part, &ocb->key)) != CRYPT_OK) { + if ((err = ecb_encrypt_block(ocb->tag_part, ocb->tag_part, &ocb->key)) != CRYPT_OK) { goto LBL_ERR; } } diff --git a/src/encauth/ocb3/ocb3_init.c b/src/encauth/ocb3/ocb3_init.c index 09493acc9..a212a83b1 100644 --- a/src/encauth/ocb3/ocb3_init.c +++ b/src/encauth/ocb3/ocb3_init.c @@ -30,7 +30,7 @@ static void s_ocb3_int_calc_offset_zero(ocb3_state *ocb, const unsigned char *no /* Ktop = ENCIPHER(K, Nonce[1..122] || zeros(6)) */ iNonce[ocb->block_len-1] = iNonce[ocb->block_len-1] & 0xC0; - if ((cipher_descriptor[ocb->cipher].ecb_encrypt(iNonce, iKtop, &ocb->key)) != CRYPT_OK) { + if ((ecb_encrypt_block(iNonce, iKtop, &ocb->key)) != CRYPT_OK) { zeromem(ocb->Offset_current, ocb->block_len); return; } @@ -95,7 +95,6 @@ int ocb3_init(ocb3_state *ocb, int cipher, if ((err = cipher_is_valid(cipher)) != CRYPT_OK) { return err; } - ocb->cipher = cipher; /* Valid Nonce? 
* As of RFC7253: "string of no more than 120 bits" */ @@ -130,13 +129,13 @@ int ocb3_init(ocb3_state *ocb, int cipher, } /* schedule the key */ - if ((err = cipher_descriptor[cipher].setup(key, keylen, 0, &ocb->key)) != CRYPT_OK) { + if ((err = ecb_start(cipher, key, keylen, 0, &ocb->key)) != CRYPT_OK) { return err; } /* L_* = ENCIPHER(K, zeros(128)) */ zeromem(ocb->L_star, ocb->block_len); - if ((err = cipher_descriptor[cipher].ecb_encrypt(ocb->L_star, ocb->L_star, &ocb->key)) != CRYPT_OK) { + if ((err = ecb_encrypt_block(ocb->L_star, ocb->L_star, &ocb->key)) != CRYPT_OK) { return err; } diff --git a/src/headers/tomcrypt_cipher.h b/src/headers/tomcrypt_cipher.h index 7d8d5ad61..ec6390526 100644 --- a/src/headers/tomcrypt_cipher.h +++ b/src/headers/tomcrypt_cipher.h @@ -257,48 +257,36 @@ typedef struct { #ifdef LTC_CFB_MODE /** A block cipher CFB structure */ typedef struct { - /** The index of the cipher chosen */ - int cipher, - /** The block size of the given cipher */ - blocklen, + /** The ECB context of the cipher */ + symmetric_ECB ecb; /** The padding offset */ - padlen; + int padlen; /** The current IV */ unsigned char IV[MAXBLOCKSIZE], /** The pad used to encrypt/decrypt */ pad[MAXBLOCKSIZE]; - /** The scheduled key */ - symmetric_key key; } symmetric_CFB; #endif #ifdef LTC_OFB_MODE /** A block cipher OFB structure */ typedef struct { - /** The index of the cipher chosen */ - int cipher, - /** The block size of the given cipher */ - blocklen, + /** The ECB context of the cipher */ + symmetric_ECB ecb; /** The padding offset */ - padlen; + int padlen; /** The current IV */ unsigned char IV[MAXBLOCKSIZE]; - /** The scheduled key */ - symmetric_key key; } symmetric_OFB; #endif #ifdef LTC_CBC_MODE /** A block cipher CBC structure */ typedef struct { - /** The index of the cipher chosen */ - int cipher, - /** The block size of the given cipher */ - blocklen; + /** The ECB context of the cipher */ + symmetric_ECB ecb; /** The current IV */ unsigned char IV[MAXBLOCKSIZE]; - /** The scheduled key */ - symmetric_key key; } symmetric_CBC; #endif @@ -306,23 +294,18 @@ typedef struct { #ifdef LTC_CTR_MODE /** A block cipher CTR structure */ typedef struct { - /** The index of the cipher chosen */ - int cipher, - /** The block size of the given cipher */ - blocklen, + /** The ECB context of the cipher */ + symmetric_ECB ecb; /** The padding offset */ - padlen, + int padlen, /** The mode (endianess) of the CTR, 0==little, 1==big */ mode, /** counter width */ ctrlen; - /** The counter */ unsigned char ctr[MAXBLOCKSIZE], /** The pad used to encrypt/decrypt */ pad[MAXBLOCKSIZE]; - /** The scheduled key */ - symmetric_key key; } symmetric_CTR; #endif @@ -330,21 +313,14 @@ typedef struct { #ifdef LTC_LRW_MODE /** A LRW structure */ typedef struct { - /** The index of the cipher chosen (must be a 128-bit block cipher) */ - int cipher; - + /** The ECB context of the cipher */ + symmetric_ECB ecb; /** The current IV */ unsigned char IV[16], - /** the tweak key */ tweak[16], - /** The current pad, it's the product of the first 15 bytes against the tweak key */ pad[16]; - - /** The scheduled symmetric key */ - symmetric_key key; - #ifdef LTC_LRW_TABLES /** The pre-computed multiplication table */ unsigned char PC[16][256][16]; @@ -355,19 +331,15 @@ typedef struct { #ifdef LTC_F8_MODE /** A block cipher F8 structure */ typedef struct { - /** The index of the cipher chosen */ - int cipher, - /** The block size of the given cipher */ - blocklen, + /** The ECB context of the cipher */ + symmetric_ECB ecb; 
/** The padding offset */ - padlen; + int padlen; /** The current IV */ unsigned char IV[MAXBLOCKSIZE], MIV[MAXBLOCKSIZE]; /** Current block count */ ulong32 blockcnt; - /** The scheduled key */ - symmetric_key key; } symmetric_F8; #endif @@ -432,7 +404,7 @@ extern struct ltc_cipher_descriptor { @param skey The scheduled key context @return CRYPT_OK if successful */ - int (*accel_ecb_encrypt)(const unsigned char *pt, unsigned char *ct, unsigned long blocks, symmetric_key *skey); + int (*accel_ecb_encrypt)(const unsigned char *pt, unsigned char *ct, unsigned long blocks, const symmetric_key *skey); /** Accelerated ECB decryption @param pt Plaintext @@ -441,7 +413,7 @@ extern struct ltc_cipher_descriptor { @param skey The scheduled key context @return CRYPT_OK if successful */ - int (*accel_ecb_decrypt)(const unsigned char *ct, unsigned char *pt, unsigned long blocks, symmetric_key *skey); + int (*accel_ecb_decrypt)(const unsigned char *ct, unsigned char *pt, unsigned long blocks, const symmetric_key *skey); /** Accelerated CBC encryption @param pt Plaintext @@ -451,7 +423,7 @@ extern struct ltc_cipher_descriptor { @param skey The scheduled key context @return CRYPT_OK if successful */ - int (*accel_cbc_encrypt)(const unsigned char *pt, unsigned char *ct, unsigned long blocks, unsigned char *IV, symmetric_key *skey); + int (*accel_cbc_encrypt)(const unsigned char *pt, unsigned char *ct, unsigned long blocks, unsigned char *IV, const symmetric_key *skey); /** Accelerated CBC decryption @param pt Plaintext @@ -461,7 +433,7 @@ extern struct ltc_cipher_descriptor { @param skey The scheduled key context @return CRYPT_OK if successful */ - int (*accel_cbc_decrypt)(const unsigned char *ct, unsigned char *pt, unsigned long blocks, unsigned char *IV, symmetric_key *skey); + int (*accel_cbc_decrypt)(const unsigned char *ct, unsigned char *pt, unsigned long blocks, unsigned char *IV, const symmetric_key *skey); /** Accelerated CTR encryption @param pt Plaintext @@ -472,7 +444,7 @@ extern struct ltc_cipher_descriptor { @param skey The scheduled key context @return CRYPT_OK if successful */ - int (*accel_ctr_encrypt)(const unsigned char *pt, unsigned char *ct, unsigned long blocks, unsigned char *IV, int mode, symmetric_key *skey); + int (*accel_ctr_encrypt)(const unsigned char *pt, unsigned char *ct, unsigned long blocks, unsigned char *IV, int mode, const symmetric_key *skey); /** Accelerated LRW @param pt Plaintext @@ -483,7 +455,7 @@ extern struct ltc_cipher_descriptor { @param skey The scheduled key context @return CRYPT_OK if successful */ - int (*accel_lrw_encrypt)(const unsigned char *pt, unsigned char *ct, unsigned long blocks, unsigned char *IV, const unsigned char *tweak, symmetric_key *skey); + int (*accel_lrw_encrypt)(const unsigned char *pt, unsigned char *ct, unsigned long blocks, unsigned char *IV, const unsigned char *tweak, const symmetric_key *skey); /** Accelerated LRW @param ct Ciphertext @@ -494,7 +466,7 @@ extern struct ltc_cipher_descriptor { @param skey The scheduled key context @return CRYPT_OK if successful */ - int (*accel_lrw_decrypt)(const unsigned char *ct, unsigned char *pt, unsigned long blocks, unsigned char *IV, const unsigned char *tweak, symmetric_key *skey); + int (*accel_lrw_decrypt)(const unsigned char *ct, unsigned char *pt, unsigned long blocks, unsigned char *IV, const unsigned char *tweak, const symmetric_key *skey); /** Accelerated CCM packet (one-shot) @param key The secret key to use @@ -514,7 +486,7 @@ extern struct ltc_cipher_descriptor { */ int 
(*accel_ccm_memory)( const unsigned char *key, unsigned long keylen, - symmetric_key *uskey, + const symmetric_key *uskey, const unsigned char *nonce, unsigned long noncelen, const unsigned char *header, unsigned long headerlen, unsigned char *pt, unsigned long ptlen, @@ -875,8 +847,8 @@ extern const struct ltc_cipher_descriptor tea_desc; #ifdef LTC_ECB_MODE int ecb_start(int cipher, const unsigned char *key, int keylen, int num_rounds, symmetric_ECB *ecb); -int ecb_encrypt(const unsigned char *pt, unsigned char *ct, unsigned long len, symmetric_ECB *ecb); -int ecb_decrypt(const unsigned char *ct, unsigned char *pt, unsigned long len, symmetric_ECB *ecb); +int ecb_encrypt(const unsigned char *pt, unsigned char *ct, unsigned long len, const symmetric_ECB *ecb); +int ecb_decrypt(const unsigned char *ct, unsigned char *pt, unsigned long len, const symmetric_ECB *ecb); int ecb_done(symmetric_ECB *ecb); #endif @@ -966,7 +938,7 @@ int f8_test_mode(void); #ifdef LTC_XTS_MODE typedef struct { - symmetric_key key1, key2; + symmetric_ECB key1, key2; int cipher; } symmetric_xts; diff --git a/src/headers/tomcrypt_custom.h b/src/headers/tomcrypt_custom.h index fef71d2e7..6c97b090f 100644 --- a/src/headers/tomcrypt_custom.h +++ b/src/headers/tomcrypt_custom.h @@ -645,6 +645,18 @@ #error LTC_NO_MATH defined, but also a math descriptor #endif +#if !defined(LTC_ECB_MODE) +#if defined(LTC_CFB_MODE) || defined(LTC_OFB_MODE) || defined(LTC_CBC_MODE) || defined(LTC_CTR_MODE) || \ + defined(LTC_F8_MODE) || defined(LTC_LRW_MODE) || defined(LTC_XTS_MODE) + #error LTC_ECB_MODE not defined, but all other modes depend on it +#endif +#if defined(LTC_OMAC) || defined(LTC_PMAC) || defined(LTC_XCBC) || defined(LTC_F9_MODE) || defined(LTC_EAX_MODE) || \ + defined(LTC_OCB_MODE) || defined(LTC_OCB3_MODE) || defined(LTC_CCM_MODE) || defined(LTC_GCM_MODE) + #error LTC_ECB_MODE not defined, but most MAC and AEAD modes depend on it +#endif +#endif + + /* THREAD management */ #ifdef LTC_PTHREAD diff --git a/src/headers/tomcrypt_mac.h b/src/headers/tomcrypt_mac.h index 60ab1acb0..642ab8968 100644 --- a/src/headers/tomcrypt_mac.h +++ b/src/headers/tomcrypt_mac.h @@ -28,13 +28,12 @@ int hmac_file(int hash, const char *fname, const unsigned char *key, #ifdef LTC_OMAC typedef struct { - int cipher_idx, - buflen, + int buflen, blklen; unsigned char block[MAXBLOCKSIZE], prev[MAXBLOCKSIZE], Lu[2][MAXBLOCKSIZE]; - symmetric_key key; + symmetric_ECB key; } omac_state; int omac_init(omac_state *omac, int cipher, const unsigned char *key, unsigned long keylen); @@ -64,10 +63,9 @@ typedef struct { block[MAXBLOCKSIZE], /* currently accumulated block */ checksum[MAXBLOCKSIZE]; /* current checksum */ - symmetric_key key; /* scheduled key for cipher */ + symmetric_ECB key; /* scheduled key for cipher */ unsigned long block_index; /* index # for current block */ - int cipher_idx, /* cipher idx */ - block_len, /* length of block */ + int block_len, /* length of block */ buflen; /* number of bytes in the buffer */ } pmac_state; @@ -169,10 +167,9 @@ typedef struct { unsigned char K[3][MAXBLOCKSIZE], IV[MAXBLOCKSIZE]; - symmetric_key key; + symmetric_ECB key; - int cipher, - buflen, + int buflen, blocksize; } xcbc_state; @@ -202,7 +199,7 @@ typedef struct { ACC[MAXBLOCKSIZE], IV[MAXBLOCKSIZE]; - symmetric_key key; + symmetric_ECB key; int cipher, buflen, @@ -283,10 +280,9 @@ typedef struct { R[MAXBLOCKSIZE], /* R value */ checksum[MAXBLOCKSIZE]; /* current checksum */ - symmetric_key key; /* scheduled key for cipher */ + symmetric_ECB key; /* 
scheduled key for cipher */ unsigned long block_index; /* index # for current block */ - int cipher, /* cipher idx */ - block_len; /* length of block */ + int block_len; /* length of block */ } ocb_state; int ocb_init(ocb_state *ocb, int cipher, @@ -347,10 +343,9 @@ typedef struct { int adata_buffer_bytes; /* bytes in AAD buffer */ unsigned long ablock_index; /* index # for current adata (AAD) block */ - symmetric_key key; /* scheduled key for cipher */ + symmetric_ECB key; /* scheduled key for cipher */ unsigned long block_index; /* index # for current data block */ - int cipher, /* cipher idx */ - tag_len, /* length of tag */ + int tag_len, /* length of tag */ block_len; /* length of block */ } ocb3_state; @@ -393,9 +388,8 @@ int ocb3_test(void); #define CCM_DECRYPT LTC_DECRYPT typedef struct { - symmetric_key K; - int cipher, /* which cipher */ - taglen, /* length of the tag */ + symmetric_ECB K; + int taglen, /* length of the tag */ x; /* index in PAD */ unsigned long L, /* L value */ @@ -432,7 +426,7 @@ int ccm_done(ccm_state *ccm, int ccm_memory(int cipher, const unsigned char *key, unsigned long keylen, - symmetric_key *uskey, + symmetric_ECB *uskey, const unsigned char *nonce, unsigned long noncelen, const unsigned char *header, unsigned long headerlen, unsigned char *pt, unsigned long ptlen, @@ -464,15 +458,14 @@ extern const unsigned char gcm_shift_table[]; #define LTC_GCM_MODE_TEXT 2 typedef struct { - symmetric_key K; + symmetric_ECB K; unsigned char H[16], /* multiplier */ X[16], /* accumulator */ Y[16], /* counter */ Y_0[16], /* initial counter */ buf[16]; /* buffer for stuff */ - int cipher, /* which cipher */ - ivmode, /* Which mode is the IV in? */ + int ivmode, /* Which mode is the IV in? */ mode, /* mode the GCM code is in */ buflen; /* length of data in buf */ diff --git a/src/headers/tomcrypt_private.h b/src/headers/tomcrypt_private.h index e5e716584..386f0e6c3 100644 --- a/src/headers/tomcrypt_private.h +++ b/src/headers/tomcrypt_private.h @@ -69,6 +69,10 @@ typedef struct /* tomcrypt_cipher.h */ +int ecb_encrypt_block(const unsigned char *pt, unsigned char *ct, const symmetric_ECB *ecb); +int ecb_decrypt_block(const unsigned char *ct, unsigned char *pt, const symmetric_ECB *ecb); + + void blowfish_enc(ulong32 *data, unsigned long blocks, const symmetric_key *skey); int blowfish_expand(const unsigned char *key, int keylen, const unsigned char *data, int datalen, diff --git a/src/mac/f9/f9_done.c b/src/mac/f9/f9_done.c index 38d1371a7..596a33ba5 100644 --- a/src/mac/f9/f9_done.c +++ b/src/mac/f9/f9_done.c @@ -33,7 +33,7 @@ int f9_done(f9_state *f9, unsigned char *out, unsigned long *outlen) if (f9->buflen != 0) { /* encrypt */ - cipher_descriptor[f9->cipher].ecb_encrypt(f9->IV, f9->IV, &f9->key); + ecb_encrypt_block(f9->IV, f9->IV, &f9->key); f9->buflen = 0; for (x = 0; x < f9->blocksize; x++) { f9->ACC[x] ^= f9->IV[x]; @@ -41,13 +41,13 @@ int f9_done(f9_state *f9, unsigned char *out, unsigned long *outlen) } /* schedule modified key */ - if ((err = cipher_descriptor[f9->cipher].setup(f9->akey, f9->keylen, 0, &f9->key)) != CRYPT_OK) { + if ((err = ecb_start(f9->cipher, f9->akey, f9->keylen, 0, &f9->key)) != CRYPT_OK) { return err; } /* encrypt the ACC */ - cipher_descriptor[f9->cipher].ecb_encrypt(f9->ACC, f9->ACC, &f9->key); - cipher_descriptor[f9->cipher].done(&f9->key); + ecb_encrypt_block(f9->ACC, f9->ACC, &f9->key); + ecb_done(&f9->key); /* extract tag */ for (x = 0; x < f9->blocksize && (unsigned long)x < *outlen; x++) { diff --git a/src/mac/f9/f9_init.c 
b/src/mac/f9/f9_init.c index 0bd599e2b..e619cbf53 100644 --- a/src/mac/f9/f9_init.c +++ b/src/mac/f9/f9_init.c @@ -34,7 +34,7 @@ int f9_init(f9_state *f9, int cipher, const unsigned char *key, unsigned long ke } #endif - if ((err = cipher_descriptor[cipher].setup(key, keylen, 0, &f9->key)) != CRYPT_OK) { + if ((err = ecb_start(cipher, key, keylen, 0, &f9->key)) != CRYPT_OK) { goto done; } diff --git a/src/mac/f9/f9_process.c b/src/mac/f9/f9_process.c index e416b5401..8860da387 100644 --- a/src/mac/f9/f9_process.c +++ b/src/mac/f9/f9_process.c @@ -38,7 +38,7 @@ int f9_process(f9_state *f9, const unsigned char *in, unsigned long inlen) for (x = 0; x < f9->blocksize; x += sizeof(LTC_FAST_TYPE)) { *(LTC_FAST_TYPE_PTR_CAST(&(f9->IV[x]))) ^= *(LTC_FAST_TYPE_PTR_CAST(&(in[x]))); } - cipher_descriptor[f9->cipher].ecb_encrypt(f9->IV, f9->IV, &f9->key); + ecb_encrypt_block(f9->IV, f9->IV, &f9->key); for (x = 0; x < f9->blocksize; x += sizeof(LTC_FAST_TYPE)) { *(LTC_FAST_TYPE_PTR_CAST(&(f9->ACC[x]))) ^= *(LTC_FAST_TYPE_PTR_CAST(&(f9->IV[x]))); } @@ -50,7 +50,7 @@ int f9_process(f9_state *f9, const unsigned char *in, unsigned long inlen) while (inlen) { if (f9->buflen == f9->blocksize) { - cipher_descriptor[f9->cipher].ecb_encrypt(f9->IV, f9->IV, &f9->key); + ecb_encrypt_block(f9->IV, f9->IV, &f9->key); for (x = 0; x < f9->blocksize; x++) { f9->ACC[x] ^= f9->IV[x]; } diff --git a/src/mac/omac/omac_done.c b/src/mac/omac/omac_done.c index c60067f8f..7997d7a3f 100644 --- a/src/mac/omac/omac_done.c +++ b/src/mac/omac/omac_done.c @@ -24,9 +24,6 @@ int omac_done(omac_state *omac, unsigned char *out, unsigned long *outlen) LTC_ARGCHK(omac != NULL); LTC_ARGCHK(out != NULL); LTC_ARGCHK(outlen != NULL); - if ((err = cipher_is_valid(omac->cipher_idx)) != CRYPT_OK) { - return err; - } if ((omac->buflen > (int)sizeof(omac->block)) || (omac->buflen < 0) || (omac->blklen > (int)sizeof(omac->block)) || (omac->buflen > omac->blklen)) { @@ -53,10 +50,10 @@ int omac_done(omac_state *omac, unsigned char *out, unsigned long *outlen) } /* encrypt it */ - if ((err = cipher_descriptor[omac->cipher_idx].ecb_encrypt(omac->block, omac->block, &omac->key)) != CRYPT_OK) { + if ((err = ecb_encrypt_block(omac->block, omac->block, &omac->key)) != CRYPT_OK) { return err; } - cipher_descriptor[omac->cipher_idx].done(&omac->key); + ecb_done(&omac->key); /* output it */ for (x = 0; x < (unsigned)omac->blklen && x < *outlen; x++) { diff --git a/src/mac/omac/omac_init.c b/src/mac/omac/omac_init.c index f527701d4..1b75505aa 100644 --- a/src/mac/omac/omac_init.c +++ b/src/mac/omac/omac_init.c @@ -47,7 +47,7 @@ int omac_init(omac_state *omac, int cipher, const unsigned char *key, unsigned l default: return CRYPT_INVALID_ARG; } - if ((err = cipher_descriptor[cipher].setup(key, keylen, 0, &omac->key)) != CRYPT_OK) { + if ((err = ecb_start(cipher, key, keylen, 0, &omac->key)) != CRYPT_OK) { return err; } @@ -55,7 +55,7 @@ int omac_init(omac_state *omac, int cipher, const unsigned char *key, unsigned l /* first calc L which is Ek(0) */ zeromem(omac->Lu[0], cipher_descriptor[cipher].block_length); - if ((err = cipher_descriptor[cipher].ecb_encrypt(omac->Lu[0], omac->Lu[0], &omac->key)) != CRYPT_OK) { + if ((err = ecb_encrypt_block(omac->Lu[0], omac->Lu[0], &omac->key)) != CRYPT_OK) { return err; } @@ -77,7 +77,6 @@ int omac_init(omac_state *omac, int cipher, const unsigned char *key, unsigned l } /* setup state */ - omac->cipher_idx = cipher; omac->buflen = 0; omac->blklen = len; zeromem(omac->prev, sizeof(omac->prev)); diff --git 
a/src/mac/omac/omac_process.c b/src/mac/omac/omac_process.c index b8e3160a5..35ee9d261 100644 --- a/src/mac/omac/omac_process.c +++ b/src/mac/omac/omac_process.c @@ -24,9 +24,6 @@ int omac_process(omac_state *omac, const unsigned char *in, unsigned long inlen) LTC_ARGCHK(omac != NULL); LTC_ARGCHK(in != NULL); - if ((err = cipher_is_valid(omac->cipher_idx)) != CRYPT_OK) { - return err; - } if ((omac->buflen > (int)sizeof(omac->block)) || (omac->buflen < 0) || (omac->blklen > (int)sizeof(omac->block)) || (omac->buflen > omac->blklen)) { @@ -34,22 +31,17 @@ int omac_process(omac_state *omac, const unsigned char *in, unsigned long inlen) } #ifdef LTC_FAST - { - unsigned long blklen = cipher_descriptor[omac->cipher_idx].block_length; - - if (omac->buflen == 0 && inlen > blklen) { - unsigned long y; - for (x = 0; x < (inlen - blklen); x += blklen) { - for (y = 0; y < blklen; y += sizeof(LTC_FAST_TYPE)) { - *(LTC_FAST_TYPE_PTR_CAST(&omac->prev[y])) ^= *(LTC_FAST_TYPE_PTR_CAST(&in[y])); - } - in += blklen; - if ((err = cipher_descriptor[omac->cipher_idx].ecb_encrypt(omac->prev, omac->prev, &omac->key)) != CRYPT_OK) { - return err; - } - } - inlen -= x; - } + if (omac->buflen == 0 && inlen > (unsigned long)omac->blklen) { + for (x = 0; x < (inlen - omac->blklen); x += omac->blklen) { + for (n = 0; n < (unsigned long)omac->blklen; n += sizeof(LTC_FAST_TYPE)) { + *(LTC_FAST_TYPE_PTR_CAST(&omac->prev[n])) ^= *(LTC_FAST_TYPE_PTR_CAST(&in[n])); + } + in += omac->blklen; + if ((err = ecb_encrypt_block(omac->prev, omac->prev, &omac->key)) != CRYPT_OK) { + return err; + } + } + inlen -= x; } #endif @@ -59,7 +51,7 @@ int omac_process(omac_state *omac, const unsigned char *in, unsigned long inlen) for (x = 0; x < (unsigned long)omac->blklen; x++) { omac->block[x] ^= omac->prev[x]; } - if ((err = cipher_descriptor[omac->cipher_idx].ecb_encrypt(omac->block, omac->prev, &omac->key)) != CRYPT_OK) { + if ((err = ecb_encrypt_block(omac->block, omac->prev, &omac->key)) != CRYPT_OK) { return err; } omac->buflen = 0; diff --git a/src/mac/pmac/pmac_done.c b/src/mac/pmac/pmac_done.c index 222790d14..bcad06adc 100644 --- a/src/mac/pmac/pmac_done.c +++ b/src/mac/pmac/pmac_done.c @@ -15,9 +15,6 @@ int pmac_done(pmac_state *pmac, unsigned char *out, unsigned long *outlen) LTC_ARGCHK(pmac != NULL); LTC_ARGCHK(out != NULL); - if ((err = cipher_is_valid(pmac->cipher_idx)) != CRYPT_OK) { - return err; - } if ((pmac->buflen > (int)sizeof(pmac->block)) || (pmac->buflen < 0) || (pmac->block_len > (int)sizeof(pmac->block)) || (pmac->buflen > pmac->block_len)) { @@ -41,10 +38,10 @@ int pmac_done(pmac_state *pmac, unsigned char *out, unsigned long *outlen) } /* encrypt it */ - if ((err = cipher_descriptor[pmac->cipher_idx].ecb_encrypt(pmac->checksum, pmac->checksum, &pmac->key)) != CRYPT_OK) { + if ((err = ecb_encrypt_block(pmac->checksum, pmac->checksum, &pmac->key)) != CRYPT_OK) { return err; } - cipher_descriptor[pmac->cipher_idx].done(&pmac->key); + ecb_done(&pmac->key); /* store it */ for (x = 0; x < pmac->block_len && x < (int)*outlen; x++) { diff --git a/src/mac/pmac/pmac_init.c b/src/mac/pmac/pmac_init.c index 1af9c2957..3764cf004 100644 --- a/src/mac/pmac/pmac_init.c +++ b/src/mac/pmac/pmac_init.c @@ -70,7 +70,7 @@ int pmac_init(pmac_state *pmac, int cipher, const unsigned char *key, unsigned l /* schedule the key */ - if ((err = cipher_descriptor[cipher].setup(key, keylen, 0, &pmac->key)) != CRYPT_OK) { + if ((err = ecb_start(cipher, key, keylen, 0, &pmac->key)) != CRYPT_OK) { return err; } @@ -82,7 +82,7 @@ int 
pmac_init(pmac_state *pmac, int cipher, const unsigned char *key, unsigned l /* find L = E[0] */ zeromem(L, pmac->block_len); - if ((err = cipher_descriptor[cipher].ecb_encrypt(L, L, &pmac->key)) != CRYPT_OK) { + if ((err = ecb_encrypt_block(L, L, &pmac->key)) != CRYPT_OK) { goto error; } @@ -119,7 +119,6 @@ int pmac_init(pmac_state *pmac, int cipher, const unsigned char *key, unsigned l /* zero buffer, counters, etc... */ pmac->block_index = 1; - pmac->cipher_idx = cipher; pmac->buflen = 0; zeromem(pmac->block, sizeof(pmac->block)); zeromem(pmac->Li, sizeof(pmac->Li)); diff --git a/src/mac/pmac/pmac_process.c b/src/mac/pmac/pmac_process.c index 3f38e8a7c..8017e3893 100644 --- a/src/mac/pmac/pmac_process.c +++ b/src/mac/pmac/pmac_process.c @@ -25,9 +25,6 @@ int pmac_process(pmac_state *pmac, const unsigned char *in, unsigned long inlen) LTC_ARGCHK(pmac != NULL); LTC_ARGCHK(in != NULL); - if ((err = cipher_is_valid(pmac->cipher_idx)) != CRYPT_OK) { - return err; - } if ((pmac->buflen > (int)sizeof(pmac->block)) || (pmac->buflen < 0) || (pmac->block_len > (int)sizeof(pmac->block)) || (pmac->buflen > pmac->block_len)) { @@ -42,7 +39,7 @@ int pmac_process(pmac_state *pmac, const unsigned char *in, unsigned long inlen) for (y = 0; y < 16; y += sizeof(LTC_FAST_TYPE)) { *(LTC_FAST_TYPE_PTR_CAST(&Z[y])) = *(LTC_FAST_TYPE_PTR_CAST(&in[y])) ^ *(LTC_FAST_TYPE_PTR_CAST(&pmac->Li[y])); } - if ((err = cipher_descriptor[pmac->cipher_idx].ecb_encrypt(Z, Z, &pmac->key)) != CRYPT_OK) { + if ((err = ecb_encrypt_block(Z, Z, &pmac->key)) != CRYPT_OK) { return err; } for (y = 0; y < 16; y += sizeof(LTC_FAST_TYPE)) { @@ -61,7 +58,7 @@ int pmac_process(pmac_state *pmac, const unsigned char *in, unsigned long inlen) for (x = 0; x < (unsigned long)pmac->block_len; x++) { Z[x] = pmac->Li[x] ^ pmac->block[x]; } - if ((err = cipher_descriptor[pmac->cipher_idx].ecb_encrypt(Z, Z, &pmac->key)) != CRYPT_OK) { + if ((err = ecb_encrypt_block(Z, Z, &pmac->key)) != CRYPT_OK) { return err; } for (x = 0; x < (unsigned long)pmac->block_len; x++) { diff --git a/src/mac/xcbc/xcbc_done.c b/src/mac/xcbc/xcbc_done.c index 7da72f38f..4163e2247 100644 --- a/src/mac/xcbc/xcbc_done.c +++ b/src/mac/xcbc/xcbc_done.c @@ -17,17 +17,11 @@ */ int xcbc_done(xcbc_state *xcbc, unsigned char *out, unsigned long *outlen) { - int err, x; + int x; LTC_ARGCHK(xcbc != NULL); LTC_ARGCHK(out != NULL); - /* check structure */ - if ((err = cipher_is_valid(xcbc->cipher)) != CRYPT_OK) { - return err; - } - - if ((xcbc->blocksize > cipher_descriptor[xcbc->cipher].block_length) || (xcbc->blocksize < 0) || - (xcbc->buflen > xcbc->blocksize) || (xcbc->buflen < 0)) { + if ((xcbc->blocksize < 0) || (xcbc->buflen > xcbc->blocksize) || (xcbc->buflen < 0)) { return CRYPT_INVALID_ARG; } @@ -46,8 +40,8 @@ int xcbc_done(xcbc_state *xcbc, unsigned char *out, unsigned long *outlen) } /* encrypt */ - cipher_descriptor[xcbc->cipher].ecb_encrypt(xcbc->IV, xcbc->IV, &xcbc->key); - cipher_descriptor[xcbc->cipher].done(&xcbc->key); + ecb_encrypt_block(xcbc->IV, xcbc->IV, &xcbc->key); + ecb_done(&xcbc->key); /* extract tag */ for (x = 0; x < xcbc->blocksize && (unsigned long)x < *outlen; x++) { diff --git a/src/mac/xcbc/xcbc_init.c b/src/mac/xcbc/xcbc_init.c index 3e4044a42..80964ef9e 100644 --- a/src/mac/xcbc/xcbc_init.c +++ b/src/mac/xcbc/xcbc_init.c @@ -19,7 +19,7 @@ int xcbc_init(xcbc_state *xcbc, int cipher, const unsigned char *key, unsigned long keylen) { int x, y, err; - symmetric_key *skey; + symmetric_ECB *skey; unsigned long k1; LTC_ARGCHK(xcbc != NULL); @@ -60,7 
+60,7 @@ int xcbc_init(xcbc_state *xcbc, int cipher, const unsigned char *key, unsigned l return CRYPT_MEM; } - if ((err = cipher_descriptor[cipher].setup(key, keylen, 0, skey)) != CRYPT_OK) { + if ((err = ecb_start(cipher, key, keylen, 0, skey)) != CRYPT_OK) { goto done; } @@ -69,20 +69,19 @@ int xcbc_init(xcbc_state *xcbc, int cipher, const unsigned char *key, unsigned l for (x = 0; x < cipher_descriptor[cipher].block_length; x++) { xcbc->K[y][x] = y + 1; } - cipher_descriptor[cipher].ecb_encrypt(xcbc->K[y], xcbc->K[y], skey); + ecb_encrypt_block(xcbc->K[y], xcbc->K[y], skey); } } /* setup K1 */ - err = cipher_descriptor[cipher].setup(xcbc->K[0], k1, 0, &xcbc->key); + err = ecb_start(cipher, xcbc->K[0], k1, 0, &xcbc->key); /* setup struct */ zeromem(xcbc->IV, cipher_descriptor[cipher].block_length); xcbc->blocksize = cipher_descriptor[cipher].block_length; - xcbc->cipher = cipher; xcbc->buflen = 0; done: - cipher_descriptor[cipher].done(skey); + ecb_done(skey); if (skey != NULL) { #ifdef LTC_CLEAN_STACK zeromem(skey, sizeof(*skey)); diff --git a/src/mac/xcbc/xcbc_process.c b/src/mac/xcbc/xcbc_process.c index 3cb0c46a1..a6e5145f1 100644 --- a/src/mac/xcbc/xcbc_process.c +++ b/src/mac/xcbc/xcbc_process.c @@ -17,7 +17,6 @@ */ int xcbc_process(xcbc_state *xcbc, const unsigned char *in, unsigned long inlen) { - int err; #ifdef LTC_FAST int x; #endif @@ -25,13 +24,7 @@ int xcbc_process(xcbc_state *xcbc, const unsigned char *in, unsigned long inlen) LTC_ARGCHK(xcbc != NULL); LTC_ARGCHK(in != NULL); - /* check structure */ - if ((err = cipher_is_valid(xcbc->cipher)) != CRYPT_OK) { - return err; - } - - if ((xcbc->blocksize > cipher_descriptor[xcbc->cipher].block_length) || (xcbc->blocksize < 0) || - (xcbc->buflen > xcbc->blocksize) || (xcbc->buflen < 0)) { + if ((xcbc->blocksize < 0) || (xcbc->buflen > xcbc->blocksize) || (xcbc->buflen < 0)) { return CRYPT_INVALID_ARG; } @@ -41,7 +34,7 @@ int xcbc_process(xcbc_state *xcbc, const unsigned char *in, unsigned long inlen) for (x = 0; x < xcbc->blocksize; x += sizeof(LTC_FAST_TYPE)) { *(LTC_FAST_TYPE_PTR_CAST(&(xcbc->IV[x]))) ^= *(LTC_FAST_TYPE_PTR_CAST(&(in[x]))); } - cipher_descriptor[xcbc->cipher].ecb_encrypt(xcbc->IV, xcbc->IV, &xcbc->key); + ecb_encrypt_block(xcbc->IV, xcbc->IV, &xcbc->key); in += xcbc->blocksize; inlen -= xcbc->blocksize; } @@ -50,7 +43,7 @@ int xcbc_process(xcbc_state *xcbc, const unsigned char *in, unsigned long inlen) while (inlen) { if (xcbc->buflen == xcbc->blocksize) { - cipher_descriptor[xcbc->cipher].ecb_encrypt(xcbc->IV, xcbc->IV, &xcbc->key); + ecb_encrypt_block(xcbc->IV, xcbc->IV, &xcbc->key); xcbc->buflen = 0; } xcbc->IV[xcbc->buflen++] ^= *in++; diff --git a/src/modes/cbc/cbc_decrypt.c b/src/modes/cbc/cbc_decrypt.c index 5fa3ee36a..4c3add7ac 100644 --- a/src/modes/cbc/cbc_decrypt.c +++ b/src/modes/cbc/cbc_decrypt.c @@ -32,51 +32,51 @@ int cbc_decrypt(const unsigned char *ct, unsigned char *pt, unsigned long len, s LTC_ARGCHK(ct != NULL); LTC_ARGCHK(cbc != NULL); - if ((err = cipher_is_valid(cbc->cipher)) != CRYPT_OK) { + if ((err = cipher_is_valid(cbc->ecb.cipher)) != CRYPT_OK) { return err; } /* is blocklen valid? 
*/ - if (cbc->blocklen < 1 || cbc->blocklen > (int)sizeof(cbc->IV) || cbc->blocklen > (int)sizeof(tmp)) { + if (cbc->ecb.blocklen < 1 || cbc->ecb.blocklen > (int)sizeof(cbc->IV) || cbc->ecb.blocklen > (int)sizeof(tmp)) { return CRYPT_INVALID_ARG; } - if (len % cbc->blocklen) { + if (len % cbc->ecb.blocklen) { return CRYPT_INVALID_ARG; } #ifdef LTC_FAST - if (cbc->blocklen % sizeof(LTC_FAST_TYPE)) { + if (cbc->ecb.blocklen % sizeof(LTC_FAST_TYPE)) { return CRYPT_INVALID_ARG; } #endif - if (cipher_descriptor[cbc->cipher].accel_cbc_decrypt != NULL) { - return cipher_descriptor[cbc->cipher].accel_cbc_decrypt(ct, pt, len / cbc->blocklen, cbc->IV, &cbc->key); + if (cipher_descriptor[cbc->ecb.cipher].accel_cbc_decrypt != NULL) { + return cipher_descriptor[cbc->ecb.cipher].accel_cbc_decrypt(ct, pt, len / cbc->ecb.blocklen, cbc->IV, &cbc->ecb.key); } while (len) { /* decrypt */ - if ((err = cipher_descriptor[cbc->cipher].ecb_decrypt(ct, tmp, &cbc->key)) != CRYPT_OK) { + if ((err = ecb_decrypt_block(ct, tmp, &cbc->ecb)) != CRYPT_OK) { return err; } /* xor IV against plaintext */ #if defined(LTC_FAST) - for (x = 0; x < cbc->blocklen; x += sizeof(LTC_FAST_TYPE)) { + for (x = 0; x < cbc->ecb.blocklen; x += sizeof(LTC_FAST_TYPE)) { tmpy = *(LTC_FAST_TYPE_PTR_CAST((unsigned char *)cbc->IV + x)) ^ *(LTC_FAST_TYPE_PTR_CAST((unsigned char *)tmp + x)); *(LTC_FAST_TYPE_PTR_CAST((unsigned char *)cbc->IV + x)) = *(LTC_FAST_TYPE_PTR_CAST((unsigned char *)ct + x)); *(LTC_FAST_TYPE_PTR_CAST((unsigned char *)pt + x)) = tmpy; } #else - for (x = 0; x < cbc->blocklen; x++) { + for (x = 0; x < cbc->ecb.blocklen; x++) { tmpy = tmp[x] ^ cbc->IV[x]; cbc->IV[x] = ct[x]; pt[x] = tmpy; } #endif - ct += cbc->blocklen; - pt += cbc->blocklen; - len -= cbc->blocklen; + ct += cbc->ecb.blocklen; + pt += cbc->ecb.blocklen; + len -= cbc->ecb.blocklen; } return CRYPT_OK; } diff --git a/src/modes/cbc/cbc_done.c b/src/modes/cbc/cbc_done.c index 985551ff9..d66e86fbf 100644 --- a/src/modes/cbc/cbc_done.c +++ b/src/modes/cbc/cbc_done.c @@ -15,14 +15,9 @@ */ int cbc_done(symmetric_CBC *cbc) { - int err; LTC_ARGCHK(cbc != NULL); - if ((err = cipher_is_valid(cbc->cipher)) != CRYPT_OK) { - return err; - } - cipher_descriptor[cbc->cipher].done(&cbc->key); - return CRYPT_OK; + return ecb_done(&cbc->ecb); } diff --git a/src/modes/cbc/cbc_encrypt.c b/src/modes/cbc/cbc_encrypt.c index 50d91c42c..7274d695d 100644 --- a/src/modes/cbc/cbc_encrypt.c +++ b/src/modes/cbc/cbc_encrypt.c @@ -26,58 +26,58 @@ int cbc_encrypt(const unsigned char *pt, unsigned char *ct, unsigned long len, s LTC_ARGCHK(ct != NULL); LTC_ARGCHK(cbc != NULL); - if ((err = cipher_is_valid(cbc->cipher)) != CRYPT_OK) { + if ((err = cipher_is_valid(cbc->ecb.cipher)) != CRYPT_OK) { return err; } /* is blocklen valid? 
-   if (cbc->blocklen < 1 || cbc->blocklen > (int)sizeof(cbc->IV)) {
+   if (cbc->ecb.blocklen < 1 || cbc->ecb.blocklen > (int)sizeof(cbc->IV)) {
       return CRYPT_INVALID_ARG;
    }
-   if (len % cbc->blocklen) {
+   if (len % cbc->ecb.blocklen) {
       return CRYPT_INVALID_ARG;
    }
 #ifdef LTC_FAST
-   if (cbc->blocklen % sizeof(LTC_FAST_TYPE)) {
+   if (cbc->ecb.blocklen % sizeof(LTC_FAST_TYPE)) {
       return CRYPT_INVALID_ARG;
    }
 #endif
 
-   if (cipher_descriptor[cbc->cipher].accel_cbc_encrypt != NULL) {
-      return cipher_descriptor[cbc->cipher].accel_cbc_encrypt(pt, ct, len / cbc->blocklen, cbc->IV, &cbc->key);
+   if (cipher_descriptor[cbc->ecb.cipher].accel_cbc_encrypt != NULL) {
+      return cipher_descriptor[cbc->ecb.cipher].accel_cbc_encrypt(pt, ct, len / cbc->ecb.blocklen, cbc->IV, &cbc->ecb.key);
    }
 
    while (len) {
      /* xor IV against plaintext */
 #if defined(LTC_FAST)
-      for (x = 0; x < cbc->blocklen; x += sizeof(LTC_FAST_TYPE)) {
+      for (x = 0; x < cbc->ecb.blocklen; x += sizeof(LTC_FAST_TYPE)) {
         *(LTC_FAST_TYPE_PTR_CAST((unsigned char *)cbc->IV + x)) ^= *(LTC_FAST_TYPE_PTR_CAST((unsigned char *)pt + x));
      }
 #else
-      for (x = 0; x < cbc->blocklen; x++) {
+      for (x = 0; x < cbc->ecb.blocklen; x++) {
         cbc->IV[x] ^= pt[x];
      }
 #endif
 
      /* encrypt */
-      if ((err = cipher_descriptor[cbc->cipher].ecb_encrypt(cbc->IV, ct, &cbc->key)) != CRYPT_OK) {
+      if ((err = ecb_encrypt_block(cbc->IV, ct, &cbc->ecb)) != CRYPT_OK) {
        return err;
      }
 
     /* store IV [ciphertext] for a future block */
 #if defined(LTC_FAST)
-      for (x = 0; x < cbc->blocklen; x += sizeof(LTC_FAST_TYPE)) {
+      for (x = 0; x < cbc->ecb.blocklen; x += sizeof(LTC_FAST_TYPE)) {
        *(LTC_FAST_TYPE_PTR_CAST((unsigned char *)cbc->IV + x)) = *(LTC_FAST_TYPE_PTR_CAST((unsigned char *)ct + x));
      }
 #else
-      for (x = 0; x < cbc->blocklen; x++) {
+      for (x = 0; x < cbc->ecb.blocklen; x++) {
        cbc->IV[x] = ct[x];
      }
 #endif
 
-      ct += cbc->blocklen;
-      pt += cbc->blocklen;
-      len -= cbc->blocklen;
+      ct += cbc->ecb.blocklen;
+      pt += cbc->ecb.blocklen;
+      len -= cbc->ecb.blocklen;
    }
    return CRYPT_OK;
 }
diff --git a/src/modes/cbc/cbc_getiv.c b/src/modes/cbc/cbc_getiv.c
index 7af2cf176..37bf600c3 100644
--- a/src/modes/cbc/cbc_getiv.c
+++ b/src/modes/cbc/cbc_getiv.c
@@ -21,12 +21,12 @@ int cbc_getiv(unsigned char *IV, unsigned long *len, const symmetric_CBC *cbc)
    LTC_ARGCHK(IV != NULL);
    LTC_ARGCHK(len != NULL);
    LTC_ARGCHK(cbc != NULL);
-   if ((unsigned long)cbc->blocklen > *len) {
-      *len = cbc->blocklen;
+   if ((unsigned long)cbc->ecb.blocklen > *len) {
+      *len = cbc->ecb.blocklen;
       return CRYPT_BUFFER_OVERFLOW;
    }
-   XMEMCPY(IV, cbc->IV, cbc->blocklen);
-   *len = cbc->blocklen;
+   XMEMCPY(IV, cbc->IV, cbc->ecb.blocklen);
+   *len = cbc->ecb.blocklen;
    return CRYPT_OK;
 }
diff --git a/src/modes/cbc/cbc_setiv.c b/src/modes/cbc/cbc_setiv.c
index a9e91c3ca..92620b0d5 100644
--- a/src/modes/cbc/cbc_setiv.c
+++ b/src/modes/cbc/cbc_setiv.c
@@ -21,7 +21,7 @@ int cbc_setiv(const unsigned char *IV, unsigned long len, symmetric_CBC *cbc)
 {
    LTC_ARGCHK(IV != NULL);
    LTC_ARGCHK(cbc != NULL);
-   if (len != (unsigned long)cbc->blocklen) {
+   if (len != (unsigned long)cbc->ecb.blocklen) {
      return CRYPT_INVALID_ARG;
    }
    XMEMCPY(cbc->IV, IV, len);
diff --git a/src/modes/cbc/cbc_start.c b/src/modes/cbc/cbc_start.c
index 415673921..58322e175 100644
--- a/src/modes/cbc/cbc_start.c
+++ b/src/modes/cbc/cbc_start.c
@@ -28,20 +28,13 @@ int cbc_start(int cipher, const unsigned char *IV, const unsigned char *key,
    LTC_ARGCHK(key != NULL);
    LTC_ARGCHK(cbc != NULL);
 
-   /* bad param? */
-   if ((err = cipher_is_valid(cipher)) != CRYPT_OK) {
-      return err;
-   }
-
    /* setup cipher */
-   if ((err = cipher_descriptor[cipher].setup(key, keylen, num_rounds, &cbc->key)) != CRYPT_OK) {
+   if ((err = ecb_start(cipher, key, keylen, num_rounds, &cbc->ecb)) != CRYPT_OK) {
       return err;
    }
 
    /* copy IV */
-   cbc->blocklen = cipher_descriptor[cipher].block_length;
-   cbc->cipher = cipher;
-   for (x = 0; x < cbc->blocklen; x++) {
+   for (x = 0; x < cbc->ecb.blocklen; x++) {
       cbc->IV[x] = IV[x];
    }
    return CRYPT_OK;
diff --git a/src/modes/cfb/cfb_decrypt.c b/src/modes/cfb/cfb_decrypt.c
index b61dc036e..7f44aa9be 100644
--- a/src/modes/cfb/cfb_decrypt.c
+++ b/src/modes/cfb/cfb_decrypt.c
@@ -25,19 +25,19 @@ int cfb_decrypt(const unsigned char *ct, unsigned char *pt, unsigned long len, s
    LTC_ARGCHK(ct != NULL);
    LTC_ARGCHK(cfb != NULL);
 
-   if ((err = cipher_is_valid(cfb->cipher)) != CRYPT_OK) {
+   if ((err = cipher_is_valid(cfb->ecb.cipher)) != CRYPT_OK) {
       return err;
    }
 
    /* is blocklen/padlen valid? */
-   if (cfb->blocklen < 0 || cfb->blocklen > (int)sizeof(cfb->IV) ||
+   if (cfb->ecb.blocklen < 0 || cfb->ecb.blocklen > (int)sizeof(cfb->IV) ||
        cfb->padlen < 0 || cfb->padlen > (int)sizeof(cfb->pad)) {
       return CRYPT_INVALID_ARG;
    }
 
    while (len-- > 0) {
-      if (cfb->padlen == cfb->blocklen) {
-         if ((err = cipher_descriptor[cfb->cipher].ecb_encrypt(cfb->pad, cfb->IV, &cfb->key)) != CRYPT_OK) {
+      if (cfb->padlen == cfb->ecb.blocklen) {
+         if ((err = ecb_encrypt_block(cfb->pad, cfb->IV, &cfb->ecb)) != CRYPT_OK) {
            return err;
         }
         cfb->padlen = 0;
diff --git a/src/modes/cfb/cfb_done.c b/src/modes/cfb/cfb_done.c
index 8f8f9cd0c..881a17c7b 100644
--- a/src/modes/cfb/cfb_done.c
+++ b/src/modes/cfb/cfb_done.c
@@ -15,14 +15,9 @@
 */
 int cfb_done(symmetric_CFB *cfb)
 {
-   int err;
    LTC_ARGCHK(cfb != NULL);
 
-   if ((err = cipher_is_valid(cfb->cipher)) != CRYPT_OK) {
-      return err;
-   }
-   cipher_descriptor[cfb->cipher].done(&cfb->key);
-   return CRYPT_OK;
+   return ecb_done(&cfb->ecb);
 }
diff --git a/src/modes/cfb/cfb_encrypt.c b/src/modes/cfb/cfb_encrypt.c
index 315a4f76a..9bc018e94 100644
--- a/src/modes/cfb/cfb_encrypt.c
+++ b/src/modes/cfb/cfb_encrypt.c
@@ -25,19 +25,19 @@ int cfb_encrypt(const unsigned char *pt, unsigned char *ct, unsigned long len, s
    LTC_ARGCHK(pt != NULL);
    LTC_ARGCHK(ct != NULL);
    LTC_ARGCHK(cfb != NULL);
-   if ((err = cipher_is_valid(cfb->cipher)) != CRYPT_OK) {
+   if ((err = cipher_is_valid(cfb->ecb.cipher)) != CRYPT_OK) {
       return err;
    }
 
    /* is blocklen/padlen valid? */
-   if (cfb->blocklen < 0 || cfb->blocklen > (int)sizeof(cfb->IV) ||
+   if (cfb->ecb.blocklen < 0 || cfb->ecb.blocklen > (int)sizeof(cfb->IV) ||
       cfb->padlen < 0 || cfb->padlen > (int)sizeof(cfb->pad)) {
      return CRYPT_INVALID_ARG;
    }
 
    while (len-- > 0) {
-      if (cfb->padlen == cfb->blocklen) {
-         if ((err = cipher_descriptor[cfb->cipher].ecb_encrypt(cfb->pad, cfb->IV, &cfb->key)) != CRYPT_OK) {
+      if (cfb->padlen == cfb->ecb.blocklen) {
+         if ((err = ecb_encrypt_block(cfb->pad, cfb->IV, &cfb->ecb)) != CRYPT_OK) {
           return err;
        }
        cfb->padlen = 0;
diff --git a/src/modes/cfb/cfb_getiv.c b/src/modes/cfb/cfb_getiv.c
index 9dc2e8612..6b407c35f 100644
--- a/src/modes/cfb/cfb_getiv.c
+++ b/src/modes/cfb/cfb_getiv.c
@@ -21,12 +21,12 @@ int cfb_getiv(unsigned char *IV, unsigned long *len, const symmetric_CFB *cfb)
    LTC_ARGCHK(IV != NULL);
    LTC_ARGCHK(len != NULL);
    LTC_ARGCHK(cfb != NULL);
-   if ((unsigned long)cfb->blocklen > *len) {
-      *len = cfb->blocklen;
+   if ((unsigned long)cfb->ecb.blocklen > *len) {
+      *len = cfb->ecb.blocklen;
      return CRYPT_BUFFER_OVERFLOW;
    }
-   XMEMCPY(IV, cfb->IV, cfb->blocklen);
-   *len = cfb->blocklen;
+   XMEMCPY(IV, cfb->IV, cfb->ecb.blocklen);
+   *len = cfb->ecb.blocklen;
    return CRYPT_OK;
 }
diff --git a/src/modes/cfb/cfb_setiv.c b/src/modes/cfb/cfb_setiv.c
index c5481ada7..6ff15f967 100644
--- a/src/modes/cfb/cfb_setiv.c
+++ b/src/modes/cfb/cfb_setiv.c
@@ -23,17 +23,17 @@ int cfb_setiv(const unsigned char *IV, unsigned long len, symmetric_CFB *cfb)
    LTC_ARGCHK(IV != NULL);
    LTC_ARGCHK(cfb != NULL);
 
-   if ((err = cipher_is_valid(cfb->cipher)) != CRYPT_OK) {
+   if ((err = cipher_is_valid(cfb->ecb.cipher)) != CRYPT_OK) {
      return err;
    }
 
-   if (len != (unsigned long)cfb->blocklen) {
+   if (len != (unsigned long)cfb->ecb.blocklen) {
      return CRYPT_INVALID_ARG;
    }
 
    /* force next block */
    cfb->padlen = 0;
-   return cipher_descriptor[cfb->cipher].ecb_encrypt(IV, cfb->IV, &cfb->key);
+   return ecb_encrypt_block(IV, cfb->IV, &cfb->ecb);
 }
 
 #endif
diff --git a/src/modes/cfb/cfb_start.c b/src/modes/cfb/cfb_start.c
index cd5ffe793..d9c39da82 100644
--- a/src/modes/cfb/cfb_start.c
+++ b/src/modes/cfb/cfb_start.c
@@ -33,22 +33,18 @@ int cfb_start(int cipher, const unsigned char *IV, const unsigned char *key,
      return err;
    }
 
-
-   /* copy data */
-   cfb->cipher = cipher;
-   cfb->blocklen = cipher_descriptor[cipher].block_length;
-   for (x = 0; x < cfb->blocklen; x++) {
-       cfb->IV[x] = IV[x];
-   }
-
    /* init the cipher */
-   if ((err = cipher_descriptor[cipher].setup(key, keylen, num_rounds, &cfb->key)) != CRYPT_OK) {
+   if ((err = ecb_start(cipher, key, keylen, num_rounds, &cfb->ecb)) != CRYPT_OK) {
      return err;
    }
 
+   /* copy data */
+   for (x = 0; x < cfb->ecb.blocklen; x++) {
+      cfb->IV[x] = IV[x];
+   }
 
    /* encrypt the IV */
    cfb->padlen = 0;
-   return cipher_descriptor[cfb->cipher].ecb_encrypt(cfb->IV, cfb->IV, &cfb->key);
+   return ecb_encrypt_block(cfb->IV, cfb->IV, &cfb->ecb);
 }
 
 #endif
diff --git a/src/modes/ctr/ctr_done.c b/src/modes/ctr/ctr_done.c
index f93d97177..670c63b4e 100644
--- a/src/modes/ctr/ctr_done.c
+++ b/src/modes/ctr/ctr_done.c
@@ -15,14 +15,9 @@
 */
 int ctr_done(symmetric_CTR *ctr)
 {
-   int err;
    LTC_ARGCHK(ctr != NULL);
 
-   if ((err = cipher_is_valid(ctr->cipher)) != CRYPT_OK) {
-      return err;
-   }
-   cipher_descriptor[ctr->cipher].done(&ctr->key);
-   return CRYPT_OK;
+   return ecb_done(&ctr->ecb);
 }
diff --git a/src/modes/ctr/ctr_encrypt.c b/src/modes/ctr/ctr_encrypt.c
index b8c08f7a6..2859574bc 100644
--- a/src/modes/ctr/ctr_encrypt.c
+++ b/src/modes/ctr/ctr_encrypt.c
@@ -24,7 +24,7 @@ static int s_ctr_encrypt(const unsigned char *pt, unsigned char *ct, unsigned lo
    while (len) {
      /* is the pad empty? */
-      if (ctr->padlen == ctr->blocklen) {
+      if (ctr->padlen == ctr->ecb.blocklen) {
        /* increment counter */
        if (ctr->mode == CTR_COUNTER_LITTLE_ENDIAN) {
           /* little-endian */
@@ -36,7 +36,7 @@ static int s_ctr_encrypt(const unsigned char *pt, unsigned char *ct, unsigned lo
           }
        } else {
           /* big-endian */
-           for (x = ctr->blocklen-1; x >= ctr->ctrlen; x--) {
+           for (x = ctr->ecb.blocklen-1; x >= ctr->ctrlen; x--) {
              ctr->ctr[x] = (ctr->ctr[x] + (unsigned char)1) & (unsigned char)255;
              if (ctr->ctr[x] != (unsigned char)0) {
                 break;
@@ -45,21 +45,21 @@ static int s_ctr_encrypt(const unsigned char *pt, unsigned char *ct, unsigned lo
        }
 
        /* encrypt it */
-        if ((err = cipher_descriptor[ctr->cipher].ecb_encrypt(ctr->ctr, ctr->pad, &ctr->key)) != CRYPT_OK) {
+        if ((err = ecb_encrypt_block(ctr->ctr, ctr->pad, &ctr->ecb)) != CRYPT_OK) {
           return err;
        }
        ctr->padlen = 0;
      }
 #ifdef LTC_FAST
-      if ((ctr->padlen == 0) && (len >= (unsigned long)ctr->blocklen)) {
-         for (x = 0; x < ctr->blocklen; x += sizeof(LTC_FAST_TYPE)) {
+      if ((ctr->padlen == 0) && (len >= (unsigned long)ctr->ecb.blocklen)) {
+         for (x = 0; x < ctr->ecb.blocklen; x += sizeof(LTC_FAST_TYPE)) {
           *(LTC_FAST_TYPE_PTR_CAST((unsigned char *)ct + x)) = *(LTC_FAST_TYPE_PTR_CAST((unsigned char *)pt + x)) ^ *(LTC_FAST_TYPE_PTR_CAST((unsigned char *)ctr->pad + x));
        }
-         pt += ctr->blocklen;
-         ct += ctr->blocklen;
-         len -= ctr->blocklen;
-         ctr->padlen = ctr->blocklen;
+         pt += ctr->ecb.blocklen;
+         ct += ctr->ecb.blocklen;
+         len -= ctr->ecb.blocklen;
+         ctr->padlen = ctr->ecb.blocklen;
        continue;
      }
 #endif
@@ -85,26 +85,26 @@ int ctr_encrypt(const unsigned char *pt, unsigned char *ct, unsigned long len, s
    LTC_ARGCHK(ct != NULL);
    LTC_ARGCHK(ctr != NULL);
 
-   if ((err = cipher_is_valid(ctr->cipher)) != CRYPT_OK) {
+   if ((err = cipher_is_valid(ctr->ecb.cipher)) != CRYPT_OK) {
      return err;
    }
 
    /* is blocklen/padlen valid? */
-   if ((ctr->blocklen < 1) || (ctr->blocklen > (int)sizeof(ctr->ctr)) ||
+   if ((ctr->ecb.blocklen < 1) || (ctr->ecb.blocklen > (int)sizeof(ctr->ctr)) ||
       (ctr->padlen < 0) || (ctr->padlen > (int)sizeof(ctr->pad))) {
      return CRYPT_INVALID_ARG;
    }
 
 #ifdef LTC_FAST
-   if (ctr->blocklen % sizeof(LTC_FAST_TYPE)) {
+   if (ctr->ecb.blocklen % sizeof(LTC_FAST_TYPE)) {
      return CRYPT_INVALID_ARG;
    }
 #endif
 
    /* handle acceleration only if pad is empty, accelerator is present and length is >= a block size */
-   if ((cipher_descriptor[ctr->cipher].accel_ctr_encrypt != NULL) && (len >= (unsigned long)ctr->blocklen)) {
-     if (ctr->padlen < ctr->blocklen) {
-        fr = ctr->blocklen - ctr->padlen;
+   if ((cipher_descriptor[ctr->ecb.cipher].accel_ctr_encrypt != NULL) && (len >= (unsigned long)ctr->ecb.blocklen)) {
+     if (ctr->padlen < ctr->ecb.blocklen) {
+        fr = ctr->ecb.blocklen - ctr->padlen;
        if ((err = s_ctr_encrypt(pt, ct, fr, ctr)) != CRYPT_OK) {
           return err;
        }
@@ -113,13 +113,13 @@ int ctr_encrypt(const unsigned char *pt, unsigned char *ct, unsigned long len, s
        len -= fr;
      }
 
-     if (len >= (unsigned long)ctr->blocklen) {
-       if ((err = cipher_descriptor[ctr->cipher].accel_ctr_encrypt(pt, ct, len/ctr->blocklen, ctr->ctr, ctr->mode, &ctr->key)) != CRYPT_OK) {
+     if (len >= (unsigned long)ctr->ecb.blocklen) {
+       if ((err = cipher_descriptor[ctr->ecb.cipher].accel_ctr_encrypt(pt, ct, len/ctr->ecb.blocklen, ctr->ctr, ctr->mode, &ctr->ecb.key)) != CRYPT_OK) {
          return err;
        }
-       pt += (len / ctr->blocklen) * ctr->blocklen;
-       ct += (len / ctr->blocklen) * ctr->blocklen;
-       len %= ctr->blocklen;
+       pt += (len / ctr->ecb.blocklen) * ctr->ecb.blocklen;
+       ct += (len / ctr->ecb.blocklen) * ctr->ecb.blocklen;
+       len %= ctr->ecb.blocklen;
      }
    }
diff --git a/src/modes/ctr/ctr_getiv.c b/src/modes/ctr/ctr_getiv.c
index 05277faeb..7704a7f2d 100644
--- a/src/modes/ctr/ctr_getiv.c
+++ b/src/modes/ctr/ctr_getiv.c
@@ -21,12 +21,12 @@ int ctr_getiv(unsigned char *IV, unsigned long *len, const symmetric_CTR *ctr)
    LTC_ARGCHK(IV != NULL);
    LTC_ARGCHK(len != NULL);
    LTC_ARGCHK(ctr != NULL);
-   if ((unsigned long)ctr->blocklen > *len) {
-      *len = ctr->blocklen;
+   if ((unsigned long)ctr->ecb.blocklen > *len) {
+      *len = ctr->ecb.blocklen;
      return CRYPT_BUFFER_OVERFLOW;
    }
-   XMEMCPY(IV, ctr->ctr, ctr->blocklen);
-   *len = ctr->blocklen;
+   XMEMCPY(IV, ctr->ctr, ctr->ecb.blocklen);
+   *len = ctr->ecb.blocklen;
    return CRYPT_OK;
 }
diff --git a/src/modes/ctr/ctr_setiv.c b/src/modes/ctr/ctr_setiv.c
index be80f1a24..75ea1ab37 100644
--- a/src/modes/ctr/ctr_setiv.c
+++ b/src/modes/ctr/ctr_setiv.c
@@ -24,11 +24,11 @@ int ctr_setiv(const unsigned char *IV, unsigned long len, symmetric_CTR *ctr)
    LTC_ARGCHK(ctr != NULL);
 
    /* bad param? */
-   if ((err = cipher_is_valid(ctr->cipher)) != CRYPT_OK) {
+   if ((err = cipher_is_valid(ctr->ecb.cipher)) != CRYPT_OK) {
      return err;
    }
 
-   if (len != (unsigned long)ctr->blocklen) {
+   if (len != (unsigned long)ctr->ecb.blocklen) {
      return CRYPT_INVALID_ARG;
    }
 
@@ -37,7 +37,7 @@ int ctr_setiv(const unsigned char *IV, unsigned long len, symmetric_CTR *ctr)
 
    /* force next block */
    ctr->padlen = 0;
-   return cipher_descriptor[ctr->cipher].ecb_encrypt(IV, ctr->pad, &ctr->key);
+   return ecb_encrypt_block(IV, ctr->pad, &ctr->ecb);
 }
 
 #endif
diff --git a/src/modes/ctr/ctr_start.c b/src/modes/ctr/ctr_start.c
index 0ccdfd22e..03b18a461 100644
--- a/src/modes/ctr/ctr_start.c
+++ b/src/modes/ctr/ctr_start.c
@@ -49,16 +49,14 @@ int ctr_start( int cipher,
    }
 
    /* setup cipher */
-   if ((err = cipher_descriptor[cipher].setup(key, keylen, num_rounds, &ctr->key)) != CRYPT_OK) {
+   if ((err = ecb_start(cipher, key, keylen, num_rounds, &ctr->ecb)) != CRYPT_OK) {
      return err;
    }
 
    /* copy ctr */
-   ctr->blocklen = cipher_descriptor[cipher].block_length;
-   ctr->cipher = cipher;
    ctr->padlen = 0;
    ctr->mode = ctr_mode & 0x1000;
-   for (x = 0; x < ctr->blocklen; x++) {
+   for (x = 0; x < ctr->ecb.blocklen; x++) {
      ctr->ctr[x] = IV[x];
    }
 
@@ -74,7 +72,7 @@ int ctr_start( int cipher,
        }
      } else {
        /* big-endian */
-        for (x = ctr->blocklen-1; x >= ctr->ctrlen; x--) {
+        for (x = ctr->ecb.blocklen-1; x >= ctr->ctrlen; x--) {
          ctr->ctr[x] = (ctr->ctr[x] + (unsigned char)1) & (unsigned char)255;
          if (ctr->ctr[x] != (unsigned char)0) {
             break;
@@ -83,7 +81,7 @@ int ctr_start( int cipher,
      }
    }
 
-   return cipher_descriptor[ctr->cipher].ecb_encrypt(ctr->ctr, ctr->pad, &ctr->key);
+   return ecb_encrypt_block(ctr->ctr, ctr->pad, &ctr->ecb);
 }
 
 #endif
diff --git a/src/modes/ecb/ecb_decrypt.c b/src/modes/ecb/ecb_decrypt.c
index 30697791f..b8a48b003 100644
--- a/src/modes/ecb/ecb_decrypt.c
+++ b/src/modes/ecb/ecb_decrypt.c
@@ -8,6 +8,14 @@
 */
 
 #ifdef LTC_ECB_MODE
+int ecb_decrypt_block(const unsigned char *ct, unsigned char *pt, const symmetric_ECB *ecb)
+{
+   /* check for accel */
+   if (cipher_descriptor[ecb->cipher].accel_ecb_decrypt != NULL) {
+      return cipher_descriptor[ecb->cipher].accel_ecb_decrypt(ct, pt, 1, &ecb->key);
+   }
+   return cipher_descriptor[ecb->cipher].ecb_decrypt(ct, pt, &ecb->key);
+}
 
 /**
   ECB decrypt
@@ -17,7 +25,7 @@
   @param ecb    ECB state
   @return CRYPT_OK if successful
 */
-int ecb_decrypt(const unsigned char *ct, unsigned char *pt, unsigned long len, symmetric_ECB *ecb)
+int ecb_decrypt(const unsigned char *ct, unsigned char *pt, unsigned long len, const symmetric_ECB *ecb)
 {
    int err;
    LTC_ARGCHK(pt != NULL);
diff --git a/src/modes/ecb/ecb_encrypt.c b/src/modes/ecb/ecb_encrypt.c
index 661d99424..07e86726f 100644
--- a/src/modes/ecb/ecb_encrypt.c
+++ b/src/modes/ecb/ecb_encrypt.c
@@ -8,6 +8,14 @@
 */
 
 #ifdef LTC_ECB_MODE
+int ecb_encrypt_block(const unsigned char *pt, unsigned char *ct, const symmetric_ECB *ecb)
+{
+   /* check for accel */
+   if (cipher_descriptor[ecb->cipher].accel_ecb_encrypt != NULL) {
+      return cipher_descriptor[ecb->cipher].accel_ecb_encrypt(pt, ct, 1, &ecb->key);
+   }
+   return cipher_descriptor[ecb->cipher].ecb_encrypt(pt, ct, &ecb->key);
+}
 
 /**
   ECB encrypt
@@ -17,7 +25,7 @@
   @param ecb    ECB state
   @return CRYPT_OK if successful
 */
-int ecb_encrypt(const unsigned char *pt, unsigned char *ct, unsigned long len, symmetric_ECB *ecb)
+int ecb_encrypt(const unsigned char *pt, unsigned char *ct, unsigned long len, const symmetric_ECB *ecb)
 {
    int err;
    LTC_ARGCHK(pt != NULL);
diff --git a/src/modes/f8/f8_done.c b/src/modes/f8/f8_done.c
index 7d25b049e..78ee9fe48 100644
--- a/src/modes/f8/f8_done.c
+++ b/src/modes/f8/f8_done.c
@@ -15,14 +15,9 @@
 */
 int f8_done(symmetric_F8 *f8)
 {
-   int err;
    LTC_ARGCHK(f8 != NULL);
 
-   if ((err = cipher_is_valid(f8->cipher)) != CRYPT_OK) {
-      return err;
-   }
-   cipher_descriptor[f8->cipher].done(&f8->key);
-   return CRYPT_OK;
+   return ecb_done(&f8->ecb);
 }
diff --git a/src/modes/f8/f8_encrypt.c b/src/modes/f8/f8_encrypt.c
index 671c90418..ec147fdee 100644
--- a/src/modes/f8/f8_encrypt.c
+++ b/src/modes/f8/f8_encrypt.c
@@ -24,12 +24,12 @@ int f8_encrypt(const unsigned char *pt, unsigned char *ct, unsigned long len, sy
    LTC_ARGCHK(pt != NULL);
    LTC_ARGCHK(ct != NULL);
    LTC_ARGCHK(f8 != NULL);
-   if ((err = cipher_is_valid(f8->cipher)) != CRYPT_OK) {
+   if ((err = cipher_is_valid(f8->ecb.cipher)) != CRYPT_OK) {
      return err;
    }
 
    /* is blocklen/padlen valid? */
-   if (f8->blocklen < 0 || f8->blocklen > (int)sizeof(f8->IV) ||
+   if (f8->ecb.blocklen < 0 || f8->ecb.blocklen > (int)sizeof(f8->IV) ||
       f8->padlen < 0 || f8->padlen > (int)sizeof(f8->IV)) {
      return CRYPT_INVALID_ARG;
    }
@@ -37,14 +37,14 @@ int f8_encrypt(const unsigned char *pt, unsigned char *ct, unsigned long len, sy
    zeromem(buf, sizeof(buf));
 
    /* make sure the pad is empty */
-   if (f8->padlen == f8->blocklen) {
+   if (f8->padlen == f8->ecb.blocklen) {
      /* xor of IV, MIV and blockcnt == what goes into cipher */
-      STORE32H(f8->blockcnt, (buf+(f8->blocklen-4)));
+      STORE32H(f8->blockcnt, (buf+(f8->ecb.blocklen-4)));
      ++(f8->blockcnt);
-      for (x = 0; x < f8->blocklen; x++) {
+      for (x = 0; x < f8->ecb.blocklen; x++) {
        f8->IV[x] ^= f8->MIV[x] ^ buf[x];
      }
-      if ((err = cipher_descriptor[f8->cipher].ecb_encrypt(f8->IV, f8->IV, &f8->key)) != CRYPT_OK) {
+      if ((err = ecb_encrypt_block(f8->IV, f8->IV, &f8->ecb)) != CRYPT_OK) {
        return err;
      }
      f8->padlen = 0;
@@ -52,14 +52,14 @@ int f8_encrypt(const unsigned char *pt, unsigned char *ct, unsigned long len, sy
 
 #ifdef LTC_FAST
    if (f8->padlen == 0) {
-      while (len >= (unsigned long)f8->blocklen) {
-         STORE32H(f8->blockcnt, (buf+(f8->blocklen-4)));
+      while (len >= (unsigned long)f8->ecb.blocklen) {
+         STORE32H(f8->blockcnt, (buf+(f8->ecb.blocklen-4)));
        ++(f8->blockcnt);
-         for (x = 0; x < f8->blocklen; x += sizeof(LTC_FAST_TYPE)) {
+         for (x = 0; x < f8->ecb.blocklen; x += sizeof(LTC_FAST_TYPE)) {
          *(LTC_FAST_TYPE_PTR_CAST(&ct[x])) = *(LTC_FAST_TYPE_PTR_CAST(&pt[x])) ^ *(LTC_FAST_TYPE_PTR_CAST(&f8->IV[x]));
          *(LTC_FAST_TYPE_PTR_CAST(&f8->IV[x])) ^= *(LTC_FAST_TYPE_PTR_CAST(&f8->MIV[x])) ^ *(LTC_FAST_TYPE_PTR_CAST(&buf[x]));
        }
-         if ((err = cipher_descriptor[f8->cipher].ecb_encrypt(f8->IV, f8->IV, &f8->key)) != CRYPT_OK) {
+         if ((err = ecb_encrypt_block(f8->IV, f8->IV, &f8->ecb)) != CRYPT_OK) {
          return err;
        }
        len -= x;
@@ -70,14 +70,14 @@ int f8_encrypt(const unsigned char *pt, unsigned char *ct, unsigned long len, sy
 #endif
 
    while (len > 0) {
-      if (f8->padlen == f8->blocklen) {
+      if (f8->padlen == f8->ecb.blocklen) {
        /* xor of IV, MIV and blockcnt == what goes into cipher */
-        STORE32H(f8->blockcnt, (buf+(f8->blocklen-4)));
+        STORE32H(f8->blockcnt, (buf+(f8->ecb.blocklen-4)));
        ++(f8->blockcnt);
-        for (x = 0; x < f8->blocklen; x++) {
+        for (x = 0; x < f8->ecb.blocklen; x++) {
          f8->IV[x] ^= f8->MIV[x] ^ buf[x];
        }
-        if ((err = cipher_descriptor[f8->cipher].ecb_encrypt(f8->IV, f8->IV, &f8->key)) != CRYPT_OK) {
+        if ((err = ecb_encrypt_block(f8->IV, f8->IV, &f8->ecb)) != CRYPT_OK) {
          return err;
        }
        f8->padlen = 0;
diff --git a/src/modes/f8/f8_getiv.c b/src/modes/f8/f8_getiv.c
index 1a4e53f1e..a38ffde83 100644
--- a/src/modes/f8/f8_getiv.c
+++ b/src/modes/f8/f8_getiv.c
@@ -21,12 +21,12 @@ int f8_getiv(unsigned char *IV, unsigned long *len, const symmetric_F8 *f8)
    LTC_ARGCHK(IV != NULL);
    LTC_ARGCHK(len != NULL);
    LTC_ARGCHK(f8 != NULL);
-   if ((unsigned long)f8->blocklen > *len) {
-      *len = f8->blocklen;
+   if ((unsigned long)f8->ecb.blocklen > *len) {
+      *len = f8->ecb.blocklen;
      return CRYPT_BUFFER_OVERFLOW;
    }
-   XMEMCPY(IV, f8->IV, f8->blocklen);
-   *len = f8->blocklen;
+   XMEMCPY(IV, f8->IV, f8->ecb.blocklen);
+   *len = f8->ecb.blocklen;
    return CRYPT_OK;
 }
diff --git a/src/modes/f8/f8_setiv.c b/src/modes/f8/f8_setiv.c
index 51a80abcc..fd5411e4f 100644
--- a/src/modes/f8/f8_setiv.c
+++ b/src/modes/f8/f8_setiv.c
@@ -23,17 +23,17 @@ int f8_setiv(const unsigned char *IV, unsigned long len, symmetric_F8 *f8)
    LTC_ARGCHK(IV != NULL);
    LTC_ARGCHK(f8 != NULL);
 
-   if ((err = cipher_is_valid(f8->cipher)) != CRYPT_OK) {
+   if ((err = cipher_is_valid(f8->ecb.cipher)) != CRYPT_OK) {
      return err;
    }
 
-   if (len != (unsigned long)f8->blocklen) {
+   if (len != (unsigned long)f8->ecb.blocklen) {
      return CRYPT_INVALID_ARG;
    }
 
    /* force next block */
    f8->padlen = 0;
-   return cipher_descriptor[f8->cipher].ecb_encrypt(IV, f8->IV, &f8->key);
+   return ecb_encrypt_block(IV, f8->IV, &f8->ecb);
 }
 
 #endif
diff --git a/src/modes/f8/f8_start.c b/src/modes/f8/f8_start.c
index 58f126f6e..21d00915c 100644
--- a/src/modes/f8/f8_start.c
+++ b/src/modes/f8/f8_start.c
@@ -47,9 +47,7 @@ int f8_start( int cipher, const unsigned char *IV,
 
    /* copy details */
    f8->blockcnt = 0;
-   f8->cipher = cipher;
-   f8->blocklen = cipher_descriptor[cipher].block_length;
-   f8->padlen = f8->blocklen;
+   f8->padlen = cipher_descriptor[cipher].block_length;
 
    /* now get key ^ salt_key [extend salt_ket with 0x55 as required to match length] */
    zeromem(tkey, sizeof(tkey));
@@ -64,23 +62,23 @@
    }
 
    /* now encrypt with tkey[0..keylen-1] the IV and use that as the IV */
-   if ((err = cipher_descriptor[cipher].setup(tkey, keylen, num_rounds, &f8->key)) != CRYPT_OK) {
+   if ((err = ecb_start(cipher, tkey, keylen, num_rounds, &f8->ecb)) != CRYPT_OK) {
      return err;
    }
 
    /* encrypt IV */
-   if ((err = cipher_descriptor[f8->cipher].ecb_encrypt(IV, f8->MIV, &f8->key)) != CRYPT_OK) {
-      cipher_descriptor[f8->cipher].done(&f8->key);
-      return err;
+   if ((err = ecb_encrypt_block(IV, f8->MIV, &f8->ecb)) != CRYPT_OK) {
+      ecb_done(&f8->ecb);
+      return err;
    }
 
    zeromem(tkey, sizeof(tkey));
    zeromem(f8->IV, sizeof(f8->IV));
 
    /* terminate this cipher */
-   cipher_descriptor[f8->cipher].done(&f8->key);
+   ecb_done(&f8->ecb);
 
    /* init the cipher */
-   return cipher_descriptor[cipher].setup(key, keylen, num_rounds, &f8->key);
+   return ecb_start(cipher, key, keylen, num_rounds, &f8->ecb);
 }
 
 #endif
diff --git a/src/modes/lrw/lrw_decrypt.c b/src/modes/lrw/lrw_decrypt.c
index ba3b38a40..b4f4059e8 100644
--- a/src/modes/lrw/lrw_decrypt.c
+++ b/src/modes/lrw/lrw_decrypt.c
@@ -24,12 +24,12 @@ int lrw_decrypt(const unsigned char *ct, unsigned char *pt, unsigned long len, s
    LTC_ARGCHK(ct != NULL);
    LTC_ARGCHK(lrw != NULL);
 
-   if ((err = cipher_is_valid(lrw->cipher)) != CRYPT_OK) {
+   if ((err = cipher_is_valid(lrw->ecb.cipher)) != CRYPT_OK) {
      return err;
    }
 
-   if (cipher_descriptor[lrw->cipher].accel_lrw_decrypt != NULL) {
-      return cipher_descriptor[lrw->cipher].accel_lrw_decrypt(ct, pt, len, lrw->IV, lrw->tweak, &lrw->key);
+   if (cipher_descriptor[lrw->ecb.cipher].accel_lrw_decrypt != NULL) {
+      return cipher_descriptor[lrw->ecb.cipher].accel_lrw_decrypt(ct, pt, len, lrw->IV, lrw->tweak, &lrw->ecb.key);
    }
 
    return lrw_process(ct, pt, len, LRW_DECRYPT, lrw);
diff --git a/src/modes/lrw/lrw_done.c b/src/modes/lrw/lrw_done.c
index dc649a019..83ad5f340 100644
--- a/src/modes/lrw/lrw_done.c
+++ b/src/modes/lrw/lrw_done.c
@@ -16,16 +16,9 @@
 */
 int lrw_done(symmetric_LRW *lrw)
 {
-   int err;
-
    LTC_ARGCHK(lrw != NULL);
-   if ((err = cipher_is_valid(lrw->cipher)) != CRYPT_OK) {
-      return err;
-   }
-   cipher_descriptor[lrw->cipher].done(&lrw->key);
-
-   return CRYPT_OK;
+   return ecb_done(&lrw->ecb);
 }
 
 #endif
diff --git a/src/modes/lrw/lrw_encrypt.c b/src/modes/lrw/lrw_encrypt.c
index fde442b15..7c8e1ba59 100644
--- a/src/modes/lrw/lrw_encrypt.c
+++ b/src/modes/lrw/lrw_encrypt.c
@@ -24,12 +24,12 @@ int lrw_encrypt(const unsigned char *pt, unsigned char *ct, unsigned long len, s
    LTC_ARGCHK(ct != NULL);
    LTC_ARGCHK(lrw != NULL);
 
-   if ((err = cipher_is_valid(lrw->cipher)) != CRYPT_OK) {
+   if ((err = cipher_is_valid(lrw->ecb.cipher)) != CRYPT_OK) {
      return err;
    }
 
-   if (cipher_descriptor[lrw->cipher].accel_lrw_encrypt != NULL) {
-      return cipher_descriptor[lrw->cipher].accel_lrw_encrypt(pt, ct, len, lrw->IV, lrw->tweak, &lrw->key);
+   if (cipher_descriptor[lrw->ecb.cipher].accel_lrw_encrypt != NULL) {
+      return cipher_descriptor[lrw->ecb.cipher].accel_lrw_encrypt(pt, ct, len, lrw->IV, lrw->tweak, &lrw->ecb.key);
    }
 
    return lrw_process(pt, ct, len, LRW_ENCRYPT, lrw);
diff --git a/src/modes/lrw/lrw_process.c b/src/modes/lrw/lrw_process.c
index d9a3edd5b..a04f90d47 100644
--- a/src/modes/lrw/lrw_process.c
+++ b/src/modes/lrw/lrw_process.c
@@ -77,11 +77,11 @@ int lrw_process(const unsigned char *pt, unsigned char *ct, unsigned long len, i
 
      /* send through cipher */
      if (mode == LRW_ENCRYPT) {
-        if ((err = cipher_descriptor[lrw->cipher].ecb_encrypt(ct, ct, &lrw->key)) != CRYPT_OK) {
+        if ((err = ecb_encrypt_block(ct, ct, &lrw->ecb)) != CRYPT_OK) {
          return err;
        }
      } else {
-        if ((err = cipher_descriptor[lrw->cipher].ecb_decrypt(ct, ct, &lrw->key)) != CRYPT_OK) {
+        if ((err = ecb_decrypt_block(ct, ct, &lrw->ecb)) != CRYPT_OK) {
          return err;
        }
      }
diff --git a/src/modes/lrw/lrw_setiv.c b/src/modes/lrw/lrw_setiv.c
index efb44125a..72615e773 100644
--- a/src/modes/lrw/lrw_setiv.c
+++ b/src/modes/lrw/lrw_setiv.c
@@ -30,7 +30,7 @@ int lrw_setiv(const unsigned char *IV, unsigned long len, symmetric_LRW *lrw)
      return CRYPT_INVALID_ARG;
    }
 
-   if ((err = cipher_is_valid(lrw->cipher)) != CRYPT_OK) {
+   if ((err = cipher_is_valid(lrw->ecb.cipher)) != CRYPT_OK) {
      return err;
    }
 
@@ -38,7 +38,7 @@ int lrw_setiv(const unsigned char *IV, unsigned long len, symmetric_LRW *lrw)
    XMEMCPY(lrw->IV, IV, 16);
 
    /* check if we have to actually do work */
-   if (cipher_descriptor[lrw->cipher].accel_lrw_encrypt != NULL && cipher_descriptor[lrw->cipher].accel_lrw_decrypt != NULL) {
+   if (cipher_descriptor[lrw->ecb.cipher].accel_lrw_encrypt != NULL && cipher_descriptor[lrw->ecb.cipher].accel_lrw_decrypt != NULL) {
      /* we have accelerators, let's bail since they don't use lrw->pad anyways */
      return CRYPT_OK;
    }
diff --git a/src/modes/lrw/lrw_start.c b/src/modes/lrw/lrw_start.c
index 20956859d..fb0b95ef0 100644
--- a/src/modes/lrw/lrw_start.c
+++ b/src/modes/lrw/lrw_start.c
@@ -53,10 +53,10 @@ int lrw_start( int cipher,
    }
 
    /* schedule key */
-   if ((err = cipher_descriptor[cipher].setup(key, keylen, num_rounds, &lrw->key)) != CRYPT_OK) {
+   if ((err = ecb_start(cipher, key, keylen, num_rounds, &lrw->ecb)) != CRYPT_OK) {
      return err;
    }
-   lrw->cipher = cipher;
+   lrw->ecb.cipher = cipher;
 
    /* copy the IV and tweak */
    XMEMCPY(lrw->tweak, tweak, 16);
diff --git a/src/modes/ofb/ofb_done.c b/src/modes/ofb/ofb_done.c
index c4a018406..15cf1f665 100644
--- a/src/modes/ofb/ofb_done.c
+++ b/src/modes/ofb/ofb_done.c
@@ -15,14 +15,9 @@
 */
 int ofb_done(symmetric_OFB *ofb)
 {
-   int err;
    LTC_ARGCHK(ofb != NULL);
 
-   if ((err = cipher_is_valid(ofb->cipher)) != CRYPT_OK) {
-      return err;
-   }
-   cipher_descriptor[ofb->cipher].done(&ofb->key);
-   return CRYPT_OK;
+   return ecb_done(&ofb->ecb);
 }
diff --git a/src/modes/ofb/ofb_encrypt.c b/src/modes/ofb/ofb_encrypt.c
index 63f1e18e9..211efcc19 100644
--- a/src/modes/ofb/ofb_encrypt.c
+++ b/src/modes/ofb/ofb_encrypt.c
@@ -23,19 +23,19 @@ int ofb_encrypt(const unsigned char *pt, unsigned char *ct, unsigned long len, s
    LTC_ARGCHK(pt != NULL);
    LTC_ARGCHK(ct != NULL);
    LTC_ARGCHK(ofb != NULL);
-   if ((err = cipher_is_valid(ofb->cipher)) != CRYPT_OK) {
+   if ((err = cipher_is_valid(ofb->ecb.cipher)) != CRYPT_OK) {
      return err;
    }
 
    /* is blocklen/padlen valid? */
-   if (ofb->blocklen < 0 || ofb->blocklen > (int)sizeof(ofb->IV) ||
+   if (ofb->ecb.blocklen < 0 || ofb->ecb.blocklen > (int)sizeof(ofb->IV) ||
       ofb->padlen < 0 || ofb->padlen > (int)sizeof(ofb->IV)) {
      return CRYPT_INVALID_ARG;
    }
 
    while (len-- > 0) {
-      if (ofb->padlen == ofb->blocklen) {
-         if ((err = cipher_descriptor[ofb->cipher].ecb_encrypt(ofb->IV, ofb->IV, &ofb->key)) != CRYPT_OK) {
+      if (ofb->padlen == ofb->ecb.blocklen) {
+         if ((err = ecb_encrypt_block(ofb->IV, ofb->IV, &ofb->ecb)) != CRYPT_OK) {
           return err;
        }
        ofb->padlen = 0;
diff --git a/src/modes/ofb/ofb_getiv.c b/src/modes/ofb/ofb_getiv.c
index 0a799f0d9..6a40c1ba3 100644
--- a/src/modes/ofb/ofb_getiv.c
+++ b/src/modes/ofb/ofb_getiv.c
@@ -21,12 +21,12 @@ int ofb_getiv(unsigned char *IV, unsigned long *len, const symmetric_OFB *ofb)
    LTC_ARGCHK(IV != NULL);
    LTC_ARGCHK(len != NULL);
    LTC_ARGCHK(ofb != NULL);
-   if ((unsigned long)ofb->blocklen > *len) {
-      *len = ofb->blocklen;
+   if ((unsigned long)ofb->ecb.blocklen > *len) {
+      *len = ofb->ecb.blocklen;
      return CRYPT_BUFFER_OVERFLOW;
    }
-   XMEMCPY(IV, ofb->IV, ofb->blocklen);
-   *len = ofb->blocklen;
+   XMEMCPY(IV, ofb->IV, ofb->ecb.blocklen);
+   *len = ofb->ecb.blocklen;
    return CRYPT_OK;
 }
diff --git a/src/modes/ofb/ofb_setiv.c b/src/modes/ofb/ofb_setiv.c
index 1fdec7fb2..9a06cc577 100644
--- a/src/modes/ofb/ofb_setiv.c
+++ b/src/modes/ofb/ofb_setiv.c
@@ -23,17 +23,17 @@ int ofb_setiv(const unsigned char *IV, unsigned long len, symmetric_OFB *ofb)
    LTC_ARGCHK(IV != NULL);
    LTC_ARGCHK(ofb != NULL);
 
-   if ((err = cipher_is_valid(ofb->cipher)) != CRYPT_OK) {
+   if ((err = cipher_is_valid(ofb->ecb.cipher)) != CRYPT_OK) {
      return err;
    }
 
-   if (len != (unsigned long)ofb->blocklen) {
+   if (len != (unsigned long)ofb->ecb.blocklen) {
      return CRYPT_INVALID_ARG;
    }
 
    /* force next block */
    ofb->padlen = 0;
-   return cipher_descriptor[ofb->cipher].ecb_encrypt(IV, ofb->IV, &ofb->key);
+   return ecb_encrypt_block(IV, ofb->IV, &ofb->ecb);
 }
 
 #endif
diff --git a/src/modes/ofb/ofb_start.c b/src/modes/ofb/ofb_start.c
index 179fe5755..2998f3fc7 100644
--- a/src/modes/ofb/ofb_start.c
+++ b/src/modes/ofb/ofb_start.c
@@ -33,16 +33,18 @@ int ofb_start(int cipher, const unsigned char *IV, const unsigned char *key,
      return err;
    }
 
+   /* init the cipher */
+   if ((err = ecb_start(cipher, key, keylen, num_rounds, &ofb->ecb)) != CRYPT_OK) {
+      return err;
+   }
+   ofb->padlen = cipher_descriptor[cipher].block_length;
+
    /* copy details */
-   ofb->cipher = cipher;
-   ofb->blocklen = cipher_descriptor[cipher].block_length;
-   for (x = 0; x < ofb->blocklen; x++) {
+   for (x = 0; x < ofb->ecb.blocklen; x++) {
      ofb->IV[x] = IV[x];
    }
 
-   /* init the cipher */
-   ofb->padlen = ofb->blocklen;
-   return cipher_descriptor[cipher].setup(key, keylen, num_rounds, &ofb->key);
+   return CRYPT_OK;
 }
 
 #endif
diff --git a/src/modes/xts/xts_decrypt.c b/src/modes/xts/xts_decrypt.c
index f1747d539..cbd955689 100644
--- a/src/modes/xts/xts_decrypt.c
+++ b/src/modes/xts/xts_decrypt.c
@@ -24,7 +24,7 @@ static int s_tweak_uncrypt(const unsigned char *C, unsigned char *P, unsigned ch
    }
 #endif
 
-   err = cipher_descriptor[xts->cipher].ecb_decrypt(P, P, &xts->key1);
+   err = ecb_decrypt_block(P, P, &xts->key1);
 
 #ifdef LTC_FAST
    for (x = 0; x < 16; x += sizeof(LTC_FAST_TYPE)) {
@@ -86,7 +86,7 @@ int xts_decrypt(const unsigned char *ct, unsigned long ptlen, unsigned char *pt,
    if (cipher_descriptor[xts->cipher].accel_xts_decrypt && lim > 0) {
 
      /* use accelerated decryption for whole blocks */
-      if ((err = cipher_descriptor[xts->cipher].accel_xts_decrypt(ct, pt, lim, tweak, &xts->key1, &xts->key2)) !=
+      if ((err = cipher_descriptor[xts->cipher].accel_xts_decrypt(ct, pt, lim, tweak, &xts->key1.key, &xts->key2.key)) !=
          CRYPT_OK) {
        return err;
      }
@@ -97,7 +97,7 @@ int xts_decrypt(const unsigned char *ct, unsigned long ptlen, unsigned char *pt,
      XMEMCPY(T, tweak, sizeof(T));
    } else {
      /* encrypt the tweak */
-      if ((err = cipher_descriptor[xts->cipher].ecb_encrypt(tweak, T, &xts->key2)) != CRYPT_OK) {
+      if ((err = ecb_encrypt_block(tweak, T, &xts->key2)) != CRYPT_OK) {
        return err;
      }
 
@@ -136,7 +136,7 @@ int xts_decrypt(const unsigned char *ct, unsigned long ptlen, unsigned char *pt,
    }
 
    /* Decrypt the tweak back */
-   if ((err = cipher_descriptor[xts->cipher].ecb_decrypt(T, tweak, &xts->key2)) != CRYPT_OK) {
+   if ((err = ecb_decrypt_block(T, tweak, &xts->key2)) != CRYPT_OK) {
      return err;
    }
diff --git a/src/modes/xts/xts_done.c b/src/modes/xts/xts_done.c
index 4f775217f..dd7ed25ce 100644
--- a/src/modes/xts/xts_done.c
+++ b/src/modes/xts/xts_done.c
@@ -14,8 +14,8 @@ void xts_done(symmetric_xts *xts)
 {
    LTC_ARGCHKVD(xts != NULL);
 
-   cipher_descriptor[xts->cipher].done(&xts->key1);
-   cipher_descriptor[xts->cipher].done(&xts->key2);
+   ecb_done(&xts->key1);
+   ecb_done(&xts->key2);
 }
 
 #endif
diff --git a/src/modes/xts/xts_encrypt.c b/src/modes/xts/xts_encrypt.c
index c269b7c70..8f8281c55 100644
--- a/src/modes/xts/xts_encrypt.c
+++ b/src/modes/xts/xts_encrypt.c
@@ -24,7 +24,7 @@ static int s_tweak_crypt(const unsigned char *P, unsigned char *C, unsigned char
    }
 #endif
 
-   if ((err = cipher_descriptor[xts->cipher].ecb_encrypt(C, C, &xts->key1)) != CRYPT_OK) {
+   if ((err = ecb_encrypt_block(C, C, &xts->key1)) != CRYPT_OK) {
      return err;
    }
 
@@ -88,7 +88,7 @@ int xts_encrypt(const unsigned char *pt, unsigned long ptlen, unsigned char *ct,
    if (cipher_descriptor[xts->cipher].accel_xts_encrypt && lim > 0) {
 
      /* use accelerated encryption for whole blocks */
-      if ((err = cipher_descriptor[xts->cipher].accel_xts_encrypt(pt, ct, lim, tweak, &xts->key1, &xts->key2)) !=
+      if ((err = cipher_descriptor[xts->cipher].accel_xts_encrypt(pt, ct, lim, tweak, &xts->key1.key, &xts->key2.key)) !=
          CRYPT_OK) {
        return err;
      }
@@ -100,7 +100,7 @@ int xts_encrypt(const unsigned char *pt, unsigned long ptlen, unsigned char *ct,
    } else {
 
      /* encrypt the tweak */
-      if ((err = cipher_descriptor[xts->cipher].ecb_encrypt(tweak, T, &xts->key2)) != CRYPT_OK) {
+      if ((err = ecb_encrypt_block(tweak, T, &xts->key2)) != CRYPT_OK) {
        return err;
      }
 
@@ -137,7 +137,7 @@ int xts_encrypt(const unsigned char *pt, unsigned long ptlen, unsigned char *ct,
    }
 
    /* Decrypt the tweak back */
-   if ((err = cipher_descriptor[xts->cipher].ecb_decrypt(T, tweak, &xts->key2)) != CRYPT_OK) {
+   if ((err = ecb_decrypt_block(T, tweak, &xts->key2)) != CRYPT_OK) {
      return err;
    }
diff --git a/src/modes/xts/xts_init.c b/src/modes/xts/xts_init.c
index cec3a329b..d0158471f 100644
--- a/src/modes/xts/xts_init.c
+++ b/src/modes/xts/xts_init.c
@@ -37,10 +37,10 @@ int xts_start(int cipher, const unsigned char *key1, const unsigned char *key2,
    }
 
    /* schedule the two ciphers */
-   if ((err = cipher_descriptor[cipher].setup(key1, keylen, num_rounds, &xts->key1)) != CRYPT_OK) {
+   if ((err = ecb_start(cipher, key1, keylen, num_rounds, &xts->key1)) != CRYPT_OK) {
      return err;
    }
-   if ((err = cipher_descriptor[cipher].setup(key2, keylen, num_rounds, &xts->key2)) != CRYPT_OK) {
+   if ((err = ecb_start(cipher, key2, keylen, num_rounds, &xts->key2)) != CRYPT_OK) {
      return err;
    }
    xts->cipher = cipher;
diff --git a/src/modes/xts/xts_test.c b/src/modes/xts/xts_test.c
index ad9273576..90e5bdf09 100644
--- a/src/modes/xts/xts_test.c
+++ b/src/modes/xts/xts_test.c
@@ -23,8 +23,10 @@ static int s_xts_test_accel_xts_encrypt(const unsigned char *pt, unsigned char *
    orig = cipher_descriptor[xts.cipher].accel_xts_encrypt;
    cipher_descriptor[xts.cipher].accel_xts_encrypt = NULL;
 
-   XMEMCPY(&xts.key1, skey1, sizeof(symmetric_key));
-   XMEMCPY(&xts.key2, skey2, sizeof(symmetric_key));
+   XMEMCPY(&xts.key1.key, skey1, sizeof(xts.key1.key));
+   XMEMCPY(&xts.key2.key, skey2, sizeof(xts.key2.key));
+   xts.key1.cipher = xts.key2.cipher = xts.cipher;
+   xts.key1.blocklen = xts.key2.blocklen = cipher_descriptor[xts.cipher].block_length;
 
    ret = xts_encrypt(pt, blocks << 4, ct, tweak, &xts);
    cipher_descriptor[xts.cipher].accel_xts_encrypt = orig;
@@ -50,8 +52,10 @@ static int s_xts_test_accel_xts_decrypt(const unsigned char *ct, unsigned char *
    orig = cipher_descriptor[xts.cipher].accel_xts_decrypt;
    cipher_descriptor[xts.cipher].accel_xts_decrypt = NULL;
 
-   XMEMCPY(&xts.key1, skey1, sizeof(symmetric_key));
-   XMEMCPY(&xts.key2, skey2, sizeof(symmetric_key));
+   XMEMCPY(&xts.key1.key, skey1, sizeof(xts.key1.key));
+   XMEMCPY(&xts.key2.key, skey2, sizeof(xts.key2.key));
+   xts.key1.cipher = xts.key2.cipher = xts.cipher;
+   xts.key1.blocklen = xts.key2.blocklen = cipher_descriptor[xts.cipher].block_length;
 
    ret = xts_decrypt(ct, blocks << 4, pt, tweak, &xts);
    cipher_descriptor[xts.cipher].accel_xts_decrypt = orig;
diff --git a/tests/modes_test.c b/tests/modes_test.c
index cd58af474..ac4428bda 100644
--- a/tests/modes_test.c
+++ b/tests/modes_test.c
@@ -91,21 +91,15 @@ int modes_test(void)
    /* encode the block */
    DO(ret = ofb_start(cipher_idx, iv, key, 16, 0, &ofb));
    l = sizeof(iv2);
-   DO(ret = ofb_getiv(iv2, &l, &ofb));
-   if (l != 16 || memcmp(iv2, iv, 16)) {
-      fprintf(stderr, "ofb_getiv failed");
-      return 1;
-   }
+   DO(ofb_getiv(iv2, &l, &ofb));
+   DO(do_compare_testvector(iv2, l, iv, 16, "ofb_getiv", 0));
    DO(ret = ofb_encrypt(pt, ct, 64, &ofb));
 
    /* decode the block */
    DO(ret = ofb_setiv(iv2, l, &ofb));
    zeromem(tmp, sizeof(tmp));
    DO(ret = ofb_decrypt(ct, tmp, 64, &ofb));
-   if (memcmp(tmp, pt, 64) != 0) {
-      fprintf(stderr, "OFB failed");
-      return 1;
-   }
+   DO(do_compare_testvector(tmp, 64, pt, 64, "OFB", 0));
 #endif
 
 #if defined(LTC_CTR_MODE) && defined(LTC_RIJNDAEL)