| 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702
70370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211121212131214121512161217121812191220122112221223122412251226122712281229123012311232123312341235123612371238123912401241124212431244124512461247124812491250125112521253125412551256125712581259126012611262126312641265126612671268126912701271127212731274127512761
27712781279128012811282 |
- // adv_simd.h - written and placed in the public domain by Jeffrey Walton
- /// \file adv_simd.h
- /// \brief Template for AdvancedProcessBlocks and SIMD processing
- // The SIMD based implementations for ciphers that use SSE, NEON and Power7
- // have a common pattern. Namely, they have a specialized implementation of
- // AdvancedProcessBlocks which processes multiple block using hardware
- // acceleration. After several implementations we noticed a lot of copy and
- // paste occurring. adv_simd.h provides a template to avoid the copy and paste.
- //
- // There are 6 templates provided in this file. The number following the
- // function name, 128, is the block size in bits. The name following the
- // block size is the arrangement and acceleration. For example 4x1_SSE means
- // Intel SSE using two encrypt (or decrypt) functions: one that operates on
- // 4 SIMD words, and one that operates on 1 SIMD word.
- //
- // * AdvancedProcessBlocks128_4x1_SSE
- // * AdvancedProcessBlocks128_6x2_SSE
- // * AdvancedProcessBlocks128_4x1_NEON
- // * AdvancedProcessBlocks128_6x1_NEON
- // * AdvancedProcessBlocks128_4x1_ALTIVEC
- // * AdvancedProcessBlocks128_6x1_ALTIVEC
- //
- // If an arrangement ends in 2, like 6x2, then the template will handle the
- // single block case by padding with 0's and using the two SIMD word
- // function. This happens at most one time when processing multiple blocks.
- // The extra processing of a zero block is trivial and worth the tradeoff.
- //
- // The MAYBE_CONST macro present on x86 is a SunCC workaround. Some versions
- // of SunCC lose/drop the const-ness in the F1 and F4 functions. It eventually
- // results in a failed link due to the const/non-const mismatch.
- //
- // In July 2020 the library stopped using 64-bit block version of
- // AdvancedProcessBlocks. Testing showed unreliable results and failed
- // self tests on occasion. Also see Issue 945 and
- // https://github.com/weidai11/cryptopp/commit/dd7598e638bb.
- #ifndef CRYPTOPP_ADVANCED_SIMD_TEMPLATES
- #define CRYPTOPP_ADVANCED_SIMD_TEMPLATES
- #include "config.h"
- #include "misc.h"
- #include "stdcpp.h"
- #if (CRYPTOPP_ARM_NEON_HEADER)
- # include <arm_neon.h>
- #endif
- #if (CRYPTOPP_ARM_ACLE_HEADER)
- # include <stdint.h>
- # include <arm_acle.h>
- #endif
- #if (CRYPTOPP_SSE2_INTRIN_AVAILABLE)
- # include <emmintrin.h>
- # include <xmmintrin.h>
- #endif
- // SunCC needs CRYPTOPP_SSSE3_AVAILABLE, too
- #if (CRYPTOPP_SSSE3_AVAILABLE)
- # include <emmintrin.h>
- # include <pmmintrin.h>
- # include <xmmintrin.h>
- #endif
- #if defined(__ALTIVEC__)
- # include "ppc_simd.h"
- #endif
// ************************ All block ciphers *********************** //

ANONYMOUS_NAMESPACE_BEGIN

using CryptoPP::BlockTransformation;

// Mirror the BlockTransformation flag values as file-local constants so the
// templates below can test them without the class qualifier on every use.
CRYPTOPP_CONSTANT(BT_XorInput = BlockTransformation::BT_XorInput);
CRYPTOPP_CONSTANT(BT_AllowParallel = BlockTransformation::BT_AllowParallel);
CRYPTOPP_CONSTANT(BT_InBlockIsCounter = BlockTransformation::BT_InBlockIsCounter);
CRYPTOPP_CONSTANT(BT_ReverseDirection = BlockTransformation::BT_ReverseDirection);
CRYPTOPP_CONSTANT(BT_DontIncrementInOutPointers = BlockTransformation::BT_DontIncrementInOutPointers);

ANONYMOUS_NAMESPACE_END
- // *************************** ARM NEON ************************** //
- #if (CRYPTOPP_ARM_NEON_AVAILABLE) || (CRYPTOPP_ARM_ASIMD_AVAILABLE) || \
- defined(CRYPTOPP_DOXYGEN_PROCESSING)
- NAMESPACE_BEGIN(CryptoPP)
/// \brief AdvancedProcessBlocks for 1 and 6 blocks
/// \tparam F1 function to process 1 128-bit block
/// \tparam F6 function to process 6 128-bit blocks
/// \tparam W word type of the subkey table
/// \details AdvancedProcessBlocks128_6x1_NEON processes 6 and 1 NEON SIMD words
///  at a time.
/// \details The subkey type is usually word32 or word64. F1 and F6 must use the
///  same word type.
template <typename F1, typename F6, typename W>
inline size_t AdvancedProcessBlocks128_6x1_NEON(F1 func1, F6 func6,
    const W *subKeys, size_t rounds, const byte *inBlocks,
    const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
{
    CRYPTOPP_ASSERT(subKeys);
    CRYPTOPP_ASSERT(inBlocks);
    CRYPTOPP_ASSERT(outBlocks);
    CRYPTOPP_ASSERT(length >= 16);

    // Increment of 1 in big-endian compatible with the ctr byte array
    // (the 1 lands in the most significant byte of the high 64-bit lane).
    const unsigned int w_one[] = {0, 0<<24, 0, 1<<24};
    const uint32x4_t s_one = vld1q_u32(w_one);

    const size_t blockSize = 16;
    // const size_t neonBlockSize = 16;

    // A counter block is updated in place below, so the input pointer must
    // not advance in that mode.
    size_t inIncrement = (flags & (EnumToInt(BT_InBlockIsCounter)|EnumToInt(BT_DontIncrementInOutPointers))) ? 0 : blockSize;
    size_t xorIncrement = (xorBlocks != NULLPTR) ? blockSize : 0;
    size_t outIncrement = (flags & EnumToInt(BT_DontIncrementInOutPointers)) ? 0 : blockSize;

    // Clang and Coverity are generating findings using xorBlocks as a flag.
    const bool xorInput = (xorBlocks != NULLPTR) && (flags & EnumToInt(BT_XorInput));
    const bool xorOutput = (xorBlocks != NULLPTR) && !(flags & EnumToInt(BT_XorInput));

    if (flags & BT_ReverseDirection)
    {
        // Start at the last block and walk every buffer backwards.
        inBlocks = PtrAdd(inBlocks, length - blockSize);
        xorBlocks = PtrAdd(xorBlocks, length - blockSize);
        outBlocks = PtrAdd(outBlocks, length - blockSize);
        inIncrement = 0-inIncrement;
        xorIncrement = 0-xorIncrement;
        outIncrement = 0-outIncrement;
    }

    if (flags & BT_AllowParallel)
    {
        while (length >= 6*blockSize)
        {
            uint64x2_t block0, block1, block2, block3, block4, block5;
            if (flags & BT_InBlockIsCounter)
            {
                // Fan the single counter block out to 6 consecutive values,
                // then store counter+6 back for the next iteration.
                const uint64x2_t one = vreinterpretq_u64_u32(s_one);
                block0 = vreinterpretq_u64_u8(vld1q_u8(inBlocks));
                block1 = vaddq_u64(block0, one);
                block2 = vaddq_u64(block1, one);
                block3 = vaddq_u64(block2, one);
                block4 = vaddq_u64(block3, one);
                block5 = vaddq_u64(block4, one);
                vst1q_u8(const_cast<byte*>(inBlocks),
                    vreinterpretq_u8_u64(vaddq_u64(block5, one)));
            }
            else
            {
                block0 = vreinterpretq_u64_u8(vld1q_u8(inBlocks));
                inBlocks = PtrAdd(inBlocks, inIncrement);
                block1 = vreinterpretq_u64_u8(vld1q_u8(inBlocks));
                inBlocks = PtrAdd(inBlocks, inIncrement);
                block2 = vreinterpretq_u64_u8(vld1q_u8(inBlocks));
                inBlocks = PtrAdd(inBlocks, inIncrement);
                block3 = vreinterpretq_u64_u8(vld1q_u8(inBlocks));
                inBlocks = PtrAdd(inBlocks, inIncrement);
                block4 = vreinterpretq_u64_u8(vld1q_u8(inBlocks));
                inBlocks = PtrAdd(inBlocks, inIncrement);
                block5 = vreinterpretq_u64_u8(vld1q_u8(inBlocks));
                inBlocks = PtrAdd(inBlocks, inIncrement);
            }

            if (xorInput)
            {
                block0 = veorq_u64(block0, vreinterpretq_u64_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block1 = veorq_u64(block1, vreinterpretq_u64_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block2 = veorq_u64(block2, vreinterpretq_u64_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block3 = veorq_u64(block3, vreinterpretq_u64_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block4 = veorq_u64(block4, vreinterpretq_u64_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block5 = veorq_u64(block5, vreinterpretq_u64_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
            }

            func6(block0, block1, block2, block3, block4, block5, subKeys, static_cast<unsigned int>(rounds));

            if (xorOutput)
            {
                block0 = veorq_u64(block0, vreinterpretq_u64_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block1 = veorq_u64(block1, vreinterpretq_u64_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block2 = veorq_u64(block2, vreinterpretq_u64_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block3 = veorq_u64(block3, vreinterpretq_u64_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block4 = veorq_u64(block4, vreinterpretq_u64_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block5 = veorq_u64(block5, vreinterpretq_u64_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
            }

            vst1q_u8(outBlocks, vreinterpretq_u8_u64(block0));
            outBlocks = PtrAdd(outBlocks, outIncrement);
            vst1q_u8(outBlocks, vreinterpretq_u8_u64(block1));
            outBlocks = PtrAdd(outBlocks, outIncrement);
            vst1q_u8(outBlocks, vreinterpretq_u8_u64(block2));
            outBlocks = PtrAdd(outBlocks, outIncrement);
            vst1q_u8(outBlocks, vreinterpretq_u8_u64(block3));
            outBlocks = PtrAdd(outBlocks, outIncrement);
            vst1q_u8(outBlocks, vreinterpretq_u8_u64(block4));
            outBlocks = PtrAdd(outBlocks, outIncrement);
            vst1q_u8(outBlocks, vreinterpretq_u8_u64(block5));
            outBlocks = PtrAdd(outBlocks, outIncrement);

            length -= 6*blockSize;
        }
    }

    // Single-block tail (also the whole path when parallel is not allowed).
    while (length >= blockSize)
    {
        uint64x2_t block;
        block = vreinterpretq_u64_u8(vld1q_u8(inBlocks));

        if (xorInput)
            block = veorq_u64(block, vreinterpretq_u64_u8(vld1q_u8(xorBlocks)));

        // Bump only the low byte of the counter; inIncrement is 0 in this mode
        // so the same (updated) block is re-read next iteration.
        if (flags & BT_InBlockIsCounter)
            const_cast<byte *>(inBlocks)[15]++;

        func1(block, subKeys, static_cast<unsigned int>(rounds));

        if (xorOutput)
            block = veorq_u64(block, vreinterpretq_u64_u8(vld1q_u8(xorBlocks)));

        vst1q_u8(outBlocks, vreinterpretq_u8_u64(block));

        inBlocks = PtrAdd(inBlocks, inIncrement);
        outBlocks = PtrAdd(outBlocks, outIncrement);
        xorBlocks = PtrAdd(xorBlocks, xorIncrement);
        length -= blockSize;
    }

    return length;
}
/// \brief AdvancedProcessBlocks for 1 and 4 blocks
/// \tparam F1 function to process 1 128-bit block
/// \tparam F4 function to process 4 128-bit blocks
/// \tparam W word type of the subkey table
/// \details AdvancedProcessBlocks128_4x1_NEON processes 4 and 1 NEON SIMD words
///  at a time.
/// \details The subkey type is usually word32 or word64. V is the vector type and it is
///  usually uint32x4_t or uint64x2_t. F1, F4, and W must use the same word and
///  vector type.
template <typename F1, typename F4, typename W>
inline size_t AdvancedProcessBlocks128_4x1_NEON(F1 func1, F4 func4,
    const W *subKeys, size_t rounds, const byte *inBlocks,
    const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
{
    CRYPTOPP_ASSERT(subKeys);
    CRYPTOPP_ASSERT(inBlocks);
    CRYPTOPP_ASSERT(outBlocks);
    CRYPTOPP_ASSERT(length >= 16);

    // Increment of 1 in big-endian compatible with the ctr byte array
    // (the 1 lands in the most significant byte of the high 64-bit lane).
    const unsigned int w_one[] = {0, 0<<24, 0, 1<<24};
    const uint32x4_t s_one = vld1q_u32(w_one);

    const size_t blockSize = 16;
    // const size_t neonBlockSize = 16;

    // A counter block is updated in place below, so the input pointer must
    // not advance in that mode.
    size_t inIncrement = (flags & (EnumToInt(BT_InBlockIsCounter)|EnumToInt(BT_DontIncrementInOutPointers))) ? 0 : blockSize;
    size_t xorIncrement = (xorBlocks != NULLPTR) ? blockSize : 0;
    size_t outIncrement = (flags & EnumToInt(BT_DontIncrementInOutPointers)) ? 0 : blockSize;

    // Clang and Coverity are generating findings using xorBlocks as a flag.
    const bool xorInput = (xorBlocks != NULLPTR) && (flags & EnumToInt(BT_XorInput));
    const bool xorOutput = (xorBlocks != NULLPTR) && !(flags & EnumToInt(BT_XorInput));

    if (flags & BT_ReverseDirection)
    {
        // Start at the last block and walk every buffer backwards.
        inBlocks = PtrAdd(inBlocks, length - blockSize);
        xorBlocks = PtrAdd(xorBlocks, length - blockSize);
        outBlocks = PtrAdd(outBlocks, length - blockSize);
        inIncrement = 0-inIncrement;
        xorIncrement = 0-xorIncrement;
        outIncrement = 0-outIncrement;
    }

    if (flags & BT_AllowParallel)
    {
        while (length >= 4*blockSize)
        {
            uint32x4_t block0, block1, block2, block3;
            if (flags & BT_InBlockIsCounter)
            {
                // Fan the single counter block out to 4 consecutive values
                // (adds performed in the 64-bit lanes), then store counter+4
                // back for the next iteration.
                const uint32x4_t one = s_one;
                block0 = vreinterpretq_u32_u8(vld1q_u8(inBlocks));
                block1 = vreinterpretq_u32_u64(vaddq_u64(vreinterpretq_u64_u32(block0), vreinterpretq_u64_u32(one)));
                block2 = vreinterpretq_u32_u64(vaddq_u64(vreinterpretq_u64_u32(block1), vreinterpretq_u64_u32(one)));
                block3 = vreinterpretq_u32_u64(vaddq_u64(vreinterpretq_u64_u32(block2), vreinterpretq_u64_u32(one)));
                vst1q_u8(const_cast<byte*>(inBlocks), vreinterpretq_u8_u64(vaddq_u64(
                    vreinterpretq_u64_u32(block3), vreinterpretq_u64_u32(one))));
            }
            else
            {
                block0 = vreinterpretq_u32_u8(vld1q_u8(inBlocks));
                inBlocks = PtrAdd(inBlocks, inIncrement);
                block1 = vreinterpretq_u32_u8(vld1q_u8(inBlocks));
                inBlocks = PtrAdd(inBlocks, inIncrement);
                block2 = vreinterpretq_u32_u8(vld1q_u8(inBlocks));
                inBlocks = PtrAdd(inBlocks, inIncrement);
                block3 = vreinterpretq_u32_u8(vld1q_u8(inBlocks));
                inBlocks = PtrAdd(inBlocks, inIncrement);
            }

            if (xorInput)
            {
                block0 = veorq_u32(block0, vreinterpretq_u32_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block1 = veorq_u32(block1, vreinterpretq_u32_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block2 = veorq_u32(block2, vreinterpretq_u32_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block3 = veorq_u32(block3, vreinterpretq_u32_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
            }

            func4(block0, block1, block2, block3, subKeys, static_cast<unsigned int>(rounds));

            if (xorOutput)
            {
                block0 = veorq_u32(block0, vreinterpretq_u32_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block1 = veorq_u32(block1, vreinterpretq_u32_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block2 = veorq_u32(block2, vreinterpretq_u32_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block3 = veorq_u32(block3, vreinterpretq_u32_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
            }

            vst1q_u8(outBlocks, vreinterpretq_u8_u32(block0));
            outBlocks = PtrAdd(outBlocks, outIncrement);
            vst1q_u8(outBlocks, vreinterpretq_u8_u32(block1));
            outBlocks = PtrAdd(outBlocks, outIncrement);
            vst1q_u8(outBlocks, vreinterpretq_u8_u32(block2));
            outBlocks = PtrAdd(outBlocks, outIncrement);
            vst1q_u8(outBlocks, vreinterpretq_u8_u32(block3));
            outBlocks = PtrAdd(outBlocks, outIncrement);

            length -= 4*blockSize;
        }
    }

    // Single-block tail (also the whole path when parallel is not allowed).
    while (length >= blockSize)
    {
        uint32x4_t block = vreinterpretq_u32_u8(vld1q_u8(inBlocks));

        if (xorInput)
            block = veorq_u32(block, vreinterpretq_u32_u8(vld1q_u8(xorBlocks)));

        // Bump only the low byte of the counter; inIncrement is 0 in this mode
        // so the same (updated) block is re-read next iteration.
        if (flags & BT_InBlockIsCounter)
            const_cast<byte *>(inBlocks)[15]++;

        func1(block, subKeys, static_cast<unsigned int>(rounds));

        if (xorOutput)
            block = veorq_u32(block, vreinterpretq_u32_u8(vld1q_u8(xorBlocks)));

        vst1q_u8(outBlocks, vreinterpretq_u8_u32(block));

        inBlocks = PtrAdd(inBlocks, inIncrement);
        outBlocks = PtrAdd(outBlocks, outIncrement);
        xorBlocks = PtrAdd(xorBlocks, xorIncrement);
        length -= blockSize;
    }

    return length;
}
/// \brief AdvancedProcessBlocks for 2 and 6 blocks
/// \tparam F2 function to process 2 128-bit blocks
/// \tparam F6 function to process 6 128-bit blocks
/// \tparam W word type of the subkey table
/// \details AdvancedProcessBlocks128_6x2_NEON processes 6 and 2 NEON SIMD words
///  at a time. For a single block the template uses F2 with a zero block.
/// \details The subkey type is usually word32 or word64. F2 and F6 must use the
///  same word type.
template <typename F2, typename F6, typename W>
inline size_t AdvancedProcessBlocks128_6x2_NEON(F2 func2, F6 func6,
    const W *subKeys, size_t rounds, const byte *inBlocks,
    const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
{
    CRYPTOPP_ASSERT(subKeys);
    CRYPTOPP_ASSERT(inBlocks);
    CRYPTOPP_ASSERT(outBlocks);
    CRYPTOPP_ASSERT(length >= 16);

    // Increment of 1 in big-endian compatible with the ctr byte array
    // (the 1 lands in the most significant byte of the high 64-bit lane).
    const unsigned int w_one[] = {0, 0<<24, 0, 1<<24};
    const uint32x4_t s_one = vld1q_u32(w_one);

    const size_t blockSize = 16;
    // const size_t neonBlockSize = 16;

    // A counter block is updated in place below, so the input pointer must
    // not advance in that mode.
    size_t inIncrement = (flags & (EnumToInt(BT_InBlockIsCounter)|EnumToInt(BT_DontIncrementInOutPointers))) ? 0 : blockSize;
    size_t xorIncrement = (xorBlocks != NULLPTR) ? blockSize : 0;
    size_t outIncrement = (flags & EnumToInt(BT_DontIncrementInOutPointers)) ? 0 : blockSize;

    // Clang and Coverity are generating findings using xorBlocks as a flag.
    const bool xorInput = (xorBlocks != NULLPTR) && (flags & EnumToInt(BT_XorInput));
    const bool xorOutput = (xorBlocks != NULLPTR) && !(flags & EnumToInt(BT_XorInput));

    if (flags & BT_ReverseDirection)
    {
        // Start at the last block and walk every buffer backwards.
        inBlocks = PtrAdd(inBlocks, length - blockSize);
        xorBlocks = PtrAdd(xorBlocks, length - blockSize);
        outBlocks = PtrAdd(outBlocks, length - blockSize);
        inIncrement = 0-inIncrement;
        xorIncrement = 0-xorIncrement;
        outIncrement = 0-outIncrement;
    }

    if (flags & BT_AllowParallel)
    {
        while (length >= 6*blockSize)
        {
            uint64x2_t block0, block1, block2, block3, block4, block5;
            if (flags & BT_InBlockIsCounter)
            {
                // Fan the single counter block out to 6 consecutive values,
                // then store counter+6 back for the next iteration.
                const uint64x2_t one = vreinterpretq_u64_u32(s_one);
                block0 = vreinterpretq_u64_u8(vld1q_u8(inBlocks));
                block1 = vaddq_u64(block0, one);
                block2 = vaddq_u64(block1, one);
                block3 = vaddq_u64(block2, one);
                block4 = vaddq_u64(block3, one);
                block5 = vaddq_u64(block4, one);
                vst1q_u8(const_cast<byte*>(inBlocks),
                    vreinterpretq_u8_u64(vaddq_u64(block5, one)));
            }
            else
            {
                block0 = vreinterpretq_u64_u8(vld1q_u8(inBlocks));
                inBlocks = PtrAdd(inBlocks, inIncrement);
                block1 = vreinterpretq_u64_u8(vld1q_u8(inBlocks));
                inBlocks = PtrAdd(inBlocks, inIncrement);
                block2 = vreinterpretq_u64_u8(vld1q_u8(inBlocks));
                inBlocks = PtrAdd(inBlocks, inIncrement);
                block3 = vreinterpretq_u64_u8(vld1q_u8(inBlocks));
                inBlocks = PtrAdd(inBlocks, inIncrement);
                block4 = vreinterpretq_u64_u8(vld1q_u8(inBlocks));
                inBlocks = PtrAdd(inBlocks, inIncrement);
                block5 = vreinterpretq_u64_u8(vld1q_u8(inBlocks));
                inBlocks = PtrAdd(inBlocks, inIncrement);
            }

            if (xorInput)
            {
                block0 = veorq_u64(block0, vreinterpretq_u64_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block1 = veorq_u64(block1, vreinterpretq_u64_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block2 = veorq_u64(block2, vreinterpretq_u64_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block3 = veorq_u64(block3, vreinterpretq_u64_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block4 = veorq_u64(block4, vreinterpretq_u64_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block5 = veorq_u64(block5, vreinterpretq_u64_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
            }

            func6(block0, block1, block2, block3, block4, block5, subKeys, static_cast<unsigned int>(rounds));

            if (xorOutput)
            {
                block0 = veorq_u64(block0, vreinterpretq_u64_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block1 = veorq_u64(block1, vreinterpretq_u64_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block2 = veorq_u64(block2, vreinterpretq_u64_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block3 = veorq_u64(block3, vreinterpretq_u64_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block4 = veorq_u64(block4, vreinterpretq_u64_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block5 = veorq_u64(block5, vreinterpretq_u64_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
            }

            vst1q_u8(outBlocks, vreinterpretq_u8_u64(block0));
            outBlocks = PtrAdd(outBlocks, outIncrement);
            vst1q_u8(outBlocks, vreinterpretq_u8_u64(block1));
            outBlocks = PtrAdd(outBlocks, outIncrement);
            vst1q_u8(outBlocks, vreinterpretq_u8_u64(block2));
            outBlocks = PtrAdd(outBlocks, outIncrement);
            vst1q_u8(outBlocks, vreinterpretq_u8_u64(block3));
            outBlocks = PtrAdd(outBlocks, outIncrement);
            vst1q_u8(outBlocks, vreinterpretq_u8_u64(block4));
            outBlocks = PtrAdd(outBlocks, outIncrement);
            vst1q_u8(outBlocks, vreinterpretq_u8_u64(block5));
            outBlocks = PtrAdd(outBlocks, outIncrement);

            length -= 6*blockSize;
        }

        while (length >= 2*blockSize)
        {
            uint64x2_t block0, block1;
            if (flags & BT_InBlockIsCounter)
            {
                // Same counter fan-out as above, for a pair of blocks.
                const uint64x2_t one = vreinterpretq_u64_u32(s_one);
                block0 = vreinterpretq_u64_u8(vld1q_u8(inBlocks));
                block1 = vaddq_u64(block0, one);
                vst1q_u8(const_cast<byte*>(inBlocks),
                    vreinterpretq_u8_u64(vaddq_u64(block1, one)));
            }
            else
            {
                block0 = vreinterpretq_u64_u8(vld1q_u8(inBlocks));
                inBlocks = PtrAdd(inBlocks, inIncrement);
                block1 = vreinterpretq_u64_u8(vld1q_u8(inBlocks));
                inBlocks = PtrAdd(inBlocks, inIncrement);
            }

            if (xorInput)
            {
                block0 = veorq_u64(block0, vreinterpretq_u64_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block1 = veorq_u64(block1, vreinterpretq_u64_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
            }

            func2(block0, block1, subKeys, static_cast<unsigned int>(rounds));

            if (xorOutput)
            {
                block0 = veorq_u64(block0, vreinterpretq_u64_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block1 = veorq_u64(block1, vreinterpretq_u64_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
            }

            vst1q_u8(outBlocks, vreinterpretq_u8_u64(block0));
            outBlocks = PtrAdd(outBlocks, outIncrement);
            vst1q_u8(outBlocks, vreinterpretq_u8_u64(block1));
            outBlocks = PtrAdd(outBlocks, outIncrement);

            length -= 2*blockSize;
        }
    }

    // Single-block tail: there is no F1, so pad with a zero block and use F2.
    // The extra work on the discarded zero block is trivial.
    while (length >= blockSize)
    {
        uint64x2_t block, zero = {0,0};
        block = vreinterpretq_u64_u8(vld1q_u8(inBlocks));

        if (xorInput)
            block = veorq_u64(block, vreinterpretq_u64_u8(vld1q_u8(xorBlocks)));

        // Bump only the low byte of the counter; inIncrement is 0 in this mode
        // so the same (updated) block is re-read next iteration.
        if (flags & BT_InBlockIsCounter)
            const_cast<byte *>(inBlocks)[15]++;

        func2(block, zero, subKeys, static_cast<unsigned int>(rounds));

        if (xorOutput)
            block = veorq_u64(block, vreinterpretq_u64_u8(vld1q_u8(xorBlocks)));

        vst1q_u8(outBlocks, vreinterpretq_u8_u64(block));

        inBlocks = PtrAdd(inBlocks, inIncrement);
        outBlocks = PtrAdd(outBlocks, outIncrement);
        xorBlocks = PtrAdd(xorBlocks, xorIncrement);
        length -= blockSize;
    }

    return length;
}
- NAMESPACE_END // CryptoPP
- #endif // CRYPTOPP_ARM_NEON_AVAILABLE
- // *************************** Intel SSE ************************** //
- #if defined(CRYPTOPP_SSSE3_AVAILABLE) || defined(CRYPTOPP_DOXYGEN_PROCESSING)
- #if defined(CRYPTOPP_DOXYGEN_PROCESSING)
- /// \brief SunCC workaround
- /// \details SunCC loses the const on AES_Enc_Block and AES_Dec_Block
- /// \sa <A HREF="http://github.com/weidai11/cryptopp/issues/224">Issue
- /// 224, SunCC and failed compile for rijndael.cpp</A>
- # define MAYBE_CONST const
- /// \brief SunCC workaround
- /// \details SunCC loses the const on AES_Enc_Block and AES_Dec_Block
- /// \sa <A HREF="http://github.com/weidai11/cryptopp/issues/224">Issue
- /// 224, SunCC and failed compile for rijndael.cpp</A>
- # define MAYBE_UNCONST_CAST(T, x) (x)
- #elif (__SUNPRO_CC >= 0x5130)
- # define MAYBE_CONST
- # define MAYBE_UNCONST_CAST(T, x) const_cast<MAYBE_CONST T>(x)
- #else
- # define MAYBE_CONST const
- # define MAYBE_UNCONST_CAST(T, x) (x)
- #endif
- #if defined(CRYPTOPP_DOXYGEN_PROCESSING)
- /// \brief Clang workaround
- /// \details Clang issues spurious alignment warnings
- /// \sa <A HREF="http://bugs.llvm.org/show_bug.cgi?id=20670">Issue
- /// 20670, _mm_loadu_si128 parameter has wrong type</A>
- # define M128_CAST(x) ((__m128i *)(void *)(x))
- /// \brief Clang workaround
- /// \details Clang issues spurious alignment warnings
- /// \sa <A HREF="http://bugs.llvm.org/show_bug.cgi?id=20670">Issue
- /// 20670, _mm_loadu_si128 parameter has wrong type</A>
- # define CONST_M128_CAST(x) ((const __m128i *)(const void *)(x))
- #else
- # ifndef M128_CAST
- # define M128_CAST(x) ((__m128i *)(void *)(x))
- # endif
- # ifndef CONST_M128_CAST
- # define CONST_M128_CAST(x) ((const __m128i *)(const void *)(x))
- # endif
- #endif
- NAMESPACE_BEGIN(CryptoPP)
- /// \brief AdvancedProcessBlocks for 2 and 6 blocks
- /// \tparam F2 function to process 2 128-bit blocks
- /// \tparam F6 function to process 6 128-bit blocks
- /// \tparam W word type of the subkey table
- /// \details AdvancedProcessBlocks128_6x2_SSE processes 6 and 2 SSE SIMD words
- /// at a time. For a single block the template uses F2 with a zero block.
- /// \details The subkey type is usually word32 or word64. F2 and F6 must use the
- /// same word type.
- template <typename F2, typename F6, typename W>
- inline size_t AdvancedProcessBlocks128_6x2_SSE(F2 func2, F6 func6,
- MAYBE_CONST W *subKeys, size_t rounds, const byte *inBlocks,
- const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
- {
- CRYPTOPP_ASSERT(subKeys);
- CRYPTOPP_ASSERT(inBlocks);
- CRYPTOPP_ASSERT(outBlocks);
- CRYPTOPP_ASSERT(length >= 16);
- const size_t blockSize = 16;
- // const size_t xmmBlockSize = 16;
- size_t inIncrement = (flags & (EnumToInt(BT_InBlockIsCounter)|EnumToInt(BT_DontIncrementInOutPointers))) ? 0 : blockSize;
- size_t xorIncrement = (xorBlocks != NULLPTR) ? blockSize : 0;
- size_t outIncrement = (flags & EnumToInt(BT_DontIncrementInOutPointers)) ? 0 : blockSize;
- // Clang and Coverity are generating findings using xorBlocks as a flag.
- const bool xorInput = (xorBlocks != NULLPTR) && (flags & EnumToInt(BT_XorInput));
- const bool xorOutput = (xorBlocks != NULLPTR) && !(flags & EnumToInt(BT_XorInput));
- if (flags & BT_ReverseDirection)
- {
- inBlocks = PtrAdd(inBlocks, length - blockSize);
- xorBlocks = PtrAdd(xorBlocks, length - blockSize);
- outBlocks = PtrAdd(outBlocks, length - blockSize);
- inIncrement = 0-inIncrement;
- xorIncrement = 0-xorIncrement;
- outIncrement = 0-outIncrement;
- }
- if (flags & BT_AllowParallel)
- {
- while (length >= 6*blockSize)
- {
- __m128i block0, block1, block2, block3, block4, block5;
- if (flags & BT_InBlockIsCounter)
- {
- // Increment of 1 in big-endian compatible with the ctr byte array.
- const __m128i s_one = _mm_set_epi32(1<<24, 0, 0, 0);
- block0 = _mm_loadu_si128(CONST_M128_CAST(inBlocks));
- block1 = _mm_add_epi32(block0, s_one);
- block2 = _mm_add_epi32(block1, s_one);
- block3 = _mm_add_epi32(block2, s_one);
- block4 = _mm_add_epi32(block3, s_one);
- block5 = _mm_add_epi32(block4, s_one);
- _mm_storeu_si128(M128_CAST(inBlocks), _mm_add_epi32(block5, s_one));
- }
- else
- {
- block0 = _mm_loadu_si128(CONST_M128_CAST(inBlocks));
- inBlocks = PtrAdd(inBlocks, inIncrement);
- block1 = _mm_loadu_si128(CONST_M128_CAST(inBlocks));
- inBlocks = PtrAdd(inBlocks, inIncrement);
- block2 = _mm_loadu_si128(CONST_M128_CAST(inBlocks));
- inBlocks = PtrAdd(inBlocks, inIncrement);
- block3 = _mm_loadu_si128(CONST_M128_CAST(inBlocks));
- inBlocks = PtrAdd(inBlocks, inIncrement);
- block4 = _mm_loadu_si128(CONST_M128_CAST(inBlocks));
- inBlocks = PtrAdd(inBlocks, inIncrement);
- block5 = _mm_loadu_si128(CONST_M128_CAST(inBlocks));
- inBlocks = PtrAdd(inBlocks, inIncrement);
- }
- if (xorInput)
- {
- block0 = _mm_xor_si128(block0, _mm_loadu_si128(CONST_M128_CAST(xorBlocks)));
- xorBlocks = PtrAdd(xorBlocks, xorIncrement);
- block1 = _mm_xor_si128(block1, _mm_loadu_si128(CONST_M128_CAST(xorBlocks)));
- xorBlocks = PtrAdd(xorBlocks, xorIncrement);
- block2 = _mm_xor_si128(block2, _mm_loadu_si128(CONST_M128_CAST(xorBlocks)));
- xorBlocks = PtrAdd(xorBlocks, xorIncrement);
- block3 = _mm_xor_si128(block3, _mm_loadu_si128(CONST_M128_CAST(xorBlocks)));
- xorBlocks = PtrAdd(xorBlocks, xorIncrement);
- block4 = _mm_xor_si128(block4, _mm_loadu_si128(CONST_M128_CAST(xorBlocks)));
- xorBlocks = PtrAdd(xorBlocks, xorIncrement);
- block5 = _mm_xor_si128(block5, _mm_loadu_si128(CONST_M128_CAST(xorBlocks)));
- xorBlocks = PtrAdd(xorBlocks, xorIncrement);
- }
- func6(block0, block1, block2, block3, block4, block5, subKeys, static_cast<unsigned int>(rounds));
- if (xorOutput)
- {
- block0 = _mm_xor_si128(block0, _mm_loadu_si128(CONST_M128_CAST(xorBlocks)));
- xorBlocks = PtrAdd(xorBlocks, xorIncrement);
- block1 = _mm_xor_si128(block1, _mm_loadu_si128(CONST_M128_CAST(xorBlocks)));
- xorBlocks = PtrAdd(xorBlocks, xorIncrement);
- block2 = _mm_xor_si128(block2, _mm_loadu_si128(CONST_M128_CAST(xorBlocks)));
- xorBlocks = PtrAdd(xorBlocks, xorIncrement);
- block3 = _mm_xor_si128(block3, _mm_loadu_si128(CONST_M128_CAST(xorBlocks)));
- xorBlocks = PtrAdd(xorBlocks, xorIncrement);
- block4 = _mm_xor_si128(block4, _mm_loadu_si128(CONST_M128_CAST(xorBlocks)));
- xorBlocks = PtrAdd(xorBlocks, xorIncrement);
- block5 = _mm_xor_si128(block5, _mm_loadu_si128(CONST_M128_CAST(xorBlocks)));
- xorBlocks = PtrAdd(xorBlocks, xorIncrement);
- }
- _mm_storeu_si128(M128_CAST(outBlocks), block0);
- outBlocks = PtrAdd(outBlocks, outIncrement);
- _mm_storeu_si128(M128_CAST(outBlocks), block1);
- outBlocks = PtrAdd(outBlocks, outIncrement);
- _mm_storeu_si128(M128_CAST(outBlocks), block2);
- outBlocks = PtrAdd(outBlocks, outIncrement);
- _mm_storeu_si128(M128_CAST(outBlocks), block3);
- outBlocks = PtrAdd(outBlocks, outIncrement);
- _mm_storeu_si128(M128_CAST(outBlocks), block4);
- outBlocks = PtrAdd(outBlocks, outIncrement);
- _mm_storeu_si128(M128_CAST(outBlocks), block5);
- outBlocks = PtrAdd(outBlocks, outIncrement);
- length -= 6*blockSize;
- }
- while (length >= 2*blockSize)
- {
- __m128i block0, block1;
- if (flags & BT_InBlockIsCounter)
- {
- // Increment of 1 in big-endian compatible with the ctr byte array.
- const __m128i s_one = _mm_set_epi32(1<<24, 0, 0, 0);
- block0 = _mm_loadu_si128(CONST_M128_CAST(inBlocks));
- block1 = _mm_add_epi32(block0, s_one);
- _mm_storeu_si128(M128_CAST(inBlocks), _mm_add_epi32(block1, s_one));
- }
- else
- {
- block0 = _mm_loadu_si128(CONST_M128_CAST(inBlocks));
- inBlocks = PtrAdd(inBlocks, inIncrement);
- block1 = _mm_loadu_si128(CONST_M128_CAST(inBlocks));
- inBlocks = PtrAdd(inBlocks, inIncrement);
- }
- if (xorInput)
- {
- block0 = _mm_xor_si128(block0, _mm_loadu_si128(CONST_M128_CAST(xorBlocks)));
- xorBlocks = PtrAdd(xorBlocks, xorIncrement);
- block1 = _mm_xor_si128(block1, _mm_loadu_si128(CONST_M128_CAST(xorBlocks)));
- xorBlocks = PtrAdd(xorBlocks, xorIncrement);
- }
- func2(block0, block1, subKeys, static_cast<unsigned int>(rounds));
- if (xorOutput)
- {
- block0 = _mm_xor_si128(block0, _mm_loadu_si128(CONST_M128_CAST(xorBlocks)));
- xorBlocks = PtrAdd(xorBlocks, xorIncrement);
- block1 = _mm_xor_si128(block1, _mm_loadu_si128(CONST_M128_CAST(xorBlocks)));
- xorBlocks = PtrAdd(xorBlocks, xorIncrement);
- }
- _mm_storeu_si128(M128_CAST(outBlocks), block0);
- outBlocks = PtrAdd(outBlocks, outIncrement);
- _mm_storeu_si128(M128_CAST(outBlocks), block1);
- outBlocks = PtrAdd(outBlocks, outIncrement);
- length -= 2*blockSize;
- }
- }
- while (length >= blockSize)
- {
- __m128i block, zero = _mm_setzero_si128();
- block = _mm_loadu_si128(CONST_M128_CAST(inBlocks));
- if (xorInput)
- block = _mm_xor_si128(block, _mm_loadu_si128(CONST_M128_CAST(xorBlocks)));
- if (flags & BT_InBlockIsCounter)
- const_cast<byte *>(inBlocks)[15]++;
- func2(block, zero, subKeys, static_cast<unsigned int>(rounds));
- if (xorOutput)
- block = _mm_xor_si128(block, _mm_loadu_si128(CONST_M128_CAST(xorBlocks)));
- _mm_storeu_si128(M128_CAST(outBlocks), block);
- inBlocks = PtrAdd(inBlocks, inIncrement);
- outBlocks = PtrAdd(outBlocks, outIncrement);
- xorBlocks = PtrAdd(xorBlocks, xorIncrement);
- length -= blockSize;
- }
- return length;
- }
- /// \brief AdvancedProcessBlocks for 1 and 4 blocks
- /// \tparam F1 function to process 1 128-bit block
- /// \tparam F4 function to process 4 128-bit blocks
- /// \tparam W word type of the subkey table
- /// \details AdvancedProcessBlocks128_4x1_SSE processes 4 and 1 SSE SIMD words
- /// at a time.
- /// \details The subkey type is usually word32 or word64. F1 and F4 must use the
- /// same word type.
template <typename F1, typename F4, typename W>
inline size_t AdvancedProcessBlocks128_4x1_SSE(F1 func1, F4 func4,
    MAYBE_CONST W *subKeys, size_t rounds, const byte *inBlocks,
    const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
{
    CRYPTOPP_ASSERT(subKeys);
    CRYPTOPP_ASSERT(inBlocks);
    CRYPTOPP_ASSERT(outBlocks);
    CRYPTOPP_ASSERT(length >= 16);

    const size_t blockSize = 16;
    // const size_t xmmBlockSize = 16;

    // Counter mode (BT_InBlockIsCounter) and BT_DontIncrementInOutPointers
    // both pin the input pointer in place; otherwise advance one block.
    size_t inIncrement = (flags & (EnumToInt(BT_InBlockIsCounter)|EnumToInt(BT_DontIncrementInOutPointers))) ? 0 : blockSize;
    size_t xorIncrement = (xorBlocks != NULLPTR) ? blockSize : 0;
    size_t outIncrement = (flags & EnumToInt(BT_DontIncrementInOutPointers)) ? 0 : blockSize;

    // Clang and Coverity are generating findings using xorBlocks as a flag.
    const bool xorInput = (xorBlocks != NULLPTR) && (flags & EnumToInt(BT_XorInput));
    const bool xorOutput = (xorBlocks != NULLPTR) && !(flags & EnumToInt(BT_XorInput));

    if (flags & BT_ReverseDirection)
    {
        // Start at the last block and walk backwards. Negating a size_t
        // relies on unsigned wraparound so PtrAdd steps the pointers down.
        // NOTE(review): xorBlocks is also offset here even when NULL —
        // presumably PtrAdd tolerates a null base; verify against its definition.
        inBlocks = PtrAdd(inBlocks, length - blockSize);
        xorBlocks = PtrAdd(xorBlocks, length - blockSize);
        outBlocks = PtrAdd(outBlocks, length - blockSize);
        inIncrement = 0-inIncrement;
        xorIncrement = 0-xorIncrement;
        outIncrement = 0-outIncrement;
    }

    if (flags & BT_AllowParallel)
    {
        // 4-block kernel: peel off as many groups of four as possible.
        while (length >= 4*blockSize)
        {
            __m128i block0, block1, block2, block3;
            if (flags & BT_InBlockIsCounter)
            {
                // Increment of 1 in big-endian compatible with the ctr byte array.
                const __m128i s_one = _mm_set_epi32(1<<24, 0, 0, 0);
                block0 = _mm_loadu_si128(CONST_M128_CAST(inBlocks));
                block1 = _mm_add_epi32(block0, s_one);
                block2 = _mm_add_epi32(block1, s_one);
                block3 = _mm_add_epi32(block2, s_one);
                // Write back ctr+4 so the next iteration (or the caller)
                // sees the counter advanced past the four blocks used here.
                _mm_storeu_si128(M128_CAST(inBlocks), _mm_add_epi32(block3, s_one));
            }
            else
            {
                block0 = _mm_loadu_si128(CONST_M128_CAST(inBlocks));
                inBlocks = PtrAdd(inBlocks, inIncrement);
                block1 = _mm_loadu_si128(CONST_M128_CAST(inBlocks));
                inBlocks = PtrAdd(inBlocks, inIncrement);
                block2 = _mm_loadu_si128(CONST_M128_CAST(inBlocks));
                inBlocks = PtrAdd(inBlocks, inIncrement);
                block3 = _mm_loadu_si128(CONST_M128_CAST(inBlocks));
                inBlocks = PtrAdd(inBlocks, inIncrement);
            }

            // XOR the mask blocks in before encryption (e.g. CBC encrypt).
            if (xorInput)
            {
                block0 = _mm_xor_si128(block0, _mm_loadu_si128(CONST_M128_CAST(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block1 = _mm_xor_si128(block1, _mm_loadu_si128(CONST_M128_CAST(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block2 = _mm_xor_si128(block2, _mm_loadu_si128(CONST_M128_CAST(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block3 = _mm_xor_si128(block3, _mm_loadu_si128(CONST_M128_CAST(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
            }

            func4(block0, block1, block2, block3, subKeys, static_cast<unsigned int>(rounds));

            // XOR the mask blocks in after encryption (e.g. CTR, CBC decrypt).
            if (xorOutput)
            {
                block0 = _mm_xor_si128(block0, _mm_loadu_si128(CONST_M128_CAST(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block1 = _mm_xor_si128(block1, _mm_loadu_si128(CONST_M128_CAST(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block2 = _mm_xor_si128(block2, _mm_loadu_si128(CONST_M128_CAST(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block3 = _mm_xor_si128(block3, _mm_loadu_si128(CONST_M128_CAST(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
            }

            _mm_storeu_si128(M128_CAST(outBlocks), block0);
            outBlocks = PtrAdd(outBlocks, outIncrement);
            _mm_storeu_si128(M128_CAST(outBlocks), block1);
            outBlocks = PtrAdd(outBlocks, outIncrement);
            _mm_storeu_si128(M128_CAST(outBlocks), block2);
            outBlocks = PtrAdd(outBlocks, outIncrement);
            _mm_storeu_si128(M128_CAST(outBlocks), block3);
            outBlocks = PtrAdd(outBlocks, outIncrement);

            length -= 4*blockSize;
        }
    }

    // Single-block tail (also the whole path when parallelism is disallowed).
    while (length >= blockSize)
    {
        __m128i block = _mm_loadu_si128(CONST_M128_CAST(inBlocks));

        if (xorInput)
            block = _mm_xor_si128(block, _mm_loadu_si128(CONST_M128_CAST(xorBlocks)));

        // One block consumed: bump the last counter byte by one. Carry into
        // byte 14 is handled by the caller (CTR_ModePolicy) on wrap.
        if (flags & BT_InBlockIsCounter)
            const_cast<byte *>(inBlocks)[15]++;

        func1(block, subKeys, static_cast<unsigned int>(rounds));

        if (xorOutput)
            block = _mm_xor_si128(block, _mm_loadu_si128(CONST_M128_CAST(xorBlocks)));

        _mm_storeu_si128(M128_CAST(outBlocks), block);

        inBlocks = PtrAdd(inBlocks, inIncrement);
        outBlocks = PtrAdd(outBlocks, outIncrement);
        xorBlocks = PtrAdd(xorBlocks, xorIncrement);
        length -= blockSize;
    }

    // Residual bytes smaller than one block are left for the caller.
    return length;
}
- NAMESPACE_END // CryptoPP
- #endif // CRYPTOPP_SSSE3_AVAILABLE
- // ************************** Altivec/Power 4 ************************** //
- #if defined(__ALTIVEC__) || defined(CRYPTOPP_DOXYGEN_PROCESSING)
- NAMESPACE_BEGIN(CryptoPP)
- /// \brief AdvancedProcessBlocks for 1 and 4 blocks
- /// \tparam F1 function to process 1 128-bit block
- /// \tparam F4 function to process 4 128-bit blocks
- /// \tparam W word type of the subkey table
- /// \details AdvancedProcessBlocks128_4x1_ALTIVEC processes 4 and 1 Altivec SIMD words
- /// at a time.
- /// \details The subkey type is usually word32 or word64. F1 and F4 must use the
- /// same word type.
- template <typename F1, typename F4, typename W>
- inline size_t AdvancedProcessBlocks128_4x1_ALTIVEC(F1 func1, F4 func4,
- const W *subKeys, size_t rounds, const byte *inBlocks,
- const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
- {
- CRYPTOPP_ASSERT(subKeys);
- CRYPTOPP_ASSERT(inBlocks);
- CRYPTOPP_ASSERT(outBlocks);
- CRYPTOPP_ASSERT(length >= 16);
- #if (CRYPTOPP_LITTLE_ENDIAN)
- const uint32x4_p s_one = {1,0,0,0};
- #else
- const uint32x4_p s_one = {0,0,0,1};
- #endif
- const size_t blockSize = 16;
- // const size_t simdBlockSize = 16;
- size_t inIncrement = (flags & (EnumToInt(BT_InBlockIsCounter)|EnumToInt(BT_DontIncrementInOutPointers))) ? 0 : blockSize;
- size_t xorIncrement = (xorBlocks != NULLPTR) ? blockSize : 0;
- size_t outIncrement = (flags & EnumToInt(BT_DontIncrementInOutPointers)) ? 0 : blockSize;
- // Clang and Coverity are generating findings using xorBlocks as a flag.
- const bool xorInput = (xorBlocks != NULLPTR) && (flags & EnumToInt(BT_XorInput));
- const bool xorOutput = (xorBlocks != NULLPTR) && !(flags & EnumToInt(BT_XorInput));
- if (flags & BT_ReverseDirection)
- {
- inBlocks = PtrAdd(inBlocks, length - blockSize);
- xorBlocks = PtrAdd(xorBlocks, length - blockSize);
- outBlocks = PtrAdd(outBlocks, length - blockSize);
- inIncrement = 0-inIncrement;
- xorIncrement = 0-xorIncrement;
- outIncrement = 0-outIncrement;
- }
- if (flags & BT_AllowParallel)
- {
- while (length >= 4*blockSize)
- {
- uint32x4_p block0, block1, block2, block3;
- if (flags & BT_InBlockIsCounter)
- {
- block0 = VecLoadBE(inBlocks);
- block1 = VecAdd(block0, s_one);
- block2 = VecAdd(block1, s_one);
- block3 = VecAdd(block2, s_one);
- // Hack due to big-endian loads used by POWER8 (and maybe ARM-BE).
- // CTR_ModePolicy::OperateKeystream is wired such that after
- // returning from this function CTR_ModePolicy will detect wrap on
- // on the last counter byte and increment the next to last byte.
- // The problem is, with a big-endian load, inBlocks[15] is really
- // located at index 15. The vector addition using a 32-bit element
- // generates a carry into inBlocks[14] and then CTR_ModePolicy
- // increments inBlocks[14] too.
- const_cast<byte*>(inBlocks)[15] += 6;
- }
- else
- {
- block0 = VecLoadBE(inBlocks);
- inBlocks = PtrAdd(inBlocks, inIncrement);
- block1 = VecLoadBE(inBlocks);
- inBlocks = PtrAdd(inBlocks, inIncrement);
- block2 = VecLoadBE(inBlocks);
- inBlocks = PtrAdd(inBlocks, inIncrement);
- block3 = VecLoadBE(inBlocks);
- inBlocks = PtrAdd(inBlocks, inIncrement);
- }
- if (xorInput)
- {
- block0 = VecXor(block0, VecLoadBE(xorBlocks));
- xorBlocks = PtrAdd(xorBlocks, xorIncrement);
- block1 = VecXor(block1, VecLoadBE(xorBlocks));
- xorBlocks = PtrAdd(xorBlocks, xorIncrement);
- block2 = VecXor(block2, VecLoadBE(xorBlocks));
- xorBlocks = PtrAdd(xorBlocks, xorIncrement);
- block3 = VecXor(block3, VecLoadBE(xorBlocks));
- xorBlocks = PtrAdd(xorBlocks, xorIncrement);
- }
- func4(block0, block1, block2, block3, subKeys, rounds);
- if (xorOutput)
- {
- block0 = VecXor(block0, VecLoadBE(xorBlocks));
- xorBlocks = PtrAdd(xorBlocks, xorIncrement);
- block1 = VecXor(block1, VecLoadBE(xorBlocks));
- xorBlocks = PtrAdd(xorBlocks, xorIncrement);
- block2 = VecXor(block2, VecLoadBE(xorBlocks));
- xorBlocks = PtrAdd(xorBlocks, xorIncrement);
- block3 = VecXor(block3, VecLoadBE(xorBlocks));
- xorBlocks = PtrAdd(xorBlocks, xorIncrement);
- }
- VecStoreBE(block0, outBlocks);
- outBlocks = PtrAdd(outBlocks, outIncrement);
- VecStoreBE(block1, outBlocks);
- outBlocks = PtrAdd(outBlocks, outIncrement);
- VecStoreBE(block2, outBlocks);
- outBlocks = PtrAdd(outBlocks, outIncrement);
- VecStoreBE(block3, outBlocks);
- outBlocks = PtrAdd(outBlocks, outIncrement);
- length -= 4*blockSize;
- }
- }
- while (length >= blockSize)
- {
- uint32x4_p block = VecLoadBE(inBlocks);
- if (xorInput)
- block = VecXor(block, VecLoadBE(xorBlocks));
- if (flags & BT_InBlockIsCounter)
- const_cast<byte *>(inBlocks)[15]++;
- func1(block, subKeys, rounds);
- if (xorOutput)
- block = VecXor(block, VecLoadBE(xorBlocks));
- VecStoreBE(block, outBlocks);
- inBlocks = PtrAdd(inBlocks, inIncrement);
- outBlocks = PtrAdd(outBlocks, outIncrement);
- xorBlocks = PtrAdd(xorBlocks, xorIncrement);
- length -= blockSize;
- }
- return length;
- }
- /// \brief AdvancedProcessBlocks for 1 and 6 blocks
- /// \tparam F1 function to process 1 128-bit block
- /// \tparam F6 function to process 6 128-bit blocks
- /// \tparam W word type of the subkey table
- /// \details AdvancedProcessBlocks128_6x1_ALTIVEC processes 6 and 1 Altivec SIMD words
- /// at a time.
- /// \details The subkey type is usually word32 or word64. F1 and F6 must use the
- /// same word type.
template <typename F1, typename F6, typename W>
inline size_t AdvancedProcessBlocks128_6x1_ALTIVEC(F1 func1, F6 func6,
    const W *subKeys, size_t rounds, const byte *inBlocks,
    const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
{
    CRYPTOPP_ASSERT(subKeys);
    CRYPTOPP_ASSERT(inBlocks);
    CRYPTOPP_ASSERT(outBlocks);
    CRYPTOPP_ASSERT(length >= 16);

    // A value of 1 placed so that a 32-bit element VecAdd increments the
    // big-endian counter loaded by VecLoadBE.
#if (CRYPTOPP_LITTLE_ENDIAN)
    const uint32x4_p s_one = {1,0,0,0};
#else
    const uint32x4_p s_one = {0,0,0,1};
#endif

    const size_t blockSize = 16;
    // const size_t simdBlockSize = 16;

    // Counter mode (BT_InBlockIsCounter) and BT_DontIncrementInOutPointers
    // both pin the input pointer in place; otherwise advance one block.
    size_t inIncrement = (flags & (EnumToInt(BT_InBlockIsCounter)|EnumToInt(BT_DontIncrementInOutPointers))) ? 0 : blockSize;
    size_t xorIncrement = (xorBlocks != NULLPTR) ? blockSize : 0;
    size_t outIncrement = (flags & EnumToInt(BT_DontIncrementInOutPointers)) ? 0 : blockSize;

    // Clang and Coverity are generating findings using xorBlocks as a flag.
    const bool xorInput = (xorBlocks != NULLPTR) && (flags & EnumToInt(BT_XorInput));
    const bool xorOutput = (xorBlocks != NULLPTR) && !(flags & EnumToInt(BT_XorInput));

    if (flags & BT_ReverseDirection)
    {
        // Start at the last block and walk backwards. Negating a size_t
        // relies on unsigned wraparound so PtrAdd steps the pointers down.
        inBlocks = PtrAdd(inBlocks, length - blockSize);
        xorBlocks = PtrAdd(xorBlocks, length - blockSize);
        outBlocks = PtrAdd(outBlocks, length - blockSize);
        inIncrement = 0-inIncrement;
        xorIncrement = 0-xorIncrement;
        outIncrement = 0-outIncrement;
    }

    if (flags & BT_AllowParallel)
    {
        // 6-block kernel: peel off as many groups of six as possible.
        while (length >= 6*blockSize)
        {
            uint32x4_p block0, block1, block2, block3, block4, block5;
            if (flags & BT_InBlockIsCounter)
            {
                block0 = VecLoadBE(inBlocks);
                block1 = VecAdd(block0, s_one);
                block2 = VecAdd(block1, s_one);
                block3 = VecAdd(block2, s_one);
                block4 = VecAdd(block3, s_one);
                block5 = VecAdd(block4, s_one);

                // Hack due to big-endian loads used by POWER8 (and maybe ARM-BE).
                // CTR_ModePolicy::OperateKeystream is wired such that after
                // returning from this function CTR_ModePolicy will detect wrap
                // on the last counter byte and increment the next to last byte.
                // The problem is, with a big-endian load, inBlocks[15] is really
                // located at index 15. The vector addition using a 32-bit element
                // generates a carry into inBlocks[14] and then CTR_ModePolicy
                // increments inBlocks[14] too.
                //
                // To find this bug we needed a test case with a ctr of 0xNN...FA.
                // The last octet is 0xFA and adding 6 creates the wrap to trigger
                // the issue. If the last octet was 0xFC then 4 would trigger it.
                // We dumb-lucked into the test with SPECK-128. The test case of
                // interest is the one with IV 348ECA9766C09F04 826520DE47A212FA.
                //
                // Casting both operands to uint8x16_p makes the add byte-wise,
                // so block5 + 1 (= ctr + 6) is stored without the 32-bit carry,
                // leaving the wrap for CTR_ModePolicy to propagate.
                uint8x16_p temp = VecAdd((uint8x16_p)block5, (uint8x16_p)s_one);
                VecStoreBE(temp, const_cast<byte*>(inBlocks));
            }
            else
            {
                block0 = VecLoadBE(inBlocks);
                inBlocks = PtrAdd(inBlocks, inIncrement);
                block1 = VecLoadBE(inBlocks);
                inBlocks = PtrAdd(inBlocks, inIncrement);
                block2 = VecLoadBE(inBlocks);
                inBlocks = PtrAdd(inBlocks, inIncrement);
                block3 = VecLoadBE(inBlocks);
                inBlocks = PtrAdd(inBlocks, inIncrement);
                block4 = VecLoadBE(inBlocks);
                inBlocks = PtrAdd(inBlocks, inIncrement);
                block5 = VecLoadBE(inBlocks);
                inBlocks = PtrAdd(inBlocks, inIncrement);
            }

            // XOR the mask blocks in before encryption (e.g. CBC encrypt).
            if (xorInput)
            {
                block0 = VecXor(block0, VecLoadBE(xorBlocks));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block1 = VecXor(block1, VecLoadBE(xorBlocks));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block2 = VecXor(block2, VecLoadBE(xorBlocks));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block3 = VecXor(block3, VecLoadBE(xorBlocks));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block4 = VecXor(block4, VecLoadBE(xorBlocks));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block5 = VecXor(block5, VecLoadBE(xorBlocks));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
            }

            func6(block0, block1, block2, block3, block4, block5, subKeys, rounds);

            // XOR the mask blocks in after encryption (e.g. CTR, CBC decrypt).
            if (xorOutput)
            {
                block0 = VecXor(block0, VecLoadBE(xorBlocks));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block1 = VecXor(block1, VecLoadBE(xorBlocks));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block2 = VecXor(block2, VecLoadBE(xorBlocks));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block3 = VecXor(block3, VecLoadBE(xorBlocks));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block4 = VecXor(block4, VecLoadBE(xorBlocks));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block5 = VecXor(block5, VecLoadBE(xorBlocks));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
            }

            VecStoreBE(block0, outBlocks);
            outBlocks = PtrAdd(outBlocks, outIncrement);
            VecStoreBE(block1, outBlocks);
            outBlocks = PtrAdd(outBlocks, outIncrement);
            VecStoreBE(block2, outBlocks);
            outBlocks = PtrAdd(outBlocks, outIncrement);
            VecStoreBE(block3, outBlocks);
            outBlocks = PtrAdd(outBlocks, outIncrement);
            VecStoreBE(block4, outBlocks);
            outBlocks = PtrAdd(outBlocks, outIncrement);
            VecStoreBE(block5, outBlocks);
            outBlocks = PtrAdd(outBlocks, outIncrement);

            length -= 6*blockSize;
        }
    }

    // Single-block tail (also the whole path when parallelism is disallowed).
    while (length >= blockSize)
    {
        uint32x4_p block = VecLoadBE(inBlocks);

        if (xorInput)
            block = VecXor(block, VecLoadBE(xorBlocks));

        // One block consumed: bump the last counter byte by one. Carry into
        // byte 14 is handled by the caller (CTR_ModePolicy) on wrap.
        if (flags & BT_InBlockIsCounter)
            const_cast<byte *>(inBlocks)[15]++;

        func1(block, subKeys, rounds);

        if (xorOutput)
            block = VecXor(block, VecLoadBE(xorBlocks));

        VecStoreBE(block, outBlocks);

        inBlocks = PtrAdd(inBlocks, inIncrement);
        outBlocks = PtrAdd(outBlocks, outIncrement);
        xorBlocks = PtrAdd(xorBlocks, xorIncrement);
        length -= blockSize;
    }

    // Residual bytes smaller than one block are left for the caller.
    return length;
}
- NAMESPACE_END // CryptoPP
- #endif // __ALTIVEC__
- #endif // CRYPTOPP_ADVANCED_SIMD_TEMPLATES
|