#if defined(CRYPTOPP_DISABLE_GCM_ASM)
# undef CRYPTOPP_X86_ASM_AVAILABLE
# undef CRYPTOPP_X32_ASM_AVAILABLE
# undef CRYPTOPP_X64_ASM_AVAILABLE
# undef CRYPTOPP_SSE2_ASM_AVAILABLE
#endif

#if (CRYPTOPP_SSE2_INTRIN_AVAILABLE)
# include <emmintrin.h>
# include <xmmintrin.h>
#endif

#if (CRYPTOPP_CLMUL_AVAILABLE)
# include <tmmintrin.h>
# include <wmmintrin.h>
#endif

#if (CRYPTOPP_ARM_NEON_HEADER)
# include <arm_neon.h>
#endif

#if (CRYPTOPP_ARM_ACLE_HEADER)
# include <arm_acle.h>
#endif

#if defined(CRYPTOPP_ARM_PMULL_AVAILABLE)
# include "arm_simd.h"
#endif

#if defined(CRYPTOPP_ALTIVEC_AVAILABLE)
# include "ppc_simd.h"
#endif

#ifdef CRYPTOPP_GNU_STYLE_INLINE_ASSEMBLY
# include <signal.h>
# include <setjmp.h>
#endif

#ifndef EXCEPTION_EXECUTE_HANDLER
# define EXCEPTION_EXECUTE_HANDLER 1
#endif

// Cast helpers for the x86 intrinsics
#define M128_CAST(x) ((__m128i *)(void *)(x))
#define CONST_M128_CAST(x) ((const __m128i *)(const void *)(x))

// Squash MS LNK4221 and libtool warnings
extern const char GCM_SIMD_FNAME[] = __FILE__;
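
// The feature probes below run a candidate instruction inside a guarded region:
// structured exception handling under MSVC-style inline assembly, or a SIGILL
// handler with setjmp/longjmp under GNU-style inline assembly. If the
// instruction faults, the probe reports the feature as unavailable.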
#ifdef CRYPTOPP_GNU_STYLE_INLINE_ASSEMBLY

typedef void (*SigHandler)(int);

static jmp_buf s_jmpSIGILL;
static void SigIllHandler(int)
{
    longjmp(s_jmpSIGILL, 1);
}

#endif // Not CRYPTOPP_MS_STYLE_INLINE_ASSEMBLY
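
// CPU_ProbePMULL answers whether the 64x64 -> 128-bit carryless multiply
// (ARMv8 PMULL/PMULL2) actually executes on this CPU, by multiplying fixed
// polynomials and checking the products against known answers.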
#if (CRYPTOPP_BOOL_ARM32 || CRYPTOPP_BOOL_ARMV8)
bool CPU_ProbePMULL()
{
#if defined(CRYPTOPP_NO_CPU_FEATURE_PROBES)
    return false;
#elif (CRYPTOPP_ARM_PMULL_AVAILABLE)
# if defined(CRYPTOPP_MS_STYLE_INLINE_ASSEMBLY)
    volatile bool result = true;
    __try
    {
        // Known-answer test inputs
        const uint64_t wa1[]={0,0x9090909090909090}, wb1[]={0,0xb0b0b0b0b0b0b0b0};
        const uint64x2_t a1=vld1q_u64(wa1), b1=vld1q_u64(wb1);

        const uint8_t wa2[]={0x80,0x80,0x80,0x80,0x80,0x80,0x80,0x80,
                             0xa0,0xa0,0xa0,0xa0,0xa0,0xa0,0xa0,0xa0},
                      wb2[]={0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,
                             0xe0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0};
        const uint8x16_t a2=vld1q_u8(wa2), b2=vld1q_u8(wb2);

        const uint64x2_t r1 = PMULL_00(a1, b1);
        const uint64x2_t r2 = PMULL_11(vreinterpretq_u64_u8(a2), vreinterpretq_u64_u8(b2));

        result = !!(vgetq_lane_u64(r1,0) == 0x5300530053005300 &&
                    vgetq_lane_u64(r1,1) == 0x5300530053005300 &&
                    vgetq_lane_u64(r2,0) == 0x6c006c006c006c00 &&
                    vgetq_lane_u64(r2,1) == 0x6c006c006c006c00);
    }
    __except (EXCEPTION_EXECUTE_HANDLER)
    {
        return false;
    }
    return result;
# else
    // longjmp and clobber warnings. Volatile is required.
    volatile bool result = true;

    volatile SigHandler oldHandler = signal(SIGILL, SigIllHandler);
    if (oldHandler == SIG_ERR)
        return false;

    volatile sigset_t oldMask;
    if (sigprocmask(0, NULLPTR, (sigset_t*)&oldMask))
    {
        signal(SIGILL, oldHandler);
        return false;
    }

    if (setjmp(s_jmpSIGILL))
        result = false;
    else
    {
        // Known-answer test inputs
        const uint64_t wa1[]={0,0x9090909090909090}, wb1[]={0,0xb0b0b0b0b0b0b0b0};
        const uint64x2_t a1=vld1q_u64(wa1), b1=vld1q_u64(wb1);

        const uint8_t wa2[]={0x80,0x80,0x80,0x80,0x80,0x80,0x80,0x80,
                             0xa0,0xa0,0xa0,0xa0,0xa0,0xa0,0xa0,0xa0},
                      wb2[]={0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,
                             0xe0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0};
        const uint8x16_t a2=vld1q_u8(wa2), b2=vld1q_u8(wb2);

        const uint64x2_t r1 = PMULL_00(a1, b1);
        const uint64x2_t r2 = PMULL_11(vreinterpretq_u64_u8(a2), vreinterpretq_u64_u8(b2));

        result = !!(vgetq_lane_u64(r1,0) == 0x5300530053005300 &&
                    vgetq_lane_u64(r1,1) == 0x5300530053005300 &&
                    vgetq_lane_u64(r2,0) == 0x6c006c006c006c00 &&
                    vgetq_lane_u64(r2,1) == 0x6c006c006c006c00);
    }

    sigprocmask(SIG_SETMASK, (sigset_t*)&oldMask, NULLPTR);
    signal(SIGILL, oldHandler);
    return result;
# endif
#else
    return false;
#endif // CRYPTOPP_ARM_PMULL_AVAILABLE
}
#endif // ARM32 or ARM64
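
// The POWER8 probe follows the same pattern: run the polynomial multiply under
// a SIGILL guard and compare against the expected products wc1 and wc2.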
#if (CRYPTOPP_BOOL_PPC32 || CRYPTOPP_BOOL_PPC64)
bool CPU_ProbePMULL()
{
#if defined(CRYPTOPP_NO_CPU_FEATURE_PROBES)
    return false;
#elif (CRYPTOPP_POWER8_VMULL_AVAILABLE)
    // longjmp and clobber warnings. Volatile is required.
    volatile bool result = true;

    volatile SigHandler oldHandler = signal(SIGILL, SigIllHandler);
    if (oldHandler == SIG_ERR)
        return false;

    volatile sigset_t oldMask;
    if (sigprocmask(0, NULLPTR, (sigset_t*)&oldMask))
    {
        signal(SIGILL, oldHandler);
        return false;
    }

    if (setjmp(s_jmpSIGILL))
        result = false;

    // Known-answer test inputs
    const uint64_t wa1[]={0, W64LIT(0x9090909090909090)},
                   wb1[]={0, W64LIT(0xb0b0b0b0b0b0b0b0)};

    const uint8_t wa2[]={0x80,0x80,0x80,0x80,0x80,0x80,0x80,0x80,
                         0xa0,0xa0,0xa0,0xa0,0xa0,0xa0,0xa0,0xa0},
                  wb2[]={0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,
                         0xe0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0};

    // Expected products for the known-answer comparison
    const uint64_t wc1[]={W64LIT(0x5300530053005300), W64LIT(0x5300530053005300)},
                   wc2[]={W64LIT(0x6c006c006c006c00), W64LIT(0x6c006c006c006c00)};

    sigprocmask(SIG_SETMASK, (sigset_t*)&oldMask, NULLPTR);
    signal(SIGILL, oldHandler);
    return result;
#else
    return false;
#endif // CRYPTOPP_POWER8_VMULL_AVAILABLE
}
#endif // PPC32 or PPC64
#if CRYPTOPP_ARM_NEON_AVAILABLE
void GCM_Xor16_NEON(byte *a, const byte *b, const byte *c)
{
    // a = b ^ c, one 16-byte block
    vst1q_u8(a, veorq_u8(vld1q_u8(b), vld1q_u8(c)));
}
#endif // CRYPTOPP_ARM_NEON_AVAILABLE
#if CRYPTOPP_ARM_PMULL_AVAILABLE
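
// The PMULL routines below implement GHASH: multiplication in GF(2^128) modulo
// x^128 + x^7 + x^2 + x + 1, using the bit-reflected representation GCM
// specifies. PMULL_00/01/10/11 select which 64-bit halves of the operands are
// multiplied.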
inline uint64x2_t SwapWords(const uint64x2_t& data)
{
    // Swap the high and low 64-bit halves
    return (uint64x2_t)vcombine_u64(
        vget_high_u64(data), vget_low_u64(data));
}
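
// GCM_Reduce_PMULL folds the 256-bit product c0*x^128 + c1*x^64 + c2 back into
// 128 bits modulo the GCM polynomial. The constant r packs the two reduction
// constants (0xe1... and 0xc2...) used by the folding multiplies.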
uint64x2_t GCM_Reduce_PMULL(uint64x2_t c0, uint64x2_t c1, uint64x2_t c2, const uint64x2_t &r)
{
    c1 = veorq_u64(c1, VEXT_U8<8>(vdupq_n_u64(0), c0));
    c1 = veorq_u64(c1, PMULL_01(c0, r));
    c0 = VEXT_U8<8>(c0, vdupq_n_u64(0));
    c0 = vshlq_n_u64(veorq_u64(c0, c1), 1);
    // Fold the shifted term with the low half of r (mirrors the CLMUL path below)
    c0 = PMULL_00(c0, r);
    c2 = veorq_u64(c2, c0);
    c2 = veorq_u64(c2, VEXT_U8<8>(c1, vdupq_n_u64(0)));
    c1 = vshrq_n_u64(vcombine_u64(vget_low_u64(c1), vget_low_u64(c2)), 63);
    c2 = vshlq_n_u64(c2, 1);

    return veorq_u64(c2, c1);
}
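
// GCM_Multiply_PMULL forms the full 128x128 carryless product as three partial
// products (low*low, the XOR of the two cross terms, high*high) and then reduces.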
uint64x2_t GCM_Multiply_PMULL(const uint64x2_t &x, const uint64x2_t &h, const uint64x2_t &r)
{
    const uint64x2_t c0 = PMULL_00(x, h);
    // Cross terms, as in the CLMUL path below
    const uint64x2_t c1 = veorq_u64(PMULL_10(x, h), PMULL_01(x, h));
    const uint64x2_t c2 = PMULL_11(x, h);

    return GCM_Reduce_PMULL(c0, c1, c2, r);
}
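
// GCM_SetKeyWithoutResync_PMULL byte-reverses the hash subkey H into the
// bit-reflected form and precomputes a table of powers of H in an interleaved
// layout, which GCM_AuthenticateBlocks_PMULL consumes below.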
void GCM_SetKeyWithoutResync_PMULL(const byte *hashKey, byte *mulTable, unsigned int tableSize)
{
    const uint64x2_t r = {0xe100000000000000ull, 0xc200000000000000ull};
    const uint64x2_t t = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(hashKey)));
    const uint64x2_t h0 = vextq_u64(t, t, 1);

    uint64x2_t h = h0;
    unsigned int i;
    for (i=0; i<tableSize-32; i+=32)
    {
        const uint64x2_t h1 = GCM_Multiply_PMULL(h, h0, r);
        vst1_u64((uint64_t *)(mulTable+i), vget_low_u64(h));
        vst1q_u64((uint64_t *)(mulTable+i+16), h1);
        vst1q_u64((uint64_t *)(mulTable+i+8), h);
        vst1_u64((uint64_t *)(mulTable+i+8), vget_low_u64(h1));
        h = GCM_Multiply_PMULL(h1, h0, r);
    }

    const uint64x2_t h1 = GCM_Multiply_PMULL(h, h0, r);
    vst1_u64((uint64_t *)(mulTable+i), vget_low_u64(h));
    vst1q_u64((uint64_t *)(mulTable+i+16), h1);
    vst1q_u64((uint64_t *)(mulTable+i+8), h);
    vst1_u64((uint64_t *)(mulTable+i+8), vget_low_u64(h1));
}
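
// GCM_AuthenticateBlocks_PMULL absorbs the data in batches of 16-byte blocks,
// accumulating partial products in c0/c1/c2 against the precomputed powers of H
// and performing a single reduction per batch before updating the running hash x.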
size_t GCM_AuthenticateBlocks_PMULL(const byte *data, size_t len, const byte *mtable, byte *hbuffer)
    const uint64x2_t r = {0xe100000000000000ull, 0xc200000000000000ull};
    uint64x2_t x = vreinterpretq_u64_u8(vld1q_u8(hbuffer));

    uint64x2_t d1, d2 = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(data+(s-1)*16U)));
    uint64x2_t c0 = vdupq_n_u64(0);
    uint64x2_t c1 = vdupq_n_u64(0);
    uint64x2_t c2 = vdupq_n_u64(0);

    const uint64x2_t h0 = vld1q_u64((const uint64_t*)(mtable+(i+0)*16));
    const uint64x2_t h1 = vld1q_u64((const uint64_t*)(mtable+(i+1)*16));
    const uint64x2_t h2 = veorq_u64(h0, h1);

    const uint64x2_t t1 = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(data)));
    d1 = veorq_u64(vextq_u64(t1, t1, 1), x);
    c0 = veorq_u64(c0, PMULL_00(d1, h0));
    c2 = veorq_u64(c2, PMULL_10(d1, h1));
    d1 = veorq_u64(d1, SwapWords(d1));
    c1 = veorq_u64(c1, PMULL_00(d1, h2));

    d1 = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(data+(s-i)*16-8)));
    c0 = veorq_u64(c0, PMULL_10(d2, h0));
    c2 = veorq_u64(c2, PMULL_10(d1, h1));
    d2 = veorq_u64(d2, d1);
    c1 = veorq_u64(c1, PMULL_10(d2, h2));

    const uint64x2_t t2 = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(data)));
    d1 = veorq_u64(vextq_u64(t2, t2, 1), x);
    c0 = veorq_u64(c0, PMULL_01(d1, h0));
    c2 = veorq_u64(c2, PMULL_11(d1, h1));
    d1 = veorq_u64(d1, SwapWords(d1));
    c1 = veorq_u64(c1, PMULL_01(d1, h2));

    const uint64x2_t t3 = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(data+(s-i)*16-8)));
    d2 = vextq_u64(t3, t3, 1);
    c0 = veorq_u64(c0, PMULL_01(d1, h0));
    c2 = veorq_u64(c2, PMULL_01(d2, h1));
    d1 = veorq_u64(d1, d2);
    c1 = veorq_u64(c1, PMULL_01(d1, h2));

    c1 = veorq_u64(veorq_u64(c1, c0), c2);
    x = GCM_Reduce_PMULL(c0, c1, c2, r);

    vst1q_u64(reinterpret_cast<uint64_t *>(hbuffer), x);
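
// GCM_ReverseHashBufferIfNeeded_PMULL swaps the hash buffer into the byte order
// the rest of the GCM code expects when the native byte order requires it.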
void GCM_ReverseHashBufferIfNeeded_PMULL(byte *hashBuffer)
    const uint8x16_t x = vrev64q_u8(vld1q_u8(hashBuffer));
    vst1q_u8(hashBuffer, vextq_u8(x, x, 8));
#endif // CRYPTOPP_ARM_PMULL_AVAILABLE
#if CRYPTOPP_SSE2_INTRIN_AVAILABLE || CRYPTOPP_SSE2_ASM_AVAILABLE
void GCM_Xor16_SSE2(byte *a, const byte *b, const byte *c)
{
# if CRYPTOPP_SSE2_ASM_AVAILABLE && defined(__GNUC__)
    asm ("movdqa %1, %%xmm0; pxor %2, %%xmm0; movdqa %%xmm0, %0;"
         : "=m" (a[0]) : "m"(b[0]), "m"(c[0]));
# else // CRYPTOPP_SSE2_INTRIN_AVAILABLE
    _mm_store_si128(M128_CAST(a), _mm_xor_si128(
        _mm_load_si128(CONST_M128_CAST(b)),
        _mm_load_si128(CONST_M128_CAST(c))));
# endif
}
#endif // CRYPTOPP_SSE2_ASM_AVAILABLE
#if CRYPTOPP_CLMUL_AVAILABLE
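
// The two routines below (gcm_gf_mult and the _mm_clmulepi64_si128 wrapper)
// appear to be a portable reference model of the carryless multiply, useful for
// checking the CLMUL code path; they are not part of the fast path.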
void gcm_gf_mult(const unsigned char *a, const unsigned char *b, unsigned char *c)
    word64 Z0=0, Z1=0, V0, V1;

    Block::Get(a)(V0)(V1);

    for (int i=0; i<16; i++)
        for (int j=0x80; j!=0; j>>=1)

            V1 = (V1>>1) | (V0<<63);
            V0 = (V0>>1) ^ (x ? W64LIT(0xe1) << 56 : 0);
__m128i _mm_clmulepi64_si128(const __m128i &a, const __m128i &b, int i)

    for (int i=0; i<16; i++)
        ((byte *)&output)[i] = c.GetByte(i);
inline __m128i SwapWords(const __m128i& val)
{
    return _mm_shuffle_epi32(val, _MM_SHUFFLE(1, 0, 3, 2));
}
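
// GCM_Reduce_CLMUL folds the 256-bit product c0*x^128 + c1*x^64 + c2 back into
// 128 bits modulo the GCM polynomial x^128 + x^7 + x^2 + x + 1 (bit-reflected).
// r holds the reduction constants 0xe1... and 0xc2... used by the two folds.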
inline __m128i GCM_Reduce_CLMUL(__m128i c0, __m128i c1, __m128i c2, const __m128i& r)
{
    c1 = _mm_xor_si128(c1, _mm_slli_si128(c0, 8));
    c1 = _mm_xor_si128(c1, _mm_clmulepi64_si128(c0, r, 0x10));
    c0 = _mm_xor_si128(c1, _mm_srli_si128(c0, 8));
    c0 = _mm_slli_epi64(c0, 1);
    c0 = _mm_clmulepi64_si128(c0, r, 0);
    c2 = _mm_xor_si128(c2, c0);
    c2 = _mm_xor_si128(c2, _mm_srli_si128(c1, 8));
    c1 = _mm_unpacklo_epi64(c1, c2);
    c1 = _mm_srli_epi64(c1, 63);
    c2 = _mm_slli_epi64(c2, 1);
    return _mm_xor_si128(c2, c1);
}
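
// For _mm_clmulepi64_si128 the immediate selects the 64-bit halves: 0x00 is
// low*low, 0x11 is high*high, and 0x01/0x10 are the two cross products, which
// are XORed together to form the middle term of the schoolbook multiply.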
__m128i GCM_Multiply_CLMUL(const __m128i &x, const __m128i &h, const __m128i &r)
{
    const __m128i c0 = _mm_clmulepi64_si128(x,h,0);
    const __m128i c1 = _mm_xor_si128(_mm_clmulepi64_si128(x,h,1), _mm_clmulepi64_si128(x,h,0x10));
    const __m128i c2 = _mm_clmulepi64_si128(x,h,0x11);

    return GCM_Reduce_CLMUL(c0, c1, c2, r);
}
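
// GCM_SetKeyWithoutResync_CLMUL byte-reverses the hash subkey H with the m mask
// and precomputes powers of H into mulTable in the same interleaved layout the
// authentication loop expects.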
void GCM_SetKeyWithoutResync_CLMUL(const byte *hashKey, byte *mulTable, unsigned int tableSize)
{
    const __m128i r = _mm_set_epi32(0xc2000000, 0x00000000, 0xe1000000, 0x00000000);
    const __m128i m = _mm_set_epi32(0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f);
    __m128i h0 = _mm_shuffle_epi8(_mm_load_si128(CONST_M128_CAST(hashKey)), m), h = h0;

    unsigned int i;
    for (i=0; i<tableSize-32; i+=32)
    {
        const __m128i h1 = GCM_Multiply_CLMUL(h, h0, r);
        _mm_storel_epi64(M128_CAST(mulTable+i), h);
        _mm_storeu_si128(M128_CAST(mulTable+i+16), h1);
        _mm_storeu_si128(M128_CAST(mulTable+i+8), h);
        _mm_storel_epi64(M128_CAST(mulTable+i+8), h1);
        h = GCM_Multiply_CLMUL(h1, h0, r);
    }

    const __m128i h1 = GCM_Multiply_CLMUL(h, h0, r);
    _mm_storel_epi64(M128_CAST(mulTable+i), h);
    _mm_storeu_si128(M128_CAST(mulTable+i+16), h1);
    _mm_storeu_si128(M128_CAST(mulTable+i+8), h);
    _mm_storel_epi64(M128_CAST(mulTable+i+8), h1);
}
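
// GCM_AuthenticateBlocks_CLMUL processes the data in batches of 16-byte blocks.
// m1/m2 are byte-shuffle masks that put the data into the reflected order the
// carryless multiplies expect; one reduction is performed per batch.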
size_t GCM_AuthenticateBlocks_CLMUL(const byte *data, size_t len, const byte *mtable, byte *hbuffer)
    const __m128i r = _mm_set_epi32(0xc2000000, 0x00000000, 0xe1000000, 0x00000000);
    const __m128i m1 = _mm_set_epi32(0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f);
    const __m128i m2 = _mm_set_epi32(0x08090a0b, 0x0c0d0e0f, 0x00010203, 0x04050607);
    __m128i x = _mm_load_si128(M128_CAST(hbuffer));

    __m128i d2 = _mm_shuffle_epi8(d1, m2);
    __m128i c0 = _mm_setzero_si128();
    __m128i c1 = _mm_setzero_si128();
    __m128i c2 = _mm_setzero_si128();

    const __m128i h2 = _mm_xor_si128(h0, h1);

    d1 = _mm_xor_si128(d1, x);
    c0 = _mm_xor_si128(c0, _mm_clmulepi64_si128(d1, h0, 0));
    c2 = _mm_xor_si128(c2, _mm_clmulepi64_si128(d1, h1, 1));
    d1 = _mm_xor_si128(d1, SwapWords(d1));
    c1 = _mm_xor_si128(c1, _mm_clmulepi64_si128(d1, h2, 0));

    d1 = _mm_shuffle_epi8(_mm_loadu_si128(CONST_M128_CAST(data+(s-i)*16-8)), m2);
    c0 = _mm_xor_si128(c0, _mm_clmulepi64_si128(d2, h0, 1));
    c2 = _mm_xor_si128(c2, _mm_clmulepi64_si128(d1, h1, 1));
    d2 = _mm_xor_si128(d2, d1);
    c1 = _mm_xor_si128(c1, _mm_clmulepi64_si128(d2, h2, 1));

    d1 = _mm_xor_si128(d1, x);
    c0 = _mm_xor_si128(c0, _mm_clmulepi64_si128(d1, h0, 0x10));
    c2 = _mm_xor_si128(c2, _mm_clmulepi64_si128(d1, h1, 0x11));
    d1 = _mm_xor_si128(d1, SwapWords(d1));
    c1 = _mm_xor_si128(c1, _mm_clmulepi64_si128(d1, h2, 0x10));

    d2 = _mm_shuffle_epi8(_mm_loadu_si128(CONST_M128_CAST(data+(s-i)*16-8)), m1);
    c0 = _mm_xor_si128(c0, _mm_clmulepi64_si128(d1, h0, 0x10));
    c2 = _mm_xor_si128(c2, _mm_clmulepi64_si128(d2, h1, 0x10));
    d1 = _mm_xor_si128(d1, d2);
    c1 = _mm_xor_si128(c1, _mm_clmulepi64_si128(d1, h2, 0x10));

    c1 = _mm_xor_si128(_mm_xor_si128(c1, c0), c2);
    x = GCM_Reduce_CLMUL(c0, c1, c2, r);
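
// GCM_ReverseHashBufferIfNeeded_CLMUL byte-reverses the hash buffer with a
// single SSSE3 shuffle so the result matches the byte order GCM requires.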
void GCM_ReverseHashBufferIfNeeded_CLMUL(byte *hashBuffer)
{
    // SSSE3 instruction, but only used with CLMUL
    const __m128i mask = _mm_set_epi32(0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f);
    _mm_storeu_si128(M128_CAST(hashBuffer), _mm_shuffle_epi8(
        _mm_loadu_si128(CONST_M128_CAST(hashBuffer)), mask));
}
#endif // CRYPTOPP_CLMUL_AVAILABLE
#if CRYPTOPP_POWER8_AVAILABLE
void GCM_Xor16_POWER8(byte *a, const byte *b, const byte *c)
{
    // a = b ^ c using the ppc_simd.h vector helpers
    VecStore(VecXor(VecLoad(b), VecLoad(c)), a);
}
#endif // CRYPTOPP_POWER8_AVAILABLE
#if CRYPTOPP_POWER8_VMULL_AVAILABLE
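
// The VMULL routines below mirror the CLMUL path using POWER8's vpmsumd
// (vector polynomial multiply). The load helpers choose a byte-permute mask per
// endianness so the data reaches the multiplier in the reflected order GCM uses.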
    c1 = VecXor(c1, VecShiftRightOctet<8>(c0));
    c0 = VecXor(c1, VecShiftLeftOctet<8>(c0));
    c2 = VecXor(c2, VecShiftLeftOctet<8>(c1));
    c1 = vec_sr(vec_mergeh(c1, c2), m63);

    return GCM_Reduce_VMULL(c0, c1, c2, r);
inline uint64x2_p LoadHashKey(const byte *hashKey)
#if (CRYPTOPP_BIG_ENDIAN)
    const uint8x16_p mask = {8,9,10,11, 12,13,14,15, 0,1,2,3, 4,5,6,7};
#else
    const uint8x16_p mask = {15,14,13,12, 11,10,9,8, 7,6,5,4, 3,2,1,0};
#endif
void GCM_SetKeyWithoutResync_VMULL(const byte *hashKey, byte *mulTable, unsigned int tableSize)
    const uint64x2_p r = {0xe100000000000000ull, 0xc200000000000000ull};

    for (i=0; i<tableSize-32; i+=32)
        const uint64x2_p h1 = GCM_Multiply_VMULL(h, h0, r);
        std::memcpy(mulTable+i, temp+0, 8);
        std::memcpy(mulTable+i+8, temp+0, 8);
        h = GCM_Multiply_VMULL(h1, h0, r);

    const uint64x2_p h1 = GCM_Multiply_VMULL(h, h0, r);
    std::memcpy(mulTable+i, temp+0, 8);
    std::memcpy(mulTable+i+8, temp+0, 8);
template <class T>
inline T SwapWords(const T& data)
{
    return (T)VecRotateLeftOctet<8>(data);
}

inline uint64x2_p LoadBuffer1(const byte *dataBuffer)
#if (CRYPTOPP_BIG_ENDIAN)
    const uint8x16_p mask = {7,6,5,4, 3,2,1,0, 15,14,13,12, 11,10,9,8};

inline uint64x2_p LoadBuffer2(const byte *dataBuffer)
#if (CRYPTOPP_BIG_ENDIAN)
size_t GCM_AuthenticateBlocks_VMULL(const byte *data, size_t len, const byte *mtable, byte *hbuffer)
    const uint64x2_p r = {0xe100000000000000ull, 0xc200000000000000ull};

    uint64x2_p d1, d2 = LoadBuffer1(data+(s-1)*16);

    d1 = LoadBuffer2(data);
    d1 = VecXor(d1, SwapWords(d1));

    d1 = LoadBuffer1(data+(s-i)*16-8);

    d1 = LoadBuffer2(data);
    d1 = VecXor(d1, SwapWords(d1));

    d2 = LoadBuffer2(data+(s-i)*16-8);

    x = GCM_Reduce_VMULL(c0, c1, c2, r);
void GCM_ReverseHashBufferIfNeeded_VMULL(byte *hashBuffer)
    const uint64x2_p mask = {0x08090a0b0c0d0e0full, 0x0001020304050607ull};
#endif // CRYPTOPP_POWER8_VMULL_AVAILABLE