#if defined(CRYPTOPP_DISABLE_SHA_ASM)
# undef CRYPTOPP_X86_ASM_AVAILABLE
# undef CRYPTOPP_X32_ASM_AVAILABLE
# undef CRYPTOPP_X64_ASM_AVAILABLE
# undef CRYPTOPP_SSE2_ASM_AVAILABLE
#endif

#if (CRYPTOPP_SHANI_AVAILABLE)
# include <nmmintrin.h>
# include <immintrin.h>
#endif

#if (CRYPTOPP_ARM_NEON_HEADER)
# include <arm_neon.h>
#endif

#if (CRYPTOPP_ARM_ACLE_HEADER)
# include <stdint.h>
# include <arm_acle.h>
#endif

#if CRYPTOPP_POWER8_SHA_AVAILABLE
# include "ppc_simd.h"
#endif

// The signal-based feature probes below need signal() and setjmp/longjmp
#ifdef CRYPTOPP_GNU_STYLE_INLINE_ASSEMBLY
# include <signal.h>
# include <setjmp.h>
#endif

#ifndef EXCEPTION_EXECUTE_HANDLER
# define EXCEPTION_EXECUTE_HANDLER 1
#endif

// Clang intrinsic casts
#define M128_CAST(x) ((__m128i *)(void *)(x))
#define CONST_M128_CAST(x) ((const __m128i *)(const void *)(x))
extern const char SHA_SIMD_FNAME[] = __FILE__;  // squash MS LNK4221 and libtool warnings

NAMESPACE_BEGIN(CryptoPP)

// SHA-256 and SHA-512 round constant tables; the definitions live in sha.cpp
extern const word32 SHA256_K[64];
extern const word64 SHA512_K[80];
// ***************** SIGILL probes ********************

#ifdef CRYPTOPP_GNU_STYLE_INLINE_ASSEMBLY
extern "C" {
    typedef void (*SigHandler)(int);

    static jmp_buf s_jmpSIGILL;
    static void SigIllHandler(int)
    {
        longjmp(s_jmpSIGILL, 1);
    }
}
#endif  // Not CRYPTOPP_MS_STYLE_INLINE_ASSEMBLY
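// The probes below determine at runtime whether the processor actually
// executes the ARMv8 SHA instructions. Each probe runs a handful of SHA
// intrinsics over dummy data: under CRYPTOPP_MS_STYLE_INLINE_ASSEMBLY the
// attempt is guarded by structured exception handling, while under
// CRYPTOPP_GNU_STYLE_INLINE_ASSEMBLY a temporary SIGILL handler plus
// setjmp/longjmp recovers from an unsupported instruction. The result
// lanes are OR'd into 'result' so the compiler cannot elide the probe.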
#if (CRYPTOPP_BOOL_ARM32 || CRYPTOPP_BOOL_ARMV8)
bool CPU_ProbeSHA1()
{
#if defined(CRYPTOPP_NO_CPU_FEATURE_PROBES)
    return false;
#elif (CRYPTOPP_ARM_SHA1_AVAILABLE)
# if defined(CRYPTOPP_MS_STYLE_INLINE_ASSEMBLY)
    volatile bool result = true;
    __try
    {
        unsigned int w[] = {1,2,3,4, 5,6,7,8, 9,10,11,12};
        uint32x4_t data1 = vld1q_u32(w+0);
        uint32x4_t data2 = vld1q_u32(w+4);
        uint32x4_t data3 = vld1q_u32(w+8);

        uint32x4_t r1 = vsha1cq_u32 (data1, 0, data2);
        uint32x4_t r2 = vsha1mq_u32 (data1, 0, data2);
        uint32x4_t r3 = vsha1pq_u32 (data1, 0, data2);
        uint32x4_t r4 = vsha1su0q_u32 (data1, data2, data3);
        uint32x4_t r5 = vsha1su1q_u32 (data1, data2);

        result = !!(vgetq_lane_u32(r1,0) | vgetq_lane_u32(r2,1) | vgetq_lane_u32(r3,2) | vgetq_lane_u32(r4,3) | vgetq_lane_u32(r5,0));
    }
    __except (EXCEPTION_EXECUTE_HANDLER)
    {
        return false;
    }
    return result;
# else
    // longjmp and clobber warnings. Volatile is required.
    volatile bool result = true;

    volatile SigHandler oldHandler = signal(SIGILL, SigIllHandler);
    if (oldHandler == SIG_ERR)
        return false;

    volatile sigset_t oldMask;
    if (sigprocmask(0, NULLPTR, (sigset_t*)&oldMask))
    {
        signal(SIGILL, oldHandler);
        return false;
    }

    if (setjmp(s_jmpSIGILL))
        result = false;
    else
    {
        unsigned int w[] = {1,2,3,4, 5,6,7,8, 9,10,11,12};
        uint32x4_t data1 = vld1q_u32(w+0);
        uint32x4_t data2 = vld1q_u32(w+4);
        uint32x4_t data3 = vld1q_u32(w+8);

        uint32x4_t r1 = vsha1cq_u32 (data1, 0, data2);
        uint32x4_t r2 = vsha1mq_u32 (data1, 0, data2);
        uint32x4_t r3 = vsha1pq_u32 (data1, 0, data2);
        uint32x4_t r4 = vsha1su0q_u32 (data1, data2, data3);
        uint32x4_t r5 = vsha1su1q_u32 (data1, data2);

        result = !!(vgetq_lane_u32(r1,0) | vgetq_lane_u32(r2,1) | vgetq_lane_u32(r3,2) | vgetq_lane_u32(r4,3) | vgetq_lane_u32(r5,0));
    }

    sigprocmask(SIG_SETMASK, (sigset_t*)&oldMask, NULLPTR);
    signal(SIGILL, oldHandler);
    return result;
# endif
#else
    return false;
#endif  // CRYPTOPP_ARM_SHA1_AVAILABLE
}
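// A minimal usage sketch (hypothetical, for illustration only). A caller
// would run the probe once, cache the answer, and pick a kernel:
//
//   static const bool s_hasSHA1 = CPU_ProbeSHA1();
//   ...
//   if (s_hasSHA1)
//       SHA1_HashMultipleBlocks_ARMV8(state, data, length, BIG_ENDIAN_ORDER);
//
// Within the library the probes are reached through the CPU feature
// detection machinery in cpu.cpp rather than called directly.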
bool CPU_ProbeSHA256()
{
#if defined(CRYPTOPP_NO_CPU_FEATURE_PROBES)
    return false;
#elif (CRYPTOPP_ARM_SHA2_AVAILABLE)
# if defined(CRYPTOPP_MS_STYLE_INLINE_ASSEMBLY)
    volatile bool result = true;
    __try
    {
        unsigned int w[] = {1,2,3,4, 5,6,7,8, 9,10,11,12};
        uint32x4_t data1 = vld1q_u32(w+0);
        uint32x4_t data2 = vld1q_u32(w+4);
        uint32x4_t data3 = vld1q_u32(w+8);

        uint32x4_t r1 = vsha256hq_u32 (data1, data2, data3);
        uint32x4_t r2 = vsha256h2q_u32 (data1, data2, data3);
        uint32x4_t r3 = vsha256su0q_u32 (data1, data2);
        uint32x4_t r4 = vsha256su1q_u32 (data1, data2, data3);

        result = !!(vgetq_lane_u32(r1,0) | vgetq_lane_u32(r2,1) | vgetq_lane_u32(r3,2) | vgetq_lane_u32(r4,3));
    }
    __except (EXCEPTION_EXECUTE_HANDLER)
    {
        return false;
    }
    return result;
# else
    // longjmp and clobber warnings. Volatile is required.
    volatile bool result = true;

    volatile SigHandler oldHandler = signal(SIGILL, SigIllHandler);
    if (oldHandler == SIG_ERR)
        return false;

    volatile sigset_t oldMask;
    if (sigprocmask(0, NULLPTR, (sigset_t*)&oldMask))
    {
        signal(SIGILL, oldHandler);
        return false;
    }

    if (setjmp(s_jmpSIGILL))
        result = false;
    else
    {
        unsigned int w[] = {1,2,3,4, 5,6,7,8, 9,10,11,12};
        uint32x4_t data1 = vld1q_u32(w+0);
        uint32x4_t data2 = vld1q_u32(w+4);
        uint32x4_t data3 = vld1q_u32(w+8);

        uint32x4_t r1 = vsha256hq_u32 (data1, data2, data3);
        uint32x4_t r2 = vsha256h2q_u32 (data1, data2, data3);
        uint32x4_t r3 = vsha256su0q_u32 (data1, data2);
        uint32x4_t r4 = vsha256su1q_u32 (data1, data2, data3);

        result = !!(vgetq_lane_u32(r1,0) | vgetq_lane_u32(r2,1) | vgetq_lane_u32(r3,2) | vgetq_lane_u32(r4,3));
    }

    sigprocmask(SIG_SETMASK, (sigset_t*)&oldMask, NULLPTR);
    signal(SIGILL, oldHandler);
    return result;
# endif
#else
    return false;
#endif  // CRYPTOPP_ARM_SHA2_AVAILABLE
}
#endif  // ARM32 or ARM64
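// ***************** Intel x86 SHA ********************

// The SHA-NI kernels below follow the code published by Sean Gulley and
// Jeffrey Walton. For SHA-1, each _mm_sha1rnds4_epu32 performs four
// rounds; its immediate argument (0..3) selects the round function and
// constant for rounds 0-19, 20-39, 40-59 and 60-79 respectively.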
#if CRYPTOPP_SHANI_AVAILABLE

void SHA1_HashMultipleBlocks_SHANI(word32 *state, const word32 *data, size_t length, ByteOrder order)
{
    CRYPTOPP_ASSERT(state);
    CRYPTOPP_ASSERT(data);
    CRYPTOPP_ASSERT(length >= SHA1::BLOCKSIZE);

    __m128i ABCD, ABCD_SAVE, E0, E0_SAVE, E1;
    __m128i MASK, MSG0, MSG1, MSG2, MSG3;

    // Load initial values
    ABCD = _mm_loadu_si128(CONST_M128_CAST(state));
    E0 = _mm_set_epi32(state[4], 0, 0, 0);
    ABCD = _mm_shuffle_epi32(ABCD, 0x1B);

    // Select the byte shuffle that brings the message words into the
    // order the SHA instructions expect
    MASK = order == BIG_ENDIAN_ORDER ?
        _mm_set_epi8(0,1,2,3, 4,5,6,7, 8,9,10,11, 12,13,14,15) :
        _mm_set_epi8(3,2,1,0, 7,6,5,4, 11,10,9,8, 15,14,13,12) ;

    while (length >= SHA1::BLOCKSIZE)
    {
        // Save current state
        ABCD_SAVE = ABCD;
        E0_SAVE = E0;

        // Rounds 0-3
        MSG0 = _mm_loadu_si128(CONST_M128_CAST(data+0));
        MSG0 = _mm_shuffle_epi8(MSG0, MASK);
        E0 = _mm_add_epi32(E0, MSG0);
        E1 = ABCD;
        ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 0);

        // Rounds 4-7
        MSG1 = _mm_loadu_si128(CONST_M128_CAST(data+4));
        MSG1 = _mm_shuffle_epi8(MSG1, MASK);
        E1 = _mm_sha1nexte_epu32(E1, MSG1);
        E0 = ABCD;
        ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 0);
        MSG0 = _mm_sha1msg1_epu32(MSG0, MSG1);

        // Rounds 8-11
        MSG2 = _mm_loadu_si128(CONST_M128_CAST(data+8));
        MSG2 = _mm_shuffle_epi8(MSG2, MASK);
        E0 = _mm_sha1nexte_epu32(E0, MSG2);
        E1 = ABCD;
        ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 0);
        MSG1 = _mm_sha1msg1_epu32(MSG1, MSG2);
        MSG0 = _mm_xor_si128(MSG0, MSG2);

        // Rounds 12-15
        MSG3 = _mm_loadu_si128(CONST_M128_CAST(data+12));
        MSG3 = _mm_shuffle_epi8(MSG3, MASK);
        E1 = _mm_sha1nexte_epu32(E1, MSG3);
        E0 = ABCD;
        MSG0 = _mm_sha1msg2_epu32(MSG0, MSG3);
        ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 0);
        MSG2 = _mm_sha1msg1_epu32(MSG2, MSG3);
        MSG1 = _mm_xor_si128(MSG1, MSG3);

        // Rounds 16-19
        E0 = _mm_sha1nexte_epu32(E0, MSG0);
        E1 = ABCD;
        MSG1 = _mm_sha1msg2_epu32(MSG1, MSG0);
        ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 0);
        MSG3 = _mm_sha1msg1_epu32(MSG3, MSG0);
        MSG2 = _mm_xor_si128(MSG2, MSG0);

        // Rounds 20-23
        E1 = _mm_sha1nexte_epu32(E1, MSG1);
        E0 = ABCD;
        MSG2 = _mm_sha1msg2_epu32(MSG2, MSG1);
        ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 1);
        MSG0 = _mm_sha1msg1_epu32(MSG0, MSG1);
        MSG3 = _mm_xor_si128(MSG3, MSG1);

        // Rounds 24-27
        E0 = _mm_sha1nexte_epu32(E0, MSG2);
        E1 = ABCD;
        MSG3 = _mm_sha1msg2_epu32(MSG3, MSG2);
        ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 1);
        MSG1 = _mm_sha1msg1_epu32(MSG1, MSG2);
        MSG0 = _mm_xor_si128(MSG0, MSG2);

        // Rounds 28-31
        E1 = _mm_sha1nexte_epu32(E1, MSG3);
        E0 = ABCD;
        MSG0 = _mm_sha1msg2_epu32(MSG0, MSG3);
        ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 1);
        MSG2 = _mm_sha1msg1_epu32(MSG2, MSG3);
        MSG1 = _mm_xor_si128(MSG1, MSG3);

        // Rounds 32-35
        E0 = _mm_sha1nexte_epu32(E0, MSG0);
        E1 = ABCD;
        MSG1 = _mm_sha1msg2_epu32(MSG1, MSG0);
        ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 1);
        MSG3 = _mm_sha1msg1_epu32(MSG3, MSG0);
        MSG2 = _mm_xor_si128(MSG2, MSG0);

        // Rounds 36-39
        E1 = _mm_sha1nexte_epu32(E1, MSG1);
        E0 = ABCD;
        MSG2 = _mm_sha1msg2_epu32(MSG2, MSG1);
        ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 1);
        MSG0 = _mm_sha1msg1_epu32(MSG0, MSG1);
        MSG3 = _mm_xor_si128(MSG3, MSG1);

        // Rounds 40-43
        E0 = _mm_sha1nexte_epu32(E0, MSG2);
        E1 = ABCD;
        MSG3 = _mm_sha1msg2_epu32(MSG3, MSG2);
        ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 2);
        MSG1 = _mm_sha1msg1_epu32(MSG1, MSG2);
        MSG0 = _mm_xor_si128(MSG0, MSG2);

        // Rounds 44-47
        E1 = _mm_sha1nexte_epu32(E1, MSG3);
        E0 = ABCD;
        MSG0 = _mm_sha1msg2_epu32(MSG0, MSG3);
        ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 2);
        MSG2 = _mm_sha1msg1_epu32(MSG2, MSG3);
        MSG1 = _mm_xor_si128(MSG1, MSG3);

        // Rounds 48-51
        E0 = _mm_sha1nexte_epu32(E0, MSG0);
        E1 = ABCD;
        MSG1 = _mm_sha1msg2_epu32(MSG1, MSG0);
        ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 2);
        MSG3 = _mm_sha1msg1_epu32(MSG3, MSG0);
        MSG2 = _mm_xor_si128(MSG2, MSG0);

        // Rounds 52-55
        E1 = _mm_sha1nexte_epu32(E1, MSG1);
        E0 = ABCD;
        MSG2 = _mm_sha1msg2_epu32(MSG2, MSG1);
        ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 2);
        MSG0 = _mm_sha1msg1_epu32(MSG0, MSG1);
        MSG3 = _mm_xor_si128(MSG3, MSG1);

        // Rounds 56-59
        E0 = _mm_sha1nexte_epu32(E0, MSG2);
        E1 = ABCD;
        MSG3 = _mm_sha1msg2_epu32(MSG3, MSG2);
        ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 2);
        MSG1 = _mm_sha1msg1_epu32(MSG1, MSG2);
        MSG0 = _mm_xor_si128(MSG0, MSG2);

        // Rounds 60-63
        E1 = _mm_sha1nexte_epu32(E1, MSG3);
        E0 = ABCD;
        MSG0 = _mm_sha1msg2_epu32(MSG0, MSG3);
        ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 3);
        MSG2 = _mm_sha1msg1_epu32(MSG2, MSG3);
        MSG1 = _mm_xor_si128(MSG1, MSG3);

        // Rounds 64-67
        E0 = _mm_sha1nexte_epu32(E0, MSG0);
        E1 = ABCD;
        MSG1 = _mm_sha1msg2_epu32(MSG1, MSG0);
        ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 3);
        MSG3 = _mm_sha1msg1_epu32(MSG3, MSG0);
        MSG2 = _mm_xor_si128(MSG2, MSG0);

        // Rounds 68-71
        E1 = _mm_sha1nexte_epu32(E1, MSG1);
        E0 = ABCD;
        MSG2 = _mm_sha1msg2_epu32(MSG2, MSG1);
        ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 3);
        MSG3 = _mm_xor_si128(MSG3, MSG1);

        // Rounds 72-75
        E0 = _mm_sha1nexte_epu32(E0, MSG2);
        E1 = ABCD;
        MSG3 = _mm_sha1msg2_epu32(MSG3, MSG2);
        ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 3);

        // Rounds 76-79
        E1 = _mm_sha1nexte_epu32(E1, MSG3);
        E0 = ABCD;
        ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 3);

        // Add values back to state
        E0 = _mm_sha1nexte_epu32(E0, E0_SAVE);
        ABCD = _mm_add_epi32(ABCD, ABCD_SAVE);

        data += SHA1::BLOCKSIZE/sizeof(word32);
        length -= SHA1::BLOCKSIZE;
    }

    // Save state
    ABCD = _mm_shuffle_epi32(ABCD, 0x1B);
    _mm_storeu_si128(M128_CAST(state), ABCD);
    state[4] = _mm_extract_epi32(E0, 3);
}
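// SHA-256 with SHA-NI keeps the eight 32-bit state words in two XMM
// registers arranged as ABEF and CDGH, the layout _mm_sha256rnds2_epu32
// expects. The shuffle/alignr/blend sequences at entry and exit convert
// between the canonical ABCDEFGH order of state[] and that layout. Each
// _mm_sha256rnds2_epu32 performs two rounds, so every four-word message
// group needs two calls, with _mm_shuffle_epi32(MSG, 0x0E) moving the
// upper two constant-plus-message words down for the second call.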
void SHA256_HashMultipleBlocks_SHANI(word32 *state, const word32 *data, size_t length, ByteOrder order)
{
    CRYPTOPP_ASSERT(state);
    CRYPTOPP_ASSERT(data);
    CRYPTOPP_ASSERT(length >= SHA256::BLOCKSIZE);

    __m128i STATE0, STATE1;
    __m128i MSG, TMP, MASK;
    __m128i TMSG0, TMSG1, TMSG2, TMSG3;
    __m128i ABEF_SAVE, CDGH_SAVE;

    // Load initial values
    TMP = _mm_loadu_si128(M128_CAST(&state[0]));
    STATE1 = _mm_loadu_si128(M128_CAST(&state[4]));

    // Select the byte shuffle for the message's endian order
    MASK = order == BIG_ENDIAN_ORDER ?
        _mm_set_epi8(12,13,14,15, 8,9,10,11, 4,5,6,7, 0,1,2,3) :
        _mm_set_epi8(15,14,13,12, 11,10,9,8, 7,6,5,4, 3,2,1,0) ;

    TMP = _mm_shuffle_epi32(TMP, 0xB1);           // CDAB
    STATE1 = _mm_shuffle_epi32(STATE1, 0x1B);     // EFGH
    STATE0 = _mm_alignr_epi8(TMP, STATE1, 8);     // ABEF
    STATE1 = _mm_blend_epi16(STATE1, TMP, 0xF0);  // CDGH

    while (length >= SHA256::BLOCKSIZE)
    {
        // Save current state
        ABEF_SAVE = STATE0;
        CDGH_SAVE = STATE1;

        // Rounds 0-3
        MSG = _mm_loadu_si128(CONST_M128_CAST(data+0));
        TMSG0 = _mm_shuffle_epi8(MSG, MASK);
        MSG = _mm_add_epi32(TMSG0, _mm_set_epi64x(
            W64LIT(0xE9B5DBA5B5C0FBCF), W64LIT(0x71374491428A2F98)));
        STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
        MSG = _mm_shuffle_epi32(MSG, 0x0E);
        STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);

        // Rounds 4-7
        TMSG1 = _mm_loadu_si128(CONST_M128_CAST(data+4));
        TMSG1 = _mm_shuffle_epi8(TMSG1, MASK);
        MSG = _mm_add_epi32(TMSG1, _mm_set_epi64x(
            W64LIT(0xAB1C5ED5923F82A4), W64LIT(0x59F111F13956C25B)));
        STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
        MSG = _mm_shuffle_epi32(MSG, 0x0E);
        STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
        TMSG0 = _mm_sha256msg1_epu32(TMSG0, TMSG1);

        // Rounds 8-11
        TMSG2 = _mm_loadu_si128(CONST_M128_CAST(data+8));
        TMSG2 = _mm_shuffle_epi8(TMSG2, MASK);
        MSG = _mm_add_epi32(TMSG2, _mm_set_epi64x(
            W64LIT(0x550C7DC3243185BE), W64LIT(0x12835B01D807AA98)));
        STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
        MSG = _mm_shuffle_epi32(MSG, 0x0E);
        STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
        TMSG1 = _mm_sha256msg1_epu32(TMSG1, TMSG2);

        // Rounds 12-15
        TMSG3 = _mm_loadu_si128(CONST_M128_CAST(data+12));
        TMSG3 = _mm_shuffle_epi8(TMSG3, MASK);
        MSG = _mm_add_epi32(TMSG3, _mm_set_epi64x(
            W64LIT(0xC19BF1749BDC06A7), W64LIT(0x80DEB1FE72BE5D74)));
        STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
        TMP = _mm_alignr_epi8(TMSG3, TMSG2, 4);
        TMSG0 = _mm_add_epi32(TMSG0, TMP);
        TMSG0 = _mm_sha256msg2_epu32(TMSG0, TMSG3);
        MSG = _mm_shuffle_epi32(MSG, 0x0E);
        STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
        TMSG2 = _mm_sha256msg1_epu32(TMSG2, TMSG3);

        // Rounds 16-19
        MSG = _mm_add_epi32(TMSG0, _mm_set_epi64x(
            W64LIT(0x240CA1CC0FC19DC6), W64LIT(0xEFBE4786E49B69C1)));
        STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
        TMP = _mm_alignr_epi8(TMSG0, TMSG3, 4);
        TMSG1 = _mm_add_epi32(TMSG1, TMP);
        TMSG1 = _mm_sha256msg2_epu32(TMSG1, TMSG0);
        MSG = _mm_shuffle_epi32(MSG, 0x0E);
        STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
        TMSG3 = _mm_sha256msg1_epu32(TMSG3, TMSG0);

        // Rounds 20-23
        MSG = _mm_add_epi32(TMSG1, _mm_set_epi64x(
            W64LIT(0x76F988DA5CB0A9DC), W64LIT(0x4A7484AA2DE92C6F)));
        STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
        TMP = _mm_alignr_epi8(TMSG1, TMSG0, 4);
        TMSG2 = _mm_add_epi32(TMSG2, TMP);
        TMSG2 = _mm_sha256msg2_epu32(TMSG2, TMSG1);
        MSG = _mm_shuffle_epi32(MSG, 0x0E);
        STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
        TMSG0 = _mm_sha256msg1_epu32(TMSG0, TMSG1);

        // Rounds 24-27
        MSG = _mm_add_epi32(TMSG2, _mm_set_epi64x(
            W64LIT(0xBF597FC7B00327C8), W64LIT(0xA831C66D983E5152)));
        STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
        TMP = _mm_alignr_epi8(TMSG2, TMSG1, 4);
        TMSG3 = _mm_add_epi32(TMSG3, TMP);
        TMSG3 = _mm_sha256msg2_epu32(TMSG3, TMSG2);
        MSG = _mm_shuffle_epi32(MSG, 0x0E);
        STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
        TMSG1 = _mm_sha256msg1_epu32(TMSG1, TMSG2);

        // Rounds 28-31
        MSG = _mm_add_epi32(TMSG3, _mm_set_epi64x(
            W64LIT(0x1429296706CA6351), W64LIT(0xD5A79147C6E00BF3)));
        STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
        TMP = _mm_alignr_epi8(TMSG3, TMSG2, 4);
        TMSG0 = _mm_add_epi32(TMSG0, TMP);
        TMSG0 = _mm_sha256msg2_epu32(TMSG0, TMSG3);
        MSG = _mm_shuffle_epi32(MSG, 0x0E);
        STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
        TMSG2 = _mm_sha256msg1_epu32(TMSG2, TMSG3);

        // Rounds 32-35
        MSG = _mm_add_epi32(TMSG0, _mm_set_epi64x(
            W64LIT(0x53380D134D2C6DFC), W64LIT(0x2E1B213827B70A85)));
        STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
        TMP = _mm_alignr_epi8(TMSG0, TMSG3, 4);
        TMSG1 = _mm_add_epi32(TMSG1, TMP);
        TMSG1 = _mm_sha256msg2_epu32(TMSG1, TMSG0);
        MSG = _mm_shuffle_epi32(MSG, 0x0E);
        STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
        TMSG3 = _mm_sha256msg1_epu32(TMSG3, TMSG0);

        // Rounds 36-39
        MSG = _mm_add_epi32(TMSG1, _mm_set_epi64x(
            W64LIT(0x92722C8581C2C92E), W64LIT(0x766A0ABB650A7354)));
        STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
        TMP = _mm_alignr_epi8(TMSG1, TMSG0, 4);
        TMSG2 = _mm_add_epi32(TMSG2, TMP);
        TMSG2 = _mm_sha256msg2_epu32(TMSG2, TMSG1);
        MSG = _mm_shuffle_epi32(MSG, 0x0E);
        STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
        TMSG0 = _mm_sha256msg1_epu32(TMSG0, TMSG1);

        // Rounds 40-43
        MSG = _mm_add_epi32(TMSG2, _mm_set_epi64x(
            W64LIT(0xC76C51A3C24B8B70), W64LIT(0xA81A664BA2BFE8A1)));
        STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
        TMP = _mm_alignr_epi8(TMSG2, TMSG1, 4);
        TMSG3 = _mm_add_epi32(TMSG3, TMP);
        TMSG3 = _mm_sha256msg2_epu32(TMSG3, TMSG2);
        MSG = _mm_shuffle_epi32(MSG, 0x0E);
        STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
        TMSG1 = _mm_sha256msg1_epu32(TMSG1, TMSG2);

        // Rounds 44-47
        MSG = _mm_add_epi32(TMSG3, _mm_set_epi64x(
            W64LIT(0x106AA070F40E3585), W64LIT(0xD6990624D192E819)));
        STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
        TMP = _mm_alignr_epi8(TMSG3, TMSG2, 4);
        TMSG0 = _mm_add_epi32(TMSG0, TMP);
        TMSG0 = _mm_sha256msg2_epu32(TMSG0, TMSG3);
        MSG = _mm_shuffle_epi32(MSG, 0x0E);
        STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
        TMSG2 = _mm_sha256msg1_epu32(TMSG2, TMSG3);

        // Rounds 48-51
        MSG = _mm_add_epi32(TMSG0, _mm_set_epi64x(
            W64LIT(0x34B0BCB52748774C), W64LIT(0x1E376C0819A4C116)));
        STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
        TMP = _mm_alignr_epi8(TMSG0, TMSG3, 4);
        TMSG1 = _mm_add_epi32(TMSG1, TMP);
        TMSG1 = _mm_sha256msg2_epu32(TMSG1, TMSG0);
        MSG = _mm_shuffle_epi32(MSG, 0x0E);
        STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
        TMSG3 = _mm_sha256msg1_epu32(TMSG3, TMSG0);

        // Rounds 52-55
        MSG = _mm_add_epi32(TMSG1, _mm_set_epi64x(
            W64LIT(0x682E6FF35B9CCA4F), W64LIT(0x4ED8AA4A391C0CB3)));
        STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
        TMP = _mm_alignr_epi8(TMSG1, TMSG0, 4);
        TMSG2 = _mm_add_epi32(TMSG2, TMP);
        TMSG2 = _mm_sha256msg2_epu32(TMSG2, TMSG1);
        MSG = _mm_shuffle_epi32(MSG, 0x0E);
        STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);

        // Rounds 56-59
        MSG = _mm_add_epi32(TMSG2, _mm_set_epi64x(
            W64LIT(0x8CC7020884C87814), W64LIT(0x78A5636F748F82EE)));
        STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
        TMP = _mm_alignr_epi8(TMSG2, TMSG1, 4);
        TMSG3 = _mm_add_epi32(TMSG3, TMP);
        TMSG3 = _mm_sha256msg2_epu32(TMSG3, TMSG2);
        MSG = _mm_shuffle_epi32(MSG, 0x0E);
        STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);

        // Rounds 60-63
        MSG = _mm_add_epi32(TMSG3, _mm_set_epi64x(
            W64LIT(0xC67178F2BEF9A3F7), W64LIT(0xA4506CEB90BEFFFA)));
        STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
        MSG = _mm_shuffle_epi32(MSG, 0x0E);
        STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);

        // Add values back to state
        STATE0 = _mm_add_epi32(STATE0, ABEF_SAVE);
        STATE1 = _mm_add_epi32(STATE1, CDGH_SAVE);

        data += SHA256::BLOCKSIZE/sizeof(word32);
        length -= SHA256::BLOCKSIZE;
    }

    TMP = _mm_shuffle_epi32(STATE0, 0x1B);        // FEBA
    STATE1 = _mm_shuffle_epi32(STATE1, 0xB1);     // DCHG
    STATE0 = _mm_blend_epi16(TMP, STATE1, 0xF0);  // DCBA
    STATE1 = _mm_alignr_epi8(STATE1, TMP, 8);     // HGFE

    // Save state
    _mm_storeu_si128(M128_CAST(&state[0]), STATE0);
    _mm_storeu_si128(M128_CAST(&state[4]), STATE1);
}

#endif  // CRYPTOPP_SHANI_AVAILABLE
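// ***************** ARMv8 SHA ********************

// The ARMv8 kernels below are based on code from ARM and contributions by
// Johannes Schneiders, Skip Hovsmith and Barry O'Rourke. They mirror the
// block structure of the SHA-NI code using the Crypto Extensions
// intrinsics from <arm_neon.h>.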
#if CRYPTOPP_ARM_SHA1_AVAILABLE
void SHA1_HashMultipleBlocks_ARMV8(word32 *state, const word32 *data, size_t length, ByteOrder order)
{
    CRYPTOPP_ASSERT(state);
    CRYPTOPP_ASSERT(data);
    CRYPTOPP_ASSERT(length >= SHA1::BLOCKSIZE);

    uint32x4_t C0, C1, C2, C3;
    uint32x4_t ABCD, ABCD_SAVED;
    uint32x4_t MSG0, MSG1, MSG2, MSG3;
    uint32x4_t TMP0, TMP1;
    uint32_t   E0, E0_SAVED, E1;

    // SHA-1 round constants
    C0 = vdupq_n_u32(0x5A827999);
    C1 = vdupq_n_u32(0x6ED9EBA1);
    C2 = vdupq_n_u32(0x8F1BBCDC);
    C3 = vdupq_n_u32(0xCA62C1D6);

    // Load initial values
    ABCD = vld1q_u32(&state[0]);
    E0 = state[4];

    while (length >= SHA1::BLOCKSIZE)
    {
        // Save current state
        ABCD_SAVED = ABCD;
        E0_SAVED = E0;

        MSG0 = vld1q_u32(data +  0);
        MSG1 = vld1q_u32(data +  4);
        MSG2 = vld1q_u32(data +  8);
        MSG3 = vld1q_u32(data + 12);

        if (order == BIG_ENDIAN_ORDER)  // Data arrangement
        {
            MSG0 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG0)));
            MSG1 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG1)));
            MSG2 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG2)));
            MSG3 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG3)));
        }

        TMP0 = vaddq_u32(MSG0, C0);
        TMP1 = vaddq_u32(MSG1, C0);

        // Rounds 0-3
        E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
        ABCD = vsha1cq_u32(ABCD, E0, TMP0);
        TMP0 = vaddq_u32(MSG2, C0);
        MSG0 = vsha1su0q_u32(MSG0, MSG1, MSG2);

        // Rounds 4-7
        E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
        ABCD = vsha1cq_u32(ABCD, E1, TMP1);
        TMP1 = vaddq_u32(MSG3, C0);
        MSG0 = vsha1su1q_u32(MSG0, MSG3);
        MSG1 = vsha1su0q_u32(MSG1, MSG2, MSG3);

        // Rounds 8-11
        E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
        ABCD = vsha1cq_u32(ABCD, E0, TMP0);
        TMP0 = vaddq_u32(MSG0, C0);
        MSG1 = vsha1su1q_u32(MSG1, MSG0);
        MSG2 = vsha1su0q_u32(MSG2, MSG3, MSG0);

        // Rounds 12-15
        E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
        ABCD = vsha1cq_u32(ABCD, E1, TMP1);
        TMP1 = vaddq_u32(MSG1, C1);
        MSG2 = vsha1su1q_u32(MSG2, MSG1);
        MSG3 = vsha1su0q_u32(MSG3, MSG0, MSG1);

        // Rounds 16-19
        E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
        ABCD = vsha1cq_u32(ABCD, E0, TMP0);
        TMP0 = vaddq_u32(MSG2, C1);
        MSG3 = vsha1su1q_u32(MSG3, MSG2);
        MSG0 = vsha1su0q_u32(MSG0, MSG1, MSG2);

        // Rounds 20-23
        E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
        ABCD = vsha1pq_u32(ABCD, E1, TMP1);
        TMP1 = vaddq_u32(MSG3, C1);
        MSG0 = vsha1su1q_u32(MSG0, MSG3);
        MSG1 = vsha1su0q_u32(MSG1, MSG2, MSG3);

        // Rounds 24-27
        E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
        ABCD = vsha1pq_u32(ABCD, E0, TMP0);
        TMP0 = vaddq_u32(MSG0, C1);
        MSG1 = vsha1su1q_u32(MSG1, MSG0);
        MSG2 = vsha1su0q_u32(MSG2, MSG3, MSG0);

        // Rounds 28-31
        E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
        ABCD = vsha1pq_u32(ABCD, E1, TMP1);
        TMP1 = vaddq_u32(MSG1, C1);
        MSG2 = vsha1su1q_u32(MSG2, MSG1);
        MSG3 = vsha1su0q_u32(MSG3, MSG0, MSG1);

        // Rounds 32-35
        E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
        ABCD = vsha1pq_u32(ABCD, E0, TMP0);
        TMP0 = vaddq_u32(MSG2, C2);
        MSG3 = vsha1su1q_u32(MSG3, MSG2);
        MSG0 = vsha1su0q_u32(MSG0, MSG1, MSG2);

        // Rounds 36-39
        E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
        ABCD = vsha1pq_u32(ABCD, E1, TMP1);
        TMP1 = vaddq_u32(MSG3, C2);
        MSG0 = vsha1su1q_u32(MSG0, MSG3);
        MSG1 = vsha1su0q_u32(MSG1, MSG2, MSG3);

        // Rounds 40-43
        E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
        ABCD = vsha1mq_u32(ABCD, E0, TMP0);
        TMP0 = vaddq_u32(MSG0, C2);
        MSG1 = vsha1su1q_u32(MSG1, MSG0);
        MSG2 = vsha1su0q_u32(MSG2, MSG3, MSG0);

        // Rounds 44-47
        E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
        ABCD = vsha1mq_u32(ABCD, E1, TMP1);
        TMP1 = vaddq_u32(MSG1, C2);
        MSG2 = vsha1su1q_u32(MSG2, MSG1);
        MSG3 = vsha1su0q_u32(MSG3, MSG0, MSG1);

        // Rounds 48-51
        E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
        ABCD = vsha1mq_u32(ABCD, E0, TMP0);
        TMP0 = vaddq_u32(MSG2, C2);
        MSG3 = vsha1su1q_u32(MSG3, MSG2);
        MSG0 = vsha1su0q_u32(MSG0, MSG1, MSG2);

        // Rounds 52-55
        E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
        ABCD = vsha1mq_u32(ABCD, E1, TMP1);
        TMP1 = vaddq_u32(MSG3, C3);
        MSG0 = vsha1su1q_u32(MSG0, MSG3);
        MSG1 = vsha1su0q_u32(MSG1, MSG2, MSG3);

        // Rounds 56-59
        E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
        ABCD = vsha1mq_u32(ABCD, E0, TMP0);
        TMP0 = vaddq_u32(MSG0, C3);
        MSG1 = vsha1su1q_u32(MSG1, MSG0);
        MSG2 = vsha1su0q_u32(MSG2, MSG3, MSG0);

        // Rounds 60-63
        E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
        ABCD = vsha1pq_u32(ABCD, E1, TMP1);
        TMP1 = vaddq_u32(MSG1, C3);
        MSG2 = vsha1su1q_u32(MSG2, MSG1);
        MSG3 = vsha1su0q_u32(MSG3, MSG0, MSG1);

        // Rounds 64-67
        E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
        ABCD = vsha1pq_u32(ABCD, E0, TMP0);
        TMP0 = vaddq_u32(MSG2, C3);
        MSG3 = vsha1su1q_u32(MSG3, MSG2);
        MSG0 = vsha1su0q_u32(MSG0, MSG1, MSG2);

        // Rounds 68-71
        E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
        ABCD = vsha1pq_u32(ABCD, E1, TMP1);
        TMP1 = vaddq_u32(MSG3, C3);
        MSG0 = vsha1su1q_u32(MSG0, MSG3);

        // Rounds 72-75
        E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
        ABCD = vsha1pq_u32(ABCD, E0, TMP0);

        // Rounds 76-79
        E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
        ABCD = vsha1pq_u32(ABCD, E1, TMP1);

        // Add values back to state
        E0 += E0_SAVED;
        ABCD = vaddq_u32(ABCD_SAVED, ABCD);

        data += SHA1::BLOCKSIZE/sizeof(word32);
        length -= SHA1::BLOCKSIZE;
    }

    // Save state
    vst1q_u32(&state[0], ABCD);
    state[4] = E0;
}
#endif  // CRYPTOPP_ARM_SHA1_AVAILABLE
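// Unlike the x86 kernel, which embeds the round constants as immediates
// through _mm_set_epi64x, the ARMv8 SHA-256 kernel below loads them four
// at a time from the SHA256_K table declared near the top of this file.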
#if CRYPTOPP_ARM_SHA2_AVAILABLE
void SHA256_HashMultipleBlocks_ARMV8(word32 *state, const word32 *data, size_t length, ByteOrder order)
{
    CRYPTOPP_ASSERT(state);
    CRYPTOPP_ASSERT(data);
    CRYPTOPP_ASSERT(length >= SHA256::BLOCKSIZE);

    uint32x4_t STATE0, STATE1, ABEF_SAVE, CDGH_SAVE;
    uint32x4_t MSG0, MSG1, MSG2, MSG3;
    uint32x4_t TMP0, TMP1, TMP2;

    // Load initial values
    STATE0 = vld1q_u32(&state[0]);
    STATE1 = vld1q_u32(&state[4]);

    while (length >= SHA256::BLOCKSIZE)
    {
        // Save current state
        ABEF_SAVE = STATE0;
        CDGH_SAVE = STATE1;

        MSG0 = vld1q_u32(data +  0);
        MSG1 = vld1q_u32(data +  4);
        MSG2 = vld1q_u32(data +  8);
        MSG3 = vld1q_u32(data + 12);

        if (order == BIG_ENDIAN_ORDER)  // Data arrangement
        {
            MSG0 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG0)));
            MSG1 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG1)));
            MSG2 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG2)));
            MSG3 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG3)));
        }

        TMP0 = vaddq_u32(MSG0, vld1q_u32(&SHA256_K[0x00]));

        // Rounds 0-3
        MSG0 = vsha256su0q_u32(MSG0, MSG1);
        TMP2 = STATE0;
        TMP1 = vaddq_u32(MSG1, vld1q_u32(&SHA256_K[0x04]));
        STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0);
        STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
        MSG0 = vsha256su1q_u32(MSG0, MSG2, MSG3);

        // Rounds 4-7
        MSG1 = vsha256su0q_u32(MSG1, MSG2);
        TMP2 = STATE0;
        TMP0 = vaddq_u32(MSG2, vld1q_u32(&SHA256_K[0x08]));
        STATE0 = vsha256hq_u32(STATE0, STATE1, TMP1);
        STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP1);
        MSG1 = vsha256su1q_u32(MSG1, MSG3, MSG0);

        // Rounds 8-11
        MSG2 = vsha256su0q_u32(MSG2, MSG3);
        TMP2 = STATE0;
        TMP1 = vaddq_u32(MSG3, vld1q_u32(&SHA256_K[0x0c]));
        STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0);
        STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
        MSG2 = vsha256su1q_u32(MSG2, MSG0, MSG1);

        // Rounds 12-15
        MSG3 = vsha256su0q_u32(MSG3, MSG0);
        TMP2 = STATE0;
        TMP0 = vaddq_u32(MSG0, vld1q_u32(&SHA256_K[0x10]));
        STATE0 = vsha256hq_u32(STATE0, STATE1, TMP1);
        STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP1);
        MSG3 = vsha256su1q_u32(MSG3, MSG1, MSG2);

        // Rounds 16-19
        MSG0 = vsha256su0q_u32(MSG0, MSG1);
        TMP2 = STATE0;
        TMP1 = vaddq_u32(MSG1, vld1q_u32(&SHA256_K[0x14]));
        STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0);
        STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
        MSG0 = vsha256su1q_u32(MSG0, MSG2, MSG3);

        // Rounds 20-23
        MSG1 = vsha256su0q_u32(MSG1, MSG2);
        TMP2 = STATE0;
        TMP0 = vaddq_u32(MSG2, vld1q_u32(&SHA256_K[0x18]));
        STATE0 = vsha256hq_u32(STATE0, STATE1, TMP1);
        STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP1);
        MSG1 = vsha256su1q_u32(MSG1, MSG3, MSG0);

        // Rounds 24-27
        MSG2 = vsha256su0q_u32(MSG2, MSG3);
        TMP2 = STATE0;
        TMP1 = vaddq_u32(MSG3, vld1q_u32(&SHA256_K[0x1c]));
        STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0);
        STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
        MSG2 = vsha256su1q_u32(MSG2, MSG0, MSG1);

        // Rounds 28-31
        MSG3 = vsha256su0q_u32(MSG3, MSG0);
        TMP2 = STATE0;
        TMP0 = vaddq_u32(MSG0, vld1q_u32(&SHA256_K[0x20]));
        STATE0 = vsha256hq_u32(STATE0, STATE1, TMP1);
        STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP1);
        MSG3 = vsha256su1q_u32(MSG3, MSG1, MSG2);

        // Rounds 32-35
        MSG0 = vsha256su0q_u32(MSG0, MSG1);
        TMP2 = STATE0;
        TMP1 = vaddq_u32(MSG1, vld1q_u32(&SHA256_K[0x24]));
        STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0);
        STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
        MSG0 = vsha256su1q_u32(MSG0, MSG2, MSG3);

        // Rounds 36-39
        MSG1 = vsha256su0q_u32(MSG1, MSG2);
        TMP2 = STATE0;
        TMP0 = vaddq_u32(MSG2, vld1q_u32(&SHA256_K[0x28]));
        STATE0 = vsha256hq_u32(STATE0, STATE1, TMP1);
        STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP1);
        MSG1 = vsha256su1q_u32(MSG1, MSG3, MSG0);

        // Rounds 40-43
        MSG2 = vsha256su0q_u32(MSG2, MSG3);
        TMP2 = STATE0;
        TMP1 = vaddq_u32(MSG3, vld1q_u32(&SHA256_K[0x2c]));
        STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0);
        STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
        MSG2 = vsha256su1q_u32(MSG2, MSG0, MSG1);

        // Rounds 44-47
        MSG3 = vsha256su0q_u32(MSG3, MSG0);
        TMP2 = STATE0;
        TMP0 = vaddq_u32(MSG0, vld1q_u32(&SHA256_K[0x30]));
        STATE0 = vsha256hq_u32(STATE0, STATE1, TMP1);
        STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP1);
        MSG3 = vsha256su1q_u32(MSG3, MSG1, MSG2);

        // Rounds 48-51
        TMP2 = STATE0;
        TMP1 = vaddq_u32(MSG1, vld1q_u32(&SHA256_K[0x34]));
        STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0);
        STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);

        // Rounds 52-55
        TMP2 = STATE0;
        TMP0 = vaddq_u32(MSG2, vld1q_u32(&SHA256_K[0x38]));
        STATE0 = vsha256hq_u32(STATE0, STATE1, TMP1);
        STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP1);

        // Rounds 56-59
        TMP2 = STATE0;
        TMP1 = vaddq_u32(MSG3, vld1q_u32(&SHA256_K[0x3c]));
        STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0);
        STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);

        // Rounds 60-63
        TMP2 = STATE0;
        STATE0 = vsha256hq_u32(STATE0, STATE1, TMP1);
        STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP1);

        // Add values back to state
        STATE0 = vaddq_u32(STATE0, ABEF_SAVE);
        STATE1 = vaddq_u32(STATE1, CDGH_SAVE);

        data += SHA256::BLOCKSIZE/sizeof(word32);
        length -= SHA256::BLOCKSIZE;
    }

    // Save state
    vst1q_u32(&state[0], STATE0);
    vst1q_u32(&state[4], STATE1);
}
#endif  // CRYPTOPP_ARM_SHA2_AVAILABLE
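// ***************** Power8 SHA ********************

// The POWER8 kernels build the SHA round functions from Altivec/VSX
// primitives. The four sigma functions map onto the vshasigmaw and
// vshasigmad instructions through the VecSHA256/VecSHA512 templates in
// ppc_simd.h, and vec_sel provides branch-free Ch and Maj.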
#if CRYPTOPP_POWER8_SHA_AVAILABLE

// Indexes into the S[] state array
enum {A=0, B=1, C, D, E, F, G, H};

inline
uint32x4_p VecLoad32(const word32* data, int offset)
{
#if (CRYPTOPP_LITTLE_ENDIAN)
    const uint8x16_p mask = {3,2,1,0, 7,6,5,4, 11,10,9,8, 15,14,13,12};
    const uint32x4_p val = VecLoad(offset, data);
    return (uint32x4_p)VecPermute(val, val, mask);
#else
    return VecLoad(offset, data);
#endif
}

template<class T> inline
void VecStore32(const T data, word32 dest[4])
{
    VecStore(data, dest);
}

inline
uint32x4_p VectorCh(const uint32x4_p x, const uint32x4_p y, const uint32x4_p z)
{
    // The selection trick below is due to Andy Polyakov and Jack Lloyd
    return vec_sel(z,y,x);
}

inline
uint32x4_p VectorMaj(const uint32x4_p x, const uint32x4_p y, const uint32x4_p z)
{
    // maj(x,y,z) rewritten as a single vector select
    return vec_sel(y, z, VecXor(x, y));
}

inline
uint32x4_p Vector_sigma0(const uint32x4_p val)
{
    return VecSHA256<0,0>(val);
}

inline
uint32x4_p Vector_sigma1(const uint32x4_p val)
{
    return VecSHA256<0,0xf>(val);
}

inline
uint32x4_p VectorSigma0(const uint32x4_p val)
{
    return VecSHA256<1,0>(val);
}

inline
uint32x4_p VectorSigma1(const uint32x4_p val)
{
    return VecSHA256<1,0xf>(val);
}

inline
uint32x4_p VectorPack(const uint32x4_p a, const uint32x4_p b,
                      const uint32x4_p c, const uint32x4_p d)
{
    const uint8x16_p m1 = {0,1,2,3, 16,17,18,19, 0,0,0,0, 0,0,0,0};
    const uint8x16_p m2 = {0,1,2,3, 4,5,6,7, 16,17,18,19, 20,21,22,23};
    return VecPermute(VecPermute(a,b,m1), VecPermute(c,d,m1), m2);
}

template <unsigned int R> inline
void SHA256_ROUND1(uint32x4_p W[16], uint32x4_p S[8], const uint32x4_p K, const uint32x4_p M)
{
    uint32x4_p T1, T2;

    W[R] = M;
    T1 = S[H] + VectorSigma1(S[E]) + VectorCh(S[E],S[F],S[G]) + K + M;
    T2 = VectorSigma0(S[A]) + VectorMaj(S[A],S[B],S[C]);

    S[H] = S[G]; S[G] = S[F]; S[F] = S[E];
    S[E] = S[D] + T1;
    S[D] = S[C]; S[C] = S[B]; S[B] = S[A];
    S[A] = T1 + T2;
}

template <unsigned int R> inline
void SHA256_ROUND2(uint32x4_p W[16], uint32x4_p S[8], const uint32x4_p K)
{
    // Indexes into the W[] message schedule array
    enum {IDX0=(R+0)&0xf, IDX1=(R+1)&0xf, IDX9=(R+9)&0xf, IDX14=(R+14)&0xf};

    const uint32x4_p s0 = Vector_sigma0(W[IDX1]);
    const uint32x4_p s1 = Vector_sigma1(W[IDX14]);

    uint32x4_p T1 = (W[IDX0] += s0 + s1 + W[IDX9]);
    T1 += S[H] + VectorSigma1(S[E]) + VectorCh(S[E],S[F],S[G]) + K;
    uint32x4_p T2 = VectorSigma0(S[A]) + VectorMaj(S[A],S[B],S[C]);

    S[H] = S[G]; S[G] = S[F]; S[F] = S[E];
    S[E] = S[D] + T1;
    S[D] = S[C]; S[C] = S[B]; S[B] = S[A];
    S[A] = T1 + T2;
}

void SHA256_HashMultipleBlocks_POWER8(word32 *state, const word32 *data, size_t length, ByteOrder order)
{
    CRYPTOPP_ASSERT(state);
    CRYPTOPP_ASSERT(data);
    CRYPTOPP_ASSERT(length >= SHA256::BLOCKSIZE);
    CRYPTOPP_UNUSED(order);

    const uint32_t* k = reinterpret_cast<const uint32_t*>(SHA256_K);
    const uint32_t* m = reinterpret_cast<const uint32_t*>(data);

    uint32x4_p abcd = VecLoad(state+0);
    uint32x4_p efgh = VecLoad(state+4);
    uint32x4_p W[16], S[8];
    uint32x4_p vm, vk;

    size_t blocks = length / SHA256::BLOCKSIZE;
    while (blocks--)
    {
        unsigned int offset=0;

        S[A] = abcd; S[E] = efgh;
        S[B] = VecShiftLeftOctet<4>(S[A]);
        S[F] = VecShiftLeftOctet<4>(S[E]);
        S[C] = VecShiftLeftOctet<4>(S[B]);
        S[G] = VecShiftLeftOctet<4>(S[F]);
        S[D] = VecShiftLeftOctet<4>(S[C]);
        S[H] = VecShiftLeftOctet<4>(S[G]);

        // Rounds 0-15; the message words enter the schedule through W[]
        vk = VecLoad(offset, k);
        vm = VecLoad32(m, offset);
        SHA256_ROUND1<0>(W,S, vk,vm);
        offset+=16;

        vk = VecShiftLeftOctet<4>(vk);
        vm = VecShiftLeftOctet<4>(vm);
        SHA256_ROUND1<1>(W,S, vk,vm);

        vk = VecShiftLeftOctet<4>(vk);
        vm = VecShiftLeftOctet<4>(vm);
        SHA256_ROUND1<2>(W,S, vk,vm);

        vk = VecShiftLeftOctet<4>(vk);
        vm = VecShiftLeftOctet<4>(vm);
        SHA256_ROUND1<3>(W,S, vk,vm);

        vk = VecLoad(offset, k);
        vm = VecLoad32(m, offset);
        SHA256_ROUND1<4>(W,S, vk,vm);
        offset+=16;

        vk = VecShiftLeftOctet<4>(vk);
        vm = VecShiftLeftOctet<4>(vm);
        SHA256_ROUND1<5>(W,S, vk,vm);

        vk = VecShiftLeftOctet<4>(vk);
        vm = VecShiftLeftOctet<4>(vm);
        SHA256_ROUND1<6>(W,S, vk,vm);

        vk = VecShiftLeftOctet<4>(vk);
        vm = VecShiftLeftOctet<4>(vm);
        SHA256_ROUND1<7>(W,S, vk,vm);

        vk = VecLoad(offset, k);
        vm = VecLoad32(m, offset);
        SHA256_ROUND1<8>(W,S, vk,vm);
        offset+=16;

        vk = VecShiftLeftOctet<4>(vk);
        vm = VecShiftLeftOctet<4>(vm);
        SHA256_ROUND1<9>(W,S, vk,vm);

        vk = VecShiftLeftOctet<4>(vk);
        vm = VecShiftLeftOctet<4>(vm);
        SHA256_ROUND1<10>(W,S, vk,vm);

        vk = VecShiftLeftOctet<4>(vk);
        vm = VecShiftLeftOctet<4>(vm);
        SHA256_ROUND1<11>(W,S, vk,vm);

        vk = VecLoad(offset, k);
        vm = VecLoad32(m, offset);
        SHA256_ROUND1<12>(W,S, vk,vm);
        offset+=16;

        vk = VecShiftLeftOctet<4>(vk);
        vm = VecShiftLeftOctet<4>(vm);
        SHA256_ROUND1<13>(W,S, vk,vm);

        vk = VecShiftLeftOctet<4>(vk);
        vm = VecShiftLeftOctet<4>(vm);
        SHA256_ROUND1<14>(W,S, vk,vm);

        vk = VecShiftLeftOctet<4>(vk);
        vm = VecShiftLeftOctet<4>(vm);
        SHA256_ROUND1<15>(W,S, vk,vm);

        // Rounds 16-63; the schedule is expanded in place
        for (unsigned int i=16; i<64; i+=16)
        {
            vk = VecLoad(offset, k);
            SHA256_ROUND2<0>(W,S, vk);
            SHA256_ROUND2<1>(W,S, VecShiftLeftOctet<4>(vk));
            SHA256_ROUND2<2>(W,S, VecShiftLeftOctet<8>(vk));
            SHA256_ROUND2<3>(W,S, VecShiftLeftOctet<12>(vk));
            offset+=16;

            vk = VecLoad(offset, k);
            SHA256_ROUND2<4>(W,S, vk);
            SHA256_ROUND2<5>(W,S, VecShiftLeftOctet<4>(vk));
            SHA256_ROUND2<6>(W,S, VecShiftLeftOctet<8>(vk));
            SHA256_ROUND2<7>(W,S, VecShiftLeftOctet<12>(vk));
            offset+=16;

            vk = VecLoad(offset, k);
            SHA256_ROUND2<8>(W,S, vk);
            SHA256_ROUND2<9>(W,S, VecShiftLeftOctet<4>(vk));
            SHA256_ROUND2<10>(W,S, VecShiftLeftOctet<8>(vk));
            SHA256_ROUND2<11>(W,S, VecShiftLeftOctet<12>(vk));
            offset+=16;

            vk = VecLoad(offset, k);
            SHA256_ROUND2<12>(W,S, vk);
            SHA256_ROUND2<13>(W,S, VecShiftLeftOctet<4>(vk));
            SHA256_ROUND2<14>(W,S, VecShiftLeftOctet<8>(vk));
            SHA256_ROUND2<15>(W,S, VecShiftLeftOctet<12>(vk));
            offset+=16;
        }

        abcd += VectorPack(S[A],S[B],S[C],S[D]);
        efgh += VectorPack(S[E],S[F],S[G],S[H]);
        m += 16;  // advance to the next 64-byte block
    }

    VecStore32(abcd, state+0);
    VecStore32(efgh, state+4);
}
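// Scalar state words travel in the leading lane of each S[] vector: S[A]
// starts as {a,b,c,d} but only lane 0 is significant, and VectorPack
// gathers the leading lanes back together after the final round. The same
// trick serves vk and vm: four consecutive round constants or message
// words are loaded at once and VecShiftLeftOctet steps the next one into
// the leading lane. SHA256_ROUND1 covers rounds 0-15 and stores each
// message word into the W[16] circular buffer; SHA256_ROUND2 covers
// rounds 16-63 and expands the schedule in place. The SHA-512 code below
// uses the same scheme with 64-bit lanes.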
inline
uint64x2_p VecLoad64(const word64* data, int offset)
{
#if (CRYPTOPP_LITTLE_ENDIAN)
    const uint8x16_p mask = {0,1,2,3, 4,5,6,7, 8,9,10,11, 12,13,14,15};
    const uint64x2_p val = VecLoad(offset, data);
    return (uint64x2_p)VecPermute(val, val, mask);
#else
    return VecLoad(offset, data);
#endif
}

template<class T> inline
void VecStore64(const T data, word64 dest[2])
{
    VecStore(data, dest);
}

inline
uint64x2_p VectorCh(const uint64x2_p x, const uint64x2_p y, const uint64x2_p z)
{
    // The selection trick below is due to Andy Polyakov and Jack Lloyd
    return vec_sel(z,y,x);
}

inline
uint64x2_p VectorMaj(const uint64x2_p x, const uint64x2_p y, const uint64x2_p z)
{
    // maj(x,y,z) rewritten as a single vector select
    return vec_sel(y, z, VecXor(x, y));
}

inline
uint64x2_p Vector_sigma0(const uint64x2_p val)
{
    return VecSHA512<0,0>(val);
}

inline
uint64x2_p Vector_sigma1(const uint64x2_p val)
{
    return VecSHA512<0,0xf>(val);
}

inline
uint64x2_p VectorSigma0(const uint64x2_p val)
{
    return VecSHA512<1,0>(val);
}

inline
uint64x2_p VectorSigma1(const uint64x2_p val)
{
    return VecSHA512<1,0xf>(val);
}

inline
uint64x2_p VectorPack(const uint64x2_p x, const uint64x2_p y)
{
    const uint8x16_p m = {0,1,2,3, 4,5,6,7, 16,17,18,19, 20,21,22,23};
    return VecPermute(x,y,m);
}

template <unsigned int R> inline
void SHA512_ROUND1(uint64x2_p W[16], uint64x2_p S[8], const uint64x2_p K, const uint64x2_p M)
{
    uint64x2_p T1, T2;

    W[R] = M;
    T1 = S[H] + VectorSigma1(S[E]) + VectorCh(S[E],S[F],S[G]) + K + M;
    T2 = VectorSigma0(S[A]) + VectorMaj(S[A],S[B],S[C]);

    S[H] = S[G]; S[G] = S[F]; S[F] = S[E];
    S[E] = S[D] + T1;
    S[D] = S[C]; S[C] = S[B]; S[B] = S[A];
    S[A] = T1 + T2;
}

template <unsigned int R> inline
void SHA512_ROUND2(uint64x2_p W[16], uint64x2_p S[8], const uint64x2_p K)
{
    // Indexes into the W[] message schedule array
    enum {IDX0=(R+0)&0xf, IDX1=(R+1)&0xf, IDX9=(R+9)&0xf, IDX14=(R+14)&0xf};

    const uint64x2_p s0 = Vector_sigma0(W[IDX1]);
    const uint64x2_p s1 = Vector_sigma1(W[IDX14]);

    uint64x2_p T1 = (W[IDX0] += s0 + s1 + W[IDX9]);
    T1 += S[H] + VectorSigma1(S[E]) + VectorCh(S[E],S[F],S[G]) + K;
    uint64x2_p T2 = VectorSigma0(S[A]) + VectorMaj(S[A],S[B],S[C]);

    S[H] = S[G]; S[G] = S[F]; S[F] = S[E];
    S[E] = S[D] + T1;
    S[D] = S[C]; S[C] = S[B]; S[B] = S[A];
    S[A] = T1 + T2;
}

void SHA512_HashMultipleBlocks_POWER8(word64 *state, const word64 *data, size_t length, ByteOrder order)
{
    CRYPTOPP_ASSERT(state);
    CRYPTOPP_ASSERT(data);
    CRYPTOPP_ASSERT(length >= SHA512::BLOCKSIZE);
    CRYPTOPP_UNUSED(order);

    const uint64_t* k = reinterpret_cast<const uint64_t*>(SHA512_K);
    const uint64_t* m = reinterpret_cast<const uint64_t*>(data);

    uint64x2_p ab = VecLoad(state+0);
    uint64x2_p cd = VecLoad(state+2);
    uint64x2_p ef = VecLoad(state+4);
    uint64x2_p gh = VecLoad(state+6);
    uint64x2_p W[16], S[8];
    uint64x2_p vm, vk;

    size_t blocks = length / SHA512::BLOCKSIZE;
    while (blocks--)
    {
        unsigned int offset=0;

        S[A] = ab; S[C] = cd;
        S[E] = ef; S[G] = gh;
        S[B] = VecShiftLeftOctet<8>(S[A]);
        S[D] = VecShiftLeftOctet<8>(S[C]);
        S[F] = VecShiftLeftOctet<8>(S[E]);
        S[H] = VecShiftLeftOctet<8>(S[G]);

        // Rounds 0-15; the message words enter the schedule through W[]
        vk = VecLoad(offset, k);
        vm = VecLoad64(m, offset);
        SHA512_ROUND1<0>(W,S, vk,vm);
        offset+=16;

        vk = VecShiftLeftOctet<8>(vk);
        vm = VecShiftLeftOctet<8>(vm);
        SHA512_ROUND1<1>(W,S, vk,vm);

        vk = VecLoad(offset, k);
        vm = VecLoad64(m, offset);
        SHA512_ROUND1<2>(W,S, vk,vm);
        offset+=16;

        vk = VecShiftLeftOctet<8>(vk);
        vm = VecShiftLeftOctet<8>(vm);
        SHA512_ROUND1<3>(W,S, vk,vm);

        vk = VecLoad(offset, k);
        vm = VecLoad64(m, offset);
        SHA512_ROUND1<4>(W,S, vk,vm);
        offset+=16;

        vk = VecShiftLeftOctet<8>(vk);
        vm = VecShiftLeftOctet<8>(vm);
        SHA512_ROUND1<5>(W,S, vk,vm);

        vk = VecLoad(offset, k);
        vm = VecLoad64(m, offset);
        SHA512_ROUND1<6>(W,S, vk,vm);
        offset+=16;

        vk = VecShiftLeftOctet<8>(vk);
        vm = VecShiftLeftOctet<8>(vm);
        SHA512_ROUND1<7>(W,S, vk,vm);

        vk = VecLoad(offset, k);
        vm = VecLoad64(m, offset);
        SHA512_ROUND1<8>(W,S, vk,vm);
        offset+=16;

        vk = VecShiftLeftOctet<8>(vk);
        vm = VecShiftLeftOctet<8>(vm);
        SHA512_ROUND1<9>(W,S, vk,vm);

        vk = VecLoad(offset, k);
        vm = VecLoad64(m, offset);
        SHA512_ROUND1<10>(W,S, vk,vm);
        offset+=16;

        vk = VecShiftLeftOctet<8>(vk);
        vm = VecShiftLeftOctet<8>(vm);
        SHA512_ROUND1<11>(W,S, vk,vm);

        vk = VecLoad(offset, k);
        vm = VecLoad64(m, offset);
        SHA512_ROUND1<12>(W,S, vk,vm);
        offset+=16;

        vk = VecShiftLeftOctet<8>(vk);
        vm = VecShiftLeftOctet<8>(vm);
        SHA512_ROUND1<13>(W,S, vk,vm);

        vk = VecLoad(offset, k);
        vm = VecLoad64(m, offset);
        SHA512_ROUND1<14>(W,S, vk,vm);
        offset+=16;

        vk = VecShiftLeftOctet<8>(vk);
        vm = VecShiftLeftOctet<8>(vm);
        SHA512_ROUND1<15>(W,S, vk,vm);

        // Rounds 16-79; the schedule is expanded in place
        for (unsigned int i=16; i<80; i+=16)
        {
            vk = VecLoad(offset, k);
            SHA512_ROUND2<0>(W,S, vk);
            SHA512_ROUND2<1>(W,S, VecShiftLeftOctet<8>(vk));
            offset+=16;

            vk = VecLoad(offset, k);
            SHA512_ROUND2<2>(W,S, vk);
            SHA512_ROUND2<3>(W,S, VecShiftLeftOctet<8>(vk));
            offset+=16;

            vk = VecLoad(offset, k);
            SHA512_ROUND2<4>(W,S, vk);
            SHA512_ROUND2<5>(W,S, VecShiftLeftOctet<8>(vk));
            offset+=16;

            vk = VecLoad(offset, k);
            SHA512_ROUND2<6>(W,S, vk);
            SHA512_ROUND2<7>(W,S, VecShiftLeftOctet<8>(vk));
            offset+=16;

            vk = VecLoad(offset, k);
            SHA512_ROUND2<8>(W,S, vk);
            SHA512_ROUND2<9>(W,S, VecShiftLeftOctet<8>(vk));
            offset+=16;

            vk = VecLoad(offset, k);
            SHA512_ROUND2<10>(W,S, vk);
            SHA512_ROUND2<11>(W,S, VecShiftLeftOctet<8>(vk));
            offset+=16;

            vk = VecLoad(offset, k);
            SHA512_ROUND2<12>(W,S, vk);
            SHA512_ROUND2<13>(W,S, VecShiftLeftOctet<8>(vk));
            offset+=16;

            vk = VecLoad(offset, k);
            SHA512_ROUND2<14>(W,S, vk);
            SHA512_ROUND2<15>(W,S, VecShiftLeftOctet<8>(vk));
            offset+=16;
        }

        ab += VectorPack(S[A],S[B]);
        cd += VectorPack(S[C],S[D]);
        ef += VectorPack(S[E],S[F]);
        gh += VectorPack(S[G],S[H]);
        m += 16;  // advance to the next 128-byte block
    }

    VecStore64(ab, state+0);
    VecStore64(cd, state+2);
    VecStore64(ef, state+4);
    VecStore64(gh, state+6);
}
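// A minimal usage sketch (hypothetical, for illustration only): hashing
// one prepared 64-byte block with the POWER8 SHA-256 kernel. 'block' must
// hold big-endian message words; the loader performs the permute:
//
//   word32 digest[8] = { 0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
//                        0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19 };
//   SHA256_HashMultipleBlocks_POWER8(digest,
//       reinterpret_cast<const word32*>(block),
//       SHA256::BLOCKSIZE, BIG_ENDIAN_ORDER);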
#endif  // CRYPTOPP_POWER8_SHA_AVAILABLE