Crypto++  6.1
Free C++ class library of cryptographic schemes
gcm-simd.cpp
1 // gcm-simd.cpp - written and placed in the public domain by
2 // Jeffrey Walton, Uri Blumenthal and Marcel Raad.
3 //
4 // This source file uses intrinsics to gain access to SSE2, CLMUL
5 // (PCLMULQDQ) and ARMv8a PMULL instructions. A separate source file is
6 // needed because additional CXXFLAGS are required to enable the
7 // appropriate instruction sets in some build configurations.
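// For example, GCC-compatible builds typically compile this translation unit
// with flags such as -mssse3 -mpclmul (x86) or -march=armv8-a+crypto (Aarch64);
// the exact options depend on the toolchain and build system.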
8 
9 #include "pch.h"
10 #include "config.h"
11 #include "misc.h"
12 
13 // Clang 3.3 integrated assembler crashes on Linux. Other versions produce incorrect results.
14 // Clang has never handled Intel ASM very well. I wish LLVM would fix it.
15 #if defined(__clang__)
16 # undef CRYPTOPP_X86_ASM_AVAILABLE
17 # undef CRYPTOPP_X32_ASM_AVAILABLE
18 # undef CRYPTOPP_X64_ASM_AVAILABLE
19 # undef CRYPTOPP_SSE2_ASM_AVAILABLE
20 #endif
21 
22 // SunCC 12.3 - 12.5 crash in GCM_Reduce_CLMUL
23 // http://github.com/weidai11/cryptopp/issues/226
24 #if defined(__SUNPRO_CC) && (__SUNPRO_CC <= 0x5140)
25 # undef CRYPTOPP_CLMUL_AVAILABLE
26 #endif
27 
28 // Clang and GCC hoops...
29 #if !(defined(__ARM_FEATURE_CRYPTO) || defined(_MSC_VER))
30 # undef CRYPTOPP_ARM_PMULL_AVAILABLE
31 #endif
32 
33 #if (CRYPTOPP_SSE2_INTRIN_AVAILABLE)
34 # include <emmintrin.h>
35 #endif
36 
37 #if (CRYPTOPP_CLMUL_AVAILABLE)
38 # include <tmmintrin.h>
39 # include <wmmintrin.h>
40 #endif
41 
42 #if (CRYPTOPP_ARM_NEON_AVAILABLE)
43 # include <arm_neon.h>
44 #endif
45 
46 // Can't use CRYPTOPP_ARM_XXX_AVAILABLE because too many
47 // compilers don't follow ACLE conventions for the include.
48 #if defined(CRYPTOPP_ARM_ACLE_AVAILABLE)
49 # include <stdint.h>
50 # include <arm_acle.h>
51 #endif
52 
53 #ifdef CRYPTOPP_GNU_STYLE_INLINE_ASSEMBLY
54 # include <signal.h>
55 # include <setjmp.h>
56 #endif
57 
58 #ifndef EXCEPTION_EXECUTE_HANDLER
59 # define EXCEPTION_EXECUTE_HANDLER 1
60 #endif
61 
62 // Clang __m128i casts, http://bugs.llvm.org/show_bug.cgi?id=20670
63 #define M128_CAST(x) ((__m128i *)(void *)(x))
64 #define CONST_M128_CAST(x) ((const __m128i *)(const void *)(x))
65 
66 // GCC cast warning
67 #define UINT64X2_CAST(x) ((uint64x2_t *)(void *)(x))
68 #define CONST_UINT64X2_CAST(x) ((const uint64x2_t *)(const void *)(x))
69 
70 ANONYMOUS_NAMESPACE_BEGIN
71 
72 // GCC 4.8 is missing PMULL gear
73 #if (CRYPTOPP_ARM_PMULL_AVAILABLE)
74 # if (CRYPTOPP_GCC_VERSION >= 40800) && (CRYPTOPP_GCC_VERSION < 49000)
75 inline poly128_t VMULL_P64(poly64_t a, poly64_t b)
76 {
77  return __builtin_aarch64_crypto_pmulldi_ppp (a, b);
78 }
79 
80 inline poly128_t VMULL_HIGH_P64(poly64x2_t a, poly64x2_t b)
81 {
82  return __builtin_aarch64_crypto_pmullv2di_ppp (a, b);
83 }
84 # else
85 inline poly128_t VMULL_P64(poly64_t a, poly64_t b)
86 {
87  return vmull_p64(a, b);
88 }
89 
90 inline poly128_t VMULL_HIGH_P64(poly64x2_t a, poly64x2_t b)
91 {
92  return vmull_high_p64(a, b);
93 }
94 # endif
95 #endif
96 
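// The PMULL_ab helpers below perform a 64x64 -> 128-bit carry-less multiply.
// The suffix names the 64-bit lane taken from each operand: 0 is the low lane
// and 1 is the high lane, so PMULL_10(a, b) multiplies the high half of 'a'
// by the low half of 'b'. The GCC and Microsoft variants follow the same
// convention.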
97 #if CRYPTOPP_ARM_PMULL_AVAILABLE
98 #if defined(__GNUC__)
99 // Schneiders, Hovsmith and O'Rourke used this trick.
100 // It results in much better code generation in production code
101 // by avoiding D-register spills when using vgetq_lane_u64. The
102 // problem does not surface under minimal test cases.
103 inline uint64x2_t PMULL_00(const uint64x2_t a, const uint64x2_t b)
104 {
105  uint64x2_t r;
106  __asm __volatile("pmull %0.1q, %1.1d, %2.1d \n\t"
107  :"=w" (r) : "w" (a), "w" (b) );
108  return r;
109 }
110 
111 inline uint64x2_t PMULL_01(const uint64x2_t a, const uint64x2_t b)
112 {
113  uint64x2_t r;
114  __asm __volatile("pmull %0.1q, %1.1d, %2.1d \n\t"
115  :"=w" (r) : "w" (a), "w" (vget_high_u64(b)) );
116  return r;
117 }
118 
119 inline uint64x2_t PMULL_10(const uint64x2_t a, const uint64x2_t b)
120 {
121  uint64x2_t r;
122  __asm __volatile("pmull %0.1q, %1.1d, %2.1d \n\t"
123  :"=w" (r) : "w" (vget_high_u64(a)), "w" (b) );
124  return r;
125 }
126 
127 inline uint64x2_t PMULL_11(const uint64x2_t a, const uint64x2_t b)
128 {
129  uint64x2_t r;
130  __asm __volatile("pmull2 %0.1q, %1.2d, %2.2d \n\t"
131  :"=w" (r) : "w" (a), "w" (b) );
132  return r;
133 }
134 
135 inline uint64x2_t VEXT_U8(uint64x2_t a, uint64x2_t b, unsigned int c)
136 {
137  uint64x2_t r;
138  __asm __volatile("ext %0.16b, %1.16b, %2.16b, %3 \n\t"
139  :"=w" (r) : "w" (a), "w" (b), "I" (c) );
140  return r;
141 }
142 
143 // https://github.com/weidai11/cryptopp/issues/366
144 template <unsigned int C>
145 inline uint64x2_t VEXT_U8(uint64x2_t a, uint64x2_t b)
146 {
147  uint64x2_t r;
148  __asm __volatile("ext %0.16b, %1.16b, %2.16b, %3 \n\t"
149  :"=w" (r) : "w" (a), "w" (b), "I" (C) );
150  return r;
151 }
152 #endif // GCC and compatibles
153 
154 #if defined(_MSC_VER)
155 inline uint64x2_t PMULL_00(const uint64x2_t a, const uint64x2_t b)
156 {
157  return (uint64x2_t)(vmull_p64(vgetq_lane_u64(vreinterpretq_u64_u8(a),0),
158  vgetq_lane_u64(vreinterpretq_u64_u8(b),0)));
159 }
160 
161 inline uint64x2_t PMULL_01(const uint64x2_t a, const uint64x2_t b)
162 {
163  return (uint64x2_t)(vmull_p64(vgetq_lane_u64(vreinterpretq_u64_u8(a),0),
164  vgetq_lane_u64(vreinterpretq_u64_u8(b),1)));
165 }
166 
167 inline uint64x2_t PMULL_10(const uint64x2_t a, const uint64x2_t b)
168 {
169  return (uint64x2_t)(vmull_p64(vgetq_lane_u64(vreinterpretq_u64_u8(a),1),
170  vgetq_lane_u64(vreinterpretq_u64_u8(b),0)));
171 }
172 
173 inline uint64x2_t PMULL_11(const uint64x2_t a, const uint64x2_t b)
174 {
175  return (uint64x2_t)(vmull_p64(vgetq_lane_u64(vreinterpretq_u64_u8(a),1),
176  vgetq_lane_u64(vreinterpretq_u64_u8(b),1)));
177 }
178 
179 inline uint64x2_t VEXT_U8(uint64x2_t a, uint64x2_t b, unsigned int c)
180 {
181  return (uint64x2_t)vextq_u8(vreinterpretq_u8_u64(a), vreinterpretq_u8_u64(b), c);
182 }
183 
184 // https://github.com/weidai11/cryptopp/issues/366
185 template <unsigned int C>
186 inline uint64x2_t VEXT_U8(uint64x2_t a, uint64x2_t b)
187 {
188  return (uint64x2_t)vextq_u8(vreinterpretq_u8_u64(a), vreinterpretq_u8_u64(b), C);
189 }
190 #endif // Microsoft and compatibles
191 #endif // CRYPTOPP_ARM_PMULL_AVAILABLE
192 
193 ANONYMOUS_NAMESPACE_END
194 
195 NAMESPACE_BEGIN(CryptoPP)
196 
197 #ifdef CRYPTOPP_GNU_STYLE_INLINE_ASSEMBLY
198 extern "C" {
199  typedef void (*SigHandler)(int);
200 
201  static jmp_buf s_jmpSIGILL;
202  static void SigIllHandler(int)
203  {
204  longjmp(s_jmpSIGILL, 1);
205  }
206 };
207 #endif // CRYPTOPP_GNU_STYLE_INLINE_ASSEMBLY
208 
209 #if (CRYPTOPP_BOOL_ARM32 || CRYPTOPP_BOOL_ARM64)
210 bool CPU_ProbePMULL()
211 {
212 #if defined(CRYPTOPP_NO_CPU_FEATURE_PROBES)
213  return false;
214 #elif (CRYPTOPP_ARM_PMULL_AVAILABLE)
215 # if defined(CRYPTOPP_MS_STYLE_INLINE_ASSEMBLY)
216  volatile bool result = true;
217  __try
218  {
219  const poly64_t a1={0x9090909090909090}, b1={0xb0b0b0b0b0b0b0b0};
220  const poly8x16_t a2={0x80,0x80,0x80,0x80,0x80,0x80,0x80,0x80,0xa0,0xa0,0xa0,0xa0,0xa0,0xa0,0xa0,0xa0},
221  b2={0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0};
222 
223  const poly128_t r1 = vmull_p64(a1, b1);
224  const poly128_t r2 = vmull_high_p64((poly64x2_t)(a2), (poly64x2_t)(b2));
225 
226  // Linaro is missing vreinterpretq_u64_p128. Also see http://github.com/weidai11/cryptopp/issues/233.
227  const uint64x2_t& t1 = (uint64x2_t)(r1); // {bignum,bignum}
228  const uint64x2_t& t2 = (uint64x2_t)(r2); // {bignum,bignum}
229 
230  result = !!(vgetq_lane_u64(t1,0) == 0x5300530053005300 && vgetq_lane_u64(t1,1) == 0x5300530053005300 &&
231  vgetq_lane_u64(t2,0) == 0x6c006c006c006c00 && vgetq_lane_u64(t2,1) == 0x6c006c006c006c00);
232  }
233  __except (EXCEPTION_EXECUTE_HANDLER)
234  {
235  return false;
236  }
237  return result;
238 # else
239 
240 # if defined(__APPLE__)
241  // No SIGILL probes on Apple platforms. Plus, Apple Clang does not have PMULL intrinsics.
242  return false;
243 # endif
244 
245  // longjmp and clobber warnings. Volatile is required.
246  // http://github.com/weidai11/cryptopp/issues/24 and http://stackoverflow.com/q/7721854
247  volatile bool result = true;
248 
249  volatile SigHandler oldHandler = signal(SIGILL, SigIllHandler);
250  if (oldHandler == SIG_ERR)
251  return false;
252 
253  volatile sigset_t oldMask;
254  if (sigprocmask(0, NULLPTR, (sigset_t*)&oldMask))
255  return false;
256 
257  if (setjmp(s_jmpSIGILL))
258  result = false;
259  else
260  {
261  const poly64_t a1={0x9090909090909090}, b1={0xb0b0b0b0b0b0b0b0};
262  const poly8x16_t a2={0x80,0x80,0x80,0x80,0x80,0x80,0x80,0x80,0xa0,0xa0,0xa0,0xa0,0xa0,0xa0,0xa0,0xa0},
263  b2={0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0};
264 
265  const poly128_t r1 = VMULL_P64(a1, b1);
266  const poly128_t r2 = VMULL_HIGH_P64((poly64x2_t)(a2), (poly64x2_t)(b2));
267 
268  // Linaro is missing vreinterpretq_u64_p128. Also see http://github.com/weidai11/cryptopp/issues/233.
269  const uint64x2_t& t1 = (uint64x2_t)(r1); // {bignum,bignum}
270  const uint64x2_t& t2 = (uint64x2_t)(r2); // {bignum,bignum}
271 
272  result = !!(vgetq_lane_u64(t1,0) == 0x5300530053005300 && vgetq_lane_u64(t1,1) == 0x5300530053005300 &&
273  vgetq_lane_u64(t2,0) == 0x6c006c006c006c00 && vgetq_lane_u64(t2,1) == 0x6c006c006c006c00);
274  }
275 
276  sigprocmask(SIG_SETMASK, (sigset_t*)&oldMask, NULLPTR);
277  signal(SIGILL, oldHandler);
278  return result;
279 # endif
280 #else
281  return false;
282 #endif // CRYPTOPP_ARM_PMULL_AVAILABLE
283 }
284 #endif // ARM32 or ARM64
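
// Usage sketch (not part of the original source): the probe installs a SIGILL
// handler and executes the instruction on some platforms, so callers normally
// run it once and cache the result before selecting between the PMULL routines
// below and the portable GHASH code. The wrapper below is hypothetical;
// Crypto++ provides a similar cached predicate through cpu.h.
#if 0
inline bool HasPMULL_Cached()
{
    static const bool s_hasPMULL = CPU_ProbePMULL();  // probed once, then cached
    return s_hasPMULL;
}
#endif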
285 
286 #if CRYPTOPP_ARM_NEON_AVAILABLE
287 void GCM_Xor16_NEON(byte *a, const byte *b, const byte *c)
288 {
289  CRYPTOPP_ASSERT(IsAlignedOn(a,GetAlignmentOf<uint64x2_t>()));
290  CRYPTOPP_ASSERT(IsAlignedOn(b,GetAlignmentOf<uint64x2_t>()));
291  CRYPTOPP_ASSERT(IsAlignedOn(c,GetAlignmentOf<uint64x2_t>()));
292  *UINT64X2_CAST(a) = veorq_u64(*CONST_UINT64X2_CAST(b), *CONST_UINT64X2_CAST(c));
293 }
294 #endif
295 
296 #if CRYPTOPP_ARM_PMULL_AVAILABLE
297 
298 ANONYMOUS_NAMESPACE_BEGIN
299 
300 CRYPTOPP_ALIGN_DATA(16)
301 const word64 s_clmulConstants64[] = {
302  W64LIT(0xe100000000000000), W64LIT(0xc200000000000000), // Used for ARM and x86; polynomial coefficients
303  W64LIT(0x08090a0b0c0d0e0f), W64LIT(0x0001020304050607), // Unused for ARM; used for x86 _mm_shuffle_epi8
304  W64LIT(0x0001020304050607), W64LIT(0x08090a0b0c0d0e0f) // Unused for ARM; used for x86 _mm_shuffle_epi8
305 };
306 
307 const uint64x2_t *s_clmulConstants = (const uint64x2_t *)s_clmulConstants64;
308 const unsigned int s_clmulTableSizeInBlocks = 8;
309 
310 ANONYMOUS_NAMESPACE_END
311 
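// GCM_Reduce_PMULL folds the double-width carry-less product, represented by
// the partial products c0, c1 and c2, back into a single 128-bit value modulo
// the GHASH polynomial. It mirrors GCM_Reduce_CLMUL further below; see the
// algorithm description inside that function.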
312 uint64x2_t GCM_Reduce_PMULL(uint64x2_t c0, uint64x2_t c1, uint64x2_t c2, const uint64x2_t &r)
313 {
314  c1 = veorq_u64(c1, VEXT_U8<8>(vdupq_n_u64(0), c0));
315  c1 = veorq_u64(c1, PMULL_01(c0, r));
316  c0 = VEXT_U8<8>(c0, vdupq_n_u64(0));
317  c0 = vshlq_n_u64(veorq_u64(c0, c1), 1);
318  c0 = PMULL_00(c0, r);
319  c2 = veorq_u64(c2, c0);
320  c2 = veorq_u64(c2, VEXT_U8<8>(c1, vdupq_n_u64(0)));
321  c1 = vshrq_n_u64(vcombine_u64(vget_low_u64(c1), vget_low_u64(c2)), 63);
322  c2 = vshlq_n_u64(c2, 1);
323 
324  return veorq_u64(c2, c1);
325 }
326 
327 uint64x2_t GCM_Multiply_PMULL(const uint64x2_t &x, const uint64x2_t &h, const uint64x2_t &r)
328 {
329  const uint64x2_t c0 = PMULL_00(x, h);
330  const uint64x2_t c1 = veorq_u64(PMULL_10(x, h), PMULL_01(x, h));
331  const uint64x2_t c2 = PMULL_11(x, h);
332 
333  return GCM_Reduce_PMULL(c0, c1, c2, r);
334 }
335 
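// Precomputes powers of the hash key into mulTable. The four overlapping
// stores in each iteration leave every 32-byte group laid out as
// { low64(h), low64(h1), high64(h), high64(h1) }, i.e. the low halves of two
// consecutive key powers followed by their high halves, matching the pair of
// 16-byte table loads performed in GCM_AuthenticateBlocks_PMULL.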
336 void GCM_SetKeyWithoutResync_PMULL(const byte *hashKey, byte *mulTable, unsigned int tableSize)
337 {
338  const uint64x2_t r = s_clmulConstants[0];
339  const uint64x2_t t = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(hashKey)));
340  const uint64x2_t h0 = vextq_u64(t, t, 1);
341 
342  uint64x2_t h = h0;
343  unsigned int i;
344  for (i=0; i<tableSize-32; i+=32)
345  {
346  const uint64x2_t h1 = GCM_Multiply_PMULL(h, h0, r);
347  vst1_u64((uint64_t *)(mulTable+i), vget_low_u64(h));
348  vst1q_u64((uint64_t *)(mulTable+i+16), h1);
349  vst1q_u64((uint64_t *)(mulTable+i+8), h);
350  vst1_u64((uint64_t *)(mulTable+i+8), vget_low_u64(h1));
351  h = GCM_Multiply_PMULL(h1, h0, r);
352  }
353 
354  const uint64x2_t h1 = GCM_Multiply_PMULL(h, h0, r);
355  vst1_u64((uint64_t *)(mulTable+i), vget_low_u64(h));
356  vst1q_u64((uint64_t *)(mulTable+i+16), h1);
357  vst1q_u64((uint64_t *)(mulTable+i+8), h);
358  vst1_u64((uint64_t *)(mulTable+i+8), vget_low_u64(h1));
359 }
360 
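// Absorbs full 16-byte blocks into the running GHASH state held in hbuffer.
// Up to s_clmulTableSizeInBlocks blocks are processed per pass: the inner loop
// accumulates the partial products c0, c1 and c2 across the group, and a
// single GCM_Reduce_PMULL call per pass performs the modular reduction.
// Returns the number of leftover bytes (less than 16) that were not consumed.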
361 size_t GCM_AuthenticateBlocks_PMULL(const byte *data, size_t len, const byte *mtable, byte *hbuffer)
362 {
363  const uint64x2_t* table = reinterpret_cast<const uint64x2_t*>(mtable);
364  uint64x2_t x = vreinterpretq_u64_u8(vld1q_u8(hbuffer));
365  const uint64x2_t r = s_clmulConstants[0];
366 
367  const size_t BLOCKSIZE = 16;
368  while (len >= BLOCKSIZE)
369  {
370  size_t s = UnsignedMin(len/BLOCKSIZE, s_clmulTableSizeInBlocks), i=0;
371  uint64x2_t d1, d2 = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(data+(s-1)*BLOCKSIZE)));
372  uint64x2_t c0 = vdupq_n_u64(0);
373  uint64x2_t c1 = vdupq_n_u64(0);
374  uint64x2_t c2 = vdupq_n_u64(0);
375 
376  while (true)
377  {
378  const uint64x2_t h0 = vld1q_u64((const uint64_t*)(table+i));
379  const uint64x2_t h1 = vld1q_u64((const uint64_t*)(table+i+1));
380  const uint64x2_t h2 = veorq_u64(h0, h1);
381 
382  if (++i == s)
383  {
384  const uint64x2_t t1 = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(data)));
385  d1 = veorq_u64(vextq_u64(t1, t1, 1), x);
386  c0 = veorq_u64(c0, PMULL_00(d1, h0));
387  c2 = veorq_u64(c2, PMULL_10(d1, h1));
388  d1 = veorq_u64(d1, (uint64x2_t)vcombine_u32(vget_high_u32(vreinterpretq_u32_u64(d1)),
389  vget_low_u32(vreinterpretq_u32_u64(d1))));
390  c1 = veorq_u64(c1, PMULL_00(d1, h2));
391 
392  break;
393  }
394 
395  d1 = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(data+(s-i)*16-8)));
396  c0 = veorq_u64(c0, PMULL_10(d2, h0));
397  c2 = veorq_u64(c2, PMULL_10(d1, h1));
398  d2 = veorq_u64(d2, d1);
399  c1 = veorq_u64(c1, PMULL_10(d2, h2));
400 
401  if (++i == s)
402  {
403  const uint64x2_t t2 = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(data)));
404  d1 = veorq_u64(vextq_u64(t2, t2, 1), x);
405  c0 = veorq_u64(c0, PMULL_01(d1, h0));
406  c2 = veorq_u64(c2, PMULL_11(d1, h1));
407  d1 = veorq_u64(d1, (uint64x2_t)vcombine_u32(vget_high_u32(vreinterpretq_u32_u64(d1)),
408  vget_low_u32(vreinterpretq_u32_u64(d1))));
409  c1 = veorq_u64(c1, PMULL_01(d1, h2));
410 
411  break;
412  }
413 
414  const uint64x2_t t3 = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(data+(s-i)*16-8)));
415  d2 = vextq_u64(t3, t3, 1);
416  c0 = veorq_u64(c0, PMULL_01(d1, h0));
417  c2 = veorq_u64(c2, PMULL_01(d2, h1));
418  d1 = veorq_u64(d1, d2);
419  c1 = veorq_u64(c1, PMULL_01(d1, h2));
420  }
421  data += s*16;
422  len -= s*16;
423 
424  c1 = veorq_u64(veorq_u64(c1, c0), c2);
425  x = GCM_Reduce_PMULL(c0, c1, c2, r);
426  }
427 
428  vst1q_u64(reinterpret_cast<uint64_t *>(hbuffer), x);
429  return len;
430 }
431 
432 void GCM_ReverseHashBufferIfNeeded_PMULL(byte *hashBuffer)
433 {
434 if (GetNativeByteOrder() != BIG_ENDIAN_ORDER)
435 {
436  const uint8x16_t x = vrev64q_u8(vld1q_u8(hashBuffer));
437  vst1q_u8(hashBuffer, vextq_u8(x, x, 8));
438  }
439 }
440 #endif
441 
442 #if CRYPTOPP_SSE2_INTRIN_AVAILABLE || CRYPTOPP_SSE2_ASM_AVAILABLE
443 // SunCC 5.10-5.11 compiler crash. Move GCM_Xor16_SSE2 out-of-line, and place in
444 // a source file with an SSE architecture switch. Also see GH #226 and GH #284.
445 void GCM_Xor16_SSE2(byte *a, const byte *b, const byte *c)
446 {
447 # if CRYPTOPP_SSE2_ASM_AVAILABLE && defined(__GNUC__)
448  asm ("movdqa %1, %%xmm0; pxor %2, %%xmm0; movdqa %%xmm0, %0;"
449  : "=m" (a[0]) : "m"(b[0]), "m"(c[0]));
450 # else // CRYPTOPP_SSE2_INTRIN_AVAILABLE
451  _mm_store_si128(M128_CAST(a), _mm_xor_si128(
452  _mm_load_si128(CONST_M128_CAST(b)),
453  _mm_load_si128(CONST_M128_CAST(c))));
454 # endif
455 }
456 #endif
457 
458 #if CRYPTOPP_CLMUL_AVAILABLE
459 
460 ANONYMOUS_NAMESPACE_BEGIN
461 
462 CRYPTOPP_ALIGN_DATA(16)
463 const word64 s_clmulConstants64[] = {
464  W64LIT(0xe100000000000000), W64LIT(0xc200000000000000),
465  W64LIT(0x08090a0b0c0d0e0f), W64LIT(0x0001020304050607),
466  W64LIT(0x0001020304050607), W64LIT(0x08090a0b0c0d0e0f)};
467 
468 const __m128i *s_clmulConstants = CONST_M128_CAST(s_clmulConstants64);
469 const unsigned int s_cltableSizeInBlocks = 8;
470 
471 ANONYMOUS_NAMESPACE_END
472 
473 #if 0
474 // preserved for testing
475 void gcm_gf_mult(const unsigned char *a, const unsigned char *b, unsigned char *c)
476 {
477  word64 Z0=0, Z1=0, V0, V1;
478 
479 typedef BlockGetAndPut<word64, BigEndian> Block;
480 Block::Get(a)(V0)(V1);
481 
482  for (int i=0; i<16; i++)
483  {
484  for (int j=0x80; j!=0; j>>=1)
485  {
486  int x = b[i] & j;
487  Z0 ^= x ? V0 : 0;
488  Z1 ^= x ? V1 : 0;
489  x = (int)V1 & 1;
490  V1 = (V1>>1) | (V0<<63);
491  V0 = (V0>>1) ^ (x ? W64LIT(0xe1) << 56 : 0);
492  }
493  }
494  Block::Put(NULLPTR, c)(Z0)(Z1);
495 }
496 
497 __m128i _mm_clmulepi64_si128(const __m128i &a, const __m128i &b, int i)
498 {
499  word64 A[1] = {ByteReverse(((word64*)&a)[i&1])};
500  word64 B[1] = {ByteReverse(((word64*)&b)[i>>4])};
501 
502  PolynomialMod2 pa((byte *)A, 8);
503  PolynomialMod2 pb((byte *)B, 8);
504  PolynomialMod2 c = pa*pb;
505 
506  __m128i output;
507  for (int i=0; i<16; i++)
508  ((byte *)&output)[i] = c.GetByte(i);
509  return output;
510 }
511 #endif // Testing
512 
513 __m128i GCM_Reduce_CLMUL(__m128i c0, __m128i c1, __m128i c2, const __m128i &r)
514 {
515  /*
516  The polynomial to be reduced is c0 * x^128 + c1 * x^64 + c2. c0t below refers to the most
517 significant half of c0 as a polynomial, which, due to GCM's bit reflection, is in the
518 rightmost bit positions and at the lowest byte addresses.
519 
520  c1 ^= c0t * 0xc200000000000000
521  c2t ^= c0t
522  t = shift (c1t ^ c0b) left 1 bit
523  c2 ^= t * 0xe100000000000000
524  c2t ^= c1b
525  shift c2 left 1 bit and xor in lowest bit of c1t
526  */
527  c1 = _mm_xor_si128(c1, _mm_slli_si128(c0, 8));
528  c1 = _mm_xor_si128(c1, _mm_clmulepi64_si128(c0, r, 0x10));
529  c0 = _mm_srli_si128(c0, 8);
530  c0 = _mm_xor_si128(c0, c1);
531  c0 = _mm_slli_epi64(c0, 1);
532  c0 = _mm_clmulepi64_si128(c0, r, 0);
533  c2 = _mm_xor_si128(c2, c0);
534  c2 = _mm_xor_si128(c2, _mm_srli_si128(c1, 8));
535  c1 = _mm_unpacklo_epi64(c1, c2);
536  c1 = _mm_srli_epi64(c1, 63);
537  c2 = _mm_slli_epi64(c2, 1);
538  return _mm_xor_si128(c2, c1);
539 }
540 
541 __m128i GCM_Multiply_CLMUL(const __m128i &x, const __m128i &h, const __m128i &r)
542 {
543  const __m128i c0 = _mm_clmulepi64_si128(x,h,0);
544  const __m128i c1 = _mm_xor_si128(_mm_clmulepi64_si128(x,h,1), _mm_clmulepi64_si128(x,h,0x10));
545  const __m128i c2 = _mm_clmulepi64_si128(x,h,0x11);
546 
547  return GCM_Reduce_CLMUL(c0, c1, c2, r);
548 }
549 
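// Same key-power table layout as GCM_SetKeyWithoutResync_PMULL above: each
// 32-byte group ends up as { low64(h), low64(h1), high64(h), high64(h1) }.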
550 void GCM_SetKeyWithoutResync_CLMUL(const byte *hashKey, byte *mulTable, unsigned int tableSize)
551 {
552  const __m128i r = s_clmulConstants[0];
553  const __m128i h0 = _mm_shuffle_epi8(_mm_load_si128(CONST_M128_CAST(hashKey)), s_clmulConstants[1]);
554 
555  __m128i h = h0;
556  unsigned int i;
557  for (i=0; i<tableSize-32; i+=32)
558  {
559  const __m128i h1 = GCM_Multiply_CLMUL(h, h0, r);
560  _mm_storel_epi64(M128_CAST(mulTable+i), h);
561  _mm_storeu_si128(M128_CAST(mulTable+i+16), h1);
562  _mm_storeu_si128(M128_CAST(mulTable+i+8), h);
563  _mm_storel_epi64(M128_CAST(mulTable+i+8), h1);
564  h = GCM_Multiply_CLMUL(h1, h0, r);
565  }
566 
567  const __m128i h1 = GCM_Multiply_CLMUL(h, h0, r);
568  _mm_storel_epi64(M128_CAST(mulTable+i), h);
569  _mm_storeu_si128(M128_CAST(mulTable+i+16), h1);
570  _mm_storeu_si128(M128_CAST(mulTable+i+8), h);
571  _mm_storel_epi64(M128_CAST(mulTable+i+8), h1);
572 }
573 
574 size_t GCM_AuthenticateBlocks_CLMUL(const byte *data, size_t len, const byte *mtable, byte *hbuffer)
575 {
576  const __m128i *table = CONST_M128_CAST(mtable);
577  __m128i x = _mm_load_si128(M128_CAST(hbuffer));
578  const __m128i r = s_clmulConstants[0], mask1 = s_clmulConstants[1], mask2 = s_clmulConstants[2];
579 
580  while (len >= 16)
581  {
582  size_t s = UnsignedMin(len/16, s_cltableSizeInBlocks), i=0;
583  __m128i d1, d2 = _mm_shuffle_epi8(_mm_loadu_si128(CONST_M128_CAST(data+(s-1)*16)), mask2);
584  __m128i c0 = _mm_setzero_si128();
585  __m128i c1 = _mm_setzero_si128();
586  __m128i c2 = _mm_setzero_si128();
587 
588  while (true)
589  {
590  const __m128i h0 = _mm_load_si128(table+i);
591  const __m128i h1 = _mm_load_si128(table+i+1);
592  const __m128i h2 = _mm_xor_si128(h0, h1);
593 
594  if (++i == s)
595  {
596  d1 = _mm_shuffle_epi8(_mm_loadu_si128(CONST_M128_CAST(data)), mask1);
597  d1 = _mm_xor_si128(d1, x);
598  c0 = _mm_xor_si128(c0, _mm_clmulepi64_si128(d1, h0, 0));
599  c2 = _mm_xor_si128(c2, _mm_clmulepi64_si128(d1, h1, 1));
600  d1 = _mm_xor_si128(d1, _mm_shuffle_epi32(d1, _MM_SHUFFLE(1, 0, 3, 2)));
601  c1 = _mm_xor_si128(c1, _mm_clmulepi64_si128(d1, h2, 0));
602  break;
603  }
604 
605  d1 = _mm_shuffle_epi8(_mm_loadu_si128(CONST_M128_CAST(data+(s-i)*16-8)), mask2);
606  c0 = _mm_xor_si128(c0, _mm_clmulepi64_si128(d2, h0, 1));
607  c2 = _mm_xor_si128(c2, _mm_clmulepi64_si128(d1, h1, 1));
608  d2 = _mm_xor_si128(d2, d1);
609  c1 = _mm_xor_si128(c1, _mm_clmulepi64_si128(d2, h2, 1));
610 
611  if (++i == s)
612  {
613  d1 = _mm_shuffle_epi8(_mm_loadu_si128(CONST_M128_CAST(data)), mask1);
614  d1 = _mm_xor_si128(d1, x);
615  c0 = _mm_xor_si128(c0, _mm_clmulepi64_si128(d1, h0, 0x10));
616  c2 = _mm_xor_si128(c2, _mm_clmulepi64_si128(d1, h1, 0x11));
617  d1 = _mm_xor_si128(d1, _mm_shuffle_epi32(d1, _MM_SHUFFLE(1, 0, 3, 2)));
618  c1 = _mm_xor_si128(c1, _mm_clmulepi64_si128(d1, h2, 0x10));
619  break;
620  }
621 
622  d2 = _mm_shuffle_epi8(_mm_loadu_si128(CONST_M128_CAST(data+(s-i)*16-8)), mask1);
623  c0 = _mm_xor_si128(c0, _mm_clmulepi64_si128(d1, h0, 0x10));
624  c2 = _mm_xor_si128(c2, _mm_clmulepi64_si128(d2, h1, 0x10));
625  d1 = _mm_xor_si128(d1, d2);
626  c1 = _mm_xor_si128(c1, _mm_clmulepi64_si128(d1, h2, 0x10));
627  }
628  data += s*16;
629  len -= s*16;
630 
631  c1 = _mm_xor_si128(_mm_xor_si128(c1, c0), c2);
632  x = GCM_Reduce_CLMUL(c0, c1, c2, r);
633  }
634 
635  _mm_store_si128(M128_CAST(hbuffer), x);
636  return len;
637 }
638 
639 void GCM_ReverseHashBufferIfNeeded_CLMUL(byte *hashBuffer)
640 {
641  // SSSE3 instruction, but only used with CLMUL
642  __m128i &x = *M128_CAST(hashBuffer);
643  x = _mm_shuffle_epi8(x, s_clmulConstants[1]);
644 }
645 #endif
646 
647 NAMESPACE_END