Mirror of https://github.com/cuberite/polarssl.git
Add aesni_gcm_mult()

parent 9d57482280
commit d333f67f8c
include/polarssl/aesni.h

@@ -58,13 +58,29 @@ int aesni_supports( unsigned int what );
  * \param input    16-byte input block
  * \param output   16-byte output block
  *
- * \return         0 if success, 1 if operation failed
+ * \return         0 on success (cannot fail)
  */
 int aesni_crypt_ecb( aes_context *ctx,
                      int mode,
                      const unsigned char input[16],
                      unsigned char output[16] );
 
+/**
+ * \brief          GCM multiplication: c = a * b in GF(2^128)
+ *
+ * \param c        Result
+ * \param a        First operand
+ * \param b        Second operand
+ *
+ * \note           Both operands and result are bit strings interpreted as
+ *                 elements of GF(2^128) as per the GCM spec.
+ *
+ * \return         0 on success (cannot fail)
+ */
+int aesni_gcm_mult( unsigned char c[16],
+                    const unsigned char a[16],
+                    const unsigned char b[16] );
+
 #endif /* POLARSSL_HAVE_X86_64 */
 
 #endif /* POLARSSL_AESNI_H */
 
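For orientation, here is a minimal caller-side sketch of the new interface (illustrative, not part of the commit). It assumes aesni.h's POLARSSL_AESNI_CLMUL capability constant for the runtime check; verify that name against your copy of the header. In GCM's reflected bit order the element 1 is the block whose first byte is 0x80, so multiplying by it must return the other operand unchanged:

/* Hypothetical usage sketch, not from this commit. */
#include <stdio.h>
#include <string.h>

#include "polarssl/aesni.h"

int main( void )
{
    unsigned char a[16] = { 0x80 };   /* the element "1" in GCM bit order */
    unsigned char b[16] = { 0 };
    unsigned char c[16];

    b[5] = 0x42;                      /* arbitrary second operand */

    /* POLARSSL_AESNI_CLMUL is assumed to be the PCLMULQDQ capability
     * flag defined in aesni.h; check the name in your tree. */
    if( aesni_supports( POLARSSL_AESNI_CLMUL ) )
    {
        aesni_gcm_mult( c, a, b );    /* c = a * b in GF(2^128) */
        printf( "1 * b == b: %s\n", memcmp( b, c, 16 ) == 0 ? "ok" : "FAIL" );
    }

    return( 0 );
}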
							
								
								
									
library/aesni.c (114 additions)
@@ -25,6 +25,7 @@
 
 /*
  * [AES-WP] http://software.intel.com/en-us/articles/intel-advanced-encryption-standard-aes-instructions-set
+ * [CLMUL-WP] http://software.intel.com/en-us/articles/intel-carry-less-multiplication-instruction-and-its-usage-for-computing-the-gcm-mode/
  */
 
 #include "polarssl/config.h"
@@ -101,6 +102,119 @@ int aesni_crypt_ecb( aes_context *ctx,
 
     return( 0 );
 }
 
+/*
+ * GCM multiplication: c = a times b in GF(2^128)
+ * Based on [CLMUL-WP] algorithms 1 (with equation 27) and 5.
+ */
+int aesni_gcm_mult( unsigned char c[16],
+                    const unsigned char a[16],
+                    const unsigned char b[16] )
+{
+    unsigned char aa[16], bb[16], cc[16];
+    size_t i;
+
+    /* The inputs are in big-endian order, so byte-reverse them */
+    for( i = 0; i < 16; i++ )
+    {
+        aa[i] = a[15 - i];
+        bb[i] = b[15 - i];
+    }
+
    asm( "movdqu (%0), %%xmm0               \n" // a1:a0
 | 
			
		||||
         "movdqu (%1), %%xmm1               \n" // b1:b0
 | 
			
		||||
 | 
			
		||||
         /*
 | 
			
		||||
          * Caryless multiplication xmm2:xmm1 = xmm0 * xmm1
 | 
			
		||||
          * using [CLMUL-WP] algorithm 1 (p. 13).
 | 
			
		||||
          */
 | 
			
		||||
         "movdqa %%xmm1, %%xmm2             \n" // copy of b1:b0
 | 
			
		||||
         "movdqa %%xmm1, %%xmm3             \n" // same
 | 
			
		||||
         "movdqa %%xmm1, %%xmm4             \n" // same
 | 
			
		||||
         "pclmulqdq $0x00, %%xmm0, %%xmm1   \n" // a0*b0 = c1:c0
 | 
			
		||||
         "pclmulqdq $0x11, %%xmm0, %%xmm2   \n" // a1*b1 = d1:d0
 | 
			
		||||
         "pclmulqdq $0x10, %%xmm0, %%xmm3   \n" // a0*b1 = e1:e0
 | 
			
		||||
         "pclmulqdq $0x01, %%xmm0, %%xmm4   \n" // a1*b0 = f1:f0
 | 
			
		||||
         "pxor %%xmm3, %%xmm4               \n" // e1+f1:e0+f0
 | 
			
		||||
         "movdqa %%xmm4, %%xmm3             \n" // same
 | 
			
		||||
         "psrldq $8, %%xmm4                 \n" // 0:e1+f1
 | 
			
		||||
         "pslldq $8, %%xmm3                 \n" // e0+f0:0
 | 
			
		||||
         "pxor %%xmm4, %%xmm2               \n" // d1:d0+e1+f1
 | 
			
		||||
         "pxor %%xmm3, %%xmm1               \n" // c1+e0+f1:c0
 | 
			
		||||
 | 
			
		||||
+         /*
+          * Now shift the result one bit to the left,
+          * taking advantage of [CLMUL-WP] eq 27 (p. 20)
+          */
+         "movdqa %%xmm1, %%xmm3             \n" // r1:r0
+         "movdqa %%xmm2, %%xmm4             \n" // r3:r2
+         "psllq $1, %%xmm1                  \n" // r1<<1:r0<<1
+         "psllq $1, %%xmm2                  \n" // r3<<1:r2<<1
+         "psrlq $63, %%xmm3                 \n" // r1>>63:r0>>63
+         "psrlq $63, %%xmm4                 \n" // r3>>63:r2>>63
+         "movdqa %%xmm3, %%xmm5             \n" // r1>>63:r0>>63
+         "pslldq $8, %%xmm3                 \n" // r0>>63:0
+         "pslldq $8, %%xmm4                 \n" // r2>>63:0
+         "psrldq $8, %%xmm5                 \n" // 0:r1>>63
+         "por %%xmm3, %%xmm1                \n" // r1<<1|r0>>63:r0<<1
+         "por %%xmm4, %%xmm2                \n" // r3<<1|r2>>63:r2<<1
+         "por %%xmm5, %%xmm2                \n" // r3<<1|r2>>63:r2<<1|r1>>63
+
+         /*
+          * Now reduce modulo the GCM polynomial x^128 + x^7 + x^2 + x + 1
+          * using [CLMUL-WP] algorithm 5 (p. 20).
+          * Currently xmm2:xmm1 holds x3:x2:x1:x0 (already shifted).
+          */
+         /* Step 2 (1) */
+         "movdqa %%xmm1, %%xmm3             \n" // x1:x0
+         "movdqa %%xmm1, %%xmm4             \n" // same
+         "movdqa %%xmm1, %%xmm5             \n" // same
+         "psllq $63, %%xmm3                 \n" // x1<<63:x0<<63 = stuff:a
+         "psllq $62, %%xmm4                 \n" // x1<<62:x0<<62 = stuff:b
+         "psllq $57, %%xmm5                 \n" // x1<<57:x0<<57 = stuff:c
+
+         /* Step 2 (2) */
+         "pxor %%xmm4, %%xmm3               \n" // stuff:a+b
+         "pxor %%xmm5, %%xmm3               \n" // stuff:a+b+c
+         "pslldq $8, %%xmm3                 \n" // a+b+c:0
+         "pxor %%xmm3, %%xmm1               \n" // x1+a+b+c:x0 = d:x0
+
+         /* Steps 3 and 4 */
+         "movdqa %%xmm1,%%xmm0              \n" // d:x0
+         "movdqa %%xmm1,%%xmm4              \n" // same
+         "movdqa %%xmm1,%%xmm5              \n" // same
+         "psrlq $1, %%xmm0                  \n" // e1:x0>>1 = e1:e0'
+         "psrlq $2, %%xmm4                  \n" // f1:x0>>2 = f1:f0'
+         "psrlq $7, %%xmm5                  \n" // g1:x0>>7 = g1:g0'
+         "pxor %%xmm4, %%xmm0               \n" // e1+f1:e0'+f0'
+         "pxor %%xmm5, %%xmm0               \n" // e1+f1+g1:e0'+f0'+g0'
+         // e0'+f0'+g0' is almost e0+f0+g0, except for some missing
+         // bits carried from d. Now get those bits back in.
+         "movdqa %%xmm1,%%xmm3              \n" // d:x0
+         "movdqa %%xmm1,%%xmm4              \n" // same
+         "movdqa %%xmm1,%%xmm5              \n" // same
+         "psllq $63, %%xmm3                 \n" // d<<63:stuff
+         "psllq $62, %%xmm4                 \n" // d<<62:stuff
+         "psllq $57, %%xmm5                 \n" // d<<57:stuff
+         "pxor %%xmm4, %%xmm3               \n" // d<<63+d<<62:stuff
+         "pxor %%xmm5, %%xmm3               \n" // missing bits of d:stuff
+         "psrldq $8, %%xmm3                 \n" // 0:missing bits of d
+         "pxor %%xmm3, %%xmm0               \n" // e1+f1+g1:e0+f0+g0
+         "pxor %%xmm1, %%xmm0               \n" // h1:h0
+         "pxor %%xmm2, %%xmm0               \n" // x3+h1:x2+h0
+
+         "movdqu %%xmm0, (%2)               \n" // done
+         :
+         : "r" (aa), "r" (bb), "r" (cc)
+         : "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5" );
+
+    /* Now byte-reverse the outputs */
+    for( i = 0; i < 16; i++ )
+        c[i] = cc[15 - i];
+
+    return( 0 );
+}
+
 #endif /* POLARSSL_HAVE_X86_64 */
 
 #endif /* POLARSSL_AESNI_C */
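As a cross-check on what the assembly computes, here is a bit-serial reference for multiplication in GF(2^128) in GCM's bit order (bit 0 of byte 0 is the coefficient of x^0; the reduction polynomial x^128 + x^7 + x^2 + x + 1 shows up as the constant 0xE1 under this convention). This is an illustrative sketch, not code from the commit, and gcm_mult_ref is a hypothetical name; on CLMUL-capable hardware its output should match aesni_gcm_mult byte for byte:

/* Illustrative reference, not part of the commit. */
#include <string.h>

static void gcm_mult_ref( unsigned char c[16],
                          const unsigned char a[16],
                          const unsigned char b[16] )
{
    unsigned char v[16], z[16];
    int i, j, k, carry;

    memset( z, 0, 16 );
    memcpy( v, b, 16 );

    for( i = 0; i < 16; i++ )          /* bits of a, most significant first */
    {
        for( j = 7; j >= 0; j-- )
        {
            if( ( a[i] >> j ) & 1 )    /* accumulate v where a has a 1 bit */
                for( k = 0; k < 16; k++ )
                    z[k] ^= v[k];

            carry = v[15] & 1;         /* coefficient of x^127 */
            for( k = 15; k > 0; k-- )  /* v = v * x: right shift in GCM order */
                v[k] = (unsigned char)( ( v[k] >> 1 ) | ( v[k - 1] << 7 ) );
            v[0] >>= 1;
            if( carry )
                v[0] ^= 0xE1;          /* fold x^128 back in as x^7+x^2+x+1 */
        }
    }

    memcpy( c, z, 16 );
}

Comparing gcm_mult_ref against aesni_gcm_mult over random inputs makes a quick test oracle for the PCLMULQDQ path.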
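Finally, a scaled-down illustration of the "shift one bit to the left" step: [CLMUL-WP] eq 27 says that a carry-less multiply of bit-reflected operands yields the bit-reflection of the true product shifted right by one, so a single left shift restores it. The same identity at 8-bit size (hypothetical demo code, not from the commit):

/* Demo of the eq 27 reflection identity on 8-bit polynomials. */
#include <stdint.h>
#include <stdio.h>

static uint16_t clmul8( uint8_t a, uint8_t b )   /* carry-less 8x8 -> 16 */
{
    uint16_t r = 0;
    int i;

    for( i = 0; i < 8; i++ )
        if( ( b >> i ) & 1 )
            r ^= (uint16_t)( a << i );

    return( r );
}

static uint8_t rev8( uint8_t x )                 /* reverse 8 bits */
{
    uint8_t r = 0;
    int i;

    for( i = 0; i < 8; i++ )
        r |= (uint8_t)( ( ( x >> i ) & 1 ) << ( 7 - i ) );

    return( r );
}

static uint16_t rev16( uint16_t x )              /* reverse 16 bits */
{
    uint16_t r = 0;
    int i;

    for( i = 0; i < 16; i++ )
        r |= (uint16_t)( ( ( x >> i ) & 1 ) << ( 15 - i ) );

    return( r );
}

int main( void )
{
    uint8_t a = 0x53, b = 0xCA;                      /* arbitrary operands */
    uint16_t refl = clmul8( rev8( a ), rev8( b ) );  /* multiply reflections */
    uint16_t prod = clmul8( a, b );                  /* ordinary product */

    /* eq 27 in miniature: rev16( a*b ) == ( rev8(a)*rev8(b) ) << 1 */
    printf( "eq 27 holds: %s\n",
            rev16( prod ) == (uint16_t)( refl << 1 ) ? "yes" : "NO" );

    return( 0 );
}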