Merge tag 'aes-gcm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ebiggers/linux
Pull AES-GCM optimizations from Eric Biggers:
"More optimizations and cleanups for the x86_64 AES-GCM code:
- Add a VAES+AVX2 optimized implementation of AES-GCM. This is very
helpful on CPUs that have VAES but not AVX512, such as AMD Zen 3.
- Make the VAES+AVX512 optimized implementation of AES-GCM handle
large amounts of associated data efficiently.
- Remove the "avx10_256" implementation of AES-GCM. It's superseded
by the VAES+AVX2 optimized implementation.
- Rename the "avx10_512" implementation to "avx512"
Overall, this fills in a gap where AES-GCM wasn't fully optimized on
some recent CPUs. It also drops code that won't be as useful as
initially expected due to AVX10/256 being dropped from the AVX10 spec"
* tag 'aes-gcm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ebiggers/linux:
crypto: x86/aes-gcm-vaes-avx2 - initialize full %rax return register
crypto: x86/aes-gcm - optimize long AAD processing with AVX512
crypto: x86/aes-gcm - optimize AVX512 precomputation of H^2 from H^1
crypto: x86/aes-gcm - revise some comments in AVX512 code
crypto: x86/aes-gcm - reorder AVX512 precompute and aad_update functions
crypto: x86/aes-gcm - clean up AVX512 code to assume 512-bit vectors
crypto: x86/aes-gcm - rename avx10 and avx10_512 to avx512
crypto: x86/aes-gcm - remove VAES+AVX10/256 optimized code
crypto: x86/aes-gcm - add VAES+AVX2 optimized code
@@ -46,8 +46,9 @@ obj-$(CONFIG_CRYPTO_AES_NI_INTEL) += aesni-intel.o
aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o
aesni-intel-$(CONFIG_64BIT) += aes-ctr-avx-x86_64.o \
aes-gcm-aesni-x86_64.o \
aes-xts-avx-x86_64.o \
aes-gcm-avx10-x86_64.o
aes-gcm-vaes-avx2.o \
aes-gcm-vaes-avx512.o \
aes-xts-avx-x86_64.o

obj-$(CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL) += ghash-clmulni-intel.o
ghash-clmulni-intel-y := ghash-clmulni-intel_asm.o ghash-clmulni-intel_glue.o
@@ -61,15 +61,15 @@
// for the *_aesni functions or AVX for the *_aesni_avx ones. (But it seems
// there are no CPUs that support AES-NI without also PCLMULQDQ and SSE4.1.)
//
// The design generally follows that of aes-gcm-avx10-x86_64.S, and that file is
// The design generally follows that of aes-gcm-vaes-avx512.S, and that file is
// more thoroughly commented. This file has the following notable changes:
//
// - The vector length is fixed at 128-bit, i.e. xmm registers. This means
// there is only one AES block (and GHASH block) per register.
//
// - Without AVX512 / AVX10, only 16 SIMD registers are available instead of
// 32. We work around this by being much more careful about using
// registers, relying heavily on loads to load values as they are needed.
// - Without AVX512, only 16 SIMD registers are available instead of 32. We
// work around this by being much more careful about using registers,
// relying heavily on loads to load values as they are needed.
//
// - Masking is not available either. We work around this by implementing
// partial block loads and stores using overlapping scalar loads and stores
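The "overlapping scalar loads and stores" technique mentioned just above can be illustrated in C. This is a hedged sketch, not the kernel's actual helper; the function name partial_load is hypothetical, and it assumes 1 <= len <= 16 and that the source buffer really contains len bytes.

    #include <stdint.h>
    #include <string.h>

    /* Load 1 <= len <= 16 bytes into a zero-padded 16-byte block using a few
     * overlapping fixed-size copies instead of a byte loop or a mask register.
     * The overlapping copies may read some source bytes twice, which is harmless. */
    static void partial_load(uint8_t block[16], const uint8_t *src, size_t len)
    {
            memset(block, 0, 16);
            if (len >= 8) {
                    memcpy(block, src, 8);                     /* bytes [0, 8) */
                    memcpy(block + len - 8, src + len - 8, 8); /* bytes [len-8, len), overlaps */
            } else if (len >= 4) {
                    memcpy(block, src, 4);
                    memcpy(block + len - 4, src + len - 4, 4);
            } else if (len > 0) {
                    block[0] = src[0];
                    block[len / 2] = src[len / 2];
                    block[len - 1] = src[len - 1];
            }
    }

Partial stores work the same way in reverse: a few overlapping scalar stores that together cover exactly the len bytes of the destination.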
@@ -90,8 +90,8 @@
// multiplication instead of schoolbook multiplication. This saves one
// pclmulqdq instruction per block, at the cost of one 64-bit load, one
// pshufd, and 0.25 pxors per block. (This is without the three-argument
// XOR support that would be provided by AVX512 / AVX10, which would be
// more beneficial to schoolbook than Karatsuba.)
// XOR support that would be provided by AVX512, which would be more
// beneficial to schoolbook than Karatsuba.)
//
// As a rough approximation, we can assume that Karatsuba multiplication is
// faster than schoolbook multiplication in this context if one pshufd and
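For reference, the trade-off discussed in the comments above comes from the one-level Karatsuba identity for a 128-bit carryless multiplication (a sketch in the same notation as the comments, with '+' meaning XOR): writing a = a_L + a_H*x^64 and b = b_L + b_H*x^64,

    a*b = H*x^128 + ((a_L + a_H)*(b_L + b_H) + L + H)*x^64 + L,  where L = a_L*b_L and H = a_H*b_H

so the middle term needs only one extra carryless multiplication instead of the two cross products a_L*b_H and a_H*b_L used by schoolbook, i.e. three pclmulqdq instructions per block instead of four.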
arch/x86/crypto/aes-gcm-vaes-avx2.S: new file, 1146 lines (diff not shown because it is too large)
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: Apache-2.0 OR BSD-2-Clause */
//
// VAES and VPCLMULQDQ optimized AES-GCM for x86_64
// AES-GCM implementation for x86_64 CPUs that support the following CPU
// features: VAES && VPCLMULQDQ && AVX512BW && AVX512VL && BMI2
//
// Copyright 2024 Google LLC
//
@@ -45,41 +46,6 @@
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
//------------------------------------------------------------------------------
//
// This file implements AES-GCM (Galois/Counter Mode) for x86_64 CPUs that
// support VAES (vector AES), VPCLMULQDQ (vector carryless multiplication), and
// either AVX512 or AVX10. Some of the functions, notably the encryption and
// decryption update functions which are the most performance-critical, are
// provided in two variants generated from a macro: one using 256-bit vectors
// (suffix: vaes_avx10_256) and one using 512-bit vectors (vaes_avx10_512). The
// other, "shared" functions (vaes_avx10) use at most 256-bit vectors.
//
// The functions that use 512-bit vectors are intended for CPUs that support
// 512-bit vectors *and* where using them doesn't cause significant
// downclocking. They require the following CPU features:
//
// VAES && VPCLMULQDQ && BMI2 && ((AVX512BW && AVX512VL) || AVX10/512)
//
// The other functions require the following CPU features:
//
// VAES && VPCLMULQDQ && BMI2 && ((AVX512BW && AVX512VL) || AVX10/256)
//
// All functions use the "System V" ABI. The Windows ABI is not supported.
//
// Note that we use "avx10" in the names of the functions as a shorthand to
// really mean "AVX10 or a certain set of AVX512 features". Due to Intel's
// introduction of AVX512 and then its replacement by AVX10, there doesn't seem
// to be a simple way to name things that makes sense on all CPUs.
//
// Note that the macros that support both 256-bit and 512-bit vectors could
// fairly easily be changed to support 128-bit too. However, this would *not*
// be sufficient to allow the code to run on CPUs without AVX512 or AVX10,
// because the code heavily uses several features of these extensions other than
// the vector length: the increase in the number of SIMD registers from 16 to
// 32, masking support, and new instructions such as vpternlogd (which can do a
// three-argument XOR). These features are very useful for AES-GCM.

#include <linux/linkage.h>
@@ -104,16 +70,14 @@
.Lgfpoly_and_internal_carrybit:
.octa 0xc2000000000000010000000000000001

// The below constants are used for incrementing the counter blocks.
// ctr_pattern points to the four 128-bit values [0, 1, 2, 3].
// inc_2blocks and inc_4blocks point to the single 128-bit values 2 and
// 4. Note that the same '2' is reused in ctr_pattern and inc_2blocks.
// Values needed to prepare the initial vector of counter blocks.
.Lctr_pattern:
.octa 0
.octa 1
.Linc_2blocks:
.octa 2
.octa 3

// The number of AES blocks per vector, as a 128-bit value.
.Linc_4blocks:
.octa 4

@@ -130,29 +94,13 @@
// Offset to end of hash key powers array in the key struct.
//
// This is immediately followed by three zeroized padding blocks, which are
// included so that partial vectors can be handled more easily. E.g. if VL=64
// and two blocks remain, we load the 4 values [H^2, H^1, 0, 0]. The most
// padding blocks needed is 3, which occurs if [H^1, 0, 0, 0] is loaded.
// included so that partial vectors can be handled more easily. E.g. if two
// blocks remain, we load the 4 values [H^2, H^1, 0, 0]. The most padding
// blocks needed is 3, which occurs if [H^1, 0, 0, 0] is loaded.
#define OFFSETOFEND_H_POWERS (OFFSETOF_H_POWERS + (NUM_H_POWERS * 16))

.text

// Set the vector length in bytes. This sets the VL variable and defines
// register aliases V0-V31 that map to the ymm or zmm registers.
.macro _set_veclen vl
.set VL, \vl
.irp i, 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15, \
16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
.if VL == 32
.set V\i, %ymm\i
.elseif VL == 64
.set V\i, %zmm\i
.else
.error "Unsupported vector length"
.endif
.endr
.endm

// The _ghash_mul_step macro does one step of GHASH multiplication of the
// 128-bit lanes of \a by the corresponding 128-bit lanes of \b and storing the
// reduced products in \dst. \t0, \t1, and \t2 are temporary registers of the
@@ -312,39 +260,44 @@
vpternlogd $0x96, \t0, \mi, \hi
.endm

// void aes_gcm_precompute_##suffix(struct aes_gcm_key_avx10 *key);
// This is a specialized version of _ghash_mul that computes \a * \a, i.e. it
// squares \a. It skips computing MI = (a_L * a_H) + (a_H * a_L) = 0.
.macro _ghash_square a, dst, gfpoly, t0, t1
vpclmulqdq $0x00, \a, \a, \t0 // LO = a_L * a_L
vpclmulqdq $0x11, \a, \a, \dst // HI = a_H * a_H
vpclmulqdq $0x01, \t0, \gfpoly, \t1 // LO_L*(x^63 + x^62 + x^57)
vpshufd $0x4e, \t0, \t0 // Swap halves of LO
vpxord \t0, \t1, \t1 // Fold LO into MI
vpclmulqdq $0x01, \t1, \gfpoly, \t0 // MI_L*(x^63 + x^62 + x^57)
vpshufd $0x4e, \t1, \t1 // Swap halves of MI
vpternlogd $0x96, \t0, \t1, \dst // Fold MI into HI
.endm
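A short note on why _ghash_square can skip the middle term (same notation as the comments above, '+' meaning XOR): in a field of characteristic 2 the cross terms of a square cancel,

    (a_L + a_H*x^64)^2 = a_L^2 + (a_L*a_H + a_H*a_L)*x^64 + a_H^2*x^128 = a_L^2 + a_H^2*x^128

so only the LO and HI pclmulqdq products are needed, and the usual two folding steps reduce them exactly as in _ghash_mul.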

// void aes_gcm_precompute_vaes_avx512(struct aes_gcm_key_vaes_avx512 *key);
//
// Given the expanded AES key |key->aes_key|, this function derives the GHASH
// subkey and initializes |key->ghash_key_powers| with powers of it.
//
// The number of key powers initialized is NUM_H_POWERS, and they are stored in
// the order H^NUM_H_POWERS to H^1. The zeroized padding blocks after the key
// powers themselves are also initialized.
//
// This macro supports both VL=32 and VL=64. _set_veclen must have been invoked
// with the desired length. In the VL=32 case, the function computes twice as
// many key powers than are actually used by the VL=32 GCM update functions.
// This is done to keep the key format the same regardless of vector length.
.macro _aes_gcm_precompute
// Given the expanded AES key |key->base.aes_key|, derive the GHASH subkey and
// initialize |key->h_powers| and |key->padding|.
SYM_FUNC_START(aes_gcm_precompute_vaes_avx512)

// Function arguments
.set KEY, %rdi

// Additional local variables. V0-V2 and %rax are used as temporaries.
// Additional local variables.
// %zmm[0-2] and %rax are used as temporaries.
.set POWERS_PTR, %rsi
.set RNDKEYLAST_PTR, %rdx
.set H_CUR, V3
.set H_CUR, %zmm3
.set H_CUR_YMM, %ymm3
.set H_CUR_XMM, %xmm3
.set H_INC, V4
.set H_INC, %zmm4
.set H_INC_YMM, %ymm4
.set H_INC_XMM, %xmm4
.set GFPOLY, V5
.set GFPOLY, %zmm5
.set GFPOLY_YMM, %ymm5
.set GFPOLY_XMM, %xmm5

// Get pointer to lowest set of key powers (located at end of array).
lea OFFSETOFEND_H_POWERS-VL(KEY), POWERS_PTR
lea OFFSETOFEND_H_POWERS-64(KEY), POWERS_PTR

// Encrypt an all-zeroes block to get the raw hash subkey.
movl OFFSETOF_AESKEYLEN(KEY), %eax
@@ -363,8 +316,8 @@

// Zeroize the padding blocks.
vpxor %xmm0, %xmm0, %xmm0
vmovdqu %ymm0, VL(POWERS_PTR)
vmovdqu %xmm0, VL+2*16(POWERS_PTR)
vmovdqu %ymm0, 64(POWERS_PTR)
vmovdqu %xmm0, 64+2*16(POWERS_PTR)

// Finish preprocessing the first key power, H^1. Since this GHASH
// implementation operates directly on values with the backwards bit
@@ -397,54 +350,44 @@
// special needs to be done to make this happen, though: H^1 * H^1 would
// end up with two factors of x^-1, but the multiplication consumes one.
// So the product H^2 ends up with the desired one factor of x^-1.
_ghash_mul H_CUR_XMM, H_CUR_XMM, H_INC_XMM, GFPOLY_XMM, \
%xmm0, %xmm1, %xmm2
_ghash_square H_CUR_XMM, H_INC_XMM, GFPOLY_XMM, %xmm0, %xmm1

// Create H_CUR_YMM = [H^2, H^1] and H_INC_YMM = [H^2, H^2].
vinserti128 $1, H_CUR_XMM, H_INC_YMM, H_CUR_YMM
vinserti128 $1, H_INC_XMM, H_INC_YMM, H_INC_YMM

.if VL == 64
// Create H_CUR = [H^4, H^3, H^2, H^1] and H_INC = [H^4, H^4, H^4, H^4].
_ghash_mul H_INC_YMM, H_CUR_YMM, H_INC_YMM, GFPOLY_YMM, \
%ymm0, %ymm1, %ymm2
vinserti64x4 $1, H_CUR_YMM, H_INC, H_CUR
vshufi64x2 $0, H_INC, H_INC, H_INC
.endif

// Store the lowest set of key powers.
vmovdqu8 H_CUR, (POWERS_PTR)

// Compute and store the remaining key powers. With VL=32, repeatedly
// multiply [H^(i+1), H^i] by [H^2, H^2] to get [H^(i+3), H^(i+2)].
// With VL=64, repeatedly multiply [H^(i+3), H^(i+2), H^(i+1), H^i] by
// Compute and store the remaining key powers.
// Repeatedly multiply [H^(i+3), H^(i+2), H^(i+1), H^i] by
// [H^4, H^4, H^4, H^4] to get [H^(i+7), H^(i+6), H^(i+5), H^(i+4)].
mov $(NUM_H_POWERS*16/VL) - 1, %eax
.Lprecompute_next\@:
sub $VL, POWERS_PTR
_ghash_mul H_INC, H_CUR, H_CUR, GFPOLY, V0, V1, V2
mov $3, %eax
.Lprecompute_next:
sub $64, POWERS_PTR
_ghash_mul H_INC, H_CUR, H_CUR, GFPOLY, %zmm0, %zmm1, %zmm2
vmovdqu8 H_CUR, (POWERS_PTR)
dec %eax
jnz .Lprecompute_next\@
jnz .Lprecompute_next

vzeroupper // This is needed after using ymm or zmm registers.
RET
.endm
SYM_FUNC_END(aes_gcm_precompute_vaes_avx512)
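As a rough C model of what the precompute step above produces (a hedged sketch, not the kernel code): the key powers end up stored from H^NUM_H_POWERS down to H^1, followed by three zero padding blocks, so the update functions can always load aligned 64-byte vectors of four consecutive powers. The names below are illustrative; 'mul' stands in for the GF(2^128) multiplication implemented by _ghash_mul, and the real code computes the powers four at a time using vector multiplications by [H^4, H^4, H^4, H^4], but the resulting layout is the same.

    #include <stdint.h>
    #include <string.h>

    struct be128 { uint64_t hi, lo; };      /* one 128-bit GHASH block */

    #define NUM_H_POWERS 16

    /* h_powers[0] = H^16, ..., h_powers[15] = H^1, then three zero blocks. */
    static void precompute_model(struct be128 h_powers[NUM_H_POWERS],
                                 struct be128 padding[3], struct be128 h1,
                                 struct be128 (*mul)(struct be128, struct be128))
    {
            struct be128 h = h1;

            for (int i = NUM_H_POWERS - 1; i >= 0; i--) {
                    h_powers[i] = h;        /* H^(NUM_H_POWERS - i) */
                    h = mul(h, h1);
            }
            memset(padding, 0, 3 * sizeof(padding[0]));
    }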

// XOR together the 128-bit lanes of \src (whose low lane is \src_xmm) and store
// the result in \dst_xmm. This implicitly zeroizes the other lanes of dst.
.macro _horizontal_xor src, src_xmm, dst_xmm, t0_xmm, t1_xmm, t2_xmm
vextracti32x4 $1, \src, \t0_xmm
.if VL == 32
vpxord \t0_xmm, \src_xmm, \dst_xmm
.elseif VL == 64
vextracti32x4 $2, \src, \t1_xmm
vextracti32x4 $3, \src, \t2_xmm
vpxord \t0_xmm, \src_xmm, \dst_xmm
vpternlogd $0x96, \t1_xmm, \t2_xmm, \dst_xmm
.else
.error "Unsupported vector length"
.endif
.endm
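The lane folding that _horizontal_xor performs corresponds to this scalar C sketch (an illustration only, with the four 128-bit lanes represented as pairs of 64-bit words):

    #include <stdint.h>

    struct u128 { uint64_t lo, hi; };

    /* XOR the four 128-bit lanes of a 512-bit value down to one 128-bit result. */
    static struct u128 horizontal_xor(const struct u128 lanes[4])
    {
            struct u128 r = lanes[0];

            for (int i = 1; i < 4; i++) {
                    r.lo ^= lanes[i].lo;
                    r.hi ^= lanes[i].hi;
            }
            return r;
    }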

// Do one step of the GHASH update of the data blocks given in the vector
@@ -458,25 +401,21 @@
//
// The GHASH update does: GHASH_ACC = H_POW4*(GHASHDATA0 + GHASH_ACC) +
// H_POW3*GHASHDATA1 + H_POW2*GHASHDATA2 + H_POW1*GHASHDATA3, where the
// operations are vectorized operations on vectors of 16-byte blocks. E.g.,
// with VL=32 there are 2 blocks per vector and the vectorized terms correspond
// to the following non-vectorized terms:
// operations are vectorized operations on 512-bit vectors of 128-bit blocks.
// The vectorized terms correspond to the following non-vectorized terms:
//
// H_POW4*(GHASHDATA0 + GHASH_ACC) => H^8*(blk0 + GHASH_ACC_XMM) and H^7*(blk1 + 0)
// H_POW3*GHASHDATA1 => H^6*blk2 and H^5*blk3
// H_POW2*GHASHDATA2 => H^4*blk4 and H^3*blk5
// H_POW1*GHASHDATA3 => H^2*blk6 and H^1*blk7
//
// With VL=64, we use 4 blocks/vector, H^16 through H^1, and blk0 through blk15.
// H_POW4*(GHASHDATA0 + GHASH_ACC) => H^16*(blk0 + GHASH_ACC_XMM),
// H^15*(blk1 + 0), H^14*(blk2 + 0), and H^13*(blk3 + 0)
// H_POW3*GHASHDATA1 => H^12*blk4, H^11*blk5, H^10*blk6, and H^9*blk7
// H_POW2*GHASHDATA2 => H^8*blk8, H^7*blk9, H^6*blk10, and H^5*blk11
// H_POW1*GHASHDATA3 => H^4*blk12, H^3*blk13, H^2*blk14, and H^1*blk15
//
// More concretely, this code does:
// - Do vectorized "schoolbook" multiplications to compute the intermediate
// 256-bit product of each block and its corresponding hash key power.
// There are 4*VL/16 of these intermediate products.
// - Sum (XOR) the intermediate 256-bit products across vectors. This leaves
// VL/16 256-bit intermediate values.
// - Sum (XOR) the intermediate 256-bit products across vectors.
// - Do a vectorized reduction of these 256-bit intermediate values to
// 128-bits each. This leaves VL/16 128-bit intermediate values.
// 128-bits each.
// - Sum (XOR) these values and store the 128-bit result in GHASH_ACC_XMM.
//
// See _ghash_mul_step for the full explanation of the operations performed for
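Written out as a formula (a sketch using the notation of the comments above, with all additions being XOR in GF(2^128)), one pass over sixteen blocks c_1 ... c_16 updates the accumulator A as

    A <- (A + c_1)*H^16 + c_2*H^15 + ... + c_15*H^2 + c_16*H^1

which is exactly what sixteen steps of the usual per-block Horner form A <- (A + c_i)*H would produce, just regrouped so that four vectors of four blocks each can be multiplied by [H^16..H^13], [H^12..H^9], [H^8..H^5], and [H^4..H^1] in parallel, with a single reduction at the end.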
@@ -532,85 +471,224 @@
|
||||
.endif
|
||||
.endm
|
||||
|
||||
// Do one non-last round of AES encryption on the counter blocks in V0-V3 using
|
||||
// the round key that has been broadcast to all 128-bit lanes of \round_key.
|
||||
// Update GHASH with four vectors of data blocks. See _ghash_step_4x for full
|
||||
// explanation.
|
||||
.macro _ghash_4x
|
||||
.irp i, 0,1,2,3,4,5,6,7,8,9
|
||||
_ghash_step_4x \i
|
||||
.endr
|
||||
.endm
|
||||
|
||||
// void aes_gcm_aad_update_vaes_avx512(const struct aes_gcm_key_vaes_avx512 *key,
|
||||
// u8 ghash_acc[16],
|
||||
// const u8 *aad, int aadlen);
|
||||
//
|
||||
// This function processes the AAD (Additional Authenticated Data) in GCM.
|
||||
// Using the key |key|, it updates the GHASH accumulator |ghash_acc| with the
|
||||
// data given by |aad| and |aadlen|. On the first call, |ghash_acc| must be all
|
||||
// zeroes. |aadlen| must be a multiple of 16, except on the last call where it
|
||||
// can be any length. The caller must do any buffering needed to ensure this.
|
||||
//
|
||||
// This handles large amounts of AAD efficiently, while also keeping overhead
|
||||
// low for small amounts which is the common case. TLS and IPsec use less than
|
||||
// one block of AAD, but (uncommonly) other use cases may use much more.
|
||||
SYM_FUNC_START(aes_gcm_aad_update_vaes_avx512)
|
||||
|
||||
// Function arguments
|
||||
.set KEY, %rdi
|
||||
.set GHASH_ACC_PTR, %rsi
|
||||
.set AAD, %rdx
|
||||
.set AADLEN, %ecx
|
||||
.set AADLEN64, %rcx // Zero-extend AADLEN before using!
|
||||
|
||||
// Additional local variables.
|
||||
// %rax and %k1 are used as temporary registers.
|
||||
.set GHASHDATA0, %zmm0
|
||||
.set GHASHDATA0_XMM, %xmm0
|
||||
.set GHASHDATA1, %zmm1
|
||||
.set GHASHDATA1_XMM, %xmm1
|
||||
.set GHASHDATA2, %zmm2
|
||||
.set GHASHDATA2_XMM, %xmm2
|
||||
.set GHASHDATA3, %zmm3
|
||||
.set BSWAP_MASK, %zmm4
|
||||
.set BSWAP_MASK_XMM, %xmm4
|
||||
.set GHASH_ACC, %zmm5
|
||||
.set GHASH_ACC_XMM, %xmm5
|
||||
.set H_POW4, %zmm6
|
||||
.set H_POW3, %zmm7
|
||||
.set H_POW2, %zmm8
|
||||
.set H_POW1, %zmm9
|
||||
.set H_POW1_XMM, %xmm9
|
||||
.set GFPOLY, %zmm10
|
||||
.set GFPOLY_XMM, %xmm10
|
||||
.set GHASHTMP0, %zmm11
|
||||
.set GHASHTMP1, %zmm12
|
||||
.set GHASHTMP2, %zmm13
|
||||
|
||||
// Load the GHASH accumulator.
|
||||
vmovdqu (GHASH_ACC_PTR), GHASH_ACC_XMM
|
||||
|
||||
// Check for the common case of AADLEN <= 16, as well as AADLEN == 0.
|
||||
cmp $16, AADLEN
|
||||
jg .Laad_more_than_16bytes
|
||||
test AADLEN, AADLEN
|
||||
jz .Laad_done
|
||||
|
||||
// Fast path: update GHASH with 1 <= AADLEN <= 16 bytes of AAD.
|
||||
vmovdqu .Lbswap_mask(%rip), BSWAP_MASK_XMM
|
||||
vmovdqu .Lgfpoly(%rip), GFPOLY_XMM
|
||||
mov $-1, %eax
|
||||
bzhi AADLEN, %eax, %eax
|
||||
kmovd %eax, %k1
|
||||
vmovdqu8 (AAD), GHASHDATA0_XMM{%k1}{z}
|
||||
vmovdqu OFFSETOFEND_H_POWERS-16(KEY), H_POW1_XMM
|
||||
vpshufb BSWAP_MASK_XMM, GHASHDATA0_XMM, GHASHDATA0_XMM
|
||||
vpxor GHASHDATA0_XMM, GHASH_ACC_XMM, GHASH_ACC_XMM
|
||||
_ghash_mul H_POW1_XMM, GHASH_ACC_XMM, GHASH_ACC_XMM, GFPOLY_XMM, \
|
||||
GHASHDATA0_XMM, GHASHDATA1_XMM, GHASHDATA2_XMM
|
||||
jmp .Laad_done
|
||||
|
||||
.Laad_more_than_16bytes:
|
||||
vbroadcasti32x4 .Lbswap_mask(%rip), BSWAP_MASK
|
||||
vbroadcasti32x4 .Lgfpoly(%rip), GFPOLY
|
||||
|
||||
// If AADLEN >= 256, update GHASH with 256 bytes of AAD at a time.
|
||||
sub $256, AADLEN
|
||||
jl .Laad_loop_4x_done
|
||||
vmovdqu8 OFFSETOFEND_H_POWERS-4*64(KEY), H_POW4
|
||||
vmovdqu8 OFFSETOFEND_H_POWERS-3*64(KEY), H_POW3
|
||||
vmovdqu8 OFFSETOFEND_H_POWERS-2*64(KEY), H_POW2
|
||||
vmovdqu8 OFFSETOFEND_H_POWERS-1*64(KEY), H_POW1
|
||||
.Laad_loop_4x:
|
||||
vmovdqu8 0*64(AAD), GHASHDATA0
|
||||
vmovdqu8 1*64(AAD), GHASHDATA1
|
||||
vmovdqu8 2*64(AAD), GHASHDATA2
|
||||
vmovdqu8 3*64(AAD), GHASHDATA3
|
||||
_ghash_4x
|
||||
add $256, AAD
|
||||
sub $256, AADLEN
|
||||
jge .Laad_loop_4x
|
||||
.Laad_loop_4x_done:
|
||||
|
||||
// If AADLEN >= 64, update GHASH with 64 bytes of AAD at a time.
|
||||
add $192, AADLEN
|
||||
jl .Laad_loop_1x_done
|
||||
vmovdqu8 OFFSETOFEND_H_POWERS-1*64(KEY), H_POW1
|
||||
.Laad_loop_1x:
|
||||
vmovdqu8 (AAD), GHASHDATA0
|
||||
vpshufb BSWAP_MASK, GHASHDATA0, GHASHDATA0
|
||||
vpxord GHASHDATA0, GHASH_ACC, GHASH_ACC
|
||||
_ghash_mul H_POW1, GHASH_ACC, GHASH_ACC, GFPOLY, \
|
||||
GHASHDATA0, GHASHDATA1, GHASHDATA2
|
||||
_horizontal_xor GHASH_ACC, GHASH_ACC_XMM, GHASH_ACC_XMM, \
|
||||
GHASHDATA0_XMM, GHASHDATA1_XMM, GHASHDATA2_XMM
|
||||
add $64, AAD
|
||||
sub $64, AADLEN
|
||||
jge .Laad_loop_1x
|
||||
.Laad_loop_1x_done:
|
||||
|
||||
// Update GHASH with the remaining 0 <= AADLEN < 64 bytes of AAD.
|
||||
add $64, AADLEN
|
||||
jz .Laad_done
|
||||
mov $-1, %rax
|
||||
bzhi AADLEN64, %rax, %rax
|
||||
kmovq %rax, %k1
|
||||
vmovdqu8 (AAD), GHASHDATA0{%k1}{z}
|
||||
neg AADLEN64
|
||||
and $~15, AADLEN64 // -round_up(AADLEN, 16)
|
||||
vmovdqu8 OFFSETOFEND_H_POWERS(KEY,AADLEN64), H_POW1
|
||||
vpshufb BSWAP_MASK, GHASHDATA0, GHASHDATA0
|
||||
vpxord GHASHDATA0, GHASH_ACC, GHASH_ACC
|
||||
_ghash_mul H_POW1, GHASH_ACC, GHASH_ACC, GFPOLY, \
|
||||
GHASHDATA0, GHASHDATA1, GHASHDATA2
|
||||
_horizontal_xor GHASH_ACC, GHASH_ACC_XMM, GHASH_ACC_XMM, \
|
||||
GHASHDATA0_XMM, GHASHDATA1_XMM, GHASHDATA2_XMM
|
||||
|
||||
.Laad_done:
|
||||
// Store the updated GHASH accumulator back to memory.
|
||||
vmovdqu GHASH_ACC_XMM, (GHASH_ACC_PTR)
|
||||
|
||||
vzeroupper // This is needed after using ymm or zmm registers.
|
||||
RET
|
||||
SYM_FUNC_END(aes_gcm_aad_update_vaes_avx512)
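A hedged sketch of how a caller might honor the "aadlen must be a multiple of 16 except on the last call" rule stated in the comment above. This is illustrative only, not the kernel's glue code; the context structure and the 'update' callback (standing in for aes_gcm_aad_update_vaes_avx512 with the key bound) are hypothetical.

    #include <stddef.h>
    #include <string.h>

    struct aad_ctx {
            unsigned char buf[16];  /* partial block carried between calls */
            size_t buflen;
    };

    /* Pass only whole multiples of 16 bytes to 'update'; any tail stays
     * buffered so it can be passed, at any length, on the final call. */
    static void aad_feed(struct aad_ctx *c, unsigned char ghash_acc[16],
                         const unsigned char *aad, size_t len,
                         void (*update)(unsigned char ghash_acc[16],
                                        const unsigned char *aad, int aadlen))
    {
            if (c->buflen) {
                    size_t fill = 16 - c->buflen;

                    if (fill > len)
                            fill = len;
                    memcpy(c->buf + c->buflen, aad, fill);
                    c->buflen += fill;
                    aad += fill;
                    len -= fill;
                    if (c->buflen == 16) {
                            update(ghash_acc, c->buf, 16);
                            c->buflen = 0;
                    }
            }
            if (len >= 16) {
                    size_t full = len & ~(size_t)15;

                    update(ghash_acc, aad, (int)full);
                    aad += full;
                    len -= full;
            }
            memcpy(c->buf + c->buflen, aad, len);
            c->buflen += len;
    }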
|
||||
|
||||
// Do one non-last round of AES encryption on the blocks in %zmm[0-3] using the
|
||||
// round key that has been broadcast to all 128-bit lanes of \round_key.
|
||||
.macro _vaesenc_4x round_key
|
||||
vaesenc \round_key, V0, V0
|
||||
vaesenc \round_key, V1, V1
|
||||
vaesenc \round_key, V2, V2
|
||||
vaesenc \round_key, V3, V3
|
||||
vaesenc \round_key, %zmm0, %zmm0
|
||||
vaesenc \round_key, %zmm1, %zmm1
|
||||
vaesenc \round_key, %zmm2, %zmm2
|
||||
vaesenc \round_key, %zmm3, %zmm3
|
||||
.endm
|
||||
|
||||
// Start the AES encryption of four vectors of counter blocks.
|
||||
.macro _ctr_begin_4x
|
||||
|
||||
// Increment LE_CTR four times to generate four vectors of little-endian
|
||||
// counter blocks, swap each to big-endian, and store them in V0-V3.
|
||||
vpshufb BSWAP_MASK, LE_CTR, V0
|
||||
// counter blocks, swap each to big-endian, and store them in %zmm[0-3].
|
||||
vpshufb BSWAP_MASK, LE_CTR, %zmm0
|
||||
vpaddd LE_CTR_INC, LE_CTR, LE_CTR
|
||||
vpshufb BSWAP_MASK, LE_CTR, V1
|
||||
vpshufb BSWAP_MASK, LE_CTR, %zmm1
|
||||
vpaddd LE_CTR_INC, LE_CTR, LE_CTR
|
||||
vpshufb BSWAP_MASK, LE_CTR, V2
|
||||
vpshufb BSWAP_MASK, LE_CTR, %zmm2
|
||||
vpaddd LE_CTR_INC, LE_CTR, LE_CTR
|
||||
vpshufb BSWAP_MASK, LE_CTR, V3
|
||||
vpshufb BSWAP_MASK, LE_CTR, %zmm3
|
||||
vpaddd LE_CTR_INC, LE_CTR, LE_CTR
|
||||
|
||||
// AES "round zero": XOR in the zero-th round key.
|
||||
vpxord RNDKEY0, V0, V0
|
||||
vpxord RNDKEY0, V1, V1
|
||||
vpxord RNDKEY0, V2, V2
|
||||
vpxord RNDKEY0, V3, V3
|
||||
vpxord RNDKEY0, %zmm0, %zmm0
|
||||
vpxord RNDKEY0, %zmm1, %zmm1
|
||||
vpxord RNDKEY0, %zmm2, %zmm2
|
||||
vpxord RNDKEY0, %zmm3, %zmm3
|
||||
.endm
|
||||
|
||||
// Do the last AES round for four vectors of counter blocks V0-V3, XOR source
|
||||
// data with the resulting keystream, and write the result to DST and
|
||||
// Do the last AES round for four vectors of counter blocks %zmm[0-3], XOR
|
||||
// source data with the resulting keystream, and write the result to DST and
|
||||
// GHASHDATA[0-3]. (Implementation differs slightly, but has the same effect.)
|
||||
.macro _aesenclast_and_xor_4x
|
||||
// XOR the source data with the last round key, saving the result in
|
||||
// GHASHDATA[0-3]. This reduces latency by taking advantage of the
|
||||
// property vaesenclast(key, a) ^ b == vaesenclast(key ^ b, a).
|
||||
vpxord 0*VL(SRC), RNDKEYLAST, GHASHDATA0
|
||||
vpxord 1*VL(SRC), RNDKEYLAST, GHASHDATA1
|
||||
vpxord 2*VL(SRC), RNDKEYLAST, GHASHDATA2
|
||||
vpxord 3*VL(SRC), RNDKEYLAST, GHASHDATA3
|
||||
vpxord 0*64(SRC), RNDKEYLAST, GHASHDATA0
|
||||
vpxord 1*64(SRC), RNDKEYLAST, GHASHDATA1
|
||||
vpxord 2*64(SRC), RNDKEYLAST, GHASHDATA2
|
||||
vpxord 3*64(SRC), RNDKEYLAST, GHASHDATA3
|
||||
|
||||
// Do the last AES round. This handles the XOR with the source data
|
||||
// too, as per the optimization described above.
|
||||
vaesenclast GHASHDATA0, V0, GHASHDATA0
|
||||
vaesenclast GHASHDATA1, V1, GHASHDATA1
|
||||
vaesenclast GHASHDATA2, V2, GHASHDATA2
|
||||
vaesenclast GHASHDATA3, V3, GHASHDATA3
|
||||
vaesenclast GHASHDATA0, %zmm0, GHASHDATA0
|
||||
vaesenclast GHASHDATA1, %zmm1, GHASHDATA1
|
||||
vaesenclast GHASHDATA2, %zmm2, GHASHDATA2
|
||||
vaesenclast GHASHDATA3, %zmm3, GHASHDATA3
|
||||
|
||||
// Store the en/decrypted data to DST.
|
||||
vmovdqu8 GHASHDATA0, 0*VL(DST)
|
||||
vmovdqu8 GHASHDATA1, 1*VL(DST)
|
||||
vmovdqu8 GHASHDATA2, 2*VL(DST)
|
||||
vmovdqu8 GHASHDATA3, 3*VL(DST)
|
||||
vmovdqu8 GHASHDATA0, 0*64(DST)
|
||||
vmovdqu8 GHASHDATA1, 1*64(DST)
|
||||
vmovdqu8 GHASHDATA2, 2*64(DST)
|
||||
vmovdqu8 GHASHDATA3, 3*64(DST)
|
||||
.endm
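The property relied on above, vaesenclast(key, a) ^ b == vaesenclast(key ^ b, a), holds because the last AES round ends with an XOR of the round key. A small sketch with AES-NI intrinsics (illustrative only; build with -maes -msse4.1 and run on a CPU with AES-NI):

    #include <immintrin.h>
    #include <stdio.h>

    int main(void)
    {
            __m128i a   = _mm_set_epi32(1, 2, 3, 4);
            __m128i key = _mm_set_epi32(5, 6, 7, 8);
            __m128i b   = _mm_set_epi32(9, 10, 11, 12);

            /* aesenclast(a, key) ^ b */
            __m128i lhs = _mm_xor_si128(_mm_aesenclast_si128(a, key), b);
            /* aesenclast(a, key ^ b) */
            __m128i rhs = _mm_aesenclast_si128(a, _mm_xor_si128(key, b));

            __m128i diff = _mm_xor_si128(lhs, rhs);
            printf("identity holds: %d\n", _mm_testz_si128(diff, diff));
            return 0;
    }

This is why the code can pre-XOR the source data into the last round key and let vaesenclast produce the keystream XOR'd with the data in one step.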

// void aes_gcm_{enc,dec}_update_##suffix(const struct aes_gcm_key_avx10 *key,
// void aes_gcm_{enc,dec}_update_vaes_avx512(const struct aes_gcm_key_vaes_avx512 *key,
// const u32 le_ctr[4], u8 ghash_acc[16],
// const u8 *src, u8 *dst, int datalen);
//
// This macro generates a GCM encryption or decryption update function with the
// above prototype (with \enc selecting which one). This macro supports both
// VL=32 and VL=64. _set_veclen must have been invoked with the desired length.
//
// This function computes the next portion of the CTR keystream, XOR's it with
// |datalen| bytes from |src|, and writes the resulting encrypted or decrypted
// data to |dst|. It also updates the GHASH accumulator |ghash_acc| using the
// next |datalen| ciphertext bytes.
// above prototype (with \enc selecting which one). The function computes the
// next portion of the CTR keystream, XOR's it with |datalen| bytes from |src|,
// and writes the resulting encrypted or decrypted data to |dst|. It also
// updates the GHASH accumulator |ghash_acc| using the next |datalen| ciphertext
// bytes.
//
// |datalen| must be a multiple of 16, except on the last call where it can be
// any length. The caller must do any buffering needed to ensure this. Both
// in-place and out-of-place en/decryption are supported.
//
// |le_ctr| must give the current counter in little-endian format. For a new
// message, the low word of the counter must be 2. This function loads the
// counter from |le_ctr| and increments the loaded counter as needed, but it
// does *not* store the updated counter back to |le_ctr|. The caller must
// update |le_ctr| if any more data segments follow. Internally, only the low
// 32-bit word of the counter is incremented, following the GCM standard.
// |le_ctr| must give the current counter in little-endian format. This
// function loads the counter from |le_ctr| and increments the loaded counter as
// needed, but it does *not* store the updated counter back to |le_ctr|. The
// caller must update |le_ctr| if any more data segments follow. Internally,
// only the low 32-bit word of the counter is incremented, following the GCM
// standard.
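A hedged sketch of the counter-handling contract described above, as seen from a caller processing a message in segments. The callback stands in for one of the update functions (with the key already bound) and is illustrative, not the kernel's actual glue code; every segment except the last is assumed to be a multiple of 16 bytes.

    #include <stdint.h>
    #include <stddef.h>

    /* The update function does not write the counter back, so the caller
     * maintains le_ctr itself; only the low 32-bit word advances, by one
     * per 16-byte block processed. */
    static void crypt_segments(const uint32_t le_ctr_start[4],
                               uint8_t ghash_acc[16],
                               const uint8_t *src, uint8_t *dst,
                               size_t total, size_t seglen,
                               void (*update)(const uint32_t le_ctr[4],
                                              uint8_t ghash_acc[16],
                                              const uint8_t *src, uint8_t *dst,
                                              int datalen))
    {
            uint32_t le_ctr[4] = { le_ctr_start[0], le_ctr_start[1],
                                   le_ctr_start[2], le_ctr_start[3] };

            while (total) {
                    size_t n = total < seglen ? total : seglen;

                    update(le_ctr, ghash_acc, src, dst, (int)n);
                    le_ctr[0] += (uint32_t)(n / 16);
                    src += n;
                    dst += n;
                    total -= n;
            }
    }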
.macro _aes_gcm_update enc
|
||||
|
||||
// Function arguments
|
||||
@@ -634,69 +712,69 @@
|
||||
// Pointer to the last AES round key for the chosen AES variant
|
||||
.set RNDKEYLAST_PTR, %r11
|
||||
|
||||
// In the main loop, V0-V3 are used as AES input and output. Elsewhere
|
||||
// they are used as temporary registers.
|
||||
// In the main loop, %zmm[0-3] are used as AES input and output.
|
||||
// Elsewhere they are used as temporary registers.
|
||||
|
||||
// GHASHDATA[0-3] hold the ciphertext blocks and GHASH input data.
|
||||
.set GHASHDATA0, V4
|
||||
.set GHASHDATA0, %zmm4
|
||||
.set GHASHDATA0_XMM, %xmm4
|
||||
.set GHASHDATA1, V5
|
||||
.set GHASHDATA1, %zmm5
|
||||
.set GHASHDATA1_XMM, %xmm5
|
||||
.set GHASHDATA2, V6
|
||||
.set GHASHDATA2, %zmm6
|
||||
.set GHASHDATA2_XMM, %xmm6
|
||||
.set GHASHDATA3, V7
|
||||
.set GHASHDATA3, %zmm7
|
||||
|
||||
// BSWAP_MASK is the shuffle mask for byte-reflecting 128-bit values
|
||||
// using vpshufb, copied to all 128-bit lanes.
|
||||
.set BSWAP_MASK, V8
|
||||
.set BSWAP_MASK, %zmm8
|
||||
|
||||
// RNDKEY temporarily holds the next AES round key.
|
||||
.set RNDKEY, V9
|
||||
.set RNDKEY, %zmm9
|
||||
|
||||
// GHASH_ACC is the accumulator variable for GHASH. When fully reduced,
|
||||
// only the lowest 128-bit lane can be nonzero. When not fully reduced,
|
||||
// more than one lane may be used, and they need to be XOR'd together.
|
||||
.set GHASH_ACC, V10
|
||||
.set GHASH_ACC, %zmm10
|
||||
.set GHASH_ACC_XMM, %xmm10
|
||||
|
||||
// LE_CTR_INC is the vector of 32-bit words that need to be added to a
|
||||
// vector of little-endian counter blocks to advance it forwards.
|
||||
.set LE_CTR_INC, V11
|
||||
.set LE_CTR_INC, %zmm11
|
||||
|
||||
// LE_CTR contains the next set of little-endian counter blocks.
|
||||
.set LE_CTR, V12
|
||||
.set LE_CTR, %zmm12
|
||||
|
||||
// RNDKEY0, RNDKEYLAST, and RNDKEY_M[9-1] contain cached AES round keys,
|
||||
// copied to all 128-bit lanes. RNDKEY0 is the zero-th round key,
|
||||
// RNDKEYLAST the last, and RNDKEY_M\i the one \i-th from the last.
|
||||
.set RNDKEY0, V13
|
||||
.set RNDKEYLAST, V14
|
||||
.set RNDKEY_M9, V15
|
||||
.set RNDKEY_M8, V16
|
||||
.set RNDKEY_M7, V17
|
||||
.set RNDKEY_M6, V18
|
||||
.set RNDKEY_M5, V19
|
||||
.set RNDKEY_M4, V20
|
||||
.set RNDKEY_M3, V21
|
||||
.set RNDKEY_M2, V22
|
||||
.set RNDKEY_M1, V23
|
||||
.set RNDKEY0, %zmm13
|
||||
.set RNDKEYLAST, %zmm14
|
||||
.set RNDKEY_M9, %zmm15
|
||||
.set RNDKEY_M8, %zmm16
|
||||
.set RNDKEY_M7, %zmm17
|
||||
.set RNDKEY_M6, %zmm18
|
||||
.set RNDKEY_M5, %zmm19
|
||||
.set RNDKEY_M4, %zmm20
|
||||
.set RNDKEY_M3, %zmm21
|
||||
.set RNDKEY_M2, %zmm22
|
||||
.set RNDKEY_M1, %zmm23
|
||||
|
||||
// GHASHTMP[0-2] are temporary variables used by _ghash_step_4x. These
|
||||
// cannot coincide with anything used for AES encryption, since for
|
||||
// performance reasons GHASH and AES encryption are interleaved.
|
||||
.set GHASHTMP0, V24
|
||||
.set GHASHTMP1, V25
|
||||
.set GHASHTMP2, V26
|
||||
.set GHASHTMP0, %zmm24
|
||||
.set GHASHTMP1, %zmm25
|
||||
.set GHASHTMP2, %zmm26
|
||||
|
||||
// H_POW[4-1] contain the powers of the hash key H^(4*VL/16)...H^1. The
|
||||
// H_POW[4-1] contain the powers of the hash key H^16...H^1. The
|
||||
// descending numbering reflects the order of the key powers.
|
||||
.set H_POW4, V27
|
||||
.set H_POW3, V28
|
||||
.set H_POW2, V29
|
||||
.set H_POW1, V30
|
||||
.set H_POW4, %zmm27
|
||||
.set H_POW3, %zmm28
|
||||
.set H_POW2, %zmm29
|
||||
.set H_POW1, %zmm30
|
||||
|
||||
// GFPOLY contains the .Lgfpoly constant, copied to all 128-bit lanes.
|
||||
.set GFPOLY, V31
|
||||
.set GFPOLY, %zmm31
|
||||
|
||||
// Load some constants.
|
||||
vbroadcasti32x4 .Lbswap_mask(%rip), BSWAP_MASK
|
||||
@@ -719,29 +797,23 @@
|
||||
// Finish initializing LE_CTR by adding [0, 1, ...] to its low words.
|
||||
vpaddd .Lctr_pattern(%rip), LE_CTR, LE_CTR
|
||||
|
||||
// Initialize LE_CTR_INC to contain VL/16 in all 128-bit lanes.
|
||||
.if VL == 32
|
||||
vbroadcasti32x4 .Linc_2blocks(%rip), LE_CTR_INC
|
||||
.elseif VL == 64
|
||||
// Load 4 into all 128-bit lanes of LE_CTR_INC.
|
||||
vbroadcasti32x4 .Linc_4blocks(%rip), LE_CTR_INC
|
||||
.else
|
||||
.error "Unsupported vector length"
|
||||
.endif
|
||||
|
||||
// If there are at least 4*VL bytes of data, then continue into the loop
|
||||
// that processes 4*VL bytes of data at a time. Otherwise skip it.
|
||||
// If there are at least 256 bytes of data, then continue into the loop
|
||||
// that processes 256 bytes of data at a time. Otherwise skip it.
|
||||
//
|
||||
// Pre-subtracting 4*VL from DATALEN saves an instruction from the main
|
||||
// Pre-subtracting 256 from DATALEN saves an instruction from the main
|
||||
// loop and also ensures that at least one write always occurs to
|
||||
// DATALEN, zero-extending it and allowing DATALEN64 to be used later.
|
||||
add $-4*VL, DATALEN // shorter than 'sub 4*VL' when VL=32
|
||||
sub $256, DATALEN
|
||||
jl .Lcrypt_loop_4x_done\@
|
||||
|
||||
// Load powers of the hash key.
|
||||
vmovdqu8 OFFSETOFEND_H_POWERS-4*VL(KEY), H_POW4
|
||||
vmovdqu8 OFFSETOFEND_H_POWERS-3*VL(KEY), H_POW3
|
||||
vmovdqu8 OFFSETOFEND_H_POWERS-2*VL(KEY), H_POW2
|
||||
vmovdqu8 OFFSETOFEND_H_POWERS-1*VL(KEY), H_POW1
|
||||
vmovdqu8 OFFSETOFEND_H_POWERS-4*64(KEY), H_POW4
|
||||
vmovdqu8 OFFSETOFEND_H_POWERS-3*64(KEY), H_POW3
|
||||
vmovdqu8 OFFSETOFEND_H_POWERS-2*64(KEY), H_POW2
|
||||
vmovdqu8 OFFSETOFEND_H_POWERS-1*64(KEY), H_POW1
|
||||
|
||||
// Main loop: en/decrypt and hash 4 vectors at a time.
|
||||
//
|
||||
@@ -770,9 +842,9 @@
|
||||
cmp %rax, RNDKEYLAST_PTR
|
||||
jne 1b
|
||||
_aesenclast_and_xor_4x
|
||||
sub $-4*VL, SRC // shorter than 'add 4*VL' when VL=32
|
||||
sub $-4*VL, DST
|
||||
add $-4*VL, DATALEN
|
||||
add $256, SRC
|
||||
add $256, DST
|
||||
sub $256, DATALEN
|
||||
jl .Lghash_last_ciphertext_4x\@
|
||||
.endif
|
||||
|
||||
@@ -786,10 +858,10 @@
|
||||
// If decrypting, load more ciphertext blocks into GHASHDATA[0-3]. If
|
||||
// encrypting, GHASHDATA[0-3] already contain the previous ciphertext.
|
||||
.if !\enc
|
||||
vmovdqu8 0*VL(SRC), GHASHDATA0
|
||||
vmovdqu8 1*VL(SRC), GHASHDATA1
|
||||
vmovdqu8 2*VL(SRC), GHASHDATA2
|
||||
vmovdqu8 3*VL(SRC), GHASHDATA3
|
||||
vmovdqu8 0*64(SRC), GHASHDATA0
|
||||
vmovdqu8 1*64(SRC), GHASHDATA1
|
||||
vmovdqu8 2*64(SRC), GHASHDATA2
|
||||
vmovdqu8 3*64(SRC), GHASHDATA3
|
||||
.endif
|
||||
|
||||
// Start the AES encryption of the counter blocks.
|
||||
@@ -809,44 +881,44 @@
|
||||
_vaesenc_4x RNDKEY
|
||||
128:
|
||||
|
||||
// Finish the AES encryption of the counter blocks in V0-V3, interleaved
|
||||
// with the GHASH update of the ciphertext blocks in GHASHDATA[0-3].
|
||||
// Finish the AES encryption of the counter blocks in %zmm[0-3],
|
||||
// interleaved with the GHASH update of the ciphertext blocks in
|
||||
// GHASHDATA[0-3].
|
||||
.irp i, 9,8,7,6,5,4,3,2,1
|
||||
_ghash_step_4x (9 - \i)
|
||||
_vaesenc_4x RNDKEY_M\i
|
||||
.endr
|
||||
_ghash_step_4x 9
|
||||
_aesenclast_and_xor_4x
|
||||
sub $-4*VL, SRC // shorter than 'add 4*VL' when VL=32
|
||||
sub $-4*VL, DST
|
||||
add $-4*VL, DATALEN
|
||||
add $256, SRC
|
||||
add $256, DST
|
||||
sub $256, DATALEN
|
||||
jge .Lcrypt_loop_4x\@
|
||||
|
||||
.if \enc
|
||||
.Lghash_last_ciphertext_4x\@:
|
||||
// Update GHASH with the last set of ciphertext blocks.
|
||||
.irp i, 0,1,2,3,4,5,6,7,8,9
|
||||
_ghash_step_4x \i
|
||||
.endr
|
||||
_ghash_4x
|
||||
.endif
|
||||
|
||||
.Lcrypt_loop_4x_done\@:
|
||||
|
||||
// Undo the extra subtraction by 4*VL and check whether data remains.
|
||||
sub $-4*VL, DATALEN // shorter than 'add 4*VL' when VL=32
|
||||
// Undo the extra subtraction by 256 and check whether data remains.
|
||||
add $256, DATALEN
|
||||
jz .Ldone\@
|
||||
|
||||
// The data length isn't a multiple of 4*VL. Process the remaining data
|
||||
// of length 1 <= DATALEN < 4*VL, up to one vector (VL bytes) at a time.
|
||||
// Going one vector at a time may seem inefficient compared to having
|
||||
// separate code paths for each possible number of vectors remaining.
|
||||
// However, using a loop keeps the code size down, and it performs
|
||||
// surprising well; modern CPUs will start executing the next iteration
|
||||
// before the previous one finishes and also predict the number of loop
|
||||
// iterations. For a similar reason, we roll up the AES rounds.
|
||||
// The data length isn't a multiple of 256 bytes. Process the remaining
|
||||
// data of length 1 <= DATALEN < 256, up to one 64-byte vector at a
|
||||
// time. Going one vector at a time may seem inefficient compared to
|
||||
// having separate code paths for each possible number of vectors
|
||||
// remaining. However, using a loop keeps the code size down, and it
|
||||
// performs surprising well; modern CPUs will start executing the next
|
||||
// iteration before the previous one finishes and also predict the
|
||||
// number of loop iterations. For a similar reason, we roll up the AES
|
||||
// rounds.
|
||||
//
|
||||
// On the last iteration, the remaining length may be less than VL.
|
||||
// Handle this using masking.
|
||||
// On the last iteration, the remaining length may be less than 64
|
||||
// bytes. Handle this using masking.
|
||||
//
|
||||
// Since there are enough key powers available for all remaining data,
|
||||
// there is no need to do a GHASH reduction after each iteration.
|
||||
@@ -875,65 +947,60 @@
|
||||
.Lcrypt_loop_1x\@:
|
||||
|
||||
// Select the appropriate mask for this iteration: all 1's if
|
||||
// DATALEN >= VL, otherwise DATALEN 1's. Do this branchlessly using the
|
||||
// DATALEN >= 64, otherwise DATALEN 1's. Do this branchlessly using the
|
||||
// bzhi instruction from BMI2. (This relies on DATALEN <= 255.)
|
||||
.if VL < 64
|
||||
mov $-1, %eax
|
||||
bzhi DATALEN, %eax, %eax
|
||||
kmovd %eax, %k1
|
||||
.else
|
||||
mov $-1, %rax
|
||||
bzhi DATALEN64, %rax, %rax
|
||||
kmovq %rax, %k1
|
||||
.endif
|
||||
|
||||
// Encrypt a vector of counter blocks. This does not need to be masked.
|
||||
vpshufb BSWAP_MASK, LE_CTR, V0
|
||||
vpshufb BSWAP_MASK, LE_CTR, %zmm0
|
||||
vpaddd LE_CTR_INC, LE_CTR, LE_CTR
|
||||
vpxord RNDKEY0, V0, V0
|
||||
vpxord RNDKEY0, %zmm0, %zmm0
|
||||
lea 16(KEY), %rax
|
||||
1:
|
||||
vbroadcasti32x4 (%rax), RNDKEY
|
||||
vaesenc RNDKEY, V0, V0
|
||||
vaesenc RNDKEY, %zmm0, %zmm0
|
||||
add $16, %rax
|
||||
cmp %rax, RNDKEYLAST_PTR
|
||||
jne 1b
|
||||
vaesenclast RNDKEYLAST, V0, V0
|
||||
vaesenclast RNDKEYLAST, %zmm0, %zmm0
|
||||
|
||||
// XOR the data with the appropriate number of keystream bytes.
|
||||
vmovdqu8 (SRC), V1{%k1}{z}
|
||||
vpxord V1, V0, V0
|
||||
vmovdqu8 V0, (DST){%k1}
|
||||
vmovdqu8 (SRC), %zmm1{%k1}{z}
|
||||
vpxord %zmm1, %zmm0, %zmm0
|
||||
vmovdqu8 %zmm0, (DST){%k1}
|
||||
|
||||
// Update GHASH with the ciphertext block(s), without reducing.
|
||||
//
|
||||
// In the case of DATALEN < VL, the ciphertext is zero-padded to VL.
|
||||
// (If decrypting, it's done by the above masked load. If encrypting,
|
||||
// it's done by the below masked register-to-register move.) Note that
|
||||
// if DATALEN <= VL - 16, there will be additional padding beyond the
|
||||
// padding of the last block specified by GHASH itself; i.e., there may
|
||||
// be whole block(s) that get processed by the GHASH multiplication and
|
||||
// reduction instructions but should not actually be included in the
|
||||
// In the case of DATALEN < 64, the ciphertext is zero-padded to 64
|
||||
// bytes. (If decrypting, it's done by the above masked load. If
|
||||
// encrypting, it's done by the below masked register-to-register move.)
|
||||
// Note that if DATALEN <= 48, there will be additional padding beyond
|
||||
// the padding of the last block specified by GHASH itself; i.e., there
|
||||
// may be whole block(s) that get processed by the GHASH multiplication
|
||||
// and reduction instructions but should not actually be included in the
|
||||
// GHASH. However, any such blocks are all-zeroes, and the values that
|
||||
// they're multiplied with are also all-zeroes. Therefore they just add
|
||||
// 0 * 0 = 0 to the final GHASH result, which makes no difference.
|
||||
vmovdqu8 (POWERS_PTR), H_POW1
|
||||
.if \enc
|
||||
vmovdqu8 V0, V1{%k1}{z}
|
||||
vmovdqu8 %zmm0, %zmm1{%k1}{z}
|
||||
.endif
|
||||
vpshufb BSWAP_MASK, V1, V0
|
||||
vpxord GHASH_ACC, V0, V0
|
||||
_ghash_mul_noreduce H_POW1, V0, LO, MI, HI, GHASHDATA3, V1, V2, V3
|
||||
vpshufb BSWAP_MASK, %zmm1, %zmm0
|
||||
vpxord GHASH_ACC, %zmm0, %zmm0
|
||||
_ghash_mul_noreduce H_POW1, %zmm0, LO, MI, HI, \
|
||||
GHASHDATA3, %zmm1, %zmm2, %zmm3
|
||||
vpxor GHASH_ACC_XMM, GHASH_ACC_XMM, GHASH_ACC_XMM
|
||||
|
||||
add $VL, POWERS_PTR
|
||||
add $VL, SRC
|
||||
add $VL, DST
|
||||
sub $VL, DATALEN
|
||||
add $64, POWERS_PTR
|
||||
add $64, SRC
|
||||
add $64, DST
|
||||
sub $64, DATALEN
|
||||
jg .Lcrypt_loop_1x\@
|
||||
|
||||
// Finally, do the GHASH reduction.
|
||||
_ghash_reduce LO, MI, HI, GFPOLY, V0
|
||||
_ghash_reduce LO, MI, HI, GFPOLY, %zmm0
|
||||
_horizontal_xor HI, HI_XMM, GHASH_ACC_XMM, %xmm0, %xmm1, %xmm2
|
||||
|
||||
.Ldone\@:
|
||||
@@ -944,10 +1011,10 @@
|
||||
RET
|
||||
.endm
|
||||
|
||||
// void aes_gcm_enc_final_vaes_avx10(const struct aes_gcm_key_avx10 *key,
|
||||
// void aes_gcm_enc_final_vaes_avx512(const struct aes_gcm_key_vaes_avx512 *key,
|
||||
// const u32 le_ctr[4], u8 ghash_acc[16],
|
||||
// u64 total_aadlen, u64 total_datalen);
|
||||
// bool aes_gcm_dec_final_vaes_avx10(const struct aes_gcm_key_avx10 *key,
|
||||
// bool aes_gcm_dec_final_vaes_avx512(const struct aes_gcm_key_vaes_avx512 *key,
|
||||
// const u32 le_ctr[4],
|
||||
// const u8 ghash_acc[16],
|
||||
// u64 total_aadlen, u64 total_datalen,
|
||||
@@ -1081,119 +1148,16 @@
|
||||
RET
|
||||
.endm
|
||||
|
||||
_set_veclen 32
|
||||
SYM_FUNC_START(aes_gcm_precompute_vaes_avx10_256)
|
||||
_aes_gcm_precompute
|
||||
SYM_FUNC_END(aes_gcm_precompute_vaes_avx10_256)
|
||||
SYM_FUNC_START(aes_gcm_enc_update_vaes_avx10_256)
|
||||
SYM_FUNC_START(aes_gcm_enc_update_vaes_avx512)
|
||||
_aes_gcm_update 1
|
||||
SYM_FUNC_END(aes_gcm_enc_update_vaes_avx10_256)
|
||||
SYM_FUNC_START(aes_gcm_dec_update_vaes_avx10_256)
|
||||
SYM_FUNC_END(aes_gcm_enc_update_vaes_avx512)
|
||||
SYM_FUNC_START(aes_gcm_dec_update_vaes_avx512)
|
||||
_aes_gcm_update 0
|
||||
SYM_FUNC_END(aes_gcm_dec_update_vaes_avx10_256)
|
||||
SYM_FUNC_END(aes_gcm_dec_update_vaes_avx512)
|
||||
|
||||
_set_veclen 64
|
||||
SYM_FUNC_START(aes_gcm_precompute_vaes_avx10_512)
|
||||
_aes_gcm_precompute
|
||||
SYM_FUNC_END(aes_gcm_precompute_vaes_avx10_512)
|
||||
SYM_FUNC_START(aes_gcm_enc_update_vaes_avx10_512)
|
||||
_aes_gcm_update 1
|
||||
SYM_FUNC_END(aes_gcm_enc_update_vaes_avx10_512)
|
||||
SYM_FUNC_START(aes_gcm_dec_update_vaes_avx10_512)
|
||||
_aes_gcm_update 0
|
||||
SYM_FUNC_END(aes_gcm_dec_update_vaes_avx10_512)
|
||||
|
||||
// void aes_gcm_aad_update_vaes_avx10(const struct aes_gcm_key_avx10 *key,
|
||||
// u8 ghash_acc[16],
|
||||
// const u8 *aad, int aadlen);
|
||||
//
|
||||
// This function processes the AAD (Additional Authenticated Data) in GCM.
|
||||
// Using the key |key|, it updates the GHASH accumulator |ghash_acc| with the
|
||||
// data given by |aad| and |aadlen|. |key->ghash_key_powers| must have been
|
||||
// initialized. On the first call, |ghash_acc| must be all zeroes. |aadlen|
|
||||
// must be a multiple of 16, except on the last call where it can be any length.
|
||||
// The caller must do any buffering needed to ensure this.
|
||||
//
|
||||
// AES-GCM is almost always used with small amounts of AAD, less than 32 bytes.
|
||||
// Therefore, for AAD processing we currently only provide this implementation
|
||||
// which uses 256-bit vectors (ymm registers) and only has a 1x-wide loop. This
|
||||
// keeps the code size down, and it enables some micro-optimizations, e.g. using
|
||||
// VEX-coded instructions instead of EVEX-coded to save some instruction bytes.
|
||||
// To optimize for large amounts of AAD, we could implement a 4x-wide loop and
|
||||
// provide a version using 512-bit vectors, but that doesn't seem to be useful.
|
||||
SYM_FUNC_START(aes_gcm_aad_update_vaes_avx10)
|
||||
|
||||
// Function arguments
|
||||
.set KEY, %rdi
|
||||
.set GHASH_ACC_PTR, %rsi
|
||||
.set AAD, %rdx
|
||||
.set AADLEN, %ecx
|
||||
.set AADLEN64, %rcx // Zero-extend AADLEN before using!
|
||||
|
||||
// Additional local variables.
|
||||
// %rax, %ymm0-%ymm3, and %k1 are used as temporary registers.
|
||||
.set BSWAP_MASK, %ymm4
|
||||
.set GFPOLY, %ymm5
|
||||
.set GHASH_ACC, %ymm6
|
||||
.set GHASH_ACC_XMM, %xmm6
|
||||
.set H_POW1, %ymm7
|
||||
|
||||
// Load some constants.
|
||||
vbroadcasti128 .Lbswap_mask(%rip), BSWAP_MASK
|
||||
vbroadcasti128 .Lgfpoly(%rip), GFPOLY
|
||||
|
||||
// Load the GHASH accumulator.
|
||||
vmovdqu (GHASH_ACC_PTR), GHASH_ACC_XMM
|
||||
|
||||
// Update GHASH with 32 bytes of AAD at a time.
|
||||
//
|
||||
// Pre-subtracting 32 from AADLEN saves an instruction from the loop and
|
||||
// also ensures that at least one write always occurs to AADLEN,
|
||||
// zero-extending it and allowing AADLEN64 to be used later.
|
||||
sub $32, AADLEN
|
||||
jl .Laad_loop_1x_done
|
||||
vmovdqu8 OFFSETOFEND_H_POWERS-32(KEY), H_POW1 // [H^2, H^1]
|
||||
.Laad_loop_1x:
|
||||
vmovdqu (AAD), %ymm0
|
||||
vpshufb BSWAP_MASK, %ymm0, %ymm0
|
||||
vpxor %ymm0, GHASH_ACC, GHASH_ACC
|
||||
_ghash_mul H_POW1, GHASH_ACC, GHASH_ACC, GFPOLY, \
|
||||
%ymm0, %ymm1, %ymm2
|
||||
vextracti128 $1, GHASH_ACC, %xmm0
|
||||
vpxor %xmm0, GHASH_ACC_XMM, GHASH_ACC_XMM
|
||||
add $32, AAD
|
||||
sub $32, AADLEN
|
||||
jge .Laad_loop_1x
|
||||
.Laad_loop_1x_done:
|
||||
add $32, AADLEN
|
||||
jz .Laad_done
|
||||
|
||||
// Update GHASH with the remaining 1 <= AADLEN < 32 bytes of AAD.
|
||||
mov $-1, %eax
|
||||
bzhi AADLEN, %eax, %eax
|
||||
kmovd %eax, %k1
|
||||
vmovdqu8 (AAD), %ymm0{%k1}{z}
|
||||
neg AADLEN64
|
||||
and $~15, AADLEN64 // -round_up(AADLEN, 16)
|
||||
vmovdqu8 OFFSETOFEND_H_POWERS(KEY,AADLEN64), H_POW1
|
||||
vpshufb BSWAP_MASK, %ymm0, %ymm0
|
||||
vpxor %ymm0, GHASH_ACC, GHASH_ACC
|
||||
_ghash_mul H_POW1, GHASH_ACC, GHASH_ACC, GFPOLY, \
|
||||
%ymm0, %ymm1, %ymm2
|
||||
vextracti128 $1, GHASH_ACC, %xmm0
|
||||
vpxor %xmm0, GHASH_ACC_XMM, GHASH_ACC_XMM
|
||||
|
||||
.Laad_done:
|
||||
// Store the updated GHASH accumulator back to memory.
|
||||
vmovdqu GHASH_ACC_XMM, (GHASH_ACC_PTR)
|
||||
|
||||
vzeroupper // This is needed after using ymm or zmm registers.
|
||||
RET
|
||||
SYM_FUNC_END(aes_gcm_aad_update_vaes_avx10)
|
||||
|
||||
SYM_FUNC_START(aes_gcm_enc_final_vaes_avx10)
|
||||
SYM_FUNC_START(aes_gcm_enc_final_vaes_avx512)
|
||||
_aes_gcm_final 1
|
||||
SYM_FUNC_END(aes_gcm_enc_final_vaes_avx10)
|
||||
SYM_FUNC_START(aes_gcm_dec_final_vaes_avx10)
|
||||
SYM_FUNC_END(aes_gcm_enc_final_vaes_avx512)
|
||||
SYM_FUNC_START(aes_gcm_dec_final_vaes_avx512)
|
||||
_aes_gcm_final 0
|
||||
SYM_FUNC_END(aes_gcm_dec_final_vaes_avx10)
|
||||
SYM_FUNC_END(aes_gcm_dec_final_vaes_avx512)
|
||||
@@ -874,8 +874,38 @@ struct aes_gcm_key_aesni {
#define AES_GCM_KEY_AESNI_SIZE \
(sizeof(struct aes_gcm_key_aesni) + (15 & ~(CRYPTO_MINALIGN - 1)))

/* Key struct used by the VAES + AVX10 implementations of AES-GCM */
struct aes_gcm_key_avx10 {
/* Key struct used by the VAES + AVX2 implementation of AES-GCM */
struct aes_gcm_key_vaes_avx2 {
/*
* Common part of the key. The assembly code prefers 16-byte alignment
* for the round keys; we get this by them being located at the start of
* the struct and the whole struct being 32-byte aligned.
*/
struct aes_gcm_key base;

/*
* Powers of the hash key H^8 through H^1. These are 128-bit values.
* They all have an extra factor of x^-1 and are byte-reversed.
* The assembly code prefers 32-byte alignment for this.
*/
u64 h_powers[8][2] __aligned(32);

/*
* Each entry in this array contains the two halves of an entry of
* h_powers XOR'd together, in the following order:
* H^8,H^6,H^7,H^5,H^4,H^2,H^3,H^1 i.e. indices 0,2,1,3,4,6,5,7.
* This is used for Karatsuba multiplication.
*/
u64 h_powers_xored[8];
};

#define AES_GCM_KEY_VAES_AVX2(key) \
container_of((key), struct aes_gcm_key_vaes_avx2, base)
#define AES_GCM_KEY_VAES_AVX2_SIZE \
(sizeof(struct aes_gcm_key_vaes_avx2) + (31 & ~(CRYPTO_MINALIGN - 1)))
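A sketch of how h_powers_xored could be derived from h_powers for the Karatsuba multiplication described in the struct comment above. The helper is illustrative, not the kernel's setkey code, but the 0,2,1,3,4,6,5,7 ordering is the one stated in the comment.

    #include <stdint.h>

    /* Each entry is the XOR of the two 64-bit halves of one hash key power,
     * stored in the order H^8,H^6,H^7,H^5,H^4,H^2,H^3,H^1, i.e. h_powers
     * indices 0,2,1,3,4,6,5,7. */
    static void fill_h_powers_xored(uint64_t xored[8], const uint64_t h_powers[8][2])
    {
            static const int order[8] = { 0, 2, 1, 3, 4, 6, 5, 7 };

            for (int i = 0; i < 8; i++)
                    xored[i] = h_powers[order[i]][0] ^ h_powers[order[i]][1];
    }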

/* Key struct used by the VAES + AVX512 implementation of AES-GCM */
struct aes_gcm_key_vaes_avx512 {
/*
* Common part of the key. The assembly code prefers 16-byte alignment
* for the round keys; we get this by them being located at the start of
@@ -895,10 +925,10 @@ struct aes_gcm_key_avx10 {
/* Three padding blocks required by the assembly code */
u64 padding[3][2];
};
#define AES_GCM_KEY_AVX10(key) \
container_of((key), struct aes_gcm_key_avx10, base)
#define AES_GCM_KEY_AVX10_SIZE \
(sizeof(struct aes_gcm_key_avx10) + (63 & ~(CRYPTO_MINALIGN - 1)))
#define AES_GCM_KEY_VAES_AVX512(key) \
container_of((key), struct aes_gcm_key_vaes_avx512, base)
#define AES_GCM_KEY_VAES_AVX512_SIZE \
(sizeof(struct aes_gcm_key_vaes_avx512) + (63 & ~(CRYPTO_MINALIGN - 1)))

/*
* These flags are passed to the AES-GCM helper functions to specify the
@@ -910,14 +940,16 @@ struct aes_gcm_key_avx10 {
#define FLAG_RFC4106 BIT(0)
#define FLAG_ENC BIT(1)
#define FLAG_AVX BIT(2)
#define FLAG_AVX10_256 BIT(3)
#define FLAG_AVX10_512 BIT(4)
#define FLAG_VAES_AVX2 BIT(3)
#define FLAG_VAES_AVX512 BIT(4)

static inline struct aes_gcm_key *
aes_gcm_key_get(struct crypto_aead *tfm, int flags)
{
if (flags & (FLAG_AVX10_256 | FLAG_AVX10_512))
if (flags & FLAG_VAES_AVX512)
return PTR_ALIGN(crypto_aead_ctx(tfm), 64);
else if (flags & FLAG_VAES_AVX2)
return PTR_ALIGN(crypto_aead_ctx(tfm), 32);
else
return PTR_ALIGN(crypto_aead_ctx(tfm), 16);
}
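The alignment arithmetic above can be read as follows (a simplified model, not the kernel's PTR_ALIGN macro): the context buffer is over-allocated by (align - 1) & ~(CRYPTO_MINALIGN - 1) extra bytes, which is exactly the worst-case distance from a CRYPTO_MINALIGN-aligned pointer to the next 16-, 32-, or 64-byte boundary, so the key struct can always be rounded up to the alignment the chosen implementation wants and still fit inside the buffer.

    #include <stdint.h>

    /* Round a pointer up to a power-of-two boundary. */
    static inline void *ptr_align_up(void *p, uintptr_t align)
    {
            return (void *)(((uintptr_t)p + align - 1) & ~(align - 1));
    }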
|
||||
@@ -927,26 +959,16 @@ aes_gcm_precompute_aesni(struct aes_gcm_key_aesni *key);
|
||||
asmlinkage void
|
||||
aes_gcm_precompute_aesni_avx(struct aes_gcm_key_aesni *key);
|
||||
asmlinkage void
|
||||
aes_gcm_precompute_vaes_avx10_256(struct aes_gcm_key_avx10 *key);
|
||||
aes_gcm_precompute_vaes_avx2(struct aes_gcm_key_vaes_avx2 *key);
|
||||
asmlinkage void
|
||||
aes_gcm_precompute_vaes_avx10_512(struct aes_gcm_key_avx10 *key);
|
||||
aes_gcm_precompute_vaes_avx512(struct aes_gcm_key_vaes_avx512 *key);
|
||||
|
||||
static void aes_gcm_precompute(struct aes_gcm_key *key, int flags)
|
||||
{
|
||||
/*
|
||||
* To make things a bit easier on the assembly side, the AVX10
|
||||
* implementations use the same key format. Therefore, a single
|
||||
* function using 256-bit vectors would suffice here. However, it's
|
||||
* straightforward to provide a 512-bit one because of how the assembly
|
||||
* code is structured, and it works nicely because the total size of the
|
||||
* key powers is a multiple of 512 bits. So we take advantage of that.
|
||||
*
|
||||
* A similar situation applies to the AES-NI implementations.
|
||||
*/
|
||||
if (flags & FLAG_AVX10_512)
|
||||
aes_gcm_precompute_vaes_avx10_512(AES_GCM_KEY_AVX10(key));
|
||||
else if (flags & FLAG_AVX10_256)
|
||||
aes_gcm_precompute_vaes_avx10_256(AES_GCM_KEY_AVX10(key));
|
||||
if (flags & FLAG_VAES_AVX512)
|
||||
aes_gcm_precompute_vaes_avx512(AES_GCM_KEY_VAES_AVX512(key));
|
||||
else if (flags & FLAG_VAES_AVX2)
|
||||
aes_gcm_precompute_vaes_avx2(AES_GCM_KEY_VAES_AVX2(key));
|
||||
else if (flags & FLAG_AVX)
|
||||
aes_gcm_precompute_aesni_avx(AES_GCM_KEY_AESNI(key));
|
||||
else
|
||||
@@ -960,15 +982,21 @@ asmlinkage void
|
||||
aes_gcm_aad_update_aesni_avx(const struct aes_gcm_key_aesni *key,
|
||||
u8 ghash_acc[16], const u8 *aad, int aadlen);
|
||||
asmlinkage void
|
||||
aes_gcm_aad_update_vaes_avx10(const struct aes_gcm_key_avx10 *key,
|
||||
aes_gcm_aad_update_vaes_avx2(const struct aes_gcm_key_vaes_avx2 *key,
|
||||
u8 ghash_acc[16], const u8 *aad, int aadlen);
|
||||
asmlinkage void
|
||||
aes_gcm_aad_update_vaes_avx512(const struct aes_gcm_key_vaes_avx512 *key,
|
||||
u8 ghash_acc[16], const u8 *aad, int aadlen);
|
||||
|
||||
static void aes_gcm_aad_update(const struct aes_gcm_key *key, u8 ghash_acc[16],
|
||||
const u8 *aad, int aadlen, int flags)
|
||||
{
|
||||
if (flags & (FLAG_AVX10_256 | FLAG_AVX10_512))
|
||||
aes_gcm_aad_update_vaes_avx10(AES_GCM_KEY_AVX10(key), ghash_acc,
|
||||
aad, aadlen);
|
||||
if (flags & FLAG_VAES_AVX512)
|
||||
aes_gcm_aad_update_vaes_avx512(AES_GCM_KEY_VAES_AVX512(key),
|
||||
ghash_acc, aad, aadlen);
|
||||
else if (flags & FLAG_VAES_AVX2)
|
||||
aes_gcm_aad_update_vaes_avx2(AES_GCM_KEY_VAES_AVX2(key),
|
||||
ghash_acc, aad, aadlen);
|
||||
else if (flags & FLAG_AVX)
|
||||
aes_gcm_aad_update_aesni_avx(AES_GCM_KEY_AESNI(key), ghash_acc,
|
||||
aad, aadlen);
|
@@ -986,11 +1014,11 @@ aes_gcm_enc_update_aesni_avx(const struct aes_gcm_key_aesni *key,
                             const u32 le_ctr[4], u8 ghash_acc[16],
                             const u8 *src, u8 *dst, int datalen);
asmlinkage void
aes_gcm_enc_update_vaes_avx10_256(const struct aes_gcm_key_avx10 *key,
aes_gcm_enc_update_vaes_avx2(const struct aes_gcm_key_vaes_avx2 *key,
                             const u32 le_ctr[4], u8 ghash_acc[16],
                             const u8 *src, u8 *dst, int datalen);
asmlinkage void
aes_gcm_enc_update_vaes_avx10_512(const struct aes_gcm_key_avx10 *key,
aes_gcm_enc_update_vaes_avx512(const struct aes_gcm_key_vaes_avx512 *key,
                               const u32 le_ctr[4], u8 ghash_acc[16],
                               const u8 *src, u8 *dst, int datalen);

@@ -1003,11 +1031,11 @@ aes_gcm_dec_update_aesni_avx(const struct aes_gcm_key_aesni *key,
                             const u32 le_ctr[4], u8 ghash_acc[16],
                             const u8 *src, u8 *dst, int datalen);
asmlinkage void
aes_gcm_dec_update_vaes_avx10_256(const struct aes_gcm_key_avx10 *key,
aes_gcm_dec_update_vaes_avx2(const struct aes_gcm_key_vaes_avx2 *key,
                             const u32 le_ctr[4], u8 ghash_acc[16],
                             const u8 *src, u8 *dst, int datalen);
asmlinkage void
aes_gcm_dec_update_vaes_avx10_512(const struct aes_gcm_key_avx10 *key,
aes_gcm_dec_update_vaes_avx512(const struct aes_gcm_key_vaes_avx512 *key,
                               const u32 le_ctr[4], u8 ghash_acc[16],
                               const u8 *src, u8 *dst, int datalen);

@@ -1018,12 +1046,12 @@ aes_gcm_update(const struct aes_gcm_key *key,
               const u8 *src, u8 *dst, int datalen, int flags)
{
        if (flags & FLAG_ENC) {
                if (flags & FLAG_AVX10_512)
                        aes_gcm_enc_update_vaes_avx10_512(AES_GCM_KEY_AVX10(key),
                if (flags & FLAG_VAES_AVX512)
                        aes_gcm_enc_update_vaes_avx512(AES_GCM_KEY_VAES_AVX512(key),
                                                       le_ctr, ghash_acc,
                                                       src, dst, datalen);
                else if (flags & FLAG_AVX10_256)
                        aes_gcm_enc_update_vaes_avx10_256(AES_GCM_KEY_AVX10(key),
                else if (flags & FLAG_VAES_AVX2)
                        aes_gcm_enc_update_vaes_avx2(AES_GCM_KEY_VAES_AVX2(key),
                                                     le_ctr, ghash_acc,
                                                     src, dst, datalen);
                else if (flags & FLAG_AVX)
@@ -1034,12 +1062,12 @@ aes_gcm_update(const struct aes_gcm_key *key,
                        aes_gcm_enc_update_aesni(AES_GCM_KEY_AESNI(key), le_ctr,
                                                 ghash_acc, src, dst, datalen);
        } else {
                if (flags & FLAG_AVX10_512)
                        aes_gcm_dec_update_vaes_avx10_512(AES_GCM_KEY_AVX10(key),
                if (flags & FLAG_VAES_AVX512)
                        aes_gcm_dec_update_vaes_avx512(AES_GCM_KEY_VAES_AVX512(key),
                                                       le_ctr, ghash_acc,
                                                       src, dst, datalen);
                else if (flags & FLAG_AVX10_256)
                        aes_gcm_dec_update_vaes_avx10_256(AES_GCM_KEY_AVX10(key),
                else if (flags & FLAG_VAES_AVX2)
                        aes_gcm_dec_update_vaes_avx2(AES_GCM_KEY_VAES_AVX2(key),
                                                     le_ctr, ghash_acc,
                                                     src, dst, datalen);
                else if (flags & FLAG_AVX)
@@ -1062,7 +1090,11 @@ aes_gcm_enc_final_aesni_avx(const struct aes_gcm_key_aesni *key,
                            const u32 le_ctr[4], u8 ghash_acc[16],
                            u64 total_aadlen, u64 total_datalen);
asmlinkage void
aes_gcm_enc_final_vaes_avx10(const struct aes_gcm_key_avx10 *key,
aes_gcm_enc_final_vaes_avx2(const struct aes_gcm_key_vaes_avx2 *key,
                            const u32 le_ctr[4], u8 ghash_acc[16],
                            u64 total_aadlen, u64 total_datalen);
asmlinkage void
aes_gcm_enc_final_vaes_avx512(const struct aes_gcm_key_vaes_avx512 *key,
                              const u32 le_ctr[4], u8 ghash_acc[16],
                              u64 total_aadlen, u64 total_datalen);

@@ -1072,8 +1104,12 @@ aes_gcm_enc_final(const struct aes_gcm_key *key,
                  const u32 le_ctr[4], u8 ghash_acc[16],
                  u64 total_aadlen, u64 total_datalen, int flags)
{
        if (flags & (FLAG_AVX10_256 | FLAG_AVX10_512))
                aes_gcm_enc_final_vaes_avx10(AES_GCM_KEY_AVX10(key),
        if (flags & FLAG_VAES_AVX512)
                aes_gcm_enc_final_vaes_avx512(AES_GCM_KEY_VAES_AVX512(key),
                                              le_ctr, ghash_acc,
                                              total_aadlen, total_datalen);
        else if (flags & FLAG_VAES_AVX2)
                aes_gcm_enc_final_vaes_avx2(AES_GCM_KEY_VAES_AVX2(key),
                                            le_ctr, ghash_acc,
                                            total_aadlen, total_datalen);
        else if (flags & FLAG_AVX)
@@ -1097,7 +1133,12 @@ aes_gcm_dec_final_aesni_avx(const struct aes_gcm_key_aesni *key,
                            u64 total_aadlen, u64 total_datalen,
                            const u8 tag[16], int taglen);
asmlinkage bool __must_check
aes_gcm_dec_final_vaes_avx10(const struct aes_gcm_key_avx10 *key,
aes_gcm_dec_final_vaes_avx2(const struct aes_gcm_key_vaes_avx2 *key,
                            const u32 le_ctr[4], const u8 ghash_acc[16],
                            u64 total_aadlen, u64 total_datalen,
                            const u8 tag[16], int taglen);
asmlinkage bool __must_check
aes_gcm_dec_final_vaes_avx512(const struct aes_gcm_key_vaes_avx512 *key,
                              const u32 le_ctr[4], const u8 ghash_acc[16],
                              u64 total_aadlen, u64 total_datalen,
                              const u8 tag[16], int taglen);
@@ -1108,8 +1149,13 @@ aes_gcm_dec_final(const struct aes_gcm_key *key, const u32 le_ctr[4],
                  u8 ghash_acc[16], u64 total_aadlen, u64 total_datalen,
                  u8 tag[16], int taglen, int flags)
{
        if (flags & (FLAG_AVX10_256 | FLAG_AVX10_512))
                return aes_gcm_dec_final_vaes_avx10(AES_GCM_KEY_AVX10(key),
        if (flags & FLAG_VAES_AVX512)
                return aes_gcm_dec_final_vaes_avx512(AES_GCM_KEY_VAES_AVX512(key),
                                                     le_ctr, ghash_acc,
                                                     total_aadlen, total_datalen,
                                                     tag, taglen);
        else if (flags & FLAG_VAES_AVX2)
                return aes_gcm_dec_final_vaes_avx2(AES_GCM_KEY_VAES_AVX2(key),
                                                   le_ctr, ghash_acc,
                                                   total_aadlen, total_datalen,
                                                   tag, taglen);
@@ -1195,10 +1241,14 @@ static int gcm_setkey(struct crypto_aead *tfm, const u8 *raw_key,
        BUILD_BUG_ON(offsetof(struct aes_gcm_key_aesni, h_powers) != 496);
        BUILD_BUG_ON(offsetof(struct aes_gcm_key_aesni, h_powers_xored) != 624);
        BUILD_BUG_ON(offsetof(struct aes_gcm_key_aesni, h_times_x64) != 688);
        BUILD_BUG_ON(offsetof(struct aes_gcm_key_avx10, base.aes_key.key_enc) != 0);
        BUILD_BUG_ON(offsetof(struct aes_gcm_key_avx10, base.aes_key.key_length) != 480);
        BUILD_BUG_ON(offsetof(struct aes_gcm_key_avx10, h_powers) != 512);
        BUILD_BUG_ON(offsetof(struct aes_gcm_key_avx10, padding) != 768);
        BUILD_BUG_ON(offsetof(struct aes_gcm_key_vaes_avx2, base.aes_key.key_enc) != 0);
        BUILD_BUG_ON(offsetof(struct aes_gcm_key_vaes_avx2, base.aes_key.key_length) != 480);
        BUILD_BUG_ON(offsetof(struct aes_gcm_key_vaes_avx2, h_powers) != 512);
        BUILD_BUG_ON(offsetof(struct aes_gcm_key_vaes_avx2, h_powers_xored) != 640);
        BUILD_BUG_ON(offsetof(struct aes_gcm_key_vaes_avx512, base.aes_key.key_enc) != 0);
        BUILD_BUG_ON(offsetof(struct aes_gcm_key_vaes_avx512, base.aes_key.key_length) != 480);
        BUILD_BUG_ON(offsetof(struct aes_gcm_key_vaes_avx512, h_powers) != 512);
        BUILD_BUG_ON(offsetof(struct aes_gcm_key_vaes_avx512, padding) != 768);

        if (likely(crypto_simd_usable())) {
                err = aes_check_keylen(keylen);
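
The BUILD_BUG_ON() checks above pin the byte offsets of the key structures, presumably because the assembly addresses those fields at fixed offsets. The same idea in a minimal standalone form, using C11 _Static_assert and a made-up struct whose numbers are borrowed from the checks above:

/*
 * Standalone sketch (hypothetical struct, not the kernel's): assert at
 * compile time that fields land at the byte offsets the checks above expect.
 */
#include <stddef.h>

struct demo_key {
        unsigned char key_enc[480];             /* offset 0   */
        unsigned int key_length;                /* offset 480 */
        unsigned char pad[512 - 484];
        unsigned long long h_powers[8][2];      /* offset 512 */
};

_Static_assert(offsetof(struct demo_key, key_enc) == 0, "layout");
_Static_assert(offsetof(struct demo_key, key_length) == 480, "layout");
_Static_assert(offsetof(struct demo_key, h_powers) == 512, "layout");

int main(void) { return 0; }
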
@@ -1231,8 +1281,9 @@ static int gcm_setkey(struct crypto_aead *tfm, const u8 *raw_key,
                gf128mul_lle(&h, (const be128 *)x_to_the_minus1);

                /* Compute the needed key powers */
                if (flags & (FLAG_AVX10_256 | FLAG_AVX10_512)) {
                        struct aes_gcm_key_avx10 *k = AES_GCM_KEY_AVX10(key);
                if (flags & FLAG_VAES_AVX512) {
                        struct aes_gcm_key_vaes_avx512 *k =
                                AES_GCM_KEY_VAES_AVX512(key);

                        for (i = ARRAY_SIZE(k->h_powers) - 1; i >= 0; i--) {
                                k->h_powers[i][0] = be64_to_cpu(h.b);
@@ -1240,6 +1291,22 @@ static int gcm_setkey(struct crypto_aead *tfm, const u8 *raw_key,
                                gf128mul_lle(&h, &h1);
                        }
                        memset(k->padding, 0, sizeof(k->padding));
                } else if (flags & FLAG_VAES_AVX2) {
                        struct aes_gcm_key_vaes_avx2 *k =
                                AES_GCM_KEY_VAES_AVX2(key);
                        static const u8 indices[8] = { 0, 2, 1, 3, 4, 6, 5, 7 };

                        for (i = ARRAY_SIZE(k->h_powers) - 1; i >= 0; i--) {
                                k->h_powers[i][0] = be64_to_cpu(h.b);
                                k->h_powers[i][1] = be64_to_cpu(h.a);
                                gf128mul_lle(&h, &h1);
                        }
                        for (i = 0; i < ARRAY_SIZE(k->h_powers_xored); i++) {
                                int j = indices[i];

                                k->h_powers_xored[i] = k->h_powers[j][0] ^
                                                       k->h_powers[j][1];
                        }
                } else {
                        struct aes_gcm_key_aesni *k = AES_GCM_KEY_AESNI(key);

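
In the new FLAG_VAES_AVX2 branch above, each GHASH key power is stored as two 64-bit halves, and the XOR of the two halves of each power is precomputed as well; the { 0, 2, 1, 3, 4, 6, 5, 7 } table reorders the xored values, presumably to match the lane order in which the VAES+AVX2 assembly consumes them, and the xored halves are commonly used for a Karatsuba-style carryless multiplication (both points are inferences, not stated by this hunk). A standalone sketch of just that precomputation, with dummy values standing in for the real key powers:

/*
 * Standalone sketch (dummy data, not kernel code): precompute the XOR of the
 * two 64-bit halves of each key power, permuted by the indices table above.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t h_powers[8][2];        /* [i][0] = low half, [i][1] = high half */
        uint64_t h_powers_xored[8];
        static const uint8_t indices[8] = { 0, 2, 1, 3, 4, 6, 5, 7 };
        int i;

        for (i = 0; i < 8; i++) {       /* stand-ins for the real powers of H */
                h_powers[i][0] = 0x1111111111111111ull * (i + 1);
                h_powers[i][1] = 0x0101010101010101ull * (i + 1);
        }

        for (i = 0; i < 8; i++) {
                int j = indices[i];

                h_powers_xored[i] = h_powers[j][0] ^ h_powers[j][1];
        }

        for (i = 0; i < 8; i++)
                printf("xored[%d] = %016llx\n", i,
                       (unsigned long long)h_powers_xored[i]);
        return 0;
}
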
@@ -1508,15 +1575,15 @@ DEFINE_GCM_ALGS(aesni_avx, FLAG_AVX,
                "generic-gcm-aesni-avx", "rfc4106-gcm-aesni-avx",
                AES_GCM_KEY_AESNI_SIZE, 500);

/* aes_gcm_algs_vaes_avx10_256 */
DEFINE_GCM_ALGS(vaes_avx10_256, FLAG_AVX10_256,
                "generic-gcm-vaes-avx10_256", "rfc4106-gcm-vaes-avx10_256",
                AES_GCM_KEY_AVX10_SIZE, 700);
/* aes_gcm_algs_vaes_avx2 */
DEFINE_GCM_ALGS(vaes_avx2, FLAG_VAES_AVX2,
                "generic-gcm-vaes-avx2", "rfc4106-gcm-vaes-avx2",
                AES_GCM_KEY_VAES_AVX2_SIZE, 600);

/* aes_gcm_algs_vaes_avx10_512 */
DEFINE_GCM_ALGS(vaes_avx10_512, FLAG_AVX10_512,
                "generic-gcm-vaes-avx10_512", "rfc4106-gcm-vaes-avx10_512",
                AES_GCM_KEY_AVX10_SIZE, 800);
/* aes_gcm_algs_vaes_avx512 */
DEFINE_GCM_ALGS(vaes_avx512, FLAG_VAES_AVX512,
                "generic-gcm-vaes-avx512", "rfc4106-gcm-vaes-avx512",
                AES_GCM_KEY_VAES_AVX512_SIZE, 800);

static int __init register_avx_algs(void)
{
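
The trailing numeric argument of DEFINE_GCM_ALGS (500 / 600 / 800 above) is the algorithm priority: when several drivers register the same algorithm name, the crypto API prefers the highest priority, so the VAES+AVX512 entries win over VAES+AVX2 and AES-NI+AVX when all are available, and the PREFER_YMM branch further below demotes the AVX512 entries by setting their priority to 1. A standalone sketch of that selection rule, with the driver names hard-coded for illustration:

/*
 * Standalone sketch (not the kernel API): pick the registered entry with the
 * highest priority, as the crypto core does for same-named algorithms.
 */
#include <stdio.h>

struct alg {
        const char *driver;
        int priority;
};

int main(void)
{
        const struct alg algs[] = {
                { "generic-gcm-aesni-avx",   500 },
                { "generic-gcm-vaes-avx2",   600 },
                { "generic-gcm-vaes-avx512", 800 },
        };
        const struct alg *best = &algs[0];

        for (unsigned int i = 1; i < sizeof(algs) / sizeof(algs[0]); i++)
                if (algs[i].priority > best->priority)
                        best = &algs[i];
        printf("selected: %s\n", best->driver);
        return 0;
}
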
@@ -1548,6 +1615,10 @@ static int __init register_avx_algs(void)
                                         ARRAY_SIZE(skcipher_algs_vaes_avx2));
        if (err)
                return err;
        err = crypto_register_aeads(aes_gcm_algs_vaes_avx2,
                                    ARRAY_SIZE(aes_gcm_algs_vaes_avx2));
        if (err)
                return err;

        if (!boot_cpu_has(X86_FEATURE_AVX512BW) ||
            !boot_cpu_has(X86_FEATURE_AVX512VL) ||
@@ -1556,26 +1627,21 @@ static int __init register_avx_algs(void)
                               XFEATURE_MASK_AVX512, NULL))
                return 0;

        err = crypto_register_aeads(aes_gcm_algs_vaes_avx10_256,
                                    ARRAY_SIZE(aes_gcm_algs_vaes_avx10_256));
        if (err)
                return err;

        if (boot_cpu_has(X86_FEATURE_PREFER_YMM)) {
                int i;

                for (i = 0; i < ARRAY_SIZE(skcipher_algs_vaes_avx512); i++)
                        skcipher_algs_vaes_avx512[i].base.cra_priority = 1;
                for (i = 0; i < ARRAY_SIZE(aes_gcm_algs_vaes_avx10_512); i++)
                        aes_gcm_algs_vaes_avx10_512[i].base.cra_priority = 1;
                for (i = 0; i < ARRAY_SIZE(aes_gcm_algs_vaes_avx512); i++)
                        aes_gcm_algs_vaes_avx512[i].base.cra_priority = 1;
        }

        err = crypto_register_skciphers(skcipher_algs_vaes_avx512,
                                        ARRAY_SIZE(skcipher_algs_vaes_avx512));
        if (err)
                return err;
        err = crypto_register_aeads(aes_gcm_algs_vaes_avx10_512,
                                    ARRAY_SIZE(aes_gcm_algs_vaes_avx10_512));
        err = crypto_register_aeads(aes_gcm_algs_vaes_avx512,
                                    ARRAY_SIZE(aes_gcm_algs_vaes_avx512));
        if (err)
                return err;

@@ -1595,8 +1661,8 @@ static void unregister_avx_algs(void)
        unregister_aeads(aes_gcm_algs_aesni_avx);
        unregister_skciphers(skcipher_algs_vaes_avx2);
        unregister_skciphers(skcipher_algs_vaes_avx512);
        unregister_aeads(aes_gcm_algs_vaes_avx10_256);
        unregister_aeads(aes_gcm_algs_vaes_avx10_512);
        unregister_aeads(aes_gcm_algs_vaes_avx2);
        unregister_aeads(aes_gcm_algs_vaes_avx512);
}
#else /* CONFIG_X86_64 */
static struct aead_alg aes_gcm_algs_aesni[0];