/* SPDX-License-Identifier: Apache-2.0 OR BSD-2-Clause */
//
// This file is dual-licensed, meaning that you can use it under your
// choice of either of the following two licenses:
//
// Copyright 2023 The OpenSSL Project Authors. All Rights Reserved.
//
// Licensed under the Apache License 2.0 (the "License"). You can obtain
// a copy in the file LICENSE in the source distribution or at
// https://www.openssl.org/source/license.html
//
// or
//
// Copyright (c) 2023, Christoph Müllner <christoph.muellner@vrull.eu>
// Copyright (c) 2023, Jerry Shih <jerry.shih@sifive.com>
// Copyright 2024 Google LLC
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// 1. Redistributions of source code must retain the above copyright
//    notice, this list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright
//    notice, this list of conditions and the following disclaimer in the
//    documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// The generated code of this file depends on the following RISC-V extensions:
// - RV64I
// - RISC-V Vector ('V') with VLEN >= 128
// - RISC-V Vector SM3 Secure Hash extension ('Zvksh')
// - RISC-V Vector Cryptography Bit-manipulation extension ('Zvkb')

#include <linux/cfi_types.h>

.text
.option arch, +zvksh, +zvkb

// Argument registers (standard RISC-V calling convention: a0-a2 = args 1-3).
#define STATEP		a0	// u32 state[8]: SM3 chaining value, in/out
#define DATA		a1	// const u8 *data: message blocks to absorb
#define NUM_BLOCKS	a2	// int num_blocks: number of 64-byte blocks (> 0)

// Vector register roles.  Each value is 8 x 32-bit words; with VLEN >= 128
// that needs a register group of LMUL=2, hence the even register numbers.
#define STATE		v0	// LMUL=2: current state A..H
#define PREV_STATE	v2	// LMUL=2: state at start of block, for final XOR
#define W0		v4	// LMUL=2: message schedule words (low half)
#define W1		v6	// LMUL=2: message schedule words (high half)
#define VTMP		v8	// LMUL=2: scratch for slid/merged W windows

// Do 8 rounds of SM3 (rounds 2*\i .. 2*\i+7), consuming message words
// W_{0+i}..W_{11+i} held in \w0/\w1, and expand the next 8 schedule words
// into \w0.  Each vsm3c.vi performs 2 rounds, selecting its word pair by
// the immediate, which is why the immediates step by 1 while \i steps by 4
// across invocations.
.macro	sm3_8rounds	i, w0, w1
	// Do 4 rounds using W_{0+i}..W_{7+i}.
	vsm3c.vi	STATE, \w0, \i + 0
	vslidedown.vi	VTMP, \w0, 2
	vsm3c.vi	STATE, VTMP, \i + 1

	// Compute W_{4+i}..W_{11+i} by merging the top half of \w0 with the
	// bottom half of \w1 (slidedown then slideup leaves \w0's high words
	// in the low positions and fills the rest from \w1).
	vslidedown.vi	VTMP, \w0, 4
	vslideup.vi	VTMP, \w1, 4

	// Do 4 rounds using W_{4+i}..W_{11+i}.
	vsm3c.vi	STATE, VTMP, \i + 2
	vslidedown.vi	VTMP, VTMP, 2
	vsm3c.vi	STATE, VTMP, \i + 3

	.if \i < 28
	// Compute W_{16+i}..W_{23+i}.  Skipped on the last invocation
	// (\i == 28) since no further schedule words are consumed.
	vsm3me.vv	\w0, \w1, \w0
	.endif
	// For the next 8 rounds, w0 and w1 are swapped.
.endm

// void sm3_transform_zvksh_zvkb(u32 state[8], const u8 *data, int num_blocks);
//
// Absorb num_blocks 64-byte message blocks from data into the SM3 state.
// state is kept in CPU-native (little-endian) word order in memory; SM3 is
// big-endian, so the words are byte-reversed (vrev8.v, from Zvkb) on load
// and again on store.  num_blocks must be >= 1.  Clobbers v0-v9 and the
// argument registers; no stack usage.
SYM_TYPED_FUNC_START(sm3_transform_zvksh_zvkb)

	// Load the state and endian-swap each 32-bit word.
	vsetivli	zero, 8, e32, m2, ta, ma
	vle32.v		STATE, (STATEP)
	vrev8.v		STATE, STATE

.Lnext_block:
	addi		NUM_BLOCKS, NUM_BLOCKS, -1

	// Save the previous state, as it's needed later.
	vmv.v.v		PREV_STATE, STATE

	// Load the next 512-bit message block into W0-W1.
	vle32.v		W0, (DATA)
	addi		DATA, DATA, 32
	vle32.v		W1, (DATA)
	addi		DATA, DATA, 32

	// Do the 64 rounds of SM3.
	sm3_8rounds	0, W0, W1
	sm3_8rounds	4, W1, W0
	sm3_8rounds	8, W0, W1
	sm3_8rounds	12, W1, W0
	sm3_8rounds	16, W0, W1
	sm3_8rounds	20, W1, W0
	sm3_8rounds	24, W0, W1
	sm3_8rounds	28, W1, W0

	// XOR in the previous state.
	vxor.vv		STATE, STATE, PREV_STATE

	// Repeat if more blocks remain.
	bnez		NUM_BLOCKS, .Lnext_block

	// Store the new state and return.
	vrev8.v		STATE, STATE
	vse32.v		STATE, (STATEP)
	ret
SYM_FUNC_END(sm3_transform_zvksh_zvkb)
Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.