; blob: 8752624ed9e645bcdfbf6b9e0cdbcf7c7afa83fc
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+m -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32IM
; RUN: llc -mtriple=riscv64 -mattr=+m -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64IM
; Carry-less multiply of two i4 values. RV32IM and RV64IM emit identical code
; (shared CHECK prefix): each of the four low bits of %b (in a1) is isolated
; with andi, multiplied by %a (in a0) to form a shifted partial product, and
; the partial products are combined with xor.
define i4 @clmul_i4(i4 %a, i4 %b) nounwind {
; CHECK-LABEL: clmul_i4:
; CHECK: # %bb.0:
; CHECK-NEXT: andi a2, a1, 2
; CHECK-NEXT: andi a3, a1, 1
; CHECK-NEXT: andi a4, a1, 4
; CHECK-NEXT: andi a1, a1, 8
; CHECK-NEXT: mul a2, a0, a2
; CHECK-NEXT: mul a3, a0, a3
; CHECK-NEXT: mul a4, a0, a4
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: xor a2, a3, a2
; CHECK-NEXT: xor a0, a4, a0
; CHECK-NEXT: xor a0, a2, a0
; CHECK-NEXT: ret
  %res = call i4 @llvm.clmul.i4(i4 %a, i4 %b)
  ret i4 %res
}
; Carry-less multiply of two i8 values. Same per-bit expansion as clmul_i8's
; smaller sibling above, now over bits 0-7 of %b; note bit 7 is extracted with
; andi a1, a1, -128 (sign-extended 12-bit immediate covering 0x80 and above,
; harmless here since %b is an i8). Identical output on both targets.
define i8 @clmul_i8(i8 %a, i8 %b) nounwind {
; CHECK-LABEL: clmul_i8:
; CHECK: # %bb.0:
; CHECK-NEXT: andi a2, a1, 2
; CHECK-NEXT: andi a3, a1, 1
; CHECK-NEXT: andi a4, a1, 4
; CHECK-NEXT: andi a5, a1, 8
; CHECK-NEXT: mul a2, a0, a2
; CHECK-NEXT: mul a3, a0, a3
; CHECK-NEXT: xor a2, a3, a2
; CHECK-NEXT: andi a3, a1, 16
; CHECK-NEXT: mul a4, a0, a4
; CHECK-NEXT: mul a5, a0, a5
; CHECK-NEXT: xor a4, a4, a5
; CHECK-NEXT: andi a5, a1, 32
; CHECK-NEXT: mul a3, a0, a3
; CHECK-NEXT: mul a5, a0, a5
; CHECK-NEXT: xor a3, a3, a5
; CHECK-NEXT: xor a2, a2, a4
; CHECK-NEXT: andi a4, a1, 64
; CHECK-NEXT: andi a1, a1, -128
; CHECK-NEXT: mul a4, a0, a4
; CHECK-NEXT: xor a3, a3, a4
; CHECK-NEXT: xor a2, a2, a3
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: xor a0, a2, a0
; CHECK-NEXT: ret
  %res = call i8 @llvm.clmul.i8(i8 %a, i8 %b)
  ret i8 %res
}
; Carry-less multiply of two i16 values. Sixteen single-bit partial products:
; bits 0-10 come from andi immediates, bit 11 from li 1 / slli 11, and bits
; 12-15 from lui masks (lui 1/2/4 and lui 1048568 for the top bits), each
; ANDed with %b, multiplied by %a, and xor-reduced. Identical on both targets.
define i16 @clmul_i16(i16 %a, i16 %b) nounwind {
; CHECK-LABEL: clmul_i16:
; CHECK: # %bb.0:
; CHECK-NEXT: andi a2, a1, 2
; CHECK-NEXT: andi a3, a1, 1
; CHECK-NEXT: andi a4, a1, 4
; CHECK-NEXT: andi a5, a1, 8
; CHECK-NEXT: andi a6, a1, 16
; CHECK-NEXT: andi a7, a1, 32
; CHECK-NEXT: andi t0, a1, 64
; CHECK-NEXT: andi t1, a1, 128
; CHECK-NEXT: mul a2, a0, a2
; CHECK-NEXT: mul a3, a0, a3
; CHECK-NEXT: xor a2, a3, a2
; CHECK-NEXT: andi a3, a1, 256
; CHECK-NEXT: mul a4, a0, a4
; CHECK-NEXT: mul a5, a0, a5
; CHECK-NEXT: xor a4, a4, a5
; CHECK-NEXT: andi a5, a1, 512
; CHECK-NEXT: mul a6, a0, a6
; CHECK-NEXT: mul a7, a0, a7
; CHECK-NEXT: xor a6, a6, a7
; CHECK-NEXT: li a7, 1
; CHECK-NEXT: mul t1, a0, t1
; CHECK-NEXT: mul a3, a0, a3
; CHECK-NEXT: xor a3, t1, a3
; CHECK-NEXT: lui t1, 1
; CHECK-NEXT: xor a2, a2, a4
; CHECK-NEXT: lui a4, 2
; CHECK-NEXT: mul t0, a0, t0
; CHECK-NEXT: xor a6, a6, t0
; CHECK-NEXT: lui t0, 4
; CHECK-NEXT: mul a5, a0, a5
; CHECK-NEXT: xor a3, a3, a5
; CHECK-NEXT: lui a5, 1048568
; CHECK-NEXT: slli a7, a7, 11
; CHECK-NEXT: and t1, a1, t1
; CHECK-NEXT: and a4, a1, a4
; CHECK-NEXT: and t0, a1, t0
; CHECK-NEXT: and a5, a1, a5
; CHECK-NEXT: and a7, a1, a7
; CHECK-NEXT: andi a1, a1, 1024
; CHECK-NEXT: mul a1, a0, a1
; CHECK-NEXT: mul t1, a0, t1
; CHECK-NEXT: mul a4, a0, a4
; CHECK-NEXT: mul t0, a0, t0
; CHECK-NEXT: mul a5, a0, a5
; CHECK-NEXT: mul a0, a0, a7
; CHECK-NEXT: xor a2, a2, a6
; CHECK-NEXT: xor a1, a3, a1
; CHECK-NEXT: xor a0, a0, t1
; CHECK-NEXT: xor a1, a2, a1
; CHECK-NEXT: xor a0, a0, a4
; CHECK-NEXT: xor a0, a1, a0
; CHECK-NEXT: xor a1, t0, a5
; CHECK-NEXT: xor a0, a0, a1
; CHECK-NEXT: ret
  %res = call i16 @llvm.clmul.i16(i16 %a, i16 %b)
  ret i16 %res
}
; Carry-less multiply of two i32 values. Thirty-two single-bit partial
; products need more temporaries than the argument/temp registers provide, so
; both targets spill callee-saved registers s0-s10 to the stack. The RV32IM
; and RV64IM bodies differ only in spill width (sw/lw vs sd/ld, 48- vs
; 96-byte frame) and in RV64 using mulw for the 32-bit multiplies.
define i32 @clmul_i32(i32 %a, i32 %b) nounwind {
; RV32IM-LABEL: clmul_i32:
; RV32IM: # %bb.0:
; RV32IM-NEXT: addi sp, sp, -48
; RV32IM-NEXT: sw s0, 44(sp) # 4-byte Folded Spill
; RV32IM-NEXT: sw s1, 40(sp) # 4-byte Folded Spill
; RV32IM-NEXT: sw s2, 36(sp) # 4-byte Folded Spill
; RV32IM-NEXT: sw s3, 32(sp) # 4-byte Folded Spill
; RV32IM-NEXT: sw s4, 28(sp) # 4-byte Folded Spill
; RV32IM-NEXT: sw s5, 24(sp) # 4-byte Folded Spill
; RV32IM-NEXT: sw s6, 20(sp) # 4-byte Folded Spill
; RV32IM-NEXT: sw s7, 16(sp) # 4-byte Folded Spill
; RV32IM-NEXT: sw s8, 12(sp) # 4-byte Folded Spill
; RV32IM-NEXT: sw s9, 8(sp) # 4-byte Folded Spill
; RV32IM-NEXT: sw s10, 4(sp) # 4-byte Folded Spill
; RV32IM-NEXT: andi t6, a1, 2
; RV32IM-NEXT: andi s1, a1, 1
; RV32IM-NEXT: andi a7, a1, 4
; RV32IM-NEXT: andi t2, a1, 8
; RV32IM-NEXT: andi t0, a1, 16
; RV32IM-NEXT: andi t3, a1, 32
; RV32IM-NEXT: andi a2, a1, 64
; RV32IM-NEXT: andi t4, a1, 128
; RV32IM-NEXT: andi s0, a1, 256
; RV32IM-NEXT: andi a3, a1, 512
; RV32IM-NEXT: li a4, 1
; RV32IM-NEXT: lui a5, 1
; RV32IM-NEXT: lui a6, 2
; RV32IM-NEXT: lui t1, 4
; RV32IM-NEXT: lui t5, 8
; RV32IM-NEXT: lui s2, 16
; RV32IM-NEXT: lui s3, 32
; RV32IM-NEXT: lui s4, 64
; RV32IM-NEXT: lui s5, 128
; RV32IM-NEXT: lui s6, 256
; RV32IM-NEXT: lui s7, 512
; RV32IM-NEXT: lui s8, 1024
; RV32IM-NEXT: lui s9, 2048
; RV32IM-NEXT: lui s10, 4096
; RV32IM-NEXT: mul t6, a0, t6
; RV32IM-NEXT: mul s1, a0, s1
; RV32IM-NEXT: xor t6, s1, t6
; RV32IM-NEXT: lui s1, 8192
; RV32IM-NEXT: mul a7, a0, a7
; RV32IM-NEXT: mul t2, a0, t2
; RV32IM-NEXT: xor a7, a7, t2
; RV32IM-NEXT: lui t2, 16384
; RV32IM-NEXT: mul t0, a0, t0
; RV32IM-NEXT: mul t3, a0, t3
; RV32IM-NEXT: xor t0, t0, t3
; RV32IM-NEXT: lui t3, 32768
; RV32IM-NEXT: mul t4, a0, t4
; RV32IM-NEXT: mul s0, a0, s0
; RV32IM-NEXT: xor t4, t4, s0
; RV32IM-NEXT: lui s0, 65536
; RV32IM-NEXT: xor a7, t6, a7
; RV32IM-NEXT: lui t6, 131072
; RV32IM-NEXT: mul a2, a0, a2
; RV32IM-NEXT: xor a2, t0, a2
; RV32IM-NEXT: lui t0, 262144
; RV32IM-NEXT: mul a3, a0, a3
; RV32IM-NEXT: xor a3, t4, a3
; RV32IM-NEXT: lui t4, 524288
; RV32IM-NEXT: slli a4, a4, 11
; RV32IM-NEXT: and a5, a1, a5
; RV32IM-NEXT: and a6, a1, a6
; RV32IM-NEXT: and t1, a1, t1
; RV32IM-NEXT: and t5, a1, t5
; RV32IM-NEXT: and s2, a1, s2
; RV32IM-NEXT: and s3, a1, s3
; RV32IM-NEXT: and s4, a1, s4
; RV32IM-NEXT: and s5, a1, s5
; RV32IM-NEXT: and s6, a1, s6
; RV32IM-NEXT: and s7, a1, s7
; RV32IM-NEXT: and s8, a1, s8
; RV32IM-NEXT: and s9, a1, s9
; RV32IM-NEXT: and s10, a1, s10
; RV32IM-NEXT: and s1, a1, s1
; RV32IM-NEXT: and t2, a1, t2
; RV32IM-NEXT: and t3, a1, t3
; RV32IM-NEXT: and s0, a1, s0
; RV32IM-NEXT: and t6, a1, t6
; RV32IM-NEXT: and t0, a1, t0
; RV32IM-NEXT: and t4, a1, t4
; RV32IM-NEXT: and a4, a1, a4
; RV32IM-NEXT: andi a1, a1, 1024
; RV32IM-NEXT: mul a1, a0, a1
; RV32IM-NEXT: mul a5, a0, a5
; RV32IM-NEXT: mul a6, a0, a6
; RV32IM-NEXT: mul t1, a0, t1
; RV32IM-NEXT: mul t5, a0, t5
; RV32IM-NEXT: mul s2, a0, s2
; RV32IM-NEXT: mul s3, a0, s3
; RV32IM-NEXT: mul s4, a0, s4
; RV32IM-NEXT: mul s5, a0, s5
; RV32IM-NEXT: mul s6, a0, s6
; RV32IM-NEXT: mul s7, a0, s7
; RV32IM-NEXT: mul s8, a0, s8
; RV32IM-NEXT: mul s9, a0, s9
; RV32IM-NEXT: mul s10, a0, s10
; RV32IM-NEXT: mul s1, a0, s1
; RV32IM-NEXT: mul t2, a0, t2
; RV32IM-NEXT: mul t3, a0, t3
; RV32IM-NEXT: mul s0, a0, s0
; RV32IM-NEXT: mul t6, a0, t6
; RV32IM-NEXT: mul t0, a0, t0
; RV32IM-NEXT: mul t4, a0, t4
; RV32IM-NEXT: mul a0, a0, a4
; RV32IM-NEXT: xor a4, t1, t5
; RV32IM-NEXT: xor t1, s5, s6
; RV32IM-NEXT: xor t2, s1, t2
; RV32IM-NEXT: xor a2, a7, a2
; RV32IM-NEXT: xor a1, a3, a1
; RV32IM-NEXT: xor a0, a0, a5
; RV32IM-NEXT: xor a3, a4, s2
; RV32IM-NEXT: xor a4, t1, s7
; RV32IM-NEXT: xor a5, t2, t3
; RV32IM-NEXT: xor a1, a2, a1
; RV32IM-NEXT: xor a0, a0, a6
; RV32IM-NEXT: xor a2, a3, s3
; RV32IM-NEXT: xor a3, a4, s8
; RV32IM-NEXT: xor a5, a5, s0
; RV32IM-NEXT: xor a0, a1, a0
; RV32IM-NEXT: xor a1, a2, s4
; RV32IM-NEXT: xor a2, a3, s9
; RV32IM-NEXT: xor a3, a5, t6
; RV32IM-NEXT: xor a0, a0, a1
; RV32IM-NEXT: xor a1, a2, s10
; RV32IM-NEXT: xor a2, a3, t0
; RV32IM-NEXT: xor a0, a0, a1
; RV32IM-NEXT: xor a1, a2, t4
; RV32IM-NEXT: xor a0, a0, a1
; RV32IM-NEXT: lw s0, 44(sp) # 4-byte Folded Reload
; RV32IM-NEXT: lw s1, 40(sp) # 4-byte Folded Reload
; RV32IM-NEXT: lw s2, 36(sp) # 4-byte Folded Reload
; RV32IM-NEXT: lw s3, 32(sp) # 4-byte Folded Reload
; RV32IM-NEXT: lw s4, 28(sp) # 4-byte Folded Reload
; RV32IM-NEXT: lw s5, 24(sp) # 4-byte Folded Reload
; RV32IM-NEXT: lw s6, 20(sp) # 4-byte Folded Reload
; RV32IM-NEXT: lw s7, 16(sp) # 4-byte Folded Reload
; RV32IM-NEXT: lw s8, 12(sp) # 4-byte Folded Reload
; RV32IM-NEXT: lw s9, 8(sp) # 4-byte Folded Reload
; RV32IM-NEXT: lw s10, 4(sp) # 4-byte Folded Reload
; RV32IM-NEXT: addi sp, sp, 48
; RV32IM-NEXT: ret
;
; RV64IM-LABEL: clmul_i32:
; RV64IM: # %bb.0:
; RV64IM-NEXT: addi sp, sp, -96
; RV64IM-NEXT: sd s0, 88(sp) # 8-byte Folded Spill
; RV64IM-NEXT: sd s1, 80(sp) # 8-byte Folded Spill
; RV64IM-NEXT: sd s2, 72(sp) # 8-byte Folded Spill
; RV64IM-NEXT: sd s3, 64(sp) # 8-byte Folded Spill
; RV64IM-NEXT: sd s4, 56(sp) # 8-byte Folded Spill
; RV64IM-NEXT: sd s5, 48(sp) # 8-byte Folded Spill
; RV64IM-NEXT: sd s6, 40(sp) # 8-byte Folded Spill
; RV64IM-NEXT: sd s7, 32(sp) # 8-byte Folded Spill
; RV64IM-NEXT: sd s8, 24(sp) # 8-byte Folded Spill
; RV64IM-NEXT: sd s9, 16(sp) # 8-byte Folded Spill
; RV64IM-NEXT: sd s10, 8(sp) # 8-byte Folded Spill
; RV64IM-NEXT: andi t6, a1, 2
; RV64IM-NEXT: andi s1, a1, 1
; RV64IM-NEXT: andi a7, a1, 4
; RV64IM-NEXT: andi t2, a1, 8
; RV64IM-NEXT: andi t0, a1, 16
; RV64IM-NEXT: andi t3, a1, 32
; RV64IM-NEXT: andi a2, a1, 64
; RV64IM-NEXT: andi t4, a1, 128
; RV64IM-NEXT: andi s0, a1, 256
; RV64IM-NEXT: andi a3, a1, 512
; RV64IM-NEXT: li a4, 1
; RV64IM-NEXT: lui a5, 1
; RV64IM-NEXT: lui a6, 2
; RV64IM-NEXT: lui t1, 4
; RV64IM-NEXT: lui t5, 8
; RV64IM-NEXT: lui s2, 16
; RV64IM-NEXT: lui s3, 32
; RV64IM-NEXT: lui s4, 64
; RV64IM-NEXT: lui s5, 128
; RV64IM-NEXT: lui s6, 256
; RV64IM-NEXT: lui s7, 512
; RV64IM-NEXT: lui s8, 1024
; RV64IM-NEXT: lui s9, 2048
; RV64IM-NEXT: lui s10, 4096
; RV64IM-NEXT: mulw t6, a0, t6
; RV64IM-NEXT: mulw s1, a0, s1
; RV64IM-NEXT: xor t6, s1, t6
; RV64IM-NEXT: lui s1, 8192
; RV64IM-NEXT: mulw a7, a0, a7
; RV64IM-NEXT: mulw t2, a0, t2
; RV64IM-NEXT: xor a7, a7, t2
; RV64IM-NEXT: lui t2, 16384
; RV64IM-NEXT: mulw t0, a0, t0
; RV64IM-NEXT: mulw t3, a0, t3
; RV64IM-NEXT: xor t0, t0, t3
; RV64IM-NEXT: lui t3, 32768
; RV64IM-NEXT: mulw t4, a0, t4
; RV64IM-NEXT: mulw s0, a0, s0
; RV64IM-NEXT: xor t4, t4, s0
; RV64IM-NEXT: lui s0, 65536
; RV64IM-NEXT: xor a7, t6, a7
; RV64IM-NEXT: lui t6, 131072
; RV64IM-NEXT: mulw a2, a0, a2
; RV64IM-NEXT: xor a2, t0, a2
; RV64IM-NEXT: lui t0, 262144
; RV64IM-NEXT: mulw a3, a0, a3
; RV64IM-NEXT: xor a3, t4, a3
; RV64IM-NEXT: lui t4, 524288
; RV64IM-NEXT: slli a4, a4, 11
; RV64IM-NEXT: and a5, a1, a5
; RV64IM-NEXT: and a6, a1, a6
; RV64IM-NEXT: and t1, a1, t1
; RV64IM-NEXT: and t5, a1, t5
; RV64IM-NEXT: and s2, a1, s2
; RV64IM-NEXT: and s3, a1, s3
; RV64IM-NEXT: and s4, a1, s4
; RV64IM-NEXT: and s5, a1, s5
; RV64IM-NEXT: and s6, a1, s6
; RV64IM-NEXT: and s7, a1, s7
; RV64IM-NEXT: and s8, a1, s8
; RV64IM-NEXT: and s9, a1, s9
; RV64IM-NEXT: and s10, a1, s10
; RV64IM-NEXT: and s1, a1, s1
; RV64IM-NEXT: and t2, a1, t2
; RV64IM-NEXT: and t3, a1, t3
; RV64IM-NEXT: and s0, a1, s0
; RV64IM-NEXT: and t6, a1, t6
; RV64IM-NEXT: and t0, a1, t0
; RV64IM-NEXT: and t4, a1, t4
; RV64IM-NEXT: and a4, a1, a4
; RV64IM-NEXT: andi a1, a1, 1024
; RV64IM-NEXT: mulw a1, a0, a1
; RV64IM-NEXT: mulw a5, a0, a5
; RV64IM-NEXT: mulw a6, a0, a6
; RV64IM-NEXT: mulw t1, a0, t1
; RV64IM-NEXT: mulw t5, a0, t5
; RV64IM-NEXT: mulw s2, a0, s2
; RV64IM-NEXT: mulw s3, a0, s3
; RV64IM-NEXT: mulw s4, a0, s4
; RV64IM-NEXT: mulw s5, a0, s5
; RV64IM-NEXT: mulw s6, a0, s6
; RV64IM-NEXT: mulw s7, a0, s7
; RV64IM-NEXT: mulw s8, a0, s8
; RV64IM-NEXT: mulw s9, a0, s9
; RV64IM-NEXT: mulw s10, a0, s10
; RV64IM-NEXT: mulw s1, a0, s1
; RV64IM-NEXT: mulw t2, a0, t2
; RV64IM-NEXT: mulw t3, a0, t3
; RV64IM-NEXT: mulw s0, a0, s0
; RV64IM-NEXT: mulw t6, a0, t6
; RV64IM-NEXT: mulw t0, a0, t0
; RV64IM-NEXT: mulw t4, a0, t4
; RV64IM-NEXT: mulw a0, a0, a4
; RV64IM-NEXT: xor a4, t1, t5
; RV64IM-NEXT: xor t1, s5, s6
; RV64IM-NEXT: xor t2, s1, t2
; RV64IM-NEXT: xor a2, a7, a2
; RV64IM-NEXT: xor a1, a3, a1
; RV64IM-NEXT: xor a0, a0, a5
; RV64IM-NEXT: xor a3, a4, s2
; RV64IM-NEXT: xor a4, t1, s7
; RV64IM-NEXT: xor a5, t2, t3
; RV64IM-NEXT: xor a1, a2, a1
; RV64IM-NEXT: xor a0, a0, a6
; RV64IM-NEXT: xor a2, a3, s3
; RV64IM-NEXT: xor a3, a4, s8
; RV64IM-NEXT: xor a5, a5, s0
; RV64IM-NEXT: xor a0, a1, a0
; RV64IM-NEXT: xor a1, a2, s4
; RV64IM-NEXT: xor a2, a3, s9
; RV64IM-NEXT: xor a3, a5, t6
; RV64IM-NEXT: xor a0, a0, a1
; RV64IM-NEXT: xor a1, a2, s10
; RV64IM-NEXT: xor a2, a3, t0
; RV64IM-NEXT: xor a0, a0, a1
; RV64IM-NEXT: xor a1, a2, t4
; RV64IM-NEXT: xor a0, a0, a1
; RV64IM-NEXT: ld s0, 88(sp) # 8-byte Folded Reload
; RV64IM-NEXT: ld s1, 80(sp) # 8-byte Folded Reload
; RV64IM-NEXT: ld s2, 72(sp) # 8-byte Folded Reload
; RV64IM-NEXT: ld s3, 64(sp) # 8-byte Folded Reload
; RV64IM-NEXT: ld s4, 56(sp) # 8-byte Folded Reload
; RV64IM-NEXT: ld s5, 48(sp) # 8-byte Folded Reload
; RV64IM-NEXT: ld s6, 40(sp) # 8-byte Folded Reload
; RV64IM-NEXT: ld s7, 32(sp) # 8-byte Folded Reload
; RV64IM-NEXT: ld s8, 24(sp) # 8-byte Folded Reload
; RV64IM-NEXT: ld s9, 16(sp) # 8-byte Folded Reload
; RV64IM-NEXT: ld s10, 8(sp) # 8-byte Folded Reload
; RV64IM-NEXT: addi sp, sp, 96
; RV64IM-NEXT: ret
  %res = call i32 @llvm.clmul.i32(i32 %a, i32 %b)
  ret i32 %res
}
define i64 @clmul_i64(i64 %a, i64 %b) nounwind {
; RV32IM-LABEL: clmul_i64:
; RV32IM: # %bb.0:
; RV32IM-NEXT: addi sp, sp, -272
; RV32IM-NEXT: sw ra, 268(sp) # 4-byte Folded Spill
; RV32IM-NEXT: sw s0, 264(sp) # 4-byte Folded Spill
; RV32IM-NEXT: sw s1, 260(sp) # 4-byte Folded Spill
; RV32IM-NEXT: sw s2, 256(sp) # 4-byte Folded Spill
; RV32IM-NEXT: sw s3, 252(sp) # 4-byte Folded Spill
; RV32IM-NEXT: sw s4, 248(sp) # 4-byte Folded Spill
; RV32IM-NEXT: sw s5, 244(sp) # 4-byte Folded Spill
; RV32IM-NEXT: sw s6, 240(sp) # 4-byte Folded Spill
; RV32IM-NEXT: sw s7, 236(sp) # 4-byte Folded Spill
; RV32IM-NEXT: sw s8, 232(sp) # 4-byte Folded Spill
; RV32IM-NEXT: sw s9, 228(sp) # 4-byte Folded Spill
; RV32IM-NEXT: sw s10, 224(sp) # 4-byte Folded Spill
; RV32IM-NEXT: sw s11, 220(sp) # 4-byte Folded Spill
; RV32IM-NEXT: mv t1, a1
; RV32IM-NEXT: srli a7, a0, 8
; RV32IM-NEXT: lui s11, 16
; RV32IM-NEXT: srli t0, a0, 24
; RV32IM-NEXT: srli a1, a2, 8
; RV32IM-NEXT: srli t2, a2, 24
; RV32IM-NEXT: andi t3, a2, 2
; RV32IM-NEXT: andi t5, a2, 1
; RV32IM-NEXT: andi t6, a2, 4
; RV32IM-NEXT: andi s0, a2, 8
; RV32IM-NEXT: andi s1, a2, 16
; RV32IM-NEXT: andi s2, a2, 32
; RV32IM-NEXT: andi t4, a2, 128
; RV32IM-NEXT: andi s4, a2, 256
; RV32IM-NEXT: andi a4, a3, 2
; RV32IM-NEXT: andi a5, a3, 1
; RV32IM-NEXT: andi s7, a3, 4
; RV32IM-NEXT: andi s8, a3, 8
; RV32IM-NEXT: mul a6, t1, t3
; RV32IM-NEXT: mul s3, t1, t5
; RV32IM-NEXT: mul s5, t1, t6
; RV32IM-NEXT: mul s6, t1, s0
; RV32IM-NEXT: mul s9, t1, s1
; RV32IM-NEXT: xor a6, s3, a6
; RV32IM-NEXT: mul s3, t1, s2
; RV32IM-NEXT: xor s5, s5, s6
; RV32IM-NEXT: mul s6, t1, t4
; RV32IM-NEXT: xor s3, s9, s3
; RV32IM-NEXT: mul s9, t1, s4
; RV32IM-NEXT: xor s6, s6, s9
; RV32IM-NEXT: andi s9, a3, 16
; RV32IM-NEXT: mul a4, a0, a4
; RV32IM-NEXT: mul a5, a0, a5
; RV32IM-NEXT: xor a4, a5, a4
; RV32IM-NEXT: andi s10, a3, 32
; RV32IM-NEXT: mul a5, a0, s7
; RV32IM-NEXT: mul s7, a0, s8
; RV32IM-NEXT: xor a5, a5, s7
; RV32IM-NEXT: andi s8, a3, 128
; RV32IM-NEXT: mul s7, a0, s9
; RV32IM-NEXT: mul s9, a0, s10
; RV32IM-NEXT: xor s7, s7, s9
; RV32IM-NEXT: andi s9, a3, 256
; RV32IM-NEXT: mul s8, a0, s8
; RV32IM-NEXT: mul s9, a0, s9
; RV32IM-NEXT: xor s8, s8, s9
; RV32IM-NEXT: mul t3, a0, t3
; RV32IM-NEXT: mul t5, a0, t5
; RV32IM-NEXT: xor t5, t5, t3
; RV32IM-NEXT: andi t3, a2, 64
; RV32IM-NEXT: mul t6, a0, t6
; RV32IM-NEXT: mul s0, a0, s0
; RV32IM-NEXT: xor s0, t6, s0
; RV32IM-NEXT: andi t6, a2, 512
; RV32IM-NEXT: mul s1, a0, s1
; RV32IM-NEXT: mul s2, a0, s2
; RV32IM-NEXT: xor s1, s1, s2
; RV32IM-NEXT: addi s9, s11, -256
; RV32IM-NEXT: sw s9, 216(sp) # 4-byte Folded Spill
; RV32IM-NEXT: mul t4, a0, t4
; RV32IM-NEXT: mul s2, a0, s4
; RV32IM-NEXT: xor t4, t4, s2
; RV32IM-NEXT: mul s2, t1, t3
; RV32IM-NEXT: and a7, a7, s9
; RV32IM-NEXT: or a7, a7, t0
; RV32IM-NEXT: sw a7, 212(sp) # 4-byte Folded Spill
; RV32IM-NEXT: mul a7, t1, t6
; RV32IM-NEXT: and t0, a1, s9
; RV32IM-NEXT: or a1, t0, t2
; RV32IM-NEXT: sw a1, 208(sp) # 4-byte Folded Spill
; RV32IM-NEXT: lui s9, 4
; RV32IM-NEXT: and t2, a2, s9
; RV32IM-NEXT: xor a1, a6, s5
; RV32IM-NEXT: sw a1, 204(sp) # 4-byte Folded Spill
; RV32IM-NEXT: lui s5, 8
; RV32IM-NEXT: and s4, a2, s5
; RV32IM-NEXT: xor a1, s3, s2
; RV32IM-NEXT: sw a1, 200(sp) # 4-byte Folded Spill
; RV32IM-NEXT: mul a6, t1, t2
; RV32IM-NEXT: xor a1, s6, a7
; RV32IM-NEXT: sw a1, 196(sp) # 4-byte Folded Spill
; RV32IM-NEXT: mul a7, t1, s4
; RV32IM-NEXT: xor a1, a6, a7
; RV32IM-NEXT: sw a1, 192(sp) # 4-byte Folded Spill
; RV32IM-NEXT: lui a1, 256
; RV32IM-NEXT: lui s6, 128
; RV32IM-NEXT: and t0, a2, s6
; RV32IM-NEXT: and a1, a2, a1
; RV32IM-NEXT: lui s10, 256
; RV32IM-NEXT: mul a6, t1, t0
; RV32IM-NEXT: mul a7, t1, a1
; RV32IM-NEXT: xor a6, a6, a7
; RV32IM-NEXT: sw a6, 188(sp) # 4-byte Folded Spill
; RV32IM-NEXT: lui a6, 8192
; RV32IM-NEXT: lui a7, 16384
; RV32IM-NEXT: and a6, a2, a6
; RV32IM-NEXT: lui s11, 8192
; RV32IM-NEXT: and a7, a2, a7
; RV32IM-NEXT: lui ra, 16384
; RV32IM-NEXT: mul s2, t1, a6
; RV32IM-NEXT: mul s3, t1, a7
; RV32IM-NEXT: xor s2, s2, s3
; RV32IM-NEXT: sw s2, 180(sp) # 4-byte Folded Spill
; RV32IM-NEXT: xor a4, a4, a5
; RV32IM-NEXT: sw a4, 184(sp) # 4-byte Folded Spill
; RV32IM-NEXT: andi a4, a3, 64
; RV32IM-NEXT: mul a4, a0, a4
; RV32IM-NEXT: xor a4, s7, a4
; RV32IM-NEXT: sw a4, 176(sp) # 4-byte Folded Spill
; RV32IM-NEXT: andi a4, a3, 512
; RV32IM-NEXT: mul a4, a0, a4
; RV32IM-NEXT: xor a4, s8, a4
; RV32IM-NEXT: sw a4, 172(sp) # 4-byte Folded Spill
; RV32IM-NEXT: and a4, a3, s9
; RV32IM-NEXT: mul a4, a0, a4
; RV32IM-NEXT: and a5, a3, s5
; RV32IM-NEXT: mul a5, a0, a5
; RV32IM-NEXT: xor a4, a4, a5
; RV32IM-NEXT: sw a4, 168(sp) # 4-byte Folded Spill
; RV32IM-NEXT: and a4, a3, s6
; RV32IM-NEXT: mul a4, a0, a4
; RV32IM-NEXT: and a5, a3, s10
; RV32IM-NEXT: mul a5, a0, a5
; RV32IM-NEXT: xor a4, a4, a5
; RV32IM-NEXT: sw a4, 164(sp) # 4-byte Folded Spill
; RV32IM-NEXT: and a4, a3, s11
; RV32IM-NEXT: mul a4, a0, a4
; RV32IM-NEXT: and a5, a3, ra
; RV32IM-NEXT: mul a5, a0, a5
; RV32IM-NEXT: xor a4, a4, a5
; RV32IM-NEXT: sw a4, 156(sp) # 4-byte Folded Spill
; RV32IM-NEXT: xor a4, t5, s0
; RV32IM-NEXT: sw a4, 160(sp) # 4-byte Folded Spill
; RV32IM-NEXT: mul a4, a0, t3
; RV32IM-NEXT: xor a4, s1, a4
; RV32IM-NEXT: sw a4, 152(sp) # 4-byte Folded Spill
; RV32IM-NEXT: mul a4, a0, t6
; RV32IM-NEXT: xor a4, t4, a4
; RV32IM-NEXT: sw a4, 148(sp) # 4-byte Folded Spill
; RV32IM-NEXT: mul a4, a0, t2
; RV32IM-NEXT: mul a5, a0, s4
; RV32IM-NEXT: xor a4, a4, a5
; RV32IM-NEXT: sw a4, 144(sp) # 4-byte Folded Spill
; RV32IM-NEXT: mul a4, a0, t0
; RV32IM-NEXT: mul a5, a0, a1
; RV32IM-NEXT: xor a4, a4, a5
; RV32IM-NEXT: sw a4, 140(sp) # 4-byte Folded Spill
; RV32IM-NEXT: mul a4, a0, a6
; RV32IM-NEXT: mul a5, a0, a7
; RV32IM-NEXT: xor a4, a4, a5
; RV32IM-NEXT: sw a4, 132(sp) # 4-byte Folded Spill
; RV32IM-NEXT: li a4, 1
; RV32IM-NEXT: slli s6, a4, 11
; RV32IM-NEXT: andi a4, a3, 1024
; RV32IM-NEXT: mul a1, a0, a4
; RV32IM-NEXT: sw a1, 116(sp) # 4-byte Folded Spill
; RV32IM-NEXT: andi a4, a2, 1024
; RV32IM-NEXT: mul a1, t1, a4
; RV32IM-NEXT: sw a1, 104(sp) # 4-byte Folded Spill
; RV32IM-NEXT: mul a1, a0, a4
; RV32IM-NEXT: sw a1, 124(sp) # 4-byte Folded Spill
; RV32IM-NEXT: lui a4, 1
; RV32IM-NEXT: lui a5, 2
; RV32IM-NEXT: lui a1, 32
; RV32IM-NEXT: lui s3, 64
; RV32IM-NEXT: lui t2, 512
; RV32IM-NEXT: lui t3, 1024
; RV32IM-NEXT: lui t4, 2048
; RV32IM-NEXT: lui t5, 4096
; RV32IM-NEXT: lui t6, 32768
; RV32IM-NEXT: lui s0, 65536
; RV32IM-NEXT: lui s1, 131072
; RV32IM-NEXT: lui s2, 262144
; RV32IM-NEXT: lui s4, 524288
; RV32IM-NEXT: and a4, a3, a4
; RV32IM-NEXT: lui s7, 1
; RV32IM-NEXT: and a5, a3, a5
; RV32IM-NEXT: lui s9, 2
; RV32IM-NEXT: lui t0, 16
; RV32IM-NEXT: and a6, a3, t0
; RV32IM-NEXT: and a7, a3, a1
; RV32IM-NEXT: lui s8, 32
; RV32IM-NEXT: and a1, a3, s3
; RV32IM-NEXT: lui s10, 64
; RV32IM-NEXT: and t2, a3, t2
; RV32IM-NEXT: lui s11, 512
; RV32IM-NEXT: and t3, a3, t3
; RV32IM-NEXT: lui ra, 1024
; RV32IM-NEXT: and t4, a3, t4
; RV32IM-NEXT: and t5, a3, t5
; RV32IM-NEXT: and t6, a3, t6
; RV32IM-NEXT: and s0, a3, s0
; RV32IM-NEXT: and s1, a3, s1
; RV32IM-NEXT: and s3, a3, s2
; RV32IM-NEXT: and s5, a3, s4
; RV32IM-NEXT: and s2, a3, s6
; RV32IM-NEXT: mul a4, a0, a4
; RV32IM-NEXT: mul a3, a0, a5
; RV32IM-NEXT: sw a3, 96(sp) # 4-byte Folded Spill
; RV32IM-NEXT: mul a3, a0, a6
; RV32IM-NEXT: sw a3, 48(sp) # 4-byte Folded Spill
; RV32IM-NEXT: mul a3, a0, a7
; RV32IM-NEXT: sw a3, 92(sp) # 4-byte Folded Spill
; RV32IM-NEXT: mul a1, a0, a1
; RV32IM-NEXT: sw a1, 112(sp) # 4-byte Folded Spill
; RV32IM-NEXT: mul a1, a0, t2
; RV32IM-NEXT: sw a1, 40(sp) # 4-byte Folded Spill
; RV32IM-NEXT: mul a1, a0, t3
; RV32IM-NEXT: sw a1, 88(sp) # 4-byte Folded Spill
; RV32IM-NEXT: mul a1, a0, t4
; RV32IM-NEXT: sw a1, 108(sp) # 4-byte Folded Spill
; RV32IM-NEXT: mul a1, a0, t5
; RV32IM-NEXT: sw a1, 128(sp) # 4-byte Folded Spill
; RV32IM-NEXT: mul a1, a0, t6
; RV32IM-NEXT: sw a1, 32(sp) # 4-byte Folded Spill
; RV32IM-NEXT: mul a1, a0, s0
; RV32IM-NEXT: sw a1, 84(sp) # 4-byte Folded Spill
; RV32IM-NEXT: mul a1, a0, s1
; RV32IM-NEXT: sw a1, 100(sp) # 4-byte Folded Spill
; RV32IM-NEXT: mul a1, a0, s3
; RV32IM-NEXT: sw a1, 120(sp) # 4-byte Folded Spill
; RV32IM-NEXT: mul a1, a0, s5
; RV32IM-NEXT: sw a1, 136(sp) # 4-byte Folded Spill
; RV32IM-NEXT: and a3, a2, s7
; RV32IM-NEXT: mul t5, t1, a3
; RV32IM-NEXT: mul a1, a0, a3
; RV32IM-NEXT: sw a1, 16(sp) # 4-byte Folded Spill
; RV32IM-NEXT: and a3, a2, s9
; RV32IM-NEXT: mul a1, t1, a3
; RV32IM-NEXT: sw a1, 36(sp) # 4-byte Folded Spill
; RV32IM-NEXT: mul a1, a0, a3
; RV32IM-NEXT: sw a1, 64(sp) # 4-byte Folded Spill
; RV32IM-NEXT: and a3, a2, t0
; RV32IM-NEXT: mul t3, t1, a3
; RV32IM-NEXT: mul s9, a0, a3
; RV32IM-NEXT: and a3, a2, s8
; RV32IM-NEXT: mul a1, t1, a3
; RV32IM-NEXT: sw a1, 20(sp) # 4-byte Folded Spill
; RV32IM-NEXT: mul a1, a0, a3
; RV32IM-NEXT: sw a1, 44(sp) # 4-byte Folded Spill
; RV32IM-NEXT: and a3, a2, s10
; RV32IM-NEXT: mul a1, t1, a3
; RV32IM-NEXT: sw a1, 60(sp) # 4-byte Folded Spill
; RV32IM-NEXT: mul a1, a0, a3
; RV32IM-NEXT: sw a1, 80(sp) # 4-byte Folded Spill
; RV32IM-NEXT: and a3, a2, s11
; RV32IM-NEXT: mul a7, t1, a3
; RV32IM-NEXT: mul s4, a0, a3
; RV32IM-NEXT: and a6, a2, ra
; RV32IM-NEXT: mul s11, t1, a6
; RV32IM-NEXT: mul a1, a0, a6
; RV32IM-NEXT: sw a1, 24(sp) # 4-byte Folded Spill
; RV32IM-NEXT: lui a1, 2048
; RV32IM-NEXT: and a6, a2, a1
; RV32IM-NEXT: mul a1, t1, a6
; RV32IM-NEXT: sw a1, 28(sp) # 4-byte Folded Spill
; RV32IM-NEXT: mul a1, a0, a6
; RV32IM-NEXT: sw a1, 52(sp) # 4-byte Folded Spill
; RV32IM-NEXT: lui a1, 4096
; RV32IM-NEXT: and a6, a2, a1
; RV32IM-NEXT: mul a1, t1, a6
; RV32IM-NEXT: sw a1, 56(sp) # 4-byte Folded Spill
; RV32IM-NEXT: mul a1, a0, a6
; RV32IM-NEXT: sw a1, 76(sp) # 4-byte Folded Spill
; RV32IM-NEXT: lui a1, 32768
; RV32IM-NEXT: and a6, a2, a1
; RV32IM-NEXT: mul a5, t1, a6
; RV32IM-NEXT: mul s1, a0, a6
; RV32IM-NEXT: lui a1, 65536
; RV32IM-NEXT: and a6, a2, a1
; RV32IM-NEXT: mul s7, t1, a6
; RV32IM-NEXT: mul s8, a0, a6
; RV32IM-NEXT: lui a1, 131072
; RV32IM-NEXT: and t6, a2, a1
; RV32IM-NEXT: mul s10, t1, t6
; RV32IM-NEXT: mul ra, a0, t6
; RV32IM-NEXT: lui a1, 262144
; RV32IM-NEXT: and s5, a2, a1
; RV32IM-NEXT: mul a1, t1, s5
; RV32IM-NEXT: sw a1, 12(sp) # 4-byte Folded Spill
; RV32IM-NEXT: mul s5, a0, s5
; RV32IM-NEXT: lui a1, 524288
; RV32IM-NEXT: and a1, a2, a1
; RV32IM-NEXT: mul a3, t1, a1
; RV32IM-NEXT: sw a3, 68(sp) # 4-byte Folded Spill
; RV32IM-NEXT: mul a1, a0, a1
; RV32IM-NEXT: sw a1, 72(sp) # 4-byte Folded Spill
; RV32IM-NEXT: and a1, a2, s6
; RV32IM-NEXT: mul t1, t1, a1
; RV32IM-NEXT: mul s2, a0, s2
; RV32IM-NEXT: mul t6, a0, a1
; RV32IM-NEXT: slli a1, a0, 24
; RV32IM-NEXT: lw a3, 216(sp) # 4-byte Folded Reload
; RV32IM-NEXT: and a0, a0, a3
; RV32IM-NEXT: slli a0, a0, 8
; RV32IM-NEXT: or s3, a1, a0
; RV32IM-NEXT: slli a1, a2, 24
; RV32IM-NEXT: and a2, a2, a3
; RV32IM-NEXT: slli a2, a2, 8
; RV32IM-NEXT: or t2, a1, a2
; RV32IM-NEXT: lw a0, 204(sp) # 4-byte Folded Reload
; RV32IM-NEXT: lw a1, 200(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor t4, a0, a1
; RV32IM-NEXT: lw a0, 196(sp) # 4-byte Folded Reload
; RV32IM-NEXT: lw a1, 104(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor a6, a0, a1
; RV32IM-NEXT: xor t1, t1, t5
; RV32IM-NEXT: lw a0, 192(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor t3, a0, t3
; RV32IM-NEXT: lw a0, 188(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor a7, a0, a7
; RV32IM-NEXT: lw a0, 180(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor t0, a0, a5
; RV32IM-NEXT: lw a0, 184(sp) # 4-byte Folded Reload
; RV32IM-NEXT: lw a1, 176(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor t5, a0, a1
; RV32IM-NEXT: lw a0, 172(sp) # 4-byte Folded Reload
; RV32IM-NEXT: lw a5, 116(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor a5, a0, a5
; RV32IM-NEXT: xor s2, s2, a4
; RV32IM-NEXT: lw a0, 168(sp) # 4-byte Folded Reload
; RV32IM-NEXT: lw a1, 48(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor a1, a0, a1
; RV32IM-NEXT: lw a0, 164(sp) # 4-byte Folded Reload
; RV32IM-NEXT: lw a2, 40(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor a2, a0, a2
; RV32IM-NEXT: lw a0, 156(sp) # 4-byte Folded Reload
; RV32IM-NEXT: lw a3, 32(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor a3, a0, a3
; RV32IM-NEXT: lw a0, 160(sp) # 4-byte Folded Reload
; RV32IM-NEXT: lw a4, 152(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor a4, a0, a4
; RV32IM-NEXT: lw a0, 148(sp) # 4-byte Folded Reload
; RV32IM-NEXT: lw s0, 124(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor a0, a0, s0
; RV32IM-NEXT: lw s0, 16(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor t6, t6, s0
; RV32IM-NEXT: lw s0, 144(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor s9, s0, s9
; RV32IM-NEXT: lw s0, 140(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor s4, s0, s4
; RV32IM-NEXT: lw s0, 132(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor s1, s0, s1
; RV32IM-NEXT: lw s0, 212(sp) # 4-byte Folded Reload
; RV32IM-NEXT: or s0, s3, s0
; RV32IM-NEXT: lw s3, 208(sp) # 4-byte Folded Reload
; RV32IM-NEXT: or t2, t2, s3
; RV32IM-NEXT: xor a6, t4, a6
; RV32IM-NEXT: lw t4, 36(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor t1, t1, t4
; RV32IM-NEXT: lw t4, 20(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor t3, t3, t4
; RV32IM-NEXT: xor a7, a7, s11
; RV32IM-NEXT: xor t0, t0, s7
; RV32IM-NEXT: xor a5, t5, a5
; RV32IM-NEXT: lw t4, 96(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor t4, s2, t4
; RV32IM-NEXT: lw t5, 92(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor a1, a1, t5
; RV32IM-NEXT: lw t5, 88(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor a2, a2, t5
; RV32IM-NEXT: lw t5, 84(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor a3, a3, t5
; RV32IM-NEXT: xor a0, a4, a0
; RV32IM-NEXT: lw a4, 64(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor a4, t6, a4
; RV32IM-NEXT: lw t5, 44(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor t5, s9, t5
; RV32IM-NEXT: lw t6, 24(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor t6, s4, t6
; RV32IM-NEXT: xor s1, s1, s8
; RV32IM-NEXT: xor a6, a6, t1
; RV32IM-NEXT: lw t1, 60(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor t1, t3, t1
; RV32IM-NEXT: lw t3, 28(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor a7, a7, t3
; RV32IM-NEXT: xor t0, t0, s10
; RV32IM-NEXT: xor a5, a5, t4
; RV32IM-NEXT: lw t3, 112(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor a1, a1, t3
; RV32IM-NEXT: lw t3, 108(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor a2, a2, t3
; RV32IM-NEXT: lw t3, 100(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor a3, a3, t3
; RV32IM-NEXT: xor a0, a0, a4
; RV32IM-NEXT: lw a4, 80(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor a4, t5, a4
; RV32IM-NEXT: lw t3, 52(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor t3, t6, t3
; RV32IM-NEXT: xor t4, s1, ra
; RV32IM-NEXT: xor a6, a6, t1
; RV32IM-NEXT: lw t1, 56(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor a7, a7, t1
; RV32IM-NEXT: lw t1, 12(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor t0, t0, t1
; RV32IM-NEXT: xor a5, a5, a1
; RV32IM-NEXT: lw a1, 128(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor a2, a2, a1
; RV32IM-NEXT: lw a1, 120(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor a3, a3, a1
; RV32IM-NEXT: xor a0, a0, a4
; RV32IM-NEXT: lw a1, 76(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor a4, t3, a1
; RV32IM-NEXT: xor t1, t4, s5
; RV32IM-NEXT: lui a1, 61681
; RV32IM-NEXT: addi t5, a1, -241
; RV32IM-NEXT: srli t3, s0, 4
; RV32IM-NEXT: and s0, s0, t5
; RV32IM-NEXT: and t3, t3, t5
; RV32IM-NEXT: slli s0, s0, 4
; RV32IM-NEXT: or t3, t3, s0
; RV32IM-NEXT: srli t4, t2, 4
; RV32IM-NEXT: and t2, t2, t5
; RV32IM-NEXT: and t4, t4, t5
; RV32IM-NEXT: slli t2, t2, 4
; RV32IM-NEXT: or t2, t4, t2
; RV32IM-NEXT: xor a6, a6, a7
; RV32IM-NEXT: lw a1, 68(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor a7, t0, a1
; RV32IM-NEXT: xor a5, a5, a2
; RV32IM-NEXT: lw a1, 136(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor a3, a3, a1
; RV32IM-NEXT: xor a0, a0, a4
; RV32IM-NEXT: sw a0, 212(sp) # 4-byte Folded Spill
; RV32IM-NEXT: lw a0, 72(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor a0, t1, a0
; RV32IM-NEXT: sw a0, 208(sp) # 4-byte Folded Spill
; RV32IM-NEXT: xor a4, a6, a7
; RV32IM-NEXT: xor a3, a5, a3
; RV32IM-NEXT: xor a3, a3, a4
; RV32IM-NEXT: sw a3, 204(sp) # 4-byte Folded Spill
; RV32IM-NEXT: lui a4, 209715
; RV32IM-NEXT: addi t4, a4, 819
; RV32IM-NEXT: srli a5, t3, 2
; RV32IM-NEXT: and a6, t3, t4
; RV32IM-NEXT: and a5, a5, t4
; RV32IM-NEXT: slli a6, a6, 2
; RV32IM-NEXT: or a7, a5, a6
; RV32IM-NEXT: srli a5, t2, 2
; RV32IM-NEXT: and a6, t2, t4
; RV32IM-NEXT: and a5, a5, t4
; RV32IM-NEXT: slli a6, a6, 2
; RV32IM-NEXT: or t0, a5, a6
; RV32IM-NEXT: lui t6, 349525
; RV32IM-NEXT: addi t6, t6, 1365
; RV32IM-NEXT: srli t1, a7, 1
; RV32IM-NEXT: and a7, a7, t6
; RV32IM-NEXT: and t1, t1, t6
; RV32IM-NEXT: slli a7, a7, 1
; RV32IM-NEXT: or a7, t1, a7
; RV32IM-NEXT: srli t1, t0, 1
; RV32IM-NEXT: and t0, t0, t6
; RV32IM-NEXT: and t1, t1, t6
; RV32IM-NEXT: slli t0, t0, 1
; RV32IM-NEXT: or t0, t1, t0
; RV32IM-NEXT: and s0, t0, s6
; RV32IM-NEXT: lui a0, 1
; RV32IM-NEXT: and s2, t0, a0
; RV32IM-NEXT: lui a0, 2
; RV32IM-NEXT: and s3, t0, a0
; RV32IM-NEXT: lui a0, 4
; RV32IM-NEXT: and s5, t0, a0
; RV32IM-NEXT: lui a0, 8
; RV32IM-NEXT: and s6, t0, a0
; RV32IM-NEXT: lui a0, 16
; RV32IM-NEXT: and s10, t0, a0
; RV32IM-NEXT: lui a0, 32
; RV32IM-NEXT: and a0, t0, a0
; RV32IM-NEXT: sw a0, 196(sp) # 4-byte Folded Spill
; RV32IM-NEXT: lui a0, 64
; RV32IM-NEXT: and a0, t0, a0
; RV32IM-NEXT: sw a0, 192(sp) # 4-byte Folded Spill
; RV32IM-NEXT: lui a0, 128
; RV32IM-NEXT: and a0, t0, a0
; RV32IM-NEXT: sw a0, 188(sp) # 4-byte Folded Spill
; RV32IM-NEXT: lui a0, 256
; RV32IM-NEXT: and a0, t0, a0
; RV32IM-NEXT: sw a0, 184(sp) # 4-byte Folded Spill
; RV32IM-NEXT: lui a0, 512
; RV32IM-NEXT: and a0, t0, a0
; RV32IM-NEXT: sw a0, 180(sp) # 4-byte Folded Spill
; RV32IM-NEXT: lui a0, 1024
; RV32IM-NEXT: and a0, t0, a0
; RV32IM-NEXT: sw a0, 176(sp) # 4-byte Folded Spill
; RV32IM-NEXT: lui a0, 2048
; RV32IM-NEXT: and a0, t0, a0
; RV32IM-NEXT: sw a0, 172(sp) # 4-byte Folded Spill
; RV32IM-NEXT: lui a0, 4096
; RV32IM-NEXT: and a0, t0, a0
; RV32IM-NEXT: sw a0, 168(sp) # 4-byte Folded Spill
; RV32IM-NEXT: lui a0, 8192
; RV32IM-NEXT: and a0, t0, a0
; RV32IM-NEXT: sw a0, 164(sp) # 4-byte Folded Spill
; RV32IM-NEXT: lui a0, 16384
; RV32IM-NEXT: and a0, t0, a0
; RV32IM-NEXT: sw a0, 160(sp) # 4-byte Folded Spill
; RV32IM-NEXT: lui a0, 32768
; RV32IM-NEXT: and a0, t0, a0
; RV32IM-NEXT: sw a0, 156(sp) # 4-byte Folded Spill
; RV32IM-NEXT: lui a0, 65536
; RV32IM-NEXT: and a0, t0, a0
; RV32IM-NEXT: sw a0, 152(sp) # 4-byte Folded Spill
; RV32IM-NEXT: lui a0, 131072
; RV32IM-NEXT: and a0, t0, a0
; RV32IM-NEXT: sw a0, 148(sp) # 4-byte Folded Spill
; RV32IM-NEXT: lui a0, 262144
; RV32IM-NEXT: and a0, t0, a0
; RV32IM-NEXT: sw a0, 144(sp) # 4-byte Folded Spill
; RV32IM-NEXT: lui a0, 524288
; RV32IM-NEXT: and a0, t0, a0
; RV32IM-NEXT: sw a0, 140(sp) # 4-byte Folded Spill
; RV32IM-NEXT: andi a0, t0, 2
; RV32IM-NEXT: andi a1, t0, 1
; RV32IM-NEXT: andi a2, t0, 4
; RV32IM-NEXT: andi a3, t0, 8
; RV32IM-NEXT: andi a4, t0, 16
; RV32IM-NEXT: andi a5, t0, 32
; RV32IM-NEXT: andi a6, t0, 64
; RV32IM-NEXT: andi t1, t0, 128
; RV32IM-NEXT: andi t2, t0, 256
; RV32IM-NEXT: andi t3, t0, 512
; RV32IM-NEXT: andi t0, t0, 1024
; RV32IM-NEXT: mul a0, a7, a0
; RV32IM-NEXT: sw a0, 120(sp) # 4-byte Folded Spill
; RV32IM-NEXT: mul ra, a7, a1
; RV32IM-NEXT: mul s11, a7, a2
; RV32IM-NEXT: mul s8, a7, a3
; RV32IM-NEXT: mul s7, a7, a4
; RV32IM-NEXT: mul s4, a7, a5
; RV32IM-NEXT: mul a0, a7, a6
; RV32IM-NEXT: sw a0, 124(sp) # 4-byte Folded Spill
; RV32IM-NEXT: mul a0, a7, t1
; RV32IM-NEXT: sw a0, 200(sp) # 4-byte Folded Spill
; RV32IM-NEXT: mul s1, a7, t2
; RV32IM-NEXT: mul t3, a7, t3
; RV32IM-NEXT: mul a0, a7, t0
; RV32IM-NEXT: sw a0, 116(sp) # 4-byte Folded Spill
; RV32IM-NEXT: mul a0, a7, s0
; RV32IM-NEXT: sw a0, 132(sp) # 4-byte Folded Spill
; RV32IM-NEXT: mul a0, a7, s2
; RV32IM-NEXT: sw a0, 136(sp) # 4-byte Folded Spill
; RV32IM-NEXT: mul t1, a7, s3
; RV32IM-NEXT: mul a6, a7, s5
; RV32IM-NEXT: mul s2, a7, s6
; RV32IM-NEXT: mul s10, a7, s10
; RV32IM-NEXT: lw a0, 196(sp) # 4-byte Folded Reload
; RV32IM-NEXT: mul a0, a7, a0
; RV32IM-NEXT: sw a0, 128(sp) # 4-byte Folded Spill
; RV32IM-NEXT: lw a0, 192(sp) # 4-byte Folded Reload
; RV32IM-NEXT: mul a0, a7, a0
; RV32IM-NEXT: sw a0, 196(sp) # 4-byte Folded Spill
; RV32IM-NEXT: lw a0, 188(sp) # 4-byte Folded Reload
; RV32IM-NEXT: mul a3, a7, a0
; RV32IM-NEXT: lw a0, 184(sp) # 4-byte Folded Reload
; RV32IM-NEXT: mul a2, a7, a0
; RV32IM-NEXT: lw a0, 180(sp) # 4-byte Folded Reload
; RV32IM-NEXT: mul a5, a7, a0
; RV32IM-NEXT: lw a0, 176(sp) # 4-byte Folded Reload
; RV32IM-NEXT: mul t2, a7, a0
; RV32IM-NEXT: lw a0, 172(sp) # 4-byte Folded Reload
; RV32IM-NEXT: mul s6, a7, a0
; RV32IM-NEXT: lw a0, 168(sp) # 4-byte Folded Reload
; RV32IM-NEXT: mul a1, a7, a0
; RV32IM-NEXT: lw a0, 164(sp) # 4-byte Folded Reload
; RV32IM-NEXT: mul a0, a7, a0
; RV32IM-NEXT: lw a4, 160(sp) # 4-byte Folded Reload
; RV32IM-NEXT: mul a4, a7, a4
; RV32IM-NEXT: lw t0, 156(sp) # 4-byte Folded Reload
; RV32IM-NEXT: mul t0, a7, t0
; RV32IM-NEXT: lw s0, 152(sp) # 4-byte Folded Reload
; RV32IM-NEXT: mul s0, a7, s0
; RV32IM-NEXT: lw s3, 148(sp) # 4-byte Folded Reload
; RV32IM-NEXT: mul s3, a7, s3
; RV32IM-NEXT: lw s5, 144(sp) # 4-byte Folded Reload
; RV32IM-NEXT: mul s5, a7, s5
; RV32IM-NEXT: lw s9, 140(sp) # 4-byte Folded Reload
; RV32IM-NEXT: mul a7, a7, s9
; RV32IM-NEXT: lw s9, 120(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor ra, ra, s9
; RV32IM-NEXT: xor s8, s11, s8
; RV32IM-NEXT: xor s4, s7, s4
; RV32IM-NEXT: xor t3, s1, t3
; RV32IM-NEXT: xor a6, t1, a6
; RV32IM-NEXT: xor a2, a3, a2
; RV32IM-NEXT: xor a0, a1, a0
; RV32IM-NEXT: xor a1, ra, s8
; RV32IM-NEXT: lw a3, 124(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor a3, s4, a3
; RV32IM-NEXT: lw t1, 116(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor t1, t3, t1
; RV32IM-NEXT: xor a6, a6, s2
; RV32IM-NEXT: xor a2, a2, a5
; RV32IM-NEXT: xor a0, a0, a4
; RV32IM-NEXT: xor a1, a1, a3
; RV32IM-NEXT: lw a3, 132(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor a3, t1, a3
; RV32IM-NEXT: xor a4, a6, s10
; RV32IM-NEXT: xor a2, a2, t2
; RV32IM-NEXT: xor a0, a0, t0
; RV32IM-NEXT: lw a5, 200(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor a1, a1, a5
; RV32IM-NEXT: lw a5, 136(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor a3, a3, a5
; RV32IM-NEXT: lw a5, 128(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor a4, a4, a5
; RV32IM-NEXT: xor a2, a2, s6
; RV32IM-NEXT: xor a0, a0, s0
; RV32IM-NEXT: lw a5, 196(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor a4, a4, a5
; RV32IM-NEXT: xor a0, a0, s3
; RV32IM-NEXT: xor a3, a1, a3
; RV32IM-NEXT: xor a3, a3, a4
; RV32IM-NEXT: xor a0, a0, s5
; RV32IM-NEXT: xor a2, a3, a2
; RV32IM-NEXT: xor a0, a0, a7
; RV32IM-NEXT: lw a4, 216(sp) # 4-byte Folded Reload
; RV32IM-NEXT: and a3, a2, a4
; RV32IM-NEXT: xor a0, a2, a0
; RV32IM-NEXT: srli a2, a2, 8
; RV32IM-NEXT: and a2, a2, a4
; RV32IM-NEXT: slli a1, a1, 24
; RV32IM-NEXT: slli a3, a3, 8
; RV32IM-NEXT: or a1, a1, a3
; RV32IM-NEXT: srli a0, a0, 24
; RV32IM-NEXT: or a0, a2, a0
; RV32IM-NEXT: or a0, a1, a0
; RV32IM-NEXT: srli a1, a0, 4
; RV32IM-NEXT: and a0, a0, t5
; RV32IM-NEXT: and a1, a1, t5
; RV32IM-NEXT: slli a0, a0, 4
; RV32IM-NEXT: or a0, a1, a0
; RV32IM-NEXT: srli a1, a0, 2
; RV32IM-NEXT: and a0, a0, t4
; RV32IM-NEXT: and a1, a1, t4
; RV32IM-NEXT: slli a0, a0, 2
; RV32IM-NEXT: or a0, a1, a0
; RV32IM-NEXT: lui a1, 349525
; RV32IM-NEXT: addi a1, a1, 1364
; RV32IM-NEXT: and a2, a0, t6
; RV32IM-NEXT: srli a0, a0, 1
; RV32IM-NEXT: and a0, a0, a1
; RV32IM-NEXT: slli a2, a2, 1
; RV32IM-NEXT: or a0, a0, a2
; RV32IM-NEXT: srli a0, a0, 1
; RV32IM-NEXT: lw a1, 204(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor a1, a0, a1
; RV32IM-NEXT: lw a0, 212(sp) # 4-byte Folded Reload
; RV32IM-NEXT: lw a2, 208(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor a0, a0, a2
; RV32IM-NEXT: lw ra, 268(sp) # 4-byte Folded Reload
; RV32IM-NEXT: lw s0, 264(sp) # 4-byte Folded Reload
; RV32IM-NEXT: lw s1, 260(sp) # 4-byte Folded Reload
; RV32IM-NEXT: lw s2, 256(sp) # 4-byte Folded Reload
; RV32IM-NEXT: lw s3, 252(sp) # 4-byte Folded Reload
; RV32IM-NEXT: lw s4, 248(sp) # 4-byte Folded Reload
; RV32IM-NEXT: lw s5, 244(sp) # 4-byte Folded Reload
; RV32IM-NEXT: lw s6, 240(sp) # 4-byte Folded Reload
; RV32IM-NEXT: lw s7, 236(sp) # 4-byte Folded Reload
; RV32IM-NEXT: lw s8, 232(sp) # 4-byte Folded Reload
; RV32IM-NEXT: lw s9, 228(sp) # 4-byte Folded Reload
; RV32IM-NEXT: lw s10, 224(sp) # 4-byte Folded Reload
; RV32IM-NEXT: lw s11, 220(sp) # 4-byte Folded Reload
; RV32IM-NEXT: addi sp, sp, 272
; RV32IM-NEXT: ret
;
; RV64IM-LABEL: clmul_i64:
; RV64IM: # %bb.0:
; RV64IM-NEXT: addi sp, sp, -448
; RV64IM-NEXT: sd ra, 440(sp) # 8-byte Folded Spill
; RV64IM-NEXT: sd s0, 432(sp) # 8-byte Folded Spill
; RV64IM-NEXT: sd s1, 424(sp) # 8-byte Folded Spill
; RV64IM-NEXT: sd s2, 416(sp) # 8-byte Folded Spill
; RV64IM-NEXT: sd s3, 408(sp) # 8-byte Folded Spill
; RV64IM-NEXT: sd s4, 400(sp) # 8-byte Folded Spill
; RV64IM-NEXT: sd s5, 392(sp) # 8-byte Folded Spill
; RV64IM-NEXT: sd s6, 384(sp) # 8-byte Folded Spill
; RV64IM-NEXT: sd s7, 376(sp) # 8-byte Folded Spill
; RV64IM-NEXT: sd s8, 368(sp) # 8-byte Folded Spill
; RV64IM-NEXT: sd s9, 360(sp) # 8-byte Folded Spill
; RV64IM-NEXT: sd s10, 352(sp) # 8-byte Folded Spill
; RV64IM-NEXT: sd s11, 344(sp) # 8-byte Folded Spill
; RV64IM-NEXT: andi t2, a1, 2
; RV64IM-NEXT: andi t4, a1, 1
; RV64IM-NEXT: andi a6, a1, 4
; RV64IM-NEXT: andi t0, a1, 8
; RV64IM-NEXT: andi a5, a1, 16
; RV64IM-NEXT: andi a7, a1, 32
; RV64IM-NEXT: andi a3, a1, 64
; RV64IM-NEXT: andi t1, a1, 128
; RV64IM-NEXT: andi t3, a1, 256
; RV64IM-NEXT: andi a4, a1, 512
; RV64IM-NEXT: li a2, 1
; RV64IM-NEXT: lui s7, 1
; RV64IM-NEXT: lui t6, 2
; RV64IM-NEXT: lui s0, 4
; RV64IM-NEXT: lui s1, 8
; RV64IM-NEXT: lui s2, 16
; RV64IM-NEXT: lui s3, 32
; RV64IM-NEXT: lui s4, 64
; RV64IM-NEXT: lui s5, 128
; RV64IM-NEXT: lui s6, 256
; RV64IM-NEXT: lui s8, 512
; RV64IM-NEXT: lui s9, 1024
; RV64IM-NEXT: lui s10, 2048
; RV64IM-NEXT: lui s11, 4096
; RV64IM-NEXT: lui ra, 8192
; RV64IM-NEXT: lui t5, 16384
; RV64IM-NEXT: mul t2, a0, t2
; RV64IM-NEXT: mul t4, a0, t4
; RV64IM-NEXT: xor t2, t4, t2
; RV64IM-NEXT: lui t4, 32768
; RV64IM-NEXT: mul a6, a0, a6
; RV64IM-NEXT: mul t0, a0, t0
; RV64IM-NEXT: xor a6, a6, t0
; RV64IM-NEXT: lui t0, 65536
; RV64IM-NEXT: mul a5, a0, a5
; RV64IM-NEXT: mul a7, a0, a7
; RV64IM-NEXT: xor a5, a5, a7
; RV64IM-NEXT: lui a7, 131072
; RV64IM-NEXT: mul t1, a0, t1
; RV64IM-NEXT: mul t3, a0, t3
; RV64IM-NEXT: xor t1, t1, t3
; RV64IM-NEXT: lui t3, 262144
; RV64IM-NEXT: mul a3, a0, a3
; RV64IM-NEXT: mul a4, a0, a4
; RV64IM-NEXT: xor a6, t2, a6
; RV64IM-NEXT: sd a6, 336(sp) # 8-byte Folded Spill
; RV64IM-NEXT: slli a6, a2, 11
; RV64IM-NEXT: sd a6, 216(sp) # 8-byte Folded Spill
; RV64IM-NEXT: and s7, a1, s7
; RV64IM-NEXT: and a6, a1, t6
; RV64IM-NEXT: sd a6, 288(sp) # 8-byte Folded Spill
; RV64IM-NEXT: and s0, a1, s0
; RV64IM-NEXT: and s1, a1, s1
; RV64IM-NEXT: and s2, a1, s2
; RV64IM-NEXT: and s3, a1, s3
; RV64IM-NEXT: and a6, a1, s4
; RV64IM-NEXT: sd a6, 280(sp) # 8-byte Folded Spill
; RV64IM-NEXT: and a6, a1, s5
; RV64IM-NEXT: and t2, a1, s6
; RV64IM-NEXT: and s8, a1, s8
; RV64IM-NEXT: and t6, a1, s9
; RV64IM-NEXT: sd t6, 272(sp) # 8-byte Folded Spill
; RV64IM-NEXT: and t6, a1, s10
; RV64IM-NEXT: sd t6, 264(sp) # 8-byte Folded Spill
; RV64IM-NEXT: and t6, a1, s11
; RV64IM-NEXT: sd t6, 256(sp) # 8-byte Folded Spill
; RV64IM-NEXT: and t6, a1, ra
; RV64IM-NEXT: and t5, a1, t5
; RV64IM-NEXT: and t4, a1, t4
; RV64IM-NEXT: sd t4, 248(sp) # 8-byte Folded Spill
; RV64IM-NEXT: and t0, a1, t0
; RV64IM-NEXT: sd t0, 240(sp) # 8-byte Folded Spill
; RV64IM-NEXT: and a7, a1, a7
; RV64IM-NEXT: sd a7, 232(sp) # 8-byte Folded Spill
; RV64IM-NEXT: and a7, a1, t3
; RV64IM-NEXT: sd a7, 224(sp) # 8-byte Folded Spill
; RV64IM-NEXT: xor a3, a5, a3
; RV64IM-NEXT: sd a3, 328(sp) # 8-byte Folded Spill
; RV64IM-NEXT: slli t4, a2, 32
; RV64IM-NEXT: xor a3, t1, a4
; RV64IM-NEXT: sd a3, 320(sp) # 8-byte Folded Spill
; RV64IM-NEXT: slli s4, a2, 33
; RV64IM-NEXT: mul a3, a0, s0
; RV64IM-NEXT: mul a4, a0, s1
; RV64IM-NEXT: xor a3, a3, a4
; RV64IM-NEXT: sd a3, 312(sp) # 8-byte Folded Spill
; RV64IM-NEXT: slli s0, a2, 34
; RV64IM-NEXT: mul a3, a0, a6
; RV64IM-NEXT: mul a4, a0, t2
; RV64IM-NEXT: xor a3, a3, a4
; RV64IM-NEXT: sd a3, 304(sp) # 8-byte Folded Spill
; RV64IM-NEXT: slli s1, a2, 35
; RV64IM-NEXT: mul a3, a0, t6
; RV64IM-NEXT: mul a4, a0, t5
; RV64IM-NEXT: xor a3, a3, a4
; RV64IM-NEXT: sd a3, 296(sp) # 8-byte Folded Spill
; RV64IM-NEXT: slli t5, a2, 36
; RV64IM-NEXT: slli t6, a2, 37
; RV64IM-NEXT: slli s5, a2, 38
; RV64IM-NEXT: slli s6, a2, 39
; RV64IM-NEXT: slli s9, a2, 40
; RV64IM-NEXT: slli s10, a2, 41
; RV64IM-NEXT: slli s11, a2, 42
; RV64IM-NEXT: slli ra, a2, 43
; RV64IM-NEXT: slli a3, a2, 44
; RV64IM-NEXT: sd a3, 208(sp) # 8-byte Folded Spill
; RV64IM-NEXT: slli a3, a2, 45
; RV64IM-NEXT: sd a3, 200(sp) # 8-byte Folded Spill
; RV64IM-NEXT: slli a3, a2, 46
; RV64IM-NEXT: sd a3, 192(sp) # 8-byte Folded Spill
; RV64IM-NEXT: slli a3, a2, 47
; RV64IM-NEXT: sd a3, 184(sp) # 8-byte Folded Spill
; RV64IM-NEXT: slli a3, a2, 48
; RV64IM-NEXT: sd a3, 176(sp) # 8-byte Folded Spill
; RV64IM-NEXT: slli a3, a2, 49
; RV64IM-NEXT: sd a3, 168(sp) # 8-byte Folded Spill
; RV64IM-NEXT: slli a3, a2, 50
; RV64IM-NEXT: sd a3, 160(sp) # 8-byte Folded Spill
; RV64IM-NEXT: slli a3, a2, 51
; RV64IM-NEXT: sd a3, 152(sp) # 8-byte Folded Spill
; RV64IM-NEXT: slli a3, a2, 52
; RV64IM-NEXT: sd a3, 144(sp) # 8-byte Folded Spill
; RV64IM-NEXT: slli a3, a2, 53
; RV64IM-NEXT: sd a3, 136(sp) # 8-byte Folded Spill
; RV64IM-NEXT: slli a3, a2, 54
; RV64IM-NEXT: sd a3, 128(sp) # 8-byte Folded Spill
; RV64IM-NEXT: slli t1, a2, 55
; RV64IM-NEXT: slli t0, a2, 56
; RV64IM-NEXT: slli a7, a2, 57
; RV64IM-NEXT: slli a6, a2, 58
; RV64IM-NEXT: slli a5, a2, 59
; RV64IM-NEXT: slli a4, a2, 60
; RV64IM-NEXT: slli a3, a2, 61
; RV64IM-NEXT: slli a2, a2, 62
; RV64IM-NEXT: ld t2, 216(sp) # 8-byte Folded Reload
; RV64IM-NEXT: and t3, a1, t2
; RV64IM-NEXT: and t2, a1, t4
; RV64IM-NEXT: sd t2, 120(sp) # 8-byte Folded Spill
; RV64IM-NEXT: and t4, a1, s4
; RV64IM-NEXT: and s0, a1, s0
; RV64IM-NEXT: sd s0, 112(sp) # 8-byte Folded Spill
; RV64IM-NEXT: and s1, a1, s1
; RV64IM-NEXT: sd s1, 104(sp) # 8-byte Folded Spill
; RV64IM-NEXT: and t2, a1, t5
; RV64IM-NEXT: sd t2, 96(sp) # 8-byte Folded Spill
; RV64IM-NEXT: and s1, a1, t6
; RV64IM-NEXT: and t2, a1, s5
; RV64IM-NEXT: sd t2, 88(sp) # 8-byte Folded Spill
; RV64IM-NEXT: and t2, a1, s6
; RV64IM-NEXT: sd t2, 80(sp) # 8-byte Folded Spill
; RV64IM-NEXT: and s4, a1, s9
; RV64IM-NEXT: and s5, a1, s10
; RV64IM-NEXT: and s6, a1, s11
; RV64IM-NEXT: and t6, a1, ra
; RV64IM-NEXT: ld t2, 208(sp) # 8-byte Folded Reload
; RV64IM-NEXT: and t2, a1, t2
; RV64IM-NEXT: sd t2, 72(sp) # 8-byte Folded Spill
; RV64IM-NEXT: ld t2, 200(sp) # 8-byte Folded Reload
; RV64IM-NEXT: and t2, a1, t2
; RV64IM-NEXT: sd t2, 64(sp) # 8-byte Folded Spill
; RV64IM-NEXT: ld t2, 192(sp) # 8-byte Folded Reload
; RV64IM-NEXT: and s10, a1, t2
; RV64IM-NEXT: ld t2, 184(sp) # 8-byte Folded Reload
; RV64IM-NEXT: and s11, a1, t2
; RV64IM-NEXT: ld t2, 176(sp) # 8-byte Folded Reload
; RV64IM-NEXT: and ra, a1, t2
; RV64IM-NEXT: ld t2, 168(sp) # 8-byte Folded Reload
; RV64IM-NEXT: and t2, a1, t2
; RV64IM-NEXT: sd t2, 184(sp) # 8-byte Folded Spill
; RV64IM-NEXT: ld t2, 160(sp) # 8-byte Folded Reload
; RV64IM-NEXT: and t2, a1, t2
; RV64IM-NEXT: sd t2, 160(sp) # 8-byte Folded Spill
; RV64IM-NEXT: ld t2, 152(sp) # 8-byte Folded Reload
; RV64IM-NEXT: and t2, a1, t2
; RV64IM-NEXT: sd t2, 152(sp) # 8-byte Folded Spill
; RV64IM-NEXT: ld t2, 144(sp) # 8-byte Folded Reload
; RV64IM-NEXT: and t2, a1, t2
; RV64IM-NEXT: sd t2, 144(sp) # 8-byte Folded Spill
; RV64IM-NEXT: ld t2, 136(sp) # 8-byte Folded Reload
; RV64IM-NEXT: and t2, a1, t2
; RV64IM-NEXT: sd t2, 136(sp) # 8-byte Folded Spill
; RV64IM-NEXT: ld t2, 128(sp) # 8-byte Folded Reload
; RV64IM-NEXT: and t2, a1, t2
; RV64IM-NEXT: sd t2, 128(sp) # 8-byte Folded Spill
; RV64IM-NEXT: and t1, a1, t1
; RV64IM-NEXT: and t0, a1, t0
; RV64IM-NEXT: sd t0, 56(sp) # 8-byte Folded Spill
; RV64IM-NEXT: and a7, a1, a7
; RV64IM-NEXT: sd a7, 48(sp) # 8-byte Folded Spill
; RV64IM-NEXT: and a6, a1, a6
; RV64IM-NEXT: and a5, a1, a5
; RV64IM-NEXT: sd a5, 40(sp) # 8-byte Folded Spill
; RV64IM-NEXT: and a4, a1, a4
; RV64IM-NEXT: sd a4, 32(sp) # 8-byte Folded Spill
; RV64IM-NEXT: and a3, a1, a3
; RV64IM-NEXT: sd a3, 24(sp) # 8-byte Folded Spill
; RV64IM-NEXT: and a2, a1, a2
; RV64IM-NEXT: sd a2, 16(sp) # 8-byte Folded Spill
; RV64IM-NEXT: andi a2, a1, 1024
; RV64IM-NEXT: srliw a3, a1, 31
; RV64IM-NEXT: srli a1, a1, 63
; RV64IM-NEXT: mul s9, a0, a2
; RV64IM-NEXT: slli a3, a3, 31
; RV64IM-NEXT: slli a1, a1, 63
; RV64IM-NEXT: mul s7, a0, s7
; RV64IM-NEXT: ld a2, 288(sp) # 8-byte Folded Reload
; RV64IM-NEXT: mul a2, a0, a2
; RV64IM-NEXT: sd a2, 192(sp) # 8-byte Folded Spill
; RV64IM-NEXT: mul s2, a0, s2
; RV64IM-NEXT: mul a2, a0, s3
; RV64IM-NEXT: sd a2, 176(sp) # 8-byte Folded Spill
; RV64IM-NEXT: ld a2, 280(sp) # 8-byte Folded Reload
; RV64IM-NEXT: mul a2, a0, a2
; RV64IM-NEXT: sd a2, 216(sp) # 8-byte Folded Spill
; RV64IM-NEXT: mul s0, a0, s8
; RV64IM-NEXT: ld a2, 272(sp) # 8-byte Folded Reload
; RV64IM-NEXT: mul a2, a0, a2
; RV64IM-NEXT: sd a2, 8(sp) # 8-byte Folded Spill
; RV64IM-NEXT: ld a2, 264(sp) # 8-byte Folded Reload
; RV64IM-NEXT: mul a2, a0, a2
; RV64IM-NEXT: sd a2, 208(sp) # 8-byte Folded Spill
; RV64IM-NEXT: ld a2, 256(sp) # 8-byte Folded Reload
; RV64IM-NEXT: mul a2, a0, a2
; RV64IM-NEXT: sd a2, 272(sp) # 8-byte Folded Spill
; RV64IM-NEXT: ld a2, 248(sp) # 8-byte Folded Reload
; RV64IM-NEXT: mul t2, a0, a2
; RV64IM-NEXT: ld a2, 240(sp) # 8-byte Folded Reload
; RV64IM-NEXT: mul a2, a0, a2
; RV64IM-NEXT: sd a2, 0(sp) # 8-byte Folded Spill
; RV64IM-NEXT: ld a2, 232(sp) # 8-byte Folded Reload
; RV64IM-NEXT: mul a2, a0, a2
; RV64IM-NEXT: sd a2, 200(sp) # 8-byte Folded Spill
; RV64IM-NEXT: ld a2, 224(sp) # 8-byte Folded Reload
; RV64IM-NEXT: mul a2, a0, a2
; RV64IM-NEXT: sd a2, 256(sp) # 8-byte Folded Spill
; RV64IM-NEXT: mul a2, a0, a3
; RV64IM-NEXT: sd a2, 280(sp) # 8-byte Folded Spill
; RV64IM-NEXT: mul a1, a0, a1
; RV64IM-NEXT: sd a1, 288(sp) # 8-byte Folded Spill
; RV64IM-NEXT: mul a5, a0, t3
; RV64IM-NEXT: ld a1, 120(sp) # 8-byte Folded Reload
; RV64IM-NEXT: mul a7, a0, a1
; RV64IM-NEXT: mul t4, a0, t4
; RV64IM-NEXT: ld a1, 112(sp) # 8-byte Folded Reload
; RV64IM-NEXT: mul t5, a0, a1
; RV64IM-NEXT: ld a1, 104(sp) # 8-byte Folded Reload
; RV64IM-NEXT: mul a1, a0, a1
; RV64IM-NEXT: sd a1, 120(sp) # 8-byte Folded Spill
; RV64IM-NEXT: ld a1, 96(sp) # 8-byte Folded Reload
; RV64IM-NEXT: mul a1, a0, a1
; RV64IM-NEXT: sd a1, 224(sp) # 8-byte Folded Spill
; RV64IM-NEXT: mul a1, a0, s1
; RV64IM-NEXT: sd a1, 240(sp) # 8-byte Folded Spill
; RV64IM-NEXT: ld a1, 88(sp) # 8-byte Folded Reload
; RV64IM-NEXT: mul a1, a0, a1
; RV64IM-NEXT: sd a1, 264(sp) # 8-byte Folded Spill
; RV64IM-NEXT: ld a1, 80(sp) # 8-byte Folded Reload
; RV64IM-NEXT: mul s1, a0, a1
; RV64IM-NEXT: mul s4, a0, s4
; RV64IM-NEXT: mul s5, a0, s5
; RV64IM-NEXT: mul s6, a0, s6
; RV64IM-NEXT: mul a1, a0, t6
; RV64IM-NEXT: sd a1, 112(sp) # 8-byte Folded Spill
; RV64IM-NEXT: ld a1, 72(sp) # 8-byte Folded Reload
; RV64IM-NEXT: mul a1, a0, a1
; RV64IM-NEXT: sd a1, 168(sp) # 8-byte Folded Spill
; RV64IM-NEXT: ld a1, 64(sp) # 8-byte Folded Reload
; RV64IM-NEXT: mul a1, a0, a1
; RV64IM-NEXT: sd a1, 232(sp) # 8-byte Folded Spill
; RV64IM-NEXT: mul a1, a0, s10
; RV64IM-NEXT: sd a1, 248(sp) # 8-byte Folded Spill
; RV64IM-NEXT: mul s10, a0, s11
; RV64IM-NEXT: mul s11, a0, ra
; RV64IM-NEXT: ld a1, 184(sp) # 8-byte Folded Reload
; RV64IM-NEXT: mul ra, a0, a1
; RV64IM-NEXT: ld a1, 160(sp) # 8-byte Folded Reload
; RV64IM-NEXT: mul t0, a0, a1
; RV64IM-NEXT: ld a1, 152(sp) # 8-byte Folded Reload
; RV64IM-NEXT: mul t3, a0, a1
; RV64IM-NEXT: ld a1, 144(sp) # 8-byte Folded Reload
; RV64IM-NEXT: mul s3, a0, a1
; RV64IM-NEXT: ld a1, 136(sp) # 8-byte Folded Reload
; RV64IM-NEXT: mul a1, a0, a1
; RV64IM-NEXT: sd a1, 152(sp) # 8-byte Folded Spill
; RV64IM-NEXT: ld a1, 128(sp) # 8-byte Folded Reload
; RV64IM-NEXT: mul a1, a0, a1
; RV64IM-NEXT: sd a1, 160(sp) # 8-byte Folded Spill
; RV64IM-NEXT: mul a1, a0, t1
; RV64IM-NEXT: sd a1, 184(sp) # 8-byte Folded Spill
; RV64IM-NEXT: ld a1, 56(sp) # 8-byte Folded Reload
; RV64IM-NEXT: mul a2, a0, a1
; RV64IM-NEXT: ld a1, 48(sp) # 8-byte Folded Reload
; RV64IM-NEXT: mul a1, a0, a1
; RV64IM-NEXT: mul a3, a0, a6
; RV64IM-NEXT: ld a4, 40(sp) # 8-byte Folded Reload
; RV64IM-NEXT: mul a4, a0, a4
; RV64IM-NEXT: ld a6, 32(sp) # 8-byte Folded Reload
; RV64IM-NEXT: mul a6, a0, a6
; RV64IM-NEXT: ld t1, 24(sp) # 8-byte Folded Reload
; RV64IM-NEXT: mul t1, a0, t1
; RV64IM-NEXT: ld t6, 16(sp) # 8-byte Folded Reload
; RV64IM-NEXT: mul t6, a0, t6
; RV64IM-NEXT: ld a0, 336(sp) # 8-byte Folded Reload
; RV64IM-NEXT: ld s8, 328(sp) # 8-byte Folded Reload
; RV64IM-NEXT: xor a0, a0, s8
; RV64IM-NEXT: ld s8, 320(sp) # 8-byte Folded Reload
; RV64IM-NEXT: xor s9, s8, s9
; RV64IM-NEXT: xor a5, a5, s7
; RV64IM-NEXT: ld s7, 312(sp) # 8-byte Folded Reload
; RV64IM-NEXT: xor s2, s7, s2
; RV64IM-NEXT: ld s7, 304(sp) # 8-byte Folded Reload
; RV64IM-NEXT: xor s0, s7, s0
; RV64IM-NEXT: ld s7, 296(sp) # 8-byte Folded Reload
; RV64IM-NEXT: xor t2, s7, t2
; RV64IM-NEXT: xor a7, a7, t4
; RV64IM-NEXT: xor t4, s1, s4
; RV64IM-NEXT: xor s1, s10, s11
; RV64IM-NEXT: xor a1, a2, a1
; RV64IM-NEXT: xor a0, a0, s9
; RV64IM-NEXT: ld a2, 192(sp) # 8-byte Folded Reload
; RV64IM-NEXT: xor a2, a5, a2
; RV64IM-NEXT: ld a5, 176(sp) # 8-byte Folded Reload
; RV64IM-NEXT: xor a5, s2, a5
; RV64IM-NEXT: ld s2, 8(sp) # 8-byte Folded Reload
; RV64IM-NEXT: xor s0, s0, s2
; RV64IM-NEXT: ld s2, 0(sp) # 8-byte Folded Reload
; RV64IM-NEXT: xor t2, t2, s2
; RV64IM-NEXT: xor a7, a7, t5
; RV64IM-NEXT: xor t4, t4, s5
; RV64IM-NEXT: xor t5, s1, ra
; RV64IM-NEXT: xor a1, a1, a3
; RV64IM-NEXT: xor a0, a0, a2
; RV64IM-NEXT: ld a2, 216(sp) # 8-byte Folded Reload
; RV64IM-NEXT: xor a2, a5, a2
; RV64IM-NEXT: ld a3, 208(sp) # 8-byte Folded Reload
; RV64IM-NEXT: xor a3, s0, a3
; RV64IM-NEXT: ld a5, 200(sp) # 8-byte Folded Reload
; RV64IM-NEXT: xor a5, t2, a5
; RV64IM-NEXT: ld t2, 120(sp) # 8-byte Folded Reload
; RV64IM-NEXT: xor a7, a7, t2
; RV64IM-NEXT: xor t2, t4, s6
; RV64IM-NEXT: xor t0, t5, t0
; RV64IM-NEXT: xor a1, a1, a4
; RV64IM-NEXT: xor a0, a0, a2
; RV64IM-NEXT: ld a2, 272(sp) # 8-byte Folded Reload
; RV64IM-NEXT: xor a2, a3, a2
; RV64IM-NEXT: ld a3, 256(sp) # 8-byte Folded Reload
; RV64IM-NEXT: xor a3, a5, a3
; RV64IM-NEXT: ld a4, 224(sp) # 8-byte Folded Reload
; RV64IM-NEXT: xor a4, a7, a4
; RV64IM-NEXT: ld a5, 112(sp) # 8-byte Folded Reload
; RV64IM-NEXT: xor a5, t2, a5
; RV64IM-NEXT: xor a7, t0, t3
; RV64IM-NEXT: xor a1, a1, a6
; RV64IM-NEXT: xor a0, a0, a2
; RV64IM-NEXT: ld a2, 280(sp) # 8-byte Folded Reload
; RV64IM-NEXT: xor a2, a3, a2
; RV64IM-NEXT: ld a3, 240(sp) # 8-byte Folded Reload
; RV64IM-NEXT: xor a3, a4, a3
; RV64IM-NEXT: ld a4, 168(sp) # 8-byte Folded Reload
; RV64IM-NEXT: xor a4, a5, a4
; RV64IM-NEXT: xor a5, a7, s3
; RV64IM-NEXT: xor a1, a1, t1
; RV64IM-NEXT: xor a0, a0, a2
; RV64IM-NEXT: ld a2, 264(sp) # 8-byte Folded Reload
; RV64IM-NEXT: xor a2, a3, a2
; RV64IM-NEXT: ld a3, 232(sp) # 8-byte Folded Reload
; RV64IM-NEXT: xor a3, a4, a3
; RV64IM-NEXT: ld a4, 152(sp) # 8-byte Folded Reload
; RV64IM-NEXT: xor a4, a5, a4
; RV64IM-NEXT: xor a1, a1, t6
; RV64IM-NEXT: xor a0, a0, a2
; RV64IM-NEXT: ld a2, 248(sp) # 8-byte Folded Reload
; RV64IM-NEXT: xor a2, a3, a2
; RV64IM-NEXT: ld a3, 160(sp) # 8-byte Folded Reload
; RV64IM-NEXT: xor a3, a4, a3
; RV64IM-NEXT: xor a0, a0, a2
; RV64IM-NEXT: ld a2, 184(sp) # 8-byte Folded Reload
; RV64IM-NEXT: xor a2, a3, a2
; RV64IM-NEXT: xor a0, a0, a2
; RV64IM-NEXT: ld a2, 288(sp) # 8-byte Folded Reload
; RV64IM-NEXT: xor a1, a1, a2
; RV64IM-NEXT: xor a0, a0, a1
; RV64IM-NEXT: ld ra, 440(sp) # 8-byte Folded Reload
; RV64IM-NEXT: ld s0, 432(sp) # 8-byte Folded Reload
; RV64IM-NEXT: ld s1, 424(sp) # 8-byte Folded Reload
; RV64IM-NEXT: ld s2, 416(sp) # 8-byte Folded Reload
; RV64IM-NEXT: ld s3, 408(sp) # 8-byte Folded Reload
; RV64IM-NEXT: ld s4, 400(sp) # 8-byte Folded Reload
; RV64IM-NEXT: ld s5, 392(sp) # 8-byte Folded Reload
; RV64IM-NEXT: ld s6, 384(sp) # 8-byte Folded Reload
; RV64IM-NEXT: ld s7, 376(sp) # 8-byte Folded Reload
; RV64IM-NEXT: ld s8, 368(sp) # 8-byte Folded Reload
; RV64IM-NEXT: ld s9, 360(sp) # 8-byte Folded Reload
; RV64IM-NEXT: ld s10, 352(sp) # 8-byte Folded Reload
; RV64IM-NEXT: ld s11, 344(sp) # 8-byte Folded Reload
; RV64IM-NEXT: addi sp, sp, 448
; RV64IM-NEXT: ret
%res = call i64 @llvm.clmul.i64(i64 %a, i64 %b)
ret i64 %res
}
; Constant folding: clmul(1, 2) = 2 (multiplying by 1 is the identity for
; carry-less multiply), so the whole call folds to a single immediate load.
define i4 @clmul_constfold_i4() nounwind {
; CHECK-LABEL: clmul_constfold_i4:
; CHECK: # %bb.0:
; CHECK-NEXT: li a0, 2
; CHECK-NEXT: ret
%res = call i4 @llvm.clmul.i4(i4 1, i4 2)
ret i4 %res
}
; Constant folding: clmul(-2, -1) over i16 is 0xAAAA (every odd bit set),
; materialized with lui+addi (11 << 12 = 0xB000; 0xB000 - 1366 = 0xAAAA).
define i16 @clmul_constfold_i16() nounwind {
; CHECK-LABEL: clmul_constfold_i16:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 11
; CHECK-NEXT: addi a0, a0, -1366
; CHECK-NEXT: ret
%res = call i16 @llvm.clmul.i16(i16 -2, i16 -1)
ret i16 %res
}
; "Reversed" clmul expressed as zext -> clmul -> lshr (bits-1) -> trunc.
; Codegen masks the widened operand (andi a0, 15), expands the i8 clmul as
; four AND/MUL/XOR steps (one per set bit position of %b), then shifts the
; high half down by 3 to recover the i4 result.
define i4 @clmulr_i4(i4 %a, i4 %b) nounwind {
; CHECK-LABEL: clmulr_i4:
; CHECK: # %bb.0:
; CHECK-NEXT: andi a0, a0, 15
; CHECK-NEXT: andi a2, a1, 2
; CHECK-NEXT: andi a3, a1, 1
; CHECK-NEXT: andi a4, a1, 4
; CHECK-NEXT: andi a1, a1, 8
; CHECK-NEXT: mul a2, a0, a2
; CHECK-NEXT: mul a3, a0, a3
; CHECK-NEXT: mul a4, a0, a4
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: xor a2, a3, a2
; CHECK-NEXT: xor a0, a4, a0
; CHECK-NEXT: xor a0, a2, a0
; CHECK-NEXT: srli a0, a0, 3
; CHECK-NEXT: ret
%a.ext = zext i4 %a to i8
%b.ext = zext i4 %b to i8
%clmul = call i8 @llvm.clmul.i8(i8 %a.ext, i8 %b.ext)
%res.ext = lshr i8 %clmul, 3
%res = trunc i8 %res.ext to i4
ret i4 %res
}
; bitreverse(clmul(bitreverse(a), bitreverse(b))) is equivalent to the
; reversed clmul; the expected codegen is identical to @clmulr_i4 above,
; i.e. no bit-reversal instructions survive in the output.
define i4 @clmulr_i4_bitreverse(i4 %a, i4 %b) nounwind {
; CHECK-LABEL: clmulr_i4_bitreverse:
; CHECK: # %bb.0:
; CHECK-NEXT: andi a0, a0, 15
; CHECK-NEXT: andi a2, a1, 2
; CHECK-NEXT: andi a3, a1, 1
; CHECK-NEXT: andi a4, a1, 4
; CHECK-NEXT: andi a1, a1, 8
; CHECK-NEXT: mul a2, a0, a2
; CHECK-NEXT: mul a3, a0, a3
; CHECK-NEXT: mul a4, a0, a4
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: xor a2, a3, a2
; CHECK-NEXT: xor a0, a4, a0
; CHECK-NEXT: xor a0, a2, a0
; CHECK-NEXT: srli a0, a0, 3
; CHECK-NEXT: ret
%a.rev = call i4 @llvm.bitreverse.i4(i4 %a)
%b.rev = call i4 @llvm.bitreverse.i4(i4 %b)
%res.rev = call i4 @llvm.clmul.i4(i4 %a.rev, i4 %b.rev)
%res = call i4 @llvm.bitreverse.i4(i4 %res.rev)
ret i4 %res
}
; i8 reversed clmul via the zext/clmul/lshr-7/trunc pattern. RV32 and RV64
; differ only in the final extract: slli 17/srli 24 (RV32) vs slli 49/srli 56
; (RV64) pull bits [14:7] of the 16-bit carry-less product into the low byte.
define i8 @clmulr_i8(i8 %a, i8 %b) nounwind {
; RV32IM-LABEL: clmulr_i8:
; RV32IM: # %bb.0:
; RV32IM-NEXT: zext.b a0, a0
; RV32IM-NEXT: andi a2, a1, 2
; RV32IM-NEXT: andi a3, a1, 1
; RV32IM-NEXT: andi a4, a1, 4
; RV32IM-NEXT: andi a5, a1, 8
; RV32IM-NEXT: mul a2, a0, a2
; RV32IM-NEXT: mul a3, a0, a3
; RV32IM-NEXT: xor a2, a3, a2
; RV32IM-NEXT: andi a3, a1, 16
; RV32IM-NEXT: mul a4, a0, a4
; RV32IM-NEXT: mul a5, a0, a5
; RV32IM-NEXT: xor a4, a4, a5
; RV32IM-NEXT: andi a5, a1, 32
; RV32IM-NEXT: mul a3, a0, a3
; RV32IM-NEXT: mul a5, a0, a5
; RV32IM-NEXT: xor a3, a3, a5
; RV32IM-NEXT: xor a2, a2, a4
; RV32IM-NEXT: andi a4, a1, 64
; RV32IM-NEXT: andi a1, a1, 128
; RV32IM-NEXT: mul a4, a0, a4
; RV32IM-NEXT: xor a3, a3, a4
; RV32IM-NEXT: xor a2, a2, a3
; RV32IM-NEXT: mul a0, a0, a1
; RV32IM-NEXT: xor a0, a2, a0
; RV32IM-NEXT: slli a0, a0, 17
; RV32IM-NEXT: srli a0, a0, 24
; RV32IM-NEXT: ret
;
; RV64IM-LABEL: clmulr_i8:
; RV64IM: # %bb.0:
; RV64IM-NEXT: zext.b a0, a0
; RV64IM-NEXT: andi a2, a1, 2
; RV64IM-NEXT: andi a3, a1, 1
; RV64IM-NEXT: andi a4, a1, 4
; RV64IM-NEXT: andi a5, a1, 8
; RV64IM-NEXT: mul a2, a0, a2
; RV64IM-NEXT: mul a3, a0, a3
; RV64IM-NEXT: xor a2, a3, a2
; RV64IM-NEXT: andi a3, a1, 16
; RV64IM-NEXT: mul a4, a0, a4
; RV64IM-NEXT: mul a5, a0, a5
; RV64IM-NEXT: xor a4, a4, a5
; RV64IM-NEXT: andi a5, a1, 32
; RV64IM-NEXT: mul a3, a0, a3
; RV64IM-NEXT: mul a5, a0, a5
; RV64IM-NEXT: xor a3, a3, a5
; RV64IM-NEXT: xor a2, a2, a4
; RV64IM-NEXT: andi a4, a1, 64
; RV64IM-NEXT: andi a1, a1, 128
; RV64IM-NEXT: mul a4, a0, a4
; RV64IM-NEXT: xor a3, a3, a4
; RV64IM-NEXT: xor a2, a2, a3
; RV64IM-NEXT: mul a0, a0, a1
; RV64IM-NEXT: xor a0, a2, a0
; RV64IM-NEXT: slli a0, a0, 49
; RV64IM-NEXT: srli a0, a0, 56
; RV64IM-NEXT: ret
%a.ext = zext i8 %a to i16
%b.ext = zext i8 %b to i16
%clmul = call i16 @llvm.clmul.i16(i16 %a.ext, i16 %b.ext)
%res.ext = lshr i16 %clmul, 7
%res = trunc i16 %res.ext to i8
ret i8 %res
}
; i16 reversed clmul via zext to i32, clmul, lshr 15, trunc. The expansion
; tests one AND/MUL/XOR step per bit of %b: low bits use andi immediates,
; bits 11..15 need lui-built masks (lui 1/2/4/8 and the li 1; slli 11 pair).
; RV32 extracts with slli 1/srli 16; RV64 with slli 33/srli 48.
define i16 @clmulr_i16(i16 %a, i16 %b) nounwind {
; RV32IM-LABEL: clmulr_i16:
; RV32IM: # %bb.0:
; RV32IM-NEXT: slli a0, a0, 16
; RV32IM-NEXT: andi a2, a1, 2
; RV32IM-NEXT: andi a3, a1, 1
; RV32IM-NEXT: andi a4, a1, 4
; RV32IM-NEXT: andi a5, a1, 8
; RV32IM-NEXT: andi a6, a1, 16
; RV32IM-NEXT: andi a7, a1, 32
; RV32IM-NEXT: srli a0, a0, 16
; RV32IM-NEXT: mul a2, a0, a2
; RV32IM-NEXT: mul a3, a0, a3
; RV32IM-NEXT: xor a2, a3, a2
; RV32IM-NEXT: andi a3, a1, 64
; RV32IM-NEXT: mul a4, a0, a4
; RV32IM-NEXT: mul a5, a0, a5
; RV32IM-NEXT: xor a4, a4, a5
; RV32IM-NEXT: andi a5, a1, 128
; RV32IM-NEXT: mul a6, a0, a6
; RV32IM-NEXT: mul a7, a0, a7
; RV32IM-NEXT: xor a6, a6, a7
; RV32IM-NEXT: andi a7, a1, 256
; RV32IM-NEXT: mul a5, a0, a5
; RV32IM-NEXT: mul a7, a0, a7
; RV32IM-NEXT: xor a5, a5, a7
; RV32IM-NEXT: andi a7, a1, 512
; RV32IM-NEXT: xor a2, a2, a4
; RV32IM-NEXT: li a4, 1
; RV32IM-NEXT: mul a3, a0, a3
; RV32IM-NEXT: xor a3, a6, a3
; RV32IM-NEXT: lui a6, 1
; RV32IM-NEXT: mul a7, a0, a7
; RV32IM-NEXT: xor a5, a5, a7
; RV32IM-NEXT: lui a7, 2
; RV32IM-NEXT: slli a4, a4, 11
; RV32IM-NEXT: and a6, a1, a6
; RV32IM-NEXT: and a4, a1, a4
; RV32IM-NEXT: mul a6, a0, a6
; RV32IM-NEXT: mul a4, a0, a4
; RV32IM-NEXT: xor a4, a4, a6
; RV32IM-NEXT: lui a6, 4
; RV32IM-NEXT: xor a2, a2, a3
; RV32IM-NEXT: lui a3, 8
; RV32IM-NEXT: and a7, a1, a7
; RV32IM-NEXT: and a6, a1, a6
; RV32IM-NEXT: and a3, a1, a3
; RV32IM-NEXT: andi a1, a1, 1024
; RV32IM-NEXT: mul a1, a0, a1
; RV32IM-NEXT: xor a1, a5, a1
; RV32IM-NEXT: mul a5, a0, a7
; RV32IM-NEXT: xor a4, a4, a5
; RV32IM-NEXT: xor a1, a2, a1
; RV32IM-NEXT: mul a2, a0, a6
; RV32IM-NEXT: xor a2, a4, a2
; RV32IM-NEXT: xor a1, a1, a2
; RV32IM-NEXT: mul a0, a0, a3
; RV32IM-NEXT: xor a0, a1, a0
; RV32IM-NEXT: slli a0, a0, 1
; RV32IM-NEXT: srli a0, a0, 16
; RV32IM-NEXT: ret
;
; RV64IM-LABEL: clmulr_i16:
; RV64IM: # %bb.0:
; RV64IM-NEXT: slli a0, a0, 48
; RV64IM-NEXT: andi a2, a1, 2
; RV64IM-NEXT: andi a3, a1, 1
; RV64IM-NEXT: andi a4, a1, 4
; RV64IM-NEXT: andi a5, a1, 8
; RV64IM-NEXT: andi a6, a1, 16
; RV64IM-NEXT: andi a7, a1, 32
; RV64IM-NEXT: srli a0, a0, 48
; RV64IM-NEXT: mul a2, a0, a2
; RV64IM-NEXT: mul a3, a0, a3
; RV64IM-NEXT: xor a2, a3, a2
; RV64IM-NEXT: andi a3, a1, 64
; RV64IM-NEXT: mul a4, a0, a4
; RV64IM-NEXT: mul a5, a0, a5
; RV64IM-NEXT: xor a4, a4, a5
; RV64IM-NEXT: andi a5, a1, 128
; RV64IM-NEXT: mul a6, a0, a6
; RV64IM-NEXT: mul a7, a0, a7
; RV64IM-NEXT: xor a6, a6, a7
; RV64IM-NEXT: andi a7, a1, 256
; RV64IM-NEXT: mul a5, a0, a5
; RV64IM-NEXT: mul a7, a0, a7
; RV64IM-NEXT: xor a5, a5, a7
; RV64IM-NEXT: andi a7, a1, 512
; RV64IM-NEXT: xor a2, a2, a4
; RV64IM-NEXT: li a4, 1
; RV64IM-NEXT: mul a3, a0, a3
; RV64IM-NEXT: xor a3, a6, a3
; RV64IM-NEXT: lui a6, 1
; RV64IM-NEXT: mul a7, a0, a7
; RV64IM-NEXT: xor a5, a5, a7
; RV64IM-NEXT: lui a7, 2
; RV64IM-NEXT: slli a4, a4, 11
; RV64IM-NEXT: and a6, a1, a6
; RV64IM-NEXT: and a4, a1, a4
; RV64IM-NEXT: mul a6, a0, a6
; RV64IM-NEXT: mul a4, a0, a4
; RV64IM-NEXT: xor a4, a4, a6
; RV64IM-NEXT: lui a6, 4
; RV64IM-NEXT: xor a2, a2, a3
; RV64IM-NEXT: lui a3, 8
; RV64IM-NEXT: and a7, a1, a7
; RV64IM-NEXT: and a6, a1, a6
; RV64IM-NEXT: and a3, a1, a3
; RV64IM-NEXT: andi a1, a1, 1024
; RV64IM-NEXT: mul a1, a0, a1
; RV64IM-NEXT: xor a1, a5, a1
; RV64IM-NEXT: mul a5, a0, a7
; RV64IM-NEXT: xor a4, a4, a5
; RV64IM-NEXT: xor a1, a2, a1
; RV64IM-NEXT: mul a2, a0, a6
; RV64IM-NEXT: xor a2, a4, a2
; RV64IM-NEXT: xor a1, a1, a2
; RV64IM-NEXT: mul a0, a0, a3
; RV64IM-NEXT: xor a0, a1, a0
; RV64IM-NEXT: slli a0, a0, 33
; RV64IM-NEXT: srli a0, a0, 48
; RV64IM-NEXT: ret
%a.ext = zext i16 %a to i32
%b.ext = zext i16 %b to i32
%clmul = call i32 @llvm.clmul.i32(i32 %a.ext, i32 %b.ext)
%res.ext = lshr i32 %clmul, 15
%res = trunc i32 %res.ext to i16
ret i16 %res
}
define i32 @clmulr_i32(i32 %a, i32 %b) nounwind {
; RV32IM-LABEL: clmulr_i32:
; RV32IM: # %bb.0:
; RV32IM-NEXT: addi sp, sp, -144
; RV32IM-NEXT: sw ra, 140(sp) # 4-byte Folded Spill
; RV32IM-NEXT: sw s0, 136(sp) # 4-byte Folded Spill
; RV32IM-NEXT: sw s1, 132(sp) # 4-byte Folded Spill
; RV32IM-NEXT: sw s2, 128(sp) # 4-byte Folded Spill
; RV32IM-NEXT: sw s3, 124(sp) # 4-byte Folded Spill
; RV32IM-NEXT: sw s4, 120(sp) # 4-byte Folded Spill
; RV32IM-NEXT: sw s5, 116(sp) # 4-byte Folded Spill
; RV32IM-NEXT: sw s6, 112(sp) # 4-byte Folded Spill
; RV32IM-NEXT: sw s7, 108(sp) # 4-byte Folded Spill
; RV32IM-NEXT: sw s8, 104(sp) # 4-byte Folded Spill
; RV32IM-NEXT: sw s9, 100(sp) # 4-byte Folded Spill
; RV32IM-NEXT: sw s10, 96(sp) # 4-byte Folded Spill
; RV32IM-NEXT: sw s11, 92(sp) # 4-byte Folded Spill
; RV32IM-NEXT: srli t0, a0, 8
; RV32IM-NEXT: lui a3, 16
; RV32IM-NEXT: srli t1, a0, 24
; RV32IM-NEXT: slli a2, a0, 24
; RV32IM-NEXT: lui t3, 61681
; RV32IM-NEXT: lui t5, 209715
; RV32IM-NEXT: lui t6, 349525
; RV32IM-NEXT: srli t4, a1, 8
; RV32IM-NEXT: srli a4, a1, 24
; RV32IM-NEXT: slli a5, a1, 24
; RV32IM-NEXT: li s7, 1
; RV32IM-NEXT: lui t2, 4
; RV32IM-NEXT: lui s0, 8
; RV32IM-NEXT: lui s1, 32
; RV32IM-NEXT: lui s2, 64
; RV32IM-NEXT: lui s3, 128
; RV32IM-NEXT: lui s4, 256
; RV32IM-NEXT: lui s8, 512
; RV32IM-NEXT: lui a7, 1024
; RV32IM-NEXT: lui s9, 2048
; RV32IM-NEXT: lui s10, 4096
; RV32IM-NEXT: lui s11, 8192
; RV32IM-NEXT: lui ra, 16384
; RV32IM-NEXT: addi s5, a3, -256
; RV32IM-NEXT: and t0, t0, s5
; RV32IM-NEXT: or t1, t0, t1
; RV32IM-NEXT: lui a6, 32768
; RV32IM-NEXT: and t4, t4, s5
; RV32IM-NEXT: or a4, t4, a4
; RV32IM-NEXT: lui t0, 65536
; RV32IM-NEXT: and a0, a0, s5
; RV32IM-NEXT: slli a0, a0, 8
; RV32IM-NEXT: or a0, a2, a0
; RV32IM-NEXT: lui a2, 131072
; RV32IM-NEXT: and a1, a1, s5
; RV32IM-NEXT: slli a1, a1, 8
; RV32IM-NEXT: or t4, a5, a1
; RV32IM-NEXT: lui a1, 262144
; RV32IM-NEXT: or a0, a0, t1
; RV32IM-NEXT: lui a5, 524288
; RV32IM-NEXT: addi t3, t3, -241
; RV32IM-NEXT: addi t5, t5, 819
; RV32IM-NEXT: addi t6, t6, 1365
; RV32IM-NEXT: slli s7, s7, 11
; RV32IM-NEXT: or a4, t4, a4
; RV32IM-NEXT: srli t4, a0, 4
; RV32IM-NEXT: and a0, a0, t3
; RV32IM-NEXT: and t4, t4, t3
; RV32IM-NEXT: slli a0, a0, 4
; RV32IM-NEXT: or a0, t4, a0
; RV32IM-NEXT: srli t4, a4, 4
; RV32IM-NEXT: and a4, a4, t3
; RV32IM-NEXT: and t4, t4, t3
; RV32IM-NEXT: slli a4, a4, 4
; RV32IM-NEXT: or a4, t4, a4
; RV32IM-NEXT: srli t4, a0, 2
; RV32IM-NEXT: and a0, a0, t5
; RV32IM-NEXT: and t4, t4, t5
; RV32IM-NEXT: slli a0, a0, 2
; RV32IM-NEXT: or a0, t4, a0
; RV32IM-NEXT: srli t4, a4, 2
; RV32IM-NEXT: and a4, a4, t5
; RV32IM-NEXT: and t4, t4, t5
; RV32IM-NEXT: slli a4, a4, 2
; RV32IM-NEXT: or t4, t4, a4
; RV32IM-NEXT: srli a4, a0, 1
; RV32IM-NEXT: and a0, a0, t6
; RV32IM-NEXT: and a4, a4, t6
; RV32IM-NEXT: slli a0, a0, 1
; RV32IM-NEXT: or a4, a4, a0
; RV32IM-NEXT: srli a0, t4, 1
; RV32IM-NEXT: and t4, t4, t6
; RV32IM-NEXT: and a0, a0, t6
; RV32IM-NEXT: slli t4, t4, 1
; RV32IM-NEXT: or a0, a0, t4
; RV32IM-NEXT: andi t4, a0, 2
; RV32IM-NEXT: and s6, a0, s7
; RV32IM-NEXT: lui t1, 1
; RV32IM-NEXT: and t1, a0, t1
; RV32IM-NEXT: sw t1, 84(sp) # 4-byte Folded Spill
; RV32IM-NEXT: lui t1, 2
; RV32IM-NEXT: and t1, a0, t1
; RV32IM-NEXT: sw t1, 80(sp) # 4-byte Folded Spill
; RV32IM-NEXT: and t1, a0, t2
; RV32IM-NEXT: sw t1, 76(sp) # 4-byte Folded Spill
; RV32IM-NEXT: and s0, a0, s0
; RV32IM-NEXT: and a3, a0, a3
; RV32IM-NEXT: sw a3, 72(sp) # 4-byte Folded Spill
; RV32IM-NEXT: and s1, a0, s1
; RV32IM-NEXT: sw s1, 68(sp) # 4-byte Folded Spill
; RV32IM-NEXT: and a3, a0, s2
; RV32IM-NEXT: sw a3, 64(sp) # 4-byte Folded Spill
; RV32IM-NEXT: and s3, a0, s3
; RV32IM-NEXT: and a3, a0, s4
; RV32IM-NEXT: sw a3, 60(sp) # 4-byte Folded Spill
; RV32IM-NEXT: and a3, a0, s8
; RV32IM-NEXT: sw a3, 56(sp) # 4-byte Folded Spill
; RV32IM-NEXT: and a3, a0, a7
; RV32IM-NEXT: sw a3, 52(sp) # 4-byte Folded Spill
; RV32IM-NEXT: and s9, a0, s9
; RV32IM-NEXT: and a3, a0, s10
; RV32IM-NEXT: sw a3, 48(sp) # 4-byte Folded Spill
; RV32IM-NEXT: and a3, a0, s11
; RV32IM-NEXT: sw a3, 44(sp) # 4-byte Folded Spill
; RV32IM-NEXT: and a3, a0, ra
; RV32IM-NEXT: sw a3, 40(sp) # 4-byte Folded Spill
; RV32IM-NEXT: and a3, a0, a6
; RV32IM-NEXT: sw a3, 36(sp) # 4-byte Folded Spill
; RV32IM-NEXT: and a3, a0, t0
; RV32IM-NEXT: sw a3, 32(sp) # 4-byte Folded Spill
; RV32IM-NEXT: and a2, a0, a2
; RV32IM-NEXT: sw a2, 28(sp) # 4-byte Folded Spill
; RV32IM-NEXT: and a1, a0, a1
; RV32IM-NEXT: sw a1, 24(sp) # 4-byte Folded Spill
; RV32IM-NEXT: and a5, a0, a5
; RV32IM-NEXT: sw a5, 20(sp) # 4-byte Folded Spill
; RV32IM-NEXT: andi a1, a0, 1
; RV32IM-NEXT: andi a2, a0, 4
; RV32IM-NEXT: andi a3, a0, 8
; RV32IM-NEXT: andi a5, a0, 16
; RV32IM-NEXT: andi a6, a0, 32
; RV32IM-NEXT: andi a7, a0, 64
; RV32IM-NEXT: andi t0, a0, 128
; RV32IM-NEXT: andi t1, a0, 256
; RV32IM-NEXT: andi t2, a0, 512
; RV32IM-NEXT: andi a0, a0, 1024
; RV32IM-NEXT: mul t4, a4, t4
; RV32IM-NEXT: sw t4, 8(sp) # 4-byte Folded Spill
; RV32IM-NEXT: mul ra, a4, a1
; RV32IM-NEXT: mul s11, a4, a2
; RV32IM-NEXT: mul s8, a4, a3
; RV32IM-NEXT: mul s7, a4, a5
; RV32IM-NEXT: mul s4, a4, a6
; RV32IM-NEXT: mul a1, a4, a7
; RV32IM-NEXT: sw a1, 12(sp) # 4-byte Folded Spill
; RV32IM-NEXT: mul a1, a4, t0
; RV32IM-NEXT: sw a1, 88(sp) # 4-byte Folded Spill
; RV32IM-NEXT: mul s2, a4, t1
; RV32IM-NEXT: mul t2, a4, t2
; RV32IM-NEXT: mul a0, a4, a0
; RV32IM-NEXT: sw a0, 4(sp) # 4-byte Folded Spill
; RV32IM-NEXT: mul a0, a4, s6
; RV32IM-NEXT: sw a0, 16(sp) # 4-byte Folded Spill
; RV32IM-NEXT: lw a0, 84(sp) # 4-byte Folded Reload
; RV32IM-NEXT: mul a0, a4, a0
; RV32IM-NEXT: sw a0, 84(sp) # 4-byte Folded Spill
; RV32IM-NEXT: lw a0, 80(sp) # 4-byte Folded Reload
; RV32IM-NEXT: mul t1, a4, a0
; RV32IM-NEXT: lw a0, 76(sp) # 4-byte Folded Reload
; RV32IM-NEXT: mul a7, a4, a0
; RV32IM-NEXT: mul s1, a4, s0
; RV32IM-NEXT: lw a0, 72(sp) # 4-byte Folded Reload
; RV32IM-NEXT: mul a0, a4, a0
; RV32IM-NEXT: sw a0, 72(sp) # 4-byte Folded Spill
; RV32IM-NEXT: lw a0, 68(sp) # 4-byte Folded Reload
; RV32IM-NEXT: mul a0, a4, a0
; RV32IM-NEXT: sw a0, 76(sp) # 4-byte Folded Spill
; RV32IM-NEXT: lw a0, 64(sp) # 4-byte Folded Reload
; RV32IM-NEXT: mul a0, a4, a0
; RV32IM-NEXT: sw a0, 80(sp) # 4-byte Folded Spill
; RV32IM-NEXT: mul a3, a4, s3
; RV32IM-NEXT: lw a0, 60(sp) # 4-byte Folded Reload
; RV32IM-NEXT: mul a2, a4, a0
; RV32IM-NEXT: lw a0, 56(sp) # 4-byte Folded Reload
; RV32IM-NEXT: mul a6, a4, a0
; RV32IM-NEXT: lw a0, 52(sp) # 4-byte Folded Reload
; RV32IM-NEXT: mul t4, a4, a0
; RV32IM-NEXT: mul s6, a4, s9
; RV32IM-NEXT: lw a0, 48(sp) # 4-byte Folded Reload
; RV32IM-NEXT: mul a1, a4, a0
; RV32IM-NEXT: lw a0, 44(sp) # 4-byte Folded Reload
; RV32IM-NEXT: mul a0, a4, a0
; RV32IM-NEXT: lw a5, 40(sp) # 4-byte Folded Reload
; RV32IM-NEXT: mul a5, a4, a5
; RV32IM-NEXT: lw t0, 36(sp) # 4-byte Folded Reload
; RV32IM-NEXT: mul t0, a4, t0
; RV32IM-NEXT: lw s0, 32(sp) # 4-byte Folded Reload
; RV32IM-NEXT: mul s0, a4, s0
; RV32IM-NEXT: lw s3, 28(sp) # 4-byte Folded Reload
; RV32IM-NEXT: mul s3, a4, s3
; RV32IM-NEXT: lw s9, 24(sp) # 4-byte Folded Reload
; RV32IM-NEXT: mul s9, a4, s9
; RV32IM-NEXT: lw s10, 20(sp) # 4-byte Folded Reload
; RV32IM-NEXT: mul a4, a4, s10
; RV32IM-NEXT: lw s10, 8(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor ra, ra, s10
; RV32IM-NEXT: xor s8, s11, s8
; RV32IM-NEXT: xor s4, s7, s4
; RV32IM-NEXT: xor t2, s2, t2
; RV32IM-NEXT: xor a7, t1, a7
; RV32IM-NEXT: xor a2, a3, a2
; RV32IM-NEXT: xor a0, a1, a0
; RV32IM-NEXT: xor a1, ra, s8
; RV32IM-NEXT: lw a3, 12(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor a3, s4, a3
; RV32IM-NEXT: lw t1, 4(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor t1, t2, t1
; RV32IM-NEXT: xor a7, a7, s1
; RV32IM-NEXT: xor a2, a2, a6
; RV32IM-NEXT: xor a0, a0, a5
; RV32IM-NEXT: xor a1, a1, a3
; RV32IM-NEXT: lw a3, 16(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor a3, t1, a3
; RV32IM-NEXT: lw a5, 72(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor a5, a7, a5
; RV32IM-NEXT: xor a2, a2, t4
; RV32IM-NEXT: xor a0, a0, t0
; RV32IM-NEXT: lw a6, 88(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor a1, a1, a6
; RV32IM-NEXT: lw a6, 84(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor a3, a3, a6
; RV32IM-NEXT: lw a6, 76(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor a5, a5, a6
; RV32IM-NEXT: xor a2, a2, s6
; RV32IM-NEXT: xor a0, a0, s0
; RV32IM-NEXT: lw a6, 80(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor a5, a5, a6
; RV32IM-NEXT: xor a0, a0, s3
; RV32IM-NEXT: xor a3, a1, a3
; RV32IM-NEXT: slli a1, a1, 24
; RV32IM-NEXT: xor a3, a3, a5
; RV32IM-NEXT: xor a0, a0, s9
; RV32IM-NEXT: xor a2, a3, a2
; RV32IM-NEXT: xor a0, a0, a4
; RV32IM-NEXT: and a3, a2, s5
; RV32IM-NEXT: srli a4, a2, 8
; RV32IM-NEXT: xor a0, a2, a0
; RV32IM-NEXT: slli a3, a3, 8
; RV32IM-NEXT: and a2, a4, s5
; RV32IM-NEXT: srli a0, a0, 24
; RV32IM-NEXT: or a1, a1, a3
; RV32IM-NEXT: or a0, a2, a0
; RV32IM-NEXT: or a0, a1, a0
; RV32IM-NEXT: srli a1, a0, 4
; RV32IM-NEXT: and a0, a0, t3
; RV32IM-NEXT: and a1, a1, t3
; RV32IM-NEXT: slli a0, a0, 4
; RV32IM-NEXT: or a0, a1, a0
; RV32IM-NEXT: srli a1, a0, 2
; RV32IM-NEXT: and a0, a0, t5
; RV32IM-NEXT: and a1, a1, t5
; RV32IM-NEXT: slli a0, a0, 2
; RV32IM-NEXT: or a0, a1, a0
; RV32IM-NEXT: srli a1, a0, 1
; RV32IM-NEXT: and a0, a0, t6
; RV32IM-NEXT: and a1, a1, t6
; RV32IM-NEXT: slli a0, a0, 1
; RV32IM-NEXT: or a0, a1, a0
; RV32IM-NEXT: lw ra, 140(sp) # 4-byte Folded Reload
; RV32IM-NEXT: lw s0, 136(sp) # 4-byte Folded Reload
; RV32IM-NEXT: lw s1, 132(sp) # 4-byte Folded Reload
; RV32IM-NEXT: lw s2, 128(sp) # 4-byte Folded Reload
; RV32IM-NEXT: lw s3, 124(sp) # 4-byte Folded Reload
; RV32IM-NEXT: lw s4, 120(sp) # 4-byte Folded Reload
; RV32IM-NEXT: lw s5, 116(sp) # 4-byte Folded Reload
; RV32IM-NEXT: lw s6, 112(sp) # 4-byte Folded Reload
; RV32IM-NEXT: lw s7, 108(sp) # 4-byte Folded Reload
; RV32IM-NEXT: lw s8, 104(sp) # 4-byte Folded Reload
; RV32IM-NEXT: lw s9, 100(sp) # 4-byte Folded Reload
; RV32IM-NEXT: lw s10, 96(sp) # 4-byte Folded Reload
; RV32IM-NEXT: lw s11, 92(sp) # 4-byte Folded Reload
; RV32IM-NEXT: addi sp, sp, 144
; RV32IM-NEXT: ret
;
; RV64IM-LABEL: clmulr_i32:
; RV64IM: # %bb.0:
; RV64IM-NEXT: addi sp, sp, -128
; RV64IM-NEXT: sd ra, 120(sp) # 8-byte Folded Spill
; RV64IM-NEXT: sd s0, 112(sp) # 8-byte Folded Spill
; RV64IM-NEXT: sd s1, 104(sp) # 8-byte Folded Spill
; RV64IM-NEXT: sd s2, 96(sp) # 8-byte Folded Spill
; RV64IM-NEXT: sd s3, 88(sp) # 8-byte Folded Spill
; RV64IM-NEXT: sd s4, 80(sp) # 8-byte Folded Spill
; RV64IM-NEXT: sd s5, 72(sp) # 8-byte Folded Spill
; RV64IM-NEXT: sd s6, 64(sp) # 8-byte Folded Spill
; RV64IM-NEXT: sd s7, 56(sp) # 8-byte Folded Spill
; RV64IM-NEXT: sd s8, 48(sp) # 8-byte Folded Spill
; RV64IM-NEXT: sd s9, 40(sp) # 8-byte Folded Spill
; RV64IM-NEXT: sd s10, 32(sp) # 8-byte Folded Spill
; RV64IM-NEXT: sd s11, 24(sp) # 8-byte Folded Spill
; RV64IM-NEXT: slli a6, a0, 32
; RV64IM-NEXT: andi t1, a1, 2
; RV64IM-NEXT: andi t3, a1, 1
; RV64IM-NEXT: andi a5, a1, 4
; RV64IM-NEXT: andi a7, a1, 8
; RV64IM-NEXT: andi a3, a1, 16
; RV64IM-NEXT: andi a4, a1, 32
; RV64IM-NEXT: andi a0, a1, 64
; RV64IM-NEXT: sd a0, 16(sp) # 8-byte Folded Spill
; RV64IM-NEXT: andi t0, a1, 128
; RV64IM-NEXT: andi t2, a1, 256
; RV64IM-NEXT: andi a0, a1, 512
; RV64IM-NEXT: sd a0, 8(sp) # 8-byte Folded Spill
; RV64IM-NEXT: li a2, 1
; RV64IM-NEXT: lui t5, 1
; RV64IM-NEXT: lui t6, 2
; RV64IM-NEXT: lui s0, 4
; RV64IM-NEXT: lui s2, 8
; RV64IM-NEXT: lui s3, 16
; RV64IM-NEXT: lui s4, 32
; RV64IM-NEXT: lui s5, 64
; RV64IM-NEXT: lui s6, 128
; RV64IM-NEXT: lui s7, 256
; RV64IM-NEXT: lui s8, 512
; RV64IM-NEXT: lui s9, 1024
; RV64IM-NEXT: lui s10, 2048
; RV64IM-NEXT: lui s11, 4096
; RV64IM-NEXT: lui ra, 8192
; RV64IM-NEXT: lui a0, 16384
; RV64IM-NEXT: srli s1, a6, 32
; RV64IM-NEXT: mul a6, s1, t1
; RV64IM-NEXT: mul t1, s1, t3
; RV64IM-NEXT: xor a6, t1, a6
; RV64IM-NEXT: sd a6, 0(sp) # 8-byte Folded Spill
; RV64IM-NEXT: lui t1, 32768
; RV64IM-NEXT: mul a5, s1, a5
; RV64IM-NEXT: mul a7, s1, a7
; RV64IM-NEXT: xor t4, a5, a7
; RV64IM-NEXT: lui a7, 65536
; RV64IM-NEXT: mul a3, s1, a3
; RV64IM-NEXT: mul a4, s1, a4
; RV64IM-NEXT: xor a6, a3, a4
; RV64IM-NEXT: lui t3, 131072
; RV64IM-NEXT: mul a4, s1, t0
; RV64IM-NEXT: mul t0, s1, t2
; RV64IM-NEXT: xor a5, a4, t0
; RV64IM-NEXT: lui t0, 262144
; RV64IM-NEXT: slli t2, a2, 11
; RV64IM-NEXT: and t5, a1, t5
; RV64IM-NEXT: and t6, a1, t6
; RV64IM-NEXT: and s0, a1, s0
; RV64IM-NEXT: and s2, a1, s2
; RV64IM-NEXT: and s3, a1, s3
; RV64IM-NEXT: and s4, a1, s4
; RV64IM-NEXT: and s5, a1, s5
; RV64IM-NEXT: and s6, a1, s6
; RV64IM-NEXT: and s7, a1, s7
; RV64IM-NEXT: and s8, a1, s8
; RV64IM-NEXT: and s9, a1, s9
; RV64IM-NEXT: and s10, a1, s10
; RV64IM-NEXT: and s11, a1, s11
; RV64IM-NEXT: and ra, a1, ra
; RV64IM-NEXT: and a2, a1, a0
; RV64IM-NEXT: and t1, a1, t1
; RV64IM-NEXT: and a7, a1, a7
; RV64IM-NEXT: and t3, a1, t3
; RV64IM-NEXT: and t0, a1, t0
; RV64IM-NEXT: and t2, a1, t2
; RV64IM-NEXT: andi a0, a1, 1024
; RV64IM-NEXT: srliw a1, a1, 31
; RV64IM-NEXT: slli a1, a1, 31
; RV64IM-NEXT: ld a3, 16(sp) # 8-byte Folded Reload
; RV64IM-NEXT: mul a3, s1, a3
; RV64IM-NEXT: ld a4, 8(sp) # 8-byte Folded Reload
; RV64IM-NEXT: mul a4, s1, a4
; RV64IM-NEXT: mul a0, s1, a0
; RV64IM-NEXT: mul t5, s1, t5
; RV64IM-NEXT: mul t6, s1, t6
; RV64IM-NEXT: mul s0, s1, s0
; RV64IM-NEXT: mul s2, s1, s2
; RV64IM-NEXT: mul s3, s1, s3
; RV64IM-NEXT: mul s4, s1, s4
; RV64IM-NEXT: mul s5, s1, s5
; RV64IM-NEXT: mul s6, s1, s6
; RV64IM-NEXT: mul s7, s1, s7
; RV64IM-NEXT: mul s8, s1, s8
; RV64IM-NEXT: mul s9, s1, s9
; RV64IM-NEXT: mul s10, s1, s10
; RV64IM-NEXT: mul s11, s1, s11
; RV64IM-NEXT: mul ra, s1, ra
; RV64IM-NEXT: mul a2, s1, a2
; RV64IM-NEXT: mul t1, s1, t1
; RV64IM-NEXT: mul a7, s1, a7
; RV64IM-NEXT: mul t3, s1, t3
; RV64IM-NEXT: mul t0, s1, t0
; RV64IM-NEXT: mul a1, s1, a1
; RV64IM-NEXT: mul t2, s1, t2
; RV64IM-NEXT: xor s1, s2, s3
; RV64IM-NEXT: xor s2, s8, s9
; RV64IM-NEXT: xor a7, a7, t3
; RV64IM-NEXT: ld t3, 0(sp) # 8-byte Folded Reload
; RV64IM-NEXT: xor t3, t3, t4
; RV64IM-NEXT: xor a3, a6, a3
; RV64IM-NEXT: xor a4, a5, a4
; RV64IM-NEXT: xor a5, t2, t5
; RV64IM-NEXT: xor a6, s1, s4
; RV64IM-NEXT: xor t2, s2, s10
; RV64IM-NEXT: xor a7, a7, t0
; RV64IM-NEXT: xor a3, t3, a3
; RV64IM-NEXT: xor a0, a4, a0
; RV64IM-NEXT: xor a4, a5, t6
; RV64IM-NEXT: xor a5, a6, s5
; RV64IM-NEXT: xor a6, t2, s11
; RV64IM-NEXT: xor a0, a3, a0
; RV64IM-NEXT: xor a4, a4, s0
; RV64IM-NEXT: xor a3, a5, s6
; RV64IM-NEXT: xor a5, a6, ra
; RV64IM-NEXT: xor a0, a0, a4
; RV64IM-NEXT: xor a3, a3, s7
; RV64IM-NEXT: xor a2, a5, a2
; RV64IM-NEXT: xor a0, a0, a3
; RV64IM-NEXT: xor a2, a2, t1
; RV64IM-NEXT: xor a0, a0, a2
; RV64IM-NEXT: xor a1, a7, a1
; RV64IM-NEXT: xor a0, a0, a1
; RV64IM-NEXT: slli a0, a0, 1
; RV64IM-NEXT: srli a0, a0, 32
; RV64IM-NEXT: ld ra, 120(sp) # 8-byte Folded Reload
; RV64IM-NEXT: ld s0, 112(sp) # 8-byte Folded Reload
; RV64IM-NEXT: ld s1, 104(sp) # 8-byte Folded Reload
; RV64IM-NEXT: ld s2, 96(sp) # 8-byte Folded Reload
; RV64IM-NEXT: ld s3, 88(sp) # 8-byte Folded Reload
; RV64IM-NEXT: ld s4, 80(sp) # 8-byte Folded Reload
; RV64IM-NEXT: ld s5, 72(sp) # 8-byte Folded Reload
; RV64IM-NEXT: ld s6, 64(sp) # 8-byte Folded Reload
; RV64IM-NEXT: ld s7, 56(sp) # 8-byte Folded Reload
; RV64IM-NEXT: ld s8, 48(sp) # 8-byte Folded Reload
; RV64IM-NEXT: ld s9, 40(sp) # 8-byte Folded Reload
; RV64IM-NEXT: ld s10, 32(sp) # 8-byte Folded Reload
; RV64IM-NEXT: ld s11, 24(sp) # 8-byte Folded Reload
; RV64IM-NEXT: addi sp, sp, 128
; RV64IM-NEXT: ret
%a.ext = zext i32 %a to i64
%b.ext = zext i32 %b to i64
%clmul = call i64 @llvm.clmul.i64(i64 %a.ext, i64 %b.ext)
%res.ext = lshr i64 %clmul, 31
%res = trunc i64 %res.ext to i32
ret i32 %res
}
; clmulh_i4: carry-less multiply "high half" of two i4 values, expressed as
; zext-to-i8 -> llvm.clmul.i8 -> lshr 4 -> trunc. Codegen (identical on
; RV32IM and RV64IM, hence the shared CHECK prefix) expands the clmul into
; per-bit AND/MUL/XOR partial products; note only the masks 2, 4 and 8 of %b
; are materialized — the bit-0 partial product cannot reach bits >= 4 and is
; folded away by the final srli 4.
define i4 @clmulh_i4(i4 %a, i4 %b) nounwind {
; CHECK-LABEL: clmulh_i4:
; CHECK: # %bb.0:
; CHECK-NEXT: andi a0, a0, 15
; CHECK-NEXT: andi a2, a1, 4
; CHECK-NEXT: andi a3, a1, 2
; CHECK-NEXT: andi a1, a1, 8
; CHECK-NEXT: mul a2, a0, a2
; CHECK-NEXT: mul a3, a0, a3
; CHECK-NEXT: xor a2, a3, a2
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: xor a0, a2, a0
; CHECK-NEXT: srli a0, a0, 4
; CHECK-NEXT: ret
%a.ext = zext i4 %a to i8
%b.ext = zext i4 %b to i8
%clmul = call i8 @llvm.clmul.i8(i8 %a.ext, i8 %b.ext)
%res.ext = lshr i8 %clmul, 4
%res = trunc i8 %res.ext to i4
ret i4 %res
}
; clmulh_i4_bitreverse: the classic identity clmulh(a, b) ==
; bitreverse(clmul(bitreverse(a), bitreverse(b))) >> 1. The expected CHECK
; lines are identical to clmulh_i4 above, i.e. the backend folds all three
; bitreverse calls plus the shift into the same direct high-half expansion
; rather than emitting any bit-reversal code.
define i4 @clmulh_i4_bitreverse(i4 %a, i4 %b) nounwind {
; CHECK-LABEL: clmulh_i4_bitreverse:
; CHECK: # %bb.0:
; CHECK-NEXT: andi a0, a0, 15
; CHECK-NEXT: andi a2, a1, 4
; CHECK-NEXT: andi a3, a1, 2
; CHECK-NEXT: andi a1, a1, 8
; CHECK-NEXT: mul a2, a0, a2
; CHECK-NEXT: mul a3, a0, a3
; CHECK-NEXT: xor a2, a3, a2
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: xor a0, a2, a0
; CHECK-NEXT: srli a0, a0, 4
; CHECK-NEXT: ret
%a.rev = call i4 @llvm.bitreverse.i4(i4 %a)
%b.rev = call i4 @llvm.bitreverse.i4(i4 %b)
%clmul = call i4 @llvm.clmul.i4(i4 %a.rev, i4 %b.rev)
%clmul.rev = call i4 @llvm.bitreverse.i4(i4 %clmul)
%res = lshr i4 %clmul.rev, 1
ret i4 %res
}
; clmulh_i8: carry-less multiply high of two i8 values via zext-to-i16 /
; llvm.clmul.i16 / lshr 8. RV32IM and RV64IM get separate check prefixes
; because the i8 extract of the i16 product differs: slli 16 + srli 24 on
; rv32 vs slli 48 + srli 56 on rv64. Both expand the clmul as eight masked
; partial products (masks 1..128 of %b) XOR-reduced in a balanced tree, with
; %a first zero-extended by zext.b.
define i8 @clmulh_i8(i8 %a, i8 %b) nounwind {
; RV32IM-LABEL: clmulh_i8:
; RV32IM: # %bb.0:
; RV32IM-NEXT: zext.b a0, a0
; RV32IM-NEXT: andi a2, a1, 2
; RV32IM-NEXT: andi a3, a1, 1
; RV32IM-NEXT: andi a4, a1, 4
; RV32IM-NEXT: andi a5, a1, 8
; RV32IM-NEXT: mul a2, a0, a2
; RV32IM-NEXT: mul a3, a0, a3
; RV32IM-NEXT: xor a2, a3, a2
; RV32IM-NEXT: andi a3, a1, 16
; RV32IM-NEXT: mul a4, a0, a4
; RV32IM-NEXT: mul a5, a0, a5
; RV32IM-NEXT: xor a4, a4, a5
; RV32IM-NEXT: andi a5, a1, 32
; RV32IM-NEXT: mul a3, a0, a3
; RV32IM-NEXT: mul a5, a0, a5
; RV32IM-NEXT: xor a3, a3, a5
; RV32IM-NEXT: xor a2, a2, a4
; RV32IM-NEXT: andi a4, a1, 64
; RV32IM-NEXT: andi a1, a1, 128
; RV32IM-NEXT: mul a4, a0, a4
; RV32IM-NEXT: xor a3, a3, a4
; RV32IM-NEXT: xor a2, a2, a3
; RV32IM-NEXT: mul a0, a0, a1
; RV32IM-NEXT: xor a0, a2, a0
; RV32IM-NEXT: slli a0, a0, 16
; RV32IM-NEXT: srli a0, a0, 24
; RV32IM-NEXT: ret
;
; RV64IM-LABEL: clmulh_i8:
; RV64IM: # %bb.0:
; RV64IM-NEXT: zext.b a0, a0
; RV64IM-NEXT: andi a2, a1, 2
; RV64IM-NEXT: andi a3, a1, 1
; RV64IM-NEXT: andi a4, a1, 4
; RV64IM-NEXT: andi a5, a1, 8
; RV64IM-NEXT: mul a2, a0, a2
; RV64IM-NEXT: mul a3, a0, a3
; RV64IM-NEXT: xor a2, a3, a2
; RV64IM-NEXT: andi a3, a1, 16
; RV64IM-NEXT: mul a4, a0, a4
; RV64IM-NEXT: mul a5, a0, a5
; RV64IM-NEXT: xor a4, a4, a5
; RV64IM-NEXT: andi a5, a1, 32
; RV64IM-NEXT: mul a3, a0, a3
; RV64IM-NEXT: mul a5, a0, a5
; RV64IM-NEXT: xor a3, a3, a5
; RV64IM-NEXT: xor a2, a2, a4
; RV64IM-NEXT: andi a4, a1, 64
; RV64IM-NEXT: andi a1, a1, 128
; RV64IM-NEXT: mul a4, a0, a4
; RV64IM-NEXT: xor a3, a3, a4
; RV64IM-NEXT: xor a2, a2, a3
; RV64IM-NEXT: mul a0, a0, a1
; RV64IM-NEXT: xor a0, a2, a0
; RV64IM-NEXT: slli a0, a0, 48
; RV64IM-NEXT: srli a0, a0, 56
; RV64IM-NEXT: ret
%a.ext = zext i8 %a to i16
%b.ext = zext i8 %b to i16
%clmul = call i16 @llvm.clmul.i16(i16 %a.ext, i16 %b.ext)
%res.ext = lshr i16 %clmul, 8
%res = trunc i16 %res.ext to i8
ret i8 %res
}
; clmulh_i16: carry-less multiply high of two i16 values via zext-to-i32 /
; llvm.clmul.i32 / lshr 16. Bit masks up to 1024 fit an andi immediate;
; masks 2048 (built as 1 << 11), 4096, 8192 and 16384 (lui 1/2/4/8) need an
; explicit and against a materialized constant. The two prefixes diverge
; only in the final extract: srli 16 on rv32 vs srliw 16 on rv64.
define i16 @clmulh_i16(i16 %a, i16 %b) nounwind {
; RV32IM-LABEL: clmulh_i16:
; RV32IM: # %bb.0:
; RV32IM-NEXT: slli a0, a0, 16
; RV32IM-NEXT: andi a2, a1, 2
; RV32IM-NEXT: andi a3, a1, 1
; RV32IM-NEXT: andi a4, a1, 4
; RV32IM-NEXT: andi a5, a1, 8
; RV32IM-NEXT: andi a6, a1, 16
; RV32IM-NEXT: andi a7, a1, 32
; RV32IM-NEXT: srli a0, a0, 16
; RV32IM-NEXT: mul a2, a0, a2
; RV32IM-NEXT: mul a3, a0, a3
; RV32IM-NEXT: xor a2, a3, a2
; RV32IM-NEXT: andi a3, a1, 64
; RV32IM-NEXT: mul a4, a0, a4
; RV32IM-NEXT: mul a5, a0, a5
; RV32IM-NEXT: xor a4, a4, a5
; RV32IM-NEXT: andi a5, a1, 128
; RV32IM-NEXT: mul a6, a0, a6
; RV32IM-NEXT: mul a7, a0, a7
; RV32IM-NEXT: xor a6, a6, a7
; RV32IM-NEXT: andi a7, a1, 256
; RV32IM-NEXT: mul a5, a0, a5
; RV32IM-NEXT: mul a7, a0, a7
; RV32IM-NEXT: xor a5, a5, a7
; RV32IM-NEXT: andi a7, a1, 512
; RV32IM-NEXT: xor a2, a2, a4
; RV32IM-NEXT: li a4, 1
; RV32IM-NEXT: mul a3, a0, a3
; RV32IM-NEXT: xor a3, a6, a3
; RV32IM-NEXT: lui a6, 1
; RV32IM-NEXT: mul a7, a0, a7
; RV32IM-NEXT: xor a5, a5, a7
; RV32IM-NEXT: lui a7, 2
; RV32IM-NEXT: slli a4, a4, 11
; RV32IM-NEXT: and a6, a1, a6
; RV32IM-NEXT: and a4, a1, a4
; RV32IM-NEXT: mul a6, a0, a6
; RV32IM-NEXT: mul a4, a0, a4
; RV32IM-NEXT: xor a4, a4, a6
; RV32IM-NEXT: lui a6, 4
; RV32IM-NEXT: xor a2, a2, a3
; RV32IM-NEXT: lui a3, 8
; RV32IM-NEXT: and a7, a1, a7
; RV32IM-NEXT: and a6, a1, a6
; RV32IM-NEXT: and a3, a1, a3
; RV32IM-NEXT: andi a1, a1, 1024
; RV32IM-NEXT: mul a1, a0, a1
; RV32IM-NEXT: xor a1, a5, a1
; RV32IM-NEXT: mul a5, a0, a7
; RV32IM-NEXT: xor a4, a4, a5
; RV32IM-NEXT: xor a1, a2, a1
; RV32IM-NEXT: mul a2, a0, a6
; RV32IM-NEXT: xor a2, a4, a2
; RV32IM-NEXT: xor a1, a1, a2
; RV32IM-NEXT: mul a0, a0, a3
; RV32IM-NEXT: xor a0, a1, a0
; RV32IM-NEXT: srli a0, a0, 16
; RV32IM-NEXT: ret
;
; RV64IM-LABEL: clmulh_i16:
; RV64IM: # %bb.0:
; RV64IM-NEXT: slli a0, a0, 48
; RV64IM-NEXT: andi a2, a1, 2
; RV64IM-NEXT: andi a3, a1, 1
; RV64IM-NEXT: andi a4, a1, 4
; RV64IM-NEXT: andi a5, a1, 8
; RV64IM-NEXT: andi a6, a1, 16
; RV64IM-NEXT: andi a7, a1, 32
; RV64IM-NEXT: srli a0, a0, 48
; RV64IM-NEXT: mul a2, a0, a2
; RV64IM-NEXT: mul a3, a0, a3
; RV64IM-NEXT: xor a2, a3, a2
; RV64IM-NEXT: andi a3, a1, 64
; RV64IM-NEXT: mul a4, a0, a4
; RV64IM-NEXT: mul a5, a0, a5
; RV64IM-NEXT: xor a4, a4, a5
; RV64IM-NEXT: andi a5, a1, 128
; RV64IM-NEXT: mul a6, a0, a6
; RV64IM-NEXT: mul a7, a0, a7
; RV64IM-NEXT: xor a6, a6, a7
; RV64IM-NEXT: andi a7, a1, 256
; RV64IM-NEXT: mul a5, a0, a5
; RV64IM-NEXT: mul a7, a0, a7
; RV64IM-NEXT: xor a5, a5, a7
; RV64IM-NEXT: andi a7, a1, 512
; RV64IM-NEXT: xor a2, a2, a4
; RV64IM-NEXT: li a4, 1
; RV64IM-NEXT: mul a3, a0, a3
; RV64IM-NEXT: xor a3, a6, a3
; RV64IM-NEXT: lui a6, 1
; RV64IM-NEXT: mul a7, a0, a7
; RV64IM-NEXT: xor a5, a5, a7
; RV64IM-NEXT: lui a7, 2
; RV64IM-NEXT: slli a4, a4, 11
; RV64IM-NEXT: and a6, a1, a6
; RV64IM-NEXT: and a4, a1, a4
; RV64IM-NEXT: mul a6, a0, a6
; RV64IM-NEXT: mul a4, a0, a4
; RV64IM-NEXT: xor a4, a4, a6
; RV64IM-NEXT: lui a6, 4
; RV64IM-NEXT: xor a2, a2, a3
; RV64IM-NEXT: lui a3, 8
; RV64IM-NEXT: and a7, a1, a7
; RV64IM-NEXT: and a6, a1, a6
; RV64IM-NEXT: and a3, a1, a3
; RV64IM-NEXT: andi a1, a1, 1024
; RV64IM-NEXT: mul a1, a0, a1
; RV64IM-NEXT: xor a1, a5, a1
; RV64IM-NEXT: mul a5, a0, a7
; RV64IM-NEXT: xor a4, a4, a5
; RV64IM-NEXT: xor a1, a2, a1
; RV64IM-NEXT: mul a2, a0, a6
; RV64IM-NEXT: xor a2, a4, a2
; RV64IM-NEXT: xor a1, a1, a2
; RV64IM-NEXT: mul a0, a0, a3
; RV64IM-NEXT: xor a0, a1, a0
; RV64IM-NEXT: srliw a0, a0, 16
; RV64IM-NEXT: ret
%a.ext = zext i16 %a to i32
%b.ext = zext i16 %b to i32
%clmul = call i32 @llvm.clmul.i32(i32 %a.ext, i32 %b.ext)
%res.ext = lshr i32 %clmul, 16
%res = trunc i32 %res.ext to i16
ret i16 %res
}
define i32 @clmulh_i32(i32 %a, i32 %b) nounwind {
; RV32IM-LABEL: clmulh_i32:
; RV32IM: # %bb.0:
; RV32IM-NEXT: addi sp, sp, -144
; RV32IM-NEXT: sw ra, 140(sp) # 4-byte Folded Spill
; RV32IM-NEXT: sw s0, 136(sp) # 4-byte Folded Spill
; RV32IM-NEXT: sw s1, 132(sp) # 4-byte Folded Spill
; RV32IM-NEXT: sw s2, 128(sp) # 4-byte Folded Spill
; RV32IM-NEXT: sw s3, 124(sp) # 4-byte Folded Spill
; RV32IM-NEXT: sw s4, 120(sp) # 4-byte Folded Spill
; RV32IM-NEXT: sw s5, 116(sp) # 4-byte Folded Spill
; RV32IM-NEXT: sw s6, 112(sp) # 4-byte Folded Spill
; RV32IM-NEXT: sw s7, 108(sp) # 4-byte Folded Spill
; RV32IM-NEXT: sw s8, 104(sp) # 4-byte Folded Spill
; RV32IM-NEXT: sw s9, 100(sp) # 4-byte Folded Spill
; RV32IM-NEXT: sw s10, 96(sp) # 4-byte Folded Spill
; RV32IM-NEXT: sw s11, 92(sp) # 4-byte Folded Spill
; RV32IM-NEXT: srli t0, a0, 8
; RV32IM-NEXT: lui a3, 16
; RV32IM-NEXT: srli t1, a0, 24
; RV32IM-NEXT: slli a2, a0, 24
; RV32IM-NEXT: lui s1, 61681
; RV32IM-NEXT: lui s3, 209715
; RV32IM-NEXT: lui a6, 349525
; RV32IM-NEXT: srli t4, a1, 8
; RV32IM-NEXT: srli t6, a1, 24
; RV32IM-NEXT: slli a4, a1, 24
; RV32IM-NEXT: li t3, 1
; RV32IM-NEXT: lui s11, 2
; RV32IM-NEXT: lui t2, 4
; RV32IM-NEXT: lui s10, 8
; RV32IM-NEXT: lui t5, 32
; RV32IM-NEXT: lui s0, 64
; RV32IM-NEXT: lui s2, 128
; RV32IM-NEXT: lui s4, 256
; RV32IM-NEXT: lui s5, 512
; RV32IM-NEXT: lui s6, 1024
; RV32IM-NEXT: lui s7, 2048
; RV32IM-NEXT: lui s8, 4096
; RV32IM-NEXT: lui s9, 8192
; RV32IM-NEXT: lui ra, 16384
; RV32IM-NEXT: addi a3, a3, -256
; RV32IM-NEXT: lui a5, 16
; RV32IM-NEXT: and t0, t0, a3
; RV32IM-NEXT: or t1, t0, t1
; RV32IM-NEXT: lui a7, 32768
; RV32IM-NEXT: and t4, t4, a3
; RV32IM-NEXT: or t6, t4, t6
; RV32IM-NEXT: lui t0, 65536
; RV32IM-NEXT: and a0, a0, a3
; RV32IM-NEXT: mv t4, a3
; RV32IM-NEXT: sw a3, 88(sp) # 4-byte Folded Spill
; RV32IM-NEXT: slli a0, a0, 8
; RV32IM-NEXT: or a2, a2, a0
; RV32IM-NEXT: lui a3, 131072
; RV32IM-NEXT: and a1, a1, t4
; RV32IM-NEXT: slli a1, a1, 8
; RV32IM-NEXT: or a0, a4, a1
; RV32IM-NEXT: lui a1, 262144
; RV32IM-NEXT: addi s1, s1, -241
; RV32IM-NEXT: addi s3, s3, 819
; RV32IM-NEXT: or a2, a2, t1
; RV32IM-NEXT: addi a4, a6, 1365
; RV32IM-NEXT: sw a4, 84(sp) # 4-byte Folded Spill
; RV32IM-NEXT: or a0, a0, t6
; RV32IM-NEXT: srli a6, a2, 4
; RV32IM-NEXT: and a2, a2, s1
; RV32IM-NEXT: and a6, a6, s1
; RV32IM-NEXT: slli a2, a2, 4
; RV32IM-NEXT: or a2, a6, a2
; RV32IM-NEXT: srli a6, a0, 4
; RV32IM-NEXT: and a0, a0, s1
; RV32IM-NEXT: and a6, a6, s1
; RV32IM-NEXT: slli a0, a0, 4
; RV32IM-NEXT: or a0, a6, a0
; RV32IM-NEXT: srli a6, a2, 2
; RV32IM-NEXT: and a2, a2, s3
; RV32IM-NEXT: and a6, a6, s3
; RV32IM-NEXT: slli a2, a2, 2
; RV32IM-NEXT: or a2, a6, a2
; RV32IM-NEXT: srli a6, a0, 2
; RV32IM-NEXT: and a0, a0, s3
; RV32IM-NEXT: and a6, a6, s3
; RV32IM-NEXT: slli a0, a0, 2
; RV32IM-NEXT: or a0, a6, a0
; RV32IM-NEXT: srli a6, a2, 1
; RV32IM-NEXT: and a2, a2, a4
; RV32IM-NEXT: and a6, a6, a4
; RV32IM-NEXT: slli a2, a2, 1
; RV32IM-NEXT: or a6, a6, a2
; RV32IM-NEXT: srli a2, a0, 1
; RV32IM-NEXT: and a0, a0, a4
; RV32IM-NEXT: and a2, a2, a4
; RV32IM-NEXT: slli a0, a0, 1
; RV32IM-NEXT: or a0, a2, a0
; RV32IM-NEXT: lui a2, 524288
; RV32IM-NEXT: slli t3, t3, 11
; RV32IM-NEXT: and t3, a0, t3
; RV32IM-NEXT: lui a4, 1
; RV32IM-NEXT: and t4, a0, a4
; RV32IM-NEXT: and s11, a0, s11
; RV32IM-NEXT: and a4, a0, t2
; RV32IM-NEXT: sw a4, 80(sp) # 4-byte Folded Spill
; RV32IM-NEXT: and a4, a0, s10
; RV32IM-NEXT: sw a4, 72(sp) # 4-byte Folded Spill
; RV32IM-NEXT: and a5, a0, a5
; RV32IM-NEXT: sw a5, 68(sp) # 4-byte Folded Spill
; RV32IM-NEXT: and a4, a0, t5
; RV32IM-NEXT: sw a4, 64(sp) # 4-byte Folded Spill
; RV32IM-NEXT: and s0, a0, s0
; RV32IM-NEXT: and a4, a0, s2
; RV32IM-NEXT: sw a4, 60(sp) # 4-byte Folded Spill
; RV32IM-NEXT: and s4, a0, s4
; RV32IM-NEXT: and a4, a0, s5
; RV32IM-NEXT: sw a4, 56(sp) # 4-byte Folded Spill
; RV32IM-NEXT: and a4, a0, s6
; RV32IM-NEXT: sw a4, 52(sp) # 4-byte Folded Spill
; RV32IM-NEXT: and a4, a0, s7
; RV32IM-NEXT: sw a4, 48(sp) # 4-byte Folded Spill
; RV32IM-NEXT: and a4, a0, s8
; RV32IM-NEXT: sw a4, 44(sp) # 4-byte Folded Spill
; RV32IM-NEXT: and a4, a0, s9
; RV32IM-NEXT: sw a4, 40(sp) # 4-byte Folded Spill
; RV32IM-NEXT: and a4, a0, ra
; RV32IM-NEXT: sw a4, 36(sp) # 4-byte Folded Spill
; RV32IM-NEXT: and a4, a0, a7
; RV32IM-NEXT: sw a4, 32(sp) # 4-byte Folded Spill
; RV32IM-NEXT: and a4, a0, t0
; RV32IM-NEXT: sw a4, 28(sp) # 4-byte Folded Spill
; RV32IM-NEXT: and a3, a0, a3
; RV32IM-NEXT: sw a3, 24(sp) # 4-byte Folded Spill
; RV32IM-NEXT: and a1, a0, a1
; RV32IM-NEXT: sw a1, 20(sp) # 4-byte Folded Spill
; RV32IM-NEXT: and a2, a0, a2
; RV32IM-NEXT: sw a2, 16(sp) # 4-byte Folded Spill
; RV32IM-NEXT: andi ra, a0, 2
; RV32IM-NEXT: andi a1, a0, 1
; RV32IM-NEXT: andi a2, a0, 4
; RV32IM-NEXT: andi a3, a0, 8
; RV32IM-NEXT: andi a4, a0, 16
; RV32IM-NEXT: andi a5, a0, 32
; RV32IM-NEXT: andi a7, a0, 64
; RV32IM-NEXT: andi t0, a0, 128
; RV32IM-NEXT: andi t1, a0, 256
; RV32IM-NEXT: andi t2, a0, 512
; RV32IM-NEXT: andi a0, a0, 1024
; RV32IM-NEXT: mul ra, a6, ra
; RV32IM-NEXT: mul s10, a6, a1
; RV32IM-NEXT: mul s9, a6, a2
; RV32IM-NEXT: mul s5, a6, a3
; RV32IM-NEXT: mul s6, a6, a4
; RV32IM-NEXT: mul s2, a6, a5
; RV32IM-NEXT: mul a1, a6, a7
; RV32IM-NEXT: sw a1, 4(sp) # 4-byte Folded Spill
; RV32IM-NEXT: mul a1, a6, t0
; RV32IM-NEXT: sw a1, 76(sp) # 4-byte Folded Spill
; RV32IM-NEXT: mul t6, a6, t1
; RV32IM-NEXT: mul t2, a6, t2
; RV32IM-NEXT: mul s7, a6, a0
; RV32IM-NEXT: mul a0, a6, t3
; RV32IM-NEXT: sw a0, 8(sp) # 4-byte Folded Spill
; RV32IM-NEXT: mul a0, a6, t4
; RV32IM-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
; RV32IM-NEXT: mul t1, a6, s11
; RV32IM-NEXT: lw a0, 80(sp) # 4-byte Folded Reload
; RV32IM-NEXT: mul a7, a6, a0
; RV32IM-NEXT: lw a0, 72(sp) # 4-byte Folded Reload
; RV32IM-NEXT: mul t5, a6, a0
; RV32IM-NEXT: lw a0, 68(sp) # 4-byte Folded Reload
; RV32IM-NEXT: mul s8, a6, a0
; RV32IM-NEXT: lw a0, 64(sp) # 4-byte Folded Reload
; RV32IM-NEXT: mul a0, a6, a0
; RV32IM-NEXT: sw a0, 68(sp) # 4-byte Folded Spill
; RV32IM-NEXT: mul a0, a6, s0
; RV32IM-NEXT: sw a0, 72(sp) # 4-byte Folded Spill
; RV32IM-NEXT: lw a0, 60(sp) # 4-byte Folded Reload
; RV32IM-NEXT: mul a3, a6, a0
; RV32IM-NEXT: mul a2, a6, s4
; RV32IM-NEXT: lw a0, 56(sp) # 4-byte Folded Reload
; RV32IM-NEXT: mul a5, a6, a0
; RV32IM-NEXT: lw a0, 52(sp) # 4-byte Folded Reload
; RV32IM-NEXT: mul t3, a6, a0
; RV32IM-NEXT: lw a0, 48(sp) # 4-byte Folded Reload
; RV32IM-NEXT: mul s4, a6, a0
; RV32IM-NEXT: lw a0, 44(sp) # 4-byte Folded Reload
; RV32IM-NEXT: mul a1, a6, a0
; RV32IM-NEXT: lw a0, 40(sp) # 4-byte Folded Reload
; RV32IM-NEXT: mul a0, a6, a0
; RV32IM-NEXT: lw a4, 36(sp) # 4-byte Folded Reload
; RV32IM-NEXT: mul a4, a6, a4
; RV32IM-NEXT: lw t0, 32(sp) # 4-byte Folded Reload
; RV32IM-NEXT: mul t0, a6, t0
; RV32IM-NEXT: lw t4, 28(sp) # 4-byte Folded Reload
; RV32IM-NEXT: mul t4, a6, t4
; RV32IM-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32IM-NEXT: mul s0, a6, s0
; RV32IM-NEXT: lw s11, 20(sp) # 4-byte Folded Reload
; RV32IM-NEXT: mul s11, a6, s11
; RV32IM-NEXT: sw s11, 80(sp) # 4-byte Folded Spill
; RV32IM-NEXT: lw s11, 16(sp) # 4-byte Folded Reload
; RV32IM-NEXT: mul a6, a6, s11
; RV32IM-NEXT: xor s10, s10, ra
; RV32IM-NEXT: xor s5, s9, s5
; RV32IM-NEXT: xor s2, s6, s2
; RV32IM-NEXT: xor t2, t6, t2
; RV32IM-NEXT: xor a7, t1, a7
; RV32IM-NEXT: xor a2, a3, a2
; RV32IM-NEXT: xor a0, a1, a0
; RV32IM-NEXT: xor a1, s10, s5
; RV32IM-NEXT: lw a3, 4(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor a3, s2, a3
; RV32IM-NEXT: xor t1, t2, s7
; RV32IM-NEXT: xor a7, a7, t5
; RV32IM-NEXT: xor a2, a2, a5
; RV32IM-NEXT: xor a0, a0, a4
; RV32IM-NEXT: xor a1, a1, a3
; RV32IM-NEXT: lw a3, 8(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor a3, t1, a3
; RV32IM-NEXT: xor a4, a7, s8
; RV32IM-NEXT: xor a2, a2, t3
; RV32IM-NEXT: xor a0, a0, t0
; RV32IM-NEXT: lw a5, 76(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor a1, a1, a5
; RV32IM-NEXT: lw a5, 12(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor a3, a3, a5
; RV32IM-NEXT: lw a5, 68(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor a4, a4, a5
; RV32IM-NEXT: xor a2, a2, s4
; RV32IM-NEXT: xor a0, a0, t4
; RV32IM-NEXT: lw a5, 72(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor a4, a4, a5
; RV32IM-NEXT: xor a0, a0, s0
; RV32IM-NEXT: lui a5, 349525
; RV32IM-NEXT: addi a5, a5, 1364
; RV32IM-NEXT: xor a3, a1, a3
; RV32IM-NEXT: slli a1, a1, 24
; RV32IM-NEXT: xor a3, a3, a4
; RV32IM-NEXT: lw a4, 80(sp) # 4-byte Folded Reload
; RV32IM-NEXT: xor a0, a0, a4
; RV32IM-NEXT: xor a2, a3, a2
; RV32IM-NEXT: xor a0, a0, a6
; RV32IM-NEXT: lw a6, 88(sp) # 4-byte Folded Reload
; RV32IM-NEXT: and a3, a2, a6
; RV32IM-NEXT: srli a4, a2, 8
; RV32IM-NEXT: xor a0, a2, a0
; RV32IM-NEXT: slli a3, a3, 8
; RV32IM-NEXT: and a2, a4, a6
; RV32IM-NEXT: srli a0, a0, 24
; RV32IM-NEXT: or a1, a1, a3
; RV32IM-NEXT: or a0, a2, a0
; RV32IM-NEXT: or a0, a1, a0
; RV32IM-NEXT: srli a1, a0, 4
; RV32IM-NEXT: and a0, a0, s1
; RV32IM-NEXT: and a1, a1, s1
; RV32IM-NEXT: slli a0, a0, 4
; RV32IM-NEXT: or a0, a1, a0
; RV32IM-NEXT: srli a1, a0, 2
; RV32IM-NEXT: and a0, a0, s3
; RV32IM-NEXT: and a1, a1, s3
; RV32IM-NEXT: slli a0, a0, 2
; RV32IM-NEXT: or a0, a1, a0
; RV32IM-NEXT: srli a1, a0, 1
; RV32IM-NEXT: lw a2, 84(sp) # 4-byte Folded Reload
; RV32IM-NEXT: and a0, a0, a2
; RV32IM-NEXT: and a1, a1, a5
; RV32IM-NEXT: slli a0, a0, 1
; RV32IM-NEXT: or a0, a1, a0
; RV32IM-NEXT: srli a0, a0, 1
; RV32IM-NEXT: lw ra, 140(sp) # 4-byte Folded Reload
; RV32IM-NEXT: lw s0, 136(sp) # 4-byte Folded Reload
; RV32IM-NEXT: lw s1, 132(sp) # 4-byte Folded Reload
; RV32IM-NEXT: lw s2, 128(sp) # 4-byte Folded Reload
; RV32IM-NEXT: lw s3, 124(sp) # 4-byte Folded Reload
; RV32IM-NEXT: lw s4, 120(sp) # 4-byte Folded Reload
; RV32IM-NEXT: lw s5, 116(sp) # 4-byte Folded Reload
; RV32IM-NEXT: lw s6, 112(sp) # 4-byte Folded Reload
; RV32IM-NEXT: lw s7, 108(sp) # 4-byte Folded Reload
; RV32IM-NEXT: lw s8, 104(sp) # 4-byte Folded Reload
; RV32IM-NEXT: lw s9, 100(sp) # 4-byte Folded Reload
; RV32IM-NEXT: lw s10, 96(sp) # 4-byte Folded Reload
; RV32IM-NEXT: lw s11, 92(sp) # 4-byte Folded Reload
; RV32IM-NEXT: addi sp, sp, 144
; RV32IM-NEXT: ret
;
; RV64IM-LABEL: clmulh_i32:
; RV64IM: # %bb.0:
; RV64IM-NEXT: addi sp, sp, -128
; RV64IM-NEXT: sd ra, 120(sp) # 8-byte Folded Spill
; RV64IM-NEXT: sd s0, 112(sp) # 8-byte Folded Spill
; RV64IM-NEXT: sd s1, 104(sp) # 8-byte Folded Spill
; RV64IM-NEXT: sd s2, 96(sp) # 8-byte Folded Spill
; RV64IM-NEXT: sd s3, 88(sp) # 8-byte Folded Spill
; RV64IM-NEXT: sd s4, 80(sp) # 8-byte Folded Spill
; RV64IM-NEXT: sd s5, 72(sp) # 8-byte Folded Spill
; RV64IM-NEXT: sd s6, 64(sp) # 8-byte Folded Spill
; RV64IM-NEXT: sd s7, 56(sp) # 8-byte Folded Spill
; RV64IM-NEXT: sd s8, 48(sp) # 8-byte Folded Spill
; RV64IM-NEXT: sd s9, 40(sp) # 8-byte Folded Spill
; RV64IM-NEXT: sd s10, 32(sp) # 8-byte Folded Spill
; RV64IM-NEXT: sd s11, 24(sp) # 8-byte Folded Spill
; RV64IM-NEXT: slli a6, a0, 32
; RV64IM-NEXT: andi t1, a1, 2
; RV64IM-NEXT: andi t3, a1, 1
; RV64IM-NEXT: andi a5, a1, 4
; RV64IM-NEXT: andi a7, a1, 8
; RV64IM-NEXT: andi a3, a1, 16
; RV64IM-NEXT: andi a4, a1, 32
; RV64IM-NEXT: andi a0, a1, 64
; RV64IM-NEXT: sd a0, 16(sp) # 8-byte Folded Spill
; RV64IM-NEXT: andi t0, a1, 128
; RV64IM-NEXT: andi t2, a1, 256
; RV64IM-NEXT: andi a0, a1, 512
; RV64IM-NEXT: sd a0, 8(sp) # 8-byte Folded Spill
; RV64IM-NEXT: li a2, 1
; RV64IM-NEXT: lui t5, 1
; RV64IM-NEXT: lui t6, 2
; RV64IM-NEXT: lui s0, 4
; RV64IM-NEXT: lui s2, 8
; RV64IM-NEXT: lui s3, 16
; RV64IM-NEXT: lui s4, 32
; RV64IM-NEXT: lui s5, 64
; RV64IM-NEXT: lui s6, 128
; RV64IM-NEXT: lui s7, 256
; RV64IM-NEXT: lui s8, 512
; RV64IM-NEXT: lui s9, 1024
; RV64IM-NEXT: lui s10, 2048
; RV64IM-NEXT: lui s11, 4096
; RV64IM-NEXT: lui ra, 8192
; RV64IM-NEXT: lui a0, 16384
; RV64IM-NEXT: srli s1, a6, 32
; RV64IM-NEXT: mul a6, s1, t1
; RV64IM-NEXT: mul t1, s1, t3
; RV64IM-NEXT: xor a6, t1, a6
; RV64IM-NEXT: sd a6, 0(sp) # 8-byte Folded Spill
; RV64IM-NEXT: lui t1, 32768
; RV64IM-NEXT: mul a5, s1, a5
; RV64IM-NEXT: mul a7, s1, a7
; RV64IM-NEXT: xor t4, a5, a7
; RV64IM-NEXT: lui a7, 65536
; RV64IM-NEXT: mul a3, s1, a3
; RV64IM-NEXT: mul a4, s1, a4
; RV64IM-NEXT: xor a6, a3, a4
; RV64IM-NEXT: lui t3, 131072
; RV64IM-NEXT: mul a4, s1, t0
; RV64IM-NEXT: mul t0, s1, t2
; RV64IM-NEXT: xor a5, a4, t0
; RV64IM-NEXT: lui t0, 262144
; RV64IM-NEXT: slli t2, a2, 11
; RV64IM-NEXT: and t5, a1, t5
; RV64IM-NEXT: and t6, a1, t6
; RV64IM-NEXT: and s0, a1, s0
; RV64IM-NEXT: and s2, a1, s2
; RV64IM-NEXT: and s3, a1, s3
; RV64IM-NEXT: and s4, a1, s4
; RV64IM-NEXT: and s5, a1, s5
; RV64IM-NEXT: and s6, a1, s6
; RV64IM-NEXT: and s7, a1, s7
; RV64IM-NEXT: and s8, a1, s8
; RV64IM-NEXT: and s9, a1, s9
; RV64IM-NEXT: and s10, a1, s10
; RV64IM-NEXT: and s11, a1, s11
; RV64IM-NEXT: and ra, a1, ra
; RV64IM-NEXT: and a2, a1, a0
; RV64IM-NEXT: and t1, a1, t1
; RV64IM-NEXT: and a7, a1, a7
; RV64IM-NEXT: and t3, a1, t3
; RV64IM-NEXT: and t0, a1, t0
; RV64IM-NEXT: and t2, a1, t2
; RV64IM-NEXT: andi a0, a1, 1024
; RV64IM-NEXT: srliw a1, a1, 31
; RV64IM-NEXT: slli a1, a1, 31
; RV64IM-NEXT: ld a3, 16(sp) # 8-byte Folded Reload
; RV64IM-NEXT: mul a3, s1, a3
; RV64IM-NEXT: ld a4, 8(sp) # 8-byte Folded Reload
; RV64IM-NEXT: mul a4, s1, a4
; RV64IM-NEXT: mul a0, s1, a0
; RV64IM-NEXT: mul t5, s1, t5
; RV64IM-NEXT: mul t6, s1, t6
; RV64IM-NEXT: mul s0, s1, s0
; RV64IM-NEXT: mul s2, s1, s2
; RV64IM-NEXT: mul s3, s1, s3
; RV64IM-NEXT: mul s4, s1, s4
; RV64IM-NEXT: mul s5, s1, s5
; RV64IM-NEXT: mul s6, s1, s6
; RV64IM-NEXT: mul s7, s1, s7
; RV64IM-NEXT: mul s8, s1, s8
; RV64IM-NEXT: mul s9, s1, s9
; RV64IM-NEXT: mul s10, s1, s10
; RV64IM-NEXT: mul s11, s1, s11
; RV64IM-NEXT: mul ra, s1, ra
; RV64IM-NEXT: mul a2, s1, a2
; RV64IM-NEXT: mul t1, s1, t1
; RV64IM-NEXT: mul a7, s1, a7
; RV64IM-NEXT: mul t3, s1, t3
; RV64IM-NEXT: mul t0, s1, t0
; RV64IM-NEXT: mul a1, s1, a1
; RV64IM-NEXT: mul t2, s1, t2
; RV64IM-NEXT: xor s1, s2, s3
; RV64IM-NEXT: xor s2, s8, s9
; RV64IM-NEXT: xor a7, a7, t3
; RV64IM-NEXT: ld t3, 0(sp) # 8-byte Folded Reload
; RV64IM-NEXT: xor t3, t3, t4
; RV64IM-NEXT: xor a3, a6, a3
; RV64IM-NEXT: xor a4, a5, a4
; RV64IM-NEXT: xor a5, t2, t5
; RV64IM-NEXT: xor a6, s1, s4
; RV64IM-NEXT: xor t2, s2, s10
; RV64IM-NEXT: xor a7, a7, t0
; RV64IM-NEXT: xor a3, t3, a3
; RV64IM-NEXT: xor a0, a4, a0
; RV64IM-NEXT: xor a4, a5, t6
; RV64IM-NEXT: xor a5, a6, s5
; RV64IM-NEXT: xor a6, t2, s11
; RV64IM-NEXT: xor a0, a3, a0
; RV64IM-NEXT: xor a4, a4, s0
; RV64IM-NEXT: xor a3, a5, s6
; RV64IM-NEXT: xor a5, a6, ra
; RV64IM-NEXT: xor a0, a0, a4
; RV64IM-NEXT: xor a3, a3, s7
; RV64IM-NEXT: xor a2, a5, a2
; RV64IM-NEXT: xor a0, a0, a3
; RV64IM-NEXT: xor a2, a2, t1
; RV64IM-NEXT: xor a0, a0, a2
; RV64IM-NEXT: xor a1, a7, a1
; RV64IM-NEXT: xor a0, a0, a1
; RV64IM-NEXT: srli a0, a0, 32
; RV64IM-NEXT: ld ra, 120(sp) # 8-byte Folded Reload
; RV64IM-NEXT: ld s0, 112(sp) # 8-byte Folded Reload
; RV64IM-NEXT: ld s1, 104(sp) # 8-byte Folded Reload
; RV64IM-NEXT: ld s2, 96(sp) # 8-byte Folded Reload
; RV64IM-NEXT: ld s3, 88(sp) # 8-byte Folded Reload
; RV64IM-NEXT: ld s4, 80(sp) # 8-byte Folded Reload
; RV64IM-NEXT: ld s5, 72(sp) # 8-byte Folded Reload
; RV64IM-NEXT: ld s6, 64(sp) # 8-byte Folded Reload
; RV64IM-NEXT: ld s7, 56(sp) # 8-byte Folded Reload
; RV64IM-NEXT: ld s8, 48(sp) # 8-byte Folded Reload
; RV64IM-NEXT: ld s9, 40(sp) # 8-byte Folded Reload
; RV64IM-NEXT: ld s10, 32(sp) # 8-byte Folded Reload
; RV64IM-NEXT: ld s11, 24(sp) # 8-byte Folded Reload
; RV64IM-NEXT: addi sp, sp, 128
; RV64IM-NEXT: ret
%a.ext = zext i32 %a to i64
%b.ext = zext i32 %b to i64
%clmul = call i64 @llvm.clmul.i64(i64 %a.ext, i64 %b.ext)
%res.ext = lshr i64 %clmul, 32
%res = trunc i64 %res.ext to i32
ret i32 %res
}