| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc -mtriple=riscv32 -mattr=+experimental-p,+zbb,+m -verify-machineinstrs \ |
| ; RUN: < %s | FileCheck %s |
| |
; Absolute value. i32 selects the P-extension 'abs' instruction directly;
; i64 has no single instruction on RV32 and is expanded with a branch on
; the sign of the high word.
define i32 @abs_i32(i32 %x) {
; CHECK-LABEL: abs_i32:
; CHECK: # %bb.0:
; CHECK-NEXT: abs a0, a0
; CHECK-NEXT: ret
%abs = tail call i32 @llvm.abs.i32(i32 %x, i1 true)
ret i32 %abs
}

define i64 @abs_i64(i64 %x) {
; CHECK-LABEL: abs_i64:
; CHECK: # %bb.0:
; CHECK-NEXT: bgez a1, .LBB1_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: subd a0, zero, a0
; CHECK-NEXT: .LBB1_2:
; CHECK-NEXT: ret
%abs = tail call i64 @llvm.abs.i64(i64 %x, i1 true)
ret i64 %abs
}
| |
; Immediate materialization with the P-extension packed-load-immediate
; instructions (pli.b / pli.h) for splatted byte/halfword constants.
; NOTE(review): %p is unused in pli_b_i32/pli_h_i32 — presumably kept for
; signature symmetry with the store test below.

; Make sure we prefer li over pli
define i32 @li_imm() {
; CHECK-LABEL: li_imm:
; CHECK: # %bb.0:
; CHECK-NEXT: li a0, -1
; CHECK-NEXT: ret
ret i32 -1
}

; 0x05050505 is a splat of byte 5 -> single pli.b.
define i32 @pli_b_i32(ptr %p) {
; CHECK-LABEL: pli_b_i32:
; CHECK: # %bb.0:
; CHECK-NEXT: pli.b a0, 5
; CHECK-NEXT: ret
ret i32 u0x05050505
}

; 0xffc0ffc0 is a splat of halfword -64 -> single pli.h.
define i32 @pli_h_i32(ptr %p) {
; CHECK-LABEL: pli_h_i32:
; CHECK: # %bb.0:
; CHECK-NEXT: pli.h a0, -64
; CHECK-NEXT: ret
ret i32 u0xffc0ffc0
}

; Splat constant feeding a store is still materialized with pli.b.
define void @pli_b_store_i32(ptr %p) {
; CHECK-LABEL: pli_b_store_i32:
; CHECK: # %bb.0:
; CHECK-NEXT: pli.b a1, 65
; CHECK-NEXT: sw a1, 0(a0)
; CHECK-NEXT: ret
store i32 u0x41414141, ptr %p
ret void
}
| |
; Halfword-pack patterns: (b << 16) | (a & 0xffff) and its zext-i16
; variants should all select the 'pack' instruction.
define i32 @pack_i32(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: pack_i32:
; CHECK: # %bb.0:
; CHECK-NEXT: pack a0, a0, a1
; CHECK-NEXT: ret
%shl = and i32 %a, 65535
%shl1 = shl i32 %b, 16
%or = or i32 %shl1, %shl
ret i32 %or
}

; Same pack pattern built from zero-extended i16 arguments.
define i32 @pack_i32_2(i16 zeroext %a, i16 zeroext %b) nounwind {
; CHECK-LABEL: pack_i32_2:
; CHECK: # %bb.0:
; CHECK-NEXT: pack a0, a0, a1
; CHECK-NEXT: ret
%zexta = zext i16 %a to i32
%zextb = zext i16 %b to i32
%shl1 = shl i32 %zextb, 16
%or = or i32 %shl1, %zexta
ret i32 %or
}

; Pack with the first argument in the high half, result feeding an add.
define i32 @pack_i32_3(i16 zeroext %0, i16 zeroext %1, i32 %2) {
; CHECK-LABEL: pack_i32_3:
; CHECK: # %bb.0:
; CHECK-NEXT: pack a0, a1, a0
; CHECK-NEXT: add a0, a0, a2
; CHECK-NEXT: ret
%4 = zext i16 %0 to i32
%5 = shl nuw i32 %4, 16
%6 = zext i16 %1 to i32
%7 = or i32 %5, %6
%8 = add i32 %7, %2
ret i32 %8
}
| |
; Count-leading-sign-bits patterns. The canonical form
;   ctlz(x ^ (x >>s bitwidth-1)) - 1
; and the alternate form
;   ctlz((x' << 1) | 1) with x' = x ^ sign-splat
; should both select 'cls'. Narrow types sign-extend first and adjust the
; count by the width difference; i64 is expanded on RV32.
define i8 @cls_i8(i8 %x) {
; CHECK-LABEL: cls_i8:
; CHECK: # %bb.0:
; CHECK-NEXT: sext.b a0, a0
; CHECK-NEXT: cls a0, a0
; CHECK-NEXT: addi a0, a0, -24
; CHECK-NEXT: ret
%a = ashr i8 %x, 7
%b = xor i8 %x, %a
%c = call i8 @llvm.ctlz.i8(i8 %b, i1 false)
%d = sub i8 %c, 1
ret i8 %d
}

define i8 @cls_i8_2(i8 %x) {
; CHECK-LABEL: cls_i8_2:
; CHECK: # %bb.0:
; CHECK-NEXT: sext.b a0, a0
; CHECK-NEXT: cls a0, a0
; CHECK-NEXT: addi a0, a0, -24
; CHECK-NEXT: ret
%a = ashr i8 %x, 7
%b = xor i8 %x, %a
%c = shl i8 %b, 1
%d = or i8 %c, 1
%e = call i8 @llvm.ctlz.i8(i8 %d, i1 true)
ret i8 %e
}

define i16 @cls_i16(i16 %x) {
; CHECK-LABEL: cls_i16:
; CHECK: # %bb.0:
; CHECK-NEXT: sext.h a0, a0
; CHECK-NEXT: cls a0, a0
; CHECK-NEXT: addi a0, a0, -16
; CHECK-NEXT: ret
%a = ashr i16 %x, 15
%b = xor i16 %x, %a
%c = call i16 @llvm.ctlz.i16(i16 %b, i1 false)
%d = sub i16 %c, 1
ret i16 %d
}

define i16 @cls_i16_2(i16 %x) {
; CHECK-LABEL: cls_i16_2:
; CHECK: # %bb.0:
; CHECK-NEXT: sext.h a0, a0
; CHECK-NEXT: cls a0, a0
; CHECK-NEXT: addi a0, a0, -16
; CHECK-NEXT: ret
%a = ashr i16 %x, 15
%b = xor i16 %x, %a
%c = shl i16 %b, 1
%d = or i16 %c, 1
%e = call i16 @llvm.ctlz.i16(i16 %d, i1 true)
ret i16 %e
}

; Native width: the whole pattern folds to a single cls.
define i32 @cls_i32(i32 %x) {
; CHECK-LABEL: cls_i32:
; CHECK: # %bb.0:
; CHECK-NEXT: cls a0, a0
; CHECK-NEXT: ret
%a = ashr i32 %x, 31
%b = xor i32 %x, %a
%c = call i32 @llvm.ctlz.i32(i32 %b, i1 false)
%d = sub i32 %c, 1
ret i32 %d
}

define i32 @cls_i32_2(i32 %x) {
; CHECK-LABEL: cls_i32_2:
; CHECK: # %bb.0:
; CHECK-NEXT: cls a0, a0
; CHECK-NEXT: ret
%a = ashr i32 %x, 31
%b = xor i32 %x, %a
%c = shl i32 %b, 1
%d = or i32 %c, 1
%e = call i32 @llvm.ctlz.i32(i32 %d, i1 true)
ret i32 %e
}

; i64 on RV32: expanded using clz on each word with a branch.
define i64 @cls_i64(i64 %x) {
; CHECK-LABEL: cls_i64:
; CHECK: # %bb.0:
; CHECK-NEXT: srai a2, a1, 31
; CHECK-NEXT: bne a1, a2, .LBB15_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: xor a0, a0, a2
; CHECK-NEXT: clz a0, a0
; CHECK-NEXT: addi a2, a0, 32
; CHECK-NEXT: j .LBB15_3
; CHECK-NEXT: .LBB15_2:
; CHECK-NEXT: xor a1, a1, a2
; CHECK-NEXT: clz a2, a1
; CHECK-NEXT: .LBB15_3:
; CHECK-NEXT: li a0, -1
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: waddau a0, a2, zero
; CHECK-NEXT: ret
%a = ashr i64 %x, 63
%b = xor i64 %x, %a
%c = call i64 @llvm.ctlz.i64(i64 %b, i1 false)
%d = sub i64 %c, 1
ret i64 %d
}

define i64 @cls_i64_2(i64 %x) {
; CHECK-LABEL: cls_i64_2:
; CHECK: # %bb.0:
; CHECK-NEXT: srai a2, a1, 31
; CHECK-NEXT: xor a1, a1, a2
; CHECK-NEXT: xor a0, a0, a2
; CHECK-NEXT: nsrli a1, a0, 31
; CHECK-NEXT: bnez a1, .LBB16_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: slli a0, a0, 1
; CHECK-NEXT: addi a0, a0, 1
; CHECK-NEXT: clz a0, a0
; CHECK-NEXT: addi a0, a0, 32
; CHECK-NEXT: li a1, 0
; CHECK-NEXT: ret
; CHECK-NEXT: .LBB16_2:
; CHECK-NEXT: clz a0, a1
; CHECK-NEXT: li a1, 0
; CHECK-NEXT: ret
%a = ashr i64 %x, 63
%b = xor i64 %x, %a
%c = shl i64 %b, 1
%d = or i64 %c, 1
%e = call i64 @llvm.ctlz.i64(i64 %d, i1 true)
ret i64 %e
}
| |
; Known-bits tests for cls: masking/or-ing the result should fold away when
; the value range of cls (and the number of redundant sign bits of its
; input) makes the operation a no-op, and must NOT fold otherwise.

; The result is in the range [1-31], so we don't need an andi after the cls.
define i32 @cls_i32_knownbits(i32 %x) {
; CHECK-LABEL: cls_i32_knownbits:
; CHECK: # %bb.0:
; CHECK-NEXT: cls a0, a0
; CHECK-NEXT: ret
%a = ashr i32 %x, 31
%b = xor i32 %x, %a
%c = call i32 @llvm.ctlz.i32(i32 %b, i1 false)
%d = sub i32 %c, 1
%e = and i32 %d, 31
ret i32 %e
}

; There are at least 16 redundant sign bits so we don't need an ori after the cls.
define i32 @cls_i32_knownbits_2(i16 signext %x) {
; CHECK-LABEL: cls_i32_knownbits_2:
; CHECK: # %bb.0:
; CHECK-NEXT: cls a0, a0
; CHECK-NEXT: ret
%sext = sext i16 %x to i32
%a = ashr i32 %sext, 31
%b = xor i32 %sext, %a
%c = call i32 @llvm.ctlz.i32(i32 %b, i1 false)
%d = sub i32 %c, 1
%e = or i32 %d, 16
ret i32 %e
}

; There are at least 24 redundant sign bits so we don't need an ori after the cls.
define i32 @cls_i32_knownbits_3(i8 signext %x) {
; CHECK-LABEL: cls_i32_knownbits_3:
; CHECK: # %bb.0:
; CHECK-NEXT: cls a0, a0
; CHECK-NEXT: ret
%sext = sext i8 %x to i32
%a = ashr i32 %sext, 31
%b = xor i32 %sext, %a
%c = call i32 @llvm.ctlz.i32(i32 %b, i1 false)
%d = sub i32 %c, 1
%e = or i32 %d, 24
ret i32 %e
}

; Negative test. We only know there is at least 1 redundant sign bit. We can't
; remove the ori.
define i32 @cls_i32_knownbits_4(i32 signext %x) {
; CHECK-LABEL: cls_i32_knownbits_4:
; CHECK: # %bb.0:
; CHECK-NEXT: slli a0, a0, 1
; CHECK-NEXT: srai a0, a0, 1
; CHECK-NEXT: cls a0, a0
; CHECK-NEXT: ori a0, a0, 1
; CHECK-NEXT: ret
%shl = shl i32 %x, 1
%ashr = ashr i32 %shl, 1
%a = ashr i32 %ashr, 31
%b = xor i32 %ashr, %a
%c = call i32 @llvm.ctlz.i32(i32 %b, i1 false)
%d = sub i32 %c, 1
%e = or i32 %d, 1
ret i32 %e
}

; Negative test. Check that the number of sign bits is not
; overestimated. If it is, the ori disappears.
define i32 @cls_i32_knownbits_no_overestimate(i32 signext %x) {
; CHECK-LABEL: cls_i32_knownbits_no_overestimate:
; CHECK: # %bb.0:
; CHECK-NEXT: srai a0, a0, 15
; CHECK-NEXT: cls a0, a0
; CHECK-NEXT: ori a0, a0, 16
; CHECK-NEXT: ret
%ashr = ashr i32 %x, 15
%a = ashr i32 %ashr, 31
%b = xor i32 %ashr, %a
%c = call i32 @llvm.ctlz.i32(i32 %b, i1 false)
%d = sub i32 %c, 1
%e = or i32 %d, 16
ret i32 %e
}
| |
; 64-bit shift lowering on RV32 with the P extension: variable shifts use
; the paired shift instructions (sll/slx, srl/srx, sra) plus the narrowing
; forms (nsrl/nsra/nsrli/nsrai). The *_small tests (amount masked to <32)
; and *_large tests (amount known >=32) check the simplified expansions.
define i64 @sll_i64(i64 %x, i64 %y) {
; CHECK-LABEL: sll_i64:
; CHECK: # %bb.0:
; CHECK-NEXT: sll a3, a0, a2
; CHECK-NEXT: slx a1, a0, a2
; CHECK-NEXT: slli a2, a2, 26
; CHECK-NEXT: srai a2, a2, 31
; CHECK-NEXT: mvm a1, a3, a2
; CHECK-NEXT: andn a0, a3, a2
; CHECK-NEXT: ret
%b = shl i64 %x, %y
ret i64 %b
}

; Shift amount known < 32: no mask/select sequence needed.
define i64 @sll_small_i64(i64 %x, i64 %y) {
; CHECK-LABEL: sll_small_i64:
; CHECK: # %bb.0:
; CHECK-NEXT: sll a3, a0, a2
; CHECK-NEXT: slx a1, a0, a2
; CHECK-NEXT: mv a0, a3
; CHECK-NEXT: ret
%a = and i64 %y, 31
%b = shl i64 %x, %a
ret i64 %b
}

; Shift amount known >= 32: low word becomes zero.
define i64 @sll_large_i64(i64 %x, i64 %y) {
; CHECK-LABEL: sll_large_i64:
; CHECK: # %bb.0:
; CHECK-NEXT: sll a1, a0, a2
; CHECK-NEXT: li a0, 0
; CHECK-NEXT: ret
%a = or i64 %y, 32
%b = shl i64 %x, %a
ret i64 %b
}

define i64 @slli_i64(i64 %x) {
; CHECK-LABEL: slli_i64:
; CHECK: # %bb.0:
; CHECK-NEXT: nsrli a1, a0, 7
; CHECK-NEXT: slli a0, a0, 25
; CHECK-NEXT: ret
%a = shl i64 %x, 25
ret i64 %a
}

define i64 @slli_i64_large(i64 %x) {
; CHECK-LABEL: slli_i64_large:
; CHECK: # %bb.0:
; CHECK-NEXT: slli a1, a0, 7
; CHECK-NEXT: li a0, 0
; CHECK-NEXT: ret
%a = shl i64 %x, 39
ret i64 %a
}

define i64 @srl_i64(i64 %x, i64 %y) {
; CHECK-LABEL: srl_i64:
; CHECK: # %bb.0:
; CHECK-NEXT: slli a3, a2, 26
; CHECK-NEXT: nsrl a0, a0, a2
; CHECK-NEXT: srl a1, a1, a2
; CHECK-NEXT: srai a3, a3, 31
; CHECK-NEXT: andn a1, a1, a3
; CHECK-NEXT: ret
%b = lshr i64 %x, %y
ret i64 %b
}

define i64 @srl_small_i64(i64 %x, i64 %y) {
; CHECK-LABEL: srl_small_i64:
; CHECK: # %bb.0:
; CHECK-NEXT: srl a3, a1, a2
; CHECK-NEXT: srx a0, a1, a2
; CHECK-NEXT: mv a1, a3
; CHECK-NEXT: ret
%a = and i64 %y, 31
%b = lshr i64 %x, %a
ret i64 %b
}

define i64 @srl_large_i64(i64 %x, i64 %y) {
; CHECK-LABEL: srl_large_i64:
; CHECK: # %bb.0:
; CHECK-NEXT: srl a0, a1, a2
; CHECK-NEXT: li a1, 0
; CHECK-NEXT: ret
%a = or i64 %y, 32
%b = lshr i64 %x, %a
ret i64 %b
}

; The andi with 63 is optimized away since nsrl only reads 6 bits.
define i64 @srl_mask63_i64(i64 %x, i64 %y) {
; CHECK-LABEL: srl_mask63_i64:
; CHECK: # %bb.0:
; CHECK-NEXT: slli a3, a2, 26
; CHECK-NEXT: nsrl a0, a0, a2
; CHECK-NEXT: srl a1, a1, a2
; CHECK-NEXT: srai a3, a3, 31
; CHECK-NEXT: andn a1, a1, a3
; CHECK-NEXT: ret
%a = and i64 %y, 63
%b = lshr i64 %x, %a
ret i64 %b
}

define i64 @srli_i64(i64 %x) {
; CHECK-LABEL: srli_i64:
; CHECK: # %bb.0:
; CHECK-NEXT: nsrli a0, a0, 25
; CHECK-NEXT: srli a1, a1, 25
; CHECK-NEXT: ret
%a = lshr i64 %x, 25
ret i64 %a
}

define i64 @srli_i64_large(i64 %x) {
; CHECK-LABEL: srli_i64_large:
; CHECK: # %bb.0:
; CHECK-NEXT: srli a0, a1, 7
; CHECK-NEXT: li a1, 0
; CHECK-NEXT: ret
%a = lshr i64 %x, 39
ret i64 %a
}

define i64 @sra_i64(i64 %x, i64 %y) {
; CHECK-LABEL: sra_i64:
; CHECK: # %bb.0:
; CHECK-NEXT: slli a3, a2, 26
; CHECK-NEXT: nsra a0, a0, a2
; CHECK-NEXT: sra a1, a1, a2
; CHECK-NEXT: srai a3, a3, 31
; CHECK-NEXT: sra a1, a1, a3
; CHECK-NEXT: ret
%b = ashr i64 %x, %y
ret i64 %b
}

define i64 @sra_small_i64(i64 %x, i64 %y) {
; CHECK-LABEL: sra_small_i64:
; CHECK: # %bb.0:
; CHECK-NEXT: sra a3, a1, a2
; CHECK-NEXT: srx a0, a1, a2
; CHECK-NEXT: mv a1, a3
; CHECK-NEXT: ret
%a = and i64 %y, 31
%b = ashr i64 %x, %a
ret i64 %b
}

; Amount >= 32: high word becomes the sign splat.
define i64 @sra_large_i64(i64 %x, i64 %y) {
; CHECK-LABEL: sra_large_i64:
; CHECK: # %bb.0:
; CHECK-NEXT: sra a0, a1, a2
; CHECK-NEXT: srai a1, a1, 31
; CHECK-NEXT: ret
%a = or i64 %y, 32
%b = ashr i64 %x, %a
ret i64 %b
}

; The andi with 63 is optimized away since nsra only reads 6 bits.
define i64 @sra_mask63_i64(i64 %x, i64 %y) {
; CHECK-LABEL: sra_mask63_i64:
; CHECK: # %bb.0:
; CHECK-NEXT: slli a3, a2, 26
; CHECK-NEXT: nsra a0, a0, a2
; CHECK-NEXT: sra a1, a1, a2
; CHECK-NEXT: srai a3, a3, 31
; CHECK-NEXT: sra a1, a1, a3
; CHECK-NEXT: ret
%a = and i64 %y, 63
%b = ashr i64 %x, %a
ret i64 %b
}

define i64 @srai_i64(i64 %x) {
; CHECK-LABEL: srai_i64:
; CHECK: # %bb.0:
; CHECK-NEXT: nsrai a0, a0, 25
; CHECK-NEXT: srai a1, a1, 25
; CHECK-NEXT: ret
%a = ashr i64 %x, 25
ret i64 %a
}

define i64 @srai_i64_large(i64 %x) {
; CHECK-LABEL: srai_i64_large:
; CHECK: # %bb.0:
; CHECK-NEXT: srai a0, a1, 7
; CHECK-NEXT: srai a1, a1, 31
; CHECK-NEXT: ret
%a = ashr i64 %x, 39
ret i64 %a
}
| |
; Funnel shifts: fshl/fshr select the P-extension slx/srx instructions.
; Immediate shift amounts are materialized into a register first —
; slx/srx apparently have no immediate form here.
define i32 @slx_i32(i32 %a, i32 %b, i32 %shamt) {
; CHECK-LABEL: slx_i32:
; CHECK: # %bb.0:
; CHECK-NEXT: slx a0, a1, a2
; CHECK-NEXT: ret
%1 = tail call i32 @llvm.fshl.i32(i32 %a, i32 %b, i32 %shamt)
ret i32 %1
}

define i32 @slxi_i32(i32 %a, i32 %b) {
; CHECK-LABEL: slxi_i32:
; CHECK: # %bb.0:
; CHECK-NEXT: li a2, 25
; CHECK-NEXT: slx a0, a1, a2
; CHECK-NEXT: ret
%1 = tail call i32 @llvm.fshl.i32(i32 %a, i32 %b, i32 25)
ret i32 %1
}

define i32 @srx_i32(i32 %a, i32 %b, i32 %shamt) {
; CHECK-LABEL: srx_i32:
; CHECK: # %bb.0:
; CHECK-NEXT: srx a1, a0, a2
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: ret
%1 = tail call i32 @llvm.fshr.i32(i32 %a, i32 %b, i32 %shamt)
ret i32 %1
}

define i32 @srxi_i32(i32 %a, i32 %b) {
; CHECK-LABEL: srxi_i32:
; CHECK: # %bb.0:
; CHECK-NEXT: li a2, 25
; CHECK-NEXT: srx a1, a0, a2
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: ret
%1 = tail call i32 @llvm.fshr.i32(i32 %a, i32 %b, i32 25)
ret i32 %1
}
| |
; Signed saturating shift-left (llvm.sshl.sat) selects 'ssha'. Narrow types
; are shifted into the top of the register so that ssha saturates at the
; narrow type's bounds, then shifted back down.
define i8 @shlsat_i8(i8 %a, i8 %b) {
; CHECK-LABEL: shlsat_i8:
; CHECK: # %bb.0:
; CHECK-NEXT: zext.b a1, a1
; CHECK-NEXT: slli a0, a0, 24
; CHECK-NEXT: ssha a0, a0, a1
; CHECK-NEXT: srai a0, a0, 24
; CHECK-NEXT: ret
%sshlsat = tail call i8 @llvm.sshl.sat.i8(i8 %a,i8 %b)
ret i8 %sshlsat
}

define i16 @shlsat_i16(i16 %a, i16 %b) {
; CHECK-LABEL: shlsat_i16:
; CHECK: # %bb.0:
; CHECK-NEXT: zext.h a1, a1
; CHECK-NEXT: slli a0, a0, 16
; CHECK-NEXT: ssha a0, a0, a1
; CHECK-NEXT: srai a0, a0, 16
; CHECK-NEXT: ret
%sshlsat = tail call i16 @llvm.sshl.sat.i16(i16 %a,i16 %b)
ret i16 %sshlsat
}

define i32 @shlsat_i32(i32 %a, i32 %b) {
; CHECK-LABEL: shlsat_i32:
; CHECK: # %bb.0:
; CHECK-NEXT: ssha a0, a0, a1
; CHECK-NEXT: ret
%sshlsat = tail call i32 @llvm.sshl.sat.i32(i32 %a,i32 %b)
ret i32 %sshlsat
}
| |
; Signed saturating add/sub select 'sadd'/'ssub'. Narrow types are shifted
; into the top of the register so the 32-bit saturation point matches the
; narrow type, then shifted back down.
define i8 @sadd_i8(i8 %x, i8 %y) {
; CHECK-LABEL: sadd_i8:
; CHECK: # %bb.0:
; CHECK-NEXT: slli a1, a1, 24
; CHECK-NEXT: slli a0, a0, 24
; CHECK-NEXT: sadd a0, a0, a1
; CHECK-NEXT: srai a0, a0, 24
; CHECK-NEXT: ret
%a = call i8 @llvm.sadd.sat.i8(i8 %x, i8 %y)
ret i8 %a
}

define i16 @sadd_i16(i16 %x, i16 %y) {
; CHECK-LABEL: sadd_i16:
; CHECK: # %bb.0:
; CHECK-NEXT: slli a1, a1, 16
; CHECK-NEXT: slli a0, a0, 16
; CHECK-NEXT: sadd a0, a0, a1
; CHECK-NEXT: srai a0, a0, 16
; CHECK-NEXT: ret
%a = call i16 @llvm.sadd.sat.i16(i16 %x, i16 %y)
ret i16 %a
}

define i32 @sadd_i32(i32 %x, i32 %y) {
; CHECK-LABEL: sadd_i32:
; CHECK: # %bb.0:
; CHECK-NEXT: sadd a0, a0, a1
; CHECK-NEXT: ret
%a = call i32 @llvm.sadd.sat.i32(i32 %x, i32 %y)
ret i32 %a
}

define i8 @ssub_i8(i8 %x, i8 %y) {
; CHECK-LABEL: ssub_i8:
; CHECK: # %bb.0:
; CHECK-NEXT: slli a1, a1, 24
; CHECK-NEXT: slli a0, a0, 24
; CHECK-NEXT: ssub a0, a0, a1
; CHECK-NEXT: srai a0, a0, 24
; CHECK-NEXT: ret
%a = call i8 @llvm.ssub.sat.i8(i8 %x, i8 %y)
ret i8 %a
}

define i16 @ssub_i16(i16 %x, i16 %y) {
; CHECK-LABEL: ssub_i16:
; CHECK: # %bb.0:
; CHECK-NEXT: slli a1, a1, 16
; CHECK-NEXT: slli a0, a0, 16
; CHECK-NEXT: ssub a0, a0, a1
; CHECK-NEXT: srai a0, a0, 16
; CHECK-NEXT: ret
%a = call i16 @llvm.ssub.sat.i16(i16 %x, i16 %y)
ret i16 %a
}

define i32 @ssub_i32(i32 %x, i32 %y) {
; CHECK-LABEL: ssub_i32:
; CHECK: # %bb.0:
; CHECK-NEXT: ssub a0, a0, a1
; CHECK-NEXT: ret
%a = call i32 @llvm.ssub.sat.i32(i32 %x, i32 %y)
ret i32 %a
}
| |
; Unsigned saturating add/sub. i32 selects saddu/ssubu directly. Narrow
; uadd cannot overflow after zero-extension, so it is an add clamped with
; minu; narrow usub still selects ssubu on the zero-extended operands.
define i8 @uadd_i8(i8 %x, i8 %y) {
; CHECK-LABEL: uadd_i8:
; CHECK: # %bb.0:
; CHECK-NEXT: zext.b a1, a1
; CHECK-NEXT: zext.b a0, a0
; CHECK-NEXT: add a0, a0, a1
; CHECK-NEXT: li a1, 255
; CHECK-NEXT: minu a0, a0, a1
; CHECK-NEXT: ret
%a = call i8 @llvm.uadd.sat.i8(i8 %x, i8 %y)
ret i8 %a
}

define i16 @uadd_i16(i16 %x, i16 %y) {
; CHECK-LABEL: uadd_i16:
; CHECK: # %bb.0:
; CHECK-NEXT: zext.h a1, a1
; CHECK-NEXT: zext.h a0, a0
; CHECK-NEXT: add a0, a0, a1
; CHECK-NEXT: lui a1, 16
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: minu a0, a0, a1
; CHECK-NEXT: ret
%a = call i16 @llvm.uadd.sat.i16(i16 %x, i16 %y)
ret i16 %a
}

define i32 @uadd_i32(i32 %x, i32 %y) {
; CHECK-LABEL: uadd_i32:
; CHECK: # %bb.0:
; CHECK-NEXT: saddu a0, a0, a1
; CHECK-NEXT: ret
%a = call i32 @llvm.uadd.sat.i32(i32 %x, i32 %y)
ret i32 %a
}

define i8 @usub_i8(i8 %x, i8 %y) {
; CHECK-LABEL: usub_i8:
; CHECK: # %bb.0:
; CHECK-NEXT: zext.b a1, a1
; CHECK-NEXT: zext.b a0, a0
; CHECK-NEXT: ssubu a0, a0, a1
; CHECK-NEXT: ret
%a = call i8 @llvm.usub.sat.i8(i8 %x, i8 %y)
ret i8 %a
}

define i16 @usub_i16(i16 %x, i16 %y) {
; CHECK-LABEL: usub_i16:
; CHECK: # %bb.0:
; CHECK-NEXT: zext.h a1, a1
; CHECK-NEXT: zext.h a0, a0
; CHECK-NEXT: ssubu a0, a0, a1
; CHECK-NEXT: ret
%a = call i16 @llvm.usub.sat.i16(i16 %x, i16 %y)
ret i16 %a
}

define i32 @usub_i32(i32 %x, i32 %y) {
; CHECK-LABEL: usub_i32:
; CHECK: # %bb.0:
; CHECK-NEXT: ssubu a0, a0, a1
; CHECK-NEXT: ret
%a = call i32 @llvm.usub.sat.i32(i32 %x, i32 %y)
ret i32 %a
}
| |
| define i64 @wmul_i32(i32 %x, i32 %y) { |
| ; CHECK-LABEL: wmul_i32: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: wmul a0, a0, a1 |
| ; CHECK-NEXT: ret |
| %a = sext i32 %x to i64 |
| %b = sext i32 %y to i64 |
| %c = mul i64 %a, %b |
| ret i64 %c |
| } |
| |
| define i64 @wmulu_i32(i32 %x, i32 %y) { |
| ; CHECK-LABEL: wmulu_i32: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: wmulu a0, a0, a1 |
| ; CHECK-NEXT: ret |
| %a = zext i32 %x to i64 |
| %b = zext i32 %y to i64 |
| %c = mul i64 %a, %b |
| ret i64 %c |
| } |
| |
| define i64 @wmulsu_i32(i32 %x, i32 %y) { |
| ; CHECK-LABEL: wmulsu_i32: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: wmulsu a0, a1, a0 |
| ; CHECK-NEXT: ret |
| %a = zext i32 %x to i64 |
| %b = sext i32 %y to i64 |
| %c = mul i64 %a, %b |
| ret i64 %c |
| } |
| |
| ; Test that mulh continues to be used with P. |
| define i32 @mulh_i32(i32 %x, i32 %y) { |
| ; CHECK-LABEL: mulh_i32: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: mulh a0, a0, a1 |
| ; CHECK-NEXT: ret |
| %a = sext i32 %x to i64 |
| %b = sext i32 %y to i64 |
| %c = mul i64 %a, %b |
| %d = lshr i64 %c, 32 |
| %e = trunc i64 %d to i32 |
| ret i32 %e |
| } |
| |
| ; Test that mulhu continues to be used with P. |
| define i32 @mulhu_i32(i32 %x, i32 %y) { |
| ; CHECK-LABEL: mulhu_i32: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: mulhu a0, a0, a1 |
| ; CHECK-NEXT: ret |
| %a = zext i32 %x to i64 |
| %b = zext i32 %y to i64 |
| %c = mul i64 %a, %b |
| %d = lshr i64 %c, 32 |
| %e = trunc i64 %d to i32 |
| ret i32 %e |
| } |
| |
| ; Test that mulhsu continues to be used with P. |
| define i32 @mulhsu_i32(i32 %x, i32 %y) { |
| ; CHECK-LABEL: mulhsu_i32: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: mulhsu a0, a1, a0 |
| ; CHECK-NEXT: ret |
| %a = zext i32 %x to i64 |
| %b = sext i32 %y to i64 |
| %c = mul i64 %a, %b |
| %d = lshr i64 %c, 32 |
| %e = trunc i64 %d to i32 |
| ret i32 %e |
| } |
| |
; Plain 64-bit add/sub on RV32 select the paired addd/subd instructions
; instead of the add/sltu carry expansion.
define i64 @add_i64(i64 %x, i64 %y) {
; CHECK-LABEL: add_i64:
; CHECK: # %bb.0:
; CHECK-NEXT: addd a0, a0, a2
; CHECK-NEXT: ret
%a = add i64 %x, %y
ret i64 %a
}

define i64 @sub_i64(i64 %x, i64 %y) {
; CHECK-LABEL: sub_i64:
; CHECK: # %bb.0:
; CHECK-NEXT: subd a0, a0, a2
; CHECK-NEXT: ret
%a = sub i64 %x, %y
ret i64 %a
}
| |
; Widening multiply-accumulate: add(acc, ext(a)*ext(b)) folds to
; wmacc/wmaccu/wmaccsu in either operand order when the multiply has a
; single use. The padd.dw copies the accumulator pair into the return
; registers.
define i64 @wmaccu(i32 %a, i32 %b, i64 %c) nounwind {
; CHECK-LABEL: wmaccu:
; CHECK: # %bb.0:
; CHECK-NEXT: wmaccu a2, a0, a1
; CHECK-NEXT: padd.dw a0, a2, zero
; CHECK-NEXT: ret
%aext = zext i32 %a to i64
%bext = zext i32 %b to i64
%mul = mul i64 %aext, %bext
%result = add i64 %c, %mul
ret i64 %result
}

define i64 @wmaccu_commute(i32 %a, i32 %b, i64 %c) nounwind {
; CHECK-LABEL: wmaccu_commute:
; CHECK: # %bb.0:
; CHECK-NEXT: wmaccu a2, a0, a1
; CHECK-NEXT: padd.dw a0, a2, zero
; CHECK-NEXT: ret
%aext = zext i32 %a to i64
%bext = zext i32 %b to i64
%mul = mul i64 %aext, %bext
%result = add i64 %mul, %c
ret i64 %result
}

define i64 @wmacc(i32 %a, i32 %b, i64 %c) nounwind {
; CHECK-LABEL: wmacc:
; CHECK: # %bb.0:
; CHECK-NEXT: wmacc a2, a0, a1
; CHECK-NEXT: padd.dw a0, a2, zero
; CHECK-NEXT: ret
%aext = sext i32 %a to i64
%bext = sext i32 %b to i64
%mul = mul i64 %aext, %bext
%result = add i64 %c, %mul
ret i64 %result
}

define i64 @wmacc_commute(i32 %a, i32 %b, i64 %c) nounwind {
; CHECK-LABEL: wmacc_commute:
; CHECK: # %bb.0:
; CHECK-NEXT: wmacc a2, a0, a1
; CHECK-NEXT: padd.dw a0, a2, zero
; CHECK-NEXT: ret
%aext = sext i32 %a to i64
%bext = sext i32 %b to i64
%mul = mul i64 %aext, %bext
%result = add i64 %mul, %c
ret i64 %result
}

define i64 @wmaccsu(i32 %a, i32 %b, i64 %c) nounwind {
; CHECK-LABEL: wmaccsu:
; CHECK: # %bb.0:
; CHECK-NEXT: wmaccsu a2, a0, a1
; CHECK-NEXT: padd.dw a0, a2, zero
; CHECK-NEXT: ret
%aext = sext i32 %a to i64
%bext = zext i32 %b to i64
%mul = mul i64 %aext, %bext
%result = add i64 %c, %mul
ret i64 %result
}

define i64 @wmaccsu_commute(i32 %a, i32 %b, i64 %c) nounwind {
; CHECK-LABEL: wmaccsu_commute:
; CHECK: # %bb.0:
; CHECK-NEXT: wmaccsu a2, a0, a1
; CHECK-NEXT: padd.dw a0, a2, zero
; CHECK-NEXT: ret
%aext = sext i32 %a to i64
%bext = zext i32 %b to i64
%mul = mul i64 %aext, %bext
%result = add i64 %mul, %c
ret i64 %result
}

; Negative test: multiply result has multiple uses, should not combine
define void @wmaccu_multiple_uses(i32 %a, i32 %b, i64 %c, ptr %out1, ptr %out2) nounwind {
; CHECK-LABEL: wmaccu_multiple_uses:
; CHECK: # %bb.0:
; CHECK-NEXT: wmulu a0, a0, a1
; CHECK-NEXT: addd a2, a2, a0
; CHECK-NEXT: sw a2, 0(a4)
; CHECK-NEXT: sw a3, 4(a4)
; CHECK-NEXT: sw a0, 0(a5)
; CHECK-NEXT: sw a1, 4(a5)
; CHECK-NEXT: ret
%aext = zext i32 %a to i64
%bext = zext i32 %b to i64
%mul = mul i64 %aext, %bext
%result = add i64 %c, %mul
store i64 %result, ptr %out1
store i64 %mul, ptr %out2
ret void
}

; First multiply has multiple uses, but second multiply has single use.
; Make sure we fold the second multiply into wmacc.
define i64 @wmacc_first_mul_multiple_uses(i32 %a, i32 %b, i32 %c, i32 %d, ptr %out) nounwind {
; CHECK-LABEL: wmacc_first_mul_multiple_uses:
; CHECK: # %bb.0:
; CHECK-NEXT: wmul a2, a2, a3
; CHECK-NEXT: mv a5, a3
; CHECK-NEXT: mv a6, a2
; CHECK-NEXT: wmacc a2, a0, a1
; CHECK-NEXT: sw a6, 0(a4)
; CHECK-NEXT: sw a5, 4(a4)
; CHECK-NEXT: padd.dw a0, a2, zero
; CHECK-NEXT: ret
%aext = sext i32 %a to i64
%bext = sext i32 %b to i64
%cext = sext i32 %c to i64
%dext = sext i32 %d to i64
%mul1 = mul i64 %aext, %bext
%mul2 = mul i64 %cext, %dext
; mul2 is first operand (has multiple uses), mul1 is second (single use)
%result = add i64 %mul2, %mul1
store i64 %mul2, ptr %out
ret i64 %result
}
| |
; Bitwise-merge selection. The pattern (mask & b) | (~mask & a) — and the
; xor form ((a ^ b) & mask) ^ a — select merge/mvm/mvmn depending on which
; source register the destination can overwrite (merge overwrites the mask
; operand, mvm overwrites a, mvmn overwrites b).

; Test bitwise merge: (mask & b) | (~mask & a)
define i32 @merge_i32(i32 %mask, i32 %a, i32 %b) nounwind {
; CHECK-LABEL: merge_i32:
; CHECK: # %bb.0:
; CHECK-NEXT: merge a0, a1, a2
; CHECK-NEXT: ret
%and1 = and i32 %mask, %b
%not = xor i32 %mask, -1
%and2 = and i32 %not, %a
%or = or i32 %and1, %and2
ret i32 %or
}

; Test MERGE with swapped a/b arguments
define i32 @merge_i32_2(i32 %mask, i32 %b, i32 %a) nounwind {
; CHECK-LABEL: merge_i32_2:
; CHECK: # %bb.0:
; CHECK-NEXT: merge a0, a2, a1
; CHECK-NEXT: ret
%and1 = and i32 %mask, %b
%not = xor i32 %mask, -1
%and2 = and i32 %not, %a
%or = or i32 %and1, %and2
ret i32 %or
}

; Test MVM: result overwrites rs1 (%a)
define i32 @mvm_i32(i32 %a, i32 %mask, i32 %b) nounwind {
; CHECK-LABEL: mvm_i32:
; CHECK: # %bb.0:
; CHECK-NEXT: mvm a0, a2, a1
; CHECK-NEXT: ret
%and1 = and i32 %mask, %b
%not = xor i32 %mask, -1
%and2 = and i32 %not, %a
%or = or i32 %and1, %and2
ret i32 %or
}

; Test MVM with mask as last argument
define i32 @mvm_i32_2(i32 %a, i32 %b, i32 %mask) nounwind {
; CHECK-LABEL: mvm_i32_2:
; CHECK: # %bb.0:
; CHECK-NEXT: mvm a0, a1, a2
; CHECK-NEXT: ret
%and1 = and i32 %mask, %b
%not = xor i32 %mask, -1
%and2 = and i32 %not, %a
%or = or i32 %and1, %and2
ret i32 %or
}

; Test MVMN: result overwrites rs2 (%b)
define i32 @mvmn_i32(i32 %b, i32 %mask, i32 %a) nounwind {
; CHECK-LABEL: mvmn_i32:
; CHECK: # %bb.0:
; CHECK-NEXT: mvmn a0, a2, a1
; CHECK-NEXT: ret
%and1 = and i32 %mask, %b
%not = xor i32 %mask, -1
%and2 = and i32 %not, %a
%or = or i32 %and1, %and2
ret i32 %or
}

; Test MVMN with mask as last argument
define i32 @mvmn_i32_2(i32 %b, i32 %a, i32 %mask) nounwind {
; CHECK-LABEL: mvmn_i32_2:
; CHECK: # %bb.0:
; CHECK-NEXT: mvmn a0, a1, a2
; CHECK-NEXT: ret
%and1 = and i32 %mask, %b
%not = xor i32 %mask, -1
%and2 = and i32 %not, %a
%or = or i32 %and1, %and2
ret i32 %or
}

; Test case where none of the source operands can be overwritten,
; requiring a mv before merge
define i32 @merge_i32_mv(i32 %mask, i32 %a, i32 %b) nounwind {
; CHECK-LABEL: merge_i32_mv:
; CHECK: # %bb.0:
; CHECK-NEXT: mv a3, a0
; CHECK-NEXT: merge a3, a1, a2
; CHECK-NEXT: add a0, a0, a1
; CHECK-NEXT: add a0, a3, a0
; CHECK-NEXT: add a0, a0, a2
; CHECK-NEXT: ret
%and1 = and i32 %mask, %b
%not = xor i32 %mask, -1
%and2 = and i32 %not, %a
%or = or i32 %and1, %and2
%sum1 = add i32 %or, %mask
%sum2 = add i32 %sum1, %a
%sum3 = add i32 %sum2, %b
ret i32 %sum3
}

; Test alternate merge pattern: (a ^ b) & mask ^ a
define i32 @merge_xor_i32(i32 %mask, i32 %a, i32 %b) nounwind {
; CHECK-LABEL: merge_xor_i32:
; CHECK: # %bb.0:
; CHECK-NEXT: merge a0, a1, a2
; CHECK-NEXT: ret
%xor1 = xor i32 %a, %b
%and = and i32 %xor1, %mask
%xor2 = xor i32 %and, %a
ret i32 %xor2
}

; Test alternate merge pattern with different argument order for MVM
define i32 @mvm_xor_i32(i32 %a, i32 %mask, i32 %b) nounwind {
; CHECK-LABEL: mvm_xor_i32:
; CHECK: # %bb.0:
; CHECK-NEXT: mvm a0, a2, a1
; CHECK-NEXT: ret
%xor1 = xor i32 %a, %b
%and = and i32 %xor1, %mask
%xor2 = xor i32 %and, %a
ret i32 %xor2
}

; Test alternate merge pattern with different argument order for MVMN
define i32 @mvmn_xor_i32(i32 %b, i32 %mask, i32 %a) nounwind {
; CHECK-LABEL: mvmn_xor_i32:
; CHECK: # %bb.0:
; CHECK-NEXT: mvmn a0, a2, a1
; CHECK-NEXT: ret
%xor1 = xor i32 %a, %b
%and = and i32 %xor1, %mask
%xor2 = xor i32 %and, %a
ret i32 %xor2
}
| |
; Widening accumulate of zero-extended 32-bit values into a 64-bit
; accumulator: chains of add/sub with zext operands fold into
; waddau/wsubau, using the zero register for an absent second addend.

; acc + zext(a) -> waddau acc, a, 0
define i64 @waddau_zext(i64 %acc, i32 %a) nounwind {
; CHECK-LABEL: waddau_zext:
; CHECK: # %bb.0:
; CHECK-NEXT: waddau a0, a2, zero
; CHECK-NEXT: ret
%ext_a = zext i32 %a to i64
%sum = add i64 %acc, %ext_a
ret i64 %sum
}

; zext(a) + acc -> waddau acc, a, 0
define i64 @waddau_zext_commuted(i64 %acc, i32 %a) nounwind {
; CHECK-LABEL: waddau_zext_commuted:
; CHECK: # %bb.0:
; CHECK-NEXT: waddau a0, a2, zero
; CHECK-NEXT: ret
%ext_a = zext i32 %a to i64
%sum = add i64 %ext_a, %acc
ret i64 %sum
}

; acc + zext(a) + zext(b) -> waddau acc, a, b
define i64 @waddau_zext_chain(i64 %acc, i32 %a, i32 %b) nounwind {
; CHECK-LABEL: waddau_zext_chain:
; CHECK: # %bb.0:
; CHECK-NEXT: waddau a0, a2, a3
; CHECK-NEXT: ret
%ext_a = zext i32 %a to i64
%ext_b = zext i32 %b to i64
%sum1 = add i64 %acc, %ext_a
%sum2 = add i64 %sum1, %ext_b
ret i64 %sum2
}

; acc - zext(a) -> wsubau acc, 0, a
define i64 @wsubau_zext(i64 %acc, i32 %a) nounwind {
; CHECK-LABEL: wsubau_zext:
; CHECK: # %bb.0:
; CHECK-NEXT: wsubau a0, zero, a2
; CHECK-NEXT: ret
%ext_a = zext i32 %a to i64
%sub = sub i64 %acc, %ext_a
ret i64 %sub
}

; (acc + zext(a)) - zext(b) -> wsubau acc, a, b
define i64 @wsubau_zext_chain(i64 %acc, i32 %a, i32 %b) nounwind {
; CHECK-LABEL: wsubau_zext_chain:
; CHECK: # %bb.0:
; CHECK-NEXT: wsubau a0, a2, a3
; CHECK-NEXT: ret
%ext_a = zext i32 %a to i64
%ext_b = zext i32 %b to i64
%sum = add i64 %acc, %ext_a
%sub = sub i64 %sum, %ext_b
ret i64 %sub
}

; (acc - zext(a)) + zext(b) -> wsubau acc, b, a
define i64 @wsubau_zext_chain_rev(i64 %acc, i32 %a, i32 %b) nounwind {
; CHECK-LABEL: wsubau_zext_chain_rev:
; CHECK: # %bb.0:
; CHECK-NEXT: wsubau a0, a3, a2
; CHECK-NEXT: ret
%ext_a = zext i32 %a to i64
%ext_b = zext i32 %b to i64
%sub = sub i64 %acc, %ext_a
%sum = add i64 %sub, %ext_b
ret i64 %sum
}