; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN:   | FileCheck %s -check-prefix=RV64I

; Test that we turn (sra (shl X, 32), 32-C) into (slli (sext.w X), C)

define i64 @test1(i64 %a) nounwind {
; RV64I-LABEL: test1:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sext.w a0, a0
; RV64I-NEXT:    slli a0, a0, 2
; RV64I-NEXT:    ret
  %1 = shl i64 %a, 32
  %2 = ashr i64 %1, 30
  ret i64 %2
}

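; Same pattern, but %a is passed sign extended (signext), so the sext.w
; folds away and only the slli remains.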
define i64 @test2(i32 signext %a) nounwind {
; RV64I-LABEL: test2:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 3
; RV64I-NEXT:    ret
  %1 = zext i32 %a to i64
  %2 = shl i64 %1, 32
  %3 = ashr i64 %2, 29
  ret i64 %3
}

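; lw sign extends the loaded value on RV64, so no explicit sext.w is
; needed before the slli.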
define i64 @test3(ptr %a) nounwind {
; RV64I-LABEL: test3:
; RV64I:       # %bb.0:
; RV64I-NEXT:    lw a0, 0(a0)
; RV64I-NEXT:    slli a0, a0, 4
; RV64I-NEXT:    ret
  %1 = load i32, ptr %a
  %2 = zext i32 %1 to i64
  %3 = shl i64 %2, 32
  %4 = ashr i64 %3, 28
  ret i64 %4
}

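; addw already sign extends its result, so the slli can consume it directly.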
define i64 @test4(i32 signext %a, i32 signext %b) nounwind {
; RV64I-LABEL: test4:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addw a0, a0, a1
; RV64I-NEXT:    slli a0, a0, 30
; RV64I-NEXT:    ret
  %1 = add i32 %a, %b
  %2 = zext i32 %1 to i64
  %3 = shl i64 %2, 32
  %4 = ashr i64 %3, 2
  ret i64 %4
}

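; The xor of two sign-extended values is itself sign extended, so again
; only the slli is needed.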
define i64 @test5(i32 signext %a, i32 signext %b) nounwind {
; RV64I-LABEL: test5:
; RV64I:       # %bb.0:
; RV64I-NEXT:    xor a0, a0, a1
; RV64I-NEXT:    slli a0, a0, 31
; RV64I-NEXT:    ret
  %1 = xor i32 %a, %b
  %2 = zext i32 %1 to i64
  %3 = shl i64 %2, 32
  %4 = ashr i64 %3, 1
  ret i64 %4
}

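; sllw sign extends its result, so the slli can follow it directly.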
define i64 @test6(i32 signext %a, i32 signext %b) nounwind {
; RV64I-LABEL: test6:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sllw a0, a0, a1
; RV64I-NEXT:    slli a0, a0, 16
; RV64I-NEXT:    ret
  %1 = shl i32 %a, %b
  %2 = zext i32 %1 to i64
  %3 = shl i64 %2, 32
  %4 = ashr i64 %3, 16
  ret i64 %4
}

; The ashr+add+shl is canonical IR from InstCombine for
; (sext (add (trunc X to i32), 1) to i64).
; That can be implemented as addiw; make sure we recover it.
define i64 @test7(ptr %0, i64 %1) {
; RV64I-LABEL: test7:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addiw a0, a1, 1
; RV64I-NEXT:    ret
  %3 = shl i64 %1, 32
  %4 = add i64 %3, 4294967296 ; 1 << 32
  %5 = ashr exact i64 %4, 32
  ret i64 %5
}

; The ashr+add+mul is canonical IR from InstCombine for
; (sext (sub 1, (trunc X to i32)) to i64).
; That can be implemented as (li 1)+subw; make sure we recover it.
define i64 @test8(ptr %0, i64 %1) {
; RV64I-LABEL: test8:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a0, 1
; RV64I-NEXT:    subw a0, a0, a1
; RV64I-NEXT:    ret
  %3 = mul i64 %1, -4294967296
  %4 = add i64 %3, 4294967296 ; 1 << 32
  %5 = ashr exact i64 %4, 32
  ret i64 %5
}

; The gep is here to introduce a shl by 2 after the ashr that will get folded
; and make this harder to recover.
define signext i32 @test9(ptr %0, i64 %1) {
; RV64I-LABEL: test9:
; RV64I:       # %bb.0:
; RV64I-NEXT:    lui a2, 1
; RV64I-NEXT:    addi a2, a2, 1
; RV64I-NEXT:    addw a1, a1, a2
; RV64I-NEXT:    slli a1, a1, 2
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    lw a0, 0(a0)
; RV64I-NEXT:    ret
  %3 = shl i64 %1, 32
  %4 = add i64 %3, 17596481011712 ; 4097 << 32
  %5 = ashr exact i64 %4, 32
  %6 = getelementptr inbounds i32, ptr %0, i64 %5
  %7 = load i32, ptr %6, align 4
  ret i32 %7
}

; The gep is here to introduce a shl by 2 after the ashr that will get folded
; and make this harder to recover.
define signext i32 @test10(ptr %0, i64 %1) {
; RV64I-LABEL: test10:
; RV64I:       # %bb.0:
; RV64I-NEXT:    lui a2, 30141
; RV64I-NEXT:    addi a2, a2, -747
; RV64I-NEXT:    subw a2, a2, a1
; RV64I-NEXT:    slli a2, a2, 2
; RV64I-NEXT:    add a0, a0, a2
; RV64I-NEXT:    lw a0, 0(a0)
; RV64I-NEXT:    ret
  %3 = mul i64 %1, -4294967296
  %4 = add i64 %3, 530242871224172544 ; 123456789 << 32
  %5 = ashr exact i64 %4, 32
  %6 = getelementptr inbounds i32, ptr %0, i64 %5
  %7 = load i32, ptr %6, align 4
  ret i32 %7
}

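; Same as test8, but with 0x8000'0000'0000'0000 as the constant, so the
; li 1 becomes an lui of 0x80000000.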
define i64 @test11(ptr %0, i64 %1) {
; RV64I-LABEL: test11:
; RV64I:       # %bb.0:
; RV64I-NEXT:    lui a0, 524288
; RV64I-NEXT:    subw a0, a0, a1
; RV64I-NEXT:    ret
  %3 = mul i64 %1, -4294967296
  %4 = add i64 %3, 9223372036854775808 ; 0x8000'0000'0000'0000
  %5 = ashr exact i64 %4, 32
  ret i64 %5
}

; Make sure we use slli+srai to enable the possibility of compressed instructions.
define i32 @test12(i32 signext %0) {
; RV64I-LABEL: test12:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 49
; RV64I-NEXT:    srai a0, a0, 47
; RV64I-NEXT:    ret
  %2 = shl i32 %0, 17
  %3 = ashr i32 %2, 15
  ret i32 %3
}

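; Two ashr exact users share the same mul; both indices should be
; recovered as subw.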
define i8 @test13(ptr %0, i64 %1) {
; RV64I-LABEL: test13:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a2, 1
; RV64I-NEXT:    li a3, 2
; RV64I-NEXT:    subw a2, a2, a1
; RV64I-NEXT:    subw a3, a3, a1
; RV64I-NEXT:    add a2, a0, a2
; RV64I-NEXT:    add a0, a0, a3
; RV64I-NEXT:    lbu a1, 0(a2)
; RV64I-NEXT:    lbu a0, 0(a0)
; RV64I-NEXT:    add a0, a1, a0
; RV64I-NEXT:    ret
  %3 = mul i64 %1, -4294967296
  %4 = add i64 %3, 4294967296 ; 1 << 32
  %5 = ashr exact i64 %4, 32
  %6 = getelementptr inbounds i8, ptr %0, i64 %5
  %7 = load i8, ptr %6, align 4
  %8 = add i64 %3, 8589934592 ; 2 << 32
  %9 = ashr exact i64 %8, 32
  %10 = getelementptr inbounds i8, ptr %0, i64 %9
  %11 = load i8, ptr %10, align 4
  %12 = add i8 %7, %11
  ret i8 %12
}

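; The recovered index is used both unscaled (i8 gep) and scaled by 4
; (i32 gep), so the subw result is shared.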
define signext i32 @test14(ptr %0, ptr %1, i64 %2) {
; RV64I-LABEL: test14:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a3, 1
; RV64I-NEXT:    subw a3, a3, a2
; RV64I-NEXT:    add a0, a0, a3
; RV64I-NEXT:    slli a3, a3, 2
; RV64I-NEXT:    lbu a0, 0(a0)
; RV64I-NEXT:    add a1, a1, a3
; RV64I-NEXT:    lw a1, 0(a1)
; RV64I-NEXT:    addw a0, a0, a1
; RV64I-NEXT:    ret
  %4 = mul i64 %2, -4294967296
  %5 = add i64 %4, 4294967296 ; 1 << 32
  %6 = ashr exact i64 %5, 32
  %7 = getelementptr inbounds i8, ptr %0, i64 %6
  %8 = load i8, ptr %7, align 4
  %9 = zext i8 %8 to i32
  %10 = getelementptr inbounds i32, ptr %1, i64 %6
  %11 = load i32, ptr %10, align 4
  %12 = add i32 %9, %11
  ret i32 %12
}