; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=xtensa -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=XTENSA %s

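; Check lowering of rotate-left/rotate-right patterns (shl/lshr/or idioms and
; the llvm.fshl/llvm.fshr intrinsics) for i32 and i64. The rotates are expanded
; using the SAR-based shift instructions (ssl/ssr/sll/srl/src) and or.
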
define i32 @rotl_32(i32 %x, i32 %y) nounwind {
; XTENSA-LABEL: rotl_32:
; XTENSA: ssl a3
; XTENSA-NEXT: sll a8, a2
; XTENSA-NEXT: movi a9, 32
; XTENSA-NEXT: sub a9, a9, a3
; XTENSA-NEXT: ssr a9
; XTENSA-NEXT: srl a9, a2
; XTENSA-NEXT: or a2, a8, a9
; XTENSA-NEXT: ret
  %z = sub i32 32, %y
  %b = shl i32 %x, %y
  %c = lshr i32 %x, %z
  %d = or i32 %b, %c
  ret i32 %d
}

define i32 @rotr_32(i32 %x, i32 %y) nounwind {
; XTENSA-LABEL: rotr_32:
; XTENSA: ssr a3
; XTENSA-NEXT: srl a8, a2
; XTENSA-NEXT: movi a9, 32
; XTENSA-NEXT: sub a9, a9, a3
; XTENSA-NEXT: ssl a9
; XTENSA-NEXT: sll a9, a2
; XTENSA-NEXT: or a2, a8, a9
; XTENSA-NEXT: ret
  %z = sub i32 32, %y
  %b = lshr i32 %x, %y
  %c = shl i32 %x, %z
  %d = or i32 %b, %c
  ret i32 %d
}

define i64 @rotl_64(i64 %x, i64 %y) nounwind {
; XTENSA-LABEL: rotl_64:
; XTENSA: movi a8, 64
; XTENSA-NEXT: sub a8, a8, a4
; XTENSA-NEXT: ssr a8
; XTENSA-NEXT: src a11, a3, a2
; XTENSA-NEXT: movi a9, 32
; XTENSA-NEXT: sub a9, a9, a4
; XTENSA-NEXT: ssr a9
; XTENSA-NEXT: srl a7, a3
; XTENSA-NEXT: movi a10, 0
; XTENSA-NEXT: blt a9, a10, .LBB2_2
; XTENSA-NEXT: # %bb.1:
; XTENSA-NEXT: or a11, a7, a7
; XTENSA-NEXT: .LBB2_2:
; XTENSA-NEXT: ssl a4
; XTENSA-NEXT: sll a7, a2
; XTENSA-NEXT: addi a5, a4, -32
; XTENSA-NEXT: blt a5, a10, .LBB2_4
; XTENSA-NEXT: # %bb.3:
; XTENSA-NEXT: or a7, a10, a10
; XTENSA-NEXT: .LBB2_4:
; XTENSA-NEXT: ssl a4
; XTENSA-NEXT: src a6, a3, a2
; XTENSA-NEXT: ssl a5
; XTENSA-NEXT: sll a4, a2
; XTENSA-NEXT: blt a5, a10, .LBB2_6
; XTENSA-NEXT: # %bb.5:
; XTENSA-NEXT: or a6, a4, a4
; XTENSA-NEXT: .LBB2_6:
; XTENSA-NEXT: or a2, a7, a11
; XTENSA-NEXT: ssr a8
; XTENSA-NEXT: srl a8, a3
; XTENSA-NEXT: blt a9, a10, .LBB2_8
; XTENSA-NEXT: # %bb.7:
; XTENSA-NEXT: or a8, a10, a10
; XTENSA-NEXT: .LBB2_8:
; XTENSA-NEXT: or a3, a6, a8
; XTENSA-NEXT: ret
  %z = sub i64 64, %y
  %b = shl i64 %x, %y
  %c = lshr i64 %x, %z
  %d = or i64 %b, %c
  ret i64 %d
}

define i64 @rotr_64(i64 %x, i64 %y) nounwind {
; XTENSA-LABEL: rotr_64:
; XTENSA: ssr a4
; XTENSA-NEXT: src a10, a3, a2
; XTENSA-NEXT: addi a8, a4, -32
; XTENSA-NEXT: ssr a8
; XTENSA-NEXT: srl a11, a3
; XTENSA-NEXT: movi a9, 0
; XTENSA-NEXT: blt a8, a9, .LBB3_2
; XTENSA-NEXT: # %bb.1:
; XTENSA-NEXT: or a10, a11, a11
; XTENSA-NEXT: .LBB3_2:
; XTENSA-NEXT: movi a11, 32
; XTENSA-NEXT: sub a7, a11, a4
; XTENSA-NEXT: movi a11, 64
; XTENSA-NEXT: sub a11, a11, a4
; XTENSA-NEXT: ssl a11
; XTENSA-NEXT: sll a6, a2
; XTENSA-NEXT: blt a7, a9, .LBB3_4
; XTENSA-NEXT: # %bb.3:
; XTENSA-NEXT: or a6, a9, a9
; XTENSA-NEXT: .LBB3_4:
; XTENSA-NEXT: ssl a11
; XTENSA-NEXT: src a11, a3, a2
; XTENSA-NEXT: ssl a7
; XTENSA-NEXT: sll a5, a2
; XTENSA-NEXT: blt a7, a9, .LBB3_6
; XTENSA-NEXT: # %bb.5:
; XTENSA-NEXT: or a11, a5, a5
; XTENSA-NEXT: .LBB3_6:
; XTENSA-NEXT: or a2, a10, a6
; XTENSA-NEXT: ssr a4
; XTENSA-NEXT: srl a10, a3
; XTENSA-NEXT: blt a8, a9, .LBB3_8
; XTENSA-NEXT: # %bb.7:
; XTENSA-NEXT: or a10, a9, a9
; XTENSA-NEXT: .LBB3_8:
; XTENSA-NEXT: or a3, a10, a11
; XTENSA-NEXT: ret
  %z = sub i64 64, %y
  %b = lshr i64 %x, %y
  %c = shl i64 %x, %z
  %d = or i64 %b, %c
  ret i64 %d
}

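; The tests below mask the rotate amount (neg + and with 31/63, optionally
; pre-masking the amount itself with 63/127) before shifting.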
define i32 @rotl_32_mask(i32 %x, i32 %y) nounwind {
; XTENSA-LABEL: rotl_32_mask:
; XTENSA: ssl a3
; XTENSA-NEXT: sll a8, a2
; XTENSA-NEXT: neg a9, a3
; XTENSA-NEXT: movi a10, 31
; XTENSA-NEXT: and a9, a9, a10
; XTENSA-NEXT: ssr a9
; XTENSA-NEXT: srl a9, a2
; XTENSA-NEXT: or a2, a8, a9
; XTENSA-NEXT: ret
  %z = sub i32 0, %y
  %and = and i32 %z, 31
  %b = shl i32 %x, %y
  %c = lshr i32 %x, %and
  %d = or i32 %b, %c
  ret i32 %d
}

define i32 @rotl_32_mask_and_63_and_31(i32 %x, i32 %y) nounwind {
; XTENSA-LABEL: rotl_32_mask_and_63_and_31:
; XTENSA: movi a8, 63
; XTENSA-NEXT: and a8, a3, a8
; XTENSA-NEXT: ssl a8
; XTENSA-NEXT: sll a8, a2
; XTENSA-NEXT: neg a9, a3
; XTENSA-NEXT: movi a10, 31
; XTENSA-NEXT: and a9, a9, a10
; XTENSA-NEXT: ssr a9
; XTENSA-NEXT: srl a9, a2
; XTENSA-NEXT: or a2, a8, a9
; XTENSA-NEXT: ret
  %a = and i32 %y, 63
  %b = shl i32 %x, %a
  %c = sub i32 0, %y
  %d = and i32 %c, 31
  %e = lshr i32 %x, %d
  %f = or i32 %b, %e
  ret i32 %f
}

define i32 @rotr_32_mask(i32 %x, i32 %y) nounwind {
; XTENSA-LABEL: rotr_32_mask:
; XTENSA: ssr a3
; XTENSA-NEXT: srl a8, a2
; XTENSA-NEXT: neg a9, a3
; XTENSA-NEXT: movi a10, 31
; XTENSA-NEXT: and a9, a9, a10
; XTENSA-NEXT: ssl a9
; XTENSA-NEXT: sll a9, a2
; XTENSA-NEXT: or a2, a8, a9
; XTENSA-NEXT: ret
  %z = sub i32 0, %y
  %and = and i32 %z, 31
  %b = lshr i32 %x, %y
  %c = shl i32 %x, %and
  %d = or i32 %b, %c
  ret i32 %d
}

define i32 @rotr_32_mask_and_63_and_31(i32 %x, i32 %y) nounwind {
; XTENSA-LABEL: rotr_32_mask_and_63_and_31:
; XTENSA: movi a8, 63
; XTENSA-NEXT: and a8, a3, a8
; XTENSA-NEXT: ssr a8
; XTENSA-NEXT: srl a8, a2
; XTENSA-NEXT: neg a9, a3
; XTENSA-NEXT: movi a10, 31
; XTENSA-NEXT: and a9, a9, a10
; XTENSA-NEXT: ssl a9
; XTENSA-NEXT: sll a9, a2
; XTENSA-NEXT: or a2, a8, a9
; XTENSA-NEXT: ret
  %a = and i32 %y, 63
  %b = lshr i32 %x, %a
  %c = sub i32 0, %y
  %d = and i32 %c, 31
  %e = shl i32 %x, %d
  %f = or i32 %b, %e
  ret i32 %f
}

define i64 @rotl_64_mask(i64 %x, i64 %y) nounwind {
; XTENSA-LABEL: rotl_64_mask:
; XTENSA: ssl a4
; XTENSA-NEXT: src a10, a3, a2
; XTENSA-NEXT: addi a8, a4, -32
; XTENSA-NEXT: ssl a8
; XTENSA-NEXT: sll a11, a2
; XTENSA-NEXT: movi a9, 0
; XTENSA-NEXT: blt a8, a9, .LBB8_2
; XTENSA-NEXT: # %bb.1:
; XTENSA-NEXT: or a10, a11, a11
; XTENSA-NEXT: .LBB8_2:
; XTENSA-NEXT: neg a11, a4
; XTENSA-NEXT: movi a7, 63
; XTENSA-NEXT: and a7, a11, a7
; XTENSA-NEXT: ssr a7
; XTENSA-NEXT: srl a11, a3
; XTENSA-NEXT: addi a6, a7, -32
; XTENSA-NEXT: blt a6, a9, .LBB8_4
; XTENSA-NEXT: # %bb.3:
; XTENSA-NEXT: or a11, a9, a9
; XTENSA-NEXT: .LBB8_4:
; XTENSA-NEXT: ssr a7
; XTENSA-NEXT: src a7, a3, a2
; XTENSA-NEXT: ssr a6
; XTENSA-NEXT: srl a5, a3
; XTENSA-NEXT: blt a6, a9, .LBB8_6
; XTENSA-NEXT: # %bb.5:
; XTENSA-NEXT: or a7, a5, a5
; XTENSA-NEXT: .LBB8_6:
; XTENSA-NEXT: or a3, a10, a11
; XTENSA-NEXT: ssl a4
; XTENSA-NEXT: sll a10, a2
; XTENSA-NEXT: blt a8, a9, .LBB8_8
; XTENSA-NEXT: # %bb.7:
; XTENSA-NEXT: or a10, a9, a9
; XTENSA-NEXT: .LBB8_8:
; XTENSA-NEXT: or a2, a10, a7
; XTENSA-NEXT: ret
  %z = sub i64 0, %y
  %and = and i64 %z, 63
  %b = shl i64 %x, %y
  %c = lshr i64 %x, %and
  %d = or i64 %b, %c
  ret i64 %d
}

define i64 @rotl_64_mask_and_127_and_63(i64 %x, i64 %y) nounwind {
; XTENSA-LABEL: rotl_64_mask_and_127_and_63:
; XTENSA: movi a8, 127
; XTENSA-NEXT: and a8, a4, a8
; XTENSA-NEXT: ssl a8
; XTENSA-NEXT: src a11, a3, a2
; XTENSA-NEXT: addi a9, a8, -32
; XTENSA-NEXT: ssl a9
; XTENSA-NEXT: sll a7, a2
; XTENSA-NEXT: movi a10, 0
; XTENSA-NEXT: blt a9, a10, .LBB9_2
; XTENSA-NEXT: # %bb.1:
; XTENSA-NEXT: or a11, a7, a7
; XTENSA-NEXT: .LBB9_2:
; XTENSA-NEXT: neg a7, a4
; XTENSA-NEXT: movi a6, 63
; XTENSA-NEXT: and a6, a7, a6
; XTENSA-NEXT: ssr a6
; XTENSA-NEXT: srl a7, a3
; XTENSA-NEXT: addi a5, a6, -32
; XTENSA-NEXT: blt a5, a10, .LBB9_4
; XTENSA-NEXT: # %bb.3:
; XTENSA-NEXT: or a7, a10, a10
; XTENSA-NEXT: .LBB9_4:
; XTENSA-NEXT: ssr a6
; XTENSA-NEXT: src a6, a3, a2
; XTENSA-NEXT: ssr a5
; XTENSA-NEXT: srl a4, a3
; XTENSA-NEXT: blt a5, a10, .LBB9_6
; XTENSA-NEXT: # %bb.5:
; XTENSA-NEXT: or a6, a4, a4
; XTENSA-NEXT: .LBB9_6:
; XTENSA-NEXT: or a3, a11, a7
; XTENSA-NEXT: ssl a8
; XTENSA-NEXT: sll a8, a2
; XTENSA-NEXT: blt a9, a10, .LBB9_8
; XTENSA-NEXT: # %bb.7:
; XTENSA-NEXT: or a8, a10, a10
; XTENSA-NEXT: .LBB9_8:
; XTENSA-NEXT: or a2, a8, a6
; XTENSA-NEXT: ret
  %a = and i64 %y, 127
  %b = shl i64 %x, %a
  %c = sub i64 0, %y
  %d = and i64 %c, 63
  %e = lshr i64 %x, %d
  %f = or i64 %b, %e
  ret i64 %f
}

define i64 @rotr_64_mask(i64 %x, i64 %y) nounwind {
; XTENSA-LABEL: rotr_64_mask:
; XTENSA: ssr a4
; XTENSA-NEXT: src a10, a3, a2
; XTENSA-NEXT: addi a8, a4, -32
; XTENSA-NEXT: ssr a8
; XTENSA-NEXT: srl a11, a3
; XTENSA-NEXT: movi a9, 0
; XTENSA-NEXT: blt a8, a9, .LBB10_2
; XTENSA-NEXT: # %bb.1:
; XTENSA-NEXT: or a10, a11, a11
; XTENSA-NEXT: .LBB10_2:
; XTENSA-NEXT: neg a11, a4
; XTENSA-NEXT: movi a7, 63
; XTENSA-NEXT: and a7, a11, a7
; XTENSA-NEXT: ssl a7
; XTENSA-NEXT: sll a11, a2
; XTENSA-NEXT: addi a6, a7, -32
; XTENSA-NEXT: blt a6, a9, .LBB10_4
; XTENSA-NEXT: # %bb.3:
; XTENSA-NEXT: or a11, a9, a9
; XTENSA-NEXT: .LBB10_4:
; XTENSA-NEXT: ssl a7
; XTENSA-NEXT: src a7, a3, a2
; XTENSA-NEXT: ssl a6
; XTENSA-NEXT: sll a5, a2
; XTENSA-NEXT: blt a6, a9, .LBB10_6
; XTENSA-NEXT: # %bb.5:
; XTENSA-NEXT: or a7, a5, a5
; XTENSA-NEXT: .LBB10_6:
; XTENSA-NEXT: or a2, a10, a11
; XTENSA-NEXT: ssr a4
; XTENSA-NEXT: srl a10, a3
; XTENSA-NEXT: blt a8, a9, .LBB10_8
; XTENSA-NEXT: # %bb.7:
; XTENSA-NEXT: or a10, a9, a9
; XTENSA-NEXT: .LBB10_8:
; XTENSA-NEXT: or a3, a10, a7
; XTENSA-NEXT: ret
  %z = sub i64 0, %y
  %and = and i64 %z, 63
  %b = lshr i64 %x, %y
  %c = shl i64 %x, %and
  %d = or i64 %b, %c
  ret i64 %d
}

define i64 @rotr_64_mask_and_127_and_63(i64 %x, i64 %y) nounwind {
; XTENSA-LABEL: rotr_64_mask_and_127_and_63:
; XTENSA: movi a8, 127
; XTENSA-NEXT: and a8, a4, a8
; XTENSA-NEXT: ssr a8
; XTENSA-NEXT: src a11, a3, a2
; XTENSA-NEXT: addi a9, a8, -32
; XTENSA-NEXT: ssr a9
; XTENSA-NEXT: srl a7, a3
; XTENSA-NEXT: movi a10, 0
; XTENSA-NEXT: blt a9, a10, .LBB11_2
; XTENSA-NEXT: # %bb.1:
; XTENSA-NEXT: or a11, a7, a7
; XTENSA-NEXT: .LBB11_2:
; XTENSA-NEXT: neg a7, a4
; XTENSA-NEXT: movi a6, 63
; XTENSA-NEXT: and a6, a7, a6
; XTENSA-NEXT: ssl a6
; XTENSA-NEXT: sll a7, a2
; XTENSA-NEXT: addi a5, a6, -32
; XTENSA-NEXT: blt a5, a10, .LBB11_4
; XTENSA-NEXT: # %bb.3:
; XTENSA-NEXT: or a7, a10, a10
; XTENSA-NEXT: .LBB11_4:
; XTENSA-NEXT: ssl a6
; XTENSA-NEXT: src a6, a3, a2
; XTENSA-NEXT: ssl a5
; XTENSA-NEXT: sll a4, a2
; XTENSA-NEXT: blt a5, a10, .LBB11_6
; XTENSA-NEXT: # %bb.5:
; XTENSA-NEXT: or a6, a4, a4
; XTENSA-NEXT: .LBB11_6:
; XTENSA-NEXT: or a2, a11, a7
; XTENSA-NEXT: ssr a8
; XTENSA-NEXT: srl a8, a3
; XTENSA-NEXT: blt a9, a10, .LBB11_8
; XTENSA-NEXT: # %bb.7:
; XTENSA-NEXT: or a8, a10, a10
; XTENSA-NEXT: .LBB11_8:
; XTENSA-NEXT: or a3, a8, a6
; XTENSA-NEXT: ret
  %a = and i64 %y, 127
  %b = lshr i64 %x, %a
  %c = sub i64 0, %y
  %d = and i64 %c, 63
  %e = shl i64 %x, %d
  %f = or i64 %b, %e
  ret i64 %f
}

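; The remaining tests express rotates through the llvm.fshl/llvm.fshr
; intrinsics with a pre-masked amount that is also reused by a separate shift
; or by a second rotate.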
define signext i32 @rotl_32_mask_shared(i32 signext %a, i32 signext %b, i32 signext %amt) nounwind {
; XTENSA-LABEL: rotl_32_mask_shared:
; XTENSA: movi a8, 31
; XTENSA-NEXT: and a9, a4, a8
; XTENSA-NEXT: ssl a9
; XTENSA-NEXT: sll a10, a2
; XTENSA-NEXT: neg a11, a4
; XTENSA-NEXT: and a8, a11, a8
; XTENSA-NEXT: ssr a8
; XTENSA-NEXT: srl a8, a2
; XTENSA-NEXT: or a8, a10, a8
; XTENSA-NEXT: ssl a9
; XTENSA-NEXT: sll a9, a3
; XTENSA-NEXT: add a2, a8, a9
; XTENSA-NEXT: ret
  %maskedamt = and i32 %amt, 31
  %1 = tail call i32 @llvm.fshl.i32(i32 %a, i32 %a, i32 %maskedamt)
  %2 = shl i32 %b, %maskedamt
  %3 = add i32 %1, %2
  ret i32 %3
}
declare i32 @llvm.fshl.i32(i32, i32, i32)

define signext i32 @rotr_32_mask_shared(i32 signext %a, i32 signext %b, i32 signext %amt) nounwind {
; XTENSA-LABEL: rotr_32_mask_shared:
; XTENSA: movi a8, 31
; XTENSA-NEXT: and a9, a4, a8
; XTENSA-NEXT: ssr a9
; XTENSA-NEXT: srl a10, a2
; XTENSA-NEXT: neg a11, a4
; XTENSA-NEXT: and a8, a11, a8
; XTENSA-NEXT: ssl a8
; XTENSA-NEXT: sll a8, a2
; XTENSA-NEXT: or a8, a10, a8
; XTENSA-NEXT: ssl a9
; XTENSA-NEXT: sll a9, a3
; XTENSA-NEXT: add a2, a8, a9
; XTENSA-NEXT: ret
  %maskedamt = and i32 %amt, 31
  %1 = tail call i32 @llvm.fshr.i32(i32 %a, i32 %a, i32 %maskedamt)
  %2 = shl i32 %b, %maskedamt
  %3 = add i32 %1, %2
  ret i32 %3
}
declare i32 @llvm.fshr.i32(i32, i32, i32)

define signext i32 @rotl_32_mask_multiple(i32 signext %a, i32 signext %b, i32 signext %amt) nounwind {
; XTENSA-LABEL: rotl_32_mask_multiple:
; XTENSA: movi a8, 31
; XTENSA-NEXT: and a9, a4, a8
; XTENSA-NEXT: ssl a9
; XTENSA-NEXT: sll a10, a3
; XTENSA-NEXT: neg a11, a4
; XTENSA-NEXT: and a8, a11, a8
; XTENSA-NEXT: ssr a8
; XTENSA-NEXT: srl a11, a3
; XTENSA-NEXT: or a10, a10, a11
; XTENSA-NEXT: ssl a9
; XTENSA-NEXT: sll a9, a2
; XTENSA-NEXT: ssr a8
; XTENSA-NEXT: srl a8, a2
; XTENSA-NEXT: or a8, a9, a8
; XTENSA-NEXT: add a2, a8, a10
; XTENSA-NEXT: ret
  %maskedamt = and i32 %amt, 31
  %1 = tail call i32 @llvm.fshl.i32(i32 %a, i32 %a, i32 %maskedamt)
  %2 = tail call i32 @llvm.fshl.i32(i32 %b, i32 %b, i32 %maskedamt)
  %3 = add i32 %1, %2
  ret i32 %3
}

define signext i32 @rotr_32_mask_multiple(i32 signext %a, i32 signext %b, i32 signext %amt) nounwind {
; XTENSA-LABEL: rotr_32_mask_multiple:
; XTENSA: movi a8, 31
; XTENSA-NEXT: and a9, a4, a8
; XTENSA-NEXT: ssr a9
; XTENSA-NEXT: srl a10, a3
; XTENSA-NEXT: neg a11, a4
; XTENSA-NEXT: and a8, a11, a8
; XTENSA-NEXT: ssl a8
; XTENSA-NEXT: sll a11, a3
; XTENSA-NEXT: or a10, a10, a11
; XTENSA-NEXT: ssr a9
; XTENSA-NEXT: srl a9, a2
; XTENSA-NEXT: ssl a8
; XTENSA-NEXT: sll a8, a2
; XTENSA-NEXT: or a8, a9, a8
; XTENSA-NEXT: add a2, a8, a10
; XTENSA-NEXT: ret
  %maskedamt = and i32 %amt, 31
  %1 = tail call i32 @llvm.fshr.i32(i32 %a, i32 %a, i32 %maskedamt)
  %2 = tail call i32 @llvm.fshr.i32(i32 %b, i32 %b, i32 %maskedamt)
  %3 = add i32 %1, %2
  ret i32 %3
}