| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc -mtriple=amdgcn-amd-amdhsa < %s | FileCheck %s |
| |
; FIXME: Inefficient codegen: the load + extractelement optimization is
; skipped when the vector element type is not byte-sized.
| define i1 @extractloadi1(ptr %ptr, i32 %idx) { |
| ; CHECK-LABEL: extractloadi1: |
| ; CHECK: ; %bb.0: |
| ; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) |
| ; CHECK-NEXT: flat_load_ubyte v0, v[0:1] |
| ; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) |
| ; CHECK-NEXT: v_lshrrev_b32_e32 v1, 2, v0 |
| ; CHECK-NEXT: v_lshlrev_b32_e32 v3, 5, v0 |
| ; CHECK-NEXT: v_and_b32_e32 v4, 2, v0 |
| ; CHECK-NEXT: v_lshrrev_b32_e32 v5, 6, v0 |
| ; CHECK-NEXT: v_lshrrev_b32_e32 v6, 4, v0 |
| ; CHECK-NEXT: v_lshlrev_b32_e32 v7, 3, v0 |
| ; CHECK-NEXT: v_lshlrev_b32_e32 v8, 1, v0 |
| ; CHECK-NEXT: v_or_b32_e32 v1, v1, v3 |
| ; CHECK-NEXT: v_and_b32_e32 v3, 0x100, v7 |
| ; CHECK-NEXT: v_and_b32_e32 v7, 0x100, v8 |
| ; CHECK-NEXT: v_lshlrev_b32_e32 v4, 7, v4 |
| ; CHECK-NEXT: v_or_b32_e32 v3, v6, v3 |
| ; CHECK-NEXT: v_or_b32_e32 v5, v5, v7 |
| ; CHECK-NEXT: v_or_b32_e32 v0, v0, v4 |
| ; CHECK-NEXT: v_and_b32_e32 v1, 0x103, v1 |
| ; CHECK-NEXT: v_lshlrev_b32_e32 v4, 16, v5 |
| ; CHECK-NEXT: v_lshlrev_b32_e32 v5, 16, v1 |
| ; CHECK-NEXT: v_or_b32_e32 v1, v3, v4 |
| ; CHECK-NEXT: v_or_b32_e32 v0, v0, v5 |
| ; CHECK-NEXT: v_lshlrev_b32_e32 v2, 3, v2 |
| ; CHECK-NEXT: v_lshr_b64 v[0:1], v[0:1], v2 |
| ; CHECK-NEXT: s_setpc_b64 s[30:31] |
| %val = load <8 x i1>, ptr %ptr |
| %ret = extractelement <8 x i1> %val, i32 %idx |
| ret i1 %ret |
| } |