| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc -mcpu=pwr9 -mtriple=powerpc64le-unknown-unknown -verify-machineinstrs \ |
| ; RUN: -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr < %s | FileCheck %s |
| ; RUN: llc -mcpu=pwr8 -mtriple=powerpc64le-unknown-unknown -verify-machineinstrs \ |
| ; RUN: -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr < %s | FileCheck %s \ |
| ; RUN: -check-prefix=CHECK-P8 |
| |
; Zero-initialized quad-precision (IEEE binary128, fp128) globals used as the
; two comparison operands by every test function in this file.
@a_qp = common dso_local global fp128 0xL00000000000000000000000000000000, align 16
@b_qp = common dso_local global fp128 0xL00000000000000000000000000000000, align 16
| |
| ; Function Attrs: noinline nounwind optnone |
define dso_local signext i32 @greater_qp() {
; CHECK-LABEL: greater_qp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r3, r2, a_qp@toc@ha
; CHECK-NEXT: li r4, 1
; CHECK-NEXT: addi r3, r3, a_qp@toc@l
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-NEXT: addis r3, r2, b_qp@toc@ha
; CHECK-NEXT: addi r3, r3, b_qp@toc@l
; CHECK-NEXT: lxv v3, 0(r3)
; CHECK-NEXT: li r3, 0
; CHECK-NEXT: xscmpuqp cr0, v2, v3
; CHECK-NEXT: iselgt r3, r4, r3
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: greater_qp:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -32(r1)
; CHECK-P8-NEXT: .cfi_def_cfa_offset 32
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: addis r3, r2, a_qp@toc@ha
; CHECK-P8-NEXT: addis r4, r2, b_qp@toc@ha
; CHECK-P8-NEXT: addi r3, r3, a_qp@toc@l
; CHECK-P8-NEXT: addi r4, r4, b_qp@toc@l
; CHECK-P8-NEXT: lvx v2, 0, r3
; CHECK-P8-NEXT: lvx v3, 0, r4
; CHECK-P8-NEXT: bl __gtkf2
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: extsw r3, r3
; CHECK-P8-NEXT: neg r3, r3
; CHECK-P8-NEXT: rldicl r3, r3, 1, 63
; CHECK-P8-NEXT: addi r1, r1, 32
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
; Returns (a_qp > b_qp) ? 1 : 0. On P9 this should lower to the hardware
; quad-precision compare (xscmpuqp) plus an isel; on P8 it goes through the
; __gtkf2 compiler-rt libcall.
entry:
%0 = load fp128, fp128* @a_qp, align 16
%1 = load fp128, fp128* @b_qp, align 16
%cmp = fcmp ogt fp128 %0, %1 ; ordered: false if either operand is NaN
%conv = zext i1 %cmp to i32
ret i32 %conv
}
| |
| ; Function Attrs: noinline nounwind optnone |
define dso_local signext i32 @less_qp() {
; CHECK-LABEL: less_qp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r3, r2, a_qp@toc@ha
; CHECK-NEXT: li r4, 1
; CHECK-NEXT: addi r3, r3, a_qp@toc@l
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-NEXT: addis r3, r2, b_qp@toc@ha
; CHECK-NEXT: addi r3, r3, b_qp@toc@l
; CHECK-NEXT: lxv v3, 0(r3)
; CHECK-NEXT: li r3, 0
; CHECK-NEXT: xscmpuqp cr0, v2, v3
; CHECK-NEXT: isellt r3, r4, r3
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: less_qp:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -32(r1)
; CHECK-P8-NEXT: .cfi_def_cfa_offset 32
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: addis r3, r2, a_qp@toc@ha
; CHECK-P8-NEXT: addis r4, r2, b_qp@toc@ha
; CHECK-P8-NEXT: addi r3, r3, a_qp@toc@l
; CHECK-P8-NEXT: addi r4, r4, b_qp@toc@l
; CHECK-P8-NEXT: lvx v2, 0, r3
; CHECK-P8-NEXT: lvx v3, 0, r4
; CHECK-P8-NEXT: bl __ltkf2
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: rlwinm r3, r3, 1, 31, 31
; CHECK-P8-NEXT: addi r1, r1, 32
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
; Returns (a_qp < b_qp) ? 1 : 0. P9: xscmpuqp + isellt; P8: __ltkf2 libcall,
; result's sign bit extracted with rlwinm.
entry:
%0 = load fp128, fp128* @a_qp, align 16
%1 = load fp128, fp128* @b_qp, align 16
%cmp = fcmp olt fp128 %0, %1 ; ordered: false if either operand is NaN
%conv = zext i1 %cmp to i32
ret i32 %conv
}
| |
| ; Function Attrs: noinline nounwind optnone |
define dso_local signext i32 @greater_eq_qp() {
; CHECK-LABEL: greater_eq_qp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r3, r2, a_qp@toc@ha
; CHECK-NEXT: addi r3, r3, a_qp@toc@l
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-NEXT: addis r3, r2, b_qp@toc@ha
; CHECK-NEXT: addi r3, r3, b_qp@toc@l
; CHECK-NEXT: lxv v3, 0(r3)
; CHECK-NEXT: li r3, 1
; CHECK-NEXT: xscmpuqp cr0, v2, v3
; CHECK-NEXT: cror 4*cr5+lt, un, lt
; CHECK-NEXT: isel r3, 0, r3, 4*cr5+lt
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: greater_eq_qp:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -32(r1)
; CHECK-P8-NEXT: .cfi_def_cfa_offset 32
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: addis r3, r2, a_qp@toc@ha
; CHECK-P8-NEXT: addis r4, r2, b_qp@toc@ha
; CHECK-P8-NEXT: addi r3, r3, a_qp@toc@l
; CHECK-P8-NEXT: addi r4, r4, b_qp@toc@l
; CHECK-P8-NEXT: lvx v2, 0, r3
; CHECK-P8-NEXT: lvx v3, 0, r4
; CHECK-P8-NEXT: bl __gekf2
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: rlwinm r3, r3, 1, 31, 31
; CHECK-P8-NEXT: xori r3, r3, 1
; CHECK-P8-NEXT: addi r1, r1, 32
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
; Returns (a_qp >= b_qp) ? 1 : 0. oge has no single CR bit, so P9 combines
; bits with cror (ge == not (unordered or less-than)); P8 uses __gekf2.
entry:
%0 = load fp128, fp128* @a_qp, align 16
%1 = load fp128, fp128* @b_qp, align 16
%cmp = fcmp oge fp128 %0, %1 ; ordered: false if either operand is NaN
%conv = zext i1 %cmp to i32
ret i32 %conv
}
| |
| ; Function Attrs: noinline nounwind optnone |
define dso_local signext i32 @less_eq_qp() {
; CHECK-LABEL: less_eq_qp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r3, r2, a_qp@toc@ha
; CHECK-NEXT: addi r3, r3, a_qp@toc@l
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-NEXT: addis r3, r2, b_qp@toc@ha
; CHECK-NEXT: addi r3, r3, b_qp@toc@l
; CHECK-NEXT: lxv v3, 0(r3)
; CHECK-NEXT: li r3, 1
; CHECK-NEXT: xscmpuqp cr0, v2, v3
; CHECK-NEXT: cror 4*cr5+lt, un, gt
; CHECK-NEXT: isel r3, 0, r3, 4*cr5+lt
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: less_eq_qp:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -32(r1)
; CHECK-P8-NEXT: .cfi_def_cfa_offset 32
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: addis r3, r2, a_qp@toc@ha
; CHECK-P8-NEXT: addis r4, r2, b_qp@toc@ha
; CHECK-P8-NEXT: addi r3, r3, a_qp@toc@l
; CHECK-P8-NEXT: addi r4, r4, b_qp@toc@l
; CHECK-P8-NEXT: lvx v2, 0, r3
; CHECK-P8-NEXT: lvx v3, 0, r4
; CHECK-P8-NEXT: bl __lekf2
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: extsw r3, r3
; CHECK-P8-NEXT: neg r3, r3
; CHECK-P8-NEXT: rldicl r3, r3, 1, 63
; CHECK-P8-NEXT: xori r3, r3, 1
; CHECK-P8-NEXT: addi r1, r1, 32
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
; Returns (a_qp <= b_qp) ? 1 : 0. ole == not (unordered or greater-than),
; hence the cror of un/gt on P9; P8 uses the __lekf2 libcall.
entry:
%0 = load fp128, fp128* @a_qp, align 16
%1 = load fp128, fp128* @b_qp, align 16
%cmp = fcmp ole fp128 %0, %1 ; ordered: false if either operand is NaN
%conv = zext i1 %cmp to i32
ret i32 %conv
}
| |
| ; Function Attrs: noinline nounwind optnone |
define dso_local signext i32 @equal_qp() {
; CHECK-LABEL: equal_qp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r3, r2, a_qp@toc@ha
; CHECK-NEXT: li r4, 1
; CHECK-NEXT: addi r3, r3, a_qp@toc@l
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-NEXT: addis r3, r2, b_qp@toc@ha
; CHECK-NEXT: addi r3, r3, b_qp@toc@l
; CHECK-NEXT: lxv v3, 0(r3)
; CHECK-NEXT: li r3, 0
; CHECK-NEXT: xscmpuqp cr0, v2, v3
; CHECK-NEXT: iseleq r3, r4, r3
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: equal_qp:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -32(r1)
; CHECK-P8-NEXT: .cfi_def_cfa_offset 32
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: addis r3, r2, a_qp@toc@ha
; CHECK-P8-NEXT: addis r4, r2, b_qp@toc@ha
; CHECK-P8-NEXT: addi r3, r3, a_qp@toc@l
; CHECK-P8-NEXT: addi r4, r4, b_qp@toc@l
; CHECK-P8-NEXT: lvx v2, 0, r3
; CHECK-P8-NEXT: lvx v3, 0, r4
; CHECK-P8-NEXT: bl __eqkf2
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: cntlzw r3, r3
; CHECK-P8-NEXT: srwi r3, r3, 5
; CHECK-P8-NEXT: addi r1, r1, 32
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
; Returns (a_qp == b_qp) ? 1 : 0. P9: xscmpuqp + iseleq; P8: __eqkf2 libcall,
; zero-result test done with the cntlzw/srwi "is-zero" idiom.
entry:
%0 = load fp128, fp128* @a_qp, align 16
%1 = load fp128, fp128* @b_qp, align 16
%cmp = fcmp oeq fp128 %0, %1 ; ordered: false if either operand is NaN
%conv = zext i1 %cmp to i32
ret i32 %conv
}
| |
| ; Function Attrs: noinline nounwind optnone |
define dso_local signext i32 @not_greater_qp() {
; CHECK-LABEL: not_greater_qp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r3, r2, a_qp@toc@ha
; CHECK-NEXT: addi r3, r3, a_qp@toc@l
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-NEXT: addis r3, r2, b_qp@toc@ha
; CHECK-NEXT: addi r3, r3, b_qp@toc@l
; CHECK-NEXT: lxv v3, 0(r3)
; CHECK-NEXT: li r3, 1
; CHECK-NEXT: xscmpuqp cr0, v2, v3
; CHECK-NEXT: iselgt r3, 0, r3
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: not_greater_qp:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -32(r1)
; CHECK-P8-NEXT: .cfi_def_cfa_offset 32
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: addis r3, r2, a_qp@toc@ha
; CHECK-P8-NEXT: addis r4, r2, b_qp@toc@ha
; CHECK-P8-NEXT: addi r3, r3, a_qp@toc@l
; CHECK-P8-NEXT: addi r4, r4, b_qp@toc@l
; CHECK-P8-NEXT: lvx v2, 0, r3
; CHECK-P8-NEXT: lvx v3, 0, r4
; CHECK-P8-NEXT: bl __gtkf2
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: extsw r3, r3
; CHECK-P8-NEXT: neg r3, r3
; CHECK-P8-NEXT: rldicl r3, r3, 1, 63
; CHECK-P8-NEXT: xori r3, r3, 1
; CHECK-P8-NEXT: addi r1, r1, 32
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
; Logical negation of the ogt compare: returns !(a_qp > b_qp). The xor-with-
; true on the i1 folds into the select operands (iselgt r3, 0, r3) on P9 and
; an xori on P8.
entry:
%0 = load fp128, fp128* @a_qp, align 16
%1 = load fp128, fp128* @b_qp, align 16
%cmp = fcmp ogt fp128 %0, %1 ; ordered: false if either operand is NaN
%lnot = xor i1 %cmp, true
%lnot.ext = zext i1 %lnot to i32
ret i32 %lnot.ext
}
| |
| ; Function Attrs: noinline nounwind optnone |
define dso_local signext i32 @not_less_qp() {
; CHECK-LABEL: not_less_qp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r3, r2, a_qp@toc@ha
; CHECK-NEXT: addi r3, r3, a_qp@toc@l
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-NEXT: addis r3, r2, b_qp@toc@ha
; CHECK-NEXT: addi r3, r3, b_qp@toc@l
; CHECK-NEXT: lxv v3, 0(r3)
; CHECK-NEXT: li r3, 1
; CHECK-NEXT: xscmpuqp cr0, v2, v3
; CHECK-NEXT: isellt r3, 0, r3
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: not_less_qp:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -32(r1)
; CHECK-P8-NEXT: .cfi_def_cfa_offset 32
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: addis r3, r2, a_qp@toc@ha
; CHECK-P8-NEXT: addis r4, r2, b_qp@toc@ha
; CHECK-P8-NEXT: addi r3, r3, a_qp@toc@l
; CHECK-P8-NEXT: addi r4, r4, b_qp@toc@l
; CHECK-P8-NEXT: lvx v2, 0, r3
; CHECK-P8-NEXT: lvx v3, 0, r4
; CHECK-P8-NEXT: bl __ltkf2
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: rlwinm r3, r3, 1, 31, 31
; CHECK-P8-NEXT: xori r3, r3, 1
; CHECK-P8-NEXT: addi r1, r1, 32
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
; Logical negation of the olt compare: returns !(a_qp < b_qp). P9 folds the
; negation into the isellt operand order; P8 inverts the libcall result with
; an xori.
entry:
%0 = load fp128, fp128* @a_qp, align 16
%1 = load fp128, fp128* @b_qp, align 16
%cmp = fcmp olt fp128 %0, %1 ; ordered: false if either operand is NaN
%lnot = xor i1 %cmp, true
%lnot.ext = zext i1 %lnot to i32
ret i32 %lnot.ext
}
| |
| ; Function Attrs: noinline nounwind optnone |
define dso_local signext i32 @not_greater_eq_qp() {
; CHECK-LABEL: not_greater_eq_qp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r3, r2, a_qp@toc@ha
; CHECK-NEXT: addi r3, r3, a_qp@toc@l
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-NEXT: addis r3, r2, b_qp@toc@ha
; CHECK-NEXT: addi r3, r3, b_qp@toc@l
; CHECK-NEXT: lxv v3, 0(r3)
; CHECK-NEXT: li r3, 1
; CHECK-NEXT: xscmpuqp cr0, v2, v3
; CHECK-NEXT: crnor 4*cr5+lt, lt, un
; CHECK-NEXT: isel r3, 0, r3, 4*cr5+lt
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: not_greater_eq_qp:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -32(r1)
; CHECK-P8-NEXT: .cfi_def_cfa_offset 32
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: addis r3, r2, a_qp@toc@ha
; CHECK-P8-NEXT: addis r4, r2, b_qp@toc@ha
; CHECK-P8-NEXT: addi r3, r3, a_qp@toc@l
; CHECK-P8-NEXT: addi r4, r4, b_qp@toc@l
; CHECK-P8-NEXT: lvx v2, 0, r3
; CHECK-P8-NEXT: lvx v3, 0, r4
; CHECK-P8-NEXT: bl __gekf2
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: rlwinm r3, r3, 1, 31, 31
; CHECK-P8-NEXT: addi r1, r1, 32
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
; Logical negation of the oge compare: returns !(a_qp >= b_qp). P9 combines
; CR bits with crnor instead of cror; on P8 the negation folds away into the
; sign-bit extraction of the __gekf2 result (no xori needed).
entry:
%0 = load fp128, fp128* @a_qp, align 16
%1 = load fp128, fp128* @b_qp, align 16
%cmp = fcmp oge fp128 %0, %1 ; ordered: false if either operand is NaN
%lnot = xor i1 %cmp, true
%lnot.ext = zext i1 %lnot to i32
ret i32 %lnot.ext
}
| |
| ; Function Attrs: noinline nounwind optnone |
define dso_local signext i32 @not_less_eq_qp() {
; CHECK-LABEL: not_less_eq_qp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r3, r2, a_qp@toc@ha
; CHECK-NEXT: addi r3, r3, a_qp@toc@l
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-NEXT: addis r3, r2, b_qp@toc@ha
; CHECK-NEXT: addi r3, r3, b_qp@toc@l
; CHECK-NEXT: lxv v3, 0(r3)
; CHECK-NEXT: li r3, 1
; CHECK-NEXT: xscmpuqp cr0, v2, v3
; CHECK-NEXT: crnor 4*cr5+lt, gt, un
; CHECK-NEXT: isel r3, 0, r3, 4*cr5+lt
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: not_less_eq_qp:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -32(r1)
; CHECK-P8-NEXT: .cfi_def_cfa_offset 32
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: addis r3, r2, a_qp@toc@ha
; CHECK-P8-NEXT: addis r4, r2, b_qp@toc@ha
; CHECK-P8-NEXT: addi r3, r3, a_qp@toc@l
; CHECK-P8-NEXT: addi r4, r4, b_qp@toc@l
; CHECK-P8-NEXT: lvx v2, 0, r3
; CHECK-P8-NEXT: lvx v3, 0, r4
; CHECK-P8-NEXT: bl __lekf2
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: extsw r3, r3
; CHECK-P8-NEXT: neg r3, r3
; CHECK-P8-NEXT: rldicl r3, r3, 1, 63
; CHECK-P8-NEXT: addi r1, r1, 32
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
; Logical negation of the ole compare: returns !(a_qp <= b_qp). P9 uses
; crnor of gt/un; on P8 the negation folds into the extsw/neg/rldicl
; post-processing of the __lekf2 result.
entry:
%0 = load fp128, fp128* @a_qp, align 16
%1 = load fp128, fp128* @b_qp, align 16
%cmp = fcmp ole fp128 %0, %1 ; ordered: false if either operand is NaN
%lnot = xor i1 %cmp, true
%lnot.ext = zext i1 %lnot to i32
ret i32 %lnot.ext
}
| |
| ; Function Attrs: noinline nounwind optnone |
define dso_local signext i32 @not_equal_qp() {
; CHECK-LABEL: not_equal_qp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r3, r2, a_qp@toc@ha
; CHECK-NEXT: addi r3, r3, a_qp@toc@l
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-NEXT: addis r3, r2, b_qp@toc@ha
; CHECK-NEXT: addi r3, r3, b_qp@toc@l
; CHECK-NEXT: lxv v3, 0(r3)
; CHECK-NEXT: li r3, 1
; CHECK-NEXT: xscmpuqp cr0, v2, v3
; CHECK-NEXT: iseleq r3, 0, r3
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: not_equal_qp:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -32(r1)
; CHECK-P8-NEXT: .cfi_def_cfa_offset 32
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: addis r3, r2, a_qp@toc@ha
; CHECK-P8-NEXT: addis r4, r2, b_qp@toc@ha
; CHECK-P8-NEXT: addi r3, r3, a_qp@toc@l
; CHECK-P8-NEXT: addi r4, r4, b_qp@toc@l
; CHECK-P8-NEXT: lvx v2, 0, r3
; CHECK-P8-NEXT: lvx v3, 0, r4
; CHECK-P8-NEXT: bl __nekf2
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: cntlzw r3, r3
; CHECK-P8-NEXT: srwi r3, r3, 5
; CHECK-P8-NEXT: xori r3, r3, 1
; CHECK-P8-NEXT: addi r1, r1, 32
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
; Unlike the functions above, this one tests the UNORDERED une predicate
; directly (true if operands differ or either is NaN). P8 calls __nekf2 and
; maps nonzero -> 1 via cntlzw/srwi/xori.
entry:
%0 = load fp128, fp128* @a_qp, align 16
%1 = load fp128, fp128* @b_qp, align 16
%cmp = fcmp une fp128 %0, %1 ; unordered: true if either operand is NaN
%conv = zext i1 %cmp to i32
ret i32 %conv
}
| |
| ; Function Attrs: norecurse nounwind readonly |
define fp128 @greater_sel_qp() {
; CHECK-LABEL: greater_sel_qp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r3, r2, a_qp@toc@ha
; CHECK-NEXT: addi r3, r3, a_qp@toc@l
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-NEXT: addis r3, r2, b_qp@toc@ha
; CHECK-NEXT: addi r3, r3, b_qp@toc@l
; CHECK-NEXT: lxv v3, 0(r3)
; CHECK-NEXT: xscmpuqp cr0, v2, v3
; CHECK-NEXT: bgtlr cr0
; CHECK-NEXT: # %bb.1: # %entry
; CHECK-NEXT: vmr v2, v3
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: greater_sel_qp:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -80(r1)
; CHECK-P8-NEXT: .cfi_def_cfa_offset 80
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: .cfi_offset v30, -32
; CHECK-P8-NEXT: .cfi_offset v31, -16
; CHECK-P8-NEXT: li r3, 48
; CHECK-P8-NEXT: addis r4, r2, b_qp@toc@ha
; CHECK-P8-NEXT: stvx v30, r1, r3 # 16-byte Folded Spill
; CHECK-P8-NEXT: li r3, 64
; CHECK-P8-NEXT: addi r4, r4, b_qp@toc@l
; CHECK-P8-NEXT: stvx v31, r1, r3 # 16-byte Folded Spill
; CHECK-P8-NEXT: addis r3, r2, a_qp@toc@ha
; CHECK-P8-NEXT: lvx v30, 0, r4
; CHECK-P8-NEXT: addi r3, r3, a_qp@toc@l
; CHECK-P8-NEXT: lvx v31, 0, r3
; CHECK-P8-NEXT: vmr v3, v30
; CHECK-P8-NEXT: vmr v2, v31
; CHECK-P8-NEXT: bl __gtkf2
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: cmpwi r3, 0
; CHECK-P8-NEXT: bgt cr0, .LBB10_2
; CHECK-P8-NEXT: # %bb.1: # %entry
; CHECK-P8-NEXT: vmr v31, v30
; CHECK-P8-NEXT: .LBB10_2: # %entry
; CHECK-P8-NEXT: li r3, 64
; CHECK-P8-NEXT: vmr v2, v31
; CHECK-P8-NEXT: lvx v31, r1, r3 # 16-byte Folded Reload
; CHECK-P8-NEXT: li r3, 48
; CHECK-P8-NEXT: lvx v30, r1, r3 # 16-byte Folded Reload
; CHECK-P8-NEXT: addi r1, r1, 80
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
; Select form: returns max-like value (a_qp > b_qp ? a_qp : b_qp). P9 lowers
; the select to a conditional blr; P8 must keep both operands live across the
; __gtkf2 call, hence the v30/v31 callee-saved spills.
entry:
%0 = load fp128, fp128* @a_qp, align 16
%1 = load fp128, fp128* @b_qp, align 16
%cmp = fcmp ogt fp128 %0, %1 ; ordered: false if either operand is NaN
%cond = select i1 %cmp, fp128 %0, fp128 %1
ret fp128 %cond
}
| |
| ; Function Attrs: noinline nounwind optnone |
define fp128 @less_sel_qp() {
; CHECK-LABEL: less_sel_qp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r3, r2, a_qp@toc@ha
; CHECK-NEXT: addi r3, r3, a_qp@toc@l
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-NEXT: addis r3, r2, b_qp@toc@ha
; CHECK-NEXT: addi r3, r3, b_qp@toc@l
; CHECK-NEXT: lxv v3, 0(r3)
; CHECK-NEXT: xscmpuqp cr0, v2, v3
; CHECK-NEXT: bltlr cr0
; CHECK-NEXT: # %bb.1: # %entry
; CHECK-NEXT: vmr v2, v3
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: less_sel_qp:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -80(r1)
; CHECK-P8-NEXT: .cfi_def_cfa_offset 80
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: .cfi_offset v30, -32
; CHECK-P8-NEXT: .cfi_offset v31, -16
; CHECK-P8-NEXT: li r3, 48
; CHECK-P8-NEXT: addis r4, r2, b_qp@toc@ha
; CHECK-P8-NEXT: stvx v30, r1, r3 # 16-byte Folded Spill
; CHECK-P8-NEXT: li r3, 64
; CHECK-P8-NEXT: addi r4, r4, b_qp@toc@l
; CHECK-P8-NEXT: stvx v31, r1, r3 # 16-byte Folded Spill
; CHECK-P8-NEXT: addis r3, r2, a_qp@toc@ha
; CHECK-P8-NEXT: lvx v30, 0, r4
; CHECK-P8-NEXT: addi r3, r3, a_qp@toc@l
; CHECK-P8-NEXT: lvx v31, 0, r3
; CHECK-P8-NEXT: vmr v3, v30
; CHECK-P8-NEXT: vmr v2, v31
; CHECK-P8-NEXT: bl __ltkf2
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: cmpwi r3, 0
; CHECK-P8-NEXT: blt cr0, .LBB11_2
; CHECK-P8-NEXT: # %bb.1: # %entry
; CHECK-P8-NEXT: vmr v31, v30
; CHECK-P8-NEXT: .LBB11_2: # %entry
; CHECK-P8-NEXT: li r3, 64
; CHECK-P8-NEXT: vmr v2, v31
; CHECK-P8-NEXT: lvx v31, r1, r3 # 16-byte Folded Reload
; CHECK-P8-NEXT: li r3, 48
; CHECK-P8-NEXT: lvx v30, r1, r3 # 16-byte Folded Reload
; CHECK-P8-NEXT: addi r1, r1, 80
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
; Select form: returns min-like value (a_qp < b_qp ? a_qp : b_qp). P9 lowers
; to a conditional bltlr; P8 spills v30/v31 so both operands survive the
; __ltkf2 call.
entry:
%0 = load fp128, fp128* @a_qp, align 16
%1 = load fp128, fp128* @b_qp, align 16
%cmp = fcmp olt fp128 %0, %1 ; ordered: false if either operand is NaN
%cond = select i1 %cmp, fp128 %0, fp128 %1
ret fp128 %cond
}
| |
| ; Function Attrs: noinline nounwind optnone |
define fp128 @greater_eq_sel_qp() {
; CHECK-LABEL: greater_eq_sel_qp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r3, r2, a_qp@toc@ha
; CHECK-NEXT: addi r3, r3, a_qp@toc@l
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-NEXT: addis r3, r2, b_qp@toc@ha
; CHECK-NEXT: addi r3, r3, b_qp@toc@l
; CHECK-NEXT: lxv v3, 0(r3)
; CHECK-NEXT: xscmpuqp cr0, v2, v3
; CHECK-NEXT: crnor 4*cr5+lt, un, lt
; CHECK-NEXT: bclr 12, 4*cr5+lt, 0
; CHECK-NEXT: # %bb.1: # %entry
; CHECK-NEXT: vmr v2, v3
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: greater_eq_sel_qp:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -80(r1)
; CHECK-P8-NEXT: .cfi_def_cfa_offset 80
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: .cfi_offset v30, -32
; CHECK-P8-NEXT: .cfi_offset v31, -16
; CHECK-P8-NEXT: li r3, 48
; CHECK-P8-NEXT: addis r4, r2, b_qp@toc@ha
; CHECK-P8-NEXT: stvx v30, r1, r3 # 16-byte Folded Spill
; CHECK-P8-NEXT: li r3, 64
; CHECK-P8-NEXT: addi r4, r4, b_qp@toc@l
; CHECK-P8-NEXT: stvx v31, r1, r3 # 16-byte Folded Spill
; CHECK-P8-NEXT: addis r3, r2, a_qp@toc@ha
; CHECK-P8-NEXT: lvx v30, 0, r4
; CHECK-P8-NEXT: addi r3, r3, a_qp@toc@l
; CHECK-P8-NEXT: lvx v31, 0, r3
; CHECK-P8-NEXT: vmr v3, v30
; CHECK-P8-NEXT: vmr v2, v31
; CHECK-P8-NEXT: bl __gekf2
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: cmpwi r3, -1
; CHECK-P8-NEXT: bgt cr0, .LBB12_2
; CHECK-P8-NEXT: # %bb.1: # %entry
; CHECK-P8-NEXT: vmr v31, v30
; CHECK-P8-NEXT: .LBB12_2: # %entry
; CHECK-P8-NEXT: li r3, 64
; CHECK-P8-NEXT: vmr v2, v31
; CHECK-P8-NEXT: lvx v31, r1, r3 # 16-byte Folded Reload
; CHECK-P8-NEXT: li r3, 48
; CHECK-P8-NEXT: lvx v30, r1, r3 # 16-byte Folded Reload
; CHECK-P8-NEXT: addi r1, r1, 80
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
; Select on oge: returns (a_qp >= b_qp ? a_qp : b_qp). P9 builds the ge
; condition with crnor and a generic conditional return (bclr); P8 tests the
; __gekf2 result with "> -1" (i.e. >= 0).
entry:
%0 = load fp128, fp128* @a_qp, align 16
%1 = load fp128, fp128* @b_qp, align 16
%cmp = fcmp oge fp128 %0, %1 ; ordered: false if either operand is NaN
%cond = select i1 %cmp, fp128 %0, fp128 %1
ret fp128 %cond
}
| |
| ; Function Attrs: noinline nounwind optnone |
define fp128 @less_eq_sel_qp() {
; CHECK-LABEL: less_eq_sel_qp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r3, r2, a_qp@toc@ha
; CHECK-NEXT: addi r3, r3, a_qp@toc@l
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-NEXT: addis r3, r2, b_qp@toc@ha
; CHECK-NEXT: addi r3, r3, b_qp@toc@l
; CHECK-NEXT: lxv v3, 0(r3)
; CHECK-NEXT: xscmpuqp cr0, v2, v3
; CHECK-NEXT: crnor 4*cr5+lt, un, gt
; CHECK-NEXT: bclr 12, 4*cr5+lt, 0
; CHECK-NEXT: # %bb.1: # %entry
; CHECK-NEXT: vmr v2, v3
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: less_eq_sel_qp:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -80(r1)
; CHECK-P8-NEXT: .cfi_def_cfa_offset 80
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: .cfi_offset v30, -32
; CHECK-P8-NEXT: .cfi_offset v31, -16
; CHECK-P8-NEXT: li r3, 48
; CHECK-P8-NEXT: addis r4, r2, b_qp@toc@ha
; CHECK-P8-NEXT: stvx v30, r1, r3 # 16-byte Folded Spill
; CHECK-P8-NEXT: li r3, 64
; CHECK-P8-NEXT: addi r4, r4, b_qp@toc@l
; CHECK-P8-NEXT: stvx v31, r1, r3 # 16-byte Folded Spill
; CHECK-P8-NEXT: addis r3, r2, a_qp@toc@ha
; CHECK-P8-NEXT: lvx v30, 0, r4
; CHECK-P8-NEXT: addi r3, r3, a_qp@toc@l
; CHECK-P8-NEXT: lvx v31, 0, r3
; CHECK-P8-NEXT: vmr v3, v30
; CHECK-P8-NEXT: vmr v2, v31
; CHECK-P8-NEXT: bl __lekf2
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: cmpwi r3, 1
; CHECK-P8-NEXT: blt cr0, .LBB13_2
; CHECK-P8-NEXT: # %bb.1: # %entry
; CHECK-P8-NEXT: vmr v31, v30
; CHECK-P8-NEXT: .LBB13_2: # %entry
; CHECK-P8-NEXT: li r3, 64
; CHECK-P8-NEXT: vmr v2, v31
; CHECK-P8-NEXT: lvx v31, r1, r3 # 16-byte Folded Reload
; CHECK-P8-NEXT: li r3, 48
; CHECK-P8-NEXT: lvx v30, r1, r3 # 16-byte Folded Reload
; CHECK-P8-NEXT: addi r1, r1, 80
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
; Select on ole: returns (a_qp <= b_qp ? a_qp : b_qp). P9 builds the le
; condition with crnor of un/gt plus bclr; P8 tests the __lekf2 result with
; "< 1" (i.e. <= 0).
entry:
%0 = load fp128, fp128* @a_qp, align 16
%1 = load fp128, fp128* @b_qp, align 16
%cmp = fcmp ole fp128 %0, %1 ; ordered: false if either operand is NaN
%cond = select i1 %cmp, fp128 %0, fp128 %1
ret fp128 %cond
}
| |
| ; Function Attrs: noinline nounwind optnone |
define fp128 @equal_sel_qp() {
; CHECK-LABEL: equal_sel_qp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r3, r2, a_qp@toc@ha
; CHECK-NEXT: addi r3, r3, a_qp@toc@l
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-NEXT: addis r3, r2, b_qp@toc@ha
; CHECK-NEXT: addi r3, r3, b_qp@toc@l
; CHECK-NEXT: lxv v3, 0(r3)
; CHECK-NEXT: xscmpuqp cr0, v2, v3
; CHECK-NEXT: beqlr cr0
; CHECK-NEXT: # %bb.1: # %entry
; CHECK-NEXT: vmr v2, v3
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: equal_sel_qp:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -80(r1)
; CHECK-P8-NEXT: .cfi_def_cfa_offset 80
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: .cfi_offset v30, -32
; CHECK-P8-NEXT: .cfi_offset v31, -16
; CHECK-P8-NEXT: li r3, 48
; CHECK-P8-NEXT: addis r4, r2, b_qp@toc@ha
; CHECK-P8-NEXT: stvx v30, r1, r3 # 16-byte Folded Spill
; CHECK-P8-NEXT: li r3, 64
; CHECK-P8-NEXT: addi r4, r4, b_qp@toc@l
; CHECK-P8-NEXT: stvx v31, r1, r3 # 16-byte Folded Spill
; CHECK-P8-NEXT: addis r3, r2, a_qp@toc@ha
; CHECK-P8-NEXT: lvx v30, 0, r4
; CHECK-P8-NEXT: addi r3, r3, a_qp@toc@l
; CHECK-P8-NEXT: lvx v31, 0, r3
; CHECK-P8-NEXT: vmr v3, v30
; CHECK-P8-NEXT: vmr v2, v31
; CHECK-P8-NEXT: bl __eqkf2
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: cmplwi r3, 0
; CHECK-P8-NEXT: beq cr0, .LBB14_2
; CHECK-P8-NEXT: # %bb.1: # %entry
; CHECK-P8-NEXT: vmr v31, v30
; CHECK-P8-NEXT: .LBB14_2: # %entry
; CHECK-P8-NEXT: li r3, 64
; CHECK-P8-NEXT: vmr v2, v31
; CHECK-P8-NEXT: lvx v31, r1, r3 # 16-byte Folded Reload
; CHECK-P8-NEXT: li r3, 48
; CHECK-P8-NEXT: lvx v30, r1, r3 # 16-byte Folded Reload
; CHECK-P8-NEXT: addi r1, r1, 80
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
; Select on oeq: returns (a_qp == b_qp ? a_qp : b_qp). P9 lowers to a
; conditional beqlr; P8 compares the __eqkf2 result against zero
; (zero means equal).
entry:
%0 = load fp128, fp128* @a_qp, align 16
%1 = load fp128, fp128* @b_qp, align 16
%cmp = fcmp oeq fp128 %0, %1 ; ordered: false if either operand is NaN
%cond = select i1 %cmp, fp128 %0, fp128 %1
ret fp128 %cond
}