| /* RTL simplification functions for GNU compiler. |
| Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998, |
| 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc. |
| |
| This file is part of GCC. |
| |
| GCC is free software; you can redistribute it and/or modify it under |
| the terms of the GNU General Public License as published by the Free |
| Software Foundation; either version 2, or (at your option) any later |
| version. |
| |
| GCC is distributed in the hope that it will be useful, but WITHOUT ANY |
| WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
| for more details. |
| |
| You should have received a copy of the GNU General Public License |
| along with GCC; see the file COPYING. If not, write to the Free |
| Software Foundation, 59 Temple Place - Suite 330, Boston, MA |
| 02111-1307, USA. */ |
| |
| |
| #include "config.h" |
| #include "system.h" |
| #include "coretypes.h" |
| #include "tm.h" |
| #include "rtl.h" |
| #include "tree.h" |
| #include "tm_p.h" |
| #include "regs.h" |
| #include "hard-reg-set.h" |
| #include "flags.h" |
| #include "real.h" |
| #include "insn-config.h" |
| #include "recog.h" |
| #include "function.h" |
| #include "expr.h" |
| #include "toplev.h" |
| #include "output.h" |
| #include "ggc.h" |
| #include "target.h" |
| |
| /* Simplification and canonicalization of RTL. */ |
| |
| /* Much code operates on (low, high) pairs; the low value is an |
| unsigned wide int, the high value a signed wide int. We |
| occasionally need to sign extend from low to high as if low were a |
| signed wide int. */ |
| #define HWI_SIGN_EXTEND(low) \ |
| ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0)) |
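| /* For example, if LOW holds a value whose HOST_WIDE_INT interpretation is |
| negative (its top bit is set), HWI_SIGN_EXTEND yields -1 for the high |
| word; otherwise it yields 0. */ |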
| |
| static rtx neg_const_int (enum machine_mode, rtx); |
| static bool plus_minus_operand_p (rtx); |
| static int simplify_plus_minus_op_data_cmp (const void *, const void *); |
| static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, |
| rtx, int); |
| static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode, |
| unsigned int); |
| static rtx simplify_associative_operation (enum rtx_code, enum machine_mode, |
| rtx, rtx); |
| static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode, |
| enum machine_mode, rtx, rtx); |
| |
| /* Negate a CONST_INT rtx, truncating (because a conversion from a |
| maximally negative number can overflow). */ |
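| /* For example, in QImode, negating (const_int -128) produces 128, which |
| gen_int_mode truncates back into QImode as (const_int -128). */ |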
| static rtx |
| neg_const_int (enum machine_mode mode, rtx i) |
| { |
| return gen_int_mode (- INTVAL (i), mode); |
| } |
| |
| /* Test whether expression X is an immediate constant that represents |
| the most significant bit of machine mode MODE. */ |
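| /* For example, in QImode this accepts a CONST_INT whose low eight bits |
| are 0x80; for widths greater than HOST_BITS_PER_WIDE_INT the constant |
| must be a CONST_DOUBLE with a zero low word and the sign bit set in |
| CONST_DOUBLE_HIGH. */ |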
| |
| bool |
| mode_signbit_p (enum machine_mode mode, rtx x) |
| { |
| unsigned HOST_WIDE_INT val; |
| unsigned int width; |
| |
| if (GET_MODE_CLASS (mode) != MODE_INT) |
| return false; |
| |
| width = GET_MODE_BITSIZE (mode); |
| if (width == 0) |
| return false; |
| |
| if (width <= HOST_BITS_PER_WIDE_INT |
| && GET_CODE (x) == CONST_INT) |
| val = INTVAL (x); |
| else if (width <= 2 * HOST_BITS_PER_WIDE_INT |
| && GET_CODE (x) == CONST_DOUBLE |
| && CONST_DOUBLE_LOW (x) == 0) |
| { |
| val = CONST_DOUBLE_HIGH (x); |
| width -= HOST_BITS_PER_WIDE_INT; |
| } |
| else |
| return false; |
| |
| if (width < HOST_BITS_PER_WIDE_INT) |
| val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1; |
| return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1)); |
| } |
| |
| /* Make a binary operation by properly ordering the operands and |
| seeing if the expression folds. */ |
| |
| rtx |
| simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0, |
| rtx op1) |
| { |
| rtx tem; |
| |
| /* Put complex operands first and constants second if commutative. */ |
| if (GET_RTX_CLASS (code) == RTX_COMM_ARITH |
| && swap_commutative_operands_p (op0, op1)) |
| tem = op0, op0 = op1, op1 = tem; |
| |
| /* If this simplifies, do it. */ |
| tem = simplify_binary_operation (code, mode, op0, op1); |
| if (tem) |
| return tem; |
| |
| /* Handle addition and subtraction specially. Otherwise, just form |
| the operation. */ |
| |
| if (code == PLUS || code == MINUS) |
| { |
| tem = simplify_plus_minus (code, mode, op0, op1, 1); |
| if (tem) |
| return tem; |
| } |
| |
| return gen_rtx_fmt_ee (code, mode, op0, op1); |
| } |
| |
| /* If X is a MEM referencing the constant pool, return the real value. |
| Otherwise return X. */ |
| rtx |
| avoid_constant_pool_reference (rtx x) |
| { |
| rtx c, tmp, addr; |
| enum machine_mode cmode; |
| |
| switch (GET_CODE (x)) |
| { |
| case MEM: |
| break; |
| |
| case FLOAT_EXTEND: |
| /* Handle float extensions of constant pool references. */ |
| tmp = XEXP (x, 0); |
| c = avoid_constant_pool_reference (tmp); |
| if (c != tmp && GET_CODE (c) == CONST_DOUBLE) |
| { |
| REAL_VALUE_TYPE d; |
| |
| REAL_VALUE_FROM_CONST_DOUBLE (d, c); |
| return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x)); |
| } |
| return x; |
| |
| default: |
| return x; |
| } |
| |
| addr = XEXP (x, 0); |
| |
| /* Call target hook to avoid the effects of -fpic etc.... */ |
| addr = targetm.delegitimize_address (addr); |
| |
| if (GET_CODE (addr) == LO_SUM) |
| addr = XEXP (addr, 1); |
| |
| if (GET_CODE (addr) != SYMBOL_REF |
| || ! CONSTANT_POOL_ADDRESS_P (addr)) |
| return x; |
| |
| c = get_pool_constant (addr); |
| cmode = get_pool_mode (addr); |
| |
| /* If we're accessing the constant in a different mode than it was |
| originally stored, attempt to fix that up via subreg simplifications. |
| If that fails we have no choice but to return the original memory. */ |
| if (cmode != GET_MODE (x)) |
| { |
| c = simplify_subreg (GET_MODE (x), c, cmode, 0); |
| return c ? c : x; |
| } |
| |
| return c; |
| } |
| |
| /* APPLE LOCAL begin mainline 2005-09-07 */ |
| /* Return true if X is a MEM referencing the constant pool. */ |
| |
| bool |
| constant_pool_reference_p (rtx x) |
| { |
| return avoid_constant_pool_reference (x) != x; |
| } |
| /* APPLE LOCAL end mainline 2005-09-07 */ |
| |
| |
| /* Make a unary operation by first seeing if it folds and otherwise making |
| the specified operation. */ |
| |
| rtx |
| simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op, |
| enum machine_mode op_mode) |
| { |
| rtx tem; |
| |
| /* If this simplifies, use it. */ |
| if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0) |
| return tem; |
| |
| return gen_rtx_fmt_e (code, mode, op); |
| } |
| |
| /* Likewise for ternary operations. */ |
| |
| rtx |
| simplify_gen_ternary (enum rtx_code code, enum machine_mode mode, |
| enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2) |
| { |
| rtx tem; |
| |
| /* If this simplifies, use it. */ |
| if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode, |
| op0, op1, op2))) |
| return tem; |
| |
| return gen_rtx_fmt_eee (code, mode, op0, op1, op2); |
| } |
| |
| /* Likewise, for relational operations. |
| CMP_MODE specifies the mode in which the comparison is done. */ |
| |
| rtx |
| simplify_gen_relational (enum rtx_code code, enum machine_mode mode, |
| enum machine_mode cmp_mode, rtx op0, rtx op1) |
| { |
| rtx tem; |
| |
| if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode, |
| op0, op1))) |
| return tem; |
| |
| return gen_rtx_fmt_ee (code, mode, op0, op1); |
| } |
| |
| /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the |
| resulting RTX. Return a new RTX which is as simplified as possible. */ |
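| /* As an illustration (the rtxes here are hypothetical), substituting |
| (const_int 4) for (reg R) in (plus:SI (reg R) (const_int 3)) recurses |
| into the PLUS and lets simplify_gen_binary fold the result down to |
| (const_int 7). */ |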
| |
| rtx |
| simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx) |
| { |
| enum rtx_code code = GET_CODE (x); |
| enum machine_mode mode = GET_MODE (x); |
| enum machine_mode op_mode; |
| rtx op0, op1, op2; |
| |
| /* If X is OLD_RTX, return NEW_RTX. Otherwise, if this is an expression, try |
| to build a new expression substituting recursively. If we can't do |
| anything, return our input. */ |
| |
| if (x == old_rtx) |
| return new_rtx; |
| |
| switch (GET_RTX_CLASS (code)) |
| { |
| case RTX_UNARY: |
| op0 = XEXP (x, 0); |
| op_mode = GET_MODE (op0); |
| op0 = simplify_replace_rtx (op0, old_rtx, new_rtx); |
| if (op0 == XEXP (x, 0)) |
| return x; |
| return simplify_gen_unary (code, mode, op0, op_mode); |
| |
| case RTX_BIN_ARITH: |
| case RTX_COMM_ARITH: |
| op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx); |
| op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx); |
| if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1)) |
| return x; |
| return simplify_gen_binary (code, mode, op0, op1); |
| |
| case RTX_COMPARE: |
| case RTX_COMM_COMPARE: |
| op0 = XEXP (x, 0); |
| op1 = XEXP (x, 1); |
| op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1); |
| op0 = simplify_replace_rtx (op0, old_rtx, new_rtx); |
| op1 = simplify_replace_rtx (op1, old_rtx, new_rtx); |
| if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1)) |
| return x; |
| return simplify_gen_relational (code, mode, op_mode, op0, op1); |
| |
| case RTX_TERNARY: |
| case RTX_BITFIELD_OPS: |
| op0 = XEXP (x, 0); |
| op_mode = GET_MODE (op0); |
| op0 = simplify_replace_rtx (op0, old_rtx, new_rtx); |
| op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx); |
| op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx); |
| if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2)) |
| return x; |
| if (op_mode == VOIDmode) |
| op_mode = GET_MODE (op0); |
| return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2); |
| |
| case RTX_EXTRA: |
| /* The only case we try to handle is a SUBREG. */ |
| if (code == SUBREG) |
| { |
| op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx); |
| if (op0 == SUBREG_REG (x)) |
| return x; |
| op0 = simplify_gen_subreg (GET_MODE (x), op0, |
| GET_MODE (SUBREG_REG (x)), |
| SUBREG_BYTE (x)); |
| return op0 ? op0 : x; |
| } |
| break; |
| |
| case RTX_OBJ: |
| if (code == MEM) |
| { |
| op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx); |
| if (op0 == XEXP (x, 0)) |
| return x; |
| return replace_equiv_address_nv (x, op0); |
| } |
| else if (code == LO_SUM) |
| { |
| op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx); |
| op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx); |
| |
| /* (lo_sum (high x) x) -> x */ |
| if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1)) |
| return op1; |
| |
| if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1)) |
| return x; |
| return gen_rtx_LO_SUM (mode, op0, op1); |
| } |
| else if (code == REG) |
| { |
| if (rtx_equal_p (x, old_rtx)) |
| return new_rtx; |
| } |
| break; |
| |
| default: |
| break; |
| } |
| return x; |
| } |
| |
| /* Try to simplify a unary operation CODE whose output mode is to be |
| MODE with input operand OP whose mode was originally OP_MODE. |
| Return zero if no simplification can be made. */ |
| rtx |
| simplify_unary_operation (enum rtx_code code, enum machine_mode mode, |
| rtx op, enum machine_mode op_mode) |
| { |
| unsigned int width = GET_MODE_BITSIZE (mode); |
| rtx trueop = avoid_constant_pool_reference (op); |
| |
| if (code == VEC_DUPLICATE) |
| { |
| gcc_assert (VECTOR_MODE_P (mode)); |
| if (GET_MODE (trueop) != VOIDmode) |
| { |
| if (!VECTOR_MODE_P (GET_MODE (trueop))) |
| gcc_assert (GET_MODE_INNER (mode) == GET_MODE (trueop)); |
| else |
| gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER |
| (GET_MODE (trueop))); |
| } |
| if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE |
| || GET_CODE (trueop) == CONST_VECTOR) |
| { |
| int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode)); |
| unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size); |
| rtvec v = rtvec_alloc (n_elts); |
| unsigned int i; |
| |
| if (GET_CODE (trueop) != CONST_VECTOR) |
| for (i = 0; i < n_elts; i++) |
| RTVEC_ELT (v, i) = trueop; |
| else |
| { |
| enum machine_mode inmode = GET_MODE (trueop); |
| int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode)); |
| unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size); |
| |
| gcc_assert (in_n_elts < n_elts); |
| gcc_assert ((n_elts % in_n_elts) == 0); |
| for (i = 0; i < n_elts; i++) |
| RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts); |
| } |
| return gen_rtx_CONST_VECTOR (mode, v); |
| } |
| } |
| else if (GET_CODE (op) == CONST) |
| return simplify_unary_operation (code, mode, XEXP (op, 0), op_mode); |
| |
| if (VECTOR_MODE_P (mode) && GET_CODE (trueop) == CONST_VECTOR) |
| { |
| int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode)); |
| unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size); |
| enum machine_mode opmode = GET_MODE (trueop); |
| int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode)); |
| unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size); |
| rtvec v = rtvec_alloc (n_elts); |
| unsigned int i; |
| |
| gcc_assert (op_n_elts == n_elts); |
| for (i = 0; i < n_elts; i++) |
| { |
| rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode), |
| CONST_VECTOR_ELT (trueop, i), |
| GET_MODE_INNER (opmode)); |
| if (!x) |
| return 0; |
| RTVEC_ELT (v, i) = x; |
| } |
| return gen_rtx_CONST_VECTOR (mode, v); |
| } |
| |
| /* The order of these tests is critical so that, for example, we don't |
| check the wrong mode (input vs. output) for a conversion operation, |
| such as FIX. At some point, this should be simplified. */ |
| |
| if (code == FLOAT && GET_MODE (trueop) == VOIDmode |
| && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT)) |
| { |
| HOST_WIDE_INT hv, lv; |
| REAL_VALUE_TYPE d; |
| |
| if (GET_CODE (trueop) == CONST_INT) |
| lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv); |
| else |
| lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop); |
| |
| REAL_VALUE_FROM_INT (d, lv, hv, mode); |
| d = real_value_truncate (mode, d); |
| return CONST_DOUBLE_FROM_REAL_VALUE (d, mode); |
| } |
| else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode |
| && (GET_CODE (trueop) == CONST_DOUBLE |
| || GET_CODE (trueop) == CONST_INT)) |
| { |
| HOST_WIDE_INT hv, lv; |
| REAL_VALUE_TYPE d; |
| |
| if (GET_CODE (trueop) == CONST_INT) |
| lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv); |
| else |
| lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop); |
| |
| if (op_mode == VOIDmode) |
| { |
| /* We don't know how to interpret negative-looking numbers in |
| this case, so don't try to fold those. */ |
| if (hv < 0) |
| return 0; |
| } |
| else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2) |
| ; |
| else |
| hv = 0, lv &= GET_MODE_MASK (op_mode); |
| |
| REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode); |
| d = real_value_truncate (mode, d); |
| return CONST_DOUBLE_FROM_REAL_VALUE (d, mode); |
| } |
| |
| if (GET_CODE (trueop) == CONST_INT |
| && width <= HOST_BITS_PER_WIDE_INT && width > 0) |
| { |
| HOST_WIDE_INT arg0 = INTVAL (trueop); |
| HOST_WIDE_INT val; |
| |
| switch (code) |
| { |
| case NOT: |
| val = ~ arg0; |
| break; |
| |
| case NEG: |
| val = - arg0; |
| break; |
| |
| case ABS: |
| val = (arg0 >= 0 ? arg0 : - arg0); |
| break; |
| |
| case FFS: |
| /* Don't use ffs here. Instead, get low order bit and then its |
| number. If arg0 is zero, this will return 0, as desired. */ |
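| /* For example, arg0 == 0x28: arg0 & -arg0 isolates the lowest set bit |
| (0x8), exact_log2 gives 3, and adding 1 yields an FFS value of 4. */ |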
| arg0 &= GET_MODE_MASK (mode); |
| val = exact_log2 (arg0 & (- arg0)) + 1; |
| break; |
| |
| case CLZ: |
| arg0 &= GET_MODE_MASK (mode); |
| if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val)) |
| ; |
| else |
| val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1; |
| break; |
| |
| case CTZ: |
| arg0 &= GET_MODE_MASK (mode); |
| if (arg0 == 0) |
| { |
| /* Even if the value at zero is undefined, we have to come |
| up with some replacement. Seems good enough. */ |
| if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val)) |
| val = GET_MODE_BITSIZE (mode); |
| } |
| else |
| val = exact_log2 (arg0 & -arg0); |
| break; |
| |
| case POPCOUNT: |
| arg0 &= GET_MODE_MASK (mode); |
| val = 0; |
| while (arg0) |
| val++, arg0 &= arg0 - 1; |
| break; |
| |
| case PARITY: |
| arg0 &= GET_MODE_MASK (mode); |
| val = 0; |
| while (arg0) |
| val++, arg0 &= arg0 - 1; |
| val &= 1; |
| break; |
| |
| case TRUNCATE: |
| val = arg0; |
| break; |
| |
| case ZERO_EXTEND: |
| /* When zero-extending a CONST_INT, we need to know its |
| original mode. */ |
| gcc_assert (op_mode != VOIDmode); |
| if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT) |
| { |
| /* If we were really extending the mode, |
| we would have to distinguish between zero-extension |
| and sign-extension. */ |
| gcc_assert (width == GET_MODE_BITSIZE (op_mode)); |
| val = arg0; |
| } |
| else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT) |
| val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode)); |
| else |
| return 0; |
| break; |
| |
| case SIGN_EXTEND: |
| if (op_mode == VOIDmode) |
| op_mode = mode; |
| if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT) |
| { |
| /* If we were really extending the mode, |
| we would have to distinguish between zero-extension |
| and sign-extension. */ |
| gcc_assert (width == GET_MODE_BITSIZE (op_mode)); |
| val = arg0; |
| } |
| else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT) |
| { |
| val |
| = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode)); |
| if (val |
| & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1))) |
| val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode); |
| } |
| else |
| return 0; |
| break; |
| |
| case SQRT: |
| case FLOAT_EXTEND: |
| case FLOAT_TRUNCATE: |
| case SS_TRUNCATE: |
| case US_TRUNCATE: |
| return 0; |
| |
| default: |
| gcc_unreachable (); |
| } |
| |
| val = trunc_int_for_mode (val, mode); |
| |
| return GEN_INT (val); |
| } |
| |
| /* We can do some operations on integer CONST_DOUBLEs. Also allow |
| for a DImode operation on a CONST_INT. */ |
| else if (GET_MODE (trueop) == VOIDmode |
| && width <= HOST_BITS_PER_WIDE_INT * 2 |
| && (GET_CODE (trueop) == CONST_DOUBLE |
| || GET_CODE (trueop) == CONST_INT)) |
| { |
| unsigned HOST_WIDE_INT l1, lv; |
| HOST_WIDE_INT h1, hv; |
| |
| if (GET_CODE (trueop) == CONST_DOUBLE) |
| l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop); |
| else |
| l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1); |
| |
| switch (code) |
| { |
| case NOT: |
| lv = ~ l1; |
| hv = ~ h1; |
| break; |
| |
| case NEG: |
| neg_double (l1, h1, &lv, &hv); |
| break; |
| |
| case ABS: |
| if (h1 < 0) |
| neg_double (l1, h1, &lv, &hv); |
| else |
| lv = l1, hv = h1; |
| break; |
| |
| case FFS: |
| hv = 0; |
| if (l1 == 0) |
| { |
| if (h1 == 0) |
| lv = 0; |
| else |
| lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1; |
| } |
| else |
| lv = exact_log2 (l1 & -l1) + 1; |
| break; |
| |
| case CLZ: |
| hv = 0; |
| if (h1 != 0) |
| lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1 |
| - HOST_BITS_PER_WIDE_INT; |
| else if (l1 != 0) |
| lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1; |
| else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv)) |
| lv = GET_MODE_BITSIZE (mode); |
| break; |
| |
| case CTZ: |
| hv = 0; |
| if (l1 != 0) |
| lv = exact_log2 (l1 & -l1); |
| else if (h1 != 0) |
| lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1); |
| else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv)) |
| lv = GET_MODE_BITSIZE (mode); |
| break; |
| |
| case POPCOUNT: |
| hv = 0; |
| lv = 0; |
| while (l1) |
| lv++, l1 &= l1 - 1; |
| while (h1) |
| lv++, h1 &= h1 - 1; |
| break; |
| |
| case PARITY: |
| hv = 0; |
| lv = 0; |
| while (l1) |
| lv++, l1 &= l1 - 1; |
| while (h1) |
| lv++, h1 &= h1 - 1; |
| lv &= 1; |
| break; |
| |
| case TRUNCATE: |
| /* This is just a change-of-mode, so do nothing. */ |
| lv = l1, hv = h1; |
| break; |
| |
| case ZERO_EXTEND: |
| gcc_assert (op_mode != VOIDmode); |
| |
| if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT) |
| return 0; |
| |
| hv = 0; |
| lv = l1 & GET_MODE_MASK (op_mode); |
| break; |
| |
| case SIGN_EXTEND: |
| if (op_mode == VOIDmode |
| || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT) |
| return 0; |
| else |
| { |
| lv = l1 & GET_MODE_MASK (op_mode); |
| if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT |
| && (lv & ((HOST_WIDE_INT) 1 |
| << (GET_MODE_BITSIZE (op_mode) - 1))) != 0) |
| lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode); |
| |
| hv = HWI_SIGN_EXTEND (lv); |
| } |
| break; |
| |
| case SQRT: |
| return 0; |
| |
| default: |
| return 0; |
| } |
| |
| return immed_double_const (lv, hv, mode); |
| } |
| |
| else if (GET_CODE (trueop) == CONST_DOUBLE |
| && GET_MODE_CLASS (mode) == MODE_FLOAT) |
| { |
| REAL_VALUE_TYPE d, t; |
| REAL_VALUE_FROM_CONST_DOUBLE (d, trueop); |
| |
| switch (code) |
| { |
| case SQRT: |
| if (HONOR_SNANS (mode) && real_isnan (&d)) |
| return 0; |
| real_sqrt (&t, mode, &d); |
| d = t; |
| break; |
| case ABS: |
| d = REAL_VALUE_ABS (d); |
| break; |
| case NEG: |
| d = REAL_VALUE_NEGATE (d); |
| break; |
| case FLOAT_TRUNCATE: |
| d = real_value_truncate (mode, d); |
| break; |
| case FLOAT_EXTEND: |
| /* All this does is change the mode. */ |
| break; |
| case FIX: |
| real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL); |
| break; |
| case NOT: |
| { |
| long tmp[4]; |
| int i; |
| |
| real_to_target (tmp, &d, GET_MODE (trueop)); |
| for (i = 0; i < 4; i++) |
| tmp[i] = ~tmp[i]; |
| real_from_target (&d, tmp, mode); |
| /* APPLE LOCAL mainline 2005-03-17 4050475 */ |
| break; |
| } |
| break; |
| default: |
| gcc_unreachable (); |
| } |
| return CONST_DOUBLE_FROM_REAL_VALUE (d, mode); |
| } |
| |
| else if (GET_CODE (trueop) == CONST_DOUBLE |
| && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT |
| && GET_MODE_CLASS (mode) == MODE_INT |
| && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0) |
| { |
| /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX |
| operators are intentionally left unspecified (to ease implementation |
| by target backends), for consistency, this routine implements the |
| same semantics for constant folding as used by the middle-end. */ |
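| /* For example, folding (fix:SI (const_double:DF 1.0e10)) saturates to |
| the largest SImode value 0x7fffffff rather than wrapping. */ |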
| |
| HOST_WIDE_INT xh, xl, th, tl; |
| REAL_VALUE_TYPE x, t; |
| REAL_VALUE_FROM_CONST_DOUBLE (x, trueop); |
| switch (code) |
| { |
| case FIX: |
| if (REAL_VALUE_ISNAN (x)) |
| return const0_rtx; |
| |
| /* Test against the signed upper bound. */ |
| if (width > HOST_BITS_PER_WIDE_INT) |
| { |
| th = ((unsigned HOST_WIDE_INT) 1 |
| << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1; |
| tl = -1; |
| } |
| else |
| { |
| th = 0; |
| tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1; |
| } |
| real_from_integer (&t, VOIDmode, tl, th, 0); |
| if (REAL_VALUES_LESS (t, x)) |
| { |
| xh = th; |
| xl = tl; |
| break; |
| } |
| |
| /* Test against the signed lower bound. */ |
| if (width > HOST_BITS_PER_WIDE_INT) |
| { |
| th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1); |
| tl = 0; |
| } |
| else |
| { |
| th = -1; |
| tl = (HOST_WIDE_INT) -1 << (width - 1); |
| } |
| real_from_integer (&t, VOIDmode, tl, th, 0); |
| if (REAL_VALUES_LESS (x, t)) |
| { |
| xh = th; |
| xl = tl; |
| break; |
| } |
| REAL_VALUE_TO_INT (&xl, &xh, x); |
| break; |
| |
| case UNSIGNED_FIX: |
| if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x)) |
| return const0_rtx; |
| |
| /* Test against the unsigned upper bound. */ |
| if (width == 2*HOST_BITS_PER_WIDE_INT) |
| { |
| th = -1; |
| tl = -1; |
| } |
| else if (width >= HOST_BITS_PER_WIDE_INT) |
| { |
| th = ((unsigned HOST_WIDE_INT) 1 |
| << (width - HOST_BITS_PER_WIDE_INT)) - 1; |
| tl = -1; |
| } |
| else |
| { |
| th = 0; |
| tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1; |
| } |
| real_from_integer (&t, VOIDmode, tl, th, 1); |
| if (REAL_VALUES_LESS (t, x)) |
| { |
| xh = th; |
| xl = tl; |
| break; |
| } |
| |
| REAL_VALUE_TO_INT (&xl, &xh, x); |
| break; |
| |
| default: |
| gcc_unreachable (); |
| } |
| return immed_double_const (xl, xh, mode); |
| } |
| |
| /* This was formerly used only for non-IEEE float. |
| eggert@twinsun.com says it is safe for IEEE also. */ |
| else |
| { |
| enum rtx_code reversed; |
| rtx temp; |
| |
| /* There are some simplifications we can do even if the operands |
| aren't constant. */ |
| switch (code) |
| { |
| case NOT: |
| /* (not (not X)) == X. */ |
| if (GET_CODE (op) == NOT) |
| return XEXP (op, 0); |
| |
| /* (not (eq X Y)) == (ne X Y), etc. */ |
| if (COMPARISON_P (op) |
| && (mode == BImode || STORE_FLAG_VALUE == -1) |
| && ((reversed = reversed_comparison_code (op, NULL_RTX)) |
| != UNKNOWN)) |
| return simplify_gen_relational (reversed, mode, VOIDmode, |
| XEXP (op, 0), XEXP (op, 1)); |
| |
| /* (not (plus X -1)) can become (neg X). */ |
| if (GET_CODE (op) == PLUS |
| && XEXP (op, 1) == constm1_rtx) |
| return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode); |
| |
| /* Similarly, (not (neg X)) is (plus X -1). */ |
| if (GET_CODE (op) == NEG) |
| return plus_constant (XEXP (op, 0), -1); |
| |
| /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */ |
| if (GET_CODE (op) == XOR |
| && GET_CODE (XEXP (op, 1)) == CONST_INT |
| && (temp = simplify_unary_operation (NOT, mode, |
| XEXP (op, 1), |
| mode)) != 0) |
| return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp); |
| |
| /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */ |
| if (GET_CODE (op) == PLUS |
| && GET_CODE (XEXP (op, 1)) == CONST_INT |
| && mode_signbit_p (mode, XEXP (op, 1)) |
| && (temp = simplify_unary_operation (NOT, mode, |
| XEXP (op, 1), |
| mode)) != 0) |
| return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp); |
| |
| |
| |
| /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for |
| operands other than 1, but that is not valid. We could do a |
| similar simplification for (not (lshiftrt C X)) where C is |
| just the sign bit, but this doesn't seem common enough to |
| bother with. */ |
| if (GET_CODE (op) == ASHIFT |
| && XEXP (op, 0) == const1_rtx) |
| { |
| temp = simplify_gen_unary (NOT, mode, const1_rtx, mode); |
| return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1)); |
| } |
| |
| /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done |
| by reversing the comparison code if valid. */ |
| if (STORE_FLAG_VALUE == -1 |
| && COMPARISON_P (op) |
| && (reversed = reversed_comparison_code (op, NULL_RTX)) |
| != UNKNOWN) |
| return simplify_gen_relational (reversed, mode, VOIDmode, |
| XEXP (op, 0), XEXP (op, 1)); |
| |
| /* (not (ashiftrt foo C)) where C is the number of bits in FOO |
| minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1, |
| so we can perform the above simplification. */ |
| |
| if (STORE_FLAG_VALUE == -1 |
| && GET_CODE (op) == ASHIFTRT |
| && GET_CODE (XEXP (op, 1)) == CONST_INT |
| && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1) |
| return simplify_gen_relational (GE, mode, VOIDmode, |
| XEXP (op, 0), const0_rtx); |
| |
| break; |
| |
| case NEG: |
| /* (neg (neg X)) == X. */ |
| if (GET_CODE (op) == NEG) |
| return XEXP (op, 0); |
| |
| /* (neg (plus X 1)) can become (not X). */ |
| if (GET_CODE (op) == PLUS |
| /* APPLE LOCAL begin disallow generating (not (SYM)) */ |
| && XEXP (op, 1) == const1_rtx |
| && GET_CODE (XEXP (op, 0)) != SYMBOL_REF) |
| /* APPLE LOCAL end disallow generating (not (SYM)) */ |
| return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode); |
| |
| /* Similarly, (neg (not X)) is (plus X 1). */ |
| if (GET_CODE (op) == NOT) |
| return plus_constant (XEXP (op, 0), 1); |
| |
| /* (neg (minus X Y)) can become (minus Y X). This transformation |
| isn't safe for modes with signed zeros, since if X and Y are |
| both +0, (minus Y X) is the same as (minus X Y). If the |
| rounding mode is towards +infinity (or -infinity) then the two |
| expressions will be rounded differently. */ |
| if (GET_CODE (op) == MINUS |
| && !HONOR_SIGNED_ZEROS (mode) |
| && !HONOR_SIGN_DEPENDENT_ROUNDING (mode)) |
| return simplify_gen_binary (MINUS, mode, XEXP (op, 1), |
| XEXP (op, 0)); |
| |
| /* APPLE LOCAL begin don't allow subtraction of symbol address */ |
| if (GET_CODE (op) == PLUS |
| && !HONOR_SIGNED_ZEROS (mode) |
| && !HONOR_SIGN_DEPENDENT_ROUNDING (mode) |
| && GET_CODE (XEXP (op, 0)) != SYMBOL_REF) |
| /* APPLE LOCAL end don't allow subtraction of symbol address */ |
| { |
| /* (neg (plus A C)) is simplified to (minus -C A). */ |
| if (GET_CODE (XEXP (op, 1)) == CONST_INT |
| || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE) |
| { |
| temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), |
| mode); |
| if (temp) |
| return simplify_gen_binary (MINUS, mode, temp, |
| XEXP (op, 0)); |
| } |
| |
| /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */ |
| temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode); |
| return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1)); |
| } |
| |
| /* (neg (mult A B)) becomes (mult (neg A) B). |
| This works even for floating-point values. */ |
| if (GET_CODE (op) == MULT |
| && !HONOR_SIGN_DEPENDENT_ROUNDING (mode)) |
| { |
| temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode); |
| return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1)); |
| } |
| |
| /* NEG commutes with ASHIFT since it is multiplication. Only do |
| this if we can then eliminate the NEG (e.g., if the operand |
| is a constant). */ |
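| /* For example, (neg (ashift (const_int 3) X)) becomes |
| (ashift (const_int -3) X). */ |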
| if (GET_CODE (op) == ASHIFT) |
| { |
| temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), |
| mode); |
| if (temp) |
| return simplify_gen_binary (ASHIFT, mode, temp, |
| XEXP (op, 1)); |
| } |
| |
| /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when |
| C is equal to the width of MODE minus 1. */ |
| if (GET_CODE (op) == ASHIFTRT |
| && GET_CODE (XEXP (op, 1)) == CONST_INT |
| && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1) |
| return simplify_gen_binary (LSHIFTRT, mode, |
| XEXP (op, 0), XEXP (op, 1)); |
| |
| /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when |
| C is equal to the width of MODE minus 1. */ |
| if (GET_CODE (op) == LSHIFTRT |
| && GET_CODE (XEXP (op, 1)) == CONST_INT |
| && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1) |
| return simplify_gen_binary (ASHIFTRT, mode, |
| XEXP (op, 0), XEXP (op, 1)); |
| |
| break; |
| |
| case SIGN_EXTEND: |
| /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2)))) |
| becomes just the MINUS if its mode is MODE. This allows |
| folding switch statements on machines using casesi (such as |
| the VAX). */ |
| if (GET_CODE (op) == TRUNCATE |
| && GET_MODE (XEXP (op, 0)) == mode |
| && GET_CODE (XEXP (op, 0)) == MINUS |
| && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF |
| && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF) |
| return XEXP (op, 0); |
| |
| /* Check for a sign extension of a subreg of a promoted |
| variable, where the promotion is sign-extended, and the |
| target mode is the same as the variable's promotion. */ |
| if (GET_CODE (op) == SUBREG |
| && SUBREG_PROMOTED_VAR_P (op) |
| && ! SUBREG_PROMOTED_UNSIGNED_P (op) |
| && GET_MODE (XEXP (op, 0)) == mode) |
| return XEXP (op, 0); |
| |
| #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend) |
| if (! POINTERS_EXTEND_UNSIGNED |
| && mode == Pmode && GET_MODE (op) == ptr_mode |
| && (CONSTANT_P (op) |
| || (GET_CODE (op) == SUBREG |
| && REG_P (SUBREG_REG (op)) |
| && REG_POINTER (SUBREG_REG (op)) |
| && GET_MODE (SUBREG_REG (op)) == Pmode))) |
| return convert_memory_address (Pmode, op); |
| #endif |
| break; |
| |
| case ZERO_EXTEND: |
| /* Check for a zero extension of a subreg of a promoted |
| variable, where the promotion is zero-extended, and the |
| target mode is the same as the variable's promotion. */ |
| if (GET_CODE (op) == SUBREG |
| && SUBREG_PROMOTED_VAR_P (op) |
| && SUBREG_PROMOTED_UNSIGNED_P (op) > 0 |
| && GET_MODE (XEXP (op, 0)) == mode) |
| return XEXP (op, 0); |
| |
| #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend) |
| if (POINTERS_EXTEND_UNSIGNED > 0 |
| && mode == Pmode && GET_MODE (op) == ptr_mode |
| && (CONSTANT_P (op) |
| || (GET_CODE (op) == SUBREG |
| && REG_P (SUBREG_REG (op)) |
| && REG_POINTER (SUBREG_REG (op)) |
| && GET_MODE (SUBREG_REG (op)) == Pmode))) |
| return convert_memory_address (Pmode, op); |
| #endif |
| break; |
| |
| default: |
| break; |
| } |
| |
| return 0; |
| } |
| } |
| |
| /* Subroutine of simplify_binary_operation to simplify a commutative, |
| associative binary operation CODE with result mode MODE, operating |
| on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR, |
| SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or |
| canonicalization is possible. */ |
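| /* For example, (ior (ior X (const_int 1)) Y) is canonicalized to |
| (ior (ior X Y) (const_int 1)), and (ior (ior X (const_int 1)) |
| (const_int 2)) folds the two constants into (ior X (const_int 3)). */ |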
| |
| static rtx |
| simplify_associative_operation (enum rtx_code code, enum machine_mode mode, |
| rtx op0, rtx op1) |
| { |
| rtx tem; |
| |
| /* Linearize the operator to the left. */ |
| if (GET_CODE (op1) == code) |
| { |
| /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */ |
| if (GET_CODE (op0) == code) |
| { |
| tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0)); |
| return simplify_gen_binary (code, mode, tem, XEXP (op1, 1)); |
| } |
| |
| /* "a op (b op c)" becomes "(b op c) op a". */ |
| if (! swap_commutative_operands_p (op1, op0)) |
| return simplify_gen_binary (code, mode, op1, op0); |
| |
| tem = op0; |
| op0 = op1; |
| op1 = tem; |
| } |
| |
| if (GET_CODE (op0) == code) |
| { |
| /* Canonicalize "(x op c) op y" as "(x op y) op c". */ |
| if (swap_commutative_operands_p (XEXP (op0, 1), op1)) |
| { |
| tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1); |
| return simplify_gen_binary (code, mode, tem, XEXP (op0, 1)); |
| } |
| |
| /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */ |
| tem = swap_commutative_operands_p (XEXP (op0, 1), op1) |
| ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1)) |
| : simplify_binary_operation (code, mode, XEXP (op0, 1), op1); |
| if (tem != 0) |
| return simplify_gen_binary (code, mode, XEXP (op0, 0), tem); |
| |
| /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */ |
| tem = swap_commutative_operands_p (XEXP (op0, 0), op1) |
| ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0)) |
| : simplify_binary_operation (code, mode, XEXP (op0, 0), op1); |
| if (tem != 0) |
| return simplify_gen_binary (code, mode, tem, XEXP (op0, 1)); |
| } |
| |
| return 0; |
| } |
| |
| /* Simplify a binary operation CODE with result mode MODE, operating on OP0 |
| and OP1. Return 0 if no simplification is possible. |
| |
| Don't use this for relational operations such as EQ or LT. |
| Use simplify_relational_operation instead. */ |
| rtx |
| simplify_binary_operation (enum rtx_code code, enum machine_mode mode, |
| rtx op0, rtx op1) |
| { |
| HOST_WIDE_INT arg0, arg1, arg0s, arg1s; |
| HOST_WIDE_INT val; |
| unsigned int width = GET_MODE_BITSIZE (mode); |
| rtx trueop0, trueop1; |
| rtx tem; |
| |
| /* Relational operations don't work here. We must know the mode |
| of the operands in order to do the comparison correctly. |
| Assuming a full word can give incorrect results. |
| Consider comparing 128 with -128 in QImode. */ |
| gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE); |
| gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE); |
| |
| /* Make sure the constant is second. */ |
| if (GET_RTX_CLASS (code) == RTX_COMM_ARITH |
| && swap_commutative_operands_p (op0, op1)) |
| { |
| tem = op0, op0 = op1, op1 = tem; |
| } |
| |
| trueop0 = avoid_constant_pool_reference (op0); |
| trueop1 = avoid_constant_pool_reference (op1); |
| |
| if (VECTOR_MODE_P (mode) |
| && code != VEC_CONCAT |
| && GET_CODE (trueop0) == CONST_VECTOR |
| && GET_CODE (trueop1) == CONST_VECTOR) |
| { |
| unsigned n_elts = GET_MODE_NUNITS (mode); |
| enum machine_mode op0mode = GET_MODE (trueop0); |
| unsigned op0_n_elts = GET_MODE_NUNITS (op0mode); |
| enum machine_mode op1mode = GET_MODE (trueop1); |
| unsigned op1_n_elts = GET_MODE_NUNITS (op1mode); |
| rtvec v = rtvec_alloc (n_elts); |
| unsigned int i; |
| |
| gcc_assert (op0_n_elts == n_elts); |
| gcc_assert (op1_n_elts == n_elts); |
| for (i = 0; i < n_elts; i++) |
| { |
| rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode), |
| CONST_VECTOR_ELT (trueop0, i), |
| CONST_VECTOR_ELT (trueop1, i)); |
| if (!x) |
| return 0; |
| RTVEC_ELT (v, i) = x; |
| } |
| |
| return gen_rtx_CONST_VECTOR (mode, v); |
| } |
| |
| if (VECTOR_MODE_P (mode) |
| && code == VEC_CONCAT |
| && CONSTANT_P (trueop0) && CONSTANT_P (trueop1)) |
| { |
| unsigned n_elts = GET_MODE_NUNITS (mode); |
| rtvec v = rtvec_alloc (n_elts); |
| |
| gcc_assert (n_elts >= 2); |
| if (n_elts == 2) |
| { |
| gcc_assert (GET_CODE (trueop0) != CONST_VECTOR); |
| gcc_assert (GET_CODE (trueop1) != CONST_VECTOR); |
| |
| RTVEC_ELT (v, 0) = trueop0; |
| RTVEC_ELT (v, 1) = trueop1; |
| } |
| else |
| { |
| unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (trueop0)); |
| unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (trueop1)); |
| unsigned i; |
| |
| gcc_assert (GET_CODE (trueop0) == CONST_VECTOR); |
| gcc_assert (GET_CODE (trueop1) == CONST_VECTOR); |
| gcc_assert (op0_n_elts + op1_n_elts == n_elts); |
| |
| for (i = 0; i < op0_n_elts; ++i) |
| RTVEC_ELT (v, i) = XVECEXP (trueop0, 0, i); |
| for (i = 0; i < op1_n_elts; ++i) |
| RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (trueop1, 0, i); |
| } |
| |
| return gen_rtx_CONST_VECTOR (mode, v); |
| } |
| |
| if (GET_MODE_CLASS (mode) == MODE_FLOAT |
| && GET_CODE (trueop0) == CONST_DOUBLE |
| && GET_CODE (trueop1) == CONST_DOUBLE |
| && mode == GET_MODE (op0) && mode == GET_MODE (op1)) |
| { |
| if (code == AND |
| || code == IOR |
| || code == XOR) |
| { |
| long tmp0[4]; |
| long tmp1[4]; |
| REAL_VALUE_TYPE r; |
| int i; |
| |
| real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0), |
| GET_MODE (op0)); |
| real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1), |
| GET_MODE (op1)); |
| for (i = 0; i < 4; i++) |
| { |
| switch (code) |
| { |
| case AND: |
| tmp0[i] &= tmp1[i]; |
| break; |
| case IOR: |
| tmp0[i] |= tmp1[i]; |
| break; |
| case XOR: |
| tmp0[i] ^= tmp1[i]; |
| break; |
| default: |
| gcc_unreachable (); |
| } |
| } |
| real_from_target (&r, tmp0, mode); |
| return CONST_DOUBLE_FROM_REAL_VALUE (r, mode); |
| } |
| else |
| { |
| REAL_VALUE_TYPE f0, f1, value, result; |
| bool inexact; |
| |
| REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0); |
| REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1); |
| real_convert (&f0, mode, &f0); |
| real_convert (&f1, mode, &f1); |
| |
| if (HONOR_SNANS (mode) |
| && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1))) |
| return 0; |
| |
| if (code == DIV |
| && REAL_VALUES_EQUAL (f1, dconst0) |
| && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode))) |
| return 0; |
| |
| if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode) |
| && flag_trapping_math |
| && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1)) |
| { |
| int s0 = REAL_VALUE_NEGATIVE (f0); |
| int s1 = REAL_VALUE_NEGATIVE (f1); |
| |
| switch (code) |
| { |
| case PLUS: |
| /* Inf + -Inf = NaN plus exception. */ |
| if (s0 != s1) |
| return 0; |
| break; |
| case MINUS: |
| /* Inf - Inf = NaN plus exception. */ |
| if (s0 == s1) |
| return 0; |
| break; |
| case DIV: |
| /* Inf / Inf = NaN plus exception. */ |
| return 0; |
| default: |
| break; |
| } |
| } |
| |
| if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode) |
| && flag_trapping_math |
| && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0)) |
| || (REAL_VALUE_ISINF (f1) |
| && REAL_VALUES_EQUAL (f0, dconst0)))) |
| /* Inf * 0 = NaN plus exception. */ |
| return 0; |
| |
| inexact = real_arithmetic (&value, rtx_to_tree_code (code), |
| &f0, &f1); |
| real_convert (&result, mode, &value); |
| |
| /* Don't constant fold this floating point operation if the |
| result may depend upon the run-time rounding mode and |
| flag_rounding_math is set, or if GCC's software emulation |
| is unable to accurately represent the result. */ |
| |
| if ((flag_rounding_math |
| || (REAL_MODE_FORMAT_COMPOSITE_P (mode) |
| && !flag_unsafe_math_optimizations)) |
| && (inexact || !real_identical (&result, &value))) |
| return NULL_RTX; |
| |
| return CONST_DOUBLE_FROM_REAL_VALUE (result, mode); |
| } |
| } |
| |
| /* We can fold some multi-word operations. */ |
| if (GET_MODE_CLASS (mode) == MODE_INT |
| && width == HOST_BITS_PER_WIDE_INT * 2 |
| && (GET_CODE (trueop0) == CONST_DOUBLE |
| || GET_CODE (trueop0) == CONST_INT) |
| && (GET_CODE (trueop1) == CONST_DOUBLE |
| || GET_CODE (trueop1) == CONST_INT)) |
| { |
| unsigned HOST_WIDE_INT l1, l2, lv, lt; |
| HOST_WIDE_INT h1, h2, hv, ht; |
| |
| if (GET_CODE (trueop0) == CONST_DOUBLE) |
| l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0); |
| else |
| l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1); |
| |
| if (GET_CODE (trueop1) == CONST_DOUBLE) |
| l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1); |
| else |
| l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2); |
| |
| switch (code) |
| { |
| case MINUS: |
| /* A - B == A + (-B). */ |
| neg_double (l2, h2, &lv, &hv); |
| l2 = lv, h2 = hv; |
| |
| /* Fall through.... */ |
| |
| case PLUS: |
| add_double (l1, h1, l2, h2, &lv, &hv); |
| break; |
| |
| case MULT: |
| mul_double (l1, h1, l2, h2, &lv, &hv); |
| break; |
| |
| case DIV: |
| if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2, |
| &lv, &hv, <, &ht)) |
| return 0; |
| break; |
| |
| case MOD: |
| if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2, |
| <, &ht, &lv, &hv)) |
| return 0; |
| break; |
| |
| case UDIV: |
| if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2, |
| &lv, &hv, <, &ht)) |
| return 0; |
| break; |
| |
| case UMOD: |
| if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2, |
| <, &ht, &lv, &hv)) |
| return 0; |
| break; |
| |
| case AND: |
| lv = l1 & l2, hv = h1 & h2; |
| break; |
| |
| case IOR: |
| lv = l1 | l2, hv = h1 | h2; |
| break; |
| |
| case XOR: |
| lv = l1 ^ l2, hv = h1 ^ h2; |
| break; |
| |
| case SMIN: |
| if (h1 < h2 |
| || (h1 == h2 |
| && ((unsigned HOST_WIDE_INT) l1 |
| < (unsigned HOST_WIDE_INT) l2))) |
| lv = l1, hv = h1; |
| else |
| lv = l2, hv = h2; |
| break; |
| |
| case SMAX: |
| if (h1 > h2 |
| || (h1 == h2 |
| && ((unsigned HOST_WIDE_INT) l1 |
| > (unsigned HOST_WIDE_INT) l2))) |
| lv = l1, hv = h1; |
| else |
| lv = l2, hv = h2; |
| break; |
| |
| case UMIN: |
| if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2 |
| || (h1 == h2 |
| && ((unsigned HOST_WIDE_INT) l1 |
| < (unsigned HOST_WIDE_INT) l2))) |
| lv = l1, hv = h1; |
| else |
| lv = l2, hv = h2; |
| break; |
| |
| case UMAX: |
| if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2 |
| || (h1 == h2 |
| && ((unsigned HOST_WIDE_INT) l1 |
| > (unsigned HOST_WIDE_INT) l2))) |
| lv = l1, hv = h1; |
| else |
| lv = l2, hv = h2; |
| break; |
| |
| case LSHIFTRT: case ASHIFTRT: |
| case ASHIFT: |
| case ROTATE: case ROTATERT: |
| if (SHIFT_COUNT_TRUNCATED) |
| l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0; |
| |
| if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode)) |
| return 0; |
| |
| if (code == LSHIFTRT || code == ASHIFTRT) |
| rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, |
| code == ASHIFTRT); |
| else if (code == ASHIFT) |
| lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1); |
| else if (code == ROTATE) |
| lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv); |
| else /* code == ROTATERT */ |
| rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv); |
| break; |
| |
| default: |
| return 0; |
| } |
| |
| return immed_double_const (lv, hv, mode); |
| } |
| |
| if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT |
| || width > HOST_BITS_PER_WIDE_INT || width == 0) |
| { |
| /* Even if we can't compute a constant result, |
| there are some cases worth simplifying. */ |
| |
| switch (code) |
| { |
| case PLUS: |
| /* Maybe simplify x + 0 to x. The two expressions are equivalent |
| when x is NaN, infinite, or finite and nonzero. They aren't |
| when x is -0 and the rounding mode is not towards -infinity, |
| since (-0) + 0 is then 0. */ |
| if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode)) |
| return op0; |
| |
| /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These |
| transformations are safe even for IEEE. */ |
| if (GET_CODE (op0) == NEG) |
| return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0)); |
| else if (GET_CODE (op1) == NEG) |
| return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0)); |
| |
| /* (~a) + 1 -> -a */ |
| if (INTEGRAL_MODE_P (mode) |
| && GET_CODE (op0) == NOT |
| && trueop1 == const1_rtx) |
| return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode); |
| |
| /* Handle both-operands-constant cases. We can only add |
| CONST_INTs to constants since the sum of relocatable symbols |
| can't be handled by most assemblers. Don't add CONST_INT |
| to CONST_INT since overflow won't be computed properly if wider |
| than HOST_BITS_PER_WIDE_INT. */ |
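| /* For example, adding (const_int 4) to (symbol_ref S) goes through |
| plus_constant and yields (const (plus (symbol_ref S) (const_int 4))). */ |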
| |
| if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode |
| && GET_CODE (op1) == CONST_INT) |
| return plus_constant (op0, INTVAL (op1)); |
| else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode |
| && GET_CODE (op0) == CONST_INT) |
| return plus_constant (op1, INTVAL (op0)); |
| |
| /* See if this is something like X * C - X or vice versa or |
| if the multiplication is written as a shift. If so, we can |
| distribute and make a new multiply, shift, or maybe just |
| have X (if C is 2 in the example above). But don't make |
| something more expensive than we had before. */ |
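| /* For example, (plus (ashift X (const_int 1)) X) has coefficients 2 and |
| 1, so it can become (mult X (const_int 3)), kept only if it is no more |
| costly than the original per rtx_cost. */ |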
| |
| if (SCALAR_INT_MODE_P (mode)) |
| { |
| HOST_WIDE_INT coeff0h = 0, coeff1h = 0; |
| unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1; |
| rtx lhs = op0, rhs = op1; |
| |
| if (GET_CODE (lhs) == NEG) |
| { |
| coeff0l = -1; |
| coeff0h = -1; |
| lhs = XEXP (lhs, 0); |
| } |
| else if (GET_CODE (lhs) == MULT |
| && GET_CODE (XEXP (lhs, 1)) == CONST_INT) |
| { |
| coeff0l = INTVAL (XEXP (lhs, 1)); |
| coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0; |
| lhs = XEXP (lhs, 0); |
| } |
| else if (GET_CODE (lhs) == ASHIFT |
| && GET_CODE (XEXP (lhs, 1)) == CONST_INT |
| && INTVAL (XEXP (lhs, 1)) >= 0 |
| && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT) |
| { |
| coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1)); |
| coeff0h = 0; |
| lhs = XEXP (lhs, 0); |
| } |
| |
| if (GET_CODE (rhs) == NEG) |
| { |
| coeff1l = -1; |
| coeff1h = -1; |
| rhs = XEXP (rhs, 0); |
| } |
| else if (GET_CODE (rhs) == MULT |
| && GET_CODE (XEXP (rhs, 1)) == CONST_INT) |
| { |
| coeff1l = INTVAL (XEXP (rhs, 1)); |
| coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0; |
| rhs = XEXP (rhs, 0); |
| } |
| else if (GET_CODE (rhs) == ASHIFT |
| && GET_CODE (XEXP (rhs, 1)) == CONST_INT |
| && INTVAL (XEXP (rhs, 1)) >= 0 |
| && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT) |
| { |
| coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)); |
| coeff1h = 0; |
| rhs = XEXP (rhs, 0); |
| } |
| |
| if (rtx_equal_p (lhs, rhs)) |
| { |
| rtx orig = gen_rtx_PLUS (mode, op0, op1); |
| rtx coeff; |
| unsigned HOST_WIDE_INT l; |
| HOST_WIDE_INT h; |
| |
| add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h); |
| coeff = immed_double_const (l, h, mode); |
| |
| tem = simplify_gen_binary (MULT, mode, lhs, coeff); |
| return rtx_cost (tem, SET) <= rtx_cost (orig, SET) |
| ? tem : 0; |
| } |
| } |
| |
| /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */ |
| if ((GET_CODE (op1) == CONST_INT |
| || GET_CODE (op1) == CONST_DOUBLE) |
| && GET_CODE (op0) == XOR |
| && (GET_CODE (XEXP (op0, 1)) == CONST_INT |
| || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE) |
| && mode_signbit_p (mode, op1)) |
| return simplify_gen_binary (XOR, mode, XEXP (op0, 0), |
| simplify_gen_binary (XOR, mode, op1, |
| XEXP (op0, 1))); |
| |
| /* If one of the operands is a PLUS or a MINUS, see if we can |
| simplify this by the associative law. |
| Don't use the associative law for floating point. |
| The inaccuracy makes it nonassociative, |
| and subtle programs can break if operations are associated. */ |
| |
| if (INTEGRAL_MODE_P (mode) |
| && (plus_minus_operand_p (op0) |
| || plus_minus_operand_p (op1)) |
| && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0) |
| return tem; |
| |
| /* Reassociate floating point addition only when the user |
| specifies unsafe math optimizations. */ |
| if (FLOAT_MODE_P (mode) |
| && flag_unsafe_math_optimizations) |
| { |
| tem = simplify_associative_operation (code, mode, op0, op1); |
| if (tem) |
| return tem; |
| } |
| break; |
| |
| case COMPARE: |
| #ifdef HAVE_cc0 |
| /* Convert (compare FOO (const_int 0)) to FOO unless we aren't |
| using cc0, in which case we want to leave it as a COMPARE |
| so we can distinguish it from a register-register-copy. |
| |
| In IEEE floating point, x-0 is not the same as x. */ |
| |
| if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT |
| || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations) |
| && trueop1 == CONST0_RTX (mode)) |
| return op0; |
| #endif |
| |
| /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */ |
| if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT) |
| || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU)) |
| && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx) |
| { |
| rtx xop00 = XEXP (op0, 0); |
| rtx xop10 = XEXP (op1, 0); |
| |
| #ifdef HAVE_cc0 |
| if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0) |
| #else |
| if (REG_P (xop00) && REG_P (xop10) |
| && GET_MODE (xop00) == GET_MODE (xop10) |
| && REGNO (xop00) == REGNO (xop10) |
| && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC |
| && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC) |
| #endif |
| return xop00; |
| } |
| break; |
| |
| case MINUS: |
| /* We can't assume x-x is 0 even with non-IEEE floating point, |
| but since it is zero except in very strange circumstances, we |
| will treat it as zero with -funsafe-math-optimizations. */ |
| if (rtx_equal_p (trueop0, trueop1) |
| && ! side_effects_p (op0) |
| && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)) |
| return CONST0_RTX (mode); |
| |
| /* Change subtraction from zero into negation. (0 - x) is the |
| same as -x when x is NaN, infinite, or finite and nonzero. |
| But if the mode has signed zeros, and does not round towards |
| -infinity, then 0 - 0 is 0, not -0. */ |
| if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode)) |
| return simplify_gen_unary (NEG, mode, op1, mode); |
| |
| /* (-1 - a) is ~a. */ |
| /* APPLE LOCAL begin disallow generating (not (SYM)) |
| But not when a is relocatable (this arises temporarily when |
| pulling 386 global addresses out of a loop). */ |
| if (trueop0 == constm1_rtx |
| && GET_CODE (op1) != SYMBOL_REF ) |
| /* APPLE LOCAL end disallow generating (not (SYM)) */ |
| return simplify_gen_unary (NOT, mode, op1, mode); |
| |
| /* Subtracting 0 has no effect unless the mode has signed zeros |
| and supports rounding towards -infinity. In such a case, |
| 0 - 0 is -0. */ |
| if (!(HONOR_SIGNED_ZEROS (mode) |
| && HONOR_SIGN_DEPENDENT_ROUNDING (mode)) |
| && trueop1 == CONST0_RTX (mode)) |
| return op0; |
| |
| /* See if this is something like X * C - X or vice versa or |
| if the multiplication is written as a shift. If so, we can |
| distribute and make a new multiply, shift, or maybe just |
| have X (if C is 2 in the example above). But don't make |
| something more expensive than we had before. */ |
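| /* For example, (minus (mult X (const_int 4)) X) combines coefficients 4 |
| and -1 into (mult X (const_int 3)), again kept only if it is no more |
| costly than the original. */ |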
| |
| if (SCALAR_INT_MODE_P (mode)) |
| { |
| HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1; |
| unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1; |
| rtx lhs = op0, rhs = op1; |
| |
| if (GET_CODE (lhs) == NEG) |
| { |
| coeff0l = -1; |
| coeff0h = -1; |
| lhs = XEXP (lhs, 0); |
| } |
| else if (GET_CODE (lhs) == MULT |
| && GET_CODE (XEXP (lhs, 1)) == CONST_INT) |
| { |
| coeff0l = INTVAL (XEXP (lhs, 1)); |
| coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0; |
| lhs = XEXP (lhs, 0); |
| } |
| else if (GET_CODE (lhs) == ASHIFT |
| && GET_CODE (XEXP (lhs, 1)) == CONST_INT |
| && INTVAL (XEXP (lhs, 1)) >= 0 |
| && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT) |
| { |
| coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1)); |
| coeff0h = 0; |
| lhs = XEXP (lhs, 0); |
| } |
| |
| if (GET_CODE (rhs) == NEG) |
| { |
| negcoeff1l = 1; |
| negcoeff1h = 0; |
| rhs = XEXP (rhs, 0); |
| } |
| else if (GET_CODE (rhs) == MULT |
| && GET_CODE (XEXP (rhs, 1)) == CONST_INT) |
| { |
| negcoeff1l = -INTVAL (XEXP (rhs, 1)); |
| negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1; |
| rhs = XEXP (rhs, 0); |
| } |
| else if (GET_CODE (rhs) == ASHIFT |
| && GET_CODE (XEXP (rhs, 1)) == CONST_INT |
| && INTVAL (XEXP (rhs, 1)) >= 0 |
| && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT) |
| { |
| negcoeff1l = -(((HOST_WIDE_INT) 1) |
| << INTVAL (XEXP (rhs, 1))); |
| negcoeff1h = -1; |
| rhs = XEXP (rhs, 0); |
| } |
| |
| if (rtx_equal_p (lhs, rhs)) |
| { |
| rtx orig = gen_rtx_MINUS (mode, op0, op1); |
| rtx coeff; |
| unsigned HOST_WIDE_INT l; |
| HOST_WIDE_INT h; |
| |
| add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, |
| &l, &h); |
| coeff = immed_double_const (l, h, mode); |
| |
| tem = simplify_gen_binary (MULT, mode, lhs, coeff); |
| return rtx_cost (tem, SET) <= rtx_cost (orig, SET) |
| ? tem : 0; |
| } |
| } |
| |
| /* (a - (-b)) -> (a + b). True even for IEEE. */ |
| if (GET_CODE (op1) == NEG) |
| return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0)); |
| |
| /* (-x - c) may be simplified as (-c - x). */ |
| if (GET_CODE (op0) == NEG |
| && (GET_CODE (op1) == CONST_INT |
| || GET_CODE (op1) == CONST_DOUBLE)) |
| { |
| tem = simplify_unary_operation (NEG, mode, op1, mode); |
| if (tem) |
| return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0)); |
| } |
| |
| /* If one of the operands is a PLUS or a MINUS, see if we can |
| simplify this by the associative law. |
| Don't use the associative law for floating point. |
| The inaccuracy makes it nonassociative, |
| and subtle programs can break if operations are associated. */ |
| |
| if (INTEGRAL_MODE_P (mode) |
| && (plus_minus_operand_p (op0) |
| || plus_minus_operand_p (op1)) |
| && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0) |
| return tem; |
| |
| /* Don't let a relocatable value get a negative coeff. */ |
| if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode) |
| return simplify_gen_binary (PLUS, mode, |
| op0, |
| neg_const_int (mode, op1)); |
| |
| /* (x - (x & y)) -> (x & ~y) */ |
| if (GET_CODE (op1) == AND) |
| { |
| if (rtx_equal_p (op0, XEXP (op1, 0))) |
| { |
| tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1), |
| GET_MODE (XEXP (op1, 1))); |
| return simplify_gen_binary (AND, mode, op0, tem); |
| } |
| if (rtx_equal_p (op0, XEXP (op1, 1))) |
| { |
| tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0), |
| GET_MODE (XEXP (op1, 0))); |
| return simplify_gen_binary (AND, mode, op0, tem); |
| } |
| } |
| break; |
| |
| case MULT: |
| if (trueop1 == constm1_rtx) |
| return simplify_gen_unary (NEG, mode, op0, mode); |
| |
| /* Maybe simplify x * 0 to 0. The reduction is not valid if |
| x is NaN, since x * 0 is then also NaN. Nor is it valid |
| when the mode has signed zeros, since multiplying a negative |
| number by 0 will give -0, not 0. */ |
| if (!HONOR_NANS (mode) |
| && !HONOR_SIGNED_ZEROS (mode) |
| && trueop1 == CONST0_RTX (mode) |
| && ! side_effects_p (op0)) |
| return op1; |
| |
| /* In IEEE floating point, x*1 is not equivalent to x for |
| signalling NaNs. */ |
| if (!HONOR_SNANS (mode) |
| && trueop1 == CONST1_RTX (mode)) |
| return op0; |
| |
| /* Convert multiply by constant power of two into shift unless |
| we are still generating RTL. This test is a kludge. */ |
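| /* For example, (mult X (const_int 8)) becomes (ashift X (const_int 3)), |
| since exact_log2 (8) == 3. */ |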
| if (GET_CODE (trueop1) == CONST_INT |
| && (val = exact_log2 (INTVAL (trueop1))) >= 0 |
| /* If the mode is larger than the host word size, and the |
| uppermost bit is set, then this isn't a power of two due |
| to implicit sign extension. */ |
| && (width <= HOST_BITS_PER_WIDE_INT |
| || val != HOST_BITS_PER_WIDE_INT - 1)) |
| return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val)); |
| |
| /* Likewise for multipliers wider than a word. */ |
| else if (GET_CODE (trueop1) == CONST_DOUBLE |
| && (GET_MODE (trueop1) == VOIDmode |
| || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT) |
| && GET_MODE (op0) == mode |
| && CONST_DOUBLE_LOW (trueop1) == 0 |
| && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0) |
| return simplify_gen_binary (ASHIFT, mode, op0, |
| GEN_INT (val |
| + HOST_BITS_PER_WIDE_INT)); |
| |
| /* x*2 is x+x and x*(-1) is -x */ |
| if (GET_CODE (trueop1) == CONST_DOUBLE |
| && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT |
| && GET_MODE (op0) == mode) |
| { |
| REAL_VALUE_TYPE d; |
| REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1); |
| |
| if (REAL_VALUES_EQUAL (d, dconst2)) |
| return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0)); |
| |
| if (REAL_VALUES_EQUAL (d, dconstm1)) |
| return simplify_gen_unary (NEG, mode, op0, mode); |
| } |
| |
| /* Reassociate multiplication, but for floating point MULTs |
| only when the user specifies unsafe math optimizations. */ |
| if (! FLOAT_MODE_P (mode) |
| || flag_unsafe_math_optimizations) |
| { |
| tem = simplify_associative_operation (code, mode, op0, op1); |
| if (tem) |
| return tem; |
| } |
| break; |
| |
| case IOR: |
| if (trueop1 == const0_rtx) |
| return op0; |
| if (GET_CODE (trueop1) == CONST_INT |
| && ((INTVAL (trueop1) & GET_MODE_MASK (mode)) |
| == GET_MODE_MASK (mode))) |
| return op1; |
| if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)) |
| return op0; |
| /* A | (~A) -> -1 */ |
| if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1)) |
| || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0))) |
| && ! side_effects_p (op0) |
| && SCALAR_INT_MODE_P (mode)) |
| return constm1_rtx; |
| tem = simplify_associative_operation (code, mode, op0, op1); |
| if (tem) |
| return tem; |
| break; |
| |
| case XOR: |
| if (trueop1 == const0_rtx) |
| return op0; |
| if (GET_CODE (trueop1) == CONST_INT |
| && ((INTVAL (trueop1) & GET_MODE_MASK (mode)) |
| == GET_MODE_MASK (mode))) |
| return simplify_gen_unary (NOT, mode, op0, mode); |
| if (trueop0 == trueop1 |
| && ! side_effects_p (op0) |
| && GET_MODE_CLASS (mode) != MODE_CC) |
| return CONST0_RTX (mode); |
| |
| /* Canonicalize XOR of the most significant bit to PLUS. */ |
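| /* XORing in the sign bit and adding the sign bit both simply flip |
| that bit (a carry out of the sign bit is discarded), so the two |
| forms are equivalent. */ |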
| if ((GET_CODE (op1) == CONST_INT |
| || GET_CODE (op1) == CONST_DOUBLE) |
| && mode_signbit_p (mode, op1)) |
| return simplify_gen_binary (PLUS, mode, op0, op1); |
| /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */ |
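| /* This holds because, C1 being the sign bit, (plus X C1) is the same |
| as (xor X C1), so the two XOR constants combine. */ |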
| if ((GET_CODE (op1) == CONST_INT |
| || GET_CODE (op1) == CONST_DOUBLE) |
| && GET_CODE (op0) == PLUS |
| && (GET_CODE (XEXP (op0, 1)) == CONST_INT |
| || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE) |
| && mode_signbit_p (mode, XEXP (op0, 1))) |
| return simplify_gen_binary (XOR, mode, XEXP (op0, 0), |
| simplify_gen_binary (XOR, mode, op1, |
| XEXP (op0, 1))); |
| |
| tem = simplify_associative_operation (code, mode, op0, op1); |
| if (tem) |
| return tem; |
| break; |
| |
| case AND: |
| if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0)) |
| return trueop1; |
| /* If we are turning off bits already known off in OP0, we need |
| not do an AND. */ |
| if (GET_CODE (trueop1) == CONST_INT |
| && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT |
| && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0) |
| return op0; |
| if (trueop0 == trueop1 && ! side_effects_p (op0) |
| && GET_MODE_CLASS (mode) != MODE_CC) |
| return op0; |
| /* A & (~A) -> 0 */ |
| if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1)) |
| || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0))) |
| && ! side_effects_p (op0) |
| && GET_MODE_CLASS (mode) != MODE_CC) |
| return CONST0_RTX (mode); |
| |
| /* Transform (and (extend X) C) into (zero_extend (and X C)) if |
| there are no nonzero bits of C outside of X's mode. */ |
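| /* E.g. (and:SI (sign_extend:SI (reg:QI X)) (const_int 0x7f)) becomes |
| (zero_extend:SI (and:QI (reg:QI X) (const_int 0x7f))). */ |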
| if ((GET_CODE (op0) == SIGN_EXTEND |
| || GET_CODE (op0) == ZERO_EXTEND) |
| && GET_CODE (trueop1) == CONST_INT |
| && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT |
| && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0))) |
| & INTVAL (trueop1)) == 0) |
| { |
| enum machine_mode imode = GET_MODE (XEXP (op0, 0)); |
| tem = simplify_gen_binary (AND, imode, XEXP (op0, 0), |
| gen_int_mode (INTVAL (trueop1), |
| imode)); |
| return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode); |
| } |
| |
| /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M, |
| ((A & N) + B) & M -> (A + B) & M |
| Similarly if (N & M) == 0, |
| ((A | N) + B) & M -> (A + B) & M |
| and for - instead of + and/or ^ instead of |. */ |
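| /* For instance, with M == 0x0f and N == 0xff, the AND with N keeps |
| every bit that M keeps and can only clear higher bits, which cannot |
| affect the low bits of the sum, so |
| ((A & 0xff) + B) & 0x0f == (A + B) & 0x0f. */ |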
| if (GET_CODE (trueop1) == CONST_INT |
| && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT |
| && ~INTVAL (trueop1) |
| && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0 |
| && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS)) |
| { |
| rtx pmop[2]; |
| int which; |
| |
| pmop[0] = XEXP (op0, 0); |
| pmop[1] = XEXP (op0, 1); |
| |
| for (which = 0; which < 2; which++) |
| { |
| tem = pmop[which]; |
| switch (GET_CODE (tem)) |
| { |
| case AND: |
| if (GET_CODE (XEXP (tem, 1)) == CONST_INT |
| && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) |
| == INTVAL (trueop1)) |
| pmop[which] = XEXP (tem, 0); |
| break; |
| case IOR: |
| case XOR: |
| if (GET_CODE (XEXP (tem, 1)) == CONST_INT |
| && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0) |
| pmop[which] = XEXP (tem, 0); |
| break; |
| default: |
| break; |
| } |
| } |
| |
| if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1)) |
| { |
| tem = simplify_gen_binary (GET_CODE (op0), mode, |
| pmop[0], pmop[1]); |
| return simplify_gen_binary (code, mode, tem, op1); |
| } |
| } |
| tem = simplify_associative_operation (code, mode, op0, op1); |
| if (tem) |
| return tem; |
| break; |
| |
| case UDIV: |
| /* 0/x is 0 (or x&0 if x has side-effects). */ |
| if (trueop0 == CONST0_RTX (mode)) |
| { |
| if (side_effects_p (op1)) |
| return simplify_gen_binary (AND, mode, op1, trueop0); |
| return trueop0; |
| } |
| /* x/1 is x. */ |
| if (trueop1 == CONST1_RTX (mode)) |
| { |
| /* Handle narrowing UDIV. */ |
| rtx x = gen_lowpart_common (mode, op0); |
| if (x) |
| return x; |
| if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode) |
| return gen_lowpart_SUBREG (mode, op0); |
| return op0; |
| } |
| /* Convert divide by power of two into shift. */ |
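| /* E.g. (udiv:SI X (const_int 16)) becomes (lshiftrt:SI X (const_int 4)). */ |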
| if (GET_CODE (trueop1) == CONST_INT |
| && (arg1 = exact_log2 (INTVAL (trueop1))) > 0) |
| return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (arg1)); |
| break; |
| |
| case DIV: |
| /* Handle floating point and integers separately. */ |
| if (GET_MODE_CLASS (mode) == MODE_FLOAT) |
| { |
| /* Maybe change 0.0 / x to 0.0. This transformation isn't |
| safe for modes with NaNs, since 0.0 / 0.0 will then be |
| NaN rather than 0.0. Nor is it safe for modes with signed |
| zeros, since dividing 0 by a negative number gives -0.0. */ |
| if (trueop0 == CONST0_RTX (mode) |
| && !HONOR_NANS (mode) |
| && !HONOR_SIGNED_ZEROS (mode) |
| && ! side_effects_p (op1)) |
| return op0; |
| /* x/1.0 is x. */ |
| if (trueop1 == CONST1_RTX (mode) |
| && !HONOR_SNANS (mode)) |
| return op0; |
| |
| if (GET_CODE (trueop1) == CONST_DOUBLE |
| && trueop1 != CONST0_RTX (mode)) |
| { |
| REAL_VALUE_TYPE d; |
| REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1); |
| |
| /* x/-1.0 is -x. */ |
| if (REAL_VALUES_EQUAL (d, dconstm1) |
| && !HONOR_SNANS (mode)) |
| return simplify_gen_unary (NEG, mode, op0, mode); |
| |
| /* Change FP division by a constant into multiplication. |
| Only do this with -funsafe-math-optimizations. */ |
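| /* E.g. X / 4.0 becomes X * 0.25. For divisors whose reciprocal |
| is not exactly representable (e.g. 3.0) this changes the rounding, |
| hence the flag check. */ |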
| if (flag_unsafe_math_optimizations |
| && !REAL_VALUES_EQUAL (d, dconst0)) |
| { |
| REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d); |
| tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode); |
| return simplify_gen_binary (MULT, mode, op0, tem); |
| } |
| } |
| } |
| else |
| { |
| /* 0/x is 0 (or x&0 if x has side-effects). */ |
| if (trueop0 == CONST0_RTX (mode)) |
| { |
| if (side_effects_p (op1)) |
| return simplify_gen_binary (AND, mode, op1, trueop0); |
| return trueop0; |
| } |
| /* x/1 is x. */ |
| if (trueop1 == CONST1_RTX (mode)) |
| { |
| /* Handle narrowing DIV. */ |
| rtx x = gen_lowpart_common (mode, op0); |
| if (x) |
| return x; |
| if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode) |
| return gen_lowpart_SUBREG (mode, op0); |
| return op0; |
| } |
| /* x/-1 is -x. */ |
| if (trueop1 == constm1_rtx) |
| { |
| rtx x = gen_lowpart_common (mode, op0); |
| if (!x) |
| x = (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode) |
| ? gen_lowpart_SUBREG (mode, op0) : op0; |
| return simplify_gen_unary (NEG, mode, x, mode); |
| } |
| } |
| break; |
| |
| case UMOD: |
| /* 0%x is 0 (or x&0 if x has side-effects). */ |
| if (trueop0 == CONST0_RTX (mode)) |
| { |
| if (side_effects_p (op1)) |
| return simplify_gen_binary (AND, mode, op1, trueop0); |
| return trueop0; |
| } |
| /* x%1 is 0 (or x&0 if x has side-effects). */ |
| if (trueop1 == CONST1_RTX (mode)) |
| { |
| if (side_effects_p (op0)) |
| return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode)); |
| return CONST0_RTX (mode); |
| } |
| /* Implement modulus by power of two as AND. */ |
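| /* E.g. (umod:SI X (const_int 16)) becomes (and:SI X (const_int 15)). */ |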
| if (GET_CODE (trueop1) == CONST_INT |
| && exact_log2 (INTVAL (trueop1)) > 0) |
| return simplify_gen_binary (AND, mode, op0, |
| GEN_INT (INTVAL (op1) - 1)); |
| break; |
| |
| case MOD: |
| /* 0%x is 0 (or x&0 if x has side-effects). */ |
| if (trueop0 == CONST0_RTX (mode)) |
| { |
| if (side_effects_p (op1)) |
| return simplify_gen_binary (AND, mode, op1, trueop0); |
| return trueop0; |
| } |
| /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */ |
| if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx) |
| { |
| if (side_effects_p (op0)) |
| return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode)); |
| return CONST0_RTX (mode); |
| } |
| break; |
| |
| case ROTATERT: |
| case ROTATE: |
| case ASHIFTRT: |
| /* Rotating ~0 always results in ~0, as does arithmetically |
| shifting it right. */ |
| if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT |
| && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode) |
| && ! side_effects_p (op1)) |
| return op0; |
| |
| /* Fall through. */ |
| |
| case ASHIFT: |
| case LSHIFTRT: |
| if (trueop1 == CONST0_RTX (mode)) |
| return op0; |
| if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1)) |
| return op0; |
| break; |
| |
| case SMIN: |
| if (width <= HOST_BITS_PER_WIDE_INT |
| && GET_CODE (trueop1) == CONST_INT |
| && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1) |
| && ! side_effects_p (op0)) |
| return op1; |
| if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)) |
| return op0; |
| tem = simplify_associative_operation (code, mode, op0, op1); |
| if (tem) |
| return tem; |
| break; |
| |
| case SMAX: |
| if (width <= HOST_BITS_PER_WIDE_INT |
| && GET_CODE (trueop1) == CONST_INT |
| && ((unsigned HOST_WIDE_INT) INTVAL (trueop1) |
| == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1) |
| && ! side_effects_p (op0)) |
| return op1; |
| if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)) |
| return op0; |
| tem = simplify_associative_operation (code, mode, op0, op1); |
| if (tem) |
| return tem; |
| break; |
| |
| case UMIN: |
| if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0)) |
| return op1; |
| if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)) |
| return op0; |
| tem = simplify_associative_operation (code, mode, op0, op1); |
| if (tem) |
| return tem; |
| break; |
| |
| case UMAX: |
| if (trueop1 == constm1_rtx && ! side_effects_p (op0)) |
| return op1; |
| if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)) |
| return op0; |
| tem = simplify_associative_operation (code, mode, op0, op1); |
| if (tem) |
| return tem; |
| break; |
| |
| case SS_PLUS: |
| case US_PLUS: |
| case SS_MINUS: |
| case US_MINUS: |
| /* ??? There are simplifications that can be done. */ |
| return 0; |
| |
| case VEC_SELECT: |
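| /* If the result mode is not a vector, we are extracting a single |
| element; otherwise we are selecting a subset of the elements. |
| Either way, fold the operation when operand 0 is a CONST_VECTOR. */ |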
| if (!VECTOR_MODE_P (mode)) |
| { |
| gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0))); |
| gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0))); |
| gcc_assert (GET_CODE (trueop1) == PARALLEL); |
| gcc_assert (XVECLEN (trueop1, 0) == 1); |
| gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT); |
| |
| if (GET_CODE (trueop0) == CONST_VECTOR) |
| return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP |
| (trueop1, 0, 0))); |
| } |
| else |
| { |
| gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0))); |
| gcc_assert (GET_MODE_INNER (mode) |
| == GET_MODE_INNER (GET_MODE (trueop0))); |
| gcc_assert (GET_CODE (trueop1) == PARALLEL); |
| |
| if (GET_CODE (trueop0) == CONST_VECTOR) |
| { |
| int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode)); |
| unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size); |
| rtvec v = rtvec_alloc (n_elts); |
| unsigned int i; |
| |
| gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts); |
| for (i = 0; i < n_elts; i++) |
| { |
| rtx x = XVECEXP (trueop1, 0, i); |
| |
| gcc_assert (GET_CODE (x) == CONST_INT); |
| RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, |
| INTVAL (x)); |
| } |
| |
| return gen_rtx_CONST_VECTOR (mode, v); |
| } |
| } |
| return 0; |
| case VEC_CONCAT: |
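| /* Concatenate the two operands into a single vector of mode MODE, |
| folding the result when both operands are constants. */ |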
| { |
| enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode |
| ? GET_MODE (trueop0) |
| : GET_MODE_INNER (mode)); |
| enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode |
| ? GET_MODE (trueop1) |
| : GET_MODE_INNER (mode)); |
| |
| gcc_assert (VECTOR_MODE_P (mode)); |
| gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode) |
| == GET_MODE_SIZE (mode)); |
| |
| if (VECTOR_MODE_P (op0_mode)) |
| gcc_assert (GET_MODE_INNER (mode) |
| == GET_MODE_INNER (op0_mode)); |
| else |
| gcc_assert (GET_MODE_INNER (mode) == op0_mode); |
| |
| if (VECTOR_MODE_P (op1_mode)) |
| gcc_assert (GET_MODE_INNER (mode) |
| == GET_MODE_INNER (op1_mode)); |
| else |
| gcc_assert (GET_MODE_INNER (mode) == op1_mode); |
| |
| if ((GET_CODE (trueop0) == CONST_VECTOR |
| || GET_CODE (trueop0) == CONST_INT |
| || GET_CODE (trueop0) == CONST_DOUBLE) |
| && (GET_CODE (trueop1) == CONST_VECTOR |
| || GET_CODE (trueop1) == CONST_INT |
| || GET_CODE (trueop1) == CONST_DOUBLE)) |
| { |
| int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode)); |
| unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size); |
| rtvec v = rtvec_alloc (n_elts); |
| unsigned int i; |
| unsigned in_n_elts = 1; |
| |
| if (VECTOR_MODE_P (op0_mode)) |
| in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size); |
| for (i = 0; i < n_elts; i++) |
| { |
| if (i < in_n_elts) |
| { |
| if (!VECTOR_MODE_P (op0_mode)) |
| RTVEC_ELT (v, i) = trueop0; |
| else |
| RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i); |
| } |
| else |
| { |
| if (!VECTOR_MODE_P (op1_mode)) |
| RTVEC_ELT (v, i) = trueop1; |
| else |
| RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1, |
| i - in_n_elts); |
| } |
| } |
| |
| return gen_rtx_CONST_VECTOR (mode, v); |
| } |
| } |
| return 0; |
| |
| default: |
| gcc_unreachable (); |
| } |
| |
| return 0; |
| } |
| |
| /* Get the integer argument values in two forms: |
| zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */ |
| |
| arg0 = INTVAL (trueop0); |
| arg1 = INTVAL (trueop1); |
| |
| if (width < HOST_BITS_PER_WIDE_INT) |
| { |
| arg0 &= ((HOST_WIDE_INT) 1 << width) - 1; |
| arg1 &= ((HOST_WIDE_INT) 1 << width) - 1; |
| |
| arg0s = arg0; |
| if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1))) |
| arg0s |= ((HOST_WIDE_INT) (-1) << width); |
| |
| arg1s = arg1; |
| if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1))) |
| arg1s |= ((HOST_WIDE_INT) (-1) << width); |
| } |
| else |
| { |
| arg0s = arg0; |
| arg1s = arg1; |
| } |
| |
| /* Compute the value of the arithmetic. */ |
| |
| switch (code) |
| { |
| case PLUS: |
| val = arg0s + arg1s; |
| break; |
| |
| case MINUS: |
| val = arg0s - arg1s; |
| break; |
| |
| case MULT: |
| val = arg0s * arg1s; |
| break; |
| |
| case DIV: |
| if (arg1s == 0 |
| || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1) |
| && arg1s == -1)) |
| return 0; |
| val = arg0s / arg1s; |
| break; |
| |
| case MOD: |
| if (arg1s == 0 |
| || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1) |
| && arg1s == -1)) |
| return 0; |
| val = arg0s % arg1s; |
| break; |
| |
| case UDIV: |
| if (arg1 == 0 |
| || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1) |
| && arg1s == -1)) |
| return 0; |
| val = (unsigned HOST_WIDE_INT) arg0 / arg1; |
| break; |
| |
| case UMOD: |
| if (arg1 == 0 |
| || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1) |
| && arg1s == -1)) |
| return 0; |
| val = (unsigned HOST_WIDE_INT) arg0 % arg1; |
| break; |
| |
| case AND: |
| val = arg0 & arg1; |
| break; |
| |
| case IOR: |
| val = arg0 | arg1; |
| break; |
| |
| case XOR: |
| val = arg0 ^ arg1; |
| break; |
| |
| case LSHIFTRT: |
| case ASHIFT: |
| case ASHIFTRT: |
| /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure the |
| value is in range. We can't return any old value for out-of-range |
| arguments because either the middle-end (via shift_truncation_mask) |
| or the back-end might be relying on target-specific knowledge. |
| Nor can we rely on shift_truncation_mask, since the shift might |
| not be part of an ashlM3, lshrM3 or ashrM3 instruction. */ |
| if (SHIFT_COUNT_TRUNCATED) |
| arg1 = (unsigned HOST_WIDE_INT) arg1 % width; |
| else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode)) |
| return 0; |
| |
| val = (code == ASHIFT |
| ? ((unsigned HOST_WIDE_INT) arg0) << arg1 |
| : ((unsigned HOST_WIDE_INT) arg0) >> arg1); |
| |
| /* Sign-extend the result for arithmetic right shifts. */ |
| if (code == ASHIFTRT && arg0s < 0 && arg1 > 0) |
| val |= ((HOST_WIDE_INT) -1) << (width - arg1); |
| break; |
| |
| case ROTATERT: |
| if (arg1 < 0) |
| return 0; |
| |
| arg1 %= width; |
| val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1)) |
| | (((unsigned HOST_WIDE_INT) arg0) >> arg1)); |
| break; |
| |
| case ROTATE: |
| if (arg1 < 0) |
| return 0; |
| |
| arg1 %= width; |
| val = ((((unsigned HOST_WIDE_INT) arg0) << arg1) |
| | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1))); |
| break; |
| |
| case COMPARE: |
| /* Do nothing here. */ |
| return 0; |
| |
| case SMIN: |
| val = arg0s <= arg1s ? arg0s : arg1s; |
| break; |
| |
| case UMIN: |
| val = ((unsigned HOST_WIDE_INT) arg0 |
| <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1); |
| break; |
| |
| case SMAX: |
| val = arg0s > arg1s ? arg0s : arg1s; |
| break; |
| |
| case UMAX: |
| val = ((unsigned HOST_WIDE_INT) arg0 |
| > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1); |
| break; |
| |
| case SS_PLUS: |
| case US_PLUS: |
| case SS_MINUS: |
| case US_MINUS: |
| /* ??? There are simplifications that can be done. */ |
| return 0; |
| |
| default: |
| gcc_unreachable (); |
| } |
| |
| val = trunc_int_for_mode (val, mode); |
| |
| return GEN_INT (val); |
| } |
| |
| /* Simplify a PLUS or MINUS, at least one of whose operands may be another |
| PLUS or MINUS. |
| |
| Rather than test for specific cases, we do this by a brute-force method |
| and do all possible simplifications until no more changes occur. Then |
| we rebuild the operation. |
| |
| If FORCE is true, then always generate the rtx. This is used to |
| canonicalize stuff emitted from simplify_gen_binary. Note that this |
| can still fail if the rtx is too complex. It won't fail just because |
| the result is not 'simpler' than the input, however. */ |
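| /* For instance, (minus (plus X (const_int 4)) (plus X (const_int 1))) |
| is decomposed into the terms {+X, +4, -X, -1}; the X terms cancel, |
| the constants fold, and the result is (const_int 3). */ |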
| |
| struct simplify_plus_minus_op_data |
| { |
| rtx op; |
| int neg; |
| }; |
| |
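| /* qsort comparison function for simplify_plus_minus: order operands by |
| decreasing commutative_operand_precedence, so that constants sort last. */ |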
| static int |
| simplify_plus_minus_op_data_cmp (const void *p1, const void *p2) |
| { |
| const struct simplify_plus_minus_op_data *d1 = p1; |
| const struct simplify_plus_minus_op_data *d2 = p2; |
| |
| return (commutative_operand_precedence (d2->op) |
| - commutative_operand_precedence (d1->op)); |
| } |
| |
| static rtx |
| simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0, |
| rtx op1, int force) |
| { |
| struct simplify_plus_minus_op_data ops[8]; |
| rtx result, tem; |
| int n_ops = 2, input_ops = 2, input_consts = 0, n_consts; |
| int first, changed; |
| int i, j; |
| |
| memset (ops, 0, sizeof ops); |
| |
| /* Set up the two operands and then expand them until nothing has been |
| changed. If we run out of room in our array, give up; this should |
| almost never happen. */ |
| |
| ops[0].op = op0; |
| ops[0].neg = 0; |
| ops[1].op = op1; |
| ops[1].neg = (code == MINUS); |
| |
| do |
| { |
| changed = 0; |
| |
| for (i = 0; i < n_ops; i++) |
| { |
| rtx this_op = ops[i].op; |
| int this_neg = ops[i].neg; |
| enum rtx_code this_code = GET_CODE (this_op); |
| |
| switch (this_code) |
| { |
| case PLUS: |
| case MINUS: |
| if (n_ops == 7) |
| return NULL_RTX; |
| |
| ops[n_ops].op = XEXP (this_op, 1); |
| ops[n_ops].neg = (this_code == MINUS) ^ this_neg; |
| n_ops++; |
| |
| ops[i].op = XEXP (this_op, 0); |
| input_ops++; |
| changed = 1; |
| break; |
| |
| case NEG: |
| ops[i].op = XEXP (this_op, 0); |
| ops[i].neg = ! this_neg; |
| changed = 1; |
| break; |
| |
| case CONST: |
| if (n_ops < 7 |
| && GET_CODE (XEXP (this_op, 0)) == PLUS |
| && CONSTANT_P (XEXP (XEXP (this_op, 0), 0)) |
| && CONSTANT_P (XEXP (XEXP (this_op, 0), 1))) |
| { |
| ops[i].op = XEXP (XEXP (this_op, 0), 0); |
| ops[n_ops].op = XEXP (XEXP (this_op, 0), 1); |
| ops[n_ops].neg = this_neg; |
| n_ops++; |
| input_consts++; |
| changed = 1; |
| } |
| break; |
| |
| case NOT: |
| /* ~a -> (-a - 1) */ |
| if (n_ops != 7) |
| { |
| ops[n_ops].op = constm1_rtx; |
| ops[n_ops++].neg = this_neg; |
| ops[i].op = XEXP (this_op, 0); |
| ops[i].neg = !this_neg; |
| changed = 1; |
| } |
| break; |
| |
| case CONST_INT: |
| if (this_neg) |
| { |
| ops[i].op = neg_const_int (mode, this_op); |
| ops[i].neg = 0; |
| changed = 1; |
| } |
| break; |
| |
| default: |
| break; |
| } |
| } |
| } |
| while (changed); |
| |
| /* If we only have two operands, we can't do anything. */ |
| if (n_ops <= 2 && !force) |
| return NULL_RTX; |
| |
| /* Count the number of CONSTs we didn't split above. */ |
| for (i = 0; i < n_ops; i++) |
| if (GET_CODE (ops[i].op) == CONST) |
| input_consts++; |
| |
| /* Now simplify each pair of operands until nothing changes. The first |
| time through just simplify constants against each other. */ |
| |
| first = 1; |
| do |
| { |
| changed = first; |
| |
| for (i = 0; i < n_ops - 1; i++) |
| for (j = i + 1; j < n_ops; j++) |
| { |
| rtx lhs = ops[i].op, rhs = ops[j].op; |
| int lneg = ops[i].neg, rneg = ops[j].neg; |
| |
| if (lhs != 0 && rhs != 0 |
| && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs)))) |
| { |
| enum rtx_code ncode = PLUS; |
| |
| if (lneg != rneg) |
| { |
| ncode = MINUS; |
| if (lneg) |
| tem = lhs, lhs = rhs, rhs = tem; |
| } |
| else if (swap_commutative_operands_p (lhs, rhs)) |
| tem = lhs, lhs = rhs, rhs = tem; |
| |
| tem = simplify_binary_operation (ncode, mode, lhs, rhs); |
| |
| /* Reject "simplifications" that just wrap the two |
| arguments in a CONST. Failure to do so can result |
| in infinite recursion with simplify_binary_operation |
| when it calls us to simplify CONST operations. */ |
| if (tem |
| && ! (GET_CODE (tem) == CONST |
| && GET_CODE (XEXP (tem, 0)) == ncode |
| && XEXP (XEXP (tem, 0), 0) == lhs |
| && XEXP (XEXP (tem, 0), 1) == rhs) |
| /* Don't allow -x + -1 -> ~x simplifications in the |
| first pass. This allows us the chance to combine |
| the -1 with other constants. */ |
| && ! (first |
| && GET_CODE (tem) == NOT |
| && XEXP (tem, 0) == rhs)) |
| { |
| lneg &= rneg; |
| if (GET_CODE (tem) == NEG) |
| tem = XEXP (tem, 0), lneg = !lneg; |
| if (GET_CODE (tem) == CONST_INT && lneg) |
| tem = neg_const_int (mode, tem), lneg = 0; |
| |
| ops[i].op = tem; |
| ops[i].neg = lneg; |
| ops[j].op = NULL_RTX; |
| changed = 1; |
| } |
| } |
| } |
| |
| first = 0; |
| } |
| while (changed); |
| |
| /* Pack all the operands to the lower-numbered entries. */ |
| for (i = 0, j = 0; j < n_ops; j++) |
| if (ops[j].op) |
| ops[i++] = ops[j]; |
| n_ops = i; |
| |
| /* Sort the operations by commutative_operand_precedence, the same |
| ordering used by swap_commutative_operands_p. */ |
| qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp); |
| |
| /* Create (minus -C X) instead of (neg (const (plus X C))). */ |
| if (n_ops == 2 |
| && GET_CODE (ops[1].op) == CONST_INT |
| && CONSTANT_P (ops[0].op) |
| && ops[0].neg) |
| return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op); |
| |
| /* We suppressed creation of trivial CONST expressions in the |
| combination loop to avoid recursion. Create one manually now. |
| The combination loop should have ensured that there is exactly |
| one CONST_INT, and the sort will have ensured that it is last |
| in the array and that any other constant will be next-to-last. */ |
| |
| if (n_ops > 1 |
| && GET_CODE (ops[n_ops - 1].op) == CONST_INT |
| && CONSTANT_P (ops[n_ops - 2].op)) |
| { |
| rtx value = ops[n_ops - 1].op; |
| if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg) |
| value = neg_const_int (mode, value); |
| ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value)); |
| n_ops--; |
| } |
| |
| /* Count the number of CONSTs that we generated. */ |
| n_consts = 0; |
| for (i = 0; i < n_ops; i++) |
| if (GET_CODE (ops[i].op) == CONST) |
| n_consts++; |
| |
| /* Give up if we didn't reduce the number of operands we had. Make |
| sure we count a CONST as two operands. If we have the same |
| number of operands, but have made more CONSTs than before, this |
| is also an improvement, so accept it. */ |
| if (!force |
| && (n_ops + n_consts > input_ops |
| || (n_ops + n_consts == input_ops && n_consts <= input_consts))) |
| return NULL_RTX; |
| |
| /* Put a non-negated operand first, if possible. */ |
| |
| for (i = 0; i < n_ops && ops[i].neg; i++) |
| continue; |
| if (i == n_ops) |
| ops[0].op = gen_rtx_NEG (mode, ops[0].op); |
| else if (i != 0) |
| { |
| tem = ops[0].op; |
| ops[0] = ops[i]; |
| ops[i].op = tem; |
| ops[i].neg = 1; |
| } |
| |
| /* Now make the result by performing the requested operations. */ |
| result = ops[0].op; |
| for (i = 1; i < n_ops; i++) |
| result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS, |
| mode, result, ops[i].op); |
| |
| return result; |
| } |
| |
| /* Check whether an operand is suitable for calling simplify_plus_minus. */ |
| static bool |
| plus_minus_operand_p (rtx x) |
| { |
| return GET_CODE (x) == PLUS |
| || GET_CODE (x) == MINUS |
| || (GET_CODE (x) == CONST |
| && GET_CODE (XEXP (x, 0)) == PLUS |
| && CONSTANT_P (XEXP (XEXP (x, 0), 0)) |
| && CONSTANT_P (XEXP (XEXP (x, 0), 1))); |
| } |
| |
| /* Like simplify_binary_operation except used for relational operators. |
| MODE is the mode of the result. If MODE is VOIDmode, the operands must |
| not both be VOIDmode. |
| |
| CMP_MODE specifies the mode in which the comparison is done, so it is |
| the mode of the operands. If CMP_MODE is VOIDmode, it is taken from |
| the operands or, if both are VOIDmode, the operands are compared in |
| "infinite precision". */ |
| rtx |
| simplify_relational_operation (enum rtx_code code, enum machine_mode mode, |
| enum machine_mode cmp_mode, rtx op0, rtx op1) |
| { |
| rtx tem, trueop0, trueop1; |
| |
| if (cmp_mode == VOIDmode) |
| cmp_mode = GET_MODE (op0); |
| if (cmp_mode == VOIDmode) |
| cmp_mode = GET_MODE (op1); |
| |
| tem = simplify_const_relational_operation (code, cmp_mode, op0, op1); |
| if (tem) |
| { |
| if (GET_MODE_CLASS (mode) == MODE_FLOAT) |
| { |
| if (tem == const0_rtx) |
| return CONST0_RTX (mode); |
| #ifdef FLOAT_STORE_FLAG_VALUE |
| { |
| REAL_VALUE_TYPE val; |
| val = FLOAT_STORE_FLAG_VALUE (mode); |
| return CONST_DOUBLE_FROM_REAL_VALUE (val, mode); |
| } |
| #else |
| return NULL_RTX; |
|