| /* Emit RTL for the GCC expander. |
| Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998, |
| 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc. |
| |
| This file is part of GCC. |
| |
| GCC is free software; you can redistribute it and/or modify it under |
| the terms of the GNU General Public License as published by the Free |
| Software Foundation; either version 2, or (at your option) any later |
| version. |
| |
| GCC is distributed in the hope that it will be useful, but WITHOUT ANY |
| WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
| for more details. |
| |
| You should have received a copy of the GNU General Public License |
| along with GCC; see the file COPYING. If not, write to the Free |
| Software Foundation, 59 Temple Place - Suite 330, Boston, MA |
| 02111-1307, USA. */ |
| |
| |
| /* Middle-to-low level generation of rtx code and insns. |
| |
| This file contains support functions for creating rtl expressions |
| and manipulating them in the doubly-linked chain of insns. |
| |
| The patterns of the insns are created by machine-dependent |
| routines in insn-emit.c, which is generated automatically from |
| the machine description. These routines make the individual rtx's |
| of the pattern with `gen_rtx_fmt_ee' and others in genrtl.[ch], |
| which are automatically generated from rtl.def; what is machine |
| dependent is the kind of rtx's they make and what arguments they |
| use. */ |
| |
| #include "config.h" |
| #include "system.h" |
| #include "coretypes.h" |
| #include "tm.h" |
| #include "toplev.h" |
| #include "rtl.h" |
| #include "tree.h" |
| #include "tm_p.h" |
| #include "flags.h" |
| #include "function.h" |
| #include "expr.h" |
| #include "regs.h" |
| #include "hard-reg-set.h" |
| #include "hashtab.h" |
| #include "insn-config.h" |
| #include "recog.h" |
| #include "real.h" |
| #include "bitmap.h" |
| #include "basic-block.h" |
| #include "ggc.h" |
| #include "debug.h" |
| #include "langhooks.h" |
| |
| /* Commonly used modes. */ |
| |
| enum machine_mode byte_mode; /* Mode whose width is BITS_PER_UNIT. */ |
| enum machine_mode word_mode; /* Mode whose width is BITS_PER_WORD. */ |
| enum machine_mode double_mode; /* Mode whose width is DOUBLE_TYPE_SIZE. */ |
| enum machine_mode ptr_mode; /* Mode whose width is POINTER_SIZE. */ |
| |
| |
| /* This is *not* reset after each function. It gives each CODE_LABEL |
| in the entire compilation a unique label number. */ |
| |
| static GTY(()) int label_num = 1; |
| |
| /* Nonzero means do not generate NOTEs for source line numbers. */ |
| |
| static int no_line_numbers; |
| |
| /* Commonly used rtx's, so that we only need space for one copy. |
| These are initialized once for the entire compilation. |
| All of these are unique; no other rtx-object will be equal to any |
| of these. */ |
| |
| rtx global_rtl[GR_MAX]; |
| |
| /* Commonly used RTL for hard registers. These objects are not necessarily |
| unique, so we allocate them separately from global_rtl. They are |
| initialized once per compilation unit, then copied into regno_reg_rtx |
| at the beginning of each function. */ |
| static GTY(()) rtx static_regno_reg_rtx[FIRST_PSEUDO_REGISTER]; |
| |
| /* We record floating-point CONST_DOUBLEs in each floating-point mode for |
| the values of 0, 1, and 2. For the integer entries and VOIDmode, we |
| record a copy of const[012]_rtx. */ |
| |
| rtx const_tiny_rtx[3][(int) MAX_MACHINE_MODE]; |
| |
| rtx const_true_rtx; |
| |
| REAL_VALUE_TYPE dconst0; |
| REAL_VALUE_TYPE dconst1; |
| REAL_VALUE_TYPE dconst2; |
| REAL_VALUE_TYPE dconst3; |
| REAL_VALUE_TYPE dconst10; |
| REAL_VALUE_TYPE dconstm1; |
| REAL_VALUE_TYPE dconstm2; |
| REAL_VALUE_TYPE dconsthalf; |
| REAL_VALUE_TYPE dconstthird; |
| REAL_VALUE_TYPE dconstpi; |
| REAL_VALUE_TYPE dconste; |
| |
| /* All references to the following fixed hard registers go through |
| these unique rtl objects. On machines where the frame-pointer and |
| arg-pointer are the same register, they use the same unique object. |
| |
| After register allocation, other rtl objects which used to be pseudo-regs |
| may be clobbered to refer to the frame-pointer register. |
| But references that were originally to the frame-pointer can be |
| distinguished from the others because they contain frame_pointer_rtx. |
| |
| When to use frame_pointer_rtx and hard_frame_pointer_rtx is a little |
| tricky: until register elimination has taken place hard_frame_pointer_rtx |
| should be used if it is being set, and frame_pointer_rtx otherwise. After |
| register elimination hard_frame_pointer_rtx should always be used. |
| On machines where the two registers are the same (as most are), these |
| rtxs are the same. |
| |
| In an inline procedure, the stack and frame pointer rtxs may not be |
| used for anything else. */ |
| rtx static_chain_rtx; /* (REG:Pmode STATIC_CHAIN_REGNUM) */ |
| rtx static_chain_incoming_rtx; /* (REG:Pmode STATIC_CHAIN_INCOMING_REGNUM) */ |
| rtx pic_offset_table_rtx; /* (REG:Pmode PIC_OFFSET_TABLE_REGNUM) */ |
| |
| /* This is used to implement __builtin_return_address for some machines. |
| See for instance the MIPS port. */ |
| rtx return_address_pointer_rtx; /* (REG:Pmode RETURN_ADDRESS_POINTER_REGNUM) */ |
| |
| /* We make one copy of (const_int C) where C is in |
| [- MAX_SAVED_CONST_INT, MAX_SAVED_CONST_INT] |
| to save space during the compilation and simplify comparisons of |
| integers. */ |
| |
| rtx const_int_rtx[MAX_SAVED_CONST_INT * 2 + 1]; |
| |
| /* A hash table storing CONST_INTs whose absolute value is greater |
| than MAX_SAVED_CONST_INT. */ |
| |
| static GTY ((if_marked ("ggc_marked_p"), param_is (struct rtx_def))) |
| htab_t const_int_htab; |
| |
| /* A hash table storing memory attribute structures. */ |
| static GTY ((if_marked ("ggc_marked_p"), param_is (struct mem_attrs))) |
| htab_t mem_attrs_htab; |
| |
| /* A hash table storing register attribute structures. */ |
| static GTY ((if_marked ("ggc_marked_p"), param_is (struct reg_attrs))) |
| htab_t reg_attrs_htab; |
| |
| /* A hash table storing all CONST_DOUBLEs. */ |
| static GTY ((if_marked ("ggc_marked_p"), param_is (struct rtx_def))) |
| htab_t const_double_htab; |
| |
| #define first_insn (cfun->emit->x_first_insn) |
| #define last_insn (cfun->emit->x_last_insn) |
| #define cur_insn_uid (cfun->emit->x_cur_insn_uid) |
| #define last_location (cfun->emit->x_last_location) |
| #define first_label_num (cfun->emit->x_first_label_num) |
| |
| static rtx make_jump_insn_raw (rtx); |
| static rtx make_call_insn_raw (rtx); |
| static rtx find_line_note (rtx); |
| static rtx change_address_1 (rtx, enum machine_mode, rtx, int); |
| static void unshare_all_decls (tree); |
| static void reset_used_decls (tree); |
| static void mark_label_nuses (rtx); |
| static hashval_t const_int_htab_hash (const void *); |
| static int const_int_htab_eq (const void *, const void *); |
| static hashval_t const_double_htab_hash (const void *); |
| static int const_double_htab_eq (const void *, const void *); |
| static rtx lookup_const_double (rtx); |
| static hashval_t mem_attrs_htab_hash (const void *); |
| static int mem_attrs_htab_eq (const void *, const void *); |
| static mem_attrs *get_mem_attrs (HOST_WIDE_INT, tree, rtx, rtx, unsigned int, |
| enum machine_mode); |
| static hashval_t reg_attrs_htab_hash (const void *); |
| static int reg_attrs_htab_eq (const void *, const void *); |
| static reg_attrs *get_reg_attrs (tree, int); |
| static tree component_ref_for_mem_expr (tree); |
| static rtx gen_const_vector (enum machine_mode, int); |
| static void copy_rtx_if_shared_1 (rtx *orig); |
| |
| /* Probability of the conditional branch currently being processed by |
| try_split. Set to -1 otherwise. */ |
| int split_branch_probability = -1; |
| |
| /* Returns a hash code for X (which is really a CONST_INT). */ |
| |
| static hashval_t |
| const_int_htab_hash (const void *x) |
| { |
| return (hashval_t) INTVAL ((rtx) x); |
| } |
| |
| /* Returns nonzero if the value represented by X (which is really a |
| CONST_INT) is the same as that given by Y (which is really a |
| HOST_WIDE_INT *). */ |
| |
| static int |
| const_int_htab_eq (const void *x, const void *y) |
| { |
| return (INTVAL ((rtx) x) == *((const HOST_WIDE_INT *) y)); |
| } |
| |
| /* Returns a hash code for X (which is really a CONST_DOUBLE). */ |
| static hashval_t |
| const_double_htab_hash (const void *x) |
| { |
| rtx value = (rtx) x; |
| hashval_t h; |
| |
| if (GET_MODE (value) == VOIDmode) |
| h = CONST_DOUBLE_LOW (value) ^ CONST_DOUBLE_HIGH (value); |
| else |
| { |
| h = real_hash (CONST_DOUBLE_REAL_VALUE (value)); |
| /* MODE is used in the comparison, so it should be in the hash. */ |
| h ^= GET_MODE (value); |
| } |
| return h; |
| } |
| |
| /* Returns nonzero if the value represented by X (really a CONST_DOUBLE) |
| is the same as that represented by Y (really a CONST_DOUBLE). */ |
| static int |
| const_double_htab_eq (const void *x, const void *y) |
| { |
| rtx a = (rtx)x, b = (rtx)y; |
| |
| if (GET_MODE (a) != GET_MODE (b)) |
| return 0; |
| if (GET_MODE (a) == VOIDmode) |
| return (CONST_DOUBLE_LOW (a) == CONST_DOUBLE_LOW (b) |
| && CONST_DOUBLE_HIGH (a) == CONST_DOUBLE_HIGH (b)); |
| else |
| return real_identical (CONST_DOUBLE_REAL_VALUE (a), |
| CONST_DOUBLE_REAL_VALUE (b)); |
| } |
| |
| /* Returns a hash code for X (which is really a mem_attrs *). */ |
| |
| static hashval_t |
| mem_attrs_htab_hash (const void *x) |
| { |
| mem_attrs *p = (mem_attrs *) x; |
| |
| return (p->alias ^ (p->align * 1000) |
| ^ ((p->offset ? INTVAL (p->offset) : 0) * 50000) |
| ^ ((p->size ? INTVAL (p->size) : 0) * 2500000) |
| ^ (size_t) p->expr); |
| } |
| |
| /* Returns nonzero if the value represented by X (which is really a |
| mem_attrs *) is the same as that given by Y (which is also really a |
| mem_attrs *). */ |
| |
| static int |
| mem_attrs_htab_eq (const void *x, const void *y) |
| { |
| mem_attrs *p = (mem_attrs *) x; |
| mem_attrs *q = (mem_attrs *) y; |
| |
| return (p->alias == q->alias && p->expr == q->expr && p->offset == q->offset |
| && p->size == q->size && p->align == q->align); |
| } |
| |
| /* Allocate a new mem_attrs structure and insert it into the hash table if |
| one identical to it is not already in the table. We are doing this for |
| MEM of mode MODE. */ |
| |
| static mem_attrs * |
| get_mem_attrs (HOST_WIDE_INT alias, tree expr, rtx offset, rtx size, |
| unsigned int align, enum machine_mode mode) |
| { |
| mem_attrs attrs; |
| void **slot; |
| |
| /* If everything is the default, we can just return zero. |
| This must match what the corresponding MEM_* macros return when the |
| field is not present. */ |
| if (alias == 0 && expr == 0 && offset == 0 |
| && (size == 0 |
| || (mode != BLKmode && GET_MODE_SIZE (mode) == INTVAL (size))) |
| && (STRICT_ALIGNMENT && mode != BLKmode |
| ? align == GET_MODE_ALIGNMENT (mode) : align == BITS_PER_UNIT)) |
| return 0; |
| |
| attrs.alias = alias; |
| attrs.expr = expr; |
| attrs.offset = offset; |
| attrs.size = size; |
| attrs.align = align; |
| |
| slot = htab_find_slot (mem_attrs_htab, &attrs, INSERT); |
| if (*slot == 0) |
| { |
| *slot = ggc_alloc (sizeof (mem_attrs)); |
| memcpy (*slot, &attrs, sizeof (mem_attrs)); |
| } |
| |
| return *slot; |
| } |
| |
| /* Returns a hash code for X (which is really a reg_attrs *). */ |
| |
| static hashval_t |
| reg_attrs_htab_hash (const void *x) |
| { |
| reg_attrs *p = (reg_attrs *) x; |
| |
| return ((p->offset * 1000) ^ (long) p->decl); |
| } |
| |
| /* Returns nonzero if the value represented by X (which is really a |
| reg_attrs *) is the same as that given by Y (which is also really a |
| reg_attrs *). */ |
| |
| static int |
| reg_attrs_htab_eq (const void *x, const void *y) |
| { |
| reg_attrs *p = (reg_attrs *) x; |
| reg_attrs *q = (reg_attrs *) y; |
| |
| return (p->decl == q->decl && p->offset == q->offset); |
| } |
| |
| /* Allocate a new reg_attrs structure and insert it into the hash table if |
| one identical to it is not already in the table. We are doing this for |
| a REG whose expression is DECL and whose offset is OFFSET. */ |
| |
| static reg_attrs * |
| get_reg_attrs (tree decl, int offset) |
| { |
| reg_attrs attrs; |
| void **slot; |
| |
| /* If everything is the default, we can just return zero. */ |
| if (decl == 0 && offset == 0) |
| return 0; |
| |
| attrs.decl = decl; |
| attrs.offset = offset; |
| |
| slot = htab_find_slot (reg_attrs_htab, &attrs, INSERT); |
| if (*slot == 0) |
| { |
| *slot = ggc_alloc (sizeof (reg_attrs)); |
| memcpy (*slot, &attrs, sizeof (reg_attrs)); |
| } |
| |
| return *slot; |
| } |
| |
| /* Generate a new REG rtx. Make sure ORIGINAL_REGNO is set properly, and |
| don't attempt to share with the various global pieces of rtl (such as |
| frame_pointer_rtx). */ |
| |
| rtx |
| gen_raw_REG (enum machine_mode mode, int regno) |
| { |
| rtx x = gen_rtx_raw_REG (mode, regno); |
| ORIGINAL_REGNO (x) = regno; |
| return x; |
| } |
| |
| /* There are some RTL codes that require special attention; the generation |
| functions do the raw handling. If you add to this list, modify |
| special_rtx in gengenrtl.c as well. */ |
| |
| rtx |
| gen_rtx_CONST_INT (enum machine_mode mode ATTRIBUTE_UNUSED, HOST_WIDE_INT arg) |
| { |
| void **slot; |
| |
| if (arg >= - MAX_SAVED_CONST_INT && arg <= MAX_SAVED_CONST_INT) |
| return const_int_rtx[arg + MAX_SAVED_CONST_INT]; |
| |
| #if STORE_FLAG_VALUE != 1 && STORE_FLAG_VALUE != -1 |
| if (const_true_rtx && arg == STORE_FLAG_VALUE) |
| return const_true_rtx; |
| #endif |
| |
| /* Look up the CONST_INT in the hash table. */ |
| slot = htab_find_slot_with_hash (const_int_htab, &arg, |
| (hashval_t) arg, INSERT); |
| if (*slot == 0) |
| *slot = gen_rtx_raw_CONST_INT (VOIDmode, arg); |
| |
| return (rtx) *slot; |
| } |
| |
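| /* Return a CONST_INT for constant C, truncated to fit in mode MODE. */ |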
| rtx |
| gen_int_mode (HOST_WIDE_INT c, enum machine_mode mode) |
| { |
| return GEN_INT (trunc_int_for_mode (c, mode)); |
| } |
| |
| /* CONST_DOUBLEs might be created from pairs of integers, or from |
| REAL_VALUE_TYPEs. Also, their length is known only at run time, |
| so we cannot use gen_rtx_raw_CONST_DOUBLE. */ |
| |
| /* Determine whether REAL, a CONST_DOUBLE, already exists in the |
| hash table. If so, return its counterpart; otherwise add it |
| to the hash table and return it. */ |
| static rtx |
| lookup_const_double (rtx real) |
| { |
| void **slot = htab_find_slot (const_double_htab, real, INSERT); |
| if (*slot == 0) |
| *slot = real; |
| |
| return (rtx) *slot; |
| } |
| |
| /* Return a CONST_DOUBLE rtx for a floating-point value specified by |
| VALUE in mode MODE. */ |
| rtx |
| const_double_from_real_value (REAL_VALUE_TYPE value, enum machine_mode mode) |
| { |
| rtx real = rtx_alloc (CONST_DOUBLE); |
| PUT_MODE (real, mode); |
| |
| memcpy (&CONST_DOUBLE_LOW (real), &value, sizeof (REAL_VALUE_TYPE)); |
| |
| return lookup_const_double (real); |
| } |
| |
| /* Return a CONST_DOUBLE or CONST_INT for a value specified as a pair |
| of ints: I0 is the low-order word and I1 is the high-order word. |
| Do not use this routine for non-integer modes; convert to |
| REAL_VALUE_TYPE and use CONST_DOUBLE_FROM_REAL_VALUE. */ |
| |
| rtx |
| immed_double_const (HOST_WIDE_INT i0, HOST_WIDE_INT i1, enum machine_mode mode) |
| { |
| rtx value; |
| unsigned int i; |
| |
| if (mode != VOIDmode) |
| { |
| int width; |
| |
| gcc_assert (GET_MODE_CLASS (mode) == MODE_INT |
| || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT |
| /* We can get a 0 for an error mark. */ |
| || GET_MODE_CLASS (mode) == MODE_VECTOR_INT |
| || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT); |
| |
| /* We clear out all bits that don't belong in MODE, unless they and |
| our sign bit are all one. So we get either a reasonable negative |
| value or a reasonable unsigned value for this mode. */ |
| width = GET_MODE_BITSIZE (mode); |
| if (width < HOST_BITS_PER_WIDE_INT |
| && ((i0 & ((HOST_WIDE_INT) (-1) << (width - 1))) |
| != ((HOST_WIDE_INT) (-1) << (width - 1)))) |
| i0 &= ((HOST_WIDE_INT) 1 << width) - 1, i1 = 0; |
| else if (width == HOST_BITS_PER_WIDE_INT |
| && ! (i1 == ~0 && i0 < 0)) |
| i1 = 0; |
| else |
| /* We should be able to represent this value as a constant. */ |
| gcc_assert (width <= 2 * HOST_BITS_PER_WIDE_INT); |
| |
| /* If this would be an entire word for the target, but is not for |
| the host, then sign-extend on the host so that the number will |
| look the same way on the host that it would on the target. |
| |
| For example, when building a 64 bit alpha hosted 32 bit sparc |
| targeted compiler, then we want the 32 bit unsigned value -1 to be |
| represented as a 64 bit value -1, and not as 0x00000000ffffffff. |
| The latter confuses the sparc backend. */ |
| |
| if (width < HOST_BITS_PER_WIDE_INT |
| && (i0 & ((HOST_WIDE_INT) 1 << (width - 1)))) |
| i0 |= ((HOST_WIDE_INT) (-1) << width); |
| |
| /* If MODE fits within HOST_BITS_PER_WIDE_INT, always use a |
| CONST_INT. |
| |
| ??? Strictly speaking, this is wrong if we create a CONST_INT for |
| a large unsigned constant with the size of MODE being |
| HOST_BITS_PER_WIDE_INT and later try to interpret that constant |
| in a wider mode. In that case we will mis-interpret it as a |
| negative number. |
| |
| Unfortunately, the only alternative is to make a CONST_DOUBLE for |
| any constant in any mode if it is an unsigned constant larger |
| than the maximum signed integer in an int on the host. However, |
| doing this will break everyone that always expects to see a |
| CONST_INT for SImode and smaller. |
| |
| We have always been making CONST_INTs in this case, so nothing |
| new is being broken. */ |
| |
| if (width <= HOST_BITS_PER_WIDE_INT) |
| i1 = (i0 < 0) ? ~(HOST_WIDE_INT) 0 : 0; |
| } |
| |
| /* If this integer fits in one word, return a CONST_INT. */ |
| if ((i1 == 0 && i0 >= 0) || (i1 == ~0 && i0 < 0)) |
| return GEN_INT (i0); |
| |
| /* We use VOIDmode for integers. */ |
| value = rtx_alloc (CONST_DOUBLE); |
| PUT_MODE (value, VOIDmode); |
| |
| CONST_DOUBLE_LOW (value) = i0; |
| CONST_DOUBLE_HIGH (value) = i1; |
| |
| for (i = 2; i < (sizeof CONST_DOUBLE_FORMAT - 1); i++) |
| XWINT (value, i) = 0; |
| |
| return lookup_const_double (value); |
| } |
| |
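| /* Return a REG rtx for register number REGNO in mode MODE, reusing the |
| pre-allocated global rtl objects (such as stack_pointer_rtx) where |
| possible. */ |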
| rtx |
| gen_rtx_REG (enum machine_mode mode, unsigned int regno) |
| { |
| /* In case the MD file explicitly references the frame pointer, have |
| all such references point to the same frame pointer. This is |
| used during frame pointer elimination to distinguish the explicit |
| references to these registers from pseudos that happened to be |
| assigned to them. |
| |
| If we have eliminated the frame pointer or arg pointer, we will |
| be using it as a normal register, for example as a spill |
| register. In such cases, we might be accessing it in a mode that |
| is not Pmode and therefore cannot use the pre-allocated rtx. |
| |
| Also don't do this when we are making new REGs in reload, since |
| we don't want to get confused with the real pointers. */ |
| |
| if (mode == Pmode && !reload_in_progress) |
| { |
| if (regno == FRAME_POINTER_REGNUM |
| && (!reload_completed || frame_pointer_needed)) |
| return frame_pointer_rtx; |
| #if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM |
| if (regno == HARD_FRAME_POINTER_REGNUM |
| && (!reload_completed || frame_pointer_needed)) |
| return hard_frame_pointer_rtx; |
| #endif |
| #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM && HARD_FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM |
| if (regno == ARG_POINTER_REGNUM) |
| return arg_pointer_rtx; |
| #endif |
| #ifdef RETURN_ADDRESS_POINTER_REGNUM |
| if (regno == RETURN_ADDRESS_POINTER_REGNUM) |
| return return_address_pointer_rtx; |
| #endif |
| if (regno == (unsigned) PIC_OFFSET_TABLE_REGNUM |
| && fixed_regs[PIC_OFFSET_TABLE_REGNUM]) |
| return pic_offset_table_rtx; |
| if (regno == STACK_POINTER_REGNUM) |
| return stack_pointer_rtx; |
| } |
| |
| #if 0 |
| /* If the per-function register table has been set up, try to re-use |
| an existing entry in that table to avoid useless generation of RTL. |
| |
| This code is disabled for now until we can fix the various backends |
| which depend on having non-shared hard registers in some cases. Long |
| term we want to re-enable this code as it can significantly cut down |
| on the amount of useless RTL that gets generated. |
| |
| We'll also need to fix some code that runs after reload that wants to |
| set ORIGINAL_REGNO. */ |
| |
| if (cfun |
| && cfun->emit |
| && regno_reg_rtx |
| && regno < FIRST_PSEUDO_REGISTER |
| && reg_raw_mode[regno] == mode) |
| return regno_reg_rtx[regno]; |
| #endif |
| |
| return gen_raw_REG (mode, regno); |
| } |
| |
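| /* Return a new MEM rtx of mode MODE referring to address ADDR, with its |
| memory attributes cleared. */ |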
| rtx |
| gen_rtx_MEM (enum machine_mode mode, rtx addr) |
| { |
| rtx rt = gen_rtx_raw_MEM (mode, addr); |
| |
| /* This field is not cleared by the mere allocation of the rtx, so |
| we clear it here. */ |
| MEM_ATTRS (rt) = 0; |
| |
| return rt; |
| } |
| |
| /* Generate a MEM referring to non-trapping constant memory. */ |
| |
| rtx |
| gen_const_mem (enum machine_mode mode, rtx addr) |
| { |
| rtx mem = gen_rtx_MEM (mode, addr); |
| MEM_READONLY_P (mem) = 1; |
| MEM_NOTRAP_P (mem) = 1; |
| return mem; |
| } |
| |
| /* APPLE LOCAL begin mainline 2006-02-17 4356747 stack realign */ |
| /* Generate a MEM referring to fixed portions of the frame, e.g., register |
| save areas. */ |
| |
| rtx |
| gen_frame_mem (enum machine_mode mode, rtx addr) |
| { |
| rtx mem = gen_rtx_MEM (mode, addr); |
| MEM_NOTRAP_P (mem) = 1; |
| set_mem_alias_set (mem, get_frame_alias_set ()); |
| return mem; |
| } |
| |
| /* Generate a MEM referring to a temporary use of the stack, not part |
| of the fixed stack frame. For example, something which is pushed |
| by a target splitter. */ |
| rtx |
| gen_tmp_stack_mem (enum machine_mode mode, rtx addr) |
| { |
| rtx mem = gen_rtx_MEM (mode, addr); |
| MEM_NOTRAP_P (mem) = 1; |
| if (!current_function_calls_alloca) |
| set_mem_alias_set (mem, get_frame_alias_set ()); |
| return mem; |
| } |
| /* APPLE LOCAL end mainline 2006-02-17 4356747 stack realign */ |
| |
| /* We want to create (subreg:OMODE (obj:IMODE) OFFSET). Return true if |
| this construct would be valid, and false otherwise. */ |
| |
| bool |
| validate_subreg (enum machine_mode omode, enum machine_mode imode, |
| rtx reg, unsigned int offset) |
| { |
| unsigned int isize = GET_MODE_SIZE (imode); |
| unsigned int osize = GET_MODE_SIZE (omode); |
| |
| /* All subregs must be aligned. */ |
| if (offset % osize != 0) |
| return false; |
| |
| /* The subreg offset cannot be outside the inner object. */ |
| if (offset >= isize) |
| return false; |
| |
| /* ??? This should not be here. Temporarily continue to allow word_mode |
| subregs of anything. The most common offender is (subreg:SI (reg:DF)). |
| Generally, backends are doing something sketchy but it'll take time to |
| fix them all. */ |
| if (omode == word_mode) |
| ; |
| /* ??? Similarly, e.g. with (subreg:DF (reg:TI)). Though store_bit_field |
| is the culprit here, and not the backends. */ |
| else if (osize >= UNITS_PER_WORD && isize >= osize) |
| ; |
| /* Allow component subregs of complex and vector. Though given the below |
| extraction rules, it's not always clear what that means. */ |
| else if ((COMPLEX_MODE_P (imode) || VECTOR_MODE_P (imode)) |
| && GET_MODE_INNER (imode) == omode) |
| ; |
| /* ??? x86 sse code makes heavy use of *paradoxical* vector subregs, |
| i.e. (subreg:V4SF (reg:SF) 0). This surely isn't the cleanest way to |
| represent this. It's questionable if this ought to be represented at |
| all -- why can't this all be hidden in post-reload splitters that make |
| arbitrary mode changes to the registers themselves? */ |
| else if (VECTOR_MODE_P (omode) && GET_MODE_INNER (omode) == imode) |
| ; |
| /* Subregs involving floating point modes are not allowed to |
| change size. Therefore (subreg:DI (reg:DF) 0) is fine, but |
| (subreg:SI (reg:DF) 0) isn't. */ |
| else if (FLOAT_MODE_P (imode) || FLOAT_MODE_P (omode)) |
| { |
| if (isize != osize) |
| return false; |
| } |
| |
| /* Paradoxical subregs must have offset zero. */ |
| if (osize > isize) |
| return offset == 0; |
| |
| /* This is a normal subreg. Verify that the offset is representable. */ |
| |
| /* For hard registers, we already have most of these rules collected in |
| subreg_offset_representable_p. */ |
| if (reg && REG_P (reg) && HARD_REGISTER_P (reg)) |
| { |
| unsigned int regno = REGNO (reg); |
| |
| #ifdef CANNOT_CHANGE_MODE_CLASS |
| if ((COMPLEX_MODE_P (imode) || VECTOR_MODE_P (imode)) |
| && GET_MODE_INNER (imode) == omode) |
| ; |
| else if (REG_CANNOT_CHANGE_MODE_P (regno, imode, omode)) |
| return false; |
| #endif |
| |
| return subreg_offset_representable_p (regno, imode, offset, omode); |
| } |
| |
| /* For pseudo registers, we want most of the same checks. Namely: |
| If the register is no larger than a word, the subreg must be the lowpart. |
| If the register is larger than a word, the subreg must be the lowpart |
| of a subword. A subreg does *not* perform arbitrary bit extraction. |
| Given that we've already checked mode/offset alignment, we only have |
| to check subword subregs here. */ |
| if (osize < UNITS_PER_WORD) |
| { |
| enum machine_mode wmode = isize > UNITS_PER_WORD ? word_mode : imode; |
| unsigned int low_off = subreg_lowpart_offset (omode, wmode); |
| if (offset % UNITS_PER_WORD != low_off) |
| return false; |
| } |
| return true; |
| } |
| |
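| /* Return a SUBREG of REG in mode MODE at byte OFFSET, asserting that the |
| requested subreg is valid. */ |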
| rtx |
| gen_rtx_SUBREG (enum machine_mode mode, rtx reg, int offset) |
| { |
| gcc_assert (validate_subreg (mode, GET_MODE (reg), reg, offset)); |
| return gen_rtx_raw_SUBREG (mode, reg, offset); |
| } |
| |
| /* Generate a SUBREG representing the least-significant part of REG if MODE |
| is smaller than the mode of REG; otherwise generate a paradoxical SUBREG. */ |
| |
| rtx |
| gen_lowpart_SUBREG (enum machine_mode mode, rtx reg) |
| { |
| enum machine_mode inmode; |
| |
| inmode = GET_MODE (reg); |
| if (inmode == VOIDmode) |
| inmode = mode; |
| return gen_rtx_SUBREG (mode, reg, |
| subreg_lowpart_offset (mode, inmode)); |
| } |
| |
| /* gen_rtvec (n, [rt1, ..., rtn]) |
| ** |
| ** This routine creates an rtvec and stores within it the |
| ** pointers to rtx's which are its arguments. |
| */ |
| |
| /*VARARGS1*/ |
| rtvec |
| gen_rtvec (int n, ...) |
| { |
| int i, save_n; |
| rtx *vector; |
| va_list p; |
| |
| va_start (p, n); |
| |
| if (n == 0) |
| return NULL_RTVEC; /* Don't allocate an empty rtvec... */ |
| |
| vector = alloca (n * sizeof (rtx)); |
| |
| for (i = 0; i < n; i++) |
| vector[i] = va_arg (p, rtx); |
| |
| /* The definition of VA_* in K&R C causes `n' to go out of scope. */ |
| save_n = n; |
| va_end (p); |
| |
| return gen_rtvec_v (save_n, vector); |
| } |
| |
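| /* Like gen_rtvec, but take the N rtx arguments from the array ARGP. */ |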
| rtvec |
| gen_rtvec_v (int n, rtx *argp) |
| { |
| int i; |
| rtvec rt_val; |
| |
| if (n == 0) |
| return NULL_RTVEC; /* Don't allocate an empty rtvec... */ |
| |
| rt_val = rtvec_alloc (n); /* Allocate an rtvec... */ |
| |
| for (i = 0; i < n; i++) |
| rt_val->elem[i] = *argp++; |
| |
| return rt_val; |
| } |
| |
| /* Generate a REG rtx for a new pseudo register of mode MODE. |
| This pseudo is assigned the next sequential register number. */ |
| |
| rtx |
| gen_reg_rtx (enum machine_mode mode) |
| { |
| struct function *f = cfun; |
| rtx val; |
| |
| /* Don't let anything called after initial flow analysis create new |
| registers. */ |
| gcc_assert (!no_new_pseudos); |
| |
| if (generating_concat_p |
| && (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT |
| || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)) |
| { |
| /* For complex modes, don't make a single pseudo. |
| Instead, make a CONCAT of two pseudos. |
| This allows noncontiguous allocation of the real and imaginary parts, |
| which makes much better code. Besides, allocating DCmode |
| pseudos overstrains reload on some machines like the 386. */ |
| rtx realpart, imagpart; |
| enum machine_mode partmode = GET_MODE_INNER (mode); |
| |
| realpart = gen_reg_rtx (partmode); |
| imagpart = gen_reg_rtx (partmode); |
| return gen_rtx_CONCAT (mode, realpart, imagpart); |
| } |
| |
| /* Make sure regno_pointer_align and regno_reg_rtx are large |
| enough to have an element for this pseudo reg number. */ |
| |
| if (reg_rtx_no == f->emit->regno_pointer_align_length) |
| { |
| int old_size = f->emit->regno_pointer_align_length; |
| char *new; |
| rtx *new1; |
| |
| new = ggc_realloc (f->emit->regno_pointer_align, old_size * 2); |
| memset (new + old_size, 0, old_size); |
| f->emit->regno_pointer_align = (unsigned char *) new; |
| |
| new1 = ggc_realloc (f->emit->x_regno_reg_rtx, |
| old_size * 2 * sizeof (rtx)); |
| memset (new1 + old_size, 0, old_size * sizeof (rtx)); |
| regno_reg_rtx = new1; |
| |
| f->emit->regno_pointer_align_length = old_size * 2; |
| } |
| |
| val = gen_raw_REG (mode, reg_rtx_no); |
| regno_reg_rtx[reg_rtx_no++] = val; |
| return val; |
| } |
| |
| /* Generate a register rtx with the same attributes as REG, but with its |
| offset increased by OFFSET. Do the big-endian correction if needed. */ |
| |
| rtx |
| gen_rtx_REG_offset (rtx reg, enum machine_mode mode, unsigned int regno, int offset) |
| { |
| rtx new = gen_rtx_REG (mode, regno); |
| tree decl; |
| HOST_WIDE_INT var_size; |
| |
| /* PR middle-end/14084 |
| The problem appears when a variable is stored in a larger register |
| and is later used in the original mode, in some mode in between, |
| or when some part of the variable is accessed. |
| |
| On little endian machines there is no problem because |
| the REG_OFFSET of the start of the variable is the same when |
| accessed in any mode (it is 0). |
| |
| However, this is not true on big endian machines. |
| The offset of the start of the variable is different when accessed |
| in different modes. |
| When we are taking a part of the REG we have to change the OFFSET |
| from offset WRT size of mode of REG to offset WRT size of variable. |
| |
| Without the big-endian correction, the resulting REG_OFFSET |
| would be larger than the size of the DECL. |
| |
| Examples of correction, for BYTES_BIG_ENDIAN WORDS_BIG_ENDIAN machine: |
| |
| REG.mode MODE DECL size old offset new offset description |
| DI SI 4 4 0 int32 in SImode |
| DI SI 1 4 0 char in SImode |
| DI QI 1 7 0 char in QImode |
| DI QI 4 5 1 1st element in QImode |
| of char[4] |
| DI HI 4 6 2 1st element in HImode |
| of int16[2] |
| |
| If the size of DECL is equal to or greater than the size of REG, |
| we can't do this correction because the register holds the |
| whole variable or a part of the variable, and thus the REG_OFFSET |
| is already correct. */ |
| |
| decl = REG_EXPR (reg); |
| if ((BYTES_BIG_ENDIAN || WORDS_BIG_ENDIAN) |
| && decl != NULL |
| && offset > 0 |
| && GET_MODE_SIZE (GET_MODE (reg)) > GET_MODE_SIZE (mode) |
| && ((var_size = int_size_in_bytes (TREE_TYPE (decl))) > 0 |
| && var_size < GET_MODE_SIZE (GET_MODE (reg)))) |
| { |
| int offset_le; |
| |
| /* Convert machine endian to little endian WRT size of mode of REG. */ |
| if (WORDS_BIG_ENDIAN) |
| offset_le = ((GET_MODE_SIZE (GET_MODE (reg)) - 1 - offset) |
| / UNITS_PER_WORD) * UNITS_PER_WORD; |
| else |
| offset_le = (offset / UNITS_PER_WORD) * UNITS_PER_WORD; |
| |
| if (BYTES_BIG_ENDIAN) |
| offset_le += ((GET_MODE_SIZE (GET_MODE (reg)) - 1 - offset) |
| % UNITS_PER_WORD); |
| else |
| offset_le += offset % UNITS_PER_WORD; |
| |
| if (offset_le >= var_size) |
| { |
| /* MODE is wider than the variable, so the new reg will cover |
| the whole variable and the resulting OFFSET should be 0. */ |
| offset = 0; |
| } |
| else |
| { |
| /* Convert little endian to machine endian WRT size of variable. */ |
| if (WORDS_BIG_ENDIAN) |
| offset = ((var_size - 1 - offset_le) |
| / UNITS_PER_WORD) * UNITS_PER_WORD; |
| else |
| offset = (offset_le / UNITS_PER_WORD) * UNITS_PER_WORD; |
| |
| if (BYTES_BIG_ENDIAN) |
| offset += ((var_size - 1 - offset_le) |
| % UNITS_PER_WORD); |
| else |
| offset += offset_le % UNITS_PER_WORD; |
| } |
| } |
| |
| REG_ATTRS (new) = get_reg_attrs (REG_EXPR (reg), |
| REG_OFFSET (reg) + offset); |
| return new; |
| } |
| |
| /* Set the register attributes of REG from the memory attributes of MEM. */ |
| |
| void |
| set_reg_attrs_from_mem (rtx reg, rtx mem) |
| { |
| if (MEM_OFFSET (mem) && GET_CODE (MEM_OFFSET (mem)) == CONST_INT) |
| REG_ATTRS (reg) |
| = get_reg_attrs (MEM_EXPR (mem), INTVAL (MEM_OFFSET (mem))); |
| } |
| |
| /* Set the register attributes for registers contained in PARM_RTX. |
| Use needed values from memory attributes of MEM. */ |
| |
| void |
| set_reg_attrs_for_parm (rtx parm_rtx, rtx mem) |
| { |
| if (REG_P (parm_rtx)) |
| set_reg_attrs_from_mem (parm_rtx, mem); |
| else if (GET_CODE (parm_rtx) == PARALLEL) |
| { |
| /* Check for a NULL entry in the first slot, used to indicate that the |
| parameter goes both on the stack and in registers. */ |
| int i = XEXP (XVECEXP (parm_rtx, 0, 0), 0) ? 0 : 1; |
| for (; i < XVECLEN (parm_rtx, 0); i++) |
| { |
| rtx x = XVECEXP (parm_rtx, 0, i); |
| if (REG_P (XEXP (x, 0))) |
| REG_ATTRS (XEXP (x, 0)) |
| = get_reg_attrs (MEM_EXPR (mem), |
| INTVAL (XEXP (x, 1))); |
| } |
| } |
| } |
| |
| /* Assign the RTX X to declaration T. */ |
| void |
| set_decl_rtl (tree t, rtx x) |
| { |
| DECL_CHECK (t)->decl.rtl = x; |
| |
| if (!x) |
| return; |
| /* For registers, we also maintain the reverse mapping. */ |
| if (REG_P (x)) |
| REG_ATTRS (x) = get_reg_attrs (t, 0); |
| else if (GET_CODE (x) == SUBREG) |
| REG_ATTRS (SUBREG_REG (x)) |
| = get_reg_attrs (t, -SUBREG_BYTE (x)); |
| if (GET_CODE (x) == CONCAT) |
| { |
| if (REG_P (XEXP (x, 0))) |
| REG_ATTRS (XEXP (x, 0)) = get_reg_attrs (t, 0); |
| if (REG_P (XEXP (x, 1))) |
| REG_ATTRS (XEXP (x, 1)) |
| = get_reg_attrs (t, GET_MODE_UNIT_SIZE (GET_MODE (XEXP (x, 0)))); |
| } |
| if (GET_CODE (x) == PARALLEL) |
| { |
| int i; |
| for (i = 0; i < XVECLEN (x, 0); i++) |
| { |
| rtx y = XVECEXP (x, 0, i); |
| if (REG_P (XEXP (y, 0))) |
| REG_ATTRS (XEXP (y, 0)) = get_reg_attrs (t, INTVAL (XEXP (y, 1))); |
| } |
| } |
| } |
| |
| /* Assign the RTX X to parameter declaration T. */ |
| void |
| set_decl_incoming_rtl (tree t, rtx x) |
| { |
| DECL_INCOMING_RTL (t) = x; |
| |
| if (!x) |
| return; |
| /* For registers, we also maintain the reverse mapping. */ |
| if (REG_P (x)) |
| REG_ATTRS (x) = get_reg_attrs (t, 0); |
| else if (GET_CODE (x) == SUBREG) |
| REG_ATTRS (SUBREG_REG (x)) |
| = get_reg_attrs (t, -SUBREG_BYTE (x)); |
| if (GET_CODE (x) == CONCAT) |
| { |
| if (REG_P (XEXP (x, 0))) |
| REG_ATTRS (XEXP (x, 0)) = get_reg_attrs (t, 0); |
| if (REG_P (XEXP (x, 1))) |
| REG_ATTRS (XEXP (x, 1)) |
| = get_reg_attrs (t, GET_MODE_UNIT_SIZE (GET_MODE (XEXP (x, 0)))); |
| } |
| if (GET_CODE (x) == PARALLEL) |
| { |
| int i, start; |
| |
| /* Check for a NULL entry, used to indicate that the parameter goes |
| both on the stack and in registers. */ |
| if (XEXP (XVECEXP (x, 0, 0), 0)) |
| start = 0; |
| else |
| start = 1; |
| |
| for (i = start; i < XVECLEN (x, 0); i++) |
| { |
| rtx y = XVECEXP (x, 0, i); |
| if (REG_P (XEXP (y, 0))) |
| REG_ATTRS (XEXP (y, 0)) = get_reg_attrs (t, INTVAL (XEXP (y, 1))); |
| } |
| } |
| } |
| |
| /* Identify REG (which may be a CONCAT) as a user register. */ |
| |
| void |
| mark_user_reg (rtx reg) |
| { |
| if (GET_CODE (reg) == CONCAT) |
| { |
| REG_USERVAR_P (XEXP (reg, 0)) = 1; |
| REG_USERVAR_P (XEXP (reg, 1)) = 1; |
| } |
| else |
| { |
| gcc_assert (REG_P (reg)); |
| REG_USERVAR_P (reg) = 1; |
| } |
| } |
| |
| /* Identify REG as a probable pointer register and show its alignment |
| as ALIGN, if nonzero. */ |
| |
| void |
| mark_reg_pointer (rtx reg, int align) |
| { |
| if (! REG_POINTER (reg)) |
| { |
| REG_POINTER (reg) = 1; |
| |
| if (align) |
| REGNO_POINTER_ALIGN (REGNO (reg)) = align; |
| } |
| else if (align && align < REGNO_POINTER_ALIGN (REGNO (reg))) |
| /* We can no longer be sure just how aligned this pointer is. */ |
| REGNO_POINTER_ALIGN (REGNO (reg)) = align; |
| } |
| |
| /* Return 1 plus largest pseudo reg number used in the current function. */ |
| |
| int |
| max_reg_num (void) |
| { |
| return reg_rtx_no; |
| } |
| |
| /* Return 1 + the largest label number used so far in the current function. */ |
| |
| int |
| max_label_num (void) |
| { |
| return label_num; |
| } |
| |
| /* Return first label number used in this function (if any were used). */ |
| |
| int |
| get_first_label_num (void) |
| { |
| return first_label_num; |
| } |
| |
| /* If the rtx for a label was created during the expansion of a nested |
| function, then first_label_num won't include this label number. |
| Fix this now so that array indices work later. */ |
| |
| void |
| maybe_set_first_label_num (rtx x) |
| { |
| if (CODE_LABEL_NUMBER (x) < first_label_num) |
| first_label_num = CODE_LABEL_NUMBER (x); |
| } |
| |
| /* Return a value representing some low-order bits of X, where the number |
| of low-order bits is given by MODE. Note that no conversion is done |
| between floating-point and fixed-point values, rather, the bit |
| representation is returned. |
| |
| This function handles the cases in common between gen_lowpart, below, |
| and two variants in cse.c and combine.c. These are the cases that can |
| be safely handled at all points in the compilation. |
| |
| If this is not a case we can handle, return 0. */ |
| |
| rtx |
| gen_lowpart_common (enum machine_mode mode, rtx x) |
| { |
| int msize = GET_MODE_SIZE (mode); |
| int xsize; |
| int offset = 0; |
| enum machine_mode innermode; |
| |
| /* Unfortunately, this routine doesn't take a parameter for the mode of X, |
| so we have to make one up. Yuk. */ |
| innermode = GET_MODE (x); |
| if (GET_CODE (x) == CONST_INT && msize <= HOST_BITS_PER_WIDE_INT) |
| innermode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0); |
| else if (innermode == VOIDmode) |
| innermode = mode_for_size (HOST_BITS_PER_WIDE_INT * 2, MODE_INT, 0); |
| |
| xsize = GET_MODE_SIZE (innermode); |
| |
| gcc_assert (innermode != VOIDmode && innermode != BLKmode); |
| |
| if (innermode == mode) |
| return x; |
| |
| /* MODE must occupy no more words than the mode of X. */ |
| if ((msize + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD |
| > ((xsize + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)) |
| return 0; |
| |
| /* Don't allow generating paradoxical FLOAT_MODE subregs. */ |
| if (GET_MODE_CLASS (mode) == MODE_FLOAT && msize > xsize) |
| return 0; |
| |
| offset = subreg_lowpart_offset (mode, innermode); |
| |
| if ((GET_CODE (x) == ZERO_EXTEND || GET_CODE (x) == SIGN_EXTEND) |
| && (GET_MODE_CLASS (mode) == MODE_INT |
| || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)) |
| { |
| /* If we are getting the low-order part of something that has been |
| sign- or zero-extended, we can either just use the object being |
| extended or make a narrower extension. If we want an even smaller |
| piece than the size of the object being extended, call ourselves |
| recursively. |
| |
| This case is used mostly by combine and cse. */ |
| |
| if (GET_MODE (XEXP (x, 0)) == mode) |
| return XEXP (x, 0); |
| else if (msize < GET_MODE_SIZE (GET_MODE (XEXP (x, 0)))) |
| return gen_lowpart_common (mode, XEXP (x, 0)); |
| else if (msize < xsize) |
| return gen_rtx_fmt_e (GET_CODE (x), mode, XEXP (x, 0)); |
| } |
| else if (GET_CODE (x) == SUBREG || REG_P (x) |
| || GET_CODE (x) == CONCAT || GET_CODE (x) == CONST_VECTOR |
| || GET_CODE (x) == CONST_DOUBLE || GET_CODE (x) == CONST_INT) |
| return simplify_gen_subreg (mode, x, innermode, offset); |
| |
| /* Otherwise, we can't do this. */ |
| return 0; |
| } |
| |
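| /* Return an rtx for the high-order (most significant) part of X, |
| in mode MODE. */ |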
| rtx |
| gen_highpart (enum machine_mode mode, rtx x) |
| { |
| unsigned int msize = GET_MODE_SIZE (mode); |
| rtx result; |
| |
| /* This case loses if X is a subreg. To catch bugs early, |
| complain if an invalid MODE is used even in other cases. */ |
| gcc_assert (msize <= UNITS_PER_WORD |
| || msize == (unsigned int) GET_MODE_UNIT_SIZE (GET_MODE (x))); |
| |
| result = simplify_gen_subreg (mode, x, GET_MODE (x), |
| subreg_highpart_offset (mode, GET_MODE (x))); |
| gcc_assert (result); |
| |
| /* simplify_gen_subreg is not guaranteed to return a valid operand for |
| the target if we have a MEM. gen_highpart must return a valid operand, |
| emitting code if necessary to do so. */ |
| if (MEM_P (result)) |
| { |
| result = validize_mem (result); |
| gcc_assert (result); |
| } |
| |
| return result; |
| } |
| |
| /* Like gen_highpart, but accept the mode of the EXP operand in case EXP |
| can be a VOIDmode constant. */ |
| rtx |
| gen_highpart_mode (enum machine_mode outermode, enum machine_mode innermode, rtx exp) |
| { |
| if (GET_MODE (exp) != VOIDmode) |
| { |
| gcc_assert (GET_MODE (exp) == innermode); |
| return gen_highpart (outermode, exp); |
| } |
| return simplify_gen_subreg (outermode, exp, innermode, |
| subreg_highpart_offset (outermode, innermode)); |
| } |
| |
| /* Return offset in bytes to get OUTERMODE low part |
| of the value in mode INNERMODE stored in memory in target format. */ |
| |
| unsigned int |
| subreg_lowpart_offset (enum machine_mode outermode, enum machine_mode innermode) |
| { |
| unsigned int offset = 0; |
| int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)); |
| |
| if (difference > 0) |
| { |
| if (WORDS_BIG_ENDIAN) |
| offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD; |
| if (BYTES_BIG_ENDIAN) |
| offset += difference % UNITS_PER_WORD; |
| } |
| |
| return offset; |
| } |
| |
| /* Return offset in bytes to get OUTERMODE high part |
| of the value in mode INNERMODE stored in memory in target format. */ |
| unsigned int |
| subreg_highpart_offset (enum machine_mode outermode, enum machine_mode innermode) |
| { |
| unsigned int offset = 0; |
| int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)); |
| |
| gcc_assert (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode)); |
| |
| if (difference > 0) |
| { |
| if (! WORDS_BIG_ENDIAN) |
| offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD; |
| if (! BYTES_BIG_ENDIAN) |
| offset += difference % UNITS_PER_WORD; |
| } |
| |
| return offset; |
| } |
| |
| /* Return 1 iff X, assumed to be a SUBREG, |
| refers to the least significant part of its containing reg. |
| If X is not a SUBREG, always return 1 (it is its own low part!). */ |
| |
| int |
| subreg_lowpart_p (rtx x) |
| { |
| if (GET_CODE (x) != SUBREG) |
| return 1; |
| else if (GET_MODE (SUBREG_REG (x)) == VOIDmode) |
| return 0; |
| |
| return (subreg_lowpart_offset (GET_MODE (x), GET_MODE (SUBREG_REG (x))) |
| == SUBREG_BYTE (x)); |
| } |
| |
| /* Return subword OFFSET of operand OP. |
| The word number, OFFSET, is interpreted as the word number starting |
| at the low-order address. OFFSET 0 is the low-order word if not |
| WORDS_BIG_ENDIAN, otherwise it is the high-order word. |
| |
| If we cannot extract the required word, we return zero. Otherwise, |
| an rtx corresponding to the requested word will be returned. |
| |
| VALIDATE_ADDRESS is nonzero if the address should be validated. Before |
| reload has completed, a valid address will always be returned. After |
| reload, if a valid address cannot be returned, we return zero. |
| |
| If VALIDATE_ADDRESS is zero, we simply form the required address; validating |
| it is the responsibility of the caller. |
| |
| MODE is the mode of OP in case it is a CONST_INT. |
| |
| ??? This is still rather broken for some cases. The problem for the |
| moment is that all callers of this thing provide no 'goal mode' to |
| tell us to work with. This exists because all callers were written |
| in a word based SUBREG world. |
| Now use of this function can be deprecated by simplify_subreg in most |
| cases. |
| */ |
| |
| rtx |
| operand_subword (rtx op, unsigned int offset, int validate_address, enum machine_mode mode) |
| { |
| if (mode == VOIDmode) |
| mode = GET_MODE (op); |
| |
| gcc_assert (mode != VOIDmode); |
| |
| /* If OP is narrower than a word, fail. */ |
| if (mode != BLKmode |
| && (GET_MODE_SIZE (mode) < UNITS_PER_WORD)) |
| return 0; |
| |
| /* If we want a word outside OP, return zero. */ |
| if (mode != BLKmode |
| && (offset + 1) * UNITS_PER_WORD > GET_MODE_SIZE (mode)) |
| return const0_rtx; |
| |
| /* Form a new MEM at the requested address. */ |
| if (MEM_P (op)) |
| { |
| rtx new = adjust_address_nv (op, word_mode, offset * UNITS_PER_WORD); |
| |
| if (! validate_address) |
| return new; |
| |
| else if (reload_completed) |
| { |
| if (! strict_memory_address_p (word_mode, XEXP (new, 0))) |
| return 0; |
| } |
| else |
| return replace_equiv_address (new, XEXP (new, 0)); |
| } |
| |
| /* Rest can be handled by simplify_subreg. */ |
| return simplify_gen_subreg (word_mode, op, mode, (offset * UNITS_PER_WORD)); |
| } |
| |
| /* Similar to `operand_subword', but never return 0. If we can't extract |
| the required subword, put OP into a register and try again. If that fails, |
| abort. We always validate the address in this case. |
| |
| MODE is the mode of OP, in case it is CONST_INT. */ |
| |
| rtx |
| operand_subword_force (rtx op, unsigned int offset, enum machine_mode mode) |
| { |
| rtx result = operand_subword (op, offset, 1, mode); |
| |
| if (result) |
| return result; |
| |
| if (mode != BLKmode && mode != VOIDmode) |
| { |
| /* If this is a register which can not be accessed by words, copy it |
| to a pseudo register. */ |
| if (REG_P (op)) |
| op = copy_to_reg (op); |
| else |
| op = force_reg (mode, op); |
| } |
| |
| result = operand_subword (op, offset, 1, mode); |
| gcc_assert (result); |
| |
| return result; |
| } |
| |
| /* Given a compare instruction, swap the operands. |
| A test instruction is changed into a compare of 0 against the operand. */ |
| |
| void |
| reverse_comparison (rtx insn) |
| { |
| rtx body = PATTERN (insn); |
| rtx comp; |
| |
| if (GET_CODE (body) == SET) |
| comp = SET_SRC (body); |
| else |
| comp = SET_SRC (XVECEXP (body, 0, 0)); |
| |
| if (GET_CODE (comp) == COMPARE) |
| { |
| rtx op0 = XEXP (comp, 0); |
| rtx op1 = XEXP (comp, 1); |
| XEXP (comp, 0) = op1; |
| XEXP (comp, 1) = op0; |
| } |
| else |
| { |
| rtx new = gen_rtx_COMPARE (VOIDmode, |
| CONST0_RTX (GET_MODE (comp)), comp); |
| if (GET_CODE (body) == SET) |
| SET_SRC (body) = new; |
| else |
| SET_SRC (XVECEXP (body, 0, 0)) = new; |
| } |
| } |
| |
| /* Within a MEM_EXPR, we care about either (1) a component ref of a decl, |
| or (2) a component ref of something variable. Represent the latter with |
| a NULL expression. */ |
| |
| static tree |
| component_ref_for_mem_expr (tree ref) |
| { |
| tree inner = TREE_OPERAND (ref, 0); |
| |
| if (TREE_CODE (inner) == COMPONENT_REF) |
| inner = component_ref_for_mem_expr (inner); |
| else |
| { |
| /* Now remove any conversions: they don't change what the underlying |
| object is. Likewise for SAVE_EXPR. */ |
| while (TREE_CODE (inner) == NOP_EXPR || TREE_CODE (inner) == CONVERT_EXPR |
| || TREE_CODE (inner) == NON_LVALUE_EXPR |
| || TREE_CODE (inner) == VIEW_CONVERT_EXPR |
| || TREE_CODE (inner) == SAVE_EXPR) |
| inner = TREE_OPERAND (inner, 0); |
| |
| if (! DECL_P (inner)) |
| inner = NULL_TREE; |
| } |
| |
| if (inner == TREE_OPERAND (ref, 0)) |
| return ref; |
| else |
| return build3 (COMPONENT_REF, TREE_TYPE (ref), inner, |
| TREE_OPERAND (ref, 1), NULL_TREE); |
| } |
| |
| /* Returns 1 if the two MEM_EXPRs can be considered equal, |
| and 0 otherwise. */ |
| |
| int |
| mem_expr_equal_p (tree expr1, tree expr2) |
| { |
| if (expr1 == expr2) |
| return 1; |
| |
| if (! expr1 || ! expr2) |
| return 0; |
| |
| if (TREE_CODE (expr1) != TREE_CODE (expr2)) |
| return 0; |
| |
| if (TREE_CODE (expr1) == COMPONENT_REF) |
| return |
| mem_expr_equal_p (TREE_OPERAND (expr1, 0), |
| TREE_OPERAND (expr2, 0)) |
| && mem_expr_equal_p (TREE_OPERAND (expr1, 1), /* field decl */ |
| TREE_OPERAND (expr2, 1)); |
| |
| if (INDIRECT_REF_P (expr1)) |
| return mem_expr_equal_p (TREE_OPERAND (expr1, 0), |
| TREE_OPERAND (expr2, 0)); |
| |
| /* ARRAY_REFs, ARRAY_RANGE_REFs and BIT_FIELD_REFs should already |
| have been resolved here. */ |
| gcc_assert (DECL_P (expr1)); |
| |
| /* Decls with different pointers can't be equal. */ |
| return 0; |
| } |
| |
| /* Given REF, a MEM, and T, either the type of REF or the expression |
| corresponding to REF, set the memory attributes. OBJECTP is nonzero |
| if we are making a new object of this type. BITPOS is nonzero if |
| there is an offset outstanding on T that will be applied later. */ |
| |
| void |
| set_mem_attributes_minus_bitpos (rtx ref, tree t, int objectp, |
| HOST_WIDE_INT bitpos) |
| { |
| HOST_WIDE_INT alias = MEM_ALIAS_SET (ref); |
| tree expr = MEM_EXPR (ref); |
| rtx offset = MEM_OFFSET (ref); |
| rtx size = MEM_SIZE (ref); |
| unsigned int align = MEM_ALIGN (ref); |
| HOST_WIDE_INT apply_bitpos = 0; |
| tree type; |
| |
| /* It can happen that type_for_mode was given a mode for which there |
| is no language-level type, in which case it returns NULL, which |
| we can see here. */ |
| if (t == NULL_TREE) |
| return; |
| |
| type = TYPE_P (t) ? t : TREE_TYPE (t); |
| if (type == error_mark_node) |
| return; |
| |
| /* If we have already set DECL_RTL = ref, get_alias_set will get the |
| wrong answer, as it assumes that DECL_RTL already has the right alias |
| info. Callers should not set DECL_RTL until after the call to |
| set_mem_attributes. */ |
| gcc_assert (!DECL_P (t) || ref != DECL_RTL_IF_SET (t)); |
| |
| /* Get the alias set from the expression or type (perhaps using a |
| front-end routine) and use it. */ |
| alias = get_alias_set (t); |
| |
| MEM_VOLATILE_P (ref) |= TYPE_VOLATILE (type); |
| MEM_IN_STRUCT_P (ref) = AGGREGATE_TYPE_P (type); |
| MEM_POINTER (ref) = POINTER_TYPE_P (type); |
| MEM_NOTRAP_P (ref) = TREE_THIS_NOTRAP (t); |
| |
| /* If we are making an object of this type, or if this is a DECL, we know |
| that it is a scalar if the type is not an aggregate. */ |
| if ((objectp || DECL_P (t)) && ! AGGREGATE_TYPE_P (type)) |
| MEM_SCALAR_P (ref) = 1; |
| |
| /* We can set the alignment from the type if we are making an object, |
| this is an INDIRECT_REF, or if TYPE_ALIGN_OK. */ |
| if (objectp || TREE_CODE (t) == INDIRECT_REF |
| || TREE_CODE (t) == ALIGN_INDIRECT_REF |
| || TYPE_ALIGN_OK (type)) |
| align = MAX (align, TYPE_ALIGN (type)); |
| else |
| if (TREE_CODE (t) == MISALIGNED_INDIRECT_REF) |
| { |
| if (integer_zerop (TREE_OPERAND (t, 1))) |
| /* We don't know anything about the alignment. */ |
| align = BITS_PER_UNIT; |
| else |
| align = tree_low_cst (TREE_OPERAND (t, 1), 1); |
| } |
| |
| /* If the size is known, we can set that. */ |
| if (TYPE_SIZE_UNIT (type) && host_integerp (TYPE_SIZE_UNIT (type), 1)) |
| size = GEN_INT (tree_low_cst (TYPE_SIZE_UNIT (type), 1)); |
| |
| /* If T is not a type, we may be able to deduce some more information about |
| the expression. */ |
| if (! TYPE_P (t)) |
| { |
| tree base = get_base_address (t); |
| if (base && DECL_P (base) |
| && TREE_READONLY (base) |
| && (TREE_STATIC (base) || DECL_EXTERNAL (base))) |
| { |
| tree base_type = TREE_TYPE (base); |
| gcc_assert (!(base_type && TYPE_NEEDS_CONSTRUCTING (base_type)) |
| || DECL_ARTIFICIAL (base)); |
| MEM_READONLY_P (ref) = 1; |
| } |
| |
| if (TREE_THIS_VOLATILE (t)) |
| MEM_VOLATILE_P (ref) = 1; |
| |
| /* Now remove any conversions: they don't change what the underlying |
| object is. Likewise for SAVE_EXPR. */ |
| while (TREE_CODE (t) == NOP_EXPR || TREE_CODE (t) == CONVERT_EXPR |
| || TREE_CODE (t) == NON_LVALUE_EXPR |
| || TREE_CODE (t) == VIEW_CONVERT_EXPR |
| || TREE_CODE (t) == SAVE_EXPR) |
| t = TREE_OPERAND (t, 0); |
| |
| /* If this expression uses its parent's alias set, mark it such |
| that we won't change it. */ |
| if (component_uses_parent_alias_set (t)) |
| MEM_KEEP_ALIAS_SET_P (ref) = 1; |
| |
| /* If this is a decl, set the attributes of the MEM from it. */ |
| if (DECL_P (t)) |
| { |
| expr = t; |
| offset = const0_rtx; |
| apply_bitpos = bitpos; |
| size = (DECL_SIZE_UNIT (t) |
| && host_integerp (DECL_SIZE_UNIT (t), 1) |
| ? GEN_INT (tree_low_cst (DECL_SIZE_UNIT (t), 1)) : 0); |
| align = DECL_ALIGN (t); |
| } |
| |
| /* If this is a constant, we know the alignment. */ |
| else if (CONSTANT_CLASS_P (t)) |
| { |
| align = TYPE_ALIGN (type); |
| #ifdef CONSTANT_ALIGNMENT |
| align = CONSTANT_ALIGNMENT (t, align); |
| #endif |
| } |
| |
| /* If this is a field reference and not a bit-field, record it. */ |
| /* ??? There is some information that can be gleaned from bit-fields, |
| such as the word offset in the structure that might be modified. |
| But skip it for now. */ |
| else if (TREE_CODE (t) == COMPONENT_REF |
| && ! DECL_BIT_FIELD (TREE_OPERAND (t, 1))) |
| { |
| expr = component_ref_for_mem_expr (t); |
| offset = const0_rtx; |
| apply_bitpos = bitpos; |
| /* ??? Any reason the field size would be different than |
| the size we got from the type? */ |
| } |
| |
| /* If this is an array reference, look for an outer field reference. */ |
| else if (TREE_CODE (t) == ARRAY_REF) |
| { |
| tree off_tree = size_zero_node; |
| /* We can't modify t, because we use it at the end of the |
| function. */ |
| tree t2 = t; |
| |
| do |
| { |
| tree index = TREE_OPERAND (t2, 1); |
| tree low_bound = array_ref_low_bound (t2); |
| tree unit_size = array_ref_element_size (t2); |
| |
| /* We assume all arrays have sizes that are a multiple of a byte. |
| First subtract the lower bound, if any, in the type of the |
| index, then convert to sizetype and multiply by the size of |
| the array element. */ |
| if (! integer_zerop (low_bound)) |
| index = fold (build2 (MINUS_EXPR, TREE_TYPE (index), |
| index, low_bound)); |
| |
| off_tree = size_binop (PLUS_EXPR, |
| size_binop (MULT_EXPR, convert (sizetype, |
| index), |
| unit_size), |
| off_tree); |
| t2 = TREE_OPERAND (t2, 0); |
| } |
| while (TREE_CODE (t2) == ARRAY_REF); |
| |
| if (DECL_P (t2)) |
| { |
| expr = t2; |
| offset = NULL; |
| if (host_integerp (off_tree, 1)) |
| { |
| HOST_WIDE_INT ioff = tree_low_cst (off_tree, 1); |
| HOST_WIDE_INT aoff = (ioff & -ioff) * BITS_PER_UNIT; |
| align = DECL_ALIGN (t2); |
| if (aoff && (unsigned HOST_WIDE_INT) aoff < align) |
| align = aoff; |
| offset = GEN_INT (ioff); |
| apply_bitpos = bitpos; |
| } |
| } |
| else if (TREE_CODE (t2) == COMPONENT_REF) |
| { |
| expr = component_ref_for_mem_expr (t2); |
| if (host_integerp (off_tree, 1)) |
| { |
| offset = GEN_INT (tree_low_cst (off_tree, 1)); |
| apply_bitpos = bitpos; |
| } |
| /* ??? Any reason the field size would be different than |
| the size we got from the type? */ |
| } |
| else if (flag_argument_noalias > 1 |
| && (INDIRECT_REF_P (t2)) |
| && TREE_CODE (TREE_OPERAND (t2, 0)) == PARM_DECL) |
| { |
| expr = t2; |
| offset = NULL; |
| } |
| } |
| |
| /* If this is a Fortran indirect argument reference, record the |
| parameter decl. */ |
| else if (flag_argument_noalias > 1 |
| && (INDIRECT_REF_P (t)) |
| && TREE_CODE (TREE_OPERAND (t, 0)) == PARM_DECL) |
| { |
| expr = t; |
| offset = NULL; |
| } |
| } |
| |
| /* If we modified OFFSET based on T, then subtract the outstanding |
| bit position offset. Similarly, increase the size of the accessed |
| object to contain the negative offset. */ |
| if (apply_bitpos) |
| { |
| offset = plus_constant (offset, -(apply_bitpos / BITS_PER_UNIT)); |
| if (size) |
| size = plus_constant (size, apply_bitpos / BITS_PER_UNIT); |
| } |
| |
| if (TREE_CODE (t) == ALIGN_INDIRECT_REF) |
| { |
| /* Force EXPR and OFFSET to NULL, since we don't know exactly what |
| we're overlapping. */ |
| offset = NULL; |
| expr = NULL; |
| } |
| |
| /* Now set the attributes we computed above. */ |
| MEM_ATTRS (ref) |
| = get_mem_attrs (alias, expr, offset, size, align, GET_MODE (ref)); |
| |
| /* If this is already known to be a scalar or aggregate, we are done. */ |
| if (MEM_IN_STRUCT_P (ref) || MEM_SCALAR_P (ref)) |
| return; |
| |
| /* If it is a reference into an aggregate, this is part of an aggregate. |
| Otherwise we don't know. */ |
| else if (TREE_CODE (t) == COMPONENT_REF || TREE_CODE (t) == ARRAY_REF |
| || TREE_CODE (t) == ARRAY_RANGE_REF |
| || TREE_CODE (t) == BIT_FIELD_REF) |
| MEM_IN_STRUCT_P (ref) = 1; |
| } |
| |
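| /* Set the memory attributes of REF from T; like |
| set_mem_attributes_minus_bitpos, but with a bit position of zero. */ |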
| void |
| set_mem_attributes (rtx ref, tree t, int objectp) |
| { |
| set_mem_attributes_minus_bitpos (ref, t, objectp, 0); |
| } |
| |
| /* Set the attributes of MEM from the attributes of REG. */ |
| |
| void |
| set_mem_attrs_from_reg (rtx mem, rtx reg) |
| { |
| MEM_ATTRS (mem) |
| = get_mem_attrs (MEM_ALIAS_SET (mem), REG_EXPR (reg), |
| GEN_INT (REG_OFFSET (reg)), |
| MEM_SIZE (mem), MEM_ALIGN (mem), GET_MODE (mem)); |
| } |
| |
| /* Set the alias set of MEM to SET. */ |
| |
| void |
| set_mem_alias_set (rtx mem, HOST_WIDE_INT set) |
| { |
| #ifdef ENABLE_CHECKING |
| /* If the new and old alias sets don't conflict, something is wrong. */ |
| gcc_assert (alias_sets_conflict_p (set, MEM_ALIAS_SET (mem))); |
| #endif |
| |
| MEM_ATTRS (mem) = get_mem_attrs (set, MEM_EXPR (mem), MEM_OFFSET (mem), |
| MEM_SIZE (mem), MEM_ALIGN (mem), |
| GET_MODE (mem)); |
| } |
| |
| /* Set the alignment of MEM to ALIGN bits. */ |
| |
| void |
| set_mem_align (rtx mem, unsigned int align) |
| { |
| MEM_ATTRS (mem) = get_mem_attrs (MEM_ALIAS_SET (mem), MEM_EXPR (mem), |
| MEM_OFFSET (mem), MEM_SIZE (mem), align, |
| GET_MODE (mem)); |
| } |
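| |
| /* Informal example (a sketch only; the right value is target specific): |
| a caller that has placed MEM in storage it knows to be word aligned |
| might record that fact with |
| |
| set_mem_align (mem, BITS_PER_WORD); |
| */ |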
| |
| /* Set the expr for MEM to EXPR. */ |
| |
| void |
| set_mem_expr (rtx mem, tree expr) |
| { |
| MEM_ATTRS (mem) |
| = get_mem_attrs (MEM_ALIAS_SET (mem), expr, MEM_OFFSET (mem), |
| MEM_SIZE (mem), MEM_ALIGN (mem), GET_MODE (mem)); |
| } |
| |
| /* Set the offset of MEM to OFFSET. */ |
| |
| void |
| set_mem_offset (rtx mem, rtx offset) |
| { |
| MEM_ATTRS (mem) = get_mem_attrs (MEM_ALIAS_SET (mem), MEM_EXPR (mem), |
| offset, MEM_SIZE (mem), MEM_ALIGN (mem), |
| GET_MODE (mem)); |
| } |
| |
| /* Set the size of MEM to SIZE. */ |
| |
| void |
| set_mem_size (rtx mem, rtx size) |
| { |
| MEM_ATTRS (mem) = get_mem_attrs (MEM_ALIAS_SET (mem), MEM_EXPR (mem), |
| MEM_OFFSET (mem), size, MEM_ALIGN (mem), |
| GET_MODE (mem)); |
| } |
| |
| /* Return a memory reference like MEMREF, but with its mode changed to MODE |
| and its address changed to ADDR. (VOIDmode means don't change the mode. |
| NULL for ADDR means don't change the address.) VALIDATE is nonzero if the |
| returned memory location is required to be valid. The memory |
| attributes are not changed. */ |
| |
| static rtx |
| change_address_1 (rtx memref, enum machine_mode mode, rtx addr, int validate) |
| { |
| rtx new; |
| |
| gcc_assert (MEM_P (memref)); |
| if (mode == VOIDmode) |
| mode = GET_MODE (memref); |
| if (addr == 0) |
| addr = XEXP (memref, 0); |
| if (mode == GET_MODE (memref) && addr == XEXP (memref, 0) |
| && (!validate || memory_address_p (mode, addr))) |
| return memref; |
| |
| if (validate) |
| { |
| if (reload_in_progress || reload_completed) |
| gcc_assert (memory_address_p (mode, addr)); |
| else |
| addr = memory_address (mode, addr); |
| } |
| |
| if (rtx_equal_p (addr, XEXP (memref, 0)) && mode == GET_MODE (memref)) |
| return memref; |
| |
| new = gen_rtx_MEM (mode, addr); |
| MEM_COPY_ATTRIBUTES (new, memref); |
| return new; |
| } |
| |
| /* Like change_address_1 with VALIDATE nonzero, but we are not saying in what |
| way we are changing MEMREF, so we only preserve the alias set. */ |
| |
| rtx |
| change_address (rtx memref, enum machine_mode mode, rtx addr) |
| { |
| rtx new = change_address_1 (memref, mode, addr, 1), size; |
| enum machine_mode mmode = GET_MODE (new); |
| unsigned int align; |
| |
| size = mmode == BLKmode ? 0 : GEN_INT (GET_MODE_SIZE (mmode)); |
| align = mmode == BLKmode ? BITS_PER_UNIT : GET_MODE_ALIGNMENT (mmode); |
| |
| /* If there are no changes, just return the original memory reference. */ |
| if (new == memref) |
| { |
| if (MEM_ATTRS (memref) == 0 |
| || (MEM_EXPR (memref) == NULL |
| && MEM_OFFSET (memref) == NULL |
| && MEM_SIZE (memref) == size |
| && MEM_ALIGN (memref) == align)) |
| return new; |
| |
| new = gen_rtx_MEM (mmode, XEXP (memref, 0)); |
| MEM_COPY_ATTRIBUTES (new, memref); |
| } |
| |
| MEM_ATTRS (new) |
| = get_mem_attrs (MEM_ALIAS_SET (memref), 0, 0, size, align, mmode); |
| |
| return new; |
| } |
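| |
| /* Informal example (DESTREG is a hypothetical register holding the |
| address): block-move expanders often rewrite an operand in BLKmode, |
| |
| rtx blk = change_address (mem, BLKmode, destreg); |
| |
| Only the alias set survives; the other attributes are recomputed for |
| the new mode. */ |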
| |
| /* Return a memory reference like MEMREF, but with its mode changed |
| to MODE and its address offset by OFFSET bytes. If VALIDATE is |
| nonzero, the memory address is forced to be valid. |
| If ADJUST is zero, OFFSET is only used to update MEM_ATTRS and the |
| caller is responsible for adjusting the MEMREF base register. */ |
| |
| rtx |
| adjust_address_1 (rtx memref, enum machine_mode mode, HOST_WIDE_INT offset, |
| int validate, int adjust) |
| { |
| rtx addr = XEXP (memref, 0); |
| rtx new; |
| rtx memoffset = MEM_OFFSET (memref); |
| rtx size = 0; |
| unsigned int memalign = MEM_ALIGN (memref); |
| |
| /* If there are no changes, just return the original memory reference. */ |
| if (mode == GET_MODE (memref) && !offset |
| && (!validate || memory_address_p (mode, addr))) |
| return memref; |
| |
| /* ??? Prefer to create garbage instead of creating shared rtl. |
| This may happen even if offset is nonzero -- consider |
| (plus (plus reg reg) const_int) -- so do this always. */ |
| addr = copy_rtx (addr); |
| |
| if (adjust) |
| { |
| /* If MEMREF is a LO_SUM and the offset is within the alignment of the |
| object, we can merge it into the LO_SUM. */ |
| if (GET_MODE (memref) != BLKmode && GET_CODE (addr) == LO_SUM |
| && offset >= 0 |
| && (unsigned HOST_WIDE_INT) offset |
| < GET_MODE_ALIGNMENT (GET_MODE (memref)) / BITS_PER_UNIT) |
| addr = gen_rtx_LO_SUM (Pmode, XEXP (addr, 0), |
| plus_constant (XEXP (addr, 1), offset)); |
| else |
| addr = plus_constant (addr, offset); |
| } |
| |
| new = change_address_1 (memref, mode, addr, validate); |
| |
| /* Compute the new values of the memory attributes due to this adjustment. |
| We add the offsets and update the alignment. */ |
| if (memoffset) |
| memoffset = GEN_INT (offset + INTVAL (memoffset)); |
| |
| /* Compute the new alignment by taking the MIN of the alignment and the |
| lowest-order set bit in OFFSET, but don't change the alignment if OFFSET |
| is zero. */ |
| if (offset != 0) |
| memalign |
| = MIN (memalign, |
| (unsigned HOST_WIDE_INT) (offset & -offset) * BITS_PER_UNIT); |
| |
| /* We can compute the size in a number of ways. */ |
| if (GET_MODE (new) != BLKmode) |
| size = GEN_INT (GET_MODE_SIZE (GET_MODE (new))); |
| else if (MEM_SIZE (memref)) |
| size = plus_constant (MEM_SIZE (memref), -offset); |
| |
| MEM_ATTRS (new) = get_mem_attrs (MEM_ALIAS_SET (memref), MEM_EXPR (memref), |
| memoffset, size, memalign, GET_MODE (new)); |
| |
| /* At some point, we should validate that this offset is within the object, |
| if all the appropriate values are known. */ |
| return new; |
| } |
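| |
| /* Informal example (assuming the usual adjust_address macro wrapper, |
| which calls adjust_address_1 with VALIDATE and ADJUST nonzero): |
| splitting a DImode reference into two word-sized accesses, |
| |
| rtx w0 = adjust_address (mem, SImode, 0); |
| rtx w1 = adjust_address (mem, SImode, GET_MODE_SIZE (SImode)); |
| |
| The recorded offset, size and alignment are updated accordingly. */ |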
| |
| /* Return a memory reference like MEMREF, but with its mode changed |
| to MODE and its address changed to ADDR, which is assumed to be |
| MEMREF offset by OFFSET bytes. If VALIDATE is |
| nonzero, the memory address is forced to be valid. */ |
| |
| rtx |
| adjust_automodify_address_1 (rtx memref, enum machine_mode mode, rtx addr, |
| HOST_WIDE_INT offset, int validate) |
| { |
| memref = change_address_1 (memref, VOIDmode, addr, validate); |
| return adjust_address_1 (memref, mode, offset, validate, 0); |
| } |
| |
| /* Return a memory reference like MEMREF, but whose address is changed by |
| adding OFFSET, an RTX, to it. POW2 is the highest power of two factor |
| known to be in OFFSET (possibly 1). */ |
| |
| rtx |
| offset_address (rtx memref, rtx offset, unsigned HOST_WIDE_INT pow2) |
| { |
| rtx new, addr = XEXP (memref, 0); |
| |
| new = simplify_gen_binary (PLUS, Pmode, addr, offset); |
| |
| /* At this point we don't know _why_ the address is invalid. It |
| could have secondary memory references, multiplies or anything. |
| |
| However, if we did go and rearrange things, we can wind up not |
| being able to recognize the magic around pic_offset_table_rtx. |
| This stuff is fragile, and is yet another example of why it is |
| bad to expose PIC machinery too early. */ |
| if (! memory_address_p (GET_MODE (memref), new) |
| && GET_CODE (addr) == PLUS |
| && XEXP (addr, 0) == pic_offset_table_rtx) |
| { |
| addr = force_reg (GET_MODE (addr), addr); |
| new = simplify_gen_binary (PLUS, Pmode, addr, offset); |
| } |
| |
| update_temp_slot_address (XEXP (memref, 0), new); |
| new = change_address_1 (memref, VOIDmode, new, 1); |
| |
| /* If there are no changes, just return the original memory reference. */ |
| if (new == memref) |
| return new; |
| |
| /* Update the alignment to reflect the offset. Reset the offset, which |
| we don't know. */ |
| MEM_ATTRS (new) |
| = get_mem_attrs (MEM_ALIAS_SET (memref), MEM_EXPR (memref), 0, 0, |
| MIN (MEM_ALIGN (memref), pow2 * BITS_PER_UNIT), |
| GET_MODE (new)); |
| return new; |
| } |
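| |
| /* Informal example (IDX is a hypothetical register whose value is known |
| to be a multiple of 4): |
| |
| rtx elt = offset_address (mem, idx, 4); |
| |
| The recorded alignment becomes the minimum of MEM's old alignment and |
| 4 * BITS_PER_UNIT; the offset is discarded, since it is unknown. */ |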
| |
| /* Return a memory reference like MEMREF, but with its address changed to |
| ADDR. The caller is asserting that the actual piece of memory pointed |
| to is the same, just the form of the address is being changed, such as |
| by putting something into a register. */ |
| |
| rtx |
| replace_equiv_address (rtx memref, rtx addr) |
| { |
| /* change_address_1 copies the memory attribute structure without change |
| and that's exactly what we want here. */ |
| update_temp_slot_address (XEXP (memref, 0), addr); |
| return change_address_1 (memref, VOIDmode, addr, 1); |
| } |
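| |
| /* Informal example: forcing a complicated address into a register |
| without changing which memory is referenced (a sketch only): |
| |
| rtx reg = force_reg (Pmode, XEXP (mem, 0)); |
| mem = replace_equiv_address (mem, reg); |
| |
| The memory attributes are carried over unchanged. */ |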
| |
| /* Likewise, but the reference is not required to be valid. */ |
| |
| rtx |
| replace_equiv_address_nv (rtx memref, rtx addr) |
| { |
| return change_address_1 (memref, VOIDmode, addr, 0); |
| } |
| |
| /* Return a memory reference like MEMREF, but with its mode widened to |
| MODE and offset by OFFSET. This would be used by targets that e.g. |
| cannot issue QImode memory operations and have to use SImode memory |
| operations plus masking logic. */ |
| |
| rtx |
| widen_memory_access (rtx memref, enum machine_mode mode, HOST_WIDE_INT offset) |
| { |
| rtx new = adjust_address_1 (memref, mode, offset, 1, 1); |
| tree expr = MEM_EXPR (new); |
| rtx memoffset = MEM_OFFSET (new); |
| unsigned int size = GET_MODE_SIZE (mode); |
| |
| /* If there are no changes, just return the original memory reference. */ |
| if (new == memref) |
| return new; |
| |
| /* If we don't know what offset we were at within the expression, then |
| we can't know if we've overstepped the bounds. */ |
| if (! memoffset) |
| expr = NULL_TREE; |
| |
| while (expr) |
| { |
| if (TREE_CODE (expr) == COMPONENT_REF) |
| { |
| tree field = TREE_OPERAND (expr, 1); |
| tree offset = component_ref_field_offset (expr); |
| |
| if (! DECL_SIZE_UNIT (field)) |
| { |
| expr = NULL_TREE; |
| break; |
| } |
| |
| /* Is the field at least as large as the access? If so, ok, |
| otherwise strip back to the containing structure. */ |
| if (TREE_CODE (DECL_SIZE_UNIT (field)) == INTEGER_CST |
| && compare_tree_int (DECL_SIZE_UNIT (field), size) >= 0 |
| && INTVAL (memoffset) >= 0) |
| break; |
| |
| if (! host_integerp (offset, 1)) |
| { |
| expr = NULL_TREE; |
| break; |
| } |
| |
| expr = TREE_OPERAND (expr, 0); |
| memoffset |
| = (GEN_INT (INTVAL (memoffset) |
| + tree_low_cst (offset, 1) |
| + (tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1) |
| / BITS_PER_UNIT))); |
| } |
| /* Similarly for the decl. */ |
| else if (DECL_P (expr) |
| && DECL_SIZE_UNIT (expr) |
| && TREE_CODE (DECL_SIZE_UNIT (expr)) == INTEGER_CST |
| && compare_tree_int (DECL_SIZE_UNIT (expr), size) >= 0 |
| && (! memoffset || INTVAL (memoffset) >= 0)) |
| break; |
| else |
| { |
| /* The widened memory access overflows the expression, which means |
| that it could alias another expression. Zap it. */ |
| expr = NULL_TREE; |
| break; |
| } |
| } |
| |
| if (! expr) |
| memoffset = NULL_RTX; |
| |
| /* The widened memory may alias other stuff, so zap the alias set. */ |
| /* ??? Maybe use get_alias_set on any remaining expression. */ |
| |
| MEM_ATTRS (new) = get_mem_attrs (0, expr, memoffset, GEN_INT (size), |
| MEM_ALIGN (new), mode); |
| |
| return new; |
| } |
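| |
| /* Informal example: a target without byte loads might read a byte |
| through a word-sized access and mask afterwards (a sketch only): |
| |
| rtx word = widen_memory_access (mem, SImode, 0); |
| |
| The alias set of the result is cleared, since the wider access may |
| touch neighboring objects. */ |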
| |
| /* Return a newly created CODE_LABEL rtx with a unique label number. */ |
| |
| rtx |
| gen_label_rtx (void) |
| { |
| return gen_rtx_CODE_LABEL (VOIDmode, 0, NULL_RTX, NULL_RTX, |
| NULL, label_num++, NULL); |
| } |
| |
| /* For procedure integration. */ |
| |
| /* Install new pointers to the first and last insns in the chain. |
| Also, set cur_insn_uid to one higher than the last in use. |
| Used for an inline-procedure after copying the insn chain. */ |
| |
| void |
| set_new_first_and_last_insn (rtx first, rtx last) |
| { |
| rtx insn; |
| |
| first_insn = first; |
| last_insn = last; |
| cur_insn_uid = 0; |
| |
| for (insn = first; insn; insn = NEXT_INSN (insn)) |
| cur_insn_uid = MAX (cur_insn_uid, INSN_UID (insn)); |
| |
| cur_insn_uid++; |
| } |
| |
| /* Go through all the RTL insn bodies and copy any invalid shared |
| structure. This routine should only be called once. */ |
| |
| static void |
| unshare_all_rtl_1 (tree fndecl, rtx insn) |
| { |
| tree decl; |
| |
| /* Make sure that virtual parameters are not shared. */ |
| for (decl = DECL_ARGUMENTS (fndecl); decl; decl = TREE_CHAIN (decl)) |
| SET_DECL_RTL (decl, copy_rtx_if_shared (DECL_RTL (decl))); |
| |
| /* Make sure that virtual stack slots are not shared. */ |
| unshare_all_decls (DECL_INITIAL (fndecl)); |
| |
| /* Unshare just about everything else. */ |
| unshare_all_rtl_in_chain (insn); |
| |
| /* Make sure the addresses of stack slots found outside the insn chain |
| (such as, in DECL_RTL of a variable) are not shared |
| with the insn chain. |
| |
| This special care is necessary when the stack slot MEM does not |
| actually appear in the insn chain. If it does appear, its address |
| is unshared from all else at that point. */ |
| stack_slot_list = copy_rtx_if_shared (stack_slot_list); |
| } |
| |
| /* Go through all the RTL insn bodies and copy any invalid shared |
| structure, again. This is a fairly expensive thing to do, so it |
| should be done sparingly. */ |
| |
| void |
| unshare_all_rtl_again (rtx insn) |
| { |
| rtx p; |
| tree decl; |
| |
| for (p = insn; p; p = NEXT_INSN (p)) |
| if (INSN_P (p)) |
| { |
| reset_used_flags (PATTERN (p)); |
| reset_used_flags (REG_NOTES (p)); |
| reset_used_flags (LOG_LINKS (p)); |
| } |
| |
| /* Make sure that virtual stack slots are not shared. */ |
| reset_used_decls (DECL_INITIAL (cfun->decl)); |
| |
| /* Make sure that virtual parameters are not shared. */ |
| for (decl = DECL_ARGUMENTS (cfun->decl); decl; decl = TREE_CHAIN (decl)) |
| reset_used_flags (DECL_RTL (decl)); |
| |
| reset_used_flags (stack_slot_list); |
| |
| unshare_all_rtl_1 (cfun->decl, insn); |
| } |
| |
| void |
| unshare_all_rtl (void) |
| { |
| unshare_all_rtl_1 (current_function_decl, get_insns ()); |
| } |
| |
| /* Check that ORIG is not marked when it should not be, and mark ORIG as |
| in use. Recursively does the same for subexpressions. */ |
| |
| static void |
| verify_rtx_sharing (rtx orig, rtx insn) |
| { |
| rtx x = orig; |
| int i; |
| enum rtx_code code; |
| const char *format_ptr; |
| |
| if (x == 0) |
| return; |
| |
| code = GET_CODE (x); |
| |
| /* These types may be freely shared. */ |
| |
| switch (code) |
| { |
| case REG: |
| case CONST_INT: |
| case CONST_DOUBLE: |
| case CONST_VECTOR: |
| case SYMBOL_REF: |
| case LABEL_REF: |
| case CODE_LABEL: |
| case PC: |
| case CC0: |
| case SCRATCH: |
| /* SCRATCH must be shared because they represent distinct values. */ |
| return; |
| case CLOBBER: |
| if (REG_P (XEXP (x, 0)) && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER) |
| return; |
| break; |
| |
| case CONST: |
| /* CONST can be shared if it contains a SYMBOL_REF. If it contains |
| a LABEL_REF, it isn't sharable. */ |
| if (GET_CODE (XEXP (x, 0)) == PLUS |
| && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF |
| && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT) |
| return; |
| break; |
| |
| case MEM: |
| /* A MEM is allowed to be shared if its address is constant. */ |
| if (CONSTANT_ADDRESS_P (XEXP (x, 0)) |
| || reload_completed || reload_in_progress) |
| return; |
| |
| break; |
| |
| default: |
| break; |
| } |
| |
| /* This rtx may not be shared. If it has already been seen, |
| report the invalid sharing. */ |
| #ifdef ENABLE_CHECKING |
| if (RTX_FLAG (x, used)) |
| { |
| error ("Invalid rtl sharing found in the insn"); |
| debug_rtx (insn); |
| error ("Shared rtx"); |
| debug_rtx (x); |
| internal_error ("Internal consistency failure"); |
| } |
| #endif |
| gcc_assert (!RTX_FLAG (x, used)); |
| |
| RTX_FLAG (x, used) = 1; |
| |
| /* Now scan the subexpressions recursively. */ |
| |
| format_ptr = GET_RTX_FORMAT (code); |
| |
| for (i = 0; i < GET_RTX_LENGTH (code); i++) |
| { |
| switch (*format_ptr++) |
| { |
| case 'e': |
| verify_rtx_sharing (XEXP (x, i), insn); |
| break; |
| |
| case 'E': |
| if (XVEC (x, i) != NULL) |
| { |
| int j; |
| int len = XVECLEN (x, i); |
| |
| for (j = 0; j < len; j++) |
| { |
| /* We allow sharing of ASM_OPERANDS inside a single |
| instruction. */ |
| if (j && GET_CODE (XVECEXP (x, i, j)) == SET |
| && (GET_CODE (SET_SRC (XVECEXP (x, i, j))) |
| == ASM_OPERANDS)) |
| verify_rtx_sharing (SET_DEST (XVECEXP (x, i, j)), insn); |
| else |
| verify_rtx_sharing (XVECEXP (x, i, j), insn); |
| } |
| } |
| break; |
| } |
| } |
| return; |
| } |
| |
| /* Go through all the RTL insn bodies and check that there is no unexpected |
| sharing in between the subexpressions. */ |
| |
| void |
| verify_rtl_sharing (void) |
| { |
| rtx p; |
| |
| for (p = get_insns (); p; p = NEXT_INSN (p)) |
| if (INSN_P (p)) |
| { |
| reset_used_flags (PATTERN (p)); |
| reset_used_flags (REG_NOTES (p)); |
| reset_used_flags (LOG_LINKS (p)); |
| } |
| |
| for (p = get_insns (); p; p = NEXT_INSN (p)) |
| if (INSN_P (p)) |
| { |
| verify_rtx_sharing (PATTERN (p), p); |
| verify_rtx_sharing (REG_NOTES (p), p); |
| verify_rtx_sharing (LOG_LINKS (p), p); |
| } |
| } |
| |
| /* Go through all the RTL insn bodies and copy any invalid shared structure. |
| Assumes the mark bits are cleared at entry. */ |
| |
| void |
| unshare_all_rtl_in_chain (rtx insn) |
| { |
| for (; insn; insn = NEXT_INSN (insn)) |
| if (INSN_P (insn)) |
| { |
| PATTERN (insn) = copy_rtx_if_shared (PATTERN (insn)); |
| REG_NOTES (insn) = copy_rtx_if_shared (REG_NOTES (insn)); |
| LOG_LINKS (insn) = copy_rtx_if_shared (LOG_LINKS (insn)); |
| } |
| } |
| |
| /* Go through all virtual stack slots of a function and copy any |
| shared structure. */ |
| static void |
| unshare_all_decls (tree blk) |
| { |
| tree t; |
| |
| /* Copy shared decls. */ |
| for (t = BLOCK_VARS (blk); t; t = TREE_CHAIN (t)) |
| if (DECL_RTL_SET_P (t)) |
| SET_DECL_RTL (t, copy_rtx_if_shared (DECL_RTL (t))); |
| |
| /* Now process sub-blocks. */ |
| for (t = BLOCK_SUBBLOCKS (blk); t; t = TREE_CHAIN (t)) |
| unshare_all_decls (t); |
| } |
| |
| /* Go through all virtual stack slots of a function and mark them as |
| not shared. */ |
| static void |
| reset_used_decls (tree blk) |
| { |
| tree t; |
| |
| /* Mark decls. */ |
| for (t = BLOCK_VARS (blk); t; t = TREE_CHAIN (t)) |
| if (DECL_RTL_SET_P (t)) |
| reset_used_flags (DECL_RTL (t)); |
| |
| /* Now process sub-blocks. */ |
| for (t = BLOCK_SUBBLOCKS (blk); t; t = TREE_CHAIN (t)) |
| reset_used_decls (t); |
| } |
| |
| /* Mark ORIG as in use, and return a copy of it if it was already in use. |
| Recursively does the same for subexpressions. Uses |
| copy_rtx_if_shared_1 to reduce stack space. */ |
| |
| rtx |
| copy_rtx_if_shared (rtx orig) |
| { |
| copy_rtx_if_shared_1 (&orig); |
| return orig; |
| } |
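| |
| /* Informal usage sketch: unsharing a pattern PAT that may share |
| structure with rtl seen earlier (names are hypothetical): |
| |
| reset_used_flags (pat); |
| pat = copy_rtx_if_shared (pat); |
| |
| The first walk clears the mark bits; the second marks each |
| subexpression and replaces any it meets a second time with a copy. */ |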
| |
| /* Mark *ORIG1 as in use, and set it to a copy of it if it was already in |
| use. Recursively does the same for subexpressions. */ |
| |
| static void |
| copy_rtx_if_shared_1 (rtx *orig1) |
| { |
| rtx x; |
| int i; |
| enum rtx_code code; |
| rtx *last_ptr; |
| const char *format_ptr; |
| int copied = 0; |
| int length; |
| |
| /* Repeat is used to turn tail-recursion into iteration. */ |
| repeat: |
| x = *orig1; |
| |
| if (x == 0) |
| return; |
| |
| code = GET_CODE (x); |
| |
| /* These types may be freely shared. */ |
| |
| switch (code) |
| { |
| case REG: |
| case CONST_INT: |
| case CONST_DOUBLE: |
| case CONST_VECTOR: |
| case SYMBOL_REF: |
| case LABEL_REF: |
| case CODE_LABEL: |
| case PC: |
| case CC0: |
| case SCRATCH: |
| /* SCRATCH must be shared because they represent distinct values. */ |
| return; |
| case CLOBBER: |
| if (REG_P (XEXP (x, 0)) && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER) |
| return; |
| break; |
| |
| case CONST: |
| /* CONST can be shared if it contains a SYMBOL_REF. If it contains |
| a LABEL_REF, it isn't sharable. */ |
| if (GET_CODE (XEXP (x, 0)) == PLUS |
| && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF |
| && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT) |
| return; |
| break; |
| |
| case INSN: |
| case JUMP_INSN: |
| case CALL_INSN: |
| case NOTE: |
| case BARRIER: |
| /* The chain of insns is not being copied. */ |
| return; |
| |
| default: |
| break; |
| } |
| |
| /* This rtx may not be shared. If it has already been seen, |
| replace it with a copy of itself. */ |
| |
| if (RTX_FLAG (x, used)) |
| { |
| rtx copy; |
| |
| copy = rtx_alloc (code); |
| memcpy (copy, x, RTX_SIZE (code)); |
| x = copy; |
| copied = 1; |
| } |
| RTX_FLAG (x, used) = 1; |
| |
| /* Now scan the subexpressions recursively. |
| We can store any replaced subexpressions directly into X |
| since we know X is not shared! Any vectors in X |
| must be copied if X was copied. */ |
| |
| format_ptr = GET_RTX_FORMAT (code); |
| length = GET_RTX_LENGTH (code); |
| last_ptr = NULL; |
| |
| for (i = 0; i < length; i++) |
| { |
| switch (*format_ptr++) |
| { |
| case 'e': |
| if (last_ptr) |
| copy_rtx_if_shared_1 (last_ptr); |
| last_ptr = &XEXP (x, i); |
| break; |
| |
| case 'E': |
| if (XVEC (x, i) != NULL) |
| { |
| int j; |
| int len = XVECLEN (x, i); |
| |
| /* Copy the vector iff I copied the rtx and the length |
| is nonzero. */ |
| if (copied && len > 0) |
| XVEC (x, i) = gen_rtvec_v (len, XVEC (x, i)->elem); |
| |
| /* Call recursively on all inside the vector. */ |
| for (j = 0; j < len; j++) |
| { |
| if (last_ptr) |
| copy_rtx_if_shared_1 (last_ptr); |
| last_ptr = &XVECEXP (x, i, j); |
| } |
| } |
| break; |
| } |
| } |
| *orig1 = x; |
| if (last_ptr) |
| { |
| orig1 = last_ptr; |
| goto repeat; |
| } |
| return; |
| } |
| |
| /* Clear all the USED bits in X to allow copy_rtx_if_shared to be used |
| to look for shared sub-parts. */ |
| |
| void |
| reset_used_flags (rtx x) |
| { |
| int i, j; |
| enum rtx_code code; |
| const char *format_ptr; |
| int length; |
| |
| /* Repeat is used to turn tail-recursion into iteration. */ |
| repeat: |
| if (x == 0) |
| return; |
| |
| code = GET_CODE (x); |
| |
| /* These types may be freely shared so we needn't do any resetting |
| for them. */ |
| |
| switch (code) |
| { |
| case REG: |
| case CONST_INT: |
| case CONST_DOUBLE: |
| case CONST_VECTOR: |
| case SYMBOL_REF: |
| case CODE_LABEL: |
| case PC: |
| case CC0: |
| return; |
| |
| case INSN: |
| case JUMP_INSN: |
| case CALL_INSN: |
| case NOTE: |
| case LABEL_REF: |
| case BARRIER: |
| /* The chain of insns is not being copied. */ |
| return; |
| |
| default: |
| break; |
| } |
| |
| RTX_FLAG (x, used) = 0; |
| |
| format_ptr = GET_RTX_FORMAT (code); |
| length = GET_RTX_LENGTH (code); |
| |
| for (i = 0; i < length; i++) |
| { |
| switch (*format_ptr++) |
| { |
| case 'e': |
| if (i == length-1) |
| { |
| x = XEXP (x, i); |
| goto repeat; |
| } |
| reset_used_flags (XEXP (x, i)); |
| break; |
| |
| case 'E': |
| for (j = 0; j < XVECLEN (x, i); j++) |
| reset_used_flags (XVECEXP (x, i, j)); |
| break; |
| } |
| } |
| } |
| |
| /* Set all the USED bits in X to allow copy_rtx_if_shared to be used |
| to look for shared sub-parts. */ |
| |
| void |
| set_used_flags (rtx x) |
| { |
| int i, j; |
| enum rtx_code code; |
| const char *format_ptr; |
| |
| if (x == 0) |
| return; |
| |
| code = GET_CODE (x); |
| |
| /* These types may be freely shared so we needn't do any marking |
| for them. */ |
| |
| switch (code) |
| { |
| case REG: |
| case CONST_INT: |
| case CONST_DOUBLE: |
| case CONST_VECTOR: |
| case SYMBOL_REF: |
| case CODE_LABEL: |
| case PC: |
| case CC0: |
| return; |
| |
| case INSN: |
| case JUMP_INSN: |
| case CALL_INSN: |
| case NOTE: |
| case LABEL_REF: |
| case BARRIER: |
| /* The chain of insns is not being copied. */ |
| return; |
| |
| default: |
| break; |
| } |
| |
| RTX_FLAG (x, used) = 1; |
| |
| format_ptr = GET_RTX_FORMAT (code); |
| for (i = 0; i < GET_RTX_LENGTH (code); i++) |
| { |
| switch (*format_ptr++) |
| { |
| case 'e': |
| set_used_flags (XEXP (x, i)); |
| break; |
| |
| case 'E': |
| for (j = 0; j < XVECLEN (x, i); j++) |
| set_used_flags (XVECEXP (x, i, j)); |
| break; |
| } |
| } |
| } |
| |
| /* Copy X if necessary so that it won't be altered by changes in OTHER. |
| Return X or the rtx for the pseudo reg the value of X was copied into. |
| OTHER must be valid as a SET_DEST. */ |
| |
| rtx |
| make_safe_from (rtx x, rtx other) |
| { |
| while (1) |
| switch (GET_CODE (other)) |
| { |
| case SUBREG: |
| other = SUBREG_REG (other); |
| break; |
| case STRICT_LOW_PART: |
| case SIGN_EXTEND: |
| case ZERO_EXTEND: |
| other = XEXP (other, 0); |
| break; |
| default: |
| goto done; |
| } |
| done: |
| if ((MEM_P (other) |
| && ! CONSTANT_P (x) |
| && !REG_P (x) |
| && GET_CODE (x) != SUBREG) |
| || (REG_P (other) |
| && (REGNO (other) < FIRST_PSEUDO_REGISTER |
| || reg_mentioned_p (other, x)))) |
| { |
| rtx temp = gen_reg_rtx (GET_MODE (x)); |
| emit_move_insn (temp, x); |
| return temp; |
| } |
| return x; |
| } |
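| |
| /* Informal example (names are hypothetical): before emitting a store |
| into TARGET whose old value X must survive, an expander might write |
| |
| x = make_safe_from (x, target); |
| |
| X is copied into a fresh pseudo only when the store could clobber it. */ |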
| |
| /* Emission of insns (adding them to the doubly-linked list). */ |
| |
| /* Return the first insn of the current sequence or current function. */ |
| |
| rtx |
| get_insns (void) |
| { |
| return first_insn; |
| } |
| |
| /* Specify a new insn as the first in the chain. */ |
| |
| void |
| set_first_insn (rtx insn) |
| { |
| gcc_assert (!PREV_INSN (insn)); |
| first_insn = insn; |
| } |
| |
| /* Return the last insn emitted in current sequence or current function. */ |
| |
| rtx |
| get_last_insn (void) |
| { |
| return last_insn; |
| } |
| |
| /* Specify a new insn as the last in the chain. */ |
| |
| void |
| set_last_insn (rtx insn) |
| { |
| gcc_assert (!NEXT_INSN (insn)); |
| last_insn = insn; |
| } |
| |
| /* Return the last insn emitted, even if it is in a sequence now pushed. */ |
| |
| rtx |
| get_last_insn_anywhere (void) |
| { |
| struct sequence_stack *stack; |
| if (last_insn) |
| return last_insn; |
| for (stack = seq_stack; stack; stack = stack->next) |
| if (stack->last != 0) |
| return stack->last; |
| return 0; |
| } |
| |
| /* Return the first nonnote insn emitted in current sequence or current |
| function. This routine looks inside SEQUENCEs. */ |
| |
| rtx |
| get_first_nonnote_insn (void) |
| { |
| rtx insn = first_insn; |
| |
| if (insn) |
| { |
| if (NOTE_P (insn)) |
| for (insn = next_insn (insn); |
| insn && NOTE_P (insn); |
| insn = next_insn (insn)) |
| continue; |
| else |
| { |
| if (GET_CODE (insn) == INSN |
| && GET_CODE (PATTERN (insn)) == SEQUENCE) |
| insn = XVECEXP (PATTERN (insn), 0, 0); |
| } |
| } |
| |
| return insn; |
| } |
| |
| /* Return the last nonnote insn emitted in current sequence or current |
| function. This routine looks inside SEQUENCEs. */ |
| |
| rtx |
| get_last_nonnote_insn (void) |
| { |
| rtx insn = last_insn; |
| |
| if (insn) |
| { |
| if (NOTE_P (insn)) |
| for (insn = previous_insn (insn); |
| insn && NOTE_P (insn); |
| insn = previous_insn (insn)) |
| continue; |
| else |
| { |
| if (GET_CODE (insn) == INSN |
| && GET_CODE (PATTERN (insn)) == SEQUENCE) |
| insn = XVECEXP (PATTERN (insn), 0, |
| XVECLEN (PATTERN (insn), 0) - 1); |
| } |
| } |
| |
| return insn; |
| } |
| |
| /* Return a number larger than any instruction's uid in this function. */ |
| |
| int |
| get_max_uid (void) |
| { |
| return cur_insn_uid; |
| } |
| |
| /* Renumber instructions so that no instruction UIDs are wasted. */ |
| |
| void |
| renumber_insns (FILE *stream) |
| { |
| rtx insn; |
| |
| /* If we're not supposed to renumber instructions, don't. */ |
| if (!flag_renumber_insns) |
| return; |
| |
| /* If there aren't that many instructions, then it's not really |
| worth renumbering them. */ |
| if (flag_renumber_insns == 1 && get_max_uid () < 25000) |
| return; |
| |
| cur_insn_uid = 1; |
| |
| for (insn = get_insns (); insn; insn = NEXT_INSN (insn)) |
| { |
| if (stream) |
| fprintf (stream, "Renumbering insn %d to %d\n", |
| INSN_UID (insn), cur_insn_uid); |
| INSN_UID (insn) = cur_insn_uid++; |
| } |
| } |
| |
| /* Return the next insn. If it is a SEQUENCE, return the first insn |
| of the sequence. */ |
| |
| rtx |
| next_insn (rtx insn) |
| { |
| if (insn) |
| { |
| insn = NEXT_INSN (insn); |
| if (insn && NONJUMP_INSN_P (insn) |
| && GET_CODE (PATTERN (insn)) == SEQUENCE) |
| insn = XVECEXP (PATTERN (insn), 0, 0); |
| } |
| |
| return insn; |
| } |
| |
| /* Return the previous insn. If it is a SEQUENCE, return the last insn |
| of the sequence. */ |
| |
| rtx |
| previous_insn (rtx insn) |
| { |
| if (insn) |
| { |
| insn = PREV_INSN (insn); |
| if (insn && NONJUMP_INSN_P (insn) |
| && GET_CODE (PATTERN (insn)) == SEQUENCE) |
| insn = XVECEXP (PATTERN (insn), 0, XVECLEN (PATTERN (insn), 0) - 1); |
| } |
| |
| return insn; |
| } |
| |
| /* Return the next insn after INSN that is not a NOTE. This routine does not |
| look inside SEQUENCEs. */ |
| |
| rtx |
| next_nonnote_insn (rtx insn) |
| { |
| while (insn) |
| { |
| insn = NEXT_INSN (insn); |
| if (insn == 0 || !NOTE_P (insn)) |
| break; |
| } |
| |
| return insn; |
| } |
| |
| /* Return the previous insn before INSN that is not a NOTE. This routine does |
| not look inside SEQUENCEs. */ |
| |
| rtx |
| prev_nonnote_insn (rtx insn) |
| { |
| while (insn) |
| { |
| insn = PREV_INSN (insn); |
| if (insn == 0 || !NOTE_P (insn)) |
| break; |
| } |
| |
| return insn; |
| } |
| |
| /* Return the next INSN, CALL_INSN or JUMP_INSN after INSN; |
| or 0, if there is none. This routine does not look inside |
| SEQUENCEs. */ |
| |
| rtx |
| next_real_insn (rtx insn) |
| { |
| while (insn) |
| { |
| insn = NEXT_INSN (insn); |
| if (insn == 0 || INSN_P (insn)) |
| break; |
| } |
| |
| return insn; |
| } |
| |
| /* Return the last INSN, CALL_INSN or JUMP_INSN before INSN; |
| or 0, if there is none. This routine does not look inside |
| SEQUENCEs. */ |
| |
| rtx |
| prev_real_insn (rtx insn) |
| { |
| while (insn) |
| { |
| insn = PREV_INSN (insn); |
| if (insn == 0 || INSN_P (insn)) |
| break; |
| } |
| |
| return insn; |
| } |
| |
| /* Return the last CALL_INSN in the current list, or 0 if there is none. |
| This routine does not look inside SEQUENCEs. */ |
| |
| rtx |
| last_call_insn (void) |
| { |
| rtx insn; |
| |
| for (insn = get_last_insn (); |
| insn && !CALL_P (insn); |
| insn = PREV_INSN (insn)) |
| ; |
| |
| return insn; |
| } |
| |
| /* Return nonzero if INSN really does something. Until reload has |
| completed, any INSN, CALL_INSN or JUMP_INSN counts; after reload, |
| bare USE and CLOBBER patterns are excluded. */ |
| |
| int |
| active_insn_p (rtx insn) |
| { |
| return (CALL_P (insn) || JUMP_P (insn) |
| || (NONJUMP_INSN_P (insn) |
| && (! reload_completed |
| || (GET_CODE (PATTERN (insn)) != USE |
| && GET_CODE (PATTERN (insn)) != CLOBBER)))); |
| } |
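| |
| /* Return the next active insn after INSN, or 0 if there is none. This |
| routine does not look inside SEQUENCEs. Until reload has completed, |
| this is the same as next_real_insn. */ |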
| |
| rtx |
| next_active_insn (rtx insn) |
| { |
| while (insn) |
| { |
| insn = NEXT_INSN (insn); |
| if (insn == 0 || active_insn_p (insn)) |
| break; |
| } |
| |
| return insn; |
| } |
| |
| /* Find the last insn before INSN that really does something. This routine |
| does not look inside SEQUENCEs. Until reload has completed, this is the |
| same as prev_real_insn. */ |
| |
| rtx |
| prev_active_insn (rtx insn) |
| { |
| while (insn) |
| { |
| insn = PREV_INSN (insn); |
| if (insn == 0 || active_insn_p (insn)) |
| break; |
| } |
| |
| return insn; |
| } |
| |
| /* Return the next CODE_LABEL after the insn INSN, or 0 if there is none. */ |
| |
| rtx |
| next_label (rtx insn) |
| { |
| while (insn) |
| { |
| insn = NEXT_INSN (insn); |
| if (insn == 0 || LABEL_P (insn)) |
| break; |
| } |
| |
| return insn; |
| } |
| |
| /* Return the last CODE_LABEL before the insn INSN, or 0 if there is none. */ |
| |
| rtx |
| prev_label (rtx insn) |
| { |
| while (insn) |
| { |
| insn = PREV_INSN (insn); |
| if (insn == 0 || LABEL_P (insn)) |
| break; |
| } |
| |
| return insn; |
| } |
| |
| /* Return the last label to mark the same position as LABEL. Return null |
| if LABEL itself is null. */ |
| |
| rtx |
| skip_consecutive_labels (rtx label) |
| { |
| rtx insn; |
| |
| for (insn = label; insn != 0 && !INSN_P (insn); insn = NEXT_INSN (insn)) |
| if (LABEL_P (insn)) |
| label = insn; |
| |
| return label; |
| } |
| |
| #ifdef HAVE_cc0 |
| /* INSN uses CC0 and is being moved into a delay slot. Set up REG_CC_SETTER |
| and REG_CC_USER notes so we can find it. */ |
| |
| void |
| link_cc0_insns (rtx insn) |
| { |
| rtx user = next_nonnote_insn (insn); |
| |
| if (NONJUMP_INSN_P (user) && GET_CODE (PATTERN (user)) == SEQUENCE) |
| user = XVECEXP (PATTERN (user), 0, 0); |
| |
| REG_NOTES (user) = gen_rtx_INSN_LIST (REG_CC_SETTER, insn, |
| REG_NOTES (user)); |
| REG_NOTES (insn) = gen_rtx_INSN_LIST (REG_CC_USER, user, REG_NOTES (insn)); |
| } |
| |
| /* Return the next insn that uses CC0 after INSN, which is assumed to |
| set it. This is the inverse of prev_cc0_setter (i.e., prev_cc0_setter |
| applied to the result of this function should yield INSN). |
| |
| Normally, this is simply the next insn. However, if a REG_CC_USER note |
| is present, it contains the insn that uses CC0. |
| |
| Return 0 if we can't find the insn. */ |
| |
| rtx |
| next_cc0_user (rtx insn) |
| { |
| rtx note = find_reg_note (insn, REG_CC_USER, NULL_RTX); |
| |
| if (note) |
| return XEXP (note, 0); |
| |
| insn = next_nonnote_insn (insn); |
| if (insn && NONJUMP_INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE) |
| insn = XVECEXP (PATTERN (insn), 0, 0); |
| |
| if (insn && INSN_P (insn) && reg_mentioned_p (cc0_rtx, PATTERN (insn))) |
| return insn; |
| |
| return 0; |
| } |
| |
| /* Find the insn that set CC0 for INSN. Unless INSN has a REG_CC_SETTER |
| note, it is the previous insn. */ |
| |
| rtx |
| prev_cc0_setter (rtx insn) |
| { |
| rtx note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX); |
| |
| if (note) |
| return XEXP (note, 0); |
| |
| insn = prev_nonnote_insn (insn); |
| gcc_assert (sets_cc0_p (PATTERN (insn))); |
| |
| return insn; |
| } |
| #endif |
| |
| /* Increment the label uses for all labels present in rtx. */ |
| |
| static void |
| mark_label_nuses (rtx x) |
| { |
| enum rtx_code code; |
| int i, j; |
| const char *fmt; |
| |
| code = GET_CODE (x); |
| if (code == LABEL_REF && LABEL_P (XEXP (x, 0))) |
| LABEL_NUSES (XEXP (x, 0))++; |
| |
| fmt = GET_RTX_FORMAT (code); |
| for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) |
| { |
| if (fmt[i] == 'e') |
| mark_label_nuses (XEXP (x, i)); |
| else if (fmt[i] == 'E') |
| for (j = XVECLEN (x, i) - 1; j >= 0; j--) |
| mark_label_nuses (XVECEXP (x, i, j)); |
| } |
| } |
| |
| |
| /* Try splitting insns that can be split for better scheduling. |
| PAT is the pattern which might split. |
| TRIAL is the insn providing PAT. |
| LAST is nonzero if we should return the last insn of the sequence produced. |
| |
| If this routine succeeds in splitting, it returns the first or last |
| replacement insn depending on the value of LAST. Otherwise, it |
| returns TRIAL. If the insn to be returned can be split, it will be. */ |
| |
| rtx |
| try_split (rtx pat, rtx trial, int last) |
| { |
| rtx before = PREV_INSN (trial); |
| rtx after = NEXT_INSN (trial); |
| int has_barrier = 0; |
| rtx tem; |
| rtx note, seq; |
| int probability; |
| rtx insn_last, insn; |
| int njumps = 0; |
| |
| /* APPLE LOCAL begin LLVM */ |
| #ifdef ENABLE_LLVM |
| return 0; |
| #endif |
| /* APPLE LOCAL end LLVM */ |
| |
| if (any_condjump_p (trial) |
| && (note = find_reg_note (trial, REG_BR_PROB, 0))) |
| split_branch_probability = INTVAL (XEXP (note, 0)); |
| probability = split_branch_probability; |
| |
| seq = split_insns (pat, trial); |
| |
| split_branch_probability = -1; |
| |
| /* If we are splitting a JUMP_INSN, it might be followed by a BARRIER. |
| We may need to handle this specially. */ |
| if (after && BARRIER_P (after)) |
| { |
| has_barrier = 1; |
| after = NEXT_INSN (after); |
| } |
| |
| if (!seq) |
| return trial; |
| |
| /* Avoid infinite loop if any insn of the result matches |
| the original pattern. */ |
| insn_last = seq; |
| while (1) |
| { |
| if (INSN_P (insn_last) |
| && rtx_equal_p (PATTERN (insn_last), pat)) |
| return trial; |
| if (!NEXT_INSN (insn_last)) |
| break; |
| insn_last = NEXT_INSN (insn_last); |
| } |
| |
| /* Mark labels. */ |
| for (insn = insn_last; insn ; insn = PREV_INSN (insn)) |
| { |
| if (JUMP_P (insn)) |
| { |
| mark_jump_label (PATTERN (insn), insn, 0); |
| njumps++; |
| if (probability != -1 |
| && any_condjump_p (insn) |
| && !find_reg_note (insn, REG_BR_PROB, 0)) |
| { |
| /* We can preserve the REG_BR_PROB notes only if exactly |
| one jump is created, otherwise the machine description |
| is responsible for this step using |
| split_branch_probability variable. */ |
| gcc_assert (njumps == 1); |
| REG_NOTES (insn) |
| = gen_rtx_EXPR_LIST (REG_BR_PROB, |
| GEN_INT (probability), |
| REG_NOTES (insn)); |
| } |
| } |
| } |
| |
| /* If we are splitting a CALL_INSN, look for the CALL_INSN |
| in SEQ and copy our CALL_INSN_FUNCTION_USAGE to it. */ |
| if (CALL_P (trial)) |
| { |
| for (insn = insn_last; insn ; insn = PREV_INSN (insn)) |
| if (CALL_P (insn)) |
| { |
| rtx *p = &CALL_INSN_FUNCTION_USAGE (insn); |
| while (*p) |
| p = &XEXP (*p, 1); |
| *p = CALL_INSN_FUNCTION_USAGE (trial); |
| SIBLING_CALL_P (insn) = SIBLING_CALL_P (trial); |
| } |
| } |
| |
| /* Copy notes, particularly those related to the CFG. */ |
| for (note = REG_NOTES (trial); note; note = XEXP (note, 1)) |
| { |
| switch (REG_NOTE_KIND (note)) |
| { |
| case REG_EH_REGION: |
| insn = insn_last; |
| while (insn != NULL_RTX) |
| { |
| if (CALL_P (insn) |
| || (flag_non_call_exceptions && INSN_P (insn) |
| && may_trap_p (PATTERN (insn)))) |
| REG_NOTES (insn) |
| = gen_rtx_EXPR_LIST (REG_EH_REGION, |
| XEXP (note, 0), |
| REG_NOTES (insn)); |
| insn = PREV_INSN (insn); |
| } |
| break; |
| |
| case REG_NORETURN: |
| case REG_SETJMP: |
| case REG_ALWAYS_RETURN: |
| insn = insn_last; |
| while (insn != NULL_RTX) |
| { |
| if (CALL_P (insn)) |
| REG_NOTES (insn) |
| = gen_rtx_EXPR_LIST (REG_NOTE_KIND (note), |
| XEXP (note, 0), |
| REG_NOTES (insn)); |
| insn = PREV_INSN (insn); |
| } |
| break; |
| |
| case REG_NON_LOCAL_GOTO: |
| insn = insn_last; |
| while (insn != NULL_RTX) |
|