Diffstat (limited to 'contrib/gcc/combine.c')
 contrib/gcc/combine.c | 399 ++++++++++++++++++++++++++++++-----------------
 1 file changed, 266 insertions(+), 133 deletions(-)
diff --git a/contrib/gcc/combine.c b/contrib/gcc/combine.c
index 47ac3a8aa623..bb22b0cdfb8c 100644
--- a/contrib/gcc/combine.c
+++ b/contrib/gcc/combine.c
@@ -424,6 +424,33 @@ do_SUBST (into, newval)
if (oldval == newval)
return;
+ /* We'd like to catch as many invalid transformations here as
+ possible. Unfortunately, there are way too many mode changes
+ that are perfectly valid, so we'd waste too much effort for
+ little gain doing the checks here. Focus on catching invalid
+ transformations involving integer constants. */
+ if (GET_MODE_CLASS (GET_MODE (oldval)) == MODE_INT
+ && GET_CODE (newval) == CONST_INT)
+ {
+ /* Sanity check that we're replacing oldval with a CONST_INT
+ that is a valid sign-extension for the original mode. */
+ if (INTVAL (newval) != trunc_int_for_mode (INTVAL (newval),
+ GET_MODE (oldval)))
+ abort ();
+
+ /* Replacing the operand of a SUBREG or a ZERO_EXTEND with a
+ CONST_INT is not valid, because after the replacement, the
+ original mode would be gone. Unfortunately, we can't tell
+ when do_SUBST is called to replace the operand thereof, so we
+ perform this test on oldval instead, checking whether an
+ invalid replacement took place before we got here. */
+ if ((GET_CODE (oldval) == SUBREG
+ && GET_CODE (SUBREG_REG (oldval)) == CONST_INT)
+ || (GET_CODE (oldval) == ZERO_EXTEND
+ && GET_CODE (XEXP (oldval, 0)) == CONST_INT))
+ abort ();
+ }
+
if (undobuf.frees)
buf = undobuf.frees, undobuf.frees = buf->next;
else
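A stand-alone sketch of the invariant the new abort enforces: a CONST_INT is
modeless, so combine keeps it in canonical sign-extended form for the mode it
stands in for.  (trunc_for_width below is a hypothetical model of
trunc_int_for_mode, not GCC's implementation.)

    #include <assert.h>
    #include <stdio.h>

    /* Sign-extend VAL from WIDTH bits to the full width of long long,
       the way CONST_INTs are canonicalized per mode.  */
    static long long
    trunc_for_width (long long val, int width)
    {
      unsigned long long mask = ((unsigned long long) 1 << width) - 1;
      unsigned long long v = (unsigned long long) val & mask;

      if (v & ((unsigned long long) 1 << (width - 1)))
        v |= ~mask;            /* propagate the sign bit upward */
      return (long long) v;
    }

    int
    main (void)
    {
      /* 0xFF is not a valid QImode (8-bit) CONST_INT; its canonical
         sign-extended form is -1, so the new check would abort on it.  */
      assert (trunc_for_width (0xFF, 8) == -1);
      assert (trunc_for_width (-1, 8) == -1);      /* already canonical */
      assert (trunc_for_width (0x7F, 8) == 0x7F);  /* positive, unchanged */
      printf ("canonicalization checks pass\n");
      return 0;
    }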
@@ -877,8 +904,10 @@ set_nonzero_bits_and_sign_copies (x, set, data)
<< GET_MODE_BITSIZE (GET_MODE (x))));
#endif
- reg_nonzero_bits[REGNO (x)]
- |= nonzero_bits (src, nonzero_bits_mode);
+ /* Don't call nonzero_bits if it cannot change anything. */
+ if (reg_nonzero_bits[REGNO (x)] != ~(unsigned HOST_WIDE_INT) 0)
+ reg_nonzero_bits[REGNO (x)]
+ |= nonzero_bits (src, nonzero_bits_mode);
num = num_sign_bit_copies (SET_SRC (set), GET_MODE (x));
if (reg_sign_bit_copies[REGNO (x)] == 0
|| reg_sign_bit_copies[REGNO (x)] > num)
@@ -1478,6 +1507,7 @@ try_combine (i3, i2, i1, new_direct_jump_p)
{
/* New patterns for I3 and I2, respectively. */
rtx newpat, newi2pat = 0;
+ int substed_i2 = 0, substed_i1 = 0;
/* Indicates need to preserve SET in I1 or I2 in I3 if it is not dead. */
int added_sets_1, added_sets_2;
/* Total number of SETs to put into I3. */
@@ -1939,6 +1969,7 @@ try_combine (i3, i2, i1, new_direct_jump_p)
subst_low_cuid = INSN_CUID (i2);
newpat = subst (PATTERN (i3), i2dest, i2src, 0,
! i1_feeds_i3 && i1dest_in_i1src);
+ substed_i2 = 1;
/* Record whether i2's body now appears within i3's body. */
i2_is_used = n_occurrences;
@@ -1963,6 +1994,7 @@ try_combine (i3, i2, i1, new_direct_jump_p)
n_occurrences = 0;
subst_low_cuid = INSN_CUID (i1);
newpat = subst (newpat, i1dest, i1src, 0, 0);
+ substed_i1 = 1;
}
/* Fail if an autoincrement side-effect has been duplicated. Be careful
@@ -2534,6 +2566,23 @@ try_combine (i3, i2, i1, new_direct_jump_p)
INSN_CODE (i3) = insn_code_number;
PATTERN (i3) = newpat;
+
+ if (GET_CODE (i3) == CALL_INSN && CALL_INSN_FUNCTION_USAGE (i3))
+ {
+ rtx call_usage = CALL_INSN_FUNCTION_USAGE (i3);
+
+ reset_used_flags (call_usage);
+ call_usage = copy_rtx (call_usage);
+
+ if (substed_i2)
+ replace_rtx (call_usage, i2dest, i2src);
+
+ if (substed_i1)
+ replace_rtx (call_usage, i1dest, i1src);
+
+ CALL_INSN_FUNCTION_USAGE (i3) = call_usage;
+ }
+
if (undobuf.other_insn)
INSN_CODE (undobuf.other_insn) = other_code_number;
@@ -3013,8 +3062,11 @@ find_split_point (loc, insn)
SUBST (SET_SRC (x),
gen_binary (IOR, mode,
gen_binary (AND, mode, dest,
- GEN_INT (~(mask << pos)
- & GET_MODE_MASK (mode))),
+ GEN_INT
+ (
+ trunc_int_for_mode
+ (~(mask << pos)
+ & GET_MODE_MASK (mode), mode))),
GEN_INT (src << pos)));
SUBST (SET_DEST (x), dest);
@@ -3485,7 +3537,24 @@ subst (x, from, to, in_dest, unique_copy)
if (GET_CODE (new) == CLOBBER && XEXP (new, 0) == const0_rtx)
return new;
- SUBST (XEXP (x, i), new);
+ if (GET_CODE (new) == CONST_INT && GET_CODE (x) == SUBREG)
+ {
+ x = simplify_subreg (GET_MODE (x), new,
+ GET_MODE (SUBREG_REG (x)),
+ SUBREG_BYTE (x));
+ if (! x)
+ abort ();
+ }
+ else if (GET_CODE (new) == CONST_INT
+ && GET_CODE (x) == ZERO_EXTEND)
+ {
+ x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
+ new, GET_MODE (XEXP (x, 0)));
+ if (! x)
+ abort ();
+ }
+ else
+ SUBST (XEXP (x, i), new);
}
}
}
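Why subst must fold (subreg (const_int)) on the spot: once a CONST_INT
replaces the inner operand, the SUBREG's inner mode is unrecoverable, so
simplify_subreg is applied while that mode is still known.  A rough
stand-alone model of the constant case, assuming a lowpart at byte offset 0
(the real routine also handles nonzero offsets and endianness):

    #include <assert.h>
    #include <stdio.h>

    /* Extract the low WIDTH bits of VAL and re-canonicalize, as
       simplify_subreg does for a lowpart subreg of a constant.  */
    static long long
    subreg_lowpart_of_const (long long val, int width)
    {
      unsigned long long mask = ((unsigned long long) 1 << width) - 1;
      unsigned long long v = (unsigned long long) val & mask;

      if (v & ((unsigned long long) 1 << (width - 1)))
        v |= ~mask;
      return (long long) v;
    }

    int
    main (void)
    {
      /* (subreg:QI (const_int 0x1234) 0) folds to (const_int 0x34).  */
      assert (subreg_lowpart_of_const (0x1234, 8) == 0x34);
      /* (subreg:QI (const_int 0x12f0) 0) folds to (const_int -16),
         since 0xf0 sign-extends in the 8-bit mode.  */
      assert (subreg_lowpart_of_const (0x12f0, 8) == -16);
      printf ("subreg-of-constant folding checks pass\n");
      return 0;
    }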
@@ -3807,6 +3876,13 @@ combine_simplify_rtx (x, op0_mode, last, in_dest)
return temp;
}
+ /* Don't change the mode of the MEM if that would change the meaning
+ of the address. */
+ if (GET_CODE (SUBREG_REG (x)) == MEM
+ && (MEM_VOLATILE_P (SUBREG_REG (x))
+ || mode_dependent_address_p (XEXP (SUBREG_REG (x), 0))))
+ return gen_rtx_CLOBBER (mode, const0_rtx);
+
/* Note that we cannot do any narrowing for non-constants since
we might have been counting on using the fact that some bits were
zero. We now do this in the SET. */
@@ -5963,6 +6039,9 @@ make_extraction (mode, inner, pos, pos_rtx, len,
if (mode == tmode)
return new;
+ if (GET_CODE (new) == CONST_INT)
+ return GEN_INT (trunc_int_for_mode (INTVAL (new), mode));
+
/* If we know that no extraneous bits are set, and that the high
bit is not set, convert the extraction to the cheaper of
sign and zero extension, that are equivalent in these cases. */
@@ -6753,33 +6832,12 @@ force_to_mode (x, mode, mask, reg, just_select)
smask |= (HOST_WIDE_INT) -1 << width;
if (GET_CODE (XEXP (x, 1)) == CONST_INT
- && exact_log2 (- smask) >= 0)
- {
-#ifdef STACK_BIAS
- if (STACK_BIAS
- && (XEXP (x, 0) == stack_pointer_rtx
- || XEXP (x, 0) == frame_pointer_rtx))
- {
- int sp_alignment = STACK_BOUNDARY / BITS_PER_UNIT;
- unsigned HOST_WIDE_INT sp_mask = GET_MODE_MASK (mode);
-
- sp_mask &= ~(sp_alignment - 1);
- if ((sp_mask & ~smask) == 0
- && ((INTVAL (XEXP (x, 1)) - STACK_BIAS) & ~smask) != 0)
- return force_to_mode (plus_constant (XEXP (x, 0),
- ((INTVAL (XEXP (x, 1)) -
- STACK_BIAS) & smask)
- + STACK_BIAS),
- mode, smask, reg, next_select);
- }
-#endif
- if ((nonzero_bits (XEXP (x, 0), mode) & ~smask) == 0
- && (INTVAL (XEXP (x, 1)) & ~smask) != 0)
- return force_to_mode (plus_constant (XEXP (x, 0),
- (INTVAL (XEXP (x, 1))
- & smask)),
- mode, smask, reg, next_select);
- }
+ && exact_log2 (- smask) >= 0
+ && (nonzero_bits (XEXP (x, 0), mode) & ~smask) == 0
+ && (INTVAL (XEXP (x, 1)) & ~smask) != 0)
+ return force_to_mode (plus_constant (XEXP (x, 0),
+ (INTVAL (XEXP (x, 1)) & smask)),
+ mode, smask, reg, next_select);
}
/* ... fall through ... */
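The surviving PLUS case rests on an arithmetic identity: when the mask's
complement is a power of two (exact_log2 (-smask) >= 0) and the low bits of
the first operand are known zero, the constant addend can be pre-masked
without changing the masked sum.  A self-contained check of that identity:

    #include <assert.h>
    #include <stdio.h>

    int
    main (void)
    {
      unsigned int m = 0xfffffff0u;  /* -m is a power of two */
      unsigned int x = 0x12340;      /* nonzero_bits (x) & ~m == 0 */
      unsigned int c = 0x10007;      /* has bits outside the mask */

      /* No carry can flow from the discarded low bits of C into the
         surviving bits, because X contributes nothing there.  */
      assert ((x & ~m) == 0);
      assert (((x + c) & m) == ((x + (c & m)) & m));
      printf ("constant pre-masking identity holds\n");
      return 0;
    }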
@@ -7322,7 +7380,7 @@ if_then_else_cond (x, ptrue, pfalse)
&& GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
&& exact_log2 (nz = nonzero_bits (x, mode)) >= 0)
{
- *ptrue = GEN_INT (nz), *pfalse = const0_rtx;
+ *ptrue = GEN_INT (trunc_int_for_mode (nz, mode)), *pfalse = const0_rtx;
return x;
}
@@ -7431,6 +7489,50 @@ known_cond (x, cond, reg, val)
}
}
}
+ else if (code == SUBREG)
+ {
+ enum machine_mode inner_mode = GET_MODE (SUBREG_REG (x));
+ rtx new, r = known_cond (SUBREG_REG (x), cond, reg, val);
+
+ if (SUBREG_REG (x) != r)
+ {
+ /* We must simplify subreg here, before we lose track of the
+ original inner_mode. */
+ new = simplify_subreg (GET_MODE (x), r,
+ inner_mode, SUBREG_BYTE (x));
+ if (new)
+ return new;
+ else
+ SUBST (SUBREG_REG (x), r);
+ }
+
+ return x;
+ }
+ /* We don't have to handle SIGN_EXTEND here, because even in the
+ case of replacing something with a modeless CONST_INT, a
+ CONST_INT is already (supposed to be) a valid sign extension for
+ its narrower mode, which implies it's already properly
+ sign-extended for the wider mode. Now, for ZERO_EXTEND, the
+ story is different. */
+ else if (code == ZERO_EXTEND)
+ {
+ enum machine_mode inner_mode = GET_MODE (XEXP (x, 0));
+ rtx new, r = known_cond (XEXP (x, 0), cond, reg, val);
+
+ if (XEXP (x, 0) != r)
+ {
+ /* We must simplify the zero_extend here, before we lose
+ track of the original inner_mode. */
+ new = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
+ r, inner_mode);
+ if (new)
+ return new;
+ else
+ SUBST (XEXP (x, 0), r);
+ }
+
+ return x;
+ }
fmt = GET_RTX_FORMAT (code);
for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
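The asymmetry the known_cond comment describes follows from the CONST_INT
invariant checked in do_SUBST above: a canonical CONST_INT is already the
sign-extension of its narrow-mode bit pattern, so SIGN_EXTEND of a known
constant changes nothing, while ZERO_EXTEND generally does and must be
folded while the inner mode is still known.  In ordinary C terms:

    #include <assert.h>
    #include <stdio.h>

    int
    main (void)
    {
      signed char narrow = -1;                  /* QImode value, canonically -1 */
      int sext = (int) narrow;                  /* SIGN_EXTEND: still -1 */
      int zext = (int) (unsigned char) narrow;  /* ZERO_EXTEND: 255 */

      assert (sext == -1);
      assert (zext == 255);
      printf ("sign_extend (-1) = %d, zero_extend (-1) = %d\n", sext, zext);
      return 0;
    }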
@@ -7743,14 +7845,23 @@ simplify_and_const_int (x, mode, varop, constop)
int i;
/* Simplify VAROP knowing that we will be only looking at some of the
- bits in it. */
+ bits in it.
+
+ Note by passing in CONSTOP, we guarantee that the bits not set in
+ CONSTOP are not significant and will never be examined. We must
+ ensure that is the case by explicitly masking out those bits
+ before returning. */
varop = force_to_mode (varop, mode, constop, NULL_RTX, 0);
- /* If VAROP is a CLOBBER, we will fail so return it; if it is a
- CONST_INT, we are done. */
- if (GET_CODE (varop) == CLOBBER || GET_CODE (varop) == CONST_INT)
+ /* If VAROP is a CLOBBER, we will fail so return it. */
+ if (GET_CODE (varop) == CLOBBER)
return varop;
+ /* If VAROP is a CONST_INT, then we need to apply the mask in CONSTOP
+ to VAROP and return the new constant. */
+ if (GET_CODE (varop) == CONST_INT)
+ return GEN_INT (trunc_int_for_mode (INTVAL (varop) & constop, mode));
+
/* See what bits may be nonzero in VAROP. Unlike the general case of
a call to nonzero_bits, here we don't care about bits outside
MODE. */
@@ -7916,40 +8027,28 @@ nonzero_bits (x, mode)
nonzero &= GET_MODE_MASK (ptr_mode);
#endif
-#ifdef STACK_BOUNDARY
- /* If this is the stack pointer, we may know something about its
- alignment. If PUSH_ROUNDING is defined, it is possible for the
- stack to be momentarily aligned only to that amount, so we pick
- the least alignment. */
-
- /* We can't check for arg_pointer_rtx here, because it is not
- guaranteed to have as much alignment as the stack pointer.
- In particular, in the Irix6 n64 ABI, the stack has 128 bit
- alignment but the argument pointer has only 64 bit alignment. */
-
- if ((x == frame_pointer_rtx
- || x == stack_pointer_rtx
- || x == hard_frame_pointer_rtx
- || (REGNO (x) >= FIRST_VIRTUAL_REGISTER
- && REGNO (x) <= LAST_VIRTUAL_REGISTER))
-#ifdef STACK_BIAS
- && !STACK_BIAS
-#endif
- )
+ /* Include declared information about alignment of pointers. */
+ /* ??? We don't properly preserve REG_POINTER changes across
+ pointer-to-integer casts, so we can't trust it except for
+ things that we know must be pointers. See execute/960116-1.c. */
+ if ((x == stack_pointer_rtx
+ || x == frame_pointer_rtx
+ || x == arg_pointer_rtx)
+ && REGNO_POINTER_ALIGN (REGNO (x)))
{
- int sp_alignment = STACK_BOUNDARY / BITS_PER_UNIT;
+ unsigned HOST_WIDE_INT alignment
+ = REGNO_POINTER_ALIGN (REGNO (x)) / BITS_PER_UNIT;
#ifdef PUSH_ROUNDING
- if (REGNO (x) == STACK_POINTER_REGNUM && PUSH_ARGS)
- sp_alignment = MIN (PUSH_ROUNDING (1), sp_alignment);
+ /* If PUSH_ROUNDING is defined, it is possible for the
+ stack to be momentarily aligned only to that amount,
+ so we pick the least alignment. */
+ if (x == stack_pointer_rtx && PUSH_ARGS)
+ alignment = MIN (PUSH_ROUNDING (1), alignment);
#endif
- /* We must return here, otherwise we may get a worse result from
- one of the choices below. There is nothing useful below as
- far as the stack pointer is concerned. */
- return nonzero &= ~(sp_alignment - 1);
+ nonzero &= ~(alignment - 1);
}
-#endif
/* If X is a register whose nonzero bits value is current, use it.
Otherwise, if X is a register whose value we can find, use that
@@ -7957,14 +8056,16 @@ nonzero_bits (x, mode)
for this register. */
if (reg_last_set_value[REGNO (x)] != 0
- && reg_last_set_mode[REGNO (x)] == mode
+ && (reg_last_set_mode[REGNO (x)] == mode
+ || (GET_MODE_CLASS (reg_last_set_mode[REGNO (x)]) == MODE_INT
+ && GET_MODE_CLASS (mode) == MODE_INT))
&& (reg_last_set_label[REGNO (x)] == label_tick
|| (REGNO (x) >= FIRST_PSEUDO_REGISTER
&& REG_N_SETS (REGNO (x)) == 1
&& ! REGNO_REG_SET_P (BASIC_BLOCK (0)->global_live_at_start,
REGNO (x))))
&& INSN_CUID (reg_last_set[REGNO (x)]) < subst_low_cuid)
- return reg_last_set_nonzero_bits[REGNO (x)];
+ return reg_last_set_nonzero_bits[REGNO (x)] & nonzero;
tem = get_last_value (x);
@@ -7990,7 +8091,7 @@ nonzero_bits (x, mode)
| ((HOST_WIDE_INT) (-1)
<< GET_MODE_BITSIZE (GET_MODE (x))));
#endif
- return nonzero_bits (tem, mode);
+ return nonzero_bits (tem, mode) & nonzero;
}
else if (nonzero_sign_valid && reg_nonzero_bits[REGNO (x)])
{
@@ -8099,8 +8200,14 @@ nonzero_bits (x, mode)
case XOR: case IOR:
case UMIN: case UMAX: case SMIN: case SMAX:
- nonzero &= (nonzero_bits (XEXP (x, 0), mode)
- | nonzero_bits (XEXP (x, 1), mode));
+ {
+ unsigned HOST_WIDE_INT nonzero0 = nonzero_bits (XEXP (x, 0), mode);
+
+ /* Don't call nonzero_bits for the second time if it cannot change
+ anything. */
+ if ((nonzero & nonzero0) != nonzero)
+ nonzero &= (nonzero0 | nonzero_bits (XEXP (x, 1), mode));
+ }
break;
case PLUS: case MINUS:
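The short-circuit above relies on a small set-algebra fact: once nonzero is
already a subset of nonzero0, intersecting with nonzero0 | anything cannot
clear further bits, so the second recursive call is provably redundant.
Checked directly:

    #include <assert.h>
    #include <stdio.h>

    int
    main (void)
    {
      unsigned int nonzero = 0x00ff;
      unsigned int nonzero0 = 0x0fff;  /* nonzero & nonzero0 == nonzero */
      unsigned int nonzero1 = 0xdead;  /* arbitrary second-operand bits */

      assert ((nonzero & nonzero0) == nonzero);
      assert ((nonzero & (nonzero0 | nonzero1)) == nonzero);
      printf ("second nonzero_bits call is redundant here\n");
      return 0;
    }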
@@ -8128,22 +8235,6 @@ nonzero_bits (x, mode)
switch (code)
{
case PLUS:
-#ifdef STACK_BIAS
- if (STACK_BIAS
- && (XEXP (x, 0) == stack_pointer_rtx
- || XEXP (x, 0) == frame_pointer_rtx)
- && GET_CODE (XEXP (x, 1)) == CONST_INT)
- {
- int sp_alignment = STACK_BOUNDARY / BITS_PER_UNIT;
-
- nz0 = (GET_MODE_MASK (mode) & ~(sp_alignment - 1));
- nz1 = INTVAL (XEXP (x, 1)) - STACK_BIAS;
- width0 = floor_log2 (nz0) + 1;
- width1 = floor_log2 (nz1) + 1;
- low0 = floor_log2 (nz0 & -nz0);
- low1 = floor_log2 (nz1 & -nz1);
- }
-#endif
result_width = MAX (width0, width1) + 1;
result_low = MIN (low0, low1);
break;
@@ -9590,7 +9681,7 @@ recog_for_combine (pnewpat, insn, pnotes)
int num_clobbers_to_add = 0;
int i;
rtx notes = 0;
- rtx old_notes;
+ rtx dummy_insn;
/* If PAT is a PARALLEL, check to see if it contains the CLOBBER
we use to indicate that something didn't match. If we find such a
@@ -9601,11 +9692,13 @@ recog_for_combine (pnewpat, insn, pnotes)
&& XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
return -1;
- /* Remove the old notes prior to trying to recognize the new pattern. */
- old_notes = REG_NOTES (insn);
- REG_NOTES (insn) = 0;
+ /* *pnewpat does not have to be actual PATTERN (insn), so make a dummy
+ instruction for pattern recognition. */
+ dummy_insn = shallow_copy_rtx (insn);
+ PATTERN (dummy_insn) = pat;
+ REG_NOTES (dummy_insn) = 0;
- insn_code_number = recog (pat, insn, &num_clobbers_to_add);
+ insn_code_number = recog (pat, dummy_insn, &num_clobbers_to_add);
/* If it isn't, there is the possibility that we previously had an insn
that clobbered some register as a side effect, but the combined
@@ -9630,15 +9723,14 @@ recog_for_combine (pnewpat, insn, pnotes)
if (pos == 1)
pat = XVECEXP (pat, 0, 0);
- insn_code_number = recog (pat, insn, &num_clobbers_to_add);
+ PATTERN (dummy_insn) = pat;
+ insn_code_number = recog (pat, dummy_insn, &num_clobbers_to_add);
}
/* Recognize all noop sets, these will be killed by followup pass. */
if (insn_code_number < 0 && GET_CODE (pat) == SET && set_noop_p (pat))
insn_code_number = NOOP_MOVE_INSN_CODE, num_clobbers_to_add = 0;
- REG_NOTES (insn) = old_notes;
-
/* If we had any clobbers to add, make a new pattern that contains
them. Then check to make sure that all of them are dead. */
if (num_clobbers_to_add)
@@ -10775,9 +10867,9 @@ simplify_comparison (code, pop0, pop1)
&& XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
&& (tmode = mode_for_size (mode_width - INTVAL (XEXP (op0, 1)),
MODE_INT, 1)) != BLKmode
- && ((unsigned HOST_WIDE_INT) const_op <= GET_MODE_MASK (tmode)
- || ((unsigned HOST_WIDE_INT) -const_op
- <= GET_MODE_MASK (tmode))))
+ && (((unsigned HOST_WIDE_INT) const_op
+ + (GET_MODE_MASK (tmode) >> 1) + 1)
+ <= GET_MODE_MASK (tmode)))
{
op0 = gen_lowpart_for_combine (tmode, XEXP (XEXP (op0, 0), 0));
continue;
@@ -10794,9 +10886,9 @@ simplify_comparison (code, pop0, pop1)
&& XEXP (op0, 1) == XEXP (XEXP (XEXP (op0, 0), 0), 1)
&& (tmode = mode_for_size (mode_width - INTVAL (XEXP (op0, 1)),
MODE_INT, 1)) != BLKmode
- && ((unsigned HOST_WIDE_INT) const_op <= GET_MODE_MASK (tmode)
- || ((unsigned HOST_WIDE_INT) -const_op
- <= GET_MODE_MASK (tmode))))
+ && (((unsigned HOST_WIDE_INT) const_op
+ + (GET_MODE_MASK (tmode) >> 1) + 1)
+ <= GET_MODE_MASK (tmode)))
{
rtx inner = XEXP (XEXP (XEXP (op0, 0), 0), 0);
rtx add_const = XEXP (XEXP (op0, 0), 1);
@@ -10821,10 +10913,18 @@ simplify_comparison (code, pop0, pop1)
&& mode_width <= HOST_BITS_PER_WIDE_INT
&& (nonzero_bits (XEXP (op0, 0), mode)
& (((HOST_WIDE_INT) 1 << INTVAL (XEXP (op0, 1))) - 1)) == 0
- && (const_op == 0
- || (floor_log2 (const_op) + INTVAL (XEXP (op0, 1))
- < mode_width)))
- {
+ && (((unsigned HOST_WIDE_INT) const_op
+ + (GET_CODE (op0) != LSHIFTRT
+ ? ((GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1)) >> 1)
+ + 1)
+ : 0))
+ <= GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1))))
+ {
+ /* If the shift was logical, then we must make the condition
+ unsigned. */
+ if (GET_CODE (op0) == LSHIFTRT)
+ code = unsigned_condition (code);
+
const_op <<= INTVAL (XEXP (op0, 1));
op1 = GEN_INT (const_op);
op0 = XEXP (op0, 0);
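The comparison hunks above replace a two-sided range test with a single
overflow-proof predicate: const_op fits the signed range of tmode exactly
when (unsigned) const_op + (mask >> 1) + 1 <= mask, with the addition
deliberately wrapping in unsigned arithmetic.  A stand-alone check for an
8-bit mode (unsigned long long stands in for unsigned HOST_WIDE_INT):

    #include <assert.h>
    #include <stdio.h>

    static int
    fits_signed (unsigned long long c, unsigned long long mask)
    {
      return c + (mask >> 1) + 1 <= mask;
    }

    int
    main (void)
    {
      unsigned long long qimask = 0xff;

      assert (fits_signed (127, qimask));                        /* SCHAR_MAX */
      assert (fits_signed ((unsigned long long) -128, qimask));  /* SCHAR_MIN */
      assert (!fits_signed (128, qimask));
      assert (!fits_signed ((unsigned long long) -129, qimask));
      printf ("predicate accepts exactly [-128, 127]\n");
      return 0;
    }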
@@ -10853,38 +10953,56 @@ simplify_comparison (code, pop0, pop1)
/* Now make any compound operations involved in this comparison. Then,
check for an outermost SUBREG on OP0 that is not doing anything or is
- paradoxical. The latter case can only occur when it is known that the
- "extra" bits will be zero. Therefore, it is safe to remove the SUBREG.
- We can never remove a SUBREG for a non-equality comparison because the
- sign bit is in a different place in the underlying object. */
+ paradoxical. The latter transformation must only be performed when
+ it is known that the "extra" bits will be the same in op0 and op1 or
+ that they don't matter. There are three cases to consider:
+
+ 1. SUBREG_REG (op0) is a register. In this case the bits are don't
+ care bits and we can assume they have any convenient value. So
+ making the transformation is safe.
+
+ 2. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is not defined.
+ In this case the upper bits of op0 are undefined. We should not make
+ the simplification in that case as we do not know the contents of
+ those bits.
+
+ 3. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is defined and not
+ NIL. In that case we know those bits are zeros or ones. We must
+ also be sure that they are the same as the upper bits of op1.
+
+ We can never remove a SUBREG for a non-equality comparison because
+ the sign bit is in a different place in the underlying object. */
op0 = make_compound_operation (op0, op1 == const0_rtx ? COMPARE : SET);
op1 = make_compound_operation (op1, SET);
if (GET_CODE (op0) == SUBREG && subreg_lowpart_p (op0)
+ /* Case 3 above, to sometimes allow (subreg (mem x)), isn't
+ implemented. */
+ && GET_CODE (SUBREG_REG (op0)) == REG
&& GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT
&& GET_MODE_CLASS (GET_MODE (SUBREG_REG (op0))) == MODE_INT
- && (code == NE || code == EQ)
- && ((GET_MODE_SIZE (GET_MODE (op0))
- > GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0))))))
+ && (code == NE || code == EQ))
{
- op0 = SUBREG_REG (op0);
- op1 = gen_lowpart_for_combine (GET_MODE (op0), op1);
- }
+ if (GET_MODE_SIZE (GET_MODE (op0))
+ > GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0))))
+ {
+ op0 = SUBREG_REG (op0);
+ op1 = gen_lowpart_for_combine (GET_MODE (op0), op1);
+ }
+ else if ((GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
+ <= HOST_BITS_PER_WIDE_INT)
+ && (nonzero_bits (SUBREG_REG (op0),
+ GET_MODE (SUBREG_REG (op0)))
+ & ~GET_MODE_MASK (GET_MODE (op0))) == 0)
+ {
+ tem = gen_lowpart_for_combine (GET_MODE (SUBREG_REG (op0)), op1);
- else if (GET_CODE (op0) == SUBREG && subreg_lowpart_p (op0)
- && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT
- && GET_MODE_CLASS (GET_MODE (SUBREG_REG (op0))) == MODE_INT
- && (code == NE || code == EQ)
- && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
- <= HOST_BITS_PER_WIDE_INT)
- && (nonzero_bits (SUBREG_REG (op0), GET_MODE (SUBREG_REG (op0)))
- & ~GET_MODE_MASK (GET_MODE (op0))) == 0
- && (tem = gen_lowpart_for_combine (GET_MODE (SUBREG_REG (op0)),
- op1),
- (nonzero_bits (tem, GET_MODE (SUBREG_REG (op0)))
- & ~GET_MODE_MASK (GET_MODE (op0))) == 0))
- op0 = SUBREG_REG (op0), op1 = tem;
+ if ((nonzero_bits (tem, GET_MODE (SUBREG_REG (op0)))
+ & ~GET_MODE_MASK (GET_MODE (op0))) == 0)
+ op0 = SUBREG_REG (op0), op1 = tem;
+ }
+ }
/* We now do the opposite procedure: Some machines don't have compare
insns in all modes. If OP0's mode is an integer mode smaller than a
@@ -10902,14 +11020,22 @@ simplify_comparison (code, pop0, pop1)
tmode = GET_MODE_WIDER_MODE (tmode))
if (have_insn_for (COMPARE, tmode))
{
+ int zero_extended;
+
/* If the only nonzero bits in OP0 and OP1 are those in the
narrower mode and this is an equality or unsigned comparison,
we can use the wider mode. Similarly for sign-extended
values, in which case it is true for all comparisons. */
- if (((code == EQ || code == NE
- || code == GEU || code == GTU || code == LEU || code == LTU)
- && (nonzero_bits (op0, tmode) & ~GET_MODE_MASK (mode)) == 0
- && (nonzero_bits (op1, tmode) & ~GET_MODE_MASK (mode)) == 0)
+ zero_extended = ((code == EQ || code == NE
+ || code == GEU || code == GTU
+ || code == LEU || code == LTU)
+ && (nonzero_bits (op0, tmode)
+ & ~GET_MODE_MASK (mode)) == 0
+ && ((GET_CODE (op1) == CONST_INT
+ || (nonzero_bits (op1, tmode)
+ & ~GET_MODE_MASK (mode)) == 0)));
+
+ if (zero_extended
|| ((num_sign_bit_copies (op0, tmode)
> GET_MODE_BITSIZE (tmode) - GET_MODE_BITSIZE (mode))
&& (num_sign_bit_copies (op1, tmode)
@@ -10926,6 +11052,8 @@ simplify_comparison (code, pop0, pop1)
XEXP (op0, 1)));
op0 = gen_lowpart_for_combine (tmode, op0);
+ if (zero_extended && GET_CODE (op1) == CONST_INT)
+ op1 = GEN_INT (INTVAL (op1) & GET_MODE_MASK (mode));
op1 = gen_lowpart_for_combine (tmode, op1);
break;
}
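Why the widened constant is masked when zero_extended holds: the widened
operand carries the narrow mode's bit pattern, not its sign-extended value,
so a negative CONST_INT must be reduced modulo the narrow mask before the
comparison.  A minimal illustration:

    #include <assert.h>
    #include <stdio.h>

    int
    main (void)
    {
      unsigned char narrow = 0xff;
      int wide = (int) narrow;       /* zero-extended operand: 255 */

      assert (wide != -1);           /* unmasked widened constant misses */
      assert (wide == (-1 & 0xff));  /* masked constant compares right */
      printf ("widened comparison uses %d\n", -1 & 0xff);
      return 0;
    }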
@@ -11123,9 +11251,13 @@ record_value_for_reg (reg, insn, value)
if (value)
{
+ enum machine_mode mode = GET_MODE (reg);
subst_low_cuid = INSN_CUID (insn);
- reg_last_set_mode[regno] = GET_MODE (reg);
- reg_last_set_nonzero_bits[regno] = nonzero_bits (value, GET_MODE (reg));
+ reg_last_set_mode[regno] = mode;
+ if (GET_MODE_CLASS (mode) == MODE_INT
+ && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
+ mode = nonzero_bits_mode;
+ reg_last_set_nonzero_bits[regno] = nonzero_bits (value, mode);
reg_last_set_sign_bit_copies[regno]
= num_sign_bit_copies (value, GET_MODE (reg));
}
@@ -11612,6 +11744,7 @@ mark_used_regs_combine (x)
case CONST_INT:
case CONST:
case CONST_DOUBLE:
+ case CONST_VECTOR:
case PC:
case ADDR_VEC:
case ADDR_DIFF_VEC: