===================================================================
@@ -527,7 +527,7 @@ mode_to_bytes (machine_mode mode)
/* Return the base GET_MODE_BITSIZE value for MODE. */
-ALWAYS_INLINE unsigned short
+ALWAYS_INLINE poly_uint16
mode_to_bits (machine_mode mode)
{
return mode_to_bytes (mode) * BITS_PER_UNIT;
@@ -600,7 +600,29 @@ #define GET_MODE_SIZE(MODE) (mode_to_byt
/* Get the size in bits of an object of mode MODE. */
-#define GET_MODE_BITSIZE(MODE) (mode_to_bits (MODE))
+#if ONLY_FIXED_SIZE_MODES
+#define GET_MODE_BITSIZE(MODE) ((unsigned short) mode_to_bits (MODE).coeffs[0])
+#else
+ALWAYS_INLINE poly_uint16
+GET_MODE_BITSIZE (machine_mode mode)
+{
+ return mode_to_bits (mode);
+}
+
+template<typename T>
+ALWAYS_INLINE typename if_poly<typename T::measurement_type>::t
+GET_MODE_BITSIZE (const T &mode)
+{
+ return mode_to_bits (mode);
+}
+
+template<typename T>
+ALWAYS_INLINE typename if_nonpoly<typename T::measurement_type>::t
+GET_MODE_BITSIZE (const T &mode)
+{
+ return mode_to_bits (mode).coeffs[0];
+}
+#endif
/* Get the number of value bits of an object of mode MODE. */
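
For reviewers unfamiliar with the dispatch above: when ONLY_FIXED_SIZE_MODES is 0,
the overload chosen depends on the argument's measurement_type, so the general
machine_mode query returns a poly_uint16 while mode classes whose sizes are always
compile-time constants keep returning a plain integer via .coeffs[0].  The snippet
below is a minimal standalone model of that idiom; poly2, the *_model types and the
driver are illustrative stand-ins, not the real machmode.h definitions.

// Standalone model of the if_poly / if_nonpoly overload selection used
// in the machmode.h hunk above.  poly2 stands in for poly_uint16.
#include <cstdio>

struct poly2 { unsigned short coeffs[2]; };  // value = coeffs[0] + coeffs[1] * x

template<typename T> struct if_poly {};
template<> struct if_poly<poly2> { typedef poly2 t; };

template<typename T> struct if_nonpoly {};
template<> struct if_nonpoly<unsigned short> { typedef unsigned short t; };

// Underlying query: always produces a (possibly degenerate) polynomial.
static poly2
mode_to_bits_model (int bytes)
{
  return { { (unsigned short) (bytes * 8), 0 } };
}

// Fixed-size mode classes advertise a scalar measurement_type ...
struct scalar_int_mode_model
{
  typedef unsigned short measurement_type;   // size is always constant
  int bytes;
};

// ... while the general mode class advertises a polynomial one.
struct machine_mode_model
{
  typedef poly2 measurement_type;            // size may involve x
  int bytes;
};

// Callers holding a fixed-size mode keep getting a plain integer back.
template<typename T>
typename if_nonpoly<typename T::measurement_type>::t
bitsize_model (const T &mode)
{
  return mode_to_bits_model (mode.bytes).coeffs[0];
}

// Callers holding a general mode get the full polynomial.
template<typename T>
typename if_poly<typename T::measurement_type>::t
bitsize_model (const T &mode)
{
  return mode_to_bits_model (mode.bytes);
}

int main ()
{
  scalar_int_mode_model si = { 4 };            // e.g. SImode
  machine_mode_model any = { 4 };
  unsigned short fixed = bitsize_model (si);   // plain 32
  poly2 maybe_var = bitsize_model (any);       // { 32, 0 }
  printf ("%u vs %u + %u*x\n", (unsigned) fixed,
	  (unsigned) maybe_var.coeffs[0], (unsigned) maybe_var.coeffs[1]);
  return 0;
}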
===================================================================
@@ -2835,12 +2835,11 @@ check_sibcall_argument_overlap (rtx_insn
bool
shift_return_value (machine_mode mode, bool left_p, rtx value)
{
- HOST_WIDE_INT shift;
-
gcc_assert (REG_P (value) && HARD_REGISTER_P (value));
machine_mode value_mode = GET_MODE (value);
- shift = GET_MODE_BITSIZE (value_mode) - GET_MODE_BITSIZE (mode);
- if (shift == 0)
+ poly_int64 shift = GET_MODE_BITSIZE (value_mode) - GET_MODE_BITSIZE (mode);
+
+ if (known_zero (shift))
return false;
/* Use ashr rather than lshr for right shifts. This is for the benefit
===================================================================
@@ -7675,8 +7675,9 @@ make_extraction (machine_mode mode, rtx
are the same as for a register operation, since at present we don't
have named patterns for aligned memory structures. */
struct extraction_insn insn;
- if (get_best_reg_extraction_insn (&insn, pattern,
- GET_MODE_BITSIZE (inner_mode), mode))
+ unsigned int inner_size;
+ if (GET_MODE_BITSIZE (inner_mode).is_constant (&inner_size)
+ && get_best_reg_extraction_insn (&insn, pattern, inner_size, mode))
{
wanted_inner_reg_mode = insn.struct_mode.require ();
pos_mode = insn.pos_mode;
@@ -7712,9 +7713,11 @@ make_extraction (machine_mode mode, rtx
If it's a MEM we need to recompute POS relative to that.
However, if we're extracting from (or inserting into) a register,
we want to recompute POS relative to wanted_inner_mode. */
- int width = (MEM_P (inner)
- ? GET_MODE_BITSIZE (is_mode)
- : GET_MODE_BITSIZE (wanted_inner_mode));
+ int width;
+ if (!MEM_P (inner))
+ width = GET_MODE_BITSIZE (wanted_inner_mode);
+ else if (!GET_MODE_BITSIZE (is_mode).is_constant (&width))
+ return NULL_RTX;
if (pos_rtx == 0)
pos = width - len - pos;
===================================================================
@@ -1728,7 +1728,7 @@ find_shift_sequence (poly_int64 access_s
/* Try a wider mode if truncating the store mode to NEW_MODE
requires a real instruction. */
- if (GET_MODE_BITSIZE (new_mode) < GET_MODE_BITSIZE (store_mode)
+ if (may_lt (GET_MODE_SIZE (new_mode), GET_MODE_SIZE (store_mode))
&& !TRULY_NOOP_TRUNCATION_MODES_P (new_mode, store_mode))
continue;
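
Most of the scalar comparisons in this patch become may_* or must_* calls, and the
direction matters: a bail-out like the one above has to fire whenever the relation
might hold, whereas code that relies on a relation has to use the "must" form.
Below is a minimal standalone model of the two predicates, with sizes of the form
a + b*x and the runtime indeterminate x assumed non-negative; the real definitions
live in poly-int.h, everything here is an illustrative sketch only.

// Model of the may_lt / must_lt pair used in find_shift_sequence above.
#include <cassert>

struct psize { unsigned long a, b; };  // value = a + b*x, x >= 0 unknown

// must_lt: the relation holds for every possible x.
static bool
must_lt (psize p, psize q)
{
  return p.a < q.a && p.b <= q.b;
}

// may_lt: the relation holds for at least one possible x.
static bool
may_lt (psize p, psize q)
{
  return p.a < q.a || p.b < q.b;
}

int main ()
{
  psize qi = { 1, 0 };      // fixed 1-byte mode
  psize vnx = { 16, 16 };   // scalable mode: 16 + 16*x bytes
  psize wider = { 16, 32 }; // same minimum size, but grows faster

  assert (must_lt (qi, vnx) && may_lt (qi, vnx));
  assert (!may_lt (vnx, vnx));
  // find_shift_sequence keeps looking for a wider mode whenever NEW_MODE
  // *might* be narrower than STORE_MODE, which is the conservative choice.
  assert (may_lt (vnx, wider) && !must_lt (vnx, wider));
  return 0;
}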
===================================================================
@@ -15339,7 +15339,8 @@ mem_loc_descriptor (rtx rtl, machine_mod
We output CONST_DOUBLEs as blocks. */
if (mode == VOIDmode
|| (GET_MODE (rtl) == VOIDmode
- && GET_MODE_BITSIZE (mode) != HOST_BITS_PER_DOUBLE_INT))
+ && may_ne (GET_MODE_BITSIZE (mode),
+ HOST_BITS_PER_DOUBLE_INT)))
break;
type_die = base_type_for_mode (mode, SCALAR_INT_MODE_P (mode));
if (type_die == NULL)
===================================================================
@@ -866,7 +866,7 @@ store_integral_bit_field (rtx op0, opt_s
if (!MEM_P (op0)
&& !reverse
&& lowpart_bit_field_p (bitnum, bitsize, op0_mode.require ())
- && bitsize == GET_MODE_BITSIZE (fieldmode)
+ && must_eq (bitsize, GET_MODE_BITSIZE (fieldmode))
&& optab_handler (movstrict_optab, fieldmode) != CODE_FOR_nothing)
{
struct expand_operand ops[2];
@@ -1637,9 +1637,10 @@ extract_bit_field_1 (rtx str_rtx, poly_u
if (GET_MODE_INNER (new_mode) != GET_MODE_INNER (tmode))
{
scalar_mode inner_mode = GET_MODE_INNER (tmode);
- unsigned int nunits = (GET_MODE_BITSIZE (GET_MODE (op0))
- / GET_MODE_UNIT_BITSIZE (tmode));
- if (!mode_for_vector (inner_mode, nunits).exists (&new_mode)
+ poly_uint64 nunits;
+ if (!multiple_p (GET_MODE_BITSIZE (GET_MODE (op0)),
+ GET_MODE_UNIT_BITSIZE (tmode), &nunits)
+ || !mode_for_vector (inner_mode, nunits).exists (&new_mode)
|| !VECTOR_MODE_P (new_mode)
|| GET_MODE_SIZE (new_mode) != GET_MODE_SIZE (GET_MODE (op0))
|| GET_MODE_INNER (new_mode) != GET_MODE_INNER (tmode)
@@ -2042,9 +2043,9 @@ extract_bit_field (rtx str_rtx, poly_uin
machine_mode mode1;
/* Handle -fstrict-volatile-bitfields in the cases where it applies. */
- if (GET_MODE_BITSIZE (GET_MODE (str_rtx)) > 0)
+ if (maybe_nonzero (GET_MODE_BITSIZE (GET_MODE (str_rtx))))
mode1 = GET_MODE (str_rtx);
- else if (target && GET_MODE_BITSIZE (GET_MODE (target)) > 0)
+ else if (target && maybe_nonzero (GET_MODE_BITSIZE (GET_MODE (target))))
mode1 = GET_MODE (target);
else
mode1 = tmode;
@@ -2360,7 +2361,7 @@ extract_low_bits (machine_mode mode, mac
if (GET_MODE_CLASS (mode) == MODE_CC || GET_MODE_CLASS (src_mode) == MODE_CC)
return NULL_RTX;
- if (GET_MODE_BITSIZE (mode) == GET_MODE_BITSIZE (src_mode)
+ if (must_eq (GET_MODE_BITSIZE (mode), GET_MODE_BITSIZE (src_mode))
&& targetm.modes_tieable_p (mode, src_mode))
{
rtx x = gen_lowpart_common (mode, src);
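
The nunits computation above is typical of how divisions are handled in this patch:
instead of dividing a possibly non-constant bitsize and hoping the division was
exact, multiple_p checks divisibility and hands back the quotient in one step.
A standalone sketch of that idiom under the same simplified a + b*x model follows;
the real multiple_p is in poly-int.h, this is only an illustration.

// Model of the multiple_p idiom used in extract_bit_field_1 above.
#include <cassert>

struct psize { unsigned long a, b; };  // value = a + b*x

// True iff P is an exact multiple of FACTOR for every x; on success the
// quotient is stored through *QUOT.
static bool
multiple_p (psize p, unsigned long factor, psize *quot)
{
  if (factor == 0 || p.a % factor != 0 || p.b % factor != 0)
    return false;
  quot->a = p.a / factor;
  quot->b = p.b / factor;
  return true;
}

int main ()
{
  psize bits = { 128, 128 };  // e.g. an SVE vector: 128 + 128*x bits
  psize nunits;
  assert (multiple_p (bits, 32, &nunits));   // 32-bit units
  assert (nunits.a == 4 && nunits.b == 4);   // 4 + 4*x of them
  assert (!multiple_p (bits, 48, &nunits));  // not an exact multiple: punt
  return 0;
}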
===================================================================
@@ -245,7 +245,8 @@ convert_move (rtx to, rtx from, int unsi
if (VECTOR_MODE_P (to_mode) || VECTOR_MODE_P (from_mode))
{
- gcc_assert (GET_MODE_BITSIZE (from_mode) == GET_MODE_BITSIZE (to_mode));
+ gcc_assert (must_eq (GET_MODE_BITSIZE (from_mode),
+ GET_MODE_BITSIZE (to_mode)));
if (VECTOR_MODE_P (to_mode))
from = simplify_gen_subreg (to_mode, from, GET_MODE (from), 0);
@@ -698,7 +699,8 @@ convert_modes (machine_mode mode, machin
subreg operation. */
if (VECTOR_MODE_P (mode) && GET_MODE (x) == VOIDmode)
{
- gcc_assert (GET_MODE_BITSIZE (mode) == GET_MODE_BITSIZE (oldmode));
+ gcc_assert (must_eq (GET_MODE_BITSIZE (mode),
+ GET_MODE_BITSIZE (oldmode)));
return simplify_gen_subreg (mode, x, oldmode, 0);
}
@@ -3677,7 +3679,8 @@ emit_move_insn_1 (rtx x, rtx y)
only safe when simplify_subreg can convert MODE constants into integer
constants. At present, it can only do this reliably if the value
fits within a HOST_WIDE_INT. */
- if (!CONSTANT_P (y) || GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
+ if (!CONSTANT_P (y)
+ || must_le (GET_MODE_BITSIZE (mode), HOST_BITS_PER_WIDE_INT))
{
rtx_insn *ret = emit_move_via_integer (mode, x, y, lra_in_progress);
@@ -4620,8 +4623,9 @@ optimize_bitfield_assignment_op (poly_ui
machine_mode mode1, rtx str_rtx,
tree to, tree src, bool reverse)
{
+  /* STR_MODE need not be scalar, so its bitsize may not be constant.  */
machine_mode str_mode = GET_MODE (str_rtx);
- unsigned int str_bitsize = GET_MODE_BITSIZE (str_mode);
+ unsigned int str_bitsize;
tree op0, op1;
rtx value, result;
optab binop;
@@ -4635,6 +4639,7 @@ optimize_bitfield_assignment_op (poly_ui
|| !pbitregion_start.is_constant (&bitregion_start)
|| !pbitregion_end.is_constant (&bitregion_end)
|| bitsize >= BITS_PER_WORD
+ || !GET_MODE_BITSIZE (str_mode).is_constant (&str_bitsize)
|| str_bitsize > BITS_PER_WORD
|| TREE_SIDE_EFFECTS (to)
|| TREE_THIS_VOLATILE (to))
@@ -5147,7 +5152,7 @@ expand_assignment (tree to, tree from, b
}
else
{
- rtx temp = assign_stack_temp (GET_MODE (to_rtx),
+ rtx temp = assign_stack_temp (to_mode,
GET_MODE_SIZE (GET_MODE (to_rtx)));
write_complex_part (temp, XEXP (to_rtx, 0), false);
write_complex_part (temp, XEXP (to_rtx, 1), true);
@@ -6878,7 +6883,8 @@ store_field (rtx target, poly_int64 bits
{
tree type = TREE_TYPE (exp);
if (INTEGRAL_TYPE_P (type)
- && TYPE_PRECISION (type) < GET_MODE_BITSIZE (TYPE_MODE (type))
+ && may_ne (TYPE_PRECISION (type),
+ GET_MODE_BITSIZE (TYPE_MODE (type)))
&& must_eq (bitsize, TYPE_PRECISION (type)))
{
tree op = gimple_assign_rhs1 (nop_def);
@@ -10268,8 +10274,8 @@ expand_expr_real_1 (tree exp, rtx target
if (known_zero (offset)
&& !reverse
&& tree_fits_uhwi_p (TYPE_SIZE (type))
- && (GET_MODE_BITSIZE (DECL_MODE (base))
- == tree_to_uhwi (TYPE_SIZE (type))))
+ && must_eq (GET_MODE_BITSIZE (DECL_MODE (base)),
+ tree_to_uhwi (TYPE_SIZE (type))))
return expand_expr (build1 (VIEW_CONVERT_EXPR, type, base),
target, tmode, modifier);
if (TYPE_MODE (type) == BLKmode)
===================================================================
@@ -4150,7 +4150,7 @@ optimize_bit_field_compare (location_t l
|| !known_size_p (plbitsize)
|| !plbitsize.is_constant (&lbitsize)
|| !plbitpos.is_constant (&lbitpos)
- || lbitsize == GET_MODE_BITSIZE (lmode)
+ || must_eq (lbitsize, GET_MODE_BITSIZE (lmode))
|| offset != 0
|| TREE_CODE (linner) == PLACEHOLDER_EXPR
|| lvolatilep)
@@ -5275,8 +5275,9 @@ merge_ranges (int *pin_p, tree *plow, tr
switch (TREE_CODE (TREE_TYPE (low0)))
{
case ENUMERAL_TYPE:
- if (TYPE_PRECISION (TREE_TYPE (low0))
- != GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (low0))))
+ if (may_ne (TYPE_PRECISION (TREE_TYPE (low0)),
+ GET_MODE_BITSIZE
+ (TYPE_MODE (TREE_TYPE (low0)))))
break;
/* FALLTHROUGH */
case INTEGER_TYPE:
@@ -5298,8 +5299,9 @@ merge_ranges (int *pin_p, tree *plow, tr
switch (TREE_CODE (TREE_TYPE (high1)))
{
case ENUMERAL_TYPE:
- if (TYPE_PRECISION (TREE_TYPE (high1))
- != GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (high1))))
+ if (may_ne (TYPE_PRECISION (TREE_TYPE (high1)),
+ GET_MODE_BITSIZE
+ (TYPE_MODE (TREE_TYPE (high1)))))
break;
/* FALLTHROUGH */
case INTEGER_TYPE:
===================================================================
@@ -3616,7 +3616,7 @@ optimize_atomic_compare_exchange_p (gimp
/* Don't optimize floating point expected vars, VIEW_CONVERT_EXPRs
might not preserve all the bits. See PR71716. */
|| SCALAR_FLOAT_TYPE_P (etype)
- || TYPE_PRECISION (etype) != GET_MODE_BITSIZE (TYPE_MODE (etype)))
+ || may_ne (TYPE_PRECISION (etype), GET_MODE_BITSIZE (TYPE_MODE (etype))))
return false;
tree weak = gimple_call_arg (stmt, 3);
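
The same conservative split governs the predicate-style uses elsewhere in the
patch: a punt such as the precision check above stays safe when it fires whenever
the two values might differ (may_ne), whereas a gcc_assert, like the one in the
handle_builtin_memcmp hunk further down, may only assert a relation that is
guaranteed to hold (must_eq).  A small standalone sketch of that pairing, again
with a two-coefficient stand-in for poly_uint16:

// Model of the may_ne / must_eq pairing: punts use "may", asserts use "must".
#include <cassert>

struct psize { unsigned long a, b; };  // value = a + b*x, x >= 0 unknown

static bool
must_eq (psize p, unsigned long c)
{
  return p.b == 0 && p.a == c;
}

static bool
may_ne (psize p, unsigned long c)
{
  return !must_eq (p, c);
}

int main ()
{
  psize si_bits = { 32, 0 };    // SImode: always 32 bits
  psize vec_bits = { 32, 32 };  // scalable mode: 32 + 32*x bits
  unsigned long precision = 32;

  // Punt whenever the precision might not fill the mode.
  assert (!may_ne (si_bits, precision));  // safe to optimize
  assert (may_ne (vec_bits, precision));  // must give up

  // Assert only what is guaranteed for every x.
  assert (must_eq (si_bits, 32));
  return 0;
}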
===================================================================
@@ -3121,10 +3121,11 @@ find_reloads (rtx_insn *insn, int replac
|| (REG_P (operand)
&& REGNO (operand) >= FIRST_PSEUDO_REGISTER))
&& (WORD_REGISTER_OPERATIONS
- || ((GET_MODE_BITSIZE (GET_MODE (operand))
- < BIGGEST_ALIGNMENT)
- && paradoxical_subreg_p (operand_mode[i],
- GET_MODE (operand)))
+		       || (may_lt
+			   (GET_MODE_BITSIZE (GET_MODE (operand)),
+			    BIGGEST_ALIGNMENT)
+			   && paradoxical_subreg_p (operand_mode[i],
+						    GET_MODE (operand)))
|| BYTES_BIG_ENDIAN
|| ((GET_MODE_SIZE (operand_mode[i])
<= UNITS_PER_WORD)
===================================================================
@@ -2146,7 +2146,11 @@ alter_reg (int i, int from_reg, bool don
unsigned int inherent_align = GET_MODE_ALIGNMENT (mode);
machine_mode wider_mode = wider_subreg_mode (mode, reg_max_ref_mode[i]);
poly_uint64 total_size = GET_MODE_SIZE (wider_mode);
- unsigned int min_align = GET_MODE_BITSIZE (reg_max_ref_mode[i]);
+ /* ??? Seems strange to derive the minimum alignment from the size,
+ but that's the traditional behavior. For polynomial-size modes,
+ the natural extension is to use the minimum possible size. */
+ unsigned int min_align
+ = constant_lower_bound (GET_MODE_BITSIZE (reg_max_ref_mode[i]));
poly_int64 adjust = 0;
something_was_spilled = true;
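
constant_lower_bound gives the smallest value a polynomial size can take (its value
at x = 0, since the indeterminate is non-negative), which is why it is a reasonable
stand-in for the old size-derived alignment here.  A tiny standalone sketch under
the same simplified model, for illustration only:

// Model of constant_lower_bound as used for min_align above.
#include <cassert>

struct psize { unsigned long a, b; };  // value = a + b*x, x >= 0

static unsigned long
constant_lower_bound (psize p)
{
  return p.a;
}

int main ()
{
  psize di = { 64, 0 };      // DImode: always 64 bits
  psize vnx = { 128, 128 };  // scalable vector: at least 128 bits
  assert (constant_lower_bound (di) == 64);
  assert (constant_lower_bound (vnx) == 128);
  return 0;
}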
===================================================================
@@ -410,7 +410,6 @@ int_mode_for_mode (machine_mode mode)
bitwise_mode_for_mode (machine_mode mode)
{
/* Quick exit if we already have a suitable mode. */
- unsigned int bitsize = GET_MODE_BITSIZE (mode);
scalar_int_mode int_mode;
if (is_a <scalar_int_mode> (mode, &int_mode)
&& GET_MODE_BITSIZE (int_mode) <= MAX_FIXED_MODE_SIZE)
@@ -419,6 +418,8 @@ bitwise_mode_for_mode (machine_mode mode
/* Reuse the sanity checks from int_mode_for_mode. */
gcc_checking_assert ((int_mode_for_mode (mode), true));
+ poly_int64 bitsize = GET_MODE_BITSIZE (mode);
+
/* Try to replace complex modes with complex modes. In general we
expect both components to be processed independently, so we only
care whether there is a register for the inner mode. */
@@ -433,7 +434,8 @@ bitwise_mode_for_mode (machine_mode mode
/* Try to replace vector modes with vector modes. Also try using vector
modes if an integer mode would be too big. */
- if (VECTOR_MODE_P (mode) || bitsize > MAX_FIXED_MODE_SIZE)
+ if (VECTOR_MODE_P (mode)
+ || may_gt (bitsize, MAX_FIXED_MODE_SIZE))
{
machine_mode trial = mode;
if ((GET_MODE_CLASS (trial) == MODE_VECTOR_INT
@@ -1771,7 +1773,7 @@ compute_record_mode (tree type)
does not apply to unions. */
if (TREE_CODE (type) == RECORD_TYPE && mode != VOIDmode
&& tree_fits_uhwi_p (TYPE_SIZE (type))
- && GET_MODE_BITSIZE (mode) == tree_to_uhwi (TYPE_SIZE (type)))
+ && must_eq (GET_MODE_BITSIZE (mode), tree_to_uhwi (TYPE_SIZE (type))))
;
else
mode = mode_for_size_tree (TYPE_SIZE (type), MODE_INT, 1).else_blk ();
===================================================================
@@ -1143,7 +1143,7 @@ default_secondary_reload (bool in_p ATTR
default_secondary_memory_needed_mode (machine_mode mode)
{
if (!targetm.lra_p ()
- && GET_MODE_BITSIZE (mode) < BITS_PER_WORD
+ && must_lt (GET_MODE_BITSIZE (mode), BITS_PER_WORD)
&& INTEGRAL_MODE_P (mode))
return mode_for_size (BITS_PER_WORD, GET_MODE_CLASS (mode), 0).require ();
return mode;
===================================================================
@@ -2228,7 +2228,10 @@ predicate_mem_writes (loop_p loop)
tree ref, addr, ptr, mask;
gcall *new_stmt;
gimple_seq stmts = NULL;
- int bitsize = GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (lhs)));
+ machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
+ /* We checked before setting GF_PLF_2 that an equivalent
+ integer mode exists. */
+ int bitsize = GET_MODE_BITSIZE (mode).to_constant ();
ref = TREE_CODE (lhs) == SSA_NAME ? rhs : lhs;
mark_addressable (ref);
addr = force_gimple_operand_gsi (&gsi, build_fold_addr_expr (ref),
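
to_constant, as used above, is the "I already know this is constant" form: it
asserts rather than tests, relying on the earlier GF_PLF_2 check mentioned in the
comment.  When constancy has not been proved, callers instead use is_constant to
test and extract in one go, as the combine.c and optimize_bitfield_assignment_op
hunks do.  A standalone sketch of the pair under the simplified a + b*x model:

// Model of the is_constant / to_constant pair.
#include <cassert>

struct psize { unsigned long a, b; };  // value = a + b*x

// Test for a compile-time-constant value, extracting it on success.
static bool
is_constant (psize p, unsigned long *out)
{
  if (p.b != 0)
    return false;   // really depends on x
  *out = p.a;
  return true;
}

// Extract a value the caller has already proved to be constant.
static unsigned long
to_constant (psize p)
{
  assert (p.b == 0);
  return p.a;
}

int main ()
{
  psize si = { 32, 0 };      // SImode
  psize vnx = { 128, 128 };  // scalable vector
  unsigned long bits;
  assert (is_constant (si, &bits) && bits == 32);
  assert (!is_constant (vnx, &bits));  // caller must take the fallback path
  assert (to_constant (si) == 32);
  return 0;
}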
===================================================================
@@ -2132,7 +2132,7 @@ handle_builtin_memcmp (gimple_stmt_itera
location_t loc = gimple_location (stmt2);
tree type, off;
type = build_nonstandard_integer_type (leni, 1);
- gcc_assert (GET_MODE_BITSIZE (TYPE_MODE (type)) == leni);
+ gcc_assert (must_eq (GET_MODE_BITSIZE (TYPE_MODE (type)), leni));
tree ptrtype = build_pointer_type_for_mode (char_type_node,
ptr_mode, true);
off = build_int_cst (ptrtype, 0);
===================================================================
@@ -3388,8 +3388,8 @@ adjust_bool_pattern (tree var, tree out_
gcc_assert (TREE_CODE_CLASS (rhs_code) == tcc_comparison);
if (TREE_CODE (TREE_TYPE (rhs1)) != INTEGER_TYPE
|| !TYPE_UNSIGNED (TREE_TYPE (rhs1))
- || (TYPE_PRECISION (TREE_TYPE (rhs1))
- != GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (rhs1)))))
+ || may_ne (TYPE_PRECISION (TREE_TYPE (rhs1)),
+ GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (rhs1)))))
{
scalar_mode mode = SCALAR_TYPE_MODE (TREE_TYPE (rhs1));
itype
===================================================================
@@ -3585,7 +3585,7 @@ vectorizable_simd_clone_call (gimple *st
if (simd_clone_subparts (atype)
< simd_clone_subparts (arginfo[i].vectype))
{
- unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (atype));
+ poly_uint64 prec = GET_MODE_BITSIZE (TYPE_MODE (atype));
k = (simd_clone_subparts (arginfo[i].vectype)
/ simd_clone_subparts (atype));
gcc_assert ((k & (k - 1)) == 0);
@@ -3749,7 +3749,8 @@ vectorizable_simd_clone_call (gimple *st
if (simd_clone_subparts (vectype) < nunits)
{
unsigned int k, l;
- unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (vectype));
+ poly_uint64 prec = GET_MODE_BITSIZE (TYPE_MODE (vectype));
+ poly_uint64 bytes = GET_MODE_SIZE (TYPE_MODE (vectype));
k = nunits / simd_clone_subparts (vectype);
gcc_assert ((k & (k - 1)) == 0);
for (l = 0; l < k; l++)
@@ -3759,8 +3760,7 @@ vectorizable_simd_clone_call (gimple *st
{
t = build_fold_addr_expr (new_temp);
t = build2 (MEM_REF, vectype, t,
- build_int_cst (TREE_TYPE (t),
- l * prec / BITS_PER_UNIT));
+ build_int_cst (TREE_TYPE (t), l * bytes));
}
else
t = build3 (BIT_FIELD_REF, vectype, new_temp,
===================================================================
@@ -606,10 +606,13 @@ dead_debug_insert_temp (struct dead_debu
usesp = &cur->next;
*tailp = cur->next;
cur->next = NULL;
+ /* "may" rather than "must" because we want (for example)
+ N V4SFs to win over plain V4SF even though N might be 1. */
+ rtx candidate = *DF_REF_REAL_LOC (cur->use);
if (!reg
- || (GET_MODE_BITSIZE (GET_MODE (reg))
- < GET_MODE_BITSIZE (GET_MODE (*DF_REF_REAL_LOC (cur->use)))))
- reg = *DF_REF_REAL_LOC (cur->use);
+ || may_lt (GET_MODE_BITSIZE (GET_MODE (reg)),
+ GET_MODE_BITSIZE (GET_MODE (candidate))))
+ reg = candidate;
}
else
tailp = &(*tailp)->next;
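
To make the comment above concrete: with V4SF at a fixed 128 bits and "N V4SFs" at
128 + 128*x bits, must_lt would never prefer the scalable candidate because at
runtime the two can be the same size (N == 1), while may_lt does prefer it, which
is exactly what the comment asks for.  A worked instance under the same simplified
model used in the earlier sketches:

// Worked instance of the "may" vs "must" choice in dead_debug_insert_temp.
#include <cassert>

struct psize { unsigned long a, b; };  // value = a + b*x, x >= 0

static bool
must_lt (psize p, psize q)
{
  return p.a < q.a && p.b <= q.b;
}

static bool
may_lt (psize p, psize q)
{
  return p.a < q.a || p.b < q.b;
}

int main ()
{
  psize v4sf = { 128, 0 };      // plain V4SF: always 128 bits
  psize vnx4sf = { 128, 128 };  // "N V4SFs": 128 + 128*x bits, N = x + 1

  // must_lt never holds, because at N == 1 the sizes coincide ...
  assert (!must_lt (v4sf, vnx4sf));
  // ... but may_lt does, so the scalable candidate wins as intended.
  assert (may_lt (v4sf, vnx4sf));
  return 0;
}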
===================================================================
@@ -843,12 +843,10 @@ mergeable_constant_section (machine_mode
unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED,
unsigned int flags ATTRIBUTE_UNUSED)
{
- unsigned int modesize = GET_MODE_BITSIZE (mode);
-
if (HAVE_GAS_SHF_MERGE && flag_merge_constants
&& mode != VOIDmode
&& mode != BLKmode
- && modesize <= align
+ && must_le (GET_MODE_BITSIZE (mode), align)
&& align >= 8
&& align <= 256
&& (align & (align - 1)) == 0)
===================================================================
@@ -468,7 +468,9 @@ #define FASTEST_ALIGNMENT (32)
#define LOCAL_ALIGNMENT(TYPE, ALIGN) \
((GET_MODE_CLASS (TYPE_MODE (TYPE)) == MODE_COMPLEX_INT \
|| GET_MODE_CLASS (TYPE_MODE (TYPE)) == MODE_COMPLEX_FLOAT) \
- ? (unsigned) MIN (BIGGEST_ALIGNMENT, GET_MODE_BITSIZE (TYPE_MODE (TYPE))) \
+ ? (unsigned) MIN (BIGGEST_ALIGNMENT, \
+ GET_MODE_BITSIZE (as_a <fixed_size_mode> \
+ (TYPE_MODE (TYPE)))) \
: (unsigned) DATA_ALIGNMENT(TYPE, ALIGN))
/* Make arrays of chars word-aligned for the same reasons. */
===================================================================
@@ -1298,14 +1298,14 @@ enumerate_modes (void (*f) (const char *
}
/* If no predefined C types were found, register the mode itself. */
- int nunits, precision;
+ int nunits, precision, bitsize;
if (!skip_p
&& GET_MODE_NUNITS (i).is_constant (&nunits)
- && GET_MODE_PRECISION (i).is_constant (&precision))
+ && GET_MODE_PRECISION (i).is_constant (&precision)
+ && GET_MODE_BITSIZE (i).is_constant (&bitsize))
f (GET_MODE_NAME (i), digs, complex_p,
vector_p ? nunits : 0, float_rep,
- precision, GET_MODE_BITSIZE (i),
- GET_MODE_ALIGNMENT (i));
+ precision, bitsize, GET_MODE_ALIGNMENT (i));
}
}
===================================================================
@@ -132,7 +132,8 @@ ubsan_instrument_shift (location_t loc,
/* If this is not a signed operation, don't perform overflow checks.
Also punt on bit-fields. */
if (TYPE_OVERFLOW_WRAPS (type0)
- || GET_MODE_BITSIZE (TYPE_MODE (type0)) != TYPE_PRECISION (type0)
+ || may_ne (GET_MODE_BITSIZE (TYPE_MODE (type0)),
+ TYPE_PRECISION (type0))
|| !sanitize_flags_p (SANITIZE_SHIFT_BASE))
;