===================================================================
@@ -662,25 +662,25 @@ (define_mode_attr vwcore [(V8QI "w") (V
;; Double vector types for ALLX.
(define_mode_attr Vallxd [(QI "8b") (HI "4h") (SI "2s")])

-;; Mode of result of comparison operations.
-(define_mode_attr V_cmp_result [(V8QI "V8QI") (V16QI "V16QI")
- (V4HI "V4HI") (V8HI "V8HI")
- (V2SI "V2SI") (V4SI "V4SI")
- (DI "DI") (V2DI "V2DI")
- (V4HF "V4HI") (V8HF "V8HI")
- (V2SF "V2SI") (V4SF "V4SI")
- (V2DF "V2DI") (DF "DI")
- (SF "SI") (HF "HI")])
+;; Mode with floating-point values replaced by like-sized integers.
+(define_mode_attr V_INT_EQUIV [(V8QI "V8QI") (V16QI "V16QI")
+ (V4HI "V4HI") (V8HI "V8HI")
+ (V2SI "V2SI") (V4SI "V4SI")
+ (DI "DI") (V2DI "V2DI")
+ (V4HF "V4HI") (V8HF "V8HI")
+ (V2SF "V2SI") (V4SF "V4SI")
+ (V2DF "V2DI") (DF "DI")
+                               (SF "SI") (HF "HI")])

-;; Lower case mode of results of comparison operations.
-(define_mode_attr v_cmp_result [(V8QI "v8qi") (V16QI "v16qi")
- (V4HI "v4hi") (V8HI "v8hi")
- (V2SI "v2si") (V4SI "v4si")
- (DI "di") (V2DI "v2di")
- (V4HF "v4hi") (V8HF "v8hi")
- (V2SF "v2si") (V4SF "v4si")
- (V2DF "v2di") (DF "di")
- (SF "si")])
+;; Lower case mode with floating-point values replaced by like-sized integers.
+(define_mode_attr v_int_equiv [(V8QI "v8qi") (V16QI "v16qi")
+ (V4HI "v4hi") (V8HI "v8hi")
+ (V2SI "v2si") (V4SI "v4si")
+ (DI "di") (V2DI "v2di")
+ (V4HF "v4hi") (V8HF "v8hi")
+ (V2SF "v2si") (V4SF "v4si")
+ (V2DF "v2di") (DF "di")
+                               (SF "si")])

;; Mode for vector conditional operations where the comparison has
;; different type from the lhs.
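For reference, the renamed attribute maps every mode to the integer mode with the same size and lane layout: V4SF -> V4SI, V2DF -> V2DI, HF -> HI, and the identity on integer modes. A rough C model of the reinterpretation that lowpart_subreg (<V_INT_EQUIV>mode, x, <MODE>mode) performs on a scalar SF value (assumes IEEE-754 single precision; the helper name is illustrative, not from GCC):

    #include <stdint.h>
    #include <string.h>

    /* View an SF value through its integer equivalent (SI): same bits,
       integer mode, so bitwise AND/XOR/NOT patterns can be applied.  */
    static inline uint32_t
    sf_as_si (float x)
    {
      uint32_t bits;
      memcpy (&bits, &x, sizeof bits);
      return bits;
    }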
===================================================================
@@ -5196,7 +5196,7 @@ (define_expand "xorsign<mode>3"
"TARGET_FLOAT && TARGET_SIMD"
{

-  machine_mode imode = <V_cmp_result>mode;
+ machine_mode imode = <V_INT_EQUIV>mode;
rtx mask = gen_reg_rtx (imode);
rtx op1x = gen_reg_rtx (imode);
rtx op2x = gen_reg_rtx (imode);
@@ -5205,13 +5205,13 @@ (define_expand "xorsign<mode>3"
emit_move_insn (mask, GEN_INT (trunc_int_for_mode (HOST_WIDE_INT_M1U << bits,
                                                     imode)));

-  emit_insn (gen_and<v_cmp_result>3 (op2x, mask,
- lowpart_subreg (imode, operands[2],
- <MODE>mode)));
- emit_insn (gen_xor<v_cmp_result>3 (op1x,
- lowpart_subreg (imode, operands[1],
- <MODE>mode),
- op2x));
+ emit_insn (gen_and<v_int_equiv>3 (op2x, mask,
+ lowpart_subreg (imode, operands[2],
+ <MODE>mode)));
+ emit_insn (gen_xor<v_int_equiv>3 (op1x,
+ lowpart_subreg (imode, operands[1],
+ <MODE>mode),
+ op2x));
emit_move_insn (operands[0],
lowpart_subreg (<MODE>mode, op1x, imode));
DONE;
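In this expander, bits is the mode bitsize minus one, so the truncated HOST_WIDE_INT_M1U << bits constant has only the sign bit set. The AND isolates the sign of operand 2 and the XOR merges it into operand 1. An elementwise C model for SFmode (assumes IEEE-754 single precision; the helper is illustrative, not GCC code):

    #include <stdint.h>
    #include <string.h>

    static float
    xorsign_sf (float a, float b)
    {
      uint32_t ia, ib, mask = UINT32_C (0x80000000); /* M1U << 31, truncated */
      memcpy (&ia, &a, sizeof ia);
      memcpy (&ib, &b, sizeof ib);
      ib &= mask;           /* gen_and<v_int_equiv>3: keep only b's sign bit  */
      ia ^= ib;             /* gen_xor<v_int_equiv>3: flip a's sign where set */
      memcpy (&a, &ia, sizeof a);
      return a;             /* a with its sign negated when b is negative     */
    }

The vector variant in the next file emits the same sequence lane by lane.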
===================================================================
@@ -364,7 +364,7 @@ (define_expand "xorsign<mode>3"
"TARGET_SIMD"
{

-  machine_mode imode = <V_cmp_result>mode;
+ machine_mode imode = <V_INT_EQUIV>mode;
rtx v_bitmask = gen_reg_rtx (imode);
rtx op1x = gen_reg_rtx (imode);
rtx op2x = gen_reg_rtx (imode);
@@ -375,11 +375,11 @@ (define_expand "xorsign<mode>3"
  int bits = GET_MODE_UNIT_BITSIZE (<MODE>mode) - 1;

  emit_move_insn (v_bitmask,
- aarch64_simd_gen_const_vector_dup (<V_cmp_result>mode,
+ aarch64_simd_gen_const_vector_dup (<V_INT_EQUIV>mode,
                                                     HOST_WIDE_INT_M1U << bits));

-  emit_insn (gen_and<v_cmp_result>3 (op2x, v_bitmask, arg2));
- emit_insn (gen_xor<v_cmp_result>3 (op1x, arg1, op2x));
+ emit_insn (gen_and<v_int_equiv>3 (op2x, v_bitmask, arg2));
+ emit_insn (gen_xor<v_int_equiv>3 (op1x, arg1, op2x));
emit_move_insn (operands[0],
lowpart_subreg (<MODE>mode, op1x, imode));
DONE;
@@ -392,11 +392,11 @@ (define_expand "copysign<mode>3"
(match_operand:VHSDF 2 "register_operand")]
"TARGET_FLOAT && TARGET_SIMD"
{
- rtx v_bitmask = gen_reg_rtx (<V_cmp_result>mode);
+ rtx v_bitmask = gen_reg_rtx (<V_INT_EQUIV>mode);
  int bits = GET_MODE_UNIT_BITSIZE (<MODE>mode) - 1;

  emit_move_insn (v_bitmask,
- aarch64_simd_gen_const_vector_dup (<V_cmp_result>mode,
+ aarch64_simd_gen_const_vector_dup (<V_INT_EQUIV>mode,
HOST_WIDE_INT_M1U << bits));
emit_insn (gen_aarch64_simd_bsl<mode> (operands[0], v_bitmask,
operands[2], operands[1]));
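Here the mask again has only the sign bit of each lane set, and BSL takes the bits of operands[2] where the mask is set (the sign) and the bits of operands[1] elsewhere (exponent and mantissa), which is exactly copysign. A lane-level sketch on the integer view of a lane (helper name illustrative):

    #include <stdint.h>

    /* copysign via bit select: sign from b, magnitude from a.  */
    static inline uint32_t
    copysign_bits (uint32_t a, uint32_t b)
    {
      uint32_t mask = UINT32_C (0x80000000);
      return (b & mask) | (a & ~mask);
    }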
@@ -2319,10 +2319,10 @@ (define_insn "aarch64_simd_bsl<mode>_int
(xor:VSDQ_I_DI
(and:VSDQ_I_DI
(xor:VSDQ_I_DI
- (match_operand:<V_cmp_result> 3 "register_operand" "w,0,w")
+ (match_operand:<V_INT_EQUIV> 3 "register_operand" "w,0,w")
(match_operand:VSDQ_I_DI 2 "register_operand" "w,w,0"))
(match_operand:VSDQ_I_DI 1 "register_operand" "0,w,w"))
- (match_dup:<V_cmp_result> 3)
+ (match_dup:<V_INT_EQUIV> 3)
))]
"TARGET_SIMD"
"@
@@ -2357,7 +2357,7 @@ (define_insn "*aarch64_simd_bsl<mode>_al

(define_expand "aarch64_simd_bsl<mode>"
[(match_operand:VALLDIF 0 "register_operand")
- (match_operand:<V_cmp_result> 1 "register_operand")
+ (match_operand:<V_INT_EQUIV> 1 "register_operand")
(match_operand:VALLDIF 2 "register_operand")
(match_operand:VALLDIF 3 "register_operand")]
"TARGET_SIMD"
@@ -2366,26 +2366,26 @@ (define_expand "aarch64_simd_bsl<mode>"
rtx tmp = operands[0];
if (FLOAT_MODE_P (<MODE>mode))
{
- operands[2] = gen_lowpart (<V_cmp_result>mode, operands[2]);
- operands[3] = gen_lowpart (<V_cmp_result>mode, operands[3]);
- tmp = gen_reg_rtx (<V_cmp_result>mode);
+ operands[2] = gen_lowpart (<V_INT_EQUIV>mode, operands[2]);
+ operands[3] = gen_lowpart (<V_INT_EQUIV>mode, operands[3]);
+ tmp = gen_reg_rtx (<V_INT_EQUIV>mode);
}
- operands[1] = gen_lowpart (<V_cmp_result>mode, operands[1]);
- emit_insn (gen_aarch64_simd_bsl<v_cmp_result>_internal (tmp,
- operands[1],
- operands[2],
- operands[3]));
+ operands[1] = gen_lowpart (<V_INT_EQUIV>mode, operands[1]);
+ emit_insn (gen_aarch64_simd_bsl<v_int_equiv>_internal (tmp,
+ operands[1],
+ operands[2],
+                                                          operands[3]));

  if (tmp != operands[0])
emit_move_insn (operands[0], gen_lowpart (<MODE>mode, tmp));
DONE;
})

-(define_expand "vcond_mask_<mode><v_cmp_result>"
+(define_expand "vcond_mask_<mode><v_int_equiv>"
[(match_operand:VALLDI 0 "register_operand")
(match_operand:VALLDI 1 "nonmemory_operand")
(match_operand:VALLDI 2 "nonmemory_operand")
- (match_operand:<V_cmp_result> 3 "register_operand")]
+ (match_operand:<V_INT_EQUIV> 3 "register_operand")]
"TARGET_SIMD"
{
/* If we have (a = (P) ? -1 : 0);
@@ -2396,7 +2396,7 @@ (define_expand "vcond_mask_<mode><v_cmp_
/* Similarly, (a = (P) ? 0 : -1) is just inverting the generated mask. */
else if (operands[1] == CONST0_RTX (<MODE>mode)
&& operands[2] == CONSTM1_RTX (<MODE>mode))
- emit_insn (gen_one_cmpl<v_cmp_result>2 (operands[0], operands[3]));
+ emit_insn (gen_one_cmpl<v_int_equiv>2 (operands[0], operands[3]));
else
{
if (!REG_P (operands[1]))
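The shortcuts in this expander rely on vec_cmp masks being all-ones or all-zero per lane, so two of the selects degenerate to a plain move and a NOT; only the general case needs BSL. Lane-level sketch (int32_t stands for one lane of the <V_INT_EQUIV> vector):

    #include <stdint.h>

    static inline int32_t sel_m1_0 (int32_t mask) { return mask;  } /* P ? -1 :  0 */
    static inline int32_t sel_0_m1 (int32_t mask) { return ~mask; } /* P ?  0 : -1 */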
@@ -2478,7 +2478,7 @@ (define_expand "vec_cmp<mode><mode>"
case NE:
/* Handle NE as !EQ. */
emit_insn (gen_aarch64_cmeq<mode> (mask, operands[2], operands[3]));
- emit_insn (gen_one_cmpl<v_cmp_result>2 (mask, mask));
+ emit_insn (gen_one_cmpl<v_int_equiv>2 (mask, mask));
      break;

    case EQ:
@@ -2492,8 +2492,8 @@ (define_expand "vec_cmp<mode><mode>"
DONE;
})

-(define_expand "vec_cmp<mode><v_cmp_result>"
- [(set (match_operand:<V_cmp_result> 0 "register_operand")
+(define_expand "vec_cmp<mode><v_int_equiv>"
+ [(set (match_operand:<V_INT_EQUIV> 0 "register_operand")
(match_operator 1 "comparison_operator"
[(match_operand:VDQF 2 "register_operand")
(match_operand:VDQF 3 "nonmemory_operand")]))]
@@ -2501,7 +2501,7 @@ (define_expand "vec_cmp<mode><v_cmp_resu
{
int use_zero_form = 0;
enum rtx_code code = GET_CODE (operands[1]);
- rtx tmp = gen_reg_rtx (<V_cmp_result>mode);
+ rtx tmp = gen_reg_rtx (<V_INT_EQUIV>mode);
rtx (*comparison) (rtx, rtx, rtx) = NULL;
@@ -2587,7 +2587,7 @@ (define_expand "vec_cmp<mode><v_cmp_resu
a NE b -> !(a EQ b) */
gcc_assert (comparison != NULL);
emit_insn (comparison (operands[0], operands[2], operands[3]));
- emit_insn (gen_one_cmpl<v_cmp_result>2 (operands[0], operands[0]));
+ emit_insn (gen_one_cmpl<v_int_equiv>2 (operands[0], operands[0]));
      break;

    case LT:
@@ -2612,8 +2612,8 @@ (define_expand "vec_cmp<mode><v_cmp_resu
emit_insn (gen_aarch64_cmgt<mode> (operands[0],
operands[2], operands[3]));
emit_insn (gen_aarch64_cmgt<mode> (tmp, operands[3], operands[2]));
- emit_insn (gen_ior<v_cmp_result>3 (operands[0], operands[0], tmp));
- emit_insn (gen_one_cmpl<v_cmp_result>2 (operands[0], operands[0]));
+ emit_insn (gen_ior<v_int_equiv>3 (operands[0], operands[0], tmp));
+ emit_insn (gen_one_cmpl<v_int_equiv>2 (operands[0], operands[0]));
      break;

    case UNORDERED:
@@ -2622,15 +2622,15 @@ (define_expand "vec_cmp<mode><v_cmp_resu
emit_insn (gen_aarch64_cmgt<mode> (tmp, operands[2], operands[3]));
emit_insn (gen_aarch64_cmge<mode> (operands[0],
operands[3], operands[2]));
- emit_insn (gen_ior<v_cmp_result>3 (operands[0], operands[0], tmp));
- emit_insn (gen_one_cmpl<v_cmp_result>2 (operands[0], operands[0]));
+ emit_insn (gen_ior<v_int_equiv>3 (operands[0], operands[0], tmp));
+ emit_insn (gen_one_cmpl<v_int_equiv>2 (operands[0], operands[0]));
      break;

    case ORDERED:
emit_insn (gen_aarch64_cmgt<mode> (tmp, operands[2], operands[3]));
emit_insn (gen_aarch64_cmge<mode> (operands[0],
operands[3], operands[2]));
- emit_insn (gen_ior<v_cmp_result>3 (operands[0], operands[0], tmp));
+ emit_insn (gen_ior<v_int_equiv>3 (operands[0], operands[0], tmp));
      break;

    default:
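Only EQ/GE/GT/LE/LT (and their zero forms) have fcm instructions, so the unordered comparisons are assembled from ordered ones: two fcm results are ORed to get "ordered" and a final NOT flips the mask where needed, just as integer NE is handled as ~EQ above. A lane-level C model of the three cases visible in this hunk (sketch, not GCC code); note that C float comparisons, like fcmgt/fcmge, are false whenever an operand is NaN:

    #include <stdint.h>

    static int32_t
    lane_ordered (float a, float b)   /* ORDERED: a > b || b >= a   */
    {
      return -((a > b) | (b >= a));   /* ior of the two fcm masks   */
    }

    static int32_t
    lane_unordered (float a, float b) /* UNORDERED: one_cmpl of it  */
    {
      return ~lane_ordered (a, b);
    }

    static int32_t
    lane_uneq (float a, float b)      /* UNEQ: !(a > b || b > a)    */
    {
      return ~(-((a > b) | (b > a)));
    }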
@@ -2662,7 +2662,7 @@ (define_expand "vcond<mode><mode>"
(match_operand:VALLDI 2 "nonmemory_operand")))]
"TARGET_SIMD"
{
- rtx mask = gen_reg_rtx (<V_cmp_result>mode);
+ rtx mask = gen_reg_rtx (<V_INT_EQUIV>mode);
  enum rtx_code code = GET_CODE (operands[3]);

  /* NE is handled as !EQ in vec_cmp patterns, we can explicitly invert
@@ -2674,10 +2674,10 @@ (define_expand "vcond<mode><mode>"
operands[4], operands[5]);
std::swap (operands[1], operands[2]);
}
- emit_insn (gen_vec_cmp<mode><v_cmp_result> (mask, operands[3],
- operands[4], operands[5]));
- emit_insn (gen_vcond_mask_<mode><v_cmp_result> (operands[0], operands[1],
- operands[2], mask));
+ emit_insn (gen_vec_cmp<mode><v_int_equiv> (mask, operands[3],
+ operands[4], operands[5]));
+ emit_insn (gen_vcond_mask_<mode><v_int_equiv> (operands[0], operands[1],
+ operands[2], mask));
DONE;
})

@@ -2692,7 +2692,7 @@ (define_expand "vcond<v_cmp_mixed><mode>
(match_operand:<V_cmp_mixed> 2 "nonmemory_operand")))]
"TARGET_SIMD"
{
- rtx mask = gen_reg_rtx (<V_cmp_result>mode);
+ rtx mask = gen_reg_rtx (<V_INT_EQUIV>mode);
  enum rtx_code code = GET_CODE (operands[3]);

  /* NE is handled as !EQ in vec_cmp patterns, we can explicitly invert
@@ -2704,9 +2704,9 @@ (define_expand "vcond<v_cmp_mixed><mode>
operands[4], operands[5]);
std::swap (operands[1], operands[2]);
}
- emit_insn (gen_vec_cmp<mode><v_cmp_result> (mask, operands[3],
- operands[4], operands[5]));
- emit_insn (gen_vcond_mask_<v_cmp_mixed><v_cmp_result> (
+ emit_insn (gen_vec_cmp<mode><v_int_equiv> (mask, operands[3],
+ operands[4], operands[5]));
+ emit_insn (gen_vcond_mask_<v_cmp_mixed><v_int_equiv> (
operands[0], operands[1],
                                                 operands[2], mask));
  DONE;
@@ -2737,8 +2737,8 @@ (define_expand "vcondu<mode><mode>"
}
emit_insn (gen_vec_cmp<mode><mode> (mask, operands[3],
operands[4], operands[5]));
- emit_insn (gen_vcond_mask_<mode><v_cmp_result> (operands[0], operands[1],
- operands[2], mask));
+ emit_insn (gen_vcond_mask_<mode><v_int_equiv> (operands[0], operands[1],
+ operands[2], mask));
DONE;
})

@@ -2752,7 +2752,7 @@ (define_expand "vcondu<mode><v_cmp_mixed
(match_operand:VDQF 2 "nonmemory_operand")))]
"TARGET_SIMD"
{
- rtx mask = gen_reg_rtx (<V_cmp_result>mode);
+ rtx mask = gen_reg_rtx (<V_INT_EQUIV>mode);
  enum rtx_code code = GET_CODE (operands[3]);

  /* NE is handled as !EQ in vec_cmp patterns, we can explicitly invert
@@ -2767,8 +2767,8 @@ (define_expand "vcondu<mode><v_cmp_mixed
emit_insn (gen_vec_cmp<v_cmp_mixed><v_cmp_mixed> (
mask, operands[3],
operands[4], operands[5]));
- emit_insn (gen_vcond_mask_<mode><v_cmp_result> (operands[0], operands[1],
- operands[2], mask));
+ emit_insn (gen_vcond_mask_<mode><v_int_equiv> (operands[0], operands[1],
+ operands[2], mask));
DONE;
})

@@ -4208,9 +4208,9 @@ (define_insn "aarch64_<sur>q<r>shr<u>n_n
;; have different ideas of what should be passed to this pattern.

(define_insn "aarch64_cm<optab><mode>"
- [(set (match_operand:<V_cmp_result> 0 "register_operand" "=w,w")
- (neg:<V_cmp_result>
- (COMPARISONS:<V_cmp_result>
+ [(set (match_operand:<V_INT_EQUIV> 0 "register_operand" "=w,w")
+ (neg:<V_INT_EQUIV>
+ (COMPARISONS:<V_INT_EQUIV>
(match_operand:VDQ_I 1 "register_operand" "w,w")
(match_operand:VDQ_I 2 "aarch64_simd_reg_or_zero" "w,ZDz")
)))]
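In RTL a comparison yields 0 or 1 per lane; wrapping it in neg turns that into the 0 / all-ones mask that cmeq/cmge/cmgt actually produce, and the destination uses <V_INT_EQUIV> because a lane mask is integer data even when the inputs are not. Lane-level sketch:

    #include <stdint.h>

    /* (neg (gt x y)): the comparison is 0/1, the negation makes it
       0/-1, i.e. all bits clear or all bits set in the lane.  */
    static inline int32_t
    cmgt_lane (int32_t x, int32_t y)
    {
      return -(x > y);
    }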
@@ -4273,9 +4273,9 @@ (define_insn "*aarch64_cm<optab>di"
;; cm(hs|hi)

(define_insn "aarch64_cm<optab><mode>"
- [(set (match_operand:<V_cmp_result> 0 "register_operand" "=w")
- (neg:<V_cmp_result>
- (UCOMPARISONS:<V_cmp_result>
+ [(set (match_operand:<V_INT_EQUIV> 0 "register_operand" "=w")
+ (neg:<V_INT_EQUIV>
+ (UCOMPARISONS:<V_INT_EQUIV>
(match_operand:VDQ_I 1 "register_operand" "w")
(match_operand:VDQ_I 2 "register_operand" "w")
)))]
@@ -4340,14 +4340,14 @@ (define_insn "*aarch64_cm<optab>di"
;; plus (eq (and x y) 0) -1.

(define_insn "aarch64_cmtst<mode>"
- [(set (match_operand:<V_cmp_result> 0 "register_operand" "=w")
- (plus:<V_cmp_result>
- (eq:<V_cmp_result>
+ [(set (match_operand:<V_INT_EQUIV> 0 "register_operand" "=w")
+ (plus:<V_INT_EQUIV>
+ (eq:<V_INT_EQUIV>
(and:VDQ_I
(match_operand:VDQ_I 1 "register_operand" "w")
(match_operand:VDQ_I 2 "register_operand" "w"))
(match_operand:VDQ_I 3 "aarch64_simd_imm_zero"))
- (match_operand:<V_cmp_result> 4 "aarch64_simd_imm_minus_one")))
+ (match_operand:<V_INT_EQUIV> 4 "aarch64_simd_imm_minus_one")))
]
"TARGET_SIMD"
"cmtst\t%<v>0<Vmtype>, %<v>1<Vmtype>, %<v>2<Vmtype>"
@@ -4408,9 +4408,9 @@ (define_insn "*aarch64_cmtstdi"
;; fcm(eq|ge|gt|le|lt)

(define_insn "aarch64_cm<optab><mode>"
- [(set (match_operand:<V_cmp_result> 0 "register_operand" "=w,w")
- (neg:<V_cmp_result>
- (COMPARISONS:<V_cmp_result>
+ [(set (match_operand:<V_INT_EQUIV> 0 "register_operand" "=w,w")
+ (neg:<V_INT_EQUIV>
+ (COMPARISONS:<V_INT_EQUIV>
(match_operand:VHSDF_HSDF 1 "register_operand" "w,w")
(match_operand:VHSDF_HSDF 2 "aarch64_simd_reg_or_zero" "w,YDz")
)))]
@@ -4426,9 +4426,9 @@ (define_insn "aarch64_cm<optab><mode>"
;; generating fac(ge|gt).

(define_insn "aarch64_fac<optab><mode>"
- [(set (match_operand:<V_cmp_result> 0 "register_operand" "=w")
- (neg:<V_cmp_result>
- (FAC_COMPARISONS:<V_cmp_result>
+ [(set (match_operand:<V_INT_EQUIV> 0 "register_operand" "=w")
+ (neg:<V_INT_EQUIV>
+ (FAC_COMPARISONS:<V_INT_EQUIV>
(abs:VHSDF_HSDF
(match_operand:VHSDF_HSDF 1 "register_operand" "w"))
(abs:VHSDF_HSDF
@@ -5130,7 +5130,7 @@ (define_expand "vec_perm_const<mode>"
[(match_operand:VALL_F16 0 "register_operand")
(match_operand:VALL_F16 1 "register_operand")
(match_operand:VALL_F16 2 "register_operand")
- (match_operand:<V_cmp_result> 3)]
+ (match_operand:<V_INT_EQUIV> 3)]
"TARGET_SIMD"
{
if (aarch64_expand_vec_perm_const (operands[0], operands[1],
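The selector of a permute is a vector of lane indices — integer data regardless of the element type — hence operand 3 uses <V_INT_EQUIV> rather than the data mode. A sketch of the semantics for byte lanes, assuming in-range indices into the concatenation of the two inputs (out-of-range handling is simplified here):

    #include <stdint.h>

    /* r[i] = {a, b}[sel[i]] for an n-lane vector pair.  */
    static void
    vec_perm_model (uint8_t *r, const uint8_t *a, const uint8_t *b,
                    const uint8_t *sel, int n)
    {
      for (int i = 0; i < n; i++)
        r[i] = sel[i] < n ? a[sel[i]] : b[sel[i] - n];
    }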