From: Ju-Zhe Zhong <juzhe.zhong@rivai.ai>
Hi, Richard and Richi.
Based on the suggestions from Richard:
https://gcc.gnu.org/pipermail/gcc-patches/2023-July/625396.html
This patch choose (1) approach that Richard provided, meaning:
RVV implements cond_* optabs as expanders. RVV therefore supports
both IFN_COND_ADD and IFN_COND_LEN_ADD. No dummy length arguments
are needed at the gimple level.
This approach makes the code much cleaner and more reasonable.
Consider this following case:
void foo (float * __restrict a, float * __restrict b, int * __restrict cond, int n)
{
for (int i = 0; i < n; i++)
if (cond[i])
a[i] = b[i] + a[i];
}
Output of RISC-V (32-bit) GCC (trunk):
<source>:5:21: missed: couldn't vectorize loop
<source>:5:21: missed: not vectorized: control flow in loop.
ARM SVE:
...
mask__27.10_51 = vect__4.9_49 != { 0, ... };
...
vec_mask_and_55 = loop_mask_49 & mask__27.10_51;
...
vect__9.17_62 = .COND_ADD (vec_mask_and_55, vect__6.13_56, vect__8.16_60, vect__6.13_56);
For RVV, we want IR as follows:
...
_68 = .SELECT_VL (ivtmp_66, POLY_INT_CST [4, 4]);
...
mask__27.10_51 = vect__4.9_49 != { 0, ... };
...
vect__9.17_60 = .COND_LEN_ADD (mask__27.10_51, vect__6.13_55, vect__8.16_59, vect__6.13_55, _68, 0);
...
Both len and mask of COND_LEN_ADD are real not dummy.
This patch has been fully tested on the RISC-V port, supporting both COND_* and COND_LEN_*.
Bootstrap and regression testing on x86 also passed.
OK for trunk?
gcc/ChangeLog:
* internal-fn.cc (get_len_internal_fn): New function.
(DEF_INTERNAL_COND_FN): Ditto.
(DEF_INTERNAL_SIGNED_COND_FN): Ditto.
* internal-fn.h (get_len_internal_fn): Ditto.
* tree-vect-stmts.cc (vectorizable_call): Add CALL auto-vectorization.
---
gcc/internal-fn.cc | 24 +++++++++++
gcc/internal-fn.h | 1 +
gcc/tree-vect-stmts.cc | 90 +++++++++++++++++++++++++++++++++++++-----
3 files changed, 106 insertions(+), 9 deletions(-)
@@ -4443,6 +4443,30 @@ get_conditional_internal_fn (internal_fn fn)
}
}
+/* If there exists an internal function like IFN that operates on vectors,
+ but with additional length and bias parameters, return the internal_fn
+ for that function, otherwise return IFN_LAST. */
+internal_fn
+get_len_internal_fn (internal_fn fn)
+{
+ switch (fn)
+ {
+#undef DEF_INTERNAL_COND_FN
+#undef DEF_INTERNAL_SIGNED_COND_FN
+#define DEF_INTERNAL_COND_FN(NAME, ...) \
+ case IFN_COND_##NAME: \
+ return IFN_COND_LEN_##NAME;
+#define DEF_INTERNAL_SIGNED_COND_FN(NAME, ...) \
+ case IFN_COND_##NAME: \
+ return IFN_COND_LEN_##NAME;
+#include "internal-fn.def"
+#undef DEF_INTERNAL_COND_FN
+#undef DEF_INTERNAL_SIGNED_COND_FN
+ default:
+ return IFN_LAST;
+ }
+}
+
/* If IFN implements the conditional form of an unconditional internal
function, return that unconditional function, otherwise return IFN_LAST. */
@@ -224,6 +224,7 @@ extern bool set_edom_supported_p (void);
extern internal_fn get_conditional_internal_fn (tree_code);
extern internal_fn get_conditional_internal_fn (internal_fn);
+extern internal_fn get_len_internal_fn (internal_fn);
extern internal_fn get_conditional_len_internal_fn (tree_code);
extern tree_code conditional_internal_fn_code (internal_fn);
extern internal_fn get_unconditional_internal_fn (internal_fn);
@@ -3540,7 +3540,10 @@ vectorizable_call (vec_info *vinfo,
int reduc_idx = STMT_VINFO_REDUC_IDX (stmt_info);
internal_fn cond_fn = get_conditional_internal_fn (ifn);
+ internal_fn cond_len_fn = get_len_internal_fn (ifn);
+ int len_opno = internal_fn_len_index (cond_len_fn);
vec_loop_masks *masks = (loop_vinfo ? &LOOP_VINFO_MASKS (loop_vinfo) : NULL);
+ vec_loop_lens *lens = (loop_vinfo ? &LOOP_VINFO_LENS (loop_vinfo) : NULL);
if (!vec_stmt) /* transformation not required. */
{
if (slp_node)
@@ -3569,6 +3572,9 @@ vectorizable_call (vec_info *vinfo,
if (reduc_idx >= 0
&& (cond_fn == IFN_LAST
|| !direct_internal_fn_supported_p (cond_fn, vectype_out,
+ OPTIMIZE_FOR_SPEED))
+ && (cond_len_fn == IFN_LAST
+ || !direct_internal_fn_supported_p (cond_len_fn, vectype_out,
OPTIMIZE_FOR_SPEED)))
{
if (dump_enabled_p ())
@@ -3586,8 +3592,14 @@ vectorizable_call (vec_info *vinfo,
tree scalar_mask = NULL_TREE;
if (mask_opno >= 0)
scalar_mask = gimple_call_arg (stmt_info->stmt, mask_opno);
- vect_record_loop_mask (loop_vinfo, masks, nvectors,
- vectype_out, scalar_mask);
+ if (cond_len_fn != IFN_LAST
+ && direct_internal_fn_supported_p (cond_len_fn, vectype_out,
+ OPTIMIZE_FOR_SPEED))
+ vect_record_loop_len (loop_vinfo, lens, nvectors, vectype_out,
+ 1);
+ else
+ vect_record_loop_mask (loop_vinfo, masks, nvectors, vectype_out,
+ scalar_mask);
}
}
return true;
@@ -3603,8 +3615,24 @@ vectorizable_call (vec_info *vinfo,
vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
bool masked_loop_p = loop_vinfo && LOOP_VINFO_FULLY_MASKED_P (loop_vinfo);
+ bool len_loop_p = loop_vinfo && LOOP_VINFO_FULLY_WITH_LENGTH_P (loop_vinfo);
unsigned int vect_nargs = nargs;
- if (masked_loop_p && reduc_idx >= 0)
+ if (len_loop_p)
+ {
+ if (len_opno >= 0)
+ {
+ ifn = cond_len_fn;
+ /* COND_* -> COND_LEN_* takes 2 extra arguments: LEN, BIAS. */
+ vect_nargs += 2;
+ }
+ else if (reduc_idx >= 0)
+ {
+ /* FMA -> COND_LEN_FMA takes 4 extra arguments: MASK, ELSE, LEN, BIAS. */
+ ifn = get_len_internal_fn (cond_fn);
+ vect_nargs += 4;
+ }
+ }
+ else if (masked_loop_p && reduc_idx >= 0)
{
ifn = cond_fn;
vect_nargs += 2;
@@ -3629,7 +3657,18 @@ vectorizable_call (vec_info *vinfo,
FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_oprnd0)
{
int varg = 0;
- if (masked_loop_p && reduc_idx >= 0)
+ if (len_loop_p && reduc_idx >= 0)
+ {
+ /* Always true for SLP. */
+ gcc_assert (ncopies == 1);
+ /* For COND_LEN_* operations used by reduction of
+ CALL vectorization, the LEN argument is the real
+ loop len produced by SELECT_VL or MIN whereas the
+ MASK argument here is the dummy mask. */
+ vargs[varg++]
+ = build_minus_one_cst (truth_type_for (vectype_out));
+ }
+ else if (masked_loop_p && reduc_idx >= 0)
{
unsigned int vec_num = vec_oprnds0.length ();
/* Always true for SLP. */
@@ -3644,7 +3683,7 @@ vectorizable_call (vec_info *vinfo,
vec<tree> vec_oprndsk = vec_defs[k];
vargs[varg++] = vec_oprndsk[i];
}
- if (masked_loop_p && reduc_idx >= 0)
+ if ((masked_loop_p || len_loop_p) && reduc_idx >= 0)
vargs[varg++] = vargs[reduc_idx + 1];
gimple *new_stmt;
if (modifier == NARROW)
@@ -3671,7 +3710,21 @@ vectorizable_call (vec_info *vinfo,
}
else
{
- if (mask_opno >= 0 && masked_loop_p)
+ if (len_opno >= 0 && len_loop_p)
+ {
+ unsigned int vec_num = vec_oprnds0.length ();
+ /* Always true for SLP. */
+ gcc_assert (ncopies == 1);
+ tree len
+ = vect_get_loop_len (loop_vinfo, gsi, lens, vec_num,
+ vectype_out, i, 1);
+ signed char biasval
+ = LOOP_VINFO_PARTIAL_LOAD_STORE_BIAS (loop_vinfo);
+ tree bias = build_int_cst (intQI_type_node, biasval);
+ vargs[len_opno] = len;
+ vargs[len_opno + 1] = bias;
+ }
+ else if (mask_opno >= 0 && masked_loop_p)
{
unsigned int vec_num = vec_oprnds0.length ();
/* Always true for SLP. */
@@ -3701,7 +3754,16 @@ vectorizable_call (vec_info *vinfo,
}
int varg = 0;
- if (masked_loop_p && reduc_idx >= 0)
+ if (len_loop_p && reduc_idx >= 0)
+ {
+ /* For COND_LEN_* operations used by reduction of
+ CALL vectorization, the LEN argument is the real
+ loop len produced by SELECT_VL or MIN whereas the
+ MASK argument here is the dummy mask. */
+ vargs[varg++]
+ = build_minus_one_cst (truth_type_for (vectype_out));
+ }
+ else if (masked_loop_p && reduc_idx >= 0)
vargs[varg++] = vect_get_loop_mask (loop_vinfo, gsi, masks, ncopies,
vectype_out, j);
for (i = 0; i < nargs; i++)
@@ -3716,10 +3778,20 @@ vectorizable_call (vec_info *vinfo,
}
vargs[varg++] = vec_defs[i][j];
}
- if (masked_loop_p && reduc_idx >= 0)
+ if ((masked_loop_p || len_loop_p) && reduc_idx >= 0)
vargs[varg++] = vargs[reduc_idx + 1];
- if (mask_opno >= 0 && masked_loop_p)
+ if (len_opno >= 0 && len_loop_p)
+ {
+ tree len = vect_get_loop_len (loop_vinfo, gsi, lens, ncopies,
+ vectype_out, j, 1);
+ signed char biasval
+ = LOOP_VINFO_PARTIAL_LOAD_STORE_BIAS (loop_vinfo);
+ tree bias = build_int_cst (intQI_type_node, biasval);
+ vargs[len_opno] = len;
+ vargs[len_opno + 1] = bias;
+ }
+ else if (mask_opno >= 0 && masked_loop_p)
{
tree mask = vect_get_loop_mask (loop_vinfo, gsi, masks, ncopies,
vectype_out, j);