VECT: Apply LEN_MASK_{LOAD,STORE} into vectorizer

Message ID: 20230619161705.251983-1-juzhe.zhong@rivai.ai
State: Unresolved
Series: VECT: Apply LEN_MASK_{LOAD,STORE} into vectorizer

Checks

Context                  Check     Description
snail/gcc-patch-check    warning   Git am fail log

Commit Message

juzhe.zhong@rivai.ai June 19, 2023, 4:17 p.m. UTC
  From: Ju-Zhe Zhong <juzhe.zhong@rivai.ai>

This patch applies LEN_MASK_{LOAD,STORE} in the vectorizer.
I refactored the gimple IR building to make the code look cleaner.
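For context, a small source-level example (illustrative only, not part of
the patch) of the kind of loop this enables on targets that provide
len_maskload/len_maskstore patterns: the condition supplies the mask and
the loop tail supplies the length, so one LEN_MASK_STORE covers both:

void
foo (int *restrict a, int *restrict b, int *restrict cond, int n)
{
  for (int i = 0; i < n; i++)
    if (cond[i])
      a[i] = b[i];
}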

gcc/ChangeLog:

        * internal-fn.cc (expand_partial_store_optab_fn): Add LEN_MASK_{LOAD,STORE} vectorizer support.
        (internal_load_fn_p): Ditto.
        (internal_store_fn_p): Ditto.
        (internal_fn_mask_index): Ditto.
        (internal_fn_stored_value_index): Ditto.
        (internal_len_load_store_bias): Ditto.
        * optabs-query.cc (can_vec_mask_load_store_p): Ditto.
        (get_len_load_store_mode): Ditto.
        * tree-vect-stmts.cc (check_load_store_for_partial_vectors): Ditto.
        (get_all_ones_mask): New function.
        (vectorizable_store): Add LEN_MASK_{LOAD,STORE} vectorizer support.
        (vectorizable_load): Ditto.

---
 gcc/internal-fn.cc     |  35 +++++-
 gcc/optabs-query.cc    |  25 +++-
 gcc/tree-vect-stmts.cc | 259 +++++++++++++++++++++++++----------------
 3 files changed, 213 insertions(+), 106 deletions(-)
  

Comments

juzhe.zhong@rivai.ai June 19, 2023, 9:57 p.m. UTC | #1
Hi, this patch refactors the gimple IR generation code in tree-vect-stmts.cc.

I realize the code changes quite a lot and I am not sure whether you are happy with it.

Originally, the code looks like:

if (final_mask)
  {
    generate IFN_MASK_LOAD...
  }
else if (loop_len)
  {
    generate IFN_LEN_LOAD...
    handle BIAS.
  }
else
  {
    NORMAL_LOAD
  }

Now, I refactor it as:

if (final_mask || loop_len)
  {
    if (get_len_load_store_mode ().exists ())
      {
        /* LEN_MASK_LOAD or LEN_LOAD.  */
        get len...
        if (LEN_MASK_LOAD)
          {
            get mask...
            generate IFN_LEN_MASK_LOAD...
          }
        else
          {
            generate IFN_LEN_LOAD...
          }
        handle BIAS...
      }
    else
      {
        gcc_assert (final_mask);
        /* MASK_LOAD.  */
      }
  }
else
  {
    NORMAL_LOAD
  }

The reason I refactored it is that LEN_MASK_LOAD and LEN_LOAD share some common code,
and avoiding duplicated code makes the result look more reasonable.
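With that structure, the shared tail only differs in which internal call is
built; roughly (a simplified sketch of the patch, using the argument counts
of the new IFNs):

  /* final_len and bias are computed once for both variants.  */
  if (target supports len_maskload)
    call = gimple_build_call_internal (IFN_LEN_MASK_LOAD, 5, dataref_ptr,
				       ptr, final_len, final_mask, bias);
  else
    call = gimple_build_call_internal (IFN_LEN_LOAD, 4, dataref_ptr,
				       ptr, final_len, bias);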

Bootstrap and regression testing are on the way.


juzhe.zhong@rivai.ai
 
From: juzhe.zhong
Date: 2023-06-20 00:17
To: gcc-patches
CC: rguenther; richard.sandiford; Ju-Zhe Zhong
Subject: [PATCH] VECT: Apply LEN_MASK_{LOAD,STORE} into vectorizer
From: Ju-Zhe Zhong <juzhe.zhong@rivai.ai>
 
This patch applies LEN_MASK_{LOAD,STORE} in the vectorizer.
I refactored the gimple IR building to make the code look cleaner.
 
gcc/ChangeLog:
 
        * internal-fn.cc (expand_partial_store_optab_fn): Add LEN_MASK_{LOAD,STORE} vectorizer support.
        (internal_load_fn_p): Ditto.
        (internal_store_fn_p): Ditto.
        (internal_fn_mask_index): Ditto.
        (internal_fn_stored_value_index): Ditto.
        (internal_len_load_store_bias): Ditto.
        * optabs-query.cc (can_vec_mask_load_store_p): Ditto.
        (get_len_load_store_mode): Ditto.
        * tree-vect-stmts.cc (check_load_store_for_partial_vectors): Ditto.
        (get_all_ones_mask): New function.
        (vectorizable_store): Add LEN_MASK_{LOAD,STORE} vectorizer support.
        (vectorizable_load): Ditto.
 
---
gcc/internal-fn.cc     |  35 +++++-
gcc/optabs-query.cc    |  25 +++-
gcc/tree-vect-stmts.cc | 259 +++++++++++++++++++++++++----------------
3 files changed, 213 insertions(+), 106 deletions(-)
 
diff --git a/gcc/internal-fn.cc b/gcc/internal-fn.cc
index c911ae790cb..e10c21de5f1 100644
--- a/gcc/internal-fn.cc
+++ b/gcc/internal-fn.cc
@@ -2949,7 +2949,7 @@ expand_partial_load_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
  * OPTAB.  */

 static void
-expand_partial_store_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
+expand_partial_store_optab_fn (internal_fn ifn, gcall *stmt, convert_optab optab)
 {
   class expand_operand ops[5];
   tree type, lhs, rhs, maskt, biast;
@@ -2957,7 +2957,7 @@ expand_partial_store_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
   insn_code icode;

   maskt = gimple_call_arg (stmt, 2);
-  rhs = gimple_call_arg (stmt, 3);
+  rhs = gimple_call_arg (stmt, internal_fn_stored_value_index (ifn));
   type = TREE_TYPE (rhs);
   lhs = expand_call_mem_ref (type, stmt, 0);

@@ -4435,6 +4435,7 @@ internal_load_fn_p (internal_fn fn)
     case IFN_GATHER_LOAD:
     case IFN_MASK_GATHER_LOAD:
     case IFN_LEN_LOAD:
+    case IFN_LEN_MASK_LOAD:
       return true;

     default:
@@ -4455,6 +4456,7 @@ internal_store_fn_p (internal_fn fn)
     case IFN_SCATTER_STORE:
     case IFN_MASK_SCATTER_STORE:
     case IFN_LEN_STORE:
+    case IFN_LEN_MASK_STORE:
       return true;

     default:
@@ -4494,6 +4496,10 @@ internal_fn_mask_index (internal_fn fn)
     case IFN_MASK_STORE_LANES:
       return 2;

+    case IFN_LEN_MASK_LOAD:
+    case IFN_LEN_MASK_STORE:
+      return 3;
+
     case IFN_MASK_GATHER_LOAD:
     case IFN_MASK_SCATTER_STORE:
       return 4;
@@ -4519,6 +4525,9 @@ internal_fn_stored_value_index (internal_fn fn)
     case IFN_LEN_STORE:
       return 3;

+    case IFN_LEN_MASK_STORE:
+      return 4;
+
     default:
       return -1;
     }
@@ -4583,13 +4592,31 @@ internal_len_load_store_bias (internal_fn ifn, machine_mode mode)
 {
   optab optab = direct_internal_fn_optab (ifn);
   insn_code icode = direct_optab_handler (optab, mode);
+  int bias_argno = 3;
+  if (icode == CODE_FOR_nothing)
+    {
+      machine_mode mask_mode
+	= targetm.vectorize.get_mask_mode (mode).require ();
+      if (ifn == IFN_LEN_LOAD)
+	{
+	  /* Try LEN_MASK_LOAD.  */
+	  optab = direct_internal_fn_optab (IFN_LEN_MASK_LOAD);
+	}
+      else
+	{
+	  /* Try LEN_MASK_STORE.  */
+	  optab = direct_internal_fn_optab (IFN_LEN_MASK_STORE);
+	}
+      icode = convert_optab_handler (optab, mode, mask_mode);
+      bias_argno = 4;
+    }

   if (icode != CODE_FOR_nothing)
     {
       /* For now we only support biases of 0 or -1.  Try both of them.  */
-      if (insn_operand_matches (icode, 3, GEN_INT (0)))
+      if (insn_operand_matches (icode, bias_argno, GEN_INT (0)))
	return 0;
-      if (insn_operand_matches (icode, 3, GEN_INT (-1)))
+      if (insn_operand_matches (icode, bias_argno, GEN_INT (-1)))
	return -1;
     }
diff --git a/gcc/optabs-query.cc b/gcc/optabs-query.cc
index 276f8408dd7..4394d391200 100644
--- a/gcc/optabs-query.cc
+++ b/gcc/optabs-query.cc
@@ -566,11 +566,14 @@ can_vec_mask_load_store_p (machine_mode mode,
			   bool is_load)
 {
   optab op = is_load ? maskload_optab : maskstore_optab;
+  optab len_op = is_load ? len_maskload_optab : len_maskstore_optab;
   machine_mode vmode;

   /* If mode is vector mode, check it directly.  */
   if (VECTOR_MODE_P (mode))
-    return convert_optab_handler (op, mode, mask_mode) != CODE_FOR_nothing;
+    return convert_optab_handler (op, mode, mask_mode) != CODE_FOR_nothing
+	   || convert_optab_handler (len_op, mode, mask_mode)
+		!= CODE_FOR_nothing;

   /* Otherwise, return true if there is some vector mode with
      the mask load/store supported.  */
@@ -584,7 +587,9 @@ can_vec_mask_load_store_p (machine_mode mode,
   vmode = targetm.vectorize.preferred_simd_mode (smode);
   if (VECTOR_MODE_P (vmode)
       && targetm.vectorize.get_mask_mode (vmode).exists (&mask_mode)
-      && convert_optab_handler (op, vmode, mask_mode) != CODE_FOR_nothing)
+      && (convert_optab_handler (op, vmode, mask_mode) != CODE_FOR_nothing
+	  || convert_optab_handler (len_op, vmode, mask_mode)
+	       != CODE_FOR_nothing))
     return true;

   auto_vector_modes vector_modes;
@@ -592,7 +597,9 @@ can_vec_mask_load_store_p (machine_mode mode,
   for (machine_mode base_mode : vector_modes)
     if (related_vector_mode (base_mode, smode).exists (&vmode)
	&& targetm.vectorize.get_mask_mode (vmode).exists (&mask_mode)
-	&& convert_optab_handler (op, vmode, mask_mode) != CODE_FOR_nothing)
+	&& (convert_optab_handler (op, vmode, mask_mode) != CODE_FOR_nothing
+	    || convert_optab_handler (len_op, vmode, mask_mode)
+		 != CODE_FOR_nothing))
       return true;
   return false;
 }
@@ -608,17 +615,27 @@ opt_machine_mode
 get_len_load_store_mode (machine_mode mode, bool is_load)
 {
   optab op = is_load ? len_load_optab : len_store_optab;
+  optab masked_op = is_load ? len_maskload_optab : len_maskstore_optab;
   gcc_assert (VECTOR_MODE_P (mode));

   /* Check if length in lanes supported for this mode directly.  */
   if (direct_optab_handler (op, mode))
     return mode;

+  /* Check if length in lanes supported by len_maskload/store.  */
+  machine_mode mask_mode;
+  if (targetm.vectorize.get_mask_mode (mode).exists (&mask_mode)
+      && convert_optab_handler (masked_op, mode, mask_mode) != CODE_FOR_nothing)
+    return mode;
+
   /* Check if length in bytes supported for same vector size VnQI.  */
   machine_mode vmode;
   poly_uint64 nunits = GET_MODE_SIZE (mode);
   if (related_vector_mode (mode, QImode, nunits).exists (&vmode)
-      && direct_optab_handler (op, vmode))
+      && (direct_optab_handler (op, vmode)
+	  || (targetm.vectorize.get_mask_mode (vmode).exists (&mask_mode)
+	      && convert_optab_handler (masked_op, vmode, mask_mode)
+		   != CODE_FOR_nothing)))
     return vmode;

   return opt_machine_mode ();
diff --git a/gcc/tree-vect-stmts.cc b/gcc/tree-vect-stmts.cc
index 056a0ecb2be..45bc1e4b5bc 100644
--- a/gcc/tree-vect-stmts.cc
+++ b/gcc/tree-vect-stmts.cc
@@ -1819,16 +1819,17 @@ check_load_store_for_partial_vectors (loop_vec_info loop_vinfo, tree vectype,
   poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
   poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
   machine_mode mask_mode;
+  machine_mode vmode;
   bool using_partial_vectors_p = false;
   if (targetm.vectorize.get_mask_mode (vecmode).exists (&mask_mode)
-      && can_vec_mask_load_store_p (vecmode, mask_mode, is_load))
+      && can_vec_mask_load_store_p (vecmode, mask_mode, is_load)
+      && !get_len_load_store_mode (vecmode, is_load).exists (&vmode))
     {
       nvectors = group_memory_nvectors (group_size * vf, nunits);
       vect_record_loop_mask (loop_vinfo, masks, nvectors, vectype, scalar_mask);
       using_partial_vectors_p = true;
     }
-  machine_mode vmode;
   if (get_len_load_store_mode (vecmode, is_load).exists (&vmode))
     {
       nvectors = group_memory_nvectors (group_size * vf, nunits);
@@ -2809,6 +2810,17 @@ vect_build_zero_merge_argument (vec_info *vinfo,
   return vect_init_vector (vinfo, stmt_info, merge, vectype, NULL);
}
+/* Get all-ones vector mask for corresponding maskmode.  */
+
+static tree
+get_all_ones_mask (machine_mode maskmode)
+{
+  poly_uint64 nunits = GET_MODE_NUNITS (maskmode);
+  tree masktype = build_truth_vector_type_for_mode (nunits, maskmode);
+  tree mask = build_int_cst (TREE_TYPE (masktype), 1);
+  return build_vector_from_val (masktype, mask);
+}
+
/* Build a gather load call while vectorizing STMT_INFO.  Insert new
    instructions before GSI and add them to VEC_STMT.  GS_INFO describes
    the gather load operation.  If the load is conditional, MASK is the
@@ -8945,56 +8957,80 @@ vectorizable_store (vec_info *vinfo,
		}

	      /* Arguments are ready.  Create the new vector stmt.  */
-	      if (final_mask)
+	      if (final_mask || loop_lens)
		{
+		  gcall *call;
		  tree ptr = build_int_cst (ref_type, align * BITS_PER_UNIT);
-		  gcall *call
-		    = gimple_build_call_internal (IFN_MASK_STORE, 4,
-						  dataref_ptr, ptr,
-						  final_mask, vec_oprnd);
-		  gimple_call_set_nothrow (call, true);
-		  vect_finish_stmt_generation (vinfo, stmt_info, call, gsi);
-		  new_stmt = call;
-		}
-	      else if (loop_lens)
-		{
		  machine_mode vmode = TYPE_MODE (vectype);
-		  opt_machine_mode new_ovmode
-		    = get_len_load_store_mode (vmode, false);
-		  machine_mode new_vmode = new_ovmode.require ();
-		  unsigned factor
-		    = (new_ovmode == vmode) ? 1 : GET_MODE_UNIT_SIZE (vmode);
-		  tree final_len
-		    = vect_get_loop_len (loop_vinfo, gsi, loop_lens,
-					 vec_num * ncopies, vectype,
-					 vec_num * j + i, factor);
-		  tree ptr = build_int_cst (ref_type, align * BITS_PER_UNIT);
-		  /* Need conversion if it's wrapped with VnQI.  */
-		  if (vmode != new_vmode)
+		  machine_mode new_vmode = vmode;
+
+		  if (get_len_load_store_mode (vmode, false)
+			.exists (&new_vmode))
		    {
-		      tree new_vtype
-			= build_vector_type_for_mode (unsigned_intQI_type_node,
-						      new_vmode);
-		      tree var
-			= vect_get_new_ssa_name (new_vtype, vect_simple_var);
-		      vec_oprnd
-			= build1 (VIEW_CONVERT_EXPR, new_vtype, vec_oprnd);
-		      gassign *new_stmt
-			= gimple_build_assign (var, VIEW_CONVERT_EXPR,
-					       vec_oprnd);
-		      vect_finish_stmt_generation (vinfo, stmt_info, new_stmt,
-						   gsi);
-		      vec_oprnd = var;
-		    }
+		      tree final_len;
+		      machine_mode maskmode;
+		      unsigned factor = (new_vmode == vmode)
+					  ? 1
+					  : GET_MODE_UNIT_SIZE (vmode);
+		      if (loop_lens)
+			final_len
+			  = vect_get_loop_len (loop_vinfo, gsi, loop_lens,
+					       vec_num * ncopies, vectype,
+					       vec_num * j + i, factor);
+		      else
+			{
+			  tree iv_type = LOOP_VINFO_RGROUP_IV_TYPE (loop_vinfo);
+			  final_len
+			    = build_int_cst (iv_type,
+					     TYPE_VECTOR_SUBPARTS (vectype));
+			}

-		  signed char biasval =
-		    LOOP_VINFO_PARTIAL_LOAD_STORE_BIAS (loop_vinfo);
+		      /* Need conversion if it's wrapped with VnQI.  */
+		      if (vmode != new_vmode)
+			{
+			  tree new_vtype = build_vector_type_for_mode (
+			    unsigned_intQI_type_node, new_vmode);
+			  tree var = vect_get_new_ssa_name (new_vtype,
+							    vect_simple_var);
+			  vec_oprnd
+			    = build1 (VIEW_CONVERT_EXPR, new_vtype, vec_oprnd);
+			  gassign *new_stmt
+			    = gimple_build_assign (var, VIEW_CONVERT_EXPR,
+						   vec_oprnd);
+			  vect_finish_stmt_generation (vinfo, stmt_info,
+						       new_stmt, gsi);
+			  vec_oprnd = var;
+			}
+		      signed char biasval
+			= LOOP_VINFO_PARTIAL_LOAD_STORE_BIAS (loop_vinfo);
+
+		      tree bias = build_int_cst (intQI_type_node, biasval);

-		  tree bias = build_int_cst (intQI_type_node, biasval);
-		  gcall *call
-		    = gimple_build_call_internal (IFN_LEN_STORE, 5, dataref_ptr,
-						  ptr, final_len, vec_oprnd,
-						  bias);
+		      if (targetm.vectorize.get_mask_mode (vmode).exists (
+			    &maskmode)
+			  && can_vec_mask_load_store_p (vmode, maskmode, false))
+			{
+			  if (!final_mask)
+			    final_mask = get_all_ones_mask (maskmode);
+			  call
+			    = gimple_build_call_internal (IFN_LEN_MASK_STORE, 6,
+							  dataref_ptr, ptr,
+							  final_len, final_mask,
+							  vec_oprnd, bias);
+			}
+		      else
+			call = gimple_build_call_internal (IFN_LEN_STORE, 5,
+							   dataref_ptr, ptr,
+							   final_len, vec_oprnd,
+							   bias);
+		    }
+		  else
+		    {
+		      gcc_assert (final_mask);
+		      call = gimple_build_call_internal (IFN_MASK_STORE, 4,
+							 dataref_ptr, ptr,
+							 final_mask, vec_oprnd);
+		    }
		  gimple_call_set_nothrow (call, true);
		  vect_finish_stmt_generation (vinfo, stmt_info, call, gsi);
		  new_stmt = call;
@@ -10304,63 +10340,90 @@ vectorizable_load (vec_info *vinfo,
					      align, misalign);
		    align = least_bit_hwi (misalign | align);

-		    if (final_mask)
+		    if (final_mask
+			|| (loop_lens && memory_access_type != VMAT_INVARIANT))
		      {
-			tree ptr = build_int_cst (ref_type,
-						  align * BITS_PER_UNIT);
-			gcall *call
-			  = gimple_build_call_internal (IFN_MASK_LOAD, 3,
-							dataref_ptr, ptr,
-							final_mask);
-			gimple_call_set_nothrow (call, true);
-			new_stmt = call;
-			data_ref = NULL_TREE;
-		      }
-		    else if (loop_lens && memory_access_type != VMAT_INVARIANT)
-		      {
-			machine_mode vmode = TYPE_MODE (vectype);
-			opt_machine_mode new_ovmode
-			  = get_len_load_store_mode (vmode, true);
-			machine_mode new_vmode = new_ovmode.require ();
-			unsigned factor = (new_ovmode == vmode)
-					    ? 1
-					    : GET_MODE_UNIT_SIZE (vmode);
-			tree final_len
-			  = vect_get_loop_len (loop_vinfo, gsi, loop_lens,
-					       vec_num * ncopies, vectype,
-					       vec_num * j + i, factor);
+			gcall *call;
			tree ptr
			  = build_int_cst (ref_type, align * BITS_PER_UNIT);
+			machine_mode vmode = TYPE_MODE (vectype);
+			machine_mode new_vmode = vmode;
+			if (get_len_load_store_mode (vmode, true)
+			      .exists (&new_vmode))
+			  {
+			    tree final_len;
+			    machine_mode maskmode;
+			    tree qi_type = unsigned_intQI_type_node;
+			    signed char biasval
+			      = LOOP_VINFO_PARTIAL_LOAD_STORE_BIAS (loop_vinfo);
+			    tree bias
+			      = build_int_cst (intQI_type_node, biasval);
+			    unsigned factor = (new_vmode == vmode)
+						? 1
+						: GET_MODE_UNIT_SIZE (vmode);
+			    if (loop_lens)
+			      final_len
+				= vect_get_loop_len (loop_vinfo, gsi, loop_lens,
+						     vec_num * ncopies, vectype,
+						     vec_num * j + i, factor);
+			    else
+			      {
+				tree iv_type
+				  = LOOP_VINFO_RGROUP_IV_TYPE (loop_vinfo);
+				final_len = build_int_cst (
+				  iv_type, TYPE_VECTOR_SUBPARTS (vectype));
+			      }

-			tree qi_type = unsigned_intQI_type_node;
-
-			signed char biasval =
-			  LOOP_VINFO_PARTIAL_LOAD_STORE_BIAS (loop_vinfo);
-
-			tree bias = build_int_cst (intQI_type_node, biasval);
-
-			gcall *call
-			  = gimple_build_call_internal (IFN_LEN_LOAD, 4,
-							dataref_ptr, ptr,
-							final_len, bias);
-			gimple_call_set_nothrow (call, true);
-			new_stmt = call;
-			data_ref = NULL_TREE;
-
-			/* Need conversion if it's wrapped with VnQI.  */
-			if (vmode != new_vmode)
+			    if (targetm.vectorize.get_mask_mode (vmode).exists (
+				  &maskmode)
+				&& can_vec_mask_load_store_p (vmode, maskmode,
+							      false))
+			      {
+				if (!final_mask)
+				  final_mask = get_all_ones_mask (maskmode);
+				call = gimple_build_call_internal (
+				  IFN_LEN_MASK_LOAD, 5, dataref_ptr, ptr,
+				  final_len, final_mask, bias);
+			      }
+			    else
+			      {
+				call
+				  = gimple_build_call_internal (IFN_LEN_LOAD, 4,
+								dataref_ptr,
+								ptr, final_len,
+								bias);
+			      }
+			    gimple_call_set_nothrow (call, true);
+			    new_stmt = call;
+			    data_ref = NULL_TREE;
+			    /* Need conversion if it's wrapped with VnQI.  */
+			    if (vmode != new_vmode)
+			      {
+				tree new_vtype
+				  = build_vector_type_for_mode (qi_type,
+								new_vmode);
+				tree var
+				  = vect_get_new_ssa_name (new_vtype,
+							   vect_simple_var);
+				gimple_set_lhs (call, var);
+				vect_finish_stmt_generation (vinfo, stmt_info,
+							     call, gsi);
+				tree op
+				  = build1 (VIEW_CONVERT_EXPR, vectype, var);
+				new_stmt
+				  = gimple_build_assign (vec_dest,
+							 VIEW_CONVERT_EXPR, op);
+			      }
+			  }
+			else
			  {
-			    tree new_vtype
-			      = build_vector_type_for_mode (qi_type, new_vmode);
-			    tree var = vect_get_new_ssa_name (new_vtype,
-							      vect_simple_var);
-			    gimple_set_lhs (call, var);
-			    vect_finish_stmt_generation (vinfo, stmt_info, call,
-							 gsi);
-			    tree op = build1 (VIEW_CONVERT_EXPR, vectype, var);
-			    new_stmt
-			      = gimple_build_assign (vec_dest,
-						     VIEW_CONVERT_EXPR, op);
+			    gcc_assert (final_mask);
+			    call = gimple_build_call_internal (IFN_MASK_LOAD, 3,
+							       dataref_ptr, ptr,
+							       final_mask);
+			    gimple_call_set_nothrow (call, true);
+			    new_stmt = call;
+			    data_ref = NULL_TREE;
			  }
		      }
		    else
-- 
2.36.1
  
juzhe.zhong@rivai.ai June 20, 2023, 6:01 a.m. UTC | #2
Bootstrap && regression on X86 show no surprising differences.



Richard Biener June 20, 2023, 12:18 p.m. UTC | #3
On Tue, 20 Jun 2023, juzhe.zhong@rivai.ai wrote:

> From: Ju-Zhe Zhong <juzhe.zhong@rivai.ai>
> 
> This patch applies LEN_MASK_{LOAD,STORE} in the vectorizer.
> I refactored the gimple IR building to make the code look cleaner.
> 
> gcc/ChangeLog:
> 
>         * internal-fn.cc (expand_partial_store_optab_fn): Add LEN_MASK_{LOAD,STORE} vectorizer support.
>         (internal_load_fn_p): Ditto.
>         (internal_store_fn_p): Ditto.
>         (internal_fn_mask_index): Ditto.
>         (internal_fn_stored_value_index): Ditto.
>         (internal_len_load_store_bias): Ditto.
>         * optabs-query.cc (can_vec_mask_load_store_p): Ditto.
>         (get_len_load_store_mode): Ditto.
>         * tree-vect-stmts.cc (check_load_store_for_partial_vectors): Ditto.
>         (get_all_ones_mask): New function.
>         (vectorizable_store): Add LEN_MASK_{LOAD,STORE} vectorizer support.
>         (vectorizable_load): Ditto.
> 
> ---
>  gcc/internal-fn.cc     |  35 +++++-
>  gcc/optabs-query.cc    |  25 +++-
>  gcc/tree-vect-stmts.cc | 259 +++++++++++++++++++++++++----------------
>  3 files changed, 213 insertions(+), 106 deletions(-)
> 
> diff --git a/gcc/internal-fn.cc b/gcc/internal-fn.cc
> index c911ae790cb..e10c21de5f1 100644
> --- a/gcc/internal-fn.cc
> +++ b/gcc/internal-fn.cc
> @@ -2949,7 +2949,7 @@ expand_partial_load_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
>   * OPTAB.  */
>  
>  static void
> -expand_partial_store_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
> +expand_partial_store_optab_fn (internal_fn ifn, gcall *stmt, convert_optab optab)
>  {
>    class expand_operand ops[5];
>    tree type, lhs, rhs, maskt, biast;
> @@ -2957,7 +2957,7 @@ expand_partial_store_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
>    insn_code icode;
>  
>    maskt = gimple_call_arg (stmt, 2);
> -  rhs = gimple_call_arg (stmt, 3);
> +  rhs = gimple_call_arg (stmt, internal_fn_stored_value_index (ifn));
>    type = TREE_TYPE (rhs);
>    lhs = expand_call_mem_ref (type, stmt, 0);
>  
> @@ -4435,6 +4435,7 @@ internal_load_fn_p (internal_fn fn)
>      case IFN_GATHER_LOAD:
>      case IFN_MASK_GATHER_LOAD:
>      case IFN_LEN_LOAD:
> +    case IFN_LEN_MASK_LOAD:
>        return true;
>  
>      default:
> @@ -4455,6 +4456,7 @@ internal_store_fn_p (internal_fn fn)
>      case IFN_SCATTER_STORE:
>      case IFN_MASK_SCATTER_STORE:
>      case IFN_LEN_STORE:
> +    case IFN_LEN_MASK_STORE:
>        return true;
>  
>      default:
> @@ -4494,6 +4496,10 @@ internal_fn_mask_index (internal_fn fn)
>      case IFN_MASK_STORE_LANES:
>        return 2;
>  
> +    case IFN_LEN_MASK_LOAD:
> +    case IFN_LEN_MASK_STORE:
> +      return 3;
> +
>      case IFN_MASK_GATHER_LOAD:
>      case IFN_MASK_SCATTER_STORE:
>        return 4;
> @@ -4519,6 +4525,9 @@ internal_fn_stored_value_index (internal_fn fn)
>      case IFN_LEN_STORE:
>        return 3;
>  
> +    case IFN_LEN_MASK_STORE:
> +      return 4;
> +
>      default:
>        return -1;
>      }
> @@ -4583,13 +4592,31 @@ internal_len_load_store_bias (internal_fn ifn, machine_mode mode)
>  {
>    optab optab = direct_internal_fn_optab (ifn);
>    insn_code icode = direct_optab_handler (optab, mode);
> +  int bias_argno = 3;
> +  if (icode == CODE_FOR_nothing)
> +    {
> +      machine_mode mask_mode
> +	= targetm.vectorize.get_mask_mode (mode).require ();
> +      if (ifn == IFN_LEN_LOAD)
> +	{
> +	  /* Try LEN_MASK_LOAD.  */
> +	  optab = direct_internal_fn_optab (IFN_LEN_MASK_LOAD);
> +	}
> +      else
> +	{
> +	  /* Try LEN_MASK_STORE.  */
> +	  optab = direct_internal_fn_optab (IFN_LEN_MASK_STORE);
> +	}
> +      icode = convert_optab_handler (optab, mode, mask_mode);
> +      bias_argno = 4;
> +    }
>  
>    if (icode != CODE_FOR_nothing)
>      {
>        /* For now we only support biases of 0 or -1.  Try both of them.  */
> -      if (insn_operand_matches (icode, 3, GEN_INT (0)))
> +      if (insn_operand_matches (icode, bias_argno, GEN_INT (0)))
>  	return 0;
> -      if (insn_operand_matches (icode, 3, GEN_INT (-1)))
> +      if (insn_operand_matches (icode, bias_argno, GEN_INT (-1)))
>  	return -1;
>      }
>  
> diff --git a/gcc/optabs-query.cc b/gcc/optabs-query.cc
> index 276f8408dd7..4394d391200 100644
> --- a/gcc/optabs-query.cc
> +++ b/gcc/optabs-query.cc
> @@ -566,11 +566,14 @@ can_vec_mask_load_store_p (machine_mode mode,
>  			   bool is_load)
>  {
>    optab op = is_load ? maskload_optab : maskstore_optab;
> +  optab len_op = is_load ? len_maskload_optab : len_maskstore_optab;
>    machine_mode vmode;
>  
>    /* If mode is vector mode, check it directly.  */
>    if (VECTOR_MODE_P (mode))
> -    return convert_optab_handler (op, mode, mask_mode) != CODE_FOR_nothing;
> +    return convert_optab_handler (op, mode, mask_mode) != CODE_FOR_nothing
> +	   || convert_optab_handler (len_op, mode, mask_mode)
> +		!= CODE_FOR_nothing;
>  
>    /* Otherwise, return true if there is some vector mode with
>       the mask load/store supported.  */
> @@ -584,7 +587,9 @@ can_vec_mask_load_store_p (machine_mode mode,
>    vmode = targetm.vectorize.preferred_simd_mode (smode);
>    if (VECTOR_MODE_P (vmode)
>        && targetm.vectorize.get_mask_mode (vmode).exists (&mask_mode)
> -      && convert_optab_handler (op, vmode, mask_mode) != CODE_FOR_nothing)
> +      && (convert_optab_handler (op, vmode, mask_mode) != CODE_FOR_nothing
> +	  || convert_optab_handler (len_op, vmode, mask_mode)
> +	       != CODE_FOR_nothing))
>      return true;
>  
>    auto_vector_modes vector_modes;
> @@ -592,7 +597,9 @@ can_vec_mask_load_store_p (machine_mode mode,
>    for (machine_mode base_mode : vector_modes)
>      if (related_vector_mode (base_mode, smode).exists (&vmode)
>  	&& targetm.vectorize.get_mask_mode (vmode).exists (&mask_mode)
> -	&& convert_optab_handler (op, vmode, mask_mode) != CODE_FOR_nothing)
> +	&& (convert_optab_handler (op, vmode, mask_mode) != CODE_FOR_nothing
> +	    || convert_optab_handler (len_op, vmode, mask_mode)
> +		 != CODE_FOR_nothing))
>        return true;
>    return false;
>  }
> @@ -608,17 +615,27 @@ opt_machine_mode
>  get_len_load_store_mode (machine_mode mode, bool is_load)
>  {
>    optab op = is_load ? len_load_optab : len_store_optab;
> +  optab masked_op = is_load ? len_maskload_optab : len_maskstore_optab;
>    gcc_assert (VECTOR_MODE_P (mode));
>  
>    /* Check if length in lanes supported for this mode directly.  */
>    if (direct_optab_handler (op, mode))
>      return mode;
>  
> +  /* Check if length in lanes supported by len_maskload/store.  */
> +  machine_mode mask_mode;
> +  if (targetm.vectorize.get_mask_mode (mode).exists (&mask_mode)
> +      && convert_optab_handler (masked_op, mode, mask_mode) != CODE_FOR_nothing)
> +    return mode;
> +
>    /* Check if length in bytes supported for same vector size VnQI.  */
>    machine_mode vmode;
>    poly_uint64 nunits = GET_MODE_SIZE (mode);
>    if (related_vector_mode (mode, QImode, nunits).exists (&vmode)
> -      && direct_optab_handler (op, vmode))
> +      && (direct_optab_handler (op, vmode)
> +	  || (targetm.vectorize.get_mask_mode (vmode).exists (&mask_mode)
> +	      && convert_optab_handler (masked_op, vmode, mask_mode)
> +		   != CODE_FOR_nothing)))
>      return vmode;
>  
>    return opt_machine_mode ();
> diff --git a/gcc/tree-vect-stmts.cc b/gcc/tree-vect-stmts.cc
> index 056a0ecb2be..45bc1e4b5bc 100644
> --- a/gcc/tree-vect-stmts.cc
> +++ b/gcc/tree-vect-stmts.cc
> @@ -1819,16 +1819,17 @@ check_load_store_for_partial_vectors (loop_vec_info loop_vinfo, tree vectype,
>    poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
>    poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
>    machine_mode mask_mode;
> +  machine_mode vmode;
>    bool using_partial_vectors_p = false;
>    if (targetm.vectorize.get_mask_mode (vecmode).exists (&mask_mode)
> -      && can_vec_mask_load_store_p (vecmode, mask_mode, is_load))
> +      && can_vec_mask_load_store_p (vecmode, mask_mode, is_load)
> +      && !get_len_load_store_mode (vecmode, is_load).exists (&vmode))
>      {
>        nvectors = group_memory_nvectors (group_size * vf, nunits);
>        vect_record_loop_mask (loop_vinfo, masks, nvectors, vectype, scalar_mask);
>        using_partial_vectors_p = true;
>      }
>  
> -  machine_mode vmode;
>    if (get_len_load_store_mode (vecmode, is_load).exists (&vmode))
>      {
>        nvectors = group_memory_nvectors (group_size * vf, nunits);

Can you instead swap both checks and do an else if for the mask case?
If we ever record both len and mask we will fail the vectorization
anyway.
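
I.e. roughly this order (a sketch of the suggestion; the len branch keeps
the recording logic from the patch):

  if (get_len_load_store_mode (vecmode, is_load).exists (&vmode))
    {
      /* ... record loop lens as before ...  */
      using_partial_vectors_p = true;
    }
  else if (targetm.vectorize.get_mask_mode (vecmode).exists (&mask_mode)
	   && can_vec_mask_load_store_p (vecmode, mask_mode, is_load))
    {
      nvectors = group_memory_nvectors (group_size * vf, nunits);
      vect_record_loop_mask (loop_vinfo, masks, nvectors, vectype,
			     scalar_mask);
      using_partial_vectors_p = true;
    }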

> @@ -2809,6 +2810,17 @@ vect_build_zero_merge_argument (vec_info *vinfo,
>    return vect_init_vector (vinfo, stmt_info, merge, vectype, NULL);
>  }
>  
> +/* Get all-ones vector mask for corresponding maskmode.  */
> +
> +static tree
> +get_all_ones_mask (machine_mode maskmode)
> +{
> +  poly_uint64 nunits = GET_MODE_NUNITS (maskmode);
> +  tree masktype = build_truth_vector_type_for_mode (nunits, maskmode);
> +  tree mask = build_int_cst (TREE_TYPE (masktype), 1);

I think you want build_all_ones_cst (masktype); using '1' looks wrong
at least.  The alternative is constant_boolean_node (true, masktype).
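
E.g. a version along those lines (untested sketch):

  static tree
  get_all_ones_mask (machine_mode maskmode)
  {
    poly_uint64 nunits = GET_MODE_NUNITS (maskmode);
    tree masktype = build_truth_vector_type_for_mode (nunits, maskmode);
    /* All-ones (all-true) constant of the truth vector type.  */
    return build_all_ones_cst (masktype);
  }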

> +  return build_vector_from_val (masktype, mask);
> +}
> +
>  /* Build a gather load call while vectorizing STMT_INFO.  Insert new
>     instructions before GSI and add them to VEC_STMT.  GS_INFO describes
>     the gather load operation.  If the load is conditional, MASK is the
> @@ -8945,56 +8957,80 @@ vectorizable_store (vec_info *vinfo,
>  		}
>  
>  	      /* Arguments are ready.  Create the new vector stmt.  */
> -	      if (final_mask)
> +	      if (final_mask || loop_lens)
>  		{

I've seen the later refactoring patch but it seems garbled white-space
wise (this is also a case where context diffs would make review easier).

As said I'd like to see

        if (loop_lens)
          ... produce 'len' argument ...

        if ((loop_lens || final_mask)
            && use-len-mask)
          ..
        else if (loop_lens && use-len)
          ..
        else if (final_mask && use-mask)
          ..

maybe it is that, but it's hard to identify :/  It would be nice
to do the optab queries only once and compute a
partial_or_mask_vector_ifn in the analysis phase when we register
lens/masks so we don't have to re-do that.  Like the

+			    if (targetm.vectorize.get_mask_mode (vmode).exists (
+				  &maskmode)
+				&& can_vec_mask_load_store_p (vmode, maskmode,
+							      false))

stuff.  If we arrive here we know we can code-generate with lengths
if loop_lens and with masks when final_mask (and with both when both).
What we don't easily know is whether we have to use
LEN_MASK_LOAD when just 'final_mask' or just 'loop_lens' and that's
something we should know as well in an easier way.  With a
partial_or_mask_vector_ifn we could just switch () on that.
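E.g. something like this shape ('partial_ifn' is an invented name for such
a field recorded at analysis time):

  switch (partial_ifn)
    {
    case IFN_LEN_MASK_LOAD:
      /* build the len + mask variant */
      break;
    case IFN_LEN_LOAD:
      /* build the len-only variant */
      break;
    case IFN_MASK_LOAD:
      /* build the mask-only variant */
      break;
    default:
      gcc_unreachable ();
    }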

That said, your patch seems to at least compute the len only once
(this original patch, not the unreadable "refactoring").

Richard.

> +		  gcall *call;
>  		  tree ptr = build_int_cst (ref_type, align * BITS_PER_UNIT);
> -		  gcall *call
> -		    = gimple_build_call_internal (IFN_MASK_STORE, 4,
> -						  dataref_ptr, ptr,
> -						  final_mask, vec_oprnd);
> -		  gimple_call_set_nothrow (call, true);
> -		  vect_finish_stmt_generation (vinfo, stmt_info, call, gsi);
> -		  new_stmt = call;
> -		}
> -	      else if (loop_lens)
> -		{
>  		  machine_mode vmode = TYPE_MODE (vectype);
> -		  opt_machine_mode new_ovmode
> -		    = get_len_load_store_mode (vmode, false);
> -		  machine_mode new_vmode = new_ovmode.require ();
> -		  unsigned factor
> -		    = (new_ovmode == vmode) ? 1 : GET_MODE_UNIT_SIZE (vmode);
> -		  tree final_len
> -		    = vect_get_loop_len (loop_vinfo, gsi, loop_lens,
> -					 vec_num * ncopies, vectype,
> -					 vec_num * j + i, factor);
> -		  tree ptr = build_int_cst (ref_type, align * BITS_PER_UNIT);
> -		  /* Need conversion if it's wrapped with VnQI.  */
> -		  if (vmode != new_vmode)
> +		  machine_mode new_vmode = vmode;
> +
> +		  if (get_len_load_store_mode (vmode, false)
> +			.exists (&new_vmode))
>  		    {
> -		      tree new_vtype
> -			= build_vector_type_for_mode (unsigned_intQI_type_node,
> -						      new_vmode);
> -		      tree var
> -			= vect_get_new_ssa_name (new_vtype, vect_simple_var);
> -		      vec_oprnd
> -			= build1 (VIEW_CONVERT_EXPR, new_vtype, vec_oprnd);
> -		      gassign *new_stmt
> -			= gimple_build_assign (var, VIEW_CONVERT_EXPR,
> -					       vec_oprnd);
> -		      vect_finish_stmt_generation (vinfo, stmt_info, new_stmt,
> -						   gsi);
> -		      vec_oprnd = var;
> -		    }
> +		      tree final_len;
> +		      machine_mode maskmode;
> +		      unsigned factor = (new_vmode == vmode)
> +					  ? 1
> +					  : GET_MODE_UNIT_SIZE (vmode);
> +		      if (loop_lens)
> +			final_len
> +			  = vect_get_loop_len (loop_vinfo, gsi, loop_lens,
> +					       vec_num * ncopies, vectype,
> +					       vec_num * j + i, factor);
> +		      else
> +			{
> +			  tree iv_type = LOOP_VINFO_RGROUP_IV_TYPE (loop_vinfo);
> +			  final_len
> +			    = build_int_cst (iv_type,
> +					     TYPE_VECTOR_SUBPARTS (vectype));
> +			}
>  
> -		  signed char biasval =
> -		    LOOP_VINFO_PARTIAL_LOAD_STORE_BIAS (loop_vinfo);
> +		      /* Need conversion if it's wrapped with VnQI.  */
> +		      if (vmode != new_vmode)
> +			{
> +			  tree new_vtype = build_vector_type_for_mode (
> +			    unsigned_intQI_type_node, new_vmode);
> +			  tree var = vect_get_new_ssa_name (new_vtype,
> +							    vect_simple_var);
> +			  vec_oprnd
> +			    = build1 (VIEW_CONVERT_EXPR, new_vtype, vec_oprnd);
> +			  gassign *new_stmt
> +			    = gimple_build_assign (var, VIEW_CONVERT_EXPR,
> +						   vec_oprnd);
> +			  vect_finish_stmt_generation (vinfo, stmt_info,
> +						       new_stmt, gsi);
> +			  vec_oprnd = var;
> +			}
> +		      signed char biasval
> +			= LOOP_VINFO_PARTIAL_LOAD_STORE_BIAS (loop_vinfo);
> +
> +		      tree bias = build_int_cst (intQI_type_node, biasval);
>  
> -		  tree bias = build_int_cst (intQI_type_node, biasval);
> -		  gcall *call
> -		    = gimple_build_call_internal (IFN_LEN_STORE, 5, dataref_ptr,
> -						  ptr, final_len, vec_oprnd,
> -						  bias);
> +		      if (targetm.vectorize.get_mask_mode (vmode).exists (
> +			    &maskmode)
> +			  && can_vec_mask_load_store_p (vmode, maskmode, false))
> +			{
> +			  if (!final_mask)
> +			    final_mask = get_all_ones_mask (maskmode);
> +			  call
> +			    = gimple_build_call_internal (IFN_LEN_MASK_STORE, 6,
> +							  dataref_ptr, ptr,
> +							  final_len, final_mask,
> +							  vec_oprnd, bias);
> +			}
> +		      else
> +			call = gimple_build_call_internal (IFN_LEN_STORE, 5,
> +							   dataref_ptr, ptr,
> +							   final_len, vec_oprnd,
> +							   bias);
> +		    }
> +		  else
> +		    {
> +		      gcc_assert (final_mask);
> +		      call = gimple_build_call_internal (IFN_MASK_STORE, 4,
> +							 dataref_ptr, ptr,
> +							 final_mask, vec_oprnd);
> +		    }
>  		  gimple_call_set_nothrow (call, true);
>  		  vect_finish_stmt_generation (vinfo, stmt_info, call, gsi);
>  		  new_stmt = call;
> @@ -10304,63 +10340,90 @@ vectorizable_load (vec_info *vinfo,
>  					      align, misalign);
>  		    align = least_bit_hwi (misalign | align);
>  
> -		    if (final_mask)
> +		    if (final_mask
> +			|| (loop_lens && memory_access_type != VMAT_INVARIANT))
>  		      {
> -			tree ptr = build_int_cst (ref_type,
> -						  align * BITS_PER_UNIT);
> -			gcall *call
> -			  = gimple_build_call_internal (IFN_MASK_LOAD, 3,
> -							dataref_ptr, ptr,
> -							final_mask);
> -			gimple_call_set_nothrow (call, true);
> -			new_stmt = call;
> -			data_ref = NULL_TREE;
> -		      }
> -		    else if (loop_lens && memory_access_type != VMAT_INVARIANT)
> -		      {
> -			machine_mode vmode = TYPE_MODE (vectype);
> -			opt_machine_mode new_ovmode
> -			  = get_len_load_store_mode (vmode, true);
> -			machine_mode new_vmode = new_ovmode.require ();
> -			unsigned factor = (new_ovmode == vmode)
> -					    ? 1
> -					    : GET_MODE_UNIT_SIZE (vmode);
> -			tree final_len
> -			  = vect_get_loop_len (loop_vinfo, gsi, loop_lens,
> -					       vec_num * ncopies, vectype,
> -					       vec_num * j + i, factor);
> +			gcall *call;
>  			tree ptr
>  			  = build_int_cst (ref_type, align * BITS_PER_UNIT);
> +			machine_mode vmode = TYPE_MODE (vectype);
> +			machine_mode new_vmode = vmode;
> +			if (get_len_load_store_mode (vmode, true)
> +			      .exists (&new_vmode))
> +			  {
> +			    tree final_len;
> +			    machine_mode maskmode;
> +			    tree qi_type = unsigned_intQI_type_node;
> +			    signed char biasval
> +			      = LOOP_VINFO_PARTIAL_LOAD_STORE_BIAS (loop_vinfo);
> +			    tree bias
> +			      = build_int_cst (intQI_type_node, biasval);
> +			    unsigned factor = (new_vmode == vmode)
> +						? 1
> +						: GET_MODE_UNIT_SIZE (vmode);
> +			    if (loop_lens)
> +			      final_len
> +				= vect_get_loop_len (loop_vinfo, gsi, loop_lens,
> +						     vec_num * ncopies, vectype,
> +						     vec_num * j + i, factor);
> +			    else
> +			      {
> +				tree iv_type
> +				  = LOOP_VINFO_RGROUP_IV_TYPE (loop_vinfo);
> +				final_len = build_int_cst (
> +				  iv_type, TYPE_VECTOR_SUBPARTS (vectype));
> +			      }
>  
> -			tree qi_type = unsigned_intQI_type_node;
> -
> -			signed char biasval =
> -			  LOOP_VINFO_PARTIAL_LOAD_STORE_BIAS (loop_vinfo);
> -
> -			tree bias = build_int_cst (intQI_type_node, biasval);
> -
> -			gcall *call
> -			  = gimple_build_call_internal (IFN_LEN_LOAD, 4,
> -							dataref_ptr, ptr,
> -							final_len, bias);
> -			gimple_call_set_nothrow (call, true);
> -			new_stmt = call;
> -			data_ref = NULL_TREE;
> -
> -			/* Need conversion if it's wrapped with VnQI.  */
> -			if (vmode != new_vmode)
> +			    if (targetm.vectorize.get_mask_mode (vmode).exists (
> +				  &maskmode)
> +				&& can_vec_mask_load_store_p (vmode, maskmode,
> +							      false))
> +			      {
> +				if (!final_mask)
> +				  final_mask = get_all_ones_mask (maskmode);
> +				call = gimple_build_call_internal (
> +				  IFN_LEN_MASK_LOAD, 5, dataref_ptr, ptr,
> +				  final_len, final_mask, bias);
> +			      }
> +			    else
> +			      {
> +				call
> +				  = gimple_build_call_internal (IFN_LEN_LOAD, 4,
> +								dataref_ptr,
> +								ptr, final_len,
> +								bias);
> +			      }
> +			    gimple_call_set_nothrow (call, true);
> +			    new_stmt = call;
> +			    data_ref = NULL_TREE;
> +			    /* Need conversion if it's wrapped with VnQI.  */
> +			    if (vmode != new_vmode)
> +			      {
> +				tree new_vtype
> +				  = build_vector_type_for_mode (qi_type,
> +								new_vmode);
> +				tree var
> +				  = vect_get_new_ssa_name (new_vtype,
> +							   vect_simple_var);
> +				gimple_set_lhs (call, var);
> +				vect_finish_stmt_generation (vinfo, stmt_info,
> +							     call, gsi);
> +				tree op
> +				  = build1 (VIEW_CONVERT_EXPR, vectype, var);
> +				new_stmt
> +				  = gimple_build_assign (vec_dest,
> +							 VIEW_CONVERT_EXPR, op);
> +			      }
> +			  }
> +			else
>  			  {
> -			    tree new_vtype
> -			      = build_vector_type_for_mode (qi_type, new_vmode);
> -			    tree var = vect_get_new_ssa_name (new_vtype,
> -							      vect_simple_var);
> -			    gimple_set_lhs (call, var);
> -			    vect_finish_stmt_generation (vinfo, stmt_info, call,
> -							 gsi);
> -			    tree op = build1 (VIEW_CONVERT_EXPR, vectype, var);
> -			    new_stmt
> -			      = gimple_build_assign (vec_dest,
> -						     VIEW_CONVERT_EXPR, op);
> +			    gcc_assert (final_mask);
> +			    call = gimple_build_call_internal (IFN_MASK_LOAD, 3,
> +							       dataref_ptr, ptr,
> +							       final_mask);
> +			    gimple_call_set_nothrow (call, true);
> +			    new_stmt = call;
> +			    data_ref = NULL_TREE;
>  			  }
>  		      }
>  		    else
>
  
juzhe.zhong@rivai.ai June 20, 2023, 3:10 p.m. UTC | #4
Thanks for Richi's comments.
I have sent a new patch addressing your comments.

I am so sorry the V2 patch just broke bootstrap.
I sent the V3 patch that fixes it.

Thanks.


juzhe.zhong@rivai.ai
 
> +      if (insn_operand_matches (icode, bias_argno, GEN_INT (-1)))
>  return -1;
>      }
>  
> diff --git a/gcc/optabs-query.cc b/gcc/optabs-query.cc
> index 276f8408dd7..4394d391200 100644
> --- a/gcc/optabs-query.cc
> +++ b/gcc/optabs-query.cc
> @@ -566,11 +566,14 @@ can_vec_mask_load_store_p (machine_mode mode,
>     bool is_load)
>  {
>    optab op = is_load ? maskload_optab : maskstore_optab;
> +  optab len_op = is_load ? len_maskload_optab : len_maskstore_optab;
>    machine_mode vmode;
>  
>    /* If mode is vector mode, check it directly.  */
>    if (VECTOR_MODE_P (mode))
> -    return convert_optab_handler (op, mode, mask_mode) != CODE_FOR_nothing;
> +    return convert_optab_handler (op, mode, mask_mode) != CODE_FOR_nothing
> +    || convert_optab_handler (len_op, mode, mask_mode)
> + != CODE_FOR_nothing;
>  
>    /* Otherwise, return true if there is some vector mode with
>       the mask load/store supported.  */
> @@ -584,7 +587,9 @@ can_vec_mask_load_store_p (machine_mode mode,
>    vmode = targetm.vectorize.preferred_simd_mode (smode);
>    if (VECTOR_MODE_P (vmode)
>        && targetm.vectorize.get_mask_mode (vmode).exists (&mask_mode)
> -      && convert_optab_handler (op, vmode, mask_mode) != CODE_FOR_nothing)
> +      && (convert_optab_handler (op, vmode, mask_mode) != CODE_FOR_nothing
> +   || convert_optab_handler (len_op, vmode, mask_mode)
> +        != CODE_FOR_nothing))
>      return true;
>  
>    auto_vector_modes vector_modes;
> @@ -592,7 +597,9 @@ can_vec_mask_load_store_p (machine_mode mode,
>    for (machine_mode base_mode : vector_modes)
>      if (related_vector_mode (base_mode, smode).exists (&vmode)
>  && targetm.vectorize.get_mask_mode (vmode).exists (&mask_mode)
> - && convert_optab_handler (op, vmode, mask_mode) != CODE_FOR_nothing)
> + && (convert_optab_handler (op, vmode, mask_mode) != CODE_FOR_nothing
> +     || convert_optab_handler (len_op, vmode, mask_mode)
> + != CODE_FOR_nothing))
>        return true;
>    return false;
>  }
> @@ -608,17 +615,27 @@ opt_machine_mode
>  get_len_load_store_mode (machine_mode mode, bool is_load)
>  {
>    optab op = is_load ? len_load_optab : len_store_optab;
> +  optab masked_op = is_load ? len_maskload_optab : len_maskstore_optab;
>    gcc_assert (VECTOR_MODE_P (mode));
>  
>    /* Check if length in lanes supported for this mode directly.  */
>    if (direct_optab_handler (op, mode))
>      return mode;
>  
> +  /* Check if length in lanes supported by len_maskload/store.  */
> +  machine_mode mask_mode;
> +  if (targetm.vectorize.get_mask_mode (mode).exists (&mask_mode)
> +      && convert_optab_handler (masked_op, mode, mask_mode) != CODE_FOR_nothing)
> +    return mode;
> +
>    /* Check if length in bytes supported for same vector size VnQI.  */
>    machine_mode vmode;
>    poly_uint64 nunits = GET_MODE_SIZE (mode);
>    if (related_vector_mode (mode, QImode, nunits).exists (&vmode)
> -      && direct_optab_handler (op, vmode))
> +      && (direct_optab_handler (op, vmode)
> +   || (targetm.vectorize.get_mask_mode (vmode).exists (&mask_mode)
> +       && convert_optab_handler (masked_op, vmode, mask_mode)
> +    != CODE_FOR_nothing)))
>      return vmode;
>  
>    return opt_machine_mode ();
> diff --git a/gcc/tree-vect-stmts.cc b/gcc/tree-vect-stmts.cc
> index 056a0ecb2be..45bc1e4b5bc 100644
> --- a/gcc/tree-vect-stmts.cc
> +++ b/gcc/tree-vect-stmts.cc
> @@ -1819,16 +1819,17 @@ check_load_store_for_partial_vectors (loop_vec_info loop_vinfo, tree vectype,
>    poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
>    poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
>    machine_mode mask_mode;
> +  machine_mode vmode;
>    bool using_partial_vectors_p = false;
>    if (targetm.vectorize.get_mask_mode (vecmode).exists (&mask_mode)
> -      && can_vec_mask_load_store_p (vecmode, mask_mode, is_load))
> +      && can_vec_mask_load_store_p (vecmode, mask_mode, is_load)
> +      && !get_len_load_store_mode (vecmode, is_load).exists (&vmode))
>      {
>        nvectors = group_memory_nvectors (group_size * vf, nunits);
>        vect_record_loop_mask (loop_vinfo, masks, nvectors, vectype, scalar_mask);
>        using_partial_vectors_p = true;
>      }
>  
> -  machine_mode vmode;
>    if (get_len_load_store_mode (vecmode, is_load).exists (&vmode))
>      {
>        nvectors = group_memory_nvectors (group_size * vf, nunits);
 
Can you instead swap both checks and do an else if for the mask case?
If we ever record both len and mask we will fail the vectorization
anyway.
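
Something like the following (an untested sketch only; the body of the
len branch simply mirrors the code already surrounding this hunk, and
the exact record-call signatures should be taken from trunk):

  machine_mode vmode;
  machine_mode mask_mode;
  bool using_partial_vectors_p = false;
  if (get_len_load_store_mode (vecmode, is_load).exists (&vmode))
    {
      nvectors = group_memory_nvectors (group_size * vf, nunits);
      vec_loop_lens *lens = &LOOP_VINFO_LENS (loop_vinfo);
      unsigned factor
        = (vecmode == vmode) ? 1 : GET_MODE_UNIT_SIZE (vecmode);
      vect_record_loop_len (loop_vinfo, lens, nvectors, vectype, factor);
      using_partial_vectors_p = true;
    }
  else if (targetm.vectorize.get_mask_mode (vecmode).exists (&mask_mode)
           && can_vec_mask_load_store_p (vecmode, mask_mode, is_load))
    {
      nvectors = group_memory_nvectors (group_size * vf, nunits);
      vect_record_loop_mask (loop_vinfo, masks, nvectors, vectype,
                             scalar_mask);
      using_partial_vectors_p = true;
    }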
 
> @@ -2809,6 +2810,17 @@ vect_build_zero_merge_argument (vec_info *vinfo,
>    return vect_init_vector (vinfo, stmt_info, merge, vectype, NULL);
>  }
>  
> +/* Get all-ones vector mask for corresponding maskmode.  */
> +
> +static tree
> +get_all_ones_mask (machine_mode maskmode)
> +{
> +  poly_uint64 nunits = GET_MODE_NUNITS (maskmode);
> +  tree masktype = build_truth_vector_type_for_mode (nunits, maskmode);
> +  tree mask = build_int_cst (TREE_TYPE (masktype), 1);
 
I think you want build_all_ones_cst (masktype); using '1' looks wrong
at least.  The alternative is constant_boolean_node (true, masktype).
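
I.e. (untested sketch):

  static tree
  get_all_ones_mask (machine_mode maskmode)
  {
    poly_uint64 nunits = GET_MODE_NUNITS (maskmode);
    tree masktype = build_truth_vector_type_for_mode (nunits, maskmode);
    /* All-ones constant of the truth vector type, i.e. all lanes
       active.  */
    return build_all_ones_cst (masktype);
  }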
 
> +  return build_vector_from_val (masktype, mask);
> +}
> +
>  /* Build a gather load call while vectorizing STMT_INFO.  Insert new
>     instructions before GSI and add them to VEC_STMT.  GS_INFO describes
>     the gather load operation.  If the load is conditional, MASK is the
> @@ -8945,56 +8957,80 @@ vectorizable_store (vec_info *vinfo,
>  }
>  
>        /* Arguments are ready.  Create the new vector stmt.  */
> -       if (final_mask)
> +       if (final_mask || loop_lens)
>  {
 
I've seen the later refactoring patch but it seems garbled white-space
wise (this is also a case where context diffs would make review easier).
 
As said I'd like to see
 
        if (loop_lens)
          ... produce 'len' argument ...
 
        if ((loop_lens || final_mask)
            && use-len-mask)
          ..
        else if (loop_lens && use-len)
          ..
        else if (final_mask && use-mask)
          ..
 
maybe it is that, but it's hard to identify :/  It would be nice
to do the optab queries only once and compute a
partial_or_mask_vector_ifn in the analysis phase when we register
lens/masks so we don't have to re-do that.  Like the
 
+                           if (targetm.vectorize.get_mask_mode (vmode).exists (
+                                 &maskmode)
+                               && can_vec_mask_load_store_p (vmode, maskmode,
+                                                             false))
 
stuff.  If we arrive here we know we can code-generate with lengths
if loop_lens and with masks when final_mask (and with both when both).
What we don't easily know is whether we have to use
LEN_MASK_LOAD when just 'final_mask' or just 'loop_lens' and that's
something we should also be able to tell more easily.  With a
partial_or_mask_vector_ifn we could just switch () on that.
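
Roughly (just a sketch -- neither the enum nor the place it is stored
exists yet; both would have to be computed once in the analysis phase):

  enum partial_vector_ifn { PV_NONE, PV_MASK, PV_LEN, PV_LEN_MASK };

  switch (partial_ifn)   /* recorded once alongside the lens/masks */
    {
    case PV_LEN_MASK:
      /* Get 'len' (or build the all-lanes constant) and 'final_mask'
         (or the all-ones mask), emit IFN_LEN_MASK_{LOAD,STORE} and
         handle the bias.  */
      break;
    case PV_LEN:
      /* Get 'len', emit IFN_LEN_{LOAD,STORE} and handle the bias.  */
      break;
    case PV_MASK:
      /* Emit IFN_MASK_{LOAD,STORE} with final_mask.  */
      break;
    case PV_NONE:
      /* Plain vector load/store.  */
      break;
    }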
 
That said, your patch seems to at least compute the len only once
(this original patch, not the unreadable "refactoring").
 
Richard.
 
> +   gcall *call;
>    tree ptr = build_int_cst (ref_type, align * BITS_PER_UNIT);
> -   gcall *call
> -     = gimple_build_call_internal (IFN_MASK_STORE, 4,
> -   dataref_ptr, ptr,
> -   final_mask, vec_oprnd);
> -   gimple_call_set_nothrow (call, true);
> -   vect_finish_stmt_generation (vinfo, stmt_info, call, gsi);
> -   new_stmt = call;
> - }
> -       else if (loop_lens)
> - {
>    machine_mode vmode = TYPE_MODE (vectype);
> -   opt_machine_mode new_ovmode
> -     = get_len_load_store_mode (vmode, false);
> -   machine_mode new_vmode = new_ovmode.require ();
> -   unsigned factor
> -     = (new_ovmode == vmode) ? 1 : GET_MODE_UNIT_SIZE (vmode);
> -   tree final_len
> -     = vect_get_loop_len (loop_vinfo, gsi, loop_lens,
> - vec_num * ncopies, vectype,
> - vec_num * j + i, factor);
> -   tree ptr = build_int_cst (ref_type, align * BITS_PER_UNIT);
> -   /* Need conversion if it's wrapped with VnQI.  */
> -   if (vmode != new_vmode)
> +   machine_mode new_vmode = vmode;
> +
> +   if (get_len_load_store_mode (vmode, false)
> + .exists (&new_vmode))
>      {
> -       tree new_vtype
> - = build_vector_type_for_mode (unsigned_intQI_type_node,
> -       new_vmode);
> -       tree var
> - = vect_get_new_ssa_name (new_vtype, vect_simple_var);
> -       vec_oprnd
> - = build1 (VIEW_CONVERT_EXPR, new_vtype, vec_oprnd);
> -       gassign *new_stmt
> - = gimple_build_assign (var, VIEW_CONVERT_EXPR,
> -        vec_oprnd);
> -       vect_finish_stmt_generation (vinfo, stmt_info, new_stmt,
> -    gsi);
> -       vec_oprnd = var;
> -     }
> +       tree final_len;
> +       machine_mode maskmode;
> +       unsigned factor = (new_vmode == vmode)
> +   ? 1
> +   : GET_MODE_UNIT_SIZE (vmode);
> +       if (loop_lens)
> + final_len
> +   = vect_get_loop_len (loop_vinfo, gsi, loop_lens,
> +        vec_num * ncopies, vectype,
> +        vec_num * j + i, factor);
> +       else
> + {
> +   tree iv_type = LOOP_VINFO_RGROUP_IV_TYPE (loop_vinfo);
> +   final_len
> +     = build_int_cst (iv_type,
> +      TYPE_VECTOR_SUBPARTS (vectype));
> + }
>  
> -   signed char biasval =
> -     LOOP_VINFO_PARTIAL_LOAD_STORE_BIAS (loop_vinfo);
> +       /* Need conversion if it's wrapped with VnQI.  */
> +       if (vmode != new_vmode)
> + {
> +   tree new_vtype = build_vector_type_for_mode (
> +     unsigned_intQI_type_node, new_vmode);
> +   tree var = vect_get_new_ssa_name (new_vtype,
> +     vect_simple_var);
> +   vec_oprnd
> +     = build1 (VIEW_CONVERT_EXPR, new_vtype, vec_oprnd);
> +   gassign *new_stmt
> +     = gimple_build_assign (var, VIEW_CONVERT_EXPR,
> +    vec_oprnd);
> +   vect_finish_stmt_generation (vinfo, stmt_info,
> +        new_stmt, gsi);
> +   vec_oprnd = var;
> + }
> +       signed char biasval
> + = LOOP_VINFO_PARTIAL_LOAD_STORE_BIAS (loop_vinfo);
> +
> +       tree bias = build_int_cst (intQI_type_node, biasval);
>  
> -   tree bias = build_int_cst (intQI_type_node, biasval);
> -   gcall *call
> -     = gimple_build_call_internal (IFN_LEN_STORE, 5, dataref_ptr,
> -   ptr, final_len, vec_oprnd,
> -   bias);
> +       if (targetm.vectorize.get_mask_mode (vmode).exists (
> +     &maskmode)
> +   && can_vec_mask_load_store_p (vmode, maskmode, false))
> + {
> +   if (!final_mask)
> +     final_mask = get_all_ones_mask (maskmode);
> +   call
> +     = gimple_build_call_internal (IFN_LEN_MASK_STORE, 6,
> +   dataref_ptr, ptr,
> +   final_len, final_mask,
> +   vec_oprnd, bias);
> + }
> +       else
> + call = gimple_build_call_internal (IFN_LEN_STORE, 5,
> +    dataref_ptr, ptr,
> +    final_len, vec_oprnd,
> +    bias);
> +     }
> +   else
> +     {
> +       gcc_assert (final_mask);
> +       call = gimple_build_call_internal (IFN_MASK_STORE, 4,
> + dataref_ptr, ptr,
> + final_mask, vec_oprnd);
> +     }
>    gimple_call_set_nothrow (call, true);
>    vect_finish_stmt_generation (vinfo, stmt_info, call, gsi);
>    new_stmt = call;
> @@ -10304,63 +10340,90 @@ vectorizable_load (vec_info *vinfo,
>        align, misalign);
>      align = least_bit_hwi (misalign | align);
>  
> -     if (final_mask)
> +     if (final_mask
> + || (loop_lens && memory_access_type != VMAT_INVARIANT))
>        {
> - tree ptr = build_int_cst (ref_type,
> -   align * BITS_PER_UNIT);
> - gcall *call
> -   = gimple_build_call_internal (IFN_MASK_LOAD, 3,
> - dataref_ptr, ptr,
> - final_mask);
> - gimple_call_set_nothrow (call, true);
> - new_stmt = call;
> - data_ref = NULL_TREE;
> -       }
> -     else if (loop_lens && memory_access_type != VMAT_INVARIANT)
> -       {
> - machine_mode vmode = TYPE_MODE (vectype);
> - opt_machine_mode new_ovmode
> -   = get_len_load_store_mode (vmode, true);
> - machine_mode new_vmode = new_ovmode.require ();
> - unsigned factor = (new_ovmode == vmode)
> -     ? 1
> -     : GET_MODE_UNIT_SIZE (vmode);
> - tree final_len
> -   = vect_get_loop_len (loop_vinfo, gsi, loop_lens,
> -        vec_num * ncopies, vectype,
> -        vec_num * j + i, factor);
> + gcall *call;
>  tree ptr
>    = build_int_cst (ref_type, align * BITS_PER_UNIT);
> + machine_mode vmode = TYPE_MODE (vectype);
> + machine_mode new_vmode = vmode;
> + if (get_len_load_store_mode (vmode, true)
> +       .exists (&new_vmode))
> +   {
> +     tree final_len;
> +     machine_mode maskmode;
> +     tree qi_type = unsigned_intQI_type_node;
> +     signed char biasval
> +       = LOOP_VINFO_PARTIAL_LOAD_STORE_BIAS (loop_vinfo);
> +     tree bias
> +       = build_int_cst (intQI_type_node, biasval);
> +     unsigned factor = (new_vmode == vmode)
> + ? 1
> + : GET_MODE_UNIT_SIZE (vmode);
> +     if (loop_lens)
> +       final_len
> + = vect_get_loop_len (loop_vinfo, gsi, loop_lens,
> +      vec_num * ncopies, vectype,
> +      vec_num * j + i, factor);
> +     else
> +       {
> + tree iv_type
> +   = LOOP_VINFO_RGROUP_IV_TYPE (loop_vinfo);
> + final_len = build_int_cst (
> +   iv_type, TYPE_VECTOR_SUBPARTS (vectype));
> +       }
>  
> - tree qi_type = unsigned_intQI_type_node;
> -
> - signed char biasval =
> -   LOOP_VINFO_PARTIAL_LOAD_STORE_BIAS (loop_vinfo);
> -
> - tree bias = build_int_cst (intQI_type_node, biasval);
> -
> - gcall *call
> -   = gimple_build_call_internal (IFN_LEN_LOAD, 4,
> - dataref_ptr, ptr,
> - final_len, bias);
> - gimple_call_set_nothrow (call, true);
> - new_stmt = call;
> - data_ref = NULL_TREE;
> -
> - /* Need conversion if it's wrapped with VnQI.  */
> - if (vmode != new_vmode)
> +     if (targetm.vectorize.get_mask_mode (vmode).exists (
> +   &maskmode)
> + && can_vec_mask_load_store_p (vmode, maskmode,
> +       false))
> +       {
> + if (!final_mask)
> +   final_mask = get_all_ones_mask (maskmode);
> + call = gimple_build_call_internal (
> +   IFN_LEN_MASK_LOAD, 5, dataref_ptr, ptr,
> +   final_len, final_mask, bias);
> +       }
> +     else
> +       {
> + call
> +   = gimple_build_call_internal (IFN_LEN_LOAD, 4,
> + dataref_ptr,
> + ptr, final_len,
> + bias);
> +       }
> +     gimple_call_set_nothrow (call, true);
> +     new_stmt = call;
> +     data_ref = NULL_TREE;
> +     /* Need conversion if it's wrapped with VnQI.  */
> +     if (vmode != new_vmode)
> +       {
> + tree new_vtype
> +   = build_vector_type_for_mode (qi_type,
> + new_vmode);
> + tree var
> +   = vect_get_new_ssa_name (new_vtype,
> +    vect_simple_var);
> + gimple_set_lhs (call, var);
> + vect_finish_stmt_generation (vinfo, stmt_info,
> +      call, gsi);
> + tree op
> +   = build1 (VIEW_CONVERT_EXPR, vectype, var);
> + new_stmt
> +   = gimple_build_assign (vec_dest,
> + VIEW_CONVERT_EXPR, op);
> +       }
> +   }
> + else
>    {
> -     tree new_vtype
> -       = build_vector_type_for_mode (qi_type, new_vmode);
> -     tree var = vect_get_new_ssa_name (new_vtype,
> -       vect_simple_var);
> -     gimple_set_lhs (call, var);
> -     vect_finish_stmt_generation (vinfo, stmt_info, call,
> - gsi);
> -     tree op = build1 (VIEW_CONVERT_EXPR, vectype, var);
> -     new_stmt
> -       = gimple_build_assign (vec_dest,
> -      VIEW_CONVERT_EXPR, op);
> +     gcc_assert (final_mask);
> +     call = gimple_build_call_internal (IFN_MASK_LOAD, 3,
> +        dataref_ptr, ptr,
> +        final_mask);
> +     gimple_call_set_nothrow (call, true);
> +     new_stmt = call;
> +     data_ref = NULL_TREE;
>    }
>        }
>      else
> 
 
-- 
Richard Biener <rguenther@suse.de>
SUSE Software Solutions Germany GmbH, Frankenstrasse 146, 90461 Nuernberg,
Germany; GF: Ivo Totev, Andrew Myers, Andrew McDonald, Boudien Moerman;
HRB 36809 (AG Nuernberg)
  

Patch

diff --git a/gcc/internal-fn.cc b/gcc/internal-fn.cc
index c911ae790cb..e10c21de5f1 100644
--- a/gcc/internal-fn.cc
+++ b/gcc/internal-fn.cc
@@ -2949,7 +2949,7 @@  expand_partial_load_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
  * OPTAB.  */
 
 static void
-expand_partial_store_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
+expand_partial_store_optab_fn (internal_fn ifn, gcall *stmt, convert_optab optab)
 {
   class expand_operand ops[5];
   tree type, lhs, rhs, maskt, biast;
@@ -2957,7 +2957,7 @@  expand_partial_store_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
   insn_code icode;
 
   maskt = gimple_call_arg (stmt, 2);
-  rhs = gimple_call_arg (stmt, 3);
+  rhs = gimple_call_arg (stmt, internal_fn_stored_value_index (ifn));
   type = TREE_TYPE (rhs);
   lhs = expand_call_mem_ref (type, stmt, 0);
 
@@ -4435,6 +4435,7 @@  internal_load_fn_p (internal_fn fn)
     case IFN_GATHER_LOAD:
     case IFN_MASK_GATHER_LOAD:
     case IFN_LEN_LOAD:
+    case IFN_LEN_MASK_LOAD:
       return true;
 
     default:
@@ -4455,6 +4456,7 @@  internal_store_fn_p (internal_fn fn)
     case IFN_SCATTER_STORE:
     case IFN_MASK_SCATTER_STORE:
     case IFN_LEN_STORE:
+    case IFN_LEN_MASK_STORE:
       return true;
 
     default:
@@ -4494,6 +4496,10 @@  internal_fn_mask_index (internal_fn fn)
     case IFN_MASK_STORE_LANES:
       return 2;
 
+    case IFN_LEN_MASK_LOAD:
+    case IFN_LEN_MASK_STORE:
+      return 3;
+
     case IFN_MASK_GATHER_LOAD:
     case IFN_MASK_SCATTER_STORE:
       return 4;
@@ -4519,6 +4525,9 @@  internal_fn_stored_value_index (internal_fn fn)
     case IFN_LEN_STORE:
       return 3;
 
+    case IFN_LEN_MASK_STORE:
+      return 4;
+
     default:
       return -1;
     }
@@ -4583,13 +4592,31 @@  internal_len_load_store_bias (internal_fn ifn, machine_mode mode)
 {
   optab optab = direct_internal_fn_optab (ifn);
   insn_code icode = direct_optab_handler (optab, mode);
+  int bias_argno = 3;
+  if (icode == CODE_FOR_nothing)
+    {
+      machine_mode mask_mode
+	= targetm.vectorize.get_mask_mode (mode).require ();
+      if (ifn == IFN_LEN_LOAD)
+	{
+	  /* Try LEN_MASK_LOAD.  */
+	  optab = direct_internal_fn_optab (IFN_LEN_MASK_LOAD);
+	}
+      else
+	{
+	  /* Try LEN_MASK_STORE.  */
+	  optab = direct_internal_fn_optab (IFN_LEN_MASK_STORE);
+	}
+      icode = convert_optab_handler (optab, mode, mask_mode);
+      bias_argno = 4;
+    }
 
   if (icode != CODE_FOR_nothing)
     {
       /* For now we only support biases of 0 or -1.  Try both of them.  */
-      if (insn_operand_matches (icode, 3, GEN_INT (0)))
+      if (insn_operand_matches (icode, bias_argno, GEN_INT (0)))
 	return 0;
-      if (insn_operand_matches (icode, 3, GEN_INT (-1)))
+      if (insn_operand_matches (icode, bias_argno, GEN_INT (-1)))
 	return -1;
     }
 
diff --git a/gcc/optabs-query.cc b/gcc/optabs-query.cc
index 276f8408dd7..4394d391200 100644
--- a/gcc/optabs-query.cc
+++ b/gcc/optabs-query.cc
@@ -566,11 +566,14 @@  can_vec_mask_load_store_p (machine_mode mode,
 			   bool is_load)
 {
   optab op = is_load ? maskload_optab : maskstore_optab;
+  optab len_op = is_load ? len_maskload_optab : len_maskstore_optab;
   machine_mode vmode;
 
   /* If mode is vector mode, check it directly.  */
   if (VECTOR_MODE_P (mode))
-    return convert_optab_handler (op, mode, mask_mode) != CODE_FOR_nothing;
+    return convert_optab_handler (op, mode, mask_mode) != CODE_FOR_nothing
+	   || convert_optab_handler (len_op, mode, mask_mode)
+		!= CODE_FOR_nothing;
 
   /* Otherwise, return true if there is some vector mode with
      the mask load/store supported.  */
@@ -584,7 +587,9 @@  can_vec_mask_load_store_p (machine_mode mode,
   vmode = targetm.vectorize.preferred_simd_mode (smode);
   if (VECTOR_MODE_P (vmode)
       && targetm.vectorize.get_mask_mode (vmode).exists (&mask_mode)
-      && convert_optab_handler (op, vmode, mask_mode) != CODE_FOR_nothing)
+      && (convert_optab_handler (op, vmode, mask_mode) != CODE_FOR_nothing
+	  || convert_optab_handler (len_op, vmode, mask_mode)
+	       != CODE_FOR_nothing))
     return true;
 
   auto_vector_modes vector_modes;
@@ -592,7 +597,9 @@  can_vec_mask_load_store_p (machine_mode mode,
   for (machine_mode base_mode : vector_modes)
     if (related_vector_mode (base_mode, smode).exists (&vmode)
 	&& targetm.vectorize.get_mask_mode (vmode).exists (&mask_mode)
-	&& convert_optab_handler (op, vmode, mask_mode) != CODE_FOR_nothing)
+	&& (convert_optab_handler (op, vmode, mask_mode) != CODE_FOR_nothing
+	    || convert_optab_handler (len_op, vmode, mask_mode)
+		 != CODE_FOR_nothing))
       return true;
   return false;
 }
@@ -608,17 +615,27 @@  opt_machine_mode
 get_len_load_store_mode (machine_mode mode, bool is_load)
 {
   optab op = is_load ? len_load_optab : len_store_optab;
+  optab masked_op = is_load ? len_maskload_optab : len_maskstore_optab;
   gcc_assert (VECTOR_MODE_P (mode));
 
   /* Check if length in lanes supported for this mode directly.  */
   if (direct_optab_handler (op, mode))
     return mode;
 
+  /* Check if length in lanes supported by len_maskload/store.  */
+  machine_mode mask_mode;
+  if (targetm.vectorize.get_mask_mode (mode).exists (&mask_mode)
+      && convert_optab_handler (masked_op, mode, mask_mode) != CODE_FOR_nothing)
+    return mode;
+
   /* Check if length in bytes supported for same vector size VnQI.  */
   machine_mode vmode;
   poly_uint64 nunits = GET_MODE_SIZE (mode);
   if (related_vector_mode (mode, QImode, nunits).exists (&vmode)
-      && direct_optab_handler (op, vmode))
+      && (direct_optab_handler (op, vmode)
+	  || (targetm.vectorize.get_mask_mode (vmode).exists (&mask_mode)
+	      && convert_optab_handler (masked_op, vmode, mask_mode)
+		   != CODE_FOR_nothing)))
     return vmode;
 
   return opt_machine_mode ();
diff --git a/gcc/tree-vect-stmts.cc b/gcc/tree-vect-stmts.cc
index 056a0ecb2be..45bc1e4b5bc 100644
--- a/gcc/tree-vect-stmts.cc
+++ b/gcc/tree-vect-stmts.cc
@@ -1819,16 +1819,17 @@  check_load_store_for_partial_vectors (loop_vec_info loop_vinfo, tree vectype,
   poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
   poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
   machine_mode mask_mode;
+  machine_mode vmode;
   bool using_partial_vectors_p = false;
   if (targetm.vectorize.get_mask_mode (vecmode).exists (&mask_mode)
-      && can_vec_mask_load_store_p (vecmode, mask_mode, is_load))
+      && can_vec_mask_load_store_p (vecmode, mask_mode, is_load)
+      && !get_len_load_store_mode (vecmode, is_load).exists (&vmode))
     {
       nvectors = group_memory_nvectors (group_size * vf, nunits);
       vect_record_loop_mask (loop_vinfo, masks, nvectors, vectype, scalar_mask);
       using_partial_vectors_p = true;
     }
 
-  machine_mode vmode;
   if (get_len_load_store_mode (vecmode, is_load).exists (&vmode))
     {
       nvectors = group_memory_nvectors (group_size * vf, nunits);
@@ -2809,6 +2810,17 @@  vect_build_zero_merge_argument (vec_info *vinfo,
   return vect_init_vector (vinfo, stmt_info, merge, vectype, NULL);
 }
 
+/* Get all-ones vector mask for corresponding maskmode.  */
+
+static tree
+get_all_ones_mask (machine_mode maskmode)
+{
+  poly_uint64 nunits = GET_MODE_NUNITS (maskmode);
+  tree masktype = build_truth_vector_type_for_mode (nunits, maskmode);
+  tree mask = build_int_cst (TREE_TYPE (masktype), 1);
+  return build_vector_from_val (masktype, mask);
+}
+
 /* Build a gather load call while vectorizing STMT_INFO.  Insert new
    instructions before GSI and add them to VEC_STMT.  GS_INFO describes
    the gather load operation.  If the load is conditional, MASK is the
@@ -8945,56 +8957,80 @@  vectorizable_store (vec_info *vinfo,
 		}
 
 	      /* Arguments are ready.  Create the new vector stmt.  */
-	      if (final_mask)
+	      if (final_mask || loop_lens)
 		{
+		  gcall *call;
 		  tree ptr = build_int_cst (ref_type, align * BITS_PER_UNIT);
-		  gcall *call
-		    = gimple_build_call_internal (IFN_MASK_STORE, 4,
-						  dataref_ptr, ptr,
-						  final_mask, vec_oprnd);
-		  gimple_call_set_nothrow (call, true);
-		  vect_finish_stmt_generation (vinfo, stmt_info, call, gsi);
-		  new_stmt = call;
-		}
-	      else if (loop_lens)
-		{
 		  machine_mode vmode = TYPE_MODE (vectype);
-		  opt_machine_mode new_ovmode
-		    = get_len_load_store_mode (vmode, false);
-		  machine_mode new_vmode = new_ovmode.require ();
-		  unsigned factor
-		    = (new_ovmode == vmode) ? 1 : GET_MODE_UNIT_SIZE (vmode);
-		  tree final_len
-		    = vect_get_loop_len (loop_vinfo, gsi, loop_lens,
-					 vec_num * ncopies, vectype,
-					 vec_num * j + i, factor);
-		  tree ptr = build_int_cst (ref_type, align * BITS_PER_UNIT);
-		  /* Need conversion if it's wrapped with VnQI.  */
-		  if (vmode != new_vmode)
+		  machine_mode new_vmode = vmode;
+
+		  if (get_len_load_store_mode (vmode, false)
+			.exists (&new_vmode))
 		    {
-		      tree new_vtype
-			= build_vector_type_for_mode (unsigned_intQI_type_node,
-						      new_vmode);
-		      tree var
-			= vect_get_new_ssa_name (new_vtype, vect_simple_var);
-		      vec_oprnd
-			= build1 (VIEW_CONVERT_EXPR, new_vtype, vec_oprnd);
-		      gassign *new_stmt
-			= gimple_build_assign (var, VIEW_CONVERT_EXPR,
-					       vec_oprnd);
-		      vect_finish_stmt_generation (vinfo, stmt_info, new_stmt,
-						   gsi);
-		      vec_oprnd = var;
-		    }
+		      tree final_len;
+		      machine_mode maskmode;
+		      unsigned factor = (new_vmode == vmode)
+					  ? 1
+					  : GET_MODE_UNIT_SIZE (vmode);
+		      if (loop_lens)
+			final_len
+			  = vect_get_loop_len (loop_vinfo, gsi, loop_lens,
+					       vec_num * ncopies, vectype,
+					       vec_num * j + i, factor);
+		      else
+			{
+			  tree iv_type = LOOP_VINFO_RGROUP_IV_TYPE (loop_vinfo);
+			  final_len
+			    = build_int_cst (iv_type,
+					     TYPE_VECTOR_SUBPARTS (vectype));
+			}
 
-		  signed char biasval =
-		    LOOP_VINFO_PARTIAL_LOAD_STORE_BIAS (loop_vinfo);
+		      /* Need conversion if it's wrapped with VnQI.  */
+		      if (vmode != new_vmode)
+			{
+			  tree new_vtype = build_vector_type_for_mode (
+			    unsigned_intQI_type_node, new_vmode);
+			  tree var = vect_get_new_ssa_name (new_vtype,
+							    vect_simple_var);
+			  vec_oprnd
+			    = build1 (VIEW_CONVERT_EXPR, new_vtype, vec_oprnd);
+			  gassign *new_stmt
+			    = gimple_build_assign (var, VIEW_CONVERT_EXPR,
+						   vec_oprnd);
+			  vect_finish_stmt_generation (vinfo, stmt_info,
+						       new_stmt, gsi);
+			  vec_oprnd = var;
+			}
+		      signed char biasval
+			= LOOP_VINFO_PARTIAL_LOAD_STORE_BIAS (loop_vinfo);
+
+		      tree bias = build_int_cst (intQI_type_node, biasval);
 
-		  tree bias = build_int_cst (intQI_type_node, biasval);
-		  gcall *call
-		    = gimple_build_call_internal (IFN_LEN_STORE, 5, dataref_ptr,
-						  ptr, final_len, vec_oprnd,
-						  bias);
+		      if (targetm.vectorize.get_mask_mode (vmode).exists (
+			    &maskmode)
+			  && can_vec_mask_load_store_p (vmode, maskmode, false))
+			{
+			  if (!final_mask)
+			    final_mask = get_all_ones_mask (maskmode);
+			  call
+			    = gimple_build_call_internal (IFN_LEN_MASK_STORE, 6,
+							  dataref_ptr, ptr,
+							  final_len, final_mask,
+							  vec_oprnd, bias);
+			}
+		      else
+			call = gimple_build_call_internal (IFN_LEN_STORE, 5,
+							   dataref_ptr, ptr,
+							   final_len, vec_oprnd,
+							   bias);
+		    }
+		  else
+		    {
+		      gcc_assert (final_mask);
+		      call = gimple_build_call_internal (IFN_MASK_STORE, 4,
+							 dataref_ptr, ptr,
+							 final_mask, vec_oprnd);
+		    }
 		  gimple_call_set_nothrow (call, true);
 		  vect_finish_stmt_generation (vinfo, stmt_info, call, gsi);
 		  new_stmt = call;
@@ -10304,63 +10340,90 @@  vectorizable_load (vec_info *vinfo,
 					      align, misalign);
 		    align = least_bit_hwi (misalign | align);
 
-		    if (final_mask)
+		    if (final_mask
+			|| (loop_lens && memory_access_type != VMAT_INVARIANT))
 		      {
-			tree ptr = build_int_cst (ref_type,
-						  align * BITS_PER_UNIT);
-			gcall *call
-			  = gimple_build_call_internal (IFN_MASK_LOAD, 3,
-							dataref_ptr, ptr,
-							final_mask);
-			gimple_call_set_nothrow (call, true);
-			new_stmt = call;
-			data_ref = NULL_TREE;
-		      }
-		    else if (loop_lens && memory_access_type != VMAT_INVARIANT)
-		      {
-			machine_mode vmode = TYPE_MODE (vectype);
-			opt_machine_mode new_ovmode
-			  = get_len_load_store_mode (vmode, true);
-			machine_mode new_vmode = new_ovmode.require ();
-			unsigned factor = (new_ovmode == vmode)
-					    ? 1
-					    : GET_MODE_UNIT_SIZE (vmode);
-			tree final_len
-			  = vect_get_loop_len (loop_vinfo, gsi, loop_lens,
-					       vec_num * ncopies, vectype,
-					       vec_num * j + i, factor);
+			gcall *call;
 			tree ptr
 			  = build_int_cst (ref_type, align * BITS_PER_UNIT);
+			machine_mode vmode = TYPE_MODE (vectype);
+			machine_mode new_vmode = vmode;
+			if (get_len_load_store_mode (vmode, true)
+			      .exists (&new_vmode))
+			  {
+			    tree final_len;
+			    machine_mode maskmode;
+			    tree qi_type = unsigned_intQI_type_node;
+			    signed char biasval
+			      = LOOP_VINFO_PARTIAL_LOAD_STORE_BIAS (loop_vinfo);
+			    tree bias
+			      = build_int_cst (intQI_type_node, biasval);
+			    unsigned factor = (new_vmode == vmode)
+						? 1
+						: GET_MODE_UNIT_SIZE (vmode);
+			    if (loop_lens)
+			      final_len
+				= vect_get_loop_len (loop_vinfo, gsi, loop_lens,
+						     vec_num * ncopies, vectype,
+						     vec_num * j + i, factor);
+			    else
+			      {
+				tree iv_type
+				  = LOOP_VINFO_RGROUP_IV_TYPE (loop_vinfo);
+				final_len = build_int_cst (
+				  iv_type, TYPE_VECTOR_SUBPARTS (vectype));
+			      }
 
-			tree qi_type = unsigned_intQI_type_node;
-
-			signed char biasval =
-			  LOOP_VINFO_PARTIAL_LOAD_STORE_BIAS (loop_vinfo);
-
-			tree bias = build_int_cst (intQI_type_node, biasval);
-
-			gcall *call
-			  = gimple_build_call_internal (IFN_LEN_LOAD, 4,
-							dataref_ptr, ptr,
-							final_len, bias);
-			gimple_call_set_nothrow (call, true);
-			new_stmt = call;
-			data_ref = NULL_TREE;
-
-			/* Need conversion if it's wrapped with VnQI.  */
-			if (vmode != new_vmode)
+			    if (targetm.vectorize.get_mask_mode (vmode).exists (
+				  &maskmode)
+				&& can_vec_mask_load_store_p (vmode, maskmode,
+							      false))
+			      {
+				if (!final_mask)
+				  final_mask = get_all_ones_mask (maskmode);
+				call = gimple_build_call_internal (
+				  IFN_LEN_MASK_LOAD, 5, dataref_ptr, ptr,
+				  final_len, final_mask, bias);
+			      }
+			    else
+			      {
+				call
+				  = gimple_build_call_internal (IFN_LEN_LOAD, 4,
+								dataref_ptr,
+								ptr, final_len,
+								bias);
+			      }
+			    gimple_call_set_nothrow (call, true);
+			    new_stmt = call;
+			    data_ref = NULL_TREE;
+			    /* Need conversion if it's wrapped with VnQI.  */
+			    if (vmode != new_vmode)
+			      {
+				tree new_vtype
+				  = build_vector_type_for_mode (qi_type,
+								new_vmode);
+				tree var
+				  = vect_get_new_ssa_name (new_vtype,
+							   vect_simple_var);
+				gimple_set_lhs (call, var);
+				vect_finish_stmt_generation (vinfo, stmt_info,
+							     call, gsi);
+				tree op
+				  = build1 (VIEW_CONVERT_EXPR, vectype, var);
+				new_stmt
+				  = gimple_build_assign (vec_dest,
+							 VIEW_CONVERT_EXPR, op);
+			      }
+			  }
+			else
 			  {
-			    tree new_vtype
-			      = build_vector_type_for_mode (qi_type, new_vmode);
-			    tree var = vect_get_new_ssa_name (new_vtype,
-							      vect_simple_var);
-			    gimple_set_lhs (call, var);
-			    vect_finish_stmt_generation (vinfo, stmt_info, call,
-							 gsi);
-			    tree op = build1 (VIEW_CONVERT_EXPR, vectype, var);
-			    new_stmt
-			      = gimple_build_assign (vec_dest,
-						     VIEW_CONVERT_EXPR, op);
+			    gcc_assert (final_mask);
+			    call = gimple_build_call_internal (IFN_MASK_LOAD, 3,
+							       dataref_ptr, ptr,
+							       final_mask);
+			    gimple_call_set_nothrow (call, true);
+			    new_stmt = call;
+			    data_ref = NULL_TREE;
 			  }
 		      }
 		    else