@@ -120,6 +120,17 @@ extern void riscv_run_selftests (void);
namespace riscv_vector {
#define RVV_VLMAX gen_rtx_REG (Pmode, X0_REGNUM)
+/* Encoding of the 3-bit vlmul field of the VTYPE CSR.  Values 0-3 select
+   the integer multipliers LMUL = 1,2,4,8; value 4 is reserved by the RVV
+   spec; values 5-7 select the fractional multipliers 1/8, 1/4, 1/2.  */
+enum vlmul_type
+{
+ LMUL_1 = 0,
+ LMUL_2 = 1,
+ LMUL_4 = 2,
+ LMUL_8 = 3,
+ LMUL_RESERVED = 4,
+ /* Fractional LMUL: F8 = 1/8, F4 = 1/4, F2 = 1/2.  */
+ LMUL_F8 = 5,
+ LMUL_F4 = 6,
+ LMUL_F2 = 7,
+};
/* Routines implemented in riscv-vector-builtins.cc. */
extern void init_builtins (void);
extern const char *mangle_builtin_type (const_tree);
@@ -132,6 +143,8 @@ extern rtx expand_builtin (unsigned int, tree, rtx);
extern bool const_vec_all_same_in_range_p (rtx, HOST_WIDE_INT, HOST_WIDE_INT);
extern bool legitimize_move (rtx, rtx, machine_mode);
extern void emit_pred_op (unsigned, rtx, rtx, machine_mode);
+extern enum vlmul_type get_vlmul (machine_mode);
+extern unsigned int get_ratio (machine_mode);
enum tail_policy
{
TAIL_UNDISTURBED = 0,
@@ -214,4 +214,45 @@ legitimize_move (rtx dest, rtx src, machine_mode mask_mode)
return true;
}
+/* VTYPE information for machine_mode. */
+struct mode_vtype_group
+{
+ /* Per-mode vlmul and SEW/LMUL-ratio tables, one pair for each supported
+    minimum vector length (VLEN = 32 and VLEN = 64), indexed by
+    machine_mode.  */
+ enum vlmul_type vlmul_for_min_vlen32[NUM_MACHINE_MODES];
+ uint8_t ratio_for_min_vlen32[NUM_MACHINE_MODES];
+ enum vlmul_type vlmul_for_min_vlen64[NUM_MACHINE_MODES];
+ uint8_t ratio_for_min_vlen64[NUM_MACHINE_MODES];
+ /* The constructor fills every table entry by expanding each ENTRY row
+    of riscv-vector-switch.def into four assignments.  */
+ mode_vtype_group ()
+ {
+#define ENTRY(MODE, REQUIREMENT, VLMUL_FOR_MIN_VLEN32, RATIO_FOR_MIN_VLEN32, \
+ VLMUL_FOR_MIN_VLEN64, RATIO_FOR_MIN_VLEN64) \
+ vlmul_for_min_vlen32[MODE##mode] = VLMUL_FOR_MIN_VLEN32; \
+ ratio_for_min_vlen32[MODE##mode] = RATIO_FOR_MIN_VLEN32; \
+ vlmul_for_min_vlen64[MODE##mode] = VLMUL_FOR_MIN_VLEN64; \
+ ratio_for_min_vlen64[MODE##mode] = RATIO_FOR_MIN_VLEN64;
+#include "riscv-vector-switch.def"
+ }
+};
+
+/* Singleton holding the vtype lookup tables, built once at startup.  */
+static mode_vtype_group mode_vtype_infos;
+
+/* Return the vtype.vlmul field encoding for MODE, looked up in the table
+   matching the configured minimum vector length (TARGET_MIN_VLEN).  */
+enum vlmul_type
+get_vlmul (machine_mode mode)
+{
+ if (TARGET_MIN_VLEN == 32)
+ return mode_vtype_infos.vlmul_for_min_vlen32[mode];
+ else
+ return mode_vtype_infos.vlmul_for_min_vlen64[mode];
+}
+
+/* Return the SEW/LMUL ratio recorded for MODE, taken from the table that
+   matches the configured minimum vector length (TARGET_MIN_VLEN).  */
+unsigned int
+get_ratio (machine_mode mode)
+{
+ return TARGET_MIN_VLEN == 32 ? mode_vtype_infos.ratio_for_min_vlen32[mode]
+ : mode_vtype_infos.ratio_for_min_vlen64[mode];
+}
+
} // namespace riscv_vector
@@ -80,7 +80,8 @@ TODO: FP16 vector needs support of 'zvfh', we don't support it yet. */
/* Return 'REQUIREMENT' for machine_mode 'MODE'.
For example: 'MODE' = VNx64BImode needs TARGET_MIN_VLEN > 32. */
#ifndef ENTRY
-#define ENTRY(MODE, REQUIREMENT)
+#define ENTRY(MODE, REQUIREMENT, VLMUL_FOR_MIN_VLEN32, RATIO_FOR_MIN_VLEN32, \
+ VLMUL_FOR_MIN_VLEN64, RATIO_FOR_MIN_VLEN64)
#endif
/* Flag of FP32 vector. */
#ifndef TARGET_VECTOR_FP32
@@ -94,66 +95,68 @@ TODO: FP16 vector needs support of 'zvfh', we don't support it yet. */
#endif
/* Mask modes. Disable VNx64BImode when TARGET_MIN_VLEN == 32. */
-ENTRY (VNx64BI, TARGET_MIN_VLEN > 32)
-ENTRY (VNx32BI, true)
-ENTRY (VNx16BI, true)
-ENTRY (VNx8BI, true)
-ENTRY (VNx4BI, true)
-ENTRY (VNx2BI, true)
-ENTRY (VNx1BI, true)
+/* A mask mode with more elements needs a larger LMUL (same pattern as the
+   SEW = 8 integer modes below); ratio = 64 / LMUL-scaled element count.
+   VNx64BI requires TARGET_MIN_VLEN > 32, so its min_vlen32 columns are
+   reserved.  */
+ENTRY (VNx64BI, TARGET_MIN_VLEN > 32, LMUL_RESERVED, 0, LMUL_8, 1)
+ENTRY (VNx32BI, true, LMUL_8, 1, LMUL_4, 2)
+ENTRY (VNx16BI, true, LMUL_4, 2, LMUL_2, 4)
+ENTRY (VNx8BI, true, LMUL_2, 4, LMUL_1, 8)
+ENTRY (VNx4BI, true, LMUL_1, 8, LMUL_F2, 16)
+ENTRY (VNx2BI, true, LMUL_F2, 16, LMUL_F4, 32)
+ENTRY (VNx1BI, true, LMUL_F4, 32, LMUL_F8, 64)
/* SEW = 8. Disable VNx64QImode when TARGET_MIN_VLEN == 32. */
-ENTRY (VNx64QI, TARGET_MIN_VLEN > 32)
-ENTRY (VNx32QI, true)
-ENTRY (VNx16QI, true)
-ENTRY (VNx8QI, true)
-ENTRY (VNx4QI, true)
-ENTRY (VNx2QI, true)
-ENTRY (VNx1QI, true)
+/* VNx64QI requires TARGET_MIN_VLEN > 32: its min_vlen32 columns must be
+   reserved and the valid (LMUL_8, ratio 1) pair belongs in the min_vlen64
+   columns.  */
+ENTRY (VNx64QI, TARGET_MIN_VLEN > 32, LMUL_RESERVED, 0, LMUL_8, 1)
+ENTRY (VNx32QI, true, LMUL_8, 1, LMUL_4, 2)
+ENTRY (VNx16QI, true, LMUL_4, 2, LMUL_2, 4)
+ENTRY (VNx8QI, true, LMUL_2, 4, LMUL_1, 8)
+ENTRY (VNx4QI, true, LMUL_1, 8, LMUL_F2, 16)
+ENTRY (VNx2QI, true, LMUL_F2, 16, LMUL_F4, 32)
+ENTRY (VNx1QI, true, LMUL_F4, 32, LMUL_F8, 64)
/* SEW = 16. Disable VNx32HImode when TARGET_MIN_VLEN == 32. */
-ENTRY (VNx32HI, TARGET_MIN_VLEN > 32)
-ENTRY (VNx16HI, true)
-ENTRY (VNx8HI, true)
-ENTRY (VNx4HI, true)
-ENTRY (VNx2HI, true)
-ENTRY (VNx1HI, true)
+/* VNx32HI requires TARGET_MIN_VLEN > 32: min_vlen32 columns reserved,
+   (LMUL_8, ratio 2) in the min_vlen64 columns.  */
+ENTRY (VNx32HI, TARGET_MIN_VLEN > 32, LMUL_RESERVED, 0, LMUL_8, 2)
+ENTRY (VNx16HI, true, LMUL_8, 2, LMUL_4, 4)
+ENTRY (VNx8HI, true, LMUL_4, 4, LMUL_2, 8)
+ENTRY (VNx4HI, true, LMUL_2, 8, LMUL_1, 16)
+ENTRY (VNx2HI, true, LMUL_1, 16, LMUL_F2, 32)
+ENTRY (VNx1HI, true, LMUL_F2, 32, LMUL_F4, 64)
/* TODO:Disable all FP16 vector, enable them when 'zvfh' is supported. */
-ENTRY (VNx32HF, false)
-ENTRY (VNx16HF, false)
-ENTRY (VNx8HF, false)
-ENTRY (VNx4HF, false)
-ENTRY (VNx2HF, false)
-ENTRY (VNx1HF, false)
+/* Same layout as the SEW = 16 integer modes: VNx32HF only fits when
+   TARGET_MIN_VLEN > 32, so its min_vlen32 columns are reserved.  */
+ENTRY (VNx32HF, false, LMUL_RESERVED, 0, LMUL_8, 2)
+ENTRY (VNx16HF, false, LMUL_8, 2, LMUL_4, 4)
+ENTRY (VNx8HF, false, LMUL_4, 4, LMUL_2, 8)
+ENTRY (VNx4HF, false, LMUL_2, 8, LMUL_1, 16)
+ENTRY (VNx2HF, false, LMUL_1, 16, LMUL_F2, 32)
+ENTRY (VNx1HF, false, LMUL_F2, 32, LMUL_F4, 64)
/* SEW = 32. Disable VNx16SImode when TARGET_MIN_VLEN == 32.
For single-precision floating-point, we need TARGET_VECTOR_FP32 ==
RVV_ENABLE. */
-ENTRY (VNx16SI, TARGET_MIN_VLEN > 32)
-ENTRY (VNx8SI, true)
-ENTRY (VNx4SI, true)
-ENTRY (VNx2SI, true)
-ENTRY (VNx1SI, true)
-
-ENTRY (VNx16SF, TARGET_VECTOR_FP32 && (TARGET_MIN_VLEN > 32))
-ENTRY (VNx8SF, TARGET_VECTOR_FP32)
-ENTRY (VNx4SF, TARGET_VECTOR_FP32)
-ENTRY (VNx2SF, TARGET_VECTOR_FP32)
-ENTRY (VNx1SF, TARGET_VECTOR_FP32)
+/* SEW = 32, so ratio = 32 / LMUL (e.g. LMUL_2 -> 16, LMUL_1 -> 32,
+   LMUL_F2 -> 64).  VNx16SI requires TARGET_MIN_VLEN > 32, so its
+   min_vlen32 columns are reserved.  */
+ENTRY (VNx16SI, TARGET_MIN_VLEN > 32, LMUL_RESERVED, 0, LMUL_8, 4)
+ENTRY (VNx8SI, true, LMUL_8, 4, LMUL_4, 8)
+ENTRY (VNx4SI, true, LMUL_4, 8, LMUL_2, 16)
+ENTRY (VNx2SI, true, LMUL_2, 16, LMUL_1, 32)
+ENTRY (VNx1SI, true, LMUL_1, 32, LMUL_F2, 64)
+
+/* Same layout as the SEW = 32 integer modes above (ratio = 32 / LMUL);
+   VNx16SF only fits when TARGET_MIN_VLEN > 32.  */
+ENTRY (VNx16SF, TARGET_VECTOR_FP32 && (TARGET_MIN_VLEN > 32), LMUL_RESERVED, 0,
+ LMUL_8, 4)
+ENTRY (VNx8SF, TARGET_VECTOR_FP32, LMUL_8, 4, LMUL_4, 8)
+ENTRY (VNx4SF, TARGET_VECTOR_FP32, LMUL_4, 8, LMUL_2, 16)
+ENTRY (VNx2SF, TARGET_VECTOR_FP32, LMUL_2, 16, LMUL_1, 32)
+ENTRY (VNx1SF, TARGET_VECTOR_FP32, LMUL_1, 32, LMUL_F2, 64)
/* SEW = 64. Enable when TARGET_MIN_VLEN > 32.
For double-precision floating-point, we need TARGET_VECTOR_FP64 ==
RVV_ENABLE. */
-ENTRY (VNx8DI, TARGET_MIN_VLEN > 32)
-ENTRY (VNx4DI, TARGET_MIN_VLEN > 32)
-ENTRY (VNx2DI, TARGET_MIN_VLEN > 32)
-ENTRY (VNx1DI, TARGET_MIN_VLEN > 32)
-
-ENTRY (VNx8DF, TARGET_VECTOR_FP64 && (TARGET_MIN_VLEN > 32))
-ENTRY (VNx4DF, TARGET_VECTOR_FP64)
-ENTRY (VNx2DF, TARGET_VECTOR_FP64)
-ENTRY (VNx1DF, TARGET_VECTOR_FP64)
+/* SEW = 64 modes need TARGET_MIN_VLEN > 32, so every min_vlen32 column is
+   LMUL_RESERVED; ratio = 64 / LMUL in the min_vlen64 columns.  */
+ENTRY (VNx8DI, TARGET_MIN_VLEN > 32, LMUL_RESERVED, 0, LMUL_8, 8)
+ENTRY (VNx4DI, TARGET_MIN_VLEN > 32, LMUL_RESERVED, 0, LMUL_4, 16)
+ENTRY (VNx2DI, TARGET_MIN_VLEN > 32, LMUL_RESERVED, 0, LMUL_2, 32)
+ENTRY (VNx1DI, TARGET_MIN_VLEN > 32, LMUL_RESERVED, 0, LMUL_1, 64)
+
+ENTRY (VNx8DF, TARGET_VECTOR_FP64 && (TARGET_MIN_VLEN > 32), LMUL_RESERVED, 0,
+ LMUL_8, 8)
+ENTRY (VNx4DF, TARGET_VECTOR_FP64, LMUL_RESERVED, 0, LMUL_4, 16)
+ENTRY (VNx2DF, TARGET_VECTOR_FP64, LMUL_RESERVED, 0, LMUL_2, 32)
+ENTRY (VNx1DF, TARGET_VECTOR_FP64, LMUL_RESERVED, 0, LMUL_1, 64)
#undef TARGET_VECTOR_FP32
#undef TARGET_VECTOR_FP64
@@ -955,7 +955,7 @@ riscv_valid_lo_sum_p (enum riscv_symbol_type sym_type, machine_mode mode,
bool
riscv_v_ext_vector_mode_p (machine_mode mode)
{
-#define ENTRY(MODE, REQUIREMENT) \
+#define ENTRY(MODE, REQUIREMENT, ...) \
case MODE##mode: \
return REQUIREMENT;
switch (mode)
@@ -34,6 +34,191 @@
UNSPEC_VPREDICATE
])
+;; Sentinel for integer insn attributes that do not apply to an insn.
+(define_constants [
+ (INVALID_ATTRIBUTE 255)
+])
+
+;; True if the insn type is an RVV instruction that implicitly uses the
+;; VTYPE global status register.
+;; VTYPE has 4 fields: SEW, LMUL, TA, MA.  An instruction that needs any
+;; of these fields to be set is marked true in this attribute.
+(define_attr "has_vtype_op" "false,true"
+ (cond [(eq_attr "type" "vlde,vste,vldm,vstm,vlds,vsts,\
+ vldux,vldox,vstux,vstox,vldff,\
+ vialu,viwalu,vext,vicalu,vshift,vnshift,vicmp,\
+ vimul,vidiv,viwmul,vimuladd,viwmuladd,vimerge,vimov,\
+ vsalu,vaalu,vsmul,vsshift,vnclip,\
+ vfalu,vfwalu,vfmul,vfdiv,vfwmul,vfmuladd,vfwmuladd,vfsqrt,vfrecp,\
+ vfcmp,vfsgnj,vfclass,vfmerge,vfmov,\
+ vfcvtitof,vfcvtftoi,vfwcvtitof,vfwcvtftoi,\
+ vfwcvtftof,vfncvtitof,vfncvtftoi,vfncvtftof,\
+ vired,viwred,vfred,vfredo,vfwred,vfwredo,\
+ vmalu,vmpop,vmffs,vmsfs,vmiota,vmidx,vimovvx,vimovxv,vfmovvf,vfmovfv,\
+ vislide,vislide1,vfslide1,vgather,vcompress")
+ (const_string "true")]
+ (const_string "false")))
+
+;; True if the insn type is an RVV instruction that implicitly uses the
+;; VL global status register, i.e. one that needs a vector length to be
+;; specified.  (Note this list omits vimovvx/vfmovvf, which appear in
+;; has_vtype_op above.)
+(define_attr "has_vl_op" "false,true"
+ (cond [(eq_attr "type" "vlde,vste,vldm,vstm,vlds,vsts,\
+ vldux,vldox,vstux,vstox,vldff,\
+ vialu,viwalu,vext,vicalu,vshift,vnshift,vicmp,\
+ vimul,vidiv,viwmul,vimuladd,viwmuladd,vimerge,vimov,\
+ vsalu,vaalu,vsmul,vsshift,vnclip,\
+ vfalu,vfwalu,vfmul,vfdiv,vfwmul,vfmuladd,vfwmuladd,vfsqrt,vfrecp,\
+ vfcmp,vfsgnj,vfclass,vfmerge,vfmov,\
+ vfcvtitof,vfcvtftoi,vfwcvtitof,vfwcvtftoi,\
+ vfwcvtftof,vfncvtitof,vfncvtftoi,vfncvtftof,\
+ vired,viwred,vfred,vfredo,vfwred,vfwredo,\
+ vmalu,vmpop,vmffs,vmsfs,vmiota,vmidx,vimovxv,vfmovfv,\
+ vislide,vislide1,vfslide1,vgather,vcompress")
+ (const_string "true")]
+ (const_string "false")))
+
+;; The default SEW of an RVV instruction.  This attribute doesn't mean the
+;; instruction necessarily requires an SEW check -- e.g. vlm.v only needs a
+;; ratio check.  However, a default SEW value is still needed when emitting
+;; vsetvl, since the vsetvl encoding has no field for the ratio.
+(define_attr "sew" ""
+ (cond [(eq_attr "mode" "VNx1QI,VNx2QI,VNx4QI,VNx8QI,VNx16QI,VNx32QI,VNx64QI,\
+ VNx1BI,VNx2BI,VNx4BI,VNx8BI,VNx16BI,VNx32BI,VNx64BI")
+ (const_int 8)
+ (eq_attr "mode" "VNx1HI,VNx2HI,VNx4HI,VNx8HI,VNx16HI,VNx32HI")
+ (const_int 16)
+ (eq_attr "mode" "VNx1SI,VNx2SI,VNx4SI,VNx8SI,VNx16SI,\
+ VNx1SF,VNx2SF,VNx4SF,VNx8SF,VNx16SF")
+ (const_int 32)
+ (eq_attr "mode" "VNx1DI,VNx2DI,VNx4DI,VNx8DI,\
+ VNx1DF,VNx2DF,VNx4DF,VNx8DF")
+ (const_int 64)]
+ (const_int INVALID_ATTRIBUTE)))
+
+;; The vlmul setting of the insn's mode.  Resolved at compile time through
+;; riscv_vector::get_vlmul because the value depends on TARGET_MIN_VLEN.
+;; Bool and float modes share the entry of the integer mode with the same
+;; element count.
+(define_attr "vlmul" ""
+ (cond [(eq_attr "mode" "VNx1QI,VNx1BI")
+ (symbol_ref "riscv_vector::get_vlmul(E_VNx1QImode)")
+ (eq_attr "mode" "VNx2QI,VNx2BI")
+ (symbol_ref "riscv_vector::get_vlmul(E_VNx2QImode)")
+ (eq_attr "mode" "VNx4QI,VNx4BI")
+ (symbol_ref "riscv_vector::get_vlmul(E_VNx4QImode)")
+ (eq_attr "mode" "VNx8QI,VNx8BI")
+ (symbol_ref "riscv_vector::get_vlmul(E_VNx8QImode)")
+ (eq_attr "mode" "VNx16QI,VNx16BI")
+ (symbol_ref "riscv_vector::get_vlmul(E_VNx16QImode)")
+ (eq_attr "mode" "VNx32QI,VNx32BI")
+ (symbol_ref "riscv_vector::get_vlmul(E_VNx32QImode)")
+ (eq_attr "mode" "VNx64QI,VNx64BI")
+ (symbol_ref "riscv_vector::get_vlmul(E_VNx64QImode)")
+ (eq_attr "mode" "VNx1HI")
+ (symbol_ref "riscv_vector::get_vlmul(E_VNx1HImode)")
+ (eq_attr "mode" "VNx2HI")
+ (symbol_ref "riscv_vector::get_vlmul(E_VNx2HImode)")
+ (eq_attr "mode" "VNx4HI")
+ (symbol_ref "riscv_vector::get_vlmul(E_VNx4HImode)")
+ (eq_attr "mode" "VNx8HI")
+ (symbol_ref "riscv_vector::get_vlmul(E_VNx8HImode)")
+ (eq_attr "mode" "VNx16HI")
+ (symbol_ref "riscv_vector::get_vlmul(E_VNx16HImode)")
+ (eq_attr "mode" "VNx32HI")
+ (symbol_ref "riscv_vector::get_vlmul(E_VNx32HImode)")
+ (eq_attr "mode" "VNx1SI,VNx1SF")
+ (symbol_ref "riscv_vector::get_vlmul(E_VNx1SImode)")
+ (eq_attr "mode" "VNx2SI,VNx2SF")
+ (symbol_ref "riscv_vector::get_vlmul(E_VNx2SImode)")
+ (eq_attr "mode" "VNx4SI,VNx4SF")
+ (symbol_ref "riscv_vector::get_vlmul(E_VNx4SImode)")
+ (eq_attr "mode" "VNx8SI,VNx8SF")
+ (symbol_ref "riscv_vector::get_vlmul(E_VNx8SImode)")
+ (eq_attr "mode" "VNx16SI,VNx16SF")
+ (symbol_ref "riscv_vector::get_vlmul(E_VNx16SImode)")
+ (eq_attr "mode" "VNx1DI,VNx1DF")
+ (symbol_ref "riscv_vector::get_vlmul(E_VNx1DImode)")
+ (eq_attr "mode" "VNx2DI,VNx2DF")
+ (symbol_ref "riscv_vector::get_vlmul(E_VNx2DImode)")
+ (eq_attr "mode" "VNx4DI,VNx4DF")
+ (symbol_ref "riscv_vector::get_vlmul(E_VNx4DImode)")
+ (eq_attr "mode" "VNx8DI,VNx8DF")
+ (symbol_ref "riscv_vector::get_vlmul(E_VNx8DImode)")]
+ (const_int INVALID_ATTRIBUTE)))
+
+;; The SEW/LMUL ratio of the insn's mode, for instructions whose vsetvl
+;; requirement is expressed by ratio rather than by SEW and LMUL
+;; individually.  vimov/vfmov don't care about the ratio, so it is marked
+;; invalid for them.
+(define_attr "ratio" ""
+ (cond [(eq_attr "type" "vimov,vfmov")
+ (const_int INVALID_ATTRIBUTE)
+ (eq_attr "mode" "VNx1QI,VNx1BI")
+ (symbol_ref "riscv_vector::get_ratio(E_VNx1QImode)")
+ (eq_attr "mode" "VNx2QI,VNx2BI")
+ (symbol_ref "riscv_vector::get_ratio(E_VNx2QImode)")
+ (eq_attr "mode" "VNx4QI,VNx4BI")
+ (symbol_ref "riscv_vector::get_ratio(E_VNx4QImode)")
+ (eq_attr "mode" "VNx8QI,VNx8BI")
+ (symbol_ref "riscv_vector::get_ratio(E_VNx8QImode)")
+ (eq_attr "mode" "VNx16QI,VNx16BI")
+ (symbol_ref "riscv_vector::get_ratio(E_VNx16QImode)")
+ (eq_attr "mode" "VNx32QI,VNx32BI")
+ (symbol_ref "riscv_vector::get_ratio(E_VNx32QImode)")
+ (eq_attr "mode" "VNx64QI,VNx64BI")
+ (symbol_ref "riscv_vector::get_ratio(E_VNx64QImode)")
+ (eq_attr "mode" "VNx1HI")
+ (symbol_ref "riscv_vector::get_ratio(E_VNx1HImode)")
+ (eq_attr "mode" "VNx2HI")
+ (symbol_ref "riscv_vector::get_ratio(E_VNx2HImode)")
+ (eq_attr "mode" "VNx4HI")
+ (symbol_ref "riscv_vector::get_ratio(E_VNx4HImode)")
+ (eq_attr "mode" "VNx8HI")
+ (symbol_ref "riscv_vector::get_ratio(E_VNx8HImode)")
+ (eq_attr "mode" "VNx16HI")
+ (symbol_ref "riscv_vector::get_ratio(E_VNx16HImode)")
+ (eq_attr "mode" "VNx32HI")
+ (symbol_ref "riscv_vector::get_ratio(E_VNx32HImode)")
+ (eq_attr "mode" "VNx1SI,VNx1SF")
+ (symbol_ref "riscv_vector::get_ratio(E_VNx1SImode)")
+ (eq_attr "mode" "VNx2SI,VNx2SF")
+ (symbol_ref "riscv_vector::get_ratio(E_VNx2SImode)")
+ (eq_attr "mode" "VNx4SI,VNx4SF")
+ (symbol_ref "riscv_vector::get_ratio(E_VNx4SImode)")
+ (eq_attr "mode" "VNx8SI,VNx8SF")
+ (symbol_ref "riscv_vector::get_ratio(E_VNx8SImode)")
+ (eq_attr "mode" "VNx16SI,VNx16SF")
+ (symbol_ref "riscv_vector::get_ratio(E_VNx16SImode)")
+ (eq_attr "mode" "VNx1DI,VNx1DF")
+ (symbol_ref "riscv_vector::get_ratio(E_VNx1DImode)")
+ (eq_attr "mode" "VNx2DI,VNx2DF")
+ (symbol_ref "riscv_vector::get_ratio(E_VNx2DImode)")
+ (eq_attr "mode" "VNx4DI,VNx4DF")
+ (symbol_ref "riscv_vector::get_ratio(E_VNx4DImode)")
+ (eq_attr "mode" "VNx8DI,VNx8DF")
+ (symbol_ref "riscv_vector::get_ratio(E_VNx8DImode)")]
+ (const_int INVALID_ATTRIBUTE)))
+
+;; The index of operand[] to get the merge op.
+;; NOTE(review): assumes every listed type places its merge operand at
+;; index 2 in the pattern -- verify when adding new insn types.
+(define_attr "merge_op_idx" ""
+ (cond [(eq_attr "type" "vlde,vste,vimov,vfmov,vldm,vstm,vlds,vmalu")
+ (const_int 2)]
+ (const_int INVALID_ATTRIBUTE)))
+
+;; The index of operand[] to get the avl (application vector length) op.
+;; NOTE(review): assumes index 4 for every listed type -- verify against
+;; the corresponding patterns when adding new insn types.
+(define_attr "vl_op_idx" ""
+ (cond [(eq_attr "type" "vlde,vste,vimov,vfmov,vldm,vstm,vlds,vmalu")
+ (const_int 4)]
+ (const_int INVALID_ATTRIBUTE)))
+
+;; The index of operand[] to get the tail policy op.
+;; NOTE(review): assumes index 5 for every listed type -- verify against
+;; the corresponding patterns when adding new insn types.
+(define_attr "tail_policy_op_idx" ""
+ (cond [(eq_attr "type" "vlde,vste,vimov,vfmov,vlds")
+ (const_int 5)]
+ (const_int INVALID_ATTRIBUTE)))
+
+;; The index of operand[] to get the mask policy op.
+;; NOTE(review): assumes index 6 for every listed type -- verify against
+;; the corresponding patterns when adding new insn types.
+(define_attr "mask_policy_op_idx" ""
+ (cond [(eq_attr "type" "vlde,vste,vlds")
+ (const_int 6)]
+ (const_int INVALID_ATTRIBUTE)))
+
;; -----------------------------------------------------------------
;; ---- Miscellaneous Operations
;; -----------------------------------------------------------------