@@ -93,6 +93,11 @@ (define_c_enum "unspec"
UNSPEC_MMA_XXMTACC
UNSPEC_MMA_VECTOR_PAIR_MEMORY
UNSPEC_DM_ASSEMBLE_ACC
+ UNSPEC_DM_INSERT512_UPPER
+ UNSPEC_DM_INSERT512_LOWER
+ UNSPEC_DM_EXTRACT512
+ UNSPEC_DMR_RELOAD_FROM_MEMORY
+ UNSPEC_DMR_RELOAD_TO_MEMORY
])
(define_c_enum "unspecv"
@@ -916,3 +921,150 @@ (define_insn "mma_<avvi4i4i4>"
[(set_attr "type" "mma")
(set_attr "prefixed" "yes")
(set_attr "isa" "dm,not_dm,not_dm")])
+
+
+;; TDOmode (i.e. __dmr).
+(define_expand "movtdo"
+ [(set (match_operand:TDO 0 "nonimmediate_operand")
+ (match_operand:TDO 1 "input_operand"))]
+ "TARGET_DENSE_MATH"
+{
+ rs6000_emit_move (operands[0], operands[1], TDOmode);
+ DONE;
+})
+
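+;; The *movtdo pattern below covers six alternatives: VSX<-mem, mem<-VSX,
+;; VSX<-VSX, DMR<-VSX, DMR<-DMR, and VSX<-DMR.  Only the DMR<-DMR copy is a
+;; single instruction (dmmr); the others are split after reload.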
+(define_insn_and_split "*movtdo"
+ [(set (match_operand:TDO 0 "nonimmediate_operand" "=wa,m,wa,wD,wD,wa")
+ (match_operand:TDO 1 "input_operand" "m,wa,wa,wa,wD,wD"))]
+ "TARGET_DENSE_MATH
+ && (gpc_reg_operand (operands[0], TDOmode)
+ || gpc_reg_operand (operands[1], TDOmode))"
+ "@
+ #
+ #
+ #
+ #
+ dmmr %0,%1
+ #"
+ "&& reload_completed
+   && (!dmr_operand (operands[0], TDOmode)
+       || !dmr_operand (operands[1], TDOmode))"
+ [(const_int 0)]
+{
+ rtx op0 = operands[0];
+ rtx op1 = operands[1];
+
+ if (REG_P (op0) && REG_P (op1))
+ {
+ int regno0 = REGNO (op0);
+ int regno1 = REGNO (op1);
+
+ if (DMR_REGNO_P (regno0) && VSX_REGNO_P (regno1))
+ {
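+	  /* A 1,024-bit value held in VSX registers occupies 8 consecutive
+	     registers; each 512-bit half is a 4-register XOmode block, so
+	     the lower half starts at regno1 + 4.  */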
+ rtx op1_upper = gen_rtx_REG (XOmode, regno1);
+ rtx op1_lower = gen_rtx_REG (XOmode, regno1 + 4);
+ emit_insn (gen_movtdo_insert512_upper (op0, op1_upper));
+ emit_insn (gen_movtdo_insert512_lower (op0, op0, op1_lower));
+ DONE;
+ }
+
+ else if (VSX_REGNO_P (regno0) && DMR_REGNO_P (regno1))
+ {
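+	  /* The dmxxextfdmr512 immediate selects the upper (0) or lower (1)
+	     512 bits of the DMR register.  */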
+ rtx op0_upper = gen_rtx_REG (XOmode, regno0);
+ rtx op0_lower = gen_rtx_REG (XOmode, regno0 + 4);
+ emit_insn (gen_movtdo_extract512 (op0_upper, op1, const0_rtx));
+ emit_insn (gen_movtdo_extract512 (op0_lower, op1, const1_rtx));
+ DONE;
+ }
+ }
+
+ rs6000_split_multireg_move (operands[0], operands[1]);
+ DONE;
+}
+ [(set_attr "type" "vecload,vecstore,vecmove,vecmove,vecmove,vecmove")
+ (set_attr "length" "*,*,32,8,*,8")
+ (set_attr "max_prefixed_insns" "4,4,*,*,*,*")])
+
+;; Move from VSX registers to DMR registers via two insert 512-bit
+;; instructions.
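+;; The final immediate operand of dmxxinstdmr512 selects the half of the
+;; DMR that is written: 0 for the upper 512 bits, 1 for the lower 512 bits.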
+(define_insn "movtdo_insert512_upper"
+ [(set (match_operand:TDO 0 "dmr_operand" "=wD")
+ (unspec:TDO [(match_operand:XO 1 "vsx_register_operand" "wa")]
+ UNSPEC_DM_INSERT512_UPPER))]
+ "TARGET_DENSE_MATH"
+ "dmxxinstdmr512 %0,%1,%Y1,0"
+ [(set_attr "type" "mma")])
+
+(define_insn "movtdo_insert512_lower"
+ [(set (match_operand:TDO 0 "dmr_operand" "=wD")
+ (unspec:TDO [(match_operand:TDO 1 "dmr_operand" "0")
+ (match_operand:XO 2 "vsx_register_operand" "wa")]
+ UNSPEC_DM_INSERT512_LOWER))]
+ "TARGET_DENSE_MATH"
+ "dmxxinstdmr512 %0,%2,%Y2,1"
+ [(set_attr "type" "mma")])
+
+;; Move from DMR registers to VSX registers via two extract 512-bit
+;; instructions.
+(define_insn "movtdo_extract512"
+ [(set (match_operand:XO 0 "vsx_register_operand" "=wa")
+ (unspec:XO [(match_operand:TDO 1 "dmr_operand" "wD")
+ (match_operand 2 "const_0_to_1_operand" "n")]
+ UNSPEC_DM_EXTRACT512))]
+ "TARGET_DENSE_MATH"
+ "dmxxextfdmr512 %0,%Y0,%1,%2"
+ [(set_attr "type" "mma")])
+
+;; Reload DMR registers from memory
+(define_insn_and_split "reload_dmr_from_memory"
+ [(set (match_operand:TDO 0 "dmr_operand" "=wD")
+ (unspec:TDO [(match_operand:TDO 1 "memory_operand" "m")]
+ UNSPEC_DMR_RELOAD_FROM_MEMORY))
+ (clobber (match_operand:XO 2 "vsx_register_operand" "=wa"))]
+ "TARGET_DENSE_MATH"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+{
+ rtx dest = operands[0];
+ rtx src = operands[1];
+ rtx tmp = operands[2];
+ rtx mem_upper = adjust_address (src, XOmode, BYTES_BIG_ENDIAN ? 0 : 32);
+ rtx mem_lower = adjust_address (src, XOmode, BYTES_BIG_ENDIAN ? 32 : 0);
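+  /* In little endian mode the two halves are swapped in memory, matching
+     the multi-register move convention where the last register gets the
+     first memory location.  */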
+
+ emit_move_insn (tmp, mem_upper);
+ emit_insn (gen_movtdo_insert512_upper (dest, tmp));
+
+ emit_move_insn (tmp, mem_lower);
+ emit_insn (gen_movtdo_insert512_lower (dest, dest, tmp));
+ DONE;
+}
+ [(set_attr "length" "16")
+ (set_attr "max_prefixed_insns" "2")
+ (set_attr "type" "vecload")])
+
+;; Reload dense math registers to memory
+(define_insn_and_split "reload_dmr_to_memory"
+ [(set (match_operand:TDO 0 "memory_operand" "=m")
+ (unspec:TDO [(match_operand:TDO 1 "dmr_operand" "wD")]
+ UNSPEC_DMR_RELOAD_TO_MEMORY))
+ (clobber (match_operand:XO 2 "vsx_register_operand" "=wa"))]
+ "TARGET_DENSE_MATH"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+{
+ rtx dest = operands[0];
+ rtx src = operands[1];
+ rtx tmp = operands[2];
+ rtx mem_upper = adjust_address (dest, XOmode, BYTES_BIG_ENDIAN ? 0 : 32);
+ rtx mem_lower = adjust_address (dest, XOmode, BYTES_BIG_ENDIAN ? 32 : 0);
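+  /* Mirror of reload_dmr_from_memory: extract each 512-bit half into the
+     VSX temporary and store it.  */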
+
+ emit_insn (gen_movtdo_extract512 (tmp, src, const0_rtx));
+ emit_move_insn (mem_upper, tmp);
+
+ emit_insn (gen_movtdo_extract512 (tmp, src, const1_rtx));
+ emit_move_insn (mem_lower, tmp);
+ DONE;
+}
+ [(set_attr "length" "16")
+   (set_attr "max_prefixed_insns" "2")
+   (set_attr "type" "vecstore")])
@@ -495,6 +495,8 @@ const char *rs6000_type_string (tree type_node)
return "__vector_pair";
else if (type_node == vector_quad_type_node)
return "__vector_quad";
+ else if (type_node == dmr_type_node)
+ return "__dmr";
return "unknown";
}
@@ -781,6 +783,17 @@ rs6000_init_builtins (void)
t = build_qualified_type (vector_quad_type_node, TYPE_QUAL_CONST);
ptr_vector_quad_type_node = build_pointer_type (t);
+ dmr_type_node = make_node (OPAQUE_TYPE);
+ SET_TYPE_MODE (dmr_type_node, TDOmode);
+ TYPE_SIZE (dmr_type_node) = bitsize_int (GET_MODE_BITSIZE (TDOmode));
+ TYPE_PRECISION (dmr_type_node) = GET_MODE_BITSIZE (TDOmode);
+ TYPE_SIZE_UNIT (dmr_type_node) = size_int (GET_MODE_SIZE (TDOmode));
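+  /* A __dmr value is 1,024 bits wide but only requires 512-bit alignment.  */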
+ SET_TYPE_ALIGN (dmr_type_node, 512);
+ TYPE_USER_ALIGN (dmr_type_node) = 0;
+ lang_hooks.types.register_builtin_type (dmr_type_node, "__dmr");
+ t = build_qualified_type (dmr_type_node, TYPE_QUAL_CONST);
+ ptr_dmr_type_node = build_pointer_type (t);
+
tdecl = add_builtin_type ("__bool char", bool_char_type_node);
TYPE_NAME (bool_char_type_node) = tdecl;
@@ -437,7 +437,8 @@ rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
if (cfun
&& !cfun->machine->mma_return_type_error
&& TREE_TYPE (cfun->decl) == fntype
- && (TYPE_MODE (type) == OOmode || TYPE_MODE (type) == XOmode))
+ && (TYPE_MODE (type) == OOmode || TYPE_MODE (type) == XOmode
+ || TYPE_MODE (type) == TDOmode))
{
/* Record we have now handled function CFUN, so the next time we
are called, we do not re-report the same error. */
@@ -1641,6 +1642,16 @@ rs6000_function_arg (cumulative_args_t cum_v, const function_arg_info &arg)
return NULL_RTX;
}
+ if (mode == TDOmode)
+ {
+ if (TYPE_CANONICAL (type) != NULL_TREE)
+ type = TYPE_CANONICAL (type);
+ error ("invalid use of dense math operand of type %qs as a function "
+ "parameter",
+ IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (type))));
+ return NULL_RTX;
+ }
+
/* Return a marker to indicate whether CR1 needs to set or clear the
bit that V.4 uses to say fp args were passed in registers.
Assume that we don't need the marker for software floating point,
@@ -86,3 +86,7 @@ PARTIAL_INT_MODE (TI, 128, PTI);
/* Modes used by __vector_pair and __vector_quad. */
OPAQUE_MODE (OO, 32);
OPAQUE_MODE (XO, 64);
+
+/* Mode used by __dmr.  */
+OPAQUE_MODE (TDO, 128);
+
@@ -1837,7 +1837,9 @@ rs6000_hard_regno_nregs_internal (int regno, machine_mode mode)
128-bit floating point that can go in vector registers, which has VSX
memory addressing. */
if (FP_REGNO_P (regno))
- reg_size = (VECTOR_MEM_VSX_P (mode) || VECTOR_ALIGNMENT_P (mode)
+ reg_size = (VECTOR_MEM_VSX_P (mode)
+ || VECTOR_ALIGNMENT_P (mode)
+ || mode == TDOmode
? UNITS_PER_VSX_WORD
: UNITS_PER_FP_WORD);
@@ -1871,9 +1873,9 @@ rs6000_hard_regno_mode_ok_uncached (int regno, machine_mode mode)
/* On ISA 3.1 (power10), MMA accumulator modes need FPR registers divisible
by 4.
- If dense math is enabled, allow all VSX registers plus the DMR registers.
- We need to make sure we don't cross between the boundary of FPRs and
- traditional Altiviec registers. */
+ If dense math is enabled, allow all VSX registers plus the dense math
+     registers.  We need to make sure we don't cross the boundary between
+     FPRs and traditional Altivec registers.  */
if (mode == XOmode)
{
if (TARGET_MMA && !TARGET_DENSE_MATH)
@@ -1895,7 +1897,27 @@ rs6000_hard_regno_mode_ok_uncached (int regno, machine_mode mode)
return 0;
}
- /* No other types other than XOmode can go in DMRs. */
+ /* Dense math register modes need DMR registers or VSX registers divisible by
+     2.  We need to make sure we don't cross the boundary between FPRs and
+     traditional Altivec registers.  */
+ if (mode == TDOmode)
+ {
+ if (!TARGET_DENSE_MATH)
+ return 0;
+
+ if (DMR_REGNO_P (regno))
+ return 1;
+
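+      /* TDOmode needs 8 consecutive VSX registers starting at an even
+	 register, and the block of 8 must not cross the boundary between
+	 the FPRs and the Altivec registers.  */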
+ if (FP_REGNO_P (regno))
+ return ((regno & 1) == 0 && regno <= LAST_FPR_REGNO - 7);
+
+ if (ALTIVEC_REGNO_P (regno))
+ return ((regno & 1) == 0 && regno <= LAST_ALTIVEC_REGNO - 7);
+
+ return 0;
+ }
+
+  /* No modes other than XOmode or TDOmode can go in DMRs.  */
if (DMR_REGNO_P (regno))
return 0;
@@ -2003,9 +2025,11 @@ rs6000_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
GPR registers, and TImode can go in any GPR as well as VSX registers (PR
57744).
- Similarly, don't allow OOmode (vector pair, restricted to even VSX
- registers) or XOmode (vector quad, restricted to FPR registers divisible
- by 4) to tie with other modes.
+   Similarly, don't allow OOmode (vector pair), XOmode (vector quad), or
+   TDOmode (dense math register) to tie with other modes.  Vector pairs are
+   restricted to even/odd VSX registers.  Without dense math, vector quads
+   are limited to FPR registers divisible by 4.  With dense math, vector
+   quads are limited to even VSX registers or DMR registers.
Altivec/VSX vector tests were moved ahead of scalar float mode, so that IEEE
128-bit floating point on VSX systems ties with other vectors. */
@@ -2014,7 +2038,8 @@ static bool
rs6000_modes_tieable_p (machine_mode mode1, machine_mode mode2)
{
if (mode1 == PTImode || mode1 == OOmode || mode1 == XOmode
- || mode2 == PTImode || mode2 == OOmode || mode2 == XOmode)
+ || mode1 == TDOmode || mode2 == PTImode || mode2 == OOmode
+ || mode2 == XOmode || mode2 == TDOmode)
return mode1 == mode2;
if (ALTIVEC_OR_VSX_VECTOR_MODE (mode1))
@@ -2305,6 +2330,7 @@ rs6000_debug_reg_global (void)
V4DFmode,
OOmode,
XOmode,
+ TDOmode,
CCmode,
CCUNSmode,
CCEQmode,
@@ -2670,7 +2696,7 @@ rs6000_setup_reg_addr_masks (void)
/* Special case DMR registers. */
if (rc == RELOAD_REG_DMR)
{
- if (TARGET_DENSE_MATH && m2 == XOmode)
+ if (TARGET_DENSE_MATH && (m2 == XOmode || m2 == TDOmode))
{
addr_mask = RELOAD_REG_VALID;
reg_addr[m].addr_mask[rc] = addr_mask;
@@ -2777,12 +2803,14 @@ rs6000_setup_reg_addr_masks (void)
/* Vector pairs can do both indexed and offset loads if the
instructions are enabled, otherwise they can only do offset loads
- since it will be broken into two vector moves. Vector quads can
- only do offset loads. If the user restricted generation of either
- of the LXVP or STXVP instructions, do not allow indexed mode so
- that we can split the load/store. */
+ since it will be broken into two vector moves. If the user
+ restricted generation of either of the LXVP or STXVP instructions,
+ do not allow indexed mode so that we can split the load/store.
+
+	    Vector quads and dense math 1,024-bit registers can only do offset
+ loads. */
else if ((addr_mask != 0) && TARGET_MMA
- && (m2 == OOmode || m2 == XOmode))
+ && (m2 == OOmode || m2 == XOmode || m2 == TDOmode))
{
addr_mask |= RELOAD_REG_OFFSET;
if (rc == RELOAD_REG_FPR || rc == RELOAD_REG_VMX)
@@ -3012,6 +3040,14 @@ rs6000_init_hard_regno_mode_ok (bool global_init_p)
rs6000_vector_align[XOmode] = 512;
}
+  /* Add support for 1,024-bit DMR registers.  */
+ if (TARGET_DENSE_MATH)
+ {
+ rs6000_vector_unit[TDOmode] = VECTOR_NONE;
+ rs6000_vector_mem[TDOmode] = VECTOR_VSX;
+ rs6000_vector_align[TDOmode] = 512;
+ }
+
/* Register class constraints for the constraints that depend on compile
switches. When the VSX code was added, different constraints were added
based on the type (DFmode, V2DFmode, V4SFmode). For the vector types, all
@@ -3225,6 +3261,12 @@ rs6000_init_hard_regno_mode_ok (bool global_init_p)
}
}
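+  /* DMRs have no load or store instructions, so register reload handlers
+     that bounce a 1,024-bit value through a VSX temporary.  */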
+ if (TARGET_DENSE_MATH)
+ {
+ reg_addr[TDOmode].reload_load = CODE_FOR_reload_dmr_from_memory;
+ reg_addr[TDOmode].reload_store = CODE_FOR_reload_dmr_to_memory;
+ }
+
/* Precalculate HARD_REGNO_NREGS. */
for (r = 0; HARD_REGISTER_NUM_P (r); ++r)
for (m = 0; m < NUM_MACHINE_MODES; ++m)
@@ -8802,12 +8844,15 @@ reg_offset_addressing_ok_p (machine_mode mode)
return mode_supports_dq_form (mode);
break;
- /* The vector pair/quad types support offset addressing if the
- underlying vectors support offset addressing. */
+ /* The vector pair/quad types and the dense math types support offset
+ addressing if the underlying vectors support offset addressing. */
case E_OOmode:
case E_XOmode:
return TARGET_MMA;
+ case E_TDOmode:
+ return TARGET_DENSE_MATH;
+
case E_SDmode:
/* If we can do direct load/stores of SDmode, restrict it to reg+reg
addressing for the LFIWZX and STFIWX instructions. */
@@ -11325,6 +11370,12 @@ rs6000_emit_move (rtx dest, rtx source, machine_mode mode)
(mode == OOmode) ? "__vector_pair" : "__vector_quad");
break;
+ case E_TDOmode:
+ if (CONST_INT_P (operands[1]))
+ error ("%qs is an opaque type, and you cannot set it to constants",
+ "__dmr");
+ break;
+
case E_SImode:
case E_DImode:
/* Use default pattern for address of ELF small data */
@@ -12788,7 +12839,7 @@ rs6000_secondary_reload_simple_move (enum rs6000_reg_type to_type,
/* We can transfer between VSX registers and DMR registers without needing
extra registers. */
- if (TARGET_DENSE_MATH && mode == XOmode
+ if (TARGET_DENSE_MATH && (mode == XOmode || mode == TDOmode)
&& ((to_type == DMR_REG_TYPE && from_type == VSX_REG_TYPE)
|| (to_type == VSX_REG_TYPE && from_type == DMR_REG_TYPE)))
return true;
@@ -13589,6 +13640,9 @@ rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
if (mode == XOmode)
return TARGET_DENSE_MATH ? VSX_REGS : FLOAT_REGS;
+ if (mode == TDOmode)
+ return VSX_REGS;
+
if (GET_MODE_CLASS (mode) == MODE_INT)
return GENERAL_REGS;
}
@@ -13712,8 +13766,9 @@ rs6000_secondary_reload_class (enum reg_class rclass, machine_mode mode,
else
regno = -1;
- /* DMR registers don't have loads or stores. We have to go through the VSX
- registers to load XOmode (vector quad). */
+  /* Dense math registers don't have loads or stores.  We have to go through
+     the VSX registers to load XOmode (vector quad) and TDOmode (1,024-bit
+     DMR).  */
if (TARGET_DENSE_MATH && rclass == DM_REGS)
return VSX_REGS;
@@ -20789,6 +20844,8 @@ rs6000_mangle_type (const_tree type)
return "u13__vector_pair";
if (type == vector_quad_type_node)
return "u13__vector_quad";
+ if (type == dmr_type_node)
+ return "u5__dmr";
/* For all other types, use the default mangling. */
return NULL;
@@ -22913,6 +22970,10 @@ rs6000_dmr_register_move_cost (machine_mode mode, reg_class_t rclass)
if (mode == XOmode)
return reg_move_base;
+ /* __dmr (i.e. TDOmode) is transferred in 2 instructions. */
+ else if (mode == TDOmode)
+ return reg_move_base * 2;
+
else
return reg_move_base * 2 * hard_regno_nregs (FIRST_DMR_REGNO, mode);
}
@@ -27606,9 +27667,10 @@ rs6000_split_multireg_move (rtx dst, rtx src)
mode = GET_MODE (dst);
nregs = hard_regno_nregs (reg, mode);
- /* If we have a vector quad register for MMA, and this is a load or store,
- see if we can use vector paired load/stores. */
- if (mode == XOmode && TARGET_MMA
+  /* If we have a vector quad register for MMA or a DMR register for dense
+     math, and this is a load or store, see if we can use vector paired
+     load/stores.  */
+ if ((mode == XOmode || mode == TDOmode) && TARGET_MMA
&& (MEM_P (dst) || MEM_P (src)))
{
reg_mode = OOmode;
@@ -27616,7 +27678,7 @@ rs6000_split_multireg_move (rtx dst, rtx src)
}
/* If we have a vector pair/quad mode, split it into two/four separate
vectors. */
- else if (mode == OOmode || mode == XOmode)
+ else if (mode == OOmode || mode == XOmode || mode == TDOmode)
reg_mode = V1TImode;
else if (FP_REGNO_P (reg))
reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
@@ -27662,13 +27724,13 @@ rs6000_split_multireg_move (rtx dst, rtx src)
return;
}
- /* The __vector_pair and __vector_quad modes are multi-register
- modes, so if we have to load or store the registers, we have to be
- careful to properly swap them if we're in little endian mode
- below. This means the last register gets the first memory
- location. We also need to be careful of using the right register
- numbers if we are splitting XO to OO. */
- if (mode == OOmode || mode == XOmode)
+ /* The __vector_pair, __vector_quad, and __dmr modes are multi-register
+ modes, so if we have to load or store the registers, we have to be careful
+ to properly swap them if we're in little endian mode below. This means
+ the last register gets the first memory location. We also need to be
+     careful of using the right register numbers if we are splitting XO or
+     TDO to OO.  */
+ if (mode == OOmode || mode == XOmode || mode == TDOmode)
{
nregs = hard_regno_nregs (reg, mode);
int reg_mode_nregs = hard_regno_nregs (reg, reg_mode);
@@ -27805,7 +27867,7 @@ rs6000_split_multireg_move (rtx dst, rtx src)
overlap. */
int i;
-      /* XO/OO are opaque so cannot use subregs. */
+      /* XO/OO/TDO are opaque so cannot use subregs.  */
- if (mode == OOmode || mode == XOmode )
+ if (mode == OOmode || mode == XOmode || mode == TDOmode)
{
for (i = nregs - 1; i >= 0; i--)
{
@@ -27979,7 +28041,7 @@ rs6000_split_multireg_move (rtx dst, rtx src)
continue;
-      /* XO/OO are opaque so cannot use subregs. */
+      /* XO/OO/TDO are opaque so cannot use subregs.  */
- if (mode == OOmode || mode == XOmode )
+ if (mode == OOmode || mode == XOmode || mode == TDOmode)
{
rtx dst_i = gen_rtx_REG (reg_mode, REGNO (dst) + j);
rtx src_i = gen_rtx_REG (reg_mode, REGNO (src) + j);
@@ -28961,7 +29023,8 @@ rs6000_invalid_conversion (const_tree fromtype, const_tree totype)
if (frommode != tomode)
{
- /* Do not allow conversions to/from XOmode and OOmode types. */
+ /* Do not allow conversions to/from XOmode, OOmode, and TDOmode
+ types. */
if (frommode == XOmode)
return N_("invalid conversion from type %<__vector_quad%>");
if (tomode == XOmode)
@@ -28970,6 +29033,10 @@ rs6000_invalid_conversion (const_tree fromtype, const_tree totype)
return N_("invalid conversion from type %<__vector_pair%>");
if (tomode == OOmode)
return N_("invalid conversion to type %<__vector_pair%>");
+ if (frommode == TDOmode)
+ return N_("invalid conversion from type %<__dmr%>");
+ if (tomode == TDOmode)
+ return N_("invalid conversion to type %<__dmr%>");
}
/* Conversion allowed. */
@@ -1006,7 +1006,8 @@ enum data_align { align_abi, align_opt, align_both };
/* Modes that are not vectors, but require vector alignment. Treat these like
vectors in terms of loads and stores. */
#define VECTOR_ALIGNMENT_P(MODE) \
- (FLOAT128_VECTOR_P (MODE) || (MODE) == OOmode || (MODE) == XOmode)
+ (FLOAT128_VECTOR_P (MODE) || (MODE) == OOmode || (MODE) == XOmode \
+ || (MODE) == TDOmode)
#define ALTIVEC_VECTOR_MODE(MODE) \
((MODE) == V16QImode \
@@ -2292,6 +2293,7 @@ enum rs6000_builtin_type_index
RS6000_BTI_const_str, /* pointer to const char * */
RS6000_BTI_vector_pair, /* unsigned 256-bit types (vector pair). */
RS6000_BTI_vector_quad, /* unsigned 512-bit types (vector quad). */
+ RS6000_BTI_dmr, /* unsigned 1,024-bit types (dmr). */
RS6000_BTI_const_ptr_void, /* const pointer to void */
RS6000_BTI_ptr_V16QI,
RS6000_BTI_ptr_V1TI,
@@ -2330,6 +2332,7 @@ enum rs6000_builtin_type_index
RS6000_BTI_ptr_dfloat128,
RS6000_BTI_ptr_vector_pair,
RS6000_BTI_ptr_vector_quad,
+ RS6000_BTI_ptr_dmr,
RS6000_BTI_ptr_long_long,
RS6000_BTI_ptr_long_long_unsigned,
RS6000_BTI_MAX
@@ -2387,6 +2390,7 @@ enum rs6000_builtin_type_index
#define const_str_type_node (rs6000_builtin_types[RS6000_BTI_const_str])
#define vector_pair_type_node (rs6000_builtin_types[RS6000_BTI_vector_pair])
#define vector_quad_type_node (rs6000_builtin_types[RS6000_BTI_vector_quad])
+#define dmr_type_node (rs6000_builtin_types[RS6000_BTI_dmr])
#define pcvoid_type_node (rs6000_builtin_types[RS6000_BTI_const_ptr_void])
#define ptr_V16QI_type_node (rs6000_builtin_types[RS6000_BTI_ptr_V16QI])
#define ptr_V1TI_type_node (rs6000_builtin_types[RS6000_BTI_ptr_V1TI])
@@ -2425,6 +2429,7 @@ enum rs6000_builtin_type_index
#define ptr_dfloat128_type_node (rs6000_builtin_types[RS6000_BTI_ptr_dfloat128])
#define ptr_vector_pair_type_node (rs6000_builtin_types[RS6000_BTI_ptr_vector_pair])
#define ptr_vector_quad_type_node (rs6000_builtin_types[RS6000_BTI_ptr_vector_quad])
+#define ptr_dmr_type_node (rs6000_builtin_types[RS6000_BTI_ptr_dmr])
#define ptr_long_long_integer_type_node (rs6000_builtin_types[RS6000_BTI_ptr_long_long])
#define ptr_long_long_unsigned_type_node (rs6000_builtin_types[RS6000_BTI_ptr_long_long_unsigned])
new file mode 100644
@@ -0,0 +1,63 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target powerpc_dense_math_ok } */
+/* { dg-options "-mdejagnu-cpu=future -O2" } */
+
+/* Test basic load/store for __dmr type. */
+
+#ifndef CONSTRAINT
+#if defined(USE_D)
+#define CONSTRAINT "d"
+
+#elif defined(USE_V)
+#define CONSTRAINT "v"
+
+#elif defined(USE_WA)
+#define CONSTRAINT "wa"
+
+#else
+#define CONSTRAINT "wD"
+#endif
+#endif
+const char constraint[] = CONSTRAINT;
+
+void foo_mem_asm (__dmr *p, __dmr *q)
+{
+  /* 4 LXVP instructions.  */
+ __dmr vq = *p;
+
+ /* 2 DMXXINSTDMR512 instructions to transfer VSX to DMR. */
+ __asm__ ("# foo (" CONSTRAINT ") %A0" : "+" CONSTRAINT (vq));
+ /* 2 DMXXEXTFDMR512 instructions to transfer DMR to VSX. */
+
+  /* 4 STXVP instructions.  */
+ *q = vq;
+}
+
+void foo_mem_asm2 (__dmr *p, __dmr *q)
+{
+  /* 4 LXVP instructions.  */
+ __dmr vq = *p;
+ __dmr vq2;
+ __dmr vq3;
+
+ /* 2 DMXXINSTDMR512 instructions to transfer VSX to DMR. */
+ __asm__ ("# foo1 (" CONSTRAINT ") %A0" : "+" CONSTRAINT (vq));
+ /* 2 DMXXEXTFDMR512 instructions to transfer DMR to VSX. */
+
+ vq2 = vq;
+ __asm__ ("# foo2 (wa) %0" : "+wa" (vq2));
+
+  /* 4 STXVP instructions.  */
+ *q = vq2;
+}
+
+void foo_mem (__dmr *p, __dmr *q)
+{
+  /* 4 LXVP, 4 STXVP instructions, no DMR transfer.  */
+ *q = *p;
+}
+
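+/* Each __dmr value is 128 bytes, so a full load or store takes 4 vector
+   pair instructions; the three functions above account for 12 lxvp and
+   12 stxvp.  */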
+/* { dg-final { scan-assembler-times {\mdmxxextfdmr512\M} 4 } } */
+/* { dg-final { scan-assembler-times {\mdmxxinstdmr512\M} 4 } } */
+/* { dg-final { scan-assembler-times {\mlxvp\M} 12 } } */
+/* { dg-final { scan-assembler-times {\mstxvp\M} 12 } } */