[2/4] Add support for integer vector pair built-ins

Message ID ZU64mCbRp3nb8OJL@cowardly-lion.the-meissners.org
State Unresolved
Series Add vector pair builtins to PowerPC

Checks

Context                 Check     Description
snail/gcc-patch-check   warning   Git am fail log

Commit Message

Michael Meissner Nov. 10, 2023, 11:11 p.m. UTC
This patch adds a series of built-in functions that allow users to write loops
that perform a number of simple operations using the __vector_pair type.  The
__vector_pair type is an opaque type.  These built-in functions keep the two
128-bit vectors within the __vector_pair together and split the operation into
two 128-bit vector operations after register allocation.

This patch provides vector pair operations for 8, 16, 32, and 64-bit integers.
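
As an illustration (not part of this patch), a loop using these built-ins
might look like the sketch below.  The function name and the assumption that
the element count is a multiple of 8 are hypothetical; dereferencing
__vector_pair pointers follows the style of the new tests.

/* Hypothetical sketch: add two arrays of 32-bit integers, processing
   8 ints (one __vector_pair) per iteration.  Assumes n is a multiple
   of 8.  */
void
vpair_add_arrays (int *dst, int *a, int *b, unsigned long n)
{
  __vector_pair *vdst = (__vector_pair *) dst;
  __vector_pair *va = (__vector_pair *) a;
  __vector_pair *vb = (__vector_pair *) b;

  for (unsigned long i = 0; i < n / 8; i++)
    vdst[i] = __builtin_vpair_i32_add (va[i], vb[i]);
}

Per the test expectations below, each pair operation is expected to expand to
roughly 2 lxvp loads, 2 vadduwm adds (after the split), and 1 stxvp store.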

I have built and tested these patches on:

    *	A little endian power10 server using --with-cpu=power10
    *	A little endian power9 server using --with-cpu=power9
    *	A big endian power9 server using --with-cpu=power9.

Can I check this patch into the master branch after the preceding patch is
checked in?

2023-11-09  Michael Meissner  <meissner@linux.ibm.com>

gcc/

	* config/rs6000/rs6000-builtins.def (__builtin_vpair_i8*): Add built-in
	functions for integer vector pairs.
	(__builtin_vpair_i16*): Likewise.
	(__builtin_vpair_i32*): Likewise.
	(__builtin_vpair_i64*): Likewise.
	* config/rs6000/vector-pair.md (UNSPEC_VPAIR_V32QI): New unspec.
	(UNSPEC_VPAIR_V16HI): Likewise.
	(UNSPEC_VPAIR_V8SI): Likewise.
	(UNSPEC_VPAIR_V4DI): Likewise.
	(VP_INT_BINARY): New iterator for integer vector pair.
	(vp_insn): Add support for integer vector pairs.
	(vp_ireg): New code attribute for integer vector pairs.
	(vp_ipredicate): Likewise.
	(VP_INT): New int iterator for integer vector pairs.
	(VP_VEC_MODE): New int attribute for integer vector pairs.
	(vp_pmode): Likewise.
	(vp_vmode): Likewise.
	(vp_neg_reg): New int attribute for the negate scratch register.
	(vpair_neg_<vp_pmode>2): Add integer vector pair support insns.
	(vpair_not_<vp_pmode>2): Likewise.
	(vpair_<vp_insn>_<vp_pmode>3): Likewise.
	(*vpair_andc_<vp_pmode>): Likewise.
	(*vpair_iorc_<vp_pmode>): Likewise.
	(*vpair_nand_<vp_pmode>_1): Likewise.
	(*vpair_nand_<vp_pmode>_2): Likewise.
	(*vpair_nor_<vp_pmode>_1): Likewise.
	(*vpair_nor_<vp_pmode>_2): Likewise.
	* doc/extend.texi (PowerPC Vector Pair Built-in Functions): Document the
	integer vector pair built-in functions.

gcc/testsuite/

	* gcc.target/powerpc/vector-pair-5.c: New test.
	* gcc.target/powerpc/vector-pair-6.c: New test.
	* gcc.target/powerpc/vector-pair-7.c: New test.
	* gcc.target/powerpc/vector-pair-8.c: New test.
---
 gcc/config/rs6000/rs6000-builtins.def         | 144 +++++++++
 gcc/config/rs6000/vector-pair.md              | 280 +++++++++++++++++-
 gcc/doc/extend.texi                           |  72 +++++
 .../gcc.target/powerpc/vector-pair-5.c        | 193 ++++++++++++
 .../gcc.target/powerpc/vector-pair-6.c        | 193 ++++++++++++
 .../gcc.target/powerpc/vector-pair-7.c        | 193 ++++++++++++
 .../gcc.target/powerpc/vector-pair-8.c        | 194 ++++++++++++
 7 files changed, 1266 insertions(+), 3 deletions(-)
 create mode 100644 gcc/testsuite/gcc.target/powerpc/vector-pair-5.c
 create mode 100644 gcc/testsuite/gcc.target/powerpc/vector-pair-6.c
 create mode 100644 gcc/testsuite/gcc.target/powerpc/vector-pair-7.c
 create mode 100644 gcc/testsuite/gcc.target/powerpc/vector-pair-8.c
  

Patch

diff --git a/gcc/config/rs6000/rs6000-builtins.def b/gcc/config/rs6000/rs6000-builtins.def
index 89b248b50ef..3b2db39c1ab 100644
--- a/gcc/config/rs6000/rs6000-builtins.def
+++ b/gcc/config/rs6000/rs6000-builtins.def
@@ -4183,3 +4183,147 @@ 
 
   v256 __builtin_vpair_f64_sub (v256, v256);
     VPAIR_F64_SUB vpair_sub_v4df3 {mma,pair}
+
+;; vector pair built-in functions for 32 8-bit unsigned char or
+;; signed char values
+
+  v256 __builtin_vpair_i8_add (v256, v256);
+    VPAIR_I8_ADD vpair_add_v32qi3 {mma,pair}
+
+  v256 __builtin_vpair_i8_and (v256, v256);
+    VPAIR_I8_AND vpair_and_v32qi3 {mma,pair}
+
+  v256 __builtin_vpair_i8_ior (v256, v256);
+    VPAIR_I8_IOR vpair_ior_v32qi3 {mma,pair}
+
+  v256 __builtin_vpair_i8_max (v256, v256);
+    VPAIR_I8_MAX vpair_smax_v32qi3 {mma,pair}
+
+  v256 __builtin_vpair_i8_min (v256, v256);
+    VPAIR_I8_MIN vpair_smin_v32qi3 {mma,pair}
+
+  v256 __builtin_vpair_i8_neg (v256);
+    VPAIR_I8_NEG vpair_neg_v32qi2 {mma,pair}
+
+  v256 __builtin_vpair_i8_not (v256);
+    VPAIR_I8_NOT vpair_not_v32qi2 {mma,pair}
+
+  v256 __builtin_vpair_i8_sub (v256, v256);
+    VPAIR_I8_SUB vpair_sub_v32qi3 {mma,pair}
+
+  v256 __builtin_vpair_i8_xor (v256, v256);
+    VPAIR_I8_XOR vpair_xor_v32qi3 {mma,pair}
+
+  v256 __builtin_vpair_i8u_max (v256, v256);
+    VPAIR_I8U_MAX vpair_umax_v32qi3 {mma,pair}
+
+  v256 __builtin_vpair_i8u_min (v256, v256);
+    VPAIR_I8U_MIN vpair_umin_v32qi3 {mma,pair}
+
+;; vector pair built-in functions for 16 16-bit unsigned short or
+;; signed short values
+
+  v256 __builtin_vpair_i16_add (v256, v256);
+    VPAIR_I16_ADD vpair_add_v16hi3 {mma,pair}
+
+  v256 __builtin_vpair_i16_and (v256, v256);
+    VPAIR_I16_AND vpair_and_v16hi3 {mma,pair}
+
+  v256 __builtin_vpair_i16_ior (v256, v256);
+    VPAIR_I16_IOR vpair_ior_v16hi3 {mma,pair}
+
+  v256 __builtin_vpair_i16_max (v256, v256);
+    VPAIR_I16_MAX vpair_smax_v16hi3 {mma,pair}
+
+  v256 __builtin_vpair_i16_min (v256, v256);
+    VPAIR_I16_MIN vpair_smin_v16hi3 {mma,pair}
+
+  v256 __builtin_vpair_i16_neg (v256);
+    VPAIR_I16_NEG vpair_neg_v16hi2 {mma,pair}
+
+  v256 __builtin_vpair_i16_not (v256);
+    VPAIR_I16_NOT vpair_not_v16hi2 {mma,pair}
+
+  v256 __builtin_vpair_i16_sub (v256, v256);
+    VPAIR_I16_SUB vpair_sub_v16hi3 {mma,pair}
+
+  v256 __builtin_vpair_i16_xor (v256, v256);
+    VPAIR_I16_XOR vpair_xor_v16hi3 {mma,pair}
+
+  v256 __builtin_vpair_i16u_max (v256, v256);
+    VPAIR_I16U_MAX vpair_umax_v16hi3 {mma,pair}
+
+  v256 __builtin_vpair_i16u_min (v256, v256);
+    VPAIR_I16U_MIN vpair_umin_v16hi3 {mma,pair}
+
+;; vector pair built-in functions for 8 32-bit unsigned int or
+;; signed int values
+
+  v256 __builtin_vpair_i32_add (v256, v256);
+    VPAIR_I32_ADD vpair_add_v8si3 {mma,pair}
+
+  v256 __builtin_vpair_i32_and (v256, v256);
+    VPAIR_I32_AND vpair_and_v8si3 {mma,pair}
+
+  v256 __builtin_vpair_i32_ior (v256, v256);
+    VPAIR_I32_IOR vpair_ior_v8si3 {mma,pair}
+
+  v256 __builtin_vpair_i32_max (v256, v256);
+    VPAIR_I32_MAX vpair_smax_v8si3 {mma,pair}
+
+  v256 __builtin_vpair_i32_min (v256, v256);
+    VPAIR_I32_MIN vpair_smin_v8si3 {mma,pair}
+
+  v256 __builtin_vpair_i32_neg (v256);
+    VPAIR_I32_NEG vpair_neg_v8si2 {mma,pair}
+
+  v256 __builtin_vpair_i32_not (v256);
+    VPAIR_I32_NOT vpair_not_v8si2 {mma,pair}
+
+  v256 __builtin_vpair_i32_sub (v256, v256);
+    VPAIR_I32_SUB vpair_sub_v8si3 {mma,pair}
+
+  v256 __builtin_vpair_i32_xor (v256, v256);
+    VPAIR_I32_XOR vpair_xor_v8si3 {mma,pair}
+
+  v256 __builtin_vpair_i32u_max (v256, v256);
+    VPAIR_I32U_MAX vpair_umax_v8si3 {mma,pair}
+
+  v256 __builtin_vpair_i32u_min (v256, v256);
+    VPAIR_I32U_MIN vpair_umin_v8si3 {mma,pair}
+
+;; vector pair built-in functions for 4 64-bit unsigned long long or
+;; signed long long values
+
+  v256 __builtin_vpair_i64_add (v256, v256);
+    VPAIR_I64_ADD vpair_add_v4di3 {mma,pair}
+
+  v256 __builtin_vpair_i64_and (v256, v256);
+    VPAIR_I64_AND vpair_and_v4di3 {mma,pair}
+
+  v256 __builtin_vpair_i64_ior (v256, v256);
+    VPAIR_I64_IOR vpair_ior_v4di3 {mma,pair}
+
+  v256 __builtin_vpair_i64_max (v256, v256);
+    VPAIR_I64_MAX vpair_smax_v4di3 {mma,pair}
+
+  v256 __builtin_vpair_i64_min (v256, v256);
+    VPAIR_I64_MIN vpair_smin_v4di3 {mma,pair}
+
+  v256 __builtin_vpair_i64_neg (v256);
+    VPAIR_I64_NEG vpair_neg_v4di2 {mma,pair}
+
+  v256 __builtin_vpair_i64_not (v256);
+    VPAIR_I64_NOT vpair_not_v4di2 {mma,pair}
+
+  v256 __builtin_vpair_i64_sub (v256, v256);
+    VPAIR_I64_SUB vpair_sub_v4di3 {mma,pair}
+
+  v256 __builtin_vpair_i64_xor (v256, v256);
+    VPAIR_I64_XOR vpair_xor_v4di3 {mma,pair}
+
+  v256 __builtin_vpair_i64u_max (v256, v256);
+    VPAIR_I64U_MAX vpair_umax_v4di3 {mma,pair}
+
+  v256 __builtin_vpair_i64u_min (v256, v256);
+    VPAIR_I64U_MIN vpair_umin_v4di3 {mma,pair}
diff --git a/gcc/config/rs6000/vector-pair.md b/gcc/config/rs6000/vector-pair.md
index 2dcac6a31e2..cd14430f47a 100644
--- a/gcc/config/rs6000/vector-pair.md
+++ b/gcc/config/rs6000/vector-pair.md
@@ -29,38 +29,102 @@ 
 (define_c_enum "unspec"
   [UNSPEC_VPAIR_V4DF
    UNSPEC_VPAIR_V8SF
+   UNSPEC_VPAIR_V32QI
+   UNSPEC_VPAIR_V16HI
+   UNSPEC_VPAIR_V8SI
+   UNSPEC_VPAIR_V4DI
    ])
 
 ;; Iterator doing unary/binary arithmetic on vector pairs
 (define_code_iterator VP_FP_UNARY  [abs neg])
 (define_code_iterator VP_FP_BINARY [minus mult plus smin smax])
 
+(define_code_iterator VP_INT_BINARY  [and ior minus plus smax smin umax umin xor])
+
 ;; Return the insn name from the VP_* code iterator
 (define_code_attr vp_insn [(abs      "abs")
+			   (and      "and")
+			   (ior      "ior")
 			   (minus    "sub")
 			   (mult     "mul")
+			   (not      "one_cmpl")
 			   (neg      "neg")
 			   (plus     "add")
 			   (smin     "smin")
 			   (smax     "smax")
+			   (umin     "umin")
+			   (umax     "umax")
 			   (xor      "xor")])
 
+;; Return the register constraint ("v" or "wa") for the integer code iterator
+;; used.  For arithmetic operations, we need to use "v" in order to use the
+;; Altivec instruction.  For logical operations, we can use wa.
+(define_code_attr vp_ireg [(and   "wa")
+			   (ior   "wa")
+			   (minus "v")
+			   (not   "wa")
+			   (neg   "v")
+			   (plus  "v")
+			   (smax  "v")
+			   (smin  "v")
+			   (umax  "v")
+			   (umin  "v")
+			   (xor   "wa")])
+
+;; Return the register predicate for the integer code iterator used
+(define_code_attr vp_ipredicate [(and   "vsx_register_operand")
+				 (ior   "vsx_register_operand")
+				 (minus "altivec_register_operand")
+				 (not   "vsx_register_operand")
+				 (neg   "altivec_register_operand")
+				 (plus  "altivec_register_operand")
+				 (smax  "altivec_register_operand")
+				 (smin  "altivec_register_operand")
+				 (umax  "altivec_register_operand")
+				 (umin  "altivec_register_operand")
+				 (xor   "vsx_register_operand")])
+
 ;; Iterator for creating the unspecs for vector pair built-ins
 (define_int_iterator VP_FP [UNSPEC_VPAIR_V4DF
 			    UNSPEC_VPAIR_V8SF])
 
+(define_int_iterator VP_INT [UNSPEC_VPAIR_V4DI
+			     UNSPEC_VPAIR_V8SI
+			     UNSPEC_VPAIR_V16HI
+			     UNSPEC_VPAIR_V32QI])
+
 ;; Map VP_* to vector mode of the arguments after they are split
 (define_int_attr VP_VEC_MODE [(UNSPEC_VPAIR_V4DF  "V2DF")
-			      (UNSPEC_VPAIR_V8SF  "V4SF")])
+			      (UNSPEC_VPAIR_V8SF  "V4SF")
+			      (UNSPEC_VPAIR_V32QI "V16QI")
+			      (UNSPEC_VPAIR_V16HI "V8HI")
+			      (UNSPEC_VPAIR_V8SI  "V4SI")
+			      (UNSPEC_VPAIR_V4DI  "V2DI")])
 
 ;; Map VP_* to a lower case name to identify the vector pair.
 (define_int_attr vp_pmode [(UNSPEC_VPAIR_V4DF  "v4df")
-			   (UNSPEC_VPAIR_V8SF  "v8sf")])
+			   (UNSPEC_VPAIR_V8SF  "v8sf")
+			   (UNSPEC_VPAIR_V32QI "v32qi")
+			   (UNSPEC_VPAIR_V16HI "v16hi")
+			   (UNSPEC_VPAIR_V8SI  "v8si")
+			   (UNSPEC_VPAIR_V4DI  "v4di")])
 
 ;; Map VP_* to a lower case name to identify the vector after the vector pair
 ;; has been split.
 (define_int_attr vp_vmode [(UNSPEC_VPAIR_V4DF  "v2df")
-			   (UNSPEC_VPAIR_V8SF  "v4sf")])
+			   (UNSPEC_VPAIR_V8SF  "v4sf")
+			   (UNSPEC_VPAIR_V32QI "v16qi")
+			   (UNSPEC_VPAIR_V16HI "v8hi")
+			   (UNSPEC_VPAIR_V8SI  "v4si")
+			   (UNSPEC_VPAIR_V4DI  "v2di")])
+
+;; Map VP_INT to constraints used for the negate scratch register.  For vectors
+;; of QI and HI, we need to change -a into 0 - a since we don't have a negate
+;; operation.  We do have a vnegw/vnegd operation for SI and DI modes.
+(define_int_attr vp_neg_reg [(UNSPEC_VPAIR_V32QI "&v")
+			     (UNSPEC_VPAIR_V16HI "&v")
+			     (UNSPEC_VPAIR_V8SI  "X")
+			     (UNSPEC_VPAIR_V4DI  "X")])
 
 
 ;; Vector pair floating point unary operations
@@ -327,3 +391,213 @@  (define_insn_and_split "*vpair_nfms_fpcontract_<vp_pmode>4"
 {
 }
   [(set_attr "length" "8")])
+
+
+;; Vector pair integer negate support.
+(define_insn_and_split "vpair_neg_<vp_pmode>2"
+  [(set (match_operand:OO 0 "altivec_register_operand" "=v")
+	(unspec:OO [(neg:OO
+		     (match_operand:OO 1 "altivec_register_operand" "v"))]
+		   VP_INT))
+   (clobber (match_scratch:<VP_VEC_MODE> 2 "=<vp_neg_reg>"))]
+  "TARGET_MMA"
+  "#"
+  "&& reload_completed"
+  [(set (match_dup 2) (match_dup 3))
+   (set (match_dup 4) (minus:<VP_VEC_MODE> (match_dup 2)
+					   (match_dup 5)))
+   (set (match_dup 6) (minus:<VP_VEC_MODE> (match_dup 2)
+					   (match_dup 7)))]
+{
+  unsigned reg0 = reg_or_subregno (operands[0]);
+  unsigned reg1 = reg_or_subregno (operands[1]);
+  machine_mode vmode = <VP_VEC_MODE>mode;
+
+  operands[3] = CONST0_RTX (vmode);
+
+  operands[4] = gen_rtx_REG (vmode, reg0);
+  operands[5] = gen_rtx_REG (vmode, reg1);
+
+  operands[6] = gen_rtx_REG (vmode, reg0 + 1);
+  operands[7] = gen_rtx_REG (vmode, reg1 + 1);
+
+  /* If the vector integer size is 32 or 64 bits, we can use the vneg{w,d}
+     instructions.  */
+  if (vmode == V4SImode)
+    {
+      emit_insn (gen_negv4si2 (operands[4], operands[5]));
+      emit_insn (gen_negv4si2 (operands[6], operands[7]));
+      DONE;
+    }
+  else if (vmode == V2DImode)
+    {
+      emit_insn (gen_negv2di2 (operands[4], operands[5]));
+      emit_insn (gen_negv2di2 (operands[6], operands[7]));
+      DONE;
+    }
+}
+  [(set_attr "length" "8")])
+
+;; Vector pair integer not support.
+(define_insn_and_split "vpair_not_<vp_pmode>2"
+  [(set (match_operand:OO 0 "vsx_register_operand" "=wa")
+	(unspec:OO [(not:OO (match_operand:OO 1 "vsx_register_operand" "wa"))]
+		   VP_INT))]
+  "TARGET_MMA"
+  "#"
+  "&& reload_completed"
+  [(const_int 0)]
+{
+  split_unary_vector_pair (<VP_VEC_MODE>mode, operands,
+			   gen_one_cmpl<vp_vmode>2);
+  DONE;
+}
+  [(set_attr "length" "8")])
+
+;; Vector pair integer binary operations.
+(define_insn_and_split "vpair_<vp_insn>_<vp_pmode>3"
+  [(set (match_operand:OO 0 "<vp_ipredicate>" "=<vp_ireg>")
+	(unspec:OO [(VP_INT_BINARY:OO
+		     (match_operand:OO 1 "<vp_ipredicate>" "<vp_ireg>")
+		     (match_operand:OO 2 "<vp_ipredicate>" "<vp_ireg>"))]
+		   VP_INT))]
+  "TARGET_MMA"
+  "#"
+  "&& reload_completed"
+  [(const_int 0)]
+{
+  split_binary_vector_pair (<VP_VEC_MODE>mode, operands,
+			    gen_<vp_insn><vp_vmode>3);
+  DONE;
+}
+  [(set_attr "length" "8")])
+
+;; Optimize vector pair a & ~b
+(define_insn_and_split "*vpair_andc_<vp_pmode>"
+  [(set (match_operand:OO 0 "vsx_register_operand" "=wa")
+	(unspec:OO [(and:OO
+		     (unspec:OO
+		      [(not:OO
+			(match_operand:OO 1 "vsx_register_operand" "wa"))]
+		      VP_INT)
+		     (match_operand:OO 2 "vsx_register_operand" "wa"))]
+		   VP_INT))]
+  "TARGET_MMA"
+  "#"
+  "&& reload_completed"
+  [(const_int 0)]
+{
+  split_binary_vector_pair (<VP_VEC_MODE>mode, operands,
+			    gen_andc<vp_vmode>3);
+  DONE;
+}
+  [(set_attr "length" "8")])
+
+;; Optimize vector pair a | ~b
+(define_insn_and_split "*vpair_iorc_<vp_pmode>"
+  [(set (match_operand:OO 0 "vsx_register_operand" "=wa")
+	(unspec:OO [(ior:OO
+		     (unspec:OO
+		      [(not:OO
+			(match_operand:OO 1 "vsx_register_operand" "wa"))]
+		      VP_INT)
+		     (match_operand:OO 2 "vsx_register_operand" "wa"))]
+		   VP_INT))]
+  "TARGET_MMA"
+  "#"
+  "&& reload_completed"
+  [(const_int 0)]
+{
+  split_binary_vector_pair (<VP_VEC_MODE>mode, operands,
+			    gen_orc<vp_vmode>3);
+  DONE;
+}
+  [(set_attr "length" "8")])
+
+;; Optimize vector pair ~(a & b) or ((~a) | (~b))
+(define_insn_and_split "*vpair_nand_<vp_pmode>_1"
+  [(set (match_operand:OO 0 "vsx_register_operand" "=wa")
+	(unspec:OO
+	 [(not:OO
+	   (unspec:OO [(and:OO
+			(match_operand:OO 1 "vsx_register_operand" "wa")
+			(match_operand:OO 2 "vsx_register_operand" "wa"))]
+		      VP_INT))]
+	 VP_INT))]
+  "TARGET_MMA"
+  "#"
+  "&& reload_completed"
+  [(const_int 0)]
+{
+  split_binary_vector_pair (<VP_VEC_MODE>mode, operands,
+			    gen_nand<vp_vmode>3);
+  DONE;
+}
+  [(set_attr "length" "8")])
+
+(define_insn_and_split "*vpair_nand_<vp_pmode>_2"
+  [(set (match_operand:OO 0 "vsx_register_operand" "=wa")
+	(unspec:OO
+	 [(ior:OO
+	   (unspec:OO
+	    [(not:OO
+	      (match_operand:OO 1 "vsx_register_operand" "wa"))]
+	    VP_INT)
+	   (unspec:OO
+	    [(not:OO
+	      (match_operand:OO 2 "vsx_register_operand" "wa"))]
+	    VP_INT))]
+	 VP_INT))]
+  "TARGET_MMA"
+  "#"
+  "&& reload_completed"
+  [(const_int 0)]
+{
+  split_binary_vector_pair (<VP_VEC_MODE>mode, operands,
+			    gen_nand<vp_vmode>3);
+  DONE;
+}
+  [(set_attr "length" "8")])
+
+;; Optimize vector pair ~(a | b) or ((~a) & (~b))
+(define_insn_and_split "*vpair_nor_<vp_pmode>_1"
+  [(set (match_operand:OO 0 "vsx_register_operand" "=wa")
+	(unspec:OO
+	 [(not:OO
+	   (unspec:OO [(ior:OO
+			(match_operand:OO 1 "vsx_register_operand" "wa")
+			(match_operand:OO 2 "vsx_register_operand" "wa"))]
+		      VP_INT))]
+	 VP_INT))]
+  "TARGET_MMA"
+  "#"
+  "&& reload_completed"
+  [(const_int 0)]
+{
+  split_binary_vector_pair (<VP_VEC_MODE>mode, operands,
+			    gen_nor<vp_vmode>3);
+  DONE;
+}
+  [(set_attr "length" "8")])
+
+(define_insn_and_split "*vpair_nor_<vp_pmode>_2"
+  [(set (match_operand:OO 0 "vsx_register_operand" "=wa")
+	(unspec:OO
+	 [(ior:OO
+	   (unspec:OO
+	    [(not:OO (match_operand:OO 1 "vsx_register_operand" "wa"))]
+	    VP_INT)
+	   (unspec:OO
+	    [(not:OO (match_operand:OO 2 "vsx_register_operand" "wa"))]
+	    VP_INT))]
+	 VP_INT))]
+  "TARGET_MMA"
+  "#"
+  "&& reload_completed"
+  [(const_int 0)]
+{
+  split_binary_vector_pair (<VP_VEC_MODE>mode, operands,
+			    gen_nor<vp_vmode>3);
+  DONE;
+}
+  [(set_attr "length" "8")])
diff --git a/gcc/doc/extend.texi b/gcc/doc/extend.texi
index a830ad06b90..ff7918c7a58 100644
--- a/gcc/doc/extend.texi
+++ b/gcc/doc/extend.texi
@@ -21414,6 +21414,78 @@  __vector_pair __builtin_vpair_f64_min (__vector_pair, __vector_pair);
 __vector_pair __builtin_vpair_f64_sub (__vector_pair, __vector_pair);
 @end smallexample
 
+The following built-in functions operate on pairs of
+@code{vector long long} or @code{vector unsigned long long} values:
+
+@smallexample
+__vector_pair __builtin_vpair_i64_add (__vector_pair, __vector_pair);
+__vector_pair __builtin_vpair_i64_and (__vector_pair, __vector_pair);
+__vector_pair __builtin_vpair_i64_ior (__vector_pair, __vector_pair);
+__vector_pair __builtin_vpair_i64_max (__vector_pair, __vector_pair);
+__vector_pair __builtin_vpair_i64_min (__vector_pair, __vector_pair);
+__vector_pair __builtin_vpair_i64_neg (__vector_pair);
+__vector_pair __builtin_vpair_i64_not (__vector_pair);
+__vector_pair __builtin_vpair_i64_sub (__vector_pair, __vector_pair);
+__vector_pair __builtin_vpair_i64_xor (__vector_pair, __vector_pair);
+
+__vector_pair __builtin_vpair_i64u_max (__vector_pair, __vector_pair);
+__vector_pair __builtin_vpair_i64u_min (__vector_pair, __vector_pair);
+@end smallexample
+
+The following built-in functions operate on pairs of
+@code{vector int} or @code{vector unsigned int} values:
+
+@smallexample
+__vector_pair __builtin_vpair_i32_add (__vector_pair, __vector_pair);
+__vector_pair __builtin_vpair_i32_and (__vector_pair, __vector_pair);
+__vector_pair __builtin_vpair_i32_ior (__vector_pair, __vector_pair);
+__vector_pair __builtin_vpair_i32_max (__vector_pair, __vector_pair);
+__vector_pair __builtin_vpair_i32_min (__vector_pair, __vector_pair);
+__vector_pair __builtin_vpair_i32_neg (__vector_pair);
+__vector_pair __builtin_vpair_i32_not (__vector_pair);
+__vector_pair __builtin_vpair_i32_sub (__vector_pair, __vector_pair);
+__vector_pair __builtin_vpair_i32_xor (__vector_pair, __vector_pair);
+
+__vector_pair __builtin_vpair_i32u_max (__vector_pair, __vector_pair);
+__vector_pair __builtin_vpair_i32u_min (__vector_pair, __vector_pair);
+@end smallexample
+
+The following built-in functions operate on pairs of
+@code{vector short} or @code{vector unsigned short} values:
+
+@smallexample
+__vector_pair __builtin_vpair_i16_add (__vector_pair, __vector_pair);
+__vector_pair __builtin_vpair_i16_and (__vector_pair, __vector_pair);
+__vector_pair __builtin_vpair_i16_ior (__vector_pair, __vector_pair);
+__vector_pair __builtin_vpair_i16_max (__vector_pair, __vector_pair);
+__vector_pair __builtin_vpair_i16_min (__vector_pair, __vector_pair);
+__vector_pair __builtin_vpair_i16_neg (__vector_pair);
+__vector_pair __builtin_vpair_i16_not (__vector_pair);
+__vector_pair __builtin_vpair_i16_sub (__vector_pair, __vector_pair);
+__vector_pair __builtin_vpair_i16_xor (__vector_pair, __vector_pair);
+
+__vector_pair __builtin_vpair_i16u_max (__vector_pair, __vector_pair);
+__vector_pair __builtin_vpair_i16u_min (__vector_pair, __vector_pair);
+@end smallexample
+
+The following built-in functions operate on pairs of
+@code{vector signed char} or @code{vector unsigned char} values:
+
+@smallexample
+__vector_pair __builtin_vpair_i8_add (__vector_pair, __vector_pair);
+__vector_pair __builtin_vpair_i8_and (__vector_pair, __vector_pair);
+__vector_pair __builtin_vpair_i8_ior (__vector_pair, __vector_pair);
+__vector_pair __builtin_vpair_i8_max (__vector_pair, __vector_pair);
+__vector_pair __builtin_vpair_i8_min (__vector_pair, __vector_pair);
+__vector_pair __builtin_vpair_i8_neg (__vector_pair);
+__vector_pair __builtin_vpair_i8_not (__vector_pair);
+__vector_pair __builtin_vpair_i8_sub (__vector_pair, __vector_pair);
+__vector_pair __builtin_vpair_i8_xor (__vector_pair, __vector_pair);
+
+__vector_pair __builtin_vpair_i8u_max (__vector_pair, __vector_pair);
+__vector_pair __builtin_vpair_i8u_min (__vector_pair, __vector_pair);
+@end smallexample
+
 @node PowerPC Hardware Transactional Memory Built-in Functions
 @subsection PowerPC Hardware Transactional Memory Built-in Functions
 GCC provides two interfaces for accessing the Hardware Transactional
diff --git a/gcc/testsuite/gcc.target/powerpc/vector-pair-5.c b/gcc/testsuite/gcc.target/powerpc/vector-pair-5.c
new file mode 100644
index 00000000000..924919cae1b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/powerpc/vector-pair-5.c
@@ -0,0 +1,193 @@ 
+/* { dg-do compile } */
+/* { dg-require-effective-target power10_ok } */
+/* { dg-options "-mdejagnu-cpu=power10 -O2" } */
+
+/* Test whether the vector built-in code generates the expected instructions for
+   vector pairs with 4 64-bit integer elements.  */
+
+void
+test_add (__vector_pair *dest,
+	  __vector_pair *x,
+	  __vector_pair *y)
+{
+  /* 2 lxvp, 2 vaddudm, 1 stxvp.  */
+  *dest = __builtin_vpair_i64_add (*x, *y);
+}
+
+void
+test_sub (__vector_pair *dest,
+	  __vector_pair *x,
+	  __vector_pair *y)
+{
+  /* 2 lxvp, 2 vsubudm, 1 stxvp.  */
+  *dest = __builtin_vpair_i64_sub (*x, *y);
+}
+
+void
+test_and (__vector_pair *dest,
+	  __vector_pair *x,
+	  __vector_pair *y)
+{
+  /* 2 lxvp, 2 xxland, 1 stxvp.  */
+  *dest = __builtin_vpair_i64_and (*x, *y);
+}
+
+void
+test_or (__vector_pair *dest,
+	 __vector_pair *x,
+	 __vector_pair *y)
+{
+  /* 2 lxvp, 2 xxlor, 1 stxvp.  */
+  *dest = __builtin_vpair_i64_ior (*x, *y);
+}
+
+void
+test_xor (__vector_pair *dest,
+	  __vector_pair *x,
+	  __vector_pair *y)
+{
+  /* 2 lxvp, 2 xxlxor, 1 stxvp.  */
+  *dest = __builtin_vpair_i64_xor (*x, *y);
+}
+
+void
+test_smax (__vector_pair *dest,
+	   __vector_pair *x,
+	   __vector_pair *y)
+{
+  /* 2 lxvp, 2 vmaxsd, 1 stxvp.  */
+  *dest = __builtin_vpair_i64_max (*x, *y);
+}
+
+void
+test_smin (__vector_pair *dest,
+	   __vector_pair *x,
+	   __vector_pair *y)
+{
+  /* 2 lxvp, 2 vminsd, 1 stxvp.  */
+  *dest = __builtin_vpair_i64_min (*x, *y);
+}
+
+void
+test_umax (__vector_pair *dest,
+	   __vector_pair *x,
+	   __vector_pair *y)
+{
+  /* 2 lxvp, 2 vmaxud, 1 stxvp.  */
+  *dest = __builtin_vpair_i64u_max (*x, *y);
+}
+
+void
+test_umin (__vector_pair *dest,
+	   __vector_pair *x,
+	   __vector_pair *y)
+{
+  /* 2 lxvp, 2 vminud, 1 stxvp.  */
+  *dest = __builtin_vpair_i64u_min (*x, *y);
+}
+
+void
+test_negate (__vector_pair *dest,
+	     __vector_pair *x)
+{
+  /* 2 lxvp, 2 vnegd, 1 stxvp.  */
+  *dest = __builtin_vpair_i64_neg (*x);
+}
+
+void
+test_not (__vector_pair *dest,
+	  __vector_pair *x)
+{
+  /* 2 lxvp, 2 xxlnor, 1 stxvp.  */
+  *dest = __builtin_vpair_i64_not (*x);
+}
+
+/* Combination of logical operators.  */
+
+void
+test_andc_1 (__vector_pair *dest,
+	     __vector_pair *x,
+	     __vector_pair *y)
+{
+  /* 2 lxvp, 2 xxlandc, 1 stxvp.  */
+  __vector_pair n = __builtin_vpair_i64_not (*y);
+  *dest = __builtin_vpair_i64_and (*x, n);
+}
+
+void
+test_andc_2 (__vector_pair *dest,
+	     __vector_pair *x,
+	     __vector_pair *y)
+{
+  /* 2 lxvp, 2 xxlandc, 1 stxvp.  */
+  __vector_pair n = __builtin_vpair_i64_not (*x);
+  *dest = __builtin_vpair_i64_and (n, *y);
+}
+
+void
+test_orc_1 (__vector_pair *dest,
+	    __vector_pair *x,
+	    __vector_pair *y)
+{
+  /* 2 lxvp, 2 xxlorc, 1 stxvp.  */
+  __vector_pair n = __builtin_vpair_i64_not (*y);
+  *dest = __builtin_vpair_i64_ior (*x, n);
+}
+
+void
+test_orc_2 (__vector_pair *dest,
+	    __vector_pair *x,
+	    __vector_pair *y)
+{
+  /* 2 lxvp, 2 xxlorc, 1 stxvp.  */
+  __vector_pair n = __builtin_vpair_i64_not (*x);
+  *dest = __builtin_vpair_i64_ior (n, *y);
+}
+
+void
+test_nand_1 (__vector_pair *dest,
+	     __vector_pair *x,
+	     __vector_pair *y)
+{
+  /* 2 lxvp, 2 xxlnand, 1 stxvp.  */
+  __vector_pair a = __builtin_vpair_i64_and (*x, *y);
+  *dest = __builtin_vpair_i64_not (a);
+}
+
+void
+test_nand_2 (__vector_pair *dest,
+	     __vector_pair *x,
+	     __vector_pair *y)
+{
+  /* 2 lxvp, 2 xxlnand, 1 stxvp.  */
+  __vector_pair nx = __builtin_vpair_i64_not (*x);
+  __vector_pair ny = __builtin_vpair_i64_not (*y);
+  *dest = __builtin_vpair_i64_ior (nx, ny);
+}
+
+void
+test_nor (__vector_pair *dest,
+	  __vector_pair *x,
+	  __vector_pair *y)
+{
+  /* 2 lxvp, 2 xxlnor, 1 stxvp.  */
+  __vector_pair a = __builtin_vpair_i64_ior (*x, *y);
+  *dest = __builtin_vpair_i64_not (a);
+}
+
+/* { dg-final { scan-assembler-times {\mlxvp\M}    34 } } */
+/* { dg-final { scan-assembler-times {\mstxvp\M}   18 } } */
+/* { dg-final { scan-assembler-times {\mvaddudm\M}  2 } } */
+/* { dg-final { scan-assembler-times {\mvmaxsd\M}   2 } } */
+/* { dg-final { scan-assembler-times {\mvmaxud\M}   2 } } */
+/* { dg-final { scan-assembler-times {\mvminsd\M}   2 } } */
+/* { dg-final { scan-assembler-times {\mvminud\M}   2 } } */
+/* { dg-final { scan-assembler-times {\mvnegd\M}    2 } } */
+/* { dg-final { scan-assembler-times {\mvsubudm\M}  2 } } */
+/* { dg-final { scan-assembler-times {\mxxland\M}   2 } } */
+/* { dg-final { scan-assembler-times {\mxxlandc\M}  4 } } */
+/* { dg-final { scan-assembler-times {\mxxlnand\M}  4 } } */
+/* { dg-final { scan-assembler-times {\mxxlnor\M}   4 } } */
+/* { dg-final { scan-assembler-times {\mxxlor\M}    2 } } */
+/* { dg-final { scan-assembler-times {\mxxlorc\M}   4 } } */
+/* { dg-final { scan-assembler-times {\mxxlxor\M}   2 } } */
diff --git a/gcc/testsuite/gcc.target/powerpc/vector-pair-6.c b/gcc/testsuite/gcc.target/powerpc/vector-pair-6.c
new file mode 100644
index 00000000000..f22949c1f95
--- /dev/null
+++ b/gcc/testsuite/gcc.target/powerpc/vector-pair-6.c
@@ -0,0 +1,193 @@ 
+/* { dg-do compile } */
+/* { dg-require-effective-target power10_ok } */
+/* { dg-options "-mdejagnu-cpu=power10 -O2" } */
+
+/* Test whether the vector built-in code generates the expected instructions for
+   vector pairs with 8 32-bit integer elements.  */
+
+void
+test_add (__vector_pair *dest,
+	  __vector_pair *x,
+	  __vector_pair *y)
+{
+  /* 2 lxvp, 2 vadduwm, 1 stxvp.  */
+  *dest = __builtin_vpair_i32_add (*x, *y);
+}
+
+void
+test_sub (__vector_pair *dest,
+	  __vector_pair *x,
+	  __vector_pair *y)
+{
+  /* 2 lxvp, 2 vsubuwm, 1 stxvp.  */
+  *dest = __builtin_vpair_i32_sub (*x, *y);
+}
+
+void
+test_and (__vector_pair *dest,
+	  __vector_pair *x,
+	  __vector_pair *y)
+{
+  /* 2 lxvp, 2 xxland, 1 stxvp.  */
+  *dest = __builtin_vpair_i32_and (*x, *y);
+}
+
+void
+test_or (__vector_pair *dest,
+	 __vector_pair *x,
+	 __vector_pair *y)
+{
+  /* 2 lxvp, 2 xxlor, 1 stxvp.  */
+  *dest = __builtin_vpair_i32_ior (*x, *y);
+}
+
+void
+test_xor (__vector_pair *dest,
+	  __vector_pair *x,
+	  __vector_pair *y)
+{
+  /* 2 lxvp, 2 xxlxor, 1 stxvp.  */
+  *dest = __builtin_vpair_i32_xor (*x, *y);
+}
+
+void
+test_smax (__vector_pair *dest,
+	   __vector_pair *x,
+	   __vector_pair *y)
+{
+  /* 2 lxvp, 2 vmaxsw, 1 stxvp.  */
+  *dest = __builtin_vpair_i32_max (*x, *y);
+}
+
+void
+test_smin (__vector_pair *dest,
+	   __vector_pair *x,
+	   __vector_pair *y)
+{
+  /* 2 lxvp, 2 vminsw, 1 stxvp.  */
+  *dest = __builtin_vpair_i32_min (*x, *y);
+}
+
+void
+test_umax (__vector_pair *dest,
+	   __vector_pair *x,
+	   __vector_pair *y)
+{
+  /* 2 lxvp, 2 vmaxuw, 1 stxvp.  */
+  *dest = __builtin_vpair_i32u_max (*x, *y);
+}
+
+void
+test_umin (__vector_pair *dest,
+	   __vector_pair *x,
+	   __vector_pair *y)
+{
+  /* 2 lxvp, 2 vminuw, 1 stxvp.  */
+  *dest = __builtin_vpair_i32u_min (*x, *y);
+}
+
+void
+test_negate (__vector_pair *dest,
+	     __vector_pair *x)
+{
+  /* 2 lxvp, 2 vnegw, 1 stxvp.  */
+  *dest = __builtin_vpair_i32_neg (*x);
+}
+
+void
+test_not (__vector_pair *dest,
+	  __vector_pair *x)
+{
+  /* 2 lxvp, 2 xxlnor, 1 stxvp.  */
+  *dest = __builtin_vpair_i32_not (*x);
+}
+
+/* Combination of logical operators.  */
+
+void
+test_andc_1 (__vector_pair *dest,
+	     __vector_pair *x,
+	     __vector_pair *y)
+{
+  /* 2 lxvp, 2 xxlandc, 1 stxvp.  */
+  __vector_pair n = __builtin_vpair_i32_not (*y);
+  *dest = __builtin_vpair_i32_and (*x, n);
+}
+
+void
+test_andc_2 (__vector_pair *dest,
+	     __vector_pair *x,
+	     __vector_pair *y)
+{
+  /* 2 lxvp, 2 xxlandc, 1 stxvp.  */
+  __vector_pair n = __builtin_vpair_i32_not (*x);
+  *dest = __builtin_vpair_i32_and (n, *y);
+}
+
+void
+test_orc_1 (__vector_pair *dest,
+	    __vector_pair *x,
+	    __vector_pair *y)
+{
+  /* 2 lxvp, 2 xxlorc, 1 stxvp.  */
+  __vector_pair n = __builtin_vpair_i32_not (*y);
+  *dest = __builtin_vpair_i32_ior (*x, n);
+}
+
+void
+test_orc_2 (__vector_pair *dest,
+	    __vector_pair *x,
+	    __vector_pair *y)
+{
+  /* 2 lxvp, 2 xxlorc, 1 stxvp.  */
+  __vector_pair n = __builtin_vpair_i32_not (*x);
+  *dest = __builtin_vpair_i32_ior (n, *y);
+}
+
+void
+test_nand_1 (__vector_pair *dest,
+	     __vector_pair *x,
+	     __vector_pair *y)
+{
+  /* 2 lxvp, 2 xxlnand, 1 stxvp.  */
+  __vector_pair a = __builtin_vpair_i32_and (*x, *y);
+  *dest = __builtin_vpair_i32_not (a);
+}
+
+void
+test_nand_2 (__vector_pair *dest,
+	     __vector_pair *x,
+	     __vector_pair *y)
+{
+  /* 2 lxvp, 2 xxlnand, 1 stxvp.  */
+  __vector_pair nx = __builtin_vpair_i32_not (*x);
+  __vector_pair ny = __builtin_vpair_i32_not (*y);
+  *dest = __builtin_vpair_i32_ior (nx, ny);
+}
+
+void
+test_nor (__vector_pair *dest,
+	  __vector_pair *x,
+	  __vector_pair *y)
+{
+  /* 2 lxvp, 2 xxlnor, 1 stxvp.  */
+  __vector_pair a = __builtin_vpair_i32_ior (*x, *y);
+  *dest = __builtin_vpair_i32_not (a);
+}
+
+/* { dg-final { scan-assembler-times {\mlxvp\M}    34 } } */
+/* { dg-final { scan-assembler-times {\mstxvp\M}   18 } } */
+/* { dg-final { scan-assembler-times {\mvadduwm\M}  2 } } */
+/* { dg-final { scan-assembler-times {\mvmaxsw\M}   2 } } */
+/* { dg-final { scan-assembler-times {\mvmaxuw\M}   2 } } */
+/* { dg-final { scan-assembler-times {\mvminsw\M}   2 } } */
+/* { dg-final { scan-assembler-times {\mvminuw\M}   2 } } */
+/* { dg-final { scan-assembler-times {\mvnegw\M}    2 } } */
+/* { dg-final { scan-assembler-times {\mvsubuwm\M}  2 } } */
+/* { dg-final { scan-assembler-times {\mxxland\M}   2 } } */
+/* { dg-final { scan-assembler-times {\mxxlandc\M}  4 } } */
+/* { dg-final { scan-assembler-times {\mxxlnand\M}  4 } } */
+/* { dg-final { scan-assembler-times {\mxxlnor\M}   4 } } */
+/* { dg-final { scan-assembler-times {\mxxlor\M}    2 } } */
+/* { dg-final { scan-assembler-times {\mxxlorc\M}   4 } } */
+/* { dg-final { scan-assembler-times {\mxxlxor\M}   2 } } */
diff --git a/gcc/testsuite/gcc.target/powerpc/vector-pair-7.c b/gcc/testsuite/gcc.target/powerpc/vector-pair-7.c
new file mode 100644
index 00000000000..71452f59284
--- /dev/null
+++ b/gcc/testsuite/gcc.target/powerpc/vector-pair-7.c
@@ -0,0 +1,193 @@ 
+/* { dg-do compile } */
+/* { dg-require-effective-target power10_ok } */
+/* { dg-options "-mdejagnu-cpu=power10 -O2" } */
+
+/* Test whether the vector built-in code generates the expected instructions for
+   vector pairs with 16 16-bit integer elements.  */
+
+void
+test_add (__vector_pair *dest,
+	  __vector_pair *x,
+	  __vector_pair *y)
+{
+  /* 2 lxvp, 2 vadduhm, 1 stxvp.  */
+  *dest = __builtin_vpair_i16_add (*x, *y);
+}
+
+void
+test_sub (__vector_pair *dest,
+	  __vector_pair *x,
+	  __vector_pair *y)
+{
+  /* 2 lxvp, 2 vsubuhm, 1 stxvp.  */
+  *dest = __builtin_vpair_i16_sub (*x, *y);
+}
+
+void
+test_and (__vector_pair *dest,
+	  __vector_pair *x,
+	  __vector_pair *y)
+{
+  /* 2 lxvp, 2 xxland, 1 stxvp.  */
+  *dest = __builtin_vpair_i16_and (*x, *y);
+}
+
+void
+test_or (__vector_pair *dest,
+	 __vector_pair *x,
+	 __vector_pair *y)
+{
+  /* 2 lxvp, 2 xxlor, 1 stxvp.  */
+  *dest = __builtin_vpair_i16_ior (*x, *y);
+}
+
+void
+test_xor (__vector_pair *dest,
+	  __vector_pair *x,
+	  __vector_pair *y)
+{
+  /* 2 lxvp, 2 xxlxor, 1 stxvp.  */
+  *dest = __builtin_vpair_i16_xor (*x, *y);
+}
+
+void
+test_smax (__vector_pair *dest,
+	   __vector_pair *x,
+	   __vector_pair *y)
+{
+  /* 2 lxvp, 2 vmaxsh, 1 stxvp.  */
+  *dest = __builtin_vpair_i16_max (*x, *y);
+}
+
+void
+test_smin (__vector_pair *dest,
+	   __vector_pair *x,
+	   __vector_pair *y)
+{
+  /* 2 lxvp, 2 vminsh, 1 stxvp.  */
+  *dest = __builtin_vpair_i16_min (*x, *y);
+}
+
+void
+test_umax (__vector_pair *dest,
+	   __vector_pair *x,
+	   __vector_pair *y)
+{
+  /* 2 lxvp, 2 vmaxuh, 1 stxvp.  */
+  *dest = __builtin_vpair_i16u_max (*x, *y);
+}
+
+void
+test_umin (__vector_pair *dest,
+	   __vector_pair *x,
+	   __vector_pair *y)
+{
+  /* 2 lxvp, 2 vminuh, 1 stxvp.  */
+  *dest = __builtin_vpair_i16u_min (*x, *y);
+}
+
+void
+test_negate (__vector_pair *dest,
+	     __vector_pair *x)
+{
+  /* 2 lxvp, 1 xxspltib, 2 vsubuhm, 1 stxvp.  */
+  *dest = __builtin_vpair_i16_neg (*x);
+}
+
+void
+test_not (__vector_pair *dest,
+	  __vector_pair *x)
+{
+  /* 2 lxvp, 2 xxlnor, 1 stxvp.  */
+  *dest = __builtin_vpair_i16_not (*x);
+}
+
+/* Combination of logical operators.  */
+
+void
+test_andc_1 (__vector_pair *dest,
+	     __vector_pair *x,
+	     __vector_pair *y)
+{
+  /* 2 lxvp, 2 xxlandc, 1 stxvp.  */
+  __vector_pair n = __builtin_vpair_i16_not (*y);
+  *dest = __builtin_vpair_i16_and (*x, n);
+}
+
+void
+test_andc_2 (__vector_pair *dest,
+	     __vector_pair *x,
+	     __vector_pair *y)
+{
+  /* 2 lxvp, 2 xxlandc, 1 stxvp.  */
+  __vector_pair n = __builtin_vpair_i16_not (*x);
+  *dest = __builtin_vpair_i16_and (n, *y);
+}
+
+void
+test_orc_1 (__vector_pair *dest,
+	    __vector_pair *x,
+	    __vector_pair *y)
+{
+  /* 2 lxvp, 2 xxlorc, 1 stxvp.  */
+  __vector_pair n = __builtin_vpair_i16_not (*y);
+  *dest = __builtin_vpair_i16_ior (*x, n);
+}
+
+void
+test_orc_2 (__vector_pair *dest,
+	    __vector_pair *x,
+	    __vector_pair *y)
+{
+  /* 2 lxvp, 2 xxlorc, 1 stxvp.  */
+  __vector_pair n = __builtin_vpair_i16_not (*x);
+  *dest = __builtin_vpair_i16_ior (n, *y);
+}
+
+void
+test_nand_1 (__vector_pair *dest,
+	     __vector_pair *x,
+	     __vector_pair *y)
+{
+  /* 2 lxvp, 2 xxlnand, 1 stxvp.  */
+  __vector_pair a = __builtin_vpair_i16_and (*x, *y);
+  *dest = __builtin_vpair_i16_not (a);
+}
+
+void
+test_nand_2 (__vector_pair *dest,
+	     __vector_pair *x,
+	     __vector_pair *y)
+{
+  /* 2 lxvp, 2 xxlnand, 1 stxvp.  */
+  __vector_pair nx = __builtin_vpair_i16_not (*x);
+  __vector_pair ny = __builtin_vpair_i16_not (*y);
+  *dest = __builtin_vpair_i16_ior (nx, ny);
+}
+
+void
+test_nor (__vector_pair *dest,
+	  __vector_pair *x,
+	  __vector_pair *y)
+{
+  /* 2 lxvp, 2 xxlnor, 1 stxvp.  */
+  __vector_pair a = __builtin_vpair_i16_ior (*x, *y);
+  *dest = __builtin_vpair_i16_not (a);
+}
+
+/* { dg-final { scan-assembler-times {\mlxvp\M}     34 } } */
+/* { dg-final { scan-assembler-times {\mstxvp\M}    18 } } */
+/* { dg-final { scan-assembler-times {\mvadduhm\M}   2 } } */
+/* { dg-final { scan-assembler-times {\mvmaxsh\M}    2 } } */
+/* { dg-final { scan-assembler-times {\mvmaxuh\M}    2 } } */
+/* { dg-final { scan-assembler-times {\mvminsh\M}    2 } } */
+/* { dg-final { scan-assembler-times {\mvminuh\M}    2 } } */
+/* { dg-final { scan-assembler-times {\mvsubuhm\M}   4 } } */
+/* { dg-final { scan-assembler-times {\mxxland\M}    2 } } */
+/* { dg-final { scan-assembler-times {\mxxlandc\M}   4 } } */
+/* { dg-final { scan-assembler-times {\mxxlnand\M}   4 } } */
+/* { dg-final { scan-assembler-times {\mxxlnor\M}    4 } } */
+/* { dg-final { scan-assembler-times {\mxxlor\M}     2 } } */
+/* { dg-final { scan-assembler-times {\mxxlorc\M}    4 } } */
+/* { dg-final { scan-assembler-times {\mxxlxor\M}    2 } } */
+/* { dg-final { scan-assembler-times {\mxxspltib\M}  1 } } */
diff --git a/gcc/testsuite/gcc.target/powerpc/vector-pair-8.c b/gcc/testsuite/gcc.target/powerpc/vector-pair-8.c
new file mode 100644
index 00000000000..8db9056d4cc
--- /dev/null
+++ b/gcc/testsuite/gcc.target/powerpc/vector-pair-8.c
@@ -0,0 +1,194 @@ 
+/* { dg-do compile } */
+/* { dg-require-effective-target power10_ok } */
+/* { dg-options "-mdejagnu-cpu=power10 -O2" } */
+
+/* Test whether the vector built-in code generates the expected instructions for
+   vector pairs with 32 8-bit integer elements.  */
+
+
+void
+test_add (__vector_pair *dest,
+	  __vector_pair *x,
+	  __vector_pair *y)
+{
+  /* 2 lxvp, 2 vaddubm, 1 stxvp.  */
+  *dest = __builtin_vpair_i8_add (*x, *y);
+}
+
+void
+test_sub (__vector_pair *dest,
+	  __vector_pair *x,
+	  __vector_pair *y)
+{
+  /* 2 lxvp, 2 vsububm, 1 stxvp.  */
+  *dest = __builtin_vpair_i8_sub (*x, *y);
+}
+
+void
+test_and (__vector_pair *dest,
+	  __vector_pair *x,
+	  __vector_pair *y)
+{
+  /* 2 lxvp, 2 xxland, 1 stxvp.  */
+  *dest = __builtin_vpair_i8_and (*x, *y);
+}
+
+void
+test_or (__vector_pair *dest,
+	 __vector_pair *x,
+	 __vector_pair *y)
+{
+  /* 2 lxvp, 2 xxlor, 1 stxvp.  */
+  *dest = __builtin_vpair_i8_ior (*x, *y);
+}
+
+void
+test_xor (__vector_pair *dest,
+	  __vector_pair *x,
+	  __vector_pair *y)
+{
+  /* 2 lxvp, 2 xxlxor, 1 stxvp.  */
+  *dest = __builtin_vpair_i8_xor (*x, *y);
+}
+
+void
+test_smax (__vector_pair *dest,
+	   __vector_pair *x,
+	   __vector_pair *y)
+{
+  /* 2 lxvp, 2 vmaxsb, 1 stxvp.  */
+  *dest = __builtin_vpair_i8_max (*x, *y);
+}
+
+void
+test_smin (__vector_pair *dest,
+	   __vector_pair *x,
+	   __vector_pair *y)
+{
+  /* 2 lxvp, 2 vminsb, 1 stxvp.  */
+  *dest = __builtin_vpair_i8_min (*x, *y);
+}
+
+void
+test_umax (__vector_pair *dest,
+	   __vector_pair *x,
+	   __vector_pair *y)
+{
+  /* 2 lxvp, 2 vmaxub, 1 stxvp.  */
+  *dest = __builtin_vpair_i8u_max (*x, *y);
+}
+
+void
+test_umin (__vector_pair *dest,
+	   __vector_pair *x,
+	   __vector_pair *y)
+{
+  /* 2 lxvp, 2 vminub, 1 stxvp.  */
+  *dest = __builtin_vpair_i8u_min (*x, *y);
+}
+
+void
+test_negate (__vector_pair *dest,
+	     __vector_pair *x)
+{
+  /* 2 lxvp, 1 xxspltib, 2 vsububm, 1 stxvp.  */
+  *dest = __builtin_vpair_i8_neg (*x);
+}
+
+void
+test_not (__vector_pair *dest,
+	  __vector_pair *x)
+{
+  /* 2 lxvp, 2 xxlnor, 1 stxvp.  */
+  *dest = __builtin_vpair_i8_not (*x);
+}
+
+/* Combination of logical operators.  */
+
+void
+test_andc_1 (__vector_pair *dest,
+	     __vector_pair *x,
+	     __vector_pair *y)
+{
+  /* 2 lxvp, 2 xxlandc, 1 stxvp.  */
+  __vector_pair n = __builtin_vpair_i8_not (*y);
+  *dest = __builtin_vpair_i8_and (*x, n);
+}
+
+void
+test_andc_2 (__vector_pair *dest,
+	     __vector_pair *x,
+	     __vector_pair *y)
+{
+  /* 2 lxvp, 2 xxlandc, 1 stxvp.  */
+  __vector_pair n = __builtin_vpair_i8_not (*x);
+  *dest = __builtin_vpair_i8_and (n, *y);
+}
+
+void
+test_orc_1 (__vector_pair *dest,
+	    __vector_pair *x,
+	    __vector_pair *y)
+{
+  /* 2 lxvp, 2 xxlorc, 1 stxvp.  */
+  __vector_pair n = __builtin_vpair_i8_not (*y);
+  *dest = __builtin_vpair_i8_ior (*x, n);
+}
+
+void
+test_orc_2 (__vector_pair *dest,
+	    __vector_pair *x,
+	    __vector_pair *y)
+{
+  /* 2 lxvp, 2 xxlorc, 1 stxvp.  */
+  __vector_pair n = __builtin_vpair_i8_not (*x);
+  *dest = __builtin_vpair_i8_ior (n, *y);
+}
+
+void
+test_nand_1 (__vector_pair *dest,
+	     __vector_pair *x,
+	     __vector_pair *y)
+{
+  /* 2 lxvp, 2 xxlnand, 1 stxvp.  */
+  __vector_pair a = __builtin_vpair_i8_and (*x, *y);
+  *dest = __builtin_vpair_i8_not (a);
+}
+
+void
+test_nand_2 (__vector_pair *dest,
+	     __vector_pair *x,
+	     __vector_pair *y)
+{
+  /* 2 lxvp, 2 xxlnand, 1 stxvp.  */
+  __vector_pair nx = __builtin_vpair_i8_not (*x);
+  __vector_pair ny = __builtin_vpair_i8_not (*y);
+  *dest = __builtin_vpair_i8_ior (nx, ny);
+}
+
+void
+test_nor (__vector_pair *dest,
+	  __vector_pair *x,
+	  __vector_pair *y)
+{
+  /* 2 lxvp, 2 xxlnor, 1 stxvp.  */
+  __vector_pair a = __builtin_vpair_i8_ior (*x, *y);
+  *dest = __builtin_vpair_i8_not (a);
+}
+
+/* { dg-final { scan-assembler-times {\mlxvp\M}     34 } } */
+/* { dg-final { scan-assembler-times {\mstxvp\M}    18 } } */
+/* { dg-final { scan-assembler-times {\mvaddubm\M}   2 } } */
+/* { dg-final { scan-assembler-times {\mvmaxsb\M}    2 } } */
+/* { dg-final { scan-assembler-times {\mvmaxub\M}    2 } } */
+/* { dg-final { scan-assembler-times {\mvminsb\M}    2 } } */
+/* { dg-final { scan-assembler-times {\mvminub\M}    2 } } */
+/* { dg-final { scan-assembler-times {\mvsububm\M}   4 } } */
+/* { dg-final { scan-assembler-times {\mxxland\M}    2 } } */
+/* { dg-final { scan-assembler-times {\mxxlandc\M}   4 } } */
+/* { dg-final { scan-assembler-times {\mxxlnand\M}   4 } } */
+/* { dg-final { scan-assembler-times {\mxxlnor\M}    4 } } */
+/* { dg-final { scan-assembler-times {\mxxlor\M}     2 } } */
+/* { dg-final { scan-assembler-times {\mxxlorc\M}    4 } } */
+/* { dg-final { scan-assembler-times {\mxxlxor\M}    2 } } */
+/* { dg-final { scan-assembler-times {\mxxspltib\M}  1 } } */