Avoid generating vblendps with ymm16+

Message ID: 20231109071457.2574044-1-lin1.hu@intel.com
State: Accepted
Series: Avoid generating vblendps with ymm16+

Checks

Context: snail/gcc-patch-check
Check: success
Description: Github commit url

Commit Message

Hu, Lin1 Nov. 9, 2023, 7:14 a.m. UTC
  This patch aims to avoid generating vblendps with ymm16+.  It has been
bootstrapped and tested on x86_64-pc-linux-gnu{-m32,-m64}.  Ok for trunk?
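
For context, vblendps has only a VEX encoding, while ymm16 and above are
addressable only through an EVEX encoding, so emitting it for those
registers yields code the assembler rejects.  A sketch of the failure mode
(register numbers chosen purely for illustration):

	vblendps	$240, %ymm17, %ymm16, %ymm0	# rejected: vblendps has no EVEX form
	vshufi32x4	$2, %ymm17, %ymm16, %ymm0	# EVEX-encodable equivalent for this mask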

gcc/ChangeLog:

	PR target/112435
	* config/i386/sse.md: Add constraints to restrict the generation of
	vblendps.

gcc/testsuite/ChangeLog:

	PR target/112435
	* gcc.target/i386/pr112435-1.c: New test.
	* gcc.target/i386/pr112435-2.c: Ditto.
	* gcc.target/i386/pr112435-3.c: Ditto.
---
 gcc/config/i386/sse.md                     | 28 +++++---
 gcc/testsuite/gcc.target/i386/pr112435-1.c | 14 ++++
 gcc/testsuite/gcc.target/i386/pr112435-2.c | 64 ++++++++++++++++++
 gcc/testsuite/gcc.target/i386/pr112435-3.c | 79 ++++++++++++++++++++++
 4 files changed, 175 insertions(+), 10 deletions(-)
 create mode 100644 gcc/testsuite/gcc.target/i386/pr112435-1.c
 create mode 100644 gcc/testsuite/gcc.target/i386/pr112435-2.c
 create mode 100644 gcc/testsuite/gcc.target/i386/pr112435-3.c
  

Comments

Jakub Jelinek Nov. 10, 2023, 8:10 p.m. UTC | #1
On Thu, Nov 09, 2023 at 03:27:11PM +0800, Hongtao Liu wrote:
> On Thu, Nov 9, 2023 at 3:15 PM Hu, Lin1 <lin1.hu@intel.com> wrote:
> >
> > This patch aims to avoid generating vblendps with ymm16+.  It has been
> > bootstrapped and tested on x86_64-pc-linux-gnu{-m32,-m64}. Ok for trunk?
> >
> > gcc/ChangeLog:
> >
> >         PR target/112435
> >         * config/i386/sse.md: Add constraints to restrict the generation
> >         of vblendps.
> It should be "Don't output vblendps when evex sse reg or gpr32 is involved."
> Others LGTM.

I'd missed this patch, so I wrote my own today, and am wondering

1) if it isn't better to use a separate alternative instead of
   x86_evex_reg_mentioned_p, like in the patch below
2) why you need the last two hunks in sse.md; both avx2_permv2ti and
   *avx_vperm2f128<mode>_nozero insns only use x in constraints, never v,
   so x86_evex_reg_mentioned_p ought to be always false there

Here is the untested patch; of course, you have more testcases (though I
think it is better to test with dg-do assemble on an avx512vl target rather
than to dg-do compile and scan the assembler; after all, the problem was
that it didn't assemble).

2023-11-10  Jakub Jelinek  <jakub@redhat.com>

	PR target/112435
	* config/i386/sse.md (avx512vl_shuf_<shuffletype>32x4_1<mask_name>,
	<mask_codefor>avx512dq_shuf_<shuffletype>64x2_1<mask_name>): Add
	alternative with just x instead of v constraints and use vblendps
	as optimization only with that alternative.

	* gcc.target/i386/avx512vl-pr112435.c: New test.

--- gcc/config/i386/sse.md.jj	2023-11-09 09:04:18.616543403 +0100
+++ gcc/config/i386/sse.md	2023-11-10 15:56:44.138499931 +0100
@@ -19235,11 +19235,11 @@ (define_expand "avx512dq_shuf_<shufflety
 })
 
 (define_insn "<mask_codefor>avx512dq_shuf_<shuffletype>64x2_1<mask_name>"
-  [(set (match_operand:VI8F_256 0 "register_operand" "=v")
+  [(set (match_operand:VI8F_256 0 "register_operand" "=x,v")
 	(vec_select:VI8F_256
 	  (vec_concat:<ssedoublemode>
-	    (match_operand:VI8F_256 1 "register_operand" "v")
-	    (match_operand:VI8F_256 2 "nonimmediate_operand" "vm"))
+	    (match_operand:VI8F_256 1 "register_operand" "x,v")
+	    (match_operand:VI8F_256 2 "nonimmediate_operand" "xm,vm"))
 	  (parallel [(match_operand 3 "const_0_to_3_operand")
 		     (match_operand 4 "const_0_to_3_operand")
 		     (match_operand 5 "const_4_to_7_operand")
@@ -19254,7 +19254,7 @@ (define_insn "<mask_codefor>avx512dq_shu
   mask = INTVAL (operands[3]) / 2;
   mask |= (INTVAL (operands[5]) - 4) / 2 << 1;
   operands[3] = GEN_INT (mask);
-  if (INTVAL (operands[3]) == 2 && !<mask_applied>)
+  if (INTVAL (operands[3]) == 2 && !<mask_applied> && which_alternative == 0)
     return "vblendps\t{$240, %2, %1, %0|%0, %1, %2, 240}";
   return "vshuf<shuffletype>64x2\t{%3, %2, %1, %0<mask_operand7>|%0<mask_operand7>, %1, %2, %3}";
 }
@@ -19386,11 +19386,11 @@ (define_expand "avx512vl_shuf_<shufflety
 })
 
 (define_insn "avx512vl_shuf_<shuffletype>32x4_1<mask_name>"
-  [(set (match_operand:VI4F_256 0 "register_operand" "=v")
+  [(set (match_operand:VI4F_256 0 "register_operand" "=x,v")
 	(vec_select:VI4F_256
 	  (vec_concat:<ssedoublemode>
-	    (match_operand:VI4F_256 1 "register_operand" "v")
-	    (match_operand:VI4F_256 2 "nonimmediate_operand" "vm"))
+	    (match_operand:VI4F_256 1 "register_operand" "x,v")
+	    (match_operand:VI4F_256 2 "nonimmediate_operand" "xm,vm"))
 	  (parallel [(match_operand 3 "const_0_to_7_operand")
 		     (match_operand 4 "const_0_to_7_operand")
 		     (match_operand 5 "const_0_to_7_operand")
@@ -19414,7 +19414,7 @@ (define_insn "avx512vl_shuf_<shuffletype
   mask |= (INTVAL (operands[7]) - 8) / 4 << 1;
   operands[3] = GEN_INT (mask);
 
-  if (INTVAL (operands[3]) == 2 && !<mask_applied>)
+  if (INTVAL (operands[3]) == 2 && !<mask_applied> && which_alternative == 0)
     return "vblendps\t{$240, %2, %1, %0|%0, %1, %2, 240}";
 
   return "vshuf<shuffletype>32x4\t{%3, %2, %1, %0<mask_operand11>|%0<mask_operand11>, %1, %2, %3}";
--- gcc/testsuite/gcc.target/i386/avx512vl-pr112435.c.jj	2023-11-10 16:04:21.708046771 +0100
+++ gcc/testsuite/gcc.target/i386/avx512vl-pr112435.c	2023-11-10 16:03:51.053479094 +0100
@@ -0,0 +1,13 @@
+/* PR target/112435 */
+/* { dg-do assemble { target { avx512vl && { ! ia32 } } } } */
+/* { dg-options "-mavx512vl -O2" } */
+
+#include <x86intrin.h>
+
+__m256i
+foo (__m256i a, __m256i b)
+{
+  register __m256i c __asm__("ymm16") = a;
+  asm ("" : "+v" (c));
+  return _mm256_shuffle_i32x4 (c, b, 2);
+}

	Jakub
  
Jakub Jelinek Nov. 13, 2023, 8:39 a.m. UTC | #2
On Mon, Nov 13, 2023 at 02:27:35PM +0800, Hongtao Liu wrote:
> > 1) if it isn't better to use a separate alternative instead of
> >    x86_evex_reg_mentioned_p, like in the patch below
> vblendps doesn't support gpr32, which is checked by x86_evex_reg_mentioned_p.
> We need to use xjm for operands[1] (I think we don't need to set the
> addr attribute to gpr16 for alternative 0, since alternative 1 is always
> available and recog will match alternative 1 when gpr32 is used).

Ok, so like this then?  I've incorporated the other two tests into the patch
as well.
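
As a sketch of the gpr32 point, assuming APX-style extended GPRs r16-r31
used as memory base registers: a VEX-encoded vblendps has no way to encode
them, while the EVEX-encoded vshufi32x4 does, e.g.

	vblendps	$240, (%r16), %ymm1, %ymm0	# not encodable: VEX cannot address r16
	vshufi32x4	$2, (%r16), %ymm1, %ymm0	# EVEX (with APX extensions) can

hence the j restriction in the xjm constraint on the alternative that may
emit vblendps.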

2023-11-13  Jakub Jelinek  <jakub@redhat.com>
	    Hu, Lin1  <lin1.hu@intel.com>

	PR target/112435
	* config/i386/sse.md (avx512vl_shuf_<shuffletype>32x4_1<mask_name>,
	<mask_codefor>avx512dq_shuf_<shuffletype>64x2_1<mask_name>): Add
	alternative with just x instead of v constraints and xjm instead of
	vm and use vblendps as optimization only with that alternative.

	* gcc.target/i386/avx512vl-pr112435-1.c: New test.
	* gcc.target/i386/avx512vl-pr112435-2.c: New test.
	* gcc.target/i386/avx512vl-pr112435-3.c: New test.

--- gcc/config/i386/sse.md.jj	2023-11-11 08:52:20.377845673 +0100
+++ gcc/config/i386/sse.md	2023-11-13 09:31:08.568935535 +0100
@@ -19235,11 +19235,11 @@ (define_expand "avx512dq_shuf_<shufflety
 })
 
 (define_insn "<mask_codefor>avx512dq_shuf_<shuffletype>64x2_1<mask_name>"
-  [(set (match_operand:VI8F_256 0 "register_operand" "=v")
+  [(set (match_operand:VI8F_256 0 "register_operand" "=x,v")
 	(vec_select:VI8F_256
 	  (vec_concat:<ssedoublemode>
-	    (match_operand:VI8F_256 1 "register_operand" "v")
-	    (match_operand:VI8F_256 2 "nonimmediate_operand" "vm"))
+	    (match_operand:VI8F_256 1 "register_operand" "x,v")
+	    (match_operand:VI8F_256 2 "nonimmediate_operand" "xjm,vm"))
 	  (parallel [(match_operand 3 "const_0_to_3_operand")
 		     (match_operand 4 "const_0_to_3_operand")
 		     (match_operand 5 "const_4_to_7_operand")
@@ -19254,7 +19254,7 @@ (define_insn "<mask_codefor>avx512dq_shu
   mask = INTVAL (operands[3]) / 2;
   mask |= (INTVAL (operands[5]) - 4) / 2 << 1;
   operands[3] = GEN_INT (mask);
-  if (INTVAL (operands[3]) == 2 && !<mask_applied>)
+  if (INTVAL (operands[3]) == 2 && !<mask_applied> && which_alternative == 0)
     return "vblendps\t{$240, %2, %1, %0|%0, %1, %2, 240}";
   return "vshuf<shuffletype>64x2\t{%3, %2, %1, %0<mask_operand7>|%0<mask_operand7>, %1, %2, %3}";
 }
@@ -19386,11 +19386,11 @@ (define_expand "avx512vl_shuf_<shufflety
 })
 
 (define_insn "avx512vl_shuf_<shuffletype>32x4_1<mask_name>"
-  [(set (match_operand:VI4F_256 0 "register_operand" "=v")
+  [(set (match_operand:VI4F_256 0 "register_operand" "=x,v")
 	(vec_select:VI4F_256
 	  (vec_concat:<ssedoublemode>
-	    (match_operand:VI4F_256 1 "register_operand" "v")
-	    (match_operand:VI4F_256 2 "nonimmediate_operand" "vm"))
+	    (match_operand:VI4F_256 1 "register_operand" "x,v")
+	    (match_operand:VI4F_256 2 "nonimmediate_operand" "xjm,vm"))
 	  (parallel [(match_operand 3 "const_0_to_7_operand")
 		     (match_operand 4 "const_0_to_7_operand")
 		     (match_operand 5 "const_0_to_7_operand")
@@ -19414,7 +19414,7 @@ (define_insn "avx512vl_shuf_<shuffletype
   mask |= (INTVAL (operands[7]) - 8) / 4 << 1;
   operands[3] = GEN_INT (mask);
 
-  if (INTVAL (operands[3]) == 2 && !<mask_applied>)
+  if (INTVAL (operands[3]) == 2 && !<mask_applied> && which_alternative == 0)
     return "vblendps\t{$240, %2, %1, %0|%0, %1, %2, 240}";
 
   return "vshuf<shuffletype>32x4\t{%3, %2, %1, %0<mask_operand11>|%0<mask_operand11>, %1, %2, %3}";
--- gcc/testsuite/gcc.target/i386/avx512vl-pr112435-1.c.jj	2023-11-13 09:20:53.330643098 +0100
+++ gcc/testsuite/gcc.target/i386/avx512vl-pr112435-1.c	2023-11-13 09:20:53.330643098 +0100
@@ -0,0 +1,13 @@
+/* PR target/112435 */
+/* { dg-do assemble { target { avx512vl && { ! ia32 } } } } */
+/* { dg-options "-mavx512vl -O2" } */
+
+#include <x86intrin.h>
+
+__m256i
+foo (__m256i a, __m256i b)
+{
+  register __m256i c __asm__("ymm16") = a;
+  asm ("" : "+v" (c));
+  return _mm256_shuffle_i32x4 (c, b, 2);
+}
--- gcc/testsuite/gcc.target/i386/avx512vl-pr112435-2.c.jj	2023-11-13 09:23:04.361788598 +0100
+++ gcc/testsuite/gcc.target/i386/avx512vl-pr112435-2.c	2023-11-13 09:34:57.186699876 +0100
@@ -0,0 +1,63 @@
+/* PR target/112435 */
+/* { dg-do assemble { target { avx512vl && { ! ia32 } } } } */
+/* { dg-options "-mavx512vl -O2" } */
+
+#include <x86intrin.h>
+
+/* vpermi128/vpermf128 */
+__m256i
+perm0 (__m256i a, __m256i b)
+{
+  register __m256i c __asm__("ymm17") = a;
+  asm ("":"+v" (c));
+  return _mm256_permute2x128_si256 (c, b, 50);
+}
+
+__m256i
+perm1 (__m256i a, __m256i b)
+{
+  register __m256i c __asm__("ymm17") = a;
+  asm ("":"+v" (c));
+  return _mm256_permute2x128_si256 (c, b, 18);
+}
+
+__m256i
+perm2 (__m256i a, __m256i b)
+{
+  register __m256i c __asm__("ymm17") = a;
+  asm ("":"+v" (c));
+  return _mm256_permute2x128_si256 (c, b, 48);
+}
+
+/* vshuf{i,f}{32x4,64x2} ymm .*/
+__m256i
+shuff0 (__m256i a, __m256i b)
+{
+  register __m256i c __asm__("ymm17") = a;
+  asm ("":"+v" (c));
+  return _mm256_shuffle_i32x4 (c, b, 2);
+}
+
+__m256
+shuff1 (__m256 a, __m256 b)
+{
+  register __m256 c __asm__("ymm17") = a;
+  asm ("":"+v" (c));
+  return _mm256_shuffle_f32x4 (c, b, 2);
+}
+
+__m256i
+shuff2 (__m256i a, __m256i b)
+{
+  register __m256i c __asm__("ymm17") = a;
+  asm ("":"+v" (c));
+  return _mm256_shuffle_i64x2 (c, b, 2);
+}
+
+__m256d
+shuff3 (__m256d a, __m256d b)
+{
+  register __m256d c __asm__("ymm17") = a;
+  asm ("":"+v" (c));
+  return _mm256_shuffle_f64x2 (c, b, 2);
+}
--- gcc/testsuite/gcc.target/i386/avx512vl-pr112435-3.c.jj	2023-11-13 09:24:52.518257838 +0100
+++ gcc/testsuite/gcc.target/i386/avx512vl-pr112435-3.c	2023-11-13 09:26:20.761008930 +0100
@@ -0,0 +1,78 @@
+/* PR target/112435 */
+/* { dg-do assemble { target { avx512vl && { ! ia32 } } } } */
+/* { dg-options "-mavx512vl -O2" } */
+
+#include <x86intrin.h>
+
+/* vpermf128 */
+__m256
+perm0 (__m256 a, __m256 b)
+{
+  register __m256 c __asm__("ymm17") =a;
+  asm ("":"+v" (c));
+  return _mm256_permute2f128_ps (c, b, 50);
+}
+
+__m256
+perm1 (__m256 a, __m256 b)
+{
+  register __m256 c __asm__("ymm17") =a;
+  asm ("":"+v" (c));
+  return _mm256_permute2f128_ps (c, b, 18);
+}
+
+__m256
+perm2 (__m256 a, __m256 b)
+{
+  register __m256 c __asm__("ymm17") =a;
+  asm ("":"+v" (c));
+  return _mm256_permute2f128_ps (c, b, 48);
+}
+
+__m256i
+perm3 (__m256i a, __m256i b)
+{
+  register __m256i c __asm__("ymm17") =a;
+  asm ("":"+v" (c));
+  return _mm256_permute2f128_si256 (c, b, 50);
+}
+
+__m256i
+perm4 (__m256i a, __m256i b)
+{
+  register __m256i c __asm__("ymm17") =a;
+  asm ("":"+v" (c));
+  return _mm256_permute2f128_si256 (c, b, 18);
+}
+
+__m256i
+perm5 (__m256i a, __m256i b)
+{
+  register __m256i c __asm__("ymm17") =a;
+  asm ("":"+v" (c));
+  return _mm256_permute2f128_si256 (c, b, 48);
+}
+
+__m256d
+perm6 (__m256d a, __m256d b)
+{
+  register __m256d c __asm__("ymm17") =a;
+  asm ("":"+v" (c));
+  return _mm256_permute2f128_pd (c, b, 50);
+}
+
+__m256d
+perm7 (__m256d a, __m256d b)
+{
+  register __m256d c __asm__("ymm17") =a;
+  asm ("":"+v" (c));
+  return _mm256_permute2f128_pd (c, b, 18);
+}
+
+__m256d
+perm8 (__m256d a, __m256d b)
+{
+  register __m256d c __asm__("ymm17") =a;
+  asm ("":"+v" (c));
+  return _mm256_permute2f128_pd (c, b, 48);
+}

	Jakub
  

Patch

diff --git a/gcc/config/i386/sse.md b/gcc/config/i386/sse.md
index 33198756bb0..666f931c88d 100644
--- a/gcc/config/i386/sse.md
+++ b/gcc/config/i386/sse.md
@@ -19254,7 +19254,8 @@ 
   mask = INTVAL (operands[3]) / 2;
   mask |= (INTVAL (operands[5]) - 4) / 2 << 1;
   operands[3] = GEN_INT (mask);
-  if (INTVAL (operands[3]) == 2 && !<mask_applied>)
+  if (INTVAL (operands[3]) == 2 && !<mask_applied>
+      && !x86_evex_reg_mentioned_p (operands, 3))
     return "vblendps\t{$240, %2, %1, %0|%0, %1, %2, 240}";
   return "vshuf<shuffletype>64x2\t{%3, %2, %1, %0<mask_operand7>|%0<mask_operand7>, %1, %2, %3}";
 }
@@ -19414,7 +19415,8 @@ 
   mask |= (INTVAL (operands[7]) - 8) / 4 << 1;
   operands[3] = GEN_INT (mask);
 
-  if (INTVAL (operands[3]) == 2 && !<mask_applied>)
+  if (INTVAL (operands[3]) == 2 && !<mask_applied>
+      && !x86_evex_reg_mentioned_p (operands, 3))
     return "vblendps\t{$240, %2, %1, %0|%0, %1, %2, 240}";
 
   return "vshuf<shuffletype>32x4\t{%3, %2, %1, %0<mask_operand11>|%0<mask_operand11>, %1, %2, %3}";
@@ -26776,10 +26778,13 @@ 
 	else
 	  return "vmovaps\t{%2, %0|%0, %2}";
       }
-    if ((mask & 0xbb) == 18)
-      return "vblendps\t{$15, %2, %1, %0|%0, %1, %2, 15}";
-    if ((mask & 0xbb) == 48)
-      return "vblendps\t{$240, %2, %1, %0|%0, %1, %2, 240}";
+    if (!x86_evex_reg_mentioned_p (operands, 3))
+      {
+	if ((mask & 0xbb) == 18)
+	  return "vblendps\t{$15, %2, %1, %0|%0, %1, %2, 15}";
+	if ((mask & 0xbb) == 48)
+	  return "vblendps\t{$240, %2, %1, %0|%0, %1, %2, 240}";
+      }
     return "vperm2i128\t{%3, %2, %1, %0|%0, %1, %2, %3}";
   }
   [(set_attr "type" "sselog")
@@ -27433,10 +27438,13 @@ 
    && avx_vperm2f128_parallel (operands[3], <MODE>mode)"
 {
   int mask = avx_vperm2f128_parallel (operands[3], <MODE>mode) - 1;
-  if ((mask & 0xbb) == 0x12)
-    return "vblendps\t{$15, %2, %1, %0|%0, %1, %2, 15}";
-  if ((mask & 0xbb) == 0x30)
-    return "vblendps\t{$240, %2, %1, %0|%0, %1, %2, 240}";
+  if (!x86_evex_reg_mentioned_p (operands, 3))
+    {
+      if ((mask & 0xbb) == 0x12)
+	return "vblendps\t{$15, %2, %1, %0|%0, %1, %2, 15}";
+      if ((mask & 0xbb) == 0x30)
+	return "vblendps\t{$240, %2, %1, %0|%0, %1, %2, 240}";
+    }
   if ((mask & 0xbb) == 0x20)
     return "vinsert<i128>\t{$1, %x2, %1, %0|%0, %1, %x2, 1}";
   operands[3] = GEN_INT (mask);
diff --git a/gcc/testsuite/gcc.target/i386/pr112435-1.c b/gcc/testsuite/gcc.target/i386/pr112435-1.c
new file mode 100644
index 00000000000..ff56523b4e1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr112435-1.c
@@ -0,0 +1,14 @@ 
+/* PR target/112435 */
+/* { dg-do compile { target { ! ia32 } } } */
+/* { dg-options "-Ofast -march=sapphirerapids" } */
+/* { dg-final { scan-assembler-not "vblendps" } } */
+
+#include<x86intrin.h>
+
+__m256i
+f(__m256i a, __m256i  b)
+{
+  register __m256i t __asm__("ymm17") = a;
+  asm("":"+v"(t));
+  return _mm256_shuffle_i32x4 (t, b, 2);
+}
diff --git a/gcc/testsuite/gcc.target/i386/pr112435-2.c b/gcc/testsuite/gcc.target/i386/pr112435-2.c
new file mode 100644
index 00000000000..27ba80b1e68
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr112435-2.c
@@ -0,0 +1,64 @@ 
+/* PR target/112435 */
+/* { dg-do compile { target { ! ia32 } } } */
+/* { dg-options "-Ofast -march=sapphirerapids" } */
+/* { dg-final { scan-assembler-not "vblendps.*ymm17\$" } } */
+
+#include<x86intrin.h>
+
+/* Vpermi128/Vpermf128 */
+__m256i
+perm0 (__m256i a, __m256i b)
+{
+  register __m256i t __asm__("ymm17") = a;
+  asm("":"+v"(t));
+  return _mm256_permute2x128_si256 (t, b, 50);
+}
+
+__m256i
+perm1 (__m256i a, __m256i b)
+{
+  register __m256i t __asm__("ymm17") = a;
+  asm("":"+v"(t));
+  return _mm256_permute2x128_si256 (t, b, 18);
+}
+
+__m256i
+perm2 (__m256i a, __m256i b)
+{
+  register __m256i t __asm__("ymm17") = a;
+  asm("":"+v"(t));
+  return _mm256_permute2x128_si256 (t, b, 48);
+}
+
+/* vshuf{i,f}{32x4,64x2} ymm .*/
+__m256i
+shuff0 (__m256i a, __m256i b)
+{
+  register __m256i t __asm__("ymm17") = a;
+  asm("":"+v"(t));
+  return _mm256_shuffle_i32x4(t, b, 2);
+}
+
+__m256
+shuff1 (__m256 a, __m256 b)
+{
+  register __m256 t __asm__("ymm17") = a;
+  asm("":"+v"(t));
+  return _mm256_shuffle_f32x4(t, b, 2);
+}
+
+__m256i
+shuff2 (__m256i a, __m256i b)
+{
+  register __m256i t __asm__("ymm17") = a;
+  asm("":"+v"(t));
+  return _mm256_shuffle_i64x2(t, b, 2);
+}
+
+__m256d
+shuff3 (__m256d a, __m256d b)
+{
+  register __m256d t __asm__("ymm17") = a;
+  asm("":"+v"(t));
+  return _mm256_shuffle_f64x2(t, b, 2);
+}
diff --git a/gcc/testsuite/gcc.target/i386/pr112435-3.c b/gcc/testsuite/gcc.target/i386/pr112435-3.c
new file mode 100644
index 00000000000..f39820d4f37
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr112435-3.c
@@ -0,0 +1,79 @@ 
+/* PR target/112435 */
+/* { dg-do compile { target { ! ia32 } } } */
+/* { dg-options "-Ofast -march=sapphirerapids" } */
+/* { dg-final { scan-assembler-not "vblendps.*ymm17\$" } } */
+
+#include<x86intrin.h>
+
+/* Vpermf128 */
+__m256
+perm0 (__m256 a, __m256 b)
+{
+  register __m256 t __asm__("ymm17") =a;
+  asm("":"+v"(t));
+  return _mm256_permute2f128_ps (t, b, 50);
+}
+
+__m256
+perm1 (__m256 a, __m256 b)
+{
+  register __m256 t __asm__("ymm17") =a;
+  asm("":"+v"(t));
+  return _mm256_permute2f128_ps (t, b, 18);
+}
+
+__m256
+perm2 (__m256 a, __m256 b)
+{
+  register __m256 t __asm__("ymm17") =a;
+  asm("":"+v"(t));
+  return _mm256_permute2f128_ps (t, b, 48);
+}
+
+__m256i
+perm3 (__m256i a, __m256i b)
+{
+  register __m256i t __asm__("ymm17") =a;
+  asm("":"+v"(t));
+  return _mm256_permute2f128_si256 (t, b, 50);
+}
+
+__m256i
+perm4 (__m256i a, __m256i b)
+{
+  register __m256i t __asm__("ymm17") =a;
+  asm("":"+v"(t));
+  return _mm256_permute2f128_si256 (t, b, 18);
+}
+
+__m256i
+perm5 (__m256i a, __m256i b)
+{
+  register __m256i t __asm__("ymm17") =a;
+  asm("":"+v"(t));
+  return _mm256_permute2f128_si256 (t, b, 48);
+}
+
+__m256d
+perm6 (__m256d a, __m256d b)
+{
+  register __m256d t __asm__("ymm17") =a;
+  asm("":"+v"(t));
+  return _mm256_permute2f128_pd (t, b, 50);
+}
+
+__m256d
+perm7 (__m256d a, __m256d b)
+{
+  register __m256d t __asm__("ymm17") =a;
+  asm("":"+v"(t));
+  return _mm256_permute2f128_pd (t, b, 18);
+}
+
+__m256d
+perm8 (__m256d a, __m256d b)
+{
+  register __m256d t __asm__("ymm17") =a;
+  asm("":"+v"(t));
+  return _mm256_permute2f128_pd (t, b, 48);
+}