new file mode 100644
@@ -0,0 +1,301 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t test___riscv_vnmsub_mu(vbool64_t mask,vint8mf8_t vd,int8_t rs1,vint8mf8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,vl);
+}
+
+
+vint8mf4_t test___riscv_vnmsub_mu(vbool32_t mask,vint8mf4_t vd,int8_t rs1,vint8mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,vl);
+}
+
+
+vint8mf2_t test___riscv_vnmsub_mu(vbool16_t mask,vint8mf2_t vd,int8_t rs1,vint8mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,vl);
+}
+
+
+vint8m1_t test___riscv_vnmsub_mu(vbool8_t mask,vint8m1_t vd,int8_t rs1,vint8m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,vl);
+}
+
+
+vint8m2_t test___riscv_vnmsub_mu(vbool4_t mask,vint8m2_t vd,int8_t rs1,vint8m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,vl);
+}
+
+
+vint8m4_t test___riscv_vnmsub_mu(vbool2_t mask,vint8m4_t vd,int8_t rs1,vint8m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,vl);
+}
+
+
+vint8m8_t test___riscv_vnmsub_mu(vbool1_t mask,vint8m8_t vd,int8_t rs1,vint8m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,vl);
+}
+
+
+vint16mf4_t test___riscv_vnmsub_mu(vbool64_t mask,vint16mf4_t vd,int16_t rs1,vint16mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,vl);
+}
+
+
+vint16mf2_t test___riscv_vnmsub_mu(vbool32_t mask,vint16mf2_t vd,int16_t rs1,vint16mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,vl);
+}
+
+
+vint16m1_t test___riscv_vnmsub_mu(vbool16_t mask,vint16m1_t vd,int16_t rs1,vint16m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,vl);
+}
+
+
+vint16m2_t test___riscv_vnmsub_mu(vbool8_t mask,vint16m2_t vd,int16_t rs1,vint16m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,vl);
+}
+
+
+vint16m4_t test___riscv_vnmsub_mu(vbool4_t mask,vint16m4_t vd,int16_t rs1,vint16m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,vl);
+}
+
+
+vint16m8_t test___riscv_vnmsub_mu(vbool2_t mask,vint16m8_t vd,int16_t rs1,vint16m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,vl);
+}
+
+
+vint32mf2_t test___riscv_vnmsub_mu(vbool64_t mask,vint32mf2_t vd,int32_t rs1,vint32mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,vl);
+}
+
+
+vint32m1_t test___riscv_vnmsub_mu(vbool32_t mask,vint32m1_t vd,int32_t rs1,vint32m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,vl);
+}
+
+
+vint32m2_t test___riscv_vnmsub_mu(vbool16_t mask,vint32m2_t vd,int32_t rs1,vint32m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,vl);
+}
+
+
+vint32m4_t test___riscv_vnmsub_mu(vbool8_t mask,vint32m4_t vd,int32_t rs1,vint32m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,vl);
+}
+
+
+vint32m8_t test___riscv_vnmsub_mu(vbool4_t mask,vint32m8_t vd,int32_t rs1,vint32m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,vl);
+}
+
+
+vint64m1_t test___riscv_vnmsub_mu(vbool64_t mask,vint64m1_t vd,int64_t rs1,vint64m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,vl);
+}
+
+
+vint64m2_t test___riscv_vnmsub_mu(vbool32_t mask,vint64m2_t vd,int64_t rs1,vint64m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,vl);
+}
+
+
+vint64m4_t test___riscv_vnmsub_mu(vbool16_t mask,vint64m4_t vd,int64_t rs1,vint64m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,vl);
+}
+
+
+vint64m8_t test___riscv_vnmsub_mu(vbool8_t mask,vint64m8_t vd,int64_t rs1,vint64m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint8mf8_t test___riscv_vnmsub_mu(vbool64_t mask,vuint8mf8_t vd,uint8_t rs1,vuint8mf8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint8mf4_t test___riscv_vnmsub_mu(vbool32_t mask,vuint8mf4_t vd,uint8_t rs1,vuint8mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint8mf2_t test___riscv_vnmsub_mu(vbool16_t mask,vuint8mf2_t vd,uint8_t rs1,vuint8mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint8m1_t test___riscv_vnmsub_mu(vbool8_t mask,vuint8m1_t vd,uint8_t rs1,vuint8m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint8m2_t test___riscv_vnmsub_mu(vbool4_t mask,vuint8m2_t vd,uint8_t rs1,vuint8m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint8m4_t test___riscv_vnmsub_mu(vbool2_t mask,vuint8m4_t vd,uint8_t rs1,vuint8m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint8m8_t test___riscv_vnmsub_mu(vbool1_t mask,vuint8m8_t vd,uint8_t rs1,vuint8m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint16mf4_t test___riscv_vnmsub_mu(vbool64_t mask,vuint16mf4_t vd,uint16_t rs1,vuint16mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint16mf2_t test___riscv_vnmsub_mu(vbool32_t mask,vuint16mf2_t vd,uint16_t rs1,vuint16mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint16m1_t test___riscv_vnmsub_mu(vbool16_t mask,vuint16m1_t vd,uint16_t rs1,vuint16m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint16m2_t test___riscv_vnmsub_mu(vbool8_t mask,vuint16m2_t vd,uint16_t rs1,vuint16m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint16m4_t test___riscv_vnmsub_mu(vbool4_t mask,vuint16m4_t vd,uint16_t rs1,vuint16m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint16m8_t test___riscv_vnmsub_mu(vbool2_t mask,vuint16m8_t vd,uint16_t rs1,vuint16m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint32mf2_t test___riscv_vnmsub_mu(vbool64_t mask,vuint32mf2_t vd,uint32_t rs1,vuint32mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint32m1_t test___riscv_vnmsub_mu(vbool32_t mask,vuint32m1_t vd,uint32_t rs1,vuint32m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint32m2_t test___riscv_vnmsub_mu(vbool16_t mask,vuint32m2_t vd,uint32_t rs1,vuint32m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint32m4_t test___riscv_vnmsub_mu(vbool8_t mask,vuint32m4_t vd,uint32_t rs1,vuint32m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint32m8_t test___riscv_vnmsub_mu(vbool4_t mask,vuint32m8_t vd,uint32_t rs1,vuint32m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint64m1_t test___riscv_vnmsub_mu(vbool64_t mask,vuint64m1_t vd,uint64_t rs1,vuint64m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint64m2_t test___riscv_vnmsub_mu(vbool32_t mask,vuint64m2_t vd,uint64_t rs1,vuint64m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint64m4_t test___riscv_vnmsub_mu(vbool16_t mask,vuint64m4_t vd,uint64_t rs1,vuint64m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint64m8_t test___riscv_vnmsub_mu(vbool8_t mask,vuint64m8_t vd,uint64_t rs1,vuint64m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,vl);
+}
+
+
+
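+/* With a run-time vl, each call should be preceded by
+   "vsetvli zero,<reg>,e<SEW>,<LMUL>,t[au],mu" and lower to a masked
+   vnmsub.vx (the patterns also admit vnmsac.vx, which the compiler may
+   prefer after swapping operands).  The eight .vv matches are the
+   int64/uint64 cases: on rv32 a 64-bit scalar does not fit in one
+   x-register, so those use the vector-vector form.  */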
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vnms[a-u][b-c]\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 8 } } */
new file mode 100644
@@ -0,0 +1,297 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t test___riscv_vnmsub_mu(vbool64_t mask,vint8mf8_t vd,int8_t rs1,vint8mf8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,31);
+}
+
+
+vint8mf4_t test___riscv_vnmsub_mu(vbool32_t mask,vint8mf4_t vd,int8_t rs1,vint8mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,31);
+}
+
+
+vint8mf2_t test___riscv_vnmsub_mu(vbool16_t mask,vint8mf2_t vd,int8_t rs1,vint8mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,31);
+}
+
+
+vint8m1_t test___riscv_vnmsub_mu(vbool8_t mask,vint8m1_t vd,int8_t rs1,vint8m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,31);
+}
+
+
+vint8m2_t test___riscv_vnmsub_mu(vbool4_t mask,vint8m2_t vd,int8_t rs1,vint8m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,31);
+}
+
+
+vint8m4_t test___riscv_vnmsub_mu(vbool2_t mask,vint8m4_t vd,int8_t rs1,vint8m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,31);
+}
+
+
+vint8m8_t test___riscv_vnmsub_mu(vbool1_t mask,vint8m8_t vd,int8_t rs1,vint8m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,31);
+}
+
+
+vint16mf4_t test___riscv_vnmsub_mu(vbool64_t mask,vint16mf4_t vd,int16_t rs1,vint16mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,31);
+}
+
+
+vint16mf2_t test___riscv_vnmsub_mu(vbool32_t mask,vint16mf2_t vd,int16_t rs1,vint16mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,31);
+}
+
+
+vint16m1_t test___riscv_vnmsub_mu(vbool16_t mask,vint16m1_t vd,int16_t rs1,vint16m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,31);
+}
+
+
+vint16m2_t test___riscv_vnmsub_mu(vbool8_t mask,vint16m2_t vd,int16_t rs1,vint16m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,31);
+}
+
+
+vint16m4_t test___riscv_vnmsub_mu(vbool4_t mask,vint16m4_t vd,int16_t rs1,vint16m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,31);
+}
+
+
+vint16m8_t test___riscv_vnmsub_mu(vbool2_t mask,vint16m8_t vd,int16_t rs1,vint16m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,31);
+}
+
+
+vint32mf2_t test___riscv_vnmsub_mu(vbool64_t mask,vint32mf2_t vd,int32_t rs1,vint32mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,31);
+}
+
+
+vint32m1_t test___riscv_vnmsub_mu(vbool32_t mask,vint32m1_t vd,int32_t rs1,vint32m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,31);
+}
+
+
+vint32m2_t test___riscv_vnmsub_mu(vbool16_t mask,vint32m2_t vd,int32_t rs1,vint32m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,31);
+}
+
+
+vint32m4_t test___riscv_vnmsub_mu(vbool8_t mask,vint32m4_t vd,int32_t rs1,vint32m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,31);
+}
+
+
+vint32m8_t test___riscv_vnmsub_mu(vbool4_t mask,vint32m8_t vd,int32_t rs1,vint32m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,31);
+}
+
+
+vint64m1_t test___riscv_vnmsub_mu(vbool64_t mask,vint64m1_t vd,int64_t rs1,vint64m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,31);
+}
+
+
+vint64m2_t test___riscv_vnmsub_mu(vbool32_t mask,vint64m2_t vd,int64_t rs1,vint64m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,31);
+}
+
+
+vint64m4_t test___riscv_vnmsub_mu(vbool16_t mask,vint64m4_t vd,int64_t rs1,vint64m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,31);
+}
+
+
+vint64m8_t test___riscv_vnmsub_mu(vbool8_t mask,vint64m8_t vd,int64_t rs1,vint64m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,31);
+}
+
+
+vuint8mf8_t test___riscv_vnmsub_mu(vbool64_t mask,vuint8mf8_t vd,uint8_t rs1,vuint8mf8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,31);
+}
+
+
+vuint8mf4_t test___riscv_vnmsub_mu(vbool32_t mask,vuint8mf4_t vd,uint8_t rs1,vuint8mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,31);
+}
+
+
+vuint8mf2_t test___riscv_vnmsub_mu(vbool16_t mask,vuint8mf2_t vd,uint8_t rs1,vuint8mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,31);
+}
+
+
+vuint8m1_t test___riscv_vnmsub_mu(vbool8_t mask,vuint8m1_t vd,uint8_t rs1,vuint8m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,31);
+}
+
+
+vuint8m2_t test___riscv_vnmsub_mu(vbool4_t mask,vuint8m2_t vd,uint8_t rs1,vuint8m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,31);
+}
+
+
+vuint8m4_t test___riscv_vnmsub_mu(vbool2_t mask,vuint8m4_t vd,uint8_t rs1,vuint8m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,31);
+}
+
+
+vuint8m8_t test___riscv_vnmsub_mu(vbool1_t mask,vuint8m8_t vd,uint8_t rs1,vuint8m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,31);
+}
+
+
+vuint16mf4_t test___riscv_vnmsub_mu(vbool64_t mask,vuint16mf4_t vd,uint16_t rs1,vuint16mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,31);
+}
+
+
+vuint16mf2_t test___riscv_vnmsub_mu(vbool32_t mask,vuint16mf2_t vd,uint16_t rs1,vuint16mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,31);
+}
+
+
+vuint16m1_t test___riscv_vnmsub_mu(vbool16_t mask,vuint16m1_t vd,uint16_t rs1,vuint16m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,31);
+}
+
+
+vuint16m2_t test___riscv_vnmsub_mu(vbool8_t mask,vuint16m2_t vd,uint16_t rs1,vuint16m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,31);
+}
+
+
+vuint16m4_t test___riscv_vnmsub_mu(vbool4_t mask,vuint16m4_t vd,uint16_t rs1,vuint16m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,31);
+}
+
+
+vuint16m8_t test___riscv_vnmsub_mu(vbool2_t mask,vuint16m8_t vd,uint16_t rs1,vuint16m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,31);
+}
+
+
+vuint32mf2_t test___riscv_vnmsub_mu(vbool64_t mask,vuint32mf2_t vd,uint32_t rs1,vuint32mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,31);
+}
+
+
+vuint32m1_t test___riscv_vnmsub_mu(vbool32_t mask,vuint32m1_t vd,uint32_t rs1,vuint32m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,31);
+}
+
+
+vuint32m2_t test___riscv_vnmsub_mu(vbool16_t mask,vuint32m2_t vd,uint32_t rs1,vuint32m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,31);
+}
+
+
+vuint32m4_t test___riscv_vnmsub_mu(vbool8_t mask,vuint32m4_t vd,uint32_t rs1,vuint32m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,31);
+}
+
+
+vuint32m8_t test___riscv_vnmsub_mu(vbool4_t mask,vuint32m8_t vd,uint32_t rs1,vuint32m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,31);
+}
+
+
+vuint64m1_t test___riscv_vnmsub_mu(vbool64_t mask,vuint64m1_t vd,uint64_t rs1,vuint64m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,31);
+}
+
+
+vuint64m2_t test___riscv_vnmsub_mu(vbool32_t mask,vuint64m2_t vd,uint64_t rs1,vuint64m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,31);
+}
+
+
+vuint64m4_t test___riscv_vnmsub_mu(vbool16_t mask,vuint64m4_t vd,uint64_t rs1,vuint64m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,31);
+}
+
+
+vuint64m8_t test___riscv_vnmsub_mu(vbool8_t mask,vuint64m8_t vd,uint64_t rs1,vuint64m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,31);
+}
+
+
+
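+/* The eight masked .vv matches again correspond to the rv32 64-bit scalar
+   cases.  */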
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*t[au],\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*t[au],\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*t[au],\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*t[au],\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*t[au],\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*t[au],\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*t[au],\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*t[au],\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*t[au],\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*t[au],\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*t[au],\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*t[au],\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*t[au],\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*t[au],\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*t[au],\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*t[au],\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*t[au],\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*t[au],\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vnms[a-u][b-c]\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 8 } } */
new file mode 100644
@@ -0,0 +1,295 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t test___riscv_vnmsub_mu(vbool64_t mask,vint8mf8_t vd,int8_t rs1,vint8mf8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,32);
+}
+
+
+vint8mf4_t test___riscv_vnmsub_mu(vbool32_t mask,vint8mf4_t vd,int8_t rs1,vint8mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,32);
+}
+
+
+vint8mf2_t test___riscv_vnmsub_mu(vbool16_t mask,vint8mf2_t vd,int8_t rs1,vint8mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,32);
+}
+
+
+vint8m1_t test___riscv_vnmsub_mu(vbool8_t mask,vint8m1_t vd,int8_t rs1,vint8m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,32);
+}
+
+
+vint8m2_t test___riscv_vnmsub_mu(vbool4_t mask,vint8m2_t vd,int8_t rs1,vint8m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,32);
+}
+
+
+vint8m4_t test___riscv_vnmsub_mu(vbool2_t mask,vint8m4_t vd,int8_t rs1,vint8m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,32);
+}
+
+
+vint8m8_t test___riscv_vnmsub_mu(vbool1_t mask,vint8m8_t vd,int8_t rs1,vint8m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,32);
+}
+
+
+vint16mf4_t test___riscv_vnmsub_mu(vbool64_t mask,vint16mf4_t vd,int16_t rs1,vint16mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,32);
+}
+
+
+vint16mf2_t test___riscv_vnmsub_mu(vbool32_t mask,vint16mf2_t vd,int16_t rs1,vint16mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,32);
+}
+
+
+vint16m1_t test___riscv_vnmsub_mu(vbool16_t mask,vint16m1_t vd,int16_t rs1,vint16m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,32);
+}
+
+
+vint16m2_t test___riscv_vnmsub_mu(vbool8_t mask,vint16m2_t vd,int16_t rs1,vint16m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,32);
+}
+
+
+vint16m4_t test___riscv_vnmsub_mu(vbool4_t mask,vint16m4_t vd,int16_t rs1,vint16m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,32);
+}
+
+
+vint16m8_t test___riscv_vnmsub_mu(vbool2_t mask,vint16m8_t vd,int16_t rs1,vint16m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,32);
+}
+
+
+vint32mf2_t test___riscv_vnmsub_mu(vbool64_t mask,vint32mf2_t vd,int32_t rs1,vint32mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,32);
+}
+
+
+vint32m1_t test___riscv_vnmsub_mu(vbool32_t mask,vint32m1_t vd,int32_t rs1,vint32m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,32);
+}
+
+
+vint32m2_t test___riscv_vnmsub_mu(vbool16_t mask,vint32m2_t vd,int32_t rs1,vint32m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,32);
+}
+
+
+vint32m4_t test___riscv_vnmsub_mu(vbool8_t mask,vint32m4_t vd,int32_t rs1,vint32m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,32);
+}
+
+
+vint32m8_t test___riscv_vnmsub_mu(vbool4_t mask,vint32m8_t vd,int32_t rs1,vint32m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,32);
+}
+
+
+vint64m1_t test___riscv_vnmsub_mu(vbool64_t mask,vint64m1_t vd,int64_t rs1,vint64m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,32);
+}
+
+
+vint64m2_t test___riscv_vnmsub_mu(vbool32_t mask,vint64m2_t vd,int64_t rs1,vint64m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,32);
+}
+
+
+vint64m4_t test___riscv_vnmsub_mu(vbool16_t mask,vint64m4_t vd,int64_t rs1,vint64m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,32);
+}
+
+
+vint64m8_t test___riscv_vnmsub_mu(vbool8_t mask,vint64m8_t vd,int64_t rs1,vint64m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,32);
+}
+
+
+vuint8mf8_t test___riscv_vnmsub_mu(vbool64_t mask,vuint8mf8_t vd,uint8_t rs1,vuint8mf8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,32);
+}
+
+
+vuint8mf4_t test___riscv_vnmsub_mu(vbool32_t mask,vuint8mf4_t vd,uint8_t rs1,vuint8mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,32);
+}
+
+
+vuint8mf2_t test___riscv_vnmsub_mu(vbool16_t mask,vuint8mf2_t vd,uint8_t rs1,vuint8mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,32);
+}
+
+
+vuint8m1_t test___riscv_vnmsub_mu(vbool8_t mask,vuint8m1_t vd,uint8_t rs1,vuint8m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,32);
+}
+
+
+vuint8m2_t test___riscv_vnmsub_mu(vbool4_t mask,vuint8m2_t vd,uint8_t rs1,vuint8m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,32);
+}
+
+
+vuint8m4_t test___riscv_vnmsub_mu(vbool2_t mask,vuint8m4_t vd,uint8_t rs1,vuint8m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,32);
+}
+
+
+vuint8m8_t test___riscv_vnmsub_mu(vbool1_t mask,vuint8m8_t vd,uint8_t rs1,vuint8m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,32);
+}
+
+
+vuint16mf4_t test___riscv_vnmsub_mu(vbool64_t mask,vuint16mf4_t vd,uint16_t rs1,vuint16mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,32);
+}
+
+
+vuint16mf2_t test___riscv_vnmsub_mu(vbool32_t mask,vuint16mf2_t vd,uint16_t rs1,vuint16mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,32);
+}
+
+
+vuint16m1_t test___riscv_vnmsub_mu(vbool16_t mask,vuint16m1_t vd,uint16_t rs1,vuint16m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,32);
+}
+
+
+vuint16m2_t test___riscv_vnmsub_mu(vbool8_t mask,vuint16m2_t vd,uint16_t rs1,vuint16m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,32);
+}
+
+
+vuint16m4_t test___riscv_vnmsub_mu(vbool4_t mask,vuint16m4_t vd,uint16_t rs1,vuint16m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,32);
+}
+
+
+vuint16m8_t test___riscv_vnmsub_mu(vbool2_t mask,vuint16m8_t vd,uint16_t rs1,vuint16m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,32);
+}
+
+
+vuint32mf2_t test___riscv_vnmsub_mu(vbool64_t mask,vuint32mf2_t vd,uint32_t rs1,vuint32mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,32);
+}
+
+
+vuint32m1_t test___riscv_vnmsub_mu(vbool32_t mask,vuint32m1_t vd,uint32_t rs1,vuint32m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,32);
+}
+
+
+vuint32m2_t test___riscv_vnmsub_mu(vbool16_t mask,vuint32m2_t vd,uint32_t rs1,vuint32m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,32);
+}
+
+
+vuint32m4_t test___riscv_vnmsub_mu(vbool8_t mask,vuint32m4_t vd,uint32_t rs1,vuint32m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,32);
+}
+
+
+vuint32m8_t test___riscv_vnmsub_mu(vbool4_t mask,vuint32m8_t vd,uint32_t rs1,vuint32m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,32);
+}
+
+
+vuint64m1_t test___riscv_vnmsub_mu(vbool64_t mask,vuint64m1_t vd,uint64_t rs1,vuint64m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,32);
+}
+
+
+vuint64m2_t test___riscv_vnmsub_mu(vbool32_t mask,vuint64m2_t vd,uint64_t rs1,vuint64m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,32);
+}
+
+
+vuint64m4_t test___riscv_vnmsub_mu(vbool16_t mask,vuint64m4_t vd,uint64_t rs1,vuint64m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,32);
+}
+
+
+vuint64m8_t test___riscv_vnmsub_mu(vbool8_t mask,vuint64m8_t vd,uint64_t rs1,vuint64m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_mu(mask,vd,rs1,vs2,32);
+}
+
+
+
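+/* Unlike the vl==31 test, the directives below scan for vsetvli rather
+   than vsetivli.  */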
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vnms[a-u][b-c]\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 8 } } */
new file mode 100644
@@ -0,0 +1,580 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t test___riscv_vnmsub(vint8mf8_t vd,int8_t rs1,vint8mf8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,vl);
+}
+
+
+vint8mf4_t test___riscv_vnmsub(vint8mf4_t vd,int8_t rs1,vint8mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,vl);
+}
+
+
+vint8mf2_t test___riscv_vnmsub(vint8mf2_t vd,int8_t rs1,vint8mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,vl);
+}
+
+
+vint8m1_t test___riscv_vnmsub(vint8m1_t vd,int8_t rs1,vint8m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,vl);
+}
+
+
+vint8m2_t test___riscv_vnmsub(vint8m2_t vd,int8_t rs1,vint8m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,vl);
+}
+
+
+vint8m4_t test___riscv_vnmsub(vint8m4_t vd,int8_t rs1,vint8m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,vl);
+}
+
+
+vint8m8_t test___riscv_vnmsub(vint8m8_t vd,int8_t rs1,vint8m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,vl);
+}
+
+
+vint16mf4_t test___riscv_vnmsub(vint16mf4_t vd,int16_t rs1,vint16mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,vl);
+}
+
+
+vint16mf2_t test___riscv_vnmsub(vint16mf2_t vd,int16_t rs1,vint16mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,vl);
+}
+
+
+vint16m1_t test___riscv_vnmsub(vint16m1_t vd,int16_t rs1,vint16m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,vl);
+}
+
+
+vint16m2_t test___riscv_vnmsub(vint16m2_t vd,int16_t rs1,vint16m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,vl);
+}
+
+
+vint16m4_t test___riscv_vnmsub(vint16m4_t vd,int16_t rs1,vint16m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,vl);
+}
+
+
+vint16m8_t test___riscv_vnmsub(vint16m8_t vd,int16_t rs1,vint16m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,vl);
+}
+
+
+vint32mf2_t test___riscv_vnmsub(vint32mf2_t vd,int32_t rs1,vint32mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,vl);
+}
+
+
+vint32m1_t test___riscv_vnmsub(vint32m1_t vd,int32_t rs1,vint32m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,vl);
+}
+
+
+vint32m2_t test___riscv_vnmsub(vint32m2_t vd,int32_t rs1,vint32m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,vl);
+}
+
+
+vint32m4_t test___riscv_vnmsub(vint32m4_t vd,int32_t rs1,vint32m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,vl);
+}
+
+
+vint32m8_t test___riscv_vnmsub(vint32m8_t vd,int32_t rs1,vint32m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,vl);
+}
+
+
+vint64m1_t test___riscv_vnmsub(vint64m1_t vd,int64_t rs1,vint64m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,vl);
+}
+
+
+vint64m2_t test___riscv_vnmsub(vint64m2_t vd,int64_t rs1,vint64m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,vl);
+}
+
+
+vint64m4_t test___riscv_vnmsub(vint64m4_t vd,int64_t rs1,vint64m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,vl);
+}
+
+
+vint64m8_t test___riscv_vnmsub(vint64m8_t vd,int64_t rs1,vint64m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,vl);
+}
+
+
+vuint8mf8_t test___riscv_vnmsub(vuint8mf8_t vd,uint8_t rs1,vuint8mf8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,vl);
+}
+
+
+vuint8mf4_t test___riscv_vnmsub(vuint8mf4_t vd,uint8_t rs1,vuint8mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,vl);
+}
+
+
+vuint8mf2_t test___riscv_vnmsub(vuint8mf2_t vd,uint8_t rs1,vuint8mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,vl);
+}
+
+
+vuint8m1_t test___riscv_vnmsub(vuint8m1_t vd,uint8_t rs1,vuint8m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,vl);
+}
+
+
+vuint8m2_t test___riscv_vnmsub(vuint8m2_t vd,uint8_t rs1,vuint8m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,vl);
+}
+
+
+vuint8m4_t test___riscv_vnmsub(vuint8m4_t vd,uint8_t rs1,vuint8m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,vl);
+}
+
+
+vuint8m8_t test___riscv_vnmsub(vuint8m8_t vd,uint8_t rs1,vuint8m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,vl);
+}
+
+
+vuint16mf4_t test___riscv_vnmsub(vuint16mf4_t vd,uint16_t rs1,vuint16mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,vl);
+}
+
+
+vuint16mf2_t test___riscv_vnmsub(vuint16mf2_t vd,uint16_t rs1,vuint16mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,vl);
+}
+
+
+vuint16m1_t test___riscv_vnmsub(vuint16m1_t vd,uint16_t rs1,vuint16m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,vl);
+}
+
+
+vuint16m2_t test___riscv_vnmsub(vuint16m2_t vd,uint16_t rs1,vuint16m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,vl);
+}
+
+
+vuint16m4_t test___riscv_vnmsub(vuint16m4_t vd,uint16_t rs1,vuint16m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,vl);
+}
+
+
+vuint16m8_t test___riscv_vnmsub(vuint16m8_t vd,uint16_t rs1,vuint16m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,vl);
+}
+
+
+vuint32mf2_t test___riscv_vnmsub(vuint32mf2_t vd,uint32_t rs1,vuint32mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,vl);
+}
+
+
+vuint32m1_t test___riscv_vnmsub(vuint32m1_t vd,uint32_t rs1,vuint32m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,vl);
+}
+
+
+vuint32m2_t test___riscv_vnmsub(vuint32m2_t vd,uint32_t rs1,vuint32m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,vl);
+}
+
+
+vuint32m4_t test___riscv_vnmsub(vuint32m4_t vd,uint32_t rs1,vuint32m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,vl);
+}
+
+
+vuint32m8_t test___riscv_vnmsub(vuint32m8_t vd,uint32_t rs1,vuint32m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,vl);
+}
+
+
+vuint64m1_t test___riscv_vnmsub(vuint64m1_t vd,uint64_t rs1,vuint64m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,vl);
+}
+
+
+vuint64m2_t test___riscv_vnmsub(vuint64m2_t vd,uint64_t rs1,vuint64m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,vl);
+}
+
+
+vuint64m4_t test___riscv_vnmsub(vuint64m4_t vd,uint64_t rs1,vuint64m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,vl);
+}
+
+
+vuint64m8_t test___riscv_vnmsub(vuint64m8_t vd,uint64_t rs1,vuint64m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,vl);
+}
+
+
+vint8mf8_t test___riscv_vnmsub(vbool64_t mask,vint8mf8_t vd,int8_t rs1,vint8mf8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,vl);
+}
+
+
+vint8mf4_t test___riscv_vnmsub(vbool32_t mask,vint8mf4_t vd,int8_t rs1,vint8mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,vl);
+}
+
+
+vint8mf2_t test___riscv_vnmsub(vbool16_t mask,vint8mf2_t vd,int8_t rs1,vint8mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,vl);
+}
+
+
+vint8m1_t test___riscv_vnmsub(vbool8_t mask,vint8m1_t vd,int8_t rs1,vint8m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,vl);
+}
+
+
+vint8m2_t test___riscv_vnmsub(vbool4_t mask,vint8m2_t vd,int8_t rs1,vint8m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,vl);
+}
+
+
+vint8m4_t test___riscv_vnmsub(vbool2_t mask,vint8m4_t vd,int8_t rs1,vint8m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,vl);
+}
+
+
+vint8m8_t test___riscv_vnmsub(vbool1_t mask,vint8m8_t vd,int8_t rs1,vint8m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,vl);
+}
+
+
+vint16mf4_t test___riscv_vnmsub(vbool64_t mask,vint16mf4_t vd,int16_t rs1,vint16mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,vl);
+}
+
+
+vint16mf2_t test___riscv_vnmsub(vbool32_t mask,vint16mf2_t vd,int16_t rs1,vint16mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,vl);
+}
+
+
+vint16m1_t test___riscv_vnmsub(vbool16_t mask,vint16m1_t vd,int16_t rs1,vint16m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,vl);
+}
+
+
+vint16m2_t test___riscv_vnmsub(vbool8_t mask,vint16m2_t vd,int16_t rs1,vint16m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,vl);
+}
+
+
+vint16m4_t test___riscv_vnmsub(vbool4_t mask,vint16m4_t vd,int16_t rs1,vint16m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,vl);
+}
+
+
+vint16m8_t test___riscv_vnmsub(vbool2_t mask,vint16m8_t vd,int16_t rs1,vint16m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,vl);
+}
+
+
+vint32mf2_t test___riscv_vnmsub(vbool64_t mask,vint32mf2_t vd,int32_t rs1,vint32mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,vl);
+}
+
+
+vint32m1_t test___riscv_vnmsub(vbool32_t mask,vint32m1_t vd,int32_t rs1,vint32m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,vl);
+}
+
+
+vint32m2_t test___riscv_vnmsub(vbool16_t mask,vint32m2_t vd,int32_t rs1,vint32m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,vl);
+}
+
+
+vint32m4_t test___riscv_vnmsub(vbool8_t mask,vint32m4_t vd,int32_t rs1,vint32m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,vl);
+}
+
+
+vint32m8_t test___riscv_vnmsub(vbool4_t mask,vint32m8_t vd,int32_t rs1,vint32m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,vl);
+}
+
+
+vint64m1_t test___riscv_vnmsub(vbool64_t mask,vint64m1_t vd,int64_t rs1,vint64m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,vl);
+}
+
+
+vint64m2_t test___riscv_vnmsub(vbool32_t mask,vint64m2_t vd,int64_t rs1,vint64m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,vl);
+}
+
+
+vint64m4_t test___riscv_vnmsub(vbool16_t mask,vint64m4_t vd,int64_t rs1,vint64m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,vl);
+}
+
+
+vint64m8_t test___riscv_vnmsub(vbool8_t mask,vint64m8_t vd,int64_t rs1,vint64m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint8mf8_t test___riscv_vnmsub(vbool64_t mask,vuint8mf8_t vd,uint8_t rs1,vuint8mf8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint8mf4_t test___riscv_vnmsub(vbool32_t mask,vuint8mf4_t vd,uint8_t rs1,vuint8mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint8mf2_t test___riscv_vnmsub(vbool16_t mask,vuint8mf2_t vd,uint8_t rs1,vuint8mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint8m1_t test___riscv_vnmsub(vbool8_t mask,vuint8m1_t vd,uint8_t rs1,vuint8m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint8m2_t test___riscv_vnmsub(vbool4_t mask,vuint8m2_t vd,uint8_t rs1,vuint8m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint8m4_t test___riscv_vnmsub(vbool2_t mask,vuint8m4_t vd,uint8_t rs1,vuint8m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint8m8_t test___riscv_vnmsub(vbool1_t mask,vuint8m8_t vd,uint8_t rs1,vuint8m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint16mf4_t test___riscv_vnmsub(vbool64_t mask,vuint16mf4_t vd,uint16_t rs1,vuint16mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint16mf2_t test___riscv_vnmsub(vbool32_t mask,vuint16mf2_t vd,uint16_t rs1,vuint16mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint16m1_t test___riscv_vnmsub(vbool16_t mask,vuint16m1_t vd,uint16_t rs1,vuint16m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint16m2_t test___riscv_vnmsub(vbool8_t mask,vuint16m2_t vd,uint16_t rs1,vuint16m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint16m4_t test___riscv_vnmsub(vbool4_t mask,vuint16m4_t vd,uint16_t rs1,vuint16m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint16m8_t test___riscv_vnmsub(vbool2_t mask,vuint16m8_t vd,uint16_t rs1,vuint16m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint32mf2_t test___riscv_vnmsub(vbool64_t mask,vuint32mf2_t vd,uint32_t rs1,vuint32mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint32m1_t test___riscv_vnmsub(vbool32_t mask,vuint32m1_t vd,uint32_t rs1,vuint32m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint32m2_t test___riscv_vnmsub(vbool16_t mask,vuint32m2_t vd,uint32_t rs1,vuint32m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint32m4_t test___riscv_vnmsub(vbool8_t mask,vuint32m4_t vd,uint32_t rs1,vuint32m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint32m8_t test___riscv_vnmsub(vbool4_t mask,vuint32m8_t vd,uint32_t rs1,vuint32m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint64m1_t test___riscv_vnmsub(vbool64_t mask,vuint64m1_t vd,uint64_t rs1,vuint64m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint64m2_t test___riscv_vnmsub(vbool32_t mask,vuint64m2_t vd,uint64_t rs1,vuint64m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint64m4_t test___riscv_vnmsub(vbool16_t mask,vuint64m4_t vd,uint64_t rs1,vuint64m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint64m8_t test___riscv_vnmsub(vbool8_t mask,vuint64m8_t vd,uint64_t rs1,vuint64m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,vl);
+}
+
+
+
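+/* The unmasked half should lower to plain vnmsub.vx/.vv and the masked half
+   to the v0.t forms; the tail/mask policy letters are matched loosely as
+   t[au],m[au] since no explicit policy is requested here.  */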
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vnms[a-u][b-c]\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 8 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vnms[a-u][b-c]\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 8 } } */
new file mode 100644
@@ -0,0 +1,575 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t test___riscv_vnmsub(vint8mf8_t vd,int8_t rs1,vint8mf8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,31);
+}
+
+
+vint8mf4_t test___riscv_vnmsub(vint8mf4_t vd,int8_t rs1,vint8mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,31);
+}
+
+
+vint8mf2_t test___riscv_vnmsub(vint8mf2_t vd,int8_t rs1,vint8mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,31);
+}
+
+
+vint8m1_t test___riscv_vnmsub(vint8m1_t vd,int8_t rs1,vint8m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,31);
+}
+
+
+vint8m2_t test___riscv_vnmsub(vint8m2_t vd,int8_t rs1,vint8m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,31);
+}
+
+
+vint8m4_t test___riscv_vnmsub(vint8m4_t vd,int8_t rs1,vint8m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,31);
+}
+
+
+vint8m8_t test___riscv_vnmsub(vint8m8_t vd,int8_t rs1,vint8m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,31);
+}
+
+
+vint16mf4_t test___riscv_vnmsub(vint16mf4_t vd,int16_t rs1,vint16mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,31);
+}
+
+
+vint16mf2_t test___riscv_vnmsub(vint16mf2_t vd,int16_t rs1,vint16mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,31);
+}
+
+
+vint16m1_t test___riscv_vnmsub(vint16m1_t vd,int16_t rs1,vint16m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,31);
+}
+
+
+vint16m2_t test___riscv_vnmsub(vint16m2_t vd,int16_t rs1,vint16m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,31);
+}
+
+
+vint16m4_t test___riscv_vnmsub(vint16m4_t vd,int16_t rs1,vint16m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,31);
+}
+
+
+vint16m8_t test___riscv_vnmsub(vint16m8_t vd,int16_t rs1,vint16m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,31);
+}
+
+
+vint32mf2_t test___riscv_vnmsub(vint32mf2_t vd,int32_t rs1,vint32mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,31);
+}
+
+
+vint32m1_t test___riscv_vnmsub(vint32m1_t vd,int32_t rs1,vint32m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,31);
+}
+
+
+vint32m2_t test___riscv_vnmsub(vint32m2_t vd,int32_t rs1,vint32m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,31);
+}
+
+
+vint32m4_t test___riscv_vnmsub(vint32m4_t vd,int32_t rs1,vint32m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,31);
+}
+
+
+vint32m8_t test___riscv_vnmsub(vint32m8_t vd,int32_t rs1,vint32m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,31);
+}
+
+
+vint64m1_t test___riscv_vnmsub(vint64m1_t vd,int64_t rs1,vint64m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,31);
+}
+
+
+vint64m2_t test___riscv_vnmsub(vint64m2_t vd,int64_t rs1,vint64m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,31);
+}
+
+
+vint64m4_t test___riscv_vnmsub(vint64m4_t vd,int64_t rs1,vint64m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,31);
+}
+
+
+vint64m8_t test___riscv_vnmsub(vint64m8_t vd,int64_t rs1,vint64m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,31);
+}
+
+
+vuint8mf8_t test___riscv_vnmsub(vuint8mf8_t vd,uint8_t rs1,vuint8mf8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,31);
+}
+
+
+vuint8mf4_t test___riscv_vnmsub(vuint8mf4_t vd,uint8_t rs1,vuint8mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,31);
+}
+
+
+vuint8mf2_t test___riscv_vnmsub(vuint8mf2_t vd,uint8_t rs1,vuint8mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,31);
+}
+
+
+vuint8m1_t test___riscv_vnmsub(vuint8m1_t vd,uint8_t rs1,vuint8m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,31);
+}
+
+
+vuint8m2_t test___riscv_vnmsub(vuint8m2_t vd,uint8_t rs1,vuint8m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,31);
+}
+
+
+vuint8m4_t test___riscv_vnmsub(vuint8m4_t vd,uint8_t rs1,vuint8m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,31);
+}
+
+
+vuint8m8_t test___riscv_vnmsub(vuint8m8_t vd,uint8_t rs1,vuint8m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,31);
+}
+
+
+vuint16mf4_t test___riscv_vnmsub(vuint16mf4_t vd,uint16_t rs1,vuint16mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,31);
+}
+
+
+vuint16mf2_t test___riscv_vnmsub(vuint16mf2_t vd,uint16_t rs1,vuint16mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,31);
+}
+
+
+vuint16m1_t test___riscv_vnmsub(vuint16m1_t vd,uint16_t rs1,vuint16m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,31);
+}
+
+
+vuint16m2_t test___riscv_vnmsub(vuint16m2_t vd,uint16_t rs1,vuint16m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,31);
+}
+
+
+vuint16m4_t test___riscv_vnmsub(vuint16m4_t vd,uint16_t rs1,vuint16m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,31);
+}
+
+
+vuint16m8_t test___riscv_vnmsub(vuint16m8_t vd,uint16_t rs1,vuint16m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,31);
+}
+
+
+vuint32mf2_t test___riscv_vnmsub(vuint32mf2_t vd,uint32_t rs1,vuint32mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,31);
+}
+
+
+vuint32m1_t test___riscv_vnmsub(vuint32m1_t vd,uint32_t rs1,vuint32m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,31);
+}
+
+
+vuint32m2_t test___riscv_vnmsub(vuint32m2_t vd,uint32_t rs1,vuint32m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,31);
+}
+
+
+vuint32m4_t test___riscv_vnmsub(vuint32m4_t vd,uint32_t rs1,vuint32m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,31);
+}
+
+
+vuint32m8_t test___riscv_vnmsub(vuint32m8_t vd,uint32_t rs1,vuint32m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,31);
+}
+
+
+vuint64m1_t test___riscv_vnmsub(vuint64m1_t vd,uint64_t rs1,vuint64m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,31);
+}
+
+
+vuint64m2_t test___riscv_vnmsub(vuint64m2_t vd,uint64_t rs1,vuint64m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,31);
+}
+
+
+vuint64m4_t test___riscv_vnmsub(vuint64m4_t vd,uint64_t rs1,vuint64m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,31);
+}
+
+
+vuint64m8_t test___riscv_vnmsub(vuint64m8_t vd,uint64_t rs1,vuint64m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,31);
+}
+
+
+vint8mf8_t test___riscv_vnmsub(vbool64_t mask,vint8mf8_t vd,int8_t rs1,vint8mf8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,31);
+}
+
+
+vint8mf4_t test___riscv_vnmsub(vbool32_t mask,vint8mf4_t vd,int8_t rs1,vint8mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,31);
+}
+
+
+vint8mf2_t test___riscv_vnmsub(vbool16_t mask,vint8mf2_t vd,int8_t rs1,vint8mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,31);
+}
+
+
+vint8m1_t test___riscv_vnmsub(vbool8_t mask,vint8m1_t vd,int8_t rs1,vint8m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,31);
+}
+
+
+vint8m2_t test___riscv_vnmsub(vbool4_t mask,vint8m2_t vd,int8_t rs1,vint8m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,31);
+}
+
+
+vint8m4_t test___riscv_vnmsub(vbool2_t mask,vint8m4_t vd,int8_t rs1,vint8m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,31);
+}
+
+
+vint8m8_t test___riscv_vnmsub(vbool1_t mask,vint8m8_t vd,int8_t rs1,vint8m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,31);
+}
+
+
+vint16mf4_t test___riscv_vnmsub(vbool64_t mask,vint16mf4_t vd,int16_t rs1,vint16mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,31);
+}
+
+
+vint16mf2_t test___riscv_vnmsub(vbool32_t mask,vint16mf2_t vd,int16_t rs1,vint16mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,31);
+}
+
+
+vint16m1_t test___riscv_vnmsub(vbool16_t mask,vint16m1_t vd,int16_t rs1,vint16m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,31);
+}
+
+
+vint16m2_t test___riscv_vnmsub(vbool8_t mask,vint16m2_t vd,int16_t rs1,vint16m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,31);
+}
+
+
+vint16m4_t test___riscv_vnmsub(vbool4_t mask,vint16m4_t vd,int16_t rs1,vint16m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,31);
+}
+
+
+vint16m8_t test___riscv_vnmsub(vbool2_t mask,vint16m8_t vd,int16_t rs1,vint16m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,31);
+}
+
+
+vint32mf2_t test___riscv_vnmsub(vbool64_t mask,vint32mf2_t vd,int32_t rs1,vint32mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,31);
+}
+
+
+vint32m1_t test___riscv_vnmsub(vbool32_t mask,vint32m1_t vd,int32_t rs1,vint32m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,31);
+}
+
+
+vint32m2_t test___riscv_vnmsub(vbool16_t mask,vint32m2_t vd,int32_t rs1,vint32m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,31);
+}
+
+
+vint32m4_t test___riscv_vnmsub(vbool8_t mask,vint32m4_t vd,int32_t rs1,vint32m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,31);
+}
+
+
+vint32m8_t test___riscv_vnmsub(vbool4_t mask,vint32m8_t vd,int32_t rs1,vint32m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,31);
+}
+
+
+vint64m1_t test___riscv_vnmsub(vbool64_t mask,vint64m1_t vd,int64_t rs1,vint64m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,31);
+}
+
+
+vint64m2_t test___riscv_vnmsub(vbool32_t mask,vint64m2_t vd,int64_t rs1,vint64m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,31);
+}
+
+
+vint64m4_t test___riscv_vnmsub(vbool16_t mask,vint64m4_t vd,int64_t rs1,vint64m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,31);
+}
+
+
+vint64m8_t test___riscv_vnmsub(vbool8_t mask,vint64m8_t vd,int64_t rs1,vint64m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,31);
+}
+
+
+vuint8mf8_t test___riscv_vnmsub(vbool64_t mask,vuint8mf8_t vd,uint8_t rs1,vuint8mf8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,31);
+}
+
+
+vuint8mf4_t test___riscv_vnmsub(vbool32_t mask,vuint8mf4_t vd,uint8_t rs1,vuint8mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,31);
+}
+
+
+vuint8mf2_t test___riscv_vnmsub(vbool16_t mask,vuint8mf2_t vd,uint8_t rs1,vuint8mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,31);
+}
+
+
+vuint8m1_t test___riscv_vnmsub(vbool8_t mask,vuint8m1_t vd,uint8_t rs1,vuint8m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,31);
+}
+
+
+vuint8m2_t test___riscv_vnmsub(vbool4_t mask,vuint8m2_t vd,uint8_t rs1,vuint8m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,31);
+}
+
+
+vuint8m4_t test___riscv_vnmsub(vbool2_t mask,vuint8m4_t vd,uint8_t rs1,vuint8m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,31);
+}
+
+
+vuint8m8_t test___riscv_vnmsub(vbool1_t mask,vuint8m8_t vd,uint8_t rs1,vuint8m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,31);
+}
+
+
+vuint16mf4_t test___riscv_vnmsub(vbool64_t mask,vuint16mf4_t vd,uint16_t rs1,vuint16mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,31);
+}
+
+
+vuint16mf2_t test___riscv_vnmsub(vbool32_t mask,vuint16mf2_t vd,uint16_t rs1,vuint16mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,31);
+}
+
+
+vuint16m1_t test___riscv_vnmsub(vbool16_t mask,vuint16m1_t vd,uint16_t rs1,vuint16m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,31);
+}
+
+
+vuint16m2_t test___riscv_vnmsub(vbool8_t mask,vuint16m2_t vd,uint16_t rs1,vuint16m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,31);
+}
+
+
+vuint16m4_t test___riscv_vnmsub(vbool4_t mask,vuint16m4_t vd,uint16_t rs1,vuint16m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,31);
+}
+
+
+vuint16m8_t test___riscv_vnmsub(vbool2_t mask,vuint16m8_t vd,uint16_t rs1,vuint16m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,31);
+}
+
+
+vuint32mf2_t test___riscv_vnmsub(vbool64_t mask,vuint32mf2_t vd,uint32_t rs1,vuint32mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,31);
+}
+
+
+vuint32m1_t test___riscv_vnmsub(vbool32_t mask,vuint32m1_t vd,uint32_t rs1,vuint32m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,31);
+}
+
+
+vuint32m2_t test___riscv_vnmsub(vbool16_t mask,vuint32m2_t vd,uint32_t rs1,vuint32m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,31);
+}
+
+
+vuint32m4_t test___riscv_vnmsub(vbool8_t mask,vuint32m4_t vd,uint32_t rs1,vuint32m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,31);
+}
+
+
+vuint32m8_t test___riscv_vnmsub(vbool4_t mask,vuint32m8_t vd,uint32_t rs1,vuint32m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,31);
+}
+
+
+vuint64m1_t test___riscv_vnmsub(vbool64_t mask,vuint64m1_t vd,uint64_t rs1,vuint64m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,31);
+}
+
+
+vuint64m2_t test___riscv_vnmsub(vbool32_t mask,vuint64m2_t vd,uint64_t rs1,vuint64m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,31);
+}
+
+
+vuint64m4_t test___riscv_vnmsub(vbool16_t mask,vuint64m4_t vd,uint64_t rs1,vuint64m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,31);
+}
+
+
+vuint64m8_t test___riscv_vnmsub(vbool8_t mask,vuint64m8_t vd,uint64_t rs1,vuint64m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,31);
+}
+
+
+
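+/* vl is the immediate 31, which fits the 5-bit AVL immediate of vsetivli,
+   so the scans below expect vsetivli rather than vsetvli.  There are no
+   e64 .vx scans: on rv32 a 64-bit scalar does not fit in a single
+   x-register, so the eight int64/uint64 cases (m1..m8, signed and
+   unsigned) are expected to fall back to the .vv form instead.  */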
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vnms[a-u][b-c]\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 8 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vnms[a-u][b-c]\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 8 } } */
new file mode 100644
@@ -0,0 +1,572 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
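+
+/* vl is the immediate 32: it does not fit vsetivli's 5-bit AVL immediate
+   (range 0-31), so the dg-final scans below expect vsetvli with a register
+   AVL operand instead.  */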
+
+vint8mf8_t test___riscv_vnmsub(vint8mf8_t vd,int8_t rs1,vint8mf8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,32);
+}
+
+
+vint8mf4_t test___riscv_vnmsub(vint8mf4_t vd,int8_t rs1,vint8mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,32);
+}
+
+
+vint8mf2_t test___riscv_vnmsub(vint8mf2_t vd,int8_t rs1,vint8mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,32);
+}
+
+
+vint8m1_t test___riscv_vnmsub(vint8m1_t vd,int8_t rs1,vint8m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,32);
+}
+
+
+vint8m2_t test___riscv_vnmsub(vint8m2_t vd,int8_t rs1,vint8m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,32);
+}
+
+
+vint8m4_t test___riscv_vnmsub(vint8m4_t vd,int8_t rs1,vint8m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,32);
+}
+
+
+vint8m8_t test___riscv_vnmsub(vint8m8_t vd,int8_t rs1,vint8m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,32);
+}
+
+
+vint16mf4_t test___riscv_vnmsub(vint16mf4_t vd,int16_t rs1,vint16mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,32);
+}
+
+
+vint16mf2_t test___riscv_vnmsub(vint16mf2_t vd,int16_t rs1,vint16mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,32);
+}
+
+
+vint16m1_t test___riscv_vnmsub(vint16m1_t vd,int16_t rs1,vint16m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,32);
+}
+
+
+vint16m2_t test___riscv_vnmsub(vint16m2_t vd,int16_t rs1,vint16m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,32);
+}
+
+
+vint16m4_t test___riscv_vnmsub(vint16m4_t vd,int16_t rs1,vint16m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,32);
+}
+
+
+vint16m8_t test___riscv_vnmsub(vint16m8_t vd,int16_t rs1,vint16m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,32);
+}
+
+
+vint32mf2_t test___riscv_vnmsub(vint32mf2_t vd,int32_t rs1,vint32mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,32);
+}
+
+
+vint32m1_t test___riscv_vnmsub(vint32m1_t vd,int32_t rs1,vint32m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,32);
+}
+
+
+vint32m2_t test___riscv_vnmsub(vint32m2_t vd,int32_t rs1,vint32m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,32);
+}
+
+
+vint32m4_t test___riscv_vnmsub(vint32m4_t vd,int32_t rs1,vint32m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,32);
+}
+
+
+vint32m8_t test___riscv_vnmsub(vint32m8_t vd,int32_t rs1,vint32m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,32);
+}
+
+
+vint64m1_t test___riscv_vnmsub(vint64m1_t vd,int64_t rs1,vint64m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,32);
+}
+
+
+vint64m2_t test___riscv_vnmsub(vint64m2_t vd,int64_t rs1,vint64m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,32);
+}
+
+
+vint64m4_t test___riscv_vnmsub(vint64m4_t vd,int64_t rs1,vint64m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,32);
+}
+
+
+vint64m8_t test___riscv_vnmsub(vint64m8_t vd,int64_t rs1,vint64m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,32);
+}
+
+
+vuint8mf8_t test___riscv_vnmsub(vuint8mf8_t vd,uint8_t rs1,vuint8mf8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,32);
+}
+
+
+vuint8mf4_t test___riscv_vnmsub(vuint8mf4_t vd,uint8_t rs1,vuint8mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,32);
+}
+
+
+vuint8mf2_t test___riscv_vnmsub(vuint8mf2_t vd,uint8_t rs1,vuint8mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,32);
+}
+
+
+vuint8m1_t test___riscv_vnmsub(vuint8m1_t vd,uint8_t rs1,vuint8m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,32);
+}
+
+
+vuint8m2_t test___riscv_vnmsub(vuint8m2_t vd,uint8_t rs1,vuint8m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,32);
+}
+
+
+vuint8m4_t test___riscv_vnmsub(vuint8m4_t vd,uint8_t rs1,vuint8m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,32);
+}
+
+
+vuint8m8_t test___riscv_vnmsub(vuint8m8_t vd,uint8_t rs1,vuint8m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,32);
+}
+
+
+vuint16mf4_t test___riscv_vnmsub(vuint16mf4_t vd,uint16_t rs1,vuint16mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,32);
+}
+
+
+vuint16mf2_t test___riscv_vnmsub(vuint16mf2_t vd,uint16_t rs1,vuint16mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,32);
+}
+
+
+vuint16m1_t test___riscv_vnmsub(vuint16m1_t vd,uint16_t rs1,vuint16m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,32);
+}
+
+
+vuint16m2_t test___riscv_vnmsub(vuint16m2_t vd,uint16_t rs1,vuint16m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,32);
+}
+
+
+vuint16m4_t test___riscv_vnmsub(vuint16m4_t vd,uint16_t rs1,vuint16m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,32);
+}
+
+
+vuint16m8_t test___riscv_vnmsub(vuint16m8_t vd,uint16_t rs1,vuint16m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,32);
+}
+
+
+vuint32mf2_t test___riscv_vnmsub(vuint32mf2_t vd,uint32_t rs1,vuint32mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,32);
+}
+
+
+vuint32m1_t test___riscv_vnmsub(vuint32m1_t vd,uint32_t rs1,vuint32m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,32);
+}
+
+
+vuint32m2_t test___riscv_vnmsub(vuint32m2_t vd,uint32_t rs1,vuint32m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,32);
+}
+
+
+vuint32m4_t test___riscv_vnmsub(vuint32m4_t vd,uint32_t rs1,vuint32m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,32);
+}
+
+
+vuint32m8_t test___riscv_vnmsub(vuint32m8_t vd,uint32_t rs1,vuint32m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,32);
+}
+
+
+vuint64m1_t test___riscv_vnmsub(vuint64m1_t vd,uint64_t rs1,vuint64m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,32);
+}
+
+
+vuint64m2_t test___riscv_vnmsub(vuint64m2_t vd,uint64_t rs1,vuint64m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,32);
+}
+
+
+vuint64m4_t test___riscv_vnmsub(vuint64m4_t vd,uint64_t rs1,vuint64m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,32);
+}
+
+
+vuint64m8_t test___riscv_vnmsub(vuint64m8_t vd,uint64_t rs1,vuint64m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(vd,rs1,vs2,32);
+}
+
+
+vint8mf8_t test___riscv_vnmsub(vbool64_t mask,vint8mf8_t vd,int8_t rs1,vint8mf8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,32);
+}
+
+
+vint8mf4_t test___riscv_vnmsub(vbool32_t mask,vint8mf4_t vd,int8_t rs1,vint8mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,32);
+}
+
+
+vint8mf2_t test___riscv_vnmsub(vbool16_t mask,vint8mf2_t vd,int8_t rs1,vint8mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,32);
+}
+
+
+vint8m1_t test___riscv_vnmsub(vbool8_t mask,vint8m1_t vd,int8_t rs1,vint8m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,32);
+}
+
+
+vint8m2_t test___riscv_vnmsub(vbool4_t mask,vint8m2_t vd,int8_t rs1,vint8m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,32);
+}
+
+
+vint8m4_t test___riscv_vnmsub(vbool2_t mask,vint8m4_t vd,int8_t rs1,vint8m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,32);
+}
+
+
+vint8m8_t test___riscv_vnmsub(vbool1_t mask,vint8m8_t vd,int8_t rs1,vint8m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,32);
+}
+
+
+vint16mf4_t test___riscv_vnmsub(vbool64_t mask,vint16mf4_t vd,int16_t rs1,vint16mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,32);
+}
+
+
+vint16mf2_t test___riscv_vnmsub(vbool32_t mask,vint16mf2_t vd,int16_t rs1,vint16mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,32);
+}
+
+
+vint16m1_t test___riscv_vnmsub(vbool16_t mask,vint16m1_t vd,int16_t rs1,vint16m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,32);
+}
+
+
+vint16m2_t test___riscv_vnmsub(vbool8_t mask,vint16m2_t vd,int16_t rs1,vint16m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,32);
+}
+
+
+vint16m4_t test___riscv_vnmsub(vbool4_t mask,vint16m4_t vd,int16_t rs1,vint16m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,32);
+}
+
+
+vint16m8_t test___riscv_vnmsub(vbool2_t mask,vint16m8_t vd,int16_t rs1,vint16m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,32);
+}
+
+
+vint32mf2_t test___riscv_vnmsub(vbool64_t mask,vint32mf2_t vd,int32_t rs1,vint32mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,32);
+}
+
+
+vint32m1_t test___riscv_vnmsub(vbool32_t mask,vint32m1_t vd,int32_t rs1,vint32m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,32);
+}
+
+
+vint32m2_t test___riscv_vnmsub(vbool16_t mask,vint32m2_t vd,int32_t rs1,vint32m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,32);
+}
+
+
+vint32m4_t test___riscv_vnmsub(vbool8_t mask,vint32m4_t vd,int32_t rs1,vint32m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,32);
+}
+
+
+vint32m8_t test___riscv_vnmsub(vbool4_t mask,vint32m8_t vd,int32_t rs1,vint32m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,32);
+}
+
+
+vint64m1_t test___riscv_vnmsub(vbool64_t mask,vint64m1_t vd,int64_t rs1,vint64m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,32);
+}
+
+
+vint64m2_t test___riscv_vnmsub(vbool32_t mask,vint64m2_t vd,int64_t rs1,vint64m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,32);
+}
+
+
+vint64m4_t test___riscv_vnmsub(vbool16_t mask,vint64m4_t vd,int64_t rs1,vint64m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,32);
+}
+
+
+vint64m8_t test___riscv_vnmsub(vbool8_t mask,vint64m8_t vd,int64_t rs1,vint64m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,32);
+}
+
+
+vuint8mf8_t test___riscv_vnmsub(vbool64_t mask,vuint8mf8_t vd,uint8_t rs1,vuint8mf8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,32);
+}
+
+
+vuint8mf4_t test___riscv_vnmsub(vbool32_t mask,vuint8mf4_t vd,uint8_t rs1,vuint8mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,32);
+}
+
+
+vuint8mf2_t test___riscv_vnmsub(vbool16_t mask,vuint8mf2_t vd,uint8_t rs1,vuint8mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,32);
+}
+
+
+vuint8m1_t test___riscv_vnmsub(vbool8_t mask,vuint8m1_t vd,uint8_t rs1,vuint8m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,32);
+}
+
+
+vuint8m2_t test___riscv_vnmsub(vbool4_t mask,vuint8m2_t vd,uint8_t rs1,vuint8m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,32);
+}
+
+
+vuint8m4_t test___riscv_vnmsub(vbool2_t mask,vuint8m4_t vd,uint8_t rs1,vuint8m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,32);
+}
+
+
+vuint8m8_t test___riscv_vnmsub(vbool1_t mask,vuint8m8_t vd,uint8_t rs1,vuint8m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,32);
+}
+
+
+vuint16mf4_t test___riscv_vnmsub(vbool64_t mask,vuint16mf4_t vd,uint16_t rs1,vuint16mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,32);
+}
+
+
+vuint16mf2_t test___riscv_vnmsub(vbool32_t mask,vuint16mf2_t vd,uint16_t rs1,vuint16mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,32);
+}
+
+
+vuint16m1_t test___riscv_vnmsub(vbool16_t mask,vuint16m1_t vd,uint16_t rs1,vuint16m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,32);
+}
+
+
+vuint16m2_t test___riscv_vnmsub(vbool8_t mask,vuint16m2_t vd,uint16_t rs1,vuint16m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,32);
+}
+
+
+vuint16m4_t test___riscv_vnmsub(vbool4_t mask,vuint16m4_t vd,uint16_t rs1,vuint16m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,32);
+}
+
+
+vuint16m8_t test___riscv_vnmsub(vbool2_t mask,vuint16m8_t vd,uint16_t rs1,vuint16m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,32);
+}
+
+
+vuint32mf2_t test___riscv_vnmsub(vbool64_t mask,vuint32mf2_t vd,uint32_t rs1,vuint32mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,32);
+}
+
+
+vuint32m1_t test___riscv_vnmsub(vbool32_t mask,vuint32m1_t vd,uint32_t rs1,vuint32m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,32);
+}
+
+
+vuint32m2_t test___riscv_vnmsub(vbool16_t mask,vuint32m2_t vd,uint32_t rs1,vuint32m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,32);
+}
+
+
+vuint32m4_t test___riscv_vnmsub(vbool8_t mask,vuint32m4_t vd,uint32_t rs1,vuint32m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,32);
+}
+
+
+vuint32m8_t test___riscv_vnmsub(vbool4_t mask,vuint32m8_t vd,uint32_t rs1,vuint32m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,32);
+}
+
+
+vuint64m1_t test___riscv_vnmsub(vbool64_t mask,vuint64m1_t vd,uint64_t rs1,vuint64m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,32);
+}
+
+
+vuint64m2_t test___riscv_vnmsub(vbool32_t mask,vuint64m2_t vd,uint64_t rs1,vuint64m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,32);
+}
+
+
+vuint64m4_t test___riscv_vnmsub(vbool16_t mask,vuint64m4_t vd,uint64_t rs1,vuint64m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,32);
+}
+
+
+vuint64m8_t test___riscv_vnmsub(vbool8_t mask,vuint64m8_t vd,uint64_t rs1,vuint64m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub(mask,vd,rs1,vs2,32);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vnms[a-u][b-c]\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 8 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vnms[a-u][b-c]\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0\.t\s+} 8 } } */
new file mode 100644
@@ -0,0 +1,289 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
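+
+/* The _tu (tail-undisturbed) variants with a runtime vl: the scans at the
+   end of this file expect the "tu" policy in the vsetvli emitted before
+   each vnmsub/vnmsac.  */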
+
+vint8mf8_t test___riscv_vnmsub_tu(vint8mf8_t vd,int8_t rs1,vint8mf8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,vl);
+}
+
+
+vint8mf4_t test___riscv_vnmsub_tu(vint8mf4_t vd,int8_t rs1,vint8mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,vl);
+}
+
+
+vint8mf2_t test___riscv_vnmsub_tu(vint8mf2_t vd,int8_t rs1,vint8mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,vl);
+}
+
+
+vint8m1_t test___riscv_vnmsub_tu(vint8m1_t vd,int8_t rs1,vint8m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,vl);
+}
+
+
+vint8m2_t test___riscv_vnmsub_tu(vint8m2_t vd,int8_t rs1,vint8m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,vl);
+}
+
+
+vint8m4_t test___riscv_vnmsub_tu(vint8m4_t vd,int8_t rs1,vint8m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,vl);
+}
+
+
+vint8m8_t test___riscv_vnmsub_tu(vint8m8_t vd,int8_t rs1,vint8m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,vl);
+}
+
+
+vint16mf4_t test___riscv_vnmsub_tu(vint16mf4_t vd,int16_t rs1,vint16mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,vl);
+}
+
+
+vint16mf2_t test___riscv_vnmsub_tu(vint16mf2_t vd,int16_t rs1,vint16mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,vl);
+}
+
+
+vint16m1_t test___riscv_vnmsub_tu(vint16m1_t vd,int16_t rs1,vint16m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,vl);
+}
+
+
+vint16m2_t test___riscv_vnmsub_tu(vint16m2_t vd,int16_t rs1,vint16m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,vl);
+}
+
+
+vint16m4_t test___riscv_vnmsub_tu(vint16m4_t vd,int16_t rs1,vint16m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,vl);
+}
+
+
+vint16m8_t test___riscv_vnmsub_tu(vint16m8_t vd,int16_t rs1,vint16m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,vl);
+}
+
+
+vint32mf2_t test___riscv_vnmsub_tu(vint32mf2_t vd,int32_t rs1,vint32mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,vl);
+}
+
+
+vint32m1_t test___riscv_vnmsub_tu(vint32m1_t vd,int32_t rs1,vint32m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,vl);
+}
+
+
+vint32m2_t test___riscv_vnmsub_tu(vint32m2_t vd,int32_t rs1,vint32m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,vl);
+}
+
+
+vint32m4_t test___riscv_vnmsub_tu(vint32m4_t vd,int32_t rs1,vint32m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,vl);
+}
+
+
+vint32m8_t test___riscv_vnmsub_tu(vint32m8_t vd,int32_t rs1,vint32m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,vl);
+}
+
+
+vint64m1_t test___riscv_vnmsub_tu(vint64m1_t vd,int64_t rs1,vint64m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,vl);
+}
+
+
+vint64m2_t test___riscv_vnmsub_tu(vint64m2_t vd,int64_t rs1,vint64m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,vl);
+}
+
+
+vint64m4_t test___riscv_vnmsub_tu(vint64m4_t vd,int64_t rs1,vint64m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,vl);
+}
+
+
+vint64m8_t test___riscv_vnmsub_tu(vint64m8_t vd,int64_t rs1,vint64m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,vl);
+}
+
+
+vuint8mf8_t test___riscv_vnmsub_tu(vuint8mf8_t vd,uint8_t rs1,vuint8mf8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,vl);
+}
+
+
+vuint8mf4_t test___riscv_vnmsub_tu(vuint8mf4_t vd,uint8_t rs1,vuint8mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,vl);
+}
+
+
+vuint8mf2_t test___riscv_vnmsub_tu(vuint8mf2_t vd,uint8_t rs1,vuint8mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,vl);
+}
+
+
+vuint8m1_t test___riscv_vnmsub_tu(vuint8m1_t vd,uint8_t rs1,vuint8m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,vl);
+}
+
+
+vuint8m2_t test___riscv_vnmsub_tu(vuint8m2_t vd,uint8_t rs1,vuint8m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,vl);
+}
+
+
+vuint8m4_t test___riscv_vnmsub_tu(vuint8m4_t vd,uint8_t rs1,vuint8m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,vl);
+}
+
+
+vuint8m8_t test___riscv_vnmsub_tu(vuint8m8_t vd,uint8_t rs1,vuint8m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,vl);
+}
+
+
+vuint16mf4_t test___riscv_vnmsub_tu(vuint16mf4_t vd,uint16_t rs1,vuint16mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,vl);
+}
+
+
+vuint16mf2_t test___riscv_vnmsub_tu(vuint16mf2_t vd,uint16_t rs1,vuint16mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,vl);
+}
+
+
+vuint16m1_t test___riscv_vnmsub_tu(vuint16m1_t vd,uint16_t rs1,vuint16m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,vl);
+}
+
+
+vuint16m2_t test___riscv_vnmsub_tu(vuint16m2_t vd,uint16_t rs1,vuint16m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,vl);
+}
+
+
+vuint16m4_t test___riscv_vnmsub_tu(vuint16m4_t vd,uint16_t rs1,vuint16m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,vl);
+}
+
+
+vuint16m8_t test___riscv_vnmsub_tu(vuint16m8_t vd,uint16_t rs1,vuint16m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,vl);
+}
+
+
+vuint32mf2_t test___riscv_vnmsub_tu(vuint32mf2_t vd,uint32_t rs1,vuint32mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,vl);
+}
+
+
+vuint32m1_t test___riscv_vnmsub_tu(vuint32m1_t vd,uint32_t rs1,vuint32m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,vl);
+}
+
+
+vuint32m2_t test___riscv_vnmsub_tu(vuint32m2_t vd,uint32_t rs1,vuint32m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,vl);
+}
+
+
+vuint32m4_t test___riscv_vnmsub_tu(vuint32m4_t vd,uint32_t rs1,vuint32m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,vl);
+}
+
+
+vuint32m8_t test___riscv_vnmsub_tu(vuint32m8_t vd,uint32_t rs1,vuint32m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,vl);
+}
+
+
+vuint64m1_t test___riscv_vnmsub_tu(vuint64m1_t vd,uint64_t rs1,vuint64m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,vl);
+}
+
+
+vuint64m2_t test___riscv_vnmsub_tu(vuint64m2_t vd,uint64_t rs1,vuint64m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,vl);
+}
+
+
+vuint64m4_t test___riscv_vnmsub_tu(vuint64m4_t vd,uint64_t rs1,vuint64m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,vl);
+}
+
+
+vuint64m8_t test___riscv_vnmsub_tu(vuint64m8_t vd,uint64_t rs1,vuint64m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,vl);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vnms[a-u][b-c]\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 8 } } */
new file mode 100644
@@ -0,0 +1,289 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
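+
+/* _tu variants with the immediate vl 31, which fits vsetivli's 5-bit AVL
+   field: the scans expect vsetivli zero,31 with the "tu" policy.  */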
+
+vint8mf8_t test___riscv_vnmsub_tu(vint8mf8_t vd,int8_t rs1,vint8mf8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,31);
+}
+
+
+vint8mf4_t test___riscv_vnmsub_tu(vint8mf4_t vd,int8_t rs1,vint8mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,31);
+}
+
+
+vint8mf2_t test___riscv_vnmsub_tu(vint8mf2_t vd,int8_t rs1,vint8mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,31);
+}
+
+
+vint8m1_t test___riscv_vnmsub_tu(vint8m1_t vd,int8_t rs1,vint8m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,31);
+}
+
+
+vint8m2_t test___riscv_vnmsub_tu(vint8m2_t vd,int8_t rs1,vint8m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,31);
+}
+
+
+vint8m4_t test___riscv_vnmsub_tu(vint8m4_t vd,int8_t rs1,vint8m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,31);
+}
+
+
+vint8m8_t test___riscv_vnmsub_tu(vint8m8_t vd,int8_t rs1,vint8m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,31);
+}
+
+
+vint16mf4_t test___riscv_vnmsub_tu(vint16mf4_t vd,int16_t rs1,vint16mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,31);
+}
+
+
+vint16mf2_t test___riscv_vnmsub_tu(vint16mf2_t vd,int16_t rs1,vint16mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,31);
+}
+
+
+vint16m1_t test___riscv_vnmsub_tu(vint16m1_t vd,int16_t rs1,vint16m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,31);
+}
+
+
+vint16m2_t test___riscv_vnmsub_tu(vint16m2_t vd,int16_t rs1,vint16m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,31);
+}
+
+
+vint16m4_t test___riscv_vnmsub_tu(vint16m4_t vd,int16_t rs1,vint16m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,31);
+}
+
+
+vint16m8_t test___riscv_vnmsub_tu(vint16m8_t vd,int16_t rs1,vint16m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,31);
+}
+
+
+vint32mf2_t test___riscv_vnmsub_tu(vint32mf2_t vd,int32_t rs1,vint32mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,31);
+}
+
+
+vint32m1_t test___riscv_vnmsub_tu(vint32m1_t vd,int32_t rs1,vint32m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,31);
+}
+
+
+vint32m2_t test___riscv_vnmsub_tu(vint32m2_t vd,int32_t rs1,vint32m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,31);
+}
+
+
+vint32m4_t test___riscv_vnmsub_tu(vint32m4_t vd,int32_t rs1,vint32m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,31);
+}
+
+
+vint32m8_t test___riscv_vnmsub_tu(vint32m8_t vd,int32_t rs1,vint32m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,31);
+}
+
+
+vint64m1_t test___riscv_vnmsub_tu(vint64m1_t vd,int64_t rs1,vint64m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,31);
+}
+
+
+vint64m2_t test___riscv_vnmsub_tu(vint64m2_t vd,int64_t rs1,vint64m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,31);
+}
+
+
+vint64m4_t test___riscv_vnmsub_tu(vint64m4_t vd,int64_t rs1,vint64m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,31);
+}
+
+
+vint64m8_t test___riscv_vnmsub_tu(vint64m8_t vd,int64_t rs1,vint64m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,31);
+}
+
+
+vuint8mf8_t test___riscv_vnmsub_tu(vuint8mf8_t vd,uint8_t rs1,vuint8mf8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,31);
+}
+
+
+vuint8mf4_t test___riscv_vnmsub_tu(vuint8mf4_t vd,uint8_t rs1,vuint8mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,31);
+}
+
+
+vuint8mf2_t test___riscv_vnmsub_tu(vuint8mf2_t vd,uint8_t rs1,vuint8mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,31);
+}
+
+
+vuint8m1_t test___riscv_vnmsub_tu(vuint8m1_t vd,uint8_t rs1,vuint8m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,31);
+}
+
+
+vuint8m2_t test___riscv_vnmsub_tu(vuint8m2_t vd,uint8_t rs1,vuint8m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,31);
+}
+
+
+vuint8m4_t test___riscv_vnmsub_tu(vuint8m4_t vd,uint8_t rs1,vuint8m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,31);
+}
+
+
+vuint8m8_t test___riscv_vnmsub_tu(vuint8m8_t vd,uint8_t rs1,vuint8m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,31);
+}
+
+
+vuint16mf4_t test___riscv_vnmsub_tu(vuint16mf4_t vd,uint16_t rs1,vuint16mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,31);
+}
+
+
+vuint16mf2_t test___riscv_vnmsub_tu(vuint16mf2_t vd,uint16_t rs1,vuint16mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,31);
+}
+
+
+vuint16m1_t test___riscv_vnmsub_tu(vuint16m1_t vd,uint16_t rs1,vuint16m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,31);
+}
+
+
+vuint16m2_t test___riscv_vnmsub_tu(vuint16m2_t vd,uint16_t rs1,vuint16m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,31);
+}
+
+
+vuint16m4_t test___riscv_vnmsub_tu(vuint16m4_t vd,uint16_t rs1,vuint16m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,31);
+}
+
+
+vuint16m8_t test___riscv_vnmsub_tu(vuint16m8_t vd,uint16_t rs1,vuint16m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,31);
+}
+
+
+vuint32mf2_t test___riscv_vnmsub_tu(vuint32mf2_t vd,uint32_t rs1,vuint32mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,31);
+}
+
+
+vuint32m1_t test___riscv_vnmsub_tu(vuint32m1_t vd,uint32_t rs1,vuint32m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,31);
+}
+
+
+vuint32m2_t test___riscv_vnmsub_tu(vuint32m2_t vd,uint32_t rs1,vuint32m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,31);
+}
+
+
+vuint32m4_t test___riscv_vnmsub_tu(vuint32m4_t vd,uint32_t rs1,vuint32m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,31);
+}
+
+
+vuint32m8_t test___riscv_vnmsub_tu(vuint32m8_t vd,uint32_t rs1,vuint32m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,31);
+}
+
+
+vuint64m1_t test___riscv_vnmsub_tu(vuint64m1_t vd,uint64_t rs1,vuint64m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,31);
+}
+
+
+vuint64m2_t test___riscv_vnmsub_tu(vuint64m2_t vd,uint64_t rs1,vuint64m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,31);
+}
+
+
+vuint64m4_t test___riscv_vnmsub_tu(vuint64m4_t vd,uint64_t rs1,vuint64m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,31);
+}
+
+
+vuint64m8_t test___riscv_vnmsub_tu(vuint64m8_t vd,uint64_t rs1,vuint64m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,31);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vnms[a-u][b-c]\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 8 } } */
new file mode 100644
@@ -0,0 +1,289 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
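+
+/* _tu variants with the immediate vl 32: since 32 exceeds the vsetivli AVL
+   immediate range (0-31), the scans expect vsetvli with a register AVL.  */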
+
+vint8mf8_t test___riscv_vnmsub_tu(vint8mf8_t vd,int8_t rs1,vint8mf8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,32);
+}
+
+
+vint8mf4_t test___riscv_vnmsub_tu(vint8mf4_t vd,int8_t rs1,vint8mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,32);
+}
+
+
+vint8mf2_t test___riscv_vnmsub_tu(vint8mf2_t vd,int8_t rs1,vint8mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,32);
+}
+
+
+vint8m1_t test___riscv_vnmsub_tu(vint8m1_t vd,int8_t rs1,vint8m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,32);
+}
+
+
+vint8m2_t test___riscv_vnmsub_tu(vint8m2_t vd,int8_t rs1,vint8m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,32);
+}
+
+
+vint8m4_t test___riscv_vnmsub_tu(vint8m4_t vd,int8_t rs1,vint8m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,32);
+}
+
+
+vint8m8_t test___riscv_vnmsub_tu(vint8m8_t vd,int8_t rs1,vint8m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,32);
+}
+
+
+vint16mf4_t test___riscv_vnmsub_tu(vint16mf4_t vd,int16_t rs1,vint16mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,32);
+}
+
+
+vint16mf2_t test___riscv_vnmsub_tu(vint16mf2_t vd,int16_t rs1,vint16mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,32);
+}
+
+
+vint16m1_t test___riscv_vnmsub_tu(vint16m1_t vd,int16_t rs1,vint16m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,32);
+}
+
+
+vint16m2_t test___riscv_vnmsub_tu(vint16m2_t vd,int16_t rs1,vint16m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,32);
+}
+
+
+vint16m4_t test___riscv_vnmsub_tu(vint16m4_t vd,int16_t rs1,vint16m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,32);
+}
+
+
+vint16m8_t test___riscv_vnmsub_tu(vint16m8_t vd,int16_t rs1,vint16m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,32);
+}
+
+
+vint32mf2_t test___riscv_vnmsub_tu(vint32mf2_t vd,int32_t rs1,vint32mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,32);
+}
+
+
+vint32m1_t test___riscv_vnmsub_tu(vint32m1_t vd,int32_t rs1,vint32m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,32);
+}
+
+
+vint32m2_t test___riscv_vnmsub_tu(vint32m2_t vd,int32_t rs1,vint32m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,32);
+}
+
+
+vint32m4_t test___riscv_vnmsub_tu(vint32m4_t vd,int32_t rs1,vint32m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,32);
+}
+
+
+vint32m8_t test___riscv_vnmsub_tu(vint32m8_t vd,int32_t rs1,vint32m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,32);
+}
+
+
+vint64m1_t test___riscv_vnmsub_tu(vint64m1_t vd,int64_t rs1,vint64m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,32);
+}
+
+
+vint64m2_t test___riscv_vnmsub_tu(vint64m2_t vd,int64_t rs1,vint64m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,32);
+}
+
+
+vint64m4_t test___riscv_vnmsub_tu(vint64m4_t vd,int64_t rs1,vint64m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,32);
+}
+
+
+vint64m8_t test___riscv_vnmsub_tu(vint64m8_t vd,int64_t rs1,vint64m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,32);
+}
+
+
+vuint8mf8_t test___riscv_vnmsub_tu(vuint8mf8_t vd,uint8_t rs1,vuint8mf8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,32);
+}
+
+
+vuint8mf4_t test___riscv_vnmsub_tu(vuint8mf4_t vd,uint8_t rs1,vuint8mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,32);
+}
+
+
+vuint8mf2_t test___riscv_vnmsub_tu(vuint8mf2_t vd,uint8_t rs1,vuint8mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,32);
+}
+
+
+vuint8m1_t test___riscv_vnmsub_tu(vuint8m1_t vd,uint8_t rs1,vuint8m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,32);
+}
+
+
+vuint8m2_t test___riscv_vnmsub_tu(vuint8m2_t vd,uint8_t rs1,vuint8m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,32);
+}
+
+
+vuint8m4_t test___riscv_vnmsub_tu(vuint8m4_t vd,uint8_t rs1,vuint8m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,32);
+}
+
+
+vuint8m8_t test___riscv_vnmsub_tu(vuint8m8_t vd,uint8_t rs1,vuint8m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,32);
+}
+
+
+vuint16mf4_t test___riscv_vnmsub_tu(vuint16mf4_t vd,uint16_t rs1,vuint16mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,32);
+}
+
+
+vuint16mf2_t test___riscv_vnmsub_tu(vuint16mf2_t vd,uint16_t rs1,vuint16mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,32);
+}
+
+
+vuint16m1_t test___riscv_vnmsub_tu(vuint16m1_t vd,uint16_t rs1,vuint16m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,32);
+}
+
+
+vuint16m2_t test___riscv_vnmsub_tu(vuint16m2_t vd,uint16_t rs1,vuint16m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,32);
+}
+
+
+vuint16m4_t test___riscv_vnmsub_tu(vuint16m4_t vd,uint16_t rs1,vuint16m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,32);
+}
+
+
+vuint16m8_t test___riscv_vnmsub_tu(vuint16m8_t vd,uint16_t rs1,vuint16m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,32);
+}
+
+
+vuint32mf2_t test___riscv_vnmsub_tu(vuint32mf2_t vd,uint32_t rs1,vuint32mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,32);
+}
+
+
+vuint32m1_t test___riscv_vnmsub_tu(vuint32m1_t vd,uint32_t rs1,vuint32m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,32);
+}
+
+
+vuint32m2_t test___riscv_vnmsub_tu(vuint32m2_t vd,uint32_t rs1,vuint32m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,32);
+}
+
+
+vuint32m4_t test___riscv_vnmsub_tu(vuint32m4_t vd,uint32_t rs1,vuint32m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,32);
+}
+
+
+vuint32m8_t test___riscv_vnmsub_tu(vuint32m8_t vd,uint32_t rs1,vuint32m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,32);
+}
+
+
+vuint64m1_t test___riscv_vnmsub_tu(vuint64m1_t vd,uint64_t rs1,vuint64m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,32);
+}
+
+
+vuint64m2_t test___riscv_vnmsub_tu(vuint64m2_t vd,uint64_t rs1,vuint64m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,32);
+}
+
+
+vuint64m4_t test___riscv_vnmsub_tu(vuint64m4_t vd,uint64_t rs1,vuint64m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,32);
+}
+
+
+vuint64m8_t test___riscv_vnmsub_tu(vuint64m8_t vd,uint64_t rs1,vuint64m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tu(vd,rs1,vs2,32);
+}
+
+
+
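+/* With a constant AVL of 32, which does not fit vsetivli's 5-bit
+   immediate, every call should be compiled to a register-operand
+   vsetvli, as the patterns below require.  */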
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vnms[a-u][b-c]\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 8 } } */
new file mode 100644
@@ -0,0 +1,289 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t test___riscv_vnmsub_tum(vbool64_t mask,vint8mf8_t vd,int8_t rs1,vint8mf8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,vl);
+}
+
+
+vint8mf4_t test___riscv_vnmsub_tum(vbool32_t mask,vint8mf4_t vd,int8_t rs1,vint8mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,vl);
+}
+
+
+vint8mf2_t test___riscv_vnmsub_tum(vbool16_t mask,vint8mf2_t vd,int8_t rs1,vint8mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,vl);
+}
+
+
+vint8m1_t test___riscv_vnmsub_tum(vbool8_t mask,vint8m1_t vd,int8_t rs1,vint8m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,vl);
+}
+
+
+vint8m2_t test___riscv_vnmsub_tum(vbool4_t mask,vint8m2_t vd,int8_t rs1,vint8m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,vl);
+}
+
+
+vint8m4_t test___riscv_vnmsub_tum(vbool2_t mask,vint8m4_t vd,int8_t rs1,vint8m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,vl);
+}
+
+
+vint8m8_t test___riscv_vnmsub_tum(vbool1_t mask,vint8m8_t vd,int8_t rs1,vint8m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,vl);
+}
+
+
+vint16mf4_t test___riscv_vnmsub_tum(vbool64_t mask,vint16mf4_t vd,int16_t rs1,vint16mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,vl);
+}
+
+
+vint16mf2_t test___riscv_vnmsub_tum(vbool32_t mask,vint16mf2_t vd,int16_t rs1,vint16mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,vl);
+}
+
+
+vint16m1_t test___riscv_vnmsub_tum(vbool16_t mask,vint16m1_t vd,int16_t rs1,vint16m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,vl);
+}
+
+
+vint16m2_t test___riscv_vnmsub_tum(vbool8_t mask,vint16m2_t vd,int16_t rs1,vint16m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,vl);
+}
+
+
+vint16m4_t test___riscv_vnmsub_tum(vbool4_t mask,vint16m4_t vd,int16_t rs1,vint16m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,vl);
+}
+
+
+vint16m8_t test___riscv_vnmsub_tum(vbool2_t mask,vint16m8_t vd,int16_t rs1,vint16m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,vl);
+}
+
+
+vint32mf2_t test___riscv_vnmsub_tum(vbool64_t mask,vint32mf2_t vd,int32_t rs1,vint32mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,vl);
+}
+
+
+vint32m1_t test___riscv_vnmsub_tum(vbool32_t mask,vint32m1_t vd,int32_t rs1,vint32m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,vl);
+}
+
+
+vint32m2_t test___riscv_vnmsub_tum(vbool16_t mask,vint32m2_t vd,int32_t rs1,vint32m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,vl);
+}
+
+
+vint32m4_t test___riscv_vnmsub_tum(vbool8_t mask,vint32m4_t vd,int32_t rs1,vint32m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,vl);
+}
+
+
+vint32m8_t test___riscv_vnmsub_tum(vbool4_t mask,vint32m8_t vd,int32_t rs1,vint32m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,vl);
+}
+
+
+vint64m1_t test___riscv_vnmsub_tum(vbool64_t mask,vint64m1_t vd,int64_t rs1,vint64m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,vl);
+}
+
+
+vint64m2_t test___riscv_vnmsub_tum(vbool32_t mask,vint64m2_t vd,int64_t rs1,vint64m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,vl);
+}
+
+
+vint64m4_t test___riscv_vnmsub_tum(vbool16_t mask,vint64m4_t vd,int64_t rs1,vint64m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,vl);
+}
+
+
+vint64m8_t test___riscv_vnmsub_tum(vbool8_t mask,vint64m8_t vd,int64_t rs1,vint64m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint8mf8_t test___riscv_vnmsub_tum(vbool64_t mask,vuint8mf8_t vd,uint8_t rs1,vuint8mf8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint8mf4_t test___riscv_vnmsub_tum(vbool32_t mask,vuint8mf4_t vd,uint8_t rs1,vuint8mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint8mf2_t test___riscv_vnmsub_tum(vbool16_t mask,vuint8mf2_t vd,uint8_t rs1,vuint8mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint8m1_t test___riscv_vnmsub_tum(vbool8_t mask,vuint8m1_t vd,uint8_t rs1,vuint8m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint8m2_t test___riscv_vnmsub_tum(vbool4_t mask,vuint8m2_t vd,uint8_t rs1,vuint8m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint8m4_t test___riscv_vnmsub_tum(vbool2_t mask,vuint8m4_t vd,uint8_t rs1,vuint8m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint8m8_t test___riscv_vnmsub_tum(vbool1_t mask,vuint8m8_t vd,uint8_t rs1,vuint8m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint16mf4_t test___riscv_vnmsub_tum(vbool64_t mask,vuint16mf4_t vd,uint16_t rs1,vuint16mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint16mf2_t test___riscv_vnmsub_tum(vbool32_t mask,vuint16mf2_t vd,uint16_t rs1,vuint16mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint16m1_t test___riscv_vnmsub_tum(vbool16_t mask,vuint16m1_t vd,uint16_t rs1,vuint16m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint16m2_t test___riscv_vnmsub_tum(vbool8_t mask,vuint16m2_t vd,uint16_t rs1,vuint16m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint16m4_t test___riscv_vnmsub_tum(vbool4_t mask,vuint16m4_t vd,uint16_t rs1,vuint16m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint16m8_t test___riscv_vnmsub_tum(vbool2_t mask,vuint16m8_t vd,uint16_t rs1,vuint16m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint32mf2_t test___riscv_vnmsub_tum(vbool64_t mask,vuint32mf2_t vd,uint32_t rs1,vuint32mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint32m1_t test___riscv_vnmsub_tum(vbool32_t mask,vuint32m1_t vd,uint32_t rs1,vuint32m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint32m2_t test___riscv_vnmsub_tum(vbool16_t mask,vuint32m2_t vd,uint32_t rs1,vuint32m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint32m4_t test___riscv_vnmsub_tum(vbool8_t mask,vuint32m4_t vd,uint32_t rs1,vuint32m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint32m8_t test___riscv_vnmsub_tum(vbool4_t mask,vuint32m8_t vd,uint32_t rs1,vuint32m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint64m1_t test___riscv_vnmsub_tum(vbool64_t mask,vuint64m1_t vd,uint64_t rs1,vuint64m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint64m2_t test___riscv_vnmsub_tum(vbool32_t mask,vuint64m2_t vd,uint64_t rs1,vuint64m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint64m4_t test___riscv_vnmsub_tum(vbool16_t mask,vuint64m4_t vd,uint64_t rs1,vuint64m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint64m8_t test___riscv_vnmsub_tum(vbool8_t mask,vuint64m8_t vd,uint64_t rs1,vuint64m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,vl);
+}
+
+
+
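+/* These masked _tum calls pass the runtime vl argument, so the checks
+   below expect a register-operand vsetvli with the tail-undisturbed
+   policy; either mask policy (ma or mu) is accepted.  */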
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vnms[a-u][b-c]\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 8 } } */
new file mode 100644
@@ -0,0 +1,289 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t test___riscv_vnmsub_tum(vbool64_t mask,vint8mf8_t vd,int8_t rs1,vint8mf8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,31);
+}
+
+
+vint8mf4_t test___riscv_vnmsub_tum(vbool32_t mask,vint8mf4_t vd,int8_t rs1,vint8mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,31);
+}
+
+
+vint8mf2_t test___riscv_vnmsub_tum(vbool16_t mask,vint8mf2_t vd,int8_t rs1,vint8mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,31);
+}
+
+
+vint8m1_t test___riscv_vnmsub_tum(vbool8_t mask,vint8m1_t vd,int8_t rs1,vint8m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,31);
+}
+
+
+vint8m2_t test___riscv_vnmsub_tum(vbool4_t mask,vint8m2_t vd,int8_t rs1,vint8m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,31);
+}
+
+
+vint8m4_t test___riscv_vnmsub_tum(vbool2_t mask,vint8m4_t vd,int8_t rs1,vint8m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,31);
+}
+
+
+vint8m8_t test___riscv_vnmsub_tum(vbool1_t mask,vint8m8_t vd,int8_t rs1,vint8m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,31);
+}
+
+
+vint16mf4_t test___riscv_vnmsub_tum(vbool64_t mask,vint16mf4_t vd,int16_t rs1,vint16mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,31);
+}
+
+
+vint16mf2_t test___riscv_vnmsub_tum(vbool32_t mask,vint16mf2_t vd,int16_t rs1,vint16mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,31);
+}
+
+
+vint16m1_t test___riscv_vnmsub_tum(vbool16_t mask,vint16m1_t vd,int16_t rs1,vint16m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,31);
+}
+
+
+vint16m2_t test___riscv_vnmsub_tum(vbool8_t mask,vint16m2_t vd,int16_t rs1,vint16m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,31);
+}
+
+
+vint16m4_t test___riscv_vnmsub_tum(vbool4_t mask,vint16m4_t vd,int16_t rs1,vint16m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,31);
+}
+
+
+vint16m8_t test___riscv_vnmsub_tum(vbool2_t mask,vint16m8_t vd,int16_t rs1,vint16m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,31);
+}
+
+
+vint32mf2_t test___riscv_vnmsub_tum(vbool64_t mask,vint32mf2_t vd,int32_t rs1,vint32mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,31);
+}
+
+
+vint32m1_t test___riscv_vnmsub_tum(vbool32_t mask,vint32m1_t vd,int32_t rs1,vint32m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,31);
+}
+
+
+vint32m2_t test___riscv_vnmsub_tum(vbool16_t mask,vint32m2_t vd,int32_t rs1,vint32m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,31);
+}
+
+
+vint32m4_t test___riscv_vnmsub_tum(vbool8_t mask,vint32m4_t vd,int32_t rs1,vint32m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,31);
+}
+
+
+vint32m8_t test___riscv_vnmsub_tum(vbool4_t mask,vint32m8_t vd,int32_t rs1,vint32m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,31);
+}
+
+
+vint64m1_t test___riscv_vnmsub_tum(vbool64_t mask,vint64m1_t vd,int64_t rs1,vint64m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,31);
+}
+
+
+vint64m2_t test___riscv_vnmsub_tum(vbool32_t mask,vint64m2_t vd,int64_t rs1,vint64m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,31);
+}
+
+
+vint64m4_t test___riscv_vnmsub_tum(vbool16_t mask,vint64m4_t vd,int64_t rs1,vint64m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,31);
+}
+
+
+vint64m8_t test___riscv_vnmsub_tum(vbool8_t mask,vint64m8_t vd,int64_t rs1,vint64m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,31);
+}
+
+
+vuint8mf8_t test___riscv_vnmsub_tum(vbool64_t mask,vuint8mf8_t vd,uint8_t rs1,vuint8mf8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,31);
+}
+
+
+vuint8mf4_t test___riscv_vnmsub_tum(vbool32_t mask,vuint8mf4_t vd,uint8_t rs1,vuint8mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,31);
+}
+
+
+vuint8mf2_t test___riscv_vnmsub_tum(vbool16_t mask,vuint8mf2_t vd,uint8_t rs1,vuint8mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,31);
+}
+
+
+vuint8m1_t test___riscv_vnmsub_tum(vbool8_t mask,vuint8m1_t vd,uint8_t rs1,vuint8m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,31);
+}
+
+
+vuint8m2_t test___riscv_vnmsub_tum(vbool4_t mask,vuint8m2_t vd,uint8_t rs1,vuint8m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,31);
+}
+
+
+vuint8m4_t test___riscv_vnmsub_tum(vbool2_t mask,vuint8m4_t vd,uint8_t rs1,vuint8m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,31);
+}
+
+
+vuint8m8_t test___riscv_vnmsub_tum(vbool1_t mask,vuint8m8_t vd,uint8_t rs1,vuint8m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,31);
+}
+
+
+vuint16mf4_t test___riscv_vnmsub_tum(vbool64_t mask,vuint16mf4_t vd,uint16_t rs1,vuint16mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,31);
+}
+
+
+vuint16mf2_t test___riscv_vnmsub_tum(vbool32_t mask,vuint16mf2_t vd,uint16_t rs1,vuint16mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,31);
+}
+
+
+vuint16m1_t test___riscv_vnmsub_tum(vbool16_t mask,vuint16m1_t vd,uint16_t rs1,vuint16m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,31);
+}
+
+
+vuint16m2_t test___riscv_vnmsub_tum(vbool8_t mask,vuint16m2_t vd,uint16_t rs1,vuint16m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,31);
+}
+
+
+vuint16m4_t test___riscv_vnmsub_tum(vbool4_t mask,vuint16m4_t vd,uint16_t rs1,vuint16m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,31);
+}
+
+
+vuint16m8_t test___riscv_vnmsub_tum(vbool2_t mask,vuint16m8_t vd,uint16_t rs1,vuint16m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,31);
+}
+
+
+vuint32mf2_t test___riscv_vnmsub_tum(vbool64_t mask,vuint32mf2_t vd,uint32_t rs1,vuint32mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,31);
+}
+
+
+vuint32m1_t test___riscv_vnmsub_tum(vbool32_t mask,vuint32m1_t vd,uint32_t rs1,vuint32m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,31);
+}
+
+
+vuint32m2_t test___riscv_vnmsub_tum(vbool16_t mask,vuint32m2_t vd,uint32_t rs1,vuint32m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,31);
+}
+
+
+vuint32m4_t test___riscv_vnmsub_tum(vbool8_t mask,vuint32m4_t vd,uint32_t rs1,vuint32m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,31);
+}
+
+
+vuint32m8_t test___riscv_vnmsub_tum(vbool4_t mask,vuint32m8_t vd,uint32_t rs1,vuint32m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,31);
+}
+
+
+vuint64m1_t test___riscv_vnmsub_tum(vbool64_t mask,vuint64m1_t vd,uint64_t rs1,vuint64m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,31);
+}
+
+
+vuint64m2_t test___riscv_vnmsub_tum(vbool32_t mask,vuint64m2_t vd,uint64_t rs1,vuint64m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,31);
+}
+
+
+vuint64m4_t test___riscv_vnmsub_tum(vbool16_t mask,vuint64m4_t vd,uint64_t rs1,vuint64m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,31);
+}
+
+
+vuint64m8_t test___riscv_vnmsub_tum(vbool8_t mask,vuint64m8_t vd,uint64_t rs1,vuint64m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,31);
+}
+
+
+
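+/* An AVL of 31 fits vsetivli's 5-bit immediate, so the checks below
+   expect vsetivli zero,31 rather than a register-operand vsetvli.  */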
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vnms[a-u][b-c]\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 8 } } */
new file mode 100644
@@ -0,0 +1,289 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t test___riscv_vnmsub_tum(vbool64_t mask,vint8mf8_t vd,int8_t rs1,vint8mf8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,32);
+}
+
+
+vint8mf4_t test___riscv_vnmsub_tum(vbool32_t mask,vint8mf4_t vd,int8_t rs1,vint8mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,32);
+}
+
+
+vint8mf2_t test___riscv_vnmsub_tum(vbool16_t mask,vint8mf2_t vd,int8_t rs1,vint8mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,32);
+}
+
+
+vint8m1_t test___riscv_vnmsub_tum(vbool8_t mask,vint8m1_t vd,int8_t rs1,vint8m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,32);
+}
+
+
+vint8m2_t test___riscv_vnmsub_tum(vbool4_t mask,vint8m2_t vd,int8_t rs1,vint8m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,32);
+}
+
+
+vint8m4_t test___riscv_vnmsub_tum(vbool2_t mask,vint8m4_t vd,int8_t rs1,vint8m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,32);
+}
+
+
+vint8m8_t test___riscv_vnmsub_tum(vbool1_t mask,vint8m8_t vd,int8_t rs1,vint8m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,32);
+}
+
+
+vint16mf4_t test___riscv_vnmsub_tum(vbool64_t mask,vint16mf4_t vd,int16_t rs1,vint16mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,32);
+}
+
+
+vint16mf2_t test___riscv_vnmsub_tum(vbool32_t mask,vint16mf2_t vd,int16_t rs1,vint16mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,32);
+}
+
+
+vint16m1_t test___riscv_vnmsub_tum(vbool16_t mask,vint16m1_t vd,int16_t rs1,vint16m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,32);
+}
+
+
+vint16m2_t test___riscv_vnmsub_tum(vbool8_t mask,vint16m2_t vd,int16_t rs1,vint16m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,32);
+}
+
+
+vint16m4_t test___riscv_vnmsub_tum(vbool4_t mask,vint16m4_t vd,int16_t rs1,vint16m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,32);
+}
+
+
+vint16m8_t test___riscv_vnmsub_tum(vbool2_t mask,vint16m8_t vd,int16_t rs1,vint16m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,32);
+}
+
+
+vint32mf2_t test___riscv_vnmsub_tum(vbool64_t mask,vint32mf2_t vd,int32_t rs1,vint32mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,32);
+}
+
+
+vint32m1_t test___riscv_vnmsub_tum(vbool32_t mask,vint32m1_t vd,int32_t rs1,vint32m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,32);
+}
+
+
+vint32m2_t test___riscv_vnmsub_tum(vbool16_t mask,vint32m2_t vd,int32_t rs1,vint32m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,32);
+}
+
+
+vint32m4_t test___riscv_vnmsub_tum(vbool8_t mask,vint32m4_t vd,int32_t rs1,vint32m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,32);
+}
+
+
+vint32m8_t test___riscv_vnmsub_tum(vbool4_t mask,vint32m8_t vd,int32_t rs1,vint32m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,32);
+}
+
+
+vint64m1_t test___riscv_vnmsub_tum(vbool64_t mask,vint64m1_t vd,int64_t rs1,vint64m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,32);
+}
+
+
+vint64m2_t test___riscv_vnmsub_tum(vbool32_t mask,vint64m2_t vd,int64_t rs1,vint64m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,32);
+}
+
+
+vint64m4_t test___riscv_vnmsub_tum(vbool16_t mask,vint64m4_t vd,int64_t rs1,vint64m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,32);
+}
+
+
+vint64m8_t test___riscv_vnmsub_tum(vbool8_t mask,vint64m8_t vd,int64_t rs1,vint64m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,32);
+}
+
+
+vuint8mf8_t test___riscv_vnmsub_tum(vbool64_t mask,vuint8mf8_t vd,uint8_t rs1,vuint8mf8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,32);
+}
+
+
+vuint8mf4_t test___riscv_vnmsub_tum(vbool32_t mask,vuint8mf4_t vd,uint8_t rs1,vuint8mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,32);
+}
+
+
+vuint8mf2_t test___riscv_vnmsub_tum(vbool16_t mask,vuint8mf2_t vd,uint8_t rs1,vuint8mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,32);
+}
+
+
+vuint8m1_t test___riscv_vnmsub_tum(vbool8_t mask,vuint8m1_t vd,uint8_t rs1,vuint8m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,32);
+}
+
+
+vuint8m2_t test___riscv_vnmsub_tum(vbool4_t mask,vuint8m2_t vd,uint8_t rs1,vuint8m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,32);
+}
+
+
+vuint8m4_t test___riscv_vnmsub_tum(vbool2_t mask,vuint8m4_t vd,uint8_t rs1,vuint8m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,32);
+}
+
+
+vuint8m8_t test___riscv_vnmsub_tum(vbool1_t mask,vuint8m8_t vd,uint8_t rs1,vuint8m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,32);
+}
+
+
+vuint16mf4_t test___riscv_vnmsub_tum(vbool64_t mask,vuint16mf4_t vd,uint16_t rs1,vuint16mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,32);
+}
+
+
+vuint16mf2_t test___riscv_vnmsub_tum(vbool32_t mask,vuint16mf2_t vd,uint16_t rs1,vuint16mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,32);
+}
+
+
+vuint16m1_t test___riscv_vnmsub_tum(vbool16_t mask,vuint16m1_t vd,uint16_t rs1,vuint16m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,32);
+}
+
+
+vuint16m2_t test___riscv_vnmsub_tum(vbool8_t mask,vuint16m2_t vd,uint16_t rs1,vuint16m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,32);
+}
+
+
+vuint16m4_t test___riscv_vnmsub_tum(vbool4_t mask,vuint16m4_t vd,uint16_t rs1,vuint16m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,32);
+}
+
+
+vuint16m8_t test___riscv_vnmsub_tum(vbool2_t mask,vuint16m8_t vd,uint16_t rs1,vuint16m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,32);
+}
+
+
+vuint32mf2_t test___riscv_vnmsub_tum(vbool64_t mask,vuint32mf2_t vd,uint32_t rs1,vuint32mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,32);
+}
+
+
+vuint32m1_t test___riscv_vnmsub_tum(vbool32_t mask,vuint32m1_t vd,uint32_t rs1,vuint32m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,32);
+}
+
+
+vuint32m2_t test___riscv_vnmsub_tum(vbool16_t mask,vuint32m2_t vd,uint32_t rs1,vuint32m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,32);
+}
+
+
+vuint32m4_t test___riscv_vnmsub_tum(vbool8_t mask,vuint32m4_t vd,uint32_t rs1,vuint32m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,32);
+}
+
+
+vuint32m8_t test___riscv_vnmsub_tum(vbool4_t mask,vuint32m8_t vd,uint32_t rs1,vuint32m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,32);
+}
+
+
+vuint64m1_t test___riscv_vnmsub_tum(vbool64_t mask,vuint64m1_t vd,uint64_t rs1,vuint64m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,32);
+}
+
+
+vuint64m2_t test___riscv_vnmsub_tum(vbool32_t mask,vuint64m2_t vd,uint64_t rs1,vuint64m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,32);
+}
+
+
+vuint64m4_t test___riscv_vnmsub_tum(vbool16_t mask,vuint64m4_t vd,uint64_t rs1,vuint64m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,32);
+}
+
+
+vuint64m8_t test___riscv_vnmsub_tum(vbool8_t mask,vuint64m8_t vd,uint64_t rs1,vuint64m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tum(mask,vd,rs1,vs2,32);
+}
+
+
+
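+/* As in the vl=31 variant above, but an AVL of 32 exceeds vsetivli's
+   5-bit immediate, so a register-operand vsetvli is expected.  */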
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vnms[a-u][b-c]\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 8 } } */
new file mode 100644
@@ -0,0 +1,289 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t test___riscv_vnmsub_tumu(vbool64_t mask,vint8mf8_t vd,int8_t rs1,vint8mf8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,vl);
+}
+
+
+vint8mf4_t test___riscv_vnmsub_tumu(vbool32_t mask,vint8mf4_t vd,int8_t rs1,vint8mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,vl);
+}
+
+
+vint8mf2_t test___riscv_vnmsub_tumu(vbool16_t mask,vint8mf2_t vd,int8_t rs1,vint8mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,vl);
+}
+
+
+vint8m1_t test___riscv_vnmsub_tumu(vbool8_t mask,vint8m1_t vd,int8_t rs1,vint8m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,vl);
+}
+
+
+vint8m2_t test___riscv_vnmsub_tumu(vbool4_t mask,vint8m2_t vd,int8_t rs1,vint8m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,vl);
+}
+
+
+vint8m4_t test___riscv_vnmsub_tumu(vbool2_t mask,vint8m4_t vd,int8_t rs1,vint8m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,vl);
+}
+
+
+vint8m8_t test___riscv_vnmsub_tumu(vbool1_t mask,vint8m8_t vd,int8_t rs1,vint8m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,vl);
+}
+
+
+vint16mf4_t test___riscv_vnmsub_tumu(vbool64_t mask,vint16mf4_t vd,int16_t rs1,vint16mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,vl);
+}
+
+
+vint16mf2_t test___riscv_vnmsub_tumu(vbool32_t mask,vint16mf2_t vd,int16_t rs1,vint16mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,vl);
+}
+
+
+vint16m1_t test___riscv_vnmsub_tumu(vbool16_t mask,vint16m1_t vd,int16_t rs1,vint16m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,vl);
+}
+
+
+vint16m2_t test___riscv_vnmsub_tumu(vbool8_t mask,vint16m2_t vd,int16_t rs1,vint16m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,vl);
+}
+
+
+vint16m4_t test___riscv_vnmsub_tumu(vbool4_t mask,vint16m4_t vd,int16_t rs1,vint16m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,vl);
+}
+
+
+vint16m8_t test___riscv_vnmsub_tumu(vbool2_t mask,vint16m8_t vd,int16_t rs1,vint16m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,vl);
+}
+
+
+vint32mf2_t test___riscv_vnmsub_tumu(vbool64_t mask,vint32mf2_t vd,int32_t rs1,vint32mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,vl);
+}
+
+
+vint32m1_t test___riscv_vnmsub_tumu(vbool32_t mask,vint32m1_t vd,int32_t rs1,vint32m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,vl);
+}
+
+
+vint32m2_t test___riscv_vnmsub_tumu(vbool16_t mask,vint32m2_t vd,int32_t rs1,vint32m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,vl);
+}
+
+
+vint32m4_t test___riscv_vnmsub_tumu(vbool8_t mask,vint32m4_t vd,int32_t rs1,vint32m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,vl);
+}
+
+
+vint32m8_t test___riscv_vnmsub_tumu(vbool4_t mask,vint32m8_t vd,int32_t rs1,vint32m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,vl);
+}
+
+
+vint64m1_t test___riscv_vnmsub_tumu(vbool64_t mask,vint64m1_t vd,int64_t rs1,vint64m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,vl);
+}
+
+
+vint64m2_t test___riscv_vnmsub_tumu(vbool32_t mask,vint64m2_t vd,int64_t rs1,vint64m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,vl);
+}
+
+
+vint64m4_t test___riscv_vnmsub_tumu(vbool16_t mask,vint64m4_t vd,int64_t rs1,vint64m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,vl);
+}
+
+
+vint64m8_t test___riscv_vnmsub_tumu(vbool8_t mask,vint64m8_t vd,int64_t rs1,vint64m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint8mf8_t test___riscv_vnmsub_tumu(vbool64_t mask,vuint8mf8_t vd,uint8_t rs1,vuint8mf8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint8mf4_t test___riscv_vnmsub_tumu(vbool32_t mask,vuint8mf4_t vd,uint8_t rs1,vuint8mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint8mf2_t test___riscv_vnmsub_tumu(vbool16_t mask,vuint8mf2_t vd,uint8_t rs1,vuint8mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint8m1_t test___riscv_vnmsub_tumu(vbool8_t mask,vuint8m1_t vd,uint8_t rs1,vuint8m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint8m2_t test___riscv_vnmsub_tumu(vbool4_t mask,vuint8m2_t vd,uint8_t rs1,vuint8m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint8m4_t test___riscv_vnmsub_tumu(vbool2_t mask,vuint8m4_t vd,uint8_t rs1,vuint8m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint8m8_t test___riscv_vnmsub_tumu(vbool1_t mask,vuint8m8_t vd,uint8_t rs1,vuint8m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint16mf4_t test___riscv_vnmsub_tumu(vbool64_t mask,vuint16mf4_t vd,uint16_t rs1,vuint16mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint16mf2_t test___riscv_vnmsub_tumu(vbool32_t mask,vuint16mf2_t vd,uint16_t rs1,vuint16mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint16m1_t test___riscv_vnmsub_tumu(vbool16_t mask,vuint16m1_t vd,uint16_t rs1,vuint16m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint16m2_t test___riscv_vnmsub_tumu(vbool8_t mask,vuint16m2_t vd,uint16_t rs1,vuint16m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint16m4_t test___riscv_vnmsub_tumu(vbool4_t mask,vuint16m4_t vd,uint16_t rs1,vuint16m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint16m8_t test___riscv_vnmsub_tumu(vbool2_t mask,vuint16m8_t vd,uint16_t rs1,vuint16m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint32mf2_t test___riscv_vnmsub_tumu(vbool64_t mask,vuint32mf2_t vd,uint32_t rs1,vuint32mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint32m1_t test___riscv_vnmsub_tumu(vbool32_t mask,vuint32m1_t vd,uint32_t rs1,vuint32m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint32m2_t test___riscv_vnmsub_tumu(vbool16_t mask,vuint32m2_t vd,uint32_t rs1,vuint32m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint32m4_t test___riscv_vnmsub_tumu(vbool8_t mask,vuint32m4_t vd,uint32_t rs1,vuint32m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint32m8_t test___riscv_vnmsub_tumu(vbool4_t mask,vuint32m8_t vd,uint32_t rs1,vuint32m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint64m1_t test___riscv_vnmsub_tumu(vbool64_t mask,vuint64m1_t vd,uint64_t rs1,vuint64m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint64m2_t test___riscv_vnmsub_tumu(vbool32_t mask,vuint64m2_t vd,uint64_t rs1,vuint64m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint64m4_t test___riscv_vnmsub_tumu(vbool16_t mask,vuint64m4_t vd,uint64_t rs1,vuint64m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,vl);
+}
+
+
+vuint64m8_t test___riscv_vnmsub_tumu(vbool8_t mask,vuint64m8_t vd,uint64_t rs1,vuint64m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,vl);
+}
+
+
+
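+/* The _tumu variant requires both tail-undisturbed and mask-undisturbed
+   behavior, so unlike the _tum checks these insist on the exact tu,mu
+   policy pair.  */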
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vnms[a-u][b-c]\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 8 } } */
new file mode 100644
@@ -0,0 +1,289 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t test___riscv_vnmsub_tumu(vbool64_t mask,vint8mf8_t vd,int8_t rs1,vint8mf8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,31);
+}
+
+
+vint8mf4_t test___riscv_vnmsub_tumu(vbool32_t mask,vint8mf4_t vd,int8_t rs1,vint8mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,31);
+}
+
+
+vint8mf2_t test___riscv_vnmsub_tumu(vbool16_t mask,vint8mf2_t vd,int8_t rs1,vint8mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,31);
+}
+
+
+vint8m1_t test___riscv_vnmsub_tumu(vbool8_t mask,vint8m1_t vd,int8_t rs1,vint8m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,31);
+}
+
+
+vint8m2_t test___riscv_vnmsub_tumu(vbool4_t mask,vint8m2_t vd,int8_t rs1,vint8m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,31);
+}
+
+
+vint8m4_t test___riscv_vnmsub_tumu(vbool2_t mask,vint8m4_t vd,int8_t rs1,vint8m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,31);
+}
+
+
+vint8m8_t test___riscv_vnmsub_tumu(vbool1_t mask,vint8m8_t vd,int8_t rs1,vint8m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,31);
+}
+
+
+vint16mf4_t test___riscv_vnmsub_tumu(vbool64_t mask,vint16mf4_t vd,int16_t rs1,vint16mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,31);
+}
+
+
+vint16mf2_t test___riscv_vnmsub_tumu(vbool32_t mask,vint16mf2_t vd,int16_t rs1,vint16mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,31);
+}
+
+
+vint16m1_t test___riscv_vnmsub_tumu(vbool16_t mask,vint16m1_t vd,int16_t rs1,vint16m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,31);
+}
+
+
+vint16m2_t test___riscv_vnmsub_tumu(vbool8_t mask,vint16m2_t vd,int16_t rs1,vint16m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,31);
+}
+
+
+vint16m4_t test___riscv_vnmsub_tumu(vbool4_t mask,vint16m4_t vd,int16_t rs1,vint16m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,31);
+}
+
+
+vint16m8_t test___riscv_vnmsub_tumu(vbool2_t mask,vint16m8_t vd,int16_t rs1,vint16m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,31);
+}
+
+
+vint32mf2_t test___riscv_vnmsub_tumu(vbool64_t mask,vint32mf2_t vd,int32_t rs1,vint32mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,31);
+}
+
+
+vint32m1_t test___riscv_vnmsub_tumu(vbool32_t mask,vint32m1_t vd,int32_t rs1,vint32m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,31);
+}
+
+
+vint32m2_t test___riscv_vnmsub_tumu(vbool16_t mask,vint32m2_t vd,int32_t rs1,vint32m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,31);
+}
+
+
+vint32m4_t test___riscv_vnmsub_tumu(vbool8_t mask,vint32m4_t vd,int32_t rs1,vint32m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,31);
+}
+
+
+vint32m8_t test___riscv_vnmsub_tumu(vbool4_t mask,vint32m8_t vd,int32_t rs1,vint32m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,31);
+}
+
+
+vint64m1_t test___riscv_vnmsub_tumu(vbool64_t mask,vint64m1_t vd,int64_t rs1,vint64m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,31);
+}
+
+
+vint64m2_t test___riscv_vnmsub_tumu(vbool32_t mask,vint64m2_t vd,int64_t rs1,vint64m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,31);
+}
+
+
+vint64m4_t test___riscv_vnmsub_tumu(vbool16_t mask,vint64m4_t vd,int64_t rs1,vint64m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,31);
+}
+
+
+vint64m8_t test___riscv_vnmsub_tumu(vbool8_t mask,vint64m8_t vd,int64_t rs1,vint64m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,31);
+}
+
+
+vuint8mf8_t test___riscv_vnmsub_tumu(vbool64_t mask,vuint8mf8_t vd,uint8_t rs1,vuint8mf8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,31);
+}
+
+
+vuint8mf4_t test___riscv_vnmsub_tumu(vbool32_t mask,vuint8mf4_t vd,uint8_t rs1,vuint8mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,31);
+}
+
+
+vuint8mf2_t test___riscv_vnmsub_tumu(vbool16_t mask,vuint8mf2_t vd,uint8_t rs1,vuint8mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,31);
+}
+
+
+vuint8m1_t test___riscv_vnmsub_tumu(vbool8_t mask,vuint8m1_t vd,uint8_t rs1,vuint8m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,31);
+}
+
+
+vuint8m2_t test___riscv_vnmsub_tumu(vbool4_t mask,vuint8m2_t vd,uint8_t rs1,vuint8m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,31);
+}
+
+
+vuint8m4_t test___riscv_vnmsub_tumu(vbool2_t mask,vuint8m4_t vd,uint8_t rs1,vuint8m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,31);
+}
+
+
+vuint8m8_t test___riscv_vnmsub_tumu(vbool1_t mask,vuint8m8_t vd,uint8_t rs1,vuint8m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,31);
+}
+
+
+vuint16mf4_t test___riscv_vnmsub_tumu(vbool64_t mask,vuint16mf4_t vd,uint16_t rs1,vuint16mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,31);
+}
+
+
+vuint16mf2_t test___riscv_vnmsub_tumu(vbool32_t mask,vuint16mf2_t vd,uint16_t rs1,vuint16mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,31);
+}
+
+
+vuint16m1_t test___riscv_vnmsub_tumu(vbool16_t mask,vuint16m1_t vd,uint16_t rs1,vuint16m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,31);
+}
+
+
+vuint16m2_t test___riscv_vnmsub_tumu(vbool8_t mask,vuint16m2_t vd,uint16_t rs1,vuint16m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,31);
+}
+
+
+vuint16m4_t test___riscv_vnmsub_tumu(vbool4_t mask,vuint16m4_t vd,uint16_t rs1,vuint16m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,31);
+}
+
+
+vuint16m8_t test___riscv_vnmsub_tumu(vbool2_t mask,vuint16m8_t vd,uint16_t rs1,vuint16m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,31);
+}
+
+
+vuint32mf2_t test___riscv_vnmsub_tumu(vbool64_t mask,vuint32mf2_t vd,uint32_t rs1,vuint32mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,31);
+}
+
+
+vuint32m1_t test___riscv_vnmsub_tumu(vbool32_t mask,vuint32m1_t vd,uint32_t rs1,vuint32m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,31);
+}
+
+
+vuint32m2_t test___riscv_vnmsub_tumu(vbool16_t mask,vuint32m2_t vd,uint32_t rs1,vuint32m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,31);
+}
+
+
+vuint32m4_t test___riscv_vnmsub_tumu(vbool8_t mask,vuint32m4_t vd,uint32_t rs1,vuint32m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,31);
+}
+
+
+vuint32m8_t test___riscv_vnmsub_tumu(vbool4_t mask,vuint32m8_t vd,uint32_t rs1,vuint32m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,31);
+}
+
+
+vuint64m1_t test___riscv_vnmsub_tumu(vbool64_t mask,vuint64m1_t vd,uint64_t rs1,vuint64m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,31);
+}
+
+
+vuint64m2_t test___riscv_vnmsub_tumu(vbool32_t mask,vuint64m2_t vd,uint64_t rs1,vuint64m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,31);
+}
+
+
+vuint64m4_t test___riscv_vnmsub_tumu(vbool16_t mask,vuint64m4_t vd,uint64_t rs1,vuint64m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,31);
+}
+
+
+vuint64m8_t test___riscv_vnmsub_tumu(vbool8_t mask,vuint64m8_t vd,uint64_t rs1,vuint64m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,31);
+}
+
+
+
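+/* AVL 31 again fits vsetivli's immediate, and the _tumu policy pins
+   each configuration below to the tu,mu pair.  */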
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*tu,\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*tu,\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*tu,\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*tu,\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*tu,\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*tu,\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*tu,\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*tu,\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*tu,\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*tu,\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*tu,\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*tu,\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*tu,\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*tu,\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*tu,\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*tu,\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*tu,\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*tu,\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vnms[a-u][b-c]\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 8 } } */
new file mode 100644
@@ -0,0 +1,289 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
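+/* Same _tumu overloads as above, but with a constant AVL of 32, which
+   exceeds vsetivli's 5-bit immediate and so should compile to a
+   register-operand vsetvli.  */
+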
+vint8mf8_t test___riscv_vnmsub_tumu(vbool64_t mask,vint8mf8_t vd,int8_t rs1,vint8mf8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,32);
+}
+
+
+vint8mf4_t test___riscv_vnmsub_tumu(vbool32_t mask,vint8mf4_t vd,int8_t rs1,vint8mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,32);
+}
+
+
+vint8mf2_t test___riscv_vnmsub_tumu(vbool16_t mask,vint8mf2_t vd,int8_t rs1,vint8mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,32);
+}
+
+
+vint8m1_t test___riscv_vnmsub_tumu(vbool8_t mask,vint8m1_t vd,int8_t rs1,vint8m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,32);
+}
+
+
+vint8m2_t test___riscv_vnmsub_tumu_i8m2(vbool4_t mask,vint8m2_t vd,int8_t rs1,vint8m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,32);
+}
+
+
+vint8m4_t test___riscv_vnmsub_tumu_i8m4(vbool2_t mask,vint8m4_t vd,int8_t rs1,vint8m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,32);
+}
+
+
+vint8m8_t test___riscv_vnmsub_tumu_i8m8(vbool1_t mask,vint8m8_t vd,int8_t rs1,vint8m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,32);
+}
+
+
+vint16mf4_t test___riscv_vnmsub_tumu_i16mf4(vbool64_t mask,vint16mf4_t vd,int16_t rs1,vint16mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,32);
+}
+
+
+vint16mf2_t test___riscv_vnmsub_tumu_i16mf2(vbool32_t mask,vint16mf2_t vd,int16_t rs1,vint16mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,32);
+}
+
+
+vint16m1_t test___riscv_vnmsub_tumu_i16m1(vbool16_t mask,vint16m1_t vd,int16_t rs1,vint16m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,32);
+}
+
+
+vint16m2_t test___riscv_vnmsub_tumu_i16m2(vbool8_t mask,vint16m2_t vd,int16_t rs1,vint16m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,32);
+}
+
+
+vint16m4_t test___riscv_vnmsub_tumu_i16m4(vbool4_t mask,vint16m4_t vd,int16_t rs1,vint16m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,32);
+}
+
+
+vint16m8_t test___riscv_vnmsub_tumu_i16m8(vbool2_t mask,vint16m8_t vd,int16_t rs1,vint16m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,32);
+}
+
+
+vint32mf2_t test___riscv_vnmsub_tumu_i32mf2(vbool64_t mask,vint32mf2_t vd,int32_t rs1,vint32mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,32);
+}
+
+
+vint32m1_t test___riscv_vnmsub_tumu_i32m1(vbool32_t mask,vint32m1_t vd,int32_t rs1,vint32m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,32);
+}
+
+
+vint32m2_t test___riscv_vnmsub_tumu_i32m2(vbool16_t mask,vint32m2_t vd,int32_t rs1,vint32m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,32);
+}
+
+
+vint32m4_t test___riscv_vnmsub_tumu_i32m4(vbool8_t mask,vint32m4_t vd,int32_t rs1,vint32m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,32);
+}
+
+
+vint32m8_t test___riscv_vnmsub_tumu_i32m8(vbool4_t mask,vint32m8_t vd,int32_t rs1,vint32m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,32);
+}
+
+
+vint64m1_t test___riscv_vnmsub_tumu_i64m1(vbool64_t mask,vint64m1_t vd,int64_t rs1,vint64m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,32);
+}
+
+
+vint64m2_t test___riscv_vnmsub_tumu_i64m2(vbool32_t mask,vint64m2_t vd,int64_t rs1,vint64m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,32);
+}
+
+
+vint64m4_t test___riscv_vnmsub_tumu_i64m4(vbool16_t mask,vint64m4_t vd,int64_t rs1,vint64m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,32);
+}
+
+
+vint64m8_t test___riscv_vnmsub_tumu_i64m8(vbool8_t mask,vint64m8_t vd,int64_t rs1,vint64m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,32);
+}
+
+
+vuint8mf8_t test___riscv_vnmsub_tumu_u8mf8(vbool64_t mask,vuint8mf8_t vd,uint8_t rs1,vuint8mf8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,32);
+}
+
+
+vuint8mf4_t test___riscv_vnmsub_tumu_u8mf4(vbool32_t mask,vuint8mf4_t vd,uint8_t rs1,vuint8mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,32);
+}
+
+
+vuint8mf2_t test___riscv_vnmsub_tumu_u8mf2(vbool16_t mask,vuint8mf2_t vd,uint8_t rs1,vuint8mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,32);
+}
+
+
+vuint8m1_t test___riscv_vnmsub_tumu_u8m1(vbool8_t mask,vuint8m1_t vd,uint8_t rs1,vuint8m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,32);
+}
+
+
+vuint8m2_t test___riscv_vnmsub_tumu_u8m2(vbool4_t mask,vuint8m2_t vd,uint8_t rs1,vuint8m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,32);
+}
+
+
+vuint8m4_t test___riscv_vnmsub_tumu_u8m4(vbool2_t mask,vuint8m4_t vd,uint8_t rs1,vuint8m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,32);
+}
+
+
+vuint8m8_t test___riscv_vnmsub_tumu_u8m8(vbool1_t mask,vuint8m8_t vd,uint8_t rs1,vuint8m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,32);
+}
+
+
+vuint16mf4_t test___riscv_vnmsub_tumu_u16mf4(vbool64_t mask,vuint16mf4_t vd,uint16_t rs1,vuint16mf4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,32);
+}
+
+
+vuint16mf2_t test___riscv_vnmsub_tumu_u16mf2(vbool32_t mask,vuint16mf2_t vd,uint16_t rs1,vuint16mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,32);
+}
+
+
+vuint16m1_t test___riscv_vnmsub_tumu_u16m1(vbool16_t mask,vuint16m1_t vd,uint16_t rs1,vuint16m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,32);
+}
+
+
+vuint16m2_t test___riscv_vnmsub_tumu_u16m2(vbool8_t mask,vuint16m2_t vd,uint16_t rs1,vuint16m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,32);
+}
+
+
+vuint16m4_t test___riscv_vnmsub_tumu_u16m4(vbool4_t mask,vuint16m4_t vd,uint16_t rs1,vuint16m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,32);
+}
+
+
+vuint16m8_t test___riscv_vnmsub_tumu_u16m8(vbool2_t mask,vuint16m8_t vd,uint16_t rs1,vuint16m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,32);
+}
+
+
+vuint32mf2_t test___riscv_vnmsub_tumu_u32mf2(vbool64_t mask,vuint32mf2_t vd,uint32_t rs1,vuint32mf2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,32);
+}
+
+
+vuint32m1_t test___riscv_vnmsub_tumu_u32m1(vbool32_t mask,vuint32m1_t vd,uint32_t rs1,vuint32m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,32);
+}
+
+
+vuint32m2_t test___riscv_vnmsub_tumu_u32m2(vbool16_t mask,vuint32m2_t vd,uint32_t rs1,vuint32m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,32);
+}
+
+
+vuint32m4_t test___riscv_vnmsub_tumu_u32m4(vbool8_t mask,vuint32m4_t vd,uint32_t rs1,vuint32m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,32);
+}
+
+
+vuint32m8_t test___riscv_vnmsub_tumu_u32m8(vbool4_t mask,vuint32m8_t vd,uint32_t rs1,vuint32m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,32);
+}
+
+
+vuint64m1_t test___riscv_vnmsub_tumu_u64m1(vbool64_t mask,vuint64m1_t vd,uint64_t rs1,vuint64m1_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,32);
+}
+
+
+vuint64m2_t test___riscv_vnmsub_tumu_u64m2(vbool32_t mask,vuint64m2_t vd,uint64_t rs1,vuint64m2_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,32);
+}
+
+
+vuint64m4_t test___riscv_vnmsub_tumu_u64m4(vbool16_t mask,vuint64m4_t vd,uint64_t rs1,vuint64m4_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,32);
+}
+
+
+vuint64m8_t test___riscv_vnmsub_tumu_u64m8(vbool8_t mask,vuint64m8_t vd,uint64_t rs1,vuint64m8_t vs2,size_t vl)
+{
+ return __riscv_vnmsub_tumu(mask,vd,rs1,vs2,32);
+}
+
+
+
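+/* An AVL of 32 cannot be encoded in the 5-bit unsigned immediate of
+   vsetivli (maximum 31), so every configuration below expects a vsetvli
+   whose AVL comes from a scalar register.  As in the previous file, the
+   e64 cases on rv32 fall back to the splat + .vv form, giving the 8
+   matches in the final scan.  */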
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*mu\s+vnms[a-u][b-c]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vnms[a-u][b-c]\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 8 } } */