RISC-V: Add binary op vx constraint tests

Message ID 20230203073716.193547-1-juzhe.zhong@rivai.ai
State Accepted
Headers
Series RISC-V: Add binary op vx constraint tests |

Checks

Context Check Description
snail/gcc-patch-check success Github commit url

Commit Message

juzhe.zhong@rivai.ai Feb. 3, 2023, 7:37 a.m. UTC
  From: Ju-Zhe Zhong <juzhe.zhong@rivai.ai>

gcc/testsuite/ChangeLog:

        * gcc.target/riscv/rvv/base/binop_vx_constraint-1.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-10.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-11.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-12.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-13.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-14.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-15.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-16.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-17.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-18.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-19.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-2.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-20.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-21.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-22.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-23.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-24.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-25.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-26.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-27.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-28.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-29.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-3.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-30.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-31.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-32.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-33.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-34.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-35.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-36.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-37.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-38.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-39.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-4.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-40.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-41.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-42.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-43.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-44.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-45.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-46.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-47.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-48.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-49.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-5.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-50.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-51.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-52.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-53.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-54.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-55.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-56.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-57.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-58.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-59.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-6.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-60.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-61.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-62.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-63.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-64.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-65.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-66.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-67.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-68.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-69.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-7.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-70.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-71.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-72.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-73.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-74.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-75.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-76.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-77.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-78.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-79.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-8.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-80.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-81.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-82.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-83.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-84.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-85.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-86.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-87.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-88.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-89.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-9.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-90.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-91.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-92.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-93.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-94.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-95.c: New test.
        * gcc.target/riscv/rvv/base/binop_vx_constraint-96.c: New test.

---
 .../riscv/rvv/base/binop_vx_constraint-1.c    | 132 +++++++++++++++
 .../riscv/rvv/base/binop_vx_constraint-10.c   | 132 +++++++++++++++
 .../riscv/rvv/base/binop_vx_constraint-11.c   | 132 +++++++++++++++
 .../riscv/rvv/base/binop_vx_constraint-12.c   | 160 ++++++++++++++++++
 .../riscv/rvv/base/binop_vx_constraint-13.c   | 132 +++++++++++++++
 .../riscv/rvv/base/binop_vx_constraint-14.c   | 132 +++++++++++++++
 .../riscv/rvv/base/binop_vx_constraint-15.c   | 132 +++++++++++++++
 .../riscv/rvv/base/binop_vx_constraint-16.c   | 160 ++++++++++++++++++
 .../riscv/rvv/base/binop_vx_constraint-17.c   | 160 ++++++++++++++++++
 .../riscv/rvv/base/binop_vx_constraint-18.c   | 132 +++++++++++++++
 .../riscv/rvv/base/binop_vx_constraint-19.c   | 160 ++++++++++++++++++
 .../riscv/rvv/base/binop_vx_constraint-2.c    | 132 +++++++++++++++
 .../riscv/rvv/base/binop_vx_constraint-20.c   | 132 +++++++++++++++
 .../riscv/rvv/base/binop_vx_constraint-21.c   | 160 ++++++++++++++++++
 .../riscv/rvv/base/binop_vx_constraint-22.c   | 132 +++++++++++++++
 .../riscv/rvv/base/binop_vx_constraint-23.c   | 160 ++++++++++++++++++
 .../riscv/rvv/base/binop_vx_constraint-24.c   | 132 +++++++++++++++
 .../riscv/rvv/base/binop_vx_constraint-25.c   | 160 ++++++++++++++++++
 .../riscv/rvv/base/binop_vx_constraint-26.c   | 132 +++++++++++++++
 .../riscv/rvv/base/binop_vx_constraint-27.c   | 160 ++++++++++++++++++
 .../riscv/rvv/base/binop_vx_constraint-28.c   | 132 +++++++++++++++
 .../riscv/rvv/base/binop_vx_constraint-29.c   | 160 ++++++++++++++++++
 .../riscv/rvv/base/binop_vx_constraint-3.c    | 132 +++++++++++++++
 .../riscv/rvv/base/binop_vx_constraint-30.c   | 132 +++++++++++++++
 .../riscv/rvv/base/binop_vx_constraint-31.c   | 160 ++++++++++++++++++
 .../riscv/rvv/base/binop_vx_constraint-32.c   | 132 +++++++++++++++
 .../riscv/rvv/base/binop_vx_constraint-33.c   | 160 ++++++++++++++++++
 .../riscv/rvv/base/binop_vx_constraint-34.c   | 132 +++++++++++++++
 .../riscv/rvv/base/binop_vx_constraint-35.c   | 160 ++++++++++++++++++
 .../riscv/rvv/base/binop_vx_constraint-36.c   | 132 +++++++++++++++
 .../riscv/rvv/base/binop_vx_constraint-37.c   | 132 +++++++++++++++
 .../riscv/rvv/base/binop_vx_constraint-38.c   | 132 +++++++++++++++
 .../riscv/rvv/base/binop_vx_constraint-39.c   | 132 +++++++++++++++
 .../riscv/rvv/base/binop_vx_constraint-4.c    | 160 ++++++++++++++++++
 .../riscv/rvv/base/binop_vx_constraint-40.c   | 160 ++++++++++++++++++
 .../riscv/rvv/base/binop_vx_constraint-41.c   | 132 +++++++++++++++
 .../riscv/rvv/base/binop_vx_constraint-42.c   | 132 +++++++++++++++
 .../riscv/rvv/base/binop_vx_constraint-43.c   | 132 +++++++++++++++
 .../riscv/rvv/base/binop_vx_constraint-44.c   | 160 ++++++++++++++++++
 .../riscv/rvv/base/binop_vx_constraint-45.c   | 123 ++++++++++++++
 .../riscv/rvv/base/binop_vx_constraint-46.c   |  72 ++++++++
 .../riscv/rvv/base/binop_vx_constraint-47.c   |  16 ++
 .../riscv/rvv/base/binop_vx_constraint-48.c   |  16 ++
 .../riscv/rvv/base/binop_vx_constraint-49.c   |  16 ++
 .../riscv/rvv/base/binop_vx_constraint-5.c    | 132 +++++++++++++++
 .../riscv/rvv/base/binop_vx_constraint-50.c   |  18 ++
 .../riscv/rvv/base/binop_vx_constraint-51.c   | 123 ++++++++++++++
 .../riscv/rvv/base/binop_vx_constraint-52.c   |  72 ++++++++
 .../riscv/rvv/base/binop_vx_constraint-53.c   |  16 ++
 .../riscv/rvv/base/binop_vx_constraint-54.c   |  16 ++
 .../riscv/rvv/base/binop_vx_constraint-55.c   |  16 ++
 .../riscv/rvv/base/binop_vx_constraint-56.c   |  18 ++
 .../riscv/rvv/base/binop_vx_constraint-57.c   | 123 ++++++++++++++
 .../riscv/rvv/base/binop_vx_constraint-58.c   |  72 ++++++++
 .../riscv/rvv/base/binop_vx_constraint-59.c   |  16 ++
 .../riscv/rvv/base/binop_vx_constraint-6.c    | 132 +++++++++++++++
 .../riscv/rvv/base/binop_vx_constraint-60.c   |  16 ++
 .../riscv/rvv/base/binop_vx_constraint-61.c   |  16 ++
 .../riscv/rvv/base/binop_vx_constraint-62.c   |  18 ++
 .../riscv/rvv/base/binop_vx_constraint-63.c   | 123 ++++++++++++++
 .../riscv/rvv/base/binop_vx_constraint-64.c   |  72 ++++++++
 .../riscv/rvv/base/binop_vx_constraint-65.c   |  16 ++
 .../riscv/rvv/base/binop_vx_constraint-66.c   |  16 ++
 .../riscv/rvv/base/binop_vx_constraint-67.c   |  16 ++
 .../riscv/rvv/base/binop_vx_constraint-68.c   |  18 ++
 .../riscv/rvv/base/binop_vx_constraint-69.c   | 123 ++++++++++++++
 .../riscv/rvv/base/binop_vx_constraint-7.c    | 132 +++++++++++++++
 .../riscv/rvv/base/binop_vx_constraint-70.c   |  72 ++++++++
 .../riscv/rvv/base/binop_vx_constraint-71.c   |  16 ++
 .../riscv/rvv/base/binop_vx_constraint-72.c   |  16 ++
 .../riscv/rvv/base/binop_vx_constraint-73.c   |  16 ++
 .../riscv/rvv/base/binop_vx_constraint-74.c   |  18 ++
 .../riscv/rvv/base/binop_vx_constraint-75.c   |  72 ++++++++
 .../riscv/rvv/base/binop_vx_constraint-76.c   |  16 ++
 .../riscv/rvv/base/binop_vx_constraint-77.c   |  16 ++
 .../riscv/rvv/base/binop_vx_constraint-78.c   |  16 ++
 .../riscv/rvv/base/binop_vx_constraint-79.c   |  18 ++
 .../riscv/rvv/base/binop_vx_constraint-8.c    | 160 ++++++++++++++++++
 .../riscv/rvv/base/binop_vx_constraint-80.c   |  72 ++++++++
 .../riscv/rvv/base/binop_vx_constraint-81.c   |  16 ++
 .../riscv/rvv/base/binop_vx_constraint-82.c   |  16 ++
 .../riscv/rvv/base/binop_vx_constraint-83.c   |  16 ++
 .../riscv/rvv/base/binop_vx_constraint-84.c   |  18 ++
 .../riscv/rvv/base/binop_vx_constraint-85.c   | 123 ++++++++++++++
 .../riscv/rvv/base/binop_vx_constraint-86.c   |  72 ++++++++
 .../riscv/rvv/base/binop_vx_constraint-87.c   |  16 ++
 .../riscv/rvv/base/binop_vx_constraint-88.c   |  16 ++
 .../riscv/rvv/base/binop_vx_constraint-89.c   |  16 ++
 .../riscv/rvv/base/binop_vx_constraint-9.c    | 132 +++++++++++++++
 .../riscv/rvv/base/binop_vx_constraint-90.c   |  18 ++
 .../riscv/rvv/base/binop_vx_constraint-91.c   | 123 ++++++++++++++
 .../riscv/rvv/base/binop_vx_constraint-92.c   |  72 ++++++++
 .../riscv/rvv/base/binop_vx_constraint-93.c   |  16 ++
 .../riscv/rvv/base/binop_vx_constraint-94.c   |  16 ++
 .../riscv/rvv/base/binop_vx_constraint-95.c   |  16 ++
 .../riscv/rvv/base/binop_vx_constraint-96.c   |  18 ++
 96 files changed, 8359 insertions(+)
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-1.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-10.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-11.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-12.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-13.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-14.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-15.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-16.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-17.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-18.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-19.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-2.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-20.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-21.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-22.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-23.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-24.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-25.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-26.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-27.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-28.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-29.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-3.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-30.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-31.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-32.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-33.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-34.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-35.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-36.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-37.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-38.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-39.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-4.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-40.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-41.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-42.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-43.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-44.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-45.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-46.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-47.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-48.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-49.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-5.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-50.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-51.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-52.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-53.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-54.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-55.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-56.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-57.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-58.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-59.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-6.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-60.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-61.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-62.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-63.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-64.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-65.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-66.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-67.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-68.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-69.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-7.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-70.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-71.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-72.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-73.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-74.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-75.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-76.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-77.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-78.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-79.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-8.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-80.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-81.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-82.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-83.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-84.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-85.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-86.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-87.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-88.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-89.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-9.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-90.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-91.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-92.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-93.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-94.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-95.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-96.c
  

Patch

diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-1.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-1.c
new file mode 100644
index 00000000000..09e0e21925b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-1.c
@@ -0,0 +1,132 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f1:
+**	vsetivli\tzero,4,e32,m1,tu,ma
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vadd\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vadd\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vse32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f1 (void * in, void *out, int32_t x)
+{
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_tu (v, in, 4);
+    vint32m1_t v3 = __riscv_vadd_vx_i32m1 (v2, x, 4);
+    vint32m1_t v4 = __riscv_vadd_vx_i32m1_tu (v3, v2, x, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f2:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e32,m1,ta,ma
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vadd\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vadd\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f2 (void * in, void *out, int32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_m (mask, in, 4);
+    vint32m1_t v3 = __riscv_vadd_vx_i32m1 (v2, x, 4);
+    vint32m1_t v4 = __riscv_vadd_vx_i32m1_m (mask, v3, x, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f3:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e32,m1,tu,mu
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vadd\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vadd\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f3 (void * in, void *out, int32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_tumu (mask, v, in, 4);
+    vint32m1_t v3 = __riscv_vadd_vx_i32m1 (v2, x, 4);
+    vint32m1_t v4 = __riscv_vadd_vx_i32m1_tumu (mask, v3, v2, x, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f4:
+**	vsetivli\tzero,4,e8,mf8,tu,ma
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vadd\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vadd\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vse8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f4 (void * in, void *out, int8_t x)
+{
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tu (v, in, 4);
+    vint8mf8_t v3 = __riscv_vadd_vx_i8mf8 (v2, x, 4);
+    vint8mf8_t v4 = __riscv_vadd_vx_i8mf8_tu (v3, v2, x, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
+
+/*
+** f5:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e8,mf8,ta,ma
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vadd\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vadd\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f5 (void * in, void *out, int8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_m (mask, in, 4);
+    vint8mf8_t v3 = __riscv_vadd_vx_i8mf8 (v2, x, 4);
+    vint8mf8_t v4 = __riscv_vadd_vx_i8mf8_m (mask, v3, x, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
+
+/*
+** f6:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e8,mf8,tu,mu
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vadd\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vadd\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f6 (void * in, void *out, int8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tumu (mask, v, in, 4);
+    vint8mf8_t v3 = __riscv_vadd_vx_i8mf8 (v2, x, 4);
+    vint8mf8_t v4 = __riscv_vadd_vx_i8mf8_tumu (mask, v3, v2, x, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-10.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-10.c
new file mode 100644
index 00000000000..faf5ffb47f1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-10.c
@@ -0,0 +1,132 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f1:
+**	vsetivli\tzero,4,e32,m1,tu,ma
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vand\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**	vand\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**	vse32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f1 (void * in, void *out, int32_t x)
+{
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_tu (v, in, 4);
+    vint32m1_t v3 = __riscv_vand_vx_i32m1 (v2, -16, 4);
+    vint32m1_t v4 = __riscv_vand_vx_i32m1_tu (v3, v2, -16, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f2:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e32,m1,ta,ma
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vand\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**	vand\.vi\tv[1-9][0-9]?,\s*v[0-9]+,\s*-16,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f2 (void * in, void *out, int32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_m (mask, in, 4);
+    vint32m1_t v3 = __riscv_vand_vx_i32m1 (v2, -16, 4);
+    vint32m1_t v4 = __riscv_vand_vx_i32m1_m (mask, v3, -16, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f3:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e32,m1,tu,mu
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vand\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**	vand\.vi\tv[1-9][0-9]?,\s*v[0-9]+,\s*-16,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f3 (void * in, void *out, int32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_tumu (mask, v, in, 4);
+    vint32m1_t v3 = __riscv_vand_vx_i32m1 (v2, -16, 4);
+    vint32m1_t v4 = __riscv_vand_vx_i32m1_tumu (mask, v3, v2, -16, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f4:
+**	vsetivli\tzero,4,e8,mf8,tu,ma
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vand\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**	vand\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**	vse8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f4 (void * in, void *out, int8_t x)
+{
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tu (v, in, 4);
+    vint8mf8_t v3 = __riscv_vand_vx_i8mf8 (v2, -16, 4);
+    vint8mf8_t v4 = __riscv_vand_vx_i8mf8_tu (v3, v2, -16, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
+
+/*
+** f5:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e8,mf8,ta,ma
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vand\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**	vand\.vi\tv[1-9][0-9]?,\s*v[0-9]+,\s*-16,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f5 (void * in, void *out, int8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_m (mask, in, 4);
+    vint8mf8_t v3 = __riscv_vand_vx_i8mf8 (v2, -16, 4);
+    vint8mf8_t v4 = __riscv_vand_vx_i8mf8_m (mask, v3, -16, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
+
+/*
+** f6:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e8,mf8,tu,mu
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vand\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**	vand\.vi\tv[1-9][0-9]?,\s*v[0-9]+,\s*-16,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f6 (void * in, void *out, int8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tumu (mask, v, in, 4);
+    vint8mf8_t v3 = __riscv_vand_vx_i8mf8 (v2, -16, 4);
+    vint8mf8_t v4 = __riscv_vand_vx_i8mf8_tumu (mask, v3, v2, -16, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-11.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-11.c
new file mode 100644
index 00000000000..54fe941f6ad
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-11.c
@@ -0,0 +1,132 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f1:
+**	vsetivli\tzero,4,e32,m1,tu,ma
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vand\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**	vand\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**	vse32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f1 (void * in, void *out, int32_t x)
+{
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_tu (v, in, 4);
+    vint32m1_t v3 = __riscv_vand_vx_i32m1 (v2, 15, 4);
+    vint32m1_t v4 = __riscv_vand_vx_i32m1_tu (v3, v2, 15, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f2:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e32,m1,ta,ma
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vand\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**	vand\.vi\tv[1-9][0-9]?,\s*v[0-9]+,\s*15,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f2 (void * in, void *out, int32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_m (mask, in, 4);
+    vint32m1_t v3 = __riscv_vand_vx_i32m1 (v2, 15, 4);
+    vint32m1_t v4 = __riscv_vand_vx_i32m1_m (mask, v3, 15, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f3:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e32,m1,tu,mu
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vand\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**	vand\.vi\tv[1-9][0-9]?,\s*v[0-9]+,\s*15,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f3 (void * in, void *out, int32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_tumu (mask, v, in, 4);
+    vint32m1_t v3 = __riscv_vand_vx_i32m1 (v2, 15, 4);
+    vint32m1_t v4 = __riscv_vand_vx_i32m1_tumu (mask, v3, v2, 15, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f4:
+**	vsetivli\tzero,4,e8,mf8,tu,ma
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vand\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**	vand\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**	vse8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f4 (void * in, void *out, int8_t x)
+{
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tu (v, in, 4);
+    vint8mf8_t v3 = __riscv_vand_vx_i8mf8 (v2, 15, 4);
+    vint8mf8_t v4 = __riscv_vand_vx_i8mf8_tu (v3, v2, 15, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
+
+/*
+** f5:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e8,mf8,ta,ma
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vand\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**	vand\.vi\tv[1-9][0-9]?,\s*v[0-9]+,\s*15,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f5 (void * in, void *out, int8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_m (mask, in, 4);
+    vint8mf8_t v3 = __riscv_vand_vx_i8mf8 (v2, 15, 4);
+    vint8mf8_t v4 = __riscv_vand_vx_i8mf8_m (mask, v3, 15, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
+
+/*
+** f6:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e8,mf8,tu,mu
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vand\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**	vand\.vi\tv[1-9][0-9]?,\s*v[0-9]+,\s*15,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f6 (void * in, void *out, int8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tumu (mask, v, in, 4);
+    vint8mf8_t v3 = __riscv_vand_vx_i8mf8 (v2, 15, 4);
+    vint8mf8_t v4 = __riscv_vand_vx_i8mf8_tumu (mask, v3, v2, 15, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-12.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-12.c
new file mode 100644
index 00000000000..8a18a1df535
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-12.c
@@ -0,0 +1,160 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f1:
+**  ...
+**	vsetivli\tzero,4,e32,m1,tu,ma
+**  ...
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vand\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vand\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vse32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f1 (void * in, void *out, int32_t x)
+{
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_tu (v, in, 4);
+    vint32m1_t v3 = __riscv_vand_vx_i32m1 (v2, 16, 4);
+    vint32m1_t v4 = __riscv_vand_vx_i32m1_tu (v3, v2, 16, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f2:
+**  ...
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**  ...
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vsetivli\tzero,4,e32,m1,ta,ma
+**  ...
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**  ...
+**	vand\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vand\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f2 (void * in, void *out, int32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_m (mask, in, 4);
+    vint32m1_t v3 = __riscv_vand_vx_i32m1 (v2, 16, 4);
+    vint32m1_t v4 = __riscv_vand_vx_i32m1_m (mask, v3, 16, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f3:
+**  ...
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**  ...
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vsetivli\tzero,4,e32,m1,tu,mu
+**  ...
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**  ...
+**	vand\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vand\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f3 (void * in, void *out, int32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_tumu (mask, v, in, 4);
+    vint32m1_t v3 = __riscv_vand_vx_i32m1 (v2, 16, 4);
+    vint32m1_t v4 = __riscv_vand_vx_i32m1_tumu (mask, v3, v2, 16, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f4:
+**  ...
+**	vsetivli\tzero,4,e8,mf8,tu,ma
+**  ...
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vand\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vand\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vse8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f4 (void * in, void *out, int8_t x)
+{
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tu (v, in, 4);
+    vint8mf8_t v3 = __riscv_vand_vx_i8mf8 (v2, 16, 4);
+    vint8mf8_t v4 = __riscv_vand_vx_i8mf8_tu (v3, v2, 16, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
+
+/*
+** f5:
+**  ...
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**  ...
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vsetivli\tzero,4,e8,mf8,ta,ma
+**  ...
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**  ...
+**	vand\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vand\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f5 (void * in, void *out, int8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_m (mask, in, 4);
+    vint8mf8_t v3 = __riscv_vand_vx_i8mf8 (v2, 16, 4);
+    vint8mf8_t v4 = __riscv_vand_vx_i8mf8_m (mask, v3, 16, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
+
+/*
+** f6:
+**  ...
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**  ...
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vsetivli\tzero,4,e8,mf8,tu,mu
+**  ...
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vand\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vand\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f6 (void * in, void *out, int8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tumu (mask, v, in, 4);
+    vint8mf8_t v3 = __riscv_vand_vx_i8mf8 (v2, 16, 4);
+    vint8mf8_t v4 = __riscv_vand_vx_i8mf8_tumu (mask, v3, v2, 16, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-13.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-13.c
new file mode 100644
index 00000000000..d844e1baf2f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-13.c
@@ -0,0 +1,132 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f1:
+**	vsetivli\tzero,4,e32,m1,tu,ma
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vor\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vor\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vse32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f1 (void * in, void *out, int32_t x)
+{
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_tu (v, in, 4);
+    vint32m1_t v3 = __riscv_vor_vx_i32m1 (v2, x, 4);
+    vint32m1_t v4 = __riscv_vor_vx_i32m1_tu (v3, v2, x, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f2:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e32,m1,ta,ma
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vor\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vor\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f2 (void * in, void *out, int32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_m (mask, in, 4);
+    vint32m1_t v3 = __riscv_vor_vx_i32m1 (v2, x, 4);
+    vint32m1_t v4 = __riscv_vor_vx_i32m1_m (mask, v3, x, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f3:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e32,m1,tu,mu
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vor\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vor\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f3 (void * in, void *out, int32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_tumu (mask, v, in, 4);
+    vint32m1_t v3 = __riscv_vor_vx_i32m1 (v2, x, 4);
+    vint32m1_t v4 = __riscv_vor_vx_i32m1_tumu (mask, v3, v2, x, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f4:
+**	vsetivli\tzero,4,e8,mf8,tu,ma
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vor\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vor\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vse8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f4 (void * in, void *out, int8_t x)
+{
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tu (v, in, 4);
+    vint8mf8_t v3 = __riscv_vor_vx_i8mf8 (v2, x, 4);
+    vint8mf8_t v4 = __riscv_vor_vx_i8mf8_tu (v3, v2, x, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
+
+/*
+** f5:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e8,mf8,ta,ma
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vor\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vor\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f5 (void * in, void *out, int8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_m (mask, in, 4);
+    vint8mf8_t v3 = __riscv_vor_vx_i8mf8 (v2, x, 4);
+    vint8mf8_t v4 = __riscv_vor_vx_i8mf8_m (mask, v3, x, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
+
+/*
+** f6:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e8,mf8,tu,mu
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vor\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vor\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f6 (void * in, void *out, int8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tumu (mask, v, in, 4);
+    vint8mf8_t v3 = __riscv_vor_vx_i8mf8 (v2, x, 4);
+    vint8mf8_t v4 = __riscv_vor_vx_i8mf8_tumu (mask, v3, v2, x, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-14.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-14.c
new file mode 100644
index 00000000000..6779dfe859a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-14.c
@@ -0,0 +1,132 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f1:
+**	vsetivli\tzero,4,e32,m1,tu,ma
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vor\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**	vor\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**	vse32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f1 (void * in, void *out, int32_t x)
+{
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_tu (v, in, 4);
+    vint32m1_t v3 = __riscv_vor_vx_i32m1 (v2, -16, 4);
+    vint32m1_t v4 = __riscv_vor_vx_i32m1_tu (v3, v2, -16, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f2:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e32,m1,ta,ma
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vor\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**	vor\.vi\tv[1-9][0-9]?,\s*v[0-9]+,\s*-16,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f2 (void * in, void *out, int32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_m (mask, in, 4);
+    vint32m1_t v3 = __riscv_vor_vx_i32m1 (v2, -16, 4);
+    vint32m1_t v4 = __riscv_vor_vx_i32m1_m (mask, v3, -16, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f3:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e32,m1,tu,mu
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vor\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**	vor\.vi\tv[1-9][0-9]?,\s*v[0-9]+,\s*-16,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f3 (void * in, void *out, int32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_tumu (mask, v, in, 4);
+    vint32m1_t v3 = __riscv_vor_vx_i32m1 (v2, -16, 4);
+    vint32m1_t v4 = __riscv_vor_vx_i32m1_tumu (mask, v3, v2, -16, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f4:
+**	vsetivli\tzero,4,e8,mf8,tu,ma
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vor\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**	vor\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**	vse8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f4 (void * in, void *out, int8_t x)
+{
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tu (v, in, 4);
+    vint8mf8_t v3 = __riscv_vor_vx_i8mf8 (v2, -16, 4);
+    vint8mf8_t v4 = __riscv_vor_vx_i8mf8_tu (v3, v2, -16, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
+
+/*
+** f5:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e8,mf8,ta,ma
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vor\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**	vor\.vi\tv[1-9][0-9]?,\s*v[0-9]+,\s*-16,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f5 (void * in, void *out, int8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_m (mask, in, 4);
+    vint8mf8_t v3 = __riscv_vor_vx_i8mf8 (v2, -16, 4);
+    vint8mf8_t v4 = __riscv_vor_vx_i8mf8_m (mask, v3, -16, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
+
+/*
+** f6:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e8,mf8,tu,mu
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vor\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**	vor\.vi\tv[1-9][0-9]?,\s*v[0-9]+,\s*-16,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f6 (void * in, void *out, int8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tumu (mask, v, in, 4);
+    vint8mf8_t v3 = __riscv_vor_vx_i8mf8 (v2, -16, 4);
+    vint8mf8_t v4 = __riscv_vor_vx_i8mf8_tumu (mask, v3, v2, -16, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-15.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-15.c
new file mode 100644
index 00000000000..611a86f2b15
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-15.c
@@ -0,0 +1,132 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f1:
+**	vsetivli\tzero,4,e32,m1,tu,ma
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vor\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**	vor\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**	vse32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f1 (void * in, void *out, int32_t x)
+{
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_tu (v, in, 4);
+    vint32m1_t v3 = __riscv_vor_vx_i32m1 (v2, 15, 4);
+    vint32m1_t v4 = __riscv_vor_vx_i32m1_tu (v3, v2, 15, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f2:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e32,m1,ta,ma
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vor\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**	vor\.vi\tv[1-9][0-9]?,\s*v[0-9]+,\s*15,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f2 (void * in, void *out, int32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_m (mask, in, 4);
+    vint32m1_t v3 = __riscv_vor_vx_i32m1 (v2, 15, 4);
+    vint32m1_t v4 = __riscv_vor_vx_i32m1_m (mask, v3, 15, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f3:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e32,m1,tu,mu
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vor\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**	vor\.vi\tv[1-9][0-9]?,\s*v[0-9]+,\s*15,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f3 (void * in, void *out, int32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_tumu (mask, v, in, 4);
+    vint32m1_t v3 = __riscv_vor_vx_i32m1 (v2, 15, 4);
+    vint32m1_t v4 = __riscv_vor_vx_i32m1_tumu (mask, v3, v2, 15, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f4:
+**	vsetivli\tzero,4,e8,mf8,tu,ma
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vor\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**	vor\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**	vse8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f4 (void * in, void *out, int8_t x)
+{
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tu (v, in, 4);
+    vint8mf8_t v3 = __riscv_vor_vx_i8mf8 (v2, 15, 4);
+    vint8mf8_t v4 = __riscv_vor_vx_i8mf8_tu (v3, v2, 15, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
+
+/*
+** f5:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e8,mf8,ta,ma
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vor\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**	vor\.vi\tv[1-9][0-9]?,\s*v[0-9]+,\s*15,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f5 (void * in, void *out, int8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_m (mask, in, 4);
+    vint8mf8_t v3 = __riscv_vor_vx_i8mf8 (v2, 15, 4);
+    vint8mf8_t v4 = __riscv_vor_vx_i8mf8_m (mask, v3, 15, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
+
+/*
+** f6:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e8,mf8,tu,mu
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vor\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**	vor\.vi\tv[1-9][0-9]?,\s*v[0-9]+,\s*15,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f6 (void * in, void *out, int8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tumu (mask, v, in, 4);
+    vint8mf8_t v3 = __riscv_vor_vx_i8mf8 (v2, 15, 4);
+    vint8mf8_t v4 = __riscv_vor_vx_i8mf8_tumu (mask, v3, v2, 15, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-16.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-16.c
new file mode 100644
index 00000000000..0a7a1e88391
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-16.c
@@ -0,0 +1,160 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f1:
+**  ...
+**	vsetivli\tzero,4,e32,m1,tu,ma
+**  ...
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vor\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vor\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vse32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f1 (void * in, void *out, int32_t x)
+{
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_tu (v, in, 4);
+    vint32m1_t v3 = __riscv_vor_vx_i32m1 (v2, 16, 4);
+    vint32m1_t v4 = __riscv_vor_vx_i32m1_tu (v3, v2, 16, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f2:
+**  ...
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**  ...
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vsetivli\tzero,4,e32,m1,ta,ma
+**  ...
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**  ...
+**	vor\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vor\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f2 (void * in, void *out, int32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_m (mask, in, 4);
+    vint32m1_t v3 = __riscv_vor_vx_i32m1 (v2, 16, 4);
+    vint32m1_t v4 = __riscv_vor_vx_i32m1_m (mask, v3, 16, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f3:
+**  ...
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**  ...
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vsetivli\tzero,4,e32,m1,tu,mu
+**  ...
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**  ...
+**	vor\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vor\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f3 (void * in, void *out, int32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_tumu (mask, v, in, 4);
+    vint32m1_t v3 = __riscv_vor_vx_i32m1 (v2, 16, 4);
+    vint32m1_t v4 = __riscv_vor_vx_i32m1_tumu (mask, v3, v2, 16, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f4:
+**  ...
+**	vsetivli\tzero,4,e8,mf8,tu,ma
+**  ...
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vor\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vor\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vse8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f4 (void * in, void *out, int8_t x)
+{
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tu (v, in, 4);
+    vint8mf8_t v3 = __riscv_vor_vx_i8mf8 (v2, 16, 4);
+    vint8mf8_t v4 = __riscv_vor_vx_i8mf8_tu (v3, v2, 16, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
+
+/*
+** f5:
+**  ...
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**  ...
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vsetivli\tzero,4,e8,mf8,ta,ma
+**  ...
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**  ...
+**	vor\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vor\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f5 (void * in, void *out, int8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_m (mask, in, 4);
+    vint8mf8_t v3 = __riscv_vor_vx_i8mf8 (v2, 16, 4);
+    vint8mf8_t v4 = __riscv_vor_vx_i8mf8_m (mask, v3, 16, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
+
+/*
+** f6:
+**  ...
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**  ...
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vsetivli\tzero,4,e8,mf8,tu,mu
+**  ...
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vor\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vor\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f6 (void * in, void *out, int8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tumu (mask, v, in, 4);
+    vint8mf8_t v3 = __riscv_vor_vx_i8mf8 (v2, 16, 4);
+    vint8mf8_t v4 = __riscv_vor_vx_i8mf8_tumu (mask, v3, v2, 16, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-17.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-17.c
new file mode 100644
index 00000000000..eeea3517e01
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-17.c
@@ -0,0 +1,160 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f1:
+**  ...
+**	vsetivli\tzero,4,e32,m1,tu,ma
+**  ...
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vmul\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmul\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vse32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f1 (void * in, void *out, int32_t x)
+{
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_tu (v, in, 4);
+    vint32m1_t v3 = __riscv_vmul_vx_i32m1 (v2, 5, 4);
+    vint32m1_t v4 = __riscv_vmul_vx_i32m1_tu (v3, v2, 5, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f2:
+**  ...
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**  ...
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vsetivli\tzero,4,e32,m1,ta,ma
+**  ...
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**  ...
+**	vmul\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmul\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f2 (void * in, void *out, int32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_m (mask, in, 4);
+    vint32m1_t v3 = __riscv_vmul_vx_i32m1 (v2, 5, 4);
+    vint32m1_t v4 = __riscv_vmul_vx_i32m1_m (mask, v3, 5, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f3:
+**  ...
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**  ...
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vsetivli\tzero,4,e32,m1,tu,mu
+**  ...
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**  ...
+**	vmul\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmul\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f3 (void * in, void *out, int32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_tumu (mask, v, in, 4);
+    vint32m1_t v3 = __riscv_vmul_vx_i32m1 (v2, 5, 4);
+    vint32m1_t v4 = __riscv_vmul_vx_i32m1_tumu (mask, v3, v2, 5, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f4:
+**  ...
+**	vsetivli\tzero,4,e8,mf8,tu,ma
+**  ...
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vmul\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmul\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vse8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f4 (void * in, void *out, int8_t x)
+{
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tu (v, in, 4);
+    vint8mf8_t v3 = __riscv_vmul_vx_i8mf8 (v2, 5, 4);
+    vint8mf8_t v4 = __riscv_vmul_vx_i8mf8_tu (v3, v2, 5, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
+
+/*
+** f5:
+**  ...
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**  ...
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vsetivli\tzero,4,e8,mf8,ta,ma
+**  ...
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**  ...
+**	vmul\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmul\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f5 (void * in, void *out, int8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_m (mask, in, 4);
+    vint8mf8_t v3 = __riscv_vmul_vx_i8mf8 (v2, 5, 4);
+    vint8mf8_t v4 = __riscv_vmul_vx_i8mf8_m (mask, v3, 5, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
+
+/*
+** f6:
+**  ...
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**  ...
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vsetivli\tzero,4,e8,mf8,tu,mu
+**  ...
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vmul\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmul\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f6 (void * in, void *out, int8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tumu (mask, v, in, 4);
+    vint8mf8_t v3 = __riscv_vmul_vx_i8mf8 (v2, 5, 4);
+    vint8mf8_t v4 = __riscv_vmul_vx_i8mf8_tumu (mask, v3, v2, 5, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-18.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-18.c
new file mode 100644
index 00000000000..328564fb029
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-18.c
@@ -0,0 +1,132 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f1:
+**	vsetivli\tzero,4,e32,m1,tu,ma
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vmul\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmul\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vse32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f1 (void * in, void *out, int32_t x)
+{
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_tu (v, in, 4);
+    vint32m1_t v3 = __riscv_vmul_vx_i32m1 (v2, x, 4);
+    vint32m1_t v4 = __riscv_vmul_vx_i32m1_tu (v3, v2, x, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f2:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e32,m1,ta,ma
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vmul\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmul\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f2 (void * in, void *out, int32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_m (mask, in, 4);
+    vint32m1_t v3 = __riscv_vmul_vx_i32m1 (v2, x, 4);
+    vint32m1_t v4 = __riscv_vmul_vx_i32m1_m (mask, v3, x, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f3:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e32,m1,tu,mu
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vmul\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmul\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f3 (void * in, void *out, int32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_tumu (mask, v, in, 4);
+    vint32m1_t v3 = __riscv_vmul_vx_i32m1 (v2, x, 4);
+    vint32m1_t v4 = __riscv_vmul_vx_i32m1_tumu (mask, v3, v2, x, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f4:
+**	vsetivli\tzero,4,e8,mf8,tu,ma
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vmul\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmul\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vse8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f4 (void * in, void *out, int8_t x)
+{
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tu (v, in, 4);
+    vint8mf8_t v3 = __riscv_vmul_vx_i8mf8 (v2, x, 4);
+    vint8mf8_t v4 = __riscv_vmul_vx_i8mf8_tu (v3, v2, x, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
+
+/*
+** f5:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e8,mf8,ta,ma
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vmul\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmul\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f5 (void * in, void *out, int8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_m (mask, in, 4);
+    vint8mf8_t v3 = __riscv_vmul_vx_i8mf8 (v2, x, 4);
+    vint8mf8_t v4 = __riscv_vmul_vx_i8mf8_m (mask, v3, x, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
+
+/*
+** f6:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e8,mf8,tu,mu
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vmul\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmul\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f6 (void * in, void *out, int8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tumu (mask, v, in, 4);
+    vint8mf8_t v3 = __riscv_vmul_vx_i8mf8 (v2, x, 4);
+    vint8mf8_t v4 = __riscv_vmul_vx_i8mf8_tumu (mask, v3, v2, x, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-19.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-19.c
new file mode 100644
index 00000000000..f4616b4c72b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-19.c
@@ -0,0 +1,160 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f1:
+**  ...
+**	vsetivli\tzero,4,e32,m1,tu,ma
+**  ...
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vmax\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmax\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vse32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f1 (void * in, void *out, int32_t x)
+{
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_tu (v, in, 4);
+    vint32m1_t v3 = __riscv_vmax_vx_i32m1 (v2, 5, 4);
+    vint32m1_t v4 = __riscv_vmax_vx_i32m1_tu (v3, v2, 5, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f2:
+**  ...
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**  ...
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vsetivli\tzero,4,e32,m1,ta,ma
+**  ...
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**  ...
+**	vmax\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmax\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f2 (void * in, void *out, int32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_m (mask, in, 4);
+    vint32m1_t v3 = __riscv_vmax_vx_i32m1 (v2, 5, 4);
+    vint32m1_t v4 = __riscv_vmax_vx_i32m1_m (mask, v3, 5, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f3:
+**  ...
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**  ...
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vsetivli\tzero,4,e32,m1,tu,mu
+**  ...
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**  ...
+**	vmax\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmax\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f3 (void * in, void *out, int32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_tumu (mask, v, in, 4);
+    vint32m1_t v3 = __riscv_vmax_vx_i32m1 (v2, 5, 4);
+    vint32m1_t v4 = __riscv_vmax_vx_i32m1_tumu (mask, v3, v2, 5, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f4:
+**  ...
+**	vsetivli\tzero,4,e8,mf8,tu,ma
+**  ...
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vmax\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmax\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vse8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f4 (void * in, void *out, int8_t x)
+{
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tu (v, in, 4);
+    vint8mf8_t v3 = __riscv_vmax_vx_i8mf8 (v2, 5, 4);
+    vint8mf8_t v4 = __riscv_vmax_vx_i8mf8_tu (v3, v2, 5, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
+
+/*
+** f5:
+**  ...
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**  ...
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vsetivli\tzero,4,e8,mf8,ta,ma
+**  ...
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**  ...
+**	vmax\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmax\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f5 (void * in, void *out, int8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_m (mask, in, 4);
+    vint8mf8_t v3 = __riscv_vmax_vx_i8mf8 (v2, 5, 4);
+    vint8mf8_t v4 = __riscv_vmax_vx_i8mf8_m (mask, v3, 5, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
+
+/*
+** f6:
+**  ...
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**  ...
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vsetivli\tzero,4,e8,mf8,tu,mu
+**  ...
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vmax\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmax\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f6 (void * in, void *out, int8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tumu (mask, v, in, 4);
+    vint8mf8_t v3 = __riscv_vmax_vx_i8mf8 (v2, 5, 4);
+    vint8mf8_t v4 = __riscv_vmax_vx_i8mf8_tumu (mask, v3, v2, 5, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-2.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-2.c
new file mode 100644
index 00000000000..2c02c35ef57
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-2.c
@@ -0,0 +1,132 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f1:
+**	vsetivli\tzero,4,e32,m1,tu,ma
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vadd\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**	vadd\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**	vse32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f1 (void * in, void *out, int32_t x)
+{
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_tu (v, in, 4);
+    vint32m1_t v3 = __riscv_vadd_vx_i32m1 (v2, -16, 4);
+    vint32m1_t v4 = __riscv_vadd_vx_i32m1_tu (v3, v2, -16, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f2:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e32,m1,ta,ma
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vadd\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**	vadd\.vi\tv[1-9][0-9]?,\s*v[0-9]+,\s*-16,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f2 (void * in, void *out, int32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_m (mask, in, 4);
+    vint32m1_t v3 = __riscv_vadd_vx_i32m1 (v2, -16, 4);
+    vint32m1_t v4 = __riscv_vadd_vx_i32m1_m (mask, v3, -16, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f3:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e32,m1,tu,mu
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vadd\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**	vadd\.vi\tv[1-9][0-9]?,\s*v[0-9]+,\s*-16,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f3 (void * in, void *out, int32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_tumu (mask, v, in, 4);
+    vint32m1_t v3 = __riscv_vadd_vx_i32m1 (v2, -16, 4);
+    vint32m1_t v4 = __riscv_vadd_vx_i32m1_tumu (mask, v3, v2, -16, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f4:
+**	vsetivli\tzero,4,e8,mf8,tu,ma
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vadd\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**	vadd\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**	vse8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f4 (void * in, void *out, int8_t x)
+{
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tu (v, in, 4);
+    vint8mf8_t v3 = __riscv_vadd_vx_i8mf8 (v2, -16, 4);
+    vint8mf8_t v4 = __riscv_vadd_vx_i8mf8_tu (v3, v2, -16, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
+
+/*
+** f5:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e8,mf8,ta,ma
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vadd\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**	vadd\.vi\tv[1-9][0-9]?,\s*v[0-9]+,\s*-16,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f5 (void * in, void *out, int8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_m (mask, in, 4);
+    vint8mf8_t v3 = __riscv_vadd_vx_i8mf8 (v2, -16, 4);
+    vint8mf8_t v4 = __riscv_vadd_vx_i8mf8_m (mask, v3, -16, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
+
+/*
+** f6:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e8,mf8,tu,mu
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vadd\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**	vadd\.vi\tv[1-9][0-9]?,\s*v[0-9]+,\s*-16,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f6 (void * in, void *out, int8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tumu (mask, v, in, 4);
+    vint8mf8_t v3 = __riscv_vadd_vx_i8mf8 (v2, -16, 4);
+    vint8mf8_t v4 = __riscv_vadd_vx_i8mf8_tumu (mask, v3, v2, -16, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-20.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-20.c
new file mode 100644
index 00000000000..441573623ab
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-20.c
@@ -0,0 +1,132 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f1:
+**	vsetivli\tzero,4,e32,m1,tu,ma
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vmax\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmax\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vse32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f1 (void * in, void *out, int32_t x)
+{
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_tu (v, in, 4);
+    vint32m1_t v3 = __riscv_vmax_vx_i32m1 (v2, x, 4);
+    vint32m1_t v4 = __riscv_vmax_vx_i32m1_tu (v3, v2, x, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f2:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e32,m1,ta,ma
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vmax\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmax\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f2 (void * in, void *out, int32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_m (mask, in, 4);
+    vint32m1_t v3 = __riscv_vmax_vx_i32m1 (v2, x, 4);
+    vint32m1_t v4 = __riscv_vmax_vx_i32m1_m (mask, v3, x, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f3:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e32,m1,tu,mu
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vmax\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmax\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f3 (void * in, void *out, int32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_tumu (mask, v, in, 4);
+    vint32m1_t v3 = __riscv_vmax_vx_i32m1 (v2, x, 4);
+    vint32m1_t v4 = __riscv_vmax_vx_i32m1_tumu (mask, v3, v2, x, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f4:
+**	vsetivli\tzero,4,e8,mf8,tu,ma
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vmax\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmax\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vse8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f4 (void * in, void *out, int8_t x)
+{
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tu (v, in, 4);
+    vint8mf8_t v3 = __riscv_vmax_vx_i8mf8 (v2, x, 4);
+    vint8mf8_t v4 = __riscv_vmax_vx_i8mf8_tu (v3, v2, x, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
+
+/*
+** f5:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e8,mf8,ta,ma
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vmax\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmax\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f5 (void * in, void *out, int8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_m (mask, in, 4);
+    vint8mf8_t v3 = __riscv_vmax_vx_i8mf8 (v2, x, 4);
+    vint8mf8_t v4 = __riscv_vmax_vx_i8mf8_m (mask, v3, x, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
+
+/*
+** f6:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e8,mf8,tu,mu
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vmax\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmax\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f6 (void * in, void *out, int8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tumu (mask, v, in, 4);
+    vint8mf8_t v3 = __riscv_vmax_vx_i8mf8 (v2, x, 4);
+    vint8mf8_t v4 = __riscv_vmax_vx_i8mf8_tumu (mask, v3, v2, x, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-21.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-21.c
new file mode 100644
index 00000000000..c082f4059c3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-21.c
@@ -0,0 +1,160 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f1:
+**  ...
+**	vsetivli\tzero,4,e32,m1,tu,ma
+**  ...
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vmin\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmin\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vse32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f1 (void * in, void *out, int32_t x)
+{
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_tu (v, in, 4);
+    vint32m1_t v3 = __riscv_vmin_vx_i32m1 (v2, 5, 4);
+    vint32m1_t v4 = __riscv_vmin_vx_i32m1_tu (v3, v2, 5, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f2:
+**  ...
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**  ...
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vsetivli\tzero,4,e32,m1,ta,ma
+**  ...
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**  ...
+**	vmin\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmin\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f2 (void * in, void *out, int32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_m (mask, in, 4);
+    vint32m1_t v3 = __riscv_vmin_vx_i32m1 (v2, 5, 4);
+    vint32m1_t v4 = __riscv_vmin_vx_i32m1_m (mask, v3, 5, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f3:
+**  ...
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**  ...
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vsetivli\tzero,4,e32,m1,tu,mu
+**  ...
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**  ...
+**	vmin\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmin\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f3 (void * in, void *out, int32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_tumu (mask, v, in, 4);
+    vint32m1_t v3 = __riscv_vmin_vx_i32m1 (v2, 5, 4);
+    vint32m1_t v4 = __riscv_vmin_vx_i32m1_tumu (mask, v3, v2, 5, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f4:
+**  ...
+**	vsetivli\tzero,4,e8,mf8,tu,ma
+**  ...
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vmin\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmin\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vse8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f4 (void * in, void *out, int8_t x)
+{
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tu (v, in, 4);
+    vint8mf8_t v3 = __riscv_vmin_vx_i8mf8 (v2, 5, 4);
+    vint8mf8_t v4 = __riscv_vmin_vx_i8mf8_tu (v3, v2, 5, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
+
+/*
+** f5:
+**  ...
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**  ...
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vsetivli\tzero,4,e8,mf8,ta,ma
+**  ...
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**  ...
+**	vmin\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmin\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f5 (void * in, void *out, int8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_m (mask, in, 4);
+    vint8mf8_t v3 = __riscv_vmin_vx_i8mf8 (v2, 5, 4);
+    vint8mf8_t v4 = __riscv_vmin_vx_i8mf8_m (mask, v3, 5, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
+
+/*
+** f6:
+**  ...
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**  ...
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vsetivli\tzero,4,e8,mf8,tu,mu
+**  ...
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vmin\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmin\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f6 (void * in, void *out, int8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tumu (mask, v, in, 4);
+    vint8mf8_t v3 = __riscv_vmin_vx_i8mf8 (v2, 5, 4);
+    vint8mf8_t v4 = __riscv_vmin_vx_i8mf8_tumu (mask, v3, v2, 5, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-22.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-22.c
new file mode 100644
index 00000000000..b4813626fc1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-22.c
@@ -0,0 +1,132 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f1:
+**	vsetivli\tzero,4,e32,m1,tu,ma
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vmin\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmin\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vse32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f1 (void * in, void *out, int32_t x)
+{
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_tu (v, in, 4);
+    vint32m1_t v3 = __riscv_vmin_vx_i32m1 (v2, x, 4);
+    vint32m1_t v4 = __riscv_vmin_vx_i32m1_tu (v3, v2, x, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f2:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e32,m1,ta,ma
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vmin\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmin\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f2 (void * in, void *out, int32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_m (mask, in, 4);
+    vint32m1_t v3 = __riscv_vmin_vx_i32m1 (v2, x, 4);
+    vint32m1_t v4 = __riscv_vmin_vx_i32m1_m (mask, v3, x, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f3:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e32,m1,tu,mu
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vmin\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmin\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f3 (void * in, void *out, int32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_tumu (mask, v, in, 4);
+    vint32m1_t v3 = __riscv_vmin_vx_i32m1 (v2, x, 4);
+    vint32m1_t v4 = __riscv_vmin_vx_i32m1_tumu (mask, v3, v2, x, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f4:
+**	vsetivli\tzero,4,e8,mf8,tu,ma
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vmin\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmin\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vse8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f4 (void * in, void *out, int8_t x)
+{
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tu (v, in, 4);
+    vint8mf8_t v3 = __riscv_vmin_vx_i8mf8 (v2, x, 4);
+    vint8mf8_t v4 = __riscv_vmin_vx_i8mf8_tu (v3, v2, x, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
+
+/*
+** f5:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e8,mf8,ta,ma
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vmin\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmin\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f5 (void * in, void *out, int8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_m (mask, in, 4);
+    vint8mf8_t v3 = __riscv_vmin_vx_i8mf8 (v2, x, 4);
+    vint8mf8_t v4 = __riscv_vmin_vx_i8mf8_m (mask, v3, x, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
+
+/*
+** f6:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e8,mf8,tu,mu
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vmin\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmin\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f6 (void * in, void *out, int8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tumu (mask, v, in, 4);
+    vint8mf8_t v3 = __riscv_vmin_vx_i8mf8 (v2, x, 4);
+    vint8mf8_t v4 = __riscv_vmin_vx_i8mf8_tumu (mask, v3, v2, x, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-23.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-23.c
new file mode 100644
index 00000000000..fd6fd6740ca
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-23.c
@@ -0,0 +1,160 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f1:
+**  ...
+**	vsetivli\tzero,4,e32,m1,tu,ma
+**  ...
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vmaxu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmaxu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vse32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f1 (void * in, void *out, int32_t x)
+{
+    vuint32m1_t v = __riscv_vle32_v_u32m1 (in, 4);
+    vuint32m1_t v2 = __riscv_vle32_v_u32m1_tu (v, in, 4);
+    vuint32m1_t v3 = __riscv_vmaxu_vx_u32m1 (v2, 5, 4);
+    vuint32m1_t v4 = __riscv_vmaxu_vx_u32m1_tu (v3, v2, 5, 4);
+    __riscv_vse32_v_u32m1 (out, v4, 4);
+}
+
+/*
+** f2:
+**  ...
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**  ...
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vsetivli\tzero,4,e32,m1,ta,ma
+**  ...
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**  ...
+**	vmaxu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmaxu\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f2 (void * in, void *out, int32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vuint32m1_t v = __riscv_vle32_v_u32m1 (in, 4);
+    vuint32m1_t v2 = __riscv_vle32_v_u32m1_m (mask, in, 4);
+    vuint32m1_t v3 = __riscv_vmaxu_vx_u32m1 (v2, 5, 4);
+    vuint32m1_t v4 = __riscv_vmaxu_vx_u32m1_m (mask, v3, 5, 4);
+    __riscv_vse32_v_u32m1 (out, v4, 4);
+}
+
+/*
+** f3:
+**  ...
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**  ...
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vsetivli\tzero,4,e32,m1,tu,mu
+**  ...
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**  ...
+**	vmaxu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmaxu\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f3 (void * in, void *out, int32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vuint32m1_t v = __riscv_vle32_v_u32m1 (in, 4);
+    vuint32m1_t v2 = __riscv_vle32_v_u32m1_tumu (mask, v, in, 4);
+    vuint32m1_t v3 = __riscv_vmaxu_vx_u32m1 (v2, 5, 4);
+    vuint32m1_t v4 = __riscv_vmaxu_vx_u32m1_tumu (mask, v3, v2, 5, 4);
+    __riscv_vse32_v_u32m1 (out, v4, 4);
+}
+
+/*
+** f4:
+**  ...
+**	vsetivli\tzero,4,e8,mf8,tu,ma
+**  ...
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vmaxu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmaxu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vse8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f4 (void * in, void *out, int8_t x)
+{
+    vuint8mf8_t v = __riscv_vle8_v_u8mf8 (in, 4);
+    vuint8mf8_t v2 = __riscv_vle8_v_u8mf8_tu (v, in, 4);
+    vuint8mf8_t v3 = __riscv_vmaxu_vx_u8mf8 (v2, 5, 4);
+    vuint8mf8_t v4 = __riscv_vmaxu_vx_u8mf8_tu (v3, v2, 5, 4);
+    __riscv_vse8_v_u8mf8 (out, v4, 4);
+}
+
+/*
+** f5:
+**  ...
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**  ...
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vsetivli\tzero,4,e8,mf8,ta,ma
+**  ...
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**  ...
+**	vmaxu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmaxu\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f5 (void * in, void *out, int8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vuint8mf8_t v = __riscv_vle8_v_u8mf8 (in, 4);
+    vuint8mf8_t v2 = __riscv_vle8_v_u8mf8_m (mask, in, 4);
+    vuint8mf8_t v3 = __riscv_vmaxu_vx_u8mf8 (v2, 5, 4);
+    vuint8mf8_t v4 = __riscv_vmaxu_vx_u8mf8_m (mask, v3, 5, 4);
+    __riscv_vse8_v_u8mf8 (out, v4, 4);
+}
+
+/*
+** f6:
+**  ...
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**  ...
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vsetivli\tzero,4,e8,mf8,tu,mu
+**  ...
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vmaxu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmaxu\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f6 (void * in, void *out, int8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vuint8mf8_t v = __riscv_vle8_v_u8mf8 (in, 4);
+    vuint8mf8_t v2 = __riscv_vle8_v_u8mf8_tumu (mask, v, in, 4);
+    vuint8mf8_t v3 = __riscv_vmaxu_vx_u8mf8 (v2, 5, 4);
+    vuint8mf8_t v4 = __riscv_vmaxu_vx_u8mf8_tumu (mask, v3, v2, 5, 4);
+    __riscv_vse8_v_u8mf8 (out, v4, 4);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-24.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-24.c
new file mode 100644
index 00000000000..d8ed5b186a4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-24.c
@@ -0,0 +1,132 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f1:
+**	vsetivli\tzero,4,e32,m1,tu,ma
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vmaxu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmaxu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vse32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f1 (void * in, void *out, uint32_t x)
+{
+    vuint32m1_t v = __riscv_vle32_v_u32m1 (in, 4);
+    vuint32m1_t v2 = __riscv_vle32_v_u32m1_tu (v, in, 4);
+    vuint32m1_t v3 = __riscv_vmaxu_vx_u32m1 (v2, x, 4);
+    vuint32m1_t v4 = __riscv_vmaxu_vx_u32m1_tu (v3, v2, x, 4);
+    __riscv_vse32_v_u32m1 (out, v4, 4);
+}
+
+/*
+** f2:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e32,m1,ta,ma
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vmaxu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmaxu\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f2 (void * in, void *out, uint32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vuint32m1_t v = __riscv_vle32_v_u32m1 (in, 4);
+    vuint32m1_t v2 = __riscv_vle32_v_u32m1_m (mask, in, 4);
+    vuint32m1_t v3 = __riscv_vmaxu_vx_u32m1 (v2, x, 4);
+    vuint32m1_t v4 = __riscv_vmaxu_vx_u32m1_m (mask, v3, x, 4);
+    __riscv_vse32_v_u32m1 (out, v4, 4);
+}
+
+/*
+** f3:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e32,m1,tu,mu
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vmaxu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmaxu\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f3 (void * in, void *out, uint32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vuint32m1_t v = __riscv_vle32_v_u32m1 (in, 4);
+    vuint32m1_t v2 = __riscv_vle32_v_u32m1_tumu (mask, v, in, 4);
+    vuint32m1_t v3 = __riscv_vmaxu_vx_u32m1 (v2, x, 4);
+    vuint32m1_t v4 = __riscv_vmaxu_vx_u32m1_tumu (mask, v3, v2, x, 4);
+    __riscv_vse32_v_u32m1 (out, v4, 4);
+}
+
+/*
+** f4:
+**	vsetivli\tzero,4,e8,mf8,tu,ma
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vmaxu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmaxu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vse8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f4 (void * in, void *out, uint8_t x)
+{
+    vuint8mf8_t v = __riscv_vle8_v_u8mf8 (in, 4);
+    vuint8mf8_t v2 = __riscv_vle8_v_u8mf8_tu (v, in, 4);
+    vuint8mf8_t v3 = __riscv_vmaxu_vx_u8mf8 (v2, x, 4);
+    vuint8mf8_t v4 = __riscv_vmaxu_vx_u8mf8_tu (v3, v2, x, 4);
+    __riscv_vse8_v_u8mf8 (out, v4, 4);
+}
+
+/*
+** f5:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e8,mf8,ta,ma
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vmaxu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmaxu\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f5 (void * in, void *out, uint8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vuint8mf8_t v = __riscv_vle8_v_u8mf8 (in, 4);
+    vuint8mf8_t v2 = __riscv_vle8_v_u8mf8_m (mask, in, 4);
+    vuint8mf8_t v3 = __riscv_vmaxu_vx_u8mf8 (v2, x, 4);
+    vuint8mf8_t v4 = __riscv_vmaxu_vx_u8mf8_m (mask, v3, x, 4);
+    __riscv_vse8_v_u8mf8 (out, v4, 4);
+}
+
+/*
+** f6:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e8,mf8,tu,mu
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vmaxu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmaxu\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f6 (void * in, void *out, uint8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vuint8mf8_t v = __riscv_vle8_v_u8mf8 (in, 4);
+    vuint8mf8_t v2 = __riscv_vle8_v_u8mf8_tumu (mask, v, in, 4);
+    vuint8mf8_t v3 = __riscv_vmaxu_vx_u8mf8 (v2, x, 4);
+    vuint8mf8_t v4 = __riscv_vmaxu_vx_u8mf8_tumu (mask, v3, v2, x, 4);
+    __riscv_vse8_v_u8mf8 (out, v4, 4);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-25.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-25.c
new file mode 100644
index 00000000000..66891acc15a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-25.c
@@ -0,0 +1,160 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f1:
+**  ...
+**	vsetivli\tzero,4,e32,m1,tu,ma
+**  ...
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vminu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vminu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vse32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f1 (void * in, void *out, int32_t x)
+{
+    vuint32m1_t v = __riscv_vle32_v_u32m1 (in, 4);
+    vuint32m1_t v2 = __riscv_vle32_v_u32m1_tu (v, in, 4);
+    vuint32m1_t v3 = __riscv_vminu_vx_u32m1 (v2, 5, 4);
+    vuint32m1_t v4 = __riscv_vminu_vx_u32m1_tu (v3, v2, 5, 4);
+    __riscv_vse32_v_u32m1 (out, v4, 4);
+}
+
+/*
+** f2:
+**  ...
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**  ...
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vsetivli\tzero,4,e32,m1,ta,ma
+**  ...
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**  ...
+**	vminu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vminu\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f2 (void * in, void *out, int32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vuint32m1_t v = __riscv_vle32_v_u32m1 (in, 4);
+    vuint32m1_t v2 = __riscv_vle32_v_u32m1_m (mask, in, 4);
+    vuint32m1_t v3 = __riscv_vminu_vx_u32m1 (v2, 5, 4);
+    vuint32m1_t v4 = __riscv_vminu_vx_u32m1_m (mask, v3, 5, 4);
+    __riscv_vse32_v_u32m1 (out, v4, 4);
+}
+
+/*
+** f3:
+**  ...
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**  ...
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vsetivli\tzero,4,e32,m1,tu,mu
+**  ...
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**  ...
+**	vminu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vminu\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f3 (void * in, void *out, int32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vuint32m1_t v = __riscv_vle32_v_u32m1 (in, 4);
+    vuint32m1_t v2 = __riscv_vle32_v_u32m1_tumu (mask, v, in, 4);
+    vuint32m1_t v3 = __riscv_vminu_vx_u32m1 (v2, 5, 4);
+    vuint32m1_t v4 = __riscv_vminu_vx_u32m1_tumu (mask, v3, v2, 5, 4);
+    __riscv_vse32_v_u32m1 (out, v4, 4);
+}
+
+/*
+** f4:
+**  ...
+**	vsetivli\tzero,4,e8,mf8,tu,ma
+**  ...
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vminu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vminu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vse8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f4 (void * in, void *out, int8_t x)
+{
+    vuint8mf8_t v = __riscv_vle8_v_u8mf8 (in, 4);
+    vuint8mf8_t v2 = __riscv_vle8_v_u8mf8_tu (v, in, 4);
+    vuint8mf8_t v3 = __riscv_vminu_vx_u8mf8 (v2, 5, 4);
+    vuint8mf8_t v4 = __riscv_vminu_vx_u8mf8_tu (v3, v2, 5, 4);
+    __riscv_vse8_v_u8mf8 (out, v4, 4);
+}
+
+/*
+** f5:
+**  ...
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**  ...
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vsetivli\tzero,4,e8,mf8,ta,ma
+**  ...
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**  ...
+**	vminu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vminu\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f5 (void * in, void *out, int8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vuint8mf8_t v = __riscv_vle8_v_u8mf8 (in, 4);
+    vuint8mf8_t v2 = __riscv_vle8_v_u8mf8_m (mask, in, 4);
+    vuint8mf8_t v3 = __riscv_vminu_vx_u8mf8 (v2, 5, 4);
+    vuint8mf8_t v4 = __riscv_vminu_vx_u8mf8_m (mask, v3, 5, 4);
+    __riscv_vse8_v_u8mf8 (out, v4, 4);
+}
+
+/*
+** f6:
+**  ...
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**  ...
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vsetivli\tzero,4,e8,mf8,tu,mu
+**  ...
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vminu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vminu\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f6 (void * in, void *out, int8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vuint8mf8_t v = __riscv_vle8_v_u8mf8 (in, 4);
+    vuint8mf8_t v2 = __riscv_vle8_v_u8mf8_tumu (mask, v, in, 4);
+    vuint8mf8_t v3 = __riscv_vminu_vx_u8mf8 (v2, 5, 4);
+    vuint8mf8_t v4 = __riscv_vminu_vx_u8mf8_tumu (mask, v3, v2, 5, 4);
+    __riscv_vse8_v_u8mf8 (out, v4, 4);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-26.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-26.c
new file mode 100644
index 00000000000..b70a1360b3c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-26.c
@@ -0,0 +1,132 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f1:
+**	vsetivli\tzero,4,e32,m1,tu,ma
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vminu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vminu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vse32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f1 (void * in, void *out, uint32_t x)
+{
+    vuint32m1_t v = __riscv_vle32_v_u32m1 (in, 4);
+    vuint32m1_t v2 = __riscv_vle32_v_u32m1_tu (v, in, 4);
+    vuint32m1_t v3 = __riscv_vminu_vx_u32m1 (v2, x, 4);
+    vuint32m1_t v4 = __riscv_vminu_vx_u32m1_tu (v3, v2, x, 4);
+    __riscv_vse32_v_u32m1 (out, v4, 4);
+}
+
+/*
+** f2:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e32,m1,ta,ma
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vminu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vminu\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f2 (void * in, void *out, uint32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vuint32m1_t v = __riscv_vle32_v_u32m1 (in, 4);
+    vuint32m1_t v2 = __riscv_vle32_v_u32m1_m (mask, in, 4);
+    vuint32m1_t v3 = __riscv_vminu_vx_u32m1 (v2, x, 4);
+    vuint32m1_t v4 = __riscv_vminu_vx_u32m1_m (mask, v3, x, 4);
+    __riscv_vse32_v_u32m1 (out, v4, 4);
+}
+
+/*
+** f3:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e32,m1,tu,mu
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vminu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vminu\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f3 (void * in, void *out, uint32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vuint32m1_t v = __riscv_vle32_v_u32m1 (in, 4);
+    vuint32m1_t v2 = __riscv_vle32_v_u32m1_tumu (mask, v, in, 4);
+    vuint32m1_t v3 = __riscv_vminu_vx_u32m1 (v2, x, 4);
+    vuint32m1_t v4 = __riscv_vminu_vx_u32m1_tumu (mask, v3, v2, x, 4);
+    __riscv_vse32_v_u32m1 (out, v4, 4);
+}
+
+/*
+** f4:
+**	vsetivli\tzero,4,e8,mf8,tu,ma
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vminu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vminu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vse8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f4 (void * in, void *out, uint8_t x)
+{
+    vuint8mf8_t v = __riscv_vle8_v_u8mf8 (in, 4);
+    vuint8mf8_t v2 = __riscv_vle8_v_u8mf8_tu (v, in, 4);
+    vuint8mf8_t v3 = __riscv_vminu_vx_u8mf8 (v2, x, 4);
+    vuint8mf8_t v4 = __riscv_vminu_vx_u8mf8_tu (v3, v2, x, 4);
+    __riscv_vse8_v_u8mf8 (out, v4, 4);
+}
+
+/*
+** f5:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e8,mf8,ta,ma
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vminu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vminu\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f5 (void * in, void *out, uint8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vuint8mf8_t v = __riscv_vle8_v_u8mf8 (in, 4);
+    vuint8mf8_t v2 = __riscv_vle8_v_u8mf8_m (mask, in, 4);
+    vuint8mf8_t v3 = __riscv_vminu_vx_u8mf8 (v2, x, 4);
+    vuint8mf8_t v4 = __riscv_vminu_vx_u8mf8_m (mask, v3, x, 4);
+    __riscv_vse8_v_u8mf8 (out, v4, 4);
+}
+
+/*
+** f6:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e8,mf8,tu,mu
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vminu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vminu\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f6 (void * in, void *out, uint8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vuint8mf8_t v = __riscv_vle8_v_u8mf8 (in, 4);
+    vuint8mf8_t v2 = __riscv_vle8_v_u8mf8_tumu (mask, v, in, 4);
+    vuint8mf8_t v3 = __riscv_vminu_vx_u8mf8 (v2, x, 4);
+    vuint8mf8_t v4 = __riscv_vminu_vx_u8mf8_tumu (mask, v3, v2, x, 4);
+    __riscv_vse8_v_u8mf8 (out, v4, 4);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-27.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-27.c
new file mode 100644
index 00000000000..6f068296e5a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-27.c
@@ -0,0 +1,160 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f1:
+**  ...
+**	vsetivli\tzero,4,e32,m1,tu,ma
+**  ...
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vdiv\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vdiv\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vse32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f1 (void * in, void *out, int32_t x)
+{
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_tu (v, in, 4);
+    vint32m1_t v3 = __riscv_vdiv_vx_i32m1 (v2, 5, 4);
+    vint32m1_t v4 = __riscv_vdiv_vx_i32m1_tu (v3, v2, 5, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f2:
+**  ...
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**  ...
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vsetivli\tzero,4,e32,m1,ta,ma
+**  ...
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**  ...
+**	vdiv\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vdiv\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f2 (void * in, void *out, int32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_m (mask, in, 4);
+    vint32m1_t v3 = __riscv_vdiv_vx_i32m1 (v2, 5, 4);
+    vint32m1_t v4 = __riscv_vdiv_vx_i32m1_m (mask, v3, 5, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f3:
+**  ...
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**  ...
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vsetivli\tzero,4,e32,m1,tu,mu
+**  ...
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**  ...
+**	vdiv\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vdiv\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f3 (void * in, void *out, int32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_tumu (mask, v, in, 4);
+    vint32m1_t v3 = __riscv_vdiv_vx_i32m1 (v2, 5, 4);
+    vint32m1_t v4 = __riscv_vdiv_vx_i32m1_tumu (mask, v3, v2, 5, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f4:
+**  ...
+**	vsetivli\tzero,4,e8,mf8,tu,ma
+**  ...
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vdiv\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vdiv\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vse8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f4 (void * in, void *out, int8_t x)
+{
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tu (v, in, 4);
+    vint8mf8_t v3 = __riscv_vdiv_vx_i8mf8 (v2, 5, 4);
+    vint8mf8_t v4 = __riscv_vdiv_vx_i8mf8_tu (v3, v2, 5, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
+
+/*
+** f5:
+**  ...
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**  ...
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vsetivli\tzero,4,e8,mf8,ta,ma
+**  ...
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**  ...
+**	vdiv\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vdiv\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f5 (void * in, void *out, int8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_m (mask, in, 4);
+    vint8mf8_t v3 = __riscv_vdiv_vx_i8mf8 (v2, 5, 4);
+    vint8mf8_t v4 = __riscv_vdiv_vx_i8mf8_m (mask, v3, 5, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
+
+/*
+** f6:
+**  ...
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**  ...
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vsetivli\tzero,4,e8,mf8,tu,mu
+**  ...
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vdiv\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vdiv\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f6 (void * in, void *out, int8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tumu (mask, v, in, 4);
+    vint8mf8_t v3 = __riscv_vdiv_vx_i8mf8 (v2, 5, 4);
+    vint8mf8_t v4 = __riscv_vdiv_vx_i8mf8_tumu (mask, v3, v2, 5, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-28.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-28.c
new file mode 100644
index 00000000000..a239a3380bc
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-28.c
@@ -0,0 +1,132 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f1:
+**	vsetivli\tzero,4,e32,m1,tu,ma
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vdiv\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vdiv\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vse32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f1 (void * in, void *out, int32_t x)
+{
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_tu (v, in, 4);
+    vint32m1_t v3 = __riscv_vdiv_vx_i32m1 (v2, x, 4);
+    vint32m1_t v4 = __riscv_vdiv_vx_i32m1_tu (v3, v2, x, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f2:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e32,m1,ta,ma
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vdiv\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vdiv\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f2 (void * in, void *out, int32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_m (mask, in, 4);
+    vint32m1_t v3 = __riscv_vdiv_vx_i32m1 (v2, x, 4);
+    vint32m1_t v4 = __riscv_vdiv_vx_i32m1_m (mask, v3, x, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f3:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e32,m1,tu,mu
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vdiv\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vdiv\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f3 (void * in, void *out, int32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_tumu (mask, v, in, 4);
+    vint32m1_t v3 = __riscv_vdiv_vx_i32m1 (v2, x, 4);
+    vint32m1_t v4 = __riscv_vdiv_vx_i32m1_tumu (mask, v3, v2, x, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f4:
+**	vsetivli\tzero,4,e8,mf8,tu,ma
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vdiv\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vdiv\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vse8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f4 (void * in, void *out, int8_t x)
+{
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tu (v, in, 4);
+    vint8mf8_t v3 = __riscv_vdiv_vx_i8mf8 (v2, x, 4);
+    vint8mf8_t v4 = __riscv_vdiv_vx_i8mf8_tu (v3, v2, x, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
+
+/*
+** f5:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e8,mf8,ta,ma
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vdiv\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vdiv\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f5 (void * in, void *out, int8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_m (mask, in, 4);
+    vint8mf8_t v3 = __riscv_vdiv_vx_i8mf8 (v2, x, 4);
+    vint8mf8_t v4 = __riscv_vdiv_vx_i8mf8_m (mask, v3, x, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
+
+/*
+** f6:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e8,mf8,tu,mu
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vdiv\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vdiv\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f6 (void * in, void *out, int8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tumu (mask, v, in, 4);
+    vint8mf8_t v3 = __riscv_vdiv_vx_i8mf8 (v2, x, 4);
+    vint8mf8_t v4 = __riscv_vdiv_vx_i8mf8_tumu (mask, v3, v2, x, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-29.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-29.c
new file mode 100644
index 00000000000..9424a46457e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-29.c
@@ -0,0 +1,160 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f1:
+**  ...
+**	vsetivli\tzero,4,e32,m1,tu,ma
+**  ...
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vdivu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vdivu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vse32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f1 (void * in, void *out, int32_t x)
+{
+    vuint32m1_t v = __riscv_vle32_v_u32m1 (in, 4);
+    vuint32m1_t v2 = __riscv_vle32_v_u32m1_tu (v, in, 4);
+    vuint32m1_t v3 = __riscv_vdivu_vx_u32m1 (v2, 5, 4);
+    vuint32m1_t v4 = __riscv_vdivu_vx_u32m1_tu (v3, v2, 5, 4);
+    __riscv_vse32_v_u32m1 (out, v4, 4);
+}
+
+/*
+** f2:
+**  ...
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**  ...
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vsetivli\tzero,4,e32,m1,ta,ma
+**  ...
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**  ...
+**	vdivu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vdivu\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f2 (void * in, void *out, int32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vuint32m1_t v = __riscv_vle32_v_u32m1 (in, 4);
+    vuint32m1_t v2 = __riscv_vle32_v_u32m1_m (mask, in, 4);
+    vuint32m1_t v3 = __riscv_vdivu_vx_u32m1 (v2, 5, 4);
+    vuint32m1_t v4 = __riscv_vdivu_vx_u32m1_m (mask, v3, 5, 4);
+    __riscv_vse32_v_u32m1 (out, v4, 4);
+}
+
+/*
+** f3:
+**  ...
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**  ...
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vsetivli\tzero,4,e32,m1,tu,mu
+**  ...
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**  ...
+**	vdivu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vdivu\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f3 (void * in, void *out, int32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vuint32m1_t v = __riscv_vle32_v_u32m1 (in, 4);
+    vuint32m1_t v2 = __riscv_vle32_v_u32m1_tumu (mask, v, in, 4);
+    vuint32m1_t v3 = __riscv_vdivu_vx_u32m1 (v2, 5, 4);
+    vuint32m1_t v4 = __riscv_vdivu_vx_u32m1_tumu (mask, v3, v2, 5, 4);
+    __riscv_vse32_v_u32m1 (out, v4, 4);
+}
+
+/*
+** f4:
+**  ...
+**	vsetivli\tzero,4,e8,mf8,tu,ma
+**  ...
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vdivu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vdivu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vse8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f4 (void * in, void *out, int8_t x)
+{
+    vuint8mf8_t v = __riscv_vle8_v_u8mf8 (in, 4);
+    vuint8mf8_t v2 = __riscv_vle8_v_u8mf8_tu (v, in, 4);
+    vuint8mf8_t v3 = __riscv_vdivu_vx_u8mf8 (v2, 5, 4);
+    vuint8mf8_t v4 = __riscv_vdivu_vx_u8mf8_tu (v3, v2, 5, 4);
+    __riscv_vse8_v_u8mf8 (out, v4, 4);
+}
+
+/*
+** f5:
+**  ...
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**  ...
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vsetivli\tzero,4,e8,mf8,ta,ma
+**  ...
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**  ...
+**	vdivu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vdivu\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f5 (void * in, void *out, int8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vuint8mf8_t v = __riscv_vle8_v_u8mf8 (in, 4);
+    vuint8mf8_t v2 = __riscv_vle8_v_u8mf8_m (mask, in, 4);
+    vuint8mf8_t v3 = __riscv_vdivu_vx_u8mf8 (v2, 5, 4);
+    vuint8mf8_t v4 = __riscv_vdivu_vx_u8mf8_m (mask, v3, 5, 4);
+    __riscv_vse8_v_u8mf8 (out, v4, 4);
+}
+
+/*
+** f6:
+**  ...
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**  ...
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vsetivli\tzero,4,e8,mf8,tu,mu
+**  ...
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vdivu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vdivu\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f6 (void * in, void *out, int8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vuint8mf8_t v = __riscv_vle8_v_u8mf8 (in, 4);
+    vuint8mf8_t v2 = __riscv_vle8_v_u8mf8_tumu (mask, v, in, 4);
+    vuint8mf8_t v3 = __riscv_vdivu_vx_u8mf8 (v2, 5, 4);
+    vuint8mf8_t v4 = __riscv_vdivu_vx_u8mf8_tumu (mask, v3, v2, 5, 4);
+    __riscv_vse8_v_u8mf8 (out, v4, 4);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-3.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-3.c
new file mode 100644
index 00000000000..1da0cb6e5ae
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-3.c
@@ -0,0 +1,132 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f1:
+**	vsetivli\tzero,4,e32,m1,tu,ma
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vadd\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**	vadd\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**	vse32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f1 (void * in, void *out, int32_t x)
+{
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_tu (v, in, 4);
+    vint32m1_t v3 = __riscv_vadd_vx_i32m1 (v2, 15, 4);
+    vint32m1_t v4 = __riscv_vadd_vx_i32m1_tu (v3, v2, 15, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f2:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e32,m1,ta,ma
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vadd\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**	vadd\.vi\tv[1-9][0-9]?,\s*v[0-9]+,\s*15,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f2 (void * in, void *out, int32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_m (mask, in, 4);
+    vint32m1_t v3 = __riscv_vadd_vx_i32m1 (v2, 15, 4);
+    vint32m1_t v4 = __riscv_vadd_vx_i32m1_m (mask, v3, 15, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f3:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e32,m1,tu,mu
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vadd\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**	vadd\.vi\tv[1-9][0-9]?,\s*v[0-9]+,\s*15,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f3 (void * in, void *out, int32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_tumu (mask, v, in, 4);
+    vint32m1_t v3 = __riscv_vadd_vx_i32m1 (v2, 15, 4);
+    vint32m1_t v4 = __riscv_vadd_vx_i32m1_tumu (mask, v3, v2, 15, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f4:
+**	vsetivli\tzero,4,e8,mf8,tu,ma
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vadd\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**	vadd\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**	vse8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f4 (void * in, void *out, int8_t x)
+{
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tu (v, in, 4);
+    vint8mf8_t v3 = __riscv_vadd_vx_i8mf8 (v2, 15, 4);
+    vint8mf8_t v4 = __riscv_vadd_vx_i8mf8_tu (v3, v2, 15, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
+
+/*
+** f5:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e8,mf8,ta,ma
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vadd\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**	vadd\.vi\tv[1-9][0-9]?,\s*v[0-9]+,\s*15,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f5 (void * in, void *out, int8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_m (mask, in, 4);
+    vint8mf8_t v3 = __riscv_vadd_vx_i8mf8 (v2, 15, 4);
+    vint8mf8_t v4 = __riscv_vadd_vx_i8mf8_m (mask, v3, 15, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
+
+/*
+** f6:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e8,mf8,tu,mu
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vadd\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**	vadd\.vi\tv[1-9][0-9]?,\s*v[0-9]+,\s*15,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f6 (void * in, void *out, int8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tumu (mask, v, in, 4);
+    vint8mf8_t v3 = __riscv_vadd_vx_i8mf8 (v2, 15, 4);
+    vint8mf8_t v4 = __riscv_vadd_vx_i8mf8_tumu (mask, v3, v2, 15, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-30.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-30.c
new file mode 100644
index 00000000000..272c0eab273
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-30.c
@@ -0,0 +1,132 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f1:
+**	vsetivli\tzero,4,e32,m1,tu,ma
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vdivu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vdivu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vse32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f1 (void * in, void *out, uint32_t x)
+{
+    vuint32m1_t v = __riscv_vle32_v_u32m1 (in, 4);
+    vuint32m1_t v2 = __riscv_vle32_v_u32m1_tu (v, in, 4);
+    vuint32m1_t v3 = __riscv_vdivu_vx_u32m1 (v2, x, 4);
+    vuint32m1_t v4 = __riscv_vdivu_vx_u32m1_tu (v3, v2, x, 4);
+    __riscv_vse32_v_u32m1 (out, v4, 4);
+}
+
+/*
+** f2:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e32,m1,ta,ma
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vdivu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vdivu\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f2 (void * in, void *out, uint32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vuint32m1_t v = __riscv_vle32_v_u32m1 (in, 4);
+    vuint32m1_t v2 = __riscv_vle32_v_u32m1_m (mask, in, 4);
+    vuint32m1_t v3 = __riscv_vdivu_vx_u32m1 (v2, x, 4);
+    vuint32m1_t v4 = __riscv_vdivu_vx_u32m1_m (mask, v3, x, 4);
+    __riscv_vse32_v_u32m1 (out, v4, 4);
+}
+
+/*
+** f3:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e32,m1,tu,mu
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vdivu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vdivu\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f3 (void * in, void *out, uint32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vuint32m1_t v = __riscv_vle32_v_u32m1 (in, 4);
+    vuint32m1_t v2 = __riscv_vle32_v_u32m1_tumu (mask, v, in, 4);
+    vuint32m1_t v3 = __riscv_vdivu_vx_u32m1 (v2, x, 4);
+    vuint32m1_t v4 = __riscv_vdivu_vx_u32m1_tumu (mask, v3, v2, x, 4);
+    __riscv_vse32_v_u32m1 (out, v4, 4);
+}
+
+/*
+** f4:
+**	vsetivli\tzero,4,e8,mf8,tu,ma
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vdivu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vdivu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vse8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f4 (void * in, void *out, uint8_t x)
+{
+    vuint8mf8_t v = __riscv_vle8_v_u8mf8 (in, 4);
+    vuint8mf8_t v2 = __riscv_vle8_v_u8mf8_tu (v, in, 4);
+    vuint8mf8_t v3 = __riscv_vdivu_vx_u8mf8 (v2, x, 4);
+    vuint8mf8_t v4 = __riscv_vdivu_vx_u8mf8_tu (v3, v2, x, 4);
+    __riscv_vse8_v_u8mf8 (out, v4, 4);
+}
+
+/*
+** f5:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e8,mf8,ta,ma
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vdivu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vdivu\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f5 (void * in, void *out, uint8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vuint8mf8_t v = __riscv_vle8_v_u8mf8 (in, 4);
+    vuint8mf8_t v2 = __riscv_vle8_v_u8mf8_m (mask, in, 4);
+    vuint8mf8_t v3 = __riscv_vdivu_vx_u8mf8 (v2, x, 4);
+    vuint8mf8_t v4 = __riscv_vdivu_vx_u8mf8_m (mask, v3, x, 4);
+    __riscv_vse8_v_u8mf8 (out, v4, 4);
+}
+
+/*
+** f6:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e8,mf8,tu,mu
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vdivu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vdivu\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f6 (void * in, void *out, uint8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vuint8mf8_t v = __riscv_vle8_v_u8mf8 (in, 4);
+    vuint8mf8_t v2 = __riscv_vle8_v_u8mf8_tumu (mask, v, in, 4);
+    vuint8mf8_t v3 = __riscv_vdivu_vx_u8mf8 (v2, x, 4);
+    vuint8mf8_t v4 = __riscv_vdivu_vx_u8mf8_tumu (mask, v3, v2, x, 4);
+    __riscv_vse8_v_u8mf8 (out, v4, 4);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-31.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-31.c
new file mode 100644
index 00000000000..9424a46457e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-31.c
@@ -0,0 +1,160 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f1:
+**  ...
+**	vsetivli\tzero,4,e32,m1,tu,ma
+**  ...
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vdivu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vdivu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vse32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f1 (void * in, void *out, int32_t x)
+{
+    vuint32m1_t v = __riscv_vle32_v_u32m1 (in, 4);
+    vuint32m1_t v2 = __riscv_vle32_v_u32m1_tu (v, in, 4);
+    vuint32m1_t v3 = __riscv_vdivu_vx_u32m1 (v2, 5, 4);
+    vuint32m1_t v4 = __riscv_vdivu_vx_u32m1_tu (v3, v2, 5, 4);
+    __riscv_vse32_v_u32m1 (out, v4, 4);
+}
+
+/*
+** f2:
+**  ...
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**  ...
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vsetivli\tzero,4,e32,m1,ta,ma
+**  ...
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**  ...
+**	vdivu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vdivu\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f2 (void * in, void *out, int32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vuint32m1_t v = __riscv_vle32_v_u32m1 (in, 4);
+    vuint32m1_t v2 = __riscv_vle32_v_u32m1_m (mask, in, 4);
+    vuint32m1_t v3 = __riscv_vdivu_vx_u32m1 (v2, 5, 4);
+    vuint32m1_t v4 = __riscv_vdivu_vx_u32m1_m (mask, v3, 5, 4);
+    __riscv_vse32_v_u32m1 (out, v4, 4);
+}
+
+/*
+** f3:
+**  ...
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**  ...
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vsetivli\tzero,4,e32,m1,tu,mu
+**  ...
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**  ...
+**	vdivu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vdivu\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f3 (void * in, void *out, int32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vuint32m1_t v = __riscv_vle32_v_u32m1 (in, 4);
+    vuint32m1_t v2 = __riscv_vle32_v_u32m1_tumu (mask, v, in, 4);
+    vuint32m1_t v3 = __riscv_vdivu_vx_u32m1 (v2, 5, 4);
+    vuint32m1_t v4 = __riscv_vdivu_vx_u32m1_tumu (mask, v3, v2, 5, 4);
+    __riscv_vse32_v_u32m1 (out, v4, 4);
+}
+
+/*
+** f4:
+**  ...
+**	vsetivli\tzero,4,e8,mf8,tu,ma
+**  ...
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vdivu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vdivu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vse8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f4 (void * in, void *out, int8_t x)
+{
+    vuint8mf8_t v = __riscv_vle8_v_u8mf8 (in, 4);
+    vuint8mf8_t v2 = __riscv_vle8_v_u8mf8_tu (v, in, 4);
+    vuint8mf8_t v3 = __riscv_vdivu_vx_u8mf8 (v2, 5, 4);
+    vuint8mf8_t v4 = __riscv_vdivu_vx_u8mf8_tu (v3, v2, 5, 4);
+    __riscv_vse8_v_u8mf8 (out, v4, 4);
+}
+
+/*
+** f5:
+**  ...
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**  ...
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vsetivli\tzero,4,e8,mf8,ta,ma
+**  ...
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**  ...
+**	vdivu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vdivu\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f5 (void * in, void *out, int8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vuint8mf8_t v = __riscv_vle8_v_u8mf8 (in, 4);
+    vuint8mf8_t v2 = __riscv_vle8_v_u8mf8_m (mask, in, 4);
+    vuint8mf8_t v3 = __riscv_vdivu_vx_u8mf8 (v2, 5, 4);
+    vuint8mf8_t v4 = __riscv_vdivu_vx_u8mf8_m (mask, v3, 5, 4);
+    __riscv_vse8_v_u8mf8 (out, v4, 4);
+}
+
+/*
+** f6:
+**  ...
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**  ...
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vsetivli\tzero,4,e8,mf8,tu,mu
+**  ...
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vdivu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vdivu\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f6 (void * in, void *out, int8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vuint8mf8_t v = __riscv_vle8_v_u8mf8 (in, 4);
+    vuint8mf8_t v2 = __riscv_vle8_v_u8mf8_tumu (mask, v, in, 4);
+    vuint8mf8_t v3 = __riscv_vdivu_vx_u8mf8 (v2, 5, 4);
+    vuint8mf8_t v4 = __riscv_vdivu_vx_u8mf8_tumu (mask, v3, v2, 5, 4);
+    __riscv_vse8_v_u8mf8 (out, v4, 4);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-32.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-32.c
new file mode 100644
index 00000000000..272c0eab273
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-32.c
@@ -0,0 +1,132 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f1:
+**	vsetivli\tzero,4,e32,m1,tu,ma
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vdivu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vdivu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vse32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f1 (void * in, void *out, uint32_t x)
+{
+    vuint32m1_t v = __riscv_vle32_v_u32m1 (in, 4);
+    vuint32m1_t v2 = __riscv_vle32_v_u32m1_tu (v, in, 4);
+    vuint32m1_t v3 = __riscv_vdivu_vx_u32m1 (v2, x, 4);
+    vuint32m1_t v4 = __riscv_vdivu_vx_u32m1_tu (v3, v2, x, 4);
+    __riscv_vse32_v_u32m1 (out, v4, 4);
+}
+
+/*
+** f2:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e32,m1,ta,ma
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vdivu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vdivu\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f2 (void * in, void *out, uint32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vuint32m1_t v = __riscv_vle32_v_u32m1 (in, 4);
+    vuint32m1_t v2 = __riscv_vle32_v_u32m1_m (mask, in, 4);
+    vuint32m1_t v3 = __riscv_vdivu_vx_u32m1 (v2, x, 4);
+    vuint32m1_t v4 = __riscv_vdivu_vx_u32m1_m (mask, v3, x, 4);
+    __riscv_vse32_v_u32m1 (out, v4, 4);
+}
+
+/*
+** f3:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e32,m1,tu,mu
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vdivu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vdivu\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f3 (void * in, void *out, uint32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vuint32m1_t v = __riscv_vle32_v_u32m1 (in, 4);
+    vuint32m1_t v2 = __riscv_vle32_v_u32m1_tumu (mask, v, in, 4);
+    vuint32m1_t v3 = __riscv_vdivu_vx_u32m1 (v2, x, 4);
+    vuint32m1_t v4 = __riscv_vdivu_vx_u32m1_tumu (mask, v3, v2, x, 4);
+    __riscv_vse32_v_u32m1 (out, v4, 4);
+}
+
+/*
+** f4:
+**	vsetivli\tzero,4,e8,mf8,tu,ma
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vdivu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vdivu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vse8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f4 (void * in, void *out, uint8_t x)
+{
+    vuint8mf8_t v = __riscv_vle8_v_u8mf8 (in, 4);
+    vuint8mf8_t v2 = __riscv_vle8_v_u8mf8_tu (v, in, 4);
+    vuint8mf8_t v3 = __riscv_vdivu_vx_u8mf8 (v2, x, 4);
+    vuint8mf8_t v4 = __riscv_vdivu_vx_u8mf8_tu (v3, v2, x, 4);
+    __riscv_vse8_v_u8mf8 (out, v4, 4);
+}
+
+/*
+** f5:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e8,mf8,ta,ma
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vdivu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vdivu\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f5 (void * in, void *out, uint8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vuint8mf8_t v = __riscv_vle8_v_u8mf8 (in, 4);
+    vuint8mf8_t v2 = __riscv_vle8_v_u8mf8_m (mask, in, 4);
+    vuint8mf8_t v3 = __riscv_vdivu_vx_u8mf8 (v2, x, 4);
+    vuint8mf8_t v4 = __riscv_vdivu_vx_u8mf8_m (mask, v3, x, 4);
+    __riscv_vse8_v_u8mf8 (out, v4, 4);
+}
+
+/*
+** f6:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e8,mf8,tu,mu
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vdivu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vdivu\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f6 (void * in, void *out, uint8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vuint8mf8_t v = __riscv_vle8_v_u8mf8 (in, 4);
+    vuint8mf8_t v2 = __riscv_vle8_v_u8mf8_tumu (mask, v, in, 4);
+    vuint8mf8_t v3 = __riscv_vdivu_vx_u8mf8 (v2, x, 4);
+    vuint8mf8_t v4 = __riscv_vdivu_vx_u8mf8_tumu (mask, v3, v2, x, 4);
+    __riscv_vse8_v_u8mf8 (out, v4, 4);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-33.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-33.c
new file mode 100644
index 00000000000..6f2bca4b58f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-33.c
@@ -0,0 +1,160 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f1:
+**  ...
+**	vsetivli\tzero,4,e32,m1,tu,ma
+**  ...
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vremu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vremu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vse32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f1 (void * in, void *out, int32_t x)
+{
+    vuint32m1_t v = __riscv_vle32_v_u32m1 (in, 4);
+    vuint32m1_t v2 = __riscv_vle32_v_u32m1_tu (v, in, 4);
+    vuint32m1_t v3 = __riscv_vremu_vx_u32m1 (v2, 5, 4);
+    vuint32m1_t v4 = __riscv_vremu_vx_u32m1_tu (v3, v2, 5, 4);
+    __riscv_vse32_v_u32m1 (out, v4, 4);
+}
+
+/*
+** f2:
+**  ...
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**  ...
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vsetivli\tzero,4,e32,m1,ta,ma
+**  ...
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**  ...
+**	vremu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vremu\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f2 (void * in, void *out, int32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vuint32m1_t v = __riscv_vle32_v_u32m1 (in, 4);
+    vuint32m1_t v2 = __riscv_vle32_v_u32m1_m (mask, in, 4);
+    vuint32m1_t v3 = __riscv_vremu_vx_u32m1 (v2, 5, 4);
+    vuint32m1_t v4 = __riscv_vremu_vx_u32m1_m (mask, v3, 5, 4);
+    __riscv_vse32_v_u32m1 (out, v4, 4);
+}
+
+/*
+** f3:
+**  ...
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**  ...
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vsetivli\tzero,4,e32,m1,tu,mu
+**  ...
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**  ...
+**	vremu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vremu\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f3 (void * in, void *out, int32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vuint32m1_t v = __riscv_vle32_v_u32m1 (in, 4);
+    vuint32m1_t v2 = __riscv_vle32_v_u32m1_tumu (mask, v, in, 4);
+    vuint32m1_t v3 = __riscv_vremu_vx_u32m1 (v2, 5, 4);
+    vuint32m1_t v4 = __riscv_vremu_vx_u32m1_tumu (mask, v3, v2, 5, 4);
+    __riscv_vse32_v_u32m1 (out, v4, 4);
+}
+
+/*
+** f4:
+**  ...
+**	vsetivli\tzero,4,e8,mf8,tu,ma
+**  ...
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vremu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vremu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vse8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f4 (void * in, void *out, int8_t x)
+{
+    vuint8mf8_t v = __riscv_vle8_v_u8mf8 (in, 4);
+    vuint8mf8_t v2 = __riscv_vle8_v_u8mf8_tu (v, in, 4);
+    vuint8mf8_t v3 = __riscv_vremu_vx_u8mf8 (v2, 5, 4);
+    vuint8mf8_t v4 = __riscv_vremu_vx_u8mf8_tu (v3, v2, 5, 4);
+    __riscv_vse8_v_u8mf8 (out, v4, 4);
+}
+
+/*
+** f5:
+**  ...
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**  ...
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vsetivli\tzero,4,e8,mf8,ta,ma
+**  ...
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**  ...
+**	vremu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vremu\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f5 (void * in, void *out, int8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vuint8mf8_t v = __riscv_vle8_v_u8mf8 (in, 4);
+    vuint8mf8_t v2 = __riscv_vle8_v_u8mf8_m (mask, in, 4);
+    vuint8mf8_t v3 = __riscv_vremu_vx_u8mf8 (v2, 5, 4);
+    vuint8mf8_t v4 = __riscv_vremu_vx_u8mf8_m (mask, v3, 5, 4);
+    __riscv_vse8_v_u8mf8 (out, v4, 4);
+}
+
+/*
+** f6:
+**  ...
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**  ...
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vsetivli\tzero,4,e8,mf8,tu,mu
+**  ...
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vremu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vremu\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f6 (void * in, void *out, int8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vuint8mf8_t v = __riscv_vle8_v_u8mf8 (in, 4);
+    vuint8mf8_t v2 = __riscv_vle8_v_u8mf8_tumu (mask, v, in, 4);
+    vuint8mf8_t v3 = __riscv_vremu_vx_u8mf8 (v2, 5, 4);
+    vuint8mf8_t v4 = __riscv_vremu_vx_u8mf8_tumu (mask, v3, v2, 5, 4);
+    __riscv_vse8_v_u8mf8 (out, v4, 4);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-34.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-34.c
new file mode 100644
index 00000000000..45015d77d2d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-34.c
@@ -0,0 +1,132 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f1:
+**	vsetivli\tzero,4,e32,m1,tu,ma
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vremu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vremu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vse32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f1 (void * in, void *out, uint32_t x)
+{
+    vuint32m1_t v = __riscv_vle32_v_u32m1 (in, 4);
+    vuint32m1_t v2 = __riscv_vle32_v_u32m1_tu (v, in, 4);
+    vuint32m1_t v3 = __riscv_vremu_vx_u32m1 (v2, x, 4);
+    vuint32m1_t v4 = __riscv_vremu_vx_u32m1_tu (v3, v2, x, 4);
+    __riscv_vse32_v_u32m1 (out, v4, 4);
+}
+
+/*
+** f2:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e32,m1,ta,ma
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vremu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vremu\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f2 (void * in, void *out, uint32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vuint32m1_t v = __riscv_vle32_v_u32m1 (in, 4);
+    vuint32m1_t v2 = __riscv_vle32_v_u32m1_m (mask, in, 4);
+    vuint32m1_t v3 = __riscv_vremu_vx_u32m1 (v2, x, 4);
+    vuint32m1_t v4 = __riscv_vremu_vx_u32m1_m (mask, v3, x, 4);
+    __riscv_vse32_v_u32m1 (out, v4, 4);
+}
+
+/*
+** f3:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e32,m1,tu,mu
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vremu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vremu\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f3 (void * in, void *out, uint32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vuint32m1_t v = __riscv_vle32_v_u32m1 (in, 4);
+    vuint32m1_t v2 = __riscv_vle32_v_u32m1_tumu (mask, v, in, 4);
+    vuint32m1_t v3 = __riscv_vremu_vx_u32m1 (v2, x, 4);
+    vuint32m1_t v4 = __riscv_vremu_vx_u32m1_tumu (mask, v3, v2, x, 4);
+    __riscv_vse32_v_u32m1 (out, v4, 4);
+}
+
+/*
+** f4:
+**	vsetivli\tzero,4,e8,mf8,tu,ma
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vremu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vremu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vse8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f4 (void * in, void *out, uint8_t x)
+{
+    vuint8mf8_t v = __riscv_vle8_v_u8mf8 (in, 4);
+    vuint8mf8_t v2 = __riscv_vle8_v_u8mf8_tu (v, in, 4);
+    vuint8mf8_t v3 = __riscv_vremu_vx_u8mf8 (v2, x, 4);
+    vuint8mf8_t v4 = __riscv_vremu_vx_u8mf8_tu (v3, v2, x, 4);
+    __riscv_vse8_v_u8mf8 (out, v4, 4);
+}
+
+/*
+** f5:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e8,mf8,ta,ma
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vremu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vremu\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f5 (void * in, void *out, uint8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vuint8mf8_t v = __riscv_vle8_v_u8mf8 (in, 4);
+    vuint8mf8_t v2 = __riscv_vle8_v_u8mf8_m (mask, in, 4);
+    vuint8mf8_t v3 = __riscv_vremu_vx_u8mf8 (v2, x, 4);
+    vuint8mf8_t v4 = __riscv_vremu_vx_u8mf8_m (mask, v3, x, 4);
+    __riscv_vse8_v_u8mf8 (out, v4, 4);
+}
+
+/*
+** f6:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e8,mf8,tu,mu
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vremu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vremu\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f6 (void * in, void *out, uint8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vuint8mf8_t v = __riscv_vle8_v_u8mf8 (in, 4);
+    vuint8mf8_t v2 = __riscv_vle8_v_u8mf8_tumu (mask, v, in, 4);
+    vuint8mf8_t v3 = __riscv_vremu_vx_u8mf8 (v2, x, 4);
+    vuint8mf8_t v4 = __riscv_vremu_vx_u8mf8_tumu (mask, v3, v2, x, 4);
+    __riscv_vse8_v_u8mf8 (out, v4, 4);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-35.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-35.c
new file mode 100644
index 00000000000..6f2bca4b58f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-35.c
@@ -0,0 +1,160 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f1:
+**  ...
+**	vsetivli\tzero,4,e32,m1,tu,ma
+**  ...
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vremu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vremu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vse32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f1 (void * in, void *out, int32_t x)
+{
+    vuint32m1_t v = __riscv_vle32_v_u32m1 (in, 4);
+    vuint32m1_t v2 = __riscv_vle32_v_u32m1_tu (v, in, 4);
+    vuint32m1_t v3 = __riscv_vremu_vx_u32m1 (v2, 5, 4);
+    vuint32m1_t v4 = __riscv_vremu_vx_u32m1_tu (v3, v2, 5, 4);
+    __riscv_vse32_v_u32m1 (out, v4, 4);
+}
+
+/*
+** f2:
+**  ...
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**  ...
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vsetivli\tzero,4,e32,m1,ta,ma
+**  ...
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**  ...
+**	vremu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vremu\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f2 (void * in, void *out, int32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vuint32m1_t v = __riscv_vle32_v_u32m1 (in, 4);
+    vuint32m1_t v2 = __riscv_vle32_v_u32m1_m (mask, in, 4);
+    vuint32m1_t v3 = __riscv_vremu_vx_u32m1 (v2, 5, 4);
+    vuint32m1_t v4 = __riscv_vremu_vx_u32m1_m (mask, v3, 5, 4);
+    __riscv_vse32_v_u32m1 (out, v4, 4);
+}
+
+/*
+** f3:
+**  ...
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**  ...
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vsetivli\tzero,4,e32,m1,tu,mu
+**  ...
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**  ...
+**	vremu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vremu\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f3 (void * in, void *out, int32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vuint32m1_t v = __riscv_vle32_v_u32m1 (in, 4);
+    vuint32m1_t v2 = __riscv_vle32_v_u32m1_tumu (mask, v, in, 4);
+    vuint32m1_t v3 = __riscv_vremu_vx_u32m1 (v2, 5, 4);
+    vuint32m1_t v4 = __riscv_vremu_vx_u32m1_tumu (mask, v3, v2, 5, 4);
+    __riscv_vse32_v_u32m1 (out, v4, 4);
+}
+
+/*
+** f4:
+**  ...
+**	vsetivli\tzero,4,e8,mf8,tu,ma
+**  ...
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vremu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vremu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vse8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f4 (void * in, void *out, int8_t x)
+{
+    vuint8mf8_t v = __riscv_vle8_v_u8mf8 (in, 4);
+    vuint8mf8_t v2 = __riscv_vle8_v_u8mf8_tu (v, in, 4);
+    vuint8mf8_t v3 = __riscv_vremu_vx_u8mf8 (v2, 5, 4);
+    vuint8mf8_t v4 = __riscv_vremu_vx_u8mf8_tu (v3, v2, 5, 4);
+    __riscv_vse8_v_u8mf8 (out, v4, 4);
+}
+
+/*
+** f5:
+**  ...
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**  ...
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vsetivli\tzero,4,e8,mf8,ta,ma
+**  ...
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**  ...
+**	vremu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vremu\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f5 (void * in, void *out, int8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vuint8mf8_t v = __riscv_vle8_v_u8mf8 (in, 4);
+    vuint8mf8_t v2 = __riscv_vle8_v_u8mf8_m (mask, in, 4);
+    vuint8mf8_t v3 = __riscv_vremu_vx_u8mf8 (v2, 5, 4);
+    vuint8mf8_t v4 = __riscv_vremu_vx_u8mf8_m (mask, v3, 5, 4);
+    __riscv_vse8_v_u8mf8 (out, v4, 4);
+}
+
+/*
+** f6:
+**  ...
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**  ...
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vsetivli\tzero,4,e8,mf8,tu,mu
+**  ...
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vremu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vremu\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f6 (void * in, void *out, int8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vuint8mf8_t v = __riscv_vle8_v_u8mf8 (in, 4);
+    vuint8mf8_t v2 = __riscv_vle8_v_u8mf8_tumu (mask, v, in, 4);
+    vuint8mf8_t v3 = __riscv_vremu_vx_u8mf8 (v2, 5, 4);
+    vuint8mf8_t v4 = __riscv_vremu_vx_u8mf8_tumu (mask, v3, v2, 5, 4);
+    __riscv_vse8_v_u8mf8 (out, v4, 4);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-36.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-36.c
new file mode 100644
index 00000000000..45015d77d2d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-36.c
@@ -0,0 +1,132 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f1:
+**	vsetivli\tzero,4,e32,m1,tu,ma
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vremu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vremu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vse32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f1 (void * in, void *out, uint32_t x)
+{
+    vuint32m1_t v = __riscv_vle32_v_u32m1 (in, 4);
+    vuint32m1_t v2 = __riscv_vle32_v_u32m1_tu (v, in, 4);
+    vuint32m1_t v3 = __riscv_vremu_vx_u32m1 (v2, x, 4);
+    vuint32m1_t v4 = __riscv_vremu_vx_u32m1_tu (v3, v2, x, 4);
+    __riscv_vse32_v_u32m1 (out, v4, 4);
+}
+
+/*
+** f2:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e32,m1,ta,ma
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vremu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vremu\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f2 (void * in, void *out, uint32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vuint32m1_t v = __riscv_vle32_v_u32m1 (in, 4);
+    vuint32m1_t v2 = __riscv_vle32_v_u32m1_m (mask, in, 4);
+    vuint32m1_t v3 = __riscv_vremu_vx_u32m1 (v2, x, 4);
+    vuint32m1_t v4 = __riscv_vremu_vx_u32m1_m (mask, v3, x, 4);
+    __riscv_vse32_v_u32m1 (out, v4, 4);
+}
+
+/*
+** f3:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e32,m1,tu,mu
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vremu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vremu\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f3 (void * in, void *out, uint32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vuint32m1_t v = __riscv_vle32_v_u32m1 (in, 4);
+    vuint32m1_t v2 = __riscv_vle32_v_u32m1_tumu (mask, v, in, 4);
+    vuint32m1_t v3 = __riscv_vremu_vx_u32m1 (v2, x, 4);
+    vuint32m1_t v4 = __riscv_vremu_vx_u32m1_tumu (mask, v3, v2, x, 4);
+    __riscv_vse32_v_u32m1 (out, v4, 4);
+}
+
+/*
+** f4:
+**	vsetivli\tzero,4,e8,mf8,tu,ma
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vremu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vremu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vse8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f4 (void * in, void *out, uint8_t x)
+{
+    vuint8mf8_t v = __riscv_vle8_v_u8mf8 (in, 4);
+    vuint8mf8_t v2 = __riscv_vle8_v_u8mf8_tu (v, in, 4);
+    vuint8mf8_t v3 = __riscv_vremu_vx_u8mf8 (v2, x, 4);
+    vuint8mf8_t v4 = __riscv_vremu_vx_u8mf8_tu (v3, v2, x, 4);
+    __riscv_vse8_v_u8mf8 (out, v4, 4);
+}
+
+/*
+** f5:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e8,mf8,ta,ma
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vremu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vremu\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f5 (void * in, void *out, uint8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vuint8mf8_t v = __riscv_vle8_v_u8mf8 (in, 4);
+    vuint8mf8_t v2 = __riscv_vle8_v_u8mf8_m (mask, in, 4);
+    vuint8mf8_t v3 = __riscv_vremu_vx_u8mf8 (v2, x, 4);
+    vuint8mf8_t v4 = __riscv_vremu_vx_u8mf8_m (mask, v3, x, 4);
+    __riscv_vse8_v_u8mf8 (out, v4, 4);
+}
+
+/*
+** f6:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e8,mf8,tu,mu
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vremu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vremu\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f6 (void * in, void *out, uint8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vuint8mf8_t v = __riscv_vle8_v_u8mf8 (in, 4);
+    vuint8mf8_t v2 = __riscv_vle8_v_u8mf8_tumu (mask, v, in, 4);
+    vuint8mf8_t v3 = __riscv_vremu_vx_u8mf8 (v2, x, 4);
+    vuint8mf8_t v4 = __riscv_vremu_vx_u8mf8_tumu (mask, v3, v2, x, 4);
+    __riscv_vse8_v_u8mf8 (out, v4, 4);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-37.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-37.c
new file mode 100644
index 00000000000..34de4458198
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-37.c
@@ -0,0 +1,132 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f1:
+**	vsetivli\tzero,4,e32,m1,tu,ma
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsub\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vsub\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vse32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f1 (void * in, void *out, int32_t x) /* tail-undisturbed (_tu) vsub.vx chain */
+{
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_tu (v, in, 4);
+    vint32m1_t v3 = __riscv_vsub_vx_i32m1 (v2, x, 4);
+    vint32m1_t v4 = __riscv_vsub_vx_i32m1_tu (v3, v2, x, 4); /* _tu: v3 is the tail-merge operand */
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f2:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e32,m1,ta,ma
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vsub\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vsub\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f2 (void * in, void *out, int32_t x) /* masked (_m) variants, tail-agnostic */
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory"); /* compiler barrier: keep the mask load distinct from the vector loads */
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_m (mask, in, 4);
+    vint32m1_t v3 = __riscv_vsub_vx_i32m1 (v2, x, 4);
+    vint32m1_t v4 = __riscv_vsub_vx_i32m1_m (mask, v3, x, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f3:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e32,m1,tu,mu
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vsub\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vsub\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f3 (void * in, void *out, int32_t x) /* _tumu: tail-undisturbed and mask-undisturbed variants */
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory"); /* compiler barrier: keep the mask load distinct from the vector loads */
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_tumu (mask, v, in, 4);
+    vint32m1_t v3 = __riscv_vsub_vx_i32m1 (v2, x, 4);
+    vint32m1_t v4 = __riscv_vsub_vx_i32m1_tumu (mask, v3, v2, x, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f4:
+**	vsetivli\tzero,4,e8,mf8,tu,ma
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsub\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vsub\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vse8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f4 (void * in, void *out, int8_t x) /* same _tu chain at e8,mf8 */
+{
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tu (v, in, 4);
+    vint8mf8_t v3 = __riscv_vsub_vx_i8mf8 (v2, x, 4);
+    vint8mf8_t v4 = __riscv_vsub_vx_i8mf8_tu (v3, v2, x, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
+
+/*
+** f5:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e8,mf8,ta,ma
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vsub\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vsub\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f5 (void * in, void *out, int8_t x) /* masked (_m) variants at e8,mf8 */
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory"); /* compiler barrier: keep the mask load distinct from the vector loads */
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_m (mask, in, 4);
+    vint8mf8_t v3 = __riscv_vsub_vx_i8mf8 (v2, x, 4);
+    vint8mf8_t v4 = __riscv_vsub_vx_i8mf8_m (mask, v3, x, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
+
+/*
+** f6:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e8,mf8,tu,mu
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vsub\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vsub\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f6 (void * in, void *out, int8_t x) /* _tumu variants at e8,mf8 */
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory"); /* compiler barrier: keep the mask load distinct from the vector loads */
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tumu (mask, v, in, 4);
+    vint8mf8_t v3 = __riscv_vsub_vx_i8mf8 (v2, x, 4);
+    vint8mf8_t v4 = __riscv_vsub_vx_i8mf8_tumu (mask, v3, v2, x, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-38.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-38.c
new file mode 100644
index 00000000000..1374becb847
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-38.c
@@ -0,0 +1,132 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f1:
+**	vsetivli\tzero,4,e32,m1,tu,ma
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vadd\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**	vadd\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**	vse32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f1 (void * in, void *out, int32_t x) /* _tu; x unused — immediate form under test */
+{
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_tu (v, in, 4);
+    vint32m1_t v3 = __riscv_vsub_vx_i32m1 (v2, -15, 4); /* vsub by -15 is expected to canonicalize to vadd.vi 15 (see pattern above) */
+    vint32m1_t v4 = __riscv_vsub_vx_i32m1_tu (v3, v2, -15, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f2:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e32,m1,ta,ma
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vadd\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**	vadd\.vi\tv[1-9][0-9]?,\s*v[0-9]+,\s*15,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f2 (void * in, void *out, int32_t x) /* masked (_m) immediate variants */
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory"); /* compiler barrier: keep the mask load distinct from the vector loads */
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_m (mask, in, 4);
+    vint32m1_t v3 = __riscv_vsub_vx_i32m1 (v2, -15, 4);
+    vint32m1_t v4 = __riscv_vsub_vx_i32m1_m (mask, v3, -15, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f3:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e32,m1,tu,mu
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vadd\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**	vadd\.vi\tv[1-9][0-9]?,\s*v[0-9]+,\s*15,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f3 (void * in, void *out, int32_t x) /* _tumu immediate variants */
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory"); /* compiler barrier: keep the mask load distinct from the vector loads */
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_tumu (mask, v, in, 4);
+    vint32m1_t v3 = __riscv_vsub_vx_i32m1 (v2, -15, 4);
+    vint32m1_t v4 = __riscv_vsub_vx_i32m1_tumu (mask, v3, v2, -15, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f4:
+**	vsetivli\tzero,4,e8,mf8,tu,ma
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vadd\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**	vadd\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**	vse8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f4 (void * in, void *out, int8_t x) /* _tu immediate variants at e8,mf8 */
+{
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tu (v, in, 4);
+    vint8mf8_t v3 = __riscv_vsub_vx_i8mf8 (v2, -15, 4);
+    vint8mf8_t v4 = __riscv_vsub_vx_i8mf8_tu (v3, v2, -15, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
+
+/*
+** f5:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e8,mf8,ta,ma
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vadd\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**	vadd\.vi\tv[1-9][0-9]?,\s*v[0-9]+,\s*15,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f5 (void * in, void *out, int8_t x) /* _m immediate variants at e8,mf8 */
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory"); /* compiler barrier: keep the mask load distinct from the vector loads */
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_m (mask, in, 4);
+    vint8mf8_t v3 = __riscv_vsub_vx_i8mf8 (v2, -15, 4);
+    vint8mf8_t v4 = __riscv_vsub_vx_i8mf8_m (mask, v3, -15, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
+
+/*
+** f6:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e8,mf8,tu,mu
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vadd\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**	vadd\.vi\tv[1-9][0-9]?,\s*v[0-9]+,\s*15,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f6 (void * in, void *out, int8_t x) /* _tumu immediate variants at e8,mf8 */
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory"); /* compiler barrier: keep the mask load distinct from the vector loads */
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tumu (mask, v, in, 4);
+    vint8mf8_t v3 = __riscv_vsub_vx_i8mf8 (v2, -15, 4);
+    vint8mf8_t v4 = __riscv_vsub_vx_i8mf8_tumu (mask, v3, v2, -15, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-39.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-39.c
new file mode 100644
index 00000000000..21b77b952e0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-39.c
@@ -0,0 +1,132 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f1:
+**	vsetivli\tzero,4,e32,m1,tu,ma
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vadd\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**	vadd\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**	vse32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f1 (void * in, void *out, int32_t x) /* _tu; x unused — immediate form under test */
+{
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_tu (v, in, 4);
+    vint32m1_t v3 = __riscv_vsub_vx_i32m1 (v2, 16, 4); /* vsub by 16 is expected to canonicalize to vadd.vi -16 (see pattern above) */
+    vint32m1_t v4 = __riscv_vsub_vx_i32m1_tu (v3, v2, 16, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f2:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e32,m1,ta,ma
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vadd\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**	vadd\.vi\tv[1-9][0-9]?,\s*v[0-9]+,\s*-16,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f2 (void * in, void *out, int32_t x) /* masked (_m) immediate variants */
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory"); /* compiler barrier: keep the mask load distinct from the vector loads */
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_m (mask, in, 4);
+    vint32m1_t v3 = __riscv_vsub_vx_i32m1 (v2, 16, 4);
+    vint32m1_t v4 = __riscv_vsub_vx_i32m1_m (mask, v3, 16, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f3:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e32,m1,tu,mu
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vadd\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**	vadd\.vi\tv[1-9][0-9]?,\s*v[0-9]+,\s*-16,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f3 (void * in, void *out, int32_t x) /* _tumu immediate variants */
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory"); /* compiler barrier: keep the mask load distinct from the vector loads */
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_tumu (mask, v, in, 4);
+    vint32m1_t v3 = __riscv_vsub_vx_i32m1 (v2, 16, 4);
+    vint32m1_t v4 = __riscv_vsub_vx_i32m1_tumu (mask, v3, v2, 16, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f4:
+**	vsetivli\tzero,4,e8,mf8,tu,ma
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vadd\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**	vadd\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**	vse8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f4 (void * in, void *out, int8_t x) /* _tu immediate variants at e8,mf8 */
+{
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tu (v, in, 4);
+    vint8mf8_t v3 = __riscv_vsub_vx_i8mf8 (v2, 16, 4);
+    vint8mf8_t v4 = __riscv_vsub_vx_i8mf8_tu (v3, v2, 16, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
+
+/*
+** f5:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e8,mf8,ta,ma
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vadd\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**	vadd\.vi\tv[1-9][0-9]?,\s*v[0-9]+,\s*-16,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f5 (void * in, void *out, int8_t x) /* _m immediate variants at e8,mf8 */
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory"); /* compiler barrier: keep the mask load distinct from the vector loads */
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_m (mask, in, 4);
+    vint8mf8_t v3 = __riscv_vsub_vx_i8mf8 (v2, 16, 4);
+    vint8mf8_t v4 = __riscv_vsub_vx_i8mf8_m (mask, v3, 16, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
+
+/*
+** f6:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e8,mf8,tu,mu
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vadd\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**	vadd\.vi\tv[1-9][0-9]?,\s*v[0-9]+,\s*-16,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f6 (void * in, void *out, int8_t x) /* _tumu immediate variants at e8,mf8 */
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory"); /* compiler barrier: keep the mask load distinct from the vector loads */
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tumu (mask, v, in, 4);
+    vint8mf8_t v3 = __riscv_vsub_vx_i8mf8 (v2, 16, 4);
+    vint8mf8_t v4 = __riscv_vsub_vx_i8mf8_tumu (mask, v3, v2, 16, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-4.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-4.c
new file mode 100644
index 00000000000..297ed238477
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-4.c
@@ -0,0 +1,160 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f1:
+**  ...
+**	vsetivli\tzero,4,e32,m1,tu,ma
+**  ...
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vadd\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vadd\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vse32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f1 (void * in, void *out, int32_t x) /* _tu; immediate 16 is outside vadd.vi's simm5 range, so a scalar-register vadd.vx is expected */
+{
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_tu (v, in, 4);
+    vint32m1_t v3 = __riscv_vadd_vx_i32m1 (v2, 16, 4);
+    vint32m1_t v4 = __riscv_vadd_vx_i32m1_tu (v3, v2, 16, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f2:
+**  ...
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**  ...
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vsetivli\tzero,4,e32,m1,ta,ma
+**  ...
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**  ...
+**	vadd\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vadd\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f2 (void * in, void *out, int32_t x) /* masked (_m) variants */
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory"); /* compiler barrier: keep the mask load distinct from the vector loads */
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_m (mask, in, 4);
+    vint32m1_t v3 = __riscv_vadd_vx_i32m1 (v2, 16, 4);
+    vint32m1_t v4 = __riscv_vadd_vx_i32m1_m (mask, v3, 16, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f3:
+**  ...
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**  ...
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vsetivli\tzero,4,e32,m1,tu,mu
+**  ...
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**  ...
+**	vadd\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vadd\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f3 (void * in, void *out, int32_t x) /* _tumu variants */
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory"); /* compiler barrier: keep the mask load distinct from the vector loads */
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_tumu (mask, v, in, 4);
+    vint32m1_t v3 = __riscv_vadd_vx_i32m1 (v2, 16, 4);
+    vint32m1_t v4 = __riscv_vadd_vx_i32m1_tumu (mask, v3, v2, 16, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f4:
+**  ...
+**	vsetivli\tzero,4,e8,mf8,tu,ma
+**  ...
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vadd\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vadd\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vse8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f4 (void * in, void *out, int8_t x) /* _tu variants at e8,mf8 */
+{
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tu (v, in, 4);
+    vint8mf8_t v3 = __riscv_vadd_vx_i8mf8 (v2, 16, 4);
+    vint8mf8_t v4 = __riscv_vadd_vx_i8mf8_tu (v3, v2, 16, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
+
+/*
+** f5:
+**  ...
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**  ...
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vsetivli\tzero,4,e8,mf8,ta,ma
+**  ...
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**  ...
+**	vadd\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vadd\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f5 (void * in, void *out, int8_t x) /* _m variants at e8,mf8 */
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory"); /* compiler barrier: keep the mask load distinct from the vector loads */
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_m (mask, in, 4);
+    vint8mf8_t v3 = __riscv_vadd_vx_i8mf8 (v2, 16, 4);
+    vint8mf8_t v4 = __riscv_vadd_vx_i8mf8_m (mask, v3, 16, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
+
+/*
+** f6:
+**  ...
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**  ...
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vsetivli\tzero,4,e8,mf8,tu,mu
+**  ...
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vadd\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vadd\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f6 (void * in, void *out, int8_t x) /* _tumu variants at e8,mf8 */
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory"); /* compiler barrier: keep the mask load distinct from the vector loads */
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tumu (mask, v, in, 4);
+    vint8mf8_t v3 = __riscv_vadd_vx_i8mf8 (v2, 16, 4);
+    vint8mf8_t v4 = __riscv_vadd_vx_i8mf8_tumu (mask, v3, v2, 16, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-40.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-40.c
new file mode 100644
index 00000000000..653f043e471
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-40.c
@@ -0,0 +1,160 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f1:
+**  ...
+**	vsetivli\tzero,4,e32,m1,tu,ma
+**  ...
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vadd\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vadd\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vse32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f1 (void * in, void *out, int32_t x) /* _tu; immediate 17 is outside vadd.vi's simm5 range, so a scalar-register vadd.vx is expected */
+{
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_tu (v, in, 4);
+    vint32m1_t v3 = __riscv_vadd_vx_i32m1 (v2, 17, 4);
+    vint32m1_t v4 = __riscv_vadd_vx_i32m1_tu (v3, v2, 17, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f2:
+**  ...
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**  ...
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vsetivli\tzero,4,e32,m1,ta,ma
+**  ...
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**  ...
+**	vadd\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vadd\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f2 (void * in, void *out, int32_t x) /* masked (_m) variants */
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory"); /* compiler barrier: keep the mask load distinct from the vector loads */
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_m (mask, in, 4);
+    vint32m1_t v3 = __riscv_vadd_vx_i32m1 (v2, 17, 4);
+    vint32m1_t v4 = __riscv_vadd_vx_i32m1_m (mask, v3, 17, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f3:
+**  ...
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**  ...
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vsetivli\tzero,4,e32,m1,tu,mu
+**  ...
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**  ...
+**	vadd\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vadd\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f3 (void * in, void *out, int32_t x) /* _tumu variants */
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory"); /* compiler barrier: keep the mask load distinct from the vector loads */
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_tumu (mask, v, in, 4);
+    vint32m1_t v3 = __riscv_vadd_vx_i32m1 (v2, 17, 4);
+    vint32m1_t v4 = __riscv_vadd_vx_i32m1_tumu (mask, v3, v2, 17, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f4:
+**  ...
+**	vsetivli\tzero,4,e8,mf8,tu,ma
+**  ...
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vadd\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vadd\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vse8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f4 (void * in, void *out, int8_t x) /* _tu variants at e8,mf8 */
+{
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tu (v, in, 4);
+    vint8mf8_t v3 = __riscv_vadd_vx_i8mf8 (v2, 17, 4);
+    vint8mf8_t v4 = __riscv_vadd_vx_i8mf8_tu (v3, v2, 17, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
+
+/*
+** f5:
+**  ...
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**  ...
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vsetivli\tzero,4,e8,mf8,ta,ma
+**  ...
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**  ...
+**	vadd\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vadd\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f5 (void * in, void *out, int8_t x) /* _m variants at e8,mf8 */
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory"); /* compiler barrier: keep the mask load distinct from the vector loads */
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_m (mask, in, 4);
+    vint8mf8_t v3 = __riscv_vadd_vx_i8mf8 (v2, 17, 4);
+    vint8mf8_t v4 = __riscv_vadd_vx_i8mf8_m (mask, v3, 17, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
+
+/*
+** f6:
+**  ...
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**  ...
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vsetivli\tzero,4,e8,mf8,tu,mu
+**  ...
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vadd\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vadd\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f6 (void * in, void *out, int8_t x) /* _tumu variants at e8,mf8 */
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory"); /* compiler barrier: keep the mask load distinct from the vector loads */
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tumu (mask, v, in, 4);
+    vint8mf8_t v3 = __riscv_vadd_vx_i8mf8 (v2, 17, 4);
+    vint8mf8_t v4 = __riscv_vadd_vx_i8mf8_tumu (mask, v3, v2, 17, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-41.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-41.c
new file mode 100644
index 00000000000..4ff352bd7af
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-41.c
@@ -0,0 +1,132 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f1:
+**	vsetivli\tzero,4,e32,m1,tu,ma
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vrsub\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vrsub\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vse32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f1 (void * in, void *out, int32_t x) /* tail-undisturbed (_tu) vrsub.vx chain */
+{
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_tu (v, in, 4);
+    vint32m1_t v3 = __riscv_vrsub_vx_i32m1 (v2, x, 4);
+    vint32m1_t v4 = __riscv_vrsub_vx_i32m1_tu (v3, v2, x, 4); /* _tu: v3 is the tail-merge operand */
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f2:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e32,m1,ta,ma
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vrsub\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vrsub\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f2 (void * in, void *out, int32_t x) /* masked (_m) variants, tail-agnostic */
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory"); /* compiler barrier: keep the mask load distinct from the vector loads */
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_m (mask, in, 4);
+    vint32m1_t v3 = __riscv_vrsub_vx_i32m1 (v2, x, 4);
+    vint32m1_t v4 = __riscv_vrsub_vx_i32m1_m (mask, v3, x, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f3:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e32,m1,tu,mu
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vrsub\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vrsub\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f3 (void * in, void *out, int32_t x) /* _tumu: tail-undisturbed and mask-undisturbed variants */
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory"); /* compiler barrier: keep the mask load distinct from the vector loads */
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_tumu (mask, v, in, 4);
+    vint32m1_t v3 = __riscv_vrsub_vx_i32m1 (v2, x, 4);
+    vint32m1_t v4 = __riscv_vrsub_vx_i32m1_tumu (mask, v3, v2, x, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f4:
+**	vsetivli\tzero,4,e8,mf8,tu,ma
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vrsub\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vrsub\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vse8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f4 (void * in, void *out, int8_t x) /* _tu variants at e8,mf8 */
+{
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tu (v, in, 4);
+    vint8mf8_t v3 = __riscv_vrsub_vx_i8mf8 (v2, x, 4);
+    vint8mf8_t v4 = __riscv_vrsub_vx_i8mf8_tu (v3, v2, x, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
+
+/*
+** f5:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e8,mf8,ta,ma
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vrsub\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vrsub\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f5 (void * in, void *out, int8_t x) /* _m variants at e8,mf8 */
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory"); /* compiler barrier: keep the mask load distinct from the vector loads */
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_m (mask, in, 4);
+    vint8mf8_t v3 = __riscv_vrsub_vx_i8mf8 (v2, x, 4);
+    vint8mf8_t v4 = __riscv_vrsub_vx_i8mf8_m (mask, v3, x, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
+
+/*
+** f6:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e8,mf8,tu,mu
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vrsub\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vrsub\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f6 (void * in, void *out, int8_t x) /* _tumu variants at e8,mf8 */
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory"); /* compiler barrier: keep the mask load distinct from the vector loads */
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tumu (mask, v, in, 4);
+    vint8mf8_t v3 = __riscv_vrsub_vx_i8mf8 (v2, x, 4);
+    vint8mf8_t v4 = __riscv_vrsub_vx_i8mf8_tumu (mask, v3, v2, x, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-42.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-42.c
new file mode 100644
index 00000000000..975ebe709b9
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-42.c
@@ -0,0 +1,132 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f1:
+**	vsetivli\tzero,4,e32,m1,tu,ma
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vrsub\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**	vrsub\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**	vse32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f1 (void * in, void *out, int32_t x) /* _tu; x unused — -16 fits simm5, so vrsub.vi is expected */
+{
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_tu (v, in, 4);
+    vint32m1_t v3 = __riscv_vrsub_vx_i32m1 (v2, -16, 4);
+    vint32m1_t v4 = __riscv_vrsub_vx_i32m1_tu (v3, v2, -16, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f2:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e32,m1,ta,ma
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vrsub\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**	vrsub\.vi\tv[1-9][0-9]?,\s*v[0-9]+,\s*-16,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f2 (void * in, void *out, int32_t x) /* masked (_m) immediate variants */
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory"); /* compiler barrier: keep the mask load distinct from the vector loads */
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_m (mask, in, 4);
+    vint32m1_t v3 = __riscv_vrsub_vx_i32m1 (v2, -16, 4);
+    vint32m1_t v4 = __riscv_vrsub_vx_i32m1_m (mask, v3, -16, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f3:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e32,m1,tu,mu
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vrsub\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**	vrsub\.vi\tv[1-9][0-9]?,\s*v[0-9]+,\s*-16,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f3 (void * in, void *out, int32_t x) /* _tumu immediate variants */
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory"); /* compiler barrier: keep the mask load distinct from the vector loads */
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_tumu (mask, v, in, 4);
+    vint32m1_t v3 = __riscv_vrsub_vx_i32m1 (v2, -16, 4);
+    vint32m1_t v4 = __riscv_vrsub_vx_i32m1_tumu (mask, v3, v2, -16, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f4:
+**	vsetivli\tzero,4,e8,mf8,tu,ma
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vrsub\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**	vrsub\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**	vse8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f4 (void * in, void *out, int8_t x) /* _tu immediate variants at e8,mf8 */
+{
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tu (v, in, 4);
+    vint8mf8_t v3 = __riscv_vrsub_vx_i8mf8 (v2, -16, 4);
+    vint8mf8_t v4 = __riscv_vrsub_vx_i8mf8_tu (v3, v2, -16, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
+
+/*
+** f5:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e8,mf8,ta,ma
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vrsub\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**	vrsub\.vi\tv[1-9][0-9]?,\s*v[0-9]+,\s*-16,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f5 (void * in, void *out, int8_t x) /* _m immediate variants at e8,mf8 */
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory"); /* compiler barrier: keep the mask load distinct from the vector loads */
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_m (mask, in, 4);
+    vint8mf8_t v3 = __riscv_vrsub_vx_i8mf8 (v2, -16, 4);
+    vint8mf8_t v4 = __riscv_vrsub_vx_i8mf8_m (mask, v3, -16, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
+
+/*
+** f6:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e8,mf8,tu,mu
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vrsub\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**	vrsub\.vi\tv[1-9][0-9]?,\s*v[0-9]+,\s*-16,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f6 (void * in, void *out, int8_t x) /* _tumu immediate variants at e8,mf8 */
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory"); /* compiler barrier: keep the mask load distinct from the vector loads */
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tumu (mask, v, in, 4);
+    vint8mf8_t v3 = __riscv_vrsub_vx_i8mf8 (v2, -16, 4);
+    vint8mf8_t v4 = __riscv_vrsub_vx_i8mf8_tumu (mask, v3, v2, -16, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-43.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-43.c
new file mode 100644
index 00000000000..4f3e9066f16
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-43.c
@@ -0,0 +1,132 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f1:
+**	vsetivli\tzero,4,e32,m1,tu,ma
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vrsub\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**	vrsub\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**	vse32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f1 (void * in, void *out, int32_t x)
+{
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_tu (v, in, 4);
+    vint32m1_t v3 = __riscv_vrsub_vx_i32m1 (v2, 15, 4);
+    vint32m1_t v4 = __riscv_vrsub_vx_i32m1_tu (v3, v2, 15, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f2:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e32,m1,ta,ma
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vrsub\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**	vrsub\.vi\tv[1-9][0-9]?,\s*v[0-9]+,\s*15,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f2 (void * in, void *out, int32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_m (mask, in, 4);
+    vint32m1_t v3 = __riscv_vrsub_vx_i32m1 (v2, 15, 4);
+    vint32m1_t v4 = __riscv_vrsub_vx_i32m1_m (mask, v3, 15, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f3:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e32,m1,tu,mu
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vrsub\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**	vrsub\.vi\tv[1-9][0-9]?,\s*v[0-9]+,\s*15,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f3 (void * in, void *out, int32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_tumu (mask, v, in, 4);
+    vint32m1_t v3 = __riscv_vrsub_vx_i32m1 (v2, 15, 4);
+    vint32m1_t v4 = __riscv_vrsub_vx_i32m1_tumu (mask, v3, v2, 15, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f4:
+**	vsetivli\tzero,4,e8,mf8,tu,ma
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vrsub\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**	vrsub\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**	vse8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f4 (void * in, void *out, int8_t x)
+{
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tu (v, in, 4);
+    vint8mf8_t v3 = __riscv_vrsub_vx_i8mf8 (v2, 15, 4);
+    vint8mf8_t v4 = __riscv_vrsub_vx_i8mf8_tu (v3, v2, 15, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
+
+/*
+** f5:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e8,mf8,ta,ma
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vrsub\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**	vrsub\.vi\tv[1-9][0-9]?,\s*v[0-9]+,\s*15,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f5 (void * in, void *out, int8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_m (mask, in, 4);
+    vint8mf8_t v3 = __riscv_vrsub_vx_i8mf8 (v2, 15, 4);
+    vint8mf8_t v4 = __riscv_vrsub_vx_i8mf8_m (mask, v3, 15, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
+
+/*
+** f6:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e8,mf8,tu,mu
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vrsub\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**	vrsub\.vi\tv[1-9][0-9]?,\s*v[0-9]+,\s*15,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f6 (void * in, void *out, int8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tumu (mask, v, in, 4);
+    vint8mf8_t v3 = __riscv_vrsub_vx_i8mf8 (v2, 15, 4);
+    vint8mf8_t v4 = __riscv_vrsub_vx_i8mf8_tumu (mask, v3, v2, 15, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-44.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-44.c
new file mode 100644
index 00000000000..d4dc4e0fe6c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-44.c
@@ -0,0 +1,160 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f1:
+**  ...
+**	vsetivli\tzero,4,e32,m1,tu,ma
+**  ...
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vrsub\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vrsub\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vse32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f1 (void * in, void *out, int32_t x)
+{
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_tu (v, in, 4);
+    vint32m1_t v3 = __riscv_vrsub_vx_i32m1 (v2, 16, 4);
+    vint32m1_t v4 = __riscv_vrsub_vx_i32m1_tu (v3, v2, 16, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f2:
+**  ...
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**  ...
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vsetivli\tzero,4,e32,m1,ta,ma
+**  ...
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**  ...
+**	vrsub\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vrsub\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f2 (void * in, void *out, int32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_m (mask, in, 4);
+    vint32m1_t v3 = __riscv_vrsub_vx_i32m1 (v2, 16, 4);
+    vint32m1_t v4 = __riscv_vrsub_vx_i32m1_m (mask, v3, 16, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f3:
+**  ...
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**  ...
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vsetivli\tzero,4,e32,m1,tu,mu
+**  ...
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**  ...
+**	vrsub\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vrsub\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f3 (void * in, void *out, int32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_tumu (mask, v, in, 4);
+    vint32m1_t v3 = __riscv_vrsub_vx_i32m1 (v2, 16, 4);
+    vint32m1_t v4 = __riscv_vrsub_vx_i32m1_tumu (mask, v3, v2, 16, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f4:
+**  ...
+**	vsetivli\tzero,4,e8,mf8,tu,ma
+**  ...
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vrsub\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vrsub\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vse8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f4 (void * in, void *out, int8_t x)
+{
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tu (v, in, 4);
+    vint8mf8_t v3 = __riscv_vrsub_vx_i8mf8 (v2, 16, 4);
+    vint8mf8_t v4 = __riscv_vrsub_vx_i8mf8_tu (v3, v2, 16, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
+
+/*
+** f5:
+**  ...
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**  ...
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vsetivli\tzero,4,e8,mf8,ta,ma
+**  ...
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**  ...
+**	vrsub\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vrsub\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f5 (void * in, void *out, int8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_m (mask, in, 4);
+    vint8mf8_t v3 = __riscv_vrsub_vx_i8mf8 (v2, 16, 4);
+    vint8mf8_t v4 = __riscv_vrsub_vx_i8mf8_m (mask, v3, 16, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
+
+/*
+** f6:
+**  ...
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**  ...
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vsetivli\tzero,4,e8,mf8,tu,mu
+**  ...
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vrsub\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vrsub\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f6 (void * in, void *out, int8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tumu (mask, v, in, 4);
+    vint8mf8_t v3 = __riscv_vrsub_vx_i8mf8 (v2, 16, 4);
+    vint8mf8_t v4 = __riscv_vrsub_vx_i8mf8_tumu (mask, v3, v2, 16, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-45.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-45.c
new file mode 100644
index 00000000000..2fab88009dd
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-45.c
@@ -0,0 +1,123 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f0:
+**  ...
+**	vadd\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**	vadd\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**  ...
+**	ret
+*/
+void f0 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vadd_vx_i64m1 (v2, -16, 4);
+  vint64m1_t v4 = __riscv_vadd_vx_i64m1 (v3, -16, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f1:
+**  ...
+**	vadd\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**	vadd\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**  ...
+**	ret
+*/
+void f1 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vadd_vx_i64m1 (v2, 15, 4);
+  vint64m1_t v4 = __riscv_vadd_vx_i64m1 (v3, 15, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f2:
+**  ...
+**	vadd\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vadd\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**  ...
+**	ret
+*/
+void f2 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vadd_vx_i64m1 (v2, 16, 4);
+  vint64m1_t v4 = __riscv_vadd_vx_i64m1 (v3, 16, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f3:
+**  ...
+**	vadd\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vadd\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**  ...
+**	ret
+*/
+void f3 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vadd_vx_i64m1 (v2, 0xAAAAAAAA, 4);
+  vint64m1_t v4 = __riscv_vadd_vx_i64m1 (v3, 0xAAAAAAAA, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f4:
+**  ...
+**	vadd\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vadd\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**  ...
+**	ret
+*/
+void f4 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vadd_vx_i64m1 (v2, 0xAAAAAAAAAAAAAAAA, 4);
+  vint64m1_t v4 = __riscv_vadd_vx_i64m1 (v3, 0xAAAAAAAAAAAAAAAA, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f5:
+**  ...
+**	vadd\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vadd\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**  ...
+**	ret
+*/
+void f5 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vadd_vx_i64m1 (v2, 0xAAAAAAAAAAAAAAAA, 4);
+  vint64m1_t v4 = __riscv_vadd_vx_i64m1 (v3, 0xAAAAAAAAAAAAAAAA, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f6:
+**  ...
+**	vadd\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vadd\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**  ...
+**	ret
+*/
+void f6 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vadd_vx_i64m1 (v2, x, 4);
+  vint64m1_t v4 = __riscv_vadd_vx_i64m1 (v3, x, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-46.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-46.c
new file mode 100644
index 00000000000..f6726e0b0a0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-46.c
@@ -0,0 +1,72 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32 -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f0:
+**  ...
+**	vadd\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**	vadd\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**  ...
+**	ret
+*/
+void f0 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vadd_vx_i64m1 (v2, -16, 4);
+  vint64m1_t v4 = __riscv_vadd_vx_i64m1 (v3, -16, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f1:
+**  ...
+**	vadd\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**	vadd\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**  ...
+**	ret
+*/
+void f1 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vadd_vx_i64m1 (v2, 15, 4);
+  vint64m1_t v4 = __riscv_vadd_vx_i64m1 (v3, 15, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f2:
+**  ...
+**	vadd\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vadd\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**  ...
+**	ret
+*/
+void f2 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vadd_vx_i64m1 (v2, 16, 4);
+  vint64m1_t v4 = __riscv_vadd_vx_i64m1 (v3, 16, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f3:
+**  ...
+**	vadd\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vadd\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**  ...
+**	ret
+*/
+void f3 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vadd_vx_i64m1 (v2, 0xAAAAAAA, 4);
+  vint64m1_t v4 = __riscv_vadd_vx_i64m1 (v3, 0xAAAAAAA, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-47.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-47.c
new file mode 100644
index 00000000000..5a4f58bfc1a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-47.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32 -O3" } */
+#include "riscv_vector.h"
+
+void f (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vadd_vx_i64m1 (v2, 0xAAAAAAAA, 4);
+  vint64m1_t v4 = __riscv_vadd_vx_i64m1_tu (v3, v2, 0xAAAAAAAA, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/* { dg-final { scan-assembler-times {vlse64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*zero} 1 } } */
+/* { dg-final { scan-assembler-times {vadd\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 2 } } */
+/* { dg-final { scan-assembler-not {vmv} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-48.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-48.c
new file mode 100644
index 00000000000..dd159f1b1f9
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-48.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32 -O3" } */
+#include "riscv_vector.h"
+
+void f (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vadd_vx_i64m1 (v2, 0xAAAAAAAAAAAAAAAA, 4);
+  vint64m1_t v4 = __riscv_vadd_vx_i64m1_tu (v3, v2, 0xAAAAAAAAAAAAAAAA, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/* { dg-final { scan-assembler-times {vlse64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*zero} 1 } } */
+/* { dg-final { scan-assembler-times {vadd\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 2 } } */
+/* { dg-final { scan-assembler-not {vmv} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-49.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-49.c
new file mode 100644
index 00000000000..37e06d5781e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-49.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32 -O3" } */
+#include "riscv_vector.h"
+
+void f (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vadd_vx_i64m1 (v2, x, 4);
+  vint64m1_t v4 = __riscv_vadd_vx_i64m1_tu (v3, v2, x, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/* { dg-final { scan-assembler-times {vlse64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*zero} 1 } } */
+/* { dg-final { scan-assembler-times {vadd\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 2 } } */
+/* { dg-final { scan-assembler-not {vmv} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-5.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-5.c
new file mode 100644
index 00000000000..29eab66774e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-5.c
@@ -0,0 +1,132 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f1:
+**	vsetivli\tzero,4,e32,m1,tu,ma
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vxor\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vxor\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vse32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f1 (void * in, void *out, int32_t x)
+{
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_tu (v, in, 4);
+    vint32m1_t v3 = __riscv_vxor_vx_i32m1 (v2, x, 4);
+    vint32m1_t v4 = __riscv_vxor_vx_i32m1_tu (v3, v2, x, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f2:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e32,m1,ta,ma
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vxor\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vxor\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f2 (void * in, void *out, int32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_m (mask, in, 4);
+    vint32m1_t v3 = __riscv_vxor_vx_i32m1 (v2, x, 4);
+    vint32m1_t v4 = __riscv_vxor_vx_i32m1_m (mask, v3, x, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f3:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e32,m1,tu,mu
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vxor\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vxor\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f3 (void * in, void *out, int32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_tumu (mask, v, in, 4);
+    vint32m1_t v3 = __riscv_vxor_vx_i32m1 (v2, x, 4);
+    vint32m1_t v4 = __riscv_vxor_vx_i32m1_tumu (mask, v3, v2, x, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f4:
+**	vsetivli\tzero,4,e8,mf8,tu,ma
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vxor\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vxor\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vse8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f4 (void * in, void *out, int8_t x)
+{
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tu (v, in, 4);
+    vint8mf8_t v3 = __riscv_vxor_vx_i8mf8 (v2, x, 4);
+    vint8mf8_t v4 = __riscv_vxor_vx_i8mf8_tu (v3, v2, x, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
+
+/*
+** f5:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e8,mf8,ta,ma
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vxor\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vxor\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f5 (void * in, void *out, int8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_m (mask, in, 4);
+    vint8mf8_t v3 = __riscv_vxor_vx_i8mf8 (v2, x, 4);
+    vint8mf8_t v4 = __riscv_vxor_vx_i8mf8_m (mask, v3, x, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
+
+/*
+** f6:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e8,mf8,tu,mu
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vxor\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vxor\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f6 (void * in, void *out, int8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tumu (mask, v, in, 4);
+    vint8mf8_t v3 = __riscv_vxor_vx_i8mf8 (v2, x, 4);
+    vint8mf8_t v4 = __riscv_vxor_vx_i8mf8_tumu (mask, v3, v2, x, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-50.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-50.c
new file mode 100644
index 00000000000..3893e17511d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-50.c
@@ -0,0 +1,18 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32 -O3" } */
+#include "riscv_vector.h"
+
+void f (void * in, void *out, int32_t x, int n)
+{
+  for (int i = 0; i < n; i++) {
+    vint64m1_t v = __riscv_vle64_v_i64m1 (in + i + 1, 4);
+    vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + i + 2, 4);
+    vint64m1_t v3 = __riscv_vadd_vx_i64m1 (v2, x, 4);
+    vint64m1_t v4 = __riscv_vadd_vx_i64m1_tu (v3, v2, x, 4);
+    __riscv_vse64_v_i64m1 (out + i + 2, v4, 4);
+  }
+}
+
+/* { dg-final { scan-assembler-times {vlse64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*zero\s+\.L[0-9]+\:\s+} 1 } } */
+/* { dg-final { scan-assembler-times {vadd\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 2 } } */
+/* { dg-final { scan-assembler-not {vmv} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-51.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-51.c
new file mode 100644
index 00000000000..9bb73586677
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-51.c
@@ -0,0 +1,123 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f0:
+**  ...
+**	vand\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**	vand\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**  ...
+**	ret
+*/
+void f0 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vand_vx_i64m1 (v2, -16, 4);
+  vint64m1_t v4 = __riscv_vand_vx_i64m1 (v3, -16, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f1:
+**  ...
+**	vand\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**	vand\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**  ...
+**	ret
+*/
+void f1 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vand_vx_i64m1 (v2, 15, 4);
+  vint64m1_t v4 = __riscv_vand_vx_i64m1 (v3, 15, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f2:
+**  ...
+**	vand\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vand\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**  ...
+**	ret
+*/
+void f2 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vand_vx_i64m1 (v2, 16, 4);
+  vint64m1_t v4 = __riscv_vand_vx_i64m1 (v3, 16, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f3:
+**  ...
+**	vand\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vand\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**  ...
+**	ret
+*/
+void f3 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vand_vx_i64m1 (v2, 0xAAAAAAAA, 4);
+  vint64m1_t v4 = __riscv_vand_vx_i64m1 (v3, 0xAAAAAAAA, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f4:
+**  ...
+**	vand\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vand\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**  ...
+**	ret
+*/
+void f4 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vand_vx_i64m1 (v2, 0xAAAAAAAAAAAAAAAA, 4);
+  vint64m1_t v4 = __riscv_vand_vx_i64m1 (v3, 0xAAAAAAAAAAAAAAAA, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f5:
+**  ...
+**	vand\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vand\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**  ...
+**	ret
+*/
+void f5 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vand_vx_i64m1 (v2, 0xAAAAAAAAAAAAAAAA, 4);
+  vint64m1_t v4 = __riscv_vand_vx_i64m1 (v3, 0xAAAAAAAAAAAAAAAA, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f6:
+**  ...
+**	vand\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vand\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**  ...
+**	ret
+*/
+void f6 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vand_vx_i64m1 (v2, x, 4);
+  vint64m1_t v4 = __riscv_vand_vx_i64m1 (v3, x, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-52.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-52.c
new file mode 100644
index 00000000000..905caa3817d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-52.c
@@ -0,0 +1,72 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32 -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f0:
+**  ...
+**	vand\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**	vand\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**  ...
+**	ret
+*/
+void f0 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vand_vx_i64m1 (v2, -16, 4);
+  vint64m1_t v4 = __riscv_vand_vx_i64m1 (v3, -16, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f1:
+**  ...
+**	vand\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**	vand\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**  ...
+**	ret
+*/
+void f1 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vand_vx_i64m1 (v2, 15, 4);
+  vint64m1_t v4 = __riscv_vand_vx_i64m1 (v3, 15, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f2:
+**  ...
+**	vand\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vand\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**  ...
+**	ret
+*/
+void f2 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vand_vx_i64m1 (v2, 16, 4);
+  vint64m1_t v4 = __riscv_vand_vx_i64m1 (v3, 16, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f3:
+**  ...
+**	vand\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vand\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**  ...
+**	ret
+*/
+void f3 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vand_vx_i64m1 (v2, 0xAAAAAAA, 4);
+  vint64m1_t v4 = __riscv_vand_vx_i64m1 (v3, 0xAAAAAAA, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-53.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-53.c
new file mode 100644
index 00000000000..f1b21440256
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-53.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32 -O3" } */
+#include "riscv_vector.h"
+
+void f (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vand_vx_i64m1 (v2, 0xAAAAAAAA, 4);
+  vint64m1_t v4 = __riscv_vand_vx_i64m1_tu (v3, v2, 0xAAAAAAAA, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/* { dg-final { scan-assembler-times {vlse64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*zero} 1 } } */
+/* { dg-final { scan-assembler-times {vand\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 2 } } */
+/* { dg-final { scan-assembler-not {vmv} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-54.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-54.c
new file mode 100644
index 00000000000..3dc22db3ce3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-54.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32 -O3" } */
+#include "riscv_vector.h"
+
+void f (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vand_vx_i64m1 (v2, 0xAAAAAAAAAAAAAAAA, 4);
+  vint64m1_t v4 = __riscv_vand_vx_i64m1_tu (v3, v2, 0xAAAAAAAAAAAAAAAA, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/* { dg-final { scan-assembler-times {vlse64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*zero} 1 } } */
+/* { dg-final { scan-assembler-times {vand\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 2 } } */
+/* { dg-final { scan-assembler-not {vmv} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-55.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-55.c
new file mode 100644
index 00000000000..d6893951511
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-55.c
@@ -0,0 +1,16 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32 -O3" } */
+#include "riscv_vector.h"
+
+void f (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vand_vx_i64m1 (v2, x, 4);
+  vint64m1_t v4 = __riscv_vand_vx_i64m1_tu (v3, v2, x, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/* { dg-final { scan-assembler-times {vlse64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*zero} 1 } } */
+/* { dg-final { scan-assembler-times {vand\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 2 } } */
+/* { dg-final { scan-assembler-not {vmv} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-56.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-56.c
new file mode 100644
index 00000000000..b0ea553bf89
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-56.c
@@ -0,0 +1,18 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32 -O3" } */
+#include "riscv_vector.h"
+
+void f (void * in, void *out, int64_t x, int n)
+{
+  for (int i = 0; i < n; i++) {
+    vint64m1_t v = __riscv_vle64_v_i64m1 (in + i + 1, 4);
+    vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + i + 2, 4);
+    vint64m1_t v3 = __riscv_vand_vx_i64m1 (v2, x, 4);
+    vint64m1_t v4 = __riscv_vand_vx_i64m1_tu (v3, v2, x, 4);
+    __riscv_vse64_v_i64m1 (out + i + 2, v4, 4);
+  }
+}
+
+/* { dg-final { scan-assembler-times {vlse64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*zero\s+\.L[0-9]+\:\s+} 1 } } */
+/* { dg-final { scan-assembler-times {vand\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 2 } } */
+/* { dg-final { scan-assembler-not {vmv} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-57.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-57.c
new file mode 100644
index 00000000000..5c34220436e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-57.c
@@ -0,0 +1,123 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f0:
+**  ...
+**	vor\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**	vor\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**  ...
+**	ret
+*/
+void f0 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vor_vx_i64m1 (v2, -16, 4);
+  vint64m1_t v4 = __riscv_vor_vx_i64m1 (v3, -16, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f1:
+**  ...
+**	vor\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**	vor\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**  ...
+**	ret
+*/
+void f1 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vor_vx_i64m1 (v2, 15, 4);
+  vint64m1_t v4 = __riscv_vor_vx_i64m1 (v3, 15, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f2:
+**  ...
+**	vor\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vor\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**  ...
+**	ret
+*/
+void f2 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vor_vx_i64m1 (v2, 16, 4);
+  vint64m1_t v4 = __riscv_vor_vx_i64m1 (v3, 16, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f3:
+**  ...
+**	vor\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vor\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**  ...
+**	ret
+*/
+void f3 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vor_vx_i64m1 (v2, 0xAAAAAAAA, 4);
+  vint64m1_t v4 = __riscv_vor_vx_i64m1 (v3, 0xAAAAAAAA, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f4:
+**  ...
+**	vor\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vor\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**  ...
+**	ret
+*/
+void f4 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vor_vx_i64m1 (v2, 0xAAAAAAAAAAAAAAAA, 4);
+  vint64m1_t v4 = __riscv_vor_vx_i64m1 (v3, 0xAAAAAAAAAAAAAAAA, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f5:
+**  ...
+**	vor\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vor\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**  ...
+**	ret
+*/
+void f5 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vor_vx_i64m1 (v2, 0xAAAAAAAAAAAAAAAA, 4);
+  vint64m1_t v4 = __riscv_vor_vx_i64m1 (v3, 0xAAAAAAAAAAAAAAAA, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f6:
+**  ...
+**	vor\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vor\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**  ...
+**	ret
+*/
+void f6 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vor_vx_i64m1 (v2, x, 4);
+  vint64m1_t v4 = __riscv_vor_vx_i64m1 (v3, x, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-58.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-58.c
new file mode 100644
index 00000000000..0f6d6063aa0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-58.c
@@ -0,0 +1,72 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32 -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f0:
+**  ...
+**	vor\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**	vor\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**  ...
+**	ret
+*/
+void f0 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vor_vx_i64m1 (v2, -16, 4);
+  vint64m1_t v4 = __riscv_vor_vx_i64m1 (v3, -16, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f1:
+**  ...
+**	vor\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**	vor\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**  ...
+**	ret
+*/
+void f1 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vor_vx_i64m1 (v2, 15, 4);
+  vint64m1_t v4 = __riscv_vor_vx_i64m1 (v3, 15, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f2:
+**  ...
+**	vor\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vor\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**  ...
+**	ret
+*/
+void f2 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vor_vx_i64m1 (v2, 16, 4);
+  vint64m1_t v4 = __riscv_vor_vx_i64m1 (v3, 16, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f3:
+**  ...
+**	vor\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vor\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**  ...
+**	ret
+*/
+void f3 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vor_vx_i64m1 (v2, 0xAAAAAAAA, 4);
+  vint64m1_t v4 = __riscv_vor_vx_i64m1 (v3, 0xAAAAAAAA, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-59.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-59.c
new file mode 100644
index 00000000000..9186d8dc861
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-59.c
@@ -0,0 +1,16 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32 -O3" } */
+#include "riscv_vector.h"
+
+void f (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vor_vx_i64m1 (v2, 0xAAAAAAAA, 4);
+  vint64m1_t v4 = __riscv_vor_vx_i64m1_tu (v3, v2, 0xAAAAAAAA, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/* { dg-final { scan-assembler-times {vlse64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*zero} 1 } } */
+/* { dg-final { scan-assembler-times {vor\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 2 } } */
+/* { dg-final { scan-assembler-not {vmv} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-6.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-6.c
new file mode 100644
index 00000000000..67fd655ece2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-6.c
@@ -0,0 +1,132 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f1:
+**	vsetivli\tzero,4,e32,m1,tu,ma
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vxor\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**	vxor\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**	vse32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f1 (void * in, void *out, int32_t x)
+{
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_tu (v, in, 4);
+    vint32m1_t v3 = __riscv_vxor_vx_i32m1 (v2, -16, 4);
+    vint32m1_t v4 = __riscv_vxor_vx_i32m1_tu (v3, v2, -16, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f2:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e32,m1,ta,ma
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vxor\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**	vxor\.vi\tv[1-9][0-9]?,\s*v[0-9]+,\s*-16,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f2 (void * in, void *out, int32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_m (mask, in, 4);
+    vint32m1_t v3 = __riscv_vxor_vx_i32m1 (v2, -16, 4);
+    vint32m1_t v4 = __riscv_vxor_vx_i32m1_m (mask, v3, -16, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f3:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e32,m1,tu,mu
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vxor\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**	vxor\.vi\tv[1-9][0-9]?,\s*v[0-9]+,\s*-16,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f3 (void * in, void *out, int32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_tumu (mask, v, in, 4);
+    vint32m1_t v3 = __riscv_vxor_vx_i32m1 (v2, -16, 4);
+    vint32m1_t v4 = __riscv_vxor_vx_i32m1_tumu (mask, v3, v2, -16, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f4:
+**	vsetivli\tzero,4,e8,mf8,tu,ma
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vxor\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**	vxor\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**	vse8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f4 (void * in, void *out, int8_t x)
+{
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tu (v, in, 4);
+    vint8mf8_t v3 = __riscv_vxor_vx_i8mf8 (v2, -16, 4);
+    vint8mf8_t v4 = __riscv_vxor_vx_i8mf8_tu (v3, v2, -16, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
+
+/*
+** f5:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e8,mf8,ta,ma
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vxor\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**	vxor\.vi\tv[1-9][0-9]?,\s*v[0-9]+,\s*-16,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f5 (void * in, void *out, int8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_m (mask, in, 4);
+    vint8mf8_t v3 = __riscv_vxor_vx_i8mf8 (v2, -16, 4);
+    vint8mf8_t v4 = __riscv_vxor_vx_i8mf8_m (mask, v3, -16, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
+
+/*
+** f6:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e8,mf8,tu,mu
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vxor\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**	vxor\.vi\tv[1-9][0-9]?,\s*v[0-9]+,\s*-16,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f6 (void * in, void *out, int8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tumu (mask, v, in, 4);
+    vint8mf8_t v3 = __riscv_vxor_vx_i8mf8 (v2, -16, 4);
+    vint8mf8_t v4 = __riscv_vxor_vx_i8mf8_tumu (mask, v3, v2, -16, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-60.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-60.c
new file mode 100644
index 00000000000..f7a5f3e30c0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-60.c
@@ -0,0 +1,16 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32 -O3" } */
+#include "riscv_vector.h"
+
+void f (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vor_vx_i64m1 (v2, 0xAAAAAAAAAAAAAAAA, 4);
+  vint64m1_t v4 = __riscv_vor_vx_i64m1_tu (v3, v2, 0xAAAAAAAAAAAAAAAA, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/* { dg-final { scan-assembler-times {vlse64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*zero} 1 } } */
+/* { dg-final { scan-assembler-times {vor\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 2 } } */
+/* { dg-final { scan-assembler-not {vmv} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-61.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-61.c
new file mode 100644
index 00000000000..17aeeb6fca7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-61.c
@@ -0,0 +1,16 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32 -O3" } */
+#include "riscv_vector.h"
+
+void f (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vor_vx_i64m1 (v2, x, 4);
+  vint64m1_t v4 = __riscv_vor_vx_i64m1_tu (v3, v2, x, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/* { dg-final { scan-assembler-times {vlse64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*zero} 1 } } */
+/* { dg-final { scan-assembler-times {vor\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 2 } } */
+/* { dg-final { scan-assembler-not {vmv} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-62.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-62.c
new file mode 100644
index 00000000000..350697d764d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-62.c
@@ -0,0 +1,18 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32 -O3" } */
+#include "riscv_vector.h"
+
+void f (void * in, void *out, int64_t x, int n)
+{
+  for (int i = 0; i < n; i++) {
+    vint64m1_t v = __riscv_vle64_v_i64m1 (in + i + 1, 4);
+    vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + i + 2, 4);
+    vint64m1_t v3 = __riscv_vor_vx_i64m1 (v2, x, 4);
+    vint64m1_t v4 = __riscv_vor_vx_i64m1_tu (v3, v2, x, 4);
+    __riscv_vse64_v_i64m1 (out + i + 2, v4, 4);
+  }
+}
+
+/* { dg-final { scan-assembler-times {vlse64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*zero\s+\.L[0-9]+\:\s+} 1 } } */
+/* { dg-final { scan-assembler-times {vor\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 2 } } */
+/* { dg-final { scan-assembler-not {vmv} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-63.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-63.c
new file mode 100644
index 00000000000..0d02e95c716
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-63.c
@@ -0,0 +1,123 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f0:
+**  ...
+**	vxor\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**	vxor\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**  ...
+**	ret
+*/
+void f0 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vxor_vx_i64m1 (v2, -16, 4);
+  vint64m1_t v4 = __riscv_vxor_vx_i64m1 (v3, -16, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f1:
+**  ...
+**	vxor\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**	vxor\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**  ...
+**	ret
+*/
+void f1 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vxor_vx_i64m1 (v2, 15, 4);
+  vint64m1_t v4 = __riscv_vxor_vx_i64m1 (v3, 15, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f2:
+**  ...
+**	vxor\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vxor\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**  ...
+**	ret
+*/
+void f2 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vxor_vx_i64m1 (v2, 16, 4);
+  vint64m1_t v4 = __riscv_vxor_vx_i64m1 (v3, 16, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f3:
+**  ...
+**	vxor\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vxor\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**  ...
+**	ret
+*/
+void f3 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vxor_vx_i64m1 (v2, 0xAAAAAAAA, 4);
+  vint64m1_t v4 = __riscv_vxor_vx_i64m1 (v3, 0xAAAAAAAA, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f4:
+**  ...
+**	vxor\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vxor\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**  ...
+**	ret
+*/
+void f4 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vxor_vx_i64m1 (v2, 0xAAAAAAAAAAAAAAAA, 4);
+  vint64m1_t v4 = __riscv_vxor_vx_i64m1 (v3, 0xAAAAAAAAAAAAAAAA, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f5:
+**  ...
+**	vxor\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vxor\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**  ...
+**	ret
+*/
+void f5 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vxor_vx_i64m1 (v2, 0xAAAAAAAAAAAAAAAA, 4);
+  vint64m1_t v4 = __riscv_vxor_vx_i64m1 (v3, 0xAAAAAAAAAAAAAAAA, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f6:
+**  ...
+**	vxor\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vxor\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**  ...
+**	ret
+*/
+void f6 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vxor_vx_i64m1 (v2, x, 4);
+  vint64m1_t v4 = __riscv_vxor_vx_i64m1 (v3, x, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-64.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-64.c
new file mode 100644
index 00000000000..b424a49f55b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-64.c
@@ -0,0 +1,72 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32 -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f0:
+**  ...
+**	vxor\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**	vxor\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**  ...
+**	ret
+*/
+void f0 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vxor_vx_i64m1 (v2, -16, 4);
+  vint64m1_t v4 = __riscv_vxor_vx_i64m1 (v3, -16, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f1:
+**  ...
+**	vxor\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**	vxor\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**  ...
+**	ret
+*/
+void f1 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vxor_vx_i64m1 (v2, 15, 4);
+  vint64m1_t v4 = __riscv_vxor_vx_i64m1 (v3, 15, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f2:
+**  ...
+**	vxor\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vxor\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**  ...
+**	ret
+*/
+void f2 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vxor_vx_i64m1 (v2, 16, 4);
+  vint64m1_t v4 = __riscv_vxor_vx_i64m1 (v3, 16, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f3:
+**  ...
+**	vxor\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vxor\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**  ...
+**	ret
+*/
+void f3 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vxor_vx_i64m1 (v2, 0xAAAAAAAA, 4);
+  vint64m1_t v4 = __riscv_vxor_vx_i64m1 (v3, 0xAAAAAAAA, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-65.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-65.c
new file mode 100644
index 00000000000..9ab9134e66d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-65.c
@@ -0,0 +1,16 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32 -O3" } */
+#include "riscv_vector.h"
+
+void f (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vxor_vx_i64m1 (v2, 0xAAAAAAAA, 4);
+  vint64m1_t v4 = __riscv_vxor_vx_i64m1_tu (v3, v2, 0xAAAAAAAA, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/* { dg-final { scan-assembler-times {vlse64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*zero} 1 } } */
+/* { dg-final { scan-assembler-times {vxor\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 2 } } */
+/* { dg-final { scan-assembler-not {vmv} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-66.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-66.c
new file mode 100644
index 00000000000..0792458e53d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-66.c
@@ -0,0 +1,16 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32 -O3" } */
+#include "riscv_vector.h"
+
+void f (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vxor_vx_i64m1 (v2, 0xAAAAAAAAAAAAAAAA, 4);
+  vint64m1_t v4 = __riscv_vxor_vx_i64m1_tu (v3, v2, 0xAAAAAAAAAAAAAAAA, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/* { dg-final { scan-assembler-times {vlse64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*zero} 1 } } */
+/* { dg-final { scan-assembler-times {vxor\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 2 } } */
+/* { dg-final { scan-assembler-not {vmv} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-67.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-67.c
new file mode 100644
index 00000000000..da2cf994b0c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-67.c
@@ -0,0 +1,16 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32 -O3" } */
+#include "riscv_vector.h"
+
+void f (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vxor_vx_i64m1 (v2, x, 4);
+  vint64m1_t v4 = __riscv_vxor_vx_i64m1_tu (v3, v2, x, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/* { dg-final { scan-assembler-times {vlse64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*zero} 1 } } */
+/* { dg-final { scan-assembler-times {vxor\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 2 } } */
+/* { dg-final { scan-assembler-not {vmv} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-68.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-68.c
new file mode 100644
index 00000000000..0f138c5d3c6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-68.c
@@ -0,0 +1,18 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32 -O3" } */
+#include "riscv_vector.h"
+
+void f (void * in, void *out, int64_t x, int n)
+{
+  for (int i = 0; i < n; i++) {
+    vint64m1_t v = __riscv_vle64_v_i64m1 (in + i + 1, 4);
+    vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + i + 2, 4);
+    vint64m1_t v3 = __riscv_vxor_vx_i64m1 (v2, x, 4);
+    vint64m1_t v4 = __riscv_vxor_vx_i64m1_tu (v3, v2, x, 4);
+    __riscv_vse64_v_i64m1 (out + i + 2, v4, 4);
+  }
+}
+
+/* { dg-final { scan-assembler-times {vlse64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*zero\s+\.L[0-9]+\:\s+} 1 } } */
+/* { dg-final { scan-assembler-times {vxor\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 2 } } */
+/* { dg-final { scan-assembler-not {vmv} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-69.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-69.c
new file mode 100644
index 00000000000..2761f2275e5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-69.c
@@ -0,0 +1,123 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f0:
+**  ...
+**	vmax\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmax\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**  ...
+**	ret
+*/
+void f0 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vmax_vx_i64m1 (v2, -16, 4);
+  vint64m1_t v4 = __riscv_vmax_vx_i64m1 (v3, -16, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f1:
+**  ...
+**	vmax\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmax\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**  ...
+**	ret
+*/
+void f1 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vmax_vx_i64m1 (v2, 15, 4);
+  vint64m1_t v4 = __riscv_vmax_vx_i64m1 (v3, 15, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f2:
+**  ...
+**	vmax\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmax\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**  ...
+**	ret
+*/
+void f2 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vmax_vx_i64m1 (v2, 16, 4);
+  vint64m1_t v4 = __riscv_vmax_vx_i64m1 (v3, 16, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f3:
+**  ...
+**	vmax\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmax\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**  ...
+**	ret
+*/
+void f3 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vmax_vx_i64m1 (v2, 0xAAAAAAAA, 4);
+  vint64m1_t v4 = __riscv_vmax_vx_i64m1 (v3, 0xAAAAAAAA, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f4:
+**  ...
+**	vmax\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmax\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**  ...
+**	ret
+*/
+void f4 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vmax_vx_i64m1 (v2, 0xAAAAAAAAAAAAAAAA, 4);
+  vint64m1_t v4 = __riscv_vmax_vx_i64m1 (v3, 0xAAAAAAAAAAAAAAAA, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f5:
+**  ...
+**	vmax\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmax\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**  ...
+**	ret
+*/
+void f5 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vmax_vx_i64m1 (v2, 0xAAAAAAAAAAAAAAAA, 4);
+  vint64m1_t v4 = __riscv_vmax_vx_i64m1 (v3, 0xAAAAAAAAAAAAAAAA, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f6:
+**  ...
+**	vmax\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmax\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**  ...
+**	ret
+*/
+void f6 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vmax_vx_i64m1 (v2, x, 4);
+  vint64m1_t v4 = __riscv_vmax_vx_i64m1 (v3, x, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-7.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-7.c
new file mode 100644
index 00000000000..71a320a1619
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-7.c
@@ -0,0 +1,132 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f1:
+**	vsetivli\tzero,4,e32,m1,tu,ma
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vxor\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**	vxor\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**	vse32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f1 (void * in, void *out, int32_t x)
+{
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_tu (v, in, 4);
+    vint32m1_t v3 = __riscv_vxor_vx_i32m1 (v2, 15, 4);
+    vint32m1_t v4 = __riscv_vxor_vx_i32m1_tu (v3, v2, 15, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f2:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e32,m1,ta,ma
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vxor\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**	vxor\.vi\tv[1-9][0-9]?,\s*v[0-9]+,\s*15,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f2 (void * in, void *out, int32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_m (mask, in, 4);
+    vint32m1_t v3 = __riscv_vxor_vx_i32m1 (v2, 15, 4);
+    vint32m1_t v4 = __riscv_vxor_vx_i32m1_m (mask, v3, 15, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f3:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e32,m1,tu,mu
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vxor\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**	vxor\.vi\tv[1-9][0-9]?,\s*v[0-9]+,\s*15,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f3 (void * in, void *out, int32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_tumu (mask, v, in, 4);
+    vint32m1_t v3 = __riscv_vxor_vx_i32m1 (v2, 15, 4);
+    vint32m1_t v4 = __riscv_vxor_vx_i32m1_tumu (mask, v3, v2, 15, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f4:
+**	vsetivli\tzero,4,e8,mf8,tu,ma
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vxor\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**	vxor\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**	vse8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f4 (void * in, void *out, int8_t x)
+{
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tu (v, in, 4);
+    vint8mf8_t v3 = __riscv_vxor_vx_i8mf8 (v2, 15, 4);
+    vint8mf8_t v4 = __riscv_vxor_vx_i8mf8_tu (v3, v2, 15, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
+
+/*
+** f5:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e8,mf8,ta,ma
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vxor\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**	vxor\.vi\tv[1-9][0-9]?,\s*v[0-9]+,\s*15,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f5 (void * in, void *out, int8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_m (mask, in, 4);
+    vint8mf8_t v3 = __riscv_vxor_vx_i8mf8 (v2, 15, 4);
+    vint8mf8_t v4 = __riscv_vxor_vx_i8mf8_m (mask, v3, 15, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
+
+/*
+** f6:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e8,mf8,tu,mu
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vxor\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**	vxor\.vi\tv[1-9][0-9]?,\s*v[0-9]+,\s*15,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f6 (void * in, void *out, int8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tumu (mask, v, in, 4);
+    vint8mf8_t v3 = __riscv_vxor_vx_i8mf8 (v2, 15, 4);
+    vint8mf8_t v4 = __riscv_vxor_vx_i8mf8_tumu (mask, v3, v2, 15, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-70.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-70.c
new file mode 100644
index 00000000000..e06d7f48a8a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-70.c
@@ -0,0 +1,72 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32 -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f0:
+**  ...
+**	vmax\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmax\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**  ...
+**	ret
+*/
+void f0 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vmax_vx_i64m1 (v2, -16, 4);
+  vint64m1_t v4 = __riscv_vmax_vx_i64m1 (v3, -16, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f1:
+**  ...
+**	vmax\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmax\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**  ...
+**	ret
+*/
+void f1 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vmax_vx_i64m1 (v2, 15, 4);
+  vint64m1_t v4 = __riscv_vmax_vx_i64m1 (v3, 15, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f2:
+**  ...
+**	vmax\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmax\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**  ...
+**	ret
+*/
+void f2 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vmax_vx_i64m1 (v2, 16, 4);
+  vint64m1_t v4 = __riscv_vmax_vx_i64m1 (v3, 16, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f3:
+**  ...
+**	vmax\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmax\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**  ...
+**	ret
+*/
+void f3 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vmax_vx_i64m1 (v2, 0xAAAAAAA, 4);
+  vint64m1_t v4 = __riscv_vmax_vx_i64m1 (v3, 0xAAAAAAA, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-71.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-71.c
new file mode 100644
index 00000000000..e4408be6a81
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-71.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32 -O3" } */
+#include "riscv_vector.h"
+
+void f (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vmax_vx_i64m1 (v2, 0xAAAAAAAA, 4);
+  vint64m1_t v4 = __riscv_vmax_vx_i64m1_tu (v3, v2, 0xAAAAAAAA, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/* { dg-final { scan-assembler-times {vlse64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*zero} 1 } } */
+/* { dg-final { scan-assembler-times {vmax\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 2 } } */
+/* { dg-final { scan-assembler-not {vmv} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-72.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-72.c
new file mode 100644
index 00000000000..604b69a2e9b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-72.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32 -O3" } */
+#include "riscv_vector.h"
+
+void f (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vmax_vx_i64m1 (v2, 0xAAAAAAAAAAAAAAAA, 4);
+  vint64m1_t v4 = __riscv_vmax_vx_i64m1_tu (v3, v2, 0xAAAAAAAAAAAAAAAA, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/* { dg-final { scan-assembler-times {vlse64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*zero} 1 } } */
+/* { dg-final { scan-assembler-times {vmax\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 2 } } */
+/* { dg-final { scan-assembler-not {vmv} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-73.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-73.c
new file mode 100644
index 00000000000..42f0d5f30e4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-73.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32 -O3" } */
+#include "riscv_vector.h"
+
+void f (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vmax_vx_i64m1 (v2, x, 4);
+  vint64m1_t v4 = __riscv_vmax_vx_i64m1_tu (v3, v2, x, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/* { dg-final { scan-assembler-times {vlse64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*zero} 1 } } */
+/* { dg-final { scan-assembler-times {vmax\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 2 } } */
+/* { dg-final { scan-assembler-not {vmv} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-74.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-74.c
new file mode 100644
index 00000000000..f4cbf095357
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-74.c
@@ -0,0 +1,18 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32 -O3" } */
+#include "riscv_vector.h"
+
+void f (void * in, void *out, int64_t x, int n)
+{
+  for (int i = 0; i < n; i++) {
+    vint64m1_t v = __riscv_vle64_v_i64m1 (in + i + 1, 4);
+    vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + i + 2, 4);
+    vint64m1_t v3 = __riscv_vmax_vx_i64m1 (v2, x, 4);
+    vint64m1_t v4 = __riscv_vmax_vx_i64m1_tu (v3, v2, x, 4);
+    __riscv_vse64_v_i64m1 (out + i + 2, v4, 4);
+  }
+}
+
+/* { dg-final { scan-assembler-times {vlse64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*zero\s+\.L[0-9]+\:\s+} 1 } } */
+/* { dg-final { scan-assembler-times {vmax\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 2 } } */
+/* { dg-final { scan-assembler-not {vmv} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-75.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-75.c
new file mode 100644
index 00000000000..ebcefc3794c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-75.c
@@ -0,0 +1,72 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32 -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f0:
+**  ...
+**	vmin\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmin\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**  ...
+**	ret
+*/
+void f0 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vmin_vx_i64m1 (v2, -16, 4);
+  vint64m1_t v4 = __riscv_vmin_vx_i64m1 (v3, -16, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f1:
+**  ...
+**	vmin\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmin\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**  ...
+**	ret
+*/
+void f1 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vmin_vx_i64m1 (v2, 15, 4);
+  vint64m1_t v4 = __riscv_vmin_vx_i64m1 (v3, 15, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f2:
+**  ...
+**	vmin\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmin\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**  ...
+**	ret
+*/
+void f2 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vmin_vx_i64m1 (v2, 16, 4);
+  vint64m1_t v4 = __riscv_vmin_vx_i64m1 (v3, 16, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f3:
+**  ...
+**	vmin\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmin\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**  ...
+**	ret
+*/
+void f3 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vmin_vx_i64m1 (v2, 0xAAAAAAA, 4);
+  vint64m1_t v4 = __riscv_vmin_vx_i64m1 (v3, 0xAAAAAAA, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-76.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-76.c
new file mode 100644
index 00000000000..fcb8dfa5e08
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-76.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32 -O3" } */
+#include "riscv_vector.h"
+
+void f (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vmin_vx_i64m1 (v2, 0xAAAAAAAA, 4);
+  vint64m1_t v4 = __riscv_vmin_vx_i64m1_tu (v3, v2, 0xAAAAAAAA, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/* { dg-final { scan-assembler-times {vlse64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*zero} 1 } } */
+/* { dg-final { scan-assembler-times {vmin\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 2 } } */
+/* { dg-final { scan-assembler-not {vmv} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-77.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-77.c
new file mode 100644
index 00000000000..6cffe86c418
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-77.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32 -O3" } */
+#include "riscv_vector.h"
+
+void f (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vmin_vx_i64m1 (v2, 0xAAAAAAAAAAAAAAAA, 4);
+  vint64m1_t v4 = __riscv_vmin_vx_i64m1_tu (v3, v2, 0xAAAAAAAAAAAAAAAA, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/* { dg-final { scan-assembler-times {vlse64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*zero} 1 } } */
+/* { dg-final { scan-assembler-times {vmin\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 2 } } */
+/* { dg-final { scan-assembler-not {vmv} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-78.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-78.c
new file mode 100644
index 00000000000..669fcbfa44c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-78.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32 -O3" } */
+#include "riscv_vector.h"
+
+void f (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vmin_vx_i64m1 (v2, x, 4);
+  vint64m1_t v4 = __riscv_vmin_vx_i64m1_tu (v3, v2, x, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/* { dg-final { scan-assembler-times {vlse64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*zero} 1 } } */
+/* { dg-final { scan-assembler-times {vmin\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 2 } } */
+/* { dg-final { scan-assembler-not {vmv} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-79.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-79.c
new file mode 100644
index 00000000000..d606078e85f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-79.c
@@ -0,0 +1,18 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32 -O3" } */
+#include "riscv_vector.h"
+
+void f (void * in, void *out, int64_t x, int n)
+{
+  for (int i = 0; i < n; i++) {
+    vint64m1_t v = __riscv_vle64_v_i64m1 (in + i + 1, 4);
+    vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + i + 2, 4);
+    vint64m1_t v3 = __riscv_vmin_vx_i64m1 (v2, x, 4);
+    vint64m1_t v4 = __riscv_vmin_vx_i64m1_tu (v3, v2, x, 4);
+    __riscv_vse64_v_i64m1 (out + i + 2, v4, 4);
+  }
+}
+
+/* { dg-final { scan-assembler-times {vlse64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*zero\s+\.L[0-9]+\:\s+} 1 } } */
+/* { dg-final { scan-assembler-times {vmin\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 2 } } */
+/* { dg-final { scan-assembler-not {vmv} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-8.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-8.c
new file mode 100644
index 00000000000..797abbd2cc0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-8.c
@@ -0,0 +1,160 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f1:
+**  ...
+**	vsetivli\tzero,4,e32,m1,tu,ma
+**  ...
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vxor\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vxor\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vse32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f1 (void * in, void *out, int32_t x)
+{
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_tu (v, in, 4);
+    vint32m1_t v3 = __riscv_vxor_vx_i32m1 (v2, 16, 4);
+    vint32m1_t v4 = __riscv_vxor_vx_i32m1_tu (v3, v2, 16, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f2:
+**  ...
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**  ...
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vsetivli\tzero,4,e32,m1,ta,ma
+**  ...
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**  ...
+**	vxor\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vxor\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f2 (void * in, void *out, int32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_m (mask, in, 4);
+    vint32m1_t v3 = __riscv_vxor_vx_i32m1 (v2, 16, 4);
+    vint32m1_t v4 = __riscv_vxor_vx_i32m1_m (mask, v3, 16, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f3:
+**  ...
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**  ...
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vsetivli\tzero,4,e32,m1,tu,mu
+**  ...
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**  ...
+**	vxor\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vxor\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f3 (void * in, void *out, int32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_tumu (mask, v, in, 4);
+    vint32m1_t v3 = __riscv_vxor_vx_i32m1 (v2, 16, 4);
+    vint32m1_t v4 = __riscv_vxor_vx_i32m1_tumu (mask, v3, v2, 16, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f4:
+**  ...
+**	vsetivli\tzero,4,e8,mf8,tu,ma
+**  ...
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vxor\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vxor\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vse8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f4 (void * in, void *out, int8_t x)
+{
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tu (v, in, 4);
+    vint8mf8_t v3 = __riscv_vxor_vx_i8mf8 (v2, 16, 4);
+    vint8mf8_t v4 = __riscv_vxor_vx_i8mf8_tu (v3, v2, 16, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
+
+/*
+** f5:
+**  ...
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**  ...
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vsetivli\tzero,4,e8,mf8,ta,ma
+**  ...
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**  ...
+**	vxor\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vxor\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f5 (void * in, void *out, int8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_m (mask, in, 4);
+    vint8mf8_t v3 = __riscv_vxor_vx_i8mf8 (v2, 16, 4);
+    vint8mf8_t v4 = __riscv_vxor_vx_i8mf8_m (mask, v3, 16, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
+
+/*
+** f6:
+**  ...
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**  ...
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**  ...
+**	vsetivli\tzero,4,e8,mf8,tu,mu
+**  ...
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vxor\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vxor\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f6 (void * in, void *out, int8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tumu (mask, v, in, 4);
+    vint8mf8_t v3 = __riscv_vxor_vx_i8mf8 (v2, 16, 4);
+    vint8mf8_t v4 = __riscv_vxor_vx_i8mf8_tumu (mask, v3, v2, 16, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-80.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-80.c
new file mode 100644
index 00000000000..d5316e0c1e3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-80.c
@@ -0,0 +1,72 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32 -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f0:
+**  ...
+**	vmaxu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmaxu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**  ...
+**	ret
+*/
+void f0 (void * in, void *out, uint64_t x, int n)
+{
+  vuint64m1_t v = __riscv_vle64_v_u64m1 (in + 1, 4);
+  vuint64m1_t v2 = __riscv_vle64_v_u64m1_tu (v, in + 2, 4);
+  vuint64m1_t v3 = __riscv_vmaxu_vx_u64m1 (v2, -16, 4);
+  vuint64m1_t v4 = __riscv_vmaxu_vx_u64m1 (v3, -16, 4);
+  __riscv_vse64_v_u64m1 (out + 2, v4, 4);
+}
+
+/*
+** f1:
+**  ...
+**	vmaxu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmaxu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**  ...
+**	ret
+*/
+void f1 (void * in, void *out, uint64_t x, int n)
+{
+  vuint64m1_t v = __riscv_vle64_v_u64m1 (in + 1, 4);
+  vuint64m1_t v2 = __riscv_vle64_v_u64m1_tu (v, in + 2, 4);
+  vuint64m1_t v3 = __riscv_vmaxu_vx_u64m1 (v2, 15, 4);
+  vuint64m1_t v4 = __riscv_vmaxu_vx_u64m1 (v3, 15, 4);
+  __riscv_vse64_v_u64m1 (out + 2, v4, 4);
+}
+
+/*
+** f2:
+**  ...
+**	vmaxu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmaxu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**  ...
+**	ret
+*/
+void f2 (void * in, void *out, uint64_t x, int n)
+{
+  vuint64m1_t v = __riscv_vle64_v_u64m1 (in + 1, 4);
+  vuint64m1_t v2 = __riscv_vle64_v_u64m1_tu (v, in + 2, 4);
+  vuint64m1_t v3 = __riscv_vmaxu_vx_u64m1 (v2, 16, 4);
+  vuint64m1_t v4 = __riscv_vmaxu_vx_u64m1 (v3, 16, 4);
+  __riscv_vse64_v_u64m1 (out + 2, v4, 4);
+}
+
+/*
+** f3:
+**  ...
+**	vmaxu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vmaxu\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**  ...
+**	ret
+*/
+void f3 (void * in, void *out, uint64_t x, int n)
+{
+  vuint64m1_t v = __riscv_vle64_v_u64m1 (in + 1, 4);
+  vuint64m1_t v2 = __riscv_vle64_v_u64m1_tu (v, in + 2, 4);
+  vuint64m1_t v3 = __riscv_vmaxu_vx_u64m1 (v2, 0xAAAAAAA, 4);
+  vuint64m1_t v4 = __riscv_vmaxu_vx_u64m1 (v3, 0xAAAAAAA, 4);
+  __riscv_vse64_v_u64m1 (out + 2, v4, 4);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-81.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-81.c
new file mode 100644
index 00000000000..5cd8e3582fd
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-81.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32 -O3" } */
+#include "riscv_vector.h"
+
+void f (void * in, void *out, uint64_t x, int n)
+{
+  vuint64m1_t v = __riscv_vle64_v_u64m1 (in + 1, 4);
+  vuint64m1_t v2 = __riscv_vle64_v_u64m1_tu (v, in + 2, 4);
+  vuint64m1_t v3 = __riscv_vmaxu_vx_u64m1 (v2, 0xAAAAAAAA, 4);
+  vuint64m1_t v4 = __riscv_vmaxu_vx_u64m1_tu (v3, v2, 0xAAAAAAAA, 4);
+  __riscv_vse64_v_u64m1 (out + 2, v4, 4);
+}
+
+/* { dg-final { scan-assembler-times {vlse64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*zero} 1 } } */
+/* { dg-final { scan-assembler-times {vmaxu\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 2 } } */
+/* { dg-final { scan-assembler-not {vmv} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-82.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-82.c
new file mode 100644
index 00000000000..ad27f60ef91
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-82.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32 -O3" } */
+#include "riscv_vector.h"
+
+void f (void * in, void *out, uint64_t x, int n)
+{
+  vuint64m1_t v = __riscv_vle64_v_u64m1 (in + 1, 4);
+  vuint64m1_t v2 = __riscv_vle64_v_u64m1_tu (v, in + 2, 4);
+  vuint64m1_t v3 = __riscv_vmaxu_vx_u64m1 (v2, 0xAAAAAAAAAAAAAAAA, 4);
+  vuint64m1_t v4 = __riscv_vmaxu_vx_u64m1_tu (v3, v2, 0xAAAAAAAAAAAAAAAA, 4);
+  __riscv_vse64_v_u64m1 (out + 2, v4, 4);
+}
+
+/* { dg-final { scan-assembler-times {vlse64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*zero} 1 } } */
+/* { dg-final { scan-assembler-times {vmaxu\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 2 } } */
+/* { dg-final { scan-assembler-not {vmv} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-83.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-83.c
new file mode 100644
index 00000000000..1606f882703
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-83.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32 -O3" } */
+#include "riscv_vector.h"
+
+void f (void * in, void *out, uint64_t x, int n)
+{
+  vuint64m1_t v = __riscv_vle64_v_u64m1 (in + 1, 4);
+  vuint64m1_t v2 = __riscv_vle64_v_u64m1_tu (v, in + 2, 4);
+  vuint64m1_t v3 = __riscv_vmaxu_vx_u64m1 (v2, x, 4);
+  vuint64m1_t v4 = __riscv_vmaxu_vx_u64m1_tu (v3, v2, x, 4);
+  __riscv_vse64_v_u64m1 (out + 2, v4, 4);
+}
+
+/* { dg-final { scan-assembler-times {vlse64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*zero} 1 } } */
+/* { dg-final { scan-assembler-times {vmaxu\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 2 } } */
+/* { dg-final { scan-assembler-not {vmv} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-84.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-84.c
new file mode 100644
index 00000000000..bca55b239f9
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-84.c
@@ -0,0 +1,18 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32 -O3" } */
+#include "riscv_vector.h"
+
+void f (void * in, void *out, uint64_t x, int n)
+{
+  for (int i = 0; i < n; i++) {
+    vuint64m1_t v = __riscv_vle64_v_u64m1 (in + i + 1, 4);
+    vuint64m1_t v2 = __riscv_vle64_v_u64m1_tu (v, in + i + 2, 4);
+    vuint64m1_t v3 = __riscv_vmaxu_vx_u64m1 (v2, x, 4);
+    vuint64m1_t v4 = __riscv_vmaxu_vx_u64m1_tu (v3, v2, x, 4);
+    __riscv_vse64_v_u64m1 (out + i + 2, v4, 4);
+  }
+}
+
+/* { dg-final { scan-assembler-times {vlse64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*zero\s+\.L[0-9]+\:\s+} 1 } } */
+/* { dg-final { scan-assembler-times {vmaxu\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 2 } } */
+/* { dg-final { scan-assembler-not {vmv} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-85.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-85.c
new file mode 100644
index 00000000000..0a0dece934f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-85.c
@@ -0,0 +1,123 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f0:
+**  ...
+**	vadd\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**	vadd\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**  ...
+**	ret
+*/
+void f0 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vsub_vx_i64m1 (v2, -15, 4);
+  vint64m1_t v4 = __riscv_vsub_vx_i64m1 (v3, -15, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f1:
+**  ...
+**	vadd\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**	vadd\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**  ...
+**	ret
+*/
+void f1 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vsub_vx_i64m1 (v2, 16, 4);
+  vint64m1_t v4 = __riscv_vsub_vx_i64m1 (v3, 16, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f2:
+**  ...
+**	vsub\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vsub\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**  ...
+**	ret
+*/
+void f2 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vsub_vx_i64m1 (v2, 17, 4);
+  vint64m1_t v4 = __riscv_vsub_vx_i64m1 (v3, 17, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f3:
+**  ...
+**	vsub\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vsub\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**  ...
+**	ret
+*/
+void f3 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vsub_vx_i64m1 (v2, 0xAAAAAAAA, 4);
+  vint64m1_t v4 = __riscv_vsub_vx_i64m1 (v3, 0xAAAAAAAA, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f4:
+**  ...
+**	vsub\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vsub\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**  ...
+**	ret
+*/
+void f4 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vsub_vx_i64m1 (v2, 0xAAAAAAAAAAAAAAAA, 4);
+  vint64m1_t v4 = __riscv_vsub_vx_i64m1 (v3, 0xAAAAAAAAAAAAAAAA, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f5:
+**  ...
+**	vsub\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vsub\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**  ...
+**	ret
+*/
+void f5 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vsub_vx_i64m1 (v2, 0xAAAAAAAAAAAAAAAA, 4);
+  vint64m1_t v4 = __riscv_vsub_vx_i64m1 (v3, 0xAAAAAAAAAAAAAAAA, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f6:
+**  ...
+**	vsub\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vsub\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**  ...
+**	ret
+*/
+void f6 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vsub_vx_i64m1 (v2, x, 4);
+  vint64m1_t v4 = __riscv_vsub_vx_i64m1 (v3, x, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-86.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-86.c
new file mode 100644
index 00000000000..ec73670cdca
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-86.c
@@ -0,0 +1,72 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32 -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f0:
+**  ...
+**	vadd\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**	vadd\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**  ...
+**	ret
+*/
+void f0 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vsub_vx_i64m1 (v2, -15, 4);
+  vint64m1_t v4 = __riscv_vsub_vx_i64m1 (v3, -15, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f1:
+**  ...
+**	vadd\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**	vadd\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**  ...
+**	ret
+*/
+void f1 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vsub_vx_i64m1 (v2, 16, 4);
+  vint64m1_t v4 = __riscv_vsub_vx_i64m1 (v3, 16, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f2:
+**  ...
+**	vsub\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vsub\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**  ...
+**	ret
+*/
+void f2 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vsub_vx_i64m1 (v2, 17, 4);
+  vint64m1_t v4 = __riscv_vsub_vx_i64m1 (v3, 17, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f3:
+**  ...
+**	vsub\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vsub\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**  ...
+**	ret
+*/
+void f3 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vsub_vx_i64m1 (v2, 0xAAAAAAA, 4);
+  vint64m1_t v4 = __riscv_vsub_vx_i64m1 (v3, 0xAAAAAAA, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-87.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-87.c
new file mode 100644
index 00000000000..b0c35c1c76a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-87.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32 -O3" } */
+#include "riscv_vector.h"
+
+void f (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vsub_vx_i64m1 (v2, 0xAAAAAAAA, 4);
+  vint64m1_t v4 = __riscv_vsub_vx_i64m1_tu (v3, v2, 0xAAAAAAAA, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/* { dg-final { scan-assembler-times {vlse64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*zero} 1 } } */
+/* { dg-final { scan-assembler-times {vsub\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 2 } } */
+/* { dg-final { scan-assembler-not {vmv} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-88.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-88.c
new file mode 100644
index 00000000000..59b820459d8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-88.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32 -O3" } */
+#include "riscv_vector.h"
+
+void f (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vsub_vx_i64m1 (v2, 0xAAAAAAAAAAAAAAAA, 4);
+  vint64m1_t v4 = __riscv_vsub_vx_i64m1_tu (v3, v2, 0xAAAAAAAAAAAAAAAA, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/* { dg-final { scan-assembler-times {vlse64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*zero} 1 } } */
+/* { dg-final { scan-assembler-times {vsub\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 2 } } */
+/* { dg-final { scan-assembler-not {vmv} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-89.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-89.c
new file mode 100644
index 00000000000..9f57c9ca89d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-89.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32 -O3" } */
+#include "riscv_vector.h"
+
+void f (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vsub_vx_i64m1 (v2, x, 4);
+  vint64m1_t v4 = __riscv_vsub_vx_i64m1_tu (v3, v2, x, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/* { dg-final { scan-assembler-times {vlse64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*zero} 1 } } */
+/* { dg-final { scan-assembler-times {vsub\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 2 } } */
+/* { dg-final { scan-assembler-not {vmv} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-9.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-9.c
new file mode 100644
index 00000000000..ce786e15244
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-9.c
@@ -0,0 +1,132 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f1:
+**	vsetivli\tzero,4,e32,m1,tu,ma
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vand\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vand\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vse32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f1 (void * in, void *out, int32_t x)
+{
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_tu (v, in, 4);
+    vint32m1_t v3 = __riscv_vand_vx_i32m1 (v2, x, 4);
+    vint32m1_t v4 = __riscv_vand_vx_i32m1_tu (v3, v2, x, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f2:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e32,m1,ta,ma
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vand\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vand\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f2 (void * in, void *out, int32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_m (mask, in, 4);
+    vint32m1_t v3 = __riscv_vand_vx_i32m1 (v2, x, 4);
+    vint32m1_t v4 = __riscv_vand_vx_i32m1_m (mask, v3, x, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f3:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e32,m1,tu,mu
+**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vand\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vand\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f3 (void * in, void *out, int32_t x)
+{
+    vbool32_t mask = *(vbool32_t*)in;
+    asm volatile ("":::"memory");
+    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+    vint32m1_t v2 = __riscv_vle32_v_i32m1_tumu (mask, v, in, 4);
+    vint32m1_t v3 = __riscv_vand_vx_i32m1 (v2, x, 4);
+    vint32m1_t v4 = __riscv_vand_vx_i32m1_tumu (mask, v3, v2, x, 4);
+    __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f4:
+**	vsetivli\tzero,4,e8,mf8,tu,ma
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vand\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vand\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vse8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f4 (void * in, void *out, int8_t x)
+{
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tu (v, in, 4);
+    vint8mf8_t v3 = __riscv_vand_vx_i8mf8 (v2, x, 4);
+    vint8mf8_t v4 = __riscv_vand_vx_i8mf8_tu (v3, v2, x, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
+
+/*
+** f5:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e8,mf8,ta,ma
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vand\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vand\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f5 (void * in, void *out, int8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_m (mask, in, 4);
+    vint8mf8_t v3 = __riscv_vand_vx_i8mf8 (v2, x, 4);
+    vint8mf8_t v4 = __riscv_vand_vx_i8mf8_m (mask, v3, x, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
+
+/*
+** f6:
+**	vsetvli\t[a-x0-9]+,zero,e8,mf8,ta,ma
+**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vsetivli\tzero,4,e8,mf8,tu,mu
+**	vle8\.v\tv[0-9]+,0\([a-x0-9]+\)
+**	vle8.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+**	vand\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vand\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
+**	vse8.v\tv[0-9]+,0\([a-x0-9]+\)
+**	ret
+*/
+void f6 (void * in, void *out, int8_t x)
+{
+    vbool64_t mask = *(vbool64_t*)in;
+    asm volatile ("":::"memory");
+    vint8mf8_t v = __riscv_vle8_v_i8mf8 (in, 4);
+    vint8mf8_t v2 = __riscv_vle8_v_i8mf8_tumu (mask, v, in, 4);
+    vint8mf8_t v3 = __riscv_vand_vx_i8mf8 (v2, x, 4);
+    vint8mf8_t v4 = __riscv_vand_vx_i8mf8_tumu (mask, v3, v2, x, 4);
+    __riscv_vse8_v_i8mf8 (out, v4, 4);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-90.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-90.c
new file mode 100644
index 00000000000..586e26499db
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-90.c
@@ -0,0 +1,18 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32 -O3" } */
+#include "riscv_vector.h"
+
+void f (void * in, void *out, int32_t x, int n)
+{
+  for (int i = 0; i < n; i++) {
+    vint64m1_t v = __riscv_vle64_v_i64m1 (in + i + 1, 4);
+    vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + i + 2, 4);
+    vint64m1_t v3 = __riscv_vsub_vx_i64m1 (v2, x, 4);
+    vint64m1_t v4 = __riscv_vsub_vx_i64m1_tu (v3, v2, x, 4);
+    __riscv_vse64_v_i64m1 (out + i + 2, v4, 4);
+  }
+}
+
+/* { dg-final { scan-assembler-times {vlse64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*zero\s+\.L[0-9]+\:\s+} 1 } } */
+/* { dg-final { scan-assembler-times {vsub\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 2 } } */
+/* { dg-final { scan-assembler-not {vmv} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-91.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-91.c
new file mode 100644
index 00000000000..295c1f68a28
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-91.c
@@ -0,0 +1,123 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f0:
+**  ...
+**	vrsub\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**	vrsub\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**  ...
+**	ret
+*/
+void f0 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vrsub_vx_i64m1 (v2, -16, 4);
+  vint64m1_t v4 = __riscv_vrsub_vx_i64m1 (v3, -16, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f1:
+**  ...
+**	vrsub\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**	vrsub\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**  ...
+**	ret
+*/
+void f1 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vrsub_vx_i64m1 (v2, 15, 4);
+  vint64m1_t v4 = __riscv_vrsub_vx_i64m1 (v3, 15, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f2:
+**  ...
+**	vrsub\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vrsub\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**  ...
+**	ret
+*/
+void f2 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vrsub_vx_i64m1 (v2, 16, 4);
+  vint64m1_t v4 = __riscv_vrsub_vx_i64m1 (v3, 16, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f3:
+**  ...
+**	vrsub\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vrsub\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**  ...
+**	ret
+*/
+void f3 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vrsub_vx_i64m1 (v2, 0xAAAAAAAA, 4);
+  vint64m1_t v4 = __riscv_vrsub_vx_i64m1 (v3, 0xAAAAAAAA, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f4:
+**  ...
+**	vrsub\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vrsub\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**  ...
+**	ret
+*/
+void f4 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vrsub_vx_i64m1 (v2, 0xAAAAAAAAAAAAAAAA, 4);
+  vint64m1_t v4 = __riscv_vrsub_vx_i64m1 (v3, 0xAAAAAAAAAAAAAAAA, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f5:
+**  ...
+**	vrsub\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vrsub\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**  ...
+**	ret
+*/
+void f5 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vrsub_vx_i64m1 (v2, 0xAAAAAAAAAAAAAAAA, 4);
+  vint64m1_t v4 = __riscv_vrsub_vx_i64m1 (v3, 0xAAAAAAAAAAAAAAAA, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f6:
+**  ...
+**	vrsub\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vrsub\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**  ...
+**	ret
+*/
+void f6 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vrsub_vx_i64m1 (v2, x, 4);
+  vint64m1_t v4 = __riscv_vrsub_vx_i64m1 (v3, x, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-92.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-92.c
new file mode 100644
index 00000000000..cade110e8b3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-92.c
@@ -0,0 +1,72 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32 -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f0:
+**  ...
+**	vrsub\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**	vrsub\.vi\tv[0-9]+,\s*v[0-9]+,\s*-16
+**  ...
+**	ret
+*/
+void f0 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vrsub_vx_i64m1 (v2, -16, 4);
+  vint64m1_t v4 = __riscv_vrsub_vx_i64m1 (v3, -16, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f1:
+**  ...
+**	vrsub\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**	vrsub\.vi\tv[0-9]+,\s*v[0-9]+,\s*15
+**  ...
+**	ret
+*/
+void f1 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vrsub_vx_i64m1 (v2, 15, 4);
+  vint64m1_t v4 = __riscv_vrsub_vx_i64m1 (v3, 15, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f2:
+**  ...
+**	vrsub\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vrsub\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**  ...
+**	ret
+*/
+void f2 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vrsub_vx_i64m1 (v2, 16, 4);
+  vint64m1_t v4 = __riscv_vrsub_vx_i64m1 (v3, 16, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f3:
+**  ...
+**	vrsub\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**	vrsub\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
+**  ...
+**	ret
+*/
+void f3 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vrsub_vx_i64m1 (v2, 0xAAAAAAA, 4);
+  vint64m1_t v4 = __riscv_vrsub_vx_i64m1 (v3, 0xAAAAAAA, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-93.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-93.c
new file mode 100644
index 00000000000..27798098ca0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-93.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32 -O3" } */
+#include "riscv_vector.h"
+
+void f (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vrsub_vx_i64m1 (v2, 0xAAAAAAAA, 4);
+  vint64m1_t v4 = __riscv_vrsub_vx_i64m1_tu (v3, v2, 0xAAAAAAAA, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/* { dg-final { scan-assembler-times {vlse64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*zero} 1 } } */
+/* { dg-final { scan-assembler-times {vsub\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 2 } } */
+/* { dg-final { scan-assembler-not {vmv} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-94.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-94.c
new file mode 100644
index 00000000000..e51589c6d01
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-94.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32 -O3" } */
+#include "riscv_vector.h"
+
+void f (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vrsub_vx_i64m1 (v2, 0xAAAAAAAAAAAAAAAA, 4);
+  vint64m1_t v4 = __riscv_vrsub_vx_i64m1_tu (v3, v2, 0xAAAAAAAAAAAAAAAA, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/* { dg-final { scan-assembler-times {vlse64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*zero} 1 } } */
+/* { dg-final { scan-assembler-times {vsub\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 2 } } */
+/* { dg-final { scan-assembler-not {vmv} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-95.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-95.c
new file mode 100644
index 00000000000..e06228a0c84
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-95.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32 -O3" } */
+#include "riscv_vector.h"
+
+void f (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vrsub_vx_i64m1 (v2, x, 4);
+  vint64m1_t v4 = __riscv_vrsub_vx_i64m1_tu (v3, v2, x, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/* { dg-final { scan-assembler-times {vlse64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*zero} 1 } } */
+/* { dg-final { scan-assembler-times {vsub\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 2 } } */
+/* { dg-final { scan-assembler-not {vmv} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-96.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-96.c
new file mode 100644
index 00000000000..d1bbb78f5ed
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-96.c
@@ -0,0 +1,18 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32 -O3" } */
+#include "riscv_vector.h"
+
+void f (void * in, void *out, int32_t x, int n)
+{
+  for (int i = 0; i < n; i++) {
+    vint64m1_t v = __riscv_vle64_v_i64m1 (in + i + 1, 4);
+    vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + i + 2, 4);
+    vint64m1_t v3 = __riscv_vrsub_vx_i64m1 (v2, x, 4);
+    vint64m1_t v4 = __riscv_vrsub_vx_i64m1_tu (v3, v2, x, 4);
+    __riscv_vse64_v_i64m1 (out + i + 2, v4, 4);
+  }
+}
+
+/* { dg-final { scan-assembler-times {vlse64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*zero\s+\.L[0-9]+\:\s+} 1 } } */
+/* { dg-final { scan-assembler-times {vsub\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 2 } } */
+/* { dg-final { scan-assembler-not {vmv} } } */