@@ -875,6 +875,7 @@ if [gas_64_check] then {
run_dump_test "x86-64-sysenter-amd"
run_list_test "x86-64-sysenter-amd" "-mamd64"
run_dump_test "insn-64"
+ run_dump_test "insn-Phi"
run_dump_test "noreg64"
run_list_test "noreg64"
run_dump_test "noreg64-data16"
@@ -0,0 +1,21 @@
+#as: --divide
+#objdump: -sj.text
+#name: .insn (Xeon Phi)
+#xfail: *-*-darwin*
+
+.*: +file format .*
+
+Contents of section .text:
+ 0000 c5fbae78 40c5faae 7840c4c1 7aaef0c4 .*
+ 0010 e1faaef1 c4e06074 e7c5d885 e0ffffff .*
+ 0020 c5f841d1 c5f842d1 c5f843d1 c56895c1 .*
+ 0030 c5e897f9 c4c3783e d103c5f8 48d1c5f8 .*
+ 0040 49d1c5f8 90d1c578 93d1c4c1 7892d1c5 .*
+ 0050 f844d1c5 f845d1c5 f898d1c5 f846d1c5 .*
+ 0060 f847d1c4 c17abdc8 c461fabd c1c57ab8 .*
+ 0070 c1c4c1fa b8c8c5fb aef1c4c1 fbaef0c4 .*
+ 0080 c17abcc8 c461fabc c1c57bbc c1c4c1fb .*
+ 0090 bcc8c5f8 184f40c4 c1781850 40c4c178 .*
+ 00a0 185f40c5 f8182d55 ffffffc4 a1781834 .*
+ 00b0 41c4c178 183c88c5 f81824c5 00000000 .*
+ 00c0 c5f81840 40.*
@@ -0,0 +1,44 @@
+ .text
+Phi:
+ .insn VEX.L0.f2.0f 0xae/7, 0x40(%rax) # clevict0 0x40(%rax) (/7 = opcode extension in ModR/M.reg)
+ .insn VEX.L0.f3.0f 0xae/7, 0x40(%rax) # clevict1 0x40(%rax)
+ .insn VEX.L0.f3.0f 0xae/6, %r8d # delay %r8d
+ .insn VEX.L0.f3.0f 0xae/6, %rcx # delay %rcx (VEX.W1 inferred from 64-bit operand; see .d)
+ .insn VEX.L0.W0 0x74, $Phi-1f{:s8}, %k3 # jkzd Phi, %k3 ({:s8} = 8-bit signed displacement form)
+1:
+ .insn VEX.L0.0f.W0 0x85, $Phi-2f{:s32}, %k4 # jknzd Phi, %k4 ({:s32} = 32-bit signed displacement form)
+2:
+ .insn VEX.L0.0f.W0 0x41, %k1, %k2 # kand %k1, %k2
+ .insn VEX.L0.0f.W0 0x42, %k1, %k2 # kandn %k1, %k2
+ .insn VEX.L0.0f.W0 0x43, %k1, %k2 # kandnr %k1, %k2
+ .insn VEX.L0.0f.W0 0x95, %k1, %k2, %r8 # kconcath %k1, %k2, %r8
+ .insn VEX.L0.0f.W0 0x97, %k1, %k2, %rdi # kconcatl %k1, %k2, %rdi
+ .insn VEX.L0.0f3a.W0 0x3e, $3, %r9, %k2 # kextract $3, %r9, %k2
+ .insn VEX.L0.0f.W0 0x48, %k1, %k2 # kmergel1h %k1, %k2
+ .insn VEX.L0.0f.W0 0x49, %k1, %k2 # kmergel1l %k1, %k2
+ .insn VEX.L0.0f.W0 0x90, %k1, %k2 # kmov %k1, %k2 (mask-to-mask form)
+ .insn VEX.L0.0f.W0 0x93, %k1, %r10d # kmov %k1, %r10d (mask-to-GPR form)
+ .insn VEX.L0.0f.W0 0x92, %r9d, %k2 # kmov %r9d, %k2 (GPR-to-mask form)
+ .insn VEX.L0.0f.W0 0x44, %k1, %k2 # knot %k1, %k2
+ .insn VEX.L0.0f.W0 0x45, %k1, %k2 # kor %k1, %k2
+ .insn VEX.L0.0f.W0 0x98, %k1, %k2 # kortest %k1, %k2
+ .insn VEX.L0.0f.W0 0x46, %k1, %k2 # kxnor %k1, %k2
+ .insn VEX.L0.0f.W0 0x47, %k1, %k2 # kxor %k1, %k2
+ .insn VEX.L0.f3.0f 0xbd, %r8d, %ecx # lzcnt %r8d, %ecx
+ .insn VEX.L0.f3.0f 0xbd, %rcx, %r8 # lzcnt %rcx, %r8 (W inferred from 64-bit operands)
+ .insn VEX.L0.f3.0f 0xb8, %ecx, %r8d # popcnt %ecx, %r8d
+ .insn VEX.L0.f3.0f 0xb8, %r8, %rcx # popcnt %r8, %rcx
+ .insn VEX.L0.f2.0f 0xae/6, %ecx # spflt %ecx
+ .insn VEX.L0.f2.0f 0xae/6, %r8 # spflt %r8
+ .insn VEX.L0.f3.0f 0xbc, %r8d, %ecx # tzcnt %r8d, %ecx
+ .insn VEX.L0.f3.0f 0xbc, %rcx, %r8 # tzcnt %rcx, %r8
+ .insn VEX.L0.f2.0f 0xbc, %ecx, %r8d # tzcnti %ecx, %r8d
+ .insn VEX.L0.f2.0f 0xbc, %r8, %rcx # tzcnti %r8, %rcx
+ .insn VEX.L0.0f 0x18/1, 0x40(%rdi) # vprefetch0 0x40(%rdi)
+ .insn VEX.L0.0f 0x18/2, 0x40(%r8) # vprefetch1 0x40(%r8)
+ .insn VEX.L0.0f 0x18/3, 0x40(%r15) # vprefetch2 0x40(%r15)
+ .insn VEX.L0.0f 0x18/5, Phi(%rip) # vprefetche0 Phi(%rip) (RIP-relative operand)
+ .insn VEX.L0.0f 0x18/6, (%rcx,%r8,2) # vprefetche1 (%rcx,%r8,2)
+ .insn VEX.L0.0f 0x18/7, (%r8,%rcx,4) # vprefetche2 (%r8,%rcx,4)
+ .insn VEX.L0.0f 0x18/4, (,%rax,8) # vprefetchenta (,%rax,8)
+ .insn VEX.L0.0f 0x18/0, 0x40(%rax) # vprefetchnta 0x40(%rax)