@@ -139,6 +139,7 @@ arch_entry;
static void update_code_flag (int, int);
static void s_insn (int);
+static void s_noopt (int);
static void set_code_flag (int);
static void set_16bit_gcc_code_flag (int);
static void set_intel_syntax (int);
@@ -1231,7 +1232,7 @@ const pseudo_typeS md_pseudo_table[] =
{"value", cons, 2},
{"slong", signed_cons, 4},
{"insn", s_insn, 0},
- {"noopt", s_ignore, 0},
+ {"noopt", s_noopt, 0},
{"optim", s_ignore, 0},
{"code16gcc", set_16bit_gcc_code_flag, CODE_16BIT},
{"code16", set_code_flag, CODE_16BIT},
@@ -4992,6 +4993,21 @@ optimize_encoding (void)
     }
 }
 
+/* Handle the .noopt directive: turn off instruction size optimization
+   for the remainder of the input, overriding any optimization enabled
+   via the -O family of command line options.  Arguments, if any, are
+   diagnosed and ignored.  */
+static void
+s_noopt (int dummy ATTRIBUTE_UNUSED)
+{
+  if (!is_it_end_of_statement ())
+    as_warn (_("`.noopt' arguments ignored"));
+
+  optimize = 0;
+  optimize_for_space = 0;
+
+  ignore_rest_of_line ();
+}
+
 /* Return non-zero for load instruction.  */
 static int
 load_insn_p (void)
@@ -763,6 +763,10 @@ be suffixed by @code{@{:d@var{n}@}} to s
This can be combined with an embedded broadcast specifier:
@samp{8(%eax)@{1to8:d8@}}.
+@cindex @code{noopt} directive
+@item .noopt
+Disable instruction size optimization.
+
@c FIXME: Document other x86 specific directives ? Eg: .code16gcc,
@end table
@@ -614,6 +614,7 @@ if [gas_32_check] then {
run_list_test "optimize-6a" "-I${srcdir}/$subdir -march=+noavx -al"
run_dump_test "optimize-6b"
run_list_test "optimize-7" "-I${srcdir}/$subdir -march=+noavx2 -al"
+ run_dump_test "noopt"
run_dump_test "lea-optimize"
run_dump_test "lea16-optimize"
run_dump_test "lea16-optimize2"
@@ -0,0 +1,4 @@
+#as: -Os -I${srcdir}/$subdir
+#objdump: -drw
+#name: .noopt directive
+#dump: optimize-3.d
@@ -0,0 +1,2 @@
+ .noopt
+ .include "optimize-3.s"
@@ -1,4 +1,4 @@
-#as: -Os
+#as: -Os --defsym USE_PREFIX=1
#objdump: -drw
#name: optimized encoding 3 with -Os
@@ -1,24 +1,32 @@
# Check instructions with optimized encoding
+ .macro noopt insn:vararg
+ .ifdef USE_PREFIX
+ {nooptimize} \insn
+ .else
+ \insn
+ .endif
+ .endm
+
.text
_start:
- {nooptimize} testl $0x7f, %eax
+ noopt testl $0x7f, %eax
- {nooptimize} lock xchg %ecx, (%edx)
- {nooptimize} lock xchg (%ecx), %edx
+ noopt lock xchg %ecx, (%edx)
+ noopt lock xchg (%ecx), %edx
- {nooptimize} vmovdqa32 %ymm1, %ymm2
- {nooptimize} vmovdqa64 %ymm1, %ymm2
- {nooptimize} vmovdqu8 %xmm1, %xmm2
- {nooptimize} vmovdqu16 %xmm1, %xmm2
- {nooptimize} vmovdqu32 %xmm1, %xmm2
- {nooptimize} vmovdqu64 %xmm1, %xmm2
+ noopt vmovdqa32 %ymm1, %ymm2
+ noopt vmovdqa64 %ymm1, %ymm2
+ noopt vmovdqu8 %xmm1, %xmm2
+ noopt vmovdqu16 %xmm1, %xmm2
+ noopt vmovdqu32 %xmm1, %xmm2
+ noopt vmovdqu64 %xmm1, %xmm2
- {nooptimize} vpandd %xmm2, %xmm3, %xmm4
- {nooptimize} vpandq %ymm2, %ymm3, %ymm4
- {nooptimize} vpandnd %ymm2, %ymm3, %ymm4
- {nooptimize} vpandnq %xmm2, %xmm3, %xmm4
- {nooptimize} vpord %xmm2, %xmm3, %xmm4
- {nooptimize} vporq %ymm2, %ymm3, %ymm4
- {nooptimize} vpxord %ymm2, %ymm3, %ymm4
- {nooptimize} vpxorq %xmm2, %xmm3, %xmm4
+ noopt vpandd %xmm2, %xmm3, %xmm4
+ noopt vpandq %ymm2, %ymm3, %ymm4
+ noopt vpandnd %ymm2, %ymm3, %ymm4
+ noopt vpandnq %xmm2, %xmm3, %xmm4
+ noopt vpord %xmm2, %xmm3, %xmm4
+ noopt vporq %ymm2, %ymm3, %ymm4
+ noopt vpxord %ymm2, %ymm3, %ymm4
+ noopt vpxorq %xmm2, %xmm3, %xmm4