match.pd: Canonicalize (signed x << c) >> c [PR101955]
Canonicalize (signed x << c) >> c into a sign extension of the lowest
precision(type) - c bits of x, provided those bits have a mode
precision or a precision of 1.  Also combine this rule with the
existing (unsigned x << c) >> c -> x & ((unsigned)-1 >> c)
simplification to avoid a duplicate pattern.  Tested successfully on
x86_64 and x86 targets.
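
For illustration only (not part of the patch): with a 32-bit int and
c == 24, the signed form is equivalent to a narrowing conversion to an
8-bit signed type followed by a widening one, which is the
convert (convert:stype @0) shape the new pattern emits.  A minimal
sketch, assuming GCC's usual modular conversions and arithmetic right
shift of negative values; the helper names are made up for this example:

#include <assert.h>
#include <stdint.h>

/* Original form, (x << 24) >> 24; the left shift goes through unsigned
   arithmetic only to sidestep signed-overflow UB in ISO C.  */
static int32_t
shift_pair (int32_t x)
{
  return (int32_t) ((uint32_t) x << 24) >> 24;
}

/* Canonical form: sign-extend the low 32 - 24 = 8 bits of x.  */
static int32_t
canonical (int32_t x)
{
  return (int32_t) (int8_t) x;
}

int
main (void)
{
  assert (shift_pair (0x1234) == canonical (0x1234));  /* low byte 0x34 -> 52 */
  assert (shift_pair (0x12f4) == canonical (0x12f4));  /* low byte 0xf4 -> -12 */
  return 0;
}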

	PR middle-end/101955

gcc/ChangeLog:

	* match.pd ((signed x << c) >> c): New canonicalization.

gcc/testsuite/ChangeLog:

	* gcc.dg/pr101955.c: New test.
---
gcc/match.pd | 20 +++++++----
gcc/testsuite/gcc.dg/pr101955.c | 63 +++++++++++++++++++++++++++++++++
2 files changed, 77 insertions(+), 6 deletions(-)
create mode 100644 gcc/testsuite/gcc.dg/pr101955.c
diff --git a/gcc/match.pd b/gcc/match.pd
--- a/gcc/match.pd
+++ b/gcc/match.pd
@@ -3758,13 +3758,21 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
- TYPE_PRECISION (TREE_TYPE (@2)))))
(bit_and (convert @0) (lshift { build_minus_one_cst (type); } @1))))

-/* Optimize (x << c) >> c into x & ((unsigned)-1 >> c) for unsigned
-   types.  */
+/* For (x << c) >> c, optimize into x & ((unsigned)-1 >> c) for
+   unsigned x, or sign-extend the lowest precision(type) - c bits
+   of signed x (if they have mode precision or a precision of 1).  */
(simplify
- (rshift (lshift @0 INTEGER_CST@1) @1)
- (if (TYPE_UNSIGNED (type)
-      && (wi::ltu_p (wi::to_wide (@1), element_precision (type))))
-  (bit_and @0 (rshift { build_minus_one_cst (type); } @1))))
+ (rshift (nop_convert? (lshift @0 INTEGER_CST@1)) @@1)
+ (if (wi::ltu_p (wi::to_wide (@1), element_precision (type)))
+  (if (TYPE_UNSIGNED (type))
+   (bit_and @0 (rshift { build_minus_one_cst (type); } @1))
+   (if (INTEGRAL_TYPE_P (type))
+    (with {
+      int width = element_precision (type) - tree_to_uhwi (@1);
+      tree stype = build_nonstandard_integer_type (width, 0);
+     }
+     (if (width == 1 || type_has_mode_precision_p (stype))
+      (convert (convert:stype @0))))))))

/* Optimize x >> x into 0 */
(simplify
diff --git a/gcc/testsuite/gcc.dg/pr101955.c b/gcc/testsuite/gcc.dg/pr101955.c
new file mode 100644
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/pr101955.c
@@ -0,0 +1,63 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -fdump-tree-optimized" } */
+
+__attribute__((noipa)) int
+t1 (int x)
+{
+ int y = x << 31;
+ int z = y >> 31;
+ return z;
+}
+
+__attribute__((noipa)) int
+t2 (unsigned int x)
+{
+ int y = x << 31;
+ int z = y >> 31;
+ return z;
+}
+
+__attribute__((noipa)) int
+t3 (int x)
+{
+ return (x << 31) >> 31;
+}
+
+__attribute__((noipa)) int
+t4 (int x)
+{
+ return (x << 24) >> 24;
+}
+
+__attribute__((noipa)) int
+t5 (int x)
+{
+ return (x << 16) >> 16;
+}
+
+__attribute__((noipa)) long long
+t6 (long long x)
+{
+ return (x << 63) >> 63;
+}
+
+__attribute__((noipa)) long long
+t7 (long long x)
+{
+ return (x << 56) >> 56;
+}
+
+__attribute__((noipa)) long long
+t8 (long long x)
+{
+ return (x << 48) >> 48;
+}
+
+__attribute__((noipa)) long long
+t9 (long long x)
+{
+ return (x << 32) >> 32;
+}
+
+/* { dg-final { scan-tree-dump-not " >> " "optimized" } } */
+/* { dg-final { scan-tree-dump-not " << " "optimized" } } */
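
As a quick sanity check outside the testsuite, the unsigned arm of the
merged rule ((x << c) >> c -> x & ((unsigned)-1 >> c)) can be checked
for every valid shift count with a small host program.  This is only a
sketch, assuming a 32-bit unsigned int; the helper names are made up
here:

#include <assert.h>

static unsigned
shift_pair (unsigned x, unsigned c)
{
  return (x << c) >> c;
}

/* The mask form produced by the pattern's
   bit_and @0 (rshift { build_minus_one_cst (type); } @1).  */
static unsigned
masked (unsigned x, unsigned c)
{
  return x & ((unsigned) -1 >> c);
}

int
main (void)
{
  for (unsigned c = 0; c < 32; c++)
    {
      assert (shift_pair (0xdeadbeefu, c) == masked (0xdeadbeefu, c));
      assert (shift_pair (0x12345678u, c) == masked (0x12345678u, c));
    }
  return 0;
}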